diff --git a/.circleci/config.yml b/.circleci/config.yml index 53dd0e16..5275a449 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,38 +1,64 @@ -version: 2 +version: 2.1 jobs: + docker-build: + resource_class: small + working_directory: /home/circleci/zync + docker: + - image: circleci/buildpack-deps:latest + environment: + POSTGRES_CONTAINER_NAME: db + DATABASE_URL: postgresql://postgres:postgres@db:5432/zync + POSTGRES_USER: postgres + POSTGRES_PASSWORD: postgres + POSTGRES_DB: zync + RAILS_ENV: production + steps: + - checkout + - setup_remote_docker + - run: docker build --tag zync:build --file ./Dockerfile . + - run: docker network create net0 + - run: docker run --net net0 --name ${POSTGRES_CONTAINER_NAME} -d -p 5432:5432 -e POSTGRES_USER=${POSTGRES_USER} -e POSTGRES_PASSWORD=${POSTGRES_PASSWORD} -e POSTGRES_DB=${POSTGRES_DB} postgres:12-alpine + - run: + command: | + docker run --net net0 -e RAILS_ENV=${RAILS_ENV} -e DATABASE_URL=${DATABASE_URL} \ + zync:build rails db:setup build: - working_directory: /home/circleci/zync + parameters: + postgresql_version: + type: string + working_directory: /opt/app-root/zync docker: - - image: circleci/ruby:2.4.6 - - image: circleci/postgres:10-alpine-ram + - image: registry.access.redhat.com/ubi7/ruby-27 + - image: circleci/postgres:<< parameters.postgresql_version >>-ram environment: RAILS_ENV: test DISABLE_SPRING: 1 # we can't really run spring as it hangs on local circleci build DATABASE_URL: postgres://postgres:@localhost/circle_test steps: - - run: sudo sh -c 'apt update && apt install -y --no-install-recommends -y postgresql-client' - - checkout # Restore bundle cache - restore_cache: keys: - - zync-bundle-{{ arch }}-{{ checksum "Gemfile.lock" }} - - zync-bundle-{{ arch }}-{{ .Branch }} - - zync-branch-{{ arch }}-master + - zync-bundle-v2-{{ arch }}-{{ checksum "Gemfile.lock" }} + - zync-bundle-v2-{{ arch }}-{{ .Branch }} + - zync-branch-v2-{{ arch }}-master + run: name: bundle install command: | - 
gem install bundler --version=2.0.1 - bundle install --deployment --path vendor/bundle --jobs $(grep -c processor /proc/cpuinfo) --retry 3 + gem install bundler --version=$(grep -A 1 "BUNDLED WITH" Gemfile.lock | tr -d ' '| tail -n 1) + bundle config --local force_ruby_platform true + bundle config set --local deployment 'true' + bundle config set --local path 'vendor/bundle' + bundle install --jobs $(grep -c processor /proc/cpuinfo) --retry 3 - run: name: boot zync command: BUNDLE_WITHOUT=development:test bundle exec bin/rails runner --environment=production 'puts Rails.env' - save_cache: - key: zync-bundle-{{ arch }}-{{ checksum "Gemfile.lock" }} + key: zync-bundle-v2-{{ arch }}-{{ checksum "Gemfile.lock" }} paths: - vendor/bundle @@ -53,6 +79,16 @@ jobs: path: test/reports - save_cache: - key: zync-branch-{{ arch }}-{{ .Branch }} + key: zync-branch-v2-{{ arch }}-{{ .Branch }} paths: - vendor/bundle + +workflows: + version: 2.1 + build_and_test_docker: + jobs: + - build: + matrix: + parameters: + postgresql_version: [ "10-alpine", "12-alpine" ] + - docker-build diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 00000000..e5a14eb4 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,6 @@ +Dockerfile +.dockerignore +.bundle +log +openshift.local.clusterup +tmp diff --git a/.github/workflows/fast-forward-candidate-branch.yml b/.github/workflows/fast-forward-candidate-branch.yml new file mode 100644 index 00000000..43e9e16a --- /dev/null +++ b/.github/workflows/fast-forward-candidate-branch.yml @@ -0,0 +1,28 @@ +name: Fast-forward candidate branch to HEAD of default branch +on: + workflow_dispatch: + inputs: + release: + description: 'The release version number (e.g, 2.11)' + required: true + ref: + description: 'The SHA1 or branch name the candidate branch will point to' + required: true + default: origin/master + +jobs: + fast-forward: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + with: + ref: 3scale-${{ github.event.inputs.release 
}}-candidate + fetch-depth: 0 + token: ${{ secrets.FF_CANDIDATE_BRANCH_PAT_TOKEN }} + - run: | + export candidate_branch="3scale-${{ github.event.inputs.release }}-candidate" + git checkout ${candidate_branch} && git pull || git checkout -b ${candidate_branch} + git merge ${{ github.event.inputs.ref }} --ff-only + git push origin ${candidate_branch} + + name: Push to candidate branch diff --git a/.gitignore b/.gitignore index c559fad9..0d07a04c 100644 --- a/.gitignore +++ b/.gitignore @@ -24,3 +24,8 @@ /coverage .byebug_history +.env + +openshift.local.clusterup +.ruby-version +.tool-versions diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 00000000..d271e28b --- /dev/null +++ b/.travis.yml @@ -0,0 +1,31 @@ +language: ruby +sudo: false +dist: bionic +arch: +- ppc64le +- s390x +services: + - postgresql + +env: + - DATABASE_URL=postgres://postgres@localhost:5431/travis_test + - RAILS_ENV=test + +include: +- os: linux + addons: + packages: + - build-essential + - make + - gcc + - wget + - gem + - shared-mime-info.ppc64le + - zlib.ppc64le + - zlib-devel.ppc64le + +before_install: + - ./.travis/setup_${TRAVIS_OS_NAME}_environment.sh + +script: + - ./.travis/run_test_${TRAVIS_OS_NAME}.sh diff --git a/.travis/run_test_linux.sh b/.travis/run_test_linux.sh new file mode 100755 index 00000000..91bec766 --- /dev/null +++ b/.travis/run_test_linux.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +# Rails Tests +bundle exec bin/rails test + +# License Finder +bundle exec license_finder + diff --git a/.travis/setup_linux_environment.sh b/.travis/setup_linux_environment.sh new file mode 100755 index 00000000..bd95eac9 --- /dev/null +++ b/.travis/setup_linux_environment.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +set -ev + +# Config & Install +gem install bundler:$(grep -A 1 "BUNDLED WITH" Gemfile.lock | tr -d ' '| tail -n 1) +bundle install --deployment --path vendor/bundle --jobs $(grep -c processor /proc/cpuinfo) --retry 3 + +# Rails db:setup +bundle exec bin/rails
db:wait db:setup diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 00000000..0b6a92c4 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,33 @@ +FROM registry.access.redhat.com/ubi8/ruby-27 + +USER root +RUN dnf --setopt=skip_missing_names_on_install=False,tsflags=nodocs --save \ + && rpm -Uvh https://download.postgresql.org/pub/repos/yum/reporpms/EL-8-x86_64/pgdg-redhat-repo-latest.noarch.rpm \ + && dnf update -y \ + && dnf remove -y postgresql \ + && dnf install -y shared-mime-info postgresql12 postgresql12-devel postgresql12-libs \ + && dnf clean all \ + && rm -rf /var/cache/yum + +USER default +WORKDIR ${APP_ROOT} + +RUN gem install bundler --version=2.2.19 --no-document + +COPY --chown=default:root Gemfile* ./ + +RUN bundle config build.pg --with-pg-config=/usr/pgsql-12/bin/pg_config \ + && bundle install --deployment --path vendor/bundle --jobs $(grep -c processor /proc/cpuinfo) --retry 3 + +COPY --chown=default:root . . + +ENV RAILS_LOG_TO_STDOUT=1 + +RUN bundle exec bin/rails server -e production -d; \ + rm -rf tmp/pids + +RUN mkdir -p -m 0775 tmp/cache log \ + && chown -fR default tmp log db \ + && chmod -fR g+w tmp log db + +CMD [".s2i/bin/run"] diff --git a/Gemfile b/Gemfile index d924eb93..134c5b58 100644 --- a/Gemfile +++ b/Gemfile @@ -8,12 +8,11 @@ end # Bundle edge Rails instead: gem 'rails', github: 'rails/rails' -gem 'rails', '~> 5.2.3' +gem 'rails', '~> 6.1.2' gem 'pg', '>= 0.20' -gem 'schema_plus_enums' - # Use Puma as the app server -gem 'puma', '~> 3.12' + +gem 'puma', '~> 5.2' # Build JSON APIs with ease.
Read more: https://github.com/rails/jbuilder # gem 'jbuilder', '~> 2.5' # Use ActiveModel has_secure_password @@ -25,10 +24,12 @@ gem 'puma', '~> 3.12' # Use Rack CORS for handling Cross-Origin Resource Sharing (CORS), making cross-origin AJAX possible # gem 'rack-cors' -gem 'responders', '~> 2.4.1' -gem '3scale-api', '~> 0.1.9' +gem 'activerecord-pg_enum' + +gem 'responders', '~> 3.0.1' +gem '3scale-api' -gem 'bootsnap' +gem 'bootsnap', '>= 1.4.4' gem 'que', '>= 1.0.0.beta3' gem 'que-web' @@ -40,8 +41,9 @@ gem 'bugsnag-capistrano', '< 2', require: false # This fork allows setting SSL_CERT_FILE and SSL_CERT_DIR # https://github.com/nahi/httpclient/issues/369 -gem 'httpclient', github: 'mikz/httpclient', branch: 'ssl-env-cert' +gem 'httpclient', github: '3scale/httpclient', branch: 'ssl-env-cert' gem 'oauth2' +gem 'k8s-client', '>= 0.10' gem 'lograge' @@ -49,9 +51,9 @@ gem 'message_bus' # for publishing notifications about integration status gem 'validate_url' -gem 'prometheus-client', require: %w[prometheus/client] +gem 'prometheus-client', '~> 2.1.0', require: %w[prometheus/client] gem 'yabeda-rails' -gem 'yabeda-prometheus' +gem 'yabeda-prometheus', '~> 0.6.1' gem 'yabeda-puma-plugin' group :development, :test do @@ -61,15 +63,12 @@ group :development, :test do gem 'pry-rescue' gem 'pry-stack_explorer' - gem 'license_finder', '~> 5.8' + gem 'license_finder', '~> 6.10' gem 'license_finder_xml_reporter', git: 'https://github.com/3scale/license_finder_xml_reporter.git', tag: '1.0.0' + # rubyzip is a transitive dependency from license_finder with vulnerability on < 1.3.0 + gem 'rubyzip', '>= 1.3.0' - gem 'httplog' -end - -group :development do - # Spring speeds up development by keeping your application running in the background.
Read more: https://github.com/rails/spring - gem 'spring' + # gem 'httplog' end # Windows does not include zoneinfo files, so bundle the tzinfo-data gem @@ -77,6 +76,8 @@ gem 'tzinfo-data', platforms: [:mingw, :mswin, :x64_mingw, :jruby] group :test do gem 'minitest-reporters' - gem 'webmock', '~>3.5' + gem 'minitest-stub-const' + gem 'webmock' gem 'codecov', require: false + gem 'simplecov', '~> 0.21.2', require: false end diff --git a/Gemfile.lock b/Gemfile.lock index 2d93fdab..281c1ec0 100644 --- a/Gemfile.lock +++ b/Gemfile.lock @@ -1,3 +1,10 @@ +GIT + remote: https://github.com/3scale/httpclient.git + revision: fec23fb32fb899b87a8b2c94e2d2069b6b4c633c + branch: ssl-env-cert + specs: + httpclient (2.8.3) + GIT remote: https://github.com/3scale/license_finder_xml_reporter.git revision: ab8072d11344b5e6d05565ccc84cb4745d01e6f8 @@ -6,157 +13,203 @@ GIT license_finder_xml_reporter (0.1.0) license_finder -GIT - remote: https://github.com/mikz/httpclient.git - revision: fec23fb32fb899b87a8b2c94e2d2069b6b4c633c - branch: ssl-env-cert - specs: - httpclient (2.8.3) - GEM remote: https://rubygems.org/ specs: - 3scale-api (0.1.9) - actioncable (5.2.3) - actionpack (= 5.2.3) + 3scale-api (1.4.0) + actioncable (6.1.4.1) + actionpack (= 6.1.4.1) + activesupport (= 6.1.4.1) nio4r (~> 2.0) websocket-driver (>= 0.6.1) - actionmailer (5.2.3) - actionpack (= 5.2.3) - actionview (= 5.2.3) - activejob (= 5.2.3) + actionmailbox (6.1.4.1) + actionpack (= 6.1.4.1) + activejob (= 6.1.4.1) + activerecord (= 6.1.4.1) + activestorage (= 6.1.4.1) + activesupport (= 6.1.4.1) + mail (>= 2.7.1) + actionmailer (6.1.4.1) + actionpack (= 6.1.4.1) + actionview (= 6.1.4.1) + activejob (= 6.1.4.1) + activesupport (= 6.1.4.1) mail (~> 2.5, >= 2.5.4) rails-dom-testing (~> 2.0) - actionpack (5.2.3) - actionview (= 5.2.3) - activesupport (= 5.2.3) - rack (~> 2.0) + actionpack (6.1.4.1) + actionview (= 6.1.4.1) + activesupport (= 6.1.4.1) + rack (~> 2.0, >= 2.0.9) rack-test (>= 0.6.3) rails-dom-testing 
(~> 2.0) - rails-html-sanitizer (~> 1.0, >= 1.0.2) - actionview (5.2.3) - activesupport (= 5.2.3) + rails-html-sanitizer (~> 1.0, >= 1.2.0) + actiontext (6.1.4.1) + actionpack (= 6.1.4.1) + activerecord (= 6.1.4.1) + activestorage (= 6.1.4.1) + activesupport (= 6.1.4.1) + nokogiri (>= 1.8.5) + actionview (6.1.4.1) + activesupport (= 6.1.4.1) builder (~> 3.1) erubi (~> 1.4) rails-dom-testing (~> 2.0) - rails-html-sanitizer (~> 1.0, >= 1.0.3) - activejob (5.2.3) - activesupport (= 5.2.3) + rails-html-sanitizer (~> 1.1, >= 1.2.0) + activejob (6.1.4.1) + activesupport (= 6.1.4.1) globalid (>= 0.3.6) - activemodel (5.2.3) - activesupport (= 5.2.3) - activerecord (5.2.3) - activemodel (= 5.2.3) - activesupport (= 5.2.3) - arel (>= 9.0) - activestorage (5.2.3) - actionpack (= 5.2.3) - activerecord (= 5.2.3) - marcel (~> 0.3.1) - activesupport (5.2.3) + activemodel (6.1.4.1) + activesupport (= 6.1.4.1) + activerecord (6.1.4.1) + activemodel (= 6.1.4.1) + activesupport (= 6.1.4.1) + activerecord-pg_enum (1.2.2) + activerecord (>= 4.1.0) + activesupport + pg + activestorage (6.1.4.1) + actionpack (= 6.1.4.1) + activejob (= 6.1.4.1) + activerecord (= 6.1.4.1) + activesupport (= 6.1.4.1) + marcel (~> 1.0.0) + mini_mime (>= 1.1.0) + activesupport (6.1.4.1) concurrent-ruby (~> 1.0, >= 1.0.2) - i18n (>= 0.7, < 2) - minitest (~> 5.1) - tzinfo (~> 1.1) - addressable (2.6.0) - public_suffix (>= 2.0.2, < 4.0) + i18n (>= 1.6, < 2) + minitest (>= 5.1) + tzinfo (~> 2.0) + zeitwerk (~> 2.3) + addressable (2.8.0) + public_suffix (>= 2.0.2, < 5.0) ansi (1.5.0) - arel (9.0.0) binding_of_caller (0.8.0) debug_inspector (>= 0.0.1) - bootsnap (1.4.4) + bootsnap (1.7.2) msgpack (~> 1.0) - bugsnag (6.11.1) + bugsnag (6.19.0) concurrent-ruby (~> 1.0) bugsnag-capistrano (1.1.2) - builder (3.2.3) - byebug (11.0.0) - codecov (0.1.14) - json - simplecov - url - coderay (1.1.2) - concurrent-ruby (1.1.5) - crack (0.4.3) - safe_yaml (~> 1.0.0) - crass (1.0.4) + builder (3.2.4) + byebug (11.1.3) + codecov 
(0.4.3) + simplecov (>= 0.15, < 0.22) + coderay (1.1.3) + concurrent-ruby (1.1.9) + crack (0.4.5) + rexml + crass (1.0.6) debug_inspector (0.0.3) - docile (1.3.1) - dry-initializer (2.5.0) - erubi (1.8.0) - erubis (2.7.0) - faraday (0.15.3) + docile (1.3.5) + dry-configurable (0.9.0) + concurrent-ruby (~> 1.0) + dry-core (~> 0.4, >= 0.4.7) + dry-container (0.7.2) + concurrent-ruby (~> 1.0) + dry-configurable (~> 0.1, >= 0.1.3) + dry-core (0.4.9) + concurrent-ruby (~> 1.0) + dry-equalizer (0.3.0) + dry-inflector (0.2.0) + dry-initializer (3.0.4) + dry-logic (0.6.1) + concurrent-ruby (~> 1.0) + dry-core (~> 0.2) + dry-equalizer (~> 0.2) + dry-struct (0.5.1) + dry-core (~> 0.4, >= 0.4.3) + dry-equalizer (~> 0.2) + dry-types (~> 0.13) + ice_nine (~> 0.11) + dry-types (0.13.4) + concurrent-ruby (~> 1.0) + dry-container (~> 0.3) + dry-core (~> 0.4, >= 0.4.4) + dry-equalizer (~> 0.2) + dry-inflector (~> 0.1, >= 0.1.2) + dry-logic (~> 0.4, >= 0.4.2) + erubi (1.10.0) + excon (0.71.1) + faraday (1.3.0) + faraday-net_http (~> 1.0) multipart-post (>= 1.2, < 3) - globalid (0.4.2) - activesupport (>= 4.2.0) - hashdiff (0.3.8) - httplog (1.3.0) - rack (>= 1.0) - rainbow (>= 2.0.0) - i18n (1.6.0) + ruby2_keywords + faraday-net_http (1.0.1) + globalid (0.5.2) + activesupport (>= 5.0) + hashdiff (1.0.1) + i18n (1.8.10) concurrent-ruby (~> 1.0) + ice_nine (0.11.2) interception (0.5) - its-it (1.3.0) - json (2.1.0) - jwt (2.1.0) - key_struct (0.4.2) - license_finder (5.8.0) + json (2.5.1) + jsonpath (0.9.9) + multi_json + to_regexp (~> 0.2.1) + jwt (2.2.2) + k8s-client (0.10.4) + dry-struct (~> 0.5.0) + dry-types (~> 0.13.0) + excon (~> 0.66) + hashdiff (~> 1.0.0) + jsonpath (~> 0.9.5) + recursive-open-struct (~> 1.1.0) + yajl-ruby (~> 1.4.0) + yaml-safe_load_stream (~> 0.1) + license_finder (6.10.1) bundler - rubyzip - thor - toml (= 0.2.0) + rubyzip (>= 1, < 3) + thor (~> 1.0.1) + tomlrb (>= 1.3, < 2.1) with_env (= 1.1.0) - xml-simple - lograge (0.10.0) + xml-simple (~> 1.1.5) + 
lograge (0.11.2) actionpack (>= 4) activesupport (>= 4) railties (>= 4) request_store (~> 1.0) - loofah (2.2.3) + loofah (2.12.0) crass (~> 1.0.2) nokogiri (>= 1.5.9) mail (2.7.1) mini_mime (>= 0.1.1) - marcel (0.3.3) - mimemagic (~> 0.3.2) - message_bus (2.1.6) + marcel (1.0.1) + message_bus (2.2.3) rack (>= 1.1.3) - method_source (0.9.2) - mimemagic (0.3.3) - mini_mime (1.0.1) - mini_portile2 (2.4.0) - minitest (5.11.3) - minitest-reporters (1.3.6) + method_source (1.0.0) + mini_mime (1.1.0) + mini_portile2 (2.6.1) + minitest (5.14.4) + minitest-reporters (1.4.3) ansi builder minitest (>= 5.0) ruby-progressbar - modware (0.1.3) - key_struct (~> 0.4) - msgpack (1.2.10) - multi_json (1.13.1) + minitest-stub-const (0.6) + msgpack (1.4.2) + multi_json (1.15.0) multi_xml (0.6.0) - multipart-post (2.0.0) - mustermann (1.0.3) - nio4r (2.3.1) - nokogiri (1.10.3) - mini_portile2 (~> 2.4.0) - oauth2 (1.4.1) - faraday (>= 0.8, < 0.16.0) + multipart-post (2.1.1) + mustermann (1.1.1) + ruby2_keywords (~> 0.0.1) + nio4r (2.5.8) + nokogiri (1.12.3) + mini_portile2 (~> 2.6.1) + racc (~> 1.4) + oauth2 (1.4.7) + faraday (>= 0.8, < 2.0) jwt (>= 1.0, < 3.0) multi_json (~> 1.3) multi_xml (~> 0.5) rack (>= 1.2, < 3) - parslet (1.8.2) - pg (1.1.4) - prometheus-client (0.9.0) - quantile (~> 0.2.1) - pry (0.12.2) - coderay (~> 1.1.0) - method_source (~> 0.9.0) - pry-byebug (3.7.0) + pg (1.2.3) + prometheus-client (2.1.0) + pry (0.13.1) + coderay (~> 1.1) + method_source (~> 1.0) + pry-byebug (3.9.0) byebug (~> 11.0) - pry (~> 0.10) + pry (~> 0.13.0) pry-rails (0.3.9) pry (>= 0.10.4) pry-rescue (1.5.0) @@ -165,152 +218,148 @@ GEM pry-stack_explorer (0.4.9.3) binding_of_caller (>= 0.7) pry (>= 0.9.11) - public_suffix (3.0.3) - puma (3.12.1) - quantile (0.2.1) - que (1.0.0.beta3) - que-web (0.8.0) - erubis + public_suffix (4.0.6) + puma (5.2.1) + nio4r (~> 2.0) + que (1.0.0.beta4) + que-web (0.9.3) que (~> 1.0.0.beta3) sinatra - rack (2.0.6) - rack-protection (2.0.5) + racc (1.5.2) + rack 
(2.2.3) + rack-protection (2.1.0) rack rack-test (1.1.0) rack (>= 1.0, < 3) - rails (5.2.3) - actioncable (= 5.2.3) - actionmailer (= 5.2.3) - actionpack (= 5.2.3) - actionview (= 5.2.3) - activejob (= 5.2.3) - activemodel (= 5.2.3) - activerecord (= 5.2.3) - activestorage (= 5.2.3) - activesupport (= 5.2.3) - bundler (>= 1.3.0) - railties (= 5.2.3) + rails (6.1.4.1) + actioncable (= 6.1.4.1) + actionmailbox (= 6.1.4.1) + actionmailer (= 6.1.4.1) + actionpack (= 6.1.4.1) + actiontext (= 6.1.4.1) + actionview (= 6.1.4.1) + activejob (= 6.1.4.1) + activemodel (= 6.1.4.1) + activerecord (= 6.1.4.1) + activestorage (= 6.1.4.1) + activesupport (= 6.1.4.1) + bundler (>= 1.15.0) + railties (= 6.1.4.1) sprockets-rails (>= 2.0.0) rails-dom-testing (2.0.3) activesupport (>= 4.2.0) nokogiri (>= 1.6) - rails-html-sanitizer (1.0.4) - loofah (~> 2.2, >= 2.2.2) - railties (5.2.3) - actionpack (= 5.2.3) - activesupport (= 5.2.3) + rails-html-sanitizer (1.4.1) + loofah (~> 2.3) + railties (6.1.4.1) + actionpack (= 6.1.4.1) + activesupport (= 6.1.4.1) method_source - rake (>= 0.8.7) - thor (>= 0.19.0, < 2.0) - rainbow (3.0.0) - rake (12.3.2) - request_store (1.4.1) + rake (>= 0.13) + thor (~> 1.0) + rake (13.0.6) + recursive-open-struct (1.1.0) + request_store (1.5.0) rack (>= 1.4) - responders (2.4.1) - actionpack (>= 4.2.0, < 6.0) - railties (>= 4.2.0, < 6.0) - ruby-progressbar (1.10.0) - rubyzip (1.2.2) - safe_yaml (1.0.4) - schema_monkey (2.1.5) - activerecord (>= 4.2) - modware (~> 0.1) - schema_plus_core (2.2.3) - activerecord (~> 5.0) - its-it (~> 1.2) - schema_monkey (~> 2.1) - schema_plus_enums (0.1.8) - activerecord (>= 4.2, < 5.3) - its-it (~> 1.2) - schema_plus_core - simplecov (0.16.1) + responders (3.0.1) + actionpack (>= 5.0) + railties (>= 5.0) + rexml (3.2.5) + ruby-progressbar (1.11.0) + ruby2_keywords (0.0.2) + rubyzip (2.3.0) + simplecov (0.21.2) docile (~> 1.1) - json (>= 1.8, < 3) - simplecov-html (~> 0.10.0) - simplecov-html (0.10.2) - sinatra (2.0.5) + 
simplecov-html (~> 0.11) + simplecov_json_formatter (~> 0.1) + simplecov-html (0.12.3) + simplecov_json_formatter (0.1.2) + sinatra (2.1.0) mustermann (~> 1.0) - rack (~> 2.0) - rack-protection (= 2.0.5) + rack (~> 2.2) + rack-protection (= 2.1.0) tilt (~> 2.0) - spring (2.0.2) - activesupport (>= 4.2) - sprockets (3.7.2) + sprockets (4.0.2) concurrent-ruby (~> 1.0) rack (> 1, < 3) - sprockets-rails (3.2.1) + sprockets-rails (3.2.2) actionpack (>= 4.0) activesupport (>= 4.0) sprockets (>= 3.0.0) - thor (0.20.3) - thread_safe (0.3.6) - tilt (2.0.9) - toml (0.2.0) - parslet (~> 1.8.0) - tzinfo (1.2.5) - thread_safe (~> 0.1) - url (0.3.2) - validate_url (1.0.6) + thor (1.0.1) + tilt (2.0.10) + to_regexp (0.2.1) + tomlrb (2.0.1) + tzinfo (2.0.4) + concurrent-ruby (~> 1.0) + validate_url (1.0.13) activemodel (>= 3.0.0) public_suffix - webmock (3.5.1) + webmock (3.11.2) addressable (>= 2.3.6) crack (>= 0.3.2) - hashdiff - websocket-driver (0.7.0) + hashdiff (>= 0.4.0, < 2.0.0) + websocket-driver (0.7.5) websocket-extensions (>= 0.1.0) - websocket-extensions (0.1.3) + websocket-extensions (0.1.5) with_env (1.1.0) - xml-simple (1.1.5) - yabeda (0.1.3) + xml-simple (1.1.8) + yabeda (0.8.0) concurrent-ruby dry-initializer - yabeda-prometheus (0.1.4) - yabeda - yabeda-puma-plugin (0.1.0) + yabeda-prometheus (0.6.1) + prometheus-client (>= 0.10, < 3.0) + rack + yabeda (~> 0.5) + yabeda-puma-plugin (0.6.0) json puma - yabeda - yabeda-rails (0.1.3) + yabeda (~> 0.5) + yabeda-rails (0.7.1) rails - yabeda + yabeda (~> 0.8) + yajl-ruby (1.4.1) + yaml-safe_load_stream (0.1.1) + zeitwerk (2.4.2) PLATFORMS ruby DEPENDENCIES - 3scale-api (~> 0.1.9) - bootsnap + 3scale-api + activerecord-pg_enum + bootsnap (>= 1.4.4) bugsnag bugsnag-capistrano (< 2) codecov httpclient! - httplog - license_finder (~> 5.8) + k8s-client (>= 0.10) + license_finder (~> 6.10) license_finder_xml_reporter! 
lograge message_bus minitest-reporters + minitest-stub-const oauth2 pg (>= 0.20) - prometheus-client + prometheus-client (~> 2.1.0) pry-byebug pry-rails pry-rescue pry-stack_explorer - puma (~> 3.12) + puma (~> 5.2) que (>= 1.0.0.beta3) que-web - rails (~> 5.2.3) - responders (~> 2.4.1) - schema_plus_enums - spring + rails (~> 6.1.2) + responders (~> 3.0.1) + rubyzip (>= 1.3.0) + simplecov (~> 0.21.2) tzinfo-data validate_url - webmock (~> 3.5) - yabeda-prometheus + webmock + yabeda-prometheus (~> 0.6.1) yabeda-puma-plugin yabeda-rails BUNDLED WITH - 2.0.1 + 2.2.19 diff --git a/INSTALL.md b/INSTALL.md index f7ac31f6..e93bde3b 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -1,17 +1,49 @@ -# Install Zync (only explained for MacOS so far) -1. Download this repository. -`git clone git@github.com:3scale/zync.git` +# Install Zync -2. Move to the folder of the project. -`cd zync` +**Note:** you can check the [Quickstart guide](doc/Quickstart.md) for list of commands that can quickly get you going. -3. Install the dependencies. There is `Brewfile` containing all the dependencies. -`brew bundle` +## Download this repository. +``` +git clone git@github.com:3scale/zync.git +``` -4. Start postgres. -`brew services start postgresql` +## Install dependencies. -5. Setup Zync. -`./bin/setup` +To run Zync you need access to a running [PostgreSQL](https://www.postgresql.org) server. You can install one with your operating system package +manager, as a container or run it remotely. -The command `brew services start postgresql` starts the service of PostgreSQL. If `./bin/setup` aborts, make sure that the `PostgreSQL` service is running. Verify with `brew services list` that has a status `started` and looking green. 
If the status is `started` but coloured orange, fix the errors indicated in the log located in `/usr/local/var/log/postgres.log` +The minimum requirement for the machine running Zync is to have + - Ruby 2.7.x + - `psql` client tool - needed when running for `db:setup` + - `libpq-devel` - needed to build `pg` gem during `bundle install`. + +You may have to adjust `config/database.yml` or `DATABASE_URL` environment variable, see below. + +## Setup Zync + +There is a `setup` script to install gem dependencies, +seed the database and run Zync server. + +``` +./bin/setup +``` + +Make sure to edit configuration or set needed environment variables +beforehand. Most important environment variables you can use: + + - `ZYNC_AUTHENTICATION_TOKEN` - this one must match your running [Porta](https://github.com/3scale/porta) configuration + - `DATABASE_URL` - depending on your PostgreSQL and `database.yml` configuration, you may want to set this one + - `PROMETHEUS_EXPORTER_PORT` - in case you are running other 3scale components like `que` and Porta, you may need to set a different port for each of them through this variable to avoid conflict between them + - `PORT` - change port where Zync is running (e.g. `5000`) to avoid conflict with a locally running [Porta](https://github.com/3scale/porta) server or other software, you can also use the `-p 5000` command line option + +## Start Zync + +When starting Zync, make sure to use a non-conflicting port on your machine +``` +bundle exec rails server -p 5000 +``` + +When starting que, make sure to set a non-conflicting Prometheus port +``` +PROMETHEUS_EXPORTER_PORT=9395 bundle exec rake que +``` diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..d8d735d8 --- /dev/null +++ b/Makefile @@ -0,0 +1,15 @@ +.PHONY: build push help +.DEFAULT_GOAL := help + +IMAGE_NAME := quay.io/3scale/zync +TAG := latest +IMAGE_TAG := $(IMAGE_NAME):$(TAG) + +build: + docker build . 
--tag $(IMAGE_TAG) + +push: + docker push $(IMAGE_TAG) + +help: ## Print this help + @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) diff --git a/README.md b/README.md index b7da14ab..d8c9f6f4 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ Zync is going to take your 3scale data and pushes it somewhere else, reliably. O This component is deployed as part of the overall 3scale API Management solution. -Please follow [these instructions](INSTALL.md) on how to set up your development environment locally and [these instructions](INTEGRATE.md) to integrate it with [Porta](https://github.com/3scale/porta) and Keycloak. +Please see [these instructions](INSTALL.md) and [Quickstart guide](doc/Quickstart.md) on how to set up your development environment locally and [these instructions](INTEGRATE.md) to integrate it with [Porta](https://github.com/3scale/porta) and Keycloak. ## Terminology @@ -16,13 +16,13 @@ Before describing the design it is important to define the terms. * **3scale** - 3scale API Manager (Porta). -* **Tenant** - mapping of Provider id registered in 3scale API Manager to domain and access token. +* **Tenant** - mapping of Provider id registered in 3scale API Manager to the domain and access token. * **Model** - relevant object in **3scale** like Application, Limit, Metric, Service. * **Notification** - Message sent to **Zync** describing the **model** that changed and having all required properties to fetch it from the API later. * Example: Application 3456, tenant_id: 26 * Example: Limit 4566, metric_id: 36, application_id: 46, tenant_id: 16 * **Update** - **Zync** fetches updated **Model** from the **Notifier** via the API using the information provided by the **Notification**. -* **Lock** - mechanism that preventing concurrent data access to the same scope. +* **Lock** - the mechanism that prevents concurrent data access to the same scope. 
* Example: **Tenant Lock** would mean only one can be running for one **Tenant**. * Example: **Model** **Lock** - only one per uniquely identified **Model**. * **Entry** - The information from the API provided by the **Update**. @@ -35,9 +35,9 @@ Before describing the design it is important to define the terms. ## Design -**Zync** is meant to synchronize data from **3scale** to external systems (like IDPs). Some people use Web-hooks for this but without further logic they can be unreliable and arrive out of order. This tool is meant to synchronize the final state to a different systems. +**Zync** is meant to synchronize data from **3scale** to external systems (like IDPs). Some people use Web-hooks for this but without further logic, they can be unreliable and arrive out of order. This tool is meant to synchronize the final state to a different system. -The flow is defined as **3scale** -> **Zync** ( <- **3scale**) -> **Integration**. So **3scale** notifies **Zync** there was a change to a **model** but does not say more than primary key and information required to fetch it from the **3scale** API. In some cases **model** needs just its primary key (**id**) and in some it needs other metadata (usually primary keys of its parents) to compose the API call (service_id, metric_id, …). +The flow is defined as **3scale** -> **Zync** ( <- **3scale**) -> **Integration**. So **3scale** notifies **Zync** there was a change to a **model** but does not say more than the primary key and information required to fetch it from the **3scale** API. In some cases **model** needs just its primary key (**id**) and in some, it needs other metadata (usually primary keys of its parents) to compose the API call (service_id, metric_id, …). **Zync** upon receiving the notification will acquire an **update model lock** and try to perform an **update**. Any information received this way is added as an **entry** to the **log** and the **model lock** is released. 
That **entry** can be either new data or information that the record is no longer there (404 from the API). If new **notification** came when the **model lock** was acquired, it is going to be processed after the lock is released. @@ -45,13 +45,13 @@ After adding **entry** to the **log** an **integration** is triggered and acquir ## Properties -Given the locking on the **model** there will be some parallelization, but also updates to one object will be serialized. This needs to be done to limit the network issues and ensure the request was delivered before issuing new one. +Given the locking on the **model** there will be some parallelization, but also updates to one object will be serialized. This needs to be done to limit the network issues and ensure the request was delivered before issuing a new one. -Because **Zync** will keep a **log** of **events** it will be able to replay changes and recover last state just taking last revisions of each **model** and even remove the ones that have been created before but have been deleted. +Because **Zync** will keep a **log** of **events** it will be able to replay changes and recover the last state just taking last revisions of each **model** and even remove the ones that have been created before but have been deleted. ## Data Model -**Record** types are for enforcing correctness of data on the database level and referential integrity. There is one relationship (Model -> Record) that can't have foreign constraint but can be recreated from other data. +**Record** types are for enforcing correctness of data on the database level and referential integrity. There is one relationship (Model -> Record) that can't have foreign constraints but can be recreated from other data. 
### Tenant diff --git a/app/adapters/abstract_adapter.rb b/app/adapters/abstract_adapter.rb index eb49091b..b58953c6 100644 --- a/app/adapters/abstract_adapter.rb +++ b/app/adapters/abstract_adapter.rb @@ -59,15 +59,13 @@ def headers JSON_TYPE = Mime[:json] private_constant :JSON_TYPE - NULL_TYPE = Mime::Type.lookup(nil) - attr_reader :http_client def build_http_client(endpoint) HTTPClient.new do self.debug_dev = $stderr if ENV.fetch('DEBUG', '0') == '1' - self.set_auth endpoint, *endpoint.auth + self.set_auth endpoint.uri.dup, *endpoint.auth Rails.application.config.x.http_client.deep_symbolize_keys .slice(:connect_timeout, :send_timeout, :receive_timeout).each do |key, value| @@ -94,9 +92,10 @@ def parse_client(_) def self.parse_response(response) body = response.body - case Mime::Type.lookup(response.content_type) + content_type = response.content_type.presence or return body + + case Mime::Type.lookup(content_type) when JSON_TYPE then JSON.parse(body) - when NULL_TYPE then body else raise InvalidResponseError, { response: response, message: 'Unknown Content-Type' } end end @@ -195,7 +194,7 @@ def access_token def oauth_client OAuth2::Client.new(@endpoint.client_id, @endpoint.client_secret, site: @endpoint.uri.dup, token_url: token_endpoint) do |builder| - builder.adapter(:httpclient).last.instance_variable_set(:@client, http_client) + builder.adapter(:httpclient).instance_variable_set(:@client, http_client) end end @@ -203,7 +202,7 @@ def oauth_client def fetch_oidc_discovery response = http_client.get(well_known_url) - config = AbstractAdapter.parse_response(response) + config = response.ok? 
&& AbstractAdapter.parse_response(response) case config when ->(obj) { obj.respond_to?(:[]) } then config diff --git a/app/adapters/keycloak_adapter.rb b/app/adapters/keycloak_adapter.rb index a3e9618f..1126a6ed 100644 --- a/app/adapters/keycloak_adapter.rb +++ b/app/adapters/keycloak_adapter.rb @@ -65,7 +65,7 @@ def redirectUris=(uris) end def oidc_configuration=(params) - write_attribute :oidc_configuration, OAuthConfiguration.new(params) + _write_attribute 'oidc_configuration', OAuthConfiguration.new(params) end def persisted? diff --git a/app/adapters/generic_adapter.rb b/app/adapters/rest_adapter.rb similarity index 96% rename from app/adapters/generic_adapter.rb rename to app/adapters/rest_adapter.rb index 3f984d10..eacfbdfb 100644 --- a/app/adapters/generic_adapter.rb +++ b/app/adapters/rest_adapter.rb @@ -3,7 +3,7 @@ require 'uri' # KeycloakAdapter adapter to create/update/delete Clients on using the KeycloakAdapter Client Registration API. -class GenericAdapter < AbstractAdapter +class RESTAdapter < AbstractAdapter def self.build_client(*attrs) Client.new(*attrs) end @@ -31,6 +31,12 @@ def test parse http_client.get(oidc.well_known_url, header: headers) end + def authentication + super + rescue OIDC::AuthenticationError + nil + end + # The Client entity. Mapping the OpenID Connect Client Metadata representation. # https://tools.ietf.org/html/rfc7591#section-2 class Client diff --git a/app/jobs/application_job.rb b/app/jobs/application_job.rb index cc900cb6..ca2c65ce 100644 --- a/app/jobs/application_job.rb +++ b/app/jobs/application_job.rb @@ -1,4 +1,47 @@ # frozen_string_literal: true + +require 'que/active_record/model' + # Base class for all Jobs class ApplicationJob < ActiveJob::Base + # Copied from ActiveJob::Exceptions, but uses debug log level. 
+ def self.retry_on(exception, wait: 3.seconds, attempts: 5, queue: nil, priority: nil, jitter: ActiveJob::Exceptions.const_get(:JITTER_DEFAULT)) + rescue_from exception do |error| + if executions < attempts + logger.debug "Retrying #{self.class} in #{wait} seconds, due to a #{exception}. The original exception was #{error.cause.inspect}." + retry_job wait: determine_delay(seconds_or_duration_or_algorithm: wait, executions: executions, jitter: jitter), queue: queue, priority: priority, error: error + else + if block_given? + instrument :retry_stopped, error: error do + yield self, error + end + else + instrument :retry_stopped, error: error + logger.debug "Stopped retrying #{self.class} due to a #{exception}, which reoccurred on #{executions} attempts. The original exception was #{error.cause.inspect}." + raise error + end + end + end + end + + class_attribute :deduplicate + + before_enqueue :delete_duplicates, if: :deduplicate? + around_enqueue if: :deduplicate? do |job, block| + job.class.model.transaction(&block) + end + + def relation + record = self.class.model + arguments = serialize.slice('arguments') + record.where('args @> ?', [arguments].to_json) + end + + def delete_duplicates + relation.delete_all + end + + def self.model + Que::ActiveRecord::Model.by_job_class(to_s) + end end diff --git a/app/jobs/process_entry_job.rb b/app/jobs/process_entry_job.rb index af2dea79..057395d0 100644 --- a/app/jobs/process_entry_job.rb +++ b/app/jobs/process_entry_job.rb @@ -1,41 +1,113 @@ # frozen_string_literal: true + # Process each Entry after it is created. # So schedule Integration jobs to perform the integration work. 
class ProcessEntryJob < ApplicationJob queue_as :default + class_attribute :proxy_integration_services + def perform(entry) model_integrations_for(entry).each do |integration, model| ProcessIntegrationEntryJob.perform_later(integration, model) end end - def model_integrations_for(entry) + def self.model_integrations_for(entry) + self.ensure_integrations_for(entry) + model = entry.model + integrations = Integration.for_model(model) + integrations.each.with_object(model) + end - integrations = Integration.retry_record_not_unique do - case model.record - when Proxy then CreateProxyIntegration.new(entry).call - end + def self.ensure_integrations_for(entry) + case entry.model.record + when Proxy + proxy_integration_services.map { |integration| integration.new(entry) }.each(&:call) + when Provider + CreateK8SIntegration.new(entry).call + end + end - Integration.for_model(model) + protected + + delegate :model_integrations_for, to: :class + + class ModelIntegration + attr_reader :model, :data, :entry + + def initialize(entry) + @entry = entry + @model = Model.find_by!(record: entry.model.record) + @data = entry.data || {}.freeze end + def integrations + ::Integration.where(tenant: tenant, model: model) + end - integrations.each.with_object(model) + def call + raise NoMethodError, __method__ + end + + protected + + delegate :transaction, to: :model + delegate :tenant, to: :entry end - # Wrapper for creating KeycloakAdapter when Proxy is created - CreateProxyIntegration = Struct.new(:entry) do - attr_reader :service, :data + class ProxyIntegration + attr_reader :service, :data, :entry - def initialize(*) - super + def initialize(entry) + @entry = entry @service = Model.find_by!(record: proxy.record.service) @data = entry.data || {}.freeze end + def call + raise NoMethodError, __method__ + end + + def model + raise NoMethodError, __method__ + end + + def integrations + ::Integration.where(tenant: tenant, model: service) + end + + protected + + delegate :transaction, to: 
:model + delegate :tenant, to: :entry + + def proxy + entry.model + end + end + + class CreateK8SIntegration < ModelIntegration + class_attribute :integration_type, default: Integration::Kubernetes + + class_attribute :enabled, default: Rails.application.config.x.openshift.enabled + + def call + return unless enabled? + + transaction do + integration = integrations.create_or_find_by!(type: integration_type.to_s) + integration.update(state: Integration.states.fetch(:active)) + + ProcessIntegrationEntryJob.perform_later(integration, model) + end + end + end + + # Wrapper for creating Keycloak/Generic Adapter when Proxy is created + class CreateOIDCProxyIntegration < ProxyIntegration def endpoint data[:oidc_issuer_endpoint] end @@ -65,38 +137,32 @@ def call end end - delegate :transaction, to: :model - delegate :tenant, to: :entry - def cleanup integrations.update_all(state: Integration.states.fetch(:disabled)) end def model case type - when 'generic' - ::Integration::Generic + when 'rest' + ::Integration::REST when 'keycloak', nil ::Integration::Keycloak else raise UnknownOIDCIssuerTypeError, type end end - def integrations - ::Integration.where(tenant: tenant, model: service) - end - # Unknown oidc_issuer_type in the entry. class UnknownOIDCIssuerTypeError < StandardError; end def find_integration model - .create_with(endpoint: endpoint) - .find_or_create_by!(integrations.where_values_hash) - end - - def proxy - entry.model + .create_with(endpoint: endpoint) + .create_or_find_by!(integrations.where_values_hash) end end + + self.proxy_integration_services = [ + CreateOIDCProxyIntegration, + CreateK8SIntegration + ].freeze end diff --git a/app/jobs/process_integration_entry_job.rb b/app/jobs/process_integration_entry_job.rb index 0bfdf6cf..901771fb 100644 --- a/app/jobs/process_integration_entry_job.rb +++ b/app/jobs/process_integration_entry_job.rb @@ -1,15 +1,23 @@ # frozen_string_literal: true + # Update Integration with latest changes to the model. 
# Load latest Entry and push it through the Integration. class ProcessIntegrationEntryJob < ApplicationJob queue_as :default + self.deduplicate = true + delegate :instrument, to: 'ActiveSupport::Notifications' def perform(integration, model, service: DiscoverIntegrationService.call(integration)) return unless service + if integration.try(:disabled?) + logger.info "#{integration.to_gid} is disabled, skipping" + return + end + result = invoke(model, integration, service) do |invocation| payload = build_payload(model, integration, invocation) @@ -29,17 +37,17 @@ def call(_payload) service.call(entry) finish(success: true) - rescue + rescue StandardError finish(success: false) raise end - def finish(success: ) - state.update_attributes(success: success, finished_at: timestamp) + def finish(success:) + state.update(success: success, finished_at: timestamp) end def start - state.update_attributes(started_at: timestamp, entry: entry, success: nil) + state.update(started_at: timestamp, entry: entry, success: nil) end def to_proc @@ -76,14 +84,14 @@ def call(payload, &block) value = instrument('perform.process_integration_entry', payload, &block) Result.new(true, value) - rescue => exception - Result.new(false, value, exception) + rescue StandardError => e + Result.new(false, value, e) end def build_payload(model, integration, invocation) { - entry_data: invocation.entry_data, integration: integration, model: model, - service: invocation.service_name, record: model.record + entry_data: invocation.entry_data, integration: integration, model: model, + service: invocation.service_name, record: model.record } end @@ -130,10 +138,9 @@ def transform_payload(payload) def extract_tenant(payload) tenant = payload.fetch(:model).tenant - [ tenant, { user_ids: [ tenant.to_gid_param ] } ] + [tenant, { user_ids: [tenant.to_gid_param] }] end - def build_message_bus(tenant) MessageBus::Instance.new.tap do |message_bus| message_bus.config.merge!(MessageBus.config) diff --git 
a/app/jobs/update_job.rb b/app/jobs/update_job.rb index 6195b8c3..2a88bd72 100644 --- a/app/jobs/update_job.rb +++ b/app/jobs/update_job.rb @@ -2,6 +2,7 @@ # Uses FetchService to get Entity and persist it in database. # Maintains UpdateState and can be only one running at a time by using a lock on model. + class UpdateJob < ApplicationJob include JobWithTimestamp queue_as :default @@ -9,6 +10,8 @@ class UpdateJob < ApplicationJob retry_on Errno::ECONNREFUSED, wait: :exponentially_longer, attempts: 10 retry_on Model::LockTimeoutError, wait: :exponentially_longer, attempts: 10 + self.deduplicate = true + def initialize(*) super @fetch = FetchService @@ -20,11 +23,11 @@ def perform(model) UpdateState.acquire_lock(model) do |state| # this is not going to be visible outside the transaction, does it matter? # what matters is that it could be rolled back - state.update_attributes(started_at: timestamp) + state.update(started_at: timestamp) entry = fetch.call(model) - state.update_attributes(success: entry.save, finished_at: timestamp) + state.update(success: entry.save, finished_at: timestamp) end end end diff --git a/app/models/integration.rb b/app/models/integration.rb index d8f2bee7..e417f123 100644 --- a/app/models/integration.rb +++ b/app/models/integration.rb @@ -17,6 +17,6 @@ def self.for_model(model) end def enabled? 
- true + Rails.application.config.integrations.fetch(self.class.name.demodulize.downcase, true) end end diff --git a/app/models/integration/keycloak.rb b/app/models/integration/keycloak.rb index 30631aef..f412a06e 100644 --- a/app/models/integration/keycloak.rb +++ b/app/models/integration/keycloak.rb @@ -1,4 +1,4 @@ # frozen_string_literal: true -class Integration::Keycloak < Integration::Generic +class Integration::Keycloak < Integration::REST end diff --git a/app/models/integration/kubernetes.rb b/app/models/integration/kubernetes.rb new file mode 100644 index 00000000..d335df05 --- /dev/null +++ b/app/models/integration/kubernetes.rb @@ -0,0 +1,17 @@ +# frozen_string_literal: true + +# Generic HTTP adapter for implementing custom integrations. +class Integration::Kubernetes < Integration + store_accessor :configuration, %i[ server ] + + validates :server, url: { allow_nil: true, no_local: true } + + def enabled? + super && K8s::Client === K8s::Client.autoconfig + rescue K8s::Error::Configuration + false + rescue => error + Bugsnag.notify(error) + false + end +end diff --git a/app/models/integration/generic.rb b/app/models/integration/rest.rb similarity index 57% rename from app/models/integration/generic.rb rename to app/models/integration/rest.rb index 601ec98a..19cd4bf0 100644 --- a/app/models/integration/generic.rb +++ b/app/models/integration/rest.rb @@ -1,12 +1,12 @@ # frozen_string_literal: true -# Generic HTTP adapter for implementing custom integrations. -class Integration::Generic < Integration +# REST HTTP adapter for implementing custom integrations. +class Integration::REST < Integration store_accessor :configuration, %i[ endpoint ] validates :endpoint, url: { allow_nil: true, no_local: true } def enabled? - endpoint.present? + super && endpoint.present? 
end end diff --git a/app/models/model.rb b/app/models/model.rb index c62b467d..a8affee3 100644 --- a/app/models/model.rb +++ b/app/models/model.rb @@ -14,7 +14,13 @@ def weak_record end # Error raised when weak lock can't be acquired. - class LockTimeoutError < StandardError; end + class LockTimeoutError < StandardError + + # No need to report this error. + def skip_bugsnag + true + end + end def self.create_record!(tenant) retry_record_not_unique do diff --git a/app/models/notification.rb b/app/models/notification.rb index 696beeda..d9a28479 100644 --- a/app/models/notification.rb +++ b/app/models/notification.rb @@ -11,7 +11,7 @@ def initialize(data) @data = ActiveSupport::HashWithIndifferentAccess.new(data) end - ALLOWED_MODELS = Set.new(%w(Application Proxy Service)).freeze + ALLOWED_MODELS = Set.new(%w(Application Proxy Service Provider)).freeze NULL_TYPE = Object.new.tap do |object| def object.find_or_create_by!(*); end def object.attribute_names; end diff --git a/app/models/provider.rb b/app/models/provider.rb new file mode 100644 index 00000000..bbdb3832 --- /dev/null +++ b/app/models/provider.rb @@ -0,0 +1,5 @@ +# frozen_string_literal: true + +class Provider < ApplicationRecord + belongs_to :tenant +end diff --git a/app/models/tenant.rb b/app/models/tenant.rb index d1a63e1d..81de0998 100644 --- a/app/models/tenant.rb +++ b/app/models/tenant.rb @@ -7,7 +7,7 @@ class Tenant < ApplicationRecord def self.upsert(params) retry_record_not_unique do tenant = find_or_create_by(id: params.require(:id)) - tenant.update_attributes(params) + tenant.update(params) tenant end end diff --git a/app/services/discover_integration_service.rb b/app/services/discover_integration_service.rb index 936b520e..39bd7519 100644 --- a/app/services/discover_integration_service.rb +++ b/app/services/discover_integration_service.rb @@ -1,4 +1,5 @@ # frozen_string_literal: true + # Returns a Service for each Integration. # Each Integration can be using different Service. 
# This class creates a mapping between Integration and Service. @@ -18,8 +19,10 @@ def call(integration) klass = case integration when Integration::Keycloak Integration::KeycloakService - when Integration::Generic + when Integration::REST Integration::GenericService + when Integration::Kubernetes + Integration::KubernetesService when integration Integration::EchoService else # the only one for now diff --git a/app/services/fetch_service.rb b/app/services/fetch_service.rb index a8da9315..18c13b8d 100644 --- a/app/services/fetch_service.rb +++ b/app/services/fetch_service.rb @@ -1,4 +1,5 @@ # frozen_string_literal: true + # Fetches Model information from upstream and returns the Entity. class FetchService @@ -13,7 +14,7 @@ class << self # Returned when unknown model is passed in. class UnsupportedModel < StandardError; end - # @return [ThreeScale::API] + # @return [ThreeScale::API::Client] def build_client(tenant) http_client = ThreeScale::API::InstrumentedHttpClient.new(endpoint: tenant.endpoint, provider_key: tenant.access_token) @@ -30,52 +31,52 @@ def call(model) fetch_client(model) when Proxy fetch_proxy(model) + when Provider + fetch_provider(model) else raise UnsupportedModel, "unsupported model #{record.class}" end end + def fetch_provider(model) + fetch_entry(model) do |client| + client.show_provider + end + end + def fetch_service(model) build_entry(model) end def fetch_proxy(model) - client = build_client(model.tenant) - - begin - proxy = client.show_proxy(model.record.service_id) - # right now the client raises runtime error, but rather should return a result - rescue RuntimeError - proxy = nil # 404'd + fetch_entry(model) do |client| + client.show_proxy(model.record.service_id) end - - build_entry(model, data: proxy) end def fetch_application(model) - client = build_client(model.tenant) - - begin - application = client.show_application(model.record_id) - # right now the client raises runtime error, but rather should return a result - rescue RuntimeError 
- application = nil # 404'd + fetch_entry(model) do |client| + client.show_application(model.record_id) end - - build_entry(model, data: application) end def fetch_client(model) + fetch_entry(model) do |client| + client.find_application(application_id: model.record.client_id) + end + end + + def fetch_entry(model) client = build_client(model.tenant) - begin - application = client.find_application(application_id: model.record.client_id) - # right now the client raises runtime error, but rather should return a result - rescue RuntimeError - application = nil # 404'd + data = begin + yield client + rescue RuntimeError => e + Rails.logger.error(e) + nil end - build_entry(model, data: application) + build_entry(model, data: data) end def build_entry(model, **attributes) diff --git a/app/services/incoming_notification_service.rb b/app/services/incoming_notification_service.rb index b5eebd72..d5e5447e 100644 --- a/app/services/incoming_notification_service.rb +++ b/app/services/incoming_notification_service.rb @@ -1,4 +1,5 @@ # frozen_string_literal: true + # This is action that is performed on incoming notificaiton. # Its purpose is to wrap the persistence and triggering update logic. diff --git a/app/services/integration/echo_service.rb b/app/services/integration/echo_service.rb index 936e5b7f..51198a90 100644 --- a/app/services/integration/echo_service.rb +++ b/app/services/integration/echo_service.rb @@ -1,4 +1,5 @@ # frozen_string_literal: true + # Example Integration that just prints what it is doing the log. class Integration::EchoService < Integration::ServiceBase diff --git a/app/services/integration/generic_service.rb b/app/services/integration/generic_service.rb index 4bc9aeeb..7e9e9f3c 100644 --- a/app/services/integration/generic_service.rb +++ b/app/services/integration/generic_service.rb @@ -1,8 +1,8 @@ # frozen_string_literal: true -# Handles persisting/removing clients using the Generic HTTP adapter. 
+# Handles persisting/removing clients using the REST HTTP adapter. class Integration::GenericService < Integration::AbstractService - self.adapter_class = ::GenericAdapter + self.adapter_class = ::RESTAdapter def remove(client) payload = { client: client, adapter: adapter } diff --git a/app/services/integration/keycloak_service.rb b/app/services/integration/keycloak_service.rb index 05c7c614..19ab1b99 100644 --- a/app/services/integration/keycloak_service.rb +++ b/app/services/integration/keycloak_service.rb @@ -39,5 +39,4 @@ def update_client(client) def persist?(client) client.secret end - end diff --git a/app/services/integration/kubernetes_service.rb b/app/services/integration/kubernetes_service.rb new file mode 100644 index 00000000..1bcbc155 --- /dev/null +++ b/app/services/integration/kubernetes_service.rb @@ -0,0 +1,304 @@ +# frozen_string_literal: true + +class Integration::KubernetesService < Integration::ServiceBase + attr_reader :namespace + + class_attribute :maintain_tls_spec, + default: ActiveModel::Type::Boolean.new.cast(ENV['KUBERNETES_ROUTE_TLS']) + + def initialize(integration, namespace: self.class.namespace) + super(integration) + @namespace = namespace + @client = K8s::Client.autoconfig(namespace: namespace).extend(MergePatch) + end + + module MergePatch + # @param resource [K8s::Resource] + # @param attrs [Hash] + # @return [K8s::Client] + def merge_resource(resource, attrs) + client_for_resource(resource).merge_patch(resource.metadata.name, attrs) + end + end + + def self.namespace + ENV.fetch('KUBERNETES_NAMESPACE') { File.read(File.join((ENV['TELEPRESENCE_ROOT'] || '/'), 'var/run/secrets/kubernetes.io/serviceaccount/namespace')) } + end + + def call(entry) + case entry.record + when Proxy then handle_proxy(entry) + when Provider then handle_provider(entry) + end + end + + def handle_proxy(entry) + persist_proxy?(entry) ? persist_proxy(entry) : delete_proxy(entry) + end + + def handle_provider(entry) + persist?(entry) ? 
persist_provider(entry) : delete_provider(entry) + end + + attr_reader :client + + def persist_proxy?(entry) + entry.data&.dig('deployment_option') == 'hosted' + end + + def persist?(entry) + entry.data + end + + def owner_reference_controller(resource) + owner_references = resource.metadata.ownerReferences or return + controller = owner_references.find(&:controller) + + client.get_resource(controller.merge(metadata: { namespace: namespace, name: controller.name })) + end + + def owner_reference_root(resource) + while (owner = owner_reference_controller(resource)) + resource = owner + end + + resource + rescue K8s::Error::Forbidden + # likely some resource like the operator + resource + end + + def get_owner + pod_name = ENV['KUBERNETES_POD_NAME'] || ENV['POD_NAME'] || ENV['HOSTNAME'] + + pod = client.api('v1').resource('pods', namespace: namespace).get(pod_name) + owner_reference_root(pod) + end + + def as_reference(owner) + K8s::API::MetaV1::OwnerReference.new( + kind: owner.kind, + apiVersion: owner.apiVersion, + name: owner.metadata.name, + uid: owner.metadata.uid + ) + end + + def annotations_for(entry) + { + '3scale.net/gid': entry.to_gid.to_s, + 'zync.3scale.net/gid': entry.model.record.to_gid.to_s, + } + end + + def label_selector_from(resource) + resource.metadata.labels.to_h.with_indifferent_access.slice( + '3scale.net/created-by', '3scale.net/tenant_id', 'zync.3scale.net/record', 'zync.3scale.net/route-to' + ) + end + + def labels_for(entry) + { + '3scale.net/created-by': 'zync', + '3scale.net/tenant_id': String(entry.tenant_id), + 'zync.3scale.net/record': entry.model.record.to_gid_param, + } + end + + def labels_for_proxy(entry) + service_id = entry.last_known_data.fetch('service_id') { return } + + labels_for(entry).merge( + 'zync.3scale.net/ingress': 'proxy', + '3scale.net/service_id': String(service_id) + ) + end + + def labels_for_provider(entry) + provider_id = entry.last_known_data.fetch('id') + + labels_for(entry).merge( + 
'zync.3scale.net/ingress': 'provider', + '3scale.net/provider_id': String(provider_id) + ) + end + + class Route < K8s::Resource + def initialize(attributes, **options) + super attributes.with_indifferent_access + .merge(apiVersion: 'route.openshift.io/v1', kind: 'Route') + .reverse_merge(metadata: {}), **options + end + end + + class RouteSpec < K8s::Resource + def initialize(url, service, port) + uri = URI(url) + tls_options = { + insecureEdgeTerminationPolicy: 'Redirect', + termination: 'edge' + } if uri.class == URI::HTTPS || uri.scheme.blank? + + super({ + host: uri.host || uri.path, + port: { targetPort: port }, + to: { + kind: 'Service', + name: service + } + }.merge(tls: tls_options)) + end + end + + def build_proxy_routes(entry) + build_routes('zync-3scale-api-', [ + RouteSpec.new(entry.data.fetch('endpoint'), 'apicast-production', 'gateway'), + RouteSpec.new(entry.data.fetch('sandbox_endpoint'), 'apicast-staging', 'gateway') + ], labels: labels_for_proxy(entry), annotations: annotations_for(entry)) + end + + def build_routes(name, specs = [], owner: get_owner, **metadata) + specs.map do |spec| + Route.new( + metadata: { + generateName: name, + namespace: namespace, + labels: owner.metadata.labels, + ownerReferences: [as_reference(owner)] + }.deep_merge(metadata.deep_merge( + labels: { + 'zync.3scale.net/route-to': spec.to_h.dig(:to, :name), + }, + annotations: { + 'zync.3scale.net/host': spec.host, + } + )), + spec: spec + ) + end + end + + def build_provider_routes(entry) + data = entry.data + domain, admin_domain = data.values_at('domain', 'admin_domain') + metadata = { labels: labels_for_provider(entry), annotations: annotations_for(entry) } + + if admin_domain == domain # master account + build_routes('zync-3scale-master-', [ + RouteSpec.new(data.fetch('domain'), 'system-master', 'http') + ], **metadata) + else + build_routes('zync-3scale-provider-', [ + RouteSpec.new(data.fetch('domain'), 'system-developer', 'http'), + 
RouteSpec.new(data.fetch('admin_domain'), 'system-provider', 'http') + ], **metadata) + end + end + + def cleanup_but(list, label_selector) + client + .client_for_resource(list.first, namespace: namespace) + .list(labelSelector: label_selector) + .each do |resource| + equal = list.any? { |object| object.metadata.uid === resource.metadata.uid && resource.metadata.selfLink == object.metadata.selfLink } + Rails.logger.warn "Deleting #{resource.metadata} from k8s because it is not on #{list}" + + client.delete_resource(resource) unless equal + end + end + + def extract_route_patch(resource) + { + metadata: resource.metadata.to_h, + spec: { host: resource.spec.host }, + } + end + + protected def persist_resources(list) + list.map do |resource| + existing = client + .client_for_resource(resource, namespace: namespace) + .list(labelSelector: label_selector_from(resource)) + + client.get_resource case existing.size + when 0 + client.create_resource(resource) + when 1 + update_resource(existing.first, resource) + else + existing.each(&client.method(:delete_resource)) + client.create_resource(resource) + end + end + end + + def cleanup_routes(routes) + routes.each do |route| + begin + verify_route_status(route) + rescue InvalidStatus => error + # they need to be re-created anyway, OpenShift won't re-admit them + client.delete_resource(route) if error.reason == 'HostAlreadyClaimed' && error.type == 'Admitted' + raise + end + end + end + + class InvalidStatus < StandardError + attr_reader :type, :reason + + def initialize(condition) + @type, @reason = condition.type, condition.reason + super(condition.message) + end + end + + class MissingStatusIngress < InvalidStatus + MISSING_STATUS_INGRESS_CONDITION = ActiveSupport::OrderedOptions.new.merge(type: 'unknown', reason: 'unknown', message: "Kubernetes resource status missing 'ingress' property").freeze + + def initialize + super(MISSING_STATUS_INGRESS_CONDITION) + end + end + + def verify_route_status(route) + ingress = 
(route.status.ingress or raise MissingStatusIngress).find { |ingress| ingress.host == route.spec.host } + condition = ingress.conditions.find { |condition| condition.type == 'Admitted' } + + raise InvalidStatus, condition unless condition.status == 'True' + end + + def update_resource(existing, resource) + resource.spec.delete_field(:tls) if maintain_tls_spec? + + client.merge_resource(existing, resource) + rescue K8s::Error::Invalid + resource.spec.tls = existing.spec.tls if maintain_tls_spec? + client.delete_resource(existing) + client.create_resource(resource) + end + + def persist_proxy(entry) + routes = build_proxy_routes(entry) + + cleanup_routes persist_resources(routes) + end + + def delete_proxy(entry) + label_selector = labels_for_proxy(entry) + + cleanup_but([Route.new({})], label_selector) + end + + def persist_provider(entry) + routes = build_provider_routes(entry) + + cleanup_routes persist_resources(routes) + end + + def delete_provider(entry) + label_selector = labels_for_provider(entry) + + cleanup_but([Route.new({})], label_selector) + end +end diff --git a/app/services/integration/service_base.rb b/app/services/integration/service_base.rb index 365c710d..f34bb611 100644 --- a/app/services/integration/service_base.rb +++ b/app/services/integration/service_base.rb @@ -7,4 +7,8 @@ class Integration::ServiceBase def initialize(integration) @integration = integration end + + def call(_entry) + raise NoMethodError, __method__ + end end diff --git a/bin/rails b/bin/rails index 07396602..225ac226 100755 --- a/bin/rails +++ b/bin/rails @@ -1,4 +1,5 @@ #!/usr/bin/env ruby + APP_PATH = File.expand_path('../config/application', __dir__) -require_relative '../config/boot' -require 'rails/commands' +require_relative "../config/boot" +require "rails/commands" diff --git a/bin/rake b/bin/rake index 17240489..4fbf10b9 100755 --- a/bin/rake +++ b/bin/rake @@ -1,4 +1,4 @@ #!/usr/bin/env ruby -require_relative '../config/boot' -require 'rake' +require_relative 
"../config/boot" +require "rake" Rake.application.run diff --git a/bin/setup b/bin/setup index a334d86a..57923026 100755 --- a/bin/setup +++ b/bin/setup @@ -1,6 +1,5 @@ #!/usr/bin/env ruby -require 'fileutils' -include FileUtils +require "fileutils" # path to your application root. APP_ROOT = File.expand_path('..', __dir__) @@ -9,8 +8,9 @@ def system!(*args) system(*args) || abort("\n== Command #{args} failed ==") end -chdir APP_ROOT do - # This script is a starting point to setup your application. +FileUtils.chdir APP_ROOT do + # This script is a way to set up or update your development environment automatically. + # This script is idempotent, so that you can run it at any time and get an expectable outcome. # Add necessary setup steps to this file. puts '== Installing dependencies ==' @@ -19,11 +19,11 @@ chdir APP_ROOT do # puts "\n== Copying sample files ==" # unless File.exist?('config/database.yml') - # cp 'config/database.yml.sample', 'config/database.yml' + # FileUtils.cp 'config/database.yml.sample', 'config/database.yml' # end puts "\n== Preparing database ==" - system! 'bin/rails db:setup' + system! 'bin/rails db:prepare' puts "\n== Removing old logs and tempfiles ==" system! 'bin/rails log:clear tmp:clear' diff --git a/bin/spring b/bin/spring deleted file mode 100755 index df22d97b..00000000 --- a/bin/spring +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env ruby -# frozen_string_literal: true - -# This file loads spring without using Bundler, in order to be fast. -# It gets overwritten when you run the `spring binstub` command. 
- -unless defined?(Spring) - require 'rubygems' - require 'bundler' - - lockfile = Bundler::LockfileParser.new(Bundler.default_lockfile.read) - spring = lockfile.specs.detect { |spec| spec.name == "spring" } - if spring - Gem.use_paths Gem.dir, Bundler.bundle_path.to_s, *Gem.path - gem 'spring', spring.version - require 'spring/binstub' - end -end diff --git a/config.ru b/config.ru index 7eae2644..a97566a7 100644 --- a/config.ru +++ b/config.ru @@ -1,6 +1,7 @@ # frozen_string_literal: true # This file is used by Rack-based servers to start the application. -require_relative 'config/environment' +require_relative "config/environment" run Rails.application +Rails.application.load_server diff --git a/config/application.rb b/config/application.rb index b048a03e..a0f58b68 100644 --- a/config/application.rb +++ b/config/application.rb @@ -1,13 +1,16 @@ # frozen_string_literal: true -require_relative 'boot' +require_relative "boot" require "rails" # Pick the frameworks you want: require "active_model/railtie" require "active_job/railtie" require "active_record/railtie" +# require "active_storage/engine" require "action_controller/railtie" # require "action_mailer/railtie" +# require "action_mailbox/engine" +# require "action_text/engine" require "action_view/railtie" # require "action_cable/engine" # require "sprockets/railtie" @@ -37,9 +40,15 @@ class Application < Rails::Application # Initialize configuration defaults for originally generated Rails version. config.load_defaults 5.1 - # Settings in config/environments/* take precedence over those specified here. - # Application configuration should go into files in config/initializers - # -- all .rb files in that directory are automatically loaded. + config.integrations = config_for(:integrations) + + # Configuration for the application, engines, and railties goes here. + # + # These settings can be overridden in specific environments using the files + # in config/environments, which are processed later. 
+ # + # config.time_zone = "Central Time (US & Canada)" + # config.eager_load_paths << Rails.root.join("extras") # Only loads a smaller set of middleware suitable for API only apps. # Middleware like session, flash, cookies can be added back manually. @@ -62,6 +71,23 @@ class Application < Rails::Application config.middleware.delete(ActionDispatch::Flash) # remove it after message bus loaded end + initializer 'k8s-client.logger' do + case config.log_level + when :debug + K8s::Logging.debug! + K8s::Transport.debug! + when :info + K8s::Logging.verbose! + K8s::Transport.verbose! + when :error + K8s::Logging.quiet! + K8s::Transport.quiet! + else + K8s::Logging.log_level = K8s::Transport.log_level = Rails.logger.level + end + end + config.x.keycloak = config_for(:keycloak) || Hash.new + config.x.openshift = ActiveSupport::InheritableOptions.new(config_for(:openshift)&.deep_symbolize_keys) end end diff --git a/config/boot.rb b/config/boot.rb index c04863fa..38a47b2c 100644 --- a/config/boot.rb +++ b/config/boot.rb @@ -2,5 +2,5 @@ ENV['BUNDLE_GEMFILE'] ||= File.expand_path('../Gemfile', __dir__) -require 'bundler/setup' # Set up gems listed in the Gemfile. -require 'bootsnap/setup' # Speed up boot time by caching expensive operations. +require "bundler/setup" # Set up gems listed in the Gemfile. +require "bootsnap/setup" # Speed up boot time by caching expensive operations. diff --git a/config/environment.rb b/config/environment.rb index 12ea62f8..a21c0c58 100644 --- a/config/environment.rb +++ b/config/environment.rb @@ -1,6 +1,6 @@ # frozen_string_literal: true # Load the Rails application. -require_relative 'application' +require_relative "application" # Initialize the Rails application. Rails.application.initialize! 
diff --git a/config/environments/development.rb b/config/environments/development.rb index 3331c43b..d8e8b3ef 100644 --- a/config/environments/development.rb +++ b/config/environments/development.rb @@ -1,13 +1,14 @@ # frozen_string_literal: true +require "active_support/core_ext/integer/time" + Rails.application.configure do config.middleware.insert_before Rack::Sendfile, ActionDispatch::DebugLocks - # Settings specified here will take precedence over those in config/application.rb. - # In the development environment your application's code is reloaded on - # every request. This slows down response time but is perfect for development + # In the development environment your application's code is reloaded any time + # it changes. This slows down response time but is perfect for development # since you don't have to restart the web server when you make code changes. config.cache_classes = false @@ -20,8 +21,6 @@ # Enable/disable caching. By default caching is disabled. # Run rails dev:cache to toggle caching. if Rails.root.join('tmp', 'caching-dev.txt').exist? - config.action_controller.perform_caching = true - config.cache_store = :memory_store config.public_file_server.headers = { 'Cache-Control' => "public, max-age=#{2.days.to_i}" @@ -35,6 +34,12 @@ # Print deprecation notices to the Rails logger. config.active_support.deprecation = :log + # Raise exceptions for disallowed deprecations. + config.active_support.disallowed_deprecation = :raise + + # Tell Active Support which deprecation messages to disallow. + config.active_support.disallowed_deprecation_warnings = [] + # Raise an error on page load if there are pending migrations. config.active_record.migration_error = :page_load @@ -42,15 +47,23 @@ config.active_record.verbose_query_logs = true - # Raises error for missing translations - # config.action_view.raise_on_missing_translations = true + # Raises error for missing translations. 
+ # config.i18n.raise_on_missing_translations = true + + # Annotate rendered view with file names. + # config.action_view.annotate_rendered_view_with_filenames = true # Use an evented file watcher to asynchronously detect changes in source code, # routes, locales, etc. This feature depends on the listen gem. - # config.file_watcher = ActiveSupport::EventedFileUpdateChecker + #config.file_watcher = ActiveSupport::EventedFileUpdateChecker + + # Uncomment if you wish to allow Action Cable access from any origin. + # config.action_cable.disable_request_forgery_protection = true end -HttpLog.configure do |config| - config.enabled = true - config.color = true +if defined?(HttpLog) + HttpLog.configure do |config| + config.enabled = true + config.color = true + end end diff --git a/config/environments/production.rb b/config/environments/production.rb index 26edf5d0..9873f7c5 100644 --- a/config/environments/production.rb +++ b/config/environments/production.rb @@ -1,4 +1,6 @@ # frozen_string_literal: true +require "active_support/core_ext/integer/time" + Rails.application.configure do # Settings specified here will take precedence over those in config/application.rb. @@ -13,7 +15,6 @@ # Full error reports are disabled and caching is turned on. config.consider_all_requests_local = false - config.action_controller.perform_caching = true # Ensures that a master key has been made available in either ENV["RAILS_MASTER_KEY"] # or in config/master.key. This key is used to decrypt credentials (and other encrypted files). @@ -24,31 +25,28 @@ config.public_file_server.enabled = ENV['RAILS_SERVE_STATIC_FILES'].present? # Enable serving of images, stylesheets, and JavaScripts from an asset server. - # config.action_controller.asset_host = 'http://assets.example.com' + # config.asset_host = 'http://assets.example.com' # Specifies the header that your server uses for sending files. 
# config.action_dispatch.x_sendfile_header = 'X-Sendfile' # for Apache # config.action_dispatch.x_sendfile_header = 'X-Accel-Redirect' # for NGINX - # Force all access to the app over SSL, use Strict-Transport-Security, and use secure cookies. # config.force_ssl = true - # Use the lowest log level to ensure availability of diagnostic information - # when problems arise. + # Include generic and useful information about system operation, but avoid logging too much + # information to avoid inadvertent exposure of personally identifiable information (PII). config.log_level = ENV.fetch('RAILS_LOG_LEVEL', 'debug').to_sym # Prepend all log lines with the following tags. config.log_tags = [ ] - config.lograge.enabled = true - # Use a different cache store in production. # config.cache_store = :mem_cache_store - # Use a real queuing backend for Active Job (and separate queues per environment) + # Use a real queuing backend for Active Job (and separate queues per environment). # config.active_job.queue_adapter = :resque - # config.active_job.queue_name_prefix = "zync_#{Rails.env}" + # config.active_job.queue_name_prefix = "zync_production" # Enable locale fallbacks for I18n (makes lookups for any locale fall back to # the I18n.default_locale when a translation cannot be found). @@ -57,8 +55,17 @@ # Send deprecation notices to registered listeners. config.active_support.deprecation = :notify + # Log disallowed deprecations. + config.active_support.disallowed_deprecation = :log + + # Tell Active Support which deprecation messages to disallow. + config.active_support.disallowed_deprecation_warnings = [] + + # Use default logging formatter so that PID and timestamp are not suppressed. + config.log_formatter = ::Logger::Formatter.new + # Use a different logger for distributed setups. - # require 'syslog/logger' + # require "syslog/logger" # config.logger = ActiveSupport::TaggedLogging.new(Syslog::Logger.new 'app-name') if ENV["RAILS_LOG_TO_STDOUT"].present? 
@@ -69,4 +76,25 @@ # Do not dump schema after migrations. config.active_record.dump_schema_after_migration = false + + # Inserts middleware to perform automatic connection switching. + # The `database_selector` hash is used to pass options to the DatabaseSelector + # middleware. The `delay` is used to determine how long to wait after a write + # to send a subsequent read to the primary. + # + # The `database_resolver` class is used by the middleware to determine which + # database is appropriate to use based on the time delay. + # + # The `database_resolver_context` class is used by the middleware to set + # timestamps for the last write to the primary. The resolver uses the context + # class timestamps to determine how long to wait before reading from the + # replica. + # + # By default Rails will store a last write timestamp in the session. The + # DatabaseSelector middleware is designed as such you can define your own + # strategy for connection switching and pass that into the middleware through + # these configuration options. + # config.active_record.database_selector = { delay: 2.seconds } + # config.active_record.database_resolver = ActiveRecord::Middleware::DatabaseSelector::Resolver + # config.active_record.database_resolver_context = ActiveRecord::Middleware::DatabaseSelector::Resolver::Session end diff --git a/config/environments/test.rb b/config/environments/test.rb index b5e645d4..7528b333 100644 --- a/config/environments/test.rb +++ b/config/environments/test.rb @@ -1,11 +1,14 @@ # frozen_string_literal: true +require "active_support/core_ext/integer/time" + +# The test environment is used exclusively to run your application's +# test suite. You never need to work with it otherwise. Remember that +# your test database is "scratch space" for the test suite and is wiped +# and recreated between test runs. Don't rely on the data there! + Rails.application.configure do # Settings specified here will take precedence over those in config/application.rb. 
- # The test environment is used exclusively to run your application's - # test suite. You never need to work with it otherwise. Remember that - # your test database is "scratch space" for the test suite and is wiped - # and recreated between test runs. Don't rely on the data there! config.cache_classes = true # Do not eager load code on boot. This avoids loading your whole application @@ -22,6 +25,7 @@ # Show full error reports and disable caching. config.consider_all_requests_local = true config.action_controller.perform_caching = false + config.cache_store = :null_store # Raise exceptions instead of rendering exception templates. config.action_dispatch.show_exceptions = false @@ -32,11 +36,22 @@ # Print deprecation notices to the stderr. config.active_support.deprecation = :stderr - # Raises error for missing translations - # config.action_view.raise_on_missing_translations = true + # Raise exceptions for disallowed deprecations. + config.active_support.disallowed_deprecation = :raise + + # Tell Active Support which deprecation messages to disallow. + config.active_support.disallowed_deprecation_warnings = [] + + # Raises error for missing translations. + # config.i18n.raise_on_missing_translations = true + + # Annotate rendered view with file names. + # config.action_view.annotate_rendered_view_with_filenames = true end -HttpLog.configure do |config| - config.enabled = true - config.color = true +if defined?(HttpLog) + HttpLog.configure do |config| + config.enabled = true + config.color = true + end end diff --git a/config/initializers/backtrace_silencers.rb b/config/initializers/backtrace_silencers.rb index d0f0d3b5..6bea00bc 100644 --- a/config/initializers/backtrace_silencers.rb +++ b/config/initializers/backtrace_silencers.rb @@ -2,7 +2,8 @@ # Be sure to restart your server when you modify this file. # You can add backtrace silencers for libraries that you're using but don't wish to see in your backtraces. 
-# Rails.backtrace_cleaner.add_silencer { |line| line =~ /my_noisy_library/ } +# Rails.backtrace_cleaner.add_silencer { |line| /my_noisy_library/.match?(line) } -# You can also remove all the silencers if you're trying to debug a problem that might stem from framework code. -# Rails.backtrace_cleaner.remove_silencers! +# You can also remove all the silencers if you're trying to debug a problem that might stem from framework code +# by setting BACKTRACE=1 before calling your invocation, like "BACKTRACE=1 ./bin/rails runner 'MyClass.perform'". +# Rails.backtrace_cleaner.remove_silencers! if ENV["BACKTRACE"] diff --git a/config/initializers/new_framework_defaults_5_2.rb b/config/initializers/new_framework_defaults_5_2.rb deleted file mode 100644 index 3262bea3..00000000 --- a/config/initializers/new_framework_defaults_5_2.rb +++ /dev/null @@ -1,38 +0,0 @@ -# Be sure to restart your server when you modify this file. -# -# This file contains migration options to ease your Rails 5.2 upgrade. -# -# Once upgraded flip defaults one by one to migrate to the new default. -# -# Read the Guide for Upgrading Ruby on Rails for more info on each option. - -# Make Active Record use stable #cache_key alongside new #cache_version method. -# This is needed for recyclable cache keys. -Rails.application.config.active_record.cache_versioning = true - -# Use AES-256-GCM authenticated encryption for encrypted cookies. -# Also, embed cookie expiry in signed or encrypted cookies for increased security. -# -# This option is not backwards compatible with earlier Rails versions. -# It's best enabled when your entire app is migrated and stable on 5.2. -# -# Existing cookies will be converted on read then written with the new scheme. -Rails.application.config.action_dispatch.use_authenticated_cookie_encryption = true - -# Use AES-256-GCM authenticated encryption as default cipher for encrypting messages -# instead of AES-256-CBC, when use_authenticated_message_encryption is set to true. 
-Rails.application.config.active_support.use_authenticated_message_encryption = true - -# Add default protection from forgery to ActionController::Base instead of in -# ApplicationController. -Rails.application.config.action_controller.default_protect_from_forgery = true - -# Store boolean values are in sqlite3 databases as 1 and 0 instead of 't' and -# 'f' after migrating old data. -Rails.application.config.active_record.sqlite3.represent_boolean_as_integer = true - -# Use SHA-1 instead of MD5 to generate non-sensitive digests, such as the ETag header. -Rails.application.config.active_support.use_sha1_digests = true - -# Make `form_with` generate id attributes for any generated HTML tags. -Rails.application.config.action_view.form_with_generates_ids = true diff --git a/config/initializers/new_framework_defaults_6_1.rb b/config/initializers/new_framework_defaults_6_1.rb new file mode 100644 index 00000000..9526b835 --- /dev/null +++ b/config/initializers/new_framework_defaults_6_1.rb @@ -0,0 +1,67 @@ +# Be sure to restart your server when you modify this file. +# +# This file contains migration options to ease your Rails 6.1 upgrade. +# +# Once upgraded flip defaults one by one to migrate to the new default. +# +# Read the Guide for Upgrading Ruby on Rails for more info on each option. + +# Support for inversing belongs_to -> has_many Active Record associations. +# Rails.application.config.active_record.has_many_inversing = true + +# Track Active Storage variants in the database. +# Rails.application.config.active_storage.track_variants = true + +# Apply random variation to the delay when retrying failed jobs. +# Rails.application.config.active_job.retry_jitter = 0.15 + +# Stop executing `after_enqueue`/`after_perform` callbacks if +# `before_enqueue`/`before_perform` respectively halts with `throw :abort`. +# Rails.application.config.active_job.skip_after_callbacks_if_terminated = true + +# Specify cookies SameSite protection level: either :none, :lax, or :strict. 
+# +# This change is not backwards compatible with earlier Rails versions. +# It's best enabled when your entire app is migrated and stable on 6.1. +# Rails.application.config.action_dispatch.cookies_same_site_protection = :lax + +# Generate CSRF tokens that are encoded in URL-safe Base64. +# +# This change is not backwards compatible with earlier Rails versions. +# It's best enabled when your entire app is migrated and stable on 6.1. +# Rails.application.config.action_controller.urlsafe_csrf_tokens = true + +# Specify whether `ActiveSupport::TimeZone.utc_to_local` returns a time with an +# UTC offset or a UTC time. +# ActiveSupport.utc_to_local_returns_utc_offset_times = true + +# Change the default HTTP status code to `308` when redirecting non-GET/HEAD +# requests to HTTPS in `ActionDispatch::SSL` middleware. +# Rails.application.config.action_dispatch.ssl_default_redirect_status = 308 + +# Use new connection handling API. For most applications this won't have any +# effect. For applications using multiple databases, this new API provides +# support for granular connection swapping. +# Rails.application.config.active_record.legacy_connection_handling = false + +# Make `form_with` generate non-remote forms by default. +# Rails.application.config.action_view.form_with_generates_remote_forms = false + +# Set the default queue name for the analysis job to the queue adapter default. +# Rails.application.config.active_storage.queues.analysis = nil + +# Set the default queue name for the purge job to the queue adapter default. +# Rails.application.config.active_storage.queues.purge = nil + +# Set the default queue name for the incineration job to the queue adapter default. +# Rails.application.config.action_mailbox.queues.incineration = nil + +# Set the default queue name for the routing job to the queue adapter default. +# Rails.application.config.action_mailbox.queues.routing = nil + +# Set the default queue name for the mail deliver job to the queue adapter default. 
+# Rails.application.config.action_mailer.deliver_later_queue_name = nil + +# Generate a `Link` header that gives a hint to modern browsers about +# preloading assets when using `javascript_include_tag` and `stylesheet_link_tag`. +# Rails.application.config.action_view.preload_links_header = true diff --git a/config/initializers/prometheus.rb b/config/initializers/prometheus.rb index e3d706f8..db59c1f9 100644 --- a/config/initializers/prometheus.rb +++ b/config/initializers/prometheus.rb @@ -1,2 +1,5 @@ require 'prometheus/active_job_subscriber' require 'prometheus/active_record' + +Yabeda.configure! if Yabeda.respond_to?(:configure!) +Prometheus::ActiveJobSubscriber.attach_to :active_job diff --git a/config/initializers/rails_6.rb b/config/initializers/rails_6.rb new file mode 100644 index 00000000..9e101eca --- /dev/null +++ b/config/initializers/rails_6.rb @@ -0,0 +1,24 @@ +# frozen_string_literal: true + +# Remove this when upgrading to Rails 6 +module Rails6 + module ActiveRecord + module Relation + # File activerecord/lib/active_record/relation.rb, line 218 + def create_or_find_by!(attributes, &block) + transaction(requires_new: true) { create!(attributes, &block) } + rescue ::ActiveRecord::RecordNotUnique + find_by!(attributes) + end + + # File activerecord/lib/active_record/relation.rb, line 209 + def create_or_find_by(attributes, &block) + transaction(requires_new: true) { create(attributes, &block) } + rescue ::ActiveRecord::RecordNotUnique + find_by!(attributes) + end + end + end +end + +ActiveRecord::Relation.prepend(Rails6::ActiveRecord::Relation) diff --git a/config/integrations.yml b/config/integrations.yml new file mode 100644 index 00000000..85bcb928 --- /dev/null +++ b/config/integrations.yml @@ -0,0 +1,11 @@ +shared: &shared + kubernetes: <%= ENV.fetch('DISABLE_K8S_ROUTES_CREATION', '0') == '0' %> + +production: + <<: *shared + +development: + <<: *shared + +test: + <<: *shared diff --git a/config/locales/en.yml b/config/locales/en.yml index 
decc5a85..cf9b342d 100644 --- a/config/locales/en.yml +++ b/config/locales/en.yml @@ -27,7 +27,7 @@ # 'true': 'foo' # # To learn more, please read the Rails Internationalization guide -# available at http://guides.rubyonrails.org/i18n.html. +# available at https://guides.rubyonrails.org/i18n.html. en: hello: "Hello world" diff --git a/config/openshift.yml b/config/openshift.yml new file mode 100644 index 00000000..b8138c37 --- /dev/null +++ b/config/openshift.yml @@ -0,0 +1,18 @@ +default: &default + enabled: <%= + ENV.values_at( + 'APICAST_STAGING_SERVICE_PORT_GATEWAY', + 'APICAST_STAGING_SERVICE_PORT_GATEWAY', + 'SYSTEM_DEVELOPER_SERVICE_PORT_HTTP', + 'SYSTEM_PROVIDER_SERVICE_PORT_HTTP', + 'SYSTEM_MASTER_SERVICE_PORT_HTTP', + ).all? + %> + +development: + <<: *default +production: + <<: *default + +test: + enabled: false diff --git a/config/puma.rb b/config/puma.rb index 2fb90cb4..0c0ba2d2 100644 --- a/config/puma.rb +++ b/config/puma.rb @@ -1,23 +1,31 @@ -# frozen_string_literal: true # Puma can serve each request in a thread from an internal thread pool. # The `threads` method setting takes two numbers: a minimum and maximum. # Any libraries that use thread pools should be configured to match # the maximum value specified for Puma. Default is set to 5 threads for minimum # and maximum; this matches the default thread size of Active Record. # -threads_count = ENV.fetch("RAILS_MAX_THREADS") { 5 } -threads threads_count, threads_count +max_threads_count = ENV.fetch("RAILS_MAX_THREADS") { 5 } +min_threads_count = ENV.fetch("RAILS_MIN_THREADS") { max_threads_count } +threads min_threads_count, max_threads_count + +# Specifies the `worker_timeout` threshold that Puma will use to wait before +# terminating a worker in development environments. +# +worker_timeout 3600 if ENV.fetch("RAILS_ENV", "development") == "development" # Specifies the `port` that Puma will listen on to receive requests; default is 3000. 
# -port ENV.fetch("PORT") { 3000 } +port ENV.fetch("PORT") { 3000 } # Specifies the `environment` that Puma will run in. # environment ENV.fetch("RAILS_ENV") { "development" } +# Specifies the `pidfile` that Puma will use. +pidfile ENV.fetch("PIDFILE") { "tmp/pids/server.pid" } + # Specifies the number of `workers` to boot in clustered mode. -# Workers are forked webserver processes. If using threads and workers together +# Workers are forked web server processes. If using threads and workers together # the concurrency of the application would be max `threads` * `workers`. # Workers do not work on JRuby or Windows (both of which do not support # processes). @@ -27,32 +35,10 @@ # Use the `preload_app!` method when specifying a `workers` number. # This directive tells Puma to first boot the application and load code # before forking the application. This takes advantage of Copy On Write -# process behavior so workers use less memory. If you use this option -# you need to make sure to reconnect any threads in the `on_worker_boot` -# block. +# process behavior so workers use less memory. # # preload_app! -# If you are preloading your application and using Active Record, it's -# recommended that you close any connections to the database before workers -# are forked to prevent connection leakage. -# -# before_fork do -# ActiveRecord::Base.connection_pool.disconnect! if defined?(ActiveRecord) -# end - -# The code in the `on_worker_boot` will be called if you are using -# clustered mode by specifying a number of `workers`. After each worker -# process is booted, this block will be run. If you are using the `preload_app!` -# option, you will want to use this block to reconnect to any threads -# or connections that may have been created at application boot, as Ruby -# cannot share connections between processes. 
-# -# on_worker_boot do -# ActiveRecord::Base.establish_connection if defined?(ActiveRecord) -# end -# - queue_requests false # let the higher layer figure that out # plugin 'metrics' diff --git a/config/que.yml b/config/que.yml index b77128a6..f75528b0 100644 --- a/config/que.yml +++ b/config/que.yml @@ -1,3 +1,7 @@ +production: + worker_count: 3 + wait_period: 100 + development: worker_count: 3 wait_period: 100 diff --git a/config/spring.rb b/config/spring.rb deleted file mode 100644 index ce5bb8a1..00000000 --- a/config/spring.rb +++ /dev/null @@ -1,28 +0,0 @@ -# frozen_string_literal: true -%w( - .ruby-version - .rbenv-vars - tmp/restart.txt - tmp/caching-dev.txt -).each { |path| Spring.watch(path) } - -require 'spring/application' - -module QueWorkers - def disconnect_database - ::Que.worker_count = 0 - ::Que.mode = :off - - ::MessageBus.off - ::MessageBus.destroy - - super - end -end - -Spring::Application.prepend(QueWorkers) - -Spring.after_fork do - ::MessageBus.on - ::MessageBus.after_fork -end diff --git a/db/migrate/20190530080459_add_integration_state.rb b/db/migrate/20190530080459_add_integration_state.rb index c6b828c4..3abfb2d5 100644 --- a/db/migrate/20190530080459_add_integration_state.rb +++ b/db/migrate/20190530080459_add_integration_state.rb @@ -1,6 +1,6 @@ class AddIntegrationState < ActiveRecord::Migration[5.2] def up - create_enum :integration_state, 'active', 'disabled' + create_enum :integration_state, %w[active disabled] add_column :integrations, :state, :integration_state default = 'active' Integration.in_batches.update_all(state: default) diff --git a/db/migrate/20190605094424_create_providers.rb b/db/migrate/20190605094424_create_providers.rb new file mode 100644 index 00000000..40e95e85 --- /dev/null +++ b/db/migrate/20190605094424_create_providers.rb @@ -0,0 +1,9 @@ +class CreateProviders < ActiveRecord::Migration[5.2] + def change + create_table :providers do |t| + t.references :tenant, foreign_key: true + + t.timestamps + end + end 
+end diff --git a/db/migrate/20210504152609_add_index_models_on_record.rb b/db/migrate/20210504152609_add_index_models_on_record.rb new file mode 100644 index 00000000..f692272b --- /dev/null +++ b/db/migrate/20210504152609_add_index_models_on_record.rb @@ -0,0 +1,6 @@ +class AddIndexModelsOnRecord < ActiveRecord::Migration[6.1] + def change + remove_index :models, name: "index_models_on_record_type_and_record_id" , if_exists: true + add_index :models, [:record_id, :record_type], name: "index_models_on_record", if_not_exists: true, unique: true + end +end diff --git a/db/structure-12.sql b/db/structure-12.sql new file mode 100644 index 00000000..bfdb861f --- /dev/null +++ b/db/structure-12.sql @@ -0,0 +1,1517 @@ +SET statement_timeout = 0; +SET lock_timeout = 0; +SET idle_in_transaction_session_timeout = 0; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; +SELECT pg_catalog.set_config('search_path', '', false); +SET check_function_bodies = false; +SET xmloption = content; +SET client_min_messages = warning; +SET row_security = off; + +-- +-- Name: integration_state; Type: TYPE; Schema: public; Owner: - +-- + +CREATE TYPE public.integration_state AS ENUM ( + 'active', + 'disabled' +); + + +-- +-- Name: que_validate_tags(jsonb); Type: FUNCTION; Schema: public; Owner: - +-- + +CREATE FUNCTION public.que_validate_tags(tags_array jsonb) RETURNS boolean + LANGUAGE sql + AS $$ + SELECT bool_and( + jsonb_typeof(value) = 'string' + AND + char_length(value::text) <= 100 + ) + FROM jsonb_array_elements(tags_array) +$$; + + +SET default_tablespace = ''; + +SET default_table_access_method = heap; + +-- +-- Name: que_jobs; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.que_jobs ( + priority smallint DEFAULT 100 NOT NULL, + run_at timestamp with time zone DEFAULT now() NOT NULL, + id bigint NOT NULL, + job_class text NOT NULL, + error_count integer DEFAULT 0 NOT NULL, + last_error_message text, + queue text DEFAULT 'default'::text NOT NULL, 
+ last_error_backtrace text, + finished_at timestamp with time zone, + expired_at timestamp with time zone, + args jsonb DEFAULT '[]'::jsonb NOT NULL, + data jsonb DEFAULT '{}'::jsonb NOT NULL, + CONSTRAINT error_length CHECK (((char_length(last_error_message) <= 500) AND (char_length(last_error_backtrace) <= 10000))), + CONSTRAINT job_class_length CHECK ((char_length( +CASE job_class + WHEN 'ActiveJob::QueueAdapters::QueAdapter::JobWrapper'::text THEN ((args -> 0) ->> 'job_class'::text) + ELSE job_class +END) <= 200)), + CONSTRAINT queue_length CHECK ((char_length(queue) <= 100)), + CONSTRAINT valid_args CHECK ((jsonb_typeof(args) = 'array'::text)), + CONSTRAINT valid_data CHECK (((jsonb_typeof(data) = 'object'::text) AND ((NOT (data ? 'tags'::text)) OR ((jsonb_typeof((data -> 'tags'::text)) = 'array'::text) AND (jsonb_array_length((data -> 'tags'::text)) <= 5) AND public.que_validate_tags((data -> 'tags'::text)))))) +) +WITH (fillfactor='90'); + + +-- +-- Name: TABLE que_jobs; Type: COMMENT; Schema: public; Owner: - +-- + +COMMENT ON TABLE public.que_jobs IS '4'; + + +-- +-- Name: que_determine_job_state(public.que_jobs); Type: FUNCTION; Schema: public; Owner: - +-- + +CREATE FUNCTION public.que_determine_job_state(job public.que_jobs) RETURNS text + LANGUAGE sql + AS $$ + SELECT + CASE + WHEN job.expired_at IS NOT NULL THEN 'expired' + WHEN job.finished_at IS NOT NULL THEN 'finished' + WHEN job.error_count > 0 THEN 'errored' + WHEN job.run_at > CURRENT_TIMESTAMP THEN 'scheduled' + ELSE 'ready' + END +$$; + + +-- +-- Name: que_job_notify(); Type: FUNCTION; Schema: public; Owner: - +-- + +CREATE FUNCTION public.que_job_notify() RETURNS trigger + LANGUAGE plpgsql + AS $$ + DECLARE + locker_pid integer; + sort_key json; + BEGIN + -- Don't do anything if the job is scheduled for a future time. 
+ IF NEW.run_at IS NOT NULL AND NEW.run_at > now() THEN + RETURN null; + END IF; + + -- Pick a locker to notify of the job's insertion, weighted by their number + -- of workers. Should bounce pseudorandomly between lockers on each + -- invocation, hence the md5-ordering, but still touch each one equally, + -- hence the modulo using the job_id. + SELECT pid + INTO locker_pid + FROM ( + SELECT *, last_value(row_number) OVER () + 1 AS count + FROM ( + SELECT *, row_number() OVER () - 1 AS row_number + FROM ( + SELECT * + FROM public.que_lockers ql, generate_series(1, ql.worker_count) AS id + WHERE listening AND queues @> ARRAY[NEW.queue] + ORDER BY md5(pid::text || id::text) + ) t1 + ) t2 + ) t3 + WHERE NEW.id % count = row_number; + + IF locker_pid IS NOT NULL THEN + -- There's a size limit to what can be broadcast via LISTEN/NOTIFY, so + -- rather than throw errors when someone enqueues a big job, just + -- broadcast the most pertinent information, and let the locker query for + -- the record after it's taken the lock. The worker will have to hit the + -- DB in order to make sure the job is still visible anyway. 
+ SELECT row_to_json(t) + INTO sort_key + FROM ( + SELECT + 'job_available' AS message_type, + NEW.queue AS queue, + NEW.priority AS priority, + NEW.id AS id, + -- Make sure we output timestamps as UTC ISO 8601 + to_char(NEW.run_at AT TIME ZONE 'UTC', 'YYYY-MM-DD"T"HH24:MI:SS.US"Z"') AS run_at + ) t; + + PERFORM pg_notify('que_listener_' || locker_pid::text, sort_key::text); + END IF; + + RETURN null; + END +$$; + + +-- +-- Name: que_state_notify(); Type: FUNCTION; Schema: public; Owner: - +-- + +CREATE FUNCTION public.que_state_notify() RETURNS trigger + LANGUAGE plpgsql + AS $$ + DECLARE + row record; + message json; + previous_state text; + current_state text; + BEGIN + IF TG_OP = 'INSERT' THEN + previous_state := 'nonexistent'; + current_state := public.que_determine_job_state(NEW); + row := NEW; + ELSIF TG_OP = 'DELETE' THEN + previous_state := public.que_determine_job_state(OLD); + current_state := 'nonexistent'; + row := OLD; + ELSIF TG_OP = 'UPDATE' THEN + previous_state := public.que_determine_job_state(OLD); + current_state := public.que_determine_job_state(NEW); + + -- If the state didn't change, short-circuit. 
+ IF previous_state = current_state THEN + RETURN null; + END IF; + + row := NEW; + ELSE + RAISE EXCEPTION 'Unrecognized TG_OP: %', TG_OP; + END IF; + + SELECT row_to_json(t) + INTO message + FROM ( + SELECT + 'job_change' AS message_type, + row.id AS id, + row.queue AS queue, + + coalesce(row.data->'tags', '[]'::jsonb) AS tags, + + to_char(row.run_at AT TIME ZONE 'UTC', 'YYYY-MM-DD"T"HH24:MI:SS.US"Z"') AS run_at, + to_char(now() AT TIME ZONE 'UTC', 'YYYY-MM-DD"T"HH24:MI:SS.US"Z"') AS time, + + CASE row.job_class + WHEN 'ActiveJob::QueueAdapters::QueAdapter::JobWrapper' THEN + coalesce( + row.args->0->>'job_class', + 'ActiveJob::QueueAdapters::QueAdapter::JobWrapper' + ) + ELSE + row.job_class + END AS job_class, + + previous_state AS previous_state, + current_state AS current_state + ) t; + + PERFORM pg_notify('que_state', message::text); + + RETURN null; + END +$$; + + +-- +-- Name: applications; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.applications ( + id bigint NOT NULL, + tenant_id bigint NOT NULL, + service_id bigint NOT NULL, + created_at timestamp without time zone NOT NULL, + updated_at timestamp without time zone NOT NULL +); + + +-- +-- Name: applications_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE public.applications_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: applications_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE public.applications_id_seq OWNED BY public.applications.id; + + +-- +-- Name: ar_internal_metadata; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.ar_internal_metadata ( + key character varying NOT NULL, + value character varying, + created_at timestamp(6) without time zone NOT NULL, + updated_at timestamp(6) without time zone NOT NULL +); + + +-- +-- Name: clients; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.clients ( + id bigint NOT NULL, + service_id bigint NOT 
NULL, + tenant_id bigint NOT NULL, + client_id character varying NOT NULL, + created_at timestamp without time zone NOT NULL, + updated_at timestamp without time zone NOT NULL +); + + +-- +-- Name: clients_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE public.clients_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: clients_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE public.clients_id_seq OWNED BY public.clients.id; + + +-- +-- Name: entries; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.entries ( + id bigint NOT NULL, + data jsonb, + tenant_id bigint NOT NULL, + model_id bigint NOT NULL, + created_at timestamp without time zone NOT NULL, + updated_at timestamp without time zone NOT NULL +); + + +-- +-- Name: entries_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE public.entries_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: entries_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE public.entries_id_seq OWNED BY public.entries.id; + + +-- +-- Name: integration_states; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.integration_states ( + id bigint NOT NULL, + started_at timestamp without time zone, + finished_at timestamp without time zone, + success boolean, + model_id bigint NOT NULL, + entry_id bigint, + integration_id bigint NOT NULL, + created_at timestamp without time zone NOT NULL, + updated_at timestamp without time zone NOT NULL +); + + +-- +-- Name: integration_states_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE public.integration_states_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: integration_states_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE public.integration_states_id_seq OWNED BY 
public.integration_states.id; + + +-- +-- Name: integrations; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.integrations ( + id bigint NOT NULL, + configuration jsonb, + type character varying NOT NULL, + tenant_id bigint, + model_id bigint, + created_at timestamp without time zone NOT NULL, + updated_at timestamp without time zone NOT NULL, + state public.integration_state DEFAULT 'active'::public.integration_state NOT NULL +); + + +-- +-- Name: integrations_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE public.integrations_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: integrations_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE public.integrations_id_seq OWNED BY public.integrations.id; + + +-- +-- Name: message_bus; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.message_bus ( + id bigint NOT NULL, + channel text NOT NULL, + value text NOT NULL, + added_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, + CONSTRAINT message_bus_value_check CHECK ((octet_length(value) >= 2)) +); + + +-- +-- Name: message_bus_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE public.message_bus_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: message_bus_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE public.message_bus_id_seq OWNED BY public.message_bus.id; + + +-- +-- Name: metrics; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.metrics ( + id bigint NOT NULL, + service_id bigint NOT NULL, + tenant_id bigint NOT NULL, + created_at timestamp without time zone NOT NULL, + updated_at timestamp without time zone NOT NULL +); + + +-- +-- Name: metrics_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE public.metrics_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 
1; + + +-- +-- Name: metrics_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE public.metrics_id_seq OWNED BY public.metrics.id; + + +-- +-- Name: models; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.models ( + id bigint NOT NULL, + tenant_id bigint NOT NULL, + record_type character varying NOT NULL, + record_id bigint NOT NULL, + created_at timestamp without time zone NOT NULL, + updated_at timestamp without time zone NOT NULL +); + + +-- +-- Name: models_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE public.models_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: models_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE public.models_id_seq OWNED BY public.models.id; + + +-- +-- Name: notifications; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.notifications ( + id bigint NOT NULL, + model_id bigint NOT NULL, + data jsonb NOT NULL, + tenant_id bigint NOT NULL, + created_at timestamp without time zone NOT NULL, + updated_at timestamp without time zone NOT NULL +); + + +-- +-- Name: notifications_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE public.notifications_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: notifications_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE public.notifications_id_seq OWNED BY public.notifications.id; + + +-- +-- Name: providers; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.providers ( + id bigint NOT NULL, + tenant_id bigint, + created_at timestamp without time zone NOT NULL, + updated_at timestamp without time zone NOT NULL +); + + +-- +-- Name: providers_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE public.providers_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: 
providers_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE public.providers_id_seq OWNED BY public.providers.id; + + +-- +-- Name: proxies; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.proxies ( + id bigint NOT NULL, + tenant_id bigint NOT NULL, + service_id bigint NOT NULL, + created_at timestamp without time zone NOT NULL, + updated_at timestamp without time zone NOT NULL +); + + +-- +-- Name: proxies_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE public.proxies_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: proxies_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE public.proxies_id_seq OWNED BY public.proxies.id; + + +-- +-- Name: que_jobs_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE public.que_jobs_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: que_jobs_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE public.que_jobs_id_seq OWNED BY public.que_jobs.id; + + +-- +-- Name: que_lockers; Type: TABLE; Schema: public; Owner: - +-- + +CREATE UNLOGGED TABLE public.que_lockers ( + pid integer NOT NULL, + worker_count integer NOT NULL, + worker_priorities integer[] NOT NULL, + ruby_pid integer NOT NULL, + ruby_hostname text NOT NULL, + queues text[] NOT NULL, + listening boolean NOT NULL, + CONSTRAINT valid_queues CHECK (((array_ndims(queues) = 1) AND (array_length(queues, 1) IS NOT NULL))), + CONSTRAINT valid_worker_priorities CHECK (((array_ndims(worker_priorities) = 1) AND (array_length(worker_priorities, 1) IS NOT NULL))) +); + + +-- +-- Name: que_values; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.que_values ( + key text NOT NULL, + value jsonb DEFAULT '{}'::jsonb NOT NULL, + CONSTRAINT valid_value CHECK ((jsonb_typeof(value) = 'object'::text)) +) +WITH (fillfactor='90'); + + +-- +-- Name: 
schema_migrations; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.schema_migrations ( + version character varying NOT NULL +); + + +-- +-- Name: services; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.services ( + id bigint NOT NULL, + tenant_id bigint NOT NULL, + created_at timestamp without time zone NOT NULL, + updated_at timestamp without time zone NOT NULL +); + + +-- +-- Name: services_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE public.services_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: services_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE public.services_id_seq OWNED BY public.services.id; + + +-- +-- Name: tenants; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.tenants ( + id bigint NOT NULL, + endpoint character varying NOT NULL, + access_token character varying NOT NULL, + created_at timestamp without time zone NOT NULL, + updated_at timestamp without time zone NOT NULL +); + + +-- +-- Name: tenants_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE public.tenants_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: tenants_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE public.tenants_id_seq OWNED BY public.tenants.id; + + +-- +-- Name: update_states; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.update_states ( + id bigint NOT NULL, + started_at timestamp without time zone, + finished_at timestamp without time zone, + success boolean DEFAULT false NOT NULL, + model_id bigint NOT NULL, + created_at timestamp without time zone NOT NULL, + updated_at timestamp without time zone NOT NULL +); + + +-- +-- Name: update_states_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE public.update_states_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO 
MAXVALUE + CACHE 1; + + +-- +-- Name: update_states_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE public.update_states_id_seq OWNED BY public.update_states.id; + + +-- +-- Name: usage_limits; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.usage_limits ( + id bigint NOT NULL, + metric_id bigint NOT NULL, + plan_id integer NOT NULL, + tenant_id bigint NOT NULL, + created_at timestamp without time zone NOT NULL, + updated_at timestamp without time zone NOT NULL +); + + +-- +-- Name: usage_limits_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE public.usage_limits_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: usage_limits_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE public.usage_limits_id_seq OWNED BY public.usage_limits.id; + + +-- +-- Name: applications id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.applications ALTER COLUMN id SET DEFAULT nextval('public.applications_id_seq'::regclass); + + +-- +-- Name: clients id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.clients ALTER COLUMN id SET DEFAULT nextval('public.clients_id_seq'::regclass); + + +-- +-- Name: entries id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.entries ALTER COLUMN id SET DEFAULT nextval('public.entries_id_seq'::regclass); + + +-- +-- Name: integration_states id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.integration_states ALTER COLUMN id SET DEFAULT nextval('public.integration_states_id_seq'::regclass); + + +-- +-- Name: integrations id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.integrations ALTER COLUMN id SET DEFAULT nextval('public.integrations_id_seq'::regclass); + + +-- +-- Name: message_bus id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.message_bus ALTER COLUMN id SET 
DEFAULT nextval('public.message_bus_id_seq'::regclass); + + +-- +-- Name: metrics id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.metrics ALTER COLUMN id SET DEFAULT nextval('public.metrics_id_seq'::regclass); + + +-- +-- Name: models id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.models ALTER COLUMN id SET DEFAULT nextval('public.models_id_seq'::regclass); + + +-- +-- Name: notifications id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.notifications ALTER COLUMN id SET DEFAULT nextval('public.notifications_id_seq'::regclass); + + +-- +-- Name: providers id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.providers ALTER COLUMN id SET DEFAULT nextval('public.providers_id_seq'::regclass); + + +-- +-- Name: proxies id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.proxies ALTER COLUMN id SET DEFAULT nextval('public.proxies_id_seq'::regclass); + + +-- +-- Name: que_jobs id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.que_jobs ALTER COLUMN id SET DEFAULT nextval('public.que_jobs_id_seq'::regclass); + + +-- +-- Name: services id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.services ALTER COLUMN id SET DEFAULT nextval('public.services_id_seq'::regclass); + + +-- +-- Name: tenants id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.tenants ALTER COLUMN id SET DEFAULT nextval('public.tenants_id_seq'::regclass); + + +-- +-- Name: update_states id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.update_states ALTER COLUMN id SET DEFAULT nextval('public.update_states_id_seq'::regclass); + + +-- +-- Name: usage_limits id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.usage_limits ALTER COLUMN id SET DEFAULT nextval('public.usage_limits_id_seq'::regclass); + + +-- +-- Name: applications applications_pkey; Type: CONSTRAINT; 
Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.applications + ADD CONSTRAINT applications_pkey PRIMARY KEY (id); + + +-- +-- Name: ar_internal_metadata ar_internal_metadata_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.ar_internal_metadata + ADD CONSTRAINT ar_internal_metadata_pkey PRIMARY KEY (key); + + +-- +-- Name: clients clients_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.clients + ADD CONSTRAINT clients_pkey PRIMARY KEY (id); + + +-- +-- Name: entries entries_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.entries + ADD CONSTRAINT entries_pkey PRIMARY KEY (id); + + +-- +-- Name: integration_states integration_states_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.integration_states + ADD CONSTRAINT integration_states_pkey PRIMARY KEY (id); + + +-- +-- Name: integrations integrations_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.integrations + ADD CONSTRAINT integrations_pkey PRIMARY KEY (id); + + +-- +-- Name: message_bus message_bus_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.message_bus + ADD CONSTRAINT message_bus_pkey PRIMARY KEY (id); + + +-- +-- Name: metrics metrics_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.metrics + ADD CONSTRAINT metrics_pkey PRIMARY KEY (id); + + +-- +-- Name: models models_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.models + ADD CONSTRAINT models_pkey PRIMARY KEY (id); + + +-- +-- Name: notifications notifications_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.notifications + ADD CONSTRAINT notifications_pkey PRIMARY KEY (id); + + +-- +-- Name: providers providers_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.providers + ADD CONSTRAINT providers_pkey PRIMARY KEY (id); + + +-- +-- 
Name: proxies proxies_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.proxies + ADD CONSTRAINT proxies_pkey PRIMARY KEY (id); + + +-- +-- Name: que_jobs que_jobs_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.que_jobs + ADD CONSTRAINT que_jobs_pkey PRIMARY KEY (id); + + +-- +-- Name: que_lockers que_lockers_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.que_lockers + ADD CONSTRAINT que_lockers_pkey PRIMARY KEY (pid); + + +-- +-- Name: que_values que_values_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.que_values + ADD CONSTRAINT que_values_pkey PRIMARY KEY (key); + + +-- +-- Name: schema_migrations schema_migrations_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.schema_migrations + ADD CONSTRAINT schema_migrations_pkey PRIMARY KEY (version); + + +-- +-- Name: services services_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.services + ADD CONSTRAINT services_pkey PRIMARY KEY (id); + + +-- +-- Name: tenants tenants_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.tenants + ADD CONSTRAINT tenants_pkey PRIMARY KEY (id); + + +-- +-- Name: update_states update_states_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.update_states + ADD CONSTRAINT update_states_pkey PRIMARY KEY (id); + + +-- +-- Name: usage_limits usage_limits_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.usage_limits + ADD CONSTRAINT usage_limits_pkey PRIMARY KEY (id); + + +-- +-- Name: index_applications_on_service_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_applications_on_service_id ON public.applications USING btree (service_id); + + +-- +-- Name: index_applications_on_tenant_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_applications_on_tenant_id ON 
public.applications USING btree (tenant_id); + + +-- +-- Name: index_clients_on_client_id_and_service_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_clients_on_client_id_and_service_id ON public.clients USING btree (client_id, service_id); + + +-- +-- Name: index_clients_on_service_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_clients_on_service_id ON public.clients USING btree (service_id); + + +-- +-- Name: index_clients_on_tenant_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_clients_on_tenant_id ON public.clients USING btree (tenant_id); + + +-- +-- Name: index_entries_on_model_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_entries_on_model_id ON public.entries USING btree (model_id); + + +-- +-- Name: index_entries_on_tenant_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_entries_on_tenant_id ON public.entries USING btree (tenant_id); + + +-- +-- Name: index_integration_states_on_entry_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_integration_states_on_entry_id ON public.integration_states USING btree (entry_id); + + +-- +-- Name: index_integration_states_on_integration_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_integration_states_on_integration_id ON public.integration_states USING btree (integration_id); + + +-- +-- Name: index_integration_states_on_model_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_integration_states_on_model_id ON public.integration_states USING btree (model_id); + + +-- +-- Name: index_integration_states_on_model_id_and_integration_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_integration_states_on_model_id_and_integration_id ON public.integration_states USING btree (model_id, integration_id); + + +-- +-- Name: index_integrations_on_model_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_integrations_on_model_id 
ON public.integrations USING btree (model_id); + + +-- +-- Name: index_integrations_on_tenant_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_integrations_on_tenant_id ON public.integrations USING btree (tenant_id); + + +-- +-- Name: index_integrations_on_tenant_id_and_type; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_integrations_on_tenant_id_and_type ON public.integrations USING btree (tenant_id, type) WHERE (model_id IS NULL); + + +-- +-- Name: index_integrations_on_tenant_id_and_type_and_model_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_integrations_on_tenant_id_and_type_and_model_id ON public.integrations USING btree (tenant_id, type, model_id) WHERE (model_id IS NOT NULL); + + +-- +-- Name: index_metrics_on_service_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_metrics_on_service_id ON public.metrics USING btree (service_id); + + +-- +-- Name: index_metrics_on_tenant_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_metrics_on_tenant_id ON public.metrics USING btree (tenant_id); + + +-- +-- Name: index_models_on_record; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_models_on_record ON public.models USING btree (record_type, record_id); + + +-- +-- Name: index_models_on_tenant_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_models_on_tenant_id ON public.models USING btree (tenant_id); + + +-- +-- Name: index_notifications_on_model_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_notifications_on_model_id ON public.notifications USING btree (model_id); + + +-- +-- Name: index_notifications_on_tenant_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_notifications_on_tenant_id ON public.notifications USING btree (tenant_id); + + +-- +-- Name: index_providers_on_tenant_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_providers_on_tenant_id ON 
public.providers USING btree (tenant_id); + + +-- +-- Name: index_proxies_on_service_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_proxies_on_service_id ON public.proxies USING btree (service_id); + + +-- +-- Name: index_proxies_on_tenant_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_proxies_on_tenant_id ON public.proxies USING btree (tenant_id); + + +-- +-- Name: index_services_on_tenant_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_services_on_tenant_id ON public.services USING btree (tenant_id); + + +-- +-- Name: index_update_states_on_model_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_update_states_on_model_id ON public.update_states USING btree (model_id); + + +-- +-- Name: index_usage_limits_on_metric_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_usage_limits_on_metric_id ON public.usage_limits USING btree (metric_id); + + +-- +-- Name: index_usage_limits_on_tenant_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_usage_limits_on_tenant_id ON public.usage_limits USING btree (tenant_id); + + +-- +-- Name: que_jobs_args_gin_idx; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX que_jobs_args_gin_idx ON public.que_jobs USING gin (args jsonb_path_ops); + + +-- +-- Name: que_jobs_data_gin_idx; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX que_jobs_data_gin_idx ON public.que_jobs USING gin (data jsonb_path_ops); + + +-- +-- Name: que_poll_idx; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX que_poll_idx ON public.que_jobs USING btree (queue, priority, run_at, id) WHERE ((finished_at IS NULL) AND (expired_at IS NULL)); + + +-- +-- Name: table_added_at_index; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX table_added_at_index ON public.message_bus USING btree (added_at); + + +-- +-- Name: table_channel_id_index; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX table_channel_id_index 
ON public.message_bus USING btree (channel, id); + + +-- +-- Name: que_jobs que_job_notify; Type: TRIGGER; Schema: public; Owner: - +-- + +CREATE TRIGGER que_job_notify AFTER INSERT ON public.que_jobs FOR EACH ROW EXECUTE FUNCTION public.que_job_notify(); + + +-- +-- Name: que_jobs que_state_notify; Type: TRIGGER; Schema: public; Owner: - +-- + +CREATE TRIGGER que_state_notify AFTER INSERT OR DELETE OR UPDATE ON public.que_jobs FOR EACH ROW EXECUTE FUNCTION public.que_state_notify(); + + +-- +-- Name: integration_states fk_rails_1133bc1397; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.integration_states + ADD CONSTRAINT fk_rails_1133bc1397 FOREIGN KEY (model_id) REFERENCES public.models(id); + + +-- +-- Name: proxies fk_rails_1b8514170a; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.proxies + ADD CONSTRAINT fk_rails_1b8514170a FOREIGN KEY (service_id) REFERENCES public.services(id); + + +-- +-- Name: usage_limits fk_rails_29f5c8eedd; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.usage_limits + ADD CONSTRAINT fk_rails_29f5c8eedd FOREIGN KEY (metric_id) REFERENCES public.metrics(id); + + +-- +-- Name: notifications fk_rails_3833a979e0; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.notifications + ADD CONSTRAINT fk_rails_3833a979e0 FOREIGN KEY (model_id) REFERENCES public.models(id); + + +-- +-- Name: entries fk_rails_463bb0a9cc; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.entries + ADD CONSTRAINT fk_rails_463bb0a9cc FOREIGN KEY (model_id) REFERENCES public.models(id); + + +-- +-- Name: models fk_rails_47bc1b5b2f; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.models + ADD CONSTRAINT fk_rails_47bc1b5b2f FOREIGN KEY (tenant_id) REFERENCES public.tenants(id); + + +-- +-- Name: clients fk_rails_4904dbddb8; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY 
public.clients + ADD CONSTRAINT fk_rails_4904dbddb8 FOREIGN KEY (tenant_id) REFERENCES public.tenants(id); + + +-- +-- Name: proxies fk_rails_574a99191a; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.proxies + ADD CONSTRAINT fk_rails_574a99191a FOREIGN KEY (tenant_id) REFERENCES public.tenants(id); + + +-- +-- Name: integration_states fk_rails_5f9da38b71; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.integration_states + ADD CONSTRAINT fk_rails_5f9da38b71 FOREIGN KEY (entry_id) REFERENCES public.entries(id); + + +-- +-- Name: update_states fk_rails_66e50c4ac9; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.update_states + ADD CONSTRAINT fk_rails_66e50c4ac9 FOREIGN KEY (model_id) REFERENCES public.models(id); + + +-- +-- Name: usage_limits fk_rails_7464a81431; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.usage_limits + ADD CONSTRAINT fk_rails_7464a81431 FOREIGN KEY (tenant_id) REFERENCES public.tenants(id); + + +-- +-- Name: notifications fk_rails_7c99fe0556; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.notifications + ADD CONSTRAINT fk_rails_7c99fe0556 FOREIGN KEY (tenant_id) REFERENCES public.tenants(id); + + +-- +-- Name: clients fk_rails_82a7d45fdb; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.clients + ADD CONSTRAINT fk_rails_82a7d45fdb FOREIGN KEY (service_id) REFERENCES public.services(id); + + +-- +-- Name: integration_states fk_rails_9c9a857590; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.integration_states + ADD CONSTRAINT fk_rails_9c9a857590 FOREIGN KEY (integration_id) REFERENCES public.integrations(id); + + +-- +-- Name: entries fk_rails_acc13c3cee; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.entries + ADD CONSTRAINT fk_rails_acc13c3cee FOREIGN KEY (tenant_id) REFERENCES public.tenants(id); + + +-- +-- 
Name: providers fk_rails_ba1a501ef5; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.providers + ADD CONSTRAINT fk_rails_ba1a501ef5 FOREIGN KEY (tenant_id) REFERENCES public.tenants(id); + + +-- +-- Name: applications fk_rails_c363b8b058; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.applications + ADD CONSTRAINT fk_rails_c363b8b058 FOREIGN KEY (service_id) REFERENCES public.services(id); + + +-- +-- Name: metrics fk_rails_c50b7368c1; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.metrics + ADD CONSTRAINT fk_rails_c50b7368c1 FOREIGN KEY (tenant_id) REFERENCES public.tenants(id); + + +-- +-- Name: metrics fk_rails_c7fa7e0e14; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.metrics + ADD CONSTRAINT fk_rails_c7fa7e0e14 FOREIGN KEY (service_id) REFERENCES public.services(id); + + +-- +-- Name: services fk_rails_c99dfff855; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.services + ADD CONSTRAINT fk_rails_c99dfff855 FOREIGN KEY (tenant_id) REFERENCES public.tenants(id); + + +-- +-- Name: applications fk_rails_cbcddd5826; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.applications + ADD CONSTRAINT fk_rails_cbcddd5826 FOREIGN KEY (tenant_id) REFERENCES public.tenants(id); + + +-- +-- Name: integrations fk_rails_cd54ced205; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.integrations + ADD CONSTRAINT fk_rails_cd54ced205 FOREIGN KEY (model_id) REFERENCES public.models(id); + + +-- +-- Name: integrations fk_rails_d329ca1b17; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.integrations + ADD CONSTRAINT fk_rails_d329ca1b17 FOREIGN KEY (tenant_id) REFERENCES public.tenants(id); + + +-- +-- PostgreSQL database dump complete +-- + +SET search_path TO "$user", public; + +INSERT INTO "schema_migrations" (version) VALUES +('20170602105141'), 
+('20170602105146'), +('20170602112320'), +('20170602115805'), +('20170602120831'), +('20170602120909'), +('20170602122059'), +('20170602142516'), +('20170602162517'), +('20170605112051'), +('20170605112058'), +('20170612073714'), +('20170620114832'), +('20181019101631'), +('20190410112007'), +('20190530080459'), +('20190603140450'), +('20190605094424'), +('20210504152609'); + + diff --git a/db/structure.sql b/db/structure.sql index 50515214..907f2388 100644 --- a/db/structure.sql +++ b/db/structure.sql @@ -37,8 +37,6 @@ $$; SET default_tablespace = ''; -SET default_with_oids = false; - -- -- Name: que_jobs; Type: TABLE; Schema: public; Owner: - -- @@ -264,8 +262,8 @@ ALTER SEQUENCE public.applications_id_seq OWNED BY public.applications.id; CREATE TABLE public.ar_internal_metadata ( key character varying NOT NULL, value character varying, - created_at timestamp without time zone NOT NULL, - updated_at timestamp without time zone NOT NULL + created_at timestamp(6) without time zone NOT NULL, + updated_at timestamp(6) without time zone NOT NULL ); @@ -536,6 +534,37 @@ CREATE SEQUENCE public.notifications_id_seq ALTER SEQUENCE public.notifications_id_seq OWNED BY public.notifications.id; +-- +-- Name: providers; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.providers ( + id bigint NOT NULL, + tenant_id bigint, + created_at timestamp without time zone NOT NULL, + updated_at timestamp without time zone NOT NULL +); + + +-- +-- Name: providers_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE public.providers_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: providers_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE public.providers_id_seq OWNED BY public.providers.id; + + -- -- Name: proxies; Type: TABLE; Schema: public; Owner: - -- @@ -818,6 +847,13 @@ ALTER TABLE ONLY public.models ALTER COLUMN id SET DEFAULT nextval('public.model ALTER TABLE ONLY 
public.notifications ALTER COLUMN id SET DEFAULT nextval('public.notifications_id_seq'::regclass); +-- +-- Name: providers id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.providers ALTER COLUMN id SET DEFAULT nextval('public.providers_id_seq'::regclass); + + -- -- Name: proxies id; Type: DEFAULT; Schema: public; Owner: - -- @@ -940,6 +976,14 @@ ALTER TABLE ONLY public.notifications ADD CONSTRAINT notifications_pkey PRIMARY KEY (id); +-- +-- Name: providers providers_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.providers + ADD CONSTRAINT providers_pkey PRIMARY KEY (id); + + -- -- Name: proxies proxies_pkey; Type: CONSTRAINT; Schema: public; Owner: - -- @@ -1132,10 +1176,10 @@ CREATE INDEX index_metrics_on_tenant_id ON public.metrics USING btree (tenant_id -- --- Name: index_models_on_record_type_and_record_id; Type: INDEX; Schema: public; Owner: - +-- Name: index_models_on_record; Type: INDEX; Schema: public; Owner: - -- -CREATE UNIQUE INDEX index_models_on_record_type_and_record_id ON public.models USING btree (record_type, record_id); +CREATE UNIQUE INDEX index_models_on_record ON public.models USING btree (record_type, record_id); -- @@ -1159,6 +1203,13 @@ CREATE INDEX index_notifications_on_model_id ON public.notifications USING btree CREATE INDEX index_notifications_on_tenant_id ON public.notifications USING btree (tenant_id); +-- +-- Name: index_providers_on_tenant_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_providers_on_tenant_id ON public.providers USING btree (tenant_id); + + -- -- Name: index_proxies_on_service_id; Type: INDEX; Schema: public; Owner: - -- @@ -1370,6 +1421,14 @@ ALTER TABLE ONLY public.entries ADD CONSTRAINT fk_rails_acc13c3cee FOREIGN KEY (tenant_id) REFERENCES public.tenants(id); +-- +-- Name: providers fk_rails_ba1a501ef5; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.providers + ADD CONSTRAINT fk_rails_ba1a501ef5 FOREIGN 
KEY (tenant_id) REFERENCES public.tenants(id); + + -- -- Name: applications fk_rails_c363b8b058; Type: FK CONSTRAINT; Schema: public; Owner: - -- @@ -1449,6 +1508,6 @@ INSERT INTO "schema_migrations" (version) VALUES ('20181019101631'), ('20190410112007'), ('20190530080459'), -('20190603140450'); - - +('20190603140450'), +('20190605094424'), +('20210504152609'); diff --git a/doc/Quickstart.md b/doc/Quickstart.md new file mode 100644 index 00000000..afc03b41 --- /dev/null +++ b/doc/Quickstart.md @@ -0,0 +1,56 @@ +## Overview + +A quick way to get Zync running locally without many details. +Check [README](../README.md) and [INSTALL](../INSTALL.md) for +more details. + +## Download this repository. +``` +git clone git@github.com:3scale/zync.git +``` + +## Run PostgreSQL on Mac + +``` +cd zync +brew bundle +brew services start postgresql +``` + +**Note:** The command `brew services start postgresql` starts the service of PostgreSQL. If later `./bin/setup` aborts, make sure that the `PostgreSQL` service is running. Verify with `brew services list` that has a status `started` and looking green. If the status is `started` but coloured orange, fix the errors indicated in the log located in `/usr/local/var/log/postgres.log` + +## Run PostgreSQL on Fedora 34 + +```shell +sudo dnf module install postgresql:10 +sudo dnf install libpq-devel +sudo /usr/bin/postgresql-setup --initdb +sudo systemctl enable postgresql +sudo systemctl start postgresql +sudo -i -u postgres createuser $USER +sudo -i -u postgres createdb -O $USER zync_development +sudo -i -u postgres createdb -O $USER zync_test +sudo -i -u postgres createdb -O $USER zync_production +``` + +## Run PostgreSQL as a container with Docker or Podman + +``` +docker run -d -p 5432:5432 -e POSTGRES_USER=postgres -e POSTGRES_DB=zync --name postgres10-zync docker.io/circleci/postgres:10.5-alpine +``` + +**Note:** With such a setup make sure to have `DATABASE_URL` environment variable set +prior starting Zync. 
You will also have to install on the host machine `psql` client +tool (needed for `db:setup`) and `libpq-devel` (needed to build `pg` gem). + +``` +export DATABASE_URL=postgresql://postgres:@localhost:5432/zync +``` + +## Start Zync + +``` +export ZYNC_AUTHENTICATION_TOKEN=token # must match porta config +./bin/setup +PROMETHEUS_EXPORTER_PORT=9395 bundle exec rake que +``` diff --git a/doc/dependency_decisions.yml b/doc/dependency_decisions.yml index cb2e1682..d9322b48 100644 --- a/doc/dependency_decisions.yml +++ b/doc/dependency_decisions.yml @@ -1,56 +1,56 @@ --- -- - :whitelist +- - :permit - MIT - - :who: - :why: + - :who: + :why: :versions: [] :when: 2017-06-23 06:12:33.037106000 Z -- - :whitelist +- - :permit - Apache 2.0 - - :who: - :why: + - :who: + :why: :versions: [] :when: 2017-06-23 06:12:54.025599000 Z -- - :whitelist +- - :permit - Simplified BSD - - :who: - :why: + - :who: + :why: :versions: [] :when: 2017-06-23 06:12:59.933965000 Z -- - :whitelist +- - :permit - New BSD - - :who: - :why: + - :who: + :why: :versions: [] :when: 2017-06-23 06:13:02.855385000 Z -- - :whitelist +- - :permit - ruby - - :who: - :why: + - :who: + :why: :versions: [] :when: 2017-06-23 06:13:06.311014000 Z - - :ignore_group - development - - :who: - :why: + - :who: + :why: :versions: [] :when: 2017-06-23 06:15:58.484416000 Z - - :ignore_group - test - - :who: - :why: + - :who: + :why: :versions: [] :when: 2017-06-23 06:16:00.472944000 Z - - :license - bundler - MIT - - :who: + - :who: :why: Bundler is distributed under MIT license https://github.com/bundler/bundler/blob/1102ec1a06d5971fc3c870b1ed50de8f826c36e4/bundler.gemspec\#L10 :versions: [] :when: 2017-06-23 06:34:01.682953000 Z -- - :whitelist +- - :permit - BSD - - :who: - :why: + - :who: + :why: :versions: [] :when: 2019-04-11 17:56:14.390898000 Z diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 00000000..d8788bba --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,30 @@ +version: '3.4' + 
+services: + zync: + build: . + command: bash -c "rm -f tmp/pids/server.pid && UNICORN_WORKERS=2 bin/rails server -b 0.0.0.0" + depends_on: + - postgres + links: + - postgres + ports: + - "9393:9393" + environment: + - DATABASE_URL=postgresql://postgres:@postgres:5432/ + + postgres: + image: postgres:10.5 + ports: + - "5432:5432" + environment: + - PGUSER=postgres + healthcheck: + test: ["CMD", "pg_isready", "-U", "postgres"] + timeout: 20s + retries: 10 + volumes: + - postgres-data:/var/lib/postgresql/data + +volumes: + postgres-data: diff --git a/examples/rest-api/.gitignore b/examples/rest-api/.gitignore new file mode 100644 index 00000000..79f33806 --- /dev/null +++ b/examples/rest-api/.gitignore @@ -0,0 +1 @@ +clients.yml diff --git a/examples/rest-api/Gemfile b/examples/rest-api/Gemfile new file mode 100644 index 00000000..c7a4cfbb --- /dev/null +++ b/examples/rest-api/Gemfile @@ -0,0 +1,6 @@ +# frozen_string_literal: true +source 'https://rubygems.org' + +gem 'sinatra' + +gem 'pry-byebug' diff --git a/examples/rest-api/Gemfile.lock b/examples/rest-api/Gemfile.lock new file mode 100644 index 00000000..ec0224ae --- /dev/null +++ b/examples/rest-api/Gemfile.lock @@ -0,0 +1,32 @@ +GEM + remote: https://rubygems.org/ + specs: + byebug (11.0.1) + coderay (1.1.2) + method_source (0.9.2) + mustermann (1.0.3) + pry (0.12.2) + coderay (~> 1.1.0) + method_source (~> 0.9.0) + pry-byebug (3.7.0) + byebug (~> 11.0) + pry (~> 0.10) + rack (2.2.3) + rack-protection (2.0.5) + rack + sinatra (2.0.5) + mustermann (~> 1.0) + rack (~> 2.0) + rack-protection (= 2.0.5) + tilt (~> 2.0) + tilt (2.0.9) + +PLATFORMS + ruby + +DEPENDENCIES + pry-byebug + sinatra + +BUNDLED WITH + 2.1.4 diff --git a/examples/rest-api/README.md b/examples/rest-api/README.md new file mode 100644 index 00000000..86ad2e62 --- /dev/null +++ b/examples/rest-api/README.md @@ -0,0 +1,58 @@ +# Zync REST API example + +This example project implements Zync's REST API protocol to synchronize OAuth2 clients. 
+
+## Prerequisites
+
+Given 3scale API is configured to use:
+ * OpenID Connect as the Authentication,
+ * "REST API" as an OpenID Connect Issuer Type and
+ * "http://id:secret@example.com/api" as OpenID Connect Issuer.
+
+When a 3scale application is created/updated/deleted, Zync will try to replay that change to "http://example.com/api".
+
+## Creating, updating and deleting Clients
+
+Zync will make the following requests to create/update/delete clients:
+
+* `PUT /clients/:client_id` (create, update)
+* `DELETE /clients/:client_id` (delete)
+
+All endpoints must reply with a 2xx status code. Otherwise, the request will be retried.
+
+### Payload
+
+The request payload in case of create and update is `application/json`:
+
+```json
+{
+  "client_id": "ee305610",
+  "client_secret": "ac0e42db426b4377096c6590e2b06aed",
+  "client_name": "oidc-app",
+  "redirect_uris": ["http://example.com"],
+  "grant_types": ["client_credentials", "password"]
+}
+```
+
+The request to delete a client has no payload.
+
+## Using OAuth2 authentication
+
+Zync will make a GET request to the `/.well-known/openid-configuration` endpoint and expect an `application/json` response.
+The response payload should contain the following:
+
+```json
+{
+  "token_endpoint": "http://idp.example.com/auth/realm/token"
+}
+```
+
+Zync will use that `token_endpoint` URL to exchange the client_id and client_secret provided in the OpenID Connect Issuer URL
+for an access token using the OAuth2 protocol.
+
+If the API responds with an unsuccessful response, Zync will fall back to HTTP Basic/Digest authentication using provided credentials.
+ +## References + +* OpenAPI Specification document [openapi.yml](openapi.yml) +* Sinatra application [app.rb](app.rb) diff --git a/examples/rest-api/app.rb b/examples/rest-api/app.rb new file mode 100644 index 00000000..8299d005 --- /dev/null +++ b/examples/rest-api/app.rb @@ -0,0 +1,54 @@ +# frozen_string_literal: true + +require 'sinatra' +require 'json' +require 'yaml/store' + +$store = YAML::Store.new('clients.yml') +$basic_auth = Rack::Auth::Basic.new(->(_) { [] }, 'REST API') do |username, password| + username.length > 0 && password.length > 0 +end + +def json(object) + headers 'Content-Type' => 'application/json' + body JSON(object) +end + +get '/.well-known/openid-configuration' do + # point zync where to exchange the OAuth2 access token + json({ token_endpoint: 'https://example.com/auth/realms/master/protocol/openid-connect/token' }) +end + +put '/clients/:client_id' do |client_id| + # {"client_id"=>"ee305610", + # "client_secret"=>"ac0e42db426b4377096c6590e2b06aed", + # "client_name"=>"oidc-app", + # "redirect_uris"=>["http://example.com"], + # "grant_types"=>["client_credentials", "password"]} + client = JSON.parse(request.body.read) + + # store the client + $store.transaction do + $store[client_id] = client + end + + json(client) +end + +delete '/clients/:client_id' do |client_id| + # Request HTTP Basic authentication + if (status, headers, body = $basic_auth.call(env)) + self.headers headers + error status, body + end + + client = nil + + # remove the client + $store.transaction do + client = $store[client_id] + $store.delete(client_id) + end + + json(client) +end diff --git a/examples/rest-api/config.ru b/examples/rest-api/config.ru new file mode 100644 index 00000000..221725af --- /dev/null +++ b/examples/rest-api/config.ru @@ -0,0 +1,5 @@ +# frozen_string_literal: true + +require_relative 'app' + +run Sinatra::Application diff --git a/examples/rest-api/openapi.yml b/examples/rest-api/openapi.yml new file mode 100644 index 00000000..40f1c3b1 --- 
/dev/null +++ b/examples/rest-api/openapi.yml @@ -0,0 +1,146 @@ +--- +openapi: 3.0.2 +info: + title: Zync REST API + version: 1.0.0 +paths: + /clients/{clientId}: + get: + summary: Get a Client + operationId: readClient + parameters: + - name: clientId + in: path + description: client_id + required: true + schema: + type: string + responses: + 200: + description: Client resource was found. + content: + application/json: + schema: + $ref: '#/components/schemas/Client' + security: + - OIDC: [] + Basic: [] + Digest: [] + put: + summary: Create or update the Client + operationId: saveClient + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Client' + examples: + Client: + value: + client_id: foo-bar + client_secret: some-secret + required: true + responses: + 200: + description: When the Client was updated. + content: + application/json: + schema: + $ref: '#/components/schemas/Client' + 201: + description: When the Client was created on the IDP. + content: + application/json: + schema: + $ref: '#/components/schemas/Client' + delete: + summary: Delete the Client + operationId: deleteClient + responses: + 200: + description: When the client was deleted. + content: + application/json: + schema: + $ref: '#/components/schemas/Client' + 204: + description: When the Client was already gone from the IDP. + content: + application/json: {} + parameters: + - name: clientId + in: path + required: true + /.well-known/openid-configuration: + get: + responses: + 200: + description: Enable OAuth2.0 authentication by responding with a token endpoint + of an IDP. + content: + application/json: + schema: + $ref: '#/components/schemas/OIDC' + examples: + Enable OAuth2.0 authentication: + value: + token_endpoint: https://idp.example.com/auth/realms/myrealm + security: + - {} +components: + schemas: + Client: + title: Root Type for Client + description: A Client representation. 
+ required: [] + type: object + properties: + client_id: + type: string + client_secret: + type: string + client_name: + type: string + redirect_uris: + description: A list of allowed redirect uris. + type: array + items: + type: string + grant_types: + description: A list of allowed grant types. + type: array + items: + type: string + example: |- + { + "client_id": "foo-bar", + "client_secret": "some-secret" + } + OIDC: + title: Root Type for OIDC + description: OpenID Connect Configuration to define where to get access token. + type: object + properties: + token_endpoint: + type: string + example: |- + { + "token_endpoint": "https://idp.example.com/auth/realms/myrealm" + } + securitySchemes: + OIDC: + type: openIdConnect + description: |- + Use OpenID Connect for authentication. + Zync will try to access `/.well-known/openid-configuration` and use "token_endpoint" property from the JSON response. + Then it will exchange its' credentials for an access token and will use that access token to access this API. + Basic: + type: http + description: Zync will try to send provided credentials as HTTP Basic authentication + in case it gets a 401 response with proper WWW-Authenticate header. + scheme: basic + Digest: + type: http + description: Zync will try to send provided credentials as HTTP Basic authentication + in case it gets a 401 response with proper WWW-Authenticate header. + scheme: digest diff --git a/lib/prometheus/active_job_subscriber.rb b/lib/prometheus/active_job_subscriber.rb index 37527a1e..2901e378 100644 --- a/lib/prometheus/active_job_subscriber.rb +++ b/lib/prometheus/active_job_subscriber.rb @@ -5,13 +5,15 @@ module Prometheus ## ActiveJob Subscriber to record Prometheus metrics. ## Those metrics are per process, so they have to be aggregated by Prometheus. 
class ActiveJobSubscriber < ActiveSupport::Subscriber + PROMETHEUS_TAGS = %i[adapter job_name].freeze + Yabeda.configure do group :que do - counter :job_retries_total, comment: 'A number of Jobs retried by this process' - counter :job_failures_total, comment: 'A number of Jobs errored by this process' - counter :job_performed_total, comment: 'A number of Jobs performed by this process' - counter :job_enqueued_total, comment: 'A number of Jobs enqueued by this process' - histogram :job_duration_seconds do + counter :job_retries_total, comment: 'A number of Jobs retried by this process', tags: ActiveJobSubscriber::PROMETHEUS_TAGS + counter :job_failures_total, comment: 'A number of Jobs errored by this process', tags: ActiveJobSubscriber::PROMETHEUS_TAGS + counter :job_performed_total, comment: 'A number of Jobs performed by this process', tags: ActiveJobSubscriber::PROMETHEUS_TAGS + counter :job_enqueued_total, comment: 'A number of Jobs enqueued by this process', tags: ActiveJobSubscriber::PROMETHEUS_TAGS + histogram :job_duration_seconds, tags: ActiveJobSubscriber::PROMETHEUS_TAGS do comment 'A histogram of Jobs perform times by this process' buckets false end @@ -22,7 +24,11 @@ class ActiveJobSubscriber < ActiveSupport::Subscriber def initialize super @metrics = Yabeda.que - @job_runtime_seconds = Yabeda::Prometheus.registry.summary(:que_job_runtime_seconds, 'A summary of Jobs perform times') + @job_runtime_seconds = Yabeda::Prometheus.registry.summary( + :que_job_runtime_seconds, + docstring: 'A summary of Jobs perform times', + labels: ActiveJobSubscriber::PROMETHEUS_TAGS + ) end def enqueue(event) @@ -52,7 +58,7 @@ def observe_duration(event, labels) duration = event.duration / 1000.0 job_duration_seconds.measure(labels, duration) - job_runtime_seconds.observe(labels, duration) + job_runtime_seconds.observe(duration, labels: labels) end def observe_perform(payload, labels) @@ -77,7 +83,5 @@ def extract_labels(payload) to: :@metrics attr_reader 
:job_runtime_seconds - - attach_to :active_job end end diff --git a/lib/prometheus/active_record.rb b/lib/prometheus/active_record.rb index 0848f89c..99fa9bb0 100644 --- a/lib/prometheus/active_record.rb +++ b/lib/prometheus/active_record.rb @@ -1,19 +1,23 @@ # frozen_string_literal: true +RAILS_CONNECTION_PROMETHEUS_TAGS = %i[state].freeze + Yabeda.configure do group :rails_connection_pool do - size = gauge :size, comment: 'Size of the connection pool' - connections = gauge :connections, comment: 'Number of connections in the connection pool' - waiting = gauge :waiting, comment: 'Number of waiting in the queue of the connection pool' - - no_labels = {}.freeze + # Empty label values SHOULD be treated as if the label was not present. + # @see https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#label + no_labels = { state: nil }.freeze busy = { state: :busy }.freeze dead = { state: :dead }.freeze idle = { state: :idle }.freeze + tags = RAILS_CONNECTION_PROMETHEUS_TAGS + + size = gauge :size, comment: 'Size of the connection pool', tags: tags + connections = gauge :connections, comment: 'Number of connections in the connection pool', tags: tags + waiting = gauge :waiting, comment: 'Number of waiting in the queue of the connection pool', tags: tags collect do stat = ActiveRecord::Base.connection_pool.stat - size.set(no_labels, stat.fetch(:size)) connections.set(no_labels, stat.fetch(:connections)) connections.set(busy, stat.fetch(:busy)) diff --git a/lib/prometheus/que_stats.rb b/lib/prometheus/que_stats.rb index a00850a4..b61f91cf 100644 --- a/lib/prometheus/que_stats.rb +++ b/lib/prometheus/que_stats.rb @@ -1,80 +1,190 @@ # frozen_string_literal: true require 'prometheus/client/metric' +require 'que/active_record/model' module Prometheus - # Prometheus metric to get job stats from Que. 
module QueStats - module_function + mattr_accessor :read_only_transaction, default: true, instance_accessor: false + + class Stats + def call + raise NoMethodError, __method__ + end + + protected - WORKER_STATS = <<~SQL - SELECT SUM(worker_count) AS workers, COUNT(*) AS nodes FROM que_lockers - SQL + DEFAULT_STATEMENT_TIMEOUT = 'SET LOCAL statement_timeout TO DEFAULT' + READ_ONLY_TRANSACTION = 'SET TRANSACTION ISOLATION LEVEL SERIALIZABLE READ ONLY DEFERRABLE' - def worker_stats - execute do |connection| - connection.select_one(WORKER_STATS) + def execute + connection.transaction(requires_new: true, joinable: false) do + connection.execute(DEFAULT_STATEMENT_TIMEOUT) + connection.execute(READ_ONLY_TRANSACTION) if Prometheus::QueStats.read_only_transaction + yield connection + end end + + delegate :connection, to: ActiveRecord::Base.name end - def job_stats(filter = nil) - filter = "WHERE #{filter}" if filter - sql = <<~SQL - SELECT args->0->>'job_class' AS job_class, COUNT(*) as count - FROM que_jobs #{filter} - GROUP BY args->0->>'job_class' + class WorkerStats < Stats + WORKER_STATS = <<~SQL + SELECT COALESCE(SUM("worker_count"),0) AS workers_count, COUNT(*) AS nodes_count FROM que_lockers SQL + private_constant :WORKER_STATS + + def call + execute do |connection| + connection.select_one(WORKER_STATS) + end + end + + def workers + call.fetch('workers_count') + end + + alias_method :all, :workers - execute do |connection| - connection.select_all(sql) + def nodes + call.fetch('nodes_count') end end - mattr_accessor :read_only_transaction, default: true, instance_accessor: false + class JobStats < Stats + def initialize + @job_classes_table = Arel::Table.new(:jobs) + @stats_count_table = Arel::Table.new(:stats) + end + + attr_reader :job_classes_table, :stats_count_table + + def call(*filters) + filtered_stats_count_relation = filters.reduce(build_stats_count_relation) { |relation, filter| relation.where(filter) } + common_tables = [job_classes_arel, 
Arel::Nodes::As.new(stats_count_table, filtered_stats_count_relation.arel)] + + relation = job_classes_table.join(stats_count_table, Arel::Nodes::OuterJoin). + on(job_class_column.eq(stats_count_table[JOB_CLASS_COLUMN_NAME])). + project([job_class_column, Arel::Nodes::NamedFunction.new('coalesce', [stats_count_table['count'], 0]).as('count')]). + with(common_tables) + + execute do |connection| + connection.select_all(relation.to_sql) + end + end + + alias_method :all, :call + + def ready + call(['error_count = ?', 0], { expired_at: nil, finished_at: nil }, ['run_at <= ?', Time.zone.now]) + end + + def scheduled + call(['error_count = ?', 0], { expired_at: nil, finished_at: nil }, ['run_at > ?', Time.zone.now]) + end + + def finished + call('finished_at IS NOT NULL') + end + + def failed + call(['error_count > ?', 0], { expired_at: nil, finished_at: nil }) + end + + def expired + call('expired_at IS NOT NULL') + end - class << self protected - def execute - connection.transaction(requires_new: true, joinable: false) do - connection.execute(DEFAULT_STATEMENT_TIMEOUT) - connection.execute(READ_ONLY_TRANSACTION) if read_only_transaction - yield connection + # ApplicationJob did not have to be here, but it's just harder to test otherwise because of ApplicationJob#delete_duplicates + JOB_CLASSES = %w[ApplicationJob ProcessEntryJob ProcessIntegrationEntryJob UpdateJob].inspect.tr('"', "'").freeze + private_constant :JOB_CLASSES + + JOB_CLASS_COLUMN_NAME = 'job_name' + private_constant :JOB_CLASS_COLUMN_NAME + + def job_class_column + @job_classes_table[JOB_CLASS_COLUMN_NAME] + end + + def job_classes_arel + @job_classes_arel ||= begin + job_classes = Arel.sql("SELECT unnest(ARRAY#{JOB_CLASSES})").as(JOB_CLASS_COLUMN_NAME) + Arel::Nodes::As.new(job_classes_table, Arel.sql("(#{job_classes.to_sql})")) end end - delegate :connection, to: ActiveRecord::Base.name + def build_stats_count_relation + 
Que::ActiveRecord::Model.select([Arel.sql("args->0->>'job_class'").as(JOB_CLASS_COLUMN_NAME), Arel.star.count.as('count')]).group("args->0->>'job_class'") + end end - DEFAULT_STATEMENT_TIMEOUT = 'SET LOCAL statement_timeout TO DEFAULT' - READ_ONLY_TRANSACTION = 'SET TRANSACTION ISOLATION LEVEL SERIALIZABLE READ ONLY DEFERRABLE' - end -end + class StatsCollector + PROMETHEUS_TAGS = %i[type].freeze -Yabeda.configure do - group :que do - scheduled_jobs = gauge :jobs_scheduled_total, comment: 'Que Jobs to be executed' - workers = gauge :workers_total, comment: 'Que Workers running' - - collect do - Prometheus::QueStats.job_stats.each do |stats| - scheduled_jobs.set({ job: stats.fetch('job_class') }, stats.fetch('count')) + def initialize(gauge, stats) + @gauge = gauge + @stats = stats end - Prometheus::QueStats.job_stats(%q[(args->0->>'retries')::integer > 0]).each do |stats| - scheduled_jobs.set({ job: stats.fetch('job_class'), type: 'retry' }, stats.fetch('count')) + attr_reader :gauge, :stats + + def call(type = nil) + type_hash, stats_count = type_hash_and_stats_count(type) + gauge.set({ }.merge(type_hash), stats_count) end - Prometheus::QueStats.job_stats('run_at < now()').each do |stats| - scheduled_jobs.set({ job: stats.fetch('job_class'), type: 'scheduled' }, stats.fetch('count')) + protected + + def type_hash_and_stats_count(type) + type_hash = { type: type } + stats_count = stats.public_send(type ? 
type : :all) + [type_hash, stats_count] end + end - Prometheus::QueStats.job_stats('run_at > now()').each do |stats| - scheduled_jobs.set({ job: stats.fetch('job_class'), type: 'future' }, stats.fetch('count')) + class GroupedStatsCollector < StatsCollector + PROMETHEUS_TAGS = %i[job_name type].freeze + + def initialize(gauge, stats, grouped_by:) + super(gauge, stats) + @grouped_by = grouped_by end - workers.set({ }, Prometheus::QueStats.worker_stats.fetch('workers')) + attr_reader :grouped_by + + def call(type = nil) + type_hash, grouped_stats_count = type_hash_and_stats_count(type) + grouped_stats_count.each do |stats_count| + gauge.set({ grouped_by.to_sym => stats_count.fetch(grouped_by) }.merge(type_hash), stats_count.fetch('count')) + end + end end end end +Yabeda.configure do + group :que do + workers = gauge :workers_total, + comment: 'Que Workers running', + tags: Prometheus::QueStats::StatsCollector::PROMETHEUS_TAGS + worker_stats = Prometheus::QueStats::WorkerStats.new + collector = Prometheus::QueStats::StatsCollector.new(workers, worker_stats) + collect(&collector.method(:call)) + end +end + +Yabeda.configure do + group :que do + jobs = gauge :jobs_scheduled_total, + comment: 'Que Jobs to be executed', + tags: Prometheus::QueStats::GroupedStatsCollector::PROMETHEUS_TAGS + job_stats = Prometheus::QueStats::JobStats.new + collector = Prometheus::QueStats::GroupedStatsCollector.new(jobs, job_stats, grouped_by: 'job_name') + collect do + collector.call + %w[ready scheduled finished failed expired].each(&collector.method(:call)) + end + end +end diff --git a/lib/tasks/db.rake b/lib/tasks/db.rake index ac1d0e17..8e4e9692 100644 --- a/lib/tasks/db.rake +++ b/lib/tasks/db.rake @@ -16,4 +16,24 @@ namespace :db do retry end end + + task :schema_env => :load_config do + db_config = ActiveRecord::Base.configurations.configs_for(env_name: ActiveRecord::Tasks::DatabaseTasks.env).first + ActiveRecord::Tasks::PostgreSQLDatabaseTasks.new(db_config).set_schema_file + end 
+end + +ActiveRecord::Tasks::PostgreSQLDatabaseTasks.prepend(Module.new do + def set_schema_file + clear_active_connections! + establish_master_connection + server_version = connection.select_value("SHOW server_version").to_i + suffix = server_version < 11 ? '' : '-12' + ENV['SCHEMA'] ||= "db/structure#{suffix}.sql" + clear_active_connections! + end +end) + +%w[db:schema:dump db:schema:load db:prepare db:test:prepare].each do |taskname| + Rake::Task[taskname].enhance(['db:schema_env']) end diff --git a/lib/tasks/que.rake b/lib/tasks/que.rake index b3b703cc..1972d9ef 100644 --- a/lib/tasks/que.rake +++ b/lib/tasks/que.rake @@ -1,6 +1,22 @@ # frozen_string_literal: true -desc 'Start que worker' -task que: :environment do |_, args| - exec("que ./config/environment.rb que/prometheus #{args.extras.join}") +task que: 'que:exec' + +namespace :que do + desc 'Start que worker' + task exec: :environment do |_, args| + exec("que ./config/environment.rb que/prometheus #{args.extras.join}") + end + + desc 'Reschedule all jobs to be executed now' + task reschedule: :environment do + require 'que/active_record/model' + + Que::ActiveRecord::Model.update_all(run_at: Time.now) + end + + desc 'Force updating all models' + task force_update: :environment do + Model.find_each(&UpdateJob.method(:perform_later)) + end end diff --git a/openshift/02-ruby-24-centos7-imagestream.yml b/openshift/02-ruby-25-ubi7-imagestream.yml similarity index 70% rename from openshift/02-ruby-24-centos7-imagestream.yml rename to openshift/02-ruby-25-ubi7-imagestream.yml index aafa68f8..93160b08 100644 --- a/openshift/02-ruby-24-centos7-imagestream.yml +++ b/openshift/02-ruby-25-ubi7-imagestream.yml @@ -3,12 +3,12 @@ kind: ImageStream metadata: labels: app: zync - name: ruby-24-centos7 + name: ruby-27-ubi7 spec: tags: - from: kind: DockerImage - name: centos/ruby-24-centos7 + name: registry.access.redhat.com/ubi7/ruby-27 name: latest referencePolicy: type: Source diff --git 
a/openshift/03-buildconfig-template.yml b/openshift/03-buildconfig-template.yml index 6eb44760..64afa80a 100644 --- a/openshift/03-buildconfig-template.yml +++ b/openshift/03-buildconfig-template.yml @@ -25,18 +25,18 @@ objects: uri: https://github.com/3scale/zync.git type: Git strategy: - sourceStrategy: + dockerStrategy: from: kind: ImageStreamTag - name: ruby-24-centos7:latest - type: Source + name: ruby-25-ubi7:latest + type: Docker triggers: - github: secret: "${GITHUB_SECRET}" type: GitHub - type: ImageChange - type: ConfigChange - + parameters: - name: GITHUB_SECRET displayName: GitHub WebHook Secret diff --git a/test/adapters/abstract_adapter_test.rb b/test/adapters/abstract_adapter_test.rb index a56f2f9b..e00332a0 100644 --- a/test/adapters/abstract_adapter_test.rb +++ b/test/adapters/abstract_adapter_test.rb @@ -23,4 +23,19 @@ class AbstractAdapterTest < ActiveSupport::TestCase assert_equal uri, subject.new('http://id:secret@lvh.me:3000/auth/realm/name/').endpoint end + + test 'http_client' do + HTTPClient::Util.stub_const(:AddressableEnabled, false) do + assert_kind_of subject, subject.new('http://id:secret@example.com') + end + end + + test 'oidc discovery' do + stub_request(:get, "https://example.com/.well-known/openid-configuration"). 
+ to_return(status: 404, body: '', headers: {}) + + assert_raises AbstractAdapter::OIDC::AuthenticationError do + assert_nil subject.new('https://example.com').authentication + end + end end diff --git a/test/adapters/generic_adapter_test.rb b/test/adapters/generic_adapter_test.rb index aacb0714..56f09465 100644 --- a/test/adapters/generic_adapter_test.rb +++ b/test/adapters/generic_adapter_test.rb @@ -3,17 +3,17 @@ class GenericAdapterTest < ActiveSupport::TestCase test 'new' do - assert GenericAdapter.new('http://id:secret@lvh.me:3000/auth/realm/name') + assert RESTAdapter.new('http://id:secret@lvh.me:3000/auth/realm/name') end test 'endpoint' do - adapter = GenericAdapter.new('http://id:secret@lvh.me:3000/auth/realm/name') + adapter = RESTAdapter.new('http://id:secret@lvh.me:3000/auth/realm/name') assert_kind_of URI, adapter.endpoint end test 'setting access token' do - subject = GenericAdapter.new('http://lvh.me:3000') + subject = RESTAdapter.new('http://lvh.me:3000') subject.authentication = 'sometoken' @@ -24,10 +24,10 @@ class GenericAdapterTest < ActiveSupport::TestCase uri = URI('http://lvh.me:3000/auth/realm/name/') assert_equal uri, - GenericAdapter.new('http://id:secret@lvh.me:3000/auth/realm/name').endpoint + RESTAdapter.new('http://id:secret@lvh.me:3000/auth/realm/name').endpoint assert_equal uri, - GenericAdapter.new('http://id:secret@lvh.me:3000/auth/realm/name/').endpoint + RESTAdapter.new('http://id:secret@lvh.me:3000/auth/realm/name/').endpoint end test 'timeout error' do @@ -37,7 +37,7 @@ class GenericAdapterTest < ActiveSupport::TestCase get_token = stub_request(:post, 'http://lvh.me:3000/auth/realm/name/protocol/openid-connect/token').to_timeout - adapter = GenericAdapter.new('http://id:secret@lvh.me:3000/auth/realm/name') + adapter = RESTAdapter.new('http://id:secret@lvh.me:3000/auth/realm/name') log = Object.new class << log @@ -61,8 +61,8 @@ def error(object) end test 'create client' do - adapter = 
GenericAdapter.new('http://example.com/adapter', authentication: 'token') - client = GenericAdapter::Client.new(name: 'Foo', id: 'foo', secret: 'bar') + adapter = RESTAdapter.new('http://example.com/adapter', authentication: 'token') + client = RESTAdapter::Client.new(name: 'Foo', id: 'foo', secret: 'bar') create = stub_request(:put, "http://example.com/adapter/clients/foo"). with( @@ -75,8 +75,8 @@ def error(object) end test 'update client' do - adapter = GenericAdapter.new('http://example.com/adapter', authentication: 'token') - client = GenericAdapter::Client.new(name: 'Foo', id: 'foo', secret: 'bar') + adapter = RESTAdapter.new('http://example.com/adapter', authentication: 'token') + client = RESTAdapter::Client.new(name: 'Foo', id: 'foo', secret: 'bar') update = stub_request(:put, "http://example.com/adapter/clients/foo"). with( @@ -89,8 +89,8 @@ def error(object) end test 'delete client' do - adapter = GenericAdapter.new('http://example.com/adapter', authentication: 'token') - client = GenericAdapter::Client.new(id: 'foo') + adapter = RESTAdapter.new('http://example.com/adapter', authentication: 'token') + client = RESTAdapter::Client.new(id: 'foo') delete = stub_request(:delete, "http://example.com/adapter/clients/foo").to_return(status: 200) @@ -100,8 +100,8 @@ def error(object) end test 'read client' do - adapter = GenericAdapter.new('http://example.com/adapter', authentication: 'token') - client = GenericAdapter::Client.new(id: 'foo') + adapter = RESTAdapter.new('http://example.com/adapter', authentication: 'token') + client = RESTAdapter::Client.new(id: 'foo') body = { client_id: 'foo', client_name: 'Foo'} read = stub_request(:get, "http://example.com/adapter/clients/foo") @@ -110,7 +110,7 @@ def error(object) client = adapter.read_client(client) - assert_kind_of GenericAdapter::Client, client + assert_kind_of RESTAdapter::Client, client assert_equal 'Foo', client.name assert_equal 'foo', client.id @@ -118,7 +118,7 @@ def error(object) end test 'test' do 
- adapter = GenericAdapter.new('http://id:secret@example.com/auth/realm/name') + adapter = RESTAdapter.new('http://id:secret@example.com/auth/realm/name') form_urlencoded = { 'Content-Type'=>'application/x-www-form-urlencoded' } token = stub_request(:post, 'http://example.com/auth/realm/name/get-token'). @@ -139,9 +139,9 @@ def error(object) stub_request(:get, 'http://lvh.me:3000/auth/realm/name/.well-known/openid-configuration'). to_return(status: 200, body: 'somebody', headers: {'Content-Type' => 'text/plain'} ) - adapter = GenericAdapter.new('http://id:secret@lvh.me:3000/auth/realm/name', authentication: 'something') + adapter = RESTAdapter.new('http://id:secret@lvh.me:3000/auth/realm/name', authentication: 'something') - assert_raises GenericAdapter::InvalidResponseError do + assert_raises RESTAdapter::InvalidResponseError do adapter.test end end @@ -154,26 +154,26 @@ def error(object) }.deep_stringify_keys Rails.application.config.x.stub(:generic, config) do - client = GenericAdapter::Client.new(name: 'foo') + client = RESTAdapter::Client.new(name: 'foo') assert_includes client.to_h.fetch(:grant_types), :client_credentials end end test 'client hash' do - client = GenericAdapter::Client.new(name: 'name') + client = RESTAdapter::Client.new(name: 'name') assert_includes client.to_h, :client_name end test 'client serialization' do - client = GenericAdapter::Client.new(name: 'name') + client = RESTAdapter::Client.new(name: 'name') assert_equal client.to_h.to_json, client.to_json end test 'oauth flows' do - client = GenericAdapter::Client.new({ + client = RESTAdapter::Client.new({ id: 'client_id', oidc_configuration: { implicit_flow_enabled: true, diff --git a/test/adapters/rest_adapter_test.rb b/test/adapters/rest_adapter_test.rb new file mode 100644 index 00000000..55b178a4 --- /dev/null +++ b/test/adapters/rest_adapter_test.rb @@ -0,0 +1,67 @@ +# frozen_string_literal: true +require 'test_helper' + +class RESTAdapterTest < ActiveSupport::TestCase + 
class_attribute :subject, default: RESTAdapter + + test 'oidc discovery' do + stub_request(:get, "https://example.com/.well-known/openid-configuration"). + to_return(status: 404, body: '', headers: {}) + + assert_nil subject.new('https://example.com').authentication + end + + test 'create client with OAuth auth' do + stub_request(:get, "https://example.com/.well-known/openid-configuration"). + to_return(status: 200, body: { token_endpoint: 'http://auth.example.com/oauth/token' }.to_json, headers: { 'Content-Type' => 'application/json' }) + + stub_request(:post, "http://auth.example.com/oauth/token"). + with( + body: { client_id: "id", client_secret: 'secret', grant_type: "client_credentials" }, + headers: { 'Content-Type'=>'application/x-www-form-urlencoded' }). + to_return(status: 200, body: "access_token=token-value", headers: { 'Content-Type'=>'application/x-www-form-urlencoded' }) + + client = RESTAdapter::Client.new(id: 'foo') + stub_request(:put, "https://example.com/clients/foo"). + with( + body: client.to_json, + headers: { 'Content-Type'=>'application/json', 'Authorization' => 'Bearer token-value' }). + to_return(status: 200, body: { status: 'ok' }.to_json, headers: { 'Content-Type' => 'application/json' }) + + assert subject.new('https://id:secret@example.com').create_client(client) + end + + test 'create client without auth' do + client = RESTAdapter::Client.new(id: 'foo') + + stub_request(:get, "https://example.com/.well-known/openid-configuration"). + to_return(status: 404, body: '', headers: {}) + + stub_request(:put, "https://example.com/clients/foo"). + with( + body: client.to_json, + headers: { 'Content-Type'=>'application/json' }). 
+ to_return(status: 200, body: { status: 'ok' }.to_json, headers: { 'Content-Type' => 'application/json' }) + + assert subject.new('https://example.com').create_client(client) + end + + test 'create client with basic auth' do + client = RESTAdapter::Client.new(id: 'foo') + adapter = subject.new('https://user:pass@example.com') + # WebMock does not support request retries on 401 status + adapter.send(:http_client).force_basic_auth = true + + stub_request(:get, "https://example.com/.well-known/openid-configuration"). + to_return(status: 404, body: '', headers: {}) + + stub_request(:put, 'https://example.com/clients/foo'). + with( + basic_auth: %w[user pass], + body: client.to_json, + headers: { 'Content-Type'=>'application/json' }). + to_return(status: 200, body: { status: 'ok' }.to_json, headers: { 'Content-Type' => 'application/json' }) + + assert adapter.create_client(client) + end +end diff --git a/test/fixtures/entries.yml b/test/fixtures/entries.yml index a741a2ed..cbcaecc6 100644 --- a/test/fixtures/entries.yml +++ b/test/fixtures/entries.yml @@ -26,7 +26,7 @@ client: created_at: <%= 1.week.ago %> service: - data: + data: tenant: two model: service @@ -34,5 +34,15 @@ proxy: data: oidc_issuer_endpoint: http://example.com/auth/realm/master oidc_issuer_type: keycloak + service_id: 2 tenant: two model: proxy + + +provider: + data: + id: 2 + domain: provider.example.com + admin_domain: provider-admin.example.com + tenant: two + model: provider diff --git a/test/fixtures/integrations.yml b/test/fixtures/integrations.yml index 0f2d4a93..31e43296 100644 --- a/test/fixtures/integrations.yml +++ b/test/fixtures/integrations.yml @@ -15,6 +15,6 @@ keycloak: generic: tenant: one model: first_service - type: Integration::Generic + type: Integration::REST configuration: endpoint: 'http://id:pass@example.com/generic/api' diff --git a/test/fixtures/models.yml b/test/fixtures/models.yml index d049467c..ef4d2a23 100644 --- a/test/fixtures/models.yml +++ 
b/test/fixtures/models.yml @@ -19,3 +19,7 @@ service: proxy: tenant: two record: two (Proxy) + +provider: + tenant: two + record: two (Provider) diff --git a/test/fixtures/providers.yml b/test/fixtures/providers.yml new file mode 100644 index 00000000..1500b155 --- /dev/null +++ b/test/fixtures/providers.yml @@ -0,0 +1,7 @@ +# Read about fixtures at http://api.rubyonrails.org/classes/ActiveRecord/FixtureSet.html + +one: + tenant: one + +two: + tenant: two diff --git a/test/jobs/process_entry_job_test.rb b/test/jobs/process_entry_job_test.rb index f7158047..997b1f72 100644 --- a/test/jobs/process_entry_job_test.rb +++ b/test/jobs/process_entry_job_test.rb @@ -15,9 +15,15 @@ class ProcessEntryJobTest < ActiveJob::TestCase job = ProcessEntryJob.new proxy = entries(:proxy) - integrations = job.model_integrations_for(proxy) + ProcessEntryJob::CreateK8SIntegration.stub(:enabled, true) do + integrations = job.model_integrations_for(proxy) + + integrations.each do |integration| + assert_kind_of Integration::Kubernetes, integration + end - assert_equal 0, integrations.size + assert_equal 1, integrations.size + end end test 'model integrations for client' do @@ -53,6 +59,39 @@ class ProcessEntryJobTest < ActiveJob::TestCase end end + class CreateProxyIntegrationWithFiber < ProcessEntryJob::CreateOIDCProxyIntegration + def find_integration + Fiber.yield + super + end + end + + class ProcessEntryJobWithFiber < ProcessEntryJob + self.proxy_integration_services = [CreateProxyIntegrationWithFiber] + end + + test 'race condition between entry jobs to create same proxy integration' do + entry = entries(:proxy) + + existing_integrations = Integration.where(tenant: entry.tenant) + UpdateState.where(model: existing_integrations).delete_all + existing_integrations.delete_all + + fiber1 = Fiber.new { ProcessEntryJobWithFiber.ensure_integrations_for(entry) } + fiber2 = Fiber.new { ProcessEntryJobWithFiber.ensure_integrations_for(entry) } + + fiber1.resume + fiber2.resume + + 
assert_difference(existing_integrations.method(:count)) do + fiber2.resume # creates the integration first + end + + assert_no_difference(existing_integrations.method(:count)) do + fiber1.resume + end + end + test 'skips deleted proxy' do proxy = entries(:proxy) diff --git a/test/jobs/process_integration_entry_job_test.rb b/test/jobs/process_integration_entry_job_test.rb index a7008885..c4ff3a35 100644 --- a/test/jobs/process_integration_entry_job_test.rb +++ b/test/jobs/process_integration_entry_job_test.rb @@ -63,4 +63,34 @@ class ProcessIntegrationEntryJobTest < ActiveJob::TestCase assert_mock service end + + test 'relation' do + service = ProcessIntegrationEntryJob.new(integrations(:keycloak), models(:service)) + application = ProcessIntegrationEntryJob.new(integrations(:keycloak), models(:application)) + + refute_equal application.relation.to_sql, service.relation.to_sql + + adapter = ActiveJob::QueueAdapters::QueAdapter.new + + assert_difference application.relation.method(:count), 2 do + adapter.enqueue(application) + adapter.enqueue(application) + + assert_difference service.relation.method(:count), 2 do + adapter.enqueue(service) + adapter.enqueue(service) + end + end + end + + test 'perform later' do + adapter = ActiveJob::QueueAdapters::QueAdapter.new + job = ProcessIntegrationEntryJob.new(integrations(:keycloak), models(:application)) + + adapter.enqueue(job) + + assert_difference job.relation.method(:count), -1 do + ApplicationJob.perform_later(job) # this is not using the same adapter, so it actually just removes previous one + end + end end diff --git a/test/jobs/update_job_test.rb b/test/jobs/update_job_test.rb index a685ec4f..989620e7 100644 --- a/test/jobs/update_job_test.rb +++ b/test/jobs/update_job_test.rb @@ -13,6 +13,36 @@ class UpdateJobTest < ActiveJob::TestCase end end + test 'relation' do + application = UpdateJob.new(models(:application)) + client = UpdateJob.new(models(:client)) + + refute_equal application.relation.to_sql, 
client.relation.to_sql + + adapter = ActiveJob::QueueAdapters::QueAdapter.new + + assert_difference application.relation.method(:count), 2 do + adapter.enqueue(application) + adapter.enqueue(application) + + assert_difference client.relation.method(:count), 2 do + adapter.enqueue(client) + adapter.enqueue(client) + end + end + end + + test 'perform later' do + adapter = ActiveJob::QueueAdapters::QueAdapter.new + job = UpdateJob.new(models(:application)) + + adapter.enqueue(job) + + assert_difference job.relation.method(:count), -1 do + ApplicationJob.perform_later(job) # this is not using the same adapter, so it actually just removes previous one + end + end + test 'creates entry' do model = Model.create!(tenant: tenants(:two), record: applications(:two)) diff --git a/test/lib/prometheus/que_stats_test.rb b/test/lib/prometheus/que_stats_test.rb index 910ecedf..0bdef172 100644 --- a/test/lib/prometheus/que_stats_test.rb +++ b/test/lib/prometheus/que_stats_test.rb @@ -13,17 +13,80 @@ class Prometheus::QueStatsTest < ActiveSupport::TestCase end test 'worker stats' do - assert Prometheus::QueStats.worker_stats + assert Prometheus::QueStats::WorkerStats.new.call end test 'job stats' do - assert Prometheus::QueStats.job_stats - assert Prometheus::QueStats.job_stats('1 > 0') + Que.stop! + ApplicationJob.perform_later + assert_equal 1, stats_count + assert_equal 1, stats_count(where: ['1 > 0']) + assert_equal 1, stats_count(where: ['1 > 0', '2 > 1']) + assert_equal 0, stats_count(where: ['1 > 0', '2 < 1']) + end + + test 'ready jobs stats' do + Que.stop! 
+ assert_equal 0, stats_count(type: :ready) + jobs = Array.new(3) { ApplicationJob.perform_later } + jobs << ApplicationJob.set(wait_until: 1.day.from_now).perform_later + assert_equal 3, stats_count(type: :ready) + update_job(jobs[0], error_count: 1) + assert_equal 2, stats_count(type: :ready) + update_job(jobs[1], expired_at: 1.minute.ago) + assert_equal 1, stats_count(type: :ready) + update_job(jobs[2], finished_at: 1.minute.ago) + assert_equal 0, stats_count(type: :ready) end - uses_transaction def test_readonly_transaction - Prometheus::QueStats.stub(:read_only_transaction, true) do - Prometheus::QueStats.worker_stats + test 'scheduled jobs stats' do + Que.stop! + assert_equal 0, stats_count(type: :scheduled) + jobs = [ApplicationJob, ApplicationJob.set(wait_until: 1.day.from_now), ApplicationJob.set(wait_until: 2.days.from_now)].map(&:perform_later) + assert_equal 2, stats_count(type: :scheduled) + update_job(jobs[1], error_count: 16, expired_at: 1.minute.ago) + assert_equal 1, stats_count(type: :scheduled) + update_job(jobs.last, run_at: 1.minute.ago) + assert_equal 0, stats_count(type: :scheduled) + end + + test 'finished jobs stats' do + Que.stop! + assert_equal 0, stats_count(type: :finished) + jobs = Array.new(2) { ApplicationJob.perform_later } + assert_equal 0, stats_count(type: :finished) + update_job(jobs.first, finished_at: Time.now) + assert_equal 1, stats_count(type: :finished) + end + + test 'failed jobs stats' do + Que.stop! + assert_equal 0, stats_count(type: :failed) + jobs = Array.new(2) { ApplicationJob.perform_later } + assert_equal 0, stats_count(type: :failed) + update_job(jobs.first, error_count: 1) + assert_equal 1, stats_count(type: :failed) + update_job(jobs.first, error_count: 15) + assert_equal 1, stats_count(type: :failed) + update_job(jobs.first, error_count: 16, expired_at: Time.now.utc) + assert_equal 0, stats_count(type: :failed) + end + + test 'expired jobs stats' do + Que.stop! 
+ assert_equal 0, stats_count(type: :expired) + jobs = Array.new(2) { ApplicationJob.perform_later } + assert_equal 0, stats_count(type: :expired) + update_job(jobs.first, error_count: 16, expired_at: Time.now.utc) + assert_equal 1, stats_count(type: :expired) + end + + class WithTransaction < ActiveSupport::TestCase + uses_transaction :test_readonly_transaction + def test_readonly_transaction + Prometheus::QueStats.stub(:read_only_transaction, true) do + Prometheus::QueStats::WorkerStats.new.call + end end end @@ -43,4 +106,17 @@ class Prometheus::QueStatsTest < ActiveSupport::TestCase assert Prometheus::Client::Formats::Text.marshal(Yabeda::Prometheus.registry) end + + protected + + def update_job(job, attributes = {}) + ApplicationJob.model.where("args->0->>'job_id' = ?", job.job_id).update_all(attributes) + end + + def stats_count(job_class: ApplicationJob.name, type: nil, where: []) + job_stats = Prometheus::QueStats::JobStats.new + stats = type ? job_stats.public_send(type) : job_stats.call(*where) + record = stats.find { |record| record['job_name'] == job_class } + record['count'] + end end diff --git a/test/models/integration_test.rb b/test/models/integration_test.rb index 09a38ec9..4e5604d0 100644 --- a/test/models/integration_test.rb +++ b/test/models/integration_test.rb @@ -13,4 +13,56 @@ class IntegrationTest < ActiveSupport::TestCase assert_equal [ keycloak ], Integration.tenant_or_model(nil, model) assert_equal [ one ], Integration.tenant_or_model(tenant, nil) end + + test 'enabled is by integration' do + with_integration keycloak: false, rest: true, kubernetes: true do + assert Integration.new.enabled? + refute Integration::Keycloak.new.enabled? + end + end + + test 'rest enabled?' do + integration = Integration::REST.new + integration.endpoint = 'https://rest.example.com/endpoint' + assert integration.enabled? + + integration.endpoint = nil + refute integration.enabled? 
+ + with_integration rest: false do + integration = Integration::REST.new + integration.endpoint = 'https://rest.example.com/endpoint' + refute integration.enabled? + + integration.endpoint = nil + refute integration.enabled? + end + end + + test 'kubernetes enabled?' do + client = K8s::Client.new(nil) + integration = Integration::Kubernetes.new + + K8s::Client.stub(:autoconfig, client) do + assert integration.enabled? + + with_integration kubernetes: false do + refute integration.enabled? + end + end + + K8s::Client.stub(:autoconfig, nil) do + refute integration.enabled? + end + + with_integration kubernetes: false do + refute integration.enabled? + end + end + + protected + + def with_integration(opts = {}, &block) + Rails.application.config.stub(:integrations, opts.with_indifferent_access, &block) + end end diff --git a/test/models/provider_test.rb b/test/models/provider_test.rb new file mode 100644 index 00000000..252fcb9b --- /dev/null +++ b/test/models/provider_test.rb @@ -0,0 +1,7 @@ +require 'test_helper' + +class ProviderTest < ActiveSupport::TestCase + # test "the truth" do + # assert true + # end +end diff --git a/test/services/fetch_service_test.rb b/test/services/fetch_service_test.rb index 4e3f0510..f9ab90ec 100644 --- a/test/services/fetch_service_test.rb +++ b/test/services/fetch_service_test.rb @@ -17,6 +17,27 @@ def setup assert_kind_of Entry, @service.call(models(:application)) end + test 'call with Provider' do + stub_request(:get, "#{tenants(:two).endpoint}/admin/api/provider.json"). + to_return(status: 200, body: '{}', headers: {}) + + assert_kind_of Entry, @service.call(models(:provider)) + end + + test 'call with Client' do + stub_request(:get, "#{tenants(:two).endpoint}/admin/api/applications/find.json?app_id=two"). 
+ to_return(status: 200, body: '{}', headers: {}) + + assert_kind_of Entry, @service.call(models(:client)) + end + + test 'call with Proxy' do + stub_request(:get, "https://two.example.com/admin/api/services/298486374/proxy.json"). + to_return(status: 200, body: '{}', headers: {}) + + assert_kind_of Entry, @service.call(models(:proxy)) + end + test 'call returns entry that can be saved' do @service.call(models(:service)).save! end diff --git a/test/services/incoming_notification_service_test.rb b/test/services/incoming_notification_service_test.rb index 16a4f106..29dac322 100644 --- a/test/services/incoming_notification_service_test.rb +++ b/test/services/incoming_notification_service_test.rb @@ -37,41 +37,37 @@ class LockingTest < ActiveSupport::TestCase teardown do ::Que.clear! + ActiveRecord::Base.connection_pool.disconnect! end def test_process_locked_model notification = notifications(:two) - queue = SizedQueue.new(1) + fiber = Fiber.new do + first = Model.connection_pool.checkout + first.transaction(requires_new: true) do + first.execute('SET SESSION statement_timeout TO 100;') - Model.connection.execute('SET SESSION statement_timeout TO 100;') - - runner = Thread.new do - Model.connection_pool.with_connection do |connection| - connection.transaction do - connection.execute('SET SESSION statement_timeout TO 100;') + second = Model.connection_pool.checkout + second.transaction(requires_new: true) do + second.execute('SET SESSION statement_timeout TO 100;') model = Model.find(notification.model_id) UpdateState.acquire_lock(model) do |state| - queue.push state - queue.push model.touch - queue.push true + model.touch + Fiber.yield state end - end end end - runner.abort_on_exception = true - assert_kind_of UpdateState, queue.pop + assert_kind_of UpdateState, fiber.resume UpdateJob.stub(:perform_later, nil) do assert IncomingNotificationService.call(notification.dup) end - ensure - runner.kill - runner.join + assert_nil fiber.resume end end end diff --git 
a/test/services/integration/generic_service_test.rb b/test/services/integration/generic_service_test.rb index 0c48c987..5c39eb64 100644 --- a/test/services/integration/generic_service_test.rb +++ b/test/services/integration/generic_service_test.rb @@ -40,7 +40,7 @@ def test_new entry = entries(:client) adapter = MiniTest::Mock.new - adapter.expect(:update_client, true, [ GenericAdapter::Client ]) + adapter.expect(:update_client, true, [RESTAdapter::Client ]) subject.stub(:adapter, adapter) do |service| service.call(entry) @@ -54,7 +54,7 @@ def test_new entry.data = entry.data.except(:enabled) adapter = MiniTest::Mock.new - adapter.expect(:delete_client, true, [ GenericAdapter::Client ]) + adapter.expect(:delete_client, true, [RESTAdapter::Client ]) subject.stub(:adapter, adapter) do |service| service.call(entry) diff --git a/test/services/kubernetes_service_test.rb b/test/services/kubernetes_service_test.rb new file mode 100644 index 00000000..16b0aa5b --- /dev/null +++ b/test/services/kubernetes_service_test.rb @@ -0,0 +1,318 @@ +# frozen_string_literal: true + +require 'test_helper' +require 'base64' + +class Integration::KubernetesServiceTest < ActiveSupport::TestCase + include Base64 + + def before_setup + @_env = ENV.to_hash + super + end + + def after_teardown + ENV.replace(@_env) + super + end + + setup do + ENV['KUBERNETES_NAMESPACE'] = 'zync' + ENV['KUBE_TOKEN'] = strict_encode64('token') + ENV['KUBE_SERVER'] = 'http://localhost' + ENV['KUBE_CA'] = encode64 <<~CERTIFICATE + -----BEGIN CERTIFICATE----- + MIIBZjCCAQ2gAwIBAgIQBHMSmrmlj2QTqgFRa+HP3DAKBggqhkjOPQQDAjASMRAw + DgYDVQQDEwdyb290LWNhMB4XDTE5MDQwNDExMzI1OVoXDTI5MDQwMTExMzI1OVow + EjEQMA4GA1UEAxMHcm9vdC1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABGG2 + NDgiBuXNVWVVxrDNVjPsKm14wg76w4830Zn3K24u03LJthzsB3RPJN9l+kM7ryjg + dCenDYANVabMMQEy2iGjRTBDMA4GA1UdDwEB/wQEAwIBBjASBgNVHRMBAf8ECDAG + AQH/AgEBMB0GA1UdDgQWBBRfJt1t0sAlUMBwfeTWVv2v4XNcNjAKBggqhkjOPQQD + 
AgNHADBEAiB+MlaTocrG33AiOE8TrH4N2gVrDBo2fAyJ1qDmjxhWvAIgPOoAoWQ9 + qwUVj52L6/Ptj0Tn4Mt6u+bdVr6jEXkZ8f0= + -----END CERTIFICATE----- + CERTIFICATE + + @service = Integration::KubernetesService.new(nil) + end + + attr_reader :service + + test 'create ingress' do + proxy = entries(:proxy) + + stub_request(:get, 'http://localhost/apis/route.openshift.io/v1'). + with(headers: request_headers). + to_return(status: 200, body: { + kind: 'APIResourceList', + apiVersion: 'v1', + groupVersion: 'route.openshift.io/v1', + resources: [ + { name: 'routes', singularName: '', namespaced: true, kind: 'Route', verbs: %w(create delete deletecollection get list patch update watch), categories: ['all'] }, + ] + }.to_json, headers: response_headers) + + stub_request(:get, 'http://localhost/apis/route.openshift.io/v1/namespaces/zync/routes?labelSelector=3scale.net/created-by=zync,3scale.net/tenant_id=298486374,zync.3scale.net/record=Z2lkOi8venluYy9Qcm94eS8yOTg0ODYzNzQ,zync.3scale.net/ingress=proxy,3scale.net/service_id=2'). + with(headers: request_headers). + to_return(status: 200, body: { + kind: 'RouteList', + apiVersion: 'route.openshift.io/v1', + metadata: { selfLink: '/apis/route.openshift.io/v1/namespaces/zync/routes', resourceVersion: '651341' }, + items: [] + }.to_json, headers: response_headers) + + service.call(proxy) + end + + test 'route status missing ingress' do + # stub api resource list requests (kinds 'pods' and 'routes') + stub_request(:get, 'http://localhost/api/v1'). + with(headers: request_headers). + to_return(status: 200, body: { + kind: 'APIResourceList', + apiVersion: 'v1', + groupVersion: 'apps.3scale.net/v1alpha1', + resources: [ + { name: 'pods', singularName: '', namespaced: true, kind: 'pod', verbs: %w(create delete deletecollection get list patch update watch), categories: ['all'] }, + ] + }.to_json, headers: response_headers) + + stub_request(:get, 'http://localhost/apis/route.openshift.io/v1'). + with(headers: request_headers). 
+ to_return(status: 200, body: { + kind: 'APIResourceList', + apiVersion: 'v1', + groupVersion: 'route.openshift.io/v1', + resources: [ + { name: 'routes', singularName: '', namespaced: true, kind: 'Route', verbs: %w(create delete deletecollection get list patch update watch), categories: ['all'] }, + ] + }.to_json, headers: response_headers) + + # stub route owner + ENV['POD_NAME'] = 'zync-que-123' + route_owner = { kind: 'Pod', apiVersion: 'v1', metadata: { name: 'zync-que-123', generateName: 'zync-que-', namespace: 'zync', selfLink: '/api/v1/namespaces/zync/pods/zync-que-123', uid: 'b145c845-7222-44ce-8d9d-f13b8f357de6', resourceVersion: '3620670' } } + + stub_request(:get, "http://localhost/api/v1/namespaces/zync/pods/#{route_owner.dig(:metadata, :name)}"). + with(headers: request_headers). + to_return(status: 200, body: route_owner.to_json, headers: response_headers) + + route_owner_reference = route_owner.slice(:kind, :apiVersion).merge(**route_owner[:metadata].slice(:name, :uid), controller: nil, blockOwnerDeletion: nil) + + # base objects for creating provider routes + entry = entries(:provider) + provider_id = entry.data.fetch('id') + provider = entry.model.record + tenant_id = entry.tenant_id + record_gid = provider.to_gid_param + + provider_route_labels = { + '3scale.net/created-by' => 'zync', + '3scale.net/tenant_id' => tenant_id.to_s, + 'zync.3scale.net/record' => record_gid, + 'zync.3scale.net/ingress' => 'provider', + '3scale.net/provider_id' => provider_id.to_s + } + + provider_route_annotations = { + '3scale.net/gid' => entry.to_gid.to_s, + 'zync.3scale.net/gid' => provider.to_gid.to_s + } + + route_list = { + kind: 'RouteList', + apiVersion: 'route.openshift.io/v1', + metadata: { selfLink: '/apis/route.openshift.io/v1/namespaces/zync/routes', resourceVersion: '651341' }, + items: [] + } + + # stub for creating provider route to system-developer + system_developer_route_labels = provider_route_labels.merge('zync.3scale.net/route-to' => 
'system-developer') + system_developer_route_annotations = provider_route_annotations.merge('zync.3scale.net/host' => 'provider.example.com') + system_developer_route = { + kind: 'Route', + apiVersion: 'route.openshift.io/v1', + metadata: { + namespace: 'zync', + name: 'zync-3scale-provider-grvkd', + uid: '3882e5dc-1f8f-460e-a1cc-ee4c5f35a709', + selfLink: '/apis/route.openshift.io/v1/namespaces/zync/routes/zync-3scale-provider-grvkd', + labels: system_developer_route_labels, + annotations: system_developer_route_annotations + }, + status: {} + } + + stub_request(:get, "http://localhost/apis/route.openshift.io/v1/namespaces/zync/routes?labelSelector=3scale.net/created-by=zync,3scale.net/tenant_id=#{tenant_id},zync.3scale.net/record=#{record_gid},zync.3scale.net/route-to=system-developer"). + with(headers: request_headers). + to_return(status: 200, body: route_list.to_json, headers: response_headers) + + stub_request(:post, 'http://localhost/apis/route.openshift.io/v1/namespaces/zync/routes'). + with(headers: request_headers, body: { + metadata: { + generateName: 'zync-3scale-provider-', + namespace: 'zync', + labels: system_developer_route_labels, + ownerReferences: [route_owner_reference], + annotations: system_developer_route_annotations + }, + spec: { + host: 'provider.example.com', + port: { targetPort: 'http' }, + to: { kind: 'Service', name: 'system-developer' }, + tls: { insecureEdgeTerminationPolicy: 'Redirect', termination: 'edge' } + }, + apiVersion: 'route.openshift.io/v1', + kind: 'Route' + }.to_json). + to_return(status: 201, body: system_developer_route.to_json, headers: response_headers) + + route_list[:metadata][:resourceVersion] = '651342' + route_list[:items] = [system_developer_route] + + stub_request(:get, 'http://localhost/apis/route.openshift.io/v1/namespaces/zync/routes'). + with(headers: request_headers). 
+ to_return(status: 200, body: route_list.to_json, headers: response_headers) + + stub_request(:get, "http://localhost/apis/route.openshift.io/v1/namespaces/zync/routes/#{system_developer_route.dig(:metadata, :name)}"). + with(headers: request_headers). + to_return(status: 200, body: system_developer_route.to_json, headers: response_headers) + + # stub for creating provider route to system-provider + system_provider_route_labels = provider_route_labels.merge('zync.3scale.net/route-to' => 'system-provider') + system_provider_route_annotations = provider_route_annotations.merge('zync.3scale.net/host' => 'provider-admin.example.com') + system_provider_route = { + kind: 'Route', + apiVersion: 'route.openshift.io/v1', + metadata: { + namespace: 'zync', + name: 'zync-3scale-provider-rbpqw', + uid: 'f741703c-7ca5-4480-8a32-074fcc759583', + selfLink: '/apis/route.openshift.io/v1/namespaces/zync/routes/zync-3scale-provider-rbpqw', + labels: system_provider_route_labels, + annotations: system_provider_route_annotations + }, + status: {} + } + + stub_request(:get, "http://localhost/apis/route.openshift.io/v1/namespaces/zync/routes?labelSelector=3scale.net/created-by=zync,3scale.net/tenant_id=#{tenant_id},zync.3scale.net/record=#{record_gid},zync.3scale.net/route-to=system-provider"). + with(headers: request_headers). + to_return(status: 200, body: route_list.merge(items: []).to_json, headers: response_headers) + + stub_request(:post, 'http://localhost/apis/route.openshift.io/v1/namespaces/zync/routes'). 
+ with(headers: request_headers, body: { + metadata: { + generateName: 'zync-3scale-provider-', + namespace: 'zync', + labels: system_provider_route_labels, + ownerReferences: [route_owner_reference], + annotations: system_provider_route_annotations + }, + spec: { + host: 'provider-admin.example.com', + port: { targetPort: 'http' }, + to: { kind: 'Service', name: 'system-provider' }, + tls: { insecureEdgeTerminationPolicy: 'Redirect', termination: 'edge' } + }, + apiVersion: 'route.openshift.io/v1', + kind: 'Route' + }.to_json). + to_return(status: 201, body: system_provider_route.to_json, headers: response_headers) + + route_list[:metadata][:resourceVersion] = '651343' + route_list[:items] = [system_developer_route, system_provider_route] + + stub_request(:get, 'http://localhost/apis/route.openshift.io/v1/namespaces/zync/routes'). + with(headers: request_headers). + to_return(status: 200, body: route_list.to_json, headers: response_headers) + + stub_request(:get, "http://localhost/apis/route.openshift.io/v1/namespaces/zync/routes/#{system_provider_route.dig(:metadata, :name)}"). + with(headers: request_headers). + to_return(status: 200, body: system_provider_route.to_json, headers: response_headers) + + # create both routes + assert_raises(Integration::KubernetesService::MissingStatusIngress) do + service.call(entry) + end + end + + test 'owner reference controller' do + replication_controller = { apiVersion: "v1", kind: "ReplicationController", name: "zync-que-1", uid: "252c094e-b23a-4b80-8d82-5ef1499a1771", controller: true, blockOwnerDeletion: true, metadata: { name: 'zync-que-1' } } + pod = K8s::Resource.new(kind: 'Pod', apiVersion: 'v1', metadata: { name: 'zync-que-1', generateName: 'zync-que-', namespace: 'zync', selfLink: '/api/v1/namespaces/zync/pods/zync-que-123', uid: 'b145c845-7222-44ce-8d9d-f13b8f357de6', resourceVersion: '3620670', ownerReferences: [replication_controller.except(:metadata)] }) + + stub_request(:get, 'http://localhost/api/v1'). 
+ with(headers: request_headers). + to_return(status: 200, body: { + kind: 'APIResourceList', + apiVersion: 'v1', + groupVersion: 'apps.3scale.net/v1alpha1', + resources: [ + { name: 'replicationcontrollers', singularName: '', namespaced: true, kind: 'ReplicationController', verbs: %w(get patch update) }, + ] + }.to_json, headers: response_headers) + + stub_request(:get, 'http://localhost/api/v1/namespaces/zync/replicationcontrollers/zync-que-1'). + with(headers: request_headers). + to_return(status: 200, body: K8s::Resource.new(replication_controller).to_json, headers: response_headers) + + owner_root = service.owner_reference_controller(pod) + + assert_equal 'ReplicationController', owner_root.kind + assert_equal 'zync-que-1', owner_root.name + end + + class RouteSpec < ActiveSupport::TestCase + test 'secure routes' do + url = 'https://my-api.example.com' + service_name = 'My API' + port = 7443 + spec = Integration::KubernetesService::RouteSpec.new(url, service_name, port) + json = { + host: "my-api.example.com", + port: {targetPort: 7443}, + to: {kind: "Service", name: "My API"}, + tls: {insecureEdgeTerminationPolicy: "Redirect", termination: "edge"} + } + assert_equal json, spec.to_hash + + url = 'http://my-api.example.com' + service_name = 'My API' + port = 7780 + spec = Integration::KubernetesService::RouteSpec.new(url, service_name, port) + json = { + host: "my-api.example.com", + port: {targetPort: 7780}, + to: {kind: "Service", name: "My API"}, + tls: nil + } + assert_equal json, spec.to_hash + end + + test 'defaults to https when scheme is missing' do + url = 'my-api.example.com' + service_name = 'My API' + port = 7443 + spec = Integration::KubernetesService::RouteSpec.new(url, service_name, port) + json = { + host: "my-api.example.com", + port: {targetPort: 7443}, + to: {kind: "Service", name: "My API"}, + tls: {insecureEdgeTerminationPolicy: "Redirect", termination: "edge"} + } + assert_equal json, spec.to_hash + end + end + + protected + + def 
request_headers + { + 'Accept' => 'application/json', + 'Authorization' => 'Bearer token', + 'Host' => 'localhost:80' + } + end + + def response_headers + { 'Content-Type' => 'application/json' } + end +end