diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
index fd8285c..a48edd2 100644
--- a/.github/ISSUE_TEMPLATE/config.yml
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -2,5 +2,5 @@ blank_issues_enabled: false
 contact_links:
   - name: ❓ Question
-    url: https://github.com/appuio/appuio-cloud-reporting/discussions
+    url: https://github.com/appuio/appuio-reporting/discussions
     about: Ask or discuss with us, we're happy to help 🙋
diff --git a/.gitignore b/.gitignore
index 12a4c36..071ebea 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,7 +3,7 @@ dist/
 .github/release-notes.md

 # Build
-appuio-cloud-reporting
+appuio-reporting
 *.out

 # Docs
diff --git a/Dockerfile b/Dockerfile
index d2ee0c5..82cab42 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -8,7 +8,7 @@ RUN \
     ca-certificates \
     tzdata

-ENTRYPOINT ["appuio-cloud-reporting"]
-COPY appuio-cloud-reporting /usr/bin/
+ENTRYPOINT ["appuio-reporting"]
+COPY appuio-reporting /usr/bin/

 USER 65536:0
diff --git a/Makefile b/Makefile
index 59174c0..6a4ffd2 100644
--- a/Makefile
+++ b/Makefile
@@ -26,7 +26,7 @@ build: build-bin build-docker ## All-in-one build
 .PHONY: build-bin
 build-bin: export CGO_ENABLED = 0
 build-bin: fmt vet ## Build binary
-	@go build -o $(BIN_FILENAME) github.com/appuio/appuio-cloud-reporting
+	@go build -o $(BIN_FILENAME) github.com/appuio/appuio-reporting

 .PHONY: build-docker
 build-docker: build-bin ## Build docker image
@@ -37,23 +37,8 @@ ensure-prometheus: .cache/prometheus ## Ensures that Prometheus is installed in
 .PHONY: test
 test: export ACR_DB_URL = postgres://user:password@localhost:55432/db?sslmode=disable
-test: COMPOSE_FILE = docker-compose-test.yml
-test: compose_args = -p reporting-test
-test: ensure-prometheus docker-compose-down ping-postgres ## Run full test suite
-	go run github.com/appuio/appuio-cloud-reporting migrate
-	go run github.com/appuio/appuio-cloud-reporting migrate --seed
+test: ensure-prometheus
 	go test ./... -tags integration -coverprofile cover.out -covermode atomic
-	@$(COMPOSE_CMD) $(compose_args) down
-
-.PHONY: gen-golden
-gen-golden: export ACR_DB_URL = postgres://user:password@localhost:55432/db?sslmode=disable
-gen-golden: COMPOSE_FILE = docker-compose-test.yml
-gen-golden: compose_args = -p reporting-test
-gen-golden: ensure-prometheus docker-compose-down ping-postgres ## Update golden files
-	go run github.com/appuio/appuio-cloud-reporting migrate
-	go run github.com/appuio/appuio-cloud-reporting migrate --seed
-	go test ./pkg/invoice -update
-	@$(COMPOSE_CMD) $(compose_args) down

 .PHONY: fmt
 fmt: ## Run 'go fmt' and `jsonnetfmt` against code
@@ -74,7 +59,7 @@ generate: ## Generate additional code and artifacts
 	@go generate ./...
 .PHONY: clean
-clean: docker-compose-down ## Cleans local build artifacts
+clean:
 	rm -rf docs/node_modules $(docs_out_dir) dist .cache

 .cache/prometheus:
diff --git a/Makefile.compose.mk b/Makefile.compose.mk
deleted file mode 100644
index eb6d051..0000000
--- a/Makefile.compose.mk
+++ /dev/null
@@ -1,15 +0,0 @@
-.PHONY: docker-compose-up
-docker-compose-up: ## Starts up docker compose services
-	@$(COMPOSE_CMD) -f $(COMPOSE_FILE) $(compose_args) up --detach
-	@echo ''
-	@echo ' To connect to the DB:'
-	@echo ' $$ psql postgres://reporting:reporting@localhost/appuio-cloud-reporting-test'
-	@echo ''
-
-.PHONY: docker-compose-down
-docker-compose-down: ## Stops docker compose services
-	@$(COMPOSE_CMD) -f $(COMPOSE_FILE) $(compose_args) down
-
-.PHONY: ping-postgres
-ping-postgres: docker-compose-up ## Waits until postgres is ready to accept connections
-	$(COMPOSE_CMD) -f $(COMPOSE_FILE) $(compose_args) exec -T -- postgres sh -c "until pg_isready; do sleep 1s; done; sleep 1s"
diff --git a/Makefile.vars.mk b/Makefile.vars.mk
index 682c69e..0c7051b 100644
--- a/Makefile.vars.mk
+++ b/Makefile.vars.mk
@@ -1,7 +1,7 @@
 ## These are some common variables for Make
 PROJECT_ROOT_DIR = .
-PROJECT_NAME ?= appuio-cloud-reporting
+PROJECT_NAME ?= appuio-reporting
 PROJECT_OWNER ?= appuio

 ## BUILD:go
@@ -14,11 +14,6 @@ IMG_TAG ?= latest
 # Image URL to use all building/pushing image targets
 CONTAINER_IMG ?= local.dev/$(PROJECT_OWNER)/$(PROJECT_NAME):$(IMG_TAG)

-## COMPOSE:
-COMPOSE_CMD ?= docker-compose
-COMPOSE_DB_URL ?= postgres://reporting:reporting@localhost:55432/reporting-db?sslmode=disable
-COMPOSE_FILE ?= docker-compose.yml
-
 PROMETHEUS_VERSION ?= 2.40.7
 PROMETHEUS_DIST ?= $(shell go env GOOS)
 PROMETHEUS_ARCH ?= $(shell go env GOARCH)
diff --git a/README.md b/README.md
index 0c64f46..9547f8f 100644
--- a/README.md
+++ b/README.md
@@ -1,139 +1,36 @@
-# APPUiO Cloud Reporting
+# APPUiO Reporting

-[![Build](https://img.shields.io/github/workflow/status/appuio/appuio-cloud-reporting/Test)][build]
-![Go version](https://img.shields.io/github/go-mod/go-version/appuio/appuio-cloud-reporting)
-[![Version](https://img.shields.io/github/v/release/appuio/appuio-cloud-reporting)][releases]
-[![Maintainability](https://img.shields.io/codeclimate/maintainability/appuio/appuio-cloud-reporting)][codeclimate]
-[![Coverage](https://img.shields.io/codeclimate/coverage/appuio/appuio-cloud-reporting)][codeclimate]
-[![GitHub downloads](https://img.shields.io/github/downloads/appuio/appuio-cloud-reporting/total)][releases]
+[![Build](https://img.shields.io/github/workflow/status/appuio/appuio-reporting/Test)][build]
+![Go version](https://img.shields.io/github/go-mod/go-version/appuio/appuio-reporting)
+[![Version](https://img.shields.io/github/v/release/appuio/appuio-reporting)][releases]
+[![Maintainability](https://img.shields.io/codeclimate/maintainability/appuio/appuio-reporting)][codeclimate]
+[![Coverage](https://img.shields.io/codeclimate/coverage/appuio/appuio-reporting)][codeclimate]
+[![GitHub downloads](https://img.shields.io/github/downloads/appuio/appuio-reporting/total)][releases]

-[build]: https://github.com/appuio/appuio-cloud-reporting/actions?query=workflow%3ATest
-[releases]: https://github.com/appuio/appuio-cloud-reporting/releases
-[codeclimate]: https://codeclimate.com/github/appuio/appuio-cloud-reporting
-
-## Use APPUiO Global instance
-
-```sh
-# Follow the login instructions to get a token
-oc login --server=https://api.cloudscale-lpg-2.appuio.cloud:6443
-
-# Forward database and thanos to local host
-kubectl -n appuio-reporting port-forward svc/reporting-db 5432 &
-kubectl --as=cluster-admin -n appuio-thanos port-forward svc/thanos-query 9090 &
-
-# Check for pending migrations
-DB_USER=$(kubectl -n appuio-reporting get secret/reporting-db-superuser -o jsonpath='{.data.user}' | base64 --decode)
-DB_PASSWORD=$(kubectl -n appuio-reporting get secret/reporting-db-superuser -o jsonpath='{.data.password}' | base64 --decode)
-export ACR_DB_URL="postgres://${DB_USER}:${DB_PASSWORD}@localhost/reporting?sslmode=disable"
-go run . migrate --show-pending
-
-# Run a query
-go run . report --query-name ping --begin "2022-01-17T09:00:00Z"
-
-# Connect to the database's interactive terminal
-DB_USER=$(kubectl -n appuio-reporting get secret/reporting-db-superuser -o jsonpath='{.data.user}' | base64 --decode)
-export PGPASSWORD=$(kubectl -n appuio-reporting get secret/reporting-db-superuser -o jsonpath='{.data.password}' | base64 --decode)
-psql -U "${DB_USER}" -w -h localhost reporting
-```
-
-## Local Installation
-
-```sh
-SUPERUSER_PW=$(pwgen 40 1)
-
-kubectl create ns appuio-reporting
-kubectl -n appuio-reporting create secret generic reporting-db-superuser --from-literal=user=reporting-db-superuser "--from-literal=password=${SUPERUSER_PW}"
-kubectl -n appuio-reporting apply -k manifests/base
-```
-
-### Grafana
-
-There is a Grafana deployment prepared under `manifests/grafana`.
-To be able to use the deployment, customize the parameters in `grafana-helm-values.yaml` and run `make` to generate the manifest.
-
-Add the required Grafana Helm chart using `helm repo add grafana https://grafana.github.io/helm-charts`.
-
-The deployment requires a secret `grafana-creds` containing the admin username and password:
-
-```sh
-oc -n appuio-reporting create secret generic grafana-creds --from-literal=admin-password=$(pwgen 40 1) --from-literal=admin-user=admin
-```
+[build]: https://github.com/appuio/appuio-reporting/actions?query=workflow%3ATest
+[releases]: https://github.com/appuio/appuio-reporting/releases
+[codeclimate]: https://codeclimate.com/github/appuio/appuio-reporting

 ## Usage

 ### Run Report

 ```sh
-kubectl -n appuio-reporting port-forward svc/reporting-db 5432 &
-kubectl --as=cluster-admin -n appuio-thanos port-forward svc/thanos-query 9090 &
-
-DB_USER=$(kubectl -n appuio-reporting get secret/reporting-db-superuser -o jsonpath='{.data.user}' | base64 --decode)
-DB_PASSWORD=$(kubectl -n appuio-reporting get secret/reporting-db-superuser -o jsonpath='{.data.password}' | base64 --decode)
-export ACR_DB_URL="postgres://${DB_USER}:${DB_PASSWORD}@localhost/reporting?sslmode=disable"
-
-go run . report --query-name ping --begin "2022-01-17T09:00:00Z"
-```
-
-### Migrate to Most Recent Schema
-
-```sh
-kubectl -n appuio-reporting port-forward svc/reporting-db 5432 &
-
-DB_USER=$(kubectl -n appuio-reporting get secret/reporting-db-superuser -o jsonpath='{.data.user}' | base64 --decode)
-DB_PASSWORD=$(kubectl -n appuio-reporting get secret/reporting-db-superuser -o jsonpath='{.data.password}' | base64 --decode)
-export ACR_DB_URL="postgres://${DB_USER}:${DB_PASSWORD}@localhost/reporting?sslmode=disable"
-
-go run . migrate --show-pending
-
-go run . migrate
-```
-
-### Connect to the Database
-
-```sh
-kubectl -n appuio-reporting port-forward svc/reporting-db 5432 &
-
-DB_USER=$(kubectl -n appuio-reporting get secret/reporting-db-superuser -o jsonpath='{.data.user}' | base64 --decode)
-export PGPASSWORD=$(kubectl -n appuio-reporting get secret/reporting-db-superuser -o jsonpath='{.data.password}' | base64 --decode)
-
-psql -U "${DB_USER}" -w -h localhost reporting
-```
-
-## Local Development
-
-Local development assumes a locally installed PostgreSQL database.
-This can be achieved by running `make docker-compose-up`.
-See `docker-compose.yml` for the configuration.
-
-```sh
-# Needs to be repeated after a Docker restart
-make docker-compose-up
-
-# Next command asks for a password, it is "reporting"
-createdb --username=reporting -h localhost -p 5432 appuio-cloud-reporting-test
-
-export ACR_DB_URL="postgres://reporting:reporting@localhost/appuio-cloud-reporting-test?sslmode=disable"
-
-# Required for tests
-make ensure-prometheus
-
-go run . migrate
-go run . migrate --seed
-go test ./...
-
-# To connect to the DB:
-psql -U reporting -W -h localhost appuio-cloud-reporting-test
-```
+# Follow the login instructions to get a token
+oc login --server=https://api.cloudscale-lpg-2.appuio.cloud:6443

-### IDE Integration
+# Forward mimir to local host
+kubectl --as cluster-admin -n vshn-appuio-mimir port-forward service/vshn-appuio-mimir-query-frontend 8080

-To enable IDE Test/Debug support, `ACR_DB_URL` should be added to the test environment.
+# Set environment
+export ACR_PROM_URL="http://localhost:8080/prometheus"
+export ACR_ORG_ID="appuio-managed-openshift-billing" # mimir organization in which data is stored
+export ACR_ODOO_URL=https://test.central.vshn.ch/api/v2/product_usage_report_POST
+export ACR_ODOO_OAUTH_TOKEN_URL="https://test.central.vshn.ch/api/v2/authentication/oauth2/token"
+export ACR_ODOO_OAUTH_CLIENT_ID="your_client_id" # see https://docs.central.vshn.ch/rest-api.html#_authentication_and_authorization
+export ACR_ODOO_OAUTH_CLIENT_SECRET="your_client_secret"

-#### VS Code
+# Run a query
+go run . report --query 'sum by (label) (metric)' --begin "2023-07-08T13:00:00Z" --product-id "your-odoo-product-id" --instance-jsonnet 'local labels = std.extVar("labels"); "instance-%(label)s" % labels' --unit-id "your_odoo_unit_id" --timerange 1h --item-description-jsonnet '"This is a description."' --item-group-description-jsonnet 'local labels = std.extVar("labels"); "Instance %(label)s" % labels'

-```sh
-mkdir -p .vscode
-touch .vscode/settings.json
-jq -s '(.[0] // {}) | ."go.testEnvVars"."ACR_DB_URL" = $ENV."ACR_DB_URL"' .vscode/settings.json > .vscode/settings.json.i
-mv .vscode/settings.json.i .vscode/settings.json
 ```
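
The `--instance-jsonnet` and `--item-group-description-jsonnet` arguments above are Jsonnet templates that read the metric's label set from the `labels` external variable. A minimal sketch of how such a template evaluates, using the go-jsonnet library the module continues to depend on (see the go.mod diff below); the label name and value here are illustrative, not part of the patch:

```go
package main

import (
	"fmt"

	"github.com/google/go-jsonnet"
)

func main() {
	// The template as passed via --instance-jsonnet in the README example.
	const tmpl = `local labels = std.extVar("labels"); "instance-%(label)s" % labels`

	vm := jsonnet.MakeVM()
	// Inject a hypothetical label set as the "labels" external variable.
	vm.ExtCode("labels", `{ label: "my-namespace" }`)

	out, err := vm.EvaluateAnonymousSnippet("instance.jsonnet", tmpl)
	if err != nil {
		panic(err)
	}
	fmt.Print(out) // "instance-my-namespace" (a JSON-encoded string)
}
```
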
diff --git a/check_command.go b/check_command.go
deleted file mode 100644
index 5289c87..0000000
--- a/check_command.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package main
-
-import (
-	"database/sql"
-	"fmt"
-	"os"
-	"text/tabwriter"
-
-	"github.com/urfave/cli/v2"
-
-	"github.com/appuio/appuio-cloud-reporting/pkg/check"
-	"github.com/appuio/appuio-cloud-reporting/pkg/db"
-)
-
-type checkMissingCommand struct {
-	DatabaseURL string
-}
-
-var checkMissingCommandName = "check_missing"
-
-func newCheckMissingCommand() *cli.Command {
-	command := &checkMissingCommand{}
-	return &cli.Command{
-		Name:   checkMissingCommandName,
-		Usage:  "Check for missing data in the database",
-		Before: command.before,
-		Action: command.execute,
-		Flags: []cli.Flag{
-			newDbURLFlag(&command.DatabaseURL),
-		},
-	}
-}
-
-func (cmd *checkMissingCommand) before(context *cli.Context) error {
-	return LogMetadata(context)
-}
-
-func (cmd *checkMissingCommand) execute(cliCtx *cli.Context) error {
-	ctx := cliCtx.Context
-	log := AppLogger(ctx).WithName(migrateCommandName)
-
-	log.V(1).Info("Opening database connection", "url", cmd.DatabaseURL)
-	rdb, err := db.Openx(cmd.DatabaseURL)
-	if err != nil {
-		return fmt.Errorf("could not open database connection: %w", err)
-	}
-
-	log.V(1).Info("Begin transaction")
-	tx, err := rdb.BeginTxx(ctx, &sql.TxOptions{ReadOnly: true})
-	if err != nil {
-		return err
-	}
-	defer tx.Rollback()
-
-	missing, err := check.Missing(ctx, tx)
-	if err != nil {
-		return err
-	}
-	inconsistent, err := check.Inconsistent(ctx, tx)
-	if err != nil {
-		return err
-	}
-
-	if len(missing) != 0 {
-		w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
-		defer w.Flush()
-		fmt.Fprint(w, "Table\tMissing Field\tID\tSource\n")
-		for _, m := range missing {
-			fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", m.Table, m.MissingField, m.ID, m.Source)
-		}
-	}
-
-	if len(inconsistent) != 0 {
-		w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
-		defer w.Flush()
-		fmt.Fprint(w, "Table\tDimension ID\tFact Time\tDimension Time Range\n")
-		for _, i := range inconsistent {
-			fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", i.Table, i.DimensionID, i.FactTime, i.DimensionRange)
-		}
-	}
-
-	if len(missing) == 0 && len(inconsistent) == 0 {
-		return cli.Exit("No missing or inconsistent data found.", 0)
-	}
-
-	return cli.Exit(fmt.Sprintf("%d missing, %d inconsistent entries found.", len(missing), len(inconsistent)), 1)
-}
diff --git a/common_flags.go b/common_flags.go
index ddcee37..99eb0b6 100644
--- a/common_flags.go
+++ b/common_flags.go
@@ -1,26 +1,18 @@
 package main

 import (
-	"github.com/appuio/appuio-cloud-reporting/pkg/db"
 	"github.com/urfave/cli/v2"
 )

-const defaultTestForRequiredFlags = ""
-
-func newDbURLFlag(destination *string) *cli.StringFlag {
-	return &cli.StringFlag{Name: "db-url", Usage: "Database connection URL in the form of postgres://user@host:port/db-name?option=value",
-		EnvVars: envVars("DB_URL"), Destination: destination, Required: true, DefaultText: defaultTestForRequiredFlags}
-}
+const defaultTextForRequiredFlags = ""
+const defaultTextForOptionalFlags = ""

 func newPromURLFlag(destination *string) *cli.StringFlag {
 	return &cli.StringFlag{Name: "prom-url", Usage: "Prometheus connection URL in the form of http://host:port",
 		EnvVars: envVars("PROM_URL"), Destination: destination, Value: "http://localhost:9090"}
 }

-func queryNames(queries []db.Query) []string {
-	names := make([]string, len(queries))
-	for i := range queries {
-		names[i] = queries[i].Name
-	}
-	return names
+func newOdooURLFlag(destination *string) *cli.StringFlag {
+	return &cli.StringFlag{Name: "odoo-url", Usage: "URL of the Odoo Metered Billing API",
+		EnvVars: envVars("ODOO_URL"), Destination: destination, Value: "http://localhost:8080"}
 }
diff --git a/docker-compose-test.yml b/docker-compose-test.yml
deleted file mode 100644
index 740832b..0000000
--- a/docker-compose-test.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-# This file is used for unit tests
-version: "3.8"
-services:
-  postgres:
-    image: docker.io/library/postgres:15-bullseye
-    environment:
-      POSTGRES_DB: db
-      POSTGRES_PASSWORD: password
-      POSTGRES_USER: user
-    ports:
-      - "55432:5432"
diff --git a/docker-compose.yml b/docker-compose.yml
deleted file mode 100644
index 94da861..0000000
--- a/docker-compose.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-# This file can be used for local tinkering
-version: "3.8"
-services:
-  postgres:
-    image: docker.io/library/postgres:15-bullseye
-    environment:
-      POSTGRES_DB: reporting-db
-      POSTGRES_PASSWORD: reporting
-      POSTGRES_USER: reporting
-    ports:
-      - "5432:5432"
-    volumes:
-      - "postgres:/var/lib/postgresql/data"
-volumes:
-  postgres: {}
diff --git a/go.mod b/go.mod
index 7315656..bcfde76 100644
--- a/go.mod
+++ b/go.mod
@@ -1,4 +1,4 @@
-module github.com/appuio/appuio-cloud-reporting
+module github.com/appuio/appuio-reporting

 go 1.19

@@ -6,28 +6,20 @@ require (
 	github.com/go-logr/logr v1.2.3
 	github.com/go-logr/zapr v1.2.3
 	github.com/google/go-jsonnet v0.19.1
-	github.com/google/uuid v1.3.0
-	github.com/jackc/pgconn v1.14.0
-	github.com/jackc/pgtype v1.14.0
-	github.com/jackc/pgx/v4 v4.18.1
-	github.com/jmoiron/sqlx v1.3.5
-	github.com/lopezator/migrator v0.3.1
 	github.com/prometheus/client_golang v1.14.0
 	github.com/prometheus/common v0.40.0
 	github.com/stretchr/testify v1.8.2
 	github.com/urfave/cli/v2 v2.24.4
+	go.uber.org/multierr v1.6.0
 	go.uber.org/zap v1.24.0
+	golang.org/x/oauth2 v0.14.0
 )

 require (
 	github.com/benbjohnson/clock v1.1.0 // indirect
 	github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/jackc/chunkreader/v2 v2.0.1 // indirect
-	github.com/jackc/pgio v1.0.0 // indirect
-	github.com/jackc/pgpassfile v1.0.0 // indirect
-	github.com/jackc/pgproto3/v2 v2.3.2 // indirect
-	github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
+	github.com/golang/protobuf v1.5.3 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/kr/pretty v0.2.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
@@ -36,9 +28,9 @@
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
 	github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
 	go.uber.org/atomic v1.7.0 // indirect
-	go.uber.org/multierr v1.6.0 // indirect
-	golang.org/x/crypto v0.6.0 // indirect
-	golang.org/x/text v0.7.0 // indirect
+	golang.org/x/net v0.18.0 // indirect
+	google.golang.org/appengine v1.6.7 // indirect
+	google.golang.org/protobuf v1.31.0 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 	sigs.k8s.io/yaml v1.1.0 // indirect
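
The dependency changes above swap the PostgreSQL driver stack for `golang.org/x/oauth2`, which provides the client-credentials flow implied by the `ACR_ODOO_OAUTH_*` variables in the README. A sketch of how a token-refreshing HTTP client can be built with that package; the wiring below is an assumption for illustration, not the tool's actual code:

```go
package main

import (
	"context"
	"fmt"
	"os"

	"golang.org/x/oauth2/clientcredentials"
)

func main() {
	// Hypothetical wiring based on the ACR_* variables documented in the README.
	cfg := clientcredentials.Config{
		ClientID:     os.Getenv("ACR_ODOO_OAUTH_CLIENT_ID"),
		ClientSecret: os.Getenv("ACR_ODOO_OAUTH_CLIENT_SECRET"),
		TokenURL:     os.Getenv("ACR_ODOO_OAUTH_TOKEN_URL"),
	}

	// Client returns an *http.Client that fetches, caches, and refreshes the
	// bearer token automatically and attaches it to every request, e.g. when
	// posting usage reports to ACR_ODOO_URL.
	client := cfg.Client(context.Background())
	fmt.Printf("%T\n", client) // *http.Client
}
```
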
diff --git a/go.sum b/go.sum
index 733e43f..24b9319 100644
--- a/go.sum
+++ b/go.sum
@@ -1,118 +1,38 @@
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc=
-github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
 github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
 github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
-github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
-github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
-github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
 github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
 github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
-github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
-github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
-github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
 github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
 github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
 github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A=
 github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4=
-github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
-github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
-github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw=
-github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
-github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
 github.com/google/go-jsonnet v0.19.1 h1:MORxkrG0elylUqh36R4AcSPX0oZQa9hvI3lroN+kDhs=
 github.com/google/go-jsonnet v0.19.1/go.mod h1:5JVT33JVCoehdTj5Z2KJq1eIdt3Nb8PCmZ+W5D8U350=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=
-github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
-github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8=
-github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
-github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA=
-github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE=
-github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s=
-github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o=
-github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY=
-github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI=
-github.com/jackc/pgconn v1.14.0 h1:vrbA9Ud87g6JdFWkHTJXppVce58qPIdP7N8y0Ml/A7Q=
-github.com/jackc/pgconn v1.14.0/go.mod h1:9mBNlny0UvkgJdCDvdVHYSjI+8tD2rnKK69Wz8ti++E=
-github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE=
-github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8=
-github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE=
-github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c=
-github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5Wi/+Zz7xoE5ALHsRQlOctkOiHc=
-github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak=
-github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
-github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
-github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78=
-github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA=
-github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg=
-github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
-github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
-github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
-github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
-github.com/jackc/pgproto3/v2 v2.3.2 h1:7eY55bdBeCz1F2fTzSz69QC+pG46jYq9/jtSPiJ5nn0=
-github.com/jackc/pgproto3/v2 v2.3.2/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
-github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
-github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
-github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
-github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg=
-github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc=
-github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw=
-github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM=
-github.com/jackc/pgtype v1.14.0 h1:y+xUdabmyMkJLyApYuPj38mW+aAIqCe5uuBB51rH3Vw=
-github.com/jackc/pgtype v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4=
-github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y=
-github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM=
-github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc=
-github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs=
-github.com/jackc/pgx/v4 v4.18.1 h1:YP7G1KABtKpB5IHrO9vYwSrCOhs7p3uqhvhhQBptya0=
-github.com/jackc/pgx/v4 v4.18.1/go.mod h1:FydWkUyadDmdNH/mHnGob881GawxeEm7TcMCzkb+qQE=
-github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
-github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
-github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
-github.com/jackc/puddle v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
-github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
-github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
 github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
 github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
 github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
-github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
 github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
 github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
 github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/lib/pq v1.10.2 h1:AqzbZs4ZoCBp+GtejcpCpcxM3zlSMx29dXbUSeVtJb8=
-github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
-github.com/lopezator/migrator v0.3.1 h1:ZFPT6aC7+nGWkqhleynABZ6ftycSf6hmHHLOaryq1Og=
-github.com/lopezator/migrator v0.3.1/go.mod h1:X+lHDMZ9Ci3/KdbypJcQYFFwipVrJsX4fRCQ4QLauYk=
-github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
-github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
 github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
-github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
 github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
-github.com/mattn/go-sqlite3 v1.14.6 h1:dNPt6NO46WmLVt2DLNpwczCmdV5boIZ6g/tlDrlRUbg=
-github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
 github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
@@ -131,141 +51,66 @@ github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvq
 github.com/prometheus/common v0.40.0 h1:Afz7EVRqGg2Mqqf4JuF9vdvp1pi220m55Pi9T2JnO4Q=
 github.com/prometheus/common v0.40.0/go.mod h1:L65ZJPSmfn/UBWLQIHV7dBrKFidB/wPlF1y5TlSt9OE=
 github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
-github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
-github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
-github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
 github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
 github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
-github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
-github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ=
-github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
-github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
-github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
 github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
 github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
 github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 github.com/urfave/cli/v2 v2.24.4 h1:0gyJJEBYtCV87zI/x2nZCPyDxD51K6xM8SkwjHFCNEU=
 github.com/urfave/cli/v2 v2.24.4/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc=
 github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
 github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
-github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
-github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
-go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
-go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
 go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
 go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
 go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
 go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI=
-go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
-go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
-go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
 go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
 go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
-go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
-go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
 go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
 go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
 go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
-golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
-golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc=
-golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
 golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
-golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
 golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.6.0 h1:L4ZwwTvKW9gr0ZMS1yrHD9GZhIuVjOBBnaKH+SPQK0Q=
-golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
-golang.org/x/oauth2 v0.3.0 h1:6l90koy8/LaBLmLu8jpHeHexzMwEita0zFfYlggy2F8=
+golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg=
+golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ=
+golang.org/x/oauth2 v0.14.0 h1:P0Vrf/2538nmC0H+pEQ3MNFRRnVR7RlqyVw+bvm26z0=
+golang.org/x/oauth2 v0.14.0/go.mod h1:lAtNWgaWfL4cm7j2OV8TxGi9Qb7ECORx8DktCY74OwM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
-golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
-golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
-golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
-google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
+google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
+google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
-gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
-gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=
 gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -276,6 +121,5 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C
 gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
 sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
 sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
diff --git a/invoice_command.go b/invoice_command.go
deleted file mode 100644
index 079d9f9..0000000
--- a/invoice_command.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package main
-
-import (
-	"database/sql"
-	"encoding/json"
-	"fmt"
-	"os"
-	"time"
-
-	"github.com/urfave/cli/v2"
-
-	"github.com/appuio/appuio-cloud-reporting/pkg/db"
-	"github.com/appuio/appuio-cloud-reporting/pkg/invoice"
-)
-
-type invoiceCommand struct {
-	DatabaseURL string
-	Year        int
-	Month       time.Month
-}
-
-var invoiceCommandName = "invoice"
-
-func newInvoiceCommand() *cli.Command {
-	command := &invoiceCommand{}
-	return &cli.Command{
-		Name:   invoiceCommandName,
-		Usage:  "Run a invoice for a query in the given period",
-		Before: command.before,
-		Action: command.execute,
-		Flags: []cli.Flag{
-			newDbURLFlag(&command.DatabaseURL),
-
-			&cli.IntFlag{Name: "year", Usage: "Year to generate the report for.",
-				EnvVars: envVars("YEAR"), Destination: &command.Year, Required: true},
-			&cli.IntFlag{Name: "month", Usage: "Month to generate the report for.",
-				EnvVars: envVars("MONTH"), Destination: (*int)(&command.Month), Required: true},
-		},
-	}
-}
-
-func (cmd *invoiceCommand) before(context *cli.Context) error {
-	if cmd.Month < 1 || cmd.Month > 12 {
-		return fmt.Errorf("unknown month %q", cmd.Month)
-	}
-	return LogMetadata(context)
-}
-
-func (cmd *invoiceCommand) execute(cliCtx *cli.Context) error {
-	ctx := cliCtx.Context
-	log := AppLogger(ctx).WithName(invoiceCommandName)
-
-	log.V(1).Info("Opening database connection", "url", cmd.DatabaseURL)
-	rdb, err := db.Openx(cmd.DatabaseURL)
-	if err != nil {
-		return fmt.Errorf("could not open database connection: %w", err)
-	}
-	defer rdb.Close()
-
-	log.V(1).Info("Begin transaction")
-	tx, err := rdb.BeginTxx(ctx, &sql.TxOptions{ReadOnly: true})
-	if err != nil {
-		return err
-	}
-	defer tx.Rollback()
-
-	invoices, err := invoice.Generate(ctx, tx, cmd.Year, cmd.Month)
-	if err != nil {
-		return err
-	}
-
-	enc := json.NewEncoder(os.Stdout)
-	enc.SetEscapeHTML(false)
-	enc.SetIndent("", "\t")
-	enc.Encode(invoices)
-	return enc.Encode(invoices)
-
-}
diff --git a/main.go b/main.go
index bc0a656..d01897e 100644
--- a/main.go
+++ b/main.go
@@ -20,7 +20,7 @@ var (
 	commit = "-dirty-"
 	date   = time.Now().Format("2006-01-02")

-	appName     = "appuio-cloud-reporting"
+	appName     = "appuio-reporting"
 	appLongName = "Reporting for APPUiO Cloud"

 	// envPrefix is the global prefix to use for the keys in environment variables
@@ -65,11 +65,7 @@ func newApp() (context.Context, context.CancelFunc, *cli.App) {
 			},
 		},
 		Commands: []*cli.Command{
-			newMigrateCommand(),
 			newReportCommand(),
-			newCheckMissingCommand(),
-			newInvoiceCommand(),
-			newTmapCommand(),
 		},
 		ExitErrHandler: func(context *cli.Context, err error) {
 			if err == nil {
diff --git a/manifests/base/database-sts.yaml b/manifests/base/database-sts.yaml
deleted file mode 100644
index b39f487..0000000
--- a/manifests/base/database-sts.yaml
+++ /dev/null
@@ -1,73 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
-  name: reporting-db-headless
-spec:
-  selector:
-    app: reporting-db
-  clusterIP: None
----
-apiVersion: v1
-kind: Service
-metadata:
-  name: reporting-db
-spec:
-  selector:
-    app: reporting-db
-  ports:
-    - port: 5432
----
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
-  name: reporting-db
-  labels:
-    app: reporting-db
-spec:
-  selector:
-    matchLabels:
-      app: reporting-db
-  serviceName: reporting-db-headless
-  replicas: 1
-  template:
-    metadata:
-      labels:
-        app: reporting-db
-    spec:
-      containers:
-        - name: postgresql
-          image: docker.io/library/postgres:13-bullseye
-          env:
-            - name: POSTGRES_DB
-              value: reporting
-            - name: POSTGRES_USER
-              valueFrom:
-                secretKeyRef:
-                  name: reporting-db-superuser
-                  key: user
-            - name: POSTGRES_PASSWORD
-              valueFrom:
-                secretKeyRef:
-                  name: reporting-db-superuser
-                  key: password
-          ports:
-            - containerPort: 5432
-          volumeMounts:
-            - name: data
-              subPath: data
-              mountPath: /var/lib/postgresql/data
-          resources:
-            requests:
-              cpu: 500m
-              memory: 256Mi
-            limits:
-              cpu: 1500m
-              memory: 1Gi
-  volumeClaimTemplates:
-    - metadata:
-        name: data
-      spec:
-        accessModes: [ "ReadWriteOnce" ]
-        resources:
-          requests:
-            storage: 5Gi
diff --git a/manifests/base/kustomization.yaml b/manifests/base/kustomization.yaml
deleted file mode 100644
index 738eb49..0000000
--- a/manifests/base/kustomization.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-resources:
-  - database-sts.yaml
diff --git a/manifests/grafana/Makefile b/manifests/grafana/Makefile
deleted file mode 100644
index 1fd00d1..0000000
--- a/manifests/grafana/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-.PHONY: build
-build:
-	helm template graphs --namespace=appuio-reporting grafana/grafana \
-		-f grafana-helm-values.yaml \
-		> grafana.yaml
-
-.PHONY: update
-update:
-	helm repo update
-	helm search repo grafana
diff --git a/manifests/grafana/grafana-helm-values.yaml b/manifests/grafana/grafana-helm-values.yaml
deleted file mode 100644
index fd4d3fd..0000000
--- a/manifests/grafana/grafana-helm-values.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
-env:
-  GF_SERVER_DOMAIN: reporting-grafana.appuio.cloud
-  GF_SERVER_ROOT_URL: https://reporting-grafana.appuio.cloud
-ingress:
-  enabled: true
-  hosts:
-    - reporting-grafana.appuio.cloud
-  tls:
-    - hosts:
-        - reporting-grafana.appuio.cloud
-      secretName: grafana-ingress-cert
-  annotations:
-    cert-manager.io/cluster-issuer: letsencrypt-production
-    ingress.kubernetes.io/ssl-redirect: "true"
-persistence:
-  enabled: true
-  size: 5Gi
-  storageClassName: ssd
-initChownData:
-  enabled: false
-rbac:
-  namespaced: true
-  pspEnabled: false
-admin:
-  existingSecret: grafana-creds
-podAnnotations:
-  k8up.syn.tools/backupcommand: tar c -f - /var/lib/grafana
-  k8up.syn.tools/fileextension: grafana-data.tar
-deploymentStrategy:
-  type: Recreate
-securityContext: null
diff --git a/manifests/ocp/kustomization.yaml b/manifests/ocp/kustomization.yaml
deleted file mode 100644
index c27aed1..0000000
--- a/manifests/ocp/kustomization.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-bases:
-  - "../base"
-
-patchesStrategicMerge:
-- ocp-patch.yaml
diff --git a/manifests/ocp/ocp-patch.yaml b/manifests/ocp/ocp-patch.yaml
deleted file mode 100644
index 452fd9a..0000000
--- a/manifests/ocp/ocp-patch.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
-  name: reporting-db
-spec:
-  template:
-    spec:
-      containers:
-        - name: postgresql
-          image: centos/postgresql-13-centos7:latest
-          env:
-            - $patch: replace
-            - name: POSTGRESQL_DATABASE
-              value: reporting
-            - name: POSTGRESQL_USER
-              valueFrom:
-                secretKeyRef:
-                  name: reporting-db-superuser
-                  key: user
-            - name: POSTGRESQL_PASSWORD
-              valueFrom:
-                secretKeyRef:
-                  name: reporting-db-superuser
-                  key: password
-          volumeMounts:
-            - name: data
-              subPath: data
-              mountPath: /var/lib/pgsql/data
diff --git a/migrate_command.go b/migrate_command.go
deleted file mode 100644
index 56414d0..0000000
--- a/migrate_command.go
+++ /dev/null
@@ -1,80 +0,0 @@
-package main
-
-import (
-	"fmt"
-
-	"github.com/appuio/appuio-cloud-reporting/pkg/db"
-	"github.com/urfave/cli/v2"
-)
-
-type migrateCommand struct {
-	ShowPending bool
-	DatabaseURL string
-	SeedEnabled bool
-}
-
-var migrateCommandName = "migrate"
-
-func newMigrateCommand() *cli.Command {
-	command := &migrateCommand{}
-	return &cli.Command{
-		Name:   migrateCommandName,
-		Usage:  "Perform database migrations",
-		Before: command.before,
-		Action: command.execute,
-		Flags: []cli.Flag{
-			&cli.BoolFlag{Name: "show-pending", Usage: "Shows pending migrations and exits", EnvVars: envVars("SHOW_PENDING"), Destination: &command.ShowPending},
-			&cli.BoolFlag{Name: "seed", Usage: "Seeds database with initial data and exits", EnvVars: envVars("SEED"), Destination: &command.SeedEnabled},
-			newDbURLFlag(&command.DatabaseURL),
-		},
-	}
-}
-
-func (cmd *migrateCommand) before(ctx *cli.Context) error {
-	return LogMetadata(ctx)
-}
-
-func (cmd *migrateCommand) execute(context *cli.Context) error {
-	log := AppLogger(context.Context).WithName(migrateCommandName)
-	log.V(1).Info("Opening database connection", "url", cmd.DatabaseURL)
-	rdb, err := db.Open(cmd.DatabaseURL)
-	if err != nil {
-		return fmt.Errorf("could not open database connection: %w", err)
-	}
-
-	if cmd.SeedEnabled {
-		log.V(1).Info("Seeding DB...")
-		err := db.Seed(rdb)
-		if err != nil {
-			return fmt.Errorf("error seeding database: %w", err)
-		}
-		log.Info("Done seeding")
-		return nil
-	}
-
-	if cmd.ShowPending {
-		log.V(1).Info("Showing pending DB migrations")
-		pm, err := db.Pending(rdb)
-		if err != nil {
-			return fmt.Errorf("error showing pending migrations: %w", err)
-		}
-
-		for _, p := range pm {
-			fmt.Println(p.Name)
-		}
-
-		// non-zero exit code could be used in scripts
-		if len(pm) > 0 {
-			cli.Exit("Pending migrations found.", 1)
-		}
-		return nil
-	}
-
-	log.V(1).Info("Start DB migrations")
-	err = db.Migrate(rdb)
-	if err != nil {
-		return fmt.Errorf("could not migrate database: %w", err)
-	}
-
-	return nil
-}
diff --git a/pkg/categories/category.go b/pkg/categories/category.go
deleted file mode 100644
index 8c23430..0000000
--- a/pkg/categories/category.go
+++ /dev/null
@@ -1,66 +0,0 @@
-package categories
-
-import (
-	"context"
-	"database/sql"
-	"fmt"
-
-	"github.com/appuio/appuio-cloud-reporting/pkg/db"
-	"github.com/appuio/appuio-cloud-reporting/pkg/erp"
-	"github.com/appuio/appuio-cloud-reporting/pkg/erp/entity"
-	"github.com/go-logr/logr"
-	"github.com/jmoiron/sqlx"
-)
-
-// Reconcile synchronizes all stored db.Category with a 3rd party ERP.
-// Note: A logger is retrieved from logr.FromContextOrDiscard.
-func Reconcile(ctx context.Context, database *sqlx.DB, reconciler erp.CategoryReconciler) error {
-	logger := logr.FromContextOrDiscard(ctx).WithName("category")
-	logger.Info("Reconciling categories")
-
-	categories, err := fetchCategories(ctx, database, logger)
-	if err != nil {
-		return err
-	}
-
-	for _, cat := range categories {
-		// We need to reconcile categories in the ERP regardless if Target has been set.
-		// These categories in the ERP may have been updated by a 3rd party without the reporting knowing of it.
-		// So the reporting being authoritative over categories in the ERP, it should be given chance to reset any changes that deviate from the desired defaults.
-		// If we only ever create categories, the categories in the ERP won't ever be touched again.
-		logger.V(2).Info("Reconciling category with ERP...", "source", cat.Source)
-		input := entity.Category{Source: cat.Source, Target: cat.Target.String}
-		output, err := reconciler.Reconcile(ctx, input)
-		if err != nil {
-			return fmt.Errorf("error from erp category reconciler: %w", err)
-		}
-		if output == input {
-			// No target update needed
-			logger.Info("Category is up-to-date", "category", output)
-			continue
-		}
-		err = db.RunInTransaction(ctx, database, func(tx *sqlx.Tx) error {
-			logger.V(2).Info("Updating category...", "id", cat.Id, "source", cat.Source)
-			cat.Target = sql.NullString{String: output.Target, Valid: output.Target != ""}
-			_, err = tx.NamedExecContext(ctx, "UPDATE categories SET target = :target WHERE id = :id", cat)
-			if err != nil {
-				return err
-			}
-			logger.Info("Updated category", "source", cat.Source, "target", cat.Target.String)
-			return nil
-		})
-	}
-	logger.Info("Done reconciling categories")
-	return nil
-}
-
-func fetchCategories(ctx context.Context, database *sqlx.DB, logger logr.Logger) ([]db.Category, error) {
-	var categories []db.Category
-	logger.V(2).Info("Retrieving all categories...")
-	err := database.SelectContext(ctx, &categories, "SELECT * FROM categories")
-	if err != nil {
-		return nil, err
-	}
-	logger.V(1).Info("Retrieved all categories", "count", len(categories))
-	return categories, err
-}
diff --git a/pkg/categories/category_test.go b/pkg/categories/category_test.go
deleted file mode 100644
index 58cc80d..0000000
--- a/pkg/categories/category_test.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package categories
-
-import (
-	"context"
-	"database/sql"
-	"testing"
-
-	"github.com/appuio/appuio-cloud-reporting/pkg/db"
-	"github.com/appuio/appuio-cloud-reporting/pkg/db/dbtest"
-	"github.com/appuio/appuio-cloud-reporting/pkg/erp/entity"
-	"github.com/stretchr/testify/suite"
-)
-
-type CategoriesSuite struct {
-	dbtest.Suite
-}
-
-func (s *CategoriesSuite) TestReconcile() {
-
-	s.Run("GivenCategoryWithEmptyTarget_ThenExpectUpdateAfterReconciler", func() {
-
-		cat := db.Category{Source: "us-rac-2:disposal-plant-p-12a-furnace-control"}
-
-		s.Require().NoError(
-			db.GetNamed(s.DB(), &cat, "INSERT INTO categories (source,target) VALUES (:source,:target) RETURNING *", cat),
-		)
-
-		err := Reconcile(context.Background(), s.DB(), &stubReconciler{returnArg: entity.Category{Source: cat.Source, Target: "12"}, returnErr: nil})
-		s.Require().NoError(err)
-
-		s.Require().NoError(
-			db.GetNamed(s.DB(), &cat, "SELECT * FROM categories WHERE source=:source", cat),
-		)
-		s.Equal("us-rac-2:disposal-plant-p-12a-furnace-control", cat.Source) // Verify unchanged
-		s.Equal("12", cat.Target.String)                                    // Verify updated
-		s.True(cat.Target.Valid)
-	})
-
-	s.Run("GivenCategoryWithSetTarget_ThenDoNothing", func() {
-		cat := db.Category{Source: "us-rac-2:nest-elevator-control", Target: sql.NullString{String: "12", Valid: true}}
-
-		s.Require().NoError(
-			db.GetNamed(s.DB(), &cat, "INSERT INTO categories (source,target) VALUES (:source,:target) RETURNING *", cat),
-		)
-
-		err := Reconcile(context.Background(), s.DB(), &stubReconciler{returnArg: entity.Category{Source: cat.Source, Target: cat.Target.String}, returnErr: nil})
-		s.Require().NoError(err)
-
-		s.Require().NoError(
-			db.GetNamed(s.DB(), &cat, "SELECT * FROM categories WHERE source=:source", cat),
-		)
-
-		s.Equal("us-rac-2:nest-elevator-control", cat.Source) // Verify unchanged
-		s.Equal("12", cat.Target.String)                      // Verify unchanged
-		s.True(cat.Target.Valid)
-	})
-}
-
-func TestCategories(t *testing.T) {
-	suite.Run(t, new(CategoriesSuite))
new(CategoriesSuite)) -} - -type stubReconciler struct { - returnErr error - returnArg entity.Category -} - -func (s *stubReconciler) Reconcile(_ context.Context, _ entity.Category) (entity.Category, error) { - return s.returnArg, s.returnErr -} diff --git a/pkg/check/inconsistent.go b/pkg/check/inconsistent.go deleted file mode 100644 index 1a98a56..0000000 --- a/pkg/check/inconsistent.go +++ /dev/null @@ -1,52 +0,0 @@ -package check - -import ( - "context" - "fmt" - "strings" - - "github.com/jmoiron/sqlx" - "go.uber.org/multierr" - - "github.com/appuio/appuio-cloud-reporting/pkg/db" -) - -var dimensionsWithTimeranges = []db.Model{ - db.Discount{}, - db.Product{}, - db.Query{}, - db.Tenant{}, -} - -const inconsistentFactsQuery = ` -select distinct '{{table}}' as "table", {{table}}.id as DimensionID, date_times.timestamp::text as FactTime, {{table}}.during::text as DimensionRange from facts - inner join {{table}} on facts.{{foreign_key}} = {{table}}.id - inner join date_times on facts.date_time_id = date_times.id - where false = {{table}}.during @> date_times.timestamp -` - -// InconsistentField represents an inconsistent field. -type InconsistentField struct { - Table string - - DimensionID string - - FactTime string - DimensionRange string -} - -// Inconsistent checks for facts with inconsistent time ranges. -// Those are facts that reference a dimension with a time range that does not include the fact's timestamp. -func Inconsistent(ctx context.Context, tx sqlx.QueryerContext) ([]InconsistentField, error) { - var inconsistent []InconsistentField - var errors []error - for _, m := range dimensionsWithTimeranges { - var ic []InconsistentField - q := strings.NewReplacer("{{table}}", m.TableName(), "{{foreign_key}}", m.ForeignKeyName()).Replace(inconsistentFactsQuery) - err := sqlx.SelectContext(ctx, tx, &ic, fmt.Sprintf(`WITH inconsistent AS (%s) SELECT * FROM inconsistent ORDER BY "table",FactTime`, q)) - errors = append(errors, err) - inconsistent = append(inconsistent, ic...) - } - - return inconsistent, multierr.Combine(errors...) 
-} diff --git a/pkg/check/inconsistent_test.go b/pkg/check/inconsistent_test.go deleted file mode 100644 index b8c7198..0000000 --- a/pkg/check/inconsistent_test.go +++ /dev/null @@ -1,138 +0,0 @@ -package check_test - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/jackc/pgtype" - "github.com/jmoiron/sqlx" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - - "github.com/appuio/appuio-cloud-reporting/pkg/check" - "github.com/appuio/appuio-cloud-reporting/pkg/db" - "github.com/appuio/appuio-cloud-reporting/pkg/db/dbtest" -) - -type InconsistentTestSuite struct { - dbtest.Suite -} - -func (s *InconsistentTestSuite) TestInconsistentFields() { - t := s.T() - tx := s.Begin() - defer tx.Rollback() - require.NoError(t, func() error { _, err := tx.Exec("set timezone to 'UTC';"); return err }()) - - m, err := check.Inconsistent(context.Background(), tx) - require.NoError(t, err) - require.Len(t, m, 0) - - expectedInconsistent := s.requireInconsistentTestEntries(t, tx) - - m, err = check.Inconsistent(context.Background(), tx) - require.NoError(t, err) - require.Equal(t, expectedInconsistent, m) -} - -func (s *InconsistentTestSuite) requireInconsistentTestEntries(t *testing.T, tdb *sqlx.Tx) []check.InconsistentField { - var category db.Category - require.NoError(t, - db.GetNamed(tdb, &category, - "INSERT INTO categories (source,target) VALUES (:source,:target) RETURNING *", db.Category{ - Source: "af-south-1:uroboros-research", - })) - - at := time.Date(2023, time.January, 2, 3, 0, 0, 0, time.UTC) - var dateTime db.DateTime - require.NoError(t, - db.GetNamed(tdb, &dateTime, - "INSERT INTO date_times (timestamp, year, month, day, hour) VALUES (:timestamp, :year, :month, :day, :hour) RETURNING *", - db.BuildDateTime(at), - )) - - discountOutsideRange, err := db.CreateDiscount(tdb, db.Discount{ - Source: "test_memory:us-rac-2", - During: rangeOutsideDateTimes(), - }) - require.NoError(t, err) - - var tenantOutsideRange db.Tenant - require.NoError(t, - db.GetNamed(tdb, &tenantOutsideRange, - "INSERT INTO tenants (source,during) VALUES (:source,:during) RETURNING *", db.Tenant{ - Source: "tricell", - During: rangeOutsideDateTimes(), - })) - - var tenantInsideRange db.Tenant - require.NoError(t, - db.GetNamed(tdb, &tenantInsideRange, - "INSERT INTO tenants (source,during) VALUES (:source,:during) RETURNING *", db.Tenant{ - Source: "tricell", - During: db.Timerange(db.MustTimestamp(at), db.MustTimestamp(at.Add(time.Hour))), - })) - - var productOutsideRange db.Product - require.NoError(t, - db.GetNamed(tdb, &productOutsideRange, - "INSERT INTO products (source,target,amount,unit,during) VALUES (:source,:target,:amount,:unit,:during) RETURNING *", db.Product{ - Source: "test_memory:us-rac-2", - During: rangeOutsideDateTimes(), - })) - - var queryOutsideRange db.Query - require.NoError(t, - db.GetNamed(tdb, &queryOutsideRange, - "INSERT INTO queries (name,query,unit,during) VALUES (:name,:query,:unit,:during) RETURNING *", db.Query{ - Name: "test_memory", - Query: "test_memory", - Unit: "GiB", - During: rangeOutsideDateTimes(), - })) - - testFact := db.Fact{ - DateTimeId: dateTime.Id, - QueryId: queryOutsideRange.Id, - TenantId: tenantOutsideRange.Id, - CategoryId: category.Id, - ProductId: productOutsideRange.Id, - DiscountId: discountOutsideRange.Id, - Quantity: 1, - } - createFact(t, tdb, testFact) - testFact.TenantId = tenantInsideRange.Id - createFact(t, tdb, testFact) - - formattedAt := at.Format(db.PGTimestampFormat) - formattedRange := 
fmt.Sprintf("[\"%s\",\"%s\")", - rangeOutsideDateTimes().Lower.Time.Format(db.PGTimestampFormat), - rangeOutsideDateTimes().Upper.Time.Format(db.PGTimestampFormat), - ) - return []check.InconsistentField{ - {Table: "discounts", DimensionID: discountOutsideRange.Id, FactTime: formattedAt, DimensionRange: formattedRange}, - {Table: "products", DimensionID: productOutsideRange.Id, FactTime: formattedAt, DimensionRange: formattedRange}, - {Table: "queries", DimensionID: queryOutsideRange.Id, FactTime: formattedAt, DimensionRange: formattedRange}, - {Table: "tenants", DimensionID: tenantOutsideRange.Id, FactTime: formattedAt, DimensionRange: formattedRange}, - } -} - -func rangeOutsideDateTimes() pgtype.Tstzrange { - return db.Timerange( - db.MustTimestamp(time.Date(2023, time.January, 2, 10, 0, 0, 0, time.UTC)), - db.MustTimestamp(time.Date(2023, time.January, 2, 11, 0, 0, 0, time.UTC)), - ) -} - -func createFact(t *testing.T, tx *sqlx.Tx, fact db.Fact) (rf db.Fact) { - require.NoError(t, - db.GetNamed(tx, &rf, - "INSERT INTO facts (date_time_id,query_id,tenant_id,category_id,product_id,discount_id,quantity) VALUES (:date_time_id,:query_id,:tenant_id,:category_id,:product_id,:discount_id,:quantity) RETURNING *", fact)) - return -} - -func TestInconsistentTestSuite(t *testing.T) { - suite.Run(t, new(InconsistentTestSuite)) -} diff --git a/pkg/check/missing.go b/pkg/check/missing.go deleted file mode 100644 index ac5e43f..0000000 --- a/pkg/check/missing.go +++ /dev/null @@ -1,34 +0,0 @@ -package check - -import ( - "context" - "fmt" - - "github.com/jmoiron/sqlx" -) - -// MissingField represents a missing field. -type MissingField struct { - Table string - - ID string - Source string - - MissingField string -} - -const missingQuery = ` - SELECT 'categories' as table, id, source, 'target' as missingfield FROM categories WHERE target IS NULL OR target = '' - UNION ALL - SELECT 'tenants' as table, id, source, 'target' as missingfield FROM tenants WHERE target IS NULL OR target = '' - UNION ALL - SELECT 'products' as table, id, source, 'target' as missingfield FROM products WHERE target IS NULL OR target = '' -` - -// Missing checks for missing fields in the reporting database. 
-func Missing(ctx context.Context, tx sqlx.QueryerContext) ([]MissingField, error) { - var missing []MissingField - - err := sqlx.SelectContext(ctx, tx, &missing, fmt.Sprintf(`WITH missing AS (%s) SELECT * FROM missing ORDER BY "table",missingfield,source`, missingQuery)) - return missing, err -} diff --git a/pkg/check/missing_test.go b/pkg/check/missing_test.go deleted file mode 100644 index 08b4a4d..0000000 --- a/pkg/check/missing_test.go +++ /dev/null @@ -1,68 +0,0 @@ -package check_test - -import ( - "context" - "testing" - - "github.com/jmoiron/sqlx" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - - "github.com/appuio/appuio-cloud-reporting/pkg/check" - "github.com/appuio/appuio-cloud-reporting/pkg/db" - "github.com/appuio/appuio-cloud-reporting/pkg/db/dbtest" -) - -type TestSuite struct { - dbtest.Suite -} - -func (s *TestSuite) TestMissingFields() { - t := s.T() - tx := s.Begin() - defer tx.Rollback() - - m, err := check.Missing(context.Background(), tx) - require.NoError(t, err) - require.Len(t, m, 0) - - expectedMissing := s.requireMissingTestEntries(t, tx) - - m, err = check.Missing(context.Background(), tx) - require.NoError(t, err) - require.Equal(t, expectedMissing, m) -} - -func (s *TestSuite) requireMissingTestEntries(t *testing.T, tdb *sqlx.Tx) []check.MissingField { - var catEmptyTarget db.Category - require.NoError(t, - db.GetNamed(tdb, &catEmptyTarget, - "INSERT INTO categories (source,target) VALUES (:source,:target) RETURNING *", db.Category{ - Source: "af-south-1:uroboros-research", - })) - - var tenantEmptyTarget db.Tenant - require.NoError(t, - db.GetNamed(tdb, &tenantEmptyTarget, - "INSERT INTO tenants (source,target) VALUES (:source,:target) RETURNING *", db.Tenant{ - Source: "tricell", - })) - - var productEmptyTarget db.Product - require.NoError(t, - db.GetNamed(tdb, &productEmptyTarget, - "INSERT INTO products (source,target,amount,unit,during) VALUES (:source,:target,:amount,:unit,:during) RETURNING *", db.Product{ - Source: "test_memory:us-rac-2", - During: db.InfiniteRange(), - })) - - return []check.MissingField{ - {Table: "categories", MissingField: "target", ID: catEmptyTarget.Id, Source: catEmptyTarget.Source}, - {Table: "products", MissingField: "target", ID: productEmptyTarget.Id, Source: productEmptyTarget.Source}, - {Table: "tenants", MissingField: "target", ID: tenantEmptyTarget.Id, Source: tenantEmptyTarget.Source}, - } -} - -func TestTestSuite(t *testing.T) { - suite.Run(t, new(TestSuite)) -} diff --git a/pkg/db/dbtest/suite.go b/pkg/db/dbtest/suite.go deleted file mode 100644 index bf56151..0000000 --- a/pkg/db/dbtest/suite.go +++ /dev/null @@ -1,117 +0,0 @@ -package dbtest - -import ( - "fmt" - "net/url" - "os" - "strings" - - "github.com/google/uuid" - "github.com/jackc/pgx/v4" - "github.com/jmoiron/sqlx" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - - "github.com/appuio/appuio-cloud-reporting/pkg/db" -) - -var DatabaseURL = urlFromEnv() - -// Suite holds a database test suite. Each Suite holds its own clone of -// the database given by the `ACR_DB_URL` environment variable. -// The database is cloned before the suite starts and dropped in the suite teardown. -// Suites can be run in parallel. 
-type Suite struct { - suite.Suite - - maintenanceDB *sqlx.DB - - tmpDB *sqlx.DB - tmpDBName string -} - -func (ts *Suite) DB() *sqlx.DB { - return ts.tmpDB -} - -func (ts *Suite) Begin() *sqlx.Tx { - txx, err := ts.DB().Beginx() - require.NoError(ts.T(), err) - return txx -} - -func (ts *Suite) SetupSuite() { - u, err := url.Parse(DatabaseURL) - require.NoError(ts.T(), err) - dbName := strings.TrimPrefix(u.Path, "/") - tmpDbName := dbName + "-tmp-" + uuid.NewString() - ts.tmpDBName = tmpDbName - - // Connect to a neutral database - mdb, err := openMaintenance(DatabaseURL) - require.NoError(ts.T(), err) - ts.maintenanceDB = mdb - - require.NoError(ts.T(), - cloneDB(ts.maintenanceDB, pgx.Identifier{tmpDbName}, pgx.Identifier{dbName}), - ) - - // Connect to the temporary database - tmpURL := new(url.URL) - *tmpURL = *u - tmpURL.Path = "/" + tmpDbName - ts.T().Logf("Using database name: %s", tmpDbName) - dbx, err := db.Openx(tmpURL.String()) - require.NoError(ts.T(), err) - ts.tmpDB = dbx -} - -func (ts *Suite) TearDownSuite() { - t := ts.T() - require.NoError(t, ts.tmpDB.Close()) - require.NoError(t, dropDB(ts.maintenanceDB, pgx.Identifier{ts.tmpDBName})) - require.NoError(t, ts.maintenanceDB.Close()) -} - -func cloneDB(maint *sqlx.DB, dst, src pgx.Identifier) error { - _, err := maint.Exec( - fmt.Sprintf(`CREATE DATABASE %s TEMPLATE %s`, - dst.Sanitize(), - src.Sanitize(), - ), - ) - if err != nil { - return fmt.Errorf("error cloning database `%s` to `%s`: %w", src.Sanitize(), dst.Sanitize(), err) - } - return nil -} - -func dropDB(maint *sqlx.DB, db pgx.Identifier) error { - _, err := maint.Exec( - fmt.Sprintf(`DROP DATABASE %s WITH (FORCE)`, db.Sanitize()), - ) - if err != nil { - return fmt.Errorf("error dropping database `%s`: %w", db.Sanitize(), err) - } - return nil -} - -func openMaintenance(dbURL string) (*sqlx.DB, error) { - maintURL, err := url.Parse(dbURL) - if err != nil { - return nil, fmt.Errorf("error parsing url: %w", err) - } - maintURL.Path = "/postgres" - mdb, err := db.Openx(maintURL.String()) - if err != nil { - return nil, fmt.Errorf("error connecting to maintenance (`%s`) database: %w", maintURL.Path, err) - } - return mdb, nil -} - -func urlFromEnv() string { - if u, exists := os.LookupEnv("ACR_DB_URL"); exists { - return u - } - return "postgres://postgres@localhost/reporting-db?sslmode=disable" -} diff --git a/pkg/db/dbtest/suite_test.go b/pkg/db/dbtest/suite_test.go deleted file mode 100644 index 0de02bb..0000000 --- a/pkg/db/dbtest/suite_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package dbtest_test - -import ( - "testing" - - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - - "github.com/appuio/appuio-cloud-reporting/pkg/db/dbtest" -) - -type TestSuite struct { - dbtest.Suite -} - -func (s *TestSuite) TestTxQuery() { - t := s.T() - tx := s.Begin() - defer tx.Rollback() - - _, err := tx.Exec("SELECT 1") - require.NoError(t, err) -} - -func TestTestSuite(t *testing.T) { - suite.Run(t, new(TestSuite)) -} diff --git a/pkg/db/migrations.go b/pkg/db/migrations.go deleted file mode 100644 index 147e247..0000000 --- a/pkg/db/migrations.go +++ /dev/null @@ -1,91 +0,0 @@ -package db - -import ( - "database/sql" - "embed" - "fmt" - "io/fs" - - "github.com/lopezator/migrator" -) - -//go:embed migrations/*.sql -var migrationFiles embed.FS - -// Migrations returns all registered migrations. 
-var Migrations = func() []interface{} { - m, err := loadMigrations() - if err != nil { - panic(fmt.Errorf("failed to load migrations: %w", err)) - } - return m -}() - -// Migrate migrates the database to the newest migration. -func Migrate(db *sql.DB) error { - m, err := newMigrator() - if err != nil { - return err - } - - if err := m.Migrate(db); err != nil { - return fmt.Errorf("error while migrating: %w", err) - } - return nil -} - -// Pending returns all pending migrations. -func Pending(db *sql.DB) ([]*migrator.Migration, error) { - m, err := newMigrator() - if err != nil { - return nil, err - } - - pending, err := m.Pending(db) - if err != nil { - return nil, fmt.Errorf("error while querying for pending migrations: %w", err) - } - - pm := make([]*migrator.Migration, 0, len(pending)) - for _, pp := range pending { - pm = append(pm, pp.(*migrator.Migration)) - } - return pm, nil -} - -func newMigrator() (*migrator.Migrator, error) { - m, err := migrator.New( - migrator.Migrations(Migrations...), - ) - if err != nil { - return m, fmt.Errorf("error while loading migrations: %w", err) - } - - return m, nil -} - -func loadMigrations() (migrations []interface{}, err error) { - migrations = make([]interface{}, 0) - - // the only possible error is bad pattern and can be safely ignored - files, _ := fs.Glob(migrationFiles, "migrations/*") - - for _, file := range files { - file := file - migration, err := fs.ReadFile(migrationFiles, file) - if err != nil { - return nil, fmt.Errorf("error reading migration file: %w", err) - } - migrations = append(migrations, &migrator.Migration{ - Name: file, - Func: func(tx *sql.Tx) error { - if _, err := tx.Exec(string(migration)); err != nil { - return err - } - return nil - }, - }) - } - - return migrations, nil -} diff --git a/pkg/db/migrations/0001_create_queries.sql b/pkg/db/migrations/0001_create_queries.sql deleted file mode 100644 index ab23731..0000000 --- a/pkg/db/migrations/0001_create_queries.sql +++ /dev/null @@ -1,11 +0,0 @@ -CREATE TABLE queries ( - id uuid PRIMARY KEY DEFAULT gen_random_uuid(), - name text NOT NULL, - description text NOT NULL DEFAULT '', - query text NOT NULL, - unit text NOT NULL, - after timestamp with time zone, - before timestamp with time zone, - - UNIQUE (name,unit,after,before) -) diff --git a/pkg/db/migrations/0002_create_tenants.sql b/pkg/db/migrations/0002_create_tenants.sql deleted file mode 100644 index 5079063..0000000 --- a/pkg/db/migrations/0002_create_tenants.sql +++ /dev/null @@ -1,7 +0,0 @@ -CREATE TABLE tenants ( - id uuid PRIMARY KEY DEFAULT gen_random_uuid(), - source text NOT NULL, - target text, - - UNIQUE (source) -) diff --git a/pkg/db/migrations/0003_create_categories.sql b/pkg/db/migrations/0003_create_categories.sql deleted file mode 100644 index 6f16c93..0000000 --- a/pkg/db/migrations/0003_create_categories.sql +++ /dev/null @@ -1,7 +0,0 @@ -CREATE TABLE categories ( - id uuid PRIMARY KEY DEFAULT gen_random_uuid(), - source text NOT NULL, - target text, - - UNIQUE (source) -) diff --git a/pkg/db/migrations/0004_create_products.sql b/pkg/db/migrations/0004_create_products.sql deleted file mode 100644 index f447837..0000000 --- a/pkg/db/migrations/0004_create_products.sql +++ /dev/null @@ -1,11 +0,0 @@ -CREATE TABLE products ( - id uuid PRIMARY KEY DEFAULT gen_random_uuid(), - source text NOT NULL, - target text, - amount bigint NOT NULL DEFAULT 0, - unit text NOT NULL, - after timestamp with time zone, - before timestamp with time zone, - - UNIQUE (source,after,before) -) diff --git 
a/pkg/db/migrations/0005_create_discounts.sql b/pkg/db/migrations/0005_create_discounts.sql deleted file mode 100644 index 0375fc0..0000000 --- a/pkg/db/migrations/0005_create_discounts.sql +++ /dev/null @@ -1,9 +0,0 @@ -CREATE TABLE discounts ( - id uuid PRIMARY KEY DEFAULT gen_random_uuid(), - source text NOT NULL, - discount int NOT NULL DEFAULT 0, - after timestamp with time zone, - before timestamp with time zone, - - UNIQUE (source,after,before) -) diff --git a/pkg/db/migrations/0006_create_date_times.sql b/pkg/db/migrations/0006_create_date_times.sql deleted file mode 100644 index 7a6977a..0000000 --- a/pkg/db/migrations/0006_create_date_times.sql +++ /dev/null @@ -1,10 +0,0 @@ -CREATE TABLE date_times ( - id uuid PRIMARY KEY DEFAULT gen_random_uuid(), - timestamp timestamp with time zone NOT NULL, - year int NOT NULL, - month int NOT NULL, - day int NOT NULL, - hour int NOT NULL, - - UNIQUE(year,month,day,hour) -) diff --git a/pkg/db/migrations/0007_create_facts.sql b/pkg/db/migrations/0007_create_facts.sql deleted file mode 100644 index f3ac406..0000000 --- a/pkg/db/migrations/0007_create_facts.sql +++ /dev/null @@ -1,36 +0,0 @@ -CREATE TABLE facts ( - id uuid PRIMARY KEY DEFAULT gen_random_uuid(), - date_time_id uuid NOT NULL, - query_id uuid NOT NULL, - tenant_id uuid NOT NULL, - category_id uuid NOT NULL, - product_id uuid NOT NULL, - discount_id uuid NOT NULL, - quantity double precision NOT NULL DEFAULT 0, - - CONSTRAINT fk_date_time - FOREIGN KEY(date_time_id) - REFERENCES date_times(id), - - CONSTRAINT fk_query - FOREIGN KEY(query_id) - REFERENCES queries(id), - - CONSTRAINT fk_tenant - FOREIGN KEY(tenant_id) - REFERENCES tenants(id), - - CONSTRAINT fk_category - FOREIGN KEY(category_id) - REFERENCES categories(id), - - CONSTRAINT fk_product - FOREIGN KEY(product_id) - REFERENCES products(id), - - CONSTRAINT fk_discount - FOREIGN KEY(discount_id) - REFERENCES discounts(id), - - UNIQUE(date_time_id,query_id,tenant_id,category_id,product_id,discount_id) -) diff --git a/pkg/db/migrations/0008_create_btree_gist_extension.sql b/pkg/db/migrations/0008_create_btree_gist_extension.sql deleted file mode 100644 index c30efd1..0000000 --- a/pkg/db/migrations/0008_create_btree_gist_extension.sql +++ /dev/null @@ -1 +0,0 @@ -CREATE EXTENSION IF NOT EXISTS btree_gist; diff --git a/pkg/db/migrations/0009_use_timeranges.sql b/pkg/db/migrations/0009_use_timeranges.sql deleted file mode 100644 index 02d5991..0000000 --- a/pkg/db/migrations/0009_use_timeranges.sql +++ /dev/null @@ -1,44 +0,0 @@ -ALTER TABLE queries - ADD COLUMN during tstzrange NOT NULL DEFAULT '[-infinity,infinity)', - ADD CONSTRAINT queries_name_unit_during_non_overlapping EXCLUDE USING GIST (name WITH =, unit WITH =, during WITH &&), - ADD CONSTRAINT queries_during_lower_not_null_ck CHECK (lower(during) IS NOT NULL), - ADD CONSTRAINT queries_during_upper_not_null_ck CHECK (upper(during) IS NOT NULL); - -UPDATE queries - SET during = tstzrange(COALESCE(after,'-infinity'), COALESCE(before,'infinity'), '[)'); - -ALTER TABLE queries - DROP CONSTRAINT queries_name_unit_after_before_key, - DROP COLUMN after, - DROP COLUMN before; - - -ALTER TABLE products - ADD COLUMN during tstzrange NOT NULL DEFAULT '[-infinity,infinity)', - ADD CONSTRAINT products_source_during_non_overlapping EXCLUDE USING GIST (source WITH =, during WITH &&), - ADD CONSTRAINT products_during_lower_not_null_ck CHECK (lower(during) IS NOT NULL), - ADD CONSTRAINT products_during_upper_not_null_ck CHECK (upper(during) IS NOT NULL); - -UPDATE products - SET 
during = tstzrange(COALESCE(after,'-infinity'), COALESCE(before,'infinity'), '[)'); - -ALTER TABLE products - DROP CONSTRAINT products_source_after_before_key, - DROP COLUMN after, - DROP COLUMN before; - - -ALTER TABLE discounts - ADD COLUMN during tstzrange NOT NULL DEFAULT '[-infinity,infinity)', - ADD CONSTRAINT discounts_source_during_non_overlapping EXCLUDE USING GIST (source WITH =, during WITH &&), - ADD CONSTRAINT discounts_during_lower_not_null_ck CHECK (lower(during) IS NOT NULL), - ADD CONSTRAINT discounts_during_upper_not_null_ck CHECK (upper(during) IS NOT NULL); - -UPDATE discounts - SET during = tstzrange(COALESCE(after,'-infinity'), COALESCE(before,'infinity'), '[)'); - -ALTER TABLE discounts - DROP CONSTRAINT discounts_source_after_before_key, - DROP COLUMN after, - DROP COLUMN before; - diff --git a/pkg/db/migrations/0010_products_amount_to_float64.sql b/pkg/db/migrations/0010_products_amount_to_float64.sql deleted file mode 100644 index e79294e..0000000 --- a/pkg/db/migrations/0010_products_amount_to_float64.sql +++ /dev/null @@ -1,2 +0,0 @@ -ALTER TABLE products - ALTER COLUMN amount TYPE double precision; diff --git a/pkg/db/migrations/0011_discounts_discount_to_float64.sql b/pkg/db/migrations/0011_discounts_discount_to_float64.sql deleted file mode 100644 index bc91a33..0000000 --- a/pkg/db/migrations/0011_discounts_discount_to_float64.sql +++ /dev/null @@ -1,8 +0,0 @@ -ALTER TABLE discounts - ALTER COLUMN discount TYPE double precision; - -UPDATE discounts SET discount = discount / 100; - -ALTER TABLE discounts - ADD CONSTRAINT discounts_discount_min_ck CHECK (discount >= 0), - ADD CONSTRAINT discounts_discount_max_ck CHECK (discount <= 1); diff --git a/pkg/db/migrations/0012_sub_query.sql b/pkg/db/migrations/0012_sub_query.sql deleted file mode 100644 index e709f5c..0000000 --- a/pkg/db/migrations/0012_sub_query.sql +++ /dev/null @@ -1,5 +0,0 @@ -ALTER TABLE queries - ADD COLUMN parent_id uuid, - ADD CONSTRAINT pt_query - FOREIGN KEY(parent_id) - REFERENCES queries(id); diff --git a/pkg/db/migrations/0013_tenants_add_timerange.sql b/pkg/db/migrations/0013_tenants_add_timerange.sql deleted file mode 100644 index fd21a2a..0000000 --- a/pkg/db/migrations/0013_tenants_add_timerange.sql +++ /dev/null @@ -1,6 +0,0 @@ -ALTER TABLE tenants - ADD COLUMN during tstzrange NOT NULL DEFAULT '[-infinity,infinity)', - ADD CONSTRAINT tenants_source_during_non_overlapping EXCLUDE USING GIST (source WITH =, during WITH &&), - ADD CONSTRAINT tenants_during_lower_not_null_ck CHECK (lower(during) IS NOT NULL), - ADD CONSTRAINT tenants_during_upper_not_null_ck CHECK (upper(during) IS NOT NULL), - DROP CONSTRAINT tenants_source_key; diff --git a/pkg/db/migrations/0014_enforce_date_times_timestamp_consistency.sql b/pkg/db/migrations/0014_enforce_date_times_timestamp_consistency.sql deleted file mode 100644 index d61bac3..0000000 --- a/pkg/db/migrations/0014_enforce_date_times_timestamp_consistency.sql +++ /dev/null @@ -1,8 +0,0 @@ --- Timestamp duplicates the (year, month, day, hour) fields, but is more convenient to use. --- I'd delete the fields but that would be a pretty breaking change. --- So we just enforce consistency between the two fields. 
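(Editor's aside: on the Go side the two representations stay consistent because both are derived from the same time.Time. A sketch using the db.BuildDateTime helper seen in the tests above; the field values in the comment are an assumption based on the column names and the INSERT statements in this repository.)

at := time.Date(2023, time.January, 2, 3, 0, 0, 0, time.UTC)
dt := db.BuildDateTime(at)
// Presumably Timestamp=2023-01-02T03:00:00Z with Year=2023, Month=1, Day=2, Hour=3,
// which satisfies the date_times_timestamp_check_consistency constraint added below.
_ = dt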
- -ALTER TABLE date_times - ADD CONSTRAINT date_times_timestamp_check_consistency CHECK ( - (date_times.year || '-' || date_times.month || '-' || date_times.day || ' ' || date_times.hour || ':00:00+00')::timestamptz = date_times.timestamp - ); diff --git a/pkg/db/migrations_test.go b/pkg/db/migrations_test.go deleted file mode 100644 index 72f4f0a..0000000 --- a/pkg/db/migrations_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package db_test - -import ( - "testing" - - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - - "github.com/appuio/appuio-cloud-reporting/pkg/db" - "github.com/appuio/appuio-cloud-reporting/pkg/db/dbtest" -) - -type MigrationTestSuite struct { - dbtest.Suite -} - -func (s *MigrationTestSuite) TestMigrations_DatabaseShouldBeFullyMigrated() { - t := s.T() - pending, err := db.Pending(s.DB().DB) - require.NoError(t, err) - require.Lenf(t, pending, 0, "the test database should be migrated to the newest version before running tests") -} - -func TestMigrations(t *testing.T) { - suite.Run(t, new(MigrationTestSuite)) -} diff --git a/pkg/db/open.go b/pkg/db/open.go deleted file mode 100644 index 78fe885..0000000 --- a/pkg/db/open.go +++ /dev/null @@ -1,29 +0,0 @@ -package db - -import ( - "database/sql" - - _ "github.com/jackc/pgx/v4/stdlib" // postgres driver - "github.com/jmoiron/sqlx" -) - -const driver = "pgx" - -// Open opens a postgres database with the pgx driver. -func Open(dataSourceName string) (*sql.DB, error) { - return sql.Open(driver, dataSourceName) -} - -// Openx opens a postgres database with the pgx driver and wraps it in an sqlx.DB. -func Openx(dataSourceName string) (*sqlx.DB, error) { - db, err := Open(dataSourceName) - if err != nil { - return nil, err - } - return sqlx.NewDb(db, driver), nil -} - -// NewDBx wraps the given database in an sqlx.DB. 
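(Editor's aside: a short usage sketch. Openx is a convenience for Open followed by wrapping in sqlx, and the connection string below is the test default used elsewhere in this repository; error handling is shortened for brevity.)

dbx, err := db.Openx("postgres://postgres@localhost/reporting-db?sslmode=disable")
if err != nil {
	log.Fatal(err)
}
defer dbx.Close() // also closes the underlying *sql.DB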
-func NewDBx(db *sql.DB) *sqlx.DB { - return sqlx.NewDb(db, driver) -} diff --git a/pkg/db/schema_test.go b/pkg/db/schema_test.go deleted file mode 100644 index 00e6e2a..0000000 --- a/pkg/db/schema_test.go +++ /dev/null @@ -1,180 +0,0 @@ -package db_test - -import ( - "testing" - "time" - - "github.com/jackc/pgconn" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - - "github.com/appuio/appuio-cloud-reporting/pkg/db" - "github.com/appuio/appuio-cloud-reporting/pkg/db/dbtest" -) - -type SchemaTestSuite struct { - dbtest.Suite -} - -func (s *SchemaTestSuite) TestQueries_NameUnitDuring_NonOverlapping() { - t := s.T() - tx := s.Begin() - defer tx.Rollback() - - stmt, err := tx.PrepareNamed("INSERT INTO queries (name, query, unit, during) VALUES (:name, :query, :unit, :during)") - require.NoError(t, err) - defer stmt.Close() - - base := db.Query{ - Name: "test", - Unit: "test", - Query: "test", - During: db.Timerange( - db.MustTimestamp(time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC)), - db.MustTimestamp(time.Date(2020, time.January, 1, 0, 0, 0, 0, time.UTC)), - ), - } - _, err = stmt.Exec(base) - require.NoError(t, err) - - nonOverlapping := base - nonOverlapping.During = db.Timerange( - db.MustTimestamp(time.Date(1991, time.January, 1, 0, 0, 0, 0, time.UTC)), - db.MustTimestamp(time.Date(1995, time.January, 1, 0, 0, 0, 0, time.UTC)), - ) - _, err = stmt.Exec(nonOverlapping) - require.NoError(t, err) - - overlapping := base - overlapping.During = db.Timerange( - db.MustTimestamp(time.Date(2019, time.January, 1, 0, 0, 0, 0, time.UTC)), - db.MustTimestamp(time.Date(2021, time.January, 1, 0, 0, 0, 0, time.UTC)), - ) - _, err = stmt.Exec(overlapping) - requireExclusionValidationError(t, err) -} - -func (s *SchemaTestSuite) TestProducts_SourceDuring_NonOverlapping() { - t := s.T() - tx := s.Begin() - defer tx.Rollback() - - stmt, err := tx.PrepareNamed("INSERT INTO products (source, unit, during) VALUES (:source, :unit, :during)") - require.NoError(t, err) - defer stmt.Close() - - base := db.Product{ - Source: "test", - During: db.Timerange( - db.MustTimestamp(time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC)), - db.MustTimestamp(time.Date(2020, time.January, 1, 0, 0, 0, 0, time.UTC)), - ), - } - _, err = stmt.Exec(base) - require.NoError(t, err) - - nonOverlapping := base - nonOverlapping.During = db.Timerange( - db.MustTimestamp(time.Date(1991, time.January, 1, 0, 0, 0, 0, time.UTC)), - db.MustTimestamp(time.Date(1995, time.January, 1, 0, 0, 0, 0, time.UTC)), - ) - _, err = stmt.Exec(nonOverlapping) - require.NoError(t, err) - - overlapping := base - overlapping.During = db.Timerange( - db.MustTimestamp(time.Date(2019, time.January, 1, 0, 0, 0, 0, time.UTC)), - db.MustTimestamp(time.Date(2021, time.January, 1, 0, 0, 0, 0, time.UTC)), - ) - _, err = stmt.Exec(overlapping) - requireExclusionValidationError(t, err) -} - -func (s *SchemaTestSuite) TestDiscounts_SourceDuring_NonOverlapping() { - t := s.T() - tx := s.Begin() - defer tx.Rollback() - - stmt, err := tx.PrepareNamed("INSERT INTO discounts (source, during) VALUES (:source, :during)") - require.NoError(t, err) - defer stmt.Close() - - base := db.Discount{ - Source: "test", - During: db.Timerange( - db.MustTimestamp(time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC)), - db.MustTimestamp(time.Date(2020, time.January, 1, 0, 0, 0, 0, time.UTC)), - ), - } - _, err = stmt.Exec(base) - require.NoError(t, err) - - nonOverlapping := base - nonOverlapping.During = db.Timerange( - 
db.MustTimestamp(time.Date(1991, time.January, 1, 0, 0, 0, 0, time.UTC)), - db.MustTimestamp(time.Date(1995, time.January, 1, 0, 0, 0, 0, time.UTC)), - ) - _, err = stmt.Exec(nonOverlapping) - require.NoError(t, err) - - overlapping := base - overlapping.During = db.Timerange( - db.MustTimestamp(time.Date(2019, time.January, 1, 0, 0, 0, 0, time.UTC)), - db.MustTimestamp(time.Date(2021, time.January, 1, 0, 0, 0, 0, time.UTC)), - ) - _, err = stmt.Exec(overlapping) - requireExclusionValidationError(t, err) -} - -func (s *SchemaTestSuite) TestDiscounts_Discount_MinMaxConstraint() { - t := s.T() - - tests := []struct { - name string - discount float64 - errf func(*testing.T, error) - }{ - {"overMax", 1.3, requireCheckConstraintError}, - {"underMin", -7, requireCheckConstraintError}, - {"inRangeLow", 0, func(t *testing.T, e error) { require.NoError(t, e) }}, - {"inRange", 0.78, func(t *testing.T, e error) { require.NoError(t, e) }}, - {"inRangeHigh", 1, func(t *testing.T, e error) { require.NoError(t, e) }}, - } - - for _, testCase := range tests { - t.Run(testCase.name, func(t *testing.T) { - tx := s.Begin() - defer tx.Rollback() - - stmt, err := tx.PrepareNamed("INSERT INTO discounts (source, discount) VALUES (:source, :discount)") - require.NoError(t, err) - defer stmt.Close() - - _, err = stmt.Exec(db.Discount{ - Source: testCase.name, - Discount: testCase.discount, - }) - testCase.errf(t, err) - }) - } -} - -func TestSchema(t *testing.T) { - suite.Run(t, new(SchemaTestSuite)) -} - -func requireExclusionValidationError(t *testing.T, err error) { - t.Helper() - - pgErr := &pgconn.PgError{} - require.ErrorAs(t, err, &pgErr) - require.Equal(t, "23P01", pgErr.SQLState(), "error code should match exclusion violation error") -} - -func requireCheckConstraintError(t *testing.T, err error) { - t.Helper() - - pgErr := &pgconn.PgError{} - require.ErrorAs(t, err, &pgErr) - require.Equal(t, "23514", pgErr.SQLState(), "error code should match check constraint error") -} diff --git a/pkg/db/seeds.go b/pkg/db/seeds.go deleted file mode 100644 index 3c55ebe..0000000 --- a/pkg/db/seeds.go +++ /dev/null @@ -1,162 +0,0 @@ -package db - -import ( - "database/sql" - "fmt" - - _ "embed" - - "github.com/jmoiron/sqlx" -) - -//go:embed seeds/appuio_cloud_memory.promql -var appuioCloudMemoryQuery string - -//go:embed seeds/appuio_cloud_memory_sub_memory.promql -var appuioCloudMemorySubQueryMemory string - -//go:embed seeds/appuio_cloud_memory_sub_cpu.promql -var appuioCloudMemorySubQueryCPU string - -//go:embed seeds/appuio_cloud_loadbalancer.promql -var appuioCloudLoadbalancerQuery string - -//go:embed seeds/appuio_cloud_persistent_storage.promql -var appuioCloudPersistentStorageQuery string - -//go:embed seeds/appuio_managed_openshift_vcpu.promql -var appuioManagedOpenShiftvCPUQuery string - -//go:embed seeds/appuio_managed_openshift_clusters_legacy.promql -var appuioManagedOpenShiftClusterQuery string - -//go:embed seeds/appuio_managed_kubernetes_vcpu.promql -var appuioManagedKubernetesvCPUQuery string - -var appcatQuery = "appcat:billing" - -// DefaultQueries consists of default starter queries. 
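(Editor's aside: besides the DefaultQueries defined below, custom query sets can be seeded the same way. A sketch, assuming a migrated database handle sqlDB of type *sql.DB; the example query and its name are illustrative, only the db.Query fields and db.SeedQueries are taken from this file. Seeding skips queries whose name already exists, so it is safe to re-run.)

custom := []db.Query{{
	Name:        "example_pod_count",                 // illustrative name
	Description: "Number of pods per namespace",      // illustrative description
	Query:       `sum by(namespace) (kube_pod_info)`, // illustrative PromQL
	Unit:        "Pods",
}}
if err := db.SeedQueries(sqlDB, custom); err != nil {
	log.Fatal(err)
}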
-var DefaultQueries = []Query{ - { - Name: "appuio_cloud_memory", - Description: "Memory usage (maximum of requested and used memory) aggregated by namespace", - Query: appuioCloudMemoryQuery, - Unit: "MiB", - subQueries: []Query{ - { - Name: "appuio_cloud_memory_subquery_memory_request", - Description: "Memory request exceeding the actual usage", - Query: appuioCloudMemorySubQueryMemory, - Unit: "MiB", - }, - { - Name: "appuio_cloud_memory_subquery_cpu_request", - Description: "CPU requests exceeding the fair use limit, converted to the memory request equivalent", - Query: appuioCloudMemorySubQueryCPU, - Unit: "MiB", - }, - }, - }, - { - Name: "appuio_cloud_loadbalancer", - Description: "Number of services of type load balancer", - Query: appuioCloudLoadbalancerQuery, - }, - { - Name: "appuio_cloud_persistent_storage", - Description: "Persistent storage usage aggregated by namespace and storageclass", - Query: appuioCloudPersistentStorageQuery, - Unit: "GiB", - }, - { - Name: "appuio_managed_openshift_vcpu", - Description: "vCPU aggregated by cluster, node (app, storage), and service level", - Query: appuioManagedOpenShiftvCPUQuery, - Unit: "vCPU", - }, - { - Name: "appuio_managed_openshift_clusters", - Description: "Cluster base fee", - Query: appuioManagedOpenShiftClusterQuery, - Unit: "Clusters", - }, - { - Name: "appuio_managed_kubernetes_vcpu", - Description: "vCPU aggregated by cluster, node (app, storage), and service level", - Query: appuioManagedKubernetesvCPUQuery, - Unit: "vCPU", - }, - { - Name: "appcat_services", - Description: "AppCat service instances query", - Query: appcatQuery, - Unit: "instances", - }, -} - -// Seed seeds the database with "starter" data. -// Is idempotent and thus can be executed multiple times in one database. -func Seed(db *sql.DB) error { - return SeedQueries(db, DefaultQueries) -} - -func SeedQueries(db *sql.DB, queries []Query) error { - dbx := NewDBx(db) - tx, err := dbx.Beginx() - if err != nil { - return err - } - defer tx.Rollback() - - if err := createQueries(tx, queries); err != nil { - return err - } - - return tx.Commit() -} - -func createQueries(tx *sqlx.Tx, queries []Query) error { - for _, q := range queries { - exists, err := queryExistsByName(tx, q.Name) - if err != nil { - return fmt.Errorf("error checking if query exists: %w", err) - } - if exists { - fmt.Printf("Found query with name '%s'. Skip creating default query.\n", q.Name) - continue - } - - err = GetNamed(tx, &q.Id, - "INSERT INTO queries (name,description,query,unit,during) VALUES (:name,:description,:query,:unit,'[-infinity,infinity)') RETURNING id", - q) - if err != nil { - return fmt.Errorf("error creating default query: %w", err) - } - - for _, subQuery := range q.subQueries { - subQuery.ParentID = sql.NullString{ - String: q.Id, - Valid: true, - } - exists, err := queryExistsByName(tx, subQuery.Name) - if err != nil { - return fmt.Errorf("error checking if sub-query exists: %w", err) - } - if exists { - fmt.Printf("Found sub-query with name '%s'. 
Skip creating default query.\n", subQuery.Name) - continue - } - _, err = tx.NamedExec("INSERT INTO queries (name,description,query,unit,during) VALUES (:name,:description,:query,:unit,'[-infinity,infinity)')", subQuery) - if err != nil { - return fmt.Errorf("error creating default sub-query: %w", err) - } - } - } - return nil -} - -func queryExistsByName(tx *sqlx.Tx, name string) (bool, error) { - var exists bool - err := tx.Get(&exists, "SELECT EXISTS(SELECT 1 FROM queries WHERE name = $1)", name) - return exists, err -} diff --git a/pkg/db/seeds/appuio_cloud_loadbalancer.promql b/pkg/db/seeds/appuio_cloud_loadbalancer.promql deleted file mode 100644 index 69faaf1..0000000 --- a/pkg/db/seeds/appuio_cloud_loadbalancer.promql +++ /dev/null @@ -1,44 +0,0 @@ -# Sum values over one hour. -sum_over_time( - # Add the final product label by joining the base product with the cluster ID, the tenant, and the namespace. - label_join( - # Add the category label by joining the cluster ID and the namespace. - label_join( - # Add the base product identifier. - label_replace( - # Get number of services of type load balancer - sum by(cluster_id, namespace) (kube_service_spec_type{type="LoadBalancer"}) - * - # Join the namespace label to get the tenant - on(cluster_id, namespace) - group_left(tenant_id) - ( - bottomk(1, - min by (cluster_id, namespace, tenant_id) ( - label_replace( - kube_namespace_labels{label_appuio_io_organization=~".+"}, - "tenant_id", - "$1", - "label_appuio_io_organization", "(.*)" - ) - ) - ) by(cluster_id, namespace) - ), - "product", - "appuio_cloud_loadbalancer", - "product", - ".*" - ), - "category", - ":", - "cluster_id", - "namespace" - ), - "product", - ":", - "product", - "cluster_id", - "tenant_id", - "namespace" - )[59m:1m] -) diff --git a/pkg/db/seeds/appuio_cloud_memory.promql b/pkg/db/seeds/appuio_cloud_memory.promql deleted file mode 100644 index 112a36d..0000000 --- a/pkg/db/seeds/appuio_cloud_memory.promql +++ /dev/null @@ -1,99 +0,0 @@ -# Sum values over one hour. -sum_over_time( - # Average over a one-minute time frame. - # NOTE: This is a sliding window. Results vary based on the queries execution time. - avg_over_time( - # Add the final product label by joining the base product with the cluster ID, the tenant and the namespace. - label_join( - # Add the category label by joining the cluster ID and the namespace. - label_join( - # Add the base product identifier. - label_replace( - clamp_min( - # Get the maximum of requested and used memory. - # TODO Is there a better way to get the maximum of two vectors? - ( - ( - # Select used memory if higher. - ( - sum by(cluster_id, namespace, label_appuio_io_node_class) (container_memory_working_set_bytes{image!=""} - * on(cluster_id, node) group_left(label_appuio_io_node_class) (min by(cluster_id, node, label_appuio_io_node_class) (kube_node_labels{label_appuio_io_node_class!=""} or on(cluster_id, node) kube_node_labels{label_appuio_io_node_class=""}))) - # IMPORTANT: one clause must use equal. If used grater and lesser than, equal values will be dropped. 
- >= - sum by(cluster_id, namespace, label_appuio_io_node_class) (kube_pod_container_resource_requests{resource="memory"} - * on(uid, cluster_id, pod, namespace) group_left kube_pod_status_phase{phase="Running"} - * on(cluster_id, node) group_left(label_appuio_io_node_class) (min by(cluster_id, node, label_appuio_io_node_class) (kube_node_labels{label_appuio_io_node_class!=""} or on(cluster_id, node) kube_node_labels{label_appuio_io_node_class=""}))) - ) - or - # Select reserved memory if higher. - ( - # IMPORTANT: The desired time series must always be first. - sum by(cluster_id, namespace, label_appuio_io_node_class) (kube_pod_container_resource_requests{resource="memory"} - * on(uid, cluster_id, pod, namespace) group_left kube_pod_status_phase{phase="Running"} - * on(cluster_id, node) group_left(label_appuio_io_node_class) (min by(cluster_id, node, label_appuio_io_node_class) (kube_node_labels{label_appuio_io_node_class!=""} or on(cluster_id, node) kube_node_labels{label_appuio_io_node_class=""}))) - > - sum by(cluster_id, namespace, label_appuio_io_node_class) (container_memory_working_set_bytes{image!=""} - * on(cluster_id, node) group_left(label_appuio_io_node_class) (min by(cluster_id, node, label_appuio_io_node_class) (kube_node_labels{label_appuio_io_node_class!=""} or on(cluster_id, node) kube_node_labels{label_appuio_io_node_class=""}))) - ) - ) - # Add CPU requests in violation to the ratio provided by the platform. - + clamp_min( - # Convert CPU request to their memory equivalent. - sum by(cluster_id, namespace, label_appuio_io_node_class) ( - kube_pod_container_resource_requests{resource="cpu"} * on(uid, cluster_id, pod, namespace) group_left kube_pod_status_phase{phase="Running"} - * on(cluster_id, node) group_left(label_appuio_io_node_class) (min by(cluster_id, node, label_appuio_io_node_class) (kube_node_labels{label_appuio_io_node_class!=""} or on(cluster_id, node) kube_node_labels{label_appuio_io_node_class=""})) - # Build that ratio from static values - * on(cluster_id) group_left()( - # Build a time series of ratio for Cloudscale LPG 2 (4096 MiB/core) - label_replace(vector(4294967296), "cluster_id", "c-appuio-cloudscale-lpg-2", "", "") - # Build a time series of ratio for Exoscale GVA-2 0 (5086 MiB/core) - or label_replace(vector(5333057536), "cluster_id", "c-appuio-exoscale-ch-gva-2-0", "", "") - ) - ) - # Subtract memory request - - sum by(cluster_id, namespace, label_appuio_io_node_class) (kube_pod_container_resource_requests{resource="memory"} * on(uid, cluster_id, pod, namespace) group_left kube_pod_status_phase{phase="Running"} - * on(cluster_id, node) group_left(label_appuio_io_node_class) (min by(cluster_id, node, label_appuio_io_node_class) (kube_node_labels{label_appuio_io_node_class!=""} or on(cluster_id, node) kube_node_labels{label_appuio_io_node_class=""})) - # Only values above zero are in violation. - ), 0) - ) - * - # Join namespace label `label_appuio_io_organization` as `tenant_id`. 
- on(cluster_id, namespace) - group_left(tenant_id) - ( - bottomk(1, - min by (cluster_id, namespace, tenant_id) ( - label_replace( - kube_namespace_labels{label_appuio_io_organization=~".+"}, - "tenant_id", - "$1", - "label_appuio_io_organization", "(.*)" - ) - ) - ) by(cluster_id, namespace) - ), - # At least return 128MiB - 128 * 1024 * 1024 - ), - "product", - "appuio_cloud_memory", - "product", - ".*" - ), - "category", - ":", - "cluster_id", - "namespace" - ), - "product", - ":", - "product", - "cluster_id", - "tenant_id", - "namespace", - "label_appuio_io_node_class" - )[45s:15s] - )[59m:1m] -) -# Convert to MiB -/ 1024 / 1024 diff --git a/pkg/db/seeds/appuio_cloud_memory_sub_cpu.promql b/pkg/db/seeds/appuio_cloud_memory_sub_cpu.promql deleted file mode 100644 index 7e1d425..0000000 --- a/pkg/db/seeds/appuio_cloud_memory_sub_cpu.promql +++ /dev/null @@ -1,72 +0,0 @@ -# Calculates CPU requests higher than memory requests respecting the fair-use ratio - -# Sum values over one hour. -sum_over_time( - # Average over a one-minute time frame. - # NOTE: This is a sliding window. Results vary based on the queries execution time. - avg_over_time( - # Add the final product label by joining the base product with the cluster ID, the tenant and the namespace. - label_join( - # Add the category label by joining the cluster ID and the namespace. - label_join( - # Add the base product identifier. - label_replace( - clamp_min( - ( - sum by(cluster_id, namespace, label_appuio_io_node_class) ( - # Get the CPU requests - kube_pod_container_resource_requests{resource="cpu"} * on(uid, cluster_id, pod, namespace) group_left kube_pod_status_phase{phase="Running"} - * on(cluster_id, node) group_left(label_appuio_io_node_class) (min by(cluster_id, node, label_appuio_io_node_class) (kube_node_labels{label_appuio_io_node_class!=""} or on(cluster_id, node) kube_node_labels{label_appuio_io_node_class=""})) - # Convert them to their memory equivalent by multiplying them by the memory to CPU ratio - # Build that ratio from static values - * on(cluster_id) group_left()( - # Build a time series for Cloudscale LPG 2 (4096 MiB/core) - label_replace(vector(4294967296), "cluster_id", "c-appuio-cloudscale-lpg-2", "", "") - # Build a time series for Exoscale GVA-2 0 (5086 MiB/core) - or label_replace(vector(5333057536), "cluster_id", "c-appuio-exoscale-ch-gva-2-0", "", "") - ) - ) - - sum by(cluster_id, namespace, label_appuio_io_node_class) (kube_pod_container_resource_requests{resource="memory"} * on(uid, cluster_id, pod, namespace) group_left kube_pod_status_phase{phase="Running"} - * on(cluster_id, node) group_left(label_appuio_io_node_class) (min by(cluster_id, node, label_appuio_io_node_class) (kube_node_labels{label_appuio_io_node_class!=""} or on(cluster_id, node) kube_node_labels{label_appuio_io_node_class=""}))) - ) - * - # Join namespace label `label_appuio_io_organization` as `tenant_id`. 
- on(cluster_id, namespace) - group_left(tenant_id) - ( - bottomk(1, - min by (cluster_id, namespace, tenant_id) ( - label_replace( - kube_namespace_labels{label_appuio_io_organization=~".+"}, - "tenant_id", - "$1", - "label_appuio_io_organization", "(.*)" - ) - ) - ) by(cluster_id, namespace) - ), - # At least return 0 - 0 - ), - "product", - "appuio_cloud_memory", - "product", - ".*" - ), - "category", - ":", - "cluster_id", - "namespace" - ), - "product", - ":", - "product", - "cluster_id", - "tenant_id", - "namespace", - "label_appuio_io_node_class" - )[45s:15s] - )[59m:1m] -) -# Convert to MiB -/ 1024 / 1024 diff --git a/pkg/db/seeds/appuio_cloud_memory_sub_memory.promql b/pkg/db/seeds/appuio_cloud_memory_sub_memory.promql deleted file mode 100644 index c83e3ca..0000000 --- a/pkg/db/seeds/appuio_cloud_memory_sub_memory.promql +++ /dev/null @@ -1,64 +0,0 @@ -# Calculates memory requests higher than the real memory usage - -# Sum values over one hour. -sum_over_time( - # Average over a one-minute time frame. - # NOTE: This is a sliding window. Results vary based on the queries execution time. - avg_over_time( - # Add the final product label by joining the base product with the cluster ID, the tenant and the namespace. - label_join( - # Add the category label by joining the cluster ID and the namespace. - label_join( - # Add the base product identifier. - label_replace( - clamp_min( - ( - clamp_min( - sum by(cluster_id, namespace, label_appuio_io_node_class) (kube_pod_container_resource_requests{resource="memory"} - * on(uid, cluster_id, pod, namespace) group_left kube_pod_status_phase{phase="Running"} - * on(cluster_id, node) group_left(label_appuio_io_node_class) (min by(cluster_id, node, label_appuio_io_node_class) (kube_node_labels{label_appuio_io_node_class!=""} or on(cluster_id, node) kube_node_labels{label_appuio_io_node_class=""}))), - 128 * 1024 * 1024 - ) - - sum by(cluster_id, namespace, label_appuio_io_node_class) (container_memory_working_set_bytes{image!=""} - * on(cluster_id, node) group_left(label_appuio_io_node_class) (min by(cluster_id, node, label_appuio_io_node_class) (kube_node_labels{label_appuio_io_node_class!=""} or on(cluster_id, node) kube_node_labels{label_appuio_io_node_class=""}))) - ), - 0 - ) - * - # Join namespace label `label_appuio_io_organization` as `tenant_id`. - on(cluster_id, namespace) - group_left(tenant_id) - ( - bottomk(1, - min by (cluster_id, namespace, tenant_id) ( - label_replace( - kube_namespace_labels{label_appuio_io_organization=~".+"}, - "tenant_id", - "$1", - "label_appuio_io_organization", "(.*)" - ) - ) - ) by(cluster_id, namespace) - ), - "product", - "appuio_cloud_memory", - "product", - ".*" - ), - "category", - ":", - "cluster_id", - "namespace" - ), - "product", - ":", - "product", - "cluster_id", - "tenant_id", - "namespace", - "label_appuio_io_node_class" - )[45s:15s] - )[59m:1m] -) -# Convert to MiB -/ 1024 / 1024 diff --git a/pkg/db/seeds/appuio_cloud_persistent_storage.promql b/pkg/db/seeds/appuio_cloud_persistent_storage.promql deleted file mode 100644 index b9006b8..0000000 --- a/pkg/db/seeds/appuio_cloud_persistent_storage.promql +++ /dev/null @@ -1,77 +0,0 @@ -# Sum values over one hour. -sum_over_time( - # Add the final product label by joining the base product with the cluster ID, the tenant, and the namespace. - label_join( - # Add the category label by joining the cluster ID and the namespace. - label_join( - # Add the base product identifier. 
-          label_replace(
-            clamp_min(
-              sum by(cluster_id, tenant_id, namespace, storageclass)(
-                # Get the PersistentVolume size
-                kube_persistentvolume_capacity_bytes
-                *
-                # Join the PersistentVolumeClaim to get the namespace
-                on (cluster_id,persistentvolume)
-                group_left(namespace, name)
-                label_replace(
-                  kube_persistentvolume_claim_ref,
-                  "namespace",
-                  "$1",
-                  "claim_namespace",
-                  "(.+)(-.*)?"
-                )
-                *
-                # Join the PersistentVolume info to get StorageClass
-                on (cluster_id,persistentvolume)
-                group_left(storageclass)
-                # Do not differentiate between regular and encrypted storage class versions.
-                min by (cluster_id, persistentvolume, storageclass) (
-                  label_replace(
-                    kube_persistentvolume_info,
-                    "storageclass",
-                    "$1",
-                    "storageclass",
-                    "([^-]+)-encrypted"
-                  )
-                )
-                *
-                # Join the namespace label to get the tenant
-                on(cluster_id,namespace)
-                group_left(tenant_id)
-                (
-                  bottomk(1,
-                    min by (cluster_id, namespace, tenant_id) (
-                      label_replace(
-                        kube_namespace_labels{label_appuio_io_organization=~".+"},
-                        "tenant_id",
-                        "$1",
-                        "label_appuio_io_organization", "(.*)"
-                      )
-                    )
-                  ) by(cluster_id, namespace)
-                )
-              ),
-              1024 * 1024 * 1024
-            ),
-            "product",
-            "appuio_cloud_persistent_storage",
-            "product",
-            ".*"
-          ),
-          "category",
-          ":",
-          "cluster_id",
-          "namespace"
-        ),
-        "product",
-        ":",
-        "product",
-        "cluster_id",
-        "tenant_id",
-        "namespace",
-        "storageclass"
-      )[59m:1m]
-)
-# Convert to GiB
-/ 1024 / 1024 / 1024
diff --git a/pkg/db/seeds/appuio_managed_kubernetes_vcpu.promql b/pkg/db/seeds/appuio_managed_kubernetes_vcpu.promql
deleted file mode 100644
index 22c0fdb..0000000
--- a/pkg/db/seeds/appuio_managed_kubernetes_vcpu.promql
+++ /dev/null
@@ -1,36 +0,0 @@
-# Calculates vCPUs for app nodes of a cluster
-# Structure of resulting product label "query:cluster:tenant::class"
-
-# Max values over one hour.
-max_over_time(
-  # Add the final product label by joining the base product with the cluster ID, the tenant, and the service class.
-  label_join(
-    label_join(
-      label_replace(
-        # Add the base product identifier.
-        label_replace(
-          sum by (cluster_id, vshn_service_level, tenant_id) (min without(prometheus_replica) (node_cpu_info)),
-          "product",
-          "appuio_managed_kubernetes_vcpu",
-          "",
-          ".*"
-        ),
-        "class",
-        "$1",
-        "vshn_service_level",
-        "(.*)"
-      ),
-      "product",
-      ":",
-      "product",
-      "cluster_id",
-      "tenant_id",
-      "place_holder",
-      "class"
-    ),
-    "category",
-    ":",
-    "tenant_id",
-    "cluster_id"
-  )[59m:1m]
-)
diff --git a/pkg/db/seeds/appuio_managed_openshift_clusters_legacy.promql b/pkg/db/seeds/appuio_managed_openshift_clusters_legacy.promql
deleted file mode 100644
index d10a404..0000000
--- a/pkg/db/seeds/appuio_managed_openshift_clusters_legacy.promql
+++ /dev/null
@@ -1,39 +0,0 @@
-# Calculates number of clusters per cluster, but only for clusters with old billing
-# Yes, this is always 1
-# Structure of resulting product label "query:cloud:tenant:cluster:sla"
-
-# Max values over one hour.
-max_over_time(
-  # Add the final product label by joining the base product with the cluster ID, the tenant, and the service class.
-  label_join(
-    label_join(
-      label_replace(
-        # Add the base product identifier.
- label_replace( - max by(cluster_id, vshn_service_level, tenant_id, cloud_provider) ( - appuio_managed_info{vshn_service_level=~"(zero|standard|professional|premium)"} - ), - "product", - "appuio_managed_openshift_clusters", - "", - ".*" - ), - "class", - "$0", - "vshn_service_level", - "(.*)" - ), - "product", - ":", - "product", - "cloud_provider", - "tenant_id", - "cluster_id", - "class" - ), - "category", - ":", - "tenant_id", - "cluster_id" - )[58m:1m] -) diff --git a/pkg/db/seeds/appuio_managed_openshift_vcpu.promql b/pkg/db/seeds/appuio_managed_openshift_vcpu.promql deleted file mode 100644 index 29b0a00..0000000 --- a/pkg/db/seeds/appuio_managed_openshift_vcpu.promql +++ /dev/null @@ -1,46 +0,0 @@ -# Calculates vCPUs for app nodes of a cluster -# Structure of resulting product label "query:cloud:tenant:cluster:sla:role" - -# Max values over one hour. -max_over_time( - # Add the final product label by joining the base product with the cluster ID, the tenant, and the service class. - label_join( - label_join( - label_replace( - # Add the base product identifier. - label_replace( - sum by(cluster_id, vshn_service_level, tenant_id, role, cloud_provider) ( - node_cpu_info * on (tenant_id, cluster_id, instance) group_left(role) - label_join( - kube_node_role{role=~"app|storage"}, - "instance", - "", - "node" - ) * on(cluster_id) group_left(tenant_id, vshn_service_level, cloud_provider) - max by(cluster_id, tenant_id, vshn_service_level, cloud_provider)(appuio_managed_info) - ), - "product", - "appuio_managed_openshift_vcpu", - "", - ".*" - ), - "class", - "$1", - "vshn_service_level", - "(.*)" - ), - "product", - ":", - "product", - "cloud_provider", - "tenant_id", - "cluster_id", - "class", - "role" - ), - "category", - ":", - "tenant_id", - "cluster_id" - )[59m:1m] -) \ No newline at end of file diff --git a/pkg/db/seeds/promtest/appuio_cloud_loadbalancer.jsonnet b/pkg/db/seeds/promtest/appuio_cloud_loadbalancer.jsonnet deleted file mode 100644 index 8ae1a29..0000000 --- a/pkg/db/seeds/promtest/appuio_cloud_loadbalancer.jsonnet +++ /dev/null @@ -1,88 +0,0 @@ -local c = import 'common.libsonnet'; - -local query = importstr '../appuio_cloud_loadbalancer.promql'; - -local commonLabels = { - cluster_id: 'c-appuio-cloudscale-lpg-2', - tenant_id: 'c-appuio-cloudscale-lpg-2', -}; - -// One pvc, minimal (=1 byte) request -// 10 samples -local baseSeries = { - testprojectNamespaceOrgLabel: c.series('kube_namespace_labels', commonLabels { - namespace: 'testproject', - label_appuio_io_organization: 'cherry-pickers-inc', - }, '1x120'), - - pvCapacity: c.series('kube_service_spec_type', commonLabels { - type: 'LoadBalancer', - namespace: 'testproject', - }, '1x120'), -}; - -local baseCalculatedLabels = { - category: 'c-appuio-cloudscale-lpg-2:testproject', - cluster_id: 'c-appuio-cloudscale-lpg-2', - namespace: 'testproject', - product: 'appuio_cloud_loadbalancer:c-appuio-cloudscale-lpg-2:cherry-pickers-inc:testproject', - tenant_id: 'cherry-pickers-inc', -}; - -{ - tests: [ - c.test('minimal PVC', - baseSeries, - query, - { - labels: c.formatLabels(baseCalculatedLabels), - value: 60, - }), - - c.test('unrelated kube_namespace_labels changes do not throw errors - there is an overlap since series go stale only after a few missed scrapes', - baseSeries { - testprojectNamespaceOrgLabelUpdated: self.testprojectNamespaceOrgLabel { - _labels+:: { - custom_appuio_io_myid: '672004be-a86b-44e0-b446-1255a1f8b340', - }, - values: '_x30 1x30 _x60', - }, - }, - query, - { - labels: 
c.formatLabels(baseCalculatedLabels),
-           value: 60,
-         }),
-
-    c.test('organization changes do not throw many-to-many errors - there is an overlap since series go stale only after a few missed scrapes',
-           baseSeries {
-             testprojectNamespaceOrgLabel+: {
-               // We cheat here and use an impossible value.
-               // Since we use min() and bottomk() in the query this prioritizes this series less than the other.
-               // It's ugly but it prevents flaky tests since otherwise one of the series gets picked randomly.
-               values: '2x120',
-             },
-             testprojectNamespaceOrgLabelUpdated: self.testprojectNamespaceOrgLabel {
-               _labels+:: {
-                 label_appuio_io_organization: 'carrot-pickers-inc',
-               },
-               values: '_x60 1x60',
-             },
-           },
-           query,
-           [
-             {
-               labels: c.formatLabels(baseCalculatedLabels),
-               // 1 service * 29 * 2 because of the cheat above.
-               value: 29 * 2,
-             },
-             {
-               labels: c.formatLabels(baseCalculatedLabels {
-                 tenant_id: 'carrot-pickers-inc',
-                 product: 'appuio_cloud_loadbalancer:c-appuio-cloudscale-lpg-2:carrot-pickers-inc:testproject',
-               }),
-               value: 31,
-             },
-           ]),
-  ],
-}
diff --git a/pkg/db/seeds/promtest/appuio_cloud_memory.jsonnet b/pkg/db/seeds/promtest/appuio_cloud_memory.jsonnet
deleted file mode 100644
index 19178ce..0000000
--- a/pkg/db/seeds/promtest/appuio_cloud_memory.jsonnet
+++ /dev/null
@@ -1,240 +0,0 @@
-local c = import 'common.libsonnet';
-
-local query = importstr '../appuio_cloud_memory.promql';
-local subCPUQuery = importstr '../appuio_cloud_memory_sub_cpu.promql';
-local subMemoryQuery = importstr '../appuio_cloud_memory_sub_memory.promql';
-
-local commonLabels = {
-  cluster_id: 'c-appuio-cloudscale-lpg-2',
-  tenant_id: 'c-appuio-cloudscale-lpg-2',
-};
-
-// One running pod, minimal (=1 byte) memory request and usage, no CPU request
-// 10 samples
-local baseSeries = {
-  flexNodeLabel: c.series('kube_node_labels', commonLabels {
-    label_appuio_io_node_class: 'flex',
-    label_kubernetes_io_hostname: 'flex-x666',
-    node: 'flex-x666',
-  }, '1x120'),
-  testprojectNamespaceOrgLabel: c.series('kube_namespace_labels', commonLabels {
-    namespace: 'testproject',
-    label_appuio_io_organization: 'cherry-pickers-inc',
-  }, '1x120'),
-
-  local podLbls = commonLabels {
-    namespace: 'testproject',
-    pod: 'running-pod',
-    uid: '35e3a8b1-b46d-496c-b2b7-1b52953bf904',
-  },
-  // Phases
-  runningPodPhase: c.series('kube_pod_status_phase', podLbls {
-    phase: 'Running',
-  }, '1x120'),
-  // Requests
-  runningPodMemoryRequests: c.series('kube_pod_container_resource_requests', podLbls {
-    resource: 'memory',
-    node: 'flex-x666',
-  }, '1x120'),
-  runningPodCPURequests: c.series('kube_pod_container_resource_requests', podLbls {
-    resource: 'cpu',
-    node: 'flex-x666',
-  }, '0x120'),
-  // Real usage
-  runningPodMemoryUsage: c.series('container_memory_working_set_bytes', podLbls {
-    image: 'busybox',
-    node: 'flex-x666',
-  }, '1x120'),
-};
-
-local baseCalculatedLabels = {
-  category: 'c-appuio-cloudscale-lpg-2:testproject',
-  cluster_id: 'c-appuio-cloudscale-lpg-2',
-  label_appuio_io_node_class: 'flex',
-  namespace: 'testproject',
-  product: 'appuio_cloud_memory:c-appuio-cloudscale-lpg-2:cherry-pickers-inc:testproject:flex',
-  tenant_id: 'cherry-pickers-inc',
-};
-
-// Constants from the query
-local minMemoryRequestMib = 128;
-local cloudscaleFairUseRatio = 4294967296;
-
-local subQueryTests = [
-  c.test('sub CPU requests query sanity check',
-         baseSeries,
-         subCPUQuery,
-         {
-           labels: c.formatLabels(baseCalculatedLabels),
-           value: 0,
-         }),
-  c.test('sub memory requests query sanity check',
-         baseSeries,
-         subMemoryQuery,
- { - labels: c.formatLabels(baseCalculatedLabels), - value: (minMemoryRequestMib - (1 / 1024 / 1024)) * 60, - }), -]; - -{ - tests: subQueryTests + [ - c.test('minimal pod', - baseSeries, - query, - { - labels: c.formatLabels(baseCalculatedLabels), - value: minMemoryRequestMib * 60, - }), - c.test('pod with higher memory usage', - baseSeries { - runningPodMemoryUsage+: { - values: '%sx120' % (500 * 1024 * 1024), - }, - }, - query, - { - labels: c.formatLabels(baseCalculatedLabels), - value: 500 * 60, - }), - c.test('pod with higher memory requests', - baseSeries { - runningPodMemoryRequests+: { - values: '%sx120' % (500 * 1024 * 1024), - }, - }, - query, - { - labels: c.formatLabels(baseCalculatedLabels), - value: 500 * 60, - }), - c.test('pod with CPU requests violating fair use', - baseSeries { - runningPodCPURequests+: { - values: '1x120', - }, - }, - query, - { - labels: c.formatLabels(baseCalculatedLabels), - // See per cluster fair use ratio in query - // value: 2.048E+04, - value: (cloudscaleFairUseRatio / 1024 / 1024) * 60, - }), - c.test('non-running pods are not counted', - baseSeries { - local lbls = commonLabels { - namespace: 'testproject', - pod: 'succeeded-pod', - uid: '2a7a6e32-0840-4ac3-bab4-52d7e16f4a0a', - }, - succeededPodPhase: c.series('kube_pod_status_phase', lbls { - phase: 'Succeeded', - }, '1x120'), - succeededPodMemoryRequests: c.series('kube_pod_container_resource_requests', lbls { - resource: 'memory', - node: 'flex-x666', - }, '1x120'), - succeededPodCPURequests: c.series('kube_pod_container_resource_requests', lbls { - node: 'flex-x666', - resource: 'cpu', - }, '1x120'), - }, - query, - { - labels: c.formatLabels(baseCalculatedLabels), - value: minMemoryRequestMib * 60, - }), - c.test('unrelated kube_node_labels changes do not throw errors - there is an overlap since series go stale only after a few missed scrapes', - baseSeries { - flexNodeLabelUpdated: self.flexNodeLabel { - _labels+:: { - label_csi_driver_id: '18539CC3-0B6C-4E72-82BD-90A9BEF7D807', - }, - values: '_x30 1x30 _x60', - }, - }, - query, - { - labels: c.formatLabels(baseCalculatedLabels), - value: minMemoryRequestMib * 60, - }), - c.test('node class adds do not throw errors - there is an overlap since series go stale only after a few missed scrapes', - baseSeries { - flexNodeLabel+: { - _labels+:: { - label_appuio_io_node_class:: null, - }, - values: '1x60', - }, - flexNodeLabelUpdated: super.flexNodeLabel { - values: '_x30 1x90', - }, - }, - query, - [ - // I'm not sure why this is 61min * minMemoryRequestMib. 
Other queries always result in 60min
- // TODO investigate where the extra min comes from
- {
- labels: c.formatLabels(baseCalculatedLabels),
- value: minMemoryRequestMib * 46,
- },
- {
- labels: c.formatLabels(baseCalculatedLabels {
- label_appuio_io_node_class:: null,
- product: 'appuio_cloud_memory:c-appuio-cloudscale-lpg-2:cherry-pickers-inc:testproject:',
- }),
- value: minMemoryRequestMib * 15,
- },
- ]),
-
- c.test('unrelated kube_namespace_labels changes do not throw errors - there is an overlap since series go stale only after a few missed scrapes',
- baseSeries {
- testprojectNamespaceOrgLabelUpdated: self.testprojectNamespaceOrgLabel {
- _labels+:: {
- custom_appuio_io_myid: '672004be-a86b-44e0-b446-1255a1f8b340',
- },
- values: '_x30 1x30 _x60',
- },
- },
- query,
- {
- labels: c.formatLabels(baseCalculatedLabels),
- value: minMemoryRequestMib * 60,
- }),
-
- c.test('organization changes do not throw many-to-many errors - there is an overlap since series go stale only after a few missed scrapes',
- baseSeries {
- testprojectNamespaceOrgLabel+: {
- // We cheat here and use an impossible value.
- // Since we use min() and bottomk() in the query this prioritizes this series less than the other.
- // It's ugly but it prevents flaky tests since otherwise one of the series gets picked randomly.
- // Does not influence the result. The result is floored to a minimum of 128MiB.
- values: '2x120',
- },
- testprojectNamespaceOrgLabelUpdated: self.testprojectNamespaceOrgLabel {
- _labels+:: {
- label_appuio_io_organization: 'carrot-pickers-inc',
- },
- values: '_x60 1x60',
- },
- },
- query,
- [
- // I'm not sure why this is 61min * minMemoryRequestMib. Other queries always result in 60min
- // TODO investigate where the extra min comes from
- {
- labels: c.formatLabels(baseCalculatedLabels),
- value: minMemoryRequestMib * 30,
- },
- {
- labels: c.formatLabels(baseCalculatedLabels {
- tenant_id: 'carrot-pickers-inc',
- product: 'appuio_cloud_memory:c-appuio-cloudscale-lpg-2:carrot-pickers-inc:testproject:flex',
- }),
- value: minMemoryRequestMib * 31,
- },
- ]),
-
- ],
-}
diff --git a/pkg/db/seeds/promtest/appuio_cloud_persistent_storage.jsonnet b/pkg/db/seeds/promtest/appuio_cloud_persistent_storage.jsonnet
deleted file mode 100644
index 7728915..0000000
--- a/pkg/db/seeds/promtest/appuio_cloud_persistent_storage.jsonnet
+++ /dev/null
@@ -1,110 +0,0 @@
-local c = import 'common.libsonnet';
-
-local query = importstr '../appuio_cloud_persistent_storage.promql';
-
-local commonLabels = {
- cluster_id: 'c-appuio-cloudscale-lpg-2',
- tenant_id: 'c-appuio-cloudscale-lpg-2',
-};
-
-// One pvc, minimal (=1 byte) request
-// 10 samples
-local baseSeries = {
- testprojectNamespaceOrgLabel: c.series('kube_namespace_labels', commonLabels {
- namespace: 'testproject',
- label_appuio_io_organization: 'cherry-pickers-inc',
- }, '1x120'),
-
- local pvcID = 'pvc-da01b12d-2e31-44da-8312-f91169256221',
- pvCapacity: c.series('kube_persistentvolume_capacity_bytes', commonLabels {
- persistentvolume: pvcID,
- }, '1x120'),
- pvInfo: c.series('kube_persistentvolume_info', commonLabels {
- persistentvolume: pvcID,
- storageclass: 'ssd',
- }, '1x120'),
- pvcRef: c.series('kube_persistentvolume_claim_ref', commonLabels {
- claim_namespace: 'testproject',
- name: 'important-database',
- persistentvolume: pvcID,
- }, '1x120'),
-};
-
-local baseCalculatedLabels = {
- category: 'c-appuio-cloudscale-lpg-2:testproject',
- cluster_id: 'c-appuio-cloudscale-lpg-2',
- namespace: 'testproject',
- product:
'appuio_cloud_persistent_storage:c-appuio-cloudscale-lpg-2:cherry-pickers-inc:testproject:ssd',
- storageclass: 'ssd',
- tenant_id: 'cherry-pickers-inc',
-};
-
-{
- tests: [
- c.test('minimal PVC',
- baseSeries,
- query,
- {
- labels: c.formatLabels(baseCalculatedLabels),
- value: 60,
- }),
- c.test('higher than 1GiB request',
- baseSeries {
- pvCapacity+: {
- values: '%sx120' % (5 * 1024 * 1024 * 1024),
- },
- },
- query,
- {
- labels: c.formatLabels(baseCalculatedLabels),
- value: 5 * 60,
- }),
-
- c.test('unrelated kube_namespace_labels changes do not throw errors - there is an overlap since series go stale only after a few missed scrapes',
- baseSeries {
- testprojectNamespaceOrgLabelUpdated: self.testprojectNamespaceOrgLabel {
- _labels+:: {
- custom_appuio_io_myid: '672004be-a86b-44e0-b446-1255a1f8b340',
- },
- values: '_x30 1x30 _x60',
- },
- },
- query,
- {
- labels: c.formatLabels(baseCalculatedLabels),
- value: 60,
- }),
-
- c.test('organization changes do not throw many-to-many errors - there is an overlap since series go stale only after a few missed scrapes',
- baseSeries {
- testprojectNamespaceOrgLabel+: {
- // We cheat here and use an impossible value.
- // Since we use min() and bottomk() in the query this prioritizes this series less than the other.
- // It's ugly but it prevents flaky tests since otherwise one of the series gets picked randomly.
- // Does not influence the result. The result is floored to a minimum of 1GiB.
- values: '2x120',
- },
- testprojectNamespaceOrgLabelUpdated: self.testprojectNamespaceOrgLabel {
- _labels+:: {
- label_appuio_io_organization: 'carrot-pickers-inc',
- },
- values: '_x60 1x60',
- },
- },
- query,
- [
- {
- labels: c.formatLabels(baseCalculatedLabels),
- value: 29,
- },
- {
- labels: c.formatLabels(baseCalculatedLabels {
- tenant_id: 'carrot-pickers-inc',
- product: 'appuio_cloud_persistent_storage:c-appuio-cloudscale-lpg-2:carrot-pickers-inc:testproject:ssd',
- }),
- value: 31,
- },
- ]),
-
- ],
-}
diff --git a/pkg/db/seeds/promtest/appuio_managed_kubernetes_vcpu.jsonnet b/pkg/db/seeds/promtest/appuio_managed_kubernetes_vcpu.jsonnet
deleted file mode 100644
index 700405c..0000000
--- a/pkg/db/seeds/promtest/appuio_managed_kubernetes_vcpu.jsonnet
+++ /dev/null
@@ -1,48 +0,0 @@
-local c = import 'common.libsonnet';
-
-local query = importstr '../appuio_managed_kubernetes_vcpu.promql';
-
-local commonLabels = {
- cluster_id: 'c-managed-kubernetes',
- tenant_id: 't-managed-kubernetes',
- vshn_service_level: 'standard',
-};
-
-local baseSeries = {
- appNodeCPUInfoLabel0: c.series('node_cpu_info', commonLabels {
- instance: 'app-test',
- cpu: '0',
- }, '1x120'),
- appNodeCPUInfoLabel1: c.series('node_cpu_info', commonLabels {
- instance: 'app-test',
- cpu: '1',
- }, '1x120'),
- appNodeCPUInfoLabel2: c.series('node_cpu_info', commonLabels {
- instance: 'app-test2',
- cpu: '0',
- }, '1x120'),
-};
-
-local baseCalculatedLabels = commonLabels {
- class: super.vshn_service_level,
- category: super.tenant_id + ':' + super.cluster_id,
-};
-
-{
- tests: [
- c.test(
- 'two app CPUs and one storage CPU',
- baseSeries,
- query,
- [
- {
- labels: c.formatLabels(baseCalculatedLabels {
- product: 'appuio_managed_kubernetes_vcpu:c-managed-kubernetes:t-managed-kubernetes::standard',
- }),
- value: 3,
- },
- ]
- ),
-
- ],
-}
diff --git a/pkg/db/seeds/promtest/appuio_managed_openshift_clusters_legacy.jsonnet b/pkg/db/seeds/promtest/appuio_managed_openshift_clusters_legacy.jsonnet
deleted file mode 100644
index 9ddde37..0000000
---
a/pkg/db/seeds/promtest/appuio_managed_openshift_clusters_legacy.jsonnet +++ /dev/null @@ -1,44 +0,0 @@ -local c = import 'common.libsonnet'; - -local query = importstr '../appuio_managed_openshift_clusters_legacy.promql'; - -local commonLabels = { - cluster_id: 'c-managed-openshift', -}; - -local infoLabels = commonLabels { - tenant_id: 't-managed-openshift', - vshn_service_level: 'standard', - cloud_provider: 'cloudscale', -}; - -local baseSeries = { - appuioInfoLabel: c.series('appuio_managed_info', infoLabels, '1x120'), - appuioInfoLabel2: c.series('appuio_managed_info', infoLabels { - vshn_service_level: 'best_effort', - }, '1x120'), -}; - -local baseCalculatedLabels = infoLabels { - class: super.vshn_service_level, - category: super.tenant_id + ':' + super.cluster_id, -}; - -{ - tests: [ - c.test( - 'one cluster', - baseSeries, - query, - [ - { - labels: c.formatLabels(baseCalculatedLabels { - product: 'appuio_managed_openshift_clusters:cloudscale:t-managed-openshift:c-managed-openshift:standard', - }), - value: 1, - }, - ] - ), - - ], -} diff --git a/pkg/db/seeds/promtest/appuio_managed_openshift_vcpu.jsonnet b/pkg/db/seeds/promtest/appuio_managed_openshift_vcpu.jsonnet deleted file mode 100644 index 969be30..0000000 --- a/pkg/db/seeds/promtest/appuio_managed_openshift_vcpu.jsonnet +++ /dev/null @@ -1,73 +0,0 @@ -local c = import 'common.libsonnet'; - -local query = importstr '../appuio_managed_openshift_vcpu.promql'; - -local commonLabels = { - cluster_id: 'c-managed-openshift', -}; - -local infoLabels = commonLabels { - tenant_id: 't-managed-openshift', - vshn_service_level: 'ondemand', - cloud_provider: 'cloudscale', -}; - -local baseSeries = { - appNodeRoleLabel: c.series('kube_node_role', commonLabels { - node: 'app-test', - role: 'app', - }, '1x120'), - - appNodeCPUInfoLabel0: c.series('node_cpu_info', commonLabels { - instance: 'app-test', - core: '0', - }, '1x120'), - appNodeCPUInfoLabel1: c.series('node_cpu_info', commonLabels { - instance: 'app-test', - core: '1', - }, '1x120'), - - storageNodeRoleLabel: c.series('kube_node_role', commonLabels { - node: 'storage-test', - role: 'storage', - }, '1x120'), - - storageNodeCPUInfoLabel0: c.series('node_cpu_info', commonLabels { - instance: 'storage-test', - core: '0', - }, '1x120'), - - appuioInfoLabel: c.series('appuio_managed_info', infoLabels, '1x120'), -}; - -local baseCalculatedLabels = infoLabels { - class: super.vshn_service_level, - category: super.tenant_id + ':' + super.cluster_id, -}; - -{ - tests: [ - c.test( - 'two app CPUs and one storage CPU', - baseSeries, - query, - [ - { - labels: c.formatLabels(baseCalculatedLabels { - role: 'app', - product: 'appuio_managed_openshift_vcpu:cloudscale:t-managed-openshift:c-managed-openshift:ondemand:app', - }), - value: 2, - }, - { - labels: c.formatLabels(baseCalculatedLabels { - role: 'storage', - product: 'appuio_managed_openshift_vcpu:cloudscale:t-managed-openshift:c-managed-openshift:ondemand:storage', - }), - value: 1, - }, - ] - ), - - ], -} diff --git a/pkg/db/seeds/promtest/common.libsonnet b/pkg/db/seeds/promtest/common.libsonnet deleted file mode 100644 index a6a2664..0000000 --- a/pkg/db/seeds/promtest/common.libsonnet +++ /dev/null @@ -1,33 +0,0 @@ -local formatLabels = function(labels) - local lf = std.join(', ', std.map(function(l) '%s="%s"' % [l, labels[l]], std.objectFields(labels))); - '{%s}' % [lf]; - -// returns a series object with correctly formatted labels. -// labels can be modified post creation using `_labels`. 
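[Editor's note: the `formatLabels` helper above and the `series` helper in the next hunk are the heart of these promtool fixtures. As a minimal sketch of the same rendering in Go, for readers who want to keep the technique after this removal; the function name `formatSeries` is mine, not part of the deleted code:]

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// formatSeries mirrors the jsonnet formatLabels/series pair: it renders a
// metric name plus label map into the `name{k="v", ...}` notation promtool
// expects in input_series. Keys are sorted, as std.objectFields sorts them
// in jsonnet.
func formatSeries(name string, labels map[string]string) string {
	keys := make([]string, 0, len(labels))
	for k := range labels {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	pairs := make([]string, 0, len(keys))
	for _, k := range keys {
		pairs = append(pairs, fmt.Sprintf("%s=%q", k, labels[k]))
	}
	return name + "{" + strings.Join(pairs, ", ") + "}"
}

func main() {
	fmt.Println(formatSeries("kube_namespace_labels", map[string]string{
		"namespace":                    "testproject",
		"label_appuio_io_organization": "cherry-pickers-inc",
	}))
	// kube_namespace_labels{label_appuio_io_organization="cherry-pickers-inc", namespace="testproject"}
}
```

The `values` strings in the fixtures use promtool's expanding notation: roughly, `1x120` repeats the value 1 over 120 scrape intervals and `_x30` leaves 30 intervals without a sample.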
-local series = function(name, labels, values) { - _name:: name, - _labels:: labels, - series: self._name + formatLabels(self._labels), - values: values, -}; - -// returns a test object with the given series and samples. Sample interval is 30s -// the evaluation time is set one hour in the future since all our queries operate on a 1h window -local test = function(name, series, query, samples, interval='30s', eval_time='1h') { - name: name, - interval: interval, - input_series: if std.isArray(series) then series else std.objectValues(series), - promql_expr_test: [ - { - expr: query, - eval_time: eval_time, - exp_samples: if std.isArray(samples) then samples else [samples], - }, - ], -}; - -{ - series: series, - formatLabels: formatLabels, - test: test, -} diff --git a/pkg/db/seeds/queries_test.go b/pkg/db/seeds/queries_test.go deleted file mode 100644 index d884d47..0000000 --- a/pkg/db/seeds/queries_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package seeds_test - -import ( - "fmt" - "io/fs" - "os" - "os/exec" - "path" - "strings" - "testing" - - "github.com/google/go-jsonnet" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/appuio/appuio-cloud-reporting/pkg/testsuite" -) - -func TestQueries(t *testing.T) { - wd := os.DirFS(".") - testFiles, err := fs.Glob(wd, "promtest/*.jsonnet") - require.NoError(t, err) - - for _, tFile := range testFiles { - t.Run(tFile, func(t *testing.T) { - tmp := renderJsonnet(t, tFile) - runPromtool(t, tmp) - }) - } -} - -func runPromtool(t *testing.T, tmp string) { - t.Helper() - - cmd := exec.Command(testsuite.PromtoolBin, "test", "rules", tmp) - var stderr, stdout strings.Builder - cmd.Stderr = &stderr - cmd.Stdout = &stdout - assert.NoError(t, cmd.Run()) - // Not using t.Log to keep formatting sane - fmt.Println("STDOUT") - fmt.Println(stdout.String()) - fmt.Println("STDERR") - fmt.Println(stderr.String()) -} - -func renderJsonnet(t *testing.T, tFile string) string { - t.Helper() - - ev, err := jsonnet.MakeVM().EvaluateFile(tFile) - require.NoError(t, err) - tmp := path.Join(t.TempDir(), "test.json") - require.NoError(t, os.WriteFile(tmp, []byte(ev), 0644)) - return tmp -} diff --git a/pkg/db/seeds_test.go b/pkg/db/seeds_test.go deleted file mode 100644 index c3539b5..0000000 --- a/pkg/db/seeds_test.go +++ /dev/null @@ -1,70 +0,0 @@ -package db_test - -import ( - "testing" - - "github.com/jmoiron/sqlx" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - - "github.com/appuio/appuio-cloud-reporting/pkg/db" - "github.com/appuio/appuio-cloud-reporting/pkg/db/dbtest" -) - -type SeedsTestSuite struct { - dbtest.Suite -} - -func (s *SeedsTestSuite) TestSeedDefaultQueries() { - t := s.T() - d := s.DB() - - _, err := d.Exec("DELETE FROM queries") - require.NoError(t, err) - - expQueryNum := 9 - - count := "SELECT COUNT(*) FROM queries" - requireQueryEqual(t, d, 0, count) - - require.NoError(t, db.SeedQueries(d.DB, []db.Query{ - { - Name: "appuio_cloud_memory", - Description: "Memory usage (maximum of requested and used memory) aggregated by namespace", - Unit: "MiB", - }, - })) - t.Log(t, count) - requireQueryEqual(t, d, 1, count) - - // Some appuio_cloud_memory exists so don't create sub queries - err = db.Seed(d.DB) - require.NoError(t, err) - requireQueryEqual(t, d, expQueryNum-2, count) - - // Drop queries and check we create sub queries - _, err = d.DB.Exec("DELETE FROM queries;") - require.NoError(t, err) - err = db.Seed(d.DB) - require.NoError(t, err) - requireQueryEqual(t, d, expQueryNum, 
count) - err = db.Seed(d.DB) - require.NoError(t, err) - requireQueryEqual(t, d, expQueryNum, count) -} - -func requireQueryEqual[T any](t *testing.T, q sqlx.Queryer, expected T, query string, args ...interface{}) { - t.Helper() - var res T - require.NoError(t, sqlx.Get(q, &res, query, args...)) - require.Equal(t, expected, res) -} - -func requireQueryTrue(t *testing.T, q sqlx.Queryer, query string, args ...interface{}) { - t.Helper() - requireQueryEqual(t, q, true, query, args...) -} - -func TestSeeds(t *testing.T) { - suite.Run(t, new(SeedsTestSuite)) -} diff --git a/pkg/db/types.go b/pkg/db/types.go deleted file mode 100644 index 66b7a50..0000000 --- a/pkg/db/types.go +++ /dev/null @@ -1,238 +0,0 @@ -package db - -import ( - "database/sql" - "fmt" - "time" - - "github.com/jackc/pgtype" -) - -type Model interface { - TableName() string - ForeignKeyName() string -} - -var _ Model = Query{} - -type Query struct { - Id string - ParentID sql.NullString `db:"parent_id"` - - Name string - Description string - Query string - Unit string - - During pgtype.Tstzrange - - subQueries []Query -} - -func (q Query) TableName() string { - return "queries" -} - -func (q Query) ForeignKeyName() string { - return "query_id" -} - -// CreateQuery creates the given query -func CreateQuery(p NamedPreparer, in Query) (Query, error) { - var query Query - err := GetNamed(p, &query, - "INSERT INTO queries (name,description,query,unit,during,parent_id) VALUES (:name,:description,:query,:unit,:during,:parent_id) RETURNING *", in) - return query, err -} - -var _ Model = Tenant{} - -type Tenant struct { - Id string - - // Source is the tenant string read from the 'appuio.io/organization' label. - Source string - Target sql.NullString - - During pgtype.Tstzrange -} - -func (t Tenant) TableName() string { - return "tenants" -} - -func (t Tenant) ForeignKeyName() string { - return "tenant_id" -} - -var _ Model = Category{} - -type Category struct { - Id string - - // Source consists of the cluster id and namespace in the form of "zone:namespace". - Source string - Target sql.NullString -} - -func (c Category) TableName() string { - return "categories" -} - -func (c Category) ForeignKeyName() string { - return "category_id" -} - -// CreateTenant creates the given tenant -func CreateTenant(p NamedPreparer, in Tenant) (Tenant, error) { - var tenant Tenant - err := GetNamed(p, &tenant, - "INSERT INTO tenants (source,target,during) VALUES (:source,:target,:during) RETURNING *", in) - return tenant, err -} - -var _ Model = Product{} - -type Product struct { - Id string - - // Source is a string consisting of "query:zone:tenant:namespace:class" and can contain wildcards. - // See package `sourcekey` for more information. - Source string - Target sql.NullString - Amount float64 - Unit string - - During pgtype.Tstzrange -} - -func (p Product) TableName() string { - return "products" -} - -func (p Product) ForeignKeyName() string { - return "product_id" -} - -// CreateProduct creates the given product -func CreateProduct(p NamedPreparer, in Product) (Product, error) { - var product Product - err := GetNamed(p, &product, - "INSERT INTO products (source,target,amount,unit,during) VALUES (:source,:target,:amount,:unit,:during) RETURNING *", in) - return product, err -} - -var _ Model = Discount{} - -type Discount struct { - Id string - - // Source is a string consisting of "query:zone:tenant:namespace:class" and can contain wildcards. - // See package `sourcekey` for more information. 
- Source string - Discount float64 - - During pgtype.Tstzrange -} - -func (d Discount) TableName() string { - return "discounts" -} - -func (d Discount) ForeignKeyName() string { - return "discount_id" -} - -// CreateDiscount creates the given discount -func CreateDiscount(p NamedPreparer, in Discount) (Discount, error) { - var discount Discount - err := GetNamed(p, &discount, - "INSERT INTO discounts (source,discount,during) VALUES (:source,:discount,:during) RETURNING *", in) - return discount, err -} - -var _ Model = DateTime{} - -type DateTime struct { - Id string - - Timestamp time.Time - - Year int - Month int - Day int - Hour int -} - -func (d DateTime) TableName() string { - return "date_times" -} - -func (d DateTime) ForeignKeyName() string { - return "date_time_id" -} - -var _ Model = Fact{} - -type Fact struct { - Id string - - DateTimeId string `db:"date_time_id"` - QueryId string `db:"query_id"` - TenantId string `db:"tenant_id"` - CategoryId string `db:"category_id"` - ProductId string `db:"product_id"` - DiscountId string `db:"discount_id"` - - Quantity float64 -} - -func (f Fact) TableName() string { - return "facts" -} - -func (f Fact) ForeignKeyName() string { - return "fact_id" -} - -// BuildDateTime builds a DateTime object from the given timestamp. -func BuildDateTime(ts time.Time) DateTime { - return DateTime{ - Timestamp: ts, - - Year: ts.Year(), - Month: int(ts.Month()), - Day: ts.Day(), - Hour: ts.Hour(), - } -} - -// Timestamp creates a Postgres timestamp from the given value. -// Valid values are nil, pgtype.Infinity/pgtype.NegativeInfinity, and a time.Time object. -func Timestamp(from interface{}) (pgtype.Timestamptz, error) { - ts := pgtype.Timestamptz{} - err := ts.Set(from) - return ts, err -} - -// MustTimestamp creates a Postgres timestamp from the given value. -// Valid values are nil, pgtype.Infinity/pgtype.NegativeInfinity, and a time.Time object. -// Panics if given an unsupported type. -func MustTimestamp(from interface{}) pgtype.Timestamptz { - ts, err := Timestamp(from) - if err != nil { - panic(fmt.Errorf("expected to create valid timestamp: %s", err)) - } - return ts -} - -// Timerange creates a Postgres timerange from two Postgres timestamps with [lower,upper) bounds. 
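[Editor's note: product and discount `Source` keys above follow the `query:zone:tenant:namespace:class` format, and the golden tests later in this diff rely on more specific keys (e.g. `my-product:my-cluster:my-tenant:secret-namespace`) overriding generic ones. A rough Go sketch of expanding such a key into lookup candidates, most specific first; this is an illustration only, not the deleted `sourcekey` package, which additionally handles `*` wildcard segments:]

```go
package main

import (
	"fmt"
	"strings"
)

// candidates expands a fully-qualified source key into match candidates,
// most specific first. Illustration only; the real matching rules lived in
// pkg/sourcekey and also generate '*' wildcard permutations.
func candidates(key string) []string {
	parts := strings.Split(key, ":")
	out := make([]string, 0, len(parts))
	for i := len(parts); i > 0; i-- {
		out = append(out, strings.Join(parts[:i], ":"))
	}
	return out
}

func main() {
	for _, c := range candidates("test_memory:us-rac-2:tricell:my-namespace") {
		fmt.Println(c)
	}
	// test_memory:us-rac-2:tricell:my-namespace
	// test_memory:us-rac-2:tricell
	// test_memory:us-rac-2
	// test_memory
}
```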
-func Timerange(lower, upper pgtype.Timestamptz) pgtype.Tstzrange {
- return pgtype.Tstzrange{
- Lower: lower,
- LowerType: pgtype.Inclusive,
- Upper: upper,
- UpperType: pgtype.Exclusive,
- Status: pgtype.Present,
- }
-}
diff --git a/pkg/db/types_test.go b/pkg/db/types_test.go
deleted file mode 100644
index 8c119fd..0000000
--- a/pkg/db/types_test.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package db_test
-
-import (
- "testing"
- "time"
-
- "github.com/jackc/pgtype"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
- "github.com/stretchr/testify/suite"
-
- "github.com/appuio/appuio-cloud-reporting/pkg/db"
- "github.com/appuio/appuio-cloud-reporting/pkg/db/dbtest"
-)
-
-func TestTimerange(t *testing.T) {
- lower := db.MustTimestamp(time.Now())
- upper := db.MustTimestamp(pgtype.Infinity)
- subject := db.Timerange(lower, upper)
-
- assert.Equal(t, subject.Status, pgtype.Present, "new timerange should be present")
- assert.Equal(t, subject.Lower, lower)
- assert.Equal(t, subject.Upper, upper)
- assert.Equal(t, subject.LowerType, pgtype.Inclusive, "lower bound should be inclusive")
- assert.Equal(t, subject.UpperType, pgtype.Exclusive, "upper bound should be exclusive")
-}
-
-func TestBuildDateTime(t *testing.T) {
- ts := time.Date(2033, time.March, 23, 17, 0, 0, 0, time.UTC)
- subject := db.BuildDateTime(ts)
-
- assert.True(t, subject.Timestamp.Equal(ts))
- assert.Equal(t, subject.Year, 2033)
- assert.Equal(t, subject.Month, 3)
- assert.Equal(t, subject.Day, 23)
- assert.Equal(t, subject.Hour, 17)
-}
-
-func TestTypes(t *testing.T) {
- suite.Run(t, new(TypesTestSuite))
-}
-
-type TypesTestSuite struct {
- dbtest.Suite
-}
-
-func (s *TypesTestSuite) TestTypes_Query() {
- t := s.T()
- d := s.DB()
-
- _, err := db.CreateQuery(d, db.Query{
- Name: "test",
- Query: "test",
- Unit: "tps",
- During: db.InfiniteRange(),
- })
- require.NoError(t, err)
-
- count := "SELECT ((SELECT COUNT(*) FROM queries WHERE name=$1) = 1)"
- requireQueryTrue(t, d, count, "test")
-}
-func (s *TypesTestSuite) TestTypes_Product() {
- t := s.T()
- d := s.DB()
-
- _, err := db.CreateProduct(d, db.Product{
- Source: "test",
- During: db.InfiniteRange(),
- })
- require.NoError(t, err)
-
- count := "SELECT ((SELECT COUNT(*) FROM products WHERE source=$1) = 1)"
- requireQueryTrue(t, d, count, "test")
-}
-func (s *TypesTestSuite) TestTypes_Discount() {
- t := s.T()
- d := s.DB()
-
- _, err := db.CreateDiscount(d, db.Discount{
- Source: "test",
- During: db.InfiniteRange(),
- })
- require.NoError(t, err)
-
- count := "SELECT ((SELECT COUNT(*) FROM discounts WHERE source=$1) = 1)"
- requireQueryTrue(t, d, count, "test")
-}
diff --git a/pkg/db/util.go b/pkg/db/util.go
deleted file mode 100644
index ba07c45..0000000
--- a/pkg/db/util.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package db
-
-import (
- "context"
- "fmt"
-
- "github.com/jackc/pgtype"
- "github.com/jmoiron/sqlx"
-)
-
-// PGTimestampFormat is the string representation of PostgreSQL's `timestamptz`.
-const PGTimestampFormat = "2006-01-02 15:04:05-07"
-
-// NamedPreparer is an interface used by GetNamed.
-type NamedPreparer interface {
- PrepareNamed(query string) (*sqlx.NamedStmt, error)
-}
-
-// NamedPreparerContext is an interface used by GetNamedContext.
-type NamedPreparerContext interface {
- PrepareNamedContext(ctx context.Context, query string) (*sqlx.NamedStmt, error)
-}
-
-// GetNamed is like sqlx.Get but for named statements.
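[Editor's note: `GetNamed` and its siblings in this hunk back all the `Create*` helpers in types.go: struct fields bind to `:name` placeholders and the row produced by `RETURNING *` is scanned back into the destination. A short usage sketch against the package as it existed before this removal; the function name `createExampleQuery` is mine:]

```go
package example

import (
	"github.com/appuio/appuio-cloud-reporting/pkg/db"
	"github.com/jmoiron/sqlx"
)

// createExampleQuery inserts a query row and scans the generated columns
// (id, parent_id, ...) back into a db.Query via GetNamed.
func createExampleQuery(tx *sqlx.Tx) (db.Query, error) {
	var out db.Query
	err := db.GetNamed(tx, &out,
		"INSERT INTO queries (name,description,query,unit,during) VALUES (:name,:description,:query,:unit,:during) RETURNING *",
		db.Query{Name: "example", Description: "example query", Query: "vector(1)", Unit: "MiB", During: db.InfiniteRange()},
	)
	return out, err
}
```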
-func GetNamed(p NamedPreparer, dest interface{}, query string, arg interface{}) error { - st, err := p.PrepareNamed(query) - if err != nil { - return fmt.Errorf("failed to prepare statement: %w", err) - } - defer st.Close() - return st.Get(dest, arg) -} - -// GetNamedContext is like sqlx.GetContext but for named statements. -func GetNamedContext(ctx context.Context, p NamedPreparerContext, dest interface{}, query string, arg interface{}) error { - st, err := p.PrepareNamedContext(ctx, query) - if err != nil { - return fmt.Errorf("failed to prepare statement: %w", err) - } - defer st.Close() - return st.GetContext(ctx, dest, arg) -} - -// SelectNamed is like sqlx.Select but for named statements. -func SelectNamed(p sqlx.Ext, dest interface{}, query string, arg interface{}) error { - nq, narg, err := sqlx.Named(query, arg) - if err != nil { - return fmt.Errorf("failed to substitute name parameters: %w", err) - } - nq = p.Rebind(nq) - return sqlx.Select(p, dest, nq, narg...) -} - -// SelectNamedContext is like sqlx.SelectContext but for named statements. -func SelectNamedContext(ctx context.Context, p sqlx.ExtContext, dest interface{}, query string, arg interface{}) error { - nq, narg, err := sqlx.Named(query, arg) - if err != nil { - return fmt.Errorf("failed to substitute name parameters: %w", err) - } - nq = p.Rebind(nq) - return sqlx.SelectContext(ctx, p, dest, nq, narg...) -} - -// InfiniteRange returns an infinite PostgreSQL timerange [-Inf,Inf). -func InfiniteRange() pgtype.Tstzrange { - return Timerange(MustTimestamp(pgtype.NegativeInfinity), MustTimestamp(pgtype.Infinity)) -} - -// RunInTransaction runs the given cb func in a transaction. -// If the func returns an error, the transaction is rolled back, otherwise committed. -func RunInTransaction(ctx context.Context, db *sqlx.DB, cb func(tx *sqlx.Tx) error) error { - // TODO: Add unit test - tx, err := db.BeginTxx(ctx, nil) - if err != nil { - return fmt.Errorf("error starting transaction: %w", err) - } - defer tx.Rollback() - - if err := cb(tx); err != nil { - return err - } - return tx.Commit() -} diff --git a/pkg/db/util_test.go b/pkg/db/util_test.go deleted file mode 100644 index fc5e666..0000000 --- a/pkg/db/util_test.go +++ /dev/null @@ -1,68 +0,0 @@ -package db_test - -import ( - "context" - "testing" - - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - - "github.com/appuio/appuio-cloud-reporting/pkg/db" - "github.com/appuio/appuio-cloud-reporting/pkg/db/dbtest" -) - -type UtilTestSuite struct { - dbtest.Suite -} - -func (s *UtilTestSuite) TestGetNamed() { - t := s.T() - tx := s.Begin() - defer tx.Rollback() - - query := "SELECT :q" - expected := "ping" - namedParam := map[string]interface{}{"q": expected} - - var res string - require.NoError(t, db.GetNamed(tx, &res, query, namedParam)) - require.Equal(t, expected, res) - - require.NoError(t, db.GetNamedContext(context.Background(), tx, &res, query, namedParam)) - require.Equal(t, expected, res) - - require.Error(t, db.GetNamed(tx, &res, "invalid", namedParam)) - require.Error(t, db.GetNamedContext(context.Background(), tx, &res, "invalid", namedParam)) -} - -func (s *UtilTestSuite) TestSelectNamed() { - t := s.T() - tx := s.Begin() - defer tx.Rollback() - - type testTable struct{ Q string } - _, err := tx.Exec("CREATE TEMPORARY TABLE t (q text)") - require.NoError(t, err) - - query := "INSERT INTO t (q) VALUES (:q) RETURNING *" - expected := []testTable{{"ping"}, {"pong"}} - - res := make([]testTable, 0) - require.NoError(t, db.SelectNamed(tx, 
&res, query, expected))
- require.Equal(t, expected, res)
-
- res = make([]testTable, 0)
- require.NoError(t, db.SelectNamedContext(context.Background(), tx, &res, query, expected))
- require.Equal(t, expected, res)
-
- // Type castings must be in the form of `CAST(:q AS <type>)`
- strRes := make([]string, 0)
- require.Error(t, db.SelectNamed(tx, &strRes, "SELECT :q::text", map[string]interface{}{"q": "test"}))
- require.NoError(t, db.SelectNamed(tx, &strRes, "SELECT CAST(:q AS text)", map[string]interface{}{"q": "test"}))
- require.Error(t, db.SelectNamedContext(context.Background(), tx, &strRes, "SELECT :q::text", map[string]interface{}{"q": "test"}))
- require.NoError(t, db.SelectNamedContext(context.Background(), tx, &strRes, "SELECT CAST(:q AS text)", map[string]interface{}{"q": "test"}))
-}
-
-func TestUtil(t *testing.T) {
- suite.Run(t, new(UtilTestSuite))
-}
diff --git a/pkg/erp/category.go b/pkg/erp/category.go
deleted file mode 100644
index 6c15dfc..0000000
--- a/pkg/erp/category.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package erp
-
-import (
- "context"
-
- "github.com/appuio/appuio-cloud-reporting/pkg/erp/entity"
-)
-
-// CategoryReconciler reconciles entity.Category instances.
-type CategoryReconciler interface {
- // Reconcile takes the given category and reconciles it with the concrete ERP implementation.
- // The CategoryReconciler may return a modified entity.Category instance or the same one if there were no changes.
- // An error is returned if reconciliation failed.
- Reconcile(ctx context.Context, category entity.Category) (entity.Category, error)
-}
diff --git a/pkg/erp/entity/category.go b/pkg/erp/entity/category.go
deleted file mode 100644
index 218460a..0000000
--- a/pkg/erp/entity/category.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package entity
-
-// Category represents the category dimension.
-type Category struct {
- // Source consists of the cluster id and namespace in the form of "zone:namespace".
- Source string
- // Target contains a unique identifier of a Category representation in the foreign ERP.
- Target string
-}
diff --git a/pkg/invoice/invoice.go b/pkg/invoice/invoice.go
deleted file mode 100644
index b771285..0000000
--- a/pkg/invoice/invoice.go
+++ /dev/null
@@ -1,266 +0,0 @@
-// Package invoice allows generating invoices from a filled report database.
-package invoice
-
-import (
- "context"
- "database/sql"
- "fmt"
- "time"
-
- "github.com/appuio/appuio-cloud-reporting/pkg/db"
- "github.com/jmoiron/sqlx"
-)
-
-// Invoice represents an invoice for a tenant.
-type Invoice struct {
- Tenant Tenant
-
- PeriodStart time.Time
- PeriodEnd time.Time
-
- Categories []Category
- // Total represents the total accumulated cost of the invoice.
- Total float64
-}
-
-// Category represents a category of the invoice, i.e. a namespace.
-type Category struct {
- Source string
- Target string
- Items []Item
- // Total represents the total accumulated cost per category.
- Total float64
-}
-
-// Item represents a line in the invoice.
-type Item struct {
- // Description describes the line item.
- Description string
- // QueryName is the name of the query that generated this line item
- QueryName string
- // Product describes the product this item is based on.
- ProductRef
- // Quantity represents the amount of the resource used.
- Quantity float64
- // QuantityMin represents the minimum amount of the resource used.
- QuantityMin float64
- // QuantityAvg represents the average amount of the resource used.
- QuantityAvg float64
- // QuantityMax represents the maximum amount of the resource used.
- QuantityMax float64
- // Unit represents the unit of the item. e.g. MiB
- Unit string
- // PricePerUnit represents the price per unit in Rappen
- PricePerUnit float64
- // Discount represents a discount as a fraction between 0 and 1. A 0.3 discount equals price per unit * 0.7
- Discount float64
- // Total represents the total accumulated cost:
- // (hour1 quantity * price per unit * (1 - discount)) + (hour2 quantity * price
- // per unit * (1 - discount)) + ...
- Total float64
- // SubItems are entries created by the subqueries of the main invoice item.
- // The keys are the QueryNames of the sub items.
- SubItems map[string]SubItem
-}
-
-// SubItem reflects additional information created by a subquery of the main invoice item
-type SubItem struct {
- // Description describes the line item.
- Description string
- // QueryName is the name of the query that generated this line item
- QueryName string
- // Quantity represents the amount of the resource used.
- Quantity float64
- // QuantityMin represents the minimum amount of the resource used.
- QuantityMin float64
- // QuantityAvg represents the average amount of the resource used.
- QuantityAvg float64
- // QuantityMax represents the maximum amount of the resource used.
- QuantityMax float64
- // Unit represents the unit of the item. e.g. MiB
- Unit string
-}
-
-// Tenant represents a tenant in the invoice.
-type Tenant struct {
- Source string
- Target string
-}
-
-// ProductRef represents a product reference in the invoice.
-type ProductRef struct {
- Source string `db:"product_ref_source"`
- Target string `db:"product_ref_target"`
-}
-
-// Generate generates invoices for the given month.
-// No data is written to the database. The transaction can be read-only.
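[Editor's note: since `Generate` (which follows) never writes, its callers in this diff wrap it in a throwaway transaction; as the comment above states, a read-only transaction works too. A minimal sketch, with `invoicesFor` being my name for the wrapper:]

```go
package example

import (
	"context"
	"database/sql"
	"time"

	"github.com/appuio/appuio-cloud-reporting/pkg/invoice"
	"github.com/jmoiron/sqlx"
)

// invoicesFor renders all invoices for one billing month inside a read-only
// transaction and rolls it back afterwards, since nothing is written.
func invoicesFor(ctx context.Context, rdb *sqlx.DB, year int, month time.Month) ([]invoice.Invoice, error) {
	tx, err := rdb.BeginTxx(ctx, &sql.TxOptions{ReadOnly: true})
	if err != nil {
		return nil, err
	}
	defer tx.Rollback()
	return invoice.Generate(ctx, tx, year, month)
}
```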
-func Generate(ctx context.Context, tx *sqlx.Tx, year int, month time.Month) ([]Invoice, error) { - tenants, err := tenantsForPeriod(ctx, tx, year, month) - if err != nil { - return nil, err - } - - invoices := make([]Invoice, 0, len(tenants)) - for _, tenant := range tenants { - invoice, err := invoiceForTenant(ctx, tx, tenant, year, month) - if err != nil { - return nil, err - } - invoices = append(invoices, invoice) - } - return invoices, nil -} - -func invoiceForTenant(ctx context.Context, tx *sqlx.Tx, tenant db.Tenant, year int, month time.Month) (Invoice, error) { - var categories []db.Category - err := sqlx.SelectContext(ctx, tx, &categories, - `SELECT DISTINCT categories.* - FROM categories - INNER JOIN facts ON (facts.category_id = categories.id) - INNER JOIN date_times ON (facts.date_time_id = date_times.id) - WHERE date_times.year = $1 AND date_times.month = $2 - AND facts.tenant_id = $3 - ORDER BY categories.source - `, - year, int(month), tenant.Id) - - if err != nil { - return Invoice{}, fmt.Errorf("failed to load categories for %q at %d %s: %w", tenant.Source, year, month.String(), err) - } - - invCategories := make([]Category, 0, len(categories)) - for _, category := range categories { - items, err := itemsForCategory(ctx, tx, tenant, category, year, month) - if err != nil { - return Invoice{}, err - } - invCategories = append(invCategories, Category{ - Source: category.Source, - Target: category.Target.String, - Items: items, - Total: sumCategoryTotal(items), - }) - } - - return Invoice{ - Tenant: Tenant{Source: tenant.Source, Target: tenant.Target.String}, - PeriodStart: time.Date(year, month, 1, 0, 0, 0, 0, time.UTC), - PeriodEnd: time.Date(year, month, 1, 0, 0, 0, 0, time.UTC).AddDate(0, 1, -1), - Categories: invCategories, - Total: sumInvoiceTotal(invCategories), - }, nil -} - -// rawItem is a line item with additional internal fields for querying. -// This way we do not needlessly expose (and test) internal IDs. 
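[Editor's note: a worked example of the per-fact arithmetic that `itemsForCategory` below performs in SQL as `SUM(facts.quantity * products.amount * (1 - discounts.discount))`; the helper name `itemTotal` is mine:]

```go
package example

// itemTotal sums hourly facts the same way the invoice query does:
// each hourly quantity contributes quantity * pricePerUnit * (1 - discount).
func itemTotal(hourlyQuantities []float64, pricePerUnit, discount float64) float64 {
	total := 0.0
	for _, q := range hourlyQuantities {
		total += q * pricePerUnit * (1 - discount)
	}
	return total
}

// itemTotal([]float64{42, 42}, 1, 0.25) == 63:
// two hourly facts of 42 units at 1 Rappen/unit with a 25% discount.
```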
-type rawItem struct { - Item - // QueryID is the id of the query that generated this item - QueryID string `db:"query_id"` - // ParentQueryID is the id of the parent-query of the query that generated this item - ParentQueryID sql.NullString `db:"parent_query_id"` - // DiscountID is the id of the corresponding discount - DiscountID string `db:"discount_id"` - // ProductID is the id of the corresponding product entry - ProductID string `db:"product_ref_id"` -} - -func itemsForCategory(ctx context.Context, tx *sqlx.Tx, tenant db.Tenant, category db.Category, year int, month time.Month) ([]Item, error) { - var items []rawItem - err := sqlx.SelectContext(ctx, tx, &items, - `SELECT queries.id as query_id, queries.parent_id as parent_query_id, discounts.id as discount_id, - queries.description, queries.name as queryName, - SUM(facts.quantity) as quantity, MIN(facts.quantity) as quantitymin, AVG(facts.quantity) as quantityavg, MAX(facts.quantity) as quantitymax, - queries.unit, products.amount AS pricePerUnit, discounts.discount, - products.id as product_ref_id, products.source as product_ref_source, COALESCE(products.target,''::text) as product_ref_target, - SUM( facts.quantity * products.amount * ( 1::double precision - discounts.discount ) ) AS total - FROM facts - INNER JOIN tenants ON (facts.tenant_id = tenants.id) - INNER JOIN queries ON (facts.query_id = queries.id) - INNER JOIN discounts ON (facts.discount_id = discounts.id) - INNER JOIN products ON (facts.product_id = products.id) - INNER JOIN date_times ON (facts.date_time_id = date_times.id) - WHERE date_times.year = $1 AND date_times.month = $2 - AND facts.tenant_id = $3 - AND facts.category_id = $4 - GROUP BY queries.id, products.id, discounts.id - `, - year, int(month), tenant.Id, category.Id) - - if err != nil { - return nil, fmt.Errorf("failed to load item for %q/%q at %d %s: %w", tenant.Source, category.Source, year, month.String(), err) - } - - return buildItemHierarchy(items), nil -} - -// buildItemHierarchy takes a flat list of raw items containing items and sub-items and returns a list of items containing their corresponding sub-items. -// It will drop any sub-item without a matching main item. 
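[Editor's note: the grouping key used below is the (query, product, discount) ID triple; a sub-item attaches to the main item that shares its parent query ID plus the same product and discount. An in-package illustration with hypothetical IDs q1/q2/q3/p1/d1 (needs `database/sql` for NullString):]

```go
// Hypothetical, inside package invoice; the IDs are made up for illustration.
func exampleHierarchy() []Item {
	items := []rawItem{
		{Item: Item{QueryName: "test"}, QueryID: "q1", ProductID: "p1", DiscountID: "d1"},
		{Item: Item{QueryName: "sub-test"}, QueryID: "q2", ProductID: "p1", DiscountID: "d1",
			ParentQueryID: sql.NullString{String: "q1", Valid: true}},
		// Orphaned: parent query "q9" produced no main item, so this row is dropped.
		{Item: Item{QueryName: "orphan"}, QueryID: "q3", ProductID: "p1", DiscountID: "d1",
			ParentQueryID: sql.NullString{String: "q9", Valid: true}},
	}
	return buildItemHierarchy(items)
	// Result: one Item whose SubItems map holds a single "sub-test" entry.
}
```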
-func buildItemHierarchy(items []rawItem) []Item {
- mainItems := map[string]Item{}
- for _, item := range items {
- if !item.ParentQueryID.Valid {
- // These three IDs uniquely identify the line item
- itemID := fmt.Sprintf("%s:%s:%s", item.QueryID, item.ProductID, item.DiscountID)
- item.Item.SubItems = map[string]SubItem{}
- mainItems[itemID] = item.Item
- }
- }
- for _, item := range items {
- if item.ParentQueryID.Valid {
- pqid := fmt.Sprintf("%s:%s:%s", item.ParentQueryID.String, item.ProductID, item.DiscountID)
- parent, ok := mainItems[pqid]
- if ok {
- parent.SubItems[item.QueryName] = SubItem{
- Description: item.Description,
- QueryName: item.QueryName,
- Quantity: item.Quantity,
- QuantityMin: item.QuantityMin,
- QuantityAvg: item.QuantityAvg,
- QuantityMax: item.QuantityMax,
- Unit: item.Unit,
- }
- mainItems[pqid] = parent
- }
- }
- }
- var res []Item
- for _, it := range mainItems {
- res = append(res, it)
- }
- return res
-}
-
-func tenantsForPeriod(ctx context.Context, tx *sqlx.Tx, year int, month time.Month) ([]db.Tenant, error) {
- var tenants []db.Tenant
-
- err := sqlx.SelectContext(ctx, tx, &tenants,
- `SELECT DISTINCT tenants.*
- FROM tenants
- INNER JOIN facts ON (facts.tenant_id = tenants.id)
- INNER JOIN date_times ON (facts.date_time_id = date_times.id)
- WHERE date_times.year = $1 AND date_times.month = $2
- ORDER BY tenants.source, tenants.during
- `,
- year, int(month))
-
- if err != nil {
- return nil, fmt.Errorf("failed to load tenants for %d %s: %w", year, month.String(), err)
- }
- return tenants, nil
-}
-
-func sumCategoryTotal(itms []Item) (sum float64) {
- for _, itm := range itms {
- sum += itm.Total
- }
- return
-}
-
-func sumInvoiceTotal(cat []Category) (sum float64) {
- for _, itm := range cat {
- sum += itm.Total
- }
- return
-}
diff --git a/pkg/invoice/invoice_golden_discounts_test.go b/pkg/invoice/invoice_golden_discounts_test.go
deleted file mode 100644
index 0e686c4..0000000
--- a/pkg/invoice/invoice_golden_discounts_test.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package invoice_test
-
-import (
- "database/sql"
- "time"
-
- "github.com/appuio/appuio-cloud-reporting/pkg/db"
-
- "github.com/stretchr/testify/require"
-)
-
-func (s *InvoiceGoldenSuite) TestInvoiceGolden_Discounts() {
- t := s.T()
- tdb := s.DB()
-
- _, err := db.CreateProduct(tdb, db.Product{
- Source: "my-product",
- Amount: 1,
- During: db.InfiniteRange(),
- })
- require.NoError(t, err)
-
- _, err = db.CreateDiscount(tdb, db.Discount{
- Source: "my-product",
- During: db.InfiniteRange(),
- })
- require.NoError(t, err)
-
- _, err = db.CreateDiscount(tdb, db.Discount{
- Source: "my-product:*:my-tenant",
- Discount: 0.25,
- During: db.InfiniteRange(),
- })
- require.NoError(t, err)
-
- _, err = db.CreateDiscount(tdb, db.Discount{
- Source: "my-product:my-cluster:my-tenant",
- Discount: 0.5,
- During: db.InfiniteRange(),
- })
- require.NoError(t, err)
-
- _, err = db.CreateDiscount(tdb, db.Discount{
- Source: "my-product:my-cluster:my-tenant:secret-namespace",
- Discount: 1,
- During: db.InfiniteRange(),
- })
- require.NoError(t, err)
-
- q, err := db.CreateQuery(tdb, db.Query{
- Name: "test",
- Description: "test description",
- Query: "test",
- Unit: "tps",
- During: db.InfiniteRange(),
- })
- s.prom.queries[q.Query] = fakeQueryResults{
- "my-product:my-cluster:my-tenant:my-namespace": fakeQuerySample{Value: 42},
- "my-product:my-cluster:my-tenant:other-namespace": fakeQuerySample{Value: 42},
- "my-product:other-cluster:my-tenant:my-namespace": fakeQuerySample{Value: 42},
-
"my-product:my-cluster:other-tenant:my-namespace": fakeQuerySample{Value: 23}, - } - require.NoError(t, err) - - sq, err := db.CreateQuery(tdb, db.Query{ - ParentID: sql.NullString{ - String: q.Id, - Valid: true, - }, - Name: "sub-test", - Description: "A sub query of Test", - Query: "sub-test", - Unit: "tps", - During: db.InfiniteRange(), - }) - s.prom.queries[sq.Query] = fakeQueryResults{ - "my-product:my-cluster:my-tenant:my-namespace": fakeQuerySample{Value: 4}, - "my-product:my-cluster:my-tenant:other-namespace": fakeQuerySample{Value: 4}, - "my-product:other-cluster:my-tenant:my-namespace": fakeQuerySample{Value: 4}, - "my-product:my-cluster:other-tenant:my-namespace": fakeQuerySample{Value: 2}, - } - require.NoError(t, err) - - runReport(t, tdb, s.prom, q.Name, "2022-02-25", "2022-03-10") - - invoiceEqualsGolden(t, "discounts", - generateInvoice(t, tdb, 2022, time.March), - *updateGolden) -} diff --git a/pkg/invoice/invoice_golden_products_test.go b/pkg/invoice/invoice_golden_products_test.go deleted file mode 100644 index d500609..0000000 --- a/pkg/invoice/invoice_golden_products_test.go +++ /dev/null @@ -1,89 +0,0 @@ -package invoice_test - -import ( - "database/sql" - "time" - - "github.com/appuio/appuio-cloud-reporting/pkg/db" - - "github.com/stretchr/testify/require" -) - -func (s *InvoiceGoldenSuite) TestInvoiceGolden_Products() { - t := s.T() - tdb := s.DB() - - _, err := db.CreateProduct(tdb, db.Product{ - Source: "my-product", - Amount: 1, - During: db.InfiniteRange(), - }) - require.NoError(t, err) - - _, err = db.CreateProduct(tdb, db.Product{ - Source: "my-product:*:my-tenant", - Amount: 2, - During: db.InfiniteRange(), - }) - require.NoError(t, err) - - _, err = db.CreateProduct(tdb, db.Product{ - Source: "my-product:my-cluster:my-tenant", - Amount: 3, - During: db.InfiniteRange(), - }) - require.NoError(t, err) - - _, err = db.CreateProduct(tdb, db.Product{ - Source: "my-product:*:my-tenant:secret-namespace", - Amount: 0, - During: db.InfiniteRange(), - }) - require.NoError(t, err) - - _, err = db.CreateDiscount(tdb, db.Discount{ - Source: "my-product", - During: db.InfiniteRange(), - }) - require.NoError(t, err) - - q, err := db.CreateQuery(tdb, db.Query{ - Name: "test", - Description: "test description", - Query: "test", - Unit: "tps", - During: db.InfiniteRange(), - }) - s.prom.queries[q.Query] = fakeQueryResults{ - "my-product:my-cluster:my-tenant:my-namespace": fakeQuerySample{Value: 42}, - "my-product:my-cluster:my-tenant:other-namespace": fakeQuerySample{Value: 42}, - "my-product:other-cluster:my-tenant:my-namespace": fakeQuerySample{Value: 42}, - "my-product:my-cluster:other-tenant:my-namespace": fakeQuerySample{Value: 23}, - } - require.NoError(t, err) - - sq, err := db.CreateQuery(tdb, db.Query{ - ParentID: sql.NullString{ - String: q.Id, - Valid: true, - }, - Name: "sub-test", - Description: "A sub query of Test", - Query: "sub-test", - Unit: "tps", - During: db.InfiniteRange(), - }) - s.prom.queries[sq.Query] = fakeQueryResults{ - "my-product:my-cluster:my-tenant:my-namespace": fakeQuerySample{Value: 4}, - "my-product:my-cluster:my-tenant:other-namespace": fakeQuerySample{Value: 4}, - "my-product:other-cluster:my-tenant:my-namespace": fakeQuerySample{Value: 4}, - "my-product:my-cluster:other-tenant:my-namespace": fakeQuerySample{Value: 2}, - } - require.NoError(t, err) - - runReport(t, tdb, s.prom, q.Name, "2022-02-25", "2022-03-10") - - invoiceEqualsGolden(t, "products", - generateInvoice(t, tdb, 2022, time.March), - *updateGolden) -} diff --git 
a/pkg/invoice/invoice_golden_simple_test.go b/pkg/invoice/invoice_golden_simple_test.go
deleted file mode 100644
index b21bbd5..0000000
--- a/pkg/invoice/invoice_golden_simple_test.go
+++ /dev/null
@@ -1,80 +0,0 @@
-package invoice_test
-
-import (
- "database/sql"
- "time"
-
- "github.com/appuio/appuio-cloud-reporting/pkg/db"
-
- "github.com/stretchr/testify/require"
-)
-
-func (s *InvoiceGoldenSuite) TestInvoiceGolden_Simple() {
- t := s.T()
- tdb := s.DB()
-
- _, err := db.CreateProduct(tdb, db.Product{
- Source: "my-product",
- Amount: 1,
- During: db.InfiniteRange(),
- })
- require.NoError(t, err)
-
- _, err = db.CreateDiscount(tdb, db.Discount{
- Source: "my-product",
- During: db.InfiniteRange(),
- })
- require.NoError(t, err)
-
- q, err := db.CreateQuery(tdb, db.Query{
- Name: "test",
- Description: "test description",
- Query: "test",
- Unit: "tps",
- During: db.InfiniteRange(),
- })
- s.prom.queries[q.Query] = fakeQueryResults{
- "my-product:my-cluster:my-tenant:my-namespace": fakeQuerySample{Value: 42},
- "my-product:my-cluster:other-tenant:my-namespace": fakeQuerySample{Value: 23},
- }
- require.NoError(t, err)
-
- sq, err := db.CreateQuery(tdb, db.Query{
- ParentID: sql.NullString{
- String: q.Id,
- Valid: true,
- },
- Name: "sub-test",
- Description: "A sub query of Test",
- Query: "sub-test",
- Unit: "tps",
- During: db.InfiniteRange(),
- })
- s.prom.queries[sq.Query] = fakeQueryResults{
- "my-product:my-cluster:my-tenant:my-namespace": fakeQuerySample{Value: 4},
- "my-product:my-cluster:other-tenant:my-namespace": fakeQuerySample{Value: 2},
- }
- require.NoError(t, err)
-
- sq2, err := db.CreateQuery(tdb, db.Query{
- ParentID: sql.NullString{
- String: q.Id,
- Valid: true,
- },
- Name: "sub-test2",
- Description: "Another sub query of Test",
- Query: "sub-test2",
- Unit: "tps",
- During: db.InfiniteRange(),
- })
- s.prom.queries[sq2.Query] = fakeQueryResults{
- "my-product:my-cluster:my-tenant:my-namespace": fakeQuerySample{Value: 7},
- "my-product:my-cluster:other-tenant:my-namespace": fakeQuerySample{Value: 0},
- }
- require.NoError(t, err)
-
- runReport(t, tdb, s.prom, q.Name, "2022-02-25", "2022-03-10")
- invoiceEqualsGolden(t, "simple",
- generateInvoice(t, tdb, 2022, time.March),
- *updateGolden)
-}
diff --git a/pkg/invoice/invoice_golden_tenants_test.go b/pkg/invoice/invoice_golden_tenants_test.go
deleted file mode 100644
index 95dd311..0000000
--- a/pkg/invoice/invoice_golden_tenants_test.go
+++ /dev/null
@@ -1,75 +0,0 @@
-package invoice_test
-
-import (
- "database/sql"
- "time"
-
- "github.com/appuio/appuio-cloud-reporting/pkg/db"
- "github.com/stretchr/testify/require"
-)
-
-func (s *InvoiceGoldenSuite) TestInvoiceGolden_Tenants() {
- t := s.T()
- tdb := s.DB()
-
- _, err := db.CreateTenant(tdb, db.Tenant{
- Source: "tricell",
- Target: sql.NullString{Valid: true, String: "98757"},
- During: timerange(t, "-", "2022-01-20"),
- })
- require.NoError(t, err)
-
- _, err = db.CreateTenant(tdb, db.Tenant{
- Source: "tricell",
- Target: sql.NullString{Valid: true, String: "98942"},
- During: timerange(t, "2022-01-20", "-"),
- })
- require.NoError(t, err)
-
- _, err = db.CreateTenant(tdb, db.Tenant{
- Source: "umbrellacorp",
- Target: sql.NullString{Valid: true, String: "96432"},
- During: db.InfiniteRange(),
- })
- require.NoError(t, err)
-
- _, err = db.CreateTenant(tdb, db.Tenant{
- Source: "megacorp",
- Target: sql.NullString{Valid: true, String: "83492"},
- During: db.InfiniteRange(),
- })
- require.NoError(t, err)
-
- _, err = db.CreateProduct(tdb, db.Product{
-
Source: "my-product", - Amount: 1, - During: db.InfiniteRange(), - }) - require.NoError(t, err) - - _, err = db.CreateDiscount(tdb, db.Discount{ - Source: "my-product", - During: db.InfiniteRange(), - }) - require.NoError(t, err) - - query, err := db.CreateQuery(tdb, db.Query{ - Name: "test", - Description: "test description", - Query: "test", - Unit: "tps", - During: db.InfiniteRange(), - }) - require.NoError(t, err) - - s.prom.queries[query.Query] = fakeQueryResults{ - "my-product:my-cluster:tricell:my-namespace": fakeQuerySample{Value: 42}, // split over two tenant targets - "my-product:my-cluster:megacorp:my-namespace": fakeQuerySample{Value: 42}, // same value to verify that the sum of both tricell tenant targets is correct - "my-product:my-cluster:umbrellacorp:my-namespace": fakeQuerySample{Value: 14}, - } - - runReport(t, tdb, s.prom, query.Query, "2022-01-01", "2022-01-30") - invoiceEqualsGolden(t, "tenants", - generateInvoice(t, tdb, 2022, time.January), - *updateGolden) -} diff --git a/pkg/invoice/invoice_golden_test.go b/pkg/invoice/invoice_golden_test.go deleted file mode 100644 index 255046b..0000000 --- a/pkg/invoice/invoice_golden_test.go +++ /dev/null @@ -1,133 +0,0 @@ -package invoice_test - -import ( - "context" - "encoding/json" - "fmt" - "io" - "os" - "path" - "testing" - "time" - - "github.com/appuio/appuio-cloud-reporting/pkg/db" - "github.com/appuio/appuio-cloud-reporting/pkg/db/dbtest" - "github.com/appuio/appuio-cloud-reporting/pkg/invoice" - "github.com/appuio/appuio-cloud-reporting/pkg/report" - "github.com/appuio/appuio-cloud-reporting/pkg/sourcekey" - "github.com/jackc/pgtype" - "github.com/jmoiron/sqlx" - apiv1 "github.com/prometheus/client_golang/api/prometheus/v1" - "github.com/prometheus/common/model" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" -) - -type InvoiceGoldenSuite struct { - dbtest.Suite - - prom fakeQuerier -} - -func (s *InvoiceGoldenSuite) SetupTest() { - s.prom = fakeQuerier{ - queries: map[string]fakeQueryResults{}, - } - t := s.T() - _, err := s.DB().Exec("TRUNCATE queries, date_times, facts, tenants, categories, products, discounts RESTART IDENTITY;") - require.NoError(t, err) -} - -func TestInvoiceIntegration(t *testing.T) { - suite.Run(t, new(InvoiceGoldenSuite)) -} - -const dayLayout = "2006-01-02" - -type fakeQuerySample struct { - Value model.SampleValue -} -type fakeQueryResults map[string]fakeQuerySample -type fakeQuerier struct { - queries map[string]fakeQueryResults -} - -func (q fakeQuerier) Query(ctx context.Context, query string, ts time.Time, _ ...apiv1.Option) (model.Value, apiv1.Warnings, error) { - var res model.Vector - for k, s := range q.queries[query] { - sk, err := sourcekey.Parse(k) - if err != nil { - return nil, nil, err - } - res = append(res, &model.Sample{ - Metric: map[model.LabelName]model.LabelValue{ - "product": model.LabelValue(k), - "category": model.LabelValue(fmt.Sprintf("%s:%s", sk.Part(1), sk.Part(3))), - "tenant": model.LabelValue(sk.Part(2)), - }, - Value: s.Value, - }) - } - return res, nil, nil -} - -func runReport(t *testing.T, tdb *sqlx.DB, prom report.PromQuerier, queryName string, from, until string) { - start, err := time.Parse(dayLayout, from) - require.NoError(t, err) - end, err := time.Parse(dayLayout, until) - require.NoError(t, err) - _, err = report.RunRange(context.Background(), tdb, prom, queryName, start, end) - require.NoError(t, err) -} -func generateInvoice(t *testing.T, tdb *sqlx.DB, year int, month 
time.Month) []invoice.Invoice {
- tx, err := tdb.Beginx()
- require.NoError(t, err)
- defer tx.Rollback()
- invRun, err := invoice.Generate(context.Background(), tx, year, month)
- require.NoError(t, err)
- return invRun
-}
-
-func invoiceEqualsGolden(t *testing.T, goldenFile string, actual []invoice.Invoice, update bool) {
- t.Run(goldenFile, func(t *testing.T) {
- actualJSON, err := json.MarshalIndent(sortInvoices(actual), "", "\t")
- require.NoErrorf(t, err, "Failed to marshal invoice to JSON")
-
- goldenPath := path.Join("testdata", fmt.Sprintf("%s.json", goldenFile))
- if update {
- err := os.WriteFile(goldenPath, actualJSON, 0644)
- require.NoErrorf(t, err, "failed to update goldenFile %s", goldenPath)
- return
- }
-
- f, err := os.OpenFile(goldenPath, os.O_RDONLY, 0644)
- require.NoErrorf(t, err, "failed to open goldenFile %s", goldenPath)
- defer f.Close()
- expected, err := io.ReadAll(f)
- require.NoErrorf(t, err, "failed to read goldenFile %s", goldenPath)
-
- assert.JSONEq(t, string(expected), string(actualJSON))
- })
-}
-
-func timerange(t *testing.T, from, to string) pgtype.Tstzrange {
- var fromTs pgtype.Timestamptz
- if from == "-" {
- fromTs = db.MustTimestamp(pgtype.NegativeInfinity)
- } else {
- ts, err := time.Parse(dayLayout, from)
- require.NoError(t, err, "failed to parse timestamp")
- fromTs = db.MustTimestamp(ts)
- }
- var toTs pgtype.Timestamptz
- if to == "-" {
- toTs = db.MustTimestamp(pgtype.Infinity)
- } else {
- ts, err := time.Parse(dayLayout, to)
- require.NoError(t, err, "failed to parse timestamp")
- toTs = db.MustTimestamp(ts)
- }
- return db.Timerange(fromTs, toTs)
-}
diff --git a/pkg/invoice/invoice_golden_timed_discounts_test.go b/pkg/invoice/invoice_golden_timed_discounts_test.go
deleted file mode 100644
index 215432e..0000000
--- a/pkg/invoice/invoice_golden_timed_discounts_test.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package invoice_test
-
-import (
- "database/sql"
- "time"
-
- "github.com/appuio/appuio-cloud-reporting/pkg/db"
-
- "github.com/stretchr/testify/require"
-)
-
-func (s *InvoiceGoldenSuite) TestInvoiceGolden_TimedDiscounts() {
- t := s.T()
- tdb := s.DB()
-
- _, err := db.CreateProduct(tdb, db.Product{
- Source: "my-product",
- Amount: 1,
- During: db.InfiniteRange(),
- })
- require.NoError(t, err)
-
- _, err = db.CreateDiscount(tdb, db.Discount{
- Source: "my-product",
- During: timerange(t, "2022-03-04", "-"),
- })
- require.NoError(t, err)
-
- _, err = db.CreateDiscount(tdb, db.Discount{
- Source: "my-product",
- Discount: 0.25,
- During: timerange(t, "2022-03-02", "2022-03-04"),
- })
- require.NoError(t, err)
-
- _, err = db.CreateDiscount(tdb, db.Discount{
- Source: "my-product",
- Discount: 0.5,
- During: timerange(t, "2022-02-25", "2022-03-02"),
- })
- require.NoError(t, err)
-
- _, err = db.CreateDiscount(tdb, db.Discount{
- Source: "my-product",
- Discount: 1,
- During: timerange(t, "-", "2022-02-25"),
- })
- require.NoError(t, err)
-
- q, err := db.CreateQuery(tdb, db.Query{
- Name: "test",
- Description: "test description",
- Query: "test",
- Unit: "tps",
- During: db.InfiniteRange(),
- })
- s.prom.queries[q.Query] = fakeQueryResults{
- "my-product:my-cluster:my-tenant:my-namespace": fakeQuerySample{Value: 42},
- "my-product:my-cluster:my-tenant:other-namespace": fakeQuerySample{Value: 42},
- "my-product:other-cluster:my-tenant:my-namespace": fakeQuerySample{Value: 42},
- "my-product:my-cluster:other-tenant:my-namespace": fakeQuerySample{Value: 23},
- }
- require.NoError(t, err)
-
- sq, err := db.CreateQuery(tdb, db.Query{
- ParentID:
sql.NullString{
- String: q.Id,
- Valid: true,
- },
- Name: "sub-test",
- Description: "A sub query of Test",
- Query: "sub-test",
- Unit: "tps",
- During: db.InfiniteRange(),
- })
- s.prom.queries[sq.Query] = fakeQueryResults{
- "my-product:my-cluster:my-tenant:my-namespace": fakeQuerySample{Value: 4},
- "my-product:my-cluster:my-tenant:other-namespace": fakeQuerySample{Value: 4},
- "my-product:other-cluster:my-tenant:my-namespace": fakeQuerySample{Value: 4},
- "my-product:my-cluster:other-tenant:my-namespace": fakeQuerySample{Value: 2},
- }
- require.NoError(t, err)
-
- runReport(t, tdb, s.prom, q.Name, "2022-02-25", "2022-03-10")
-
- invoiceEqualsGolden(t, "timed_discounts",
- generateInvoice(t, tdb, 2022, time.March),
- *updateGolden)
-}
diff --git a/pkg/invoice/invoice_golden_timed_query_test.go b/pkg/invoice/invoice_golden_timed_query_test.go
deleted file mode 100644
index 469c989..0000000
--- a/pkg/invoice/invoice_golden_timed_query_test.go
+++ /dev/null
@@ -1,125 +0,0 @@
-package invoice_test
-
-import (
- "database/sql"
- "time"
-
- "github.com/appuio/appuio-cloud-reporting/pkg/db"
-
- "github.com/stretchr/testify/require"
-)
-
-func (s *InvoiceGoldenSuite) TestInvoiceGolden_TimedQuery() {
- t := s.T()
- tdb := s.DB()
-
- // Create base product and discount
- _, err := db.CreateProduct(tdb, db.Product{
- Source: "my-product",
- Amount: 1,
- During: db.InfiniteRange(),
- })
- require.NoError(t, err)
- _, err = db.CreateDiscount(tdb, db.Discount{
- Source: "my-product",
- During: db.InfiniteRange(),
- })
- require.NoError(t, err)
-
- // Create old query, only valid before the billing period.
- // Should not be in invoice
- old, err := db.CreateQuery(tdb, db.Query{
- Name: "test",
- Description: "old invalid query",
- Query: "old-test",
- Unit: "tps",
- During: timerange(t, "-", "2022-02-25"),
- })
- s.prom.queries[old.Query] = fakeQueryResults{
- "my-product:my-cluster:my-tenant:my-namespace": fakeQuerySample{Value: 9001},
- "my-product:my-cluster:other-tenant:my-namespace": fakeQuerySample{Value: 9001},
- }
- require.NoError(t, err)
-
- // Create query and two subqueries that are valid for the first 5 days
- // One subquery is only valid for the first two days of the billing month
- q, err := db.CreateQuery(tdb, db.Query{
- Name: "test",
- Description: "test description",
- Query: "test",
- Unit: "tps",
- During: timerange(t, "2022-02-25", "2022-03-05"),
- })
- s.prom.queries[q.Query] = fakeQueryResults{
- "my-product:my-cluster:my-tenant:my-namespace": fakeQuerySample{Value: 42},
- "my-product:my-cluster:other-tenant:my-namespace": fakeQuerySample{Value: 23},
- }
- require.NoError(t, err)
- sq, err := db.CreateQuery(tdb, db.Query{
- ParentID: sql.NullString{
- String: q.Id,
- Valid: true,
- },
- Name: "sub-test",
- Description: "A sub query of Test",
- Query: "sub-test",
- Unit: "tps",
- During: db.InfiniteRange(),
- })
- s.prom.queries[sq.Query] = fakeQueryResults{
- "my-product:my-cluster:my-tenant:my-namespace": fakeQuerySample{Value: 4},
- "my-product:my-cluster:other-tenant:my-namespace": fakeQuerySample{Value: 2},
- }
- require.NoError(t, err)
- sq2, err := db.CreateQuery(tdb, db.Query{
- ParentID: sql.NullString{
- String: q.Id,
- Valid: true,
- },
- Name: "sub-test2",
- Description: "Another sub query of Test that stops early",
- Query: "sub-test2",
- Unit: "tps",
- During: timerange(t, "2022-02-25", "2022-03-02"),
- })
- s.prom.queries[sq2.Query] = fakeQueryResults{
- "my-product:my-cluster:my-tenant:my-namespace": fakeQuerySample{Value: 7},
-
"my-product:my-cluster:other-tenant:my-namespace": fakeQuerySample{Value: 0}, - } - require.NoError(t, err) - - // Create new query that is valid from the 5th day and has one subquery - newQ, err := db.CreateQuery(tdb, db.Query{ - Name: "test", - Description: "new nicer query", - Query: "nice-test", - Unit: "tps", - During: timerange(t, "2022-03-05", "-"), - }) - s.prom.queries[newQ.Query] = fakeQueryResults{ - "my-product:my-cluster:my-tenant:my-namespace": fakeQuerySample{Value: 69}, - "my-product:my-cluster:other-tenant:my-namespace": fakeQuerySample{Value: 69}, - } - require.NoError(t, err) - nsq, err := db.CreateQuery(tdb, db.Query{ - ParentID: sql.NullString{ - String: newQ.Id, - Valid: true, - }, - Name: "new-sub-test", - Description: "A better sub query of Test", - Query: "new-sub-test", - Unit: "tps", - During: db.InfiniteRange(), - }) - s.prom.queries[nsq.Query] = fakeQueryResults{ - "my-product:my-cluster:my-tenant:my-namespace": fakeQuerySample{Value: 4}, - "my-product:my-cluster:other-tenant:my-namespace": fakeQuerySample{Value: 2}, - } - require.NoError(t, err) - - runReport(t, tdb, s.prom, "test", "2022-02-25", "2022-03-10") - invoiceEqualsGolden(t, "timed_query", - generateInvoice(t, tdb, 2022, time.March), - *updateGolden) -} diff --git a/pkg/invoice/invoice_test.go b/pkg/invoice/invoice_test.go deleted file mode 100644 index c311fcf..0000000 --- a/pkg/invoice/invoice_test.go +++ /dev/null @@ -1,486 +0,0 @@ -package invoice_test - -import ( - "context" - "database/sql" - "testing" - "time" - - "github.com/jackc/pgtype" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - - "github.com/appuio/appuio-cloud-reporting/pkg/db" - "github.com/appuio/appuio-cloud-reporting/pkg/db/dbtest" - "github.com/appuio/appuio-cloud-reporting/pkg/invoice" -) - -type InvoiceSuite struct { - dbtest.Suite - - memoryProduct db.Product - storageProduct db.Product - - memoryDiscount db.Discount - tricellMemoryDiscount db.Discount - storageDiscount db.Discount - - memoryQuery db.Query - memorySubQuery db.Query - memoryOtherSubQuery db.Query - storageQuery db.Query - - umbrellaCorpTenant db.Tenant - tricellTenant db.Tenant - - p12aCategory db.Category - nestElevCtrlCategory db.Category - uroborosCategory db.Category - - dateTimes []db.DateTime - - facts []db.Fact -} - -func (s *InvoiceSuite) SetupSuite() { - s.Suite.SetupSuite() - - t := s.T() - tdb := s.DB() - - require.NoError(s.T(), - db.GetNamed(tdb, &s.memoryProduct, - "INSERT INTO products (source,target,amount,unit,during) VALUES (:source,:target,:amount,:unit,:during) RETURNING *", db.Product{ - Source: "test_memory:us-rac-2", - Amount: 3, - During: db.InfiniteRange(), - })) - require.NoError(s.T(), - db.GetNamed(tdb, &s.storageProduct, - "INSERT INTO products (source,target,amount,unit,during) VALUES (:source,:target,:amount,:unit,:during) RETURNING *", db.Product{ - Source: "test_storage:us-rac-2", - Amount: 5, - During: db.InfiniteRange(), - })) - - require.NoError(t, - db.GetNamed(tdb, &s.memoryDiscount, - "INSERT INTO discounts (source,discount,during) VALUES (:source,:discount,:during) RETURNING *", db.Discount{ - Source: "test_memory", - Discount: 0, - During: db.InfiniteRange(), - })) - require.NoError(t, - db.GetNamed(tdb, &s.tricellMemoryDiscount, - "INSERT INTO discounts (source,discount,during) VALUES (:source,:discount,:during) RETURNING *", db.Discount{ - Source: "test_memory:*:", - Discount: .5, - During: db.Timerange(db.MustTimestamp(time.Date(2021, time.December, 15, 14, 0, 0, 0, time.UTC)), 
db.MustTimestamp(pgtype.Infinity)), - })) - require.NoError(t, - db.GetNamed(tdb, &s.storageDiscount, - "INSERT INTO discounts (source,discount,during) VALUES (:source,:discount,:during) RETURNING *", db.Discount{ - Source: "test_storage:us-rac-2", - Discount: 0.4, - During: db.InfiniteRange(), - })) - - require.NoError(t, - db.GetNamed(tdb, &s.memoryQuery, - "INSERT INTO queries (name,description,unit,query) VALUES (:name,:description,:unit,:query) RETURNING *", db.Query{ - Name: "test_memory", - Description: "Memory", - Unit: "MiB", - })) - require.NoError(t, - db.GetNamed(tdb, &s.memorySubQuery, - "INSERT INTO queries (parent_id,name,description,unit,query) VALUES (:parent_id,:name,:description,:unit,:query) RETURNING *", db.Query{ - ParentID: sql.NullString{ - String: s.memoryQuery.Id, - Valid: true, - }, - Name: "test_sub_memory", - Description: "Sub Memory", - Unit: "MiB", - })) - require.NoError(t, - db.GetNamed(tdb, &s.memoryOtherSubQuery, - "INSERT INTO queries (parent_id,name,description,unit,query) VALUES (:parent_id,:name,:description,:unit,:query) RETURNING *", db.Query{ - ParentID: sql.NullString{ - String: s.memoryQuery.Id, - Valid: true, - }, - Name: "test_other_sub_memory", - Description: "Other Sub Memory", - Unit: "core", - })) - - require.NoError(t, - db.GetNamed(tdb, &s.storageQuery, - "INSERT INTO queries (name,description,unit,query) VALUES (:name,:description,:unit,:query) RETURNING *", db.Query{ - Name: "test_storage", - Description: "Storage", - Unit: "Gib", - })) - - require.NoError(t, - db.GetNamed(tdb, &s.umbrellaCorpTenant, - "INSERT INTO tenants (source,target) VALUES (:source,:target) RETURNING *", db.Tenant{ - Source: "umbrellacorp", - Target: sql.NullString{Valid: true, String: "23465-umbrellacorp"}, - })) - require.NoError(t, - db.GetNamed(tdb, &s.tricellTenant, - "INSERT INTO tenants (source,target) VALUES (:source,:target) RETURNING *", db.Tenant{ - Source: "tricell", - Target: sql.NullString{Valid: true, String: "98756-tricell"}, - })) - - require.NoError(t, - db.GetNamed(tdb, &s.p12aCategory, - "INSERT INTO categories (source,target) VALUES (:source,:target) RETURNING *", db.Category{ - Source: "us-rac-2:disposal-plant-p-12a-furnace-control", - Target: sql.NullString{Valid: true, String: "3445-disposal-plant-p-12a-furnace-control"}, - })) - require.NoError(t, - db.GetNamed(tdb, &s.nestElevCtrlCategory, - "INSERT INTO categories (source,target) VALUES (:source,:target) RETURNING *", db.Category{ - Source: "us-rac-2:nest-elevator-control", - Target: sql.NullString{Valid: true, String: "897-nest-elevator-control"}, - })) - require.NoError(t, - db.GetNamed(tdb, &s.uroborosCategory, - "INSERT INTO categories (source,target) VALUES (:source,:target) RETURNING *", db.Category{ - Source: "af-south-1:uroboros-research", - Target: sql.NullString{Valid: true, String: "123587-uroboros-research"}, - })) - - require.NoError(t, - db.SelectNamed(tdb, &s.dateTimes, - "INSERT INTO date_times (timestamp, year, month, day, hour) VALUES (:timestamp, :year, :month, :day, :hour) RETURNING *", - []db.DateTime{ - db.BuildDateTime(time.Date(2021, time.December, 1, 1, 0, 0, 0, time.UTC)), - db.BuildDateTime(time.Date(2021, time.December, 31, 23, 0, 0, 0, time.UTC)), - db.BuildDateTime(time.Date(2022, time.January, 1, 1, 0, 0, 0, time.UTC)), - }, - )) - - facts := make([]db.Fact, 0) - - facts = append(facts, factWithDateTime(db.Fact{ - QueryId: s.memoryQuery.Id, - ProductId: s.memoryProduct.Id, - DiscountId: s.memoryDiscount.Id, - - TenantId: s.umbrellaCorpTenant.Id, - 
CategoryId: s.p12aCategory.Id, - - Quantity: 4000, - }, s.dateTimes)...) - facts = append(facts, factWithDateTime(db.Fact{ - QueryId: s.memorySubQuery.Id, - ProductId: s.memoryProduct.Id, - DiscountId: s.memoryDiscount.Id, - - TenantId: s.umbrellaCorpTenant.Id, - CategoryId: s.p12aCategory.Id, - - Quantity: 1337, - }, s.dateTimes)...) - facts = append(facts, factWithDateTime(db.Fact{ - QueryId: s.memoryOtherSubQuery.Id, - ProductId: s.memoryProduct.Id, - DiscountId: s.memoryDiscount.Id, - - TenantId: s.umbrellaCorpTenant.Id, - CategoryId: s.p12aCategory.Id, - - Quantity: 42, - }, s.dateTimes)...) - - facts = append(facts, factWithDateTime(db.Fact{ - QueryId: s.storageQuery.Id, - ProductId: s.storageProduct.Id, - DiscountId: s.storageDiscount.Id, - - TenantId: s.umbrellaCorpTenant.Id, - CategoryId: s.p12aCategory.Id, - - Quantity: 12, - }, s.dateTimes)...) - - facts = append(facts, factWithDateTime(db.Fact{ - QueryId: s.memoryQuery.Id, - ProductId: s.memoryProduct.Id, - DiscountId: s.memoryDiscount.Id, - - TenantId: s.umbrellaCorpTenant.Id, - CategoryId: s.nestElevCtrlCategory.Id, - - Quantity: 1000, - }, s.dateTimes)...) - - facts = append(facts, factWithDateTime(db.Fact{ - QueryId: s.memoryQuery.Id, - ProductId: s.memoryProduct.Id, - DiscountId: s.memoryDiscount.Id, - - TenantId: s.tricellTenant.Id, - CategoryId: s.uroborosCategory.Id, - - Quantity: 2000, - }, s.dateTimes[:1])...) - facts = append(facts, factWithDateTime(db.Fact{ - QueryId: s.memorySubQuery.Id, - ProductId: s.memoryProduct.Id, - DiscountId: s.memoryDiscount.Id, - - TenantId: s.tricellTenant.Id, - CategoryId: s.uroborosCategory.Id, - - Quantity: 1337, - }, s.dateTimes[:1])...) - facts = append(facts, factWithDateTime(db.Fact{ - QueryId: s.memoryQuery.Id, - ProductId: s.memoryProduct.Id, - DiscountId: s.tricellMemoryDiscount.Id, - - TenantId: s.tricellTenant.Id, - CategoryId: s.uroborosCategory.Id, - - Quantity: 2000, - }, s.dateTimes[1:])...) - facts = append(facts, factWithDateTime(db.Fact{ - QueryId: s.memorySubQuery.Id, - ProductId: s.memoryProduct.Id, - DiscountId: s.tricellMemoryDiscount.Id, - - TenantId: s.tricellTenant.Id, - CategoryId: s.uroborosCategory.Id, - - Quantity: 1337, - }, s.dateTimes[1:])...) 
- - require.NoError(t, - db.SelectNamed(tdb, &s.facts, - "INSERT INTO facts (date_time_id,query_id,tenant_id,category_id,product_id,discount_id,quantity) VALUES (:date_time_id,:query_id,:tenant_id,:category_id,:product_id,:discount_id,:quantity) RETURNING *", - facts, - )) -} - -func (s *InvoiceSuite) TestInvoice_Generate() { - t := s.T() - - tx, err := s.DB().Beginx() - require.NoError(t, err) - defer tx.Rollback() - - invRun, err := invoice.Generate(context.Background(), tx, 2021, time.December) - require.NoError(t, err) - require.Len(t, invRun, 2) - - discountToMultiplier := func(discount float64) float64 { - return float64(1) - float64(discount) - } - - const stampsInTimerange = 2 - t.Run("InvoiceForTricell", func(t *testing.T) { - inv := invRun[0] - const quantity = float64(2000) - const subMemQuantity = float64(1337) - - invoiceEqual(t, invoice.Invoice{ - Tenant: invoice.Tenant{ - Source: s.tricellTenant.Source, - Target: s.tricellTenant.Target.String, - }, - PeriodStart: time.Date(2021, time.December, 1, 0, 0, 0, 0, time.UTC), - PeriodEnd: time.Date(2021, time.December, 31, 0, 0, 0, 0, time.UTC), - Categories: []invoice.Category{ - { - Source: s.uroborosCategory.Source, - Target: s.uroborosCategory.Target.String, - Items: []invoice.Item{ - { - Description: s.memoryQuery.Description, - QueryName: s.memoryQuery.Name, - ProductRef: invoice.ProductRef{ - Source: s.memoryProduct.Source, - Target: s.memoryProduct.Target.String, - }, - Quantity: quantity, - QuantityMin: quantity, - QuantityAvg: quantity, - QuantityMax: quantity, - Unit: s.memoryQuery.Unit, - PricePerUnit: s.memoryProduct.Amount, - Discount: s.memoryDiscount.Discount, - Total: quantity * s.memoryProduct.Amount, - SubItems: map[string]invoice.SubItem{ - s.memorySubQuery.Name: { - Description: s.memorySubQuery.Description, - QueryName: s.memorySubQuery.Name, - Quantity: subMemQuantity, - QuantityMin: subMemQuantity, - QuantityAvg: subMemQuantity, - QuantityMax: subMemQuantity, - Unit: s.memorySubQuery.Unit, - }, - }, - }, - { - Description: s.memoryQuery.Description, - QueryName: s.memoryQuery.Name, - ProductRef: invoice.ProductRef{ - Source: s.memoryProduct.Source, - Target: s.memoryProduct.Target.String, - }, - Quantity: quantity, - QuantityMin: quantity, - QuantityAvg: quantity, - QuantityMax: quantity, - Unit: s.memoryQuery.Unit, - PricePerUnit: s.memoryProduct.Amount, - Discount: s.tricellMemoryDiscount.Discount, - Total: quantity * s.memoryProduct.Amount * 0.5, - SubItems: map[string]invoice.SubItem{ - s.memorySubQuery.Name: { - Description: s.memorySubQuery.Description, - QueryName: s.memorySubQuery.Name, - Quantity: subMemQuantity, - QuantityMin: subMemQuantity, - QuantityAvg: subMemQuantity, - QuantityMax: subMemQuantity, - Unit: s.memorySubQuery.Unit, - }, - }, - }, - }, - Total: quantity * s.memoryProduct.Amount * 1.5, - }, - }, - Total: quantity * s.memoryProduct.Amount * 1.5, - }, inv) - }) - - t.Run("InvoiceForUmbrellaCorp", func(t *testing.T) { - inv := invRun[1] - const memP12Quantity = float64(4000) - memP12Total := memP12Quantity * stampsInTimerange * s.memoryProduct.Amount * discountToMultiplier(s.memoryDiscount.Discount) - const subMemP12Quantity = float64(1337) - const otherSubMemP12Quantity = float64(42) - const storP12Quantity = float64(12) - storP12Total := storP12Quantity * stampsInTimerange * s.storageProduct.Amount * discountToMultiplier(s.storageDiscount.Discount) - const memNestQuantity = float64(1000) - memNestTotal := memNestQuantity * stampsInTimerange * s.memoryProduct.Amount * 
discountToMultiplier(s.memoryDiscount.Discount) - - invoiceEqual(t, invoice.Invoice{ - Tenant: invoice.Tenant{ - Source: s.umbrellaCorpTenant.Source, - Target: s.umbrellaCorpTenant.Target.String, - }, - PeriodStart: time.Date(2021, time.December, 1, 0, 0, 0, 0, time.UTC), - PeriodEnd: time.Date(2021, time.December, 31, 0, 0, 0, 0, time.UTC), - Categories: []invoice.Category{ - { - Source: s.p12aCategory.Source, - Target: s.p12aCategory.Target.String, - Items: []invoice.Item{ - { - Description: s.storageQuery.Description, - QueryName: s.storageQuery.Name, - ProductRef: invoice.ProductRef{ - Source: s.storageProduct.Source, - Target: s.storageProduct.Target.String, - }, - Quantity: storP12Quantity * stampsInTimerange, - QuantityMin: storP12Quantity, - QuantityAvg: storP12Quantity, - QuantityMax: storP12Quantity, - Unit: s.storageQuery.Unit, - PricePerUnit: s.storageProduct.Amount, - Discount: s.storageDiscount.Discount, - Total: storP12Total, - SubItems: map[string]invoice.SubItem{}, - }, - { - Description: s.memoryQuery.Description, - QueryName: s.memoryQuery.Name, - ProductRef: invoice.ProductRef{ - Source: s.memoryProduct.Source, - Target: s.memoryProduct.Target.String, - }, - Quantity: memP12Quantity * stampsInTimerange, - QuantityMin: memP12Quantity, - QuantityAvg: memP12Quantity, - QuantityMax: memP12Quantity, - Unit: s.memoryQuery.Unit, - PricePerUnit: s.memoryProduct.Amount, - Discount: s.memoryDiscount.Discount, - Total: memP12Total, - SubItems: map[string]invoice.SubItem{ - s.memorySubQuery.Name: { - Description: s.memorySubQuery.Description, - QueryName: s.memorySubQuery.Name, - Quantity: subMemP12Quantity * stampsInTimerange, - QuantityMin: subMemP12Quantity, - QuantityAvg: subMemP12Quantity, - QuantityMax: subMemP12Quantity, - Unit: s.memorySubQuery.Unit, - }, - s.memoryOtherSubQuery.Name: { - Description: s.memoryOtherSubQuery.Description, - QueryName: s.memoryOtherSubQuery.Name, - Quantity: otherSubMemP12Quantity * stampsInTimerange, - QuantityMin: otherSubMemP12Quantity, - QuantityAvg: otherSubMemP12Quantity, - QuantityMax: otherSubMemP12Quantity, - Unit: s.memoryOtherSubQuery.Unit, - }, - }, - }, - }, - Total: memP12Total + storP12Total, - }, - { - Source: s.nestElevCtrlCategory.Source, - Target: s.nestElevCtrlCategory.Target.String, - Items: []invoice.Item{ - { - Description: s.memoryQuery.Description, - QueryName: s.memoryQuery.Name, - ProductRef: invoice.ProductRef{ - Source: s.memoryProduct.Source, - Target: s.memoryProduct.Target.String, - }, - Quantity: memNestQuantity * stampsInTimerange, - QuantityMin: memNestQuantity, - QuantityAvg: memNestQuantity, - QuantityMax: memNestQuantity, - Unit: s.memoryQuery.Unit, - PricePerUnit: s.memoryProduct.Amount, - Discount: s.memoryDiscount.Discount, - Total: memNestTotal, - SubItems: map[string]invoice.SubItem{}, - }, - }, - Total: memNestTotal, - }, - }, - Total: memP12Total + storP12Total + memNestTotal, - }, inv) - }) -} - -func TestInvoice(t *testing.T) { - suite.Run(t, new(InvoiceSuite)) -} - -func factWithDateTime(f db.Fact, dts []db.DateTime) []db.Fact { - facts := make([]db.Fact, 0, len(dts)) - for _, dt := range dts { - f.DateTimeId = dt.Id - facts = append(facts, f) - } - return facts -} diff --git a/pkg/invoice/main_test.go b/pkg/invoice/main_test.go deleted file mode 100644 index cdc2fb8..0000000 --- a/pkg/invoice/main_test.go +++ /dev/null @@ -1,54 +0,0 @@ -package invoice_test - -import ( - "encoding/json" - "flag" - "os" - "sort" - "testing" - - "github.com/appuio/appuio-cloud-reporting/pkg/invoice" - 
"github.com/stretchr/testify/assert" -) - -var ( - updateGolden = flag.Bool("update", false, "update the golden files of this test") -) - -func TestMain(m *testing.M) { - flag.Parse() - os.Exit(m.Run()) -} - -func invoiceEqual(t *testing.T, expInv, inv invoice.Invoice) bool { - sortInvoice(&inv) - sortInvoice(&expInv) - return assert.Equal(t, expInv, inv) -} - -func sortInvoices(invSlice []invoice.Invoice) []invoice.Invoice { - sort.Slice(invSlice, func(i, j int) bool { - return invSlice[i].Tenant.Source < invSlice[j].Tenant.Source - }) - for k := range invSlice { - sortInvoice(&invSlice[k]) - } - return invSlice -} - -func sortInvoice(inv *invoice.Invoice) { - sort.Slice(inv.Categories, func(i, j int) bool { - // This is horrible, but I don't really have any ID or similar to sort on.. - iraw, _ := json.Marshal(inv.Categories[i]) - jraw, _ := json.Marshal(inv.Categories[j]) - return string(iraw) < string(jraw) - }) - for catIter := range inv.Categories { - sort.Slice(inv.Categories[catIter].Items, func(i, j int) bool { - // This is horrible, but I don't really have any ID or similar to sort on.. - iraw, _ := json.Marshal(inv.Categories[catIter].Items[i]) - jraw, _ := json.Marshal(inv.Categories[catIter].Items[j]) - return string(iraw) < string(jraw) - }) - } -} diff --git a/pkg/invoice/testdata/discounts.json b/pkg/invoice/testdata/discounts.json deleted file mode 100644 index 92d4a49..0000000 --- a/pkg/invoice/testdata/discounts.json +++ /dev/null @@ -1,152 +0,0 @@ -[ - { - "Tenant": { - "Source": "my-tenant", - "Target": "" - }, - "PeriodStart": "2022-03-01T00:00:00Z", - "PeriodEnd": "2022-03-31T00:00:00Z", - "Categories": [ - { - "Source": "my-cluster:my-namespace", - "Target": "", - "Items": [ - { - "Description": "test description", - "QueryName": "test", - "Source": "my-product", - "Target": "", - "Quantity": 9072, - "QuantityMin": 42, - "QuantityAvg": 42, - "QuantityMax": 42, - "Unit": "tps", - "PricePerUnit": 1, - "Discount": 0.5, - "Total": 4536, - "SubItems": { - "sub-test": { - "Description": "A sub query of Test", - "QueryName": "sub-test", - "Quantity": 864, - "QuantityMin": 4, - "QuantityAvg": 4, - "QuantityMax": 4, - "Unit": "tps" - } - } - } - ], - "Total": 4536 - }, - { - "Source": "my-cluster:other-namespace", - "Target": "", - "Items": [ - { - "Description": "test description", - "QueryName": "test", - "Source": "my-product", - "Target": "", - "Quantity": 9072, - "QuantityMin": 42, - "QuantityAvg": 42, - "QuantityMax": 42, - "Unit": "tps", - "PricePerUnit": 1, - "Discount": 0.5, - "Total": 4536, - "SubItems": { - "sub-test": { - "Description": "A sub query of Test", - "QueryName": "sub-test", - "Quantity": 864, - "QuantityMin": 4, - "QuantityAvg": 4, - "QuantityMax": 4, - "Unit": "tps" - } - } - } - ], - "Total": 4536 - }, - { - "Source": "other-cluster:my-namespace", - "Target": "", - "Items": [ - { - "Description": "test description", - "QueryName": "test", - "Source": "my-product", - "Target": "", - "Quantity": 9072, - "QuantityMin": 42, - "QuantityAvg": 42, - "QuantityMax": 42, - "Unit": "tps", - "PricePerUnit": 1, - "Discount": 0.25, - "Total": 6804, - "SubItems": { - "sub-test": { - "Description": "A sub query of Test", - "QueryName": "sub-test", - "Quantity": 864, - "QuantityMin": 4, - "QuantityAvg": 4, - "QuantityMax": 4, - "Unit": "tps" - } - } - } - ], - "Total": 6804 - } - ], - "Total": 15876 - }, - { - "Tenant": { - "Source": "other-tenant", - "Target": "" - }, - "PeriodStart": "2022-03-01T00:00:00Z", - "PeriodEnd": "2022-03-31T00:00:00Z", - "Categories": [ - 
{ - "Source": "my-cluster:my-namespace", - "Target": "", - "Items": [ - { - "Description": "test description", - "QueryName": "test", - "Source": "my-product", - "Target": "", - "Quantity": 4968, - "QuantityMin": 23, - "QuantityAvg": 23, - "QuantityMax": 23, - "Unit": "tps", - "PricePerUnit": 1, - "Discount": 0, - "Total": 4968, - "SubItems": { - "sub-test": { - "Description": "A sub query of Test", - "QueryName": "sub-test", - "Quantity": 432, - "QuantityMin": 2, - "QuantityAvg": 2, - "QuantityMax": 2, - "Unit": "tps" - } - } - } - ], - "Total": 4968 - } - ], - "Total": 4968 - } -] \ No newline at end of file diff --git a/pkg/invoice/testdata/products.json b/pkg/invoice/testdata/products.json deleted file mode 100644 index 72bcc11..0000000 --- a/pkg/invoice/testdata/products.json +++ /dev/null @@ -1,152 +0,0 @@ -[ - { - "Tenant": { - "Source": "my-tenant", - "Target": "" - }, - "PeriodStart": "2022-03-01T00:00:00Z", - "PeriodEnd": "2022-03-31T00:00:00Z", - "Categories": [ - { - "Source": "my-cluster:my-namespace", - "Target": "", - "Items": [ - { - "Description": "test description", - "QueryName": "test", - "Source": "my-product:my-cluster:my-tenant", - "Target": "", - "Quantity": 9072, - "QuantityMin": 42, - "QuantityAvg": 42, - "QuantityMax": 42, - "Unit": "tps", - "PricePerUnit": 3, - "Discount": 0, - "Total": 27216, - "SubItems": { - "sub-test": { - "Description": "A sub query of Test", - "QueryName": "sub-test", - "Quantity": 864, - "QuantityMin": 4, - "QuantityAvg": 4, - "QuantityMax": 4, - "Unit": "tps" - } - } - } - ], - "Total": 27216 - }, - { - "Source": "my-cluster:other-namespace", - "Target": "", - "Items": [ - { - "Description": "test description", - "QueryName": "test", - "Source": "my-product:my-cluster:my-tenant", - "Target": "", - "Quantity": 9072, - "QuantityMin": 42, - "QuantityAvg": 42, - "QuantityMax": 42, - "Unit": "tps", - "PricePerUnit": 3, - "Discount": 0, - "Total": 27216, - "SubItems": { - "sub-test": { - "Description": "A sub query of Test", - "QueryName": "sub-test", - "Quantity": 864, - "QuantityMin": 4, - "QuantityAvg": 4, - "QuantityMax": 4, - "Unit": "tps" - } - } - } - ], - "Total": 27216 - }, - { - "Source": "other-cluster:my-namespace", - "Target": "", - "Items": [ - { - "Description": "test description", - "QueryName": "test", - "Source": "my-product:*:my-tenant", - "Target": "", - "Quantity": 9072, - "QuantityMin": 42, - "QuantityAvg": 42, - "QuantityMax": 42, - "Unit": "tps", - "PricePerUnit": 2, - "Discount": 0, - "Total": 18144, - "SubItems": { - "sub-test": { - "Description": "A sub query of Test", - "QueryName": "sub-test", - "Quantity": 864, - "QuantityMin": 4, - "QuantityAvg": 4, - "QuantityMax": 4, - "Unit": "tps" - } - } - } - ], - "Total": 18144 - } - ], - "Total": 72576 - }, - { - "Tenant": { - "Source": "other-tenant", - "Target": "" - }, - "PeriodStart": "2022-03-01T00:00:00Z", - "PeriodEnd": "2022-03-31T00:00:00Z", - "Categories": [ - { - "Source": "my-cluster:my-namespace", - "Target": "", - "Items": [ - { - "Description": "test description", - "QueryName": "test", - "Source": "my-product", - "Target": "", - "Quantity": 4968, - "QuantityMin": 23, - "QuantityAvg": 23, - "QuantityMax": 23, - "Unit": "tps", - "PricePerUnit": 1, - "Discount": 0, - "Total": 4968, - "SubItems": { - "sub-test": { - "Description": "A sub query of Test", - "QueryName": "sub-test", - "Quantity": 432, - "QuantityMin": 2, - "QuantityAvg": 2, - "QuantityMax": 2, - "Unit": "tps" - } - } - } - ], - "Total": 4968 - } - ], - "Total": 4968 - } -] \ No newline at end of 
file diff --git a/pkg/invoice/testdata/simple.json b/pkg/invoice/testdata/simple.json deleted file mode 100644 index 9370c85..0000000 --- a/pkg/invoice/testdata/simple.json +++ /dev/null @@ -1,106 +0,0 @@ -[ - { - "Tenant": { - "Source": "my-tenant", - "Target": "" - }, - "PeriodStart": "2022-03-01T00:00:00Z", - "PeriodEnd": "2022-03-31T00:00:00Z", - "Categories": [ - { - "Source": "my-cluster:my-namespace", - "Target": "", - "Items": [ - { - "Description": "test description", - "QueryName": "test", - "Source": "my-product", - "Target": "", - "Quantity": 9072, - "QuantityMin": 42, - "QuantityAvg": 42, - "QuantityMax": 42, - "Unit": "tps", - "PricePerUnit": 1, - "Discount": 0, - "Total": 9072, - "SubItems": { - "sub-test": { - "Description": "A sub query of Test", - "QueryName": "sub-test", - "Quantity": 864, - "QuantityMin": 4, - "QuantityAvg": 4, - "QuantityMax": 4, - "Unit": "tps" - }, - "sub-test2": { - "Description": "An other sub query of Test", - "QueryName": "sub-test2", - "Quantity": 1512, - "QuantityMin": 7, - "QuantityAvg": 7, - "QuantityMax": 7, - "Unit": "tps" - } - } - } - ], - "Total": 9072 - } - ], - "Total": 9072 - }, - { - "Tenant": { - "Source": "other-tenant", - "Target": "" - }, - "PeriodStart": "2022-03-01T00:00:00Z", - "PeriodEnd": "2022-03-31T00:00:00Z", - "Categories": [ - { - "Source": "my-cluster:my-namespace", - "Target": "", - "Items": [ - { - "Description": "test description", - "QueryName": "test", - "Source": "my-product", - "Target": "", - "Quantity": 4968, - "QuantityMin": 23, - "QuantityAvg": 23, - "QuantityMax": 23, - "Unit": "tps", - "PricePerUnit": 1, - "Discount": 0, - "Total": 4968, - "SubItems": { - "sub-test": { - "Description": "A sub query of Test", - "QueryName": "sub-test", - "Quantity": 432, - "QuantityMin": 2, - "QuantityAvg": 2, - "QuantityMax": 2, - "Unit": "tps" - }, - "sub-test2": { - "Description": "An other sub query of Test", - "QueryName": "sub-test2", - "Quantity": 0, - "QuantityMin": 0, - "QuantityAvg": 0, - "QuantityMax": 0, - "Unit": "tps" - } - } - } - ], - "Total": 4968 - } - ], - "Total": 4968 - } -] \ No newline at end of file diff --git a/pkg/invoice/testdata/tenants.json b/pkg/invoice/testdata/tenants.json deleted file mode 100644 index cbc2d1f..0000000 --- a/pkg/invoice/testdata/tenants.json +++ /dev/null @@ -1,134 +0,0 @@ -[ - { - "Tenant": { - "Source": "megacorp", - "Target": "83492" - }, - "PeriodStart": "2022-01-01T00:00:00Z", - "PeriodEnd": "2022-01-31T00:00:00Z", - "Categories": [ - { - "Source": "my-cluster:my-namespace", - "Target": "", - "Items": [ - { - "Description": "test description", - "QueryName": "test", - "Source": "my-product", - "Target": "", - "Quantity": 29232, - "QuantityMin": 42, - "QuantityAvg": 42, - "QuantityMax": 42, - "Unit": "tps", - "PricePerUnit": 1, - "Discount": 0, - "Total": 29232, - "SubItems": {} - } - ], - "Total": 29232 - } - ], - "Total": 29232 - }, - { - "Tenant": { - "Source": "tricell", - "Target": "98757" - }, - "PeriodStart": "2022-01-01T00:00:00Z", - "PeriodEnd": "2022-01-31T00:00:00Z", - "Categories": [ - { - "Source": "my-cluster:my-namespace", - "Target": "", - "Items": [ - { - "Description": "test description", - "QueryName": "test", - "Source": "my-product", - "Target": "", - "Quantity": 19152, - "QuantityMin": 42, - "QuantityAvg": 42, - "QuantityMax": 42, - "Unit": "tps", - "PricePerUnit": 1, - "Discount": 0, - "Total": 19152, - "SubItems": {} - } - ], - "Total": 19152 - } - ], - "Total": 19152 - }, - { - "Tenant": { - "Source": "tricell", - "Target": "98942" - }, - 
"PeriodStart": "2022-01-01T00:00:00Z", - "PeriodEnd": "2022-01-31T00:00:00Z", - "Categories": [ - { - "Source": "my-cluster:my-namespace", - "Target": "", - "Items": [ - { - "Description": "test description", - "QueryName": "test", - "Source": "my-product", - "Target": "", - "Quantity": 10080, - "QuantityMin": 42, - "QuantityAvg": 42, - "QuantityMax": 42, - "Unit": "tps", - "PricePerUnit": 1, - "Discount": 0, - "Total": 10080, - "SubItems": {} - } - ], - "Total": 10080 - } - ], - "Total": 10080 - }, - { - "Tenant": { - "Source": "umbrellacorp", - "Target": "96432" - }, - "PeriodStart": "2022-01-01T00:00:00Z", - "PeriodEnd": "2022-01-31T00:00:00Z", - "Categories": [ - { - "Source": "my-cluster:my-namespace", - "Target": "", - "Items": [ - { - "Description": "test description", - "QueryName": "test", - "Source": "my-product", - "Target": "", - "Quantity": 9744, - "QuantityMin": 14, - "QuantityAvg": 14, - "QuantityMax": 14, - "Unit": "tps", - "PricePerUnit": 1, - "Discount": 0, - "Total": 9744, - "SubItems": {} - } - ], - "Total": 9744 - } - ], - "Total": 9744 - } -] \ No newline at end of file diff --git a/pkg/invoice/testdata/timed_discounts.json b/pkg/invoice/testdata/timed_discounts.json deleted file mode 100644 index 15c80e0..0000000 --- a/pkg/invoice/testdata/timed_discounts.json +++ /dev/null @@ -1,352 +0,0 @@ -[ - { - "Tenant": { - "Source": "my-tenant", - "Target": "" - }, - "PeriodStart": "2022-03-01T00:00:00Z", - "PeriodEnd": "2022-03-31T00:00:00Z", - "Categories": [ - { - "Source": "my-cluster:my-namespace", - "Target": "", - "Items": [ - { - "Description": "test description", - "QueryName": "test", - "Source": "my-product", - "Target": "", - "Quantity": 1008, - "QuantityMin": 42, - "QuantityAvg": 42, - "QuantityMax": 42, - "Unit": "tps", - "PricePerUnit": 1, - "Discount": 0.5, - "Total": 504, - "SubItems": { - "sub-test": { - "Description": "A sub query of Test", - "QueryName": "sub-test", - "Quantity": 96, - "QuantityMin": 4, - "QuantityAvg": 4, - "QuantityMax": 4, - "Unit": "tps" - } - } - }, - { - "Description": "test description", - "QueryName": "test", - "Source": "my-product", - "Target": "", - "Quantity": 2016, - "QuantityMin": 42, - "QuantityAvg": 42, - "QuantityMax": 42, - "Unit": "tps", - "PricePerUnit": 1, - "Discount": 0.25, - "Total": 1512, - "SubItems": { - "sub-test": { - "Description": "A sub query of Test", - "QueryName": "sub-test", - "Quantity": 192, - "QuantityMin": 4, - "QuantityAvg": 4, - "QuantityMax": 4, - "Unit": "tps" - } - } - }, - { - "Description": "test description", - "QueryName": "test", - "Source": "my-product", - "Target": "", - "Quantity": 6048, - "QuantityMin": 42, - "QuantityAvg": 42, - "QuantityMax": 42, - "Unit": "tps", - "PricePerUnit": 1, - "Discount": 0, - "Total": 6048, - "SubItems": { - "sub-test": { - "Description": "A sub query of Test", - "QueryName": "sub-test", - "Quantity": 576, - "QuantityMin": 4, - "QuantityAvg": 4, - "QuantityMax": 4, - "Unit": "tps" - } - } - } - ], - "Total": 8064 - }, - { - "Source": "my-cluster:other-namespace", - "Target": "", - "Items": [ - { - "Description": "test description", - "QueryName": "test", - "Source": "my-product", - "Target": "", - "Quantity": 1008, - "QuantityMin": 42, - "QuantityAvg": 42, - "QuantityMax": 42, - "Unit": "tps", - "PricePerUnit": 1, - "Discount": 0.5, - "Total": 504, - "SubItems": { - "sub-test": { - "Description": "A sub query of Test", - "QueryName": "sub-test", - "Quantity": 96, - "QuantityMin": 4, - "QuantityAvg": 4, - "QuantityMax": 4, - "Unit": "tps" - } - } - }, - { - 
"Description": "test description", - "QueryName": "test", - "Source": "my-product", - "Target": "", - "Quantity": 2016, - "QuantityMin": 42, - "QuantityAvg": 42, - "QuantityMax": 42, - "Unit": "tps", - "PricePerUnit": 1, - "Discount": 0.25, - "Total": 1512, - "SubItems": { - "sub-test": { - "Description": "A sub query of Test", - "QueryName": "sub-test", - "Quantity": 192, - "QuantityMin": 4, - "QuantityAvg": 4, - "QuantityMax": 4, - "Unit": "tps" - } - } - }, - { - "Description": "test description", - "QueryName": "test", - "Source": "my-product", - "Target": "", - "Quantity": 6048, - "QuantityMin": 42, - "QuantityAvg": 42, - "QuantityMax": 42, - "Unit": "tps", - "PricePerUnit": 1, - "Discount": 0, - "Total": 6048, - "SubItems": { - "sub-test": { - "Description": "A sub query of Test", - "QueryName": "sub-test", - "Quantity": 576, - "QuantityMin": 4, - "QuantityAvg": 4, - "QuantityMax": 4, - "Unit": "tps" - } - } - } - ], - "Total": 8064 - }, - { - "Source": "other-cluster:my-namespace", - "Target": "", - "Items": [ - { - "Description": "test description", - "QueryName": "test", - "Source": "my-product", - "Target": "", - "Quantity": 1008, - "QuantityMin": 42, - "QuantityAvg": 42, - "QuantityMax": 42, - "Unit": "tps", - "PricePerUnit": 1, - "Discount": 0.5, - "Total": 504, - "SubItems": { - "sub-test": { - "Description": "A sub query of Test", - "QueryName": "sub-test", - "Quantity": 96, - "QuantityMin": 4, - "QuantityAvg": 4, - "QuantityMax": 4, - "Unit": "tps" - } - } - }, - { - "Description": "test description", - "QueryName": "test", - "Source": "my-product", - "Target": "", - "Quantity": 2016, - "QuantityMin": 42, - "QuantityAvg": 42, - "QuantityMax": 42, - "Unit": "tps", - "PricePerUnit": 1, - "Discount": 0.25, - "Total": 1512, - "SubItems": { - "sub-test": { - "Description": "A sub query of Test", - "QueryName": "sub-test", - "Quantity": 192, - "QuantityMin": 4, - "QuantityAvg": 4, - "QuantityMax": 4, - "Unit": "tps" - } - } - }, - { - "Description": "test description", - "QueryName": "test", - "Source": "my-product", - "Target": "", - "Quantity": 6048, - "QuantityMin": 42, - "QuantityAvg": 42, - "QuantityMax": 42, - "Unit": "tps", - "PricePerUnit": 1, - "Discount": 0, - "Total": 6048, - "SubItems": { - "sub-test": { - "Description": "A sub query of Test", - "QueryName": "sub-test", - "Quantity": 576, - "QuantityMin": 4, - "QuantityAvg": 4, - "QuantityMax": 4, - "Unit": "tps" - } - } - } - ], - "Total": 8064 - } - ], - "Total": 24192 - }, - { - "Tenant": { - "Source": "other-tenant", - "Target": "" - }, - "PeriodStart": "2022-03-01T00:00:00Z", - "PeriodEnd": "2022-03-31T00:00:00Z", - "Categories": [ - { - "Source": "my-cluster:my-namespace", - "Target": "", - "Items": [ - { - "Description": "test description", - "QueryName": "test", - "Source": "my-product", - "Target": "", - "Quantity": 1104, - "QuantityMin": 23, - "QuantityAvg": 23, - "QuantityMax": 23, - "Unit": "tps", - "PricePerUnit": 1, - "Discount": 0.25, - "Total": 828, - "SubItems": { - "sub-test": { - "Description": "A sub query of Test", - "QueryName": "sub-test", - "Quantity": 96, - "QuantityMin": 2, - "QuantityAvg": 2, - "QuantityMax": 2, - "Unit": "tps" - } - } - }, - { - "Description": "test description", - "QueryName": "test", - "Source": "my-product", - "Target": "", - "Quantity": 3312, - "QuantityMin": 23, - "QuantityAvg": 23, - "QuantityMax": 23, - "Unit": "tps", - "PricePerUnit": 1, - "Discount": 0, - "Total": 3312, - "SubItems": { - "sub-test": { - "Description": "A sub query of Test", - "QueryName": 
"sub-test", - "Quantity": 288, - "QuantityMin": 2, - "QuantityAvg": 2, - "QuantityMax": 2, - "Unit": "tps" - } - } - }, - { - "Description": "test description", - "QueryName": "test", - "Source": "my-product", - "Target": "", - "Quantity": 552, - "QuantityMin": 23, - "QuantityAvg": 23, - "QuantityMax": 23, - "Unit": "tps", - "PricePerUnit": 1, - "Discount": 0.5, - "Total": 276, - "SubItems": { - "sub-test": { - "Description": "A sub query of Test", - "QueryName": "sub-test", - "Quantity": 48, - "QuantityMin": 2, - "QuantityAvg": 2, - "QuantityMax": 2, - "Unit": "tps" - } - } - } - ], - "Total": 4416 - } - ], - "Total": 4416 - } -] \ No newline at end of file diff --git a/pkg/invoice/testdata/timed_query.json b/pkg/invoice/testdata/timed_query.json deleted file mode 100644 index a22656c..0000000 --- a/pkg/invoice/testdata/timed_query.json +++ /dev/null @@ -1,156 +0,0 @@ -[ - { - "Tenant": { - "Source": "my-tenant", - "Target": "" - }, - "PeriodStart": "2022-03-01T00:00:00Z", - "PeriodEnd": "2022-03-31T00:00:00Z", - "Categories": [ - { - "Source": "my-cluster:my-namespace", - "Target": "", - "Items": [ - { - "Description": "new nicer query", - "QueryName": "test", - "Source": "my-product", - "Target": "", - "Quantity": 8280, - "QuantityMin": 69, - "QuantityAvg": 69, - "QuantityMax": 69, - "Unit": "tps", - "PricePerUnit": 1, - "Discount": 0, - "Total": 8280, - "SubItems": { - "new-sub-test": { - "Description": "A better sub query of Test", - "QueryName": "new-sub-test", - "Quantity": 480, - "QuantityMin": 4, - "QuantityAvg": 4, - "QuantityMax": 4, - "Unit": "tps" - } - } - }, - { - "Description": "test description", - "QueryName": "test", - "Source": "my-product", - "Target": "", - "Quantity": 4032, - "QuantityMin": 42, - "QuantityAvg": 42, - "QuantityMax": 42, - "Unit": "tps", - "PricePerUnit": 1, - "Discount": 0, - "Total": 4032, - "SubItems": { - "sub-test": { - "Description": "A sub query of Test", - "QueryName": "sub-test", - "Quantity": 384, - "QuantityMin": 4, - "QuantityAvg": 4, - "QuantityMax": 4, - "Unit": "tps" - }, - "sub-test2": { - "Description": "An other sub query of Test that stops early", - "QueryName": "sub-test2", - "Quantity": 168, - "QuantityMin": 7, - "QuantityAvg": 7, - "QuantityMax": 7, - "Unit": "tps" - } - } - } - ], - "Total": 12312 - } - ], - "Total": 12312 - }, - { - "Tenant": { - "Source": "other-tenant", - "Target": "" - }, - "PeriodStart": "2022-03-01T00:00:00Z", - "PeriodEnd": "2022-03-31T00:00:00Z", - "Categories": [ - { - "Source": "my-cluster:my-namespace", - "Target": "", - "Items": [ - { - "Description": "new nicer query", - "QueryName": "test", - "Source": "my-product", - "Target": "", - "Quantity": 8280, - "QuantityMin": 69, - "QuantityAvg": 69, - "QuantityMax": 69, - "Unit": "tps", - "PricePerUnit": 1, - "Discount": 0, - "Total": 8280, - "SubItems": { - "new-sub-test": { - "Description": "A better sub query of Test", - "QueryName": "new-sub-test", - "Quantity": 240, - "QuantityMin": 2, - "QuantityAvg": 2, - "QuantityMax": 2, - "Unit": "tps" - } - } - }, - { - "Description": "test description", - "QueryName": "test", - "Source": "my-product", - "Target": "", - "Quantity": 2208, - "QuantityMin": 23, - "QuantityAvg": 23, - "QuantityMax": 23, - "Unit": "tps", - "PricePerUnit": 1, - "Discount": 0, - "Total": 2208, - "SubItems": { - "sub-test": { - "Description": "A sub query of Test", - "QueryName": "sub-test", - "Quantity": 192, - "QuantityMin": 2, - "QuantityAvg": 2, - "QuantityMax": 2, - "Unit": "tps" - }, - "sub-test2": { - "Description": "An other sub 
query of Test that stops early", - "QueryName": "sub-test2", - "Quantity": 0, - "QuantityMin": 0, - "QuantityAvg": 0, - "QuantityMax": 0, - "Unit": "tps" - } - } - } - ], - "Total": 10488 - } - ], - "Total": 10488 - } -] \ No newline at end of file diff --git a/pkg/odoo/odoo16.go b/pkg/odoo/odoo16.go new file mode 100644 index 0000000..29a8b81 --- /dev/null +++ b/pkg/odoo/odoo16.go @@ -0,0 +1,94 @@ +package odoo + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "time" + + "github.com/go-logr/logr" + "golang.org/x/oauth2/clientcredentials" +) + +type OdooAPIClient struct { + odooURL string + logger logr.Logger + oauthClient *http.Client +} + +type apiObject struct { + Data []OdooMeteredBillingRecord `json:"data"` +} + +type OdooMeteredBillingRecord struct { + ProductID string `json:"product_id"` + InstanceID string `json:"instance_id"` + ItemDescription string `json:"item_description,omitempty"` + ItemGroupDescription string `json:"item_group_description,omitempty"` + SalesOrderID string `json:"sales_order_id"` + UnitID string `json:"unit_id"` + ConsumedUnits float64 `json:"consumed_units"` + Timerange Timerange `json:"timerange"` +} + +type Timerange struct { + From time.Time + To time.Time +} + +func (t Timerange) MarshalJSON() ([]byte, error) { + return []byte(`"` + t.From.Format(time.RFC3339) + "/" + t.To.Format(time.RFC3339) + `"`), nil +} + +func (t *Timerange) UnmarshalJSON([]byte) error { + return errors.New("not implemented") +} + +func NewOdooAPIClient(ctx context.Context, odooURL string, oauthTokenURL string, oauthClientId string, oauthClientSecret string, logger logr.Logger) *OdooAPIClient { + oauthConfig := clientcredentials.Config{ + ClientID: oauthClientId, + ClientSecret: oauthClientSecret, + TokenURL: oauthTokenURL, + } + oauthClient := oauthConfig.Client(ctx) + return &OdooAPIClient{ + odooURL: odooURL, + logger: logger, + oauthClient: oauthClient, + } +} + +func NewOdooAPIWithClient(odooURL string, client *http.Client, logger logr.Logger) *OdooAPIClient { + return &OdooAPIClient{ + odooURL: odooURL, + logger: logger, + oauthClient: client, + } +} + +func (c OdooAPIClient) SendData(ctx context.Context, data []OdooMeteredBillingRecord) error { + apiObject := apiObject{ + Data: data, + } + str, err := json.Marshal(apiObject) + if err != nil { + return err + } + resp, err := c.oauthClient.Post(c.odooURL, "application/json", bytes.NewBuffer(str)) + if err != nil { + return err + } + defer resp.Body.Close() + body, _ := io.ReadAll(resp.Body) + c.logger.Info("Records sent to Odoo API", "status", resp.Status, "body", string(body), "numberOfRecords", len(data)) + + if resp.StatusCode != 200 { + return fmt.Errorf("API error when sending records to Odoo:\n%s", body) + } + + return nil +}
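
For reference, a minimal and hypothetical sketch of how a caller could wire up the client added above; the endpoint URL, token URL, credentials, and record values are placeholders, not values taken from this change:

package main

import (
	"context"
	"time"

	"github.com/go-logr/logr"

	"github.com/appuio/appuio-reporting/pkg/odoo"
)

func main() {
	ctx := context.Background()
	// NewOdooAPIClient sets up an OAuth2 client-credentials HTTP client;
	// both URLs and the credentials below are placeholders.
	client := odoo.NewOdooAPIClient(ctx,
		"https://odoo.example.com/api/billing",
		"https://odoo.example.com/oauth/token",
		"client-id", "client-secret",
		logr.Discard())

	now := time.Now().UTC().Truncate(time.Hour)
	record := odoo.OdooMeteredBillingRecord{
		ProductID:     "my-product",
		InstanceID:    "my-instance",
		SalesOrderID:  "SO00000",
		UnitID:        "my-unit",
		ConsumedUnits: 42,
		Timerange:     odoo.Timerange{From: now, To: now.Add(time.Hour)},
	}
	// SendData wraps the records in {"data": [...]}, POSTs them as JSON,
	// and turns any non-200 response into an error.
	if err := client.SendData(ctx, []odoo.OdooMeteredBillingRecord{record}); err != nil {
		panic(err)
	}
}

Tests can instead inject a plain *http.Client via NewOdooAPIWithClient, which is what the new test file below does with a mocked http.RoundTripper.
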
diff --git a/pkg/odoo/odoo16_test.go b/pkg/odoo/odoo16_test.go new file mode 100644 index 0000000..cdaf0d6 --- /dev/null +++ b/pkg/odoo/odoo16_test.go @@ -0,0 +1,130 @@ +package odoo_test + +import ( + "context" + "errors" + "io" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/go-logr/logr" + "github.com/stretchr/testify/require" + + "github.com/appuio/appuio-reporting/pkg/odoo" +) + +type mockRoundTripper struct { + cannedResponse *http.Response + receivedContent string +} + +type mockRoundTripperWhichFails struct { +} + +func (rt *mockRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + body, _ := io.ReadAll(req.Body) + rt.receivedContent = string(body) + return rt.cannedResponse, nil +} + +func (rt 
*mockRoundTripperWhichFails) RoundTrip(req *http.Request) (*http.Response, error) { + return nil, errors.New("there was an error") +} + +func TestOdooRecordsSent(t *testing.T) { + recorder := httptest.NewRecorder() + recorder.WriteString("success") + expectedResponse := recorder.Result() + + mrt := &mockRoundTripper{cannedResponse: expectedResponse} + client := http.Client{Transport: mrt} + + logger := logr.New(logr.Discard().GetSink()) + uut := odoo.NewOdooAPIWithClient("https://foo.bar/odoo16/", &client, logger) + + err := uut.SendData(context.Background(), []odoo.OdooMeteredBillingRecord{getOdooRecord()}) + + require.NoError(t, err) + require.Equal(t, `{"data":[{"product_id":"my-product","instance_id":"my-instance","item_description":"my-description","item_group_description":"my-group","sales_order_id":"SO00000","unit_id":"my-unit","consumed_units":11.1,"timerange":"2022-02-22T22:22:22Z/2022-02-22T23:22:22Z"}]}`, mrt.receivedContent) +} + +func TestErrorHandling(t *testing.T) { + mrt := &mockRoundTripperWhichFails{} + client := http.Client{Transport: mrt} + + logger := logr.New(logr.Discard().GetSink()) + uut := odoo.NewOdooAPIWithClient("https://foo.bar/odoo16/", &client, logger) + + err := uut.SendData(context.Background(), []odoo.OdooMeteredBillingRecord{getOdooRecord()}) + + require.Error(t, err) +} + +func TestErrorFromServerRaisesError(t *testing.T) { + recorder := httptest.NewRecorder() + recorder.WriteHeader(500) + recorder.WriteString(`{ + "arguments": [ + "data" + ], + "code": 500, + "context": {}, + "message": "data", + "name": "builtins.KeyError", + "traceback": [ + "Traceback (most recent call last):", + " File \"/opt/odoo/bin/odoo/http.py\", line 1589, in _serve_db", + " return service_model.retrying(self._serve_ir_http, self.env)", + " File \"/opt/odoo/bin/odoo/service/model.py\", line 133, in retrying", + " result = func()", + " File \"/opt/odoo/bin/odoo/http.py\", line 1616, in _serve_ir_http", + " response = self.dispatcher.dispatch(rule.endpoint, args)", + " File \"/opt/odoo/braintec/ext/muk_rest/core/http.py\", line 295, in dispatch", + " result = self.request.registry['ir.http']._dispatch(endpoint)", + " File \"/opt/odoo/bin/addons/website/models/ir_http.py\", line 237, in _dispatch", + " response = super()._dispatch(endpoint)", + " File \"/opt/odoo/braintec/ext/muk_rest/models/ir_http.py\", line 160, in _dispatch", + " response = super()._dispatch(endpoint)", + " File \"/opt/odoo/addons/monitoring_prometheus/models/ir_http.py\", line 38, in _dispatch", + " res = super()._dispatch(endpoint)", + " File \"/opt/odoo/bin/odoo/addons/base/models/ir_http.py\", line 154, in _dispatch", + " result = endpoint(**request.params)", + " File \"/opt/odoo/bin/odoo/http.py\", line 697, in route_wrapper", + " result = endpoint(self, *args, **params_ok)", + " File \"/opt/odoo/braintec/ext/muk_rest/core/http.py\", line 122, in wrapper", + " result = func(*args, **kwargs)", + " File \"/opt/odoo/braintec/vshn/vshn_metered_usage_rest/controllers/metered_usage_rest.py\", line 65, in vshn_send_metered_usage", + " 'payload': json.dumps(kw['data'], indent=4)", + "KeyError: 'data'" + ] +}`) + expectedResponse := recorder.Result() + + mrt := &mockRoundTripper{cannedResponse: expectedResponse} + client := http.Client{Transport: mrt} + + logger := logr.New(logr.Discard().GetSink()) + uut := odoo.NewOdooAPIWithClient("https://foo.bar/odoo16/", &client, logger) + + err := uut.SendData(context.Background(), []odoo.OdooMeteredBillingRecord{getOdooRecord()}) + + require.Error(t, err) +} + +func 
getOdooRecord() odoo.OdooMeteredBillingRecord { + return odoo.OdooMeteredBillingRecord{ + ProductID: "my-product", + UnitID: "my-unit", + SalesOrderID: "SO00000", + InstanceID: "my-instance", + ItemDescription: "my-description", + ItemGroupDescription: "my-group", + ConsumedUnits: 11.1, + Timerange: odoo.Timerange{ + From: time.Date(2022, 2, 22, 22, 22, 22, 222, time.UTC), + To: time.Date(2022, 2, 22, 23, 22, 22, 222, time.UTC), + }, + } +} diff --git a/pkg/report/report.go b/pkg/report/report.go index 5f62312..d3506bc 100644 --- a/pkg/report/report.go +++ b/pkg/report/report.go @@ -2,36 +2,50 @@ package report import ( "context" + "encoding/json" "fmt" - "strings" "time" - "github.com/appuio/appuio-cloud-reporting/pkg/db" - "github.com/appuio/appuio-cloud-reporting/pkg/sourcekey" - "github.com/jackc/pgx/v4" - "github.com/jmoiron/sqlx" + "github.com/appuio/appuio-reporting/pkg/odoo" + "github.com/google/go-jsonnet" apiv1 "github.com/prometheus/client_golang/api/prometheus/v1" "github.com/prometheus/common/model" + "go.uber.org/multierr" ) type PromQuerier interface { Query(ctx context.Context, query string, ts time.Time, opts ...apiv1.Option) (model.Value, apiv1.Warnings, error) } +type OdooClient interface { + SendData(ctx context.Context, data []odoo.OdooMeteredBillingRecord) error +} + +type ReportArgs struct { + Query string + InstanceJsonnet string + ItemDescriptionJsonnet string + ItemGroupDescriptionJsonnet string + UnitID string + ProductID string + TimerangeSize time.Duration + OverrideSalesOrderID string +} + +const SalesOrderIDLabel = "sales_order_id" + // RunRange executes prometheus queries like Run() until the `until` timestamp is reached or an error occurred. // Returns the number of reports run and a possible error. -func RunRange(ctx context.Context, database *sqlx.DB, prom PromQuerier, queryName string, from time.Time, until time.Time, options ...Option) (int, error) { +func RunRange(ctx context.Context, odoo OdooClient, prom PromQuerier, args ReportArgs, from time.Time, until time.Time, options ...Option) (int, error) { opts := buildOptions(options) n := 0 - for currentTime := from; until.After(currentTime); currentTime = currentTime.Add(time.Hour) { + for currentTime := from; until.After(currentTime); currentTime = currentTime.Add(args.TimerangeSize) { n++ if opts.progressReporter != nil { opts.progressReporter(Progress{currentTime, n}) } - if err := db.RunInTransaction(ctx, database, func(tx *sqlx.Tx) error { - return Run(ctx, tx, prom, queryName, currentTime, options...) - }); err != nil { + if err := Run(ctx, odoo, prom, args, currentTime, options...); err != nil { return n, fmt.Errorf("error running report at %s: %w", currentTime.Format(time.RFC3339), err) } } @@ -41,7 +55,7 @@ func RunRange(ctx context.Context, database *sqlx.DB, prom PromQuerier, queryNam // Run executes a prometheus query loaded from queries with using the `queryName` and the timestamp. // The results of the query are saved in the facts table. 
-func Run(ctx context.Context, tx *sqlx.Tx, prom PromQuerier, queryName string, from time.Time, options ...Option) error { +func Run(ctx context.Context, odoo OdooClient, prom PromQuerier, args ReportArgs, from time.Time, options ...Option) error { opts := buildOptions(options) from = from.In(time.UTC) @@ -49,31 +63,14 @@ func Run(ctx context.Context, tx *sqlx.Tx, prom PromQuerier, queryName string, f return fmt.Errorf("timestamp should only contain full hours based on UTC, got: %s", from.Format(time.RFC3339Nano)) } - var query db.Query - if err := sqlx.GetContext(ctx, tx, &query, "SELECT * FROM queries WHERE name = $1 AND (during @> $2::timestamptz)", queryName, from); err != nil { - return fmt.Errorf("failed to load query '%s' at '%s': %w", queryName, from.Format(time.RFC3339), err) - } - - if err := runQuery(ctx, tx, prom, query, from, opts); err != nil { - return fmt.Errorf("failed to run query '%s' at '%s': %w", queryName, from.Format(time.RFC3339), err) - } - - var subQueries []db.Query - if err := sqlx.SelectContext(ctx, tx, &subQueries, - "SELECT id, name, description, query, unit, during FROM queries WHERE parent_id = $1 AND (during @> $2::timestamptz)", query.Id, from, - ); err != nil { - return fmt.Errorf("failed to load subQueries for '%s' at '%s': %w", queryName, from.Format(time.RFC3339), err) - } - for _, subQuery := range subQueries { - if err := runQuery(ctx, tx, prom, subQuery, from, opts); err != nil { - return fmt.Errorf("failed to run subQuery '%s' at '%s': %w", subQuery.Name, from.Format(time.RFC3339), err) - } + if err := runQuery(ctx, odoo, prom, args, from, opts); err != nil { + return fmt.Errorf("failed to run query '%s' at '%s': %w", args.Query, from.Format(time.RFC3339), err) } return nil } -func runQuery(ctx context.Context, tx *sqlx.Tx, prom PromQuerier, query db.Query, from time.Time, opts options) error { +func runQuery(ctx context.Context, odooClient OdooClient, prom PromQuerier, args ReportArgs, from time.Time, opts options) error { promQCtx := ctx if opts.prometheusQueryTimeout != 0 { ctx, cancel := context.WithTimeout(promQCtx, opts.prometheusQueryTimeout) @@ -82,7 +79,7 @@ func runQuery(ctx context.Context, tx *sqlx.Tx, prom PromQuerier, query db.Query } // The data in the database is from T to T+1h. Prometheus queries backwards from T to T-1h. 
- res, _, err := prom.Query(promQCtx, query.Query, from.Add(time.Hour)) + res, _, err := prom.Query(promQCtx, args.Query, from.Add(args.TimerangeSize)) if err != nil { return fmt.Errorf("failed to query prometheus: %w", err) } @@ -92,168 +89,93 @@ func runQuery(ctx context.Context, tx *sqlx.Tx, prom PromQuerier, query db.Query return fmt.Errorf("expected prometheus query to return a model.Vector, got %T", res) } + var errs error + var records []odoo.OdooMeteredBillingRecord for _, sample := range samples { - if err := processSample(ctx, tx, from, query, sample); err != nil { - return fmt.Errorf("failed to process sample: %w", err) + record, err := processSample(ctx, odooClient, args, from, sample) + if err != nil { + errs = multierr.Append(errs, fmt.Errorf("failed to process sample: %w", err)) + } else { + records = append(records, *record) } } - return nil + return multierr.Append(errs, odooClient.SendData(ctx, records)) } -func processSample(ctx context.Context, tx *sqlx.Tx, ts time.Time, query db.Query, s *model.Sample) error { - category, err := getMetricLabel(s.Metric, "category") - if err != nil { - return err - } - productLabel, err := getMetricLabel(s.Metric, "product") - if err != nil { - return err - } +func processSample(ctx context.Context, odooClient OdooClient, args ReportArgs, from time.Time, s *model.Sample) (*odoo.OdooMeteredBillingRecord, error) { + metricLabels := s.Metric - skey, err := sourcekey.Parse(string(productLabel)) - if err != nil { - return fmt.Errorf("failed to parse source key from product label: %w", err) + salesOrderID := "" + if args.OverrideSalesOrderID != "" { + salesOrderID = args.OverrideSalesOrderID + } else { + sid, err := getMetricLabel(s.Metric, SalesOrderIDLabel) + if err != nil { + return nil, err + } + salesOrderID = string(sid) } - var upsertedTenant db.Tenant - err = upsertTenant(ctx, tx, &upsertedTenant, db.Tenant{ - Source: skey.Tenant(), - }, ts) + labelList, err := json.Marshal(metricLabels) if err != nil { - return fmt.Errorf("failed to upsert tenant '%s': %w", skey.Tenant(), err) - } - - var upsertedCategory db.Category - if err := upsertCategory(ctx, tx, &upsertedCategory, db.Category{Source: string(category)}); err != nil { - return err - } - - sourceLookup := skey.LookupKeys() - - var product db.Product - if err := getBySourceKeyAndTime(ctx, tx, &product, pgx.Identifier{"products"}, sourceLookup, ts); err != nil { - return fmt.Errorf("failed to load product for '%s': %w", productLabel, err) + return nil, err } - var discount db.Discount - if err := getBySourceKeyAndTime(ctx, tx, &discount, pgx.Identifier{"discounts"}, sourceLookup, ts); err != nil { - return fmt.Errorf("failed to load discount for '%s': %w", productLabel, err) - } + vm := jsonnet.MakeVM() + vm.ExtCode("labels", string(labelList)) - var upsertedDateTime db.DateTime - err = upsertDateTime(ctx, tx, &upsertedDateTime, db.BuildDateTime(ts)) + instance, err := vm.EvaluateAnonymousSnippet("instance.json", args.InstanceJsonnet) if err != nil { - return fmt.Errorf("failed to upsert date_time '%s': %w", ts.Format(time.RFC3339), err) + return nil, err } - - var upsertedFact db.Fact - err = upsertFact(ctx, tx, &upsertedFact, db.Fact{ - DateTimeId: upsertedDateTime.Id, - TenantId: upsertedTenant.Id, - CategoryId: upsertedCategory.Id, - QueryId: query.Id, - ProductId: product.Id, - DiscountId: discount.Id, - Quantity: float64(s.Value), - }) + instanceStr := "" + err = json.Unmarshal([]byte(instance), &instanceStr) if err != nil { - return fmt.Errorf("failed to upsert fact '%s': 
%w", ts.Format(time.RFC3339), err) + return nil, fmt.Errorf("failed to interpolate instance template: %w", err) } - return nil -} - -// getBySourceKeyAndTime gets the first record matching a key in keys while preserving the priority or order of the keys. -// The first key has the highest priority while the last key has the lowest priority. -// If keys are [a,b,c] and records [a,c] exist a is chosen. -func getBySourceKeyAndTime(ctx context.Context, q sqlx.QueryerContext, dest interface{}, table pgx.Identifier, keys []string, ts time.Time) error { - const query = `WITH keys AS ( - -- add a priority to keep track of which key match we should choose - -- first key -> prio 1, third key -> prio 3 - SELECT row_number() over () AS prio, unnest as key - -- unpack the given array of strings into rows - FROM unnest($1::text[]) - ) - SELECT {{table}}.* - FROM {{table}} - INNER JOIN keys ON (keys.key = {{table}}.source) - WHERE during @> $2::timestamptz - ORDER BY prio - LIMIT 1` - return sqlx.GetContext(ctx, q, dest, strings.ReplaceAll(query, "{{table}}", table.Sanitize()), keys, ts) -} - -func upsertFact(ctx context.Context, tx *sqlx.Tx, dst *db.Fact, src db.Fact) error { - err := db.GetNamedContext(ctx, tx, dst, - `INSERT INTO facts - (date_time_id,query_id,tenant_id,category_id,product_id,discount_id,quantity) - VALUES - (:date_time_id,:query_id,:tenant_id,:category_id,:product_id,:discount_id,:quantity) - ON CONFLICT (date_time_id,query_id,tenant_id,category_id,product_id,discount_id) - DO UPDATE SET quantity = :quantity - RETURNING *`, - src) - if err != nil { - return fmt.Errorf("failed to upsert fact %+v: %w", src, err) + var groupStr string + if args.ItemGroupDescriptionJsonnet != "" { + group, err := vm.EvaluateAnonymousSnippet("group.json", args.ItemGroupDescriptionJsonnet) + if err != nil { + return nil, fmt.Errorf("failed to interpolate group description template: %w", err) + } + err = json.Unmarshal([]byte(group), &groupStr) + if err != nil { + return nil, err + } } - return nil -} -func upsertCategory(ctx context.Context, tx *sqlx.Tx, dst *db.Category, src db.Category) error { - err := db.GetNamedContext(ctx, tx, dst, - `WITH - existing AS ( - SELECT * FROM categories WHERE source = :source - ), - inserted AS ( - INSERT INTO categories (source) - SELECT :source WHERE NOT EXISTS (SELECT 1 FROM existing) - RETURNING * - ) - SELECT * FROM inserted UNION ALL SELECT * FROM existing`, - src) - if err != nil { - return fmt.Errorf("failed to upsert category %+v: %w", src, err) + var descriptionStr string + if args.ItemDescriptionJsonnet != "" { + description, err := vm.EvaluateAnonymousSnippet("description.json", args.ItemDescriptionJsonnet) + if err != nil { + return nil, fmt.Errorf("failed to interpolate description template: %w", err) + } + err = json.Unmarshal([]byte(description), &descriptionStr) + if err != nil { + return nil, err + } } - return nil -} -func upsertTenant(ctx context.Context, tx *sqlx.Tx, dst *db.Tenant, src db.Tenant, ts time.Time) error { - err := sqlx.GetContext(ctx, tx, dst, - `WITH - existing AS ( - SELECT * FROM tenants WHERE source = $1 AND during @> $2::timestamptz - ), - inserted AS ( - INSERT INTO tenants (source) - SELECT $1 WHERE NOT EXISTS (SELECT 1 FROM existing) - RETURNING * - ) - SELECT * FROM inserted UNION ALL SELECT * FROM existing`, src.Source, ts) - if err != nil { - return fmt.Errorf("failed to upsert tenant %+v: %w", src, err) + timerange := odoo.Timerange{ + From: from, + To: from.Add(args.TimerangeSize), } - return nil -} -func upsertDateTime(ctx 
context.Context, tx *sqlx.Tx, dst *db.DateTime, src db.DateTime) error { - err := db.GetNamedContext(ctx, tx, dst, - `WITH - existing AS ( - SELECT * FROM date_times WHERE year = :year AND month = :month AND day = :day AND hour = :hour - ), - inserted AS ( - INSERT INTO date_times (timestamp, year, month, day, hour) - SELECT :timestamp, :year, :month, :day, :hour WHERE NOT EXISTS (SELECT 1 FROM existing) - RETURNING * - ) - SELECT * FROM inserted UNION ALL SELECT * FROM existing`, - src) - if err != nil { - return fmt.Errorf("failed to upsert date_time %+v: %w", src, err) + record := odoo.OdooMeteredBillingRecord{ + ProductID: args.ProductID, + InstanceID: instanceStr, + ItemDescription: descriptionStr, + ItemGroupDescription: groupStr, + SalesOrderID: salesOrderID, + UnitID: args.UnitID, + ConsumedUnits: float64(s.Value), + Timerange: timerange, } - return nil + + return &record, nil } func getMetricLabel(m model.Metric, name string) (model.LabelValue, error) { diff --git a/pkg/report/report_test.go b/pkg/report/report_test.go index bf42902..5f58c02 100644 --- a/pkg/report/report_test.go +++ b/pkg/report/report_test.go @@ -2,27 +2,24 @@ package report_test import ( "context" - "database/sql" "fmt" "testing" "time" - "github.com/jackc/pgtype" - "github.com/jmoiron/sqlx" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/appuio/appuio-cloud-reporting/pkg/db" - "github.com/appuio/appuio-cloud-reporting/pkg/report" - "github.com/appuio/appuio-cloud-reporting/pkg/testsuite" + "github.com/appuio/appuio-reporting/pkg/odoo" + "github.com/appuio/appuio-reporting/pkg/report" + "github.com/appuio/appuio-reporting/pkg/testsuite" ) type ReportSuite struct { testsuite.Suite +} - sampleProduct db.Product - sampleDiscount db.Discount - sampleQuery db.Query +func TestReport(t *testing.T) { + suite.Run(t, new(ReportSuite)) } const defaultQueryReturnValue = 42 @@ -31,79 +28,48 @@ const promTestquery = ` label_replace( label_replace( label_replace( - vector(%d), - "category", "my-cluster:my-namespace", "", "" + label_replace( + vector(%d), + "namespace", "my-namespace", "", "" + ), + "product", "my-product", "", "" ), - "product", "my-product:my-cluster:my-tenant:my-namespace", "", "" + "tenant", "my-tenant", "", "" ), - "tenant", "my-tenant", "", "" + "sales_order_id", "SO00000", "", "" ) ` -const promBarTestquery = ` +const promInvalidTestquery = ` label_replace( label_replace( label_replace( vector(%d), - "category", "my-cluster:my-namespace", "", "" + "namespace", "my-namespace", "", "" ), - "product", "bar-product:my-cluster:my-tenant:my-namespace", "", "" + "product", "my-product", "", "" ), "tenant", "my-tenant", "", "" ) ` -func (s *ReportSuite) SetupSuite() { - s.Suite.SetupSuite() - - t := s.T() - tdb := s.DB() - - s.sampleProduct = s.createProduct(tdb, "my-product:my-cluster") - - require.NoError(t, - db.GetNamed(tdb, &s.sampleDiscount, - "INSERT INTO discounts (source,discount,during) VALUES (:source,:discount,:during) RETURNING *", db.Discount{ - Source: "my-product:my-cluster", - Discount: 0.5, - During: infiniteRange(), - })) - - require.NoError(t, - db.GetNamed(tdb, &s.sampleQuery, - "INSERT INTO queries (name,description,query,unit,during) VALUES (:name,:description,:query,:unit,:during) RETURNING *", db.Query{ - Name: "test", - Query: fmt.Sprintf(promTestquery, defaultQueryReturnValue), - Unit: "tps", - During: infiniteRange(), - })) - -} - func (s *ReportSuite) TestReport_ReturnsErrorIfTimestampContainsUnitsSmallerOneHour() { t := s.T() + o := 
&MockOdooClient{} prom := s.PrometheusAPIClient() - query := s.sampleQuery - - tx, err := s.DB().Beginx() - require.NoError(t, err) - defer tx.Rollback() baseTime := time.Date(2020, time.January, 23, 17, 0, 0, 0, time.UTC) for _, d := range []time.Duration{time.Minute, time.Second, time.Nanosecond} { - require.Error(t, report.Run(context.Background(), tx, prom, query.Name, baseTime.Add(d))) + require.Error(t, report.Run(context.Background(), o, prom, getReportArgs(), baseTime.Add(d))) } } func (s *ReportSuite) TestReport_RunRange() { t := s.T() + o := &MockOdooClient{} prom := s.PrometheusAPIClient() - query := s.sampleQuery - tdb := s.DB() const hoursToCalculate = 3 - defer tdb.Exec("DELETE FROM facts") - base := time.Date(2020, time.January, 23, 17, 0, 0, 0, time.UTC) expectedProgress := []report.Progress{ @@ -114,186 +80,109 @@ func (s *ReportSuite) TestReport_RunRange() { progress := make([]report.Progress, 0) - c, err := report.RunRange(context.Background(), tdb, prom, query.Name, base, base.Add(hoursToCalculate*time.Hour), + c, err := report.RunRange(context.Background(), o, prom, getReportArgs(), base, base.Add(hoursToCalculate*time.Hour), report.WithProgressReporter(func(p report.Progress) { progress = append(progress, p) }), ) require.NoError(t, err) require.Equal(t, hoursToCalculate, c) require.Equal(t, expectedProgress, progress) - var factCount int - require.NoError(t, sqlx.Get(tdb, &factCount, "SELECT COUNT(*) FROM facts")) - require.Equal(t, hoursToCalculate, factCount) + require.Equal(t, hoursToCalculate, o.totalReceived) } -func (s *ReportSuite) TestReport_RunReportCreatesFact() { +func (s *ReportSuite) TestReport_Run() { t := s.T() + o := &MockOdooClient{} prom := s.PrometheusAPIClient() - query := s.sampleQuery + args := getReportArgs() - tx, err := s.DB().Beginx() - require.NoError(t, err) - defer tx.Rollback() + args.InstanceJsonnet = `local labels = std.extVar("labels"); "%(tenant)s" % labels` + args.ItemGroupDescriptionJsonnet = `local labels = std.extVar("labels"); "%(namespace)s" % labels` + args.ItemDescriptionJsonnet = `local labels = std.extVar("labels"); "%(product)s" % labels` - ts := time.Now().Truncate(time.Hour) - require.NoError(t, report.Run(context.Background(), tx, prom, query.Name, ts, report.WithPrometheusQueryTimeout(time.Second))) - fact := s.requireFactForQueryIdAndProductSource(tx, query, "my-product:my-cluster", ts) - require.Equal(t, float64(defaultQueryReturnValue), fact.Quantity) -} + from := time.Date(2020, time.January, 23, 17, 0, 0, 0, time.UTC) -func (s *ReportSuite) TestReport_RunReportCreatesSubFact() { - t := s.T() - prom := s.PrometheusAPIClient() - tdb := s.DB() - s.createProduct(tdb, "bar-product:my-cluster") - disc := db.Discount{} - require.NoError(t, - db.GetNamed(tdb, &disc, - "INSERT INTO discounts (source,discount,during) VALUES (:source,:discount,:during) RETURNING *", db.Discount{ - Source: "bar-product", - Discount: 0, - During: infiniteRange(), - })) - query := db.Query{} - require.NoError(t, - db.GetNamed(tdb, &query, - "INSERT INTO queries (name,description,query,unit,during) VALUES (:name,:description,:query,:unit,:during) RETURNING *", db.Query{ - Name: "bar", - Query: fmt.Sprintf(promBarTestquery, defaultQueryReturnValue), - Unit: "tps", - During: infiniteRange(), - })) - subquery := db.Query{} - require.NoError(t, - db.GetNamed(tdb, &subquery, - "INSERT INTO queries (parent_id,name,description,query,unit,during) VALUES (:parent_id,:name,:description,:query,:unit,:during) RETURNING *", db.Query{ - ParentID: 
sql.NullString{ - String: query.Id, - Valid: true, - }, - Name: "sub-bar", - Query: fmt.Sprintf(promBarTestquery, defaultSubQueryReturnValue), - Unit: "tps", - During: infiniteRange(), - })) - - tx, err := s.DB().Beginx() + err := report.Run(context.Background(), o, prom, args, from) require.NoError(t, err) - defer tx.Rollback() - - ts := time.Now().Truncate(time.Hour) - require.NoError(t, report.Run(context.Background(), tx, prom, query.Name, ts, report.WithPrometheusQueryTimeout(time.Second))) - fact := s.requireFactForQueryIdAndProductSource(tx, query, "bar-product:my-cluster", ts) - require.Equal(t, float64(defaultQueryReturnValue), fact.Quantity) - subfact := s.requireFactForQueryIdAndProductSource(tx, subquery, "bar-product:my-cluster", ts) - require.Equal(t, float64(defaultSubQueryReturnValue), subfact.Quantity) + + require.Equal(t, "my-namespace", o.lastReceivedData[0].ItemGroupDescription) + require.Equal(t, "my-tenant", o.lastReceivedData[0].InstanceID) + require.Equal(t, "my-product", o.lastReceivedData[0].ItemDescription) + require.Equal(t, 1.0, o.lastReceivedData[0].ConsumedUnits) + require.Equal(t, "SO00000", o.lastReceivedData[0].SalesOrderID) } -func (s *ReportSuite) TestReport_RunReportNonLockingUpsert() { +func (s *ReportSuite) TestReport_RequireErrorWhenInvalidTemplateVariable() { t := s.T() + o := &MockOdooClient{} prom := s.PrometheusAPIClient() - query := s.sampleQuery + from := time.Date(2020, time.January, 23, 17, 0, 0, 0, time.UTC) - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() + args := getReportArgs() + args.InstanceJsonnet = `local labels = std.extVar("labels"); "%(doesnotexist)s" % labels` - defer s.DB().Exec("DELETE FROM facts") + err := report.Run(context.Background(), o, prom, args, from) + require.Error(t, err) - tx, err := s.DB().Beginx() - require.NoError(t, err) - defer tx.Rollback() + args = getReportArgs() + args.ItemGroupDescriptionJsonnet = `local labels = std.extVar("labels"); "%(doesnotexist)s" % labels` - tx1, err := s.DB().BeginTxx(ctx, nil) - require.NoError(t, err) - defer tx1.Rollback() + err = report.Run(context.Background(), o, prom, args, from) + require.Error(t, err) - tx2, err := s.DB().BeginTxx(ctx, nil) - require.NoError(t, err) - defer tx2.Rollback() - - baseTime := time.Date(2020, time.January, 23, 17, 0, 0, 0, time.UTC) - require.NoError(t, report.Run(context.Background(), tx, prom, query.Name, baseTime)) - require.NoError(t, tx.Commit()) + args = getReportArgs() + args.ItemDescriptionJsonnet = `local labels = std.extVar("labels"); "%(doesnotexist)s" % labels` - require.NoError(t, report.Run(context.Background(), tx1, prom, query.Name, baseTime.Add(1*time.Hour)), "transaction should not be blocked on upsert") - require.NoError(t, report.Run(context.Background(), tx2, prom, query.Name, baseTime.Add(2*time.Hour)), "transaction should not be blocked on upsert") - - require.NoError(t, tx2.Commit(), "transaction should not be blocked on commit") - require.NoError(t, tx1.Commit(), "transaction should not be blocked on commit") + err = report.Run(context.Background(), o, prom, args, from) + require.Error(t, err) } -func (s *ReportSuite) TestReport_RerunReportUpdatesFactQuantity() { +func (s *ReportSuite) TestReport_RequireErrorWhenNoSalesOrder() { t := s.T() + o := &MockOdooClient{} prom := s.PrometheusAPIClient() - query := s.sampleQuery - - tx, err := s.DB().Beginx() - require.NoError(t, err) - defer tx.Rollback() + args := getReportArgs() + args.Query = fmt.Sprintf(promInvalidTestquery, 1) - ts 
:= time.Now().Truncate(time.Hour) - require.NoError(t, report.Run(context.Background(), tx, prom, query.Name, ts)) + from := time.Date(2020, time.January, 23, 17, 0, 0, 0, time.UTC) - _, err = tx.Exec("UPDATE queries SET query = $1 WHERE id = $2", fmt.Sprintf(promTestquery, 77), query.Id) - require.NoError(t, err) - require.NoError(t, report.Run(context.Background(), tx, prom, query.Name, ts)) - fact := s.requireFactForQueryIdAndProductSource(tx, query, "my-product:my-cluster", ts) - require.Equal(t, float64(77), fact.Quantity) + err := report.Run(context.Background(), o, prom, args, from) + require.Error(t, err) } -func (s *ReportSuite) TestReport_ProductSpecificityOfSource() { +func (s *ReportSuite) TestReport_OverrideSalesOrderID() { t := s.T() + o := &MockOdooClient{} prom := s.PrometheusAPIClient() - query := s.sampleQuery + args := getReportArgs() + args.OverrideSalesOrderID = "myoverride" - tx, err := s.DB().Beginx() - require.NoError(t, err) - defer tx.Rollback() - - ts := time.Now().Truncate(time.Hour) - require.NoError(t, report.Run(context.Background(), tx, prom, query.Name, ts)) - s.requireFactForQueryIdAndProductSource(tx, query, "my-product:my-cluster", ts) - - wildcardProduct := s.createProduct(tx, "my-product:*:my-tenant") - require.NoError(t, report.Run(context.Background(), tx, prom, query.Name, ts)) - fact := s.requireFactForQueryIdAndProductSource(tx, query, "my-product:*:my-tenant", ts) - require.Equal(t, wildcardProduct.Id, fact.ProductId) - - specificProduct := s.createProduct(tx, "my-product:my-cluster:my-tenant:my-namespace") - require.NoError(t, report.Run(context.Background(), tx, prom, query.Name, ts)) - fact = s.requireFactForQueryIdAndProductSource(tx, query, "my-product:my-cluster:my-tenant:my-namespace", ts) - require.Equal(t, specificProduct.Id, fact.ProductId) -} + from := time.Date(2020, time.January, 23, 17, 0, 0, 0, time.UTC) -func TestReport(t *testing.T) { - suite.Run(t, new(ReportSuite)) + err := report.Run(context.Background(), o, prom, args, from) + require.NoError(t, err) + require.Equal(t, "myoverride", o.lastReceivedData[0].SalesOrderID) } -func (s *ReportSuite) createProduct(p db.NamedPreparer, source string) db.Product { - var product db.Product - require.NoError(s.T(), - db.GetNamed(p, &product, - "INSERT INTO products (source,target,amount,unit,during) VALUES (:source,:target,:amount,:unit,:during) RETURNING *", db.Product{ - Source: source, - Amount: 1, - Unit: "tps", - During: infiniteRange(), - })) - - return product +func getReportArgs() report.ReportArgs { + return report.ReportArgs{ + ProductID: "myProductId", + UnitID: "unit_kg", + Query: fmt.Sprintf(promTestquery, 1), + InstanceJsonnet: `"myinstance"`, + ItemGroupDescriptionJsonnet: `"myitemgroup"`, + ItemDescriptionJsonnet: `"myitemdescription"`, + TimerangeSize: time.Hour, + } } -func (s *ReportSuite) requireFactForQueryIdAndProductSource(dbq sqlx.Queryer, q db.Query, productSource string, ts time.Time) db.Fact { - var fact db.Fact - require.NoError( - s.T(), - sqlx.Get( - dbq, &fact, - "SELECT facts.* FROM facts INNER JOIN products ON (facts.product_id = products.id) WHERE facts.query_id = $1 AND products.source = $2", - q.Id, productSource)) - return fact +type MockOdooClient struct { + totalReceived int + lastReceivedData []odoo.OdooMeteredBillingRecord } -func infiniteRange() pgtype.Tstzrange { - return db.Timerange(db.MustTimestamp(pgtype.NegativeInfinity), db.MustTimestamp(pgtype.Infinity)) +func (c *MockOdooClient) SendData(ctx context.Context, data 
[]odoo.OdooMeteredBillingRecord) error { + c.lastReceivedData = data + c.totalReceived += 1 + return nil } diff --git a/pkg/sourcekey/sourcekey.go b/pkg/sourcekey/sourcekey.go deleted file mode 100644 index f599a1a..0000000 --- a/pkg/sourcekey/sourcekey.go +++ /dev/null @@ -1,109 +0,0 @@ -package sourcekey - -import ( - "fmt" - "math" - "math/bits" - "sort" - "strings" -) - -const elementSeparator = ":" - -// SourceKey represents a source key to look up dimensions objects (currently queries and products). -// It implements the lookup logic found in https://kb.vshn.ch/appuio-cloud/references/architecture/metering-data-flow.html#_system_idea. -type SourceKey struct { - parts []string -} - -// Parse parses a source key in the format of "query:zone:tenant:namespace:class" or "query:zone:tenant:namespace". -func Parse(raw string) (SourceKey, error) { - parts := strings.Split(raw, elementSeparator) - if parts[len(parts)-1] == "" { - parts = parts[0 : len(parts)-1] - } - if len(parts) >= 4 { - return SourceKey{parts}, nil - } - - return SourceKey{}, fmt.Errorf("expected key with at least 4 elements separated by `%s` got %d", elementSeparator, len(parts)) -} - -// Tenant returns the third element of the source key which was historically used as the tenant. -// -// Deprecated: We would like to get rid of this and read the tenant from a metric label. -func (k SourceKey) Tenant() string { - return k.parts[2] -} - -// Part returns the i-th part of the source key, or an empty string if no such part exists -func (k SourceKey) Part(i int) string { - if i < len(k.parts) { - return k.parts[i] - } - return "" -} - -// String returns the string representation "query:zone:tenant:namespace:class" of the key. -func (k SourceKey) String() string { - return strings.Join(k.parts, elementSeparator) -} - -// LookupKeys generates lookup keys for a dimension object in the database. -// The logic is described here: https://kb.vshn.ch/appuio-cloud/references/architecture/metering-data-flow.html#_system_idea -func (k SourceKey) LookupKeys() []string { - - keys := make([]string, 0) - currentKeyBase := k.parts - - for len(currentKeyBase) > 1 { - // For the base key of a given length l, the inner l-2 elements are to be replaced with wildcards in all possible combinations. - // To that end, generate 2^(l-2) binary numbers, sort them by specificity, and then for each number generate a key where - // for each 1-digit, the element is replaced with a wildcard (and for a 0-digit, the element is kept as-is). - innerLength := len(currentKeyBase) - 2 - nums := makeRange(0, int(math.Pow(2, float64(innerLength)))) - sort.Sort(sortBySpecificity(nums)) - for i := range nums { - currentKeyElements := make([]string, 0) - currentKeyElements = append(currentKeyElements, currentKeyBase[0]) - for digit := 0; digit < innerLength; digit++ { - if nums[i]&uint(math.Pow(2, float64(innerLength-1-digit))) > 0 { - currentKeyElements = append(currentKeyElements, "*") - } else { - currentKeyElements = append(currentKeyElements, currentKeyBase[1+digit]) - } - } - currentKeyElements = append(currentKeyElements, currentKeyBase[len(currentKeyBase)-1]) - keys = append(keys, strings.Join(currentKeyElements, elementSeparator)) - } - currentKeyBase = currentKeyBase[0 : len(currentKeyBase)-1] - } - keys = append(keys, currentKeyBase[0]) - return keys -} - -// SortBySpecificity sorts an array of uints representing binary numbers, such that numbers with fewer 1-digits come first. -// Numbers with an equal amount of 1-digits are sorted by magnitude. 
-type sortBySpecificity []uint - -func (a sortBySpecificity) Len() int { return len(a) } -func (a sortBySpecificity) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a sortBySpecificity) Less(i, j int) bool { - onesI := bits.OnesCount(a[i]) - onesJ := bits.OnesCount(a[j]) - if onesI < onesJ { - return true - } - if onesI > onesJ { - return false - } - return a[i] < a[j] -} - -func makeRange(min, max int) []uint { - a := make([]uint, max-min) - for i := range a { - a[i] = uint(min + i) - } - return a -} diff --git a/pkg/sourcekey/sourcekey_test.go b/pkg/sourcekey/sourcekey_test.go deleted file mode 100644 index 8b6fa89..0000000 --- a/pkg/sourcekey/sourcekey_test.go +++ /dev/null @@ -1,133 +0,0 @@ -package sourcekey - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestParseInvalidKey(t *testing.T) { - _, err := Parse("appuio_cloud_storage:c-appuio-cloudscale-lpg-2") - require.Error(t, err) -} - -func TestParseWithclass(t *testing.T) { - k, err := Parse("appuio_cloud_storage:c-appuio-cloudscale-lpg-2:acme-corp:sparkling-sound-1234:ssd") - require.NoError(t, err) - require.Equal(t, SourceKey{ - parts: []string{"appuio_cloud_storage", "c-appuio-cloudscale-lpg-2", "acme-corp", "sparkling-sound-1234", "ssd"}, - }, k) -} - -func TestParseWithoutclass(t *testing.T) { - k, err := Parse("appuio_cloud_storage:c-appuio-cloudscale-lpg-2:acme-corp:sparkling-sound-1234") - require.NoError(t, err) - require.Equal(t, SourceKey{ - parts: []string{"appuio_cloud_storage", "c-appuio-cloudscale-lpg-2", "acme-corp", "sparkling-sound-1234"}, - }, k) -} - -func TestParseWithEmptyclass(t *testing.T) { - k, err := Parse("appuio_cloud_storage:c-appuio-cloudscale-lpg-2:acme-corp:sparkling-sound-1234:") - require.NoError(t, err) - require.Equal(t, SourceKey{ - parts: []string{"appuio_cloud_storage", "c-appuio-cloudscale-lpg-2", "acme-corp", "sparkling-sound-1234"}, - }, k) -} - -func TestStringWithclass(t *testing.T) { - key := SourceKey{ - parts: []string{"appuio_cloud_storage", "c-appuio-cloudscale-lpg-2", "acme-corp", "sparkling-sound-1234", "ssd"}, - } - require.Equal(t, "appuio_cloud_storage:c-appuio-cloudscale-lpg-2:acme-corp:sparkling-sound-1234:ssd", key.String()) -} - -func TestStringWithoutclass(t *testing.T) { - key := SourceKey{ - parts: []string{"appuio_cloud_storage", "c-appuio-cloudscale-lpg-2", "acme-corp", "sparkling-sound-1234"}, - } - require.Equal(t, "appuio_cloud_storage:c-appuio-cloudscale-lpg-2:acme-corp:sparkling-sound-1234", key.String()) -} - -func TestGenerateSourceKeysWithoutclass(t *testing.T) { - keys := SourceKey{ - parts: []string{"appuio_cloud_storage", "c-appuio-cloudscale-lpg-2", "acme-corp", "sparkling-sound-1234"}, - }.LookupKeys() - - require.Equal(t, []string{ - "appuio_cloud_storage:c-appuio-cloudscale-lpg-2:acme-corp:sparkling-sound-1234", - "appuio_cloud_storage:c-appuio-cloudscale-lpg-2:*:sparkling-sound-1234", - "appuio_cloud_storage:*:acme-corp:sparkling-sound-1234", - "appuio_cloud_storage:*:*:sparkling-sound-1234", - "appuio_cloud_storage:c-appuio-cloudscale-lpg-2:acme-corp", - "appuio_cloud_storage:*:acme-corp", - "appuio_cloud_storage:c-appuio-cloudscale-lpg-2", - "appuio_cloud_storage", - }, keys) -} - -func TestGenerateSourceKeysWithclass(t *testing.T) { - keys := SourceKey{ - parts: []string{"appuio_cloud_storage", "c-appuio-cloudscale-lpg-2", "acme-corp", "sparkling-sound-1234", "ssd"}, - }.LookupKeys() - - require.Equal(t, []string{ - "appuio_cloud_storage:c-appuio-cloudscale-lpg-2:acme-corp:sparkling-sound-1234:ssd", - 
"appuio_cloud_storage:c-appuio-cloudscale-lpg-2:acme-corp:*:ssd", - "appuio_cloud_storage:c-appuio-cloudscale-lpg-2:*:sparkling-sound-1234:ssd", - "appuio_cloud_storage:*:acme-corp:sparkling-sound-1234:ssd", - "appuio_cloud_storage:c-appuio-cloudscale-lpg-2:*:*:ssd", - "appuio_cloud_storage:*:acme-corp:*:ssd", - "appuio_cloud_storage:*:*:sparkling-sound-1234:ssd", - "appuio_cloud_storage:*:*:*:ssd", - "appuio_cloud_storage:c-appuio-cloudscale-lpg-2:acme-corp:sparkling-sound-1234", - "appuio_cloud_storage:c-appuio-cloudscale-lpg-2:*:sparkling-sound-1234", - "appuio_cloud_storage:*:acme-corp:sparkling-sound-1234", - "appuio_cloud_storage:*:*:sparkling-sound-1234", - "appuio_cloud_storage:c-appuio-cloudscale-lpg-2:acme-corp", - "appuio_cloud_storage:*:acme-corp", - "appuio_cloud_storage:c-appuio-cloudscale-lpg-2", - "appuio_cloud_storage", - }, keys) -} - -func TestGenerateSourceKeysWithSixElements(t *testing.T) { - keys := SourceKey{ - parts: []string{"appuio_cloud_storage", "c-appuio-cloudscale-lpg-2", "acme-corp", "sparkling-sound-1234", "ssd", "exoscale"}, - }.LookupKeys() - - require.Equal(t, []string{ - "appuio_cloud_storage:c-appuio-cloudscale-lpg-2:acme-corp:sparkling-sound-1234:ssd:exoscale", - "appuio_cloud_storage:c-appuio-cloudscale-lpg-2:acme-corp:sparkling-sound-1234:*:exoscale", - "appuio_cloud_storage:c-appuio-cloudscale-lpg-2:acme-corp:*:ssd:exoscale", - "appuio_cloud_storage:c-appuio-cloudscale-lpg-2:*:sparkling-sound-1234:ssd:exoscale", - "appuio_cloud_storage:*:acme-corp:sparkling-sound-1234:ssd:exoscale", - "appuio_cloud_storage:c-appuio-cloudscale-lpg-2:acme-corp:*:*:exoscale", - "appuio_cloud_storage:c-appuio-cloudscale-lpg-2:*:sparkling-sound-1234:*:exoscale", - "appuio_cloud_storage:c-appuio-cloudscale-lpg-2:*:*:ssd:exoscale", - "appuio_cloud_storage:*:acme-corp:sparkling-sound-1234:*:exoscale", - "appuio_cloud_storage:*:acme-corp:*:ssd:exoscale", - "appuio_cloud_storage:*:*:sparkling-sound-1234:ssd:exoscale", - "appuio_cloud_storage:c-appuio-cloudscale-lpg-2:*:*:*:exoscale", - "appuio_cloud_storage:*:acme-corp:*:*:exoscale", - "appuio_cloud_storage:*:*:sparkling-sound-1234:*:exoscale", - "appuio_cloud_storage:*:*:*:ssd:exoscale", - "appuio_cloud_storage:*:*:*:*:exoscale", - "appuio_cloud_storage:c-appuio-cloudscale-lpg-2:acme-corp:sparkling-sound-1234:ssd", - "appuio_cloud_storage:c-appuio-cloudscale-lpg-2:acme-corp:*:ssd", - "appuio_cloud_storage:c-appuio-cloudscale-lpg-2:*:sparkling-sound-1234:ssd", - "appuio_cloud_storage:*:acme-corp:sparkling-sound-1234:ssd", - "appuio_cloud_storage:c-appuio-cloudscale-lpg-2:*:*:ssd", - "appuio_cloud_storage:*:acme-corp:*:ssd", - "appuio_cloud_storage:*:*:sparkling-sound-1234:ssd", - "appuio_cloud_storage:*:*:*:ssd", - "appuio_cloud_storage:c-appuio-cloudscale-lpg-2:acme-corp:sparkling-sound-1234", - "appuio_cloud_storage:c-appuio-cloudscale-lpg-2:*:sparkling-sound-1234", - "appuio_cloud_storage:*:acme-corp:sparkling-sound-1234", - "appuio_cloud_storage:*:*:sparkling-sound-1234", - "appuio_cloud_storage:c-appuio-cloudscale-lpg-2:acme-corp", - "appuio_cloud_storage:*:acme-corp", - "appuio_cloud_storage:c-appuio-cloudscale-lpg-2", - "appuio_cloud_storage", - }, keys) -} diff --git a/pkg/tenantmapping/mapping.go b/pkg/tenantmapping/mapping.go deleted file mode 100644 index fe030ae..0000000 --- a/pkg/tenantmapping/mapping.go +++ /dev/null @@ -1,167 +0,0 @@ -package tenantmapping - -import ( - "context" - "database/sql" - "fmt" - "strings" - "time" - - "github.com/go-logr/logr" - "github.com/jmoiron/sqlx" - 
"github.com/prometheus/common/model" - - "github.com/appuio/appuio-cloud-reporting/pkg/db" - "github.com/appuio/appuio-cloud-reporting/pkg/report" -) - -var ( - promQuery = `sum by(organization, billing_entity) (control_api_organization_billing_entity_ref{%s})` -) - -type options struct { - prometheusQueryTimeout time.Duration - metricSelector string -} - -// Option represents a report option. -type Option interface { - set(*options) -} - -func buildOptions(os []Option) options { - var build options - for _, o := range os { - o.set(&build) - } - return build -} - -// WithPrometheusQueryTimeout allows setting a timout when querying prometheus. -func WithPrometheusQueryTimeout(tm time.Duration) Option { - return prometheusQueryTimeout(tm) -} - -type prometheusQueryTimeout time.Duration - -func (t prometheusQueryTimeout) set(o *options) { - o.prometheusQueryTimeout = time.Duration(t) -} - -// WithMetricSelector allows further specifying which metrics to choose. -// Example: WithMetricSelector(`namespace="testing"`) -func WithMetricSelector(q string) Option { - return metricSelector(q) -} - -type metricSelector string - -func (t metricSelector) set(o *options) { - o.metricSelector = string(t) -} - -// MapTenantTarget maps the tenants (source, target) for a given time, read from a control-api prometheus metric. -// Truncates the time to the current hour. -func MapTenantTarget(ctx context.Context, tx *sqlx.Tx, prom report.PromQuerier, at time.Time, options ...Option) error { - log := logr.FromContextOrDiscard(ctx).WithValues("at", at) - opts := buildOptions(options) - at = at.In(time.UTC).Truncate(time.Hour) - - promQCtx := ctx - if opts.prometheusQueryTimeout != 0 { - ctx, cancel := context.WithTimeout(promQCtx, opts.prometheusQueryTimeout) - defer cancel() - promQCtx = ctx - } - res, _, err := prom.Query(promQCtx, fmt.Sprintf(promQuery, opts.metricSelector), at) - if err != nil { - return fmt.Errorf("failed to query prometheus: %w", err) - } - - samples, ok := res.(model.Vector) - if !ok { - return fmt.Errorf("expected prometheus query to return a model.Vector, got %T", res) - } - - log.V(1).Info("processing samples", "count", len(samples)) - for _, sample := range samples { - if err := processSample(ctx, tx, at, sample); err != nil { - return fmt.Errorf("failed to process sample: %w", err) - } - } - - return nil -} - -func processSample(ctx context.Context, tx *sqlx.Tx, ts time.Time, s *model.Sample) error { - log := logr.FromContextOrDiscard(ctx).WithValues("at", ts) - - organization, err := getMetricLabel(s.Metric, "organization") - if err != nil { - return err - } - // Entity can be unset - billingEntityRaw, _ := getMetricLabel(s.Metric, "billing_entity") - billingEntity := strings.TrimPrefix(string(billingEntityRaw), "be-") - log = log.WithValues("organization", organization, "billing_entity", billingEntity) - - et := db.Tenant{} - err = tx.GetContext(ctx, &et, "SELECT * FROM tenants WHERE source = $1 AND during @> $2::timestamptz", organization, ts) - if err != nil && err != sql.ErrNoRows { - return err - } - - if et.Target.String == billingEntity { - log.V(1).Info("tenant mapping already up to date") - return nil - } - - if et.Id != "" { - log = log.WithValues("id", et.Id) - log.Info("found existing tenant mapping, updating") - if l, ok := et.During.Lower.Get().(time.Time); ok && l.Equal(ts) { - log.Info("update would result in empty range, deleting instead") - if _, err := tx.ExecContext(ctx, "DELETE FROM tenants WHERE id = $1", et.Id); err != nil { - return err - } - } else { - 
log.Info("setting upper bound of existing tenant mapping") - et.During.Upper.Set(ts) - if _, err := tx.NamedExecContext(ctx, "UPDATE tenants SET during = :during WHERE id = :id", et); err != nil { - return err - } - } - } - - it := db.Tenant{} - // Set the upper bound to the next lower bound, or infinity if there is none - err = tx.GetContext(ctx, &it, ` - WITH upper AS ( - select coalesce(min(lower(during)),'infinity'::timestamptz) as upper from tenants where source = $1 AND lower(during) > $2 - ) - INSERT INTO tenants (source,target,during) - VALUES ( - $3, $4, - tstzrange( - $5::timestamptz, - (select upper from upper), - '[)') - ) - RETURNING *`, organization, ts, organization, billingEntity, ts) - if err != nil { - return err - } - log.Info("created new tenant mapping", "tenant", it.Id, - "during_lower", it.During.Lower.Get().(fmt.Stringer).String(), - "during_upper", it.During.Upper.Get().(fmt.Stringer).String()) - - return nil -} - -func getMetricLabel(m model.Metric, name string) (model.LabelValue, error) { - value, ok := m[model.LabelName(name)] - if !ok { - return "", fmt.Errorf("expected sample to contain label '%s'", name) - } - return value, nil -} diff --git a/pkg/tenantmapping/mapping_test.go b/pkg/tenantmapping/mapping_test.go deleted file mode 100644 index df9cfc2..0000000 --- a/pkg/tenantmapping/mapping_test.go +++ /dev/null @@ -1,195 +0,0 @@ -package tenantmapping_test - -import ( - "context" - "database/sql" - "fmt" - "testing" - "time" - - "github.com/jackc/pgtype" - "github.com/jmoiron/sqlx" - "github.com/prometheus/common/model" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - - "github.com/appuio/appuio-cloud-reporting/pkg/db" - tenantmapping "github.com/appuio/appuio-cloud-reporting/pkg/tenantmapping" - "github.com/appuio/appuio-cloud-reporting/pkg/testsuite" - apiv1 "github.com/prometheus/client_golang/api/prometheus/v1" -) - -type MappingSuite struct { - testsuite.Suite -} - -func (s *MappingSuite) TestReport_RunReportCreatesTenants() { - t := s.T() - prom := fakeQuerier{ - mappings: map[string]string{ - "foo-org": "be-555", - "bar-org": "be-666", - }, - } - - tx, err := s.DB().Beginx() - require.NoError(t, err) - defer tx.Rollback() - - ts := time.Now().Truncate(time.Hour) - require.NoError(t, tenantmapping.MapTenantTarget(context.Background(), tx, prom, ts, tenantmapping.WithPrometheusQueryTimeout(time.Second), tenantmapping.WithMetricSelector(`namespace="testing"`))) - - var tenantCount int - require.NoError(t, sqlx.Get(tx, &tenantCount, "SELECT COUNT(*) FROM tenants WHERE during @> $1::timestamptz", ts)) - require.Equal(t, 2, tenantCount) -} - -func (s *MappingSuite) TestReport_RunReportRemapsExistingTenants() { - t := s.T() - prom := fakeQuerier{ - mappings: map[string]string{ - "foo-org": "be-555", - "bar-org": "be-666", - }, - } - - tx, err := s.DB().Beginx() - require.NoError(t, err) - defer tx.Rollback() - - existing := db.Tenant{} - require.NoError(t, - db.GetNamed(tx, &existing, - "INSERT INTO tenants (source,target) VALUES (:source,:target) RETURNING *", db.Tenant{ - Source: "foo-org", - Target: sql.NullString{String: "be-other", Valid: true}, - })) - - ts := time.Now().Truncate(time.Hour) - require.NoError(t, tenantmapping.MapTenantTarget(context.Background(), tx, prom, ts, tenantmapping.WithPrometheusQueryTimeout(time.Second))) - - expectedTenants := []comparableTenant{ - { - Source: "bar-org", - Target: "666", - During: fmt.Sprintf("[\"%s\",infinity)", ts.In(time.UTC).Format(db.PGTimestampFormat)), - }, - { - Source: 
"foo-org", - Target: "555", - During: fmt.Sprintf("[\"%s\",infinity)", ts.In(time.UTC).Format(db.PGTimestampFormat)), - }, - { - Source: "foo-org", - Target: "be-other", - During: fmt.Sprintf("[-infinity,\"%s\")", ts.In(time.UTC).Format(db.PGTimestampFormat)), - }, - } - var tenants []comparableTenant - // make timestamps string comparable - require.NoError(t, func() error { _, err := tx.Exec("set timezone to 'UTC';"); return err }()) - require.NoError(t, sqlx.Select(tx, &tenants, "SELECT source, target, during::text FROM tenants ORDER BY source, target")) - require.Equal(t, expectedTenants, tenants) - // Edge case: Don't fail on zero-length ranges. - // This should only happen if some data was inserted manually. Not failing here aligns with the behavior of any other ranges. - prom.mappings["foo-org"] = "be-777" - expectedTenants[1].Target = "777" - require.NoError(t, tenantmapping.MapTenantTarget(context.Background(), tx, prom, ts)) - require.NoError(t, sqlx.Select(tx, &tenants, "SELECT source, target, during::text FROM tenants ORDER BY source, target")) - require.Equal(t, expectedTenants, tenants) -} - -func (s *MappingSuite) TestReport_RunReport_NewUpperBoundInfinityOrUntilNextRange() { - t := s.T() - prom := fakeQuerier{ - mappings: map[string]string{ - "foo-org": "be-555", - "bar-org": "be-666", - }, - } - - tx, err := s.DB().Beginx() - require.NoError(t, err) - defer tx.Rollback() - - ts := time.Now().Truncate(time.Hour) - futureTS := ts.Add(5 * time.Hour) - pastTS := ts.Add(-5 * time.Hour) - - existingFoo := db.Tenant{} - require.NoError(t, - db.GetNamed(tx, &existingFoo, - "INSERT INTO tenants (source,target,during) VALUES (:source,:target,:during) RETURNING *", db.Tenant{ - Source: "foo-org", - Target: sql.NullString{String: "be-other", Valid: true}, - During: db.Timerange(db.MustTimestamp(futureTS), db.MustTimestamp(pgtype.Infinity)), - })) - - existingBar := db.Tenant{} - require.NoError(t, - db.GetNamed(tx, &existingBar, - "INSERT INTO tenants (source,target,during) VALUES (:source,:target,:during) RETURNING *", db.Tenant{ - Source: "bar-org", - Target: sql.NullString{String: "be-other", Valid: true}, - During: db.Timerange(db.MustTimestamp(pgtype.NegativeInfinity), db.MustTimestamp(pastTS)), - })) - - require.NoError(t, tenantmapping.MapTenantTarget(context.Background(), tx, prom, ts)) - - expectedTenants := []comparableTenant{ - { - Source: "bar-org", - Target: "666", - During: fmt.Sprintf("[\"%s\",infinity)", ts.In(time.UTC).Format(db.PGTimestampFormat)), - }, - { - Source: "bar-org", - Target: "be-other", - During: fmt.Sprintf("[-infinity,\"%s\")", pastTS.In(time.UTC).Format(db.PGTimestampFormat)), - }, - { - Source: "foo-org", - Target: "555", - During: fmt.Sprintf("[\"%s\",\"%s\")", ts.In(time.UTC).Format(db.PGTimestampFormat), futureTS.In(time.UTC).Format(db.PGTimestampFormat)), - }, - { - Source: "foo-org", - Target: "be-other", - During: fmt.Sprintf("[\"%s\",infinity)", futureTS.In(time.UTC).Format(db.PGTimestampFormat)), - }, - } - var tenants []comparableTenant - // make timestamps string comparable - require.NoError(t, func() error { _, err := tx.Exec("set timezone to 'UTC';"); return err }()) - require.NoError(t, sqlx.Select(tx, &tenants, "SELECT source, target, during::text FROM tenants ORDER BY source, target")) - require.Equal(t, expectedTenants, tenants) -} - -func TestReport(t *testing.T) { - suite.Run(t, new(MappingSuite)) -} - -type fakeQuerier struct { - mappings map[string]string -} - -func (q fakeQuerier) Query(ctx context.Context, query string, ts 
time.Time, _ ...apiv1.Option) (model.Value, apiv1.Warnings, error) {
-	var res model.Vector
-	for k, s := range q.mappings {
-		res = append(res, &model.Sample{
-			Metric: map[model.LabelName]model.LabelValue{
-				"__name__":       "control_api_organization_billing_entity_ref",
-				"organization":   model.LabelValue(k),
-				"billing_entity": model.LabelValue(s),
-			},
-			Value: 1,
-		})
-	}
-	return res, nil, nil
-}
-
-type comparableTenant struct {
-	Source string
-	Target string
-	During string
-}
diff --git a/pkg/testsuite/testsuite.go b/pkg/testsuite/testsuite.go
index 2870bd2..5f9c2a1 100644
--- a/pkg/testsuite/testsuite.go
+++ b/pkg/testsuite/testsuite.go
@@ -10,15 +10,13 @@ import (
 	"github.com/prometheus/client_golang/api"
 	apiv1 "github.com/prometheus/client_golang/api/prometheus/v1"
 	"github.com/stretchr/testify/require"
-
-	"github.com/appuio/appuio-cloud-reporting/pkg/db/dbtest"
+	"github.com/stretchr/testify/suite"
 )
 
-// Suite holds a dbtest.Suite and a lazily started prometheus server.
+// Suite holds a lazily started prometheus server.
 // Each Suite holds its own Prometheus server. Suites can be run in parallel.
 type Suite struct {
-	dbtest.Suite
-
+	suite.Suite
 	// promMutex guards all prom* variables below.
 	promMutex sync.Mutex
 	promAddr  string
@@ -62,8 +60,6 @@ func (ts *Suite) PrometheusAPIClient() apiv1.API {
 
-// TearDownSuite stops prometheus and drops the temporary database.
+// TearDownSuite stops prometheus.
 func (ts *Suite) TearDownSuite() {
-	ts.Suite.TearDownSuite()
-
 	ts.promMutex.Lock()
 	defer ts.promMutex.Unlock()
diff --git a/pkg/testsuite/testsuite_test.go b/pkg/testsuite/testsuite_test.go
index 2088a79..e90e12d 100644
--- a/pkg/testsuite/testsuite_test.go
+++ b/pkg/testsuite/testsuite_test.go
@@ -7,7 +7,7 @@ import (
 	"github.com/stretchr/testify/require"
 	"github.com/stretchr/testify/suite"
 
-	"github.com/appuio/appuio-cloud-reporting/pkg/testsuite"
+	"github.com/appuio/appuio-reporting/pkg/testsuite"
 )
 
 type TestSuite struct {
diff --git a/report_command.go b/report_command.go
index b0148d3..2944cf4 100644
--- a/report_command.go
+++ b/report_command.go
@@ -6,23 +6,27 @@ import (
 	"os"
 	"time"
 
-	"github.com/appuio/appuio-cloud-reporting/pkg/db"
-	"github.com/appuio/appuio-cloud-reporting/pkg/report"
-	"github.com/appuio/appuio-cloud-reporting/pkg/thanos"
-	"github.com/jmoiron/sqlx"
+	"github.com/appuio/appuio-reporting/pkg/odoo"
+	"github.com/appuio/appuio-reporting/pkg/report"
+	"github.com/appuio/appuio-reporting/pkg/thanos"
 	"github.com/prometheus/client_golang/api"
 	apiv1 "github.com/prometheus/client_golang/api/prometheus/v1"
 	"github.com/urfave/cli/v2"
 )
 
 type reportCommand struct {
-	DatabaseURL      string
-	PrometheusURL    string
-	QueryName        string
-	Begin            *time.Time
-	RepeatUntil      *time.Time
-	PromQueryTimeout time.Duration
+	PrometheusURL     string
+	OdooURL           string
+	OdooOauthTokenURL string
+	OdooClientId      string
+	OdooClientSecret  string
+	ReportArgs        report.ReportArgs
+
+	Begin       *time.Time
+	RepeatUntil *time.Time
+
+	PromQueryTimeout time.Duration
 
 	ThanosAllowPartialResponses bool
 	OrgId                       string
 }
@@ -37,12 +41,32 @@ func newReportCommand() *cli.Command {
 		Before: command.before,
 		Action: command.execute,
 		Flags: []cli.Flag{
-			newDbURLFlag(&command.DatabaseURL),
-			newPromURLFlag(&command.PrometheusURL),
-			&cli.StringFlag{Name: "query-name", Usage: fmt.Sprintf("Name of the query (sample values: %s)", queryNames(db.DefaultQueries)),
-				EnvVars: envVars("QUERY_NAME"), Destination: &command.QueryName, Required: true, DefaultText: defaultTestForRequiredFlags},
+			&cli.StringFlag{Name: "prom-url", Usage: "Prometheus connection URL in the form of http://host:port",
+				EnvVars: envVars("PROM_URL"), Destination: &command.PrometheusURL, Value: "http://localhost:9090"},
+			&cli.StringFlag{Name: "odoo-url", Usage: "URL of the Odoo Metered Billing API",
+				EnvVars: envVars("ODOO_URL"), Destination: &command.OdooURL, Value: "http://localhost:8080"},
+			&cli.StringFlag{Name: "odoo-oauth-token-url", Usage: "OAuth token URL to authenticate with the Odoo metered billing API",
+				EnvVars: envVars("ODOO_OAUTH_TOKEN_URL"), Destination: &command.OdooOauthTokenURL, Required: true, DefaultText: defaultTextForRequiredFlags},
+			&cli.StringFlag{Name: "odoo-oauth-client-id", Usage: "Client ID of the OAuth client to interact with the Odoo metered billing API",
+				EnvVars: envVars("ODOO_OAUTH_CLIENT_ID"), Destination: &command.OdooClientId, Required: true, DefaultText: defaultTextForRequiredFlags},
+			&cli.StringFlag{Name: "odoo-oauth-client-secret", Usage: "Client secret of the OAuth client to interact with the Odoo metered billing API",
+				EnvVars: envVars("ODOO_OAUTH_CLIENT_SECRET"), Destination: &command.OdooClientSecret, Required: true, DefaultText: defaultTextForRequiredFlags},
+			&cli.StringFlag{Name: "product-id", Usage: "Odoo Product ID for this query",
+				EnvVars: envVars("PRODUCT_ID"), Destination: &command.ReportArgs.ProductID, Required: true, DefaultText: defaultTextForRequiredFlags},
+			&cli.StringFlag{Name: "query", Usage: "Prometheus query to run",
+				EnvVars: envVars("QUERY"), Destination: &command.ReportArgs.Query, Required: true, DefaultText: defaultTextForRequiredFlags},
+			&cli.StringFlag{Name: "instance-jsonnet", Usage: "Jsonnet snippet that generates the Instance ID",
+				EnvVars: envVars("INSTANCE_JSONNET"), Destination: &command.ReportArgs.InstanceJsonnet, Required: true, DefaultText: defaultTextForRequiredFlags},
+			&cli.StringFlag{Name: "item-group-description-jsonnet", Usage: "Jsonnet snippet that generates the item group description on the invoice",
+				EnvVars: envVars("ITEM_GROUP_DESCRIPTION_JSONNET"), Destination: &command.ReportArgs.ItemGroupDescriptionJsonnet, Required: false, DefaultText: defaultTextForOptionalFlags},
+			&cli.StringFlag{Name: "item-description-jsonnet", Usage: "Jsonnet snippet that generates the item description on the invoice",
+				EnvVars: envVars("ITEM_DESCRIPTION_JSONNET"), Destination: &command.ReportArgs.ItemDescriptionJsonnet, Required: false, DefaultText: defaultTextForOptionalFlags},
+			&cli.StringFlag{Name: "unit-id", Usage: "ID of the unit to use in Odoo",
+				EnvVars: envVars("UNIT_ID"), Destination: &command.ReportArgs.UnitID, Required: true, DefaultText: defaultTextForRequiredFlags},
 			&cli.TimestampFlag{Name: "begin", Usage: fmt.Sprintf("Beginning timestamp of the report period in the form of RFC3339 (%s)", time.RFC3339),
-				EnvVars: envVars("BEGIN"), Layout: time.RFC3339, Required: true, DefaultText: defaultTestForRequiredFlags},
+				EnvVars: envVars("BEGIN"), Layout: time.RFC3339, Required: true, DefaultText: defaultTextForRequiredFlags},
+			&cli.DurationFlag{Name: "timerange", Usage: "Timerange for individual measurement samples",
+				EnvVars: envVars("TIMERANGE"), Destination: &command.ReportArgs.TimerangeSize, Required: true, DefaultText: defaultTextForRequiredFlags},
 			&cli.TimestampFlag{Name: "repeat-until", Usage: fmt.Sprintf("Repeat running the report until reaching this timestamp (%s)", time.RFC3339),
 				EnvVars: envVars("REPEAT_UNTIL"), Layout: time.RFC3339, Required: false},
 			&cli.DurationFlag{Name: "prom-query-timeout", Usage: "Timeout when querying prometheus (example: 1m)",
@@ -51,6 
+75,8 @@ func newReportCommand() *cli.Command { EnvVars: envVars("THANOS_ALLOW_PARTIAL_RESPONSES"), Destination: &command.ThanosAllowPartialResponses, Required: false, DefaultText: "false"}, &cli.StringFlag{Name: "org-id", Usage: "Sets the X-Scope-OrgID header to this value on requests to Prometheus", Value: "", EnvVars: envVars("ORG_ID"), Destination: &command.OrgId, Required: false, DefaultText: "empty"}, + &cli.StringFlag{Name: "debug-override-sales-order-id", Usage: "Overrides the sales order ID to a static constant for debugging purposes", Value: "", + EnvVars: envVars("DEBUG_OVERRIDE_SALES_ORDER_ID"), Destination: &command.ReportArgs.OverrideSalesOrderID, Required: false, DefaultText: "empty"}, }, } } @@ -70,12 +96,7 @@ func (cmd *reportCommand) execute(cliCtx *cli.Context) error { return fmt.Errorf("could not create prometheus client: %w", err) } - log.V(1).Info("Opening database connection", "url", cmd.DatabaseURL) - rdb, err := db.Openx(cmd.DatabaseURL) - if err != nil { - return fmt.Errorf("could not open database connection: %w", err) - } - defer rdb.Close() + odooClient := odoo.NewOdooAPIClient(ctx, cmd.OdooURL, cmd.OdooOauthTokenURL, cmd.OdooClientId, cmd.OdooClientSecret, log) o := make([]report.Option, 0) if cmd.PromQueryTimeout != 0 { @@ -83,11 +104,11 @@ func (cmd *reportCommand) execute(cliCtx *cli.Context) error { } if cmd.RepeatUntil != nil { - if err := cmd.runReportRange(ctx, rdb, promClient, o); err != nil { + if err := cmd.runReportRange(ctx, odooClient, promClient, o); err != nil { return err } } else { - if err := cmd.runReport(ctx, rdb, promClient, o); err != nil { + if err := cmd.runReport(ctx, odooClient, promClient, o); err != nil { return err } } @@ -96,7 +117,7 @@ func (cmd *reportCommand) execute(cliCtx *cli.Context) error { return nil } -func (cmd *reportCommand) runReportRange(ctx context.Context, db *sqlx.DB, promClient apiv1.API, o []report.Option) error { +func (cmd *reportCommand) runReportRange(ctx context.Context, odooClient *odoo.OdooAPIClient, promClient apiv1.API, o []report.Option) error { log := AppLogger(ctx) started := time.Now() @@ -107,28 +128,21 @@ func (cmd *reportCommand) runReportRange(ctx context.Context, db *sqlx.DB, promC }) log.Info("Running reports...") - c, err := report.RunRange(ctx, db, promClient, cmd.QueryName, *cmd.Begin, *cmd.RepeatUntil, append(o, reporter)...) + c, err := report.RunRange(ctx, odooClient, promClient, cmd.ReportArgs, *cmd.Begin, *cmd.RepeatUntil, append(o, reporter)...) 
 	log.Info(fmt.Sprintf("Ran %d reports", c))
 	return err
 }
 
-func (cmd *reportCommand) runReport(ctx context.Context, db *sqlx.DB, promClient apiv1.API, o []report.Option) error {
+func (cmd *reportCommand) runReport(ctx context.Context, odooClient *odoo.OdooAPIClient, promClient apiv1.API, o []report.Option) error {
 	log := AppLogger(ctx)
 
-	log.V(1).Info("Begin transaction")
-	tx, err := db.BeginTxx(ctx, nil)
-	if err != nil {
-		return err
-	}
-	defer tx.Rollback()
 
+	// Records are sent directly to the Odoo API; there is no database transaction to manage anymore.
 	log.Info("Running report...")
-	if err := report.Run(ctx, tx, promClient, cmd.QueryName, *cmd.Begin, o...); err != nil {
+	if err := report.Run(ctx, odooClient, promClient, cmd.ReportArgs, *cmd.Begin, o...); err != nil {
 		return err
 	}
-
-	log.V(1).Info("Commit transaction")
-	return tx.Commit()
+	return nil
 }
 
 func newPrometheusAPIClient(promURL string, thanosAllowPartialResponses bool, orgId string) (apiv1.API, error) {
diff --git a/tenantmapping_command.go b/tenantmapping_command.go
deleted file mode 100644
index 8e03b4d..0000000
--- a/tenantmapping_command.go
+++ /dev/null
@@ -1,129 +0,0 @@
-package main
-
-import (
-	"context"
-	"fmt"
-	"time"
-
-	"github.com/go-logr/logr"
-	"github.com/jmoiron/sqlx"
-	apiv1 "github.com/prometheus/client_golang/api/prometheus/v1"
-	"github.com/urfave/cli/v2"
-
-	"github.com/appuio/appuio-cloud-reporting/pkg/db"
-	"github.com/appuio/appuio-cloud-reporting/pkg/tenantmapping"
-)
-
-type tmapCommand struct {
-	DatabaseURL      string
-	PrometheusURL    string
-	Begin            *time.Time
-	RepeatUntil      *time.Time
-	PromQueryTimeout time.Duration
-	DryRun           bool
-
-	AdditionalMetricSelector string
-
-	ThanosAllowPartialResponses bool
-	OrgId                       string
-}
-
-var tenantmappingCommandName = "tenantmapping"
-
-func newTmapCommand() *cli.Command {
-	command := &tmapCommand{}
-	return &cli.Command{
-		Name:   tenantmappingCommandName,
-		Usage:  "Update the tenant mapping (source, target) in the database for a given time",
-		Before: command.before,
-		Action: command.execute,
-		Flags: []cli.Flag{
-			newDbURLFlag(&command.DatabaseURL),
-			newPromURLFlag(&command.PrometheusURL),
-			&cli.TimestampFlag{Name: "begin", Usage: fmt.Sprintf("Beginning timestamp of the report period in the form of RFC3339 (%s)", time.RFC3339),
-				EnvVars: envVars("BEGIN"), Layout: time.RFC3339, Required: true, DefaultText: defaultTestForRequiredFlags},
-			&cli.TimestampFlag{Name: "repeat-until", Usage: fmt.Sprintf("Repeat running the report until reaching this timestamp (%s)", time.RFC3339),
-				EnvVars: envVars("REPEAT_UNTIL"), Layout: time.RFC3339, Required: false},
-			&cli.DurationFlag{Name: "prom-query-timeout", Usage: "Timeout when querying prometheus (example: 1m)",
-				EnvVars: envVars("PROM_QUERY_TIMEOUT"), Destination: &command.PromQueryTimeout, Required: false},
-			&cli.BoolFlag{Name: "thanos-allow-partial-responses", Usage: "Allows partial responses from Thanos. Can be helpful when querying a Thanos cluster with lost data.",
-				EnvVars: envVars("THANOS_ALLOW_PARTIAL_RESPONSES"), Destination: &command.ThanosAllowPartialResponses, Required: false, DefaultText: "false"},
-			&cli.BoolFlag{Name: "dry-run", Usage: "Does not commit results if set.",
-				EnvVars: envVars("DRY_RUN"), Destination: &command.DryRun, Required: false, DefaultText: "false"},
-			&cli.StringFlag{Name: "additional-metric-selector", Usage: "Allows further specifying which metrics to choose. 
Example: --additional-metric-selector='namespace=\"testing\"'", - EnvVars: envVars("ADDITIONAL_METRIC_SELECTOR"), Destination: &command.AdditionalMetricSelector, Required: false, DefaultText: "false"}, - &cli.StringFlag{Name: "org-id", Usage: "Sets the X-Scope-OrgID header to this value on requests to Prometheus", Value: "", - EnvVars: envVars("ORG_ID"), Destination: &command.OrgId, Required: false, DefaultText: "empty"}, - }, - } -} - -func (cmd *tmapCommand) before(context *cli.Context) error { - cmd.Begin = context.Timestamp("begin") - cmd.RepeatUntil = context.Timestamp("repeat-until") - return LogMetadata(context) -} - -func (cmd *tmapCommand) execute(cliCtx *cli.Context) error { - ctx := cliCtx.Context - log := AppLogger(ctx).WithName(tenantmappingCommandName) - // We really need to fix the inane dance around the AppLogger which needs custom plumbing and can't be used from packages because of import cycles. - ctx = logr.NewContext(ctx, log) - - promClient, err := newPrometheusAPIClient(cmd.PrometheusURL, cmd.ThanosAllowPartialResponses, cmd.OrgId) - if err != nil { - return fmt.Errorf("could not create prometheus client: %w", err) - } - - log.V(1).Info("Opening database connection", "url", cmd.DatabaseURL) - rdb, err := db.Openx(cmd.DatabaseURL) - if err != nil { - return fmt.Errorf("could not open database connection: %w", err) - } - defer rdb.Close() - - o := make([]tenantmapping.Option, 0) - if cmd.PromQueryTimeout != 0 { - o = append(o, tenantmapping.WithPrometheusQueryTimeout(cmd.PromQueryTimeout)) - } - if cmd.AdditionalMetricSelector != "" { - o = append(o, tenantmapping.WithMetricSelector(cmd.AdditionalMetricSelector)) - } - - if cmd.RepeatUntil == nil { - return runTenantMapping(ctx, rdb, promClient, *cmd.Begin, cmd.DryRun, o...) - } - - for currentTime := *cmd.Begin; cmd.RepeatUntil.After(currentTime); currentTime = currentTime.Add(time.Hour) { - if err := runTenantMapping(ctx, rdb, promClient, currentTime, cmd.DryRun, o...); err != nil { - return fmt.Errorf("error running report at %s: %w", currentTime.Format(time.RFC3339), err) - } - } - - return nil -} - -func runTenantMapping(ctx context.Context, rdb *sqlx.DB, promClient apiv1.API, begin time.Time, dryRun bool, o ...tenantmapping.Option) error { - log := AppLogger(ctx).WithName(tenantmappingCommandName) - - log.V(1).Info("Begin transaction") - tx, err := rdb.Beginx() - if err != nil { - return err - } - defer tx.Rollback() - - log.Info("Running mapper...") - err = tenantmapping.MapTenantTarget(ctx, tx, promClient, begin, o...) - if err != nil { - return err - } - - if dryRun { - log.Info("Dry run, not committing transaction") - return nil - } - - log.V(1).Info("Commit transaction") - return tx.Commit() -}
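Taken together, the diff replaces the database-backed `query-name` lookup with a fully flag-driven report run against the Odoo metered billing API. A minimal sketch of an invocation of the new interface (the `appuio-reporting report` binary and subcommand names are assumed here; all endpoint, credential, and query values are placeholders):

```sh
appuio-reporting report \
  --prom-url http://localhost:9090 \
  --odoo-url http://localhost:8080 \
  --odoo-oauth-token-url https://odoo.example.com/oauth2/token \
  --odoo-oauth-client-id my-client-id \
  --odoo-oauth-client-secret "$ODOO_CLIENT_SECRET" \
  --product-id my-product-id \
  --unit-id unit_kg \
  --query 'sum by(namespace, tenant, sales_order_id) (my_usage_metric)' \
  --instance-jsonnet 'local labels = std.extVar("labels"); "%(tenant)s" % labels' \
  --begin 2020-01-23T17:00:00Z \
  --timerange 1h
```

Per the changes above, each sample's labels are exposed to the Jsonnet snippets via `std.extVar("labels")`, and every sample must carry a `sales_order_id` label unless `--debug-override-sales-order-id` is set.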