QueryLimitSpec defines the limits applied at the query path.
@@ -3572,7 +3800,7 @@ IMPORTANT: Make sure that the replication factor defined is less than or equal t
## RetentionLimitSpec { #loki-grafana-com-v1-RetentionLimitSpec }
RetentionLimitSpec controls how long logs will be kept in storage.
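For orientation, a minimal Go sketch of how these limit specs hang together on a `LokiStack`, using only the `lokiv1` types and fields that appear in the test changes later in this diff (the values are illustrative, and the sketch assumes the operator module is available as a dependency):

```go
package main

import (
	"fmt"

	lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
)

func main() {
	// Per-tenant query limits and retention for a single tenant.
	limits := &lokiv1.LimitsSpec{
		Tenants: map[string]lokiv1.PerTenantLimitsTemplateSpec{
			"application": {
				QueryLimits: &lokiv1.PerTenantQueryLimitSpec{
					QueryLimitSpec: lokiv1.QueryLimitSpec{
						MaxEntriesLimitPerQuery: 5000,
						MaxChunksPerQuery:       1000000,
						MaxQuerySeries:          500,
						QueryTimeout:            "5m",
					},
				},
				Retention: &lokiv1.RetentionLimitSpec{Days: 3},
			},
		},
	}
	fmt.Printf("%+v\n", limits.Tenants["application"])
}
```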
diff --git a/operator/docs/operator/compatibility.md b/operator/docs/operator/compatibility.md
index ee09abb6b71ae..36550f06a7062 100644
--- a/operator/docs/operator/compatibility.md
+++ b/operator/docs/operator/compatibility.md
@@ -36,3 +36,4 @@ The versions of Loki compatible to be run with the Loki Operator are:
* v2.9.0
* v2.9.1
* v2.9.2
+* v2.9.3
diff --git a/operator/go.mod b/operator/go.mod
index 0fc901ddab734..4ffc3899d11ce 100644
--- a/operator/go.mod
+++ b/operator/go.mod
@@ -131,15 +131,15 @@ require (
go.uber.org/multierr v1.8.0 // indirect
go.uber.org/zap v1.24.0 // indirect
go4.org/netipx v0.0.0-20230125063823-8449b0a6169f // indirect
- golang.org/x/crypto v0.14.0 // indirect
+ golang.org/x/crypto v0.17.0 // indirect
golang.org/x/exp v0.0.0-20230124195608-d38c7dcee874 // indirect
golang.org/x/mod v0.12.0 // indirect
golang.org/x/net v0.17.0 // indirect
golang.org/x/oauth2 v0.8.0 // indirect
golang.org/x/sync v0.3.0 // indirect
- golang.org/x/sys v0.13.0 // indirect
- golang.org/x/term v0.13.0 // indirect
- golang.org/x/text v0.13.0 // indirect
+ golang.org/x/sys v0.15.0 // indirect
+ golang.org/x/term v0.15.0 // indirect
+ golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.12.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.3.0 // indirect
diff --git a/operator/go.sum b/operator/go.sum
index 982073b8b5eb6..4383e691d41a0 100644
--- a/operator/go.sum
+++ b/operator/go.sum
@@ -664,8 +664,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20221012134737-56aed061732a/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
-golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
-golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
+golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k=
+golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -888,14 +888,14 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
-golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
-golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek=
-golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
+golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4=
+golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -906,8 +906,8 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
-golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
diff --git a/operator/hack/addons_dev.yaml b/operator/hack/addons_dev.yaml
index 5ecad9181c301..adf6aa053add4 100644
--- a/operator/hack/addons_dev.yaml
+++ b/operator/hack/addons_dev.yaml
@@ -29,7 +29,7 @@ spec:
spec:
containers:
- name: logcli
- image: docker.io/grafana/logcli:2.9.2-amd64
+ image: docker.io/grafana/logcli:2.9.3-amd64
imagePullPolicy: IfNotPresent
command:
- /bin/sh
@@ -73,7 +73,7 @@ spec:
spec:
containers:
- name: promtail
- image: docker.io/grafana/promtail:2.9.2
+ image: docker.io/grafana/promtail:2.9.3
args:
- -config.file=/etc/promtail/promtail.yaml
- -log.level=info
diff --git a/operator/hack/addons_ocp.yaml b/operator/hack/addons_ocp.yaml
index da62de2936409..1a0ff7325a62a 100644
--- a/operator/hack/addons_ocp.yaml
+++ b/operator/hack/addons_ocp.yaml
@@ -29,7 +29,7 @@ spec:
spec:
containers:
- name: logcli
- image: docker.io/grafana/logcli:2.9.2-amd64
+ image: docker.io/grafana/logcli:2.9.3-amd64
imagePullPolicy: IfNotPresent
command:
- /bin/sh
@@ -70,7 +70,7 @@ spec:
spec:
containers:
- name: promtail
- image: docker.io/grafana/promtail:2.9.2
+ image: docker.io/grafana/promtail:2.9.3
args:
- -config.file=/etc/promtail/promtail.yaml
- -log.level=info
diff --git a/operator/hack/deploy-aws-storage-secret.sh b/operator/hack/deploy-aws-storage-secret.sh
index 5fcf2e83f75f5..ecad7efc5537b 100755
--- a/operator/hack/deploy-aws-storage-secret.sh
+++ b/operator/hack/deploy-aws-storage-secret.sh
@@ -1,19 +1,26 @@
-#!/bin/bash
+#!/usr/bin/env bash
-set -eou pipefail
+set -euo pipefail
-BUCKET_NAME=$1
+readonly bucket_name=${1-}
-NAMESPACE=${NAMESPACE:-openshift-logging}
+if [[ -z "${bucket_name}" ]]; then
+ echo "Provide a bucket name"
+ exit 1
+fi
-REGION=${REGION:-$(aws configure get region)}
-ACCESS_KEY_ID=${ACCESS_KEY_ID:-$(aws configure get aws_access_key_id)}
-SECRET_ACCESS_KEY=${SECRET_ACCESS_KEY:-$(aws configure get aws_secret_access_key)}
+readonly namespace=${NAMESPACE:-openshift-logging}
+region=${REGION:-$(aws configure get region)}
+readonly region
+access_key_id=${ACCESS_KEY_ID:-$(aws configure get aws_access_key_id)}
+readonly access_key_id
+secret_access_key=${SECRET_ACCESS_KEY:-$(aws configure get aws_secret_access_key)}
+readonly secret_access_key
-kubectl --ignore-not-found=true -n "${NAMESPACE}" delete secret test
-kubectl -n "${NAMESPACE}" create secret generic test \
- --from-literal=region="$(echo -n "${REGION}")" \
- --from-literal=bucketnames="$(echo -n "${BUCKET_NAME}")" \
- --from-literal=access_key_id="$(echo -n "${ACCESS_KEY_ID}")" \
- --from-literal=access_key_secret="$(echo -n "${SECRET_ACCESS_KEY}")" \
- --from-literal=endpoint="$(echo -n "https://s3.${REGION}.amazonaws.com")"
+kubectl --ignore-not-found=true -n "${namespace}" delete secret test
+kubectl -n "${namespace}" create secret generic test \
+ --from-literal=region="$(echo -n "${region}")" \
+ --from-literal=bucketnames="$(echo -n "${bucket_name}")" \
+ --from-literal=access_key_id="$(echo -n "${access_key_id}")" \
+ --from-literal=access_key_secret="$(echo -n "${secret_access_key}")" \
+ --from-literal=endpoint="$(echo -n "https://s3.${region}.amazonaws.com")"
diff --git a/operator/hack/deploy-azure-storage-secret.sh b/operator/hack/deploy-azure-storage-secret.sh
new file mode 100755
index 0000000000000..bf99c66aae1d3
--- /dev/null
+++ b/operator/hack/deploy-azure-storage-secret.sh
@@ -0,0 +1,33 @@
+#!/usr/bin/env bash
+
+set -euo pipefail
+
+readonly account_name="${1-}"
+readonly container_name="${2-}"
+
+if [[ -z "${account_name}" ]]; then
+ echo "Provide a account name"
+ exit 1
+fi
+
+if [[ -z "${container_name}" ]]; then
+ echo "Provide a container name"
+ exit 1
+fi
+
+readonly namespace="${NAMESPACE:-openshift-logging}"
+
+readonly azure_environment="AzureGlobal"
+
+resource_group=$(az storage account show --name "${account_name}" | jq -r '.resourceGroup')
+readonly resource_group
+
+account_key=$(az storage account keys list --resource-group "${resource_group}" --account-name "${account_name}" | jq -r '.[0].value')
+readonly account_key
+
+kubectl --ignore-not-found=true -n "${namespace}" delete secret test
+kubectl -n "${namespace}" create secret generic test \
+ --from-literal=environment="$(echo -n "${azure_environment}")" \
+ --from-literal=account_name="$(echo -n "${account_name}")" \
+ --from-literal=account_key="$(echo -n "${account_key}")" \
+ --from-literal=container="$(echo -n "${container_name}")"
diff --git a/operator/hack/deploy-gcp-storage-secret.sh b/operator/hack/deploy-gcp-storage-secret.sh
new file mode 100755
index 0000000000000..b57bef803773d
--- /dev/null
+++ b/operator/hack/deploy-gcp-storage-secret.sh
@@ -0,0 +1,23 @@
+#!/usr/bin/env bash
+
+set -euo pipefail
+
+readonly bucket_name=${1-}
+readonly google_application_credentials=${2-}
+
+if [[ -z "${bucket_name}" ]]; then
+ echo "Provide a bucket name"
+ exit 1
+fi
+
+if [[ -z "${google_application_credentials}" ]]; then
+ echo "Provide a path to the Google application credentials file"
+ exit 1
+fi
+
+readonly namespace=${NAMESPACE:-openshift-logging}
+
+kubectl --ignore-not-found=true -n "${namespace}" delete secret test
+kubectl -n "${namespace}" create secret generic test \
+ --from-literal=bucketname="$(echo -n "${bucket_name}")" \
+ --from-file=key.json="${google_application_credentials}"
diff --git a/operator/hack/lokistack_dev.yaml b/operator/hack/lokistack_dev.yaml
index 573bba382416b..c05bbede47452 100644
--- a/operator/hack/lokistack_dev.yaml
+++ b/operator/hack/lokistack_dev.yaml
@@ -6,8 +6,8 @@ spec:
size: 1x.demo
storage:
schemas:
- - version: v12
- effectiveDate: 2022-06-01
+ - version: v13
+ effectiveDate: 2023-10-15
secret:
name: test
type: s3
diff --git a/operator/hack/lokistack_gateway_dev.yaml b/operator/hack/lokistack_gateway_dev.yaml
index c7f81d1f50e65..0cbe605c4b4ea 100644
--- a/operator/hack/lokistack_gateway_dev.yaml
+++ b/operator/hack/lokistack_gateway_dev.yaml
@@ -14,8 +14,8 @@ spec:
size: 1x.demo
storage:
schemas:
- - version: v12
- effectiveDate: 2022-06-01
+ - version: v13
+ effectiveDate: 2023-10-15
secret:
name: test
type: s3
diff --git a/operator/hack/lokistack_gateway_ocp.yaml b/operator/hack/lokistack_gateway_ocp.yaml
index 723009c0a5eec..5fb6b3cc3efb6 100644
--- a/operator/hack/lokistack_gateway_ocp.yaml
+++ b/operator/hack/lokistack_gateway_ocp.yaml
@@ -7,8 +7,8 @@ spec:
size: 1x.demo
storage:
schemas:
- - version: v12
- effectiveDate: 2022-06-01
+ - version: v13
+ effectiveDate: 2023-10-15
secret:
name: test
type: s3
diff --git a/operator/hack/lokistack_gateway_ocp_azure.yaml b/operator/hack/lokistack_gateway_ocp_azure.yaml
new file mode 100644
index 0000000000000..3e38ef5b68a7f
--- /dev/null
+++ b/operator/hack/lokistack_gateway_ocp_azure.yaml
@@ -0,0 +1,25 @@
+apiVersion: loki.grafana.com/v1
+kind: LokiStack
+metadata:
+ name: lokistack-dev
+ namespace: openshift-logging
+spec:
+ size: 1x.demo
+ storage:
+ schemas:
+ - version: v13
+ effectiveDate: 2023-10-15
+ secret:
+ name: test
+ type: azure
+ storageClassName: managed-csi
+ tenants:
+ mode: openshift-logging
+ rules:
+ enabled: true
+ selector:
+ matchLabels:
+ openshift.io/cluster-monitoring: "true"
+ namespaceSelector:
+ matchLabels:
+ openshift.io/cluster-monitoring: "true"
diff --git a/operator/hack/lokistack_gateway_ocp_gcp.yaml b/operator/hack/lokistack_gateway_ocp_gcp.yaml
new file mode 100644
index 0000000000000..1157b1760f667
--- /dev/null
+++ b/operator/hack/lokistack_gateway_ocp_gcp.yaml
@@ -0,0 +1,25 @@
+apiVersion: loki.grafana.com/v1
+kind: LokiStack
+metadata:
+ name: lokistack-dev
+ namespace: openshift-logging
+spec:
+ size: 1x.demo
+ storage:
+ schemas:
+ - version: v13
+ effectiveDate: 2023-10-15
+ secret:
+ name: test
+ type: gcs
+ storageClassName: standard-csi
+ tenants:
+ mode: openshift-logging
+ rules:
+ enabled: true
+ selector:
+ matchLabels:
+ openshift.io/cluster-monitoring: "true"
+ namespaceSelector:
+ matchLabels:
+ openshift.io/cluster-monitoring: "true"
diff --git a/operator/internal/certrotation/build_test.go b/operator/internal/certrotation/build_test.go
index 334cf654ed79b..47845765eb49c 100644
--- a/operator/internal/certrotation/build_test.go
+++ b/operator/internal/certrotation/build_test.go
@@ -5,11 +5,12 @@ import (
"strings"
"testing"
- configv1 "github.com/grafana/loki/operator/apis/config/v1"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
+
+ configv1 "github.com/grafana/loki/operator/apis/config/v1"
)
func TestBuildAll(t *testing.T) {
diff --git a/operator/internal/certrotation/rotation_test.go b/operator/internal/certrotation/rotation_test.go
index 01dfab335b03e..f75826d812f49 100644
--- a/operator/internal/certrotation/rotation_test.go
+++ b/operator/internal/certrotation/rotation_test.go
@@ -15,6 +15,8 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
)
+var errExpectedSingleCert = errors.New("Expected a single certificate")
+
func TestSignerRotation_ReturnErrorOnMissingIssuer(t *testing.T) {
c := signerRotation{}
_, err := c.NewCertificate(1 * time.Hour)
@@ -330,7 +332,7 @@ func signCertificate(template *x509.Certificate, requestKey stdcrypto.PublicKey,
return nil, err
}
if len(certs) != 1 {
- return nil, errors.New("Expected a single certificate")
+ return nil, errExpectedSingleCert
}
return certs[0], nil
}
diff --git a/operator/internal/certrotation/target_test.go b/operator/internal/certrotation/target_test.go
index c705e9c50816f..71efc5e70cfdf 100644
--- a/operator/internal/certrotation/target_test.go
+++ b/operator/internal/certrotation/target_test.go
@@ -5,11 +5,12 @@ import (
"testing"
"time"
- configv1 "github.com/grafana/loki/operator/apis/config/v1"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/util/cert"
+
+ configv1 "github.com/grafana/loki/operator/apis/config/v1"
)
func TestCertificatesExpired(t *testing.T) {
diff --git a/operator/internal/handlers/dashboards_create_test.go b/operator/internal/handlers/dashboards_create_test.go
index 387de0e3e589b..f897b45841543 100644
--- a/operator/internal/handlers/dashboards_create_test.go
+++ b/operator/internal/handlers/dashboards_create_test.go
@@ -4,9 +4,6 @@ import (
"context"
"testing"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
- "github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
-
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -15,6 +12,9 @@ import (
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
+
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ "github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
)
func TestCreateDashboards_ReturnsResourcesInManagedNamespaces(t *testing.T) {
diff --git a/operator/internal/handlers/dashboards_delete_test.go b/operator/internal/handlers/dashboards_delete_test.go
index d0ffa9874ae4a..ee05f16adf571 100644
--- a/operator/internal/handlers/dashboards_delete_test.go
+++ b/operator/internal/handlers/dashboards_delete_test.go
@@ -4,12 +4,13 @@ import (
"context"
"testing"
- "github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
- "github.com/grafana/loki/operator/internal/manifests/openshift"
"github.com/stretchr/testify/require"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/client"
+
+ "github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
+ "github.com/grafana/loki/operator/internal/manifests/openshift"
)
func TestDeleteDashboards(t *testing.T) {
diff --git a/operator/internal/handlers/internal/certificates/options_test.go b/operator/internal/handlers/internal/certificates/options_test.go
index 16603b32712c2..775d61ffd787c 100644
--- a/operator/internal/handlers/internal/certificates/options_test.go
+++ b/operator/internal/handlers/internal/certificates/options_test.go
@@ -6,10 +6,7 @@ import (
"strings"
"testing"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
- "github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
"github.com/stretchr/testify/require"
-
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -17,6 +14,9 @@ import (
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
+
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ "github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
)
func TestGetOptions_ReturnEmpty_WhenCertificatesNotExisting(t *testing.T) {
diff --git a/operator/internal/handlers/internal/gateway/modes_test.go b/operator/internal/handlers/internal/gateway/modes_test.go
index 2b79f6383d7f5..f54d348f6b25f 100644
--- a/operator/internal/handlers/internal/gateway/modes_test.go
+++ b/operator/internal/handlers/internal/gateway/modes_test.go
@@ -3,9 +3,10 @@ package gateway
import (
"testing"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
)
func TestValidateModes_StaticMode(t *testing.T) {
diff --git a/operator/internal/handlers/internal/gateway/tenant_configsecret_test.go b/operator/internal/handlers/internal/gateway/tenant_configsecret_test.go
index b6783aa15ca9e..f0035a89a16ff 100644
--- a/operator/internal/handlers/internal/gateway/tenant_configsecret_test.go
+++ b/operator/internal/handlers/internal/gateway/tenant_configsecret_test.go
@@ -4,9 +4,6 @@ import (
"context"
"testing"
- "github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
- "github.com/grafana/loki/operator/internal/manifests"
-
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -15,6 +12,9 @@ import (
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
+
+ "github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
+ "github.com/grafana/loki/operator/internal/manifests"
)
var tenantConfigData = []byte(`
diff --git a/operator/internal/handlers/internal/gateway/tenant_secrets_test.go b/operator/internal/handlers/internal/gateway/tenant_secrets_test.go
index cb7bf6775050e..d0292108d8290 100644
--- a/operator/internal/handlers/internal/gateway/tenant_secrets_test.go
+++ b/operator/internal/handlers/internal/gateway/tenant_secrets_test.go
@@ -6,16 +6,15 @@ import (
"testing"
"github.com/stretchr/testify/require"
-
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
- "github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
- "github.com/grafana/loki/operator/internal/manifests"
-
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
+
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ "github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
+ "github.com/grafana/loki/operator/internal/manifests"
)
func TestGetTenantSecrets(t *testing.T) {
diff --git a/operator/internal/handlers/internal/openshift/proxy_test.go b/operator/internal/handlers/internal/openshift/proxy_test.go
index 247ac69982069..db37e53800303 100644
--- a/operator/internal/handlers/internal/openshift/proxy_test.go
+++ b/operator/internal/handlers/internal/openshift/proxy_test.go
@@ -4,13 +4,14 @@ import (
"context"
"testing"
- "github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
configv1 "github.com/openshift/api/config/v1"
"github.com/stretchr/testify/require"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
+
+ "github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
)
func TestGetProxy_ReturnError_WhenOtherThanNotFound(t *testing.T) {
diff --git a/operator/internal/handlers/internal/rules/rules_test.go b/operator/internal/handlers/internal/rules/rules_test.go
index 1859198b412fa..8bc52afb6a9a4 100644
--- a/operator/internal/handlers/internal/rules/rules_test.go
+++ b/operator/internal/handlers/internal/rules/rules_test.go
@@ -4,9 +4,6 @@ import (
"context"
"testing"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
- "github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
- "github.com/grafana/loki/operator/internal/handlers/internal/rules"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -15,6 +12,10 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
+
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ "github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
+ "github.com/grafana/loki/operator/internal/handlers/internal/rules"
)
func TestList_AlertingRulesMatchSelector_WithDefaultStackNamespaceRules(t *testing.T) {
diff --git a/operator/internal/handlers/internal/rules/secrets_test.go b/operator/internal/handlers/internal/rules/secrets_test.go
index 59be996cca008..d31237308cea0 100644
--- a/operator/internal/handlers/internal/rules/secrets_test.go
+++ b/operator/internal/handlers/internal/rules/secrets_test.go
@@ -3,11 +3,12 @@ package rules_test
import (
"testing"
+ "github.com/stretchr/testify/require"
+ corev1 "k8s.io/api/core/v1"
+
lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/handlers/internal/rules"
"github.com/grafana/loki/operator/internal/manifests"
- "github.com/stretchr/testify/require"
- corev1 "k8s.io/api/core/v1"
)
func TestExtractRulerSecret(t *testing.T) {
diff --git a/operator/internal/handlers/internal/storage/secrets.go b/operator/internal/handlers/internal/storage/secrets.go
index d5f195e3c0547..0e027be8f3260 100644
--- a/operator/internal/handlers/internal/storage/secrets.go
+++ b/operator/internal/handlers/internal/storage/secrets.go
@@ -1,6 +1,10 @@
package storage
import (
+ "crypto/sha1"
+ "fmt"
+ "sort"
+
"github.com/ViaQ/logerr/v2/kverrors"
corev1 "k8s.io/api/core/v1"
@@ -8,11 +12,18 @@ import (
"github.com/grafana/loki/operator/internal/manifests/storage"
)
+var hashSeparator = []byte(",")
+
// ExtractSecret reads a k8s secret into a manifest object storage struct if valid.
func ExtractSecret(s *corev1.Secret, secretType lokiv1.ObjectStorageSecretType) (*storage.Options, error) {
- var err error
+ hash, err := hashSecretData(s)
+ if err != nil {
+ return nil, kverrors.Wrap(err, "error calculating hash for secret", "type", secretType)
+ }
+
storageOpts := storage.Options{
SecretName: s.Name,
+ SecretSHA1: hash,
SharedStore: secretType,
}
@@ -37,48 +48,75 @@ func ExtractSecret(s *corev1.Secret, secretType lokiv1.ObjectStorageSecretType)
return &storageOpts, nil
}
+func hashSecretData(s *corev1.Secret) (string, error) {
+ keys := make([]string, 0, len(s.Data))
+ for k := range s.Data {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ h := sha1.New()
+ for _, k := range keys {
+ if _, err := h.Write([]byte(k)); err != nil {
+ return "", err
+ }
+
+ if _, err := h.Write(hashSeparator); err != nil {
+ return "", err
+ }
+
+ if _, err := h.Write(s.Data[k]); err != nil {
+ return "", err
+ }
+
+ if _, err := h.Write(hashSeparator); err != nil {
+ return "", err
+ }
+ }
+
+ return fmt.Sprintf("%x", h.Sum(nil)), nil
+}
+
func extractAzureConfigSecret(s *corev1.Secret) (*storage.AzureStorageConfig, error) {
// Extract and validate mandatory fields
- env := s.Data["environment"]
+ env := s.Data[storage.KeyAzureEnvironmentName]
if len(env) == 0 {
- return nil, kverrors.New("missing secret field", "field", "environment")
+ return nil, kverrors.New("missing secret field", "field", storage.KeyAzureEnvironmentName)
}
- container := s.Data["container"]
+ container := s.Data[storage.KeyAzureStorageContainerName]
if len(container) == 0 {
- return nil, kverrors.New("missing secret field", "field", "container")
+ return nil, kverrors.New("missing secret field", "field", storage.KeyAzureStorageContainerName)
}
- name := s.Data["account_name"]
+ name := s.Data[storage.KeyAzureStorageAccountName]
if len(name) == 0 {
- return nil, kverrors.New("missing secret field", "field", "account_name")
+ return nil, kverrors.New("missing secret field", "field", storage.KeyAzureStorageAccountName)
}
- key := s.Data["account_key"]
+ key := s.Data[storage.KeyAzureStorageAccountKey]
if len(key) == 0 {
- return nil, kverrors.New("missing secret field", "field", "account_key")
+ return nil, kverrors.New("missing secret field", "field", storage.KeyAzureStorageAccountKey)
}
// Extract and validate optional fields
- endpointSuffix := s.Data["endpoint_suffix"]
+ endpointSuffix := s.Data[storage.KeyAzureStorageEndpointSuffix]
return &storage.AzureStorageConfig{
Env: string(env),
Container: string(container),
- AccountName: string(name),
- AccountKey: string(key),
EndpointSuffix: string(endpointSuffix),
}, nil
}
func extractGCSConfigSecret(s *corev1.Secret) (*storage.GCSStorageConfig, error) {
// Extract and validate mandatory fields
- bucket := s.Data["bucketname"]
+ bucket := s.Data[storage.KeyGCPStorageBucketName]
if len(bucket) == 0 {
- return nil, kverrors.New("missing secret field", "field", "bucketname")
+ return nil, kverrors.New("missing secret field", "field", storage.KeyGCPStorageBucketName)
}
// Check if google authentication credentials are provided
- keyJSON := s.Data["key.json"]
+ keyJSON := s.Data[storage.KeyGCPServiceAccountKeyFilename]
if len(keyJSON) == 0 {
- return nil, kverrors.New("missing google authentication credentials", "field", "key.json")
+ return nil, kverrors.New("missing google authentication credentials", "field", storage.KeyGCPServiceAccountKeyFilename)
}
return &storage.GCSStorageConfig{
@@ -88,25 +126,25 @@ func extractGCSConfigSecret(s *corev1.Secret) (*storage.GCSStorageConfig, error)
func extractS3ConfigSecret(s *corev1.Secret) (*storage.S3StorageConfig, error) {
// Extract and validate mandatory fields
- endpoint := s.Data["endpoint"]
+ endpoint := s.Data[storage.KeyAWSEndpoint]
if len(endpoint) == 0 {
- return nil, kverrors.New("missing secret field", "field", "endpoint")
+ return nil, kverrors.New("missing secret field", "field", storage.KeyAWSEndpoint)
}
- buckets := s.Data["bucketnames"]
+ buckets := s.Data[storage.KeyAWSBucketNames]
if len(buckets) == 0 {
- return nil, kverrors.New("missing secret field", "field", "bucketnames")
+ return nil, kverrors.New("missing secret field", "field", storage.KeyAWSBucketNames)
}
- id := s.Data["access_key_id"]
+ id := s.Data[storage.KeyAWSAccessKeyID]
if len(id) == 0 {
- return nil, kverrors.New("missing secret field", "field", "access_key_id")
+ return nil, kverrors.New("missing secret field", "field", storage.KeyAWSAccessKeyID)
}
- secret := s.Data["access_key_secret"]
+ secret := s.Data[storage.KeyAWSAccessKeySecret]
if len(secret) == 0 {
- return nil, kverrors.New("missing secret field", "field", "access_key_secret")
+ return nil, kverrors.New("missing secret field", "field", storage.KeyAWSAccessKeySecret)
}
// Extract and validate optional fields
- region := s.Data["region"]
+ region := s.Data[storage.KeyAWSRegion]
sseCfg, err := extractS3SSEConfig(s.Data)
if err != nil {
@@ -114,12 +152,10 @@ func extractS3ConfigSecret(s *corev1.Secret) (*storage.S3StorageConfig, error) {
}
return &storage.S3StorageConfig{
- Endpoint: string(endpoint),
- Buckets: string(buckets),
- AccessKeyID: string(id),
- AccessKeySecret: string(secret),
- Region: string(region),
- SSE: sseCfg,
+ Endpoint: string(endpoint),
+ Buckets: string(buckets),
+ Region: string(region),
+ SSE: sseCfg,
}, nil
}
@@ -129,12 +165,12 @@ func extractS3SSEConfig(d map[string][]byte) (storage.S3SSEConfig, error) {
kmsKeyId, kmsEncryptionCtx string
)
- switch sseType = storage.S3SSEType(d["sse_type"]); sseType {
+ switch sseType = storage.S3SSEType(d[storage.KeyAWSSSEType]); sseType {
case storage.SSEKMSType:
- kmsEncryptionCtx = string(d["sse_kms_encryption_context"])
- kmsKeyId = string(d["sse_kms_key_id"])
+ kmsEncryptionCtx = string(d[storage.KeyAWSSseKmsEncryptionContext])
+ kmsKeyId = string(d[storage.KeyAWSSseKmsKeyID])
if kmsKeyId == "" {
- return storage.S3SSEConfig{}, kverrors.New("missing secret field", "field", "sse_kms_key_id")
+ return storage.S3SSEConfig{}, kverrors.New("missing secret field", "field", storage.KeyAWSSseKmsKeyID)
}
case storage.SSES3Type:
@@ -142,7 +178,7 @@ func extractS3SSEConfig(d map[string][]byte) (storage.S3SSEConfig, error) {
return storage.S3SSEConfig{}, nil
default:
- return storage.S3SSEConfig{}, kverrors.New("unsupported secret field value (Supported: SSE-KMS, SSE-S3)", "field", "sse_type", "value", sseType)
+ return storage.S3SSEConfig{}, kverrors.New("unsupported secret field value (Supported: SSE-KMS, SSE-S3)", "field", storage.KeyAWSSSEType, "value", sseType)
}
return storage.S3SSEConfig{
@@ -154,57 +190,55 @@ func extractS3SSEConfig(d map[string][]byte) (storage.S3SSEConfig, error) {
func extractSwiftConfigSecret(s *corev1.Secret) (*storage.SwiftStorageConfig, error) {
// Extract and validate mandatory fields
- url := s.Data["auth_url"]
+ url := s.Data[storage.KeySwiftAuthURL]
if len(url) == 0 {
- return nil, kverrors.New("missing secret field", "field", "auth_url")
+ return nil, kverrors.New("missing secret field", "field", storage.KeySwiftAuthURL)
}
- username := s.Data["username"]
+ username := s.Data[storage.KeySwiftUsername]
if len(username) == 0 {
- return nil, kverrors.New("missing secret field", "field", "username")
+ return nil, kverrors.New("missing secret field", "field", storage.KeySwiftUsername)
}
- userDomainName := s.Data["user_domain_name"]
+ userDomainName := s.Data[storage.KeySwiftUserDomainName]
if len(userDomainName) == 0 {
- return nil, kverrors.New("missing secret field", "field", "user_domain_name")
+ return nil, kverrors.New("missing secret field", "field", storage.KeySwiftUserDomainName)
}
- userDomainID := s.Data["user_domain_id"]
+ userDomainID := s.Data[storage.KeySwiftUserDomainID]
if len(userDomainID) == 0 {
- return nil, kverrors.New("missing secret field", "field", "user_domain_id")
+ return nil, kverrors.New("missing secret field", "field", storage.KeySwiftUserDomainID)
}
- userID := s.Data["user_id"]
+ userID := s.Data[storage.KeySwiftUserID]
if len(userID) == 0 {
- return nil, kverrors.New("missing secret field", "field", "user_id")
+ return nil, kverrors.New("missing secret field", "field", storage.KeySwiftUserID)
}
- password := s.Data["password"]
+ password := s.Data[storage.KeySwiftPassword]
if len(password) == 0 {
- return nil, kverrors.New("missing secret field", "field", "password")
+ return nil, kverrors.New("missing secret field", "field", storage.KeySwiftPassword)
}
- domainID := s.Data["domain_id"]
+ domainID := s.Data[storage.KeySwiftDomainID]
if len(domainID) == 0 {
- return nil, kverrors.New("missing secret field", "field", "domain_id")
+ return nil, kverrors.New("missing secret field", "field", storage.KeySwiftDomainID)
}
- domainName := s.Data["domain_name"]
+ domainName := s.Data[storage.KeySwiftDomainName]
if len(domainName) == 0 {
- return nil, kverrors.New("missing secret field", "field", "domain_name")
+ return nil, kverrors.New("missing secret field", "field", storage.KeySwiftDomainName)
}
- containerName := s.Data["container_name"]
+ containerName := s.Data[storage.KeySwiftContainerName]
if len(containerName) == 0 {
- return nil, kverrors.New("missing secret field", "field", "container_name")
+ return nil, kverrors.New("missing secret field", "field", storage.KeySwiftContainerName)
}
// Extract and validate optional fields
- projectID := s.Data["project_id"]
- projectName := s.Data["project_name"]
- projectDomainID := s.Data["project_domain_id"]
- projectDomainName := s.Data["project_domain_name"]
- region := s.Data["region"]
+ projectID := s.Data[storage.KeySwiftProjectID]
+ projectName := s.Data[storage.KeySwiftProjectName]
+ projectDomainID := s.Data[storage.KeySwiftProjectDomainId]
+ projectDomainName := s.Data[storage.KeySwiftProjectDomainName]
+ region := s.Data[storage.KeySwiftRegion]
return &storage.SwiftStorageConfig{
AuthURL: string(url),
- Username: string(username),
UserDomainName: string(userDomainName),
UserDomainID: string(userDomainID),
UserID: string(userID),
- Password: string(password),
DomainID: string(domainID),
DomainName: string(domainName),
ProjectID: string(projectID),
@@ -218,28 +252,25 @@ func extractSwiftConfigSecret(s *corev1.Secret) (*storage.SwiftStorageConfig, er
func extractAlibabaCloudConfigSecret(s *corev1.Secret) (*storage.AlibabaCloudStorageConfig, error) {
// Extract and validate mandatory fields
- endpoint := s.Data["endpoint"]
+ endpoint := s.Data[storage.KeyAlibabaCloudEndpoint]
if len(endpoint) == 0 {
- return nil, kverrors.New("missing secret field", "field", "endpoint")
+ return nil, kverrors.New("missing secret field", "field", storage.KeyAlibabaCloudEndpoint)
}
- bucket := s.Data["bucket"]
+ bucket := s.Data[storage.KeyAlibabaCloudBucket]
if len(bucket) == 0 {
- return nil, kverrors.New("missing secret field", "field", "bucket")
+ return nil, kverrors.New("missing secret field", "field", storage.KeyAlibabaCloudBucket)
}
- // TODO buckets are comma-separated list
- id := s.Data["access_key_id"]
+ id := s.Data[storage.KeyAlibabaCloudAccessKeyID]
if len(id) == 0 {
- return nil, kverrors.New("missing secret field", "field", "access_key_id")
+ return nil, kverrors.New("missing secret field", "field", storage.KeyAlibabaCloudAccessKeyID)
}
- secret := s.Data["secret_access_key"]
+ secret := s.Data[storage.KeyAlibabaCloudSecretAccessKey]
if len(secret) == 0 {
- return nil, kverrors.New("missing secret field", "field", "secret_access_key")
+ return nil, kverrors.New("missing secret field", "field", storage.KeyAlibabaCloudSecretAccessKey)
}
return &storage.AlibabaCloudStorageConfig{
- Endpoint: string(endpoint),
- Bucket: string(bucket),
- AccessKeyID: string(id),
- SecretAccessKey: string(secret),
+ Endpoint: string(endpoint),
+ Bucket: string(bucket),
}, nil
}
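The new `hashSecretData` helper sorts the secret's keys before feeding each key and value (separated by `","`) into a SHA-1 digest, so the resulting `SecretSHA1` is stable even though Go randomizes map iteration order. A self-contained sketch of the same idea; it mirrors the function above, so it should reproduce the "single entry" vector from `secrets_test.go` below:

```go
package main

import (
	"crypto/sha1"
	"fmt"
	"sort"
)

// hashData mirrors hashSecretData: keys are sorted, and every key and value is
// followed by a "," separator before being written to the digest.
func hashData(data map[string][]byte) string {
	keys := make([]string, 0, len(data))
	for k := range data {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	h := sha1.New()
	sep := []byte(",")
	for _, k := range keys {
		h.Write([]byte(k))
		h.Write(sep)
		h.Write(data[k])
		h.Write(sep)
	}
	return fmt.Sprintf("%x", h.Sum(nil))
}

func main() {
	// Expected: a8973b2094d3af1e43931132dee228909bf2b02a ("single entry" case).
	fmt.Println(hashData(map[string][]byte{"key": []byte("value")}))
}
```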
diff --git a/operator/internal/handlers/internal/storage/secrets_test.go b/operator/internal/handlers/internal/storage/secrets_test.go
index a9e474414e44f..eea31fbd522c8 100644
--- a/operator/internal/handlers/internal/storage/secrets_test.go
+++ b/operator/internal/handlers/internal/storage/secrets_test.go
@@ -1,14 +1,66 @@
-package storage_test
+package storage
import (
"testing"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
- "github.com/grafana/loki/operator/internal/handlers/internal/storage"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
)
+func TestHashSecretData(t *testing.T) {
+ tt := []struct {
+ desc string
+ data map[string][]byte
+ wantHash string
+ }{
+ {
+ desc: "nil",
+ data: nil,
+ wantHash: "da39a3ee5e6b4b0d3255bfef95601890afd80709",
+ },
+ {
+ desc: "empty",
+ data: map[string][]byte{},
+ wantHash: "da39a3ee5e6b4b0d3255bfef95601890afd80709",
+ },
+ {
+ desc: "single entry",
+ data: map[string][]byte{
+ "key": []byte("value"),
+ },
+ wantHash: "a8973b2094d3af1e43931132dee228909bf2b02a",
+ },
+ {
+ desc: "multiple entries",
+ data: map[string][]byte{
+ "key": []byte("value"),
+ "key3": []byte("value3"),
+ "key2": []byte("value2"),
+ },
+ wantHash: "a3341093891ad4df9f07db586029be48e9e6e884",
+ },
+ }
+
+ for _, tc := range tt {
+ tc := tc
+
+ t.Run(tc.desc, func(t *testing.T) {
+ t.Parallel()
+
+ s := &corev1.Secret{
+ Data: tc.data,
+ }
+
+ hash, err := hashSecretData(s)
+ require.NoError(t, err)
+ require.Equal(t, tc.wantHash, hash)
+ })
+ }
+}
+
func TestAzureExtract(t *testing.T) {
type test struct {
name string
@@ -43,6 +95,7 @@ func TestAzureExtract(t *testing.T) {
{
name: "missing account_key",
secret: &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{Name: "test"},
Data: map[string][]byte{
"environment": []byte("here"),
"container": []byte("this,that"),
@@ -54,6 +107,7 @@ func TestAzureExtract(t *testing.T) {
{
name: "all mandatory set",
secret: &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{Name: "test"},
Data: map[string][]byte{
"environment": []byte("here"),
"container": []byte("this,that"),
@@ -65,6 +119,7 @@ func TestAzureExtract(t *testing.T) {
{
name: "all set including optional",
secret: &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{Name: "test"},
Data: map[string][]byte{
"environment": []byte("here"),
"container": []byte("this,that"),
@@ -80,9 +135,12 @@ func TestAzureExtract(t *testing.T) {
t.Run(tst.name, func(t *testing.T) {
t.Parallel()
- _, err := storage.ExtractSecret(tst.secret, lokiv1.ObjectStorageSecretAzure)
+ opts, err := ExtractSecret(tst.secret, lokiv1.ObjectStorageSecretAzure)
if !tst.wantErr {
require.NoError(t, err)
+ require.NotEmpty(t, opts.SecretName)
+ require.NotEmpty(t, opts.SecretSHA1)
+ require.Equal(t, opts.SharedStore, lokiv1.ObjectStorageSecretAzure)
}
if tst.wantErr {
require.NotNil(t, err)
@@ -115,6 +173,7 @@ func TestGCSExtract(t *testing.T) {
{
name: "all set",
secret: &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{Name: "test"},
Data: map[string][]byte{
"bucketname": []byte("here"),
"key.json": []byte("{\"type\": \"SA\"}"),
@@ -127,7 +186,7 @@ func TestGCSExtract(t *testing.T) {
t.Run(tst.name, func(t *testing.T) {
t.Parallel()
- _, err := storage.ExtractSecret(tst.secret, lokiv1.ObjectStorageSecretGCS)
+ _, err := ExtractSecret(tst.secret, lokiv1.ObjectStorageSecretGCS)
if !tst.wantErr {
require.NoError(t, err)
}
@@ -210,6 +269,7 @@ func TestS3Extract(t *testing.T) {
{
name: "all set with SSE-KMS",
secret: &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{Name: "test"},
Data: map[string][]byte{
"endpoint": []byte("here"),
"bucketnames": []byte("this,that"),
@@ -223,6 +283,7 @@ func TestS3Extract(t *testing.T) {
{
name: "all set with SSE-KMS with encryption context",
secret: &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{Name: "test"},
Data: map[string][]byte{
"endpoint": []byte("here"),
"bucketnames": []byte("this,that"),
@@ -237,6 +298,7 @@ func TestS3Extract(t *testing.T) {
{
name: "all set with SSE-S3",
secret: &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{Name: "test"},
Data: map[string][]byte{
"endpoint": []byte("here"),
"bucketnames": []byte("this,that"),
@@ -249,6 +311,7 @@ func TestS3Extract(t *testing.T) {
{
name: "all set without SSE",
secret: &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{Name: "test"},
Data: map[string][]byte{
"endpoint": []byte("here"),
"bucketnames": []byte("this,that"),
@@ -263,9 +326,12 @@ func TestS3Extract(t *testing.T) {
t.Run(tst.name, func(t *testing.T) {
t.Parallel()
- _, err := storage.ExtractSecret(tst.secret, lokiv1.ObjectStorageSecretS3)
+ opts, err := ExtractSecret(tst.secret, lokiv1.ObjectStorageSecretS3)
if !tst.wantErr {
require.NoError(t, err)
+ require.NotEmpty(t, opts.SecretName)
+ require.NotEmpty(t, opts.SecretSHA1)
+ require.Equal(t, opts.SharedStore, lokiv1.ObjectStorageSecretS3)
}
if tst.wantErr {
require.NotNil(t, err)
@@ -389,6 +455,7 @@ func TestSwiftExtract(t *testing.T) {
{
name: "all set",
secret: &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{Name: "test"},
Data: map[string][]byte{
"auth_url": []byte("here"),
"username": []byte("this,that"),
@@ -408,9 +475,12 @@ func TestSwiftExtract(t *testing.T) {
t.Run(tst.name, func(t *testing.T) {
t.Parallel()
- _, err := storage.ExtractSecret(tst.secret, lokiv1.ObjectStorageSecretSwift)
+ opts, err := ExtractSecret(tst.secret, lokiv1.ObjectStorageSecretSwift)
if !tst.wantErr {
require.NoError(t, err)
+ require.NotEmpty(t, opts.SecretName)
+ require.NotEmpty(t, opts.SecretSHA1)
+ require.Equal(t, opts.SharedStore, lokiv1.ObjectStorageSecretSwift)
}
if tst.wantErr {
require.NotNil(t, err)
@@ -464,6 +534,7 @@ func TestAlibabaCloudExtract(t *testing.T) {
{
name: "all set",
secret: &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{Name: "test"},
Data: map[string][]byte{
"endpoint": []byte("here"),
"bucket": []byte("this,that"),
@@ -478,9 +549,12 @@ func TestAlibabaCloudExtract(t *testing.T) {
t.Run(tst.name, func(t *testing.T) {
t.Parallel()
- _, err := storage.ExtractSecret(tst.secret, lokiv1.ObjectStorageSecretAlibabaCloud)
+ opts, err := ExtractSecret(tst.secret, lokiv1.ObjectStorageSecretAlibabaCloud)
if !tst.wantErr {
require.NoError(t, err)
+ require.NotEmpty(t, opts.SecretName)
+ require.NotEmpty(t, opts.SecretSHA1)
+ require.Equal(t, opts.SharedStore, lokiv1.ObjectStorageSecretAlibabaCloud)
}
if tst.wantErr {
require.NotNil(t, err)
diff --git a/operator/internal/handlers/internal/tlsprofile/tlsprofile_test.go b/operator/internal/handlers/internal/tlsprofile/tlsprofile_test.go
index 90a5acb11f740..0a7535bcd921a 100644
--- a/operator/internal/handlers/internal/tlsprofile/tlsprofile_test.go
+++ b/operator/internal/handlers/internal/tlsprofile/tlsprofile_test.go
@@ -4,10 +4,6 @@ import (
"context"
"testing"
- configv1 "github.com/grafana/loki/operator/apis/config/v1"
- "github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
- "github.com/grafana/loki/operator/internal/handlers/internal/tlsprofile"
-
openshiftconfigv1 "github.com/openshift/api/config/v1"
"github.com/stretchr/testify/assert"
apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -15,6 +11,10 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
+
+ configv1 "github.com/grafana/loki/operator/apis/config/v1"
+ "github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
+ "github.com/grafana/loki/operator/internal/handlers/internal/tlsprofile"
)
func TestGetTLSSecurityProfile(t *testing.T) {
diff --git a/operator/internal/handlers/lokistack_check_cert_expiry_test.go b/operator/internal/handlers/lokistack_check_cert_expiry_test.go
index 156cb70e0a59a..400b6244d3af9 100644
--- a/operator/internal/handlers/lokistack_check_cert_expiry_test.go
+++ b/operator/internal/handlers/lokistack_check_cert_expiry_test.go
@@ -6,9 +6,6 @@ import (
"testing"
"time"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
- "github.com/grafana/loki/operator/internal/certrotation"
- "github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -18,6 +15,10 @@ import (
"k8s.io/utils/pointer"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
+
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ "github.com/grafana/loki/operator/internal/certrotation"
+ "github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
)
func TestCheckCertExpiry_WhenGetReturnsNotFound_DoesNotError(t *testing.T) {
diff --git a/operator/internal/handlers/lokistack_create_or_update_test.go b/operator/internal/handlers/lokistack_create_or_update_test.go
index 7b1a32fe47d6b..781966ac2ddad 100644
--- a/operator/internal/handlers/lokistack_create_or_update_test.go
+++ b/operator/internal/handlers/lokistack_create_or_update_test.go
@@ -8,11 +8,6 @@ import (
"os"
"testing"
- configv1 "github.com/grafana/loki/operator/apis/config/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
- "github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
- "github.com/grafana/loki/operator/internal/status"
-
"github.com/ViaQ/logerr/v2/log"
"github.com/go-logr/logr"
routev1 "github.com/openshift/api/route/v1"
@@ -30,6 +25,11 @@ import (
"k8s.io/utils/pointer"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
+
+ configv1 "github.com/grafana/loki/operator/apis/config/v1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ "github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
+ "github.com/grafana/loki/operator/internal/status"
)
var (
@@ -339,8 +339,8 @@ func TestCreateOrUpdateLokiStack_SetsOwnerRefOnAllObjects(t *testing.T) {
Kind: stack.Kind,
Name: stack.Name,
UID: stack.UID,
- Controller: pointer.BoolPtr(true),
- BlockOwnerDeletion: pointer.BoolPtr(true),
+ Controller: pointer.Bool(true),
+ BlockOwnerDeletion: pointer.Bool(true),
}
k.CreateStub = func(_ context.Context, o client.Object, _ ...client.CreateOption) error {
@@ -485,8 +485,8 @@ func TestCreateOrUpdateLokiStack_WhenGetReturnsNoError_UpdateObjects(t *testing.
Kind: "LokiStack",
Name: "my-stack",
UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
- Controller: pointer.BoolPtr(true),
- BlockOwnerDeletion: pointer.BoolPtr(true),
+ Controller: pointer.Bool(true),
+ BlockOwnerDeletion: pointer.Bool(true),
},
},
},
@@ -654,8 +654,8 @@ func TestCreateOrUpdateLokiStack_WhenUpdateReturnsError_ContinueWithOtherObjects
Kind: "LokiStack",
Name: "someStack",
UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
- Controller: pointer.BoolPtr(true),
- BlockOwnerDeletion: pointer.BoolPtr(true),
+ Controller: pointer.Bool(true),
+ BlockOwnerDeletion: pointer.Bool(true),
},
},
},
@@ -943,7 +943,7 @@ func TestCreateOrUpdateLokiStack_WhenMissingCAConfigMap_SetDegraded(t *testing.T
Type: lokiv1.ObjectStorageSecretS3,
},
TLS: &lokiv1.ObjectStorageTLSSpec{
- lokiv1.CASpec{
+ CASpec: lokiv1.CASpec{
CA: "not-existing",
},
},
@@ -1015,7 +1015,7 @@ func TestCreateOrUpdateLokiStack_WhenInvalidCAConfigMap_SetDegraded(t *testing.T
Type: lokiv1.ObjectStorageSecretS3,
},
TLS: &lokiv1.ObjectStorageTLSSpec{
- lokiv1.CASpec{
+ CASpec: lokiv1.CASpec{
CA: invalidCAConfigMap.Name,
},
},
@@ -1550,7 +1550,7 @@ func TestCreateOrUpdateLokiStack_RemovesRulerResourcesWhenDisabled(t *testing.T)
return nil
}
- k.ListStub = func(_ context.Context, list client.ObjectList, options ...client.ListOption) error {
+ k.ListStub = func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error {
switch list.(type) {
case *corev1.ConfigMapList:
k.SetClientObjectList(list, &corev1.ConfigMapList{
diff --git a/operator/internal/handlers/lokistack_enable_zone_awareness_test.go b/operator/internal/handlers/lokistack_enable_zone_awareness_test.go
index 4f661ef3cc844..f0c0f02be342b 100644
--- a/operator/internal/handlers/lokistack_enable_zone_awareness_test.go
+++ b/operator/internal/handlers/lokistack_enable_zone_awareness_test.go
@@ -7,13 +7,14 @@ import (
"testing"
"github.com/ViaQ/logerr/v2/kverrors"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
- "github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
+
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ "github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
)
var defaultPod = corev1.Pod{
@@ -102,6 +103,6 @@ func TestAnnotatePodWithAvailabilityZone_WhenGetReturnsNode_DoesNotError(t *test
require.Equal(t, 1, k.PatchCallCount())
_, p, patch, _ := k.PatchArgsForCall(0)
require.Equal(t, p, &testPod)
- actualPatch, err := patch.Data(nil)
+ actualPatch, _ := patch.Data(nil)
require.Equal(t, actualPatch, expectedPatch)
}
diff --git a/operator/internal/handlers/lokistack_rotate_certs_test.go b/operator/internal/handlers/lokistack_rotate_certs_test.go
index 0ee7ef77c039f..1ac48b5ebb139 100644
--- a/operator/internal/handlers/lokistack_rotate_certs_test.go
+++ b/operator/internal/handlers/lokistack_rotate_certs_test.go
@@ -5,8 +5,6 @@ import (
"errors"
"testing"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
- "github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
@@ -17,6 +15,9 @@ import (
"k8s.io/utils/pointer"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
+
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ "github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
)
func TestCreateOrRotateCertificates_WhenGetReturnsNotFound_DoesNotError(t *testing.T) {
@@ -229,8 +230,8 @@ func TestCreateOrRotateCertificates_SetsOwnerRefOnAllObjects(t *testing.T) {
Kind: stack.Kind,
Name: stack.Name,
UID: stack.UID,
- Controller: pointer.BoolPtr(true),
- BlockOwnerDeletion: pointer.BoolPtr(true),
+ Controller: pointer.Bool(true),
+ BlockOwnerDeletion: pointer.Bool(true),
}
k.CreateStub = func(_ context.Context, o client.Object, _ ...client.CreateOption) error {
@@ -375,8 +376,8 @@ func TestCreateOrRotateCertificates_WhenGetReturnsNoError_UpdateObjects(t *testi
Kind: "LokiStack",
Name: "my-stack",
UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
- Controller: pointer.BoolPtr(true),
- BlockOwnerDeletion: pointer.BoolPtr(true),
+ Controller: pointer.Bool(true),
+ BlockOwnerDeletion: pointer.Bool(true),
},
},
},
@@ -529,8 +530,8 @@ func TestCreateOrRotateCertificates_WhenUpdateReturnsError_ContinueWithOtherObje
Kind: "LokiStack",
Name: "someStack",
UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
- Controller: pointer.BoolPtr(true),
- BlockOwnerDeletion: pointer.BoolPtr(true),
+ Controller: pointer.Bool(true),
+ BlockOwnerDeletion: pointer.Bool(true),
},
},
},
diff --git a/operator/internal/manifests/build_test.go b/operator/internal/manifests/build_test.go
index 16c50b800f337..9e6afb3c62251 100644
--- a/operator/internal/manifests/build_test.go
+++ b/operator/internal/manifests/build_test.go
@@ -6,6 +6,7 @@ import (
"github.com/ViaQ/logerr/v2/kverrors"
openshiftconfigv1 "github.com/openshift/api/config/v1"
+ "github.com/stretchr/testify/require"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -13,8 +14,6 @@ import (
configv1 "github.com/grafana/loki/operator/apis/config/v1"
lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/internal"
-
- "github.com/stretchr/testify/require"
)
func TestApplyUserOptions_OverrideDefaults(t *testing.T) {
@@ -44,7 +43,7 @@ func TestApplyUserOptions_OverrideDefaults(t *testing.T) {
require.NoError(t, err)
require.Equal(t, defs.Size, opt.Stack.Size)
require.Equal(t, defs.Limits, opt.Stack.Limits)
- require.Equal(t, defs.ReplicationFactor, opt.Stack.ReplicationFactor)
+ require.Equal(t, defs.ReplicationFactor, opt.Stack.ReplicationFactor) //nolint:staticcheck
require.Equal(t, defs.Replication, opt.Stack.Replication)
require.Equal(t, defs.ManagementState, opt.Stack.ManagementState)
require.Equal(t, defs.Template.Ingester, opt.Stack.Template.Ingester)
diff --git a/operator/internal/manifests/compactor.go b/operator/internal/manifests/compactor.go
index b67997d29fe9c..0362b8d40010c 100644
--- a/operator/internal/manifests/compactor.go
+++ b/operator/internal/manifests/compactor.go
@@ -67,7 +67,7 @@ func BuildCompactor(opts Options) ([]client.Object, error) {
// NewCompactorStatefulSet creates a statefulset object for a compactor.
func NewCompactorStatefulSet(opts Options) *appsv1.StatefulSet {
l := ComponentLabels(LabelCompactorComponent, opts.Name)
- a := commonAnnotations(opts.ConfigSHA1, opts.CertRotationRequiredAt)
+ a := commonAnnotations(opts.ConfigSHA1, opts.ObjectStorage.SecretSHA1, opts.CertRotationRequiredAt)
podSpec := corev1.PodSpec{
Affinity: configureAffinity(LabelCompactorComponent, opts.Name, opts.Gates.DefaultNodeAffinity, opts.Stack.Template.Compactor),
Volumes: []corev1.Volume{
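With the secret hash exposed on `storage.Options`, each component's pod template can carry it as an annotation, so rotating the object storage credentials changes the template and triggers a rollout. A hedged sketch of what the extended `commonAnnotations` helper plausibly looks like; the config-hash and certRotationRequiredAt values appear in the tests below, the object-store key value is an assumption, and the real implementation in the manifests package may differ:

```go
package manifests

// Annotation keys: the first and last values are taken from the existing
// tests; the object-store key value is assumed here for illustration.
const (
	AnnotationLokiConfigHash         = "loki.grafana.com/config-hash"
	AnnotationLokiObjectStoreHash    = "loki.grafana.com/object-store-hash" // assumed
	AnnotationCertRotationRequiredAt = "loki.grafana.com/certRotationRequiredAt"
)

// commonAnnotations now also receives the object storage secret hash.
func commonAnnotations(configSHA1, objStorageSecretSHA1, rotationRequiredAt string) map[string]string {
	return map[string]string{
		AnnotationLokiConfigHash:         configSHA1,
		AnnotationLokiObjectStoreHash:    objStorageSecretSHA1,
		AnnotationCertRotationRequiredAt: rotationRequiredAt,
	}
}
```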
diff --git a/operator/internal/manifests/compactor_test.go b/operator/internal/manifests/compactor_test.go
index b2348b50faf5b..6839d64a74263 100644
--- a/operator/internal/manifests/compactor_test.go
+++ b/operator/internal/manifests/compactor_test.go
@@ -3,8 +3,10 @@ package manifests
import (
"testing"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/stretchr/testify/require"
+
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ "github.com/grafana/loki/operator/internal/manifests/storage"
)
func TestNewCompactorStatefulSet_SelectorMatchesLabels(t *testing.T) {
@@ -48,10 +50,32 @@ func TestNewCompactorStatefulSet_HasTemplateConfigHashAnnotation(t *testing.T) {
},
},
})
- expected := "loki.grafana.com/config-hash"
+
+ annotations := ss.Spec.Template.Annotations
+ require.Contains(t, annotations, AnnotationLokiConfigHash)
+ require.Equal(t, annotations[AnnotationLokiConfigHash], "deadbeef")
+}
+
+func TestNewCompactorStatefulSet_HasTemplateObjectStorageHashAnnotation(t *testing.T) {
+ ss := NewCompactorStatefulSet(Options{
+ Name: "abcd",
+ Namespace: "efgh",
+ ObjectStorage: storage.Options{
+ SecretSHA1: "deadbeef",
+ },
+ Stack: lokiv1.LokiStackSpec{
+ StorageClassName: "standard",
+ Template: &lokiv1.LokiTemplateSpec{
+ Compactor: &lokiv1.LokiComponentSpec{
+ Replicas: 1,
+ },
+ },
+ },
+ })
+
annotations := ss.Spec.Template.Annotations
- require.Contains(t, annotations, expected)
- require.Equal(t, annotations[expected], "deadbeef")
+ require.Contains(t, annotations, AnnotationLokiObjectStoreHash)
+ require.Equal(t, annotations[AnnotationLokiObjectStoreHash], "deadbeef")
}
func TestNewCompactorStatefulSet_HasTemplateCertRotationRequiredAtAnnotation(t *testing.T) {
@@ -68,8 +92,8 @@ func TestNewCompactorStatefulSet_HasTemplateCertRotationRequiredAtAnnotation(t *
},
},
})
- expected := "loki.grafana.com/certRotationRequiredAt"
+
annotations := ss.Spec.Template.Annotations
- require.Contains(t, annotations, expected)
- require.Equal(t, annotations[expected], "deadbeef")
+ require.Contains(t, annotations, AnnotationCertRotationRequiredAt)
+ require.Equal(t, annotations[AnnotationCertRotationRequiredAt], "deadbeef")
}
diff --git a/operator/internal/manifests/config.go b/operator/internal/manifests/config.go
index e3aed82610eee..4ec0b728d916d 100644
--- a/operator/internal/manifests/config.go
+++ b/operator/internal/manifests/config.go
@@ -47,9 +47,9 @@ func LokiConfigMap(opt Options) (*corev1.ConfigMap, string, error) {
Name: lokiConfigMapName(opt.Name),
Labels: commonLabels(opt.Name),
},
- BinaryData: map[string][]byte{
- config.LokiConfigFileName: c,
- config.LokiRuntimeConfigFileName: rc,
+ Data: map[string]string{
+ config.LokiConfigFileName: string(c),
+ config.LokiRuntimeConfigFileName: string(rc),
},
}, sha1C, nil
}
@@ -117,6 +117,21 @@ func ConfigOptions(opt Options) config.Options {
opt.Stack.Replication.Factor = opt.Stack.ReplicationFactor
}
+ // Build a slice with the shippers that are used in the config.
+ // Booleans are used to prevent duplicate entries.
+ shippers := []string{}
+ boltdb := false
+ tsdb := false
+ for _, schema := range opt.Stack.Storage.Schemas {
+ if !boltdb && (schema.Version == lokiv1.ObjectStorageSchemaV11 || schema.Version == lokiv1.ObjectStorageSchemaV12) {
+ shippers = append(shippers, "boltdb")
+ boltdb = true
+ } else if !tsdb && schema.Version == lokiv1.ObjectStorageSchemaV13 {
+ shippers = append(shippers, "tsdb")
+ tsdb = true
+ }
+ }
+
return config.Options{
Stack: opt.Stack,
Gates: opt.Gates,
@@ -175,6 +190,7 @@ func ConfigOptions(opt Options) config.Options {
Directory: walDirectory,
IngesterMemoryRequest: opt.ResourceRequirements.Ingester.Requests.Memory().Value(),
},
+ Shippers: shippers,
ObjectStorage: opt.ObjectStorage,
HTTPTimeouts: opt.Timeouts.Loki,
EnableRemoteReporting: opt.Gates.GrafanaLabsUsageReport,
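The `Shippers` list handed to the config package is derived from the schema versions on the stack: v11 and v12 schemas need the `boltdb` shipper, v13 the `tsdb` shipper, and each shipper appears at most once. A small standalone sketch of that mapping, using plain version strings instead of the `lokiv1` constants:

```go
package main

import "fmt"

// shippersForSchemas mirrors the logic added to ConfigOptions above: v11/v12
// schemas map to the boltdb shipper, v13 to the tsdb shipper, without duplicates.
func shippersForSchemas(versions []string) []string {
	var shippers []string
	boltdb, tsdb := false, false
	for _, v := range versions {
		switch {
		case !boltdb && (v == "v11" || v == "v12"):
			shippers = append(shippers, "boltdb")
			boltdb = true
		case !tsdb && v == "v13":
			shippers = append(shippers, "tsdb")
			tsdb = true
		}
	}
	return shippers
}

func main() {
	fmt.Println(shippersForSchemas([]string{"v11", "v12"})) // [boltdb]
	fmt.Println(shippersForSchemas([]string{"v12", "v13"})) // [boltdb tsdb]
	fmt.Println(shippersForSchemas([]string{"v13"}))        // [tsdb]
}
```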
diff --git a/operator/internal/manifests/config_test.go b/operator/internal/manifests/config_test.go
index 268c83853b20e..2a5df6f135cd1 100644
--- a/operator/internal/manifests/config_test.go
+++ b/operator/internal/manifests/config_test.go
@@ -7,7 +7,6 @@ import (
"time"
"github.com/google/uuid"
-
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
@@ -84,7 +83,7 @@ func randomConfigOptions() Options {
MaxQuerySeries: rand.Int31(),
},
},
- Tenants: map[string]lokiv1.LimitsTemplateSpec{
+ Tenants: map[string]lokiv1.PerTenantLimitsTemplateSpec{
uuid.New().String(): {
IngestionLimits: &lokiv1.IngestionLimitSpec{
IngestionRate: rand.Int31(),
@@ -97,10 +96,12 @@ func randomConfigOptions() Options {
PerStreamRateLimit: rand.Int31(),
PerStreamRateLimitBurst: rand.Int31(),
},
- QueryLimits: &lokiv1.QueryLimitSpec{
- MaxEntriesLimitPerQuery: rand.Int31(),
- MaxChunksPerQuery: rand.Int31(),
- MaxQuerySeries: rand.Int31(),
+ QueryLimits: &lokiv1.PerTenantQueryLimitSpec{
+ QueryLimitSpec: lokiv1.QueryLimitSpec{
+ MaxEntriesLimitPerQuery: rand.Int31(),
+ MaxChunksPerQuery: rand.Int31(),
+ MaxQuerySeries: rand.Int31(),
+ },
},
},
},
@@ -117,7 +118,7 @@ func randomConfigOptions() Options {
Operator: corev1.TolerationOpEqual,
Value: uuid.New().String(),
Effect: corev1.TaintEffectNoExecute,
- TolerationSeconds: pointer.Int64Ptr(rand.Int63()),
+ TolerationSeconds: pointer.Int64(rand.Int63()),
},
},
},
@@ -132,7 +133,7 @@ func randomConfigOptions() Options {
Operator: corev1.TolerationOpEqual,
Value: uuid.New().String(),
Effect: corev1.TaintEffectNoExecute,
- TolerationSeconds: pointer.Int64Ptr(rand.Int63()),
+ TolerationSeconds: pointer.Int64(rand.Int63()),
},
},
},
@@ -147,7 +148,7 @@ func randomConfigOptions() Options {
Operator: corev1.TolerationOpEqual,
Value: uuid.New().String(),
Effect: corev1.TaintEffectNoExecute,
- TolerationSeconds: pointer.Int64Ptr(rand.Int63()),
+ TolerationSeconds: pointer.Int64(rand.Int63()),
},
},
},
@@ -162,7 +163,7 @@ func randomConfigOptions() Options {
Operator: corev1.TolerationOpEqual,
Value: uuid.New().String(),
Effect: corev1.TaintEffectNoExecute,
- TolerationSeconds: pointer.Int64Ptr(rand.Int63()),
+ TolerationSeconds: pointer.Int64(rand.Int63()),
},
},
},
@@ -177,7 +178,7 @@ func randomConfigOptions() Options {
Operator: corev1.TolerationOpEqual,
Value: uuid.New().String(),
Effect: corev1.TaintEffectNoExecute,
- TolerationSeconds: pointer.Int64Ptr(rand.Int63()),
+ TolerationSeconds: pointer.Int64(rand.Int63()),
},
},
},
@@ -192,7 +193,7 @@ func randomConfigOptions() Options {
Operator: corev1.TolerationOpEqual,
Value: uuid.New().String(),
Effect: corev1.TaintEffectNoExecute,
- TolerationSeconds: pointer.Int64Ptr(rand.Int63()),
+ TolerationSeconds: pointer.Int64(rand.Int63()),
},
},
},
@@ -375,7 +376,7 @@ func TestConfigOptions_RetentionConfig(t *testing.T) {
Days: 14,
},
},
- Tenants: map[string]lokiv1.LimitsTemplateSpec{
+ Tenants: map[string]lokiv1.PerTenantLimitsTemplateSpec{
"development": {
Retention: &lokiv1.RetentionLimitSpec{
Days: 3,
@@ -394,7 +395,7 @@ func TestConfigOptions_RetentionConfig(t *testing.T) {
spec: lokiv1.LokiStackSpec{
Size: lokiv1.SizeOneXExtraSmall,
Limits: &lokiv1.LimitsSpec{
- Tenants: map[string]lokiv1.LimitsTemplateSpec{
+ Tenants: map[string]lokiv1.PerTenantLimitsTemplateSpec{
"development": {
Retention: &lokiv1.RetentionLimitSpec{
Days: 3,
@@ -1092,10 +1093,12 @@ func TestConfigOptions_RulerOverrides_OCPUserWorkloadOnlyEnabled(t *testing.T) {
Enabled: true,
},
Limits: &lokiv1.LimitsSpec{
- Tenants: map[string]lokiv1.LimitsTemplateSpec{
+ Tenants: map[string]lokiv1.PerTenantLimitsTemplateSpec{
"application": {
- QueryLimits: &lokiv1.QueryLimitSpec{
- QueryTimeout: "5m",
+ QueryLimits: &lokiv1.PerTenantQueryLimitSpec{
+ QueryLimitSpec: lokiv1.QueryLimitSpec{
+ QueryTimeout: "5m",
+ },
},
},
},
@@ -1132,9 +1135,11 @@ func TestConfigOptions_RulerOverrides_OCPUserWorkloadOnlyEnabled(t *testing.T) {
},
wantOverridesOptions: map[string]config.LokiOverrides{
"application": {
- Limits: lokiv1.LimitsTemplateSpec{
- QueryLimits: &lokiv1.QueryLimitSpec{
- QueryTimeout: "5m",
+ Limits: lokiv1.PerTenantLimitsTemplateSpec{
+ QueryLimits: &lokiv1.PerTenantQueryLimitSpec{
+ QueryLimitSpec: lokiv1.QueryLimitSpec{
+ QueryTimeout: "5m",
+ },
},
},
Ruler: config.RulerOverrides{
@@ -1323,3 +1328,89 @@ func TestConfigOptions_ServerOptions(t *testing.T) {
require.Equal(t, want, got.HTTPTimeouts)
}
+
+func TestConfigOptions_Shipper(t *testing.T) {
+ for _, tc := range []struct {
+ name string
+ inOpt Options
+ wantShipper []string
+ }{
+ {
+ name: "default_config_v11_schema",
+ inOpt: Options{
+ Stack: lokiv1.LokiStackSpec{
+ Storage: lokiv1.ObjectStorageSpec{
+ Schemas: []lokiv1.ObjectStorageSchema{
+ {
+ Version: lokiv1.ObjectStorageSchemaV11,
+ EffectiveDate: "2020-10-01",
+ },
+ },
+ },
+ },
+ },
+ wantShipper: []string{"boltdb"},
+ },
+ {
+ name: "v12_schema",
+ inOpt: Options{
+ Stack: lokiv1.LokiStackSpec{
+ Storage: lokiv1.ObjectStorageSpec{
+ Schemas: []lokiv1.ObjectStorageSchema{
+ {
+ Version: lokiv1.ObjectStorageSchemaV12,
+ EffectiveDate: "2020-02-05",
+ },
+ },
+ },
+ },
+ },
+ wantShipper: []string{"boltdb"},
+ },
+ {
+ name: "v13_schema",
+ inOpt: Options{
+ Stack: lokiv1.LokiStackSpec{
+ Storage: lokiv1.ObjectStorageSpec{
+ Schemas: []lokiv1.ObjectStorageSchema{
+ {
+ Version: lokiv1.ObjectStorageSchemaV13,
+ EffectiveDate: "2024-01-01",
+ },
+ },
+ },
+ },
+ },
+ wantShipper: []string{"tsdb"},
+ },
+ {
+ name: "multiple_schema",
+ inOpt: Options{
+ Stack: lokiv1.LokiStackSpec{
+ Storage: lokiv1.ObjectStorageSpec{
+ Schemas: []lokiv1.ObjectStorageSchema{
+ {
+ Version: lokiv1.ObjectStorageSchemaV11,
+ EffectiveDate: "2020-01-01",
+ },
+ {
+ Version: lokiv1.ObjectStorageSchemaV12,
+ EffectiveDate: "2021-01-01",
+ },
+ {
+ Version: lokiv1.ObjectStorageSchemaV13,
+ EffectiveDate: "2024-01-01",
+ },
+ },
+ },
+ },
+ },
+ wantShipper: []string{"boltdb", "tsdb"},
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ got := ConfigOptions(tc.inOpt)
+ require.Equal(t, tc.wantShipper, got.Shippers)
+ })
+ }
+}
diff --git a/operator/internal/manifests/distributor.go b/operator/internal/manifests/distributor.go
index 3d60ae50144cb..ea856762cb6ac 100644
--- a/operator/internal/manifests/distributor.go
+++ b/operator/internal/manifests/distributor.go
@@ -67,7 +67,7 @@ func BuildDistributor(opts Options) ([]client.Object, error) {
// NewDistributorDeployment creates a deployment object for a distributor
func NewDistributorDeployment(opts Options) *appsv1.Deployment {
l := ComponentLabels(LabelDistributorComponent, opts.Name)
- a := commonAnnotations(opts.ConfigSHA1, opts.CertRotationRequiredAt)
+ a := commonAnnotations(opts.ConfigSHA1, opts.ObjectStorage.SecretSHA1, opts.CertRotationRequiredAt)
podSpec := corev1.PodSpec{
Affinity: configureAffinity(LabelDistributorComponent, opts.Name, opts.Gates.DefaultNodeAffinity, opts.Stack.Template.Distributor),
Volumes: []corev1.Volume{
diff --git a/operator/internal/manifests/distributor_test.go b/operator/internal/manifests/distributor_test.go
index 638e33d9cd269..a3b9e6abc932d 100644
--- a/operator/internal/manifests/distributor_test.go
+++ b/operator/internal/manifests/distributor_test.go
@@ -11,6 +11,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ "github.com/grafana/loki/operator/internal/manifests/storage"
)
func TestNewDistributorDeployment_SelectorMatchesLabels(t *testing.T) {
@@ -47,10 +48,30 @@ func TestNewDistributorDeployment_HasTemplateConfigHashAnnotation(t *testing.T)
},
})
- expected := "loki.grafana.com/config-hash"
annotations := ss.Spec.Template.Annotations
- require.Contains(t, annotations, expected)
- require.Equal(t, annotations[expected], "deadbeef")
+ require.Contains(t, annotations, AnnotationLokiConfigHash)
+ require.Equal(t, annotations[AnnotationLokiConfigHash], "deadbeef")
+}
+
+func TestNewDistributorDeployment_HasTemplateObjectStoreHashAnnotation(t *testing.T) {
+ ss := NewDistributorDeployment(Options{
+ Name: "abcd",
+ Namespace: "efgh",
+ ObjectStorage: storage.Options{
+ SecretSHA1: "deadbeef",
+ },
+ Stack: lokiv1.LokiStackSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ Distributor: &lokiv1.LokiComponentSpec{
+ Replicas: 1,
+ },
+ },
+ },
+ })
+
+ annotations := ss.Spec.Template.Annotations
+ require.Contains(t, annotations, AnnotationLokiObjectStoreHash)
+ require.Equal(t, annotations[AnnotationLokiObjectStoreHash], "deadbeef")
}
func TestNewDistributorDeployment_HasTemplateCertRotationRequiredAtAnnotation(t *testing.T) {
@@ -67,10 +88,9 @@ func TestNewDistributorDeployment_HasTemplateCertRotationRequiredAtAnnotation(t
},
})
- expected := "loki.grafana.com/certRotationRequiredAt"
annotations := ss.Spec.Template.Annotations
- require.Contains(t, annotations, expected)
- require.Equal(t, annotations[expected], "deadbeef")
+ require.Contains(t, annotations, AnnotationCertRotationRequiredAt)
+ require.Equal(t, annotations[AnnotationCertRotationRequiredAt], "deadbeef")
}
func TestBuildDistributor_PodDisruptionBudget(t *testing.T) {
diff --git a/operator/internal/manifests/gateway.go b/operator/internal/manifests/gateway.go
index 08c8822177643..03bce6453f363 100644
--- a/operator/internal/manifests/gateway.go
+++ b/operator/internal/manifests/gateway.go
@@ -114,7 +114,7 @@ func BuildGateway(opts Options) ([]client.Object, error) {
// NewGatewayDeployment creates a deployment object for a lokiStack-gateway
func NewGatewayDeployment(opts Options, sha1C string) *appsv1.Deployment {
l := ComponentLabels(LabelGatewayComponent, opts.Name)
- a := commonAnnotations(sha1C, opts.CertRotationRequiredAt)
+ a := commonAnnotations(sha1C, "", opts.CertRotationRequiredAt)
podSpec := corev1.PodSpec{
ServiceAccountName: GatewayName(opts.Name),
Affinity: configureAffinity(LabelGatewayComponent, opts.Name, opts.Gates.DefaultNodeAffinity, opts.Stack.Template.Gateway),
diff --git a/operator/internal/manifests/gateway_tenants_test.go b/operator/internal/manifests/gateway_tenants_test.go
index 834703e80652a..9d6b4d636559e 100644
--- a/operator/internal/manifests/gateway_tenants_test.go
+++ b/operator/internal/manifests/gateway_tenants_test.go
@@ -7,15 +7,14 @@ import (
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
"github.com/stretchr/testify/require"
-
- configv1 "github.com/grafana/loki/operator/apis/config/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
- "github.com/grafana/loki/operator/internal/manifests/openshift"
-
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
+
+ configv1 "github.com/grafana/loki/operator/apis/config/v1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ "github.com/grafana/loki/operator/internal/manifests/openshift"
)
func defaultGatewayDeployment() *appsv1.Deployment {
@@ -1396,12 +1395,11 @@ func TestConfigureServiceForMode(t *testing.T) {
func TestConfigureServiceMonitorForMode(t *testing.T) {
type tt struct {
- desc string
- opts Options
- mode lokiv1.ModeType
- featureGates configv1.FeatureGates
- sm *monitoringv1.ServiceMonitor
- want *monitoringv1.ServiceMonitor
+ desc string
+ opts Options
+ mode lokiv1.ModeType
+ sm *monitoringv1.ServiceMonitor
+ want *monitoringv1.ServiceMonitor
}
tc := []tt{
diff --git a/operator/internal/manifests/gateway_test.go b/operator/internal/manifests/gateway_test.go
index 720b72f5b2fdf..d5c8f37c415c1 100644
--- a/operator/internal/manifests/gateway_test.go
+++ b/operator/internal/manifests/gateway_test.go
@@ -6,19 +6,19 @@ import (
"reflect"
"testing"
- configv1 "github.com/grafana/loki/operator/apis/config/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
- "github.com/grafana/loki/operator/internal/manifests/internal/gateway"
- "github.com/grafana/loki/operator/internal/manifests/openshift"
-
"github.com/google/uuid"
- "github.com/stretchr/testify/require"
-
routev1 "github.com/openshift/api/route/v1"
+ "github.com/stretchr/testify/require"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ configv1 "github.com/grafana/loki/operator/apis/config/v1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ "github.com/grafana/loki/operator/internal/manifests/internal/gateway"
+ "github.com/grafana/loki/operator/internal/manifests/openshift"
+ "github.com/grafana/loki/operator/internal/manifests/storage"
)
func TestNewGatewayDeployment_HasTemplateConfigHashAnnotation(t *testing.T) {
@@ -51,10 +51,46 @@ func TestNewGatewayDeployment_HasTemplateConfigHashAnnotation(t *testing.T) {
Timeouts: defaultTimeoutConfig,
}, sha1C)
- expected := "loki.grafana.com/config-hash"
annotations := ss.Spec.Template.Annotations
- require.Contains(t, annotations, expected)
- require.Equal(t, annotations[expected], sha1C)
+ require.Contains(t, annotations, AnnotationLokiConfigHash)
+ require.Equal(t, annotations[AnnotationLokiConfigHash], sha1C)
+}
+
+func TestNewGatewayDeployment_HasNotTemplateObjectStoreHashAnnotation(t *testing.T) {
+ sha1C := "deadbeef"
+ ss := NewGatewayDeployment(Options{
+ Name: "abcd",
+ Namespace: "efgh",
+ ObjectStorage: storage.Options{
+ SecretSHA1: "deadbeef",
+ },
+ Stack: lokiv1.LokiStackSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ Compactor: &lokiv1.LokiComponentSpec{
+ Replicas: rand.Int31(),
+ },
+ Distributor: &lokiv1.LokiComponentSpec{
+ Replicas: rand.Int31(),
+ },
+ Gateway: &lokiv1.LokiComponentSpec{
+ Replicas: rand.Int31(),
+ },
+ Ingester: &lokiv1.LokiComponentSpec{
+ Replicas: rand.Int31(),
+ },
+ Querier: &lokiv1.LokiComponentSpec{
+ Replicas: rand.Int31(),
+ },
+ QueryFrontend: &lokiv1.LokiComponentSpec{
+ Replicas: rand.Int31(),
+ },
+ },
+ },
+ Timeouts: defaultTimeoutConfig,
+ }, sha1C)
+
+ annotations := ss.Spec.Template.Annotations
+ require.NotContains(t, annotations, AnnotationLokiObjectStoreHash)
}
func TestNewGatewayDeployment_HasNodeSelector(t *testing.T) {
@@ -134,10 +170,9 @@ func TestNewGatewayDeployment_HasTemplateCertRotationRequiredAtAnnotation(t *tes
Timeouts: defaultTimeoutConfig,
}, sha1C)
- expected := "loki.grafana.com/certRotationRequiredAt"
annotations := ss.Spec.Template.Annotations
- require.Contains(t, annotations, expected)
- require.Equal(t, annotations[expected], "deadbeef")
+ require.Contains(t, annotations, AnnotationCertRotationRequiredAt)
+ require.Equal(t, annotations[AnnotationCertRotationRequiredAt], "deadbeef")
}
func TestGatewayConfigMap_ReturnsSHA1OfBinaryContents(t *testing.T) {
diff --git a/operator/internal/manifests/indexgateway.go b/operator/internal/manifests/indexgateway.go
index 16b8d6516743d..3f43075875108 100644
--- a/operator/internal/manifests/indexgateway.go
+++ b/operator/internal/manifests/indexgateway.go
@@ -73,7 +73,7 @@ func BuildIndexGateway(opts Options) ([]client.Object, error) {
// NewIndexGatewayStatefulSet creates a statefulset object for an index-gateway
func NewIndexGatewayStatefulSet(opts Options) *appsv1.StatefulSet {
l := ComponentLabels(LabelIndexGatewayComponent, opts.Name)
- a := commonAnnotations(opts.ConfigSHA1, opts.CertRotationRequiredAt)
+ a := commonAnnotations(opts.ConfigSHA1, opts.ObjectStorage.SecretSHA1, opts.CertRotationRequiredAt)
podSpec := corev1.PodSpec{
Affinity: configureAffinity(LabelIndexGatewayComponent, opts.Name, opts.Gates.DefaultNodeAffinity, opts.Stack.Template.IndexGateway),
Volumes: []corev1.Volume{
diff --git a/operator/internal/manifests/indexgateway_test.go b/operator/internal/manifests/indexgateway_test.go
index b25522b538073..93ab7a033e147 100644
--- a/operator/internal/manifests/indexgateway_test.go
+++ b/operator/internal/manifests/indexgateway_test.go
@@ -10,6 +10,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ "github.com/grafana/loki/operator/internal/manifests/storage"
)
func TestNewIndexGatewayStatefulSet_HasTemplateConfigHashAnnotation(t *testing.T) {
@@ -27,10 +28,31 @@ func TestNewIndexGatewayStatefulSet_HasTemplateConfigHashAnnotation(t *testing.T
},
})
- expected := "loki.grafana.com/config-hash"
annotations := ss.Spec.Template.Annotations
- require.Contains(t, annotations, expected)
- require.Equal(t, annotations[expected], "deadbeef")
+ require.Contains(t, annotations, AnnotationLokiConfigHash)
+ require.Equal(t, annotations[AnnotationLokiConfigHash], "deadbeef")
+}
+
+func TestNewIndexGatewayStatefulSet_HasTemplateObjectStoreHashAnnotation(t *testing.T) {
+ ss := NewIndexGatewayStatefulSet(Options{
+ Name: "abcd",
+ Namespace: "efgh",
+ ObjectStorage: storage.Options{
+ SecretSHA1: "deadbeef",
+ },
+ Stack: lokiv1.LokiStackSpec{
+ StorageClassName: "standard",
+ Template: &lokiv1.LokiTemplateSpec{
+ IndexGateway: &lokiv1.LokiComponentSpec{
+ Replicas: 1,
+ },
+ },
+ },
+ })
+
+ annotations := ss.Spec.Template.Annotations
+ require.Contains(t, annotations, AnnotationLokiObjectStoreHash)
+ require.Equal(t, annotations[AnnotationLokiObjectStoreHash], "deadbeef")
}
func TestNewIndexGatewayStatefulSet_HasTemplateCertRotationRequiredAtAnnotation(t *testing.T) {
@@ -47,10 +69,10 @@ func TestNewIndexGatewayStatefulSet_HasTemplateCertRotationRequiredAtAnnotation(
},
},
})
- expected := "loki.grafana.com/certRotationRequiredAt"
+
annotations := ss.Spec.Template.Annotations
- require.Contains(t, annotations, expected)
- require.Equal(t, annotations[expected], "deadbeef")
+ require.Contains(t, annotations, AnnotationCertRotationRequiredAt)
+ require.Equal(t, annotations[AnnotationCertRotationRequiredAt], "deadbeef")
}
func TestNewIndexGatewayStatefulSet_SelectorMatchesLabels(t *testing.T) {
diff --git a/operator/internal/manifests/ingester.go b/operator/internal/manifests/ingester.go
index 75934b66836da..0f20ce84776fd 100644
--- a/operator/internal/manifests/ingester.go
+++ b/operator/internal/manifests/ingester.go
@@ -73,7 +73,7 @@ func BuildIngester(opts Options) ([]client.Object, error) {
// NewIngesterStatefulSet creates a deployment object for an ingester
func NewIngesterStatefulSet(opts Options) *appsv1.StatefulSet {
l := ComponentLabels(LabelIngesterComponent, opts.Name)
- a := commonAnnotations(opts.ConfigSHA1, opts.CertRotationRequiredAt)
+ a := commonAnnotations(opts.ConfigSHA1, opts.ObjectStorage.SecretSHA1, opts.CertRotationRequiredAt)
podSpec := corev1.PodSpec{
Affinity: configureAffinity(LabelIngesterComponent, opts.Name, opts.Gates.DefaultNodeAffinity, opts.Stack.Template.Ingester),
Volumes: []corev1.Volume{
diff --git a/operator/internal/manifests/ingester_test.go b/operator/internal/manifests/ingester_test.go
index 605c5e5c588f3..83b0d94111301 100644
--- a/operator/internal/manifests/ingester_test.go
+++ b/operator/internal/manifests/ingester_test.go
@@ -13,6 +13,7 @@ import (
v1 "github.com/grafana/loki/operator/apis/config/v1"
lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/internal"
+ "github.com/grafana/loki/operator/internal/manifests/storage"
)
func TestNewIngesterStatefulSet_HasTemplateConfigHashAnnotation(t *testing.T) {
@@ -30,10 +31,31 @@ func TestNewIngesterStatefulSet_HasTemplateConfigHashAnnotation(t *testing.T) {
},
})
- expected := "loki.grafana.com/config-hash"
annotations := ss.Spec.Template.Annotations
- require.Contains(t, annotations, expected)
- require.Equal(t, annotations[expected], "deadbeef")
+ require.Contains(t, annotations, AnnotationLokiConfigHash)
+ require.Equal(t, annotations[AnnotationLokiConfigHash], "deadbeef")
+}
+
+func TestNewIngesterStatefulSet_HasTemplateObjectStoreHashAnnotation(t *testing.T) {
+ ss := NewIngesterStatefulSet(Options{
+ Name: "abcd",
+ Namespace: "efgh",
+ ObjectStorage: storage.Options{
+ SecretSHA1: "deadbeef",
+ },
+ Stack: lokiv1.LokiStackSpec{
+ StorageClassName: "standard",
+ Template: &lokiv1.LokiTemplateSpec{
+ Ingester: &lokiv1.LokiComponentSpec{
+ Replicas: 1,
+ },
+ },
+ },
+ })
+
+ annotations := ss.Spec.Template.Annotations
+ require.Contains(t, annotations, AnnotationLokiObjectStoreHash)
+ require.Equal(t, annotations[AnnotationLokiObjectStoreHash], "deadbeef")
}
func TestNewIngesterStatefulSet_HasTemplateCertRotationRequiredAtAnnotation(t *testing.T) {
@@ -50,10 +72,10 @@ func TestNewIngesterStatefulSet_HasTemplateCertRotationRequiredAtAnnotation(t *t
},
},
})
- expected := "loki.grafana.com/certRotationRequiredAt"
+
annotations := ss.Spec.Template.Annotations
- require.Contains(t, annotations, expected)
- require.Equal(t, annotations[expected], "deadbeef")
+ require.Contains(t, annotations, AnnotationCertRotationRequiredAt)
+ require.Equal(t, annotations[AnnotationCertRotationRequiredAt], "deadbeef")
}
func TestNewIngesterStatefulSet_SelectorMatchesLabels(t *testing.T) {
diff --git a/operator/internal/manifests/internal/config/build.go b/operator/internal/manifests/internal/config/build.go
index 399e9efd493c8..f599b791a7949 100644
--- a/operator/internal/manifests/internal/config/build.go
+++ b/operator/internal/manifests/internal/config/build.go
@@ -4,6 +4,8 @@ import (
"bytes"
"embed"
"io"
+ "reflect"
+ "strings"
"text/template"
"github.com/ViaQ/logerr/v2/kverrors"
@@ -27,7 +29,9 @@ var (
lokiConfigYAMLTmpl = template.Must(template.ParseFS(lokiConfigYAMLTmplFile, "loki-config.yaml"))
- lokiRuntimeConfigYAMLTmpl = template.Must(template.ParseFS(lokiRuntimeConfigYAMLTmplFile, "loki-runtime-config.yaml"))
+ lokiRuntimeConfigYAMLTmpl = template.Must(template.New("loki-runtime-config.yaml").Funcs(template.FuncMap{
+ "yamlBlock": yamlBlock,
+ }).ParseFS(lokiRuntimeConfigYAMLTmplFile, "loki-runtime-config.yaml"))
)
// Build builds a loki stack configuration files
@@ -54,3 +58,9 @@ func Build(opts Options) ([]byte, []byte, error) {
}
return cfg, rcfg, nil
}
+
+func yamlBlock(indent string, in reflect.Value) string {
+ inStr := in.String()
+ lines := strings.Split(strings.TrimRight(inStr, "\n"), "\n")
+ return strings.Join(lines, "\n"+indent)
+}
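
The `yamlBlock` helper exists so the runtime-config template can embed a multi-line value (for example a blocked-query pattern) under a fixed indentation without breaking the YAML. The standalone sketch below reproduces the same string manipulation on a plain string instead of a `reflect.Value`, purely to illustrate the output shape; the exact template invocation is not shown in this excerpt.

```go
package main

import (
	"fmt"
	"strings"
)

// String variant of the yamlBlock template helper added above: every line
// after the first is re-indented so a multi-line value can sit inside a YAML
// literal block at a fixed indentation.
func yamlBlock(indent, in string) string {
	lines := strings.Split(strings.TrimRight(in, "\n"), "\n")
	return strings.Join(lines, "\n"+indent)
}

func main() {
	pattern := "sum by (cluster) (\n  rate({env=\"prod\"}[1m])\n)\n"
	// Conceptually, the template emits the value after a literal block
	// indicator, so every continuation line keeps the required indent.
	fmt.Printf("pattern: |\n    %s\n", yamlBlock("    ", pattern))
}
```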
diff --git a/operator/internal/manifests/internal/config/build_test.go b/operator/internal/manifests/internal/config/build_test.go
index 2972b15377950..537ec84bf71a5 100644
--- a/operator/internal/manifests/internal/config/build_test.go
+++ b/operator/internal/manifests/internal/config/build_test.go
@@ -1,6 +1,7 @@
package config
import (
+ "strings"
"testing"
"time"
@@ -27,8 +28,8 @@ common:
s3: http://test.default.svc.cluster.local.:9000
bucketnames: loki
region: us-east
- access_key_id: test
- secret_access_key: test123
+ access_key_id: ${AWS_ACCESS_KEY_ID}
+ secret_access_key: ${AWS_ACCESS_KEY_SECRET}
s3forcepathstyle: true
compactor_grpc_address: loki-compactor-grpc-lokistack-dev.default.svc.cluster.local:9095
ring:
@@ -43,7 +44,7 @@ compactor:
frontend:
tail_proxy_url: http://loki-querier-http-lokistack-dev.default.svc.cluster.local:3100
compress_responses: true
- max_outstanding_per_tenant: 256
+ max_outstanding_per_tenant: 4096
log_queries_longer_than: 5s
frontend_worker:
frontend_address: loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local:9095
@@ -102,14 +103,19 @@ limits_config:
max_chunks_per_query: 2000000
max_query_length: 721h
max_query_parallelism: 32
+ tsdb_max_query_parallelism: 512
max_query_series: 500
cardinality_limit: 100000
max_streams_matchers_per_query: 1000
max_cache_freshness_per_query: 10m
- per_stream_rate_limit: 3MB
- per_stream_rate_limit_burst: 15MB
split_queries_by_interval: 30m
query_timeout: 1m
+ per_stream_rate_limit: 5MB
+ per_stream_rate_limit_burst: 15MB
+ shard_streams:
+ enabled: true
+ desired_rate: 3MB
+ allow_structured_metadata: true
memberlist:
abort_if_cluster_join_fails: true
advertise_port: 7946
@@ -190,8 +196,9 @@ overrides:
MaxLabelNamesPerSeries: 30,
MaxGlobalStreamsPerTenant: 0,
MaxLineSize: 256000,
- PerStreamRateLimit: 3,
+ PerStreamRateLimit: 5,
PerStreamRateLimitBurst: 15,
+ PerStreamDesiredRate: 3,
},
QueryLimits: &lokiv1.QueryLimitSpec{
MaxEntriesLimitPerQuery: 5000,
@@ -238,11 +245,9 @@ overrides:
ObjectStorage: storage.Options{
SharedStore: lokiv1.ObjectStorageSecretS3,
S3: &storage.S3StorageConfig{
- Endpoint: "http://test.default.svc.cluster.local.:9000",
- Region: "us-east",
- Buckets: "loki",
- AccessKeyID: "test",
- AccessKeySecret: "test123",
+ Endpoint: "http://test.default.svc.cluster.local.:9000",
+ Region: "us-east",
+ Buckets: "loki",
},
Schemas: []lokiv1.ObjectStorageSchema{
{
@@ -251,6 +256,7 @@ overrides:
},
},
},
+ Shippers: []string{"boltdb"},
EnableRemoteReporting: true,
HTTPTimeouts: HTTPTimeoutConfig{
IdleTimeout: 30 * time.Second,
@@ -279,8 +285,8 @@ common:
s3: http://test.default.svc.cluster.local.:9000
bucketnames: loki
region: us-east
- access_key_id: test
- secret_access_key: test123
+ access_key_id: ${AWS_ACCESS_KEY_ID}
+ secret_access_key: ${AWS_ACCESS_KEY_SECRET}
s3forcepathstyle: true
compactor_grpc_address: loki-compactor-grpc-lokistack-dev.default.svc.cluster.local:9095
ring:
@@ -295,7 +301,7 @@ compactor:
frontend:
tail_proxy_url: http://loki-querier-http-lokistack-dev.default.svc.cluster.local:3100
compress_responses: true
- max_outstanding_per_tenant: 256
+ max_outstanding_per_tenant: 4096
log_queries_longer_than: 5s
frontend_worker:
frontend_address: loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local:9095
@@ -354,14 +360,19 @@ limits_config:
max_chunks_per_query: 2000000
max_query_length: 721h
max_query_parallelism: 32
+ tsdb_max_query_parallelism: 512
max_query_series: 500
cardinality_limit: 100000
max_streams_matchers_per_query: 1000
max_cache_freshness_per_query: 10m
- per_stream_rate_limit: 3MB
- per_stream_rate_limit_burst: 15MB
split_queries_by_interval: 30m
query_timeout: 1m
+ per_stream_rate_limit: 5MB
+ per_stream_rate_limit_burst: 15MB
+ shard_streams:
+ enabled: true
+ desired_rate: 3MB
+ allow_structured_metadata: true
memberlist:
abort_if_cluster_join_fails: true
advertise_port: 7946
@@ -431,6 +442,15 @@ overrides:
ingestion_burst_size_mb: 5
max_global_streams_per_user: 1
max_chunks_per_query: 1000000
+ blocked_queries:
+ - hash: 12345
+ types: metric,limited
+ - pattern: |
+ .*prod.*
+ regex: true
+ - types: metric
+ - pattern: |
+ sum(rate({env="prod"}[1m]))
`
opts := Options{
Stack: lokiv1.LokiStackSpec{
@@ -447,8 +467,9 @@ overrides:
MaxLabelNamesPerSeries: 30,
MaxGlobalStreamsPerTenant: 0,
MaxLineSize: 256000,
- PerStreamRateLimit: 3,
+ PerStreamRateLimit: 5,
PerStreamRateLimitBurst: 15,
+ PerStreamDesiredRate: 3,
},
QueryLimits: &lokiv1.QueryLimitSpec{
MaxEntriesLimitPerQuery: 5000,
@@ -458,15 +479,33 @@ overrides:
CardinalityLimit: 100000,
},
},
- Tenants: map[string]lokiv1.LimitsTemplateSpec{
+ Tenants: map[string]lokiv1.PerTenantLimitsTemplateSpec{
"test-a": {
IngestionLimits: &lokiv1.IngestionLimitSpec{
IngestionRate: 2,
IngestionBurstSize: 5,
MaxGlobalStreamsPerTenant: 1,
},
- QueryLimits: &lokiv1.QueryLimitSpec{
- MaxChunksPerQuery: 1000000,
+ QueryLimits: &lokiv1.PerTenantQueryLimitSpec{
+ QueryLimitSpec: lokiv1.QueryLimitSpec{
+ MaxChunksPerQuery: 1000000,
+ },
+ Blocked: []lokiv1.BlockedQuerySpec{
+ {
+ Hash: 12345,
+ Types: lokiv1.BlockedQueryTypes{lokiv1.BlockedQueryMetric, lokiv1.BlockedQueryLimited},
+ },
+ {
+ Pattern: ".*prod.*",
+ Regex: true,
+ },
+ {
+ Types: lokiv1.BlockedQueryTypes{lokiv1.BlockedQueryMetric},
+ },
+ {
+ Pattern: `sum(rate({env="prod"}[1m]))`,
+ },
+ },
},
},
},
@@ -474,14 +513,32 @@ overrides:
},
Overrides: map[string]LokiOverrides{
"test-a": {
- Limits: lokiv1.LimitsTemplateSpec{
+ Limits: lokiv1.PerTenantLimitsTemplateSpec{
IngestionLimits: &lokiv1.IngestionLimitSpec{
IngestionRate: 2,
MaxGlobalStreamsPerTenant: 1,
IngestionBurstSize: 5,
},
- QueryLimits: &lokiv1.QueryLimitSpec{
- MaxChunksPerQuery: 1000000,
+ QueryLimits: &lokiv1.PerTenantQueryLimitSpec{
+ QueryLimitSpec: lokiv1.QueryLimitSpec{
+ MaxChunksPerQuery: 1000000,
+ },
+ Blocked: []lokiv1.BlockedQuerySpec{
+ {
+ Hash: 12345,
+ Types: lokiv1.BlockedQueryTypes{lokiv1.BlockedQueryMetric, lokiv1.BlockedQueryLimited},
+ },
+ {
+ Pattern: ".*prod.*",
+ Regex: true,
+ },
+ {
+ Types: lokiv1.BlockedQueryTypes{lokiv1.BlockedQueryMetric},
+ },
+ {
+ Pattern: `sum(rate({env="prod"}[1m]))`,
+ },
+ },
},
},
},
@@ -521,11 +578,9 @@ overrides:
ObjectStorage: storage.Options{
SharedStore: lokiv1.ObjectStorageSecretS3,
S3: &storage.S3StorageConfig{
- Endpoint: "http://test.default.svc.cluster.local.:9000",
- Region: "us-east",
- Buckets: "loki",
- AccessKeyID: "test",
- AccessKeySecret: "test123",
+ Endpoint: "http://test.default.svc.cluster.local.:9000",
+ Region: "us-east",
+ Buckets: "loki",
},
Schemas: []lokiv1.ObjectStorageSchema{
{
@@ -534,6 +589,7 @@ overrides:
},
},
},
+ Shippers: []string{"boltdb"},
HTTPTimeouts: HTTPTimeoutConfig{
IdleTimeout: 30 * time.Second,
ReadTimeout: 30 * time.Second,
@@ -562,8 +618,9 @@ func TestBuild_ConfigAndRuntimeConfig_CreateLokiConfigFailed(t *testing.T) {
MaxLabelNamesPerSeries: 30,
MaxGlobalStreamsPerTenant: 0,
MaxLineSize: 256000,
- PerStreamRateLimit: 3,
+ PerStreamRateLimit: 5,
PerStreamRateLimitBurst: 15,
+ PerStreamDesiredRate: 3,
},
// making it nil so that the template is not generated and error is returned
QueryLimits: nil,
@@ -605,11 +662,9 @@ func TestBuild_ConfigAndRuntimeConfig_CreateLokiConfigFailed(t *testing.T) {
ObjectStorage: storage.Options{
SharedStore: lokiv1.ObjectStorageSecretS3,
S3: &storage.S3StorageConfig{
- Endpoint: "http://test.default.svc.cluster.local.:9000",
- Region: "us-east",
- Buckets: "loki",
- AccessKeyID: "test",
- AccessKeySecret: "test123",
+ Endpoint: "http://test.default.svc.cluster.local.:9000",
+ Region: "us-east",
+ Buckets: "loki",
},
Schemas: []lokiv1.ObjectStorageSchema{
{
@@ -618,6 +673,7 @@ func TestBuild_ConfigAndRuntimeConfig_CreateLokiConfigFailed(t *testing.T) {
},
},
},
+ Shippers: []string{"boltdb"},
}
cfg, rCfg, err := Build(opts)
require.Error(t, err)
@@ -640,8 +696,8 @@ common:
s3: http://test.default.svc.cluster.local.:9000
bucketnames: loki
region: us-east
- access_key_id: test
- secret_access_key: test123
+ access_key_id: ${AWS_ACCESS_KEY_ID}
+ secret_access_key: ${AWS_ACCESS_KEY_SECRET}
s3forcepathstyle: true
compactor_grpc_address: loki-compactor-grpc-lokistack-dev.default.svc.cluster.local:9095
ring:
@@ -656,7 +712,7 @@ compactor:
frontend:
tail_proxy_url: http://loki-querier-http-lokistack-dev.default.svc.cluster.local:3100
compress_responses: true
- max_outstanding_per_tenant: 256
+ max_outstanding_per_tenant: 4096
log_queries_longer_than: 5s
frontend_worker:
frontend_address: loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local:9095
@@ -715,14 +771,19 @@ limits_config:
max_chunks_per_query: 2000000
max_query_length: 721h
max_query_parallelism: 32
+ tsdb_max_query_parallelism: 512
max_query_series: 500
cardinality_limit: 100000
max_streams_matchers_per_query: 1000
max_cache_freshness_per_query: 10m
- per_stream_rate_limit: 3MB
- per_stream_rate_limit_burst: 15MB
split_queries_by_interval: 30m
query_timeout: 1m
+ per_stream_rate_limit: 5MB
+ per_stream_rate_limit_burst: 15MB
+ shard_streams:
+ enabled: true
+ desired_rate: 3MB
+ allow_structured_metadata: true
memberlist:
abort_if_cluster_join_fails: true
advertise_port: 7946
@@ -857,8 +918,9 @@ overrides:
MaxLabelNamesPerSeries: 30,
MaxGlobalStreamsPerTenant: 0,
MaxLineSize: 256000,
- PerStreamRateLimit: 3,
+ PerStreamRateLimit: 5,
PerStreamRateLimitBurst: 15,
+ PerStreamDesiredRate: 3,
},
QueryLimits: &lokiv1.QueryLimitSpec{
MaxEntriesLimitPerQuery: 5000,
@@ -952,11 +1014,9 @@ overrides:
ObjectStorage: storage.Options{
SharedStore: lokiv1.ObjectStorageSecretS3,
S3: &storage.S3StorageConfig{
- Endpoint: "http://test.default.svc.cluster.local.:9000",
- Region: "us-east",
- Buckets: "loki",
- AccessKeyID: "test",
- AccessKeySecret: "test123",
+ Endpoint: "http://test.default.svc.cluster.local.:9000",
+ Region: "us-east",
+ Buckets: "loki",
},
Schemas: []lokiv1.ObjectStorageSchema{
{
@@ -965,6 +1025,7 @@ overrides:
},
},
},
+ Shippers: []string{"boltdb"},
EnableRemoteReporting: true,
HTTPTimeouts: HTTPTimeoutConfig{
IdleTimeout: 30 * time.Second,
@@ -993,8 +1054,8 @@ common:
s3: http://test.default.svc.cluster.local.:9000
bucketnames: loki
region: us-east
- access_key_id: test
- secret_access_key: test123
+ access_key_id: ${AWS_ACCESS_KEY_ID}
+ secret_access_key: ${AWS_ACCESS_KEY_SECRET}
s3forcepathstyle: true
compactor_grpc_address: loki-compactor-grpc-lokistack-dev.default.svc.cluster.local:9095
ring:
@@ -1009,7 +1070,7 @@ compactor:
frontend:
tail_proxy_url: http://loki-querier-http-lokistack-dev.default.svc.cluster.local:3100
compress_responses: true
- max_outstanding_per_tenant: 256
+ max_outstanding_per_tenant: 4096
log_queries_longer_than: 5s
frontend_worker:
frontend_address: loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local:9095
@@ -1068,14 +1129,19 @@ limits_config:
max_chunks_per_query: 2000000
max_query_length: 721h
max_query_parallelism: 32
+ tsdb_max_query_parallelism: 512
max_query_series: 500
cardinality_limit: 100000
max_streams_matchers_per_query: 1000
max_cache_freshness_per_query: 10m
- per_stream_rate_limit: 3MB
- per_stream_rate_limit_burst: 15MB
split_queries_by_interval: 30m
query_timeout: 1m
+ per_stream_rate_limit: 5MB
+ per_stream_rate_limit_burst: 15MB
+ shard_streams:
+ enabled: true
+ desired_rate: 3MB
+ allow_structured_metadata: true
memberlist:
abort_if_cluster_join_fails: true
advertise_port: 7946
@@ -1210,8 +1276,9 @@ overrides:
MaxLabelNamesPerSeries: 30,
MaxGlobalStreamsPerTenant: 0,
MaxLineSize: 256000,
- PerStreamRateLimit: 3,
+ PerStreamRateLimit: 5,
PerStreamRateLimitBurst: 15,
+ PerStreamDesiredRate: 3,
},
QueryLimits: &lokiv1.QueryLimitSpec{
MaxEntriesLimitPerQuery: 5000,
@@ -1306,11 +1373,9 @@ overrides:
ObjectStorage: storage.Options{
SharedStore: lokiv1.ObjectStorageSecretS3,
S3: &storage.S3StorageConfig{
- Endpoint: "http://test.default.svc.cluster.local.:9000",
- Region: "us-east",
- Buckets: "loki",
- AccessKeyID: "test",
- AccessKeySecret: "test123",
+ Endpoint: "http://test.default.svc.cluster.local.:9000",
+ Region: "us-east",
+ Buckets: "loki",
},
Schemas: []lokiv1.ObjectStorageSchema{
{
@@ -1319,6 +1384,7 @@ overrides:
},
},
},
+ Shippers: []string{"boltdb"},
EnableRemoteReporting: true,
HTTPTimeouts: HTTPTimeoutConfig{
IdleTimeout: 30 * time.Second,
@@ -1347,8 +1413,8 @@ common:
s3: http://test.default.svc.cluster.local.:9000
bucketnames: loki
region: us-east
- access_key_id: test
- secret_access_key: test123
+ access_key_id: ${AWS_ACCESS_KEY_ID}
+ secret_access_key: ${AWS_ACCESS_KEY_SECRET}
s3forcepathstyle: true
compactor_grpc_address: loki-compactor-grpc-lokistack-dev.default.svc.cluster.local:9095
ring:
@@ -1363,7 +1429,7 @@ compactor:
frontend:
tail_proxy_url: http://loki-querier-http-lokistack-dev.default.svc.cluster.local:3100
compress_responses: true
- max_outstanding_per_tenant: 256
+ max_outstanding_per_tenant: 4096
log_queries_longer_than: 5s
frontend_worker:
frontend_address: loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local:9095
@@ -1422,14 +1488,19 @@ limits_config:
max_chunks_per_query: 2000000
max_query_length: 721h
max_query_parallelism: 32
+ tsdb_max_query_parallelism: 512
max_query_series: 500
cardinality_limit: 100000
max_streams_matchers_per_query: 1000
max_cache_freshness_per_query: 10m
- per_stream_rate_limit: 3MB
- per_stream_rate_limit_burst: 15MB
split_queries_by_interval: 30m
query_timeout: 1m
+ per_stream_rate_limit: 5MB
+ per_stream_rate_limit_burst: 15MB
+ shard_streams:
+ enabled: true
+ desired_rate: 3MB
+ allow_structured_metadata: true
memberlist:
abort_if_cluster_join_fails: true
advertise_port: 7946
@@ -1577,8 +1648,9 @@ overrides:
MaxLabelNamesPerSeries: 30,
MaxGlobalStreamsPerTenant: 0,
MaxLineSize: 256000,
- PerStreamRateLimit: 3,
+ PerStreamRateLimit: 5,
PerStreamRateLimitBurst: 15,
+ PerStreamDesiredRate: 3,
},
QueryLimits: &lokiv1.QueryLimitSpec{
MaxEntriesLimitPerQuery: 5000,
@@ -1690,11 +1762,9 @@ overrides:
ObjectStorage: storage.Options{
SharedStore: lokiv1.ObjectStorageSecretS3,
S3: &storage.S3StorageConfig{
- Endpoint: "http://test.default.svc.cluster.local.:9000",
- Region: "us-east",
- Buckets: "loki",
- AccessKeyID: "test",
- AccessKeySecret: "test123",
+ Endpoint: "http://test.default.svc.cluster.local.:9000",
+ Region: "us-east",
+ Buckets: "loki",
},
Schemas: []lokiv1.ObjectStorageSchema{
{
@@ -1703,6 +1773,7 @@ overrides:
},
},
},
+ Shippers: []string{"boltdb"},
EnableRemoteReporting: true,
HTTPTimeouts: HTTPTimeoutConfig{
IdleTimeout: 30 * time.Second,
@@ -1731,8 +1802,8 @@ common:
s3: http://test.default.svc.cluster.local.:9000
bucketnames: loki
region: us-east
- access_key_id: test
- secret_access_key: test123
+ access_key_id: ${AWS_ACCESS_KEY_ID}
+ secret_access_key: ${AWS_ACCESS_KEY_SECRET}
s3forcepathstyle: true
compactor_grpc_address: loki-compactor-grpc-lokistack-dev.default.svc.cluster.local:9095
ring:
@@ -1750,7 +1821,7 @@ compactor:
frontend:
tail_proxy_url: http://loki-querier-http-lokistack-dev.default.svc.cluster.local:3100
compress_responses: true
- max_outstanding_per_tenant: 256
+ max_outstanding_per_tenant: 4096
log_queries_longer_than: 5s
frontend_worker:
frontend_address: loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local:9095
@@ -1809,6 +1880,7 @@ limits_config:
max_chunks_per_query: 2000000
max_query_length: 721h
max_query_parallelism: 32
+ tsdb_max_query_parallelism: 512
max_query_series: 500
cardinality_limit: 100000
max_streams_matchers_per_query: 1000
@@ -1818,10 +1890,14 @@ limits_config:
priority: 1
period: 3d
max_cache_freshness_per_query: 10m
- per_stream_rate_limit: 3MB
- per_stream_rate_limit_burst: 15MB
split_queries_by_interval: 30m
query_timeout: 1m
+ per_stream_rate_limit: 5MB
+ per_stream_rate_limit_burst: 15MB
+ shard_streams:
+ enabled: true
+ desired_rate: 3MB
+ allow_structured_metadata: true
memberlist:
abort_if_cluster_join_fails: true
advertise_port: 7946
@@ -1912,8 +1988,9 @@ overrides:
MaxLabelNamesPerSeries: 30,
MaxGlobalStreamsPerTenant: 0,
MaxLineSize: 256000,
- PerStreamRateLimit: 3,
+ PerStreamRateLimit: 5,
PerStreamRateLimitBurst: 15,
+ PerStreamDesiredRate: 3,
},
QueryLimits: &lokiv1.QueryLimitSpec{
MaxEntriesLimitPerQuery: 5000,
@@ -1933,15 +2010,17 @@ overrides:
},
},
},
- Tenants: map[string]lokiv1.LimitsTemplateSpec{
+ Tenants: map[string]lokiv1.PerTenantLimitsTemplateSpec{
"test-a": {
IngestionLimits: &lokiv1.IngestionLimitSpec{
IngestionRate: 2,
IngestionBurstSize: 5,
MaxGlobalStreamsPerTenant: 1,
},
- QueryLimits: &lokiv1.QueryLimitSpec{
- MaxChunksPerQuery: 1000000,
+ QueryLimits: &lokiv1.PerTenantQueryLimitSpec{
+ QueryLimitSpec: lokiv1.QueryLimitSpec{
+ MaxChunksPerQuery: 1000000,
+ },
},
Retention: &lokiv1.RetentionLimitSpec{
Days: 7,
@@ -1959,14 +2038,16 @@ overrides:
},
Overrides: map[string]LokiOverrides{
"test-a": {
- Limits: lokiv1.LimitsTemplateSpec{
+ Limits: lokiv1.PerTenantLimitsTemplateSpec{
IngestionLimits: &lokiv1.IngestionLimitSpec{
IngestionRate: 2,
IngestionBurstSize: 5,
MaxGlobalStreamsPerTenant: 1,
},
- QueryLimits: &lokiv1.QueryLimitSpec{
- MaxChunksPerQuery: 1000000,
+ QueryLimits: &lokiv1.PerTenantQueryLimitSpec{
+ QueryLimitSpec: lokiv1.QueryLimitSpec{
+ MaxChunksPerQuery: 1000000,
+ },
},
Retention: &lokiv1.RetentionLimitSpec{
Days: 7,
@@ -2016,11 +2097,9 @@ overrides:
ObjectStorage: storage.Options{
SharedStore: lokiv1.ObjectStorageSecretS3,
S3: &storage.S3StorageConfig{
- Endpoint: "http://test.default.svc.cluster.local.:9000",
- Region: "us-east",
- Buckets: "loki",
- AccessKeyID: "test",
- AccessKeySecret: "test123",
+ Endpoint: "http://test.default.svc.cluster.local.:9000",
+ Region: "us-east",
+ Buckets: "loki",
},
Schemas: []lokiv1.ObjectStorageSchema{
{
@@ -2029,6 +2108,7 @@ overrides:
},
},
},
+ Shippers: []string{"boltdb"},
Retention: RetentionOptions{
Enabled: true,
DeleteWorkerCount: 50,
@@ -2060,8 +2140,8 @@ common:
s3: http://test.default.svc.cluster.local.:9000
bucketnames: loki
region: us-east
- access_key_id: test
- secret_access_key: test123
+ access_key_id: ${AWS_ACCESS_KEY_ID}
+ secret_access_key: ${AWS_ACCESS_KEY_SECRET}
s3forcepathstyle: true
compactor_grpc_address: loki-compactor-grpc-lokistack-dev.default.svc.cluster.local:9095
ring:
@@ -2076,7 +2156,7 @@ compactor:
frontend:
tail_proxy_url: http://loki-querier-http-lokistack-dev.default.svc.cluster.local:3100
compress_responses: true
- max_outstanding_per_tenant: 256
+ max_outstanding_per_tenant: 4096
log_queries_longer_than: 5s
frontend_worker:
frontend_address: loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local:9095
@@ -2135,14 +2215,19 @@ limits_config:
max_chunks_per_query: 2000000
max_query_length: 721h
max_query_parallelism: 32
+ tsdb_max_query_parallelism: 512
max_query_series: 500
cardinality_limit: 100000
max_streams_matchers_per_query: 1000
max_cache_freshness_per_query: 10m
- per_stream_rate_limit: 3MB
- per_stream_rate_limit_burst: 15MB
split_queries_by_interval: 30m
query_timeout: 2m
+ per_stream_rate_limit: 5MB
+ per_stream_rate_limit_burst: 15MB
+ shard_streams:
+ enabled: true
+ desired_rate: 3MB
+ allow_structured_metadata: true
memberlist:
abort_if_cluster_join_fails: true
advertise_port: 7946
@@ -2303,8 +2388,9 @@ overrides:
MaxLabelNamesPerSeries: 30,
MaxGlobalStreamsPerTenant: 0,
MaxLineSize: 256000,
- PerStreamRateLimit: 3,
+ PerStreamRateLimit: 5,
PerStreamRateLimitBurst: 15,
+ PerStreamDesiredRate: 3,
},
QueryLimits: &lokiv1.QueryLimitSpec{
MaxEntriesLimitPerQuery: 5000,
@@ -2433,11 +2519,9 @@ overrides:
ObjectStorage: storage.Options{
SharedStore: lokiv1.ObjectStorageSecretS3,
S3: &storage.S3StorageConfig{
- Endpoint: "http://test.default.svc.cluster.local.:9000",
- Region: "us-east",
- Buckets: "loki",
- AccessKeyID: "test",
- AccessKeySecret: "test123",
+ Endpoint: "http://test.default.svc.cluster.local.:9000",
+ Region: "us-east",
+ Buckets: "loki",
},
Schemas: []lokiv1.ObjectStorageSchema{
{
@@ -2446,6 +2530,7 @@ overrides:
},
},
},
+ Shippers: []string{"boltdb"},
EnableRemoteReporting: true,
HTTPTimeouts: HTTPTimeoutConfig{
IdleTimeout: 30 * time.Second,
@@ -2474,8 +2559,8 @@ common:
s3: http://test.default.svc.cluster.local.:9000
bucketnames: loki
region: us-east
- access_key_id: test
- secret_access_key: test123
+ access_key_id: ${AWS_ACCESS_KEY_ID}
+ secret_access_key: ${AWS_ACCESS_KEY_SECRET}
s3forcepathstyle: true
compactor_grpc_address: loki-compactor-grpc-lokistack-dev.default.svc.cluster.local:9095
ring:
@@ -2497,7 +2582,7 @@ frontend:
tls_cipher_suites: cipher1,cipher2
tls_min_version: VersionTLS12
compress_responses: true
- max_outstanding_per_tenant: 256
+ max_outstanding_per_tenant: 4096
log_queries_longer_than: 5s
frontend_worker:
frontend_address: loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local:9095
@@ -2570,14 +2655,19 @@ limits_config:
max_chunks_per_query: 2000000
max_query_length: 721h
max_query_parallelism: 32
+ tsdb_max_query_parallelism: 512
max_query_series: 500
cardinality_limit: 100000
max_streams_matchers_per_query: 1000
max_cache_freshness_per_query: 10m
- per_stream_rate_limit: 3MB
- per_stream_rate_limit_burst: 15MB
split_queries_by_interval: 30m
query_timeout: 1m
+ per_stream_rate_limit: 5MB
+ per_stream_rate_limit_burst: 15MB
+ shard_streams:
+ enabled: true
+ desired_rate: 3MB
+ allow_structured_metadata: true
memberlist:
abort_if_cluster_join_fails: true
advertise_port: 7946
@@ -2695,8 +2785,9 @@ overrides:
MaxLabelNamesPerSeries: 30,
MaxGlobalStreamsPerTenant: 0,
MaxLineSize: 256000,
- PerStreamRateLimit: 3,
+ PerStreamRateLimit: 5,
PerStreamRateLimitBurst: 15,
+ PerStreamDesiredRate: 3,
},
QueryLimits: &lokiv1.QueryLimitSpec{
MaxEntriesLimitPerQuery: 5000,
@@ -2774,11 +2865,9 @@ overrides:
ObjectStorage: storage.Options{
SharedStore: lokiv1.ObjectStorageSecretS3,
S3: &storage.S3StorageConfig{
- Endpoint: "http://test.default.svc.cluster.local.:9000",
- Region: "us-east",
- Buckets: "loki",
- AccessKeyID: "test",
- AccessKeySecret: "test123",
+ Endpoint: "http://test.default.svc.cluster.local.:9000",
+ Region: "us-east",
+ Buckets: "loki",
},
Schemas: []lokiv1.ObjectStorageSchema{
{
@@ -2787,6 +2876,7 @@ overrides:
},
},
},
+ Shippers: []string{"boltdb"},
EnableRemoteReporting: true,
HTTPTimeouts: HTTPTimeoutConfig{
IdleTimeout: 30 * time.Second,
@@ -2815,8 +2905,8 @@ common:
s3: http://test.default.svc.cluster.local.:9000
bucketnames: loki
region: us-east
- access_key_id: test
- secret_access_key: test123
+ access_key_id: ${AWS_ACCESS_KEY_ID}
+ secret_access_key: ${AWS_ACCESS_KEY_SECRET}
s3forcepathstyle: true
compactor_grpc_address: loki-compactor-grpc-lokistack-dev.default.svc.cluster.local:9095
ring:
@@ -2831,7 +2921,7 @@ compactor:
frontend:
tail_proxy_url: http://loki-querier-http-lokistack-dev.default.svc.cluster.local:3100
compress_responses: true
- max_outstanding_per_tenant: 256
+ max_outstanding_per_tenant: 4096
log_queries_longer_than: 5s
frontend_worker:
frontend_address: loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local:9095
@@ -2890,14 +2980,19 @@ limits_config:
max_chunks_per_query: 2000000
max_query_length: 721h
max_query_parallelism: 32
+ tsdb_max_query_parallelism: 512
max_query_series: 500
cardinality_limit: 100000
max_streams_matchers_per_query: 1000
max_cache_freshness_per_query: 10m
- per_stream_rate_limit: 3MB
- per_stream_rate_limit_burst: 15MB
split_queries_by_interval: 30m
query_timeout: 2m
+ per_stream_rate_limit: 5MB
+ per_stream_rate_limit_burst: 15MB
+ shard_streams:
+ enabled: true
+ desired_rate: 3MB
+ allow_structured_metadata: true
memberlist:
abort_if_cluster_join_fails: true
advertise_port: 7946
@@ -3086,8 +3181,9 @@ overrides:
MaxLabelNamesPerSeries: 30,
MaxGlobalStreamsPerTenant: 0,
MaxLineSize: 256000,
- PerStreamRateLimit: 3,
+ PerStreamRateLimit: 5,
PerStreamRateLimitBurst: 15,
+ PerStreamDesiredRate: 3,
},
QueryLimits: &lokiv1.QueryLimitSpec{
MaxEntriesLimitPerQuery: 5000,
@@ -3265,11 +3361,9 @@ overrides:
ObjectStorage: storage.Options{
SharedStore: lokiv1.ObjectStorageSecretS3,
S3: &storage.S3StorageConfig{
- Endpoint: "http://test.default.svc.cluster.local.:9000",
- Region: "us-east",
- Buckets: "loki",
- AccessKeyID: "test",
- AccessKeySecret: "test123",
+ Endpoint: "http://test.default.svc.cluster.local.:9000",
+ Region: "us-east",
+ Buckets: "loki",
},
Schemas: []lokiv1.ObjectStorageSchema{
{
@@ -3278,6 +3372,7 @@ overrides:
},
},
},
+ Shippers: []string{"boltdb"},
EnableRemoteReporting: true,
HTTPTimeouts: HTTPTimeoutConfig{
IdleTimeout: 30 * time.Second,
@@ -3306,8 +3401,8 @@ common:
s3: http://test.default.svc.cluster.local.:9000
bucketnames: loki
region: us-east
- access_key_id: test
- secret_access_key: test123
+ access_key_id: ${AWS_ACCESS_KEY_ID}
+ secret_access_key: ${AWS_ACCESS_KEY_SECRET}
s3forcepathstyle: true
compactor_grpc_address: loki-compactor-grpc-lokistack-dev.default.svc.cluster.local:9095
ring:
@@ -3323,7 +3418,7 @@ compactor:
frontend:
tail_proxy_url: http://loki-querier-http-lokistack-dev.default.svc.cluster.local:3100
compress_responses: true
- max_outstanding_per_tenant: 256
+ max_outstanding_per_tenant: 4096
log_queries_longer_than: 5s
frontend_worker:
frontend_address: loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local:9095
@@ -3382,14 +3477,19 @@ limits_config:
max_chunks_per_query: 2000000
max_query_length: 721h
max_query_parallelism: 32
+ tsdb_max_query_parallelism: 512
max_query_series: 500
cardinality_limit: 100000
max_streams_matchers_per_query: 1000
max_cache_freshness_per_query: 10m
- per_stream_rate_limit: 3MB
- per_stream_rate_limit_burst: 15MB
split_queries_by_interval: 30m
query_timeout: 1m
+ per_stream_rate_limit: 5MB
+ per_stream_rate_limit_burst: 15MB
+ shard_streams:
+ enabled: true
+ desired_rate: 3MB
+ allow_structured_metadata: true
memberlist:
abort_if_cluster_join_fails: true
advertise_addr: ${HASH_RING_INSTANCE_ADDR}
@@ -3471,8 +3571,9 @@ overrides:
MaxLabelNamesPerSeries: 30,
MaxGlobalStreamsPerTenant: 0,
MaxLineSize: 256000,
- PerStreamRateLimit: 3,
+ PerStreamRateLimit: 5,
PerStreamRateLimitBurst: 15,
+ PerStreamDesiredRate: 3,
},
QueryLimits: &lokiv1.QueryLimitSpec{
MaxEntriesLimitPerQuery: 5000,
@@ -3520,11 +3621,9 @@ overrides:
ObjectStorage: storage.Options{
SharedStore: lokiv1.ObjectStorageSecretS3,
S3: &storage.S3StorageConfig{
- Endpoint: "http://test.default.svc.cluster.local.:9000",
- Region: "us-east",
- Buckets: "loki",
- AccessKeyID: "test",
- AccessKeySecret: "test123",
+ Endpoint: "http://test.default.svc.cluster.local.:9000",
+ Region: "us-east",
+ Buckets: "loki",
},
Schemas: []lokiv1.ObjectStorageSchema{
{
@@ -3533,6 +3632,7 @@ overrides:
},
},
},
+ Shippers: []string{"boltdb"},
EnableRemoteReporting: true,
HTTPTimeouts: HTTPTimeoutConfig{
IdleTimeout: 30 * time.Second,
@@ -3561,8 +3661,8 @@ common:
s3: http://test.default.svc.cluster.local.:9000
bucketnames: loki
region: us-east
- access_key_id: test
- secret_access_key: test123
+ access_key_id: ${AWS_ACCESS_KEY_ID}
+ secret_access_key: ${AWS_ACCESS_KEY_SECRET}
s3forcepathstyle: true
compactor_grpc_address: loki-compactor-grpc-lokistack-dev.default.svc.cluster.local:9095
ring:
@@ -3578,7 +3678,7 @@ compactor:
frontend:
tail_proxy_url: http://loki-querier-http-lokistack-dev.default.svc.cluster.local:3100
compress_responses: true
- max_outstanding_per_tenant: 256
+ max_outstanding_per_tenant: 4096
log_queries_longer_than: 5s
frontend_worker:
frontend_address: loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local:9095
@@ -3638,14 +3738,19 @@ limits_config:
max_chunks_per_query: 2000000
max_query_length: 721h
max_query_parallelism: 32
+ tsdb_max_query_parallelism: 512
max_query_series: 500
cardinality_limit: 100000
max_streams_matchers_per_query: 1000
max_cache_freshness_per_query: 10m
- per_stream_rate_limit: 3MB
- per_stream_rate_limit_burst: 15MB
split_queries_by_interval: 30m
query_timeout: 1m
+ per_stream_rate_limit: 5MB
+ per_stream_rate_limit_burst: 15MB
+ shard_streams:
+ enabled: true
+ desired_rate: 3MB
+ allow_structured_metadata: true
memberlist:
abort_if_cluster_join_fails: true
advertise_addr: ${HASH_RING_INSTANCE_ADDR}
@@ -3727,8 +3832,9 @@ overrides:
MaxLabelNamesPerSeries: 30,
MaxGlobalStreamsPerTenant: 0,
MaxLineSize: 256000,
- PerStreamRateLimit: 3,
+ PerStreamRateLimit: 5,
PerStreamRateLimitBurst: 15,
+ PerStreamDesiredRate: 3,
},
QueryLimits: &lokiv1.QueryLimitSpec{
MaxEntriesLimitPerQuery: 5000,
@@ -3777,11 +3883,9 @@ overrides:
ObjectStorage: storage.Options{
SharedStore: lokiv1.ObjectStorageSecretS3,
S3: &storage.S3StorageConfig{
- Endpoint: "http://test.default.svc.cluster.local.:9000",
- Region: "us-east",
- Buckets: "loki",
- AccessKeyID: "test",
- AccessKeySecret: "test123",
+ Endpoint: "http://test.default.svc.cluster.local.:9000",
+ Region: "us-east",
+ Buckets: "loki",
},
Schemas: []lokiv1.ObjectStorageSchema{
{
@@ -3790,6 +3894,7 @@ overrides:
},
},
},
+ Shippers: []string{"boltdb"},
EnableRemoteReporting: true,
HTTPTimeouts: HTTPTimeoutConfig{
IdleTimeout: 30 * time.Second,
@@ -3818,8 +3923,8 @@ common:
s3: http://test.default.svc.cluster.local.:9000
bucketnames: loki
region: us-east
- access_key_id: test
- secret_access_key: test123
+ access_key_id: ${AWS_ACCESS_KEY_ID}
+ secret_access_key: ${AWS_ACCESS_KEY_SECRET}
s3forcepathstyle: true
compactor_grpc_address: loki-compactor-grpc-lokistack-dev.default.svc.cluster.local:9095
ring:
@@ -3836,7 +3941,7 @@ compactor:
frontend:
tail_proxy_url: http://loki-querier-http-lokistack-dev.default.svc.cluster.local:3100
compress_responses: true
- max_outstanding_per_tenant: 256
+ max_outstanding_per_tenant: 4096
log_queries_longer_than: 5s
frontend_worker:
frontend_address: loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local:9095
@@ -3895,14 +4000,19 @@ limits_config:
max_chunks_per_query: 2000000
max_query_length: 721h
max_query_parallelism: 32
+ tsdb_max_query_parallelism: 512
max_query_series: 500
cardinality_limit: 100000
max_streams_matchers_per_query: 1000
max_cache_freshness_per_query: 10m
- per_stream_rate_limit: 3MB
- per_stream_rate_limit_burst: 15MB
split_queries_by_interval: 30m
query_timeout: 1m
+ per_stream_rate_limit: 5MB
+ per_stream_rate_limit_burst: 15MB
+ shard_streams:
+ enabled: true
+ desired_rate: 3MB
+ allow_structured_metadata: true
memberlist:
abort_if_cluster_join_fails: true
advertise_port: 7946
@@ -3983,8 +4093,9 @@ overrides:
MaxLabelNamesPerSeries: 30,
MaxGlobalStreamsPerTenant: 0,
MaxLineSize: 256000,
- PerStreamRateLimit: 3,
+ PerStreamRateLimit: 5,
PerStreamRateLimitBurst: 15,
+ PerStreamDesiredRate: 3,
},
QueryLimits: &lokiv1.QueryLimitSpec{
MaxEntriesLimitPerQuery: 5000,
@@ -4032,11 +4143,9 @@ overrides:
ObjectStorage: storage.Options{
SharedStore: lokiv1.ObjectStorageSecretS3,
S3: &storage.S3StorageConfig{
- Endpoint: "http://test.default.svc.cluster.local.:9000",
- Region: "us-east",
- Buckets: "loki",
- AccessKeyID: "test",
- AccessKeySecret: "test123",
+ Endpoint: "http://test.default.svc.cluster.local.:9000",
+ Region: "us-east",
+ Buckets: "loki",
},
Schemas: []lokiv1.ObjectStorageSchema{
{
@@ -4045,6 +4154,7 @@ overrides:
},
},
},
+ Shippers: []string{"boltdb"},
EnableRemoteReporting: true,
HTTPTimeouts: HTTPTimeoutConfig{
IdleTimeout: 30 * time.Second,
@@ -4073,13 +4183,13 @@ common:
s3: http://test.default.svc.cluster.local.:9000
bucketnames: loki
region: us-east
- access_key_id: test
- secret_access_key: test123
+ access_key_id: ${AWS_ACCESS_KEY_ID}
+ secret_access_key: ${AWS_ACCESS_KEY_SECRET}
sse:
type: SSE-KMS
kms_key_id: test
kms_encryption_context: |
- {"key": "value", "another":"value1"}
+ ${AWS_SSE_KMS_ENCRYPTION_CONTEXT}
s3forcepathstyle: true
compactor_grpc_address: loki-compactor-grpc-lokistack-dev.default.svc.cluster.local:9095
ring:
@@ -4094,7 +4204,7 @@ compactor:
frontend:
tail_proxy_url: http://loki-querier-http-lokistack-dev.default.svc.cluster.local:3100
compress_responses: true
- max_outstanding_per_tenant: 256
+ max_outstanding_per_tenant: 4096
log_queries_longer_than: 5s
frontend_worker:
frontend_address: loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local:9095
@@ -4153,14 +4263,19 @@ limits_config:
max_chunks_per_query: 2000000
max_query_length: 721h
max_query_parallelism: 32
+ tsdb_max_query_parallelism: 512
max_query_series: 500
cardinality_limit: 100000
max_streams_matchers_per_query: 1000
max_cache_freshness_per_query: 10m
- per_stream_rate_limit: 3MB
- per_stream_rate_limit_burst: 15MB
split_queries_by_interval: 30m
query_timeout: 1m
+ per_stream_rate_limit: 5MB
+ per_stream_rate_limit_burst: 15MB
+ shard_streams:
+ enabled: true
+ desired_rate: 3MB
+ allow_structured_metadata: true
memberlist:
abort_if_cluster_join_fails: true
advertise_port: 7946
@@ -4246,8 +4361,9 @@ overrides:
MaxLabelNamesPerSeries: 30,
MaxGlobalStreamsPerTenant: 0,
MaxLineSize: 256000,
- PerStreamRateLimit: 3,
+ PerStreamRateLimit: 5,
PerStreamRateLimitBurst: 15,
+ PerStreamDesiredRate: 3,
},
QueryLimits: &lokiv1.QueryLimitSpec{
MaxEntriesLimitPerQuery: 5000,
@@ -4257,15 +4373,17 @@ overrides:
CardinalityLimit: 100000,
},
},
- Tenants: map[string]lokiv1.LimitsTemplateSpec{
+ Tenants: map[string]lokiv1.PerTenantLimitsTemplateSpec{
"test-a": {
IngestionLimits: &lokiv1.IngestionLimitSpec{
IngestionRate: 2,
IngestionBurstSize: 5,
MaxGlobalStreamsPerTenant: 1,
},
- QueryLimits: &lokiv1.QueryLimitSpec{
- MaxChunksPerQuery: 1000000,
+ QueryLimits: &lokiv1.PerTenantQueryLimitSpec{
+ QueryLimitSpec: lokiv1.QueryLimitSpec{
+ MaxChunksPerQuery: 1000000,
+ },
},
},
},
@@ -4273,14 +4391,16 @@ overrides:
},
Overrides: map[string]LokiOverrides{
"test-a": {
- Limits: lokiv1.LimitsTemplateSpec{
+ Limits: lokiv1.PerTenantLimitsTemplateSpec{
IngestionLimits: &lokiv1.IngestionLimitSpec{
IngestionRate: 2,
MaxGlobalStreamsPerTenant: 1,
IngestionBurstSize: 5,
},
- QueryLimits: &lokiv1.QueryLimitSpec{
- MaxChunksPerQuery: 1000000,
+ QueryLimits: &lokiv1.PerTenantQueryLimitSpec{
+ QueryLimitSpec: lokiv1.QueryLimitSpec{
+ MaxChunksPerQuery: 1000000,
+ },
},
},
},
@@ -4320,11 +4440,10 @@ overrides:
ObjectStorage: storage.Options{
SharedStore: lokiv1.ObjectStorageSecretS3,
S3: &storage.S3StorageConfig{
- Endpoint: "http://test.default.svc.cluster.local.:9000",
- Region: "us-east",
- Buckets: "loki",
- AccessKeyID: "test",
- AccessKeySecret: "test123",
+ Endpoint: "http://test.default.svc.cluster.local.:9000",
+ Region: "us-east",
+ Buckets: "loki",
+
SSE: storage.S3SSEConfig{
Type: storage.SSEKMSType,
KMSKeyID: "test",
@@ -4338,6 +4457,7 @@ overrides:
},
},
},
+ Shippers: []string{"boltdb"},
HTTPTimeouts: HTTPTimeoutConfig{
IdleTimeout: 30 * time.Second,
ReadTimeout: 30 * time.Second,
@@ -4365,8 +4485,8 @@ common:
s3: http://test.default.svc.cluster.local.:9000
bucketnames: loki
region: us-east
- access_key_id: test
- secret_access_key: test123
+ access_key_id: ${AWS_ACCESS_KEY_ID}
+ secret_access_key: ${AWS_ACCESS_KEY_SECRET}
sse:
type: SSE-S3
s3forcepathstyle: true
@@ -4383,7 +4503,7 @@ compactor:
frontend:
tail_proxy_url: http://loki-querier-http-lokistack-dev.default.svc.cluster.local:3100
compress_responses: true
- max_outstanding_per_tenant: 256
+ max_outstanding_per_tenant: 4096
log_queries_longer_than: 5s
frontend_worker:
frontend_address: loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local:9095
@@ -4442,14 +4562,19 @@ limits_config:
max_chunks_per_query: 2000000
max_query_length: 721h
max_query_parallelism: 32
+ tsdb_max_query_parallelism: 512
max_query_series: 500
cardinality_limit: 100000
max_streams_matchers_per_query: 1000
max_cache_freshness_per_query: 10m
- per_stream_rate_limit: 3MB
- per_stream_rate_limit_burst: 15MB
split_queries_by_interval: 30m
query_timeout: 1m
+ per_stream_rate_limit: 5MB
+ per_stream_rate_limit_burst: 15MB
+ shard_streams:
+ enabled: true
+ desired_rate: 3MB
+ allow_structured_metadata: true
memberlist:
abort_if_cluster_join_fails: true
advertise_port: 7946
@@ -4535,8 +4660,9 @@ overrides:
MaxLabelNamesPerSeries: 30,
MaxGlobalStreamsPerTenant: 0,
MaxLineSize: 256000,
- PerStreamRateLimit: 3,
+ PerStreamRateLimit: 5,
PerStreamRateLimitBurst: 15,
+ PerStreamDesiredRate: 3,
},
QueryLimits: &lokiv1.QueryLimitSpec{
MaxEntriesLimitPerQuery: 5000,
@@ -4546,15 +4672,17 @@ overrides:
CardinalityLimit: 100000,
},
},
- Tenants: map[string]lokiv1.LimitsTemplateSpec{
+ Tenants: map[string]lokiv1.PerTenantLimitsTemplateSpec{
"test-a": {
IngestionLimits: &lokiv1.IngestionLimitSpec{
IngestionRate: 2,
IngestionBurstSize: 5,
MaxGlobalStreamsPerTenant: 1,
},
- QueryLimits: &lokiv1.QueryLimitSpec{
- MaxChunksPerQuery: 1000000,
+ QueryLimits: &lokiv1.PerTenantQueryLimitSpec{
+ QueryLimitSpec: lokiv1.QueryLimitSpec{
+ MaxChunksPerQuery: 1000000,
+ },
},
},
},
@@ -4562,14 +4690,16 @@ overrides:
},
Overrides: map[string]LokiOverrides{
"test-a": {
- Limits: lokiv1.LimitsTemplateSpec{
+ Limits: lokiv1.PerTenantLimitsTemplateSpec{
IngestionLimits: &lokiv1.IngestionLimitSpec{
IngestionRate: 2,
MaxGlobalStreamsPerTenant: 1,
IngestionBurstSize: 5,
},
- QueryLimits: &lokiv1.QueryLimitSpec{
- MaxChunksPerQuery: 1000000,
+ QueryLimits: &lokiv1.PerTenantQueryLimitSpec{
+ QueryLimitSpec: lokiv1.QueryLimitSpec{
+ MaxChunksPerQuery: 1000000,
+ },
},
},
},
@@ -4609,11 +4739,10 @@ overrides:
ObjectStorage: storage.Options{
SharedStore: lokiv1.ObjectStorageSecretS3,
S3: &storage.S3StorageConfig{
- Endpoint: "http://test.default.svc.cluster.local.:9000",
- Region: "us-east",
- Buckets: "loki",
- AccessKeyID: "test",
- AccessKeySecret: "test123",
+ Endpoint: "http://test.default.svc.cluster.local.:9000",
+ Region: "us-east",
+ Buckets: "loki",
+
SSE: storage.S3SSEConfig{
Type: storage.SSES3Type,
KMSKeyID: "test",
@@ -4627,6 +4756,7 @@ overrides:
},
},
},
+ Shippers: []string{"boltdb"},
HTTPTimeouts: HTTPTimeoutConfig{
IdleTimeout: 30 * time.Second,
ReadTimeout: 30 * time.Second,
@@ -4638,3 +4768,653 @@ overrides:
require.YAMLEq(t, expCfg, string(cfg))
require.YAMLEq(t, expRCfg, string(rCfg))
}
+
+func TestBuild_ConfigAndRuntimeConfig_WithManualPerStreamRateLimits(t *testing.T) {
+ expCfg := `
+---
+auth_enabled: true
+chunk_store_config:
+ chunk_cache_config:
+ embedded_cache:
+ enabled: true
+ max_size_mb: 500
+common:
+ storage:
+ s3:
+ s3: http://test.default.svc.cluster.local.:9000
+ bucketnames: loki
+ region: us-east
+ access_key_id: ${AWS_ACCESS_KEY_ID}
+ secret_access_key: ${AWS_ACCESS_KEY_SECRET}
+ s3forcepathstyle: true
+ compactor_grpc_address: loki-compactor-grpc-lokistack-dev.default.svc.cluster.local:9095
+ ring:
+ kvstore:
+ store: memberlist
+ heartbeat_period: 5s
+ heartbeat_timeout: 1m
+ instance_port: 9095
+compactor:
+ compaction_interval: 2h
+ working_directory: /tmp/loki/compactor
+frontend:
+ tail_proxy_url: http://loki-querier-http-lokistack-dev.default.svc.cluster.local:3100
+ compress_responses: true
+ max_outstanding_per_tenant: 4096
+ log_queries_longer_than: 5s
+frontend_worker:
+ frontend_address: loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local:9095
+ grpc_client_config:
+ max_send_msg_size: 104857600
+ match_max_concurrent: true
+ingester:
+ chunk_block_size: 262144
+ chunk_encoding: snappy
+ chunk_idle_period: 1h
+ chunk_retain_period: 5m
+ chunk_target_size: 2097152
+ flush_op_timeout: 10m
+ lifecycler:
+ final_sleep: 0s
+ join_after: 30s
+ num_tokens: 512
+ ring:
+ replication_factor: 1
+ max_chunk_age: 2h
+ max_transfer_retries: 0
+ wal:
+ enabled: true
+ dir: /tmp/wal
+ replay_memory_ceiling: 2500
+ingester_client:
+ grpc_client_config:
+ max_recv_msg_size: 67108864
+ remote_timeout: 1s
+# NOTE: Keep the order of keys as in Loki docs
+# to enable easy diffs when vendoring newer
+# Loki releases.
+# (See https://grafana.com/docs/loki/latest/configuration/#limits_config)
+#
+# Values for not exposed fields are taken from the grafana/loki production
+# configuration manifests.
+# (See https://github.com/grafana/loki/blob/main/production/ksonnet/loki/config.libsonnet)
+limits_config:
+ ingestion_rate_strategy: global
+ ingestion_rate_mb: 4
+ ingestion_burst_size_mb: 6
+ max_label_name_length: 1024
+ max_label_value_length: 2048
+ max_label_names_per_series: 30
+ reject_old_samples: true
+ reject_old_samples_max_age: 168h
+ creation_grace_period: 10m
+ enforce_metric_name: false
+ # Keep max_streams_per_user always to 0 to default
+ # using max_global_streams_per_user always.
+ # (See https://github.com/grafana/loki/blob/main/pkg/ingester/limiter.go#L73)
+ max_streams_per_user: 0
+ max_line_size: 256000
+ max_entries_limit_per_query: 5000
+ max_global_streams_per_user: 0
+ max_chunks_per_query: 2000000
+ max_query_length: 721h
+ max_query_parallelism: 32
+ max_query_series: 500
+ cardinality_limit: 100000
+ max_streams_matchers_per_query: 1000
+ max_cache_freshness_per_query: 10m
+ per_stream_rate_limit: 3MB
+ per_stream_rate_limit_burst: 15MB
+ split_queries_by_interval: 30m
+ tsdb_max_query_parallelism: 512
+ query_timeout: 1m
+ allow_structured_metadata: true
+memberlist:
+ abort_if_cluster_join_fails: true
+ advertise_port: 7946
+ bind_port: 7946
+ join_members:
+ - loki-gossip-ring-lokistack-dev.default.svc.cluster.local:7946
+ max_join_backoff: 1m
+ max_join_retries: 10
+ min_join_backoff: 1s
+querier:
+ engine:
+ max_look_back_period: 30s
+ extra_query_delay: 0s
+ max_concurrent: 2
+ query_ingesters_within: 3h
+ tail_max_duration: 1h
+query_range:
+ align_queries_with_step: true
+ cache_results: true
+ max_retries: 5
+ results_cache:
+ cache:
+ embedded_cache:
+ enabled: true
+ max_size_mb: 500
+ parallelise_shardable_queries: true
+schema_config:
+ configs:
+ - from: "2020-10-01"
+ index:
+ period: 24h
+ prefix: index_
+ object_store: s3
+ schema: v11
+ store: boltdb-shipper
+server:
+ graceful_shutdown_timeout: 5s
+ grpc_server_min_time_between_pings: '10s'
+ grpc_server_ping_without_stream_allowed: true
+ grpc_server_max_concurrent_streams: 1000
+ grpc_server_max_recv_msg_size: 104857600
+ grpc_server_max_send_msg_size: 104857600
+ http_listen_port: 3100
+ http_server_idle_timeout: 30s
+ http_server_read_timeout: 30s
+ http_server_write_timeout: 10m0s
+ log_level: info
+storage_config:
+ boltdb_shipper:
+ active_index_directory: /tmp/loki/index
+ cache_location: /tmp/loki/index_cache
+ cache_ttl: 24h
+ resync_interval: 5m
+ shared_store: s3
+ index_gateway_client:
+ server_address: dns:///loki-index-gateway-grpc-lokistack-dev.default.svc.cluster.local:9095
+tracing:
+ enabled: false
+analytics:
+ reporting_enabled: true
+`
+ expRCfg := `
+---
+overrides:
+`
+ opts := Options{
+ Stack: lokiv1.LokiStackSpec{
+ Replication: &lokiv1.ReplicationSpec{
+ Factor: 1,
+ },
+ Limits: &lokiv1.LimitsSpec{
+ Global: &lokiv1.LimitsTemplateSpec{
+ IngestionLimits: &lokiv1.IngestionLimitSpec{
+ IngestionRate: 4,
+ IngestionBurstSize: 6,
+ MaxLabelNameLength: 1024,
+ MaxLabelValueLength: 2048,
+ MaxLabelNamesPerSeries: 30,
+ MaxGlobalStreamsPerTenant: 0,
+ MaxLineSize: 256000,
+ PerStreamRateLimit: 3,
+ PerStreamRateLimitBurst: 15,
+ },
+ QueryLimits: &lokiv1.QueryLimitSpec{
+ MaxEntriesLimitPerQuery: 5000,
+ MaxChunksPerQuery: 2000000,
+ MaxQuerySeries: 500,
+ QueryTimeout: "1m",
+ CardinalityLimit: 100000,
+ },
+ },
+ },
+ },
+ Namespace: "test-ns",
+ Name: "test",
+ Compactor: Address{
+ FQDN: "loki-compactor-grpc-lokistack-dev.default.svc.cluster.local",
+ Port: 9095,
+ },
+ FrontendWorker: Address{
+ FQDN: "loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local",
+ Port: 9095,
+ },
+ GossipRing: GossipRing{
+ InstancePort: 9095,
+ BindPort: 7946,
+ MembersDiscoveryAddr: "loki-gossip-ring-lokistack-dev.default.svc.cluster.local",
+ },
+ Querier: Address{
+ Protocol: "http",
+ FQDN: "loki-querier-http-lokistack-dev.default.svc.cluster.local",
+ Port: 3100,
+ },
+ IndexGateway: Address{
+ FQDN: "loki-index-gateway-grpc-lokistack-dev.default.svc.cluster.local",
+ Port: 9095,
+ },
+ StorageDirectory: "/tmp/loki",
+ MaxConcurrent: MaxConcurrent{
+ AvailableQuerierCPUCores: 2,
+ },
+ WriteAheadLog: WriteAheadLog{
+ Directory: "/tmp/wal",
+ IngesterMemoryRequest: 5000,
+ },
+ ObjectStorage: storage.Options{
+ SharedStore: lokiv1.ObjectStorageSecretS3,
+ S3: &storage.S3StorageConfig{
+ Endpoint: "http://test.default.svc.cluster.local.:9000",
+ Region: "us-east",
+ Buckets: "loki",
+ },
+ Schemas: []lokiv1.ObjectStorageSchema{
+ {
+ Version: lokiv1.ObjectStorageSchemaV11,
+ EffectiveDate: "2020-10-01",
+ },
+ },
+ },
+ Shippers: []string{"boltdb"},
+ EnableRemoteReporting: true,
+ HTTPTimeouts: HTTPTimeoutConfig{
+ IdleTimeout: 30 * time.Second,
+ ReadTimeout: 30 * time.Second,
+ WriteTimeout: 10 * time.Minute,
+ },
+ }
+ cfg, rCfg, err := Build(opts)
+ require.NoError(t, err)
+ require.YAMLEq(t, expCfg, string(cfg))
+ require.YAMLEq(t, expRCfg, string(rCfg))
+}
+
+func defaultOptions() Options {
+ return Options{
+ Stack: lokiv1.LokiStackSpec{
+ Replication: &lokiv1.ReplicationSpec{
+ Factor: 1,
+ },
+ Limits: &lokiv1.LimitsSpec{
+ Global: &lokiv1.LimitsTemplateSpec{
+ IngestionLimits: &lokiv1.IngestionLimitSpec{
+ IngestionRate: 4,
+ IngestionBurstSize: 6,
+ MaxLabelNameLength: 1024,
+ MaxLabelValueLength: 2048,
+ MaxLabelNamesPerSeries: 30,
+ MaxGlobalStreamsPerTenant: 0,
+ MaxLineSize: 256000,
+ PerStreamRateLimit: 3,
+ PerStreamRateLimitBurst: 15,
+ },
+ QueryLimits: &lokiv1.QueryLimitSpec{
+ MaxEntriesLimitPerQuery: 5000,
+ MaxChunksPerQuery: 2000000,
+ MaxQuerySeries: 500,
+ QueryTimeout: "1m",
+ CardinalityLimit: 100000,
+ },
+ },
+ },
+ },
+ Namespace: "test-ns",
+ Name: "test",
+ Compactor: Address{
+ FQDN: "loki-compactor-grpc-lokistack-dev.default.svc.cluster.local",
+ Port: 9095,
+ },
+ FrontendWorker: Address{
+ FQDN: "loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local",
+ Port: 9095,
+ },
+ GossipRing: GossipRing{
+ InstancePort: 9095,
+ BindPort: 7946,
+ MembersDiscoveryAddr: "loki-gossip-ring-lokistack-dev.default.svc.cluster.local",
+ },
+ Querier: Address{
+ Protocol: "http",
+ FQDN: "loki-querier-http-lokistack-dev.default.svc.cluster.local",
+ Port: 3100,
+ },
+ IndexGateway: Address{
+ FQDN: "loki-index-gateway-grpc-lokistack-dev.default.svc.cluster.local",
+ Port: 9095,
+ },
+ StorageDirectory: "/tmp/loki",
+ MaxConcurrent: MaxConcurrent{
+ AvailableQuerierCPUCores: 2,
+ },
+ WriteAheadLog: WriteAheadLog{
+ Directory: "/tmp/wal",
+ IngesterMemoryRequest: 5000,
+ },
+ ObjectStorage: storage.Options{
+ SharedStore: lokiv1.ObjectStorageSecretS3,
+ S3: &storage.S3StorageConfig{
+ Endpoint: "http://test.default.svc.cluster.local.:9000",
+ Region: "us-east",
+ Buckets: "loki",
+ },
+ Schemas: []lokiv1.ObjectStorageSchema{
+ {
+ Version: lokiv1.ObjectStorageSchemaV11,
+ EffectiveDate: "2020-10-01",
+ },
+ },
+ },
+ Shippers: []string{"boltdb"},
+ EnableRemoteReporting: true,
+ HTTPTimeouts: HTTPTimeoutConfig{
+ IdleTimeout: 30 * time.Second,
+ ReadTimeout: 30 * time.Second,
+ WriteTimeout: 10 * time.Minute,
+ },
+ }
+}
+
+func TestBuild_ConfigAndRuntimeConfig_Schemas(t *testing.T) {
+ for _, tc := range []struct {
+ name string
+ schemaConfig []lokiv1.ObjectStorageSchema
+ shippers []string
+ expSchemaConfig string
+ expStorageConfig string
+ }{
+ {
+ name: "default_config_v11_schema",
+ schemaConfig: []lokiv1.ObjectStorageSchema{
+ {
+ Version: lokiv1.ObjectStorageSchemaV11,
+ EffectiveDate: "2020-10-01",
+ },
+ },
+ shippers: []string{"boltdb"},
+ expSchemaConfig: `
+ configs:
+ - from: "2020-10-01"
+ index:
+ period: 24h
+ prefix: index_
+ object_store: s3
+ schema: v11
+ store: boltdb-shipper`,
+ expStorageConfig: `
+ boltdb_shipper:
+ active_index_directory: /tmp/loki/index
+ cache_location: /tmp/loki/index_cache
+ cache_ttl: 24h
+ resync_interval: 5m
+ shared_store: s3
+ index_gateway_client:
+ server_address: dns:///loki-index-gateway-grpc-lokistack-dev.default.svc.cluster.local:9095`,
+ },
+ {
+ name: "v12_schema",
+ schemaConfig: []lokiv1.ObjectStorageSchema{
+ {
+ Version: lokiv1.ObjectStorageSchemaV12,
+ EffectiveDate: "2020-02-05",
+ },
+ },
+ shippers: []string{"boltdb"},
+ expSchemaConfig: `
+ configs:
+ - from: "2020-02-05"
+ index:
+ period: 24h
+ prefix: index_
+ object_store: s3
+ schema: v12
+ store: boltdb-shipper`,
+ expStorageConfig: `
+ boltdb_shipper:
+ active_index_directory: /tmp/loki/index
+ cache_location: /tmp/loki/index_cache
+ cache_ttl: 24h
+ resync_interval: 5m
+ shared_store: s3
+ index_gateway_client:
+ server_address: dns:///loki-index-gateway-grpc-lokistack-dev.default.svc.cluster.local:9095`,
+ },
+ {
+ name: "v13_schema",
+ schemaConfig: []lokiv1.ObjectStorageSchema{
+ {
+ Version: lokiv1.ObjectStorageSchemaV13,
+ EffectiveDate: "2024-01-01",
+ },
+ },
+ shippers: []string{"tsdb"},
+ expSchemaConfig: `
+ configs:
+ - from: "2024-01-01"
+ index:
+ period: 24h
+ prefix: index_
+ object_store: s3
+ schema: v13
+ store: tsdb`,
+ expStorageConfig: `
+ tsdb_shipper:
+ active_index_directory: /tmp/loki/tsdb-index
+ cache_location: /tmp/loki/tsdb-cache
+ cache_ttl: 24h
+ resync_interval: 5m
+ shared_store: s3
+ index_gateway_client:
+ server_address: dns:///loki-index-gateway-grpc-lokistack-dev.default.svc.cluster.local:9095`,
+ },
+ {
+ name: "multiple_schema",
+ schemaConfig: []lokiv1.ObjectStorageSchema{
+ {
+ Version: lokiv1.ObjectStorageSchemaV11,
+ EffectiveDate: "2020-01-01",
+ },
+ {
+ Version: lokiv1.ObjectStorageSchemaV12,
+ EffectiveDate: "2021-01-01",
+ },
+ {
+ Version: lokiv1.ObjectStorageSchemaV13,
+ EffectiveDate: "2024-01-01",
+ },
+ },
+ shippers: []string{"boltdb", "tsdb"},
+ expSchemaConfig: `
+ configs:
+ - from: "2020-01-01"
+ index:
+ period: 24h
+ prefix: index_
+ object_store: s3
+ schema: v11
+ store: boltdb-shipper
+ - from: "2021-01-01"
+ index:
+ period: 24h
+ prefix: index_
+ object_store: s3
+ schema: v12
+ store: boltdb-shipper
+ - from: "2024-01-01"
+ index:
+ period: 24h
+ prefix: index_
+ object_store: s3
+ schema: v13
+ store: tsdb`,
+ expStorageConfig: `
+ boltdb_shipper:
+ active_index_directory: /tmp/loki/index
+ cache_location: /tmp/loki/index_cache
+ cache_ttl: 24h
+ resync_interval: 5m
+ shared_store: s3
+ index_gateway_client:
+ server_address: dns:///loki-index-gateway-grpc-lokistack-dev.default.svc.cluster.local:9095
+ tsdb_shipper:
+ active_index_directory: /tmp/loki/tsdb-index
+ cache_location: /tmp/loki/tsdb-cache
+ cache_ttl: 24h
+ resync_interval: 5m
+ shared_store: s3
+ index_gateway_client:
+ server_address: dns:///loki-index-gateway-grpc-lokistack-dev.default.svc.cluster.local:9095`,
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ expCfg := `
+---
+auth_enabled: true
+chunk_store_config:
+ chunk_cache_config:
+ embedded_cache:
+ enabled: true
+ max_size_mb: 500
+common:
+ storage:
+ s3:
+ s3: http://test.default.svc.cluster.local.:9000
+ bucketnames: loki
+ region: us-east
+ access_key_id: ${AWS_ACCESS_KEY_ID}
+ secret_access_key: ${AWS_ACCESS_KEY_SECRET}
+ s3forcepathstyle: true
+ compactor_grpc_address: loki-compactor-grpc-lokistack-dev.default.svc.cluster.local:9095
+ ring:
+ kvstore:
+ store: memberlist
+ heartbeat_period: 5s
+ heartbeat_timeout: 1m
+ instance_port: 9095
+compactor:
+ compaction_interval: 2h
+ working_directory: /tmp/loki/compactor
+frontend:
+ tail_proxy_url: http://loki-querier-http-lokistack-dev.default.svc.cluster.local:3100
+ compress_responses: true
+ max_outstanding_per_tenant: 4096
+ log_queries_longer_than: 5s
+frontend_worker:
+ frontend_address: loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local:9095
+ grpc_client_config:
+ max_send_msg_size: 104857600
+ match_max_concurrent: true
+ingester:
+ chunk_block_size: 262144
+ chunk_encoding: snappy
+ chunk_idle_period: 1h
+ chunk_retain_period: 5m
+ chunk_target_size: 2097152
+ flush_op_timeout: 10m
+ lifecycler:
+ final_sleep: 0s
+ join_after: 30s
+ num_tokens: 512
+ ring:
+ replication_factor: 1
+ max_chunk_age: 2h
+ max_transfer_retries: 0
+ wal:
+ enabled: true
+ dir: /tmp/wal
+ replay_memory_ceiling: 2500
+ingester_client:
+ grpc_client_config:
+ max_recv_msg_size: 67108864
+ remote_timeout: 1s
+# NOTE: Keep the order of keys as in Loki docs
+# to enable easy diffs when vendoring newer
+# Loki releases.
+# (See https://grafana.com/docs/loki/latest/configuration/#limits_config)
+#
+# Values for not exposed fields are taken from the grafana/loki production
+# configuration manifests.
+# (See https://github.com/grafana/loki/blob/main/production/ksonnet/loki/config.libsonnet)
+limits_config:
+ ingestion_rate_strategy: global
+ ingestion_rate_mb: 4
+ ingestion_burst_size_mb: 6
+ max_label_name_length: 1024
+ max_label_value_length: 2048
+ max_label_names_per_series: 30
+ reject_old_samples: true
+ reject_old_samples_max_age: 168h
+ creation_grace_period: 10m
+ enforce_metric_name: false
+ # Keep max_streams_per_user always to 0 to default
+ # using max_global_streams_per_user always.
+ # (See https://github.com/grafana/loki/blob/main/pkg/ingester/limiter.go#L73)
+ max_streams_per_user: 0
+ max_line_size: 256000
+ max_entries_limit_per_query: 5000
+ max_global_streams_per_user: 0
+ max_chunks_per_query: 2000000
+ max_query_length: 721h
+ max_query_parallelism: 32
+ tsdb_max_query_parallelism: 512
+ max_query_series: 500
+ cardinality_limit: 100000
+ max_streams_matchers_per_query: 1000
+ max_cache_freshness_per_query: 10m
+ per_stream_rate_limit: 3MB
+ per_stream_rate_limit_burst: 15MB
+ split_queries_by_interval: 30m
+ query_timeout: 1m
+ allow_structured_metadata: true
+memberlist:
+ abort_if_cluster_join_fails: true
+ advertise_port: 7946
+ bind_port: 7946
+ join_members:
+ - loki-gossip-ring-lokistack-dev.default.svc.cluster.local:7946
+ max_join_backoff: 1m
+ max_join_retries: 10
+ min_join_backoff: 1s
+querier:
+ engine:
+ max_look_back_period: 30s
+ extra_query_delay: 0s
+ max_concurrent: 2
+ query_ingesters_within: 3h
+ tail_max_duration: 1h
+query_range:
+ align_queries_with_step: true
+ cache_results: true
+ max_retries: 5
+ results_cache:
+ cache:
+ embedded_cache:
+ enabled: true
+ max_size_mb: 500
+ parallelise_shardable_queries: true
+schema_config:
+${SCHEMA_CONFIG}
+server:
+ graceful_shutdown_timeout: 5s
+ grpc_server_min_time_between_pings: '10s'
+ grpc_server_ping_without_stream_allowed: true
+ grpc_server_max_concurrent_streams: 1000
+ grpc_server_max_recv_msg_size: 104857600
+ grpc_server_max_send_msg_size: 104857600
+ http_listen_port: 3100
+ http_server_idle_timeout: 30s
+ http_server_read_timeout: 30s
+ http_server_write_timeout: 10m0s
+ log_level: info
+storage_config:
+${STORAGE_CONFIG}
+tracing:
+ enabled: false
+analytics:
+ reporting_enabled: true
+`
+ expCfg = strings.Replace(expCfg, "${SCHEMA_CONFIG}", tc.expSchemaConfig, 1)
+ expCfg = strings.Replace(expCfg, "${STORAGE_CONFIG}", tc.expStorageConfig, 1)
+
+ opts := defaultOptions()
+ opts.ObjectStorage.Schemas = tc.schemaConfig
+ opts.Shippers = tc.shippers
+
+ cfg, _, err := Build(opts)
+ require.NoError(t, err)
+ require.YAMLEq(t, expCfg, string(cfg))
+ })
+ }
+}
diff --git a/operator/internal/manifests/internal/config/loki-config.yaml b/operator/internal/manifests/internal/config/loki-config.yaml
index 0c06a44ed16e4..a11191627d375 100644
--- a/operator/internal/manifests/internal/config/loki-config.yaml
+++ b/operator/internal/manifests/internal/config/loki-config.yaml
@@ -12,8 +12,8 @@ common:
azure:
environment: {{ .Env }}
container_name: {{ .Container }}
- account_name: {{ .AccountName }}
- account_key: {{ .AccountKey }}
+ account_name: ${AZURE_STORAGE_ACCOUNT_NAME}
+ account_key: ${AZURE_STORAGE_ACCOUNT_KEY}
{{- with .EndpointSuffix }}
endpoint_suffix: {{ . }}
{{- end }}
@@ -27,8 +27,8 @@ common:
s3: {{ .Endpoint }}
bucketnames: {{ .Buckets }}
region: {{ .Region }}
- access_key_id: {{ .AccessKeyID }}
- secret_access_key: {{ .AccessKeySecret }}
+ access_key_id: ${AWS_ACCESS_KEY_ID}
+ secret_access_key: ${AWS_ACCESS_KEY_SECRET}
{{- with .SSE }}
{{- if .Type }}
sse:
@@ -37,7 +37,7 @@ common:
kms_key_id: {{ .KMSKeyID }}
{{- with .KMSEncryptionContext }}
kms_encryption_context: |
- {{ . }}
+ ${AWS_SSE_KMS_ENCRYPTION_CONTEXT}
{{- end }}
{{- end}}
{{- end }}
@@ -47,11 +47,11 @@ common:
{{- with .ObjectStorage.Swift }}
swift:
auth_url: {{ .AuthURL }}
- username: {{ .Username }}
+ username: ${SWIFT_USERNAME}
user_domain_name: {{ .UserDomainName }}
user_domain_id: {{ .UserDomainID }}
user_id: {{ .UserID }}
- password: {{ .Password }}
+ password: ${SWIFT_PASSWORD}
domain_id: {{ .DomainID }}
domain_name: {{ .DomainName }}
project_id: {{ .ProjectID }}
@@ -65,8 +65,8 @@ common:
alibabacloud:
bucket: {{ .Bucket }}
endpoint: {{ .Endpoint }}
- access_key_id: {{ .AccessKeyID }}
- secret_access_key: {{ .SecretAccessKey }}
+ access_key_id: ${ALIBABA_CLOUD_ACCESS_KEY_ID}
+ secret_access_key: ${ALIBABA_CLOUD_ACCESS_KEY_SECRET}
{{- end }}
compactor_grpc_address: {{ .Compactor.FQDN }}:{{ .Compactor.Port }}
{{- with .GossipRing }}
@@ -104,7 +104,7 @@ frontend:
tls_min_version: {{ .TLS.MinTLSVersion }}
{{- end }}
compress_responses: true
- max_outstanding_per_tenant: 256
+ max_outstanding_per_tenant: 4096
log_queries_longer_than: 5s
frontend_worker:
frontend_address: {{ .FrontendWorker.FQDN }}:{{ .FrontendWorker.Port }}
@@ -184,6 +184,7 @@ limits_config:
max_chunks_per_query: {{ .Stack.Limits.Global.QueryLimits.MaxChunksPerQuery }}
max_query_length: 721h
max_query_parallelism: 32
+ tsdb_max_query_parallelism: 512
max_query_series: {{ .Stack.Limits.Global.QueryLimits.MaxQuerySeries }}
cardinality_limit: {{ .Stack.Limits.Global.QueryLimits.CardinalityLimit }}
max_streams_matchers_per_query: 1000
@@ -203,6 +204,12 @@ limits_config:
per_stream_rate_limit: {{ .Stack.Limits.Global.IngestionLimits.PerStreamRateLimit }}MB
per_stream_rate_limit_burst: {{ .Stack.Limits.Global.IngestionLimits.PerStreamRateLimitBurst }}MB
split_queries_by_interval: 30m
+{{- with .Stack.Limits.Global.IngestionLimits.PerStreamDesiredRate }}
+ shard_streams:
+ enabled: true
+ desired_rate: {{ . }}MB
+{{- end }}
+ allow_structured_metadata: true
{{- with .GossipRing }}
memberlist:
abort_if_cluster_join_fails: true
@@ -254,7 +261,11 @@ schema_config:
prefix: index_
object_store: {{ $store }}
schema: {{ .Version }}
+ {{- if or (eq .Version "v11") (eq .Version "v12")}}
store: boltdb-shipper
+ {{- else }}
+ store: tsdb
+ {{- end}}
{{- end }}
{{ if .Ruler.Enabled }}
ruler:
@@ -468,23 +479,32 @@ server:
{{- end }}
log_level: info
storage_config:
+{{- range $_, $ship := .Shippers }}
+{{- if eq $ship "boltdb" }}
boltdb_shipper:
- active_index_directory: {{ .StorageDirectory }}/index
- cache_location: {{ .StorageDirectory }}/index_cache
+ active_index_directory: {{ $.StorageDirectory }}/index
+ cache_location: {{ $.StorageDirectory }}/index_cache
+{{- end }}
+{{- if eq $ship "tsdb" }}
+ tsdb_shipper:
+ active_index_directory: {{ $.StorageDirectory }}/tsdb-index
+ cache_location: {{ $.StorageDirectory }}/tsdb-cache
+{{- end }}
cache_ttl: 24h
resync_interval: 5m
- shared_store: {{ .ObjectStorage.SharedStore }}
+ shared_store: {{ $.ObjectStorage.SharedStore }}
index_gateway_client:
- server_address: dns:///{{ .IndexGateway.FQDN }}:{{ .IndexGateway.Port }}
-{{- if .Gates.GRPCEncryption }}
+ server_address: dns:///{{ $.IndexGateway.FQDN }}:{{ $.IndexGateway.Port }}
+{{- if $.Gates.GRPCEncryption }}
grpc_client_config:
tls_enabled: true
- tls_cert_path: {{ .TLS.Paths.GRPC.Certificate }}
- tls_key_path: {{ .TLS.Paths.GRPC.Key }}
- tls_ca_path: {{ .TLS.Paths.CA }}
- tls_server_name: {{ .TLS.ServerNames.GRPC.IndexGateway }}
- tls_cipher_suites: {{ .TLS.CipherSuitesString }}
- tls_min_version: {{ .TLS.MinTLSVersion }}
+ tls_cert_path: {{ $.TLS.Paths.GRPC.Certificate }}
+ tls_key_path: {{ $.TLS.Paths.GRPC.Key }}
+ tls_ca_path: {{ $.TLS.Paths.CA }}
+ tls_server_name: {{ $.TLS.ServerNames.GRPC.IndexGateway }}
+ tls_cipher_suites: {{ $.TLS.CipherSuitesString }}
+ tls_min_version: {{ $.TLS.MinTLSVersion }}
+{{- end }}
{{- end }}
tracing:
enabled: false
diff --git a/operator/internal/manifests/internal/config/loki-runtime-config.yaml b/operator/internal/manifests/internal/config/loki-runtime-config.yaml
index ca62d0a783db9..421426cb3911f 100644
--- a/operator/internal/manifests/internal/config/loki-runtime-config.yaml
+++ b/operator/internal/manifests/internal/config/loki-runtime-config.yaml
@@ -32,6 +32,11 @@ overrides:
{{- if $l.PerStreamRateLimitBurst }}
per_stream_rate_limit_burst: {{ $l.PerStreamRateLimitBurst }}MB
{{- end }}
+ {{- with $l.PerStreamDesiredRate }}
+ shard_streams:
+ enabled: true
+ desired_rate: {{ . }}MB
+ {{- end}}
{{- end -}}
{{- if $l := $spec.QueryLimits -}}
{{- if $l.MaxEntriesLimitPerQuery }}
@@ -49,6 +54,24 @@ overrides:
{{- if $spec.QueryLimits.CardinalityLimit }}
cardinality_limit: {{ $spec.QueryLimits.CardinalityLimit }}
{{- end }}
+ {{- with $l.Blocked }}
+ blocked_queries:
+ {{- range $blockedQuery := . }}
+ - {{ with $blockedQuery.Pattern -}}
+ pattern: |
+ {{ . | yamlBlock " " }}
+ {{ end -}}
+ {{- with $blockedQuery.Regex }}
+ regex: {{ . }}
+ {{- end }}
+ {{- with $blockedQuery.Types }}
+ types: {{ . }}
+ {{- end }}
+ {{- with $blockedQuery.Hash }}
+ hash: {{ . }}
+ {{- end }}
+ {{- end }}
+ {{- end}}
{{- end -}}
{{- with $spec.Retention }}
retention_period: {{ .Days }}d
diff --git a/operator/internal/manifests/internal/config/options.go b/operator/internal/manifests/internal/config/options.go
index d82000034acc7..110bce9ee8df3 100644
--- a/operator/internal/manifests/internal/config/options.go
+++ b/operator/internal/manifests/internal/config/options.go
@@ -29,6 +29,7 @@ type Options struct {
MaxConcurrent MaxConcurrent
WriteAheadLog WriteAheadLog
EnableRemoteReporting bool
+ Shippers []string
ObjectStorage storage.Options
@@ -40,7 +41,7 @@ type Options struct {
}
type LokiOverrides struct {
- Limits lokiv1.LimitsTemplateSpec
+ Limits lokiv1.PerTenantLimitsTemplateSpec
Ruler RulerOverrides
}
diff --git a/operator/internal/manifests/internal/gateway/build_test.go b/operator/internal/manifests/internal/gateway/build_test.go
index 38e67ffab1a80..77d7fc1873a24 100644
--- a/operator/internal/manifests/internal/gateway/build_test.go
+++ b/operator/internal/manifests/internal/gateway/build_test.go
@@ -3,9 +3,10 @@ package gateway
import (
"testing"
+ "github.com/stretchr/testify/require"
+
lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/openshift"
- "github.com/stretchr/testify/require"
)
func TestBuild_StaticMode(t *testing.T) {
diff --git a/operator/internal/manifests/internal/rules/marshal_test.go b/operator/internal/manifests/internal/rules/marshal_test.go
index 213a2bd23beec..1620f050a7652 100644
--- a/operator/internal/manifests/internal/rules/marshal_test.go
+++ b/operator/internal/manifests/internal/rules/marshal_test.go
@@ -3,10 +3,11 @@ package rules_test
import (
"testing"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
- "github.com/grafana/loki/operator/internal/manifests/internal/rules"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ "github.com/grafana/loki/operator/internal/manifests/internal/rules"
)
func TestMarshalAlertingRule(t *testing.T) {
diff --git a/operator/internal/manifests/internal/sizes.go b/operator/internal/manifests/internal/sizes.go
index 01c0f20eea6da..be5ac2eefb018 100644
--- a/operator/internal/manifests/internal/sizes.go
+++ b/operator/internal/manifests/internal/sizes.go
@@ -241,7 +241,8 @@ var StackSizeTable = map[lokiv1.LokiStackSizeType]lokiv1.LokiStackSpec{
MaxLabelValueLength: 2048,
MaxLabelNamesPerSeries: 30,
MaxLineSize: 256000,
- PerStreamRateLimit: 3,
+ PerStreamDesiredRate: 3,
+ PerStreamRateLimit: 5,
PerStreamRateLimitBurst: 15,
},
QueryLimits: &lokiv1.QueryLimitSpec{
@@ -296,7 +297,8 @@ var StackSizeTable = map[lokiv1.LokiStackSizeType]lokiv1.LokiStackSpec{
MaxLabelValueLength: 2048,
MaxLabelNamesPerSeries: 30,
MaxLineSize: 256000,
- PerStreamRateLimit: 3,
+ PerStreamDesiredRate: 3,
+ PerStreamRateLimit: 5,
PerStreamRateLimitBurst: 15,
},
QueryLimits: &lokiv1.QueryLimitSpec{
@@ -354,7 +356,8 @@ var StackSizeTable = map[lokiv1.LokiStackSizeType]lokiv1.LokiStackSpec{
MaxLabelValueLength: 2048,
MaxLabelNamesPerSeries: 30,
MaxLineSize: 256000,
- PerStreamRateLimit: 3,
+ PerStreamDesiredRate: 3,
+ PerStreamRateLimit: 5,
PerStreamRateLimitBurst: 15,
},
QueryLimits: &lokiv1.QueryLimitSpec{
@@ -412,7 +415,8 @@ var StackSizeTable = map[lokiv1.LokiStackSizeType]lokiv1.LokiStackSpec{
MaxLabelValueLength: 2048,
MaxLabelNamesPerSeries: 30,
MaxLineSize: 256000,
- PerStreamRateLimit: 3,
+ PerStreamDesiredRate: 3,
+ PerStreamRateLimit: 5,
PerStreamRateLimitBurst: 15,
},
QueryLimits: &lokiv1.QueryLimitSpec{
diff --git a/operator/internal/manifests/memberlist_test.go b/operator/internal/manifests/memberlist_test.go
index 56d48b39e928f..f7d2b0686f518 100644
--- a/operator/internal/manifests/memberlist_test.go
+++ b/operator/internal/manifests/memberlist_test.go
@@ -3,9 +3,10 @@ package manifests
import (
"testing"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/stretchr/testify/require"
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
+
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
)
func TestConfigureHashRingEnv_UseDefaults_NoHashRingSpec(t *testing.T) {
@@ -44,9 +45,9 @@ func TestConfigureHashRingEnv_UseDefaults_NoHashRingSpec(t *testing.T) {
},
}
- wantEnvVar := v1.EnvVar{
- ValueFrom: &v1.EnvVarSource{
- FieldRef: &v1.ObjectFieldSelector{
+ wantEnvVar := corev1.EnvVar{
+ ValueFrom: &corev1.EnvVarSource{
+ FieldRef: &corev1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "status.podIP",
},
@@ -102,9 +103,9 @@ func TestConfigureHashRingEnv_UseDefaults_WithCustomHashRingSpec(t *testing.T) {
},
}
- wantEnvVar := v1.EnvVar{
- ValueFrom: &v1.EnvVarSource{
- FieldRef: &v1.ObjectFieldSelector{
+ wantEnvVar := corev1.EnvVar{
+ ValueFrom: &corev1.EnvVarSource{
+ FieldRef: &corev1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "status.podIP",
},
@@ -160,10 +161,10 @@ func TestConfigureHashRingEnv_UseInstanceAddrPodIP(t *testing.T) {
},
}
- wantEnvVar := v1.EnvVar{
+ wantEnvVar := corev1.EnvVar{
Name: gossipInstanceAddrEnvVarName,
- ValueFrom: &v1.EnvVarSource{
- FieldRef: &v1.ObjectFieldSelector{
+ ValueFrom: &corev1.EnvVarSource{
+ FieldRef: &corev1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "status.podIP",
},
diff --git a/operator/internal/manifests/mutate_test.go b/operator/internal/manifests/mutate_test.go
index 4fac03e606569..18407c3c23daf 100644
--- a/operator/internal/manifests/mutate_test.go
+++ b/operator/internal/manifests/mutate_test.go
@@ -504,7 +504,7 @@ func TestMutateFuncFor_MutateDeploymentSpec(t *testing.T) {
"test": "test",
},
},
- Replicas: pointer.Int32Ptr(1),
+ Replicas: pointer.Int32(1),
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Containers: []corev1.Container{
@@ -525,7 +525,7 @@ func TestMutateFuncFor_MutateDeploymentSpec(t *testing.T) {
"and": "another",
},
},
- Replicas: pointer.Int32Ptr(2),
+ Replicas: pointer.Int32(2),
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Containers: []corev1.Container{
@@ -552,7 +552,7 @@ func TestMutateFuncFor_MutateDeploymentSpec(t *testing.T) {
"test": "test",
},
},
- Replicas: pointer.Int32Ptr(1),
+ Replicas: pointer.Int32(1),
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Containers: []corev1.Container{
@@ -574,7 +574,7 @@ func TestMutateFuncFor_MutateDeploymentSpec(t *testing.T) {
"and": "another",
},
},
- Replicas: pointer.Int32Ptr(2),
+ Replicas: pointer.Int32(2),
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Containers: []corev1.Container{
@@ -665,7 +665,7 @@ func TestMutateFuncFor_MutateStatefulSetSpec(t *testing.T) {
"test": "test",
},
},
- Replicas: pointer.Int32Ptr(1),
+ Replicas: pointer.Int32(1),
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Containers: []corev1.Container{
@@ -693,7 +693,7 @@ func TestMutateFuncFor_MutateStatefulSetSpec(t *testing.T) {
"and": "another",
},
},
- Replicas: pointer.Int32Ptr(2),
+ Replicas: pointer.Int32(2),
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Containers: []corev1.Container{
@@ -728,7 +728,7 @@ func TestMutateFuncFor_MutateStatefulSetSpec(t *testing.T) {
"test": "test",
},
},
- Replicas: pointer.Int32Ptr(1),
+ Replicas: pointer.Int32(1),
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Containers: []corev1.Container{
@@ -757,7 +757,7 @@ func TestMutateFuncFor_MutateStatefulSetSpec(t *testing.T) {
"and": "another",
},
},
- Replicas: pointer.Int32Ptr(2),
+ Replicas: pointer.Int32(2),
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Containers: []corev1.Container{
diff --git a/operator/internal/manifests/node_placement_test.go b/operator/internal/manifests/node_placement_test.go
index 1f82284061126..013b23d904f96 100644
--- a/operator/internal/manifests/node_placement_test.go
+++ b/operator/internal/manifests/node_placement_test.go
@@ -3,13 +3,14 @@ package manifests
import (
"testing"
- configv1 "github.com/grafana/loki/operator/apis/config/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
- "github.com/grafana/loki/operator/internal/manifests/storage"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ configv1 "github.com/grafana/loki/operator/apis/config/v1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ "github.com/grafana/loki/operator/internal/manifests/storage"
)
func TestTolerationsAreSetForEachComponent(t *testing.T) {
diff --git a/operator/internal/manifests/openshift/alertingrule_test.go b/operator/internal/manifests/openshift/alertingrule_test.go
index 66a08bdfe88ca..91da560e2a6df 100644
--- a/operator/internal/manifests/openshift/alertingrule_test.go
+++ b/operator/internal/manifests/openshift/alertingrule_test.go
@@ -3,9 +3,10 @@ package openshift
import (
"testing"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
)
func TestAlertingRuleTenantLabels(t *testing.T) {
diff --git a/operator/internal/manifests/openshift/build_test.go b/operator/internal/manifests/openshift/build_test.go
index 875138614629c..9cf050ed92fab 100644
--- a/operator/internal/manifests/openshift/build_test.go
+++ b/operator/internal/manifests/openshift/build_test.go
@@ -6,7 +6,6 @@ import (
"time"
"github.com/stretchr/testify/require"
-
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
diff --git a/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-chunks.json b/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-chunks.json
index c2c1313e8364d..71f39e929c4a5 100644
--- a/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-chunks.json
+++ b/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-chunks.json
@@ -598,7 +598,7 @@
"steppedLine": false,
"targets": [
{
- "expr": "cortex_ingester_flush_queue_length{namespace=\"$namespace\", job=~\".+-ingester-http\"}",
+ "expr": "loki_ingester_flush_queue_length{namespace=\"$namespace\", job=~\".+-ingester-http\"} or cortex_ingester_flush_queue_length{namespace=\"$namespace\", job=~\".+-ingester-http\"}",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{pod}}",
diff --git a/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-reads.json b/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-reads.json
index 09a8198cedf73..e1adb4dd6cc0a 100644
--- a/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-reads.json
+++ b/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-reads.json
@@ -209,14 +209,17 @@
"dashes": false,
"datasource": "$datasource",
"fieldConfig": {
- "custom": {
- "fillOpacity": 50,
- "showPoints": "never",
- "stacking": {
- "group": "A",
- "mode": "normal"
+ "defaults": {
+ "custom": {
+ "fillOpacity": 50,
+ "showPoints": "never",
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
}
- }
+ },
+ "unit": "s"
},
"fill": 1,
"id": 3,
@@ -482,14 +485,17 @@
"dashes": false,
"datasource": "$datasource",
"fieldConfig": {
- "custom": {
- "fillOpacity": 50,
- "showPoints": "never",
- "stacking": {
- "group": "A",
- "mode": "normal"
+ "defaults": {
+ "custom": {
+ "fillOpacity": 50,
+ "showPoints": "never",
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
}
- }
+ },
+ "unit": "s"
},
"fill": 1,
"id": 6,
@@ -755,14 +761,17 @@
"dashes": false,
"datasource": "$datasource",
"fieldConfig": {
- "custom": {
- "fillOpacity": 50,
- "showPoints": "never",
- "stacking": {
- "group": "A",
- "mode": "normal"
+ "defaults": {
+ "custom": {
+ "fillOpacity": 50,
+ "showPoints": "never",
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
}
- }
+ },
+ "unit": "s"
},
"fill": 1,
"id": 9,
@@ -1028,14 +1037,17 @@
"dashes": false,
"datasource": "$datasource",
"fieldConfig": {
- "custom": {
- "fillOpacity": 50,
- "showPoints": "never",
- "stacking": {
- "group": "A",
- "mode": "normal"
+ "defaults": {
+ "custom": {
+ "fillOpacity": 50,
+ "showPoints": "never",
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
}
- }
+ },
+ "unit": "s"
},
"fill": 1,
"id": 15,
@@ -1113,279 +1125,6 @@
"showTitle": true,
"title": "Index",
"titleSize": "h6"
- },
- {
- "collapse": false,
- "height": "250px",
- "panels": [
- {
- "aliasColors": {
- "1xx": "#EAB839",
- "2xx": "#7EB26D",
- "3xx": "#6ED0E0",
- "4xx": "#EF843C",
- "5xx": "#E24D42",
- "error": "#E24D42",
- "success": "#7EB26D"
- },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 10,
- "id": 16,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 0,
- "links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
- "span": 4,
- "stack": true,
- "steppedLine": false,
- "targets": [
- {
- "expr": "sum by (status) (\n label_replace(label_replace(rate(loki_boltdb_shipper_request_duration_seconds_count{namespace=\"$namespace\",job=~\".+-index-gateway-http\", operation=\"Shipper.Query\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n \"status\", \"${1}\", \"status_code\", \"([a-z]+)\"))\n",
- "format": "time_series",
- "intervalFactor": 2,
- "legendFormat": "{{status}}",
- "refId": "A",
- "step": 10
- }
- ],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
- "title": "QPS",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
- },
- {
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "id": 17,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
- "span": 4,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "histogram_quantile(0.99, sum(rate(loki_boltdb_shipper_request_duration_seconds_bucket{namespace=\"$namespace\",job=~\".+-index-gateway-http\", operation=\"Shipper.Query\"}[$__rate_interval])) by (le)) * 1e3",
- "format": "time_series",
- "intervalFactor": 2,
- "legendFormat": "99th Percentile",
- "refId": "A",
- "step": 10
- },
- {
- "expr": "histogram_quantile(0.50, sum(rate(loki_boltdb_shipper_request_duration_seconds_bucket{namespace=\"$namespace\",job=~\".+-index-gateway-http\", operation=\"Shipper.Query\"}[$__rate_interval])) by (le)) * 1e3",
- "format": "time_series",
- "intervalFactor": 2,
- "legendFormat": "50th Percentile",
- "refId": "B",
- "step": 10
- },
- {
- "expr": "sum(rate(loki_boltdb_shipper_request_duration_seconds_sum{namespace=\"$namespace\",job=~\".+-index-gateway-http\", operation=\"Shipper.Query\"}[$__rate_interval])) * 1e3 / sum(rate(loki_boltdb_shipper_request_duration_seconds_count{namespace=\"$namespace\",job=~\".+-index-gateway-http\", operation=\"Shipper.Query\"}[$__rate_interval]))",
- "format": "time_series",
- "intervalFactor": 2,
- "legendFormat": "Average",
- "refId": "C",
- "step": 10
- }
- ],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
- "title": "Latency",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "ms",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
- },
- {
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fieldConfig": {
- "custom": {
- "fillOpacity": 50,
- "showPoints": "never",
- "stacking": {
- "group": "A",
- "mode": "normal"
- }
- }
- },
- "fill": 1,
- "id": 18,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
- "span": 4,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "histogram_quantile(0.99,\n sum(\n rate(loki_boltdb_shipper_request_duration_seconds_bucket{namespace=\"$namespace\",job=~\".+-index-gateway-http\", operation=\"Shipper.Query\"}[$__rate_interval])\n ) by (pod, le)\n )\n",
- "instant": false,
- "legendFormat": "{{pod}}",
- "range": true,
- "refId": "A"
- }
- ],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
- "title": "Per Pod Latency (p99)",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
- }
- ],
- "repeat": null,
- "repeatIteration": null,
- "repeatRowId": null,
- "showTitle": true,
- "title": "BoltDB Shipper",
- "titleSize": "h6"
}
],
"schemaVersion": 14,
diff --git a/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-retention.json b/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-retention.json
index 39f847e9b7588..6a3e34fd00119 100644
--- a/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-retention.json
+++ b/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-retention.json
@@ -375,7 +375,7 @@
"renderer": "flot",
"seriesOverrides": [ ],
"spaceLength": 10,
- "span": 4,
+ "span": 6,
"stack": false,
"steppedLine": false,
"targets": [
@@ -389,7 +389,7 @@
"thresholds": [ ],
"timeFrom": null,
"timeShift": null,
- "title": "Last Compact and Mark Operation Success",
+ "title": "Last Compact Tables Operation Success",
"tooltip": {
"shared": true,
"sort": 2,
@@ -449,7 +449,7 @@
"renderer": "flot",
"seriesOverrides": [ ],
"spaceLength": 10,
- "span": 4,
+ "span": 6,
"stack": false,
"steppedLine": false,
"targets": [
@@ -465,7 +465,7 @@
"thresholds": [ ],
"timeFrom": null,
"timeShift": null,
- "title": "Compact and Mark Operations Duration",
+ "title": "Compact Tables Operations Duration",
"tooltip": {
"shared": true,
"sort": 2,
@@ -497,7 +497,19 @@
"show": false
}
]
- },
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Compaction",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
{
"aliasColors": { },
"bars": false,
@@ -505,7 +517,7 @@
"dashes": false,
"datasource": "$datasource",
"fill": 1,
- "id": 6,
+ "id": 7,
"legend": {
"avg": false,
"current": false,
@@ -525,7 +537,7 @@
"renderer": "flot",
"seriesOverrides": [ ],
"spaceLength": 10,
- "span": 4,
+ "span": 6,
"stack": false,
"steppedLine": false,
"targets": [
@@ -541,7 +553,7 @@
"thresholds": [ ],
"timeFrom": null,
"timeShift": null,
- "title": "Compact and Mark Operations Per Status",
+ "title": "Compact Tables Operations Per Status",
"tooltip": {
"shared": true,
"sort": 2,
@@ -579,7 +591,7 @@
"repeatIteration": null,
"repeatRowId": null,
"showTitle": true,
- "title": "Compact and Mark",
+ "title": "",
"titleSize": "h6"
},
{
@@ -593,7 +605,7 @@
"dashes": false,
"datasource": "$datasource",
"fill": 10,
- "id": 7,
+ "id": 11,
"legend": {
"avg": false,
"current": false,
@@ -669,7 +681,7 @@
"dashes": false,
"datasource": "$datasource",
"fill": 10,
- "id": 8,
+ "id": 12,
"legend": {
"avg": false,
"current": false,
@@ -745,7 +757,7 @@
"dashes": false,
"datasource": "$datasource",
"fill": 10,
- "id": 9,
+ "id": 13,
"legend": {
"avg": false,
"current": false,
@@ -834,7 +846,7 @@
"datasource": "$datasource",
"fill": 1,
"format": "short",
- "id": 10,
+ "id": 14,
"legend": {
"avg": false,
"current": false,
@@ -909,7 +921,7 @@
"dashes": false,
"datasource": "$datasource",
"fill": 1,
- "id": 11,
+ "id": 15,
"legend": {
"avg": false,
"current": false,
@@ -1014,7 +1026,7 @@
"datasource": "$datasource",
"fill": 1,
"format": "short",
- "id": 12,
+ "id": 16,
"legend": {
"avg": false,
"current": false,
@@ -1089,7 +1101,7 @@
"dashes": false,
"datasource": "$datasource",
"fill": 1,
- "id": 13,
+ "id": 17,
"legend": {
"avg": false,
"current": false,
@@ -1193,7 +1205,7 @@
"dashes": false,
"datasource": "$datasource",
"fill": 1,
- "id": 14,
+ "id": 18,
"legend": {
"avg": false,
"current": false,
@@ -1269,7 +1281,7 @@
"dashes": false,
"datasource": "$datasource",
"fill": 1,
- "id": 15,
+ "id": 19,
"legend": {
"avg": false,
"current": false,
@@ -1345,7 +1357,7 @@
"dashes": false,
"datasource": "$datasource",
"fill": 1,
- "id": 16,
+ "id": 20,
"legend": {
"avg": false,
"current": false,
diff --git a/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-writes.json b/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-writes.json
index d217299772e6b..58107485d370c 100644
--- a/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-writes.json
+++ b/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-writes.json
@@ -215,21 +215,13 @@
"height": "250px",
"panels": [
{
- "aliasColors": {
- "1xx": "#EAB839",
- "2xx": "#7EB26D",
- "3xx": "#6ED0E0",
- "4xx": "#EF843C",
- "5xx": "#E24D42",
- "error": "#E24D42",
- "success": "#7EB26D"
- },
+ "aliasColors": { },
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
- "fill": 10,
- "id": 5,
+ "fill": 1,
+ "id": 3,
"legend": {
"avg": false,
"current": false,
@@ -240,7 +232,7 @@
"values": false
},
"lines": true,
- "linewidth": 0,
+ "linewidth": 1,
"links": [ ],
"nullPointMode": "null as zero",
"percentage": false,
@@ -250,22 +242,22 @@
"seriesOverrides": [ ],
"spaceLength": 10,
"span": 6,
- "stack": true,
+ "stack": false,
"steppedLine": false,
"targets": [
{
- "expr": "sum by (status) (\n label_replace(label_replace(rate(loki_request_duration_seconds_count{namespace=\"$namespace\",job=~\".+-ingester-http\", route=\"/logproto.Pusher/Push\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n \"status\", \"${1}\", \"status_code\", \"([a-z]+)\"))\n",
+ "expr": "sum (rate(loki_distributor_structured_metadata_bytes_received_total{namespace=\"$namespace\",job=~\".+-distributor-http\",route=\"loki_api_v1_push\",}[$__rate_interval])) / sum(rate(loki_distributor_bytes_received_total{namespace=\"$namespace\",job=~\".+-distributor-http\",route=\"loki_api_v1_push\",}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 2,
- "legendFormat": "{{status}}",
- "refId": "A",
+ "legendFormat": "bytes",
+ "legendLink": null,
"step": 10
}
],
"thresholds": [ ],
"timeFrom": null,
"timeShift": null,
- "title": "QPS",
+ "title": "Per Total Received Bytes",
"tooltip": {
"shared": true,
"sort": 2,
@@ -305,7 +297,7 @@
"dashes": false,
"datasource": "$datasource",
"fill": 1,
- "id": 6,
+ "id": 4,
"legend": {
"avg": false,
"current": false,
@@ -326,38 +318,22 @@
"seriesOverrides": [ ],
"spaceLength": 10,
"span": 6,
- "stack": false,
+ "stack": true,
"steppedLine": false,
"targets": [
{
- "expr": "histogram_quantile(0.99, sum by (le) (namespace_job_route:loki_request_duration_seconds_bucket:sum_rate{namespace=\"$namespace\", job=~\".+-ingester-http\", route=\"/logproto.Pusher/Push\"})) * 1e3",
- "format": "time_series",
- "intervalFactor": 2,
- "legendFormat": "99th Percentile",
- "refId": "A",
- "step": 10
- },
- {
- "expr": "histogram_quantile(0.50, sum by (le) (namespace_job_route:loki_request_duration_seconds_bucket:sum_rate{namespace=\"$namespace\", job=~\".+-ingester-http\", route=\"/logproto.Pusher/Push\"})) * 1e3",
- "format": "time_series",
- "intervalFactor": 2,
- "legendFormat": "50th Percentile",
- "refId": "B",
- "step": 10
- },
- {
- "expr": "1e3 * sum(namespace_job_route:loki_request_duration_seconds_sum:sum_rate{namespace=\"$namespace\", job=~\".+-ingester-http\", route=\"/logproto.Pusher/Push\"}) / sum(namespace_job_route:loki_request_duration_seconds_count:sum_rate{namespace=\"$namespace\", job=~\".+-ingester-http\", route=\"/logproto.Pusher/Push\"})",
+ "expr": "sum by (tenant) (rate(loki_distributor_structured_metadata_bytes_received_total{namespace=\"$namespace\",job=~\".+-distributor-http\",route=\"loki_api_v1_push\",}[$__rate_interval])) / ignoring(tenant) group_left sum(rate(loki_distributor_structured_metadata_bytes_received_total{namespace=\"$namespace\",job=~\".+-distributor-http\",route=\"loki_api_v1_push\",}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 2,
- "legendFormat": "Average",
- "refId": "C",
+ "legendFormat": "{{tenant}}",
+ "legendLink": null,
"step": 10
}
],
"thresholds": [ ],
"timeFrom": null,
"timeShift": null,
- "title": "Latency",
+ "title": "Per Tenant",
"tooltip": {
"shared": true,
"sort": 2,
@@ -373,10 +349,10 @@
},
"yaxes": [
{
- "format": "ms",
+ "format": "short",
"label": null,
"logBase": 1,
- "max": null,
+ "max": 1,
"min": 0,
"show": true
},
@@ -384,7 +360,7 @@
"format": "short",
"label": null,
"logBase": 1,
- "max": null,
+ "max": 1,
"min": null,
"show": false
}
@@ -395,7 +371,7 @@
"repeatIteration": null,
"repeatRowId": null,
"showTitle": true,
- "title": "Ingester",
+ "title": "Distributor - Structured Metadata",
"titleSize": "h6"
},
{
@@ -442,7 +418,7 @@
"steppedLine": false,
"targets": [
{
- "expr": "sum by (status) (\n label_replace(label_replace(rate(loki_index_request_duration_seconds_count{namespace=\"$namespace\",job=~\".+-ingester-http\", operation=\"index_chunk\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n \"status\", \"${1}\", \"status_code\", \"([a-z]+)\"))\n",
+ "expr": "sum by (status) (\n label_replace(label_replace(rate(loki_request_duration_seconds_count{namespace=\"$namespace\",job=~\".+-ingester-http\", route=\"/logproto.Pusher/Push\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n \"status\", \"${1}\", \"status_code\", \"([a-z]+)\"))\n",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{status}}",
@@ -518,7 +494,7 @@
"steppedLine": false,
"targets": [
{
- "expr": "histogram_quantile(0.99, sum(rate(loki_index_request_duration_seconds_bucket{namespace=\"$namespace\",job=~\".+-ingester-http\", operation=\"index_chunk\"}[$__rate_interval])) by (le)) * 1e3",
+ "expr": "histogram_quantile(0.99, sum by (le) (namespace_job_route:loki_request_duration_seconds_bucket:sum_rate{namespace=\"$namespace\", job=~\".+-ingester-http\", route=\"/logproto.Pusher/Push\"})) * 1e3",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "99th Percentile",
@@ -526,7 +502,7 @@
"step": 10
},
{
- "expr": "histogram_quantile(0.50, sum(rate(loki_index_request_duration_seconds_bucket{namespace=\"$namespace\",job=~\".+-ingester-http\", operation=\"index_chunk\"}[$__rate_interval])) by (le)) * 1e3",
+ "expr": "histogram_quantile(0.50, sum by (le) (namespace_job_route:loki_request_duration_seconds_bucket:sum_rate{namespace=\"$namespace\", job=~\".+-ingester-http\", route=\"/logproto.Pusher/Push\"})) * 1e3",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "50th Percentile",
@@ -534,7 +510,7 @@
"step": 10
},
{
- "expr": "sum(rate(loki_index_request_duration_seconds_sum{namespace=\"$namespace\",job=~\".+-ingester-http\", operation=\"index_chunk\"}[$__rate_interval])) * 1e3 / sum(rate(loki_index_request_duration_seconds_count{namespace=\"$namespace\",job=~\".+-ingester-http\", operation=\"index_chunk\"}[$__rate_interval]))",
+ "expr": "1e3 * sum(namespace_job_route:loki_request_duration_seconds_sum:sum_rate{namespace=\"$namespace\", job=~\".+-ingester-http\", route=\"/logproto.Pusher/Push\"}) / sum(namespace_job_route:loki_request_duration_seconds_count:sum_rate{namespace=\"$namespace\", job=~\".+-ingester-http\", route=\"/logproto.Pusher/Push\"})",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "Average",
@@ -583,7 +559,7 @@
"repeatIteration": null,
"repeatRowId": null,
"showTitle": true,
- "title": "Index",
+ "title": "Ingester",
"titleSize": "h6"
},
{
@@ -630,7 +606,7 @@
"steppedLine": false,
"targets": [
{
- "expr": "sum by (status) (\n label_replace(label_replace(rate(loki_boltdb_shipper_request_duration_seconds_count{namespace=\"$namespace\",job=~\".+-ingester-http\", operation=\"WRITE\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n \"status\", \"${1}\", \"status_code\", \"([a-z]+)\"))\n",
+ "expr": "sum by (status) (\n label_replace(label_replace(rate(loki_index_request_duration_seconds_count{namespace=\"$namespace\",job=~\".+-ingester-http\", operation=\"index_chunk\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n \"status\", \"${1}\", \"status_code\", \"([a-z]+)\"))\n",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{status}}",
@@ -706,7 +682,7 @@
"steppedLine": false,
"targets": [
{
- "expr": "histogram_quantile(0.99, sum(rate(loki_boltdb_shipper_request_duration_seconds_bucket{namespace=\"$namespace\",job=~\".+-ingester-http\", operation=\"WRITE\"}[$__rate_interval])) by (le)) * 1e3",
+ "expr": "histogram_quantile(0.99, sum(rate(loki_index_request_duration_seconds_bucket{namespace=\"$namespace\",job=~\".+-ingester-http\", operation=\"index_chunk\"}[$__rate_interval])) by (le)) * 1e3",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "99th Percentile",
@@ -714,7 +690,7 @@
"step": 10
},
{
- "expr": "histogram_quantile(0.50, sum(rate(loki_boltdb_shipper_request_duration_seconds_bucket{namespace=\"$namespace\",job=~\".+-ingester-http\", operation=\"WRITE\"}[$__rate_interval])) by (le)) * 1e3",
+ "expr": "histogram_quantile(0.50, sum(rate(loki_index_request_duration_seconds_bucket{namespace=\"$namespace\",job=~\".+-ingester-http\", operation=\"index_chunk\"}[$__rate_interval])) by (le)) * 1e3",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "50th Percentile",
@@ -722,7 +698,7 @@
"step": 10
},
{
- "expr": "sum(rate(loki_boltdb_shipper_request_duration_seconds_sum{namespace=\"$namespace\",job=~\".+-ingester-http\", operation=\"WRITE\"}[$__rate_interval])) * 1e3 / sum(rate(loki_boltdb_shipper_request_duration_seconds_count{namespace=\"$namespace\",job=~\".+-ingester-http\", operation=\"WRITE\"}[$__rate_interval]))",
+ "expr": "sum(rate(loki_index_request_duration_seconds_sum{namespace=\"$namespace\",job=~\".+-ingester-http\", operation=\"index_chunk\"}[$__rate_interval])) * 1e3 / sum(rate(loki_index_request_duration_seconds_count{namespace=\"$namespace\",job=~\".+-ingester-http\", operation=\"index_chunk\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "Average",
@@ -771,7 +747,7 @@
"repeatIteration": null,
"repeatRowId": null,
"showTitle": true,
- "title": "BoltDB Shipper",
+ "title": "Index",
"titleSize": "h6"
}
],
diff --git a/operator/internal/manifests/openshift/recordingrule_test.go b/operator/internal/manifests/openshift/recordingrule_test.go
index af496b7024aa9..49e30de999f35 100644
--- a/operator/internal/manifests/openshift/recordingrule_test.go
+++ b/operator/internal/manifests/openshift/recordingrule_test.go
@@ -3,9 +3,10 @@ package openshift
import (
"testing"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
)
func TestRecordingRuleTenantLabels(t *testing.T) {
diff --git a/operator/internal/manifests/options_test.go b/operator/internal/manifests/options_test.go
index 48e9853f7870a..d6fe7c5c19d4a 100644
--- a/operator/internal/manifests/options_test.go
+++ b/operator/internal/manifests/options_test.go
@@ -6,9 +6,8 @@ import (
"github.com/stretchr/testify/require"
- "github.com/grafana/loki/operator/internal/manifests/internal/config"
-
lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ "github.com/grafana/loki/operator/internal/manifests/internal/config"
)
func TestNewTimeoutConfig_ReturnsDefaults_WhenLimitsSpecEmpty(t *testing.T) {
@@ -60,15 +59,19 @@ func TestNewTimeoutConfig_ReturnsCustomConfig_WhenLimitsSpecNotEmpty_UseMaxTenan
QueryTimeout: "10m",
},
},
- Tenants: map[string]lokiv1.LimitsTemplateSpec{
+ Tenants: map[string]lokiv1.PerTenantLimitsTemplateSpec{
"tenant-a": {
- QueryLimits: &lokiv1.QueryLimitSpec{
- QueryTimeout: "10m",
+ QueryLimits: &lokiv1.PerTenantQueryLimitSpec{
+ QueryLimitSpec: lokiv1.QueryLimitSpec{
+ QueryTimeout: "10m",
+ },
},
},
"tenant-b": {
- QueryLimits: &lokiv1.QueryLimitSpec{
- QueryTimeout: "20m",
+ QueryLimits: &lokiv1.PerTenantQueryLimitSpec{
+ QueryLimitSpec: lokiv1.QueryLimitSpec{
+ QueryTimeout: "20m",
+ },
},
},
},
@@ -99,15 +102,19 @@ func TestNewTimeoutConfig_ReturnsCustomConfig_WhenTenantLimitsSpecOnly_ReturnsUs
s := lokiv1.LokiStack{
Spec: lokiv1.LokiStackSpec{
Limits: &lokiv1.LimitsSpec{
- Tenants: map[string]lokiv1.LimitsTemplateSpec{
+ Tenants: map[string]lokiv1.PerTenantLimitsTemplateSpec{
"tenant-a": {
- QueryLimits: &lokiv1.QueryLimitSpec{
- QueryTimeout: "10m",
+ QueryLimits: &lokiv1.PerTenantQueryLimitSpec{
+ QueryLimitSpec: lokiv1.QueryLimitSpec{
+ QueryTimeout: "10m",
+ },
},
},
"tenant-b": {
- QueryLimits: &lokiv1.QueryLimitSpec{
- QueryTimeout: "20m",
+ QueryLimits: &lokiv1.PerTenantQueryLimitSpec{
+ QueryLimitSpec: lokiv1.QueryLimitSpec{
+ QueryTimeout: "20m",
+ },
},
},
},
@@ -160,15 +167,19 @@ func TestNewTimeoutConfig_ReturnsDefaults_WhenTenantQueryTimeoutParseError(t *te
QueryTimeout: "10m",
},
},
- Tenants: map[string]lokiv1.LimitsTemplateSpec{
+ Tenants: map[string]lokiv1.PerTenantLimitsTemplateSpec{
"tenant-a": {
- QueryLimits: &lokiv1.QueryLimitSpec{
- QueryTimeout: "invalid",
+ QueryLimits: &lokiv1.PerTenantQueryLimitSpec{
+ QueryLimitSpec: lokiv1.QueryLimitSpec{
+ QueryTimeout: "invalid",
+ },
},
},
"tenant-b": {
- QueryLimits: &lokiv1.QueryLimitSpec{
- QueryTimeout: "20m",
+ QueryLimits: &lokiv1.PerTenantQueryLimitSpec{
+ QueryLimitSpec: lokiv1.QueryLimitSpec{
+ QueryTimeout: "20m",
+ },
},
},
},
diff --git a/operator/internal/manifests/proxy_env_test.go b/operator/internal/manifests/proxy_env_test.go
index cd273f5f0000d..9a780dfe48be4 100644
--- a/operator/internal/manifests/proxy_env_test.go
+++ b/operator/internal/manifests/proxy_env_test.go
@@ -4,10 +4,11 @@ import (
"strings"
"testing"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/stretchr/testify/require"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
+
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
)
func TestContainerEnvVars_ReadVarsFromCustomResource(t *testing.T) {
diff --git a/operator/internal/manifests/querier.go b/operator/internal/manifests/querier.go
index a560c025d9e2b..b75997e4553c5 100644
--- a/operator/internal/manifests/querier.go
+++ b/operator/internal/manifests/querier.go
@@ -73,7 +73,7 @@ func BuildQuerier(opts Options) ([]client.Object, error) {
// NewQuerierDeployment creates a deployment object for a querier
func NewQuerierDeployment(opts Options) *appsv1.Deployment {
l := ComponentLabels(LabelQuerierComponent, opts.Name)
- a := commonAnnotations(opts.ConfigSHA1, opts.CertRotationRequiredAt)
+ a := commonAnnotations(opts.ConfigSHA1, opts.ObjectStorage.SecretSHA1, opts.CertRotationRequiredAt)
podSpec := corev1.PodSpec{
Affinity: configureAffinity(LabelQuerierComponent, opts.Name, opts.Gates.DefaultNodeAffinity, opts.Stack.Template.Querier),
Volumes: []corev1.Volume{
diff --git a/operator/internal/manifests/querier_test.go b/operator/internal/manifests/querier_test.go
index 89717edabaebc..b9d085866b784 100644
--- a/operator/internal/manifests/querier_test.go
+++ b/operator/internal/manifests/querier_test.go
@@ -11,6 +11,7 @@ import (
"k8s.io/apimachinery/pkg/util/intstr"
lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ "github.com/grafana/loki/operator/internal/manifests/storage"
)
func TestNewQuerierDeployment_HasTemplateConfigHashAnnotation(t *testing.T) {
@@ -28,10 +29,31 @@ func TestNewQuerierDeployment_HasTemplateConfigHashAnnotation(t *testing.T) {
},
})
- expected := "loki.grafana.com/config-hash"
annotations := ss.Spec.Template.Annotations
- require.Contains(t, annotations, expected)
- require.Equal(t, annotations[expected], "deadbeef")
+ require.Contains(t, annotations, AnnotationLokiConfigHash)
+ require.Equal(t, annotations[AnnotationLokiConfigHash], "deadbeef")
+}
+
+func TestNewQuerierDeployment_HasTemplateObjectStoreHashAnnotation(t *testing.T) {
+ ss := NewQuerierDeployment(Options{
+ Name: "abcd",
+ Namespace: "efgh",
+ ObjectStorage: storage.Options{
+ SecretSHA1: "deadbeef",
+ },
+ Stack: lokiv1.LokiStackSpec{
+ StorageClassName: "standard",
+ Template: &lokiv1.LokiTemplateSpec{
+ Querier: &lokiv1.LokiComponentSpec{
+ Replicas: 1,
+ },
+ },
+ },
+ })
+
+ annotations := ss.Spec.Template.Annotations
+ require.Contains(t, annotations, AnnotationLokiObjectStoreHash)
+ require.Equal(t, annotations[AnnotationLokiObjectStoreHash], "deadbeef")
}
func TestNewQuerierDeployment_HasTemplateCertRotationRequiredAtAnnotation(t *testing.T) {
@@ -48,10 +70,9 @@ func TestNewQuerierDeployment_HasTemplateCertRotationRequiredAtAnnotation(t *tes
},
})
- expected := "loki.grafana.com/certRotationRequiredAt"
annotations := ss.Spec.Template.Annotations
- require.Contains(t, annotations, expected)
- require.Equal(t, annotations[expected], "deadbeef")
+ require.Contains(t, annotations, AnnotationCertRotationRequiredAt)
+ require.Equal(t, annotations[AnnotationCertRotationRequiredAt], "deadbeef")
}
func TestNewQuerierDeployment_SelectorMatchesLabels(t *testing.T) {
diff --git a/operator/internal/manifests/query-frontend.go b/operator/internal/manifests/query-frontend.go
index fab6f50bcb3a7..119f28f7e4f72 100644
--- a/operator/internal/manifests/query-frontend.go
+++ b/operator/internal/manifests/query-frontend.go
@@ -67,7 +67,7 @@ func BuildQueryFrontend(opts Options) ([]client.Object, error) {
// NewQueryFrontendDeployment creates a deployment object for a query-frontend
func NewQueryFrontendDeployment(opts Options) *appsv1.Deployment {
l := ComponentLabels(LabelQueryFrontendComponent, opts.Name)
- a := commonAnnotations(opts.ConfigSHA1, opts.CertRotationRequiredAt)
+ a := commonAnnotations(opts.ConfigSHA1, opts.ObjectStorage.SecretSHA1, opts.CertRotationRequiredAt)
podSpec := corev1.PodSpec{
Affinity: configureAffinity(LabelQueryFrontendComponent, opts.Name, opts.Gates.DefaultNodeAffinity, opts.Stack.Template.QueryFrontend),
Volumes: []corev1.Volume{
diff --git a/operator/internal/manifests/query-frontend_test.go b/operator/internal/manifests/query-frontend_test.go
index 6615e078a2bb0..d11fb968ce3ac 100644
--- a/operator/internal/manifests/query-frontend_test.go
+++ b/operator/internal/manifests/query-frontend_test.go
@@ -10,6 +10,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ "github.com/grafana/loki/operator/internal/manifests/storage"
)
func TestNewQueryFrontendDeployment_SelectorMatchesLabels(t *testing.T) {
@@ -44,10 +45,31 @@ func TestNewQueryFrontendDeployment_HasTemplateConfigHashAnnotation(t *testing.T
},
},
})
- expected := "loki.grafana.com/config-hash"
+
+ annotations := ss.Spec.Template.Annotations
+ require.Contains(t, annotations, AnnotationLokiConfigHash)
+ require.Equal(t, annotations[AnnotationLokiConfigHash], "deadbeef")
+}
+
+func TestNewQueryFrontendDeployment_HasTemplateObjectStoreHashAnnotation(t *testing.T) {
+ ss := NewQueryFrontendDeployment(Options{
+ Name: "abcd",
+ Namespace: "efgh",
+ ObjectStorage: storage.Options{
+ SecretSHA1: "deadbeef",
+ },
+ Stack: lokiv1.LokiStackSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ QueryFrontend: &lokiv1.LokiComponentSpec{
+ Replicas: 1,
+ },
+ },
+ },
+ })
+
annotations := ss.Spec.Template.Annotations
- require.Contains(t, annotations, expected)
- require.Equal(t, annotations[expected], "deadbeef")
+ require.Contains(t, annotations, AnnotationLokiObjectStoreHash)
+ require.Equal(t, annotations[AnnotationLokiObjectStoreHash], "deadbeef")
}
func TestNewQueryFrontendDeployment_HasTemplateCertRotationRequiredAtAnnotation(t *testing.T) {
@@ -64,10 +86,9 @@ func TestNewQueryFrontendDeployment_HasTemplateCertRotationRequiredAtAnnotation(
},
})
- expected := "loki.grafana.com/certRotationRequiredAt"
annotations := ss.Spec.Template.Annotations
- require.Contains(t, annotations, expected)
- require.Equal(t, annotations[expected], "deadbeef")
+ require.Contains(t, annotations, AnnotationCertRotationRequiredAt)
+ require.Equal(t, annotations[AnnotationCertRotationRequiredAt], "deadbeef")
}
func TestBuildQueryFrontend_PodDisruptionBudget(t *testing.T) {
diff --git a/operator/internal/manifests/ruler.go b/operator/internal/manifests/ruler.go
index 902160486575a..8e44f5834fef1 100644
--- a/operator/internal/manifests/ruler.go
+++ b/operator/internal/manifests/ruler.go
@@ -17,6 +17,7 @@ import (
lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/internal/config"
"github.com/grafana/loki/operator/internal/manifests/openshift"
+ "github.com/grafana/loki/operator/internal/manifests/storage"
)
// BuildRuler returns a list of k8s objects for Loki Stack Ruler
@@ -28,6 +29,10 @@ func BuildRuler(opts Options) ([]client.Object, error) {
}
}
+ if err := storage.ConfigureStatefulSet(statefulSet, opts.ObjectStorage); err != nil {
+ return nil, err
+ }
+
if opts.Gates.GRPCEncryption {
if err := configureRulerGRPCServicePKI(statefulSet, opts); err != nil {
return nil, err
@@ -92,7 +97,7 @@ func NewRulerStatefulSet(opts Options) *appsv1.StatefulSet {
}
l := ComponentLabels(LabelRulerComponent, opts.Name)
- a := commonAnnotations(opts.ConfigSHA1, opts.CertRotationRequiredAt)
+ a := commonAnnotations(opts.ConfigSHA1, opts.ObjectStorage.SecretSHA1, opts.CertRotationRequiredAt)
podSpec := corev1.PodSpec{
Affinity: configureAffinity(LabelRulerComponent, opts.Name, opts.Gates.DefaultNodeAffinity, opts.Stack.Template.Ruler),
Volumes: []corev1.Volume{
diff --git a/operator/internal/manifests/ruler_test.go b/operator/internal/manifests/ruler_test.go
index b40a9c4de5d9a..b753dc090de8c 100644
--- a/operator/internal/manifests/ruler_test.go
+++ b/operator/internal/manifests/ruler_test.go
@@ -12,6 +12,7 @@ import (
lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/openshift"
+ "github.com/grafana/loki/operator/internal/manifests/storage"
)
func TestNewRulerStatefulSet_HasTemplateConfigHashAnnotation(t *testing.T) {
@@ -29,10 +30,31 @@ func TestNewRulerStatefulSet_HasTemplateConfigHashAnnotation(t *testing.T) {
},
})
- expected := "loki.grafana.com/config-hash"
annotations := ss.Spec.Template.Annotations
- require.Contains(t, annotations, expected)
- require.Equal(t, annotations[expected], "deadbeef")
+ require.Contains(t, annotations, AnnotationLokiConfigHash)
+ require.Equal(t, annotations[AnnotationLokiConfigHash], "deadbeef")
+}
+
+func TestNewRulerStatefulSet_HasTemplateObjectStoreHashAnnotation(t *testing.T) {
+ ss := NewRulerStatefulSet(Options{
+ Name: "abcd",
+ Namespace: "efgh",
+ ObjectStorage: storage.Options{
+ SecretSHA1: "deadbeef",
+ },
+ Stack: lokiv1.LokiStackSpec{
+ StorageClassName: "standard",
+ Template: &lokiv1.LokiTemplateSpec{
+ Ruler: &lokiv1.LokiComponentSpec{
+ Replicas: 1,
+ },
+ },
+ },
+ })
+
+ annotations := ss.Spec.Template.Annotations
+ require.Contains(t, annotations, AnnotationLokiObjectStoreHash)
+ require.Equal(t, annotations[AnnotationLokiObjectStoreHash], "deadbeef")
}
func TestNewRulerStatefulSet_HasTemplateCertRotationRequiredAtAnnotation(t *testing.T) {
@@ -49,10 +71,10 @@ func TestNewRulerStatefulSet_HasTemplateCertRotationRequiredAtAnnotation(t *test
},
},
})
- expected := "loki.grafana.com/certRotationRequiredAt"
+
annotations := ss.Spec.Template.Annotations
- require.Contains(t, annotations, expected)
- require.Equal(t, annotations[expected], "deadbeef")
+ require.Contains(t, annotations, AnnotationCertRotationRequiredAt)
+ require.Equal(t, annotations[AnnotationCertRotationRequiredAt], "deadbeef")
}
func TestBuildRuler_HasExtraObjectsForTenantMode(t *testing.T) {
diff --git a/operator/internal/manifests/rules_config_test.go b/operator/internal/manifests/rules_config_test.go
index 088fe828de8a6..338fab231644e 100644
--- a/operator/internal/manifests/rules_config_test.go
+++ b/operator/internal/manifests/rules_config_test.go
@@ -4,10 +4,11 @@ import (
"fmt"
"testing"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
+
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
)
func TestRulesConfigMap_ReturnsDataEntriesPerRule(t *testing.T) {
diff --git a/operator/internal/manifests/service_monitor_test.go b/operator/internal/manifests/service_monitor_test.go
index de46dfbda14f7..fb5a4359a30cb 100644
--- a/operator/internal/manifests/service_monitor_test.go
+++ b/operator/internal/manifests/service_monitor_test.go
@@ -4,13 +4,13 @@ import (
"fmt"
"testing"
+ monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
configv1 "github.com/grafana/loki/operator/apis/config/v1"
lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
- monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
)
// Test that all serviceMonitor match the labels of their services so that we know all serviceMonitor
@@ -99,6 +99,7 @@ func TestServiceMonitorMatchLabels(t *testing.T) {
}
for _, tst := range table {
+ tst := tst
testName := fmt.Sprintf("%s_%s", tst.Service.GetName(), tst.ServiceMonitor.GetName())
t.Run(testName, func(t *testing.T) {
t.Parallel()
@@ -189,6 +190,7 @@ func TestServiceMonitorEndpoints_ForBuiltInCertRotation(t *testing.T) {
}
for _, tst := range table {
+ tst := tst
testName := fmt.Sprintf("%s_%s", tst.Service.GetName(), tst.ServiceMonitor.GetName())
t.Run(testName, func(t *testing.T) {
t.Parallel()
diff --git a/operator/internal/manifests/storage/configure.go b/operator/internal/manifests/storage/configure.go
index 8a2003b976115..8c953ae1c5951 100644
--- a/operator/internal/manifests/storage/configure.go
+++ b/operator/internal/manifests/storage/configure.go
@@ -12,28 +12,19 @@ import (
lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
)
-const (
- // EnvGoogleApplicationCredentials is the environment variable to specify path to key.json
- EnvGoogleApplicationCredentials = "GOOGLE_APPLICATION_CREDENTIALS"
- // GCSFileName is the file containing the Google credentials for authentication
- GCSFileName = "key.json"
-
- secretDirectory = "/etc/storage/secrets"
- storageTLSVolume = "storage-tls"
- caDirectory = "/etc/storage/ca"
-)
-
// ConfigureDeployment appends additional pod volumes and container env vars, args, volume mounts
// based on the object storage type. Currently supported amendments:
+// - All: Ensure object storage secret mounted and auth projected as env vars.
// - GCS: Ensure env var GOOGLE_APPLICATION_CREDENTIALS in container
// - S3: Ensure mounting custom CA configmap if any TLSConfig given
func ConfigureDeployment(d *appsv1.Deployment, opts Options) error {
switch opts.SharedStore {
- case lokiv1.ObjectStorageSecretGCS:
- return configureDeployment(d, opts.SecretName)
+ case lokiv1.ObjectStorageSecretAlibabaCloud, lokiv1.ObjectStorageSecretAzure, lokiv1.ObjectStorageSecretGCS, lokiv1.ObjectStorageSecretSwift:
+ return configureDeployment(d, opts)
case lokiv1.ObjectStorageSecretS3:
- if opts.TLS == nil {
- return nil
+ err := configureDeployment(d, opts)
+ if err != nil {
+ return err
}
return configureDeploymentCA(d, opts.TLS)
default:
@@ -43,15 +34,16 @@ func ConfigureDeployment(d *appsv1.Deployment, opts Options) error {
// ConfigureStatefulSet appends additional pod volumes and container env vars, args, volume mounts
// based on the object storage type. Currently supported amendments:
+// - All: Ensure object storage secret mounted and auth projected as env vars.
// - GCS: Ensure env var GOOGLE_APPLICATION_CREDENTIALS in container
// - S3: Ensure mounting custom CA configmap if any TLSConfig given
func ConfigureStatefulSet(d *appsv1.StatefulSet, opts Options) error {
switch opts.SharedStore {
- case lokiv1.ObjectStorageSecretGCS:
- return configureStatefulSet(d, opts.SecretName)
+ case lokiv1.ObjectStorageSecretAlibabaCloud, lokiv1.ObjectStorageSecretAzure, lokiv1.ObjectStorageSecretGCS, lokiv1.ObjectStorageSecretSwift:
+ return configureStatefulSet(d, opts)
case lokiv1.ObjectStorageSecretS3:
- if opts.TLS == nil {
- return nil
+ if err := configureStatefulSet(d, opts); err != nil {
+ return err
}
return configureStatefulSetCA(d, opts.TLS)
default:
@@ -59,10 +51,10 @@ func ConfigureStatefulSet(d *appsv1.StatefulSet, opts Options) error {
}
}
-// ConfigureDeployment merges a GCS Object Storage volume into the deployment spec.
-// With this, the deployment will expose an environment variable for Google authentication.
-func configureDeployment(d *appsv1.Deployment, secretName string) error {
- p := ensureCredentialsForGCS(&d.Spec.Template.Spec, secretName)
+// configureDeployment merges the object storage secret volume into the deployment spec.
+// With this, the deployment will expose credentials-specific environment variables.
+func configureDeployment(d *appsv1.Deployment, opts Options) error {
+ p := ensureObjectStoreCredentials(&d.Spec.Template.Spec, opts)
if err := mergo.Merge(&d.Spec.Template.Spec, p, mergo.WithOverride); err != nil {
return kverrors.Wrap(err, "failed to merge gcs object storage spec ")
@@ -73,6 +65,10 @@ func configureDeployment(d *appsv1.Deployment, secretName string) error {
// ConfigureDeploymentCA merges a S3 CA ConfigMap volume into the deployment spec.
func configureDeploymentCA(d *appsv1.Deployment, tls *TLSConfig) error {
+ if tls == nil {
+ return nil
+ }
+
p := ensureCAForS3(&d.Spec.Template.Spec, tls)
if err := mergo.Merge(&d.Spec.Template.Spec, p, mergo.WithOverride); err != nil {
@@ -82,10 +78,10 @@ func configureDeploymentCA(d *appsv1.Deployment, tls *TLSConfig) error {
return nil
}
-// ConfigureStatefulSet merges a GCS Object Storage volume into the statefulset spec.
-// With this, the statefulset will expose an environment variable for Google authentication.
-func configureStatefulSet(s *appsv1.StatefulSet, secretName string) error {
- p := ensureCredentialsForGCS(&s.Spec.Template.Spec, secretName)
+// configureStatefulSet merges the object storage secret volume into the statefulset spec.
+// With this, the statefulset will expose credentials-specific environment variables.
+func configureStatefulSet(s *appsv1.StatefulSet, opts Options) error {
+ p := ensureObjectStoreCredentials(&s.Spec.Template.Spec, opts)
if err := mergo.Merge(&s.Spec.Template.Spec, p, mergo.WithOverride); err != nil {
return kverrors.Wrap(err, "failed to merge gcs object storage spec ")
@@ -96,6 +92,10 @@ func configureStatefulSet(s *appsv1.StatefulSet, secretName string) error {
// ConfigureStatefulSetCA merges a S3 CA ConfigMap volume into the statefulset spec.
func configureStatefulSetCA(s *appsv1.StatefulSet, tls *TLSConfig) error {
+ if tls == nil {
+ return nil
+ }
+
p := ensureCAForS3(&s.Spec.Template.Spec, tls)
if err := mergo.Merge(&s.Spec.Template.Spec, p, mergo.WithOverride); err != nil {
@@ -105,9 +105,11 @@ func configureStatefulSetCA(s *appsv1.StatefulSet, tls *TLSConfig) error {
return nil
}
-func ensureCredentialsForGCS(p *corev1.PodSpec, secretName string) corev1.PodSpec {
+func ensureObjectStoreCredentials(p *corev1.PodSpec, opts Options) corev1.PodSpec {
container := p.Containers[0].DeepCopy()
volumes := p.Volumes
+ secretName := opts.SecretName
+ storeType := opts.SharedStore
volumes = append(volumes, corev1.Volume{
Name: secretName,
@@ -124,10 +126,133 @@ func ensureCredentialsForGCS(p *corev1.PodSpec, secretName string) corev1.PodSpe
MountPath: secretDirectory,
})
- container.Env = append(container.Env, corev1.EnvVar{
- Name: EnvGoogleApplicationCredentials,
- Value: path.Join(secretDirectory, GCSFileName),
- })
+ var storeEnvVars []corev1.EnvVar
+ switch storeType {
+ case lokiv1.ObjectStorageSecretAlibabaCloud:
+ storeEnvVars = []corev1.EnvVar{
+ {
+ Name: EnvAlibabaCloudAccessKeyID,
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: secretName,
+ },
+ Key: KeyAlibabaCloudAccessKeyID,
+ },
+ },
+ },
+ {
+ Name: EnvAlibabaCloudAccessKeySecret,
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: secretName,
+ },
+ Key: KeyAlibabaCloudSecretAccessKey,
+ },
+ },
+ },
+ }
+ case lokiv1.ObjectStorageSecretAzure:
+ storeEnvVars = []corev1.EnvVar{
+ {
+ Name: EnvAzureStorageAccountName,
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: secretName,
+ },
+ Key: KeyAzureStorageAccountName,
+ },
+ },
+ },
+ {
+ Name: EnvAzureStorageAccountKey,
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: secretName,
+ },
+ Key: KeyAzureStorageAccountKey,
+ },
+ },
+ },
+ }
+ case lokiv1.ObjectStorageSecretGCS:
+ storeEnvVars = []corev1.EnvVar{
+ {
+ Name: EnvGoogleApplicationCredentials,
+ Value: path.Join(secretDirectory, KeyGCPServiceAccountKeyFilename),
+ },
+ }
+ case lokiv1.ObjectStorageSecretS3:
+ storeEnvVars = []corev1.EnvVar{
+ {
+ Name: EnvAWSAccessKeyID,
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: secretName,
+ },
+ Key: KeyAWSAccessKeyID,
+ },
+ },
+ },
+ {
+ Name: EnvAWSAccessKeySecret,
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: secretName,
+ },
+ Key: KeyAWSAccessKeySecret,
+ },
+ },
+ },
+ }
+
+ if opts.S3 != nil && opts.S3.SSE.Type == SSEKMSType && opts.S3.SSE.KMSEncryptionContext != "" {
+ storeEnvVars = append(storeEnvVars, corev1.EnvVar{
+ Name: EnvAWSSseKmsEncryptionContext,
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: secretName,
+ },
+ Key: KeyAWSSseKmsEncryptionContext,
+ },
+ },
+ })
+ }
+
+ case lokiv1.ObjectStorageSecretSwift:
+ storeEnvVars = []corev1.EnvVar{
+ {
+ Name: EnvSwiftUsername,
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: secretName,
+ },
+ Key: KeySwiftUsername,
+ },
+ },
+ },
+ {
+ Name: EnvSwiftPassword,
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: secretName,
+ },
+ Key: KeySwiftPassword,
+ },
+ },
+ },
+ }
+ }
+
+ container.Env = append(container.Env, storeEnvVars...)
return corev1.PodSpec{
Containers: []corev1.Container{
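
For orientation, here is a minimal sketch (not part of the patch) of how the reworked helper is meant to be used; the deployment literal and secret name are illustrative only:

```go
package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"

	lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
	"github.com/grafana/loki/operator/internal/manifests/storage"
)

func main() {
	// A deployment with a single container, as produced by the manifests package.
	d := &appsv1.Deployment{
		Spec: appsv1.DeploymentSpec{
			Template: corev1.PodTemplateSpec{
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{{Name: "loki-querier"}},
				},
			},
		},
	}

	// With an S3 shared store, ConfigureDeployment mounts the secret under
	// /etc/storage/secrets and projects AWS_ACCESS_KEY_ID and
	// AWS_ACCESS_KEY_SECRET from the secret's data keys.
	opts := storage.Options{
		SecretName:  "lokistack-dev-s3",
		SharedStore: lokiv1.ObjectStorageSecretS3,
	}
	if err := storage.ConfigureDeployment(d, opts); err != nil {
		panic(err)
	}
	fmt.Println(d.Spec.Template.Spec.Containers[0].Env)
}
```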
diff --git a/operator/internal/manifests/storage/configure_test.go b/operator/internal/manifests/storage/configure_test.go
index 792a452229e1e..6614453df22d3 100644
--- a/operator/internal/manifests/storage/configure_test.go
+++ b/operator/internal/manifests/storage/configure_test.go
@@ -1,4 +1,4 @@
-package storage_test
+package storage
import (
"testing"
@@ -8,26 +8,564 @@ import (
corev1 "k8s.io/api/core/v1"
lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
- "github.com/grafana/loki/operator/internal/manifests/storage"
)
func TestConfigureDeploymentForStorageType(t *testing.T) {
type tt struct {
desc string
- opts storage.Options
+ opts Options
dpl *appsv1.Deployment
want *appsv1.Deployment
}
tc := []tt{
{
- desc: "object storage other than GCS",
- opts: storage.Options{
+ desc: "object storage AlibabaCloud",
+ opts: Options{
+ SecretName: "test",
+ SharedStore: lokiv1.ObjectStorageSecretAlibabaCloud,
+ },
+ dpl: &appsv1.Deployment{
+ Spec: appsv1.DeploymentSpec{
+ Template: corev1.PodTemplateSpec{
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {
+ Name: "loki-ingester",
+ },
+ },
+ },
+ },
+ },
+ },
+ want: &appsv1.Deployment{
+ Spec: appsv1.DeploymentSpec{
+ Template: corev1.PodTemplateSpec{
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {
+ Name: "loki-ingester",
+ VolumeMounts: []corev1.VolumeMount{
+ {
+ Name: "test",
+ ReadOnly: false,
+ MountPath: "/etc/storage/secrets",
+ },
+ },
+ Env: []corev1.EnvVar{
+ {
+ Name: EnvAlibabaCloudAccessKeyID,
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "test",
+ },
+ Key: KeyAlibabaCloudAccessKeyID,
+ },
+ },
+ },
+ {
+ Name: EnvAlibabaCloudAccessKeySecret,
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "test",
+ },
+ Key: KeyAlibabaCloudSecretAccessKey,
+ },
+ },
+ },
+ },
+ },
+ },
+ Volumes: []corev1.Volume{
+ {
+ Name: "test",
+ VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: "test",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ desc: "object storage Azure",
+ opts: Options{
+ SecretName: "test",
+ SharedStore: lokiv1.ObjectStorageSecretAzure,
+ },
+ dpl: &appsv1.Deployment{
+ Spec: appsv1.DeploymentSpec{
+ Template: corev1.PodTemplateSpec{
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {
+ Name: "loki-ingester",
+ },
+ },
+ },
+ },
+ },
+ },
+ want: &appsv1.Deployment{
+ Spec: appsv1.DeploymentSpec{
+ Template: corev1.PodTemplateSpec{
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {
+ Name: "loki-ingester",
+ VolumeMounts: []corev1.VolumeMount{
+ {
+ Name: "test",
+ ReadOnly: false,
+ MountPath: "/etc/storage/secrets",
+ },
+ },
+ Env: []corev1.EnvVar{
+ {
+ Name: EnvAzureStorageAccountName,
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "test",
+ },
+ Key: KeyAzureStorageAccountName,
+ },
+ },
+ },
+ {
+ Name: EnvAzureStorageAccountKey,
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "test",
+ },
+ Key: KeyAzureStorageAccountKey,
+ },
+ },
+ },
+ },
+ },
+ },
+ Volumes: []corev1.Volume{
+ {
+ Name: "test",
+ VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: "test",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ desc: "object storage GCS",
+ opts: Options{
+ SecretName: "test",
+ SharedStore: lokiv1.ObjectStorageSecretGCS,
+ },
+ dpl: &appsv1.Deployment{
+ Spec: appsv1.DeploymentSpec{
+ Template: corev1.PodTemplateSpec{
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {
+ Name: "loki-ingester",
+ },
+ },
+ },
+ },
+ },
+ },
+ want: &appsv1.Deployment{
+ Spec: appsv1.DeploymentSpec{
+ Template: corev1.PodTemplateSpec{
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {
+ Name: "loki-ingester",
+ VolumeMounts: []corev1.VolumeMount{
+ {
+ Name: "test",
+ ReadOnly: false,
+ MountPath: "/etc/storage/secrets",
+ },
+ },
+ Env: []corev1.EnvVar{
+ {
+ Name: EnvGoogleApplicationCredentials,
+ Value: "/etc/storage/secrets/key.json",
+ },
+ },
+ },
+ },
+ Volumes: []corev1.Volume{
+ {
+ Name: "test",
+ VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: "test",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ desc: "object storage S3",
+ opts: Options{
+ SecretName: "test",
+ SharedStore: lokiv1.ObjectStorageSecretS3,
+ },
+ dpl: &appsv1.Deployment{
+ Spec: appsv1.DeploymentSpec{
+ Template: corev1.PodTemplateSpec{
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {
+ Name: "loki-ingester",
+ },
+ },
+ },
+ },
+ },
+ },
+ want: &appsv1.Deployment{
+ Spec: appsv1.DeploymentSpec{
+ Template: corev1.PodTemplateSpec{
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {
+ Name: "loki-ingester",
+ VolumeMounts: []corev1.VolumeMount{
+ {
+ Name: "test",
+ ReadOnly: false,
+ MountPath: "/etc/storage/secrets",
+ },
+ },
+ Env: []corev1.EnvVar{
+ {
+ Name: EnvAWSAccessKeyID,
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "test",
+ },
+ Key: KeyAWSAccessKeyID,
+ },
+ },
+ },
+ {
+ Name: EnvAWSAccessKeySecret,
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "test",
+ },
+ Key: KeyAWSAccessKeySecret,
+ },
+ },
+ },
+ },
+ },
+ },
+ Volumes: []corev1.Volume{
+ {
+ Name: "test",
+ VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: "test",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ desc: "object storage S3 with SSE KMS encryption context",
+ opts: Options{
SecretName: "test",
SharedStore: lokiv1.ObjectStorageSecretS3,
+ S3: &S3StorageConfig{
+ SSE: S3SSEConfig{
+ Type: SSEKMSType,
+ KMSEncryptionContext: "test",
+ },
+ },
+ },
+ dpl: &appsv1.Deployment{
+ Spec: appsv1.DeploymentSpec{
+ Template: corev1.PodTemplateSpec{
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {
+ Name: "loki-ingester",
+ },
+ },
+ },
+ },
+ },
+ },
+ want: &appsv1.Deployment{
+ Spec: appsv1.DeploymentSpec{
+ Template: corev1.PodTemplateSpec{
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {
+ Name: "loki-ingester",
+ VolumeMounts: []corev1.VolumeMount{
+ {
+ Name: "test",
+ ReadOnly: false,
+ MountPath: "/etc/storage/secrets",
+ },
+ },
+ Env: []corev1.EnvVar{
+ {
+ Name: EnvAWSAccessKeyID,
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "test",
+ },
+ Key: KeyAWSAccessKeyID,
+ },
+ },
+ },
+ {
+ Name: EnvAWSAccessKeySecret,
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "test",
+ },
+ Key: KeyAWSAccessKeySecret,
+ },
+ },
+ },
+ {
+ Name: EnvAWSSseKmsEncryptionContext,
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "test",
+ },
+ Key: KeyAWSSseKmsEncryptionContext,
+ },
+ },
+ },
+ },
+ },
+ },
+ Volumes: []corev1.Volume{
+ {
+ Name: "test",
+ VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: "test",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ desc: "object storage Swift",
+ opts: Options{
+ SecretName: "test",
+ SharedStore: lokiv1.ObjectStorageSecretSwift,
+ },
+ dpl: &appsv1.Deployment{
+ Spec: appsv1.DeploymentSpec{
+ Template: corev1.PodTemplateSpec{
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {
+ Name: "loki-ingester",
+ },
+ },
+ },
+ },
+ },
+ },
+ want: &appsv1.Deployment{
+ Spec: appsv1.DeploymentSpec{
+ Template: corev1.PodTemplateSpec{
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {
+ Name: "loki-ingester",
+ VolumeMounts: []corev1.VolumeMount{
+ {
+ Name: "test",
+ ReadOnly: false,
+ MountPath: "/etc/storage/secrets",
+ },
+ },
+ Env: []corev1.EnvVar{
+ {
+ Name: EnvSwiftUsername,
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "test",
+ },
+ Key: KeySwiftUsername,
+ },
+ },
+ },
+ {
+ Name: EnvSwiftPassword,
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "test",
+ },
+ Key: KeySwiftPassword,
+ },
+ },
+ },
+ },
+ },
+ },
+ Volumes: []corev1.Volume{
+ {
+ Name: "test",
+ VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: "test",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+
+ for _, tc := range tc {
+ tc := tc
+ t.Run(tc.desc, func(t *testing.T) {
+ t.Parallel()
+ err := ConfigureDeployment(tc.dpl, tc.opts)
+ require.NoError(t, err)
+ require.Equal(t, tc.want, tc.dpl)
+ })
+ }
+}
+
+func TestConfigureStatefulSetForStorageType(t *testing.T) {
+ type tt struct {
+ desc string
+ opts Options
+ sts *appsv1.StatefulSet
+ want *appsv1.StatefulSet
+ }
+
+ tc := []tt{
+ {
+ desc: "object storage AlibabaCloud",
+ opts: Options{
+ SecretName: "test",
+ SharedStore: lokiv1.ObjectStorageSecretAlibabaCloud,
+ },
+ sts: &appsv1.StatefulSet{
+ Spec: appsv1.StatefulSetSpec{
+ Template: corev1.PodTemplateSpec{
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {
+ Name: "loki-ingester",
+ },
+ },
+ },
+ },
+ },
+ },
+ want: &appsv1.StatefulSet{
+ Spec: appsv1.StatefulSetSpec{
+ Template: corev1.PodTemplateSpec{
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {
+ Name: "loki-ingester",
+ VolumeMounts: []corev1.VolumeMount{
+ {
+ Name: "test",
+ ReadOnly: false,
+ MountPath: "/etc/storage/secrets",
+ },
+ },
+ Env: []corev1.EnvVar{
+ {
+ Name: EnvAlibabaCloudAccessKeyID,
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "test",
+ },
+ Key: KeyAlibabaCloudAccessKeyID,
+ },
+ },
+ },
+ {
+ Name: EnvAlibabaCloudAccessKeySecret,
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "test",
+ },
+ Key: KeyAlibabaCloudSecretAccessKey,
+ },
+ },
+ },
+ },
+ },
+ },
+ Volumes: []corev1.Volume{
+ {
+ Name: "test",
+ VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: "test",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
},
- dpl: &appsv1.Deployment{
- Spec: appsv1.DeploymentSpec{
+ },
+ {
+ desc: "object storage Azure",
+ opts: Options{
+ SecretName: "test",
+ SharedStore: lokiv1.ObjectStorageSecretAzure,
+ },
+ sts: &appsv1.StatefulSet{
+ Spec: appsv1.StatefulSetSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Containers: []corev1.Container{
@@ -39,13 +577,54 @@ func TestConfigureDeploymentForStorageType(t *testing.T) {
},
},
},
- want: &appsv1.Deployment{
- Spec: appsv1.DeploymentSpec{
+ want: &appsv1.StatefulSet{
+ Spec: appsv1.StatefulSetSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "loki-ingester",
+ VolumeMounts: []corev1.VolumeMount{
+ {
+ Name: "test",
+ ReadOnly: false,
+ MountPath: "/etc/storage/secrets",
+ },
+ },
+ Env: []corev1.EnvVar{
+ {
+ Name: EnvAzureStorageAccountName,
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "test",
+ },
+ Key: KeyAzureStorageAccountName,
+ },
+ },
+ },
+ {
+ Name: EnvAzureStorageAccountKey,
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "test",
+ },
+ Key: KeyAzureStorageAccountKey,
+ },
+ },
+ },
+ },
+ },
+ },
+ Volumes: []corev1.Volume{
+ {
+ Name: "test",
+ VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: "test",
+ },
+ },
},
},
},
@@ -55,12 +634,12 @@ func TestConfigureDeploymentForStorageType(t *testing.T) {
},
{
desc: "object storage GCS",
- opts: storage.Options{
+ opts: Options{
SecretName: "test",
SharedStore: lokiv1.ObjectStorageSecretGCS,
},
- dpl: &appsv1.Deployment{
- Spec: appsv1.DeploymentSpec{
+ sts: &appsv1.StatefulSet{
+ Spec: appsv1.StatefulSetSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Containers: []corev1.Container{
@@ -72,8 +651,8 @@ func TestConfigureDeploymentForStorageType(t *testing.T) {
},
},
},
- want: &appsv1.Deployment{
- Spec: appsv1.DeploymentSpec{
+ want: &appsv1.StatefulSet{
+ Spec: appsv1.StatefulSetSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Containers: []corev1.Container{
@@ -88,7 +667,7 @@ func TestConfigureDeploymentForStorageType(t *testing.T) {
},
Env: []corev1.EnvVar{
{
- Name: storage.EnvGoogleApplicationCredentials,
+ Name: EnvGoogleApplicationCredentials,
Value: "/etc/storage/secrets/key.json",
},
},
@@ -109,31 +688,9 @@ func TestConfigureDeploymentForStorageType(t *testing.T) {
},
},
},
- }
-
- for _, tc := range tc {
- tc := tc
- t.Run(tc.desc, func(t *testing.T) {
- t.Parallel()
- err := storage.ConfigureDeployment(tc.dpl, tc.opts)
- require.NoError(t, err)
- require.Equal(t, tc.want, tc.dpl)
- })
- }
-}
-
-func TestConfigureStatefulSetForStorageType(t *testing.T) {
- type tt struct {
- desc string
- opts storage.Options
- sts *appsv1.StatefulSet
- want *appsv1.StatefulSet
- }
-
- tc := []tt{
{
- desc: "object storage other than GCS",
- opts: storage.Options{
+ desc: "object storage S3",
+ opts: Options{
SecretName: "test",
SharedStore: lokiv1.ObjectStorageSecretS3,
},
@@ -157,6 +714,47 @@ func TestConfigureStatefulSetForStorageType(t *testing.T) {
Containers: []corev1.Container{
{
Name: "loki-ingester",
+ VolumeMounts: []corev1.VolumeMount{
+ {
+ Name: "test",
+ ReadOnly: false,
+ MountPath: "/etc/storage/secrets",
+ },
+ },
+ Env: []corev1.EnvVar{
+ {
+ Name: EnvAWSAccessKeyID,
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "test",
+ },
+ Key: KeyAWSAccessKeyID,
+ },
+ },
+ },
+ {
+ Name: EnvAWSAccessKeySecret,
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "test",
+ },
+ Key: KeyAWSAccessKeySecret,
+ },
+ },
+ },
+ },
+ },
+ },
+ Volumes: []corev1.Volume{
+ {
+ Name: "test",
+ VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: "test",
+ },
+ },
},
},
},
@@ -165,10 +763,16 @@ func TestConfigureStatefulSetForStorageType(t *testing.T) {
},
},
{
- desc: "object storage GCS",
- opts: storage.Options{
+ desc: "object storage S3 with SSE KMS encryption Context",
+ opts: Options{
SecretName: "test",
- SharedStore: lokiv1.ObjectStorageSecretGCS,
+ SharedStore: lokiv1.ObjectStorageSecretS3,
+ S3: &S3StorageConfig{
+ SSE: S3SSEConfig{
+ Type: SSEKMSType,
+ KMSEncryptionContext: "test",
+ },
+ },
},
sts: &appsv1.StatefulSet{
Spec: appsv1.StatefulSetSpec{
@@ -199,8 +803,111 @@ func TestConfigureStatefulSetForStorageType(t *testing.T) {
},
Env: []corev1.EnvVar{
{
- Name: storage.EnvGoogleApplicationCredentials,
- Value: "/etc/storage/secrets/key.json",
+ Name: EnvAWSAccessKeyID,
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "test",
+ },
+ Key: KeyAWSAccessKeyID,
+ },
+ },
+ },
+ {
+ Name: EnvAWSAccessKeySecret,
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "test",
+ },
+ Key: KeyAWSAccessKeySecret,
+ },
+ },
+ },
+ {
+ Name: EnvAWSSseKmsEncryptionContext,
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "test",
+ },
+ Key: KeyAWSSseKmsEncryptionContext,
+ },
+ },
+ },
+ },
+ },
+ },
+ Volumes: []corev1.Volume{
+ {
+ Name: "test",
+ VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: "test",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ desc: "object storage Swift",
+ opts: Options{
+ SecretName: "test",
+ SharedStore: lokiv1.ObjectStorageSecretSwift,
+ },
+ sts: &appsv1.StatefulSet{
+ Spec: appsv1.StatefulSetSpec{
+ Template: corev1.PodTemplateSpec{
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {
+ Name: "loki-ingester",
+ },
+ },
+ },
+ },
+ },
+ },
+ want: &appsv1.StatefulSet{
+ Spec: appsv1.StatefulSetSpec{
+ Template: corev1.PodTemplateSpec{
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {
+ Name: "loki-ingester",
+ VolumeMounts: []corev1.VolumeMount{
+ {
+ Name: "test",
+ ReadOnly: false,
+ MountPath: "/etc/storage/secrets",
+ },
+ },
+ Env: []corev1.EnvVar{
+ {
+ Name: EnvSwiftUsername,
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "test",
+ },
+ Key: KeySwiftUsername,
+ },
+ },
+ },
+ {
+ Name: EnvSwiftPassword,
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "test",
+ },
+ Key: KeySwiftPassword,
+ },
+ },
},
},
},
@@ -226,7 +933,7 @@ func TestConfigureStatefulSetForStorageType(t *testing.T) {
tc := tc
t.Run(tc.desc, func(t *testing.T) {
t.Parallel()
- err := storage.ConfigureStatefulSet(tc.sts, tc.opts)
+ err := ConfigureStatefulSet(tc.sts, tc.opts)
require.NoError(t, err)
require.Equal(t, tc.want, tc.sts)
})
@@ -236,7 +943,7 @@ func TestConfigureStatefulSetForStorageType(t *testing.T) {
func TestConfigureDeploymentForStorageCA(t *testing.T) {
type tt struct {
desc string
- opts storage.Options
+ opts Options
dpl *appsv1.Deployment
want *appsv1.Deployment
}
@@ -244,9 +951,9 @@ func TestConfigureDeploymentForStorageCA(t *testing.T) {
tc := []tt{
{
desc: "object storage other than S3",
- opts: storage.Options{
+ opts: Options{
SecretName: "test",
- SharedStore: lokiv1.ObjectStorageSecretAzure,
+ SharedStore: lokiv1.ObjectStorageSecretSwift,
},
dpl: &appsv1.Deployment{
Spec: appsv1.DeploymentSpec{
@@ -268,6 +975,47 @@ func TestConfigureDeploymentForStorageCA(t *testing.T) {
Containers: []corev1.Container{
{
Name: "loki-querier",
+ VolumeMounts: []corev1.VolumeMount{
+ {
+ Name: "test",
+ ReadOnly: false,
+ MountPath: "/etc/storage/secrets",
+ },
+ },
+ Env: []corev1.EnvVar{
+ {
+ Name: EnvSwiftUsername,
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "test",
+ },
+ Key: KeySwiftUsername,
+ },
+ },
+ },
+ {
+ Name: EnvSwiftPassword,
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "test",
+ },
+ Key: KeySwiftPassword,
+ },
+ },
+ },
+ },
+ },
+ },
+ Volumes: []corev1.Volume{
+ {
+ Name: "test",
+ VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: "test",
+ },
+ },
},
},
},
@@ -277,10 +1025,10 @@ func TestConfigureDeploymentForStorageCA(t *testing.T) {
},
{
desc: "object storage S3",
- opts: storage.Options{
+ opts: Options{
SecretName: "test",
SharedStore: lokiv1.ObjectStorageSecretS3,
- TLS: &storage.TLSConfig{
+ TLS: &TLSConfig{
CA: "test",
Key: "service-ca.crt",
},
@@ -306,6 +1054,11 @@ func TestConfigureDeploymentForStorageCA(t *testing.T) {
{
Name: "loki-querier",
VolumeMounts: []corev1.VolumeMount{
+ {
+ Name: "test",
+ ReadOnly: false,
+ MountPath: "/etc/storage/secrets",
+ },
{
Name: "storage-tls",
ReadOnly: false,
@@ -315,9 +1068,41 @@ func TestConfigureDeploymentForStorageCA(t *testing.T) {
Args: []string{
"-s3.http.ca-file=/etc/storage/ca/service-ca.crt",
},
+ Env: []corev1.EnvVar{
+ {
+ Name: EnvAWSAccessKeyID,
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "test",
+ },
+ Key: KeyAWSAccessKeyID,
+ },
+ },
+ },
+ {
+ Name: EnvAWSAccessKeySecret,
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "test",
+ },
+ Key: KeyAWSAccessKeySecret,
+ },
+ },
+ },
+ },
},
},
Volumes: []corev1.Volume{
+ {
+ Name: "test",
+ VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: "test",
+ },
+ },
+ },
{
Name: "storage-tls",
VolumeSource: corev1.VolumeSource{
@@ -340,7 +1125,7 @@ func TestConfigureDeploymentForStorageCA(t *testing.T) {
tc := tc
t.Run(tc.desc, func(t *testing.T) {
t.Parallel()
- err := storage.ConfigureDeployment(tc.dpl, tc.opts)
+ err := ConfigureDeployment(tc.dpl, tc.opts)
require.NoError(t, err)
require.Equal(t, tc.want, tc.dpl)
})
@@ -350,7 +1135,7 @@ func TestConfigureDeploymentForStorageCA(t *testing.T) {
func TestConfigureStatefulSetForStorageCA(t *testing.T) {
type tt struct {
desc string
- opts storage.Options
+ opts Options
sts *appsv1.StatefulSet
want *appsv1.StatefulSet
}
@@ -358,10 +1143,10 @@ func TestConfigureStatefulSetForStorageCA(t *testing.T) {
tc := []tt{
{
desc: "object storage other than S3",
- opts: storage.Options{
+ opts: Options{
SecretName: "test",
- SharedStore: lokiv1.ObjectStorageSecretAzure,
- TLS: &storage.TLSConfig{
+ SharedStore: lokiv1.ObjectStorageSecretSwift,
+ TLS: &TLSConfig{
CA: "test",
},
},
@@ -385,6 +1170,47 @@ func TestConfigureStatefulSetForStorageCA(t *testing.T) {
Containers: []corev1.Container{
{
Name: "loki-ingester",
+ VolumeMounts: []corev1.VolumeMount{
+ {
+ Name: "test",
+ ReadOnly: false,
+ MountPath: "/etc/storage/secrets",
+ },
+ },
+ Env: []corev1.EnvVar{
+ {
+ Name: EnvSwiftUsername,
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "test",
+ },
+ Key: KeySwiftUsername,
+ },
+ },
+ },
+ {
+ Name: EnvSwiftPassword,
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "test",
+ },
+ Key: KeySwiftPassword,
+ },
+ },
+ },
+ },
+ },
+ },
+ Volumes: []corev1.Volume{
+ {
+ Name: "test",
+ VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: "test",
+ },
+ },
},
},
},
@@ -394,10 +1220,10 @@ func TestConfigureStatefulSetForStorageCA(t *testing.T) {
},
{
desc: "object storage S3",
- opts: storage.Options{
+ opts: Options{
SecretName: "test",
SharedStore: lokiv1.ObjectStorageSecretS3,
- TLS: &storage.TLSConfig{
+ TLS: &TLSConfig{
CA: "test",
Key: "service-ca.crt",
},
@@ -423,6 +1249,11 @@ func TestConfigureStatefulSetForStorageCA(t *testing.T) {
{
Name: "loki-ingester",
VolumeMounts: []corev1.VolumeMount{
+ {
+ Name: "test",
+ ReadOnly: false,
+ MountPath: "/etc/storage/secrets",
+ },
{
Name: "storage-tls",
ReadOnly: false,
@@ -432,9 +1263,41 @@ func TestConfigureStatefulSetForStorageCA(t *testing.T) {
Args: []string{
"-s3.http.ca-file=/etc/storage/ca/service-ca.crt",
},
+ Env: []corev1.EnvVar{
+ {
+ Name: EnvAWSAccessKeyID,
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "test",
+ },
+ Key: KeyAWSAccessKeyID,
+ },
+ },
+ },
+ {
+ Name: EnvAWSAccessKeySecret,
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "test",
+ },
+ Key: KeyAWSAccessKeySecret,
+ },
+ },
+ },
+ },
},
},
Volumes: []corev1.Volume{
+ {
+ Name: "test",
+ VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: "test",
+ },
+ },
+ },
{
Name: "storage-tls",
VolumeSource: corev1.VolumeSource{
@@ -457,7 +1320,7 @@ func TestConfigureStatefulSetForStorageCA(t *testing.T) {
tc := tc
t.Run(tc.desc, func(t *testing.T) {
t.Parallel()
- err := storage.ConfigureStatefulSet(tc.sts, tc.opts)
+ err := ConfigureStatefulSet(tc.sts, tc.opts)
require.NoError(t, err)
require.Equal(t, tc.want, tc.sts)
})
diff --git a/operator/internal/manifests/storage/options.go b/operator/internal/manifests/storage/options.go
index b5d47ce593729..8767f576848a9 100644
--- a/operator/internal/manifests/storage/options.go
+++ b/operator/internal/manifests/storage/options.go
@@ -17,6 +17,7 @@ type Options struct {
AlibabaCloud *AlibabaCloudStorageConfig
SecretName string
+ SecretSHA1 string
TLS *TLSConfig
}
@@ -24,8 +25,6 @@ type Options struct {
type AzureStorageConfig struct {
Env string
Container string
- AccountName string
- AccountKey string
EndpointSuffix string
}
@@ -36,12 +35,10 @@ type GCSStorageConfig struct {
// S3StorageConfig for S3 storage config
type S3StorageConfig struct {
- Endpoint string
- Region string
- Buckets string
- AccessKeyID string
- AccessKeySecret string
- SSE S3SSEConfig
+ Endpoint string
+ Region string
+ Buckets string
+ SSE S3SSEConfig
}
type S3SSEType string
@@ -60,11 +57,9 @@ type S3SSEConfig struct {
// SwiftStorageConfig for Swift storage config
type SwiftStorageConfig struct {
AuthURL string
- Username string
UserDomainName string
UserDomainID string
UserID string
- Password string
DomainID string
DomainName string
ProjectID string
@@ -77,10 +72,8 @@ type SwiftStorageConfig struct {
// AlibabaCloudStorageConfig for AlibabaCloud storage config
type AlibabaCloudStorageConfig struct {
- Endpoint string
- Bucket string
- AccessKeyID string
- SecretAccessKey string
+ Endpoint string
+ Bucket string
}
// TLSConfig for object storage endpoints. Currently supported only by:
diff --git a/operator/internal/manifests/storage/schema_test.go b/operator/internal/manifests/storage/schema_test.go
index 3663a5c0ddf2f..c3ca914658f9e 100644
--- a/operator/internal/manifests/storage/schema_test.go
+++ b/operator/internal/manifests/storage/schema_test.go
@@ -4,9 +4,9 @@ import (
"testing"
"time"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
-
"github.com/stretchr/testify/require"
+
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
)
func TestBuildSchemaConfig_NoSchemas(t *testing.T) {
diff --git a/operator/internal/manifests/storage/var.go b/operator/internal/manifests/storage/var.go
new file mode 100644
index 0000000000000..aae6e1ea0e583
--- /dev/null
+++ b/operator/internal/manifests/storage/var.go
@@ -0,0 +1,99 @@
+package storage
+
+const (
+ // EnvAlibabaCloudAccessKeyID is the environment variable to specify the AlibabaCloud client id to access S3.
+ EnvAlibabaCloudAccessKeyID = "ALIBABA_CLOUD_ACCESS_KEY_ID"
+ // EnvAlibabaCloudAccessKeySecret is the environment variable to specify the AlibabaCloud client secret to access S3.
+ EnvAlibabaCloudAccessKeySecret = "ALIBABA_CLOUD_ACCESS_KEY_SECRET"
+ // EnvAWSAccessKeyID is the environment variable to specify the AWS client id to access S3.
+ EnvAWSAccessKeyID = "AWS_ACCESS_KEY_ID"
+ // EnvAWSAccessKeySecret is the environment variable to specify the AWS client secret to access S3.
+ EnvAWSAccessKeySecret = "AWS_ACCESS_KEY_SECRET"
+ // EnvAWSSseKmsEncryptionContext is the environment variable to specify the AWS KMS encryption context when using type SSE-KMS.
+ EnvAWSSseKmsEncryptionContext = "AWS_SSE_KMS_ENCRYPTION_CONTEXT"
+ // EnvAzureStorageAccountName is the environment variable to specify the Azure storage account name to access the container.
+ EnvAzureStorageAccountName = "AZURE_STORAGE_ACCOUNT_NAME"
+ // EnvAzureStorageAccountKey is the environment variable to specify the Azure storage account key to access the container.
+ EnvAzureStorageAccountKey = "AZURE_STORAGE_ACCOUNT_KEY"
+ // EnvGoogleApplicationCredentials is the environment variable to specify path to key.json
+ EnvGoogleApplicationCredentials = "GOOGLE_APPLICATION_CREDENTIALS"
+ // EnvSwiftPassword is the environment variable to specify the OpenStack Swift password.
+ EnvSwiftPassword = "SWIFT_PASSWORD"
+ // EnvSwiftUsername is the environment variable to specify the OpenStack Swift username.
+ EnvSwiftUsername = "SWIFT_USERNAME"
+
+ // KeyAlibabaCloudAccessKeyID is the secret data key for the AlibabaCloud client id to access S3.
+ KeyAlibabaCloudAccessKeyID = "access_key_id"
+ // KeyAlibabaCloudSecretAccessKey is the secret data key for the AlibabaCloud client secret to access S3.
+ KeyAlibabaCloudSecretAccessKey = "secret_access_key"
+ // KeyAlibabaCloudBucket is the secret data key for the S3 bucket name.
+ KeyAlibabaCloudBucket = "bucket"
+ // KeyAlibabaCloudEndpoint is the secret data key for the S3 endpoint URL.
+ KeyAlibabaCloudEndpoint = "endpoint"
+
+ // KeyAWSAccessKeyID is the secret data key for the AWS client id to access S3.
+ KeyAWSAccessKeyID = "access_key_id"
+ // KeyAWSAccessKeySecret is the secret data key for the AWS client secret to access S3.
+ KeyAWSAccessKeySecret = "access_key_secret"
+ // KeyAWSBucketNames is the secret data key for the AWS S3 bucket names.
+ KeyAWSBucketNames = "bucketnames"
+ // KeyAWSEndpoint is the secret data key for the AWS endpoint URL.
+ KeyAWSEndpoint = "endpoint"
+ // KeyAWSRegion is the secret data key for the AWS region.
+ KeyAWSRegion = "region"
+ // KeyAWSSSEType is the secret data key for the AWS server-side encryption type.
+ KeyAWSSSEType = "sse_type"
+ // KeyAWSSseKmsEncryptionContext is the secret data key for the AWS SSE KMS encryption context.
+ KeyAWSSseKmsEncryptionContext = "sse_kms_encryption_context"
+ // KeyAWSSseKmsKeyID is the secret data key for the AWS SSE KMS key id.
+ KeyAWSSseKmsKeyID = "sse_kms_key_id"
+
+ // KeyAzureStorageAccountKey is the secret data key for the Azure storage account key.
+ KeyAzureStorageAccountKey = "account_key"
+ // KeyAzureStorageAccountName is the secret data key for the Azure storage account name.
+ KeyAzureStorageAccountName = "account_name"
+ // KeyAzureStorageContainerName is the secret data key for the Azure storage container name.
+ KeyAzureStorageContainerName = "container"
+ // KeyAzureStorageEndpointSuffix is the secret data key for the Azure storage endpoint URL suffix.
+ KeyAzureStorageEndpointSuffix = "endpoint_suffix"
+ // KeyAzureEnvironmentName is the secret data key for the Azure cloud environment name.
+ KeyAzureEnvironmentName = "environment"
+
+ // KeyGCPStorageBucketName is the secret data key for the GCS bucket name.
+ KeyGCPStorageBucketName = "bucketname"
+ // KeyGCPServiceAccountKeyFilename is the service account key filename containing the Google authentication credentials.
+ KeyGCPServiceAccountKeyFilename = "key.json"
+
+ // KeySwiftAuthURL is the secret data key for the OpenStack Swift authentication URL.
+ KeySwiftAuthURL = "auth_url"
+ // KeySwiftContainerName is the secret data key for the OpenStack Swift container name.
+ KeySwiftContainerName = "container_name"
+ // KeySwiftDomainID is the secret data key for the OpenStack domain ID.
+ KeySwiftDomainID = "domain_id"
+ // KeySwiftDomainName is the secret data key for the OpenStack domain name.
+ KeySwiftDomainName = "domain_name"
+ // KeySwiftPassword is the secret data key for the OpenStack Swift password.
+ KeySwiftPassword = "password"
+ // KeySwiftProjectDomainId is the secret data key for the OpenStack project's domain id.
+ KeySwiftProjectDomainId = "project_domain_id"
+ // KeySwiftProjectDomainName is the secret data key for the OpenStack project's domain name.
+ KeySwiftProjectDomainName = "project_domain_name"
+ // KeySwiftProjectID is the secret data key for the OpenStack project id.
+ KeySwiftProjectID = "project_id"
+ // KeySwiftProjectName is the secret data key for the OpenStack project name.
+ KeySwiftProjectName = "project_name"
+ // KeySwiftRegion is the secret data key for the OpenStack Swift region.
+ KeySwiftRegion = "region"
+ // KeySwiftUserDomainID is the secret data key for the OpenStack Swift user domain id.
+ KeySwiftUserDomainID = "user_domain_id"
+ // KeySwiftUserDomainName is the secret data key for the OpenStack Swift user domain name.
+ KeySwiftUserDomainName = "user_domain_name"
+ // KeySwiftUserID is the secret data key for the OpenStack Swift user id.
+ KeySwiftUserID = "user_id"
+	// KeySwiftUsername is the secret data key for the OpenStack Swift username.
+ KeySwiftUsername = "username"
+
+ secretDirectory = "/etc/storage/secrets"
+ storageTLSVolume = "storage-tls"
+ caDirectory = "/etc/storage/ca"
+)
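
For orientation, the constants above are the data keys the operator expects inside the object-storage Secret referenced by a LokiStack. The sketch below is illustrative only — the helper name, package placement, and error text are assumptions, not part of this patch — and shows how the S3-related keys map onto a `corev1.Secret`:

```go
package storage

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// extractS3Config is a hypothetical helper (not operator API) that reads the
// S3 settings out of the secret's Data map using the keys defined above.
func extractS3Config(s *corev1.Secret) (endpoint, buckets, region string, err error) {
	endpoint = string(s.Data[KeyAWSEndpoint])
	buckets = string(s.Data[KeyAWSBucketNames])
	region = string(s.Data[KeyAWSRegion])
	if len(s.Data[KeyAWSAccessKeyID]) == 0 || len(s.Data[KeyAWSAccessKeySecret]) == 0 {
		return "", "", "", fmt.Errorf("missing %s or %s", KeyAWSAccessKeyID, KeyAWSAccessKeySecret)
	}
	return endpoint, buckets, region, nil
}
```
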
diff --git a/operator/internal/manifests/var.go b/operator/internal/manifests/var.go
index 3f4644be4f322..6468e4426bf0e 100644
--- a/operator/internal/manifests/var.go
+++ b/operator/internal/manifests/var.go
@@ -59,7 +59,7 @@ const (
EnvRelatedImageGateway = "RELATED_IMAGE_GATEWAY"
// DefaultContainerImage declares the default fallback for loki image.
- DefaultContainerImage = "docker.io/grafana/loki:2.9.2"
+ DefaultContainerImage = "docker.io/grafana/loki:2.9.3"
// DefaultLokiStackGatewayImage declares the default image for lokiStack-gateway.
DefaultLokiStackGatewayImage = "quay.io/observatorium/api:latest"
@@ -76,6 +76,8 @@ const (
AnnotationCertRotationRequiredAt string = "loki.grafana.com/certRotationRequiredAt"
// AnnotationLokiConfigHash stores the last SHA1 hash of the loki configuration
AnnotationLokiConfigHash string = "loki.grafana.com/config-hash"
+	// AnnotationLokiObjectStoreHash stores the last SHA1 hash of the loki object storage credentials
+ AnnotationLokiObjectStoreHash string = "loki.grafana.com/object-store-hash"
// LabelCompactorComponent is the label value for the compactor component
LabelCompactorComponent string = "compactor"
@@ -130,11 +132,18 @@ var (
volumeFileSystemMode = corev1.PersistentVolumeFilesystem
)
-func commonAnnotations(configHash, rotationRequiredAt string) map[string]string {
- return map[string]string{
- AnnotationLokiConfigHash: configHash,
+func commonAnnotations(configHash, objStoreHash, rotationRequiredAt string) map[string]string {
+ a := map[string]string{
+ AnnotationLokiConfigHash: configHash,
+
AnnotationCertRotationRequiredAt: rotationRequiredAt,
}
+
+ if objStoreHash != "" {
+ a[AnnotationLokiObjectStoreHash] = objStoreHash
+ }
+
+ return a
}
func commonLabels(stackName string) map[string]string {
diff --git a/operator/internal/metrics/metrics.go b/operator/internal/metrics/metrics.go
index fcf82ddca5d69..3c994f13c61ef 100644
--- a/operator/internal/metrics/metrics.go
+++ b/operator/internal/metrics/metrics.go
@@ -142,7 +142,7 @@ func boolValue(value bool) float64 {
return 0
}
-func streamRate(tenantLimits map[string]lokiv1.LimitsTemplateSpec, ingesters int32) float64 {
+func streamRate(tenantLimits map[string]lokiv1.PerTenantLimitsTemplateSpec, ingesters int32) float64 {
var tenants, tenantStreamLimit int32 = 0, 0
for _, tenant := range tenantLimits {
diff --git a/operator/internal/status/components_test.go b/operator/internal/status/components_test.go
index 3e0987fbc8717..698b6a536d421 100644
--- a/operator/internal/status/components_test.go
+++ b/operator/internal/status/components_test.go
@@ -5,13 +5,14 @@ import (
"fmt"
"testing"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
- "github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
- "github.com/grafana/loki/operator/internal/manifests"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
+
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ "github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
+ "github.com/grafana/loki/operator/internal/manifests"
)
func createPodList(baseName string, phases ...corev1.PodPhase) *corev1.PodList {
diff --git a/operator/internal/status/conditions.go b/operator/internal/status/conditions.go
new file mode 100644
index 0000000000000..637a50e6f89f4
--- /dev/null
+++ b/operator/internal/status/conditions.go
@@ -0,0 +1,37 @@
+package status
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+func mergeConditions(old, active []metav1.Condition, now metav1.Time) []metav1.Condition {
+ merged := make([]metav1.Condition, 0, len(old)+len(active))
+ for len(old) > 0 {
+ c := old[0]
+ found := -1
+ for i, ac := range active {
+ if c.Type == ac.Type && c.Reason == ac.Reason {
+ found = i
+ break
+ }
+ }
+
+ if found != -1 {
+ c = active[found]
+ active = append(active[:found], active[found+1:]...)
+
+ c.Status = metav1.ConditionTrue
+ } else {
+ c.Status = metav1.ConditionFalse
+ }
+
+ c.LastTransitionTime = now
+ merged = append(merged, c)
+ old = old[1:]
+ }
+
+ for _, c := range active {
+ c.Status = metav1.ConditionTrue
+ c.LastTransitionTime = now
+ merged = append(merged, c)
+ }
+ return merged
+}
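
The merge contract of `mergeConditions`: old conditions with no matching active type/reason are flipped to False, matching ones are replaced by their active counterpart and set to True, any remaining active conditions are appended as True, and everything is stamped with the supplied timestamp. A minimal sketch (values invented; not part of the patch):

```go
package status

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// exampleMerge is illustrative only: Pending drops out of the active set and
// is reset to False, Ready stays active and is set to True.
func exampleMerge() {
	now := metav1.NewTime(time.Unix(0, 0))
	old := []metav1.Condition{{Type: "Pending", Reason: "PendingComponents"}}
	active := []metav1.Condition{{Type: "Ready", Reason: "ReadyComponents"}}

	for _, c := range mergeConditions(old, active, now) {
		fmt.Println(c.Type, c.Status) // Pending False, then Ready True
	}
}
```
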
diff --git a/operator/internal/status/conditions_test.go b/operator/internal/status/conditions_test.go
new file mode 100644
index 0000000000000..3d85942df8753
--- /dev/null
+++ b/operator/internal/status/conditions_test.go
@@ -0,0 +1,141 @@
+package status
+
+import (
+ "testing"
+ "time"
+
+ "github.com/google/go-cmp/cmp"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+)
+
+func TestMergeConditions(t *testing.T) {
+ now := metav1.NewTime(time.Unix(0, 0))
+ tt := []struct {
+ desc string
+ old []metav1.Condition
+ active []metav1.Condition
+ wantMerged []metav1.Condition
+ }{
+ {
+ desc: "set status and time",
+ old: []metav1.Condition{},
+ active: []metav1.Condition{
+ conditionReady,
+ },
+ wantMerged: []metav1.Condition{
+ {
+ Type: conditionReady.Type,
+ Status: metav1.ConditionTrue,
+ LastTransitionTime: now,
+ Reason: conditionReady.Reason,
+ Message: conditionReady.Message,
+ },
+ },
+ },
+ {
+ desc: "reset old condition",
+ old: []metav1.Condition{
+ conditionPending,
+ },
+ active: []metav1.Condition{
+ conditionReady,
+ },
+ wantMerged: []metav1.Condition{
+ {
+ Type: conditionPending.Type,
+ Status: metav1.ConditionFalse,
+ LastTransitionTime: now,
+ Reason: conditionPending.Reason,
+ Message: conditionPending.Message,
+ },
+ {
+ Type: conditionReady.Type,
+ Status: metav1.ConditionTrue,
+ LastTransitionTime: now,
+ Reason: conditionReady.Reason,
+ Message: conditionReady.Message,
+ },
+ },
+ },
+ {
+ desc: "keep active conditions",
+ old: []metav1.Condition{
+ {
+ Type: conditionReady.Type,
+ Status: metav1.ConditionTrue,
+ LastTransitionTime: now,
+ Reason: conditionReady.Reason,
+ Message: conditionReady.Message,
+ },
+ {
+ Type: conditionPending.Type,
+ Status: metav1.ConditionFalse,
+ LastTransitionTime: now,
+ Reason: conditionPending.Reason,
+ Message: conditionPending.Message,
+ },
+ },
+ active: []metav1.Condition{
+ conditionReady,
+ {
+ Type: string(lokiv1.ConditionWarning),
+ Reason: "test-warning",
+ Message: "test-warning-message",
+ },
+ },
+ wantMerged: []metav1.Condition{
+ {
+ Type: conditionReady.Type,
+ Status: metav1.ConditionTrue,
+ LastTransitionTime: now,
+ Reason: conditionReady.Reason,
+ Message: conditionReady.Message,
+ },
+ {
+ Type: conditionPending.Type,
+ Status: metav1.ConditionFalse,
+ LastTransitionTime: now,
+ Reason: conditionPending.Reason,
+ Message: conditionPending.Message,
+ },
+ {
+ Type: string(lokiv1.ConditionWarning),
+ Status: metav1.ConditionTrue,
+ LastTransitionTime: now,
+ Reason: "test-warning",
+ Message: "test-warning-message",
+ },
+ },
+ },
+ }
+
+ for _, tc := range tt {
+ tc := tc
+
+ t.Run(tc.desc, func(t *testing.T) {
+ t.Parallel()
+
+ beforeLenOld := len(tc.old)
+ beforeLenActive := len(tc.active)
+
+ merged := mergeConditions(tc.old, tc.active, now)
+
+ afterLenOld := len(tc.old)
+ afterLenActive := len(tc.active)
+
+ if diff := cmp.Diff(merged, tc.wantMerged); diff != "" {
+ t.Errorf("Merged conditions differ: -got+want\n%s", diff)
+ }
+
+ if beforeLenOld != afterLenOld {
+ t.Errorf("old length differs: got %v, want %v", afterLenOld, beforeLenOld)
+ }
+
+ if beforeLenActive != afterLenActive {
+ t.Errorf("active length differs: got %v, want %v", afterLenActive, beforeLenActive)
+ }
+ })
+ }
+}
diff --git a/operator/internal/status/lokistack.go b/operator/internal/status/lokistack.go
index 1212e5c2f7e92..467fef398ea77 100644
--- a/operator/internal/status/lokistack.go
+++ b/operator/internal/status/lokistack.go
@@ -4,12 +4,8 @@ import (
"context"
"fmt"
- "github.com/ViaQ/logerr/v2/kverrors"
corev1 "k8s.io/api/core/v1"
- apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/client-go/util/retry"
- ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
@@ -17,11 +13,12 @@ import (
)
const (
- messageReady = "All components ready"
- messageFailed = "Some LokiStack components failed"
- messagePending = "Some LokiStack components pending on dependencies"
- messageDegradedMissingNodes = "Cluster contains no nodes matching the labels used for zone-awareness"
- messageDegradedEmptyNodeLabel = "No value for the labels used for zone-awareness"
+ messageReady = "All components ready"
+ messageFailed = "Some LokiStack components failed"
+ messagePending = "Some LokiStack components pending on dependencies"
+ messageDegradedMissingNodes = "Cluster contains no nodes matching the labels used for zone-awareness"
+ messageDegradedEmptyNodeLabel = "No value for the labels used for zone-awareness"
+ messageWarningNeedsSchemaVersionUpdate = "The schema configuration does not contain the most recent schema version and needs an update"
)
var (
@@ -63,18 +60,27 @@ func (e *DegradedError) Error() string {
return fmt.Sprintf("cluster degraded: %s", e.Message)
}
-// SetDegradedCondition appends the condition Degraded to the lokistack status conditions.
-func SetDegradedCondition(ctx context.Context, k k8s.Client, req ctrl.Request, msg string, reason lokiv1.LokiStackConditionReason) error {
- degraded := metav1.Condition{
- Type: string(lokiv1.ConditionDegraded),
- Message: msg,
- Reason: string(reason),
+func generateConditions(ctx context.Context, cs *lokiv1.LokiStackComponentStatus, k k8s.Client, stack *lokiv1.LokiStack, degradedErr *DegradedError) ([]metav1.Condition, error) {
+ conditions := generateWarnings(stack.Status.Storage.Schemas)
+
+ mainCondition, err := generateCondition(ctx, cs, k, stack, degradedErr)
+ if err != nil {
+ return nil, err
}
- return updateCondition(ctx, k, req, degraded)
+ conditions = append(conditions, mainCondition)
+ return conditions, nil
}
-func generateCondition(ctx context.Context, cs *lokiv1.LokiStackComponentStatus, k k8s.Client, req ctrl.Request, stack *lokiv1.LokiStack) (metav1.Condition, error) {
+func generateCondition(ctx context.Context, cs *lokiv1.LokiStackComponentStatus, k k8s.Client, stack *lokiv1.LokiStack, degradedErr *DegradedError) (metav1.Condition, error) {
+ if degradedErr != nil {
+ return metav1.Condition{
+ Type: string(lokiv1.ConditionDegraded),
+ Message: degradedErr.Message,
+ Reason: string(degradedErr.Reason),
+ }, nil
+ }
+
// Check for failed pods first
failed := len(cs.Compactor[corev1.PodFailed]) +
len(cs.Distributor[corev1.PodFailed]) +
@@ -149,53 +155,16 @@ func checkForZoneawareNodes(ctx context.Context, k client.Client, zones []lokiv1
return true, true, nil
}
-func updateCondition(ctx context.Context, k k8s.Client, req ctrl.Request, condition metav1.Condition) error {
- var stack lokiv1.LokiStack
- if err := k.Get(ctx, req.NamespacedName, &stack); err != nil {
- if apierrors.IsNotFound(err) {
- return nil
- }
- return kverrors.Wrap(err, "failed to lookup LokiStack", "name", req.NamespacedName)
- }
+func generateWarnings(schemas []lokiv1.ObjectStorageSchema) []metav1.Condition {
+ warnings := make([]metav1.Condition, 0, 2)
- for _, c := range stack.Status.Conditions {
- if c.Type == condition.Type &&
- c.Reason == condition.Reason &&
- c.Message == condition.Message &&
- c.Status == metav1.ConditionTrue {
- // resource already has desired condition
- return nil
- }
+ if len(schemas) > 0 && schemas[len(schemas)-1].Version != lokiv1.ObjectStorageSchemaV13 {
+ warnings = append(warnings, metav1.Condition{
+ Type: string(lokiv1.ConditionWarning),
+ Reason: string(lokiv1.ReasonStorageNeedsSchemaUpdate),
+ Message: messageWarningNeedsSchemaVersionUpdate,
+ })
}
- condition.Status = metav1.ConditionTrue
-
- return retry.RetryOnConflict(retry.DefaultRetry, func() error {
- if err := k.Get(ctx, req.NamespacedName, &stack); err != nil {
- return err
- }
-
- now := metav1.Now()
- condition.LastTransitionTime = now
-
- index := -1
- for i := range stack.Status.Conditions {
- // Reset all other conditions first
- stack.Status.Conditions[i].Status = metav1.ConditionFalse
- stack.Status.Conditions[i].LastTransitionTime = now
-
- // Locate existing pending condition if any
- if stack.Status.Conditions[i].Type == condition.Type {
- index = i
- }
- }
-
- if index == -1 {
- stack.Status.Conditions = append(stack.Status.Conditions, condition)
- } else {
- stack.Status.Conditions[index] = condition
- }
-
- return k.Status().Update(ctx, &stack)
- })
+ return warnings
}
diff --git a/operator/internal/status/lokistack_test.go b/operator/internal/status/lokistack_test.go
index 8bdc9fadc7cf6..bc35ed2a91193 100644
--- a/operator/internal/status/lokistack_test.go
+++ b/operator/internal/status/lokistack_test.go
@@ -5,16 +5,16 @@ import (
"errors"
"testing"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
- "github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
- ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
+
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ "github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
)
func setupFakesNoError(t *testing.T, stack *lokiv1.LokiStack) (*k8sfakes.FakeClient, *k8sfakes.FakeStatusWriter) {
@@ -39,128 +39,8 @@ func setupFakesNoError(t *testing.T, stack *lokiv1.LokiStack) (*k8sfakes.FakeCli
return k, sw
}
-func TestSetDegradedCondition_WhenGetLokiStackReturnsNotFound_DoNothing(t *testing.T) {
- msg := "tell me nothing"
- reason := lokiv1.ReasonMissingObjectStorageSecret
-
- r := ctrl.Request{
- NamespacedName: types.NamespacedName{
- Name: "my-stack",
- Namespace: "some-ns",
- },
- }
-
- k := &k8sfakes.FakeClient{}
- k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error {
- return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found")
- }
-
- err := SetDegradedCondition(context.Background(), k, r, msg, reason)
- require.NoError(t, err)
-}
-
-func TestSetDegradedCondition_WhenExisting_DoNothing(t *testing.T) {
- msg := "tell me nothing"
- reason := lokiv1.ReasonMissingObjectStorageSecret
- s := lokiv1.LokiStack{
- ObjectMeta: metav1.ObjectMeta{
- Name: "my-stack",
- Namespace: "some-ns",
- },
- Status: lokiv1.LokiStackStatus{
- Conditions: []metav1.Condition{
- {
- Type: string(lokiv1.ConditionDegraded),
- Reason: string(reason),
- Message: msg,
- Status: metav1.ConditionTrue,
- },
- },
- },
- }
-
- r := ctrl.Request{
- NamespacedName: types.NamespacedName{
- Name: "my-stack",
- Namespace: "some-ns",
- },
- }
-
- k, _ := setupFakesNoError(t, &s)
-
- err := SetDegradedCondition(context.Background(), k, r, msg, reason)
- require.NoError(t, err)
- require.Zero(t, k.StatusCallCount())
-}
-
-func TestSetDegradedCondition_WhenExisting_SetDegradedConditionTrue(t *testing.T) {
- msg := "tell me something"
- reason := lokiv1.ReasonMissingObjectStorageSecret
- s := lokiv1.LokiStack{
- ObjectMeta: metav1.ObjectMeta{
- Name: "my-stack",
- Namespace: "some-ns",
- },
- Status: lokiv1.LokiStackStatus{
- Conditions: []metav1.Condition{
- {
- Type: string(lokiv1.ConditionDegraded),
- Reason: string(reason),
- Status: metav1.ConditionFalse,
- },
- },
- },
- }
-
- r := ctrl.Request{
- NamespacedName: types.NamespacedName{
- Name: "my-stack",
- Namespace: "some-ns",
- },
- }
-
- k, sw := setupFakesNoError(t, &s)
-
- err := SetDegradedCondition(context.Background(), k, r, msg, reason)
- require.NoError(t, err)
- require.NotZero(t, k.StatusCallCount())
- require.NotZero(t, sw.UpdateCallCount())
-}
-
-func TestSetDegradedCondition_WhenNoneExisting_AppendDegradedCondition(t *testing.T) {
- msg := "tell me something"
- reason := lokiv1.ReasonMissingObjectStorageSecret
- s := lokiv1.LokiStack{
- ObjectMeta: metav1.ObjectMeta{
- Name: "my-stack",
- Namespace: "some-ns",
- },
- }
-
- r := ctrl.Request{
- NamespacedName: types.NamespacedName{
- Name: "my-stack",
- Namespace: "some-ns",
- },
- }
-
- k, sw := setupFakesNoError(t, &s)
-
- err := SetDegradedCondition(context.Background(), k, r, msg, reason)
- require.NoError(t, err)
-
- require.NotZero(t, k.StatusCallCount())
- require.NotZero(t, sw.UpdateCallCount())
-}
-
func TestGenerateCondition(t *testing.T) {
k := &k8sfakes.FakeClient{}
- r := ctrl.Request{
- NamespacedName: types.NamespacedName{
- Name: "test-lokistack",
- Namespace: "some-ns",
- },
- }
lokiStack := lokiv1.LokiStack{
TypeMeta: metav1.TypeMeta{
Kind: "LokiStack",
@@ -173,6 +53,7 @@ func TestGenerateCondition(t *testing.T) {
tt := []struct {
desc string
componentStatus *lokiv1.LokiStackComponentStatus
+ degradedErr *DegradedError
wantCondition metav1.Condition
}{
{
@@ -202,6 +83,25 @@ func TestGenerateCondition(t *testing.T) {
},
wantCondition: conditionFailed,
},
+ {
+ desc: "degraded error",
+ componentStatus: &lokiv1.LokiStackComponentStatus{
+ Ingester: map[corev1.PodPhase][]string{
+ corev1.PodRunning: {
+ "pod-0",
+ },
+ },
+ },
+ degradedErr: &DegradedError{
+ Message: "test-message",
+ Reason: "test-reason",
+ },
+ wantCondition: metav1.Condition{
+ Type: "Degraded",
+ Reason: "test-reason",
+ Message: "test-message",
+ },
+ },
}
for _, tc := range tt {
@@ -209,7 +109,7 @@ func TestGenerateCondition(t *testing.T) {
t.Run(tc.desc, func(t *testing.T) {
t.Parallel()
- condition, err := generateCondition(context.TODO(), tc.componentStatus, k, r, &lokiStack)
+ condition, err := generateCondition(context.TODO(), tc.componentStatus, k, &lokiStack, tc.degradedErr)
require.Nil(t, err)
require.Equal(t, tc.wantCondition, condition)
})
@@ -217,7 +117,7 @@ func TestGenerateCondition(t *testing.T) {
}
func TestGenerateCondition_ZoneAwareLokiStack(t *testing.T) {
- testError := errors.New("test-error")
+ testError := errors.New("test-error") //nolint:goerr113
tt := []struct {
desc string
nodes []corev1.Node
@@ -259,12 +159,6 @@ func TestGenerateCondition_ZoneAwareLokiStack(t *testing.T) {
t.Run(tc.desc, func(t *testing.T) {
t.Parallel()
- r := ctrl.Request{
- NamespacedName: types.NamespacedName{
- Name: "test-lokistack",
- Namespace: "some-ns",
- },
- }
componentStatus := &lokiv1.LokiStackComponentStatus{
Ingester: map[corev1.PodPhase][]string{
corev1.PodPending: {
@@ -306,10 +200,85 @@ func TestGenerateCondition_ZoneAwareLokiStack(t *testing.T) {
return tc.wantErr
}
- condition, err := generateCondition(context.TODO(), componentStatus, k, r, &lokiStack)
+ condition, err := generateCondition(context.TODO(), componentStatus, k, &lokiStack, nil)
require.Equal(t, tc.wantErr, err)
require.Equal(t, tc.wantCondition, condition)
})
}
}
+
+func TestGenerateWarningCondition_WhenStorageSchemaIsOld(t *testing.T) {
+ tt := []struct {
+ desc string
+ schemas []lokiv1.ObjectStorageSchema
+ wantCondition []metav1.Condition
+ }{
+ {
+ desc: "no V13 in schema config",
+ schemas: []lokiv1.ObjectStorageSchema{
+ {
+ Version: lokiv1.ObjectStorageSchemaV11,
+ EffectiveDate: "2020-10-11",
+ },
+ {
+ Version: lokiv1.ObjectStorageSchemaV12,
+ EffectiveDate: "2023-10-11",
+ },
+ },
+ wantCondition: []metav1.Condition{{
+ Type: string(lokiv1.ConditionWarning),
+ Reason: string(lokiv1.ReasonStorageNeedsSchemaUpdate),
+ Message: messageWarningNeedsSchemaVersionUpdate,
+ }},
+ },
+ {
+ desc: "with V13 not as the last element in schema config",
+ schemas: []lokiv1.ObjectStorageSchema{
+ {
+ Version: lokiv1.ObjectStorageSchemaV11,
+ EffectiveDate: "2020-10-11",
+ },
+ {
+ Version: lokiv1.ObjectStorageSchemaV13,
+ EffectiveDate: "2023-10-11",
+ },
+ {
+ Version: lokiv1.ObjectStorageSchemaV12,
+ EffectiveDate: "2024-10-11",
+ },
+ },
+ wantCondition: []metav1.Condition{{
+ Type: string(lokiv1.ConditionWarning),
+ Reason: string(lokiv1.ReasonStorageNeedsSchemaUpdate),
+ Message: messageWarningNeedsSchemaVersionUpdate,
+ }},
+ },
+ {
+ desc: "with V13 as the last element in schema config",
+ schemas: []lokiv1.ObjectStorageSchema{
+ {
+ Version: lokiv1.ObjectStorageSchemaV11,
+ EffectiveDate: "2020-10-11",
+ },
+ {
+ Version: lokiv1.ObjectStorageSchemaV12,
+ EffectiveDate: "2023-10-11",
+ },
+ {
+ Version: lokiv1.ObjectStorageSchemaV13,
+ EffectiveDate: "2024-10-11",
+ },
+ },
+ wantCondition: []metav1.Condition{},
+ },
+ }
+ for _, tc := range tt {
+ tc := tc
+ t.Run(tc.desc, func(t *testing.T) {
+ t.Parallel()
+ condition := generateWarnings(tc.schemas)
+ require.Equal(t, condition, tc.wantCondition)
+ })
+ }
+}
diff --git a/operator/internal/status/status.go b/operator/internal/status/status.go
index 97a8e81bbae08..281a167355c37 100644
--- a/operator/internal/status/status.go
+++ b/operator/internal/status/status.go
@@ -17,7 +17,7 @@ import (
// Refresh executes an aggregate update of the LokiStack Status struct, i.e.
// - It recreates the Status.Components pod status map per component.
// - It sets the appropriate Status.Condition to true that matches the pod status maps.
-func Refresh(ctx context.Context, k k8s.Client, req ctrl.Request, now time.Time) error {
+func Refresh(ctx context.Context, k k8s.Client, req ctrl.Request, now time.Time, degradedErr *DegradedError) error {
var stack lokiv1.LokiStack
if err := k.Get(ctx, req.NamespacedName, &stack); err != nil {
if apierrors.IsNotFound(err) {
@@ -31,34 +31,20 @@ func Refresh(ctx context.Context, k k8s.Client, req ctrl.Request, now time.Time)
return err
}
- condition, err := generateCondition(ctx, cs, k, req, &stack)
+ activeConditions, err := generateConditions(ctx, cs, k, &stack, degradedErr)
if err != nil {
return err
}
- condition.LastTransitionTime = metav1.NewTime(now)
- condition.Status = metav1.ConditionTrue
+ metaTime := metav1.NewTime(now)
+	for i := range activeConditions {
+		activeConditions[i].LastTransitionTime = metaTime
+		activeConditions[i].Status = metav1.ConditionTrue
+	}
statusUpdater := func(stack *lokiv1.LokiStack) {
stack.Status.Components = *cs
-
- index := -1
- for i := range stack.Status.Conditions {
- // Reset all other conditions first
- stack.Status.Conditions[i].Status = metav1.ConditionFalse
- stack.Status.Conditions[i].LastTransitionTime = metav1.NewTime(now)
-
- // Locate existing pending condition if any
- if stack.Status.Conditions[i].Type == condition.Type {
- index = i
- }
- }
-
- if index == -1 {
- stack.Status.Conditions = append(stack.Status.Conditions, condition)
- } else {
- stack.Status.Conditions[index] = condition
- }
+ stack.Status.Conditions = mergeConditions(stack.Status.Conditions, activeConditions, metaTime)
}
statusUpdater(&stack)
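
Callers now feed degraded states into Refresh instead of writing the condition themselves. The sketch below shows one plausible caller shape; the function name, package name, and the way the error is produced are assumptions for illustration — only the Refresh signature comes from this patch:

```go
package controllers

import (
	"context"
	"errors"
	"time"

	ctrl "sigs.k8s.io/controller-runtime"

	"github.com/grafana/loki/operator/internal/external/k8s"
	"github.com/grafana/loki/operator/internal/status"
)

// reconcileStatus sketches a hypothetical caller; it is not code from this patch.
func reconcileStatus(ctx context.Context, k k8s.Client, req ctrl.Request, buildErr error) error {
	var degraded *status.DegradedError
	// Only a *DegradedError is folded into the status conditions; anything else aborts.
	if buildErr != nil && !errors.As(buildErr, &degraded) {
		return buildErr
	}
	return status.Refresh(ctx, k, req, time.Now(), degraded)
}
```
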
diff --git a/operator/internal/status/status_test.go b/operator/internal/status/status_test.go
index 6befb13df8f7a..c7895cbe8020e 100644
--- a/operator/internal/status/status_test.go
+++ b/operator/internal/status/status_test.go
@@ -5,14 +5,15 @@ import (
"testing"
"time"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
- "github.com/grafana/loki/operator/internal/manifests"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
+
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ "github.com/grafana/loki/operator/internal/manifests"
)
func TestRefreshSuccess(t *testing.T) {
@@ -67,7 +68,7 @@ func TestRefreshSuccess(t *testing.T) {
k, sw := setupListClient(t, stack, componentPods)
- err := Refresh(context.Background(), k, req, now)
+ err := Refresh(context.Background(), k, req, now, nil)
require.NoError(t, err)
require.Equal(t, 1, k.GetCallCount())
@@ -129,7 +130,7 @@ func TestRefreshSuccess_ZoneAwarePendingPod(t *testing.T) {
return nil
}
- err := Refresh(context.Background(), k, req, now)
+ err := Refresh(context.Background(), k, req, now, nil)
require.NoError(t, err)
require.Equal(t, 1, k.GetCallCount())
diff --git a/operator/internal/status/storage_test.go b/operator/internal/status/storage_test.go
index 7aa77e8b9ffc9..5e2c0b595d517 100644
--- a/operator/internal/status/storage_test.go
+++ b/operator/internal/status/storage_test.go
@@ -4,17 +4,17 @@ import (
"context"
"testing"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
- "github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
- "github.com/grafana/loki/operator/internal/status"
"github.com/stretchr/testify/require"
-
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
+
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ "github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
+ "github.com/grafana/loki/operator/internal/status"
)
func TestSetStorageSchemaStatus_WhenGetLokiStackReturnsError_ReturnError(t *testing.T) {
diff --git a/operator/internal/validation/alertingrule_test.go b/operator/internal/validation/alertingrule_test.go
index 7ff5bdb1cca7c..120c65b27fa02 100644
--- a/operator/internal/validation/alertingrule_test.go
+++ b/operator/internal/validation/alertingrule_test.go
@@ -4,14 +4,14 @@ import (
"context"
"testing"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
- "github.com/grafana/loki/operator/internal/validation"
-
"github.com/stretchr/testify/require"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/validation/field"
+
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ "github.com/grafana/loki/operator/internal/validation"
)
var att = []struct {
diff --git a/operator/internal/validation/lokistack_test.go b/operator/internal/validation/lokistack_test.go
index 238f884980e08..e0419cd39565a 100644
--- a/operator/internal/validation/lokistack_test.go
+++ b/operator/internal/validation/lokistack_test.go
@@ -5,14 +5,13 @@ import (
"testing"
"github.com/stretchr/testify/require"
-
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
- "github.com/grafana/loki/operator/internal/validation"
-
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/validation/field"
+
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ "github.com/grafana/loki/operator/internal/validation"
)
var ltt = []struct {
diff --git a/operator/internal/validation/openshift/alertingrule_test.go b/operator/internal/validation/openshift/alertingrule_test.go
index ae911d537c67e..64de2601fe69b 100644
--- a/operator/internal/validation/openshift/alertingrule_test.go
+++ b/operator/internal/validation/openshift/alertingrule_test.go
@@ -4,11 +4,11 @@ import (
"context"
"testing"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
-
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/validation/field"
+
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
)
func TestAlertingRuleValidator(t *testing.T) {
diff --git a/operator/internal/validation/openshift/recordingrule_test.go b/operator/internal/validation/openshift/recordingrule_test.go
index 139b9e8dfd6ad..5ee511bd22230 100644
--- a/operator/internal/validation/openshift/recordingrule_test.go
+++ b/operator/internal/validation/openshift/recordingrule_test.go
@@ -4,11 +4,11 @@ import (
"context"
"testing"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
-
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/validation/field"
+
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
)
func TestRecordingRuleValidator(t *testing.T) {
diff --git a/operator/internal/validation/recordingrule_test.go b/operator/internal/validation/recordingrule_test.go
index dcbbc2d7bf4da..465298facb013 100644
--- a/operator/internal/validation/recordingrule_test.go
+++ b/operator/internal/validation/recordingrule_test.go
@@ -4,14 +4,14 @@ import (
"context"
"testing"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
- "github.com/grafana/loki/operator/internal/validation"
-
"github.com/stretchr/testify/require"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/validation/field"
+
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ "github.com/grafana/loki/operator/internal/validation"
)
var rtt = []struct {
diff --git a/operator/internal/validation/rulerconfig_test.go b/operator/internal/validation/rulerconfig_test.go
index 374e6b5206b0c..158ad9042f3a7 100644
--- a/operator/internal/validation/rulerconfig_test.go
+++ b/operator/internal/validation/rulerconfig_test.go
@@ -4,15 +4,15 @@ import (
"context"
"testing"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
- "github.com/grafana/loki/operator/internal/validation"
-
"github.com/stretchr/testify/require"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/utils/pointer"
+
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ "github.com/grafana/loki/operator/internal/validation"
)
var rctt = []struct {
diff --git a/operator/jsonnet/config.libsonnet b/operator/jsonnet/config.libsonnet
index ec50b795a1de2..efdc1c6103d5c 100644
--- a/operator/jsonnet/config.libsonnet
+++ b/operator/jsonnet/config.libsonnet
@@ -144,7 +144,10 @@ local utils = (import 'github.com/grafana/jsonnet-libs/mixin-utils/utils.libsonn
grafanaDashboards+: {
'loki-retention.json'+: {
- local dropList = ['Logs'],
+ // TODO (JoaoBraveCoding) Once we upgrade to 3.x we should be able to lift the drops on
+ // 'Number of times Tables were skipped during Compaction' and 'Retention' since Loki will then have the
+ // updated metrics
+ local dropList = ['Logs', 'Number of times Tables were skipped during Compaction', 'Retention'],
local replacements = [
{ from: 'cluster=~"$cluster",', to: '' },
{ from: 'container="compactor"', to: 'container=~".+-compactor"' },
@@ -155,7 +158,7 @@ local utils = (import 'github.com/grafana/jsonnet-libs/mixin-utils/utils.libsonn
tags: defaultLokiTags(super.tags),
rows: [
r {
- panels: mapPanels([replaceMatchers(replacements), replaceType('stat', 'singlestat')], r.panels),
+ panels: mapPanels([replaceMatchers(replacements), replaceType('stat', 'singlestat')], dropPanels(r.panels, dropList, function(p) true)),
}
for r in dropPanels(super.rows, dropList, function(p) true)
],
@@ -181,7 +184,10 @@ local utils = (import 'github.com/grafana/jsonnet-libs/mixin-utils/utils.libsonn
},
},
'loki-reads.json'+: {
- local dropList = ['BigTable', 'Ingester - Zone Aware'],
+      // We drop both the BigTable and BoltDB dashboards as they have been
+      // replaced by the Index dashboards.
+ local dropList = ['BigTable', 'Ingester - Zone Aware', 'BoltDB Shipper'],
+
uid: '62q5jjYwhVSaz4Mcrm8tV3My3gcKED',
title: 'OpenShift Logging / LokiStack / Reads',
@@ -220,7 +226,7 @@ local utils = (import 'github.com/grafana/jsonnet-libs/mixin-utils/utils.libsonn
},
},
'loki-writes.json'+: {
- local dropList = ['Ingester - Zone Aware'],
+ local dropList = ['Ingester - Zone Aware', 'BoltDB Shipper'],
uid: 'F6nRYKuXmFVpVSFQmXr7cgXy5j7UNr',
title: 'OpenShift Logging / LokiStack / Writes',
tags: defaultLokiTags(super.tags),
@@ -239,6 +245,10 @@ local utils = (import 'github.com/grafana/jsonnet-libs/mixin-utils/utils.libsonn
utils.selector.re('job', '.+-ingester-http'),
],
ingester_zone:: [],
+ any_ingester:: [
+ utils.selector.eq('namespace', '$namespace'),
+ utils.selector.re('job', '.+-ingester-http'),
+ ],
},
rows: dropPanels(super.rows, dropList, function(p) true),
templating+: {
diff --git a/operator/jsonnet/jsonnetfile.json b/operator/jsonnet/jsonnetfile.json
index 139ecf1db8a86..4b25fb159b3d8 100644
--- a/operator/jsonnet/jsonnetfile.json
+++ b/operator/jsonnet/jsonnetfile.json
@@ -8,7 +8,7 @@
"subdir": "production/loki-mixin"
}
},
- "version": "v2.9.2"
+ "version": "bd505f8e2d37172ff35a89f4ac42efec9566a263"
}
],
"legacyImports": true
diff --git a/operator/jsonnet/jsonnetfile.lock.json b/operator/jsonnet/jsonnetfile.lock.json
index 7e26d6b3d0384..27d2e6e8756c6 100644
--- a/operator/jsonnet/jsonnetfile.lock.json
+++ b/operator/jsonnet/jsonnetfile.lock.json
@@ -38,8 +38,8 @@
"subdir": "production/loki-mixin"
}
},
- "version": "cbad5587450a93af43394e5675c4056235df5df3",
- "sum": "a/71V1QzEB46ewPIE2nyNp2HlYFwmDqmSddNulZPP40="
+ "version": "bd505f8e2d37172ff35a89f4ac42efec9566a263",
+ "sum": "yiXXBAcWfMkYSJthU2OZSgHHmveWvmRT6aM1V0MaAjs="
},
{
"source": {
diff --git a/pkg/bloomcompactor/TODO.md b/pkg/bloomcompactor/TODO.md
deleted file mode 100644
index 479f5399a350d..0000000000000
--- a/pkg/bloomcompactor/TODO.md
+++ /dev/null
@@ -1,4 +0,0 @@
-* Adding falsePosRate of sbf into config
-* Add per-tenant bool to enable compaction
-* Use tarGz, untarGz before uploding blocks to storage
-* Introduce back `maxLookBackPeriod` as `RejectOldSamplesMaxAge` limit in distributors
diff --git a/pkg/bloomcompactor/bloomcompactor.go b/pkg/bloomcompactor/bloomcompactor.go
index 71dbb08380d91..1c52e558c718b 100644
--- a/pkg/bloomcompactor/bloomcompactor.go
+++ b/pkg/bloomcompactor/bloomcompactor.go
@@ -29,8 +29,6 @@ import (
"fmt"
"math"
"os"
- "path/filepath"
- "sort"
"time"
"github.com/go-kit/log"
@@ -44,12 +42,13 @@ import (
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
- "github.com/grafana/loki/pkg/compactor/retention"
- "github.com/grafana/loki/pkg/logproto"
+ "path/filepath"
+
+ "github.com/google/uuid"
+
+ "github.com/grafana/loki/pkg/bloomutils"
"github.com/grafana/loki/pkg/storage"
v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
- "github.com/grafana/loki/pkg/storage/bloom/v1/filter"
- "github.com/grafana/loki/pkg/storage/chunk"
chunk_client "github.com/grafana/loki/pkg/storage/chunk/client"
"github.com/grafana/loki/pkg/storage/config"
"github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper"
@@ -61,11 +60,6 @@ import (
"github.com/grafana/loki/pkg/util"
)
-const (
- fpRate = 0.01
- bloomFileName = "bloom"
-)
-
type Compactor struct {
services.Service
@@ -83,6 +77,7 @@ type Compactor struct {
sharding ShardingStrategy
metrics *metrics
+ reg prometheus.Registerer
}
type storeClient struct {
@@ -108,6 +103,7 @@ func New(
schemaCfg: schemaConfig,
sharding: sharding,
limits: limits,
+ reg: r,
}
// Configure BloomClient for meta.json management
@@ -119,14 +115,8 @@ func New(
c.storeClients = make(map[config.DayTime]storeClient)
for i, periodicConfig := range schemaConfig.Configs {
- var indexStorageCfg indexshipper.Config
- switch periodicConfig.IndexType {
- case config.TSDBType:
- indexStorageCfg = storageCfg.TSDBShipperConfig
- case config.BoltDBShipperType:
- indexStorageCfg = storageCfg.BoltDBShipperConfig.Config
- default:
- level.Warn(c.logger).Log("msg", "skipping period because index type is unsupported")
+ if periodicConfig.IndexType != config.TSDBType {
+ level.Warn(c.logger).Log("msg", "skipping schema period because index type is not supported", "index_type", periodicConfig.IndexType, "period", periodicConfig.From)
continue
}
@@ -143,7 +133,7 @@ func New(
indexShipper, err := indexshipper.NewIndexShipper(
periodicConfig.IndexTables.PathPrefix,
- indexStorageCfg,
+ storageCfg.TSDBShipperConfig,
objectClient,
limits,
nil,
@@ -151,7 +141,7 @@ func New(
return tsdb.OpenShippableTSDB(p)
},
periodicConfig.GetIndexTableNumberRange(periodEndTime),
- prometheus.WrapRegistererWithPrefix("loki_tsdb_shipper_", prometheus.DefaultRegisterer),
+ prometheus.WrapRegistererWithPrefix("loki_bloom_compactor_tsdb_shipper_", r),
logger,
)
@@ -353,52 +343,70 @@ func (c *Compactor) compactTenant(ctx context.Context, logger log.Logger, sc sto
}
// Tokenizer is not thread-safe so we need one per goroutine.
- bt, _ := v1.NewBloomTokenizer(prometheus.DefaultRegisterer)
+ NGramLength := c.limits.BloomNGramLength(tenant)
+ NGramSkip := c.limits.BloomNGramSkip(tenant)
+ bt, _ := v1.NewBloomTokenizer(c.reg, NGramLength, NGramSkip)
- // TODO: Use ForEachConcurrent?
errs := multierror.New()
- if err := sc.indexShipper.ForEach(ctx, tableName, tenant, func(isMultiTenantIndex bool, idx shipperindex.Index) error {
+ rs, err := c.sharding.GetTenantSubRing(tenant).GetAllHealthy(RingOp)
+ if err != nil {
+ return err
+ }
+ tokenRanges := bloomutils.GetInstanceWithTokenRange(c.cfg.Ring.InstanceID, rs.Instances)
+
+ _ = sc.indexShipper.ForEach(ctx, tableName, tenant, func(isMultiTenantIndex bool, idx shipperindex.Index) error {
if isMultiTenantIndex {
- return fmt.Errorf("unexpected multi-tenant")
+ // Skip multi-tenant indexes
+ return nil
+ }
+
+ tsdbFile, ok := idx.(*tsdb.TSDBFile)
+ if !ok {
+ errs.Add(fmt.Errorf("failed to cast to TSDBFile"))
+ return nil
+ }
+
+ tsdbIndex, ok := tsdbFile.Index.(*tsdb.TSDBIndex)
+ if !ok {
+ errs.Add(fmt.Errorf("failed to cast to TSDBIndex"))
+ return nil
}
- // TODO: Make these casts safely
- if err := idx.(*tsdb.TSDBFile).Index.(*tsdb.TSDBIndex).ForSeries(
+ var seriesMetas []seriesMeta
+
+ err := tsdbIndex.ForSeries(
ctx, nil,
0, math.MaxInt64, // TODO: Replace with MaxLookBackPeriod
func(labels labels.Labels, fingerprint model.Fingerprint, chksMetas []tsdbindex.ChunkMeta) {
- job := NewJob(tenant, tableName, idx.Path(), fingerprint, labels, chksMetas)
- jobLogger := log.With(logger, "job", job.String())
-
- ownsJob, err := c.sharding.OwnsJob(job)
- if err != nil {
- c.metrics.compactionRunUnownedJobs.Inc()
- level.Error(jobLogger).Log("msg", "failed to check if compactor owns job", "err", err)
- errs.Add(err)
- return
- }
- if !ownsJob {
- c.metrics.compactionRunUnownedJobs.Inc()
- level.Debug(jobLogger).Log("msg", "skipping job because it is not owned by this shard")
- return
- }
-
- if err := c.runCompact(ctx, jobLogger, job, c.bloomShipperClient, bt, sc); err != nil {
- c.metrics.compactionRunFailedJobs.Inc()
- errs.Add(errors.Wrap(err, "runBloomCompact"))
+ if !tokenRanges.Contains(uint32(fingerprint)) {
return
}
- c.metrics.compactionRunSucceededJobs.Inc()
+ temp := make([]tsdbindex.ChunkMeta, len(chksMetas))
+ _ = copy(temp, chksMetas)
+				// Collect all seriesMetas for this table whose fingerprints fall within this compactor shard's token range.
+ seriesMetas = append(seriesMetas, seriesMeta{seriesFP: fingerprint, seriesLbs: labels, chunkRefs: temp})
},
- ); err != nil {
+ )
+
+ if err != nil {
errs.Add(err)
+ return nil
}
+ job := NewJob(tenant, tableName, idx.Path(), seriesMetas)
+ jobLogger := log.With(logger, "job", job.String())
+ c.metrics.compactionRunJobStarted.Inc()
+
+ err = c.runCompact(ctx, jobLogger, job, bt, sc)
+ if err != nil {
+ c.metrics.compactionRunJobFailed.Inc()
+ errs.Add(errors.Wrap(err, "runBloomCompact failed"))
+ } else {
+ c.metrics.compactionRunJobSuceeded.Inc()
+ }
return nil
- }); err != nil {
- errs.Add(err)
- }
+ })
return errs.Err()
}
@@ -441,207 +449,137 @@ func (c *Compactor) compactTenantWithRetries(ctx context.Context, logger log.Log
)
}
-func makeChunkRefs(chksMetas []tsdbindex.ChunkMeta, tenant string, fp model.Fingerprint) []chunk.Chunk {
- chunkRefs := make([]chunk.Chunk, 0, len(chksMetas))
- for _, chk := range chksMetas {
- chunkRefs = append(chunkRefs, chunk.Chunk{
- ChunkRef: logproto.ChunkRef{
- Fingerprint: uint64(fp),
- UserID: tenant,
- From: chk.From(),
- Through: chk.Through(),
- Checksum: chk.Checksum,
- },
- })
- }
-
- return chunkRefs
-}
-
-// TODO Revisit this step once v1/bloom lib updated to combine blooms in the same series
-func buildBloomBlock(ctx context.Context, logger log.Logger, bloomForChks v1.SeriesWithBloom, job Job, workingDir string) (bloomshipper.Block, error) {
+func (c *Compactor) runCompact(ctx context.Context, logger log.Logger, job Job, bt *v1.BloomTokenizer, storeClient storeClient) error {
// Ensure the context has not been canceled (ie. compactor shutdown has been triggered).
if err := ctx.Err(); err != nil {
- return bloomshipper.Block{}, err
+ return err
}
-
- localDst := createLocalDirName(workingDir, job)
-
- // write bloom to a local dir
- builder, err := v1.NewBlockBuilder(v1.NewBlockOptions(), v1.NewDirectoryBlockWriter(localDst))
- if err != nil {
- level.Error(logger).Log("creating builder", err)
- return bloomshipper.Block{}, err
+ metaSearchParams := bloomshipper.MetaSearchParams{
+ TenantID: job.tenantID,
+ MinFingerprint: uint64(job.minFp),
+ MaxFingerprint: uint64(job.maxFp),
+ StartTimestamp: int64(job.from),
+ EndTimestamp: int64(job.through),
}
+ var metas []bloomshipper.Meta
+ //TODO Configure pool for these to avoid allocations
+ var activeBloomBlocksRefs []bloomshipper.BlockRef
- checksum, err := builder.BuildFrom(v1.NewSliceIter([]v1.SeriesWithBloom{bloomForChks}))
+ metas, err := c.bloomShipperClient.GetMetas(ctx, metaSearchParams)
if err != nil {
- level.Error(logger).Log("writing bloom", err)
- return bloomshipper.Block{}, err
+ return err
}
- blockFile, err := os.Open(filepath.Join(localDst, bloomFileName))
- if err != nil {
- level.Error(logger).Log("reading bloomBlock", err)
- }
+ // TODO This logic currently is NOT concerned with cutting blocks upon topology changes to bloom-compactors.
+ // It may create blocks with series outside of the fp range of the compactor. Cutting blocks will be addressed in a follow-up PR.
+ metasMatchingJob, blocksMatchingJob := matchingBlocks(metas, job)
- blocks := bloomshipper.Block{
- BlockRef: bloomshipper.BlockRef{
- Ref: bloomshipper.Ref{
- TenantID: job.Tenant(),
- TableName: job.TableName(),
- MinFingerprint: uint64(job.Fingerprint()), // TODO will change once we compact multiple blooms into a block
- MaxFingerprint: uint64(job.Fingerprint()),
- StartTimestamp: job.From().Unix(),
- EndTimestamp: job.Through().Unix(),
- Checksum: checksum,
- },
- IndexPath: job.IndexPath(),
- },
- Data: blockFile,
- }
+ localDst := createLocalDirName(c.cfg.WorkingDirectory, job)
+ blockOptions := v1.NewBlockOptions(bt.GetNGramLength(), bt.GetNGramSkip())
- return blocks, nil
-}
+ defer func() {
+ //clean up the bloom directory
+ if err := os.RemoveAll(localDst); err != nil {
+ level.Error(logger).Log("msg", "failed to remove block directory", "dir", localDst, "err", err)
+ }
+ }()
-func createLocalDirName(workingDir string, job Job) string {
- dir := fmt.Sprintf("bloomBlock-%s-%s-%s-%s-%s-%s", job.TableName(), job.Tenant(), job.Fingerprint(), job.Fingerprint(), job.From(), job.Through())
- return filepath.Join(workingDir, dir)
-}
+ var resultingBlock bloomshipper.Block
+ defer func() {
+ if resultingBlock.Data != nil {
+ _ = resultingBlock.Data.Close()
+ }
+ }()
-// Compacts given list of chunks, uploads them to storage and returns a list of bloomBlocks
-func CompactNewChunks(ctx context.Context, logger log.Logger, job Job,
- chunks []chunk.Chunk, bt *v1.BloomTokenizer,
- bloomShipperClient bloomshipper.Client, dst string) ([]bloomshipper.Block, error) {
- // Ensure the context has not been canceled (ie. compactor shutdown has been triggered).
- if err := ctx.Err(); err != nil {
- return nil, err
- }
+ if len(blocksMatchingJob) == 0 && len(metasMatchingJob) > 0 {
+ // There is no change to any blocks, no compaction needed
+ level.Info(logger).Log("msg", "No changes to tsdb, no compaction needed")
+ return nil
+ } else if len(metasMatchingJob) == 0 {
+ // No matching existing blocks for this job, compact all series from scratch
- // Create a bloom for this series
- bloomForChks := v1.SeriesWithBloom{
- Series: &v1.Series{
- Fingerprint: job.Fingerprint(),
- },
- Bloom: &v1.Bloom{
- ScalableBloomFilter: *filter.NewDefaultScalableBloomFilter(fpRate),
- },
- }
+ builder, err := NewPersistentBlockBuilder(localDst, blockOptions)
+ if err != nil {
+ level.Error(logger).Log("msg", "failed creating block builder", "err", err)
+ return err
+ }
- // Tokenize data into n-grams
- bt.PopulateSeriesWithBloom(&bloomForChks, chunks)
+ fpRate := c.limits.BloomFalsePositiveRate(job.tenantID)
+ resultingBlock, err = compactNewChunks(ctx, logger, job, fpRate, bt, storeClient.chunk, builder)
+ if err != nil {
+			level.Error(logger).Log("msg", "failed compacting new chunks", "err", err)
+			return err
+ }
- // Build and upload bloomBlock to storage
- blocks, err := buildBloomBlock(ctx, logger, bloomForChks, job, dst)
- if err != nil {
- level.Error(logger).Log("building bloomBlocks", err)
- return nil, err
- }
- storedBlocks, err := bloomShipperClient.PutBlocks(ctx, []bloomshipper.Block{blocks})
- if err != nil {
- level.Error(logger).Log("putting blocks to storage", err)
- return nil, err
- }
- return storedBlocks, nil
-}
+ } else if len(blocksMatchingJob) > 0 {
+		// When already-compacted metas exist, merge the existing blocks and amend their blooms with the new series.
-func (c *Compactor) runCompact(ctx context.Context, logger log.Logger, job Job, bloomShipperClient bloomshipper.Client, bt *v1.BloomTokenizer, storeClient storeClient) error {
- // Ensure the context has not been canceled (ie. compactor shutdown has been triggered).
- if err := ctx.Err(); err != nil {
- return err
- }
+ var populate = createPopulateFunc(ctx, logger, job, storeClient, bt)
- metaSearchParams := bloomshipper.MetaSearchParams{
- TenantID: job.tenantID,
- MinFingerprint: uint64(job.seriesFP),
- MaxFingerprint: uint64(job.seriesFP),
- StartTimestamp: int64(job.from),
- EndTimestamp: int64(job.through),
- }
- var metas []bloomshipper.Meta
- //TODO Configure pool for these to avoid allocations
- var bloomBlocksRefs []bloomshipper.BlockRef
- var tombstonedBlockRefs []bloomshipper.BlockRef
+ seriesIter := makeSeriesIterFromSeriesMeta(job)
- metas, err := bloomShipperClient.GetMetas(ctx, metaSearchParams)
- if err != nil {
- return err
- }
+ blockIters, blockPaths, err := makeBlockIterFromBlocks(ctx, logger, c.bloomShipperClient, blocksMatchingJob, c.cfg.WorkingDirectory)
+ defer func() {
+ for _, path := range blockPaths {
+ if err := os.RemoveAll(path); err != nil {
+ level.Error(logger).Log("msg", "failed removing uncompressed bloomDir", "dir", path, "err", err)
+ }
+ }
+ }()
- if len(metas) == 0 {
- // Get chunks data from list of chunkRefs
- chks, err := storeClient.chunk.GetChunks(ctx, makeChunkRefs(job.Chunks(), job.Tenant(), job.Fingerprint()))
if err != nil {
return err
}
- storedBlocks, err := CompactNewChunks(ctx, logger, job, chks, bt, bloomShipperClient, c.cfg.WorkingDirectory)
+ mergeBlockBuilder, err := NewPersistentBlockBuilder(localDst, blockOptions)
if err != nil {
- return level.Error(logger).Log("compacting new chunks", err)
+ level.Error(logger).Log("msg", "failed creating block builder", "err", err)
+ return err
}
- storedBlockRefs := make([]bloomshipper.BlockRef, len(storedBlocks))
-
- for i, block := range storedBlocks {
- storedBlockRefs[i] = block.BlockRef
+ resultingBlock, err = mergeCompactChunks(logger, populate, mergeBlockBuilder, blockIters, seriesIter, job)
+ if err != nil {
+ level.Error(logger).Log("msg", "failed merging existing blocks with new chunks", "err", err)
+ return err
}
+ }
- // all blocks are new and active blocks
- bloomBlocksRefs = storedBlockRefs
- } else {
- // TODO complete part 2 - periodic compaction for delta from previous period
- // When already compacted metas exists
- // Deduplicate index paths
- uniqueIndexPaths := make(map[string]struct{})
-
- for _, meta := range metas {
- for _, blockRef := range meta.Blocks {
- uniqueIndexPaths[blockRef.IndexPath] = struct{}{}
- // ...
-
- // the result should return a list of active
- // blocks and tombstoned bloom blocks.
- }
+ archivePath := filepath.Join(c.cfg.WorkingDirectory, uuid.New().String())
+
+ blockToUpload, err := bloomshipper.CompressBloomBlock(resultingBlock.BlockRef, archivePath, localDst, logger)
+ if err != nil {
+ level.Error(logger).Log("msg", "failed compressing bloom blocks into tar file", "err", err)
+ return err
+ }
+ defer func() {
+ err = os.Remove(archivePath)
+ if err != nil {
+ level.Error(logger).Log("msg", "failed removing archive file", "err", err, "file", archivePath)
}
+ }()
+	// Keep the PutBlocks signature as-is for now.
+	// Once block-size limits are introduced, compactNewChunks may return multiple blocks, so passing a list is appropriate.
+ storedBlocks, err := c.bloomShipperClient.PutBlocks(ctx, []bloomshipper.Block{blockToUpload})
+ if err != nil {
+ level.Error(logger).Log("msg", "failed uploading blocks to storage", "err", err)
+ return err
+ }
+
+ // all blocks are new and active blocks
+ for _, block := range storedBlocks {
+ activeBloomBlocksRefs = append(activeBloomBlocksRefs, block.BlockRef)
}
+ // TODO delete old metas in later compactions
// After all is done, create one meta file and upload to storage
meta := bloomshipper.Meta{
- Tombstones: tombstonedBlockRefs,
- Blocks: bloomBlocksRefs,
+ Tombstones: blocksMatchingJob,
+ Blocks: activeBloomBlocksRefs,
}
- err = bloomShipperClient.PutMeta(ctx, meta)
+ err = c.bloomShipperClient.PutMeta(ctx, meta)
if err != nil {
- level.Error(logger).Log("putting meta.json to storage", err)
+ level.Error(logger).Log("msg", "failed uploading meta.json to storage", "err", err)
return err
}
return nil
}
-
-func getIntervalsForTables(tables []string) map[string]model.Interval {
- tablesIntervals := make(map[string]model.Interval, len(tables))
- for _, table := range tables {
- tablesIntervals[table] = retention.ExtractIntervalFromTableName(table)
- }
-
- return tablesIntervals
-}
-
-func sortTablesByRange(tables []string, intervals map[string]model.Interval) {
- sort.Slice(tables, func(i, j int) bool {
- // less than if start time is after produces a most recent first sort order
- return intervals[tables[i]].Start.After(intervals[tables[j]].Start)
- })
-}
-
-// TODO: comes from pkg/compactor/compactor.go
-func schemaPeriodForTable(cfg config.SchemaConfig, tableName string) (config.PeriodConfig, bool) {
- tableInterval := retention.ExtractIntervalFromTableName(tableName)
- schemaCfg, err := cfg.SchemaForTime(tableInterval.Start)
- if err != nil || schemaCfg.IndexTables.TableFor(tableInterval.Start) != tableName {
- return config.PeriodConfig{}, false
- }
-
- return schemaCfg, true
-}
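
For reviewers, the reworked runCompact boils down to the following decision flow (a commented summary of the patch above, not executable code):

```go
// Decision flow of runCompact after this patch:
//
//  1. GetMetas fetches existing metas for the job's tenant, fingerprint range, and time range.
//  2. matchingBlocks splits them into metasMatchingJob / blocksMatchingJob.
//  3. No matching blocks but matching metas  -> nothing changed, skip compaction.
//  4. No matching metas                      -> compactNewChunks builds blooms for all series from scratch.
//  5. Matching blocks exist                  -> mergeCompactChunks merges those blocks with the new series.
//  6. Compress the resulting block, PutBlocks uploads it, then PutMeta publishes a meta that
//     tombstones the replaced blocks and lists the newly stored ones as active.
```
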
diff --git a/pkg/bloomcompactor/bloomcompactor_test.go b/pkg/bloomcompactor/bloomcompactor_test.go
index 65c6779750320..6221610321b69 100644
--- a/pkg/bloomcompactor/bloomcompactor_test.go
+++ b/pkg/bloomcompactor/bloomcompactor_test.go
@@ -8,10 +8,11 @@ import (
"testing"
"time"
+ "github.com/go-kit/log"
"github.com/grafana/dskit/flagext"
+ "github.com/grafana/dskit/kv"
"github.com/grafana/dskit/kv/consul"
"github.com/grafana/dskit/ring"
- "github.com/grafana/dskit/server"
"github.com/grafana/dskit/services"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
@@ -23,7 +24,6 @@ import (
"github.com/grafana/loki/pkg/storage/chunk/client/local"
"github.com/grafana/loki/pkg/storage/config"
"github.com/grafana/loki/pkg/storage/stores/shipper/indexshipper"
- util_log "github.com/grafana/loki/pkg/util/log"
lokiring "github.com/grafana/loki/pkg/util/ring"
"github.com/grafana/loki/pkg/validation"
)
@@ -33,10 +33,124 @@ const (
workingDirName = "working-dir"
)
+func parseDayTime(s string) config.DayTime {
+ t, err := time.Parse("2006-01-02", s)
+ if err != nil {
+ panic(err)
+ }
+ return config.DayTime{
+ Time: model.TimeFromUnix(t.Unix()),
+ }
+}
+
+func TestCompactor_StartStopService(t *testing.T) {
+ shardingStrategy := NewNoopStrategy()
+ logger := log.NewNopLogger()
+ reg := prometheus.NewRegistry()
+
+ cm := storage.NewClientMetrics()
+ t.Cleanup(cm.Unregister)
+
+ var limits validation.Limits
+ limits.RegisterFlags(flag.NewFlagSet("limits", flag.PanicOnError))
+ overrides, _ := validation.NewOverrides(limits, nil)
+
+ periodConfigUnsupported := config.PeriodConfig{
+ From: parseDayTime("2023-09-01"),
+ IndexType: config.BoltDBShipperType,
+ ObjectType: config.StorageTypeFileSystem,
+ Schema: "v13",
+ RowShards: 16,
+ IndexTables: config.IndexPeriodicTableConfig{
+ PathPrefix: "index/",
+ PeriodicTableConfig: config.PeriodicTableConfig{
+ Prefix: indexTablePrefix,
+ Period: config.ObjectStorageIndexRequiredPeriod,
+ },
+ },
+ }
+
+ periodConfigSupported := config.PeriodConfig{
+ From: parseDayTime("2023-10-01"),
+ IndexType: config.TSDBType,
+ ObjectType: config.StorageTypeFileSystem,
+ Schema: "v13",
+ RowShards: 16,
+ IndexTables: config.IndexPeriodicTableConfig{
+ PathPrefix: "index/",
+ PeriodicTableConfig: config.PeriodicTableConfig{
+ Prefix: indexTablePrefix,
+ Period: config.ObjectStorageIndexRequiredPeriod,
+ },
+ },
+ }
+
+ schemaCfg := config.SchemaConfig{
+ Configs: []config.PeriodConfig{
+ periodConfigUnsupported,
+ periodConfigSupported,
+ },
+ }
+
+ fsDir := t.TempDir()
+ tsdbDir := t.TempDir()
+
+ storageCfg := storage.Config{
+ FSConfig: local.FSConfig{
+ Directory: fsDir,
+ },
+ TSDBShipperConfig: indexshipper.Config{
+ ActiveIndexDirectory: filepath.Join(tsdbDir, "index"),
+ ResyncInterval: 1 * time.Minute,
+ Mode: indexshipper.ModeReadWrite,
+ CacheLocation: filepath.Join(tsdbDir, "cache"),
+ },
+ }
+
+ t.Run("ignore unsupported index types in schema config", func(t *testing.T) {
+ kvStore, closer := consul.NewInMemoryClient(ring.GetCodec(), logger, reg)
+ t.Cleanup(func() {
+ closer.Close()
+ })
+
+ var cfg Config
+ flagext.DefaultValues(&cfg)
+ cfg.Enabled = true
+ cfg.WorkingDirectory = filepath.Join(t.TempDir(), workingDirName)
+ cfg.Ring = lokiring.RingConfig{
+ KVStore: kv.Config{
+ Mock: kvStore,
+ },
+ }
+
+ c, err := New(cfg, storageCfg, schemaCfg, overrides, logger, shardingStrategy, cm, reg)
+ require.NoError(t, err)
+
+ err = services.StartAndAwaitRunning(context.Background(), c)
+ require.NoError(t, err)
+
+ require.Equal(t, 1, len(c.storeClients))
+
+ // supported index type TSDB is present
+ sc, ok := c.storeClients[periodConfigSupported.From]
+ require.True(t, ok)
+ require.NotNil(t, sc)
+
+ // unsupported index type BoltDB is not present
+ _, ok = c.storeClients[periodConfigUnsupported.From]
+ require.False(t, ok)
+
+ err = services.StopAndAwaitTerminated(context.Background(), c)
+ require.NoError(t, err)
+ })
+}
+
func TestCompactor_RunCompaction(t *testing.T) {
- servercfg := &server.Config{}
- require.Nil(t, servercfg.LogLevel.Set("debug"))
- util_log.InitLogger(servercfg, nil, false)
+ logger := log.NewNopLogger()
+ reg := prometheus.NewRegistry()
+
+ cm := storage.NewClientMetrics()
+ t.Cleanup(cm.Unregister)
tempDir := t.TempDir()
indexDir := filepath.Join(tempDir, "index")
@@ -79,7 +193,7 @@ func TestCompactor_RunCompaction(t *testing.T) {
)
}
- kvStore, cleanUp := consul.NewInMemoryClient(ring.GetCodec(), util_log.Logger, nil)
+ kvStore, cleanUp := consul.NewInMemoryClient(ring.GetCodec(), logger, nil)
t.Cleanup(func() { assert.NoError(t, cleanUp.Close()) })
var cfg Config
@@ -104,10 +218,7 @@ func TestCompactor_RunCompaction(t *testing.T) {
limits.RegisterFlags(flag.NewFlagSet("limits", flag.PanicOnError))
overrides, _ := validation.NewOverrides(limits, nil)
- clientMetrics := storage.NewClientMetrics()
- t.Cleanup(clientMetrics.Unregister)
-
- ringManager, err := lokiring.NewRingManager("bloom-compactor", lokiring.ServerMode, cfg.Ring, 1, 1, util_log.Logger, prometheus.DefaultRegisterer)
+ ringManager, err := lokiring.NewRingManager("bloom-compactor", lokiring.ServerMode, cfg.Ring, 1, 1, logger, reg)
require.NoError(t, err)
err = ringManager.StartAsync(context.Background())
@@ -124,7 +235,7 @@ func TestCompactor_RunCompaction(t *testing.T) {
shuffleSharding := NewShuffleShardingStrategy(ringManager.Ring, ringManager.RingLifecycler, overrides)
- c, err := New(cfg, storageConfig, schemaCfg, overrides, util_log.Logger, shuffleSharding, clientMetrics, nil)
+ c, err := New(cfg, storageConfig, schemaCfg, overrides, logger, shuffleSharding, cm, nil)
require.NoError(t, err)
err = c.runCompaction(context.Background())
diff --git a/pkg/bloomcompactor/chunkcompactor.go b/pkg/bloomcompactor/chunkcompactor.go
new file mode 100644
index 0000000000000..a949f26452d9d
--- /dev/null
+++ b/pkg/bloomcompactor/chunkcompactor.go
@@ -0,0 +1,240 @@
+package bloomcompactor
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+
+ "github.com/go-kit/log"
+ "github.com/go-kit/log/level"
+ "github.com/prometheus/common/model"
+
+ "github.com/grafana/loki/pkg/logproto"
+ v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
+ "github.com/grafana/loki/pkg/storage/bloom/v1/filter"
+ "github.com/grafana/loki/pkg/storage/chunk"
+ "github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper"
+ tsdbindex "github.com/grafana/loki/pkg/storage/stores/shipper/indexshipper/tsdb/index"
+)
+
+type compactorTokenizer interface {
+ PopulateSeriesWithBloom(bloom *v1.SeriesWithBloom, chunks []chunk.Chunk) error
+}
+
+type chunkClient interface {
+ // TODO: Consider using lazyChunks to avoid downloading all requested chunks.
+ GetChunks(ctx context.Context, chunks []chunk.Chunk) ([]chunk.Chunk, error)
+}
+
+type blockBuilder interface {
+ BuildFrom(itr v1.Iterator[v1.SeriesWithBloom]) (uint32, error)
+ Data() (io.ReadSeekCloser, error)
+}
+
+type PersistentBlockBuilder struct {
+ builder *v1.BlockBuilder
+ localDst string
+}
+
+func NewPersistentBlockBuilder(localDst string, blockOptions v1.BlockOptions) (*PersistentBlockBuilder, error) {
+ // write bloom to a local dir
+ b, err := v1.NewBlockBuilder(blockOptions, v1.NewDirectoryBlockWriter(localDst))
+ if err != nil {
+ return nil, err
+ }
+ builder := PersistentBlockBuilder{
+ builder: b,
+ localDst: localDst,
+ }
+ return &builder, nil
+}
+
+func (p *PersistentBlockBuilder) BuildFrom(itr v1.Iterator[v1.SeriesWithBloom]) (uint32, error) {
+ return p.builder.BuildFrom(itr)
+}
+
+func (p *PersistentBlockBuilder) mergeBuild(builder *v1.MergeBuilder) (uint32, error) {
+ return builder.Build(p.builder)
+}
+
+func (p *PersistentBlockBuilder) Data() (io.ReadSeekCloser, error) {
+ blockFile, err := os.Open(filepath.Join(p.localDst, v1.BloomFileName))
+ if err != nil {
+ return nil, err
+ }
+ return blockFile, nil
+}
+
+func makeChunkRefs(chksMetas []tsdbindex.ChunkMeta, tenant string, fp model.Fingerprint) []chunk.Chunk {
+ chunkRefs := make([]chunk.Chunk, 0, len(chksMetas))
+ for _, chk := range chksMetas {
+ chunkRefs = append(chunkRefs, chunk.Chunk{
+ ChunkRef: logproto.ChunkRef{
+ Fingerprint: uint64(fp),
+ UserID: tenant,
+ From: chk.From(),
+ Through: chk.Through(),
+ Checksum: chk.Checksum,
+ },
+ })
+ }
+
+ return chunkRefs
+}
+
+func buildBloomFromSeries(seriesMeta seriesMeta, fpRate float64, tokenizer compactorTokenizer, chunks []chunk.Chunk) (v1.SeriesWithBloom, error) {
+ // Create a bloom for this series
+ bloomForChks := v1.SeriesWithBloom{
+ Series: &v1.Series{
+ Fingerprint: seriesMeta.seriesFP,
+ },
+ Bloom: &v1.Bloom{
+ ScalableBloomFilter: *filter.NewDefaultScalableBloomFilter(fpRate),
+ },
+ }
+
+ // Tokenize data into n-grams
+ err := tokenizer.PopulateSeriesWithBloom(&bloomForChks, chunks)
+ if err != nil {
+ return v1.SeriesWithBloom{}, err
+ }
+ return bloomForChks, nil
+}
+
+// TODO Test this when bloom block size check is implemented
+func buildBlockFromBlooms(
+ ctx context.Context,
+ logger log.Logger,
+ builder blockBuilder,
+ blooms v1.Iterator[v1.SeriesWithBloom],
+ job Job,
+) (bloomshipper.Block, error) {
+ // Ensure the context has not been canceled (i.e. compactor shutdown has been triggered).
+ if err := ctx.Err(); err != nil {
+ return bloomshipper.Block{}, err
+ }
+
+ checksum, err := builder.BuildFrom(blooms)
+ if err != nil {
+ level.Error(logger).Log("msg", "failed writing to bloom", "err", err)
+ return bloomshipper.Block{}, err
+ }
+
+ data, err := builder.Data()
+ if err != nil {
+ level.Error(logger).Log("msg", "failed reading bloom data", "err", err)
+ return bloomshipper.Block{}, err
+ }
+
+ block := bloomshipper.Block{
+ BlockRef: bloomshipper.BlockRef{
+ Ref: bloomshipper.Ref{
+ TenantID: job.tenantID,
+ TableName: job.tableName,
+ MinFingerprint: uint64(job.minFp),
+ MaxFingerprint: uint64(job.maxFp),
+ StartTimestamp: int64(job.from),
+ EndTimestamp: int64(job.through),
+ Checksum: checksum,
+ },
+ IndexPath: job.indexPath,
+ },
+ Data: data,
+ }
+
+ return block, nil
+}
+
+func createLocalDirName(workingDir string, job Job) string {
+ dir := fmt.Sprintf("bloomBlock-%s-%s-%s-%s-%s-%s", job.tableName, job.tenantID, job.minFp, job.maxFp, job.from, job.through)
+ return filepath.Join(workingDir, dir)
+}
+
+// compactNewChunks builds a new bloom block from the chunks referenced by the given job.
+func compactNewChunks(
+ ctx context.Context,
+ logger log.Logger,
+ job Job,
+ fpRate float64,
+ bt compactorTokenizer,
+ storeClient chunkClient,
+ builder blockBuilder,
+) (bloomshipper.Block, error) {
+ // Ensure the context has not been canceled (i.e. compactor shutdown has been triggered).
+ if err := ctx.Err(); err != nil {
+ return bloomshipper.Block{}, err
+ }
+
+ bloomIter := newLazyBloomBuilder(ctx, job, storeClient, bt, fpRate)
+
+ // Build and upload bloomBlock to storage
+ block, err := buildBlockFromBlooms(ctx, logger, builder, bloomIter, job)
+ if err != nil {
+ level.Error(logger).Log("msg", "failed building bloomBlocks", "err", err)
+ return bloomshipper.Block{}, err
+ }
+
+ return block, nil
+}
+
+type lazyBloomBuilder struct {
+ ctx context.Context
+ metas v1.Iterator[seriesMeta]
+ tenant string
+ client chunkClient
+ bt compactorTokenizer
+ fpRate float64
+
+ cur v1.SeriesWithBloom // returned by At()
+ err error // returned by Err()
+}
+
+// newLazyBloomBuilder returns an iterator that yields v1.SeriesWithBloom
+// which are used by the blockBuilder to write a bloom block.
+// We use an iterator to avoid loading all blooms into memory before building the block.
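+//
+// A minimal usage sketch (illustrative only):
+//   it := newLazyBloomBuilder(ctx, job, client, bt, fpRate)
+//   for it.Next() {
+//       swb := it.At() // one v1.SeriesWithBloom per series meta in the job
+//   }
+//   // it.Err() is io.EOF once all series metas have been consumed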
+func newLazyBloomBuilder(ctx context.Context, job Job, client chunkClient, bt compactorTokenizer, fpRate float64) *lazyBloomBuilder {
+ return &lazyBloomBuilder{
+ ctx: ctx,
+ metas: v1.NewSliceIter(job.seriesMetas),
+ client: client,
+ tenant: job.tenantID,
+ bt: bt,
+ fpRate: fpRate,
+ }
+}
+
+func (it *lazyBloomBuilder) Next() bool {
+ if !it.metas.Next() {
+ it.err = io.EOF
+ it.cur = v1.SeriesWithBloom{}
+ return false
+ }
+ meta := it.metas.At()
+
+ // Get chunks data from list of chunkRefs
+ chks, err := it.client.GetChunks(it.ctx, makeChunkRefs(meta.chunkRefs, it.tenant, meta.seriesFP))
+ if err != nil {
+ it.err = err
+ it.cur = v1.SeriesWithBloom{}
+ return false
+ }
+
+ it.cur, err = buildBloomFromSeries(meta, it.fpRate, it.bt, chks)
+ if err != nil {
+ it.err = err
+ it.cur = v1.SeriesWithBloom{}
+ return false
+ }
+ return true
+}
+
+func (it *lazyBloomBuilder) At() v1.SeriesWithBloom {
+ return it.cur
+}
+
+func (it *lazyBloomBuilder) Err() error {
+ return it.err
+}
diff --git a/pkg/bloomcompactor/chunkcompactor_test.go b/pkg/bloomcompactor/chunkcompactor_test.go
new file mode 100644
index 0000000000000..4d19f24417d47
--- /dev/null
+++ b/pkg/bloomcompactor/chunkcompactor_test.go
@@ -0,0 +1,225 @@
+package bloomcompactor
+
+import (
+ "context"
+ "io"
+ "testing"
+ "time"
+
+ "github.com/go-kit/log"
+ "github.com/prometheus/common/model"
+ "github.com/prometheus/prometheus/model/labels"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/grafana/loki/pkg/chunkenc"
+ "github.com/grafana/loki/pkg/push"
+ v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
+ "github.com/grafana/loki/pkg/storage/chunk"
+ "github.com/grafana/loki/pkg/storage/stores/shipper/indexshipper/tsdb/index"
+)
+
+var (
+ userID = "userID"
+ fpRate = 0.01
+
+ from = model.Earliest
+ to = model.Latest
+
+ table = "test_table"
+ indexPath = "index_test_table"
+
+ testBlockSize = 256 * 1024
+ testTargetSize = 1500 * 1024
+)
+
+func createTestChunk(fp model.Fingerprint, lb labels.Labels) chunk.Chunk {
+ memChunk := chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, chunkenc.EncSnappy, chunkenc.ChunkHeadFormatFor(chunkenc.ChunkFormatV4), testBlockSize, testTargetSize)
+ if err := memChunk.Append(&push.Entry{
+ Timestamp: time.Unix(0, 1),
+ Line: "this is a log line",
+ }); err != nil {
+ panic(err)
+ }
+ c := chunk.NewChunk(userID,
+ fp, lb, chunkenc.NewFacade(memChunk, testBlockSize, testTargetSize), from, to)
+
+ return c
+}
+
+// Given a seriesMeta and corresponding chunks verify SeriesWithBloom can be built
+func TestChunkCompactor_BuildBloomFromSeries(t *testing.T) {
+ label := labels.FromStrings("foo", "bar")
+ fp := model.Fingerprint(label.Hash())
+ seriesMeta := seriesMeta{
+ seriesFP: fp,
+ seriesLbs: label,
+ }
+
+ chunks := []chunk.Chunk{createTestChunk(fp, label)}
+
+ mbt := mockBloomTokenizer{}
+ bloom, err := buildBloomFromSeries(seriesMeta, fpRate, &mbt, chunks)
+ require.NoError(t, err)
+ require.Equal(t, seriesMeta.seriesFP, bloom.Series.Fingerprint)
+ require.Equal(t, chunks, mbt.chunks)
+}
+
+func TestChunkCompactor_CompactNewChunks(t *testing.T) {
+ // Setup
+ logger := log.NewNopLogger()
+ label := labels.FromStrings("foo", "bar")
+ fp1 := model.Fingerprint(100)
+ fp2 := model.Fingerprint(999)
+ fp3 := model.Fingerprint(200)
+
+ chunkRef1 := index.ChunkMeta{
+ Checksum: 1,
+ MinTime: 1,
+ MaxTime: 99,
+ }
+
+ chunkRef2 := index.ChunkMeta{
+ Checksum: 2,
+ MinTime: 10,
+ MaxTime: 999,
+ }
+
+ seriesMetas := []seriesMeta{
+ {
+ seriesFP: fp1,
+ seriesLbs: label,
+ chunkRefs: []index.ChunkMeta{chunkRef1},
+ },
+ {
+ seriesFP: fp2,
+ seriesLbs: label,
+ chunkRefs: []index.ChunkMeta{chunkRef1, chunkRef2},
+ },
+ {
+ seriesFP: fp3,
+ seriesLbs: label,
+ chunkRefs: []index.ChunkMeta{chunkRef1, chunkRef1, chunkRef2},
+ },
+ }
+
+ job := NewJob(userID, table, indexPath, seriesMetas)
+
+ mbt := mockBloomTokenizer{}
+ mcc := mockChunkClient{}
+ pbb := mockPersistentBlockBuilder{}
+
+ // Run Compaction
+ compactedBlock, err := compactNewChunks(context.Background(), logger, job, fpRate, &mbt, &mcc, &pbb)
+
+ // Validate Compaction Succeeds
+ require.NoError(t, err)
+ require.NotNil(t, compactedBlock)
+
+ // Validate Compacted Block has expected data
+ require.Equal(t, job.tenantID, compactedBlock.TenantID)
+ require.Equal(t, job.tableName, compactedBlock.TableName)
+ require.Equal(t, uint64(fp1), compactedBlock.MinFingerprint)
+ require.Equal(t, uint64(fp2), compactedBlock.MaxFingerprint)
+ require.Equal(t, chunkRef1.MinTime, compactedBlock.StartTimestamp)
+ require.Equal(t, chunkRef2.MaxTime, compactedBlock.EndTimestamp)
+ require.Equal(t, indexPath, compactedBlock.IndexPath)
+}
+
+func TestLazyBloomBuilder(t *testing.T) {
+ label := labels.FromStrings("foo", "bar")
+ fp1 := model.Fingerprint(100)
+ fp2 := model.Fingerprint(999)
+ fp3 := model.Fingerprint(200)
+
+ chunkRef1 := index.ChunkMeta{
+ Checksum: 1,
+ MinTime: 1,
+ MaxTime: 99,
+ }
+
+ chunkRef2 := index.ChunkMeta{
+ Checksum: 2,
+ MinTime: 10,
+ MaxTime: 999,
+ }
+
+ seriesMetas := []seriesMeta{
+ {
+ seriesFP: fp1,
+ seriesLbs: label,
+ chunkRefs: []index.ChunkMeta{chunkRef1},
+ },
+ {
+ seriesFP: fp2,
+ seriesLbs: label,
+ chunkRefs: []index.ChunkMeta{chunkRef1, chunkRef2},
+ },
+ {
+ seriesFP: fp3,
+ seriesLbs: label,
+ chunkRefs: []index.ChunkMeta{chunkRef1, chunkRef1, chunkRef2},
+ },
+ }
+
+ job := NewJob(userID, table, indexPath, seriesMetas)
+
+ mbt := &mockBloomTokenizer{}
+ mcc := &mockChunkClient{}
+
+ it := newLazyBloomBuilder(context.Background(), job, mcc, mbt, fpRate)
+
+ // first seriesMeta has 1 chunk
+ require.True(t, it.Next())
+ require.Equal(t, 1, mcc.requestCount)
+ require.Equal(t, 1, mcc.chunkCount)
+ require.Equal(t, fp1, it.At().Series.Fingerprint)
+
+ // second seriesMeta has 2 chunks
+ require.True(t, it.Next())
+ require.Equal(t, 2, mcc.requestCount)
+ require.Equal(t, 3, mcc.chunkCount)
+ require.Equal(t, fp2, it.At().Series.Fingerprint)
+
+ // third seriesMeta has 3 chunks
+ require.True(t, it.Next())
+ require.Equal(t, 3, mcc.requestCount)
+ require.Equal(t, 6, mcc.chunkCount)
+ require.Equal(t, fp3, it.At().Series.Fingerprint)
+
+ // iterator is done
+ require.False(t, it.Next())
+ require.Equal(t, io.EOF, it.Err())
+ require.Equal(t, v1.SeriesWithBloom{}, it.At())
+}
+
+type mockBloomTokenizer struct {
+ chunks []chunk.Chunk
+}
+
+func (mbt *mockBloomTokenizer) PopulateSeriesWithBloom(_ *v1.SeriesWithBloom, c []chunk.Chunk) error {
+ mbt.chunks = append(mbt.chunks, c...)
+ return nil
+}
+
+type mockChunkClient struct {
+ requestCount int
+ chunkCount int
+}
+
+func (mcc *mockChunkClient) GetChunks(_ context.Context, chks []chunk.Chunk) ([]chunk.Chunk, error) {
+ mcc.requestCount++
+ mcc.chunkCount += len(chks)
+ return nil, nil
+}
+
+type mockPersistentBlockBuilder struct {
+}
+
+func (pbb *mockPersistentBlockBuilder) BuildFrom(_ v1.Iterator[v1.SeriesWithBloom]) (uint32, error) {
+ return 0, nil
+}
+
+func (pbb *mockPersistentBlockBuilder) Data() (io.ReadSeekCloser, error) {
+ return nil, nil
+}
diff --git a/pkg/bloomcompactor/config.go b/pkg/bloomcompactor/config.go
index 57721850d2927..3bdf65d3e68aa 100644
--- a/pkg/bloomcompactor/config.go
+++ b/pkg/bloomcompactor/config.go
@@ -44,4 +44,7 @@ type Limits interface {
BloomCompactorMaxTableAge(tenantID string) time.Duration
BloomCompactorMinTableAge(tenantID string) time.Duration
BloomCompactorEnabled(tenantID string) bool
+ BloomNGramLength(tenantID string) int
+ BloomNGramSkip(tenantID string) int
+ BloomFalsePositiveRate(tenantID string) float64
}
diff --git a/pkg/bloomcompactor/job.go b/pkg/bloomcompactor/job.go
index 3084b7db7c34b..bd43293c73cb6 100644
--- a/pkg/bloomcompactor/job.go
+++ b/pkg/bloomcompactor/job.go
@@ -1,20 +1,27 @@
package bloomcompactor
import (
+ "math"
+
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
"github.com/grafana/loki/pkg/storage/stores/shipper/indexshipper/tsdb/index"
)
+type seriesMeta struct {
+ seriesFP model.Fingerprint
+ seriesLbs labels.Labels
+ chunkRefs []index.ChunkMeta
+}
+
type Job struct {
tableName, tenantID, indexPath string
- seriesLbs labels.Labels
- seriesFP model.Fingerprint
- chunks []index.ChunkMeta
+ seriesMetas []seriesMeta
// We compute them lazily. Unset value is 0.
from, through model.Time
+ minFp, maxFp model.Fingerprint
}
// NewJob returns a new compaction Job.
@@ -22,80 +29,57 @@ func NewJob(
tenantID string,
tableName string,
indexPath string,
- seriesFP model.Fingerprint,
- seriesLbs labels.Labels,
- chunks []index.ChunkMeta,
+ seriesMetas []seriesMeta,
) Job {
- return Job{
- tenantID: tenantID,
- tableName: tableName,
- indexPath: indexPath,
- seriesFP: seriesFP,
- seriesLbs: seriesLbs,
- chunks: chunks,
+ j := Job{
+ tenantID: tenantID,
+ tableName: tableName,
+ indexPath: indexPath,
+ seriesMetas: seriesMetas,
}
+ j.computeBounds()
+ return j
}
func (j *Job) String() string {
- return j.tableName + "_" + j.tenantID + "_" + j.seriesFP.String()
-}
-
-func (j *Job) TableName() string {
- return j.tableName
-}
-
-func (j *Job) Tenant() string {
- return j.tenantID
-}
-
-func (j *Job) Fingerprint() model.Fingerprint {
- return j.seriesFP
-}
-
-func (j *Job) Chunks() []index.ChunkMeta {
- return j.chunks
-}
-
-func (j *Job) Labels() labels.Labels {
- return j.seriesLbs
-}
-
-func (j *Job) IndexPath() string {
- return j.indexPath
-}
-
-func (j *Job) From() model.Time {
- if j.from == 0 {
- j.computeFromThrough()
- }
- return j.from
+ return j.tableName + "_" + j.tenantID + "_"
}
-func (j *Job) Through() model.Time {
- if j.through == 0 {
- j.computeFromThrough()
- }
- return j.through
-}
-
-func (j *Job) computeFromThrough() {
- if len(j.chunks) == 0 {
+func (j *Job) computeBounds() {
+ if len(j.seriesMetas) == 0 {
return
}
minFrom := model.Latest
maxThrough := model.Earliest
- for _, chunk := range j.chunks {
- from, through := chunk.Bounds()
- if minFrom > from {
- minFrom = from
+ minFp := model.Fingerprint(math.MaxInt64)
+ maxFp := model.Fingerprint(0)
+
+ for _, seriesMeta := range j.seriesMetas {
+ // calculate timestamp boundaries
+ for _, chunkRef := range seriesMeta.chunkRefs {
+ from, through := chunkRef.Bounds()
+ if minFrom > from {
+ minFrom = from
+ }
+ if maxThrough < through {
+ maxThrough = through
+ }
}
- if maxThrough < through {
- maxThrough = through
+
+ // calculate fingerprint boundaries
+ if minFp > seriesMeta.seriesFP {
+ minFp = seriesMeta.seriesFP
+ }
+ if maxFp < seriesMeta.seriesFP {
+ maxFp = seriesMeta.seriesFP
}
}
j.from = minFrom
j.through = maxThrough
+
+ j.minFp = minFp
+ j.maxFp = maxFp
}
diff --git a/pkg/bloomcompactor/mergecompactor.go b/pkg/bloomcompactor/mergecompactor.go
new file mode 100644
index 0000000000000..94682579ac9e2
--- /dev/null
+++ b/pkg/bloomcompactor/mergecompactor.go
@@ -0,0 +1,149 @@
+package bloomcompactor
+
+import (
+ "context"
+
+ "github.com/grafana/dskit/concurrency"
+
+ "github.com/grafana/loki/pkg/logproto"
+ "github.com/grafana/loki/pkg/storage/chunk"
+
+ "github.com/go-kit/log"
+ "github.com/go-kit/log/level"
+
+ v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
+ "github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper"
+)
+
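+// makeSeriesIterFromSeriesMeta converts the job's seriesMetas into v1.Series
+// values and wraps them in a slice iterator for the merge builder.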
+func makeSeriesIterFromSeriesMeta(job Job) *v1.SliceIter[*v1.Series] {
+ // Satisfy types for series
+ seriesFromSeriesMeta := make([]*v1.Series, len(job.seriesMetas))
+
+ for i, s := range job.seriesMetas {
+ crefs := make([]v1.ChunkRef, len(s.chunkRefs))
+ for j, chk := range s.chunkRefs {
+ crefs[j] = v1.ChunkRef{
+ Start: chk.From(),
+ End: chk.Through(),
+ Checksum: chk.Checksum,
+ }
+ }
+ seriesFromSeriesMeta[i] = &v1.Series{
+ Fingerprint: s.seriesFP,
+ Chunks: crefs,
+ }
+ }
+ return v1.NewSliceIter(seriesFromSeriesMeta)
+}
+
+func makeBlockIterFromBlocks(ctx context.Context, logger log.Logger,
+ bloomShipperClient bloomshipper.Client, blocksToUpdate []bloomshipper.BlockRef,
+ workingDir string) ([]v1.PeekingIterator[*v1.SeriesWithBloom], []string, error) {
+
+ // Download the existing blocks that need compaction
+ blockIters := make([]v1.PeekingIterator[*v1.SeriesWithBloom], len(blocksToUpdate))
+ blockPaths := make([]string, len(blocksToUpdate))
+
+ err := concurrency.ForEachJob(ctx, len(blocksToUpdate), len(blocksToUpdate), func(ctx context.Context, i int) error {
+ b := blocksToUpdate[i]
+
+ lazyBlock, err := bloomShipperClient.GetBlock(ctx, b)
+ if err != nil {
+ level.Error(logger).Log("msg", "failed downloading block", "err", err)
+ return err
+ }
+
+ blockPath, err := bloomshipper.UncompressBloomBlock(&lazyBlock, workingDir, logger)
+ if err != nil {
+ level.Error(logger).Log("msg", "failed extracting block", "err", err)
+ return err
+ }
+ blockPaths[i] = blockPath
+
+ reader := v1.NewDirectoryBlockReader(blockPath)
+ block := v1.NewBlock(reader)
+ blockQuerier := v1.NewBlockQuerier(block)
+
+ blockIters[i] = v1.NewPeekingIter[*v1.SeriesWithBloom](blockQuerier)
+ return nil
+ })
+
+ if err != nil {
+ return nil, nil, err
+ }
+ return blockIters, blockPaths, nil
+}
+
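+// createPopulateFunc returns the callback used by the merge builder to fill a
+// bloom for a series: it downloads the series' chunks via the store client and
+// feeds them through the bloom tokenizer.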
+func createPopulateFunc(ctx context.Context, logger log.Logger, job Job, storeClient storeClient, bt *v1.BloomTokenizer) func(series *v1.Series, bloom *v1.Bloom) error {
+ return func(series *v1.Series, bloom *v1.Bloom) error {
+ bloomForChks := v1.SeriesWithBloom{
+ Series: series,
+ Bloom: bloom,
+ }
+
+ // Satisfy types for chunks
+ chunkRefs := make([]chunk.Chunk, len(series.Chunks))
+ for i, chk := range series.Chunks {
+ chunkRefs[i] = chunk.Chunk{
+ ChunkRef: logproto.ChunkRef{
+ Fingerprint: uint64(series.Fingerprint),
+ UserID: job.tenantID,
+ From: chk.Start,
+ Through: chk.End,
+ Checksum: chk.Checksum,
+ },
+ }
+ }
+
+ chks, err := storeClient.chunk.GetChunks(ctx, chunkRefs)
+ if err != nil {
+ level.Error(logger).Log("msg", "failed downloading chunks", "err", err)
+ return err
+ }
+ err = bt.PopulateSeriesWithBloom(&bloomForChks, chks)
+ if err != nil {
+ return err
+ }
+ return nil
+ }
+}
+
+func mergeCompactChunks(logger log.Logger,
+ populate func(*v1.Series, *v1.Bloom) error,
+ mergeBlockBuilder *PersistentBlockBuilder,
+ blockIters []v1.PeekingIterator[*v1.SeriesWithBloom], seriesIter *v1.SliceIter[*v1.Series],
+ job Job) (bloomshipper.Block, error) {
+
+ mergeBuilder := v1.NewMergeBuilder(
+ blockIters,
+ seriesIter,
+ populate)
+
+ checksum, err := mergeBlockBuilder.mergeBuild(mergeBuilder)
+ if err != nil {
+ level.Error(logger).Log("msg", "failed merging the blooms", "err", err)
+ return bloomshipper.Block{}, err
+ }
+ data, err := mergeBlockBuilder.Data()
+ if err != nil {
+ level.Error(logger).Log("msg", "failed reading bloom data", "err", err)
+ return bloomshipper.Block{}, err
+ }
+
+ mergedBlock := bloomshipper.Block{
+ BlockRef: bloomshipper.BlockRef{
+ Ref: bloomshipper.Ref{
+ TenantID: job.tenantID,
+ TableName: job.tableName,
+ MinFingerprint: uint64(job.minFp),
+ MaxFingerprint: uint64(job.maxFp),
+ StartTimestamp: int64(job.from),
+ EndTimestamp: int64(job.through),
+ Checksum: checksum,
+ },
+ IndexPath: job.indexPath,
+ },
+ Data: data,
+ }
+ return mergedBlock, nil
+}
diff --git a/pkg/bloomcompactor/metrics.go b/pkg/bloomcompactor/metrics.go
index 9baa7128d25e1..c043b8103c31d 100644
--- a/pkg/bloomcompactor/metrics.go
+++ b/pkg/bloomcompactor/metrics.go
@@ -18,9 +18,9 @@ type metrics struct {
compactionRunSkippedTenants prometheus.Counter
compactionRunSucceededTenants prometheus.Counter
compactionRunFailedTenants prometheus.Counter
- compactionRunUnownedJobs prometheus.Counter
- compactionRunSucceededJobs prometheus.Counter
- compactionRunFailedJobs prometheus.Counter
+ compactionRunJobStarted prometheus.Counter
+ compactionRunJobSuceeded prometheus.Counter
+ compactionRunJobFailed prometheus.Counter
compactionRunInterval prometheus.Gauge
compactorRunning prometheus.Gauge
}
@@ -69,22 +69,22 @@ func newMetrics(r prometheus.Registerer) *metrics {
Name: "tenants_failed",
Help: "Number of tenants failed processing during the current compaction run",
}),
- compactionRunUnownedJobs: promauto.With(r).NewCounter(prometheus.CounterOpts{
+ compactionRunJobStarted: promauto.With(r).NewCounter(prometheus.CounterOpts{
Namespace: metricsNamespace,
Subsystem: metricsSubsystem,
- Name: "jobs_unowned",
- Help: "Number of unowned jobs skipped during the current compaction run",
+ Name: "job_started",
+ Help: "Number of jobs started processing during the current compaction run",
}),
- compactionRunSucceededJobs: promauto.With(r).NewCounter(prometheus.CounterOpts{
+ compactionRunJobSuceeded: promauto.With(r).NewCounter(prometheus.CounterOpts{
Namespace: metricsNamespace,
Subsystem: metricsSubsystem,
- Name: "jobs_succeeded",
+ Name: "job_succeeded",
Help: "Number of jobs successfully processed during the current compaction run",
}),
- compactionRunFailedJobs: promauto.With(r).NewCounter(prometheus.CounterOpts{
+ compactionRunJobFailed: promauto.With(r).NewCounter(prometheus.CounterOpts{
Namespace: metricsNamespace,
Subsystem: metricsSubsystem,
- Name: "jobs_failed",
+ Name: "job_failed",
Help: "Number of jobs failed processing during the current compaction run",
}),
compactionRunInterval: promauto.With(r).NewGauge(prometheus.GaugeOpts{
diff --git a/pkg/bloomcompactor/sharding.go b/pkg/bloomcompactor/sharding.go
index 093c0c3ac9a31..9b3009bd50652 100644
--- a/pkg/bloomcompactor/sharding.go
+++ b/pkg/bloomcompactor/sharding.go
@@ -14,7 +14,7 @@ var (
// ShardingStrategy describes whether compactor "owns" given user or job.
type ShardingStrategy interface {
util_ring.TenantSharding
- OwnsJob(job Job) (bool, error)
+ OwnsFingerprint(tenantID string, fp uint64) (bool, error)
}
type ShuffleShardingStrategy struct {
@@ -31,13 +31,28 @@ func NewShuffleShardingStrategy(r *ring.Ring, ringLifecycler *ring.BasicLifecycl
return &s
}
-// OwnsJob makes sure only a single compactor should execute the job.
-func (s *ShuffleShardingStrategy) OwnsJob(job Job) (bool, error) {
- if !s.OwnsTenant(job.Tenant()) {
+// OwnsFingerprint makes sure only a single compactor processes the fingerprint.
+func (s *ShuffleShardingStrategy) OwnsFingerprint(tenantID string, fp uint64) (bool, error) {
+ if !s.OwnsTenant(tenantID) {
return false, nil
}
- tenantRing := s.GetTenantSubRing(job.Tenant())
+ tenantRing := s.GetTenantSubRing(tenantID)
fpSharding := util_ring.NewFingerprintShuffleSharding(tenantRing, s.ringLifeCycler, RingOp)
- return fpSharding.OwnsFingerprint(uint64(job.Fingerprint()))
+ return fpSharding.OwnsFingerprint(fp)
+}
+
+// NoopStrategy is an implementation of the ShardingStrategy that does not
+// filter anything.
+type NoopStrategy struct {
+ util_ring.NoopStrategy
+}
+
+// OwnsFingerprint implements TenantShuffleSharding.
+func (s *NoopStrategy) OwnsFingerprint(_ string, _ uint64) (bool, error) {
+ return true, nil
+}
+
+func NewNoopStrategy() *NoopStrategy {
+ return &NoopStrategy{NoopStrategy: util_ring.NoopStrategy{}}
}
diff --git a/pkg/bloomcompactor/sharding_test.go b/pkg/bloomcompactor/sharding_test.go
index 1bd7b198648e1..fc77536f6061f 100644
--- a/pkg/bloomcompactor/sharding_test.go
+++ b/pkg/bloomcompactor/sharding_test.go
@@ -13,7 +13,6 @@ import (
"github.com/prometheus/prometheus/model/labels"
"github.com/stretchr/testify/require"
- "github.com/grafana/loki/pkg/storage/stores/shipper/indexshipper/downloads"
util_log "github.com/grafana/loki/pkg/util/log"
lokiring "github.com/grafana/loki/pkg/util/ring"
"github.com/grafana/loki/pkg/validation"
@@ -44,7 +43,7 @@ func TestShuffleSharding(t *testing.T) {
require.NoError(t, ringManager.StartAsync(context.Background()))
sharding := NewShuffleShardingStrategy(ringManager.Ring, ringManager.RingLifecycler, mockLimits{
- Limits: overrides,
+ Overrides: overrides,
bloomCompactorShardSize: shardSize,
})
@@ -91,13 +90,13 @@ func TestShuffleSharding(t *testing.T) {
for j := 0; j < jobsPerTenant; j++ {
lbls := labels.FromStrings("namespace", fmt.Sprintf("namespace-%d", j))
- job := NewJob(tenant, "", "", model.Fingerprint(lbls.Hash()), lbls, nil)
- ownsJob, err := shard.OwnsJob(job)
+ fp := model.Fingerprint(lbls.Hash())
+ ownsFingerprint, err := shard.OwnsFingerprint(tenant, uint64(fp))
require.NoError(t, err)
var jobOwnedByOther int
for _, other := range otherShards {
- otherOwns, err := other.OwnsJob(job)
+ otherOwns, err := other.OwnsFingerprint(tenant, uint64(fp))
require.NoError(t, err)
if otherOwns {
jobOwnedByOther++
@@ -106,7 +105,7 @@ func TestShuffleSharding(t *testing.T) {
// If this shard owns the job, no one else should own the job.
// And if this shard doesn't own the job, only one of the other shards should own the job.
- if ownsJob {
+ if ownsFingerprint {
require.Equal(t, 0, jobOwnedByOther)
ownedJobs++
} else {
@@ -128,22 +127,10 @@ func TestShuffleSharding(t *testing.T) {
}
type mockLimits struct {
- downloads.Limits
+ *validation.Overrides
bloomCompactorShardSize int
}
func (m mockLimits) BloomCompactorShardSize(_ string) int {
return m.bloomCompactorShardSize
}
-
-func (m mockLimits) BloomCompactorMaxTableAge(_ string) time.Duration {
- return 0
-}
-
-func (m mockLimits) BloomCompactorMinTableAge(_ string) time.Duration {
- return 0
-}
-
-func (m mockLimits) BloomCompactorEnabled(_ string) bool {
- return false
-}
diff --git a/pkg/bloomcompactor/table_utils.go b/pkg/bloomcompactor/table_utils.go
new file mode 100644
index 0000000000000..91940f4cfd455
--- /dev/null
+++ b/pkg/bloomcompactor/table_utils.go
@@ -0,0 +1,37 @@
+package bloomcompactor
+
+import (
+ "sort"
+
+ "github.com/prometheus/common/model"
+
+ "github.com/grafana/loki/pkg/compactor/retention"
+ "github.com/grafana/loki/pkg/storage/config"
+)
+
+func getIntervalsForTables(tables []string) map[string]model.Interval {
+ tablesIntervals := make(map[string]model.Interval, len(tables))
+ for _, table := range tables {
+ tablesIntervals[table] = retention.ExtractIntervalFromTableName(table)
+ }
+
+ return tablesIntervals
+}
+
+func sortTablesByRange(tables []string, intervals map[string]model.Interval) {
+ sort.Slice(tables, func(i, j int) bool {
+ // sort by start time in descending order to get a most-recent-first ordering
+ return intervals[tables[i]].Start.After(intervals[tables[j]].Start)
+ })
+}
+
+// TODO: comes from pkg/compactor/compactor.go
+func schemaPeriodForTable(cfg config.SchemaConfig, tableName string) (config.PeriodConfig, bool) {
+ tableInterval := retention.ExtractIntervalFromTableName(tableName)
+ schemaCfg, err := cfg.SchemaForTime(tableInterval.Start)
+ if err != nil || schemaCfg.IndexTables.TableFor(tableInterval.Start) != tableName {
+ return config.PeriodConfig{}, false
+ }
+
+ return schemaCfg, true
+}
diff --git a/pkg/bloomcompactor/utils.go b/pkg/bloomcompactor/utils.go
new file mode 100644
index 0000000000000..4b9c3ff541fe2
--- /dev/null
+++ b/pkg/bloomcompactor/utils.go
@@ -0,0 +1,37 @@
+package bloomcompactor
+
+import "github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper"
+
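+// matchingBlocks returns the metas that belong to the job's table and the
+// block refs from those metas that still need compaction; previously
+// tombstoned blocks and blocks whose index path matches the job's are skipped.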
+func matchingBlocks(metas []bloomshipper.Meta, job Job) ([]bloomshipper.Meta, []bloomshipper.BlockRef) {
+ var metasMatchingJob []bloomshipper.Meta
+ var blocksMatchingJob []bloomshipper.BlockRef
+ oldTombstonedBlockRefs := make(map[bloomshipper.BlockRef]struct{})
+
+ for _, meta := range metas {
+ if meta.TableName != job.tableName {
+ continue
+ }
+ metasMatchingJob = append(metasMatchingJob, meta)
+
+ for _, tombstonedBlockRef := range meta.Tombstones {
+ oldTombstonedBlockRefs[tombstonedBlockRef] = struct{}{}
+ }
+ }
+
+ for _, meta := range metasMatchingJob {
+ for _, blockRef := range meta.Blocks {
+ if _, ok := oldTombstonedBlockRefs[blockRef]; ok {
+ // skip any previously tombstoned blockRefs
+ continue
+ }
+
+ if blockRef.IndexPath == job.indexPath {
+ // index has not changed, no compaction needed
+ continue
+ }
+ blocksMatchingJob = append(blocksMatchingJob, blockRef)
+ }
+ }
+
+ return metasMatchingJob, blocksMatchingJob
+}
diff --git a/pkg/bloomgateway/bloomgateway.go b/pkg/bloomgateway/bloomgateway.go
index b0b01c34dbaf6..d7963daf50b43 100644
--- a/pkg/bloomgateway/bloomgateway.go
+++ b/pkg/bloomgateway/bloomgateway.go
@@ -23,6 +23,10 @@ of line filter expressions.
|
bloomgateway.Gateway
|
+ queue.RequestQueue
+ |
+ bloomgateway.Worker
+ |
bloomshipper.Store
|
bloomshipper.Shipper
@@ -56,6 +60,7 @@ import (
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/queue"
"github.com/grafana/loki/pkg/storage"
+ v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
"github.com/grafana/loki/pkg/storage/config"
"github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper"
"github.com/grafana/loki/pkg/util"
@@ -63,13 +68,15 @@ import (
)
var errGatewayUnhealthy = errors.New("bloom-gateway is unhealthy in the ring")
-var errInvalidTenant = errors.New("invalid tenant in chunk refs")
-// TODO(chaudum): Make these configurable
const (
- numWorkers = 4
- maxTasksPerTenant = 1024
pendingTasksInitialCap = 1024
+ metricsSubsystem = "bloom_gateway"
+)
+
+var (
+ // responsesPool pools slices of v1.Output with capacities [64, 128, 256, ..., 65536]
+ responsesPool = queue.NewSlicePool[v1.Output](1<<6, 1<<16, 2)
)
type metrics struct {
@@ -77,17 +84,17 @@ type metrics struct {
inflightRequests prometheus.Summary
}
-func newMetrics(subsystem string, registerer prometheus.Registerer) *metrics {
+func newMetrics(registerer prometheus.Registerer, namespace, subsystem string) *metrics {
return &metrics{
queueDuration: promauto.With(registerer).NewHistogram(prometheus.HistogramOpts{
- Namespace: constants.Loki,
+ Namespace: namespace,
Subsystem: subsystem,
Name: "queue_duration_seconds",
Help: "Time spent by tasks in queue before getting picked up by a worker.",
Buckets: prometheus.DefBuckets,
}),
inflightRequests: promauto.With(registerer).NewSummary(prometheus.SummaryOpts{
- Namespace: constants.Loki,
+ Namespace: namespace,
Subsystem: subsystem,
Name: "inflight_tasks",
Help: "Number of inflight tasks (either queued or processing) sampled at a regular interval. Quantile buckets keep track of inflight tasks over the last 60s.",
@@ -98,40 +105,6 @@ func newMetrics(subsystem string, registerer prometheus.Registerer) *metrics {
}
}
-// Task is the data structure that is enqueued to the internal queue and queued by query workers
-type Task struct {
- // ID is a lexcographically sortable unique identifier of the task
- ID ulid.ULID
- // Tenant is the tenant ID
- Tenant string
- // Request is the original request
- Request *logproto.FilterChunkRefRequest
- // ErrCh is a send-only channel to write an error to
- ErrCh chan<- error
- // ResCh is a send-only channel to write partial responses to
- ResCh chan<- *logproto.GroupedChunkRefs
-}
-
-// newTask returns a new Task that can be enqueued to the task queue.
-// As additional arguments, it returns a result and an error channel, as well
-// as an error if the instantiation fails.
-func newTask(tenantID string, req *logproto.FilterChunkRefRequest) (Task, chan *logproto.GroupedChunkRefs, chan error, error) {
- key, err := ulid.New(ulid.Now(), nil)
- if err != nil {
- return Task{}, nil, nil, err
- }
- errCh := make(chan error, 1)
- resCh := make(chan *logproto.GroupedChunkRefs, 1)
- task := Task{
- ID: key,
- Tenant: tenantID,
- Request: req,
- ErrCh: errCh,
- ResCh: resCh,
- }
- return task, resCh, errCh, nil
-}
-
// SyncMap is a map structure which can be synchronized using the RWMutex
type SyncMap[k comparable, v any] struct {
sync.RWMutex
@@ -169,14 +142,16 @@ func makePendingTasks(n int) *pendingTasks {
type Gateway struct {
services.Service
- cfg Config
- logger log.Logger
- metrics *metrics
+ cfg Config
+ logger log.Logger
+
+ metrics *metrics
+ workerMetrics *workerMetrics
+ queueMetrics *queue.Metrics
- queue *queue.RequestQueue
- queueMetrics *queue.Metrics
- activeUsers *util.ActiveUsersCleanupService
- bloomStore bloomshipper.Store
+ queue *queue.RequestQueue
+ activeUsers *util.ActiveUsersCleanupService
+ bloomStore bloomshipper.Store
sharding ShardingStrategy
@@ -184,20 +159,36 @@ type Gateway struct {
serviceMngr *services.Manager
serviceWatcher *services.FailureWatcher
+
+ workerConfig workerConfig
+}
+
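+// fixedQueueLimits returns a static maximum number of consumers per tenant,
+// independent of the tenant and of the number of connected queriers.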
+type fixedQueueLimits struct {
+ maxConsumers int
+}
+
+func (l *fixedQueueLimits) MaxConsumers(_ string, _ int) int {
+ return l.maxConsumers
}
// New returns a new instance of the Bloom Gateway.
-func New(cfg Config, schemaCfg config.SchemaConfig, storageCfg storage.Config, shardingStrategy ShardingStrategy, cm storage.ClientMetrics, logger log.Logger, reg prometheus.Registerer) (*Gateway, error) {
+func New(cfg Config, schemaCfg config.SchemaConfig, storageCfg storage.Config, overrides Limits, shardingStrategy ShardingStrategy, cm storage.ClientMetrics, logger log.Logger, reg prometheus.Registerer) (*Gateway, error) {
g := &Gateway{
cfg: cfg,
logger: logger,
- metrics: newMetrics("bloom_gateway", reg),
+ metrics: newMetrics(reg, constants.Loki, metricsSubsystem),
sharding: shardingStrategy,
pendingTasks: makePendingTasks(pendingTasksInitialCap),
+ workerConfig: workerConfig{
+ maxWaitTime: 200 * time.Millisecond,
+ maxItems: 100,
+ processBlocksSequentially: false,
+ },
+ workerMetrics: newWorkerMetrics(reg, constants.Loki, metricsSubsystem),
+ queueMetrics: queue.NewMetrics(reg, constants.Loki, metricsSubsystem),
}
- g.queueMetrics = queue.NewMetrics(reg, constants.Loki, "bloom_gateway")
- g.queue = queue.NewRequestQueue(maxTasksPerTenant, time.Minute, g.queueMetrics)
+ g.queue = queue.NewRequestQueue(cfg.MaxOutstandingPerTenant, time.Minute, &fixedQueueLimits{100}, g.queueMetrics)
g.activeUsers = util.NewActiveUsersCleanupWithDefaultValues(g.queueMetrics.Cleanup)
client, err := bloomshipper.NewBloomClient(schemaCfg.Configs, storageCfg, cm)
@@ -205,7 +196,7 @@ func New(cfg Config, schemaCfg config.SchemaConfig, storageCfg storage.Config, s
return nil, err
}
- bloomShipper, err := bloomshipper.NewShipper(client, storageCfg.BloomShipperConfig, logger)
+ bloomShipper, err := bloomshipper.NewShipper(client, storageCfg.BloomShipperConfig, overrides, logger, reg)
if err != nil {
return nil, err
}
@@ -215,19 +206,32 @@ func New(cfg Config, schemaCfg config.SchemaConfig, storageCfg storage.Config, s
return nil, err
}
+ // We need to keep a reference to be able to call Stop() on shutdown of the gateway.
g.bloomStore = bloomStore
+ if err := g.initServices(); err != nil {
+ return nil, err
+ }
+ g.Service = services.NewBasicService(g.starting, g.running, g.stopping).WithName("bloom-gateway")
+
+ return g, nil
+}
+
+func (g *Gateway) initServices() error {
+ var err error
svcs := []services.Service{g.queue, g.activeUsers}
+ for i := 0; i < g.cfg.WorkerConcurrency; i++ {
+ id := fmt.Sprintf("bloom-query-worker-%d", i)
+ w := newWorker(id, g.workerConfig, g.queue, g.bloomStore, g.pendingTasks, g.logger, g.workerMetrics)
+ svcs = append(svcs, w)
+ }
g.serviceMngr, err = services.NewManager(svcs...)
if err != nil {
- return nil, err
+ return err
}
g.serviceWatcher = services.NewFailureWatcher()
g.serviceWatcher.WatchManager(g.serviceMngr)
-
- g.Service = services.NewBasicService(g.starting, g.running, g.stopping).WithName("bloom-gateway")
-
- return g, nil
+ return nil
}
func (g *Gateway) starting(ctx context.Context) error {
@@ -245,10 +249,6 @@ func (g *Gateway) starting(ctx context.Context) error {
return errors.Wrap(err, "unable to start bloom gateway subservices")
}
- for i := 0; i < numWorkers; i++ {
- go g.startWorker(ctx, fmt.Sprintf("worker-%d", i))
- }
-
return nil
}
@@ -278,52 +278,6 @@ func (g *Gateway) stopping(_ error) error {
return services.StopManagerAndAwaitStopped(context.Background(), g.serviceMngr)
}
-// This is just a dummy implementation of the worker!
-// TODO(chaudum): Implement worker that dequeues multiple pending tasks and
-// multiplexes them prior to execution.
-func (g *Gateway) startWorker(_ context.Context, id string) error {
- level.Info(g.logger).Log("msg", "starting worker", "worker", id)
-
- g.queue.RegisterConsumerConnection(id)
- defer g.queue.UnregisterConsumerConnection(id)
-
- idx := queue.StartIndexWithLocalQueue
-
- for {
- ctx := context.Background()
- item, newIdx, err := g.queue.Dequeue(ctx, idx, id)
- if err != nil {
- if err != queue.ErrStopped {
- level.Error(g.logger).Log("msg", "failed to dequeue task", "worker", id, "err", err)
- continue
- }
- level.Info(g.logger).Log("msg", "stopping worker", "worker", id)
- return err
- }
- task, ok := item.(Task)
- if !ok {
- level.Error(g.logger).Log("msg", "failed to cast to Task", "item", item)
- continue
- }
-
- idx = newIdx
- level.Info(g.logger).Log("msg", "dequeued task", "worker", id, "task", task.ID)
- g.pendingTasks.Delete(task.ID)
-
- r := task.Request
- if len(r.Filters) > 0 {
- r.Refs, err = g.bloomStore.FilterChunkRefs(ctx, task.Tenant, r.From.Time(), r.Through.Time(), r.Refs, r.Filters...)
- }
- if err != nil {
- task.ErrCh <- err
- } else {
- for _, ref := range r.Refs {
- task.ResCh <- ref
- }
- }
- }
-}
-
// FilterChunkRefs implements BloomGatewayServer
func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunkRefRequest) (*logproto.FilterChunkRefResponse, error) {
tenantID, err := tenant.TenantID(ctx)
@@ -331,10 +285,11 @@ func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunk
return nil, err
}
- for _, ref := range req.Refs {
- if ref.Tenant != tenantID {
- return nil, errors.Wrapf(errInvalidTenant, "expected chunk refs from tenant %s, got tenant %s", tenantID, ref.Tenant)
- }
+ // Shortcut if request does not contain filters
+ if len(req.Filters) == 0 {
+ return &logproto.FilterChunkRefResponse{
+ ChunkRefs: req.Refs,
+ }, nil
}
// Sort ChunkRefs by fingerprint in ascending order
@@ -342,31 +297,73 @@ func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunk
return req.Refs[i].Fingerprint < req.Refs[j].Fingerprint
})
- task, resCh, errCh, err := newTask(tenantID, req)
+ task, resCh, errCh, err := NewTask(tenantID, req)
if err != nil {
return nil, err
}
g.activeUsers.UpdateUserTimestamp(tenantID, time.Now())
level.Info(g.logger).Log("msg", "enqueue task", "task", task.ID)
- g.queue.Enqueue(tenantID, []string{}, task, 100, func() {
+ g.queue.Enqueue(tenantID, []string{}, task, func() {
// When enqueuing, we also add the task to the pending tasks
g.pendingTasks.Add(task.ID, task)
})
- response := make([]*logproto.GroupedChunkRefs, 0, len(req.Refs))
+ requestCount := len(req.Refs)
+ responses := responsesPool.Get(requestCount)
+ defer responsesPool.Put(responses)
+
for {
select {
case <-ctx.Done():
- return nil, ctx.Err()
+ return nil, errors.Wrap(ctx.Err(), "waiting for results")
case err := <-errCh:
- return nil, err
+ return nil, errors.Wrap(err, "waiting for results")
case res := <-resCh:
- level.Info(g.logger).Log("msg", "got result", "task", task.ID, "tenant", tenantID, "res", res)
+ responses = append(responses, res)
+ // log line is helpful for debugging tests
+ // level.Debug(g.logger).Log("msg", "got partial result", "task", task.ID, "tenant", tenantID, "fp", uint64(res.Fp), "chunks", res.Removals.Len(), "progress", fmt.Sprintf("%d/%d", len(responses), requestCount))
// wait for all parts of the full response
- response = append(response, res)
- if len(response) == len(req.Refs) {
- return &logproto.FilterChunkRefResponse{ChunkRefs: response}, nil
+ if len(responses) == requestCount {
+ for _, o := range responses {
+ if o.Removals.Len() == 0 {
+ continue
+ }
+ // we must not remove items from req.Refs while the worker may still iterate over them
+ g.removeNotMatchingChunks(req, o)
+ }
+ return &logproto.FilterChunkRefResponse{ChunkRefs: req.Refs}, nil
+ }
+ }
+ }
+}
+
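+// removeNotMatchingChunks removes the chunks listed in res.Removals from the
+// request group whose fingerprint equals res.Fp; if every chunk of that group
+// is removed, the whole group is dropped from req.Refs.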
+func (g *Gateway) removeNotMatchingChunks(req *logproto.FilterChunkRefRequest, res v1.Output) {
+ // binary search index of fingerprint
+ idx := sort.Search(len(req.Refs), func(i int) bool {
+ return req.Refs[i].Fingerprint >= uint64(res.Fp)
+ })
+
+ // fingerprint not found
+ if idx >= len(req.Refs) {
+ level.Error(g.logger).Log("msg", "index out of range", "idx", idx, "len", len(req.Refs), "fp", uint64(res.Fp))
+ return
+ }
+
+ // if all chunks of a fingerprint are removed
+ // then remove the whole group from the response
+ if len(req.Refs[idx].Refs) == res.Removals.Len() {
+ req.Refs[idx] = nil // avoid leaking pointer
+ req.Refs = append(req.Refs[:idx], req.Refs[idx+1:]...)
+ return
+ }
+
+ for i := range res.Removals {
+ toRemove := res.Removals[i]
+ for j := range req.Refs[idx].Refs {
+ if toRemove.Checksum == req.Refs[idx].Refs[j].Checksum {
+ req.Refs[idx].Refs[j] = nil // avoid leaking pointer
+ req.Refs[idx].Refs = append(req.Refs[idx].Refs[:j], req.Refs[idx].Refs[j+1:]...)
}
}
}
diff --git a/pkg/bloomgateway/bloomgateway_test.go b/pkg/bloomgateway/bloomgateway_test.go
index c0d9ffdfae230..fd50a8c5fb2db 100644
--- a/pkg/bloomgateway/bloomgateway_test.go
+++ b/pkg/bloomgateway/bloomgateway_test.go
@@ -2,11 +2,14 @@ package bloomgateway
import (
"context"
+ "fmt"
+ "math/rand"
"os"
"testing"
"time"
"github.com/go-kit/log"
+ "github.com/grafana/dskit/flagext"
"github.com/grafana/dskit/kv"
"github.com/grafana/dskit/kv/consul"
"github.com/grafana/dskit/ring"
@@ -18,9 +21,12 @@ import (
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/storage"
+ v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
"github.com/grafana/loki/pkg/storage/chunk/client/local"
"github.com/grafana/loki/pkg/storage/config"
+ "github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper"
lokiring "github.com/grafana/loki/pkg/util/ring"
+ "github.com/grafana/loki/pkg/validation"
)
func parseDayTime(s string) config.DayTime {
@@ -33,17 +39,35 @@ func parseDayTime(s string) config.DayTime {
}
}
+func mktime(s string) model.Time {
+ ts, err := time.Parse("2006-01-02 15:04", s)
+ if err != nil {
+ panic(err)
+ }
+ return model.TimeFromUnix(ts.Unix())
+}
+
func groupRefs(t *testing.T, chunkRefs []*logproto.ChunkRef) []*logproto.GroupedChunkRefs {
t.Helper()
grouped := make([]*logproto.GroupedChunkRefs, 0, len(chunkRefs))
return groupChunkRefs(chunkRefs, grouped)
}
+func newLimits() *validation.Overrides {
+ limits := validation.Limits{}
+ flagext.DefaultValues(&limits)
+ limits.BloomGatewayEnabled = true
+
+ overrides, _ := validation.NewOverrides(limits, nil)
+ return overrides
+}
+
func TestBloomGateway_StartStopService(t *testing.T) {
ss := NewNoopStrategy()
logger := log.NewNopLogger()
reg := prometheus.NewRegistry()
+ limits := newLimits()
cm := storage.NewClientMetrics()
t.Cleanup(cm.Unregister)
@@ -80,9 +104,11 @@ func TestBloomGateway_StartStopService(t *testing.T) {
},
ReplicationFactor: 1,
},
+ WorkerConcurrency: 4,
+ MaxOutstandingPerTenant: 1024,
}
- gw, err := New(cfg, schemaCfg, storageCfg, ss, cm, logger, reg)
+ gw, err := New(cfg, schemaCfg, storageCfg, limits, ss, cm, logger, reg)
require.NoError(t, err)
err = services.StartAndAwaitRunning(context.Background(), gw)
@@ -90,7 +116,7 @@ func TestBloomGateway_StartStopService(t *testing.T) {
// Wait for workers to connect to queue
time.Sleep(50 * time.Millisecond)
- require.Equal(t, float64(numWorkers), gw.queue.GetConnectedConsumersMetric())
+ require.Equal(t, float64(cfg.WorkerConcurrency), gw.queue.GetConnectedConsumersMetric())
err = services.StopAndAwaitTerminated(context.Background(), gw)
require.NoError(t, err)
@@ -103,6 +129,7 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) {
ss := NewNoopStrategy()
logger := log.NewLogfmtLogger(os.Stderr)
reg := prometheus.NewRegistry()
+ limits := newLimits()
cm := storage.NewClientMetrics()
t.Cleanup(cm.Unregister)
@@ -138,11 +165,13 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) {
},
ReplicationFactor: 1,
},
+ WorkerConcurrency: 4,
+ MaxOutstandingPerTenant: 1024,
}
t.Run("returns unfiltered chunk refs if no filters provided", func(t *testing.T) {
reg := prometheus.NewRegistry()
- gw, err := New(cfg, schemaCfg, storageCfg, ss, cm, logger, reg)
+ gw, err := New(cfg, schemaCfg, storageCfg, limits, ss, cm, logger, reg)
require.NoError(t, err)
err = services.StartAndAwaitRunning(context.Background(), gw)
@@ -152,8 +181,7 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) {
require.NoError(t, err)
})
- ts, _ := time.Parse("2006-01-02 15:04", "2023-10-03 10:00")
- now := model.TimeFromUnix(ts.Unix())
+ now := mktime("2023-10-03 10:00")
chunkRefs := []*logproto.ChunkRef{
{Fingerprint: 3000, UserID: tenantID, From: now.Add(-24 * time.Hour), Through: now.Add(-23 * time.Hour), Checksum: 1},
@@ -186,33 +214,9 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) {
}, res)
})
- t.Run("returns error if chunk refs do not belong to tenant", func(t *testing.T) {
- reg := prometheus.NewRegistry()
- gw, err := New(cfg, schemaCfg, storageCfg, ss, cm, logger, reg)
- require.NoError(t, err)
-
- ts, _ := time.Parse("2006-01-02 15:04", "2023-10-03 10:00")
- now := model.TimeFromUnix(ts.Unix())
-
- chunkRefs := []*logproto.ChunkRef{
- {Fingerprint: 1000, UserID: tenantID, From: now.Add(-22 * time.Hour), Through: now.Add(-21 * time.Hour), Checksum: 1},
- {Fingerprint: 2000, UserID: "other", From: now.Add(-20 * time.Hour), Through: now.Add(-19 * time.Hour), Checksum: 2},
- }
- req := &logproto.FilterChunkRefRequest{
- From: now.Add(-24 * time.Hour),
- Through: now,
- Refs: groupRefs(t, chunkRefs),
- }
-
- ctx := user.InjectOrgID(context.Background(), tenantID)
- _, err = gw.FilterChunkRefs(ctx, req)
- require.Error(t, err)
- require.Equal(t, "expected chunk refs from tenant test, got tenant other: invalid tenant in chunk refs", err.Error())
- })
-
t.Run("gateway tracks active users", func(t *testing.T) {
reg := prometheus.NewRegistry()
- gw, err := New(cfg, schemaCfg, storageCfg, ss, cm, logger, reg)
+ gw, err := New(cfg, schemaCfg, storageCfg, limits, ss, cm, logger, reg)
require.NoError(t, err)
err = services.StartAndAwaitRunning(context.Background(), gw)
@@ -222,8 +226,7 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) {
require.NoError(t, err)
})
- ts, _ := time.Parse("2006-01-02 15:04", "2023-10-03 10:00")
- now := model.TimeFromUnix(ts.Unix())
+ now := mktime("2023-10-03 10:00")
tenants := []string{"tenant-a", "tenant-b", "tenant-c"}
for idx, tenantID := range tenants {
@@ -240,6 +243,9 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) {
From: now.Add(-24 * time.Hour),
Through: now,
Refs: groupRefs(t, chunkRefs),
+ Filters: []*logproto.LineFilterExpression{
+ {Operator: 1, Match: "foo"},
+ },
}
ctx := user.InjectOrgID(context.Background(), tenantID)
_, err = gw.FilterChunkRefs(ctx, req)
@@ -247,4 +253,190 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) {
}
require.ElementsMatch(t, tenants, gw.activeUsers.ActiveUsers())
})
+
+ t.Run("use fuse queriers to filter chunks", func(t *testing.T) {
+ for _, tc := range []struct {
+ name string
+ value bool
+ }{
+ {"sequentially", true},
+ {"callback", false},
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+
+ reg := prometheus.NewRegistry()
+ gw, err := New(cfg, schemaCfg, storageCfg, limits, ss, cm, logger, reg)
+ require.NoError(t, err)
+
+ now := mktime("2023-10-03 10:00")
+
+ // replace store implementation and re-initialize workers and sub-services
+ bqs, data := createBlockQueriers(t, 5, now.Add(-8*time.Hour), now, 0, 1024)
+ gw.bloomStore = newMockBloomStore(bqs)
+ gw.workerConfig.processBlocksSequentially = tc.value
+ err = gw.initServices()
+ require.NoError(t, err)
+
+ t.Log("process blocks in worker sequentially", gw.workerConfig.processBlocksSequentially)
+
+ err = services.StartAndAwaitRunning(context.Background(), gw)
+ require.NoError(t, err)
+ t.Cleanup(func() {
+ err = services.StopAndAwaitTerminated(context.Background(), gw)
+ require.NoError(t, err)
+ })
+
+ chunkRefs := createQueryInputFromBlockData(t, tenantID, data, 100)
+
+ t.Run("no match - return empty response", func(t *testing.T) {
+ inputChunkRefs := groupRefs(t, chunkRefs)
+ req := &logproto.FilterChunkRefRequest{
+ From: now.Add(-8 * time.Hour),
+ Through: now,
+ Refs: inputChunkRefs,
+ Filters: []*logproto.LineFilterExpression{
+ {Operator: 1, Match: "does not match"},
+ },
+ }
+ ctx := user.InjectOrgID(context.Background(), tenantID)
+ res, err := gw.FilterChunkRefs(ctx, req)
+ require.NoError(t, err)
+
+ expectedResponse := &logproto.FilterChunkRefResponse{
+ ChunkRefs: []*logproto.GroupedChunkRefs{},
+ }
+ require.Equal(t, expectedResponse, res)
+ })
+
+ t.Run("match - return filtered", func(t *testing.T) {
+ inputChunkRefs := groupRefs(t, chunkRefs)
+ // hack to get indexed key for a specific series
+ // the indexed key range for a series is defined as
+ // i * keysPerSeries ... i * keysPerSeries + keysPerSeries - 1
+ // where i is the nth series in a block
+ // fortunately, i is also used as Checksum for the single chunk of a series
+ // see mkBasicSeriesWithBlooms() in pkg/storage/bloom/v1/test_util.go
+ key := inputChunkRefs[0].Refs[0].Checksum*1000 + 500
+
+ req := &logproto.FilterChunkRefRequest{
+ From: now.Add(-8 * time.Hour),
+ Through: now,
+ Refs: inputChunkRefs,
+ Filters: []*logproto.LineFilterExpression{
+ {Operator: 1, Match: fmt.Sprint(key)},
+ },
+ }
+ ctx := user.InjectOrgID(context.Background(), tenantID)
+ res, err := gw.FilterChunkRefs(ctx, req)
+ require.NoError(t, err)
+
+ expectedResponse := &logproto.FilterChunkRefResponse{
+ ChunkRefs: inputChunkRefs[:1],
+ }
+ require.Equal(t, expectedResponse, res)
+ })
+
+ })
+ }
+
+ })
+}
+
+func createBlockQueriers(t *testing.T, numBlocks int, from, through model.Time, minFp, maxFp model.Fingerprint) ([]bloomshipper.BlockQuerierWithFingerprintRange, [][]v1.SeriesWithBloom) {
+ t.Helper()
+ step := (maxFp - minFp) / model.Fingerprint(numBlocks)
+ bqs := make([]bloomshipper.BlockQuerierWithFingerprintRange, 0, numBlocks)
+ series := make([][]v1.SeriesWithBloom, 0, numBlocks)
+ for i := 0; i < numBlocks; i++ {
+ fromFp := minFp + (step * model.Fingerprint(i))
+ throughFp := fromFp + step - 1
+ // last block needs to include maxFp
+ if i == numBlocks-1 {
+ throughFp = maxFp
+ }
+ blockQuerier, data := v1.MakeBlockQuerier(t, fromFp, throughFp, from, through)
+ bq := bloomshipper.BlockQuerierWithFingerprintRange{
+ BlockQuerier: blockQuerier,
+ MinFp: fromFp,
+ MaxFp: throughFp,
+ }
+ bqs = append(bqs, bq)
+ series = append(series, data)
+ }
+ return bqs, series
+}
+
+func newMockBloomStore(bqs []bloomshipper.BlockQuerierWithFingerprintRange) *mockBloomStore {
+ return &mockBloomStore{bqs: bqs}
+}
+
+type mockBloomStore struct {
+ bqs []bloomshipper.BlockQuerierWithFingerprintRange
+}
+
+var _ bloomshipper.Store = &mockBloomStore{}
+
+// GetBlockQueriersForBlockRefs implements bloomshipper.Store.
+func (s *mockBloomStore) GetBlockQueriersForBlockRefs(_ context.Context, _ string, _ []bloomshipper.BlockRef) ([]bloomshipper.BlockQuerierWithFingerprintRange, error) {
+ return s.bqs, nil
+}
+
+// GetBlockRefs implements bloomshipper.Store.
+func (s *mockBloomStore) GetBlockRefs(_ context.Context, tenant string, _, _ time.Time) ([]bloomshipper.BlockRef, error) {
+ blocks := make([]bloomshipper.BlockRef, 0, len(s.bqs))
+ for i := range s.bqs {
+ blocks = append(blocks, bloomshipper.BlockRef{
+ Ref: bloomshipper.Ref{
+ MinFingerprint: uint64(s.bqs[i].MinFp),
+ MaxFingerprint: uint64(s.bqs[i].MaxFp),
+ TenantID: tenant,
+ },
+ })
+ }
+ return blocks, nil
+}
+
+// GetBlockQueriers implements bloomshipper.Store.
+func (s *mockBloomStore) GetBlockQueriers(_ context.Context, _ string, _, _ time.Time, _ []uint64) ([]bloomshipper.BlockQuerierWithFingerprintRange, error) {
+ return s.bqs, nil
+}
+
+func (s *mockBloomStore) Stop() {}
+
+// ForEach implements bloomshipper.Store.
+func (s *mockBloomStore) ForEach(_ context.Context, _ string, _ []bloomshipper.BlockRef, callback bloomshipper.ForEachBlockCallback) error {
+ shuffled := make([]bloomshipper.BlockQuerierWithFingerprintRange, len(s.bqs))
+ _ = copy(shuffled, s.bqs)
+
+ rand.Shuffle(len(shuffled), func(i, j int) {
+ shuffled[i], shuffled[j] = shuffled[j], shuffled[i]
+ })
+
+ for _, bq := range shuffled {
+ // ignore errors in the mock
+ _ = callback(bq.BlockQuerier, uint64(bq.MinFp), uint64(bq.MaxFp))
+ }
+ return nil
+}
+
+func createQueryInputFromBlockData(t *testing.T, tenant string, data [][]v1.SeriesWithBloom, nthSeries int) []*logproto.ChunkRef {
+ t.Helper()
+ n := 0
+ res := make([]*logproto.ChunkRef, 0)
+ for i := range data {
+ for j := range data[i] {
+ if n%nthSeries == 0 {
+ chk := data[i][j].Series.Chunks[0]
+ res = append(res, &logproto.ChunkRef{
+ Fingerprint: uint64(data[i][j].Series.Fingerprint),
+ UserID: tenant,
+ From: chk.Start,
+ Through: chk.End,
+ Checksum: chk.Checksum,
+ })
+ }
+ n++
+ }
+ }
+ return res
}
diff --git a/pkg/bloomgateway/cache.go b/pkg/bloomgateway/cache.go
new file mode 100644
index 0000000000000..fe40b87e95488
--- /dev/null
+++ b/pkg/bloomgateway/cache.go
@@ -0,0 +1,217 @@
+package bloomgateway
+
+import (
+ "context"
+ "flag"
+ "sort"
+ "time"
+
+ "github.com/go-kit/log"
+ "github.com/prometheus/common/model"
+ "golang.org/x/exp/slices"
+ "google.golang.org/grpc"
+
+ "github.com/grafana/loki/pkg/logproto"
+ "github.com/grafana/loki/pkg/storage/chunk/cache"
+ "github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
+)
+
+const (
+ cacheParallelism = 1
+)
+
+type CacheConfig struct {
+ resultscache.Config `yaml:",inline"`
+}
+
+// RegisterFlags registers flags.
+func (cfg *CacheConfig) RegisterFlags(f *flag.FlagSet) {
+ cfg.RegisterFlagsWithPrefix("bloom-gateway-client.cache.", f)
+}
+
+func (cfg *CacheConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
+ cfg.Config.RegisterFlagsWithPrefix(f, prefix)
+}
+
+type CacheLimits interface {
+ resultscache.Limits
+ BloomGatewayCacheKeyInterval(tenantID string) time.Duration
+}
+
+type keyGen struct {
+ CacheLimits
+}
+
+func newCacheKeyGen(limits CacheLimits) keyGen {
+ return keyGen{limits}
+}
+
+func (k keyGen) GenerateCacheKey(ctx context.Context, tenant string, r resultscache.Request) string {
+ return resultscache.ConstSplitter(k.BloomGatewayCacheKeyInterval(tenant)).GenerateCacheKey(ctx, tenant, r)
+}
+
+type extractor struct{}
+
+func newExtractor() extractor {
+ return extractor{}
+}
+
+// Extract extracts the subset of a response that falls between the `start` and `end` timestamps (in milliseconds).
+// We remove chunks that are not within the given time range.
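+// For example (hypothetical values), with start=10 and end=20 a ref covering
+// [5, 15) is kept, while a ref covering [25, 30) is dropped.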
+func (e extractor) Extract(start, end int64, r resultscache.Response, _, _ int64) resultscache.Response {
+ res := r.(*logproto.FilterChunkRefResponse)
+
+ chunkRefs := make([]*logproto.GroupedChunkRefs, 0, len(res.ChunkRefs))
+ for _, chunkRef := range res.ChunkRefs {
+ refs := make([]*logproto.ShortRef, 0, len(chunkRef.Refs))
+ for _, ref := range chunkRef.Refs {
+ if model.Time(end) < ref.From || ref.Through <= model.Time(start) {
+ continue
+ }
+ refs = append(refs, ref)
+ }
+ if len(refs) > 0 {
+ chunkRefs = append(chunkRefs, &logproto.GroupedChunkRefs{
+ Fingerprint: chunkRef.Fingerprint,
+ Tenant: chunkRef.Tenant,
+ Refs: refs,
+ })
+ }
+ }
+
+ return &logproto.FilterChunkRefResponse{
+ ChunkRefs: chunkRefs,
+ }
+}
+
+type merger struct{}
+
+func newMerger() merger {
+ return merger{}
+}
+
+// MergeResponse merges responses from multiple requests into a single Response
+// We merge all chunks grouped by their fingerprint.
+func (m merger) MergeResponse(responses ...resultscache.Response) (resultscache.Response, error) {
+ var size int
+ for _, r := range responses {
+ res := r.(*logproto.FilterChunkRefResponse)
+ size += len(res.ChunkRefs)
+ }
+
+ chunkRefs := make([]*logproto.GroupedChunkRefs, 0, size)
+ for _, r := range responses {
+ res := r.(*logproto.FilterChunkRefResponse)
+ chunkRefs = append(chunkRefs, res.ChunkRefs...)
+ }
+
+ return &logproto.FilterChunkRefResponse{
+ ChunkRefs: mergeGroupedChunkRefs(chunkRefs),
+ }, nil
+}
+
+// Merge duplicated fingerprints by:
+// 1. Sort the chunkRefs by their stream fingerprint
+// 2. Remove duplicated FPs, appending all chunks to the first fingerprint's chunk list.
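+//
+// For example (hypothetical input), groups with fingerprints [2, 1, 2] are
+// sorted to [1, 2, 2] and collapsed to [1, 2], with the chunk refs of both
+// fingerprint-2 groups merged and de-duplicated into the surviving group.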
+func mergeGroupedChunkRefs(chunkRefs []*logproto.GroupedChunkRefs) []*logproto.GroupedChunkRefs {
+ if len(chunkRefs) <= 1 {
+ return chunkRefs
+ }
+
+ sort.Slice(chunkRefs, func(i, j int) bool {
+ return chunkRefs[i].Fingerprint < chunkRefs[j].Fingerprint
+ })
+
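+ // lastDiffFP is the write index of the last unique fingerprint; groups with
+ // the same fingerprint are folded into it and the slice is compacted in place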
+ var lastDiffFP int
+ for i := 1; i < len(chunkRefs); i++ {
+ if chunkRefs[lastDiffFP].Fingerprint == chunkRefs[i].Fingerprint {
+ chunkRefs[lastDiffFP].Refs = mergeShortRefs(append(chunkRefs[lastDiffFP].Refs, chunkRefs[i].Refs...))
+ } else {
+ lastDiffFP++
+ chunkRefs[lastDiffFP] = chunkRefs[i]
+ }
+ }
+ return chunkRefs[:lastDiffFP+1]
+}
+
+// mergeShortRefs merges short-refs by removing duplicated checksums.
+func mergeShortRefs(refs []*logproto.ShortRef) []*logproto.ShortRef {
+ if len(refs) <= 1 {
+ return refs
+ }
+
+ sort.Slice(refs, func(i, j int) bool {
+ return refs[i].Checksum < refs[j].Checksum
+ })
+ return slices.CompactFunc(refs, func(a, b *logproto.ShortRef) bool {
+ return a.Checksum == b.Checksum
+ })
+}
+
+type ClientCache struct {
+ cache *resultscache.ResultsCache
+ limits CacheLimits
+ logger log.Logger
+}
+
+func NewBloomGatewayClientCacheMiddleware(
+ logger log.Logger,
+ next logproto.BloomGatewayClient,
+ c cache.Cache,
+ limits CacheLimits,
+ cacheGen resultscache.CacheGenNumberLoader,
+ retentionEnabled bool,
+) *ClientCache {
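+ // wrap the downstream gRPC client in a resultscache.Handler so it can be
+ // placed behind the results cache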
+ nextAsHandler := resultscache.HandlerFunc(func(ctx context.Context, cacheReq resultscache.Request) (resultscache.Response, error) {
+ req := cacheReq.(requestWithGrpcCallOptions)
+ return next.FilterChunkRefs(ctx, req.FilterChunkRefRequest, req.grpcCallOptions...)
+ })
+
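+ // the results cache serves cached sub-ranges of a request and only forwards
+ // the missing time ranges to the wrapped client, merging the partial responses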
+ resultsCache := resultscache.NewResultsCache(
+ logger,
+ c,
+ nextAsHandler,
+ newCacheKeyGen(limits),
+ limits,
+ newMerger(),
+ newExtractor(),
+ nil,
+ nil,
+ func(_ context.Context, _ []string, _ resultscache.Request) int {
+ return cacheParallelism
+ },
+ cacheGen,
+ retentionEnabled,
+ )
+
+ return &ClientCache{
+ cache: resultsCache,
+ limits: limits,
+ logger: logger,
+ }
+}
+
+func (c *ClientCache) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunkRefRequest, opts ...grpc.CallOption) (*logproto.FilterChunkRefResponse, error) {
+ cacheReq := requestWithGrpcCallOptions{
+ FilterChunkRefRequest: req,
+ grpcCallOptions: opts,
+ }
+ res, err := c.cache.Do(ctx, cacheReq)
+ if err != nil {
+ return nil, err
+ }
+
+ return res.(*logproto.FilterChunkRefResponse), nil
+}
+
+type requestWithGrpcCallOptions struct {
+ *logproto.FilterChunkRefRequest
+ grpcCallOptions []grpc.CallOption
+}
+
+func (r requestWithGrpcCallOptions) WithStartEndForCache(start time.Time, end time.Time) resultscache.Request {
+ return requestWithGrpcCallOptions{
+ FilterChunkRefRequest: r.FilterChunkRefRequest.WithStartEndForCache(start, end).(*logproto.FilterChunkRefRequest),
+ grpcCallOptions: r.grpcCallOptions,
+ }
+}
diff --git a/pkg/bloomgateway/cache_test.go b/pkg/bloomgateway/cache_test.go
new file mode 100644
index 0000000000000..5a66162000a46
--- /dev/null
+++ b/pkg/bloomgateway/cache_test.go
@@ -0,0 +1,494 @@
+package bloomgateway
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/go-kit/log"
+ "github.com/grafana/dskit/user"
+ "github.com/prometheus/common/model"
+ "github.com/stretchr/testify/require"
+ "google.golang.org/grpc"
+
+ "github.com/grafana/loki/pkg/logproto"
+ "github.com/grafana/loki/pkg/logqlmodel/stats"
+ "github.com/grafana/loki/pkg/storage/chunk/cache"
+ "github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
+ "github.com/grafana/loki/pkg/util/constants"
+)
+
+// templateResponse covers the time range 1000-4000
+var templateResponse = &logproto.FilterChunkRefResponse{
+ ChunkRefs: []*logproto.GroupedChunkRefs{
+ {
+ Fingerprint: 1,
+ Tenant: "fake",
+ Refs: []*logproto.ShortRef{
+ {
+ From: 1000,
+ Through: 1500,
+ Checksum: 10,
+ },
+ {
+ From: 1500,
+ Through: 2500,
+ Checksum: 20,
+ },
+ },
+ },
+ {
+ Fingerprint: 2,
+ Tenant: "fake",
+ Refs: []*logproto.ShortRef{
+ {
+ From: 3000,
+ Through: 4000,
+ Checksum: 30,
+ },
+ {
+ From: 1000,
+ Through: 3000,
+ Checksum: 40,
+ },
+ },
+ },
+ },
+}
+
+func TestExtract(t *testing.T) {
+ for _, tc := range []struct {
+ name string
+ start int64
+ end int64
+ input *logproto.FilterChunkRefResponse
+ expected *logproto.FilterChunkRefResponse
+ }{
+ {
+ name: "start and end out of range",
+ start: 100,
+ end: 200,
+ input: templateResponse,
+ expected: &logproto.FilterChunkRefResponse{
+ ChunkRefs: []*logproto.GroupedChunkRefs{},
+ },
+ },
+ {
+ name: "start spans exact range",
+ start: 1000,
+ end: 4000,
+ input: templateResponse,
+ expected: templateResponse,
+ },
+ {
+ name: "start spans more than range",
+ start: 100,
+ end: 5000,
+ input: templateResponse,
+ expected: templateResponse,
+ },
+ {
+ name: "start and end within range",
+ start: 1700,
+ end: 2700,
+ input: templateResponse,
+ expected: &logproto.FilterChunkRefResponse{
+ ChunkRefs: []*logproto.GroupedChunkRefs{
+ {
+ Fingerprint: 1,
+ Tenant: "fake",
+ Refs: []*logproto.ShortRef{
+ {
+ From: 1500,
+ Through: 2500,
+ Checksum: 20,
+ },
+ },
+ },
+ {
+ Fingerprint: 2,
+ Tenant: "fake",
+ Refs: []*logproto.ShortRef{
+ {
+ From: 1000,
+ Through: 3000,
+ Checksum: 40,
+ },
+ },
+ },
+ },
+ },
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ e := newExtractor()
+ actual := e.Extract(tc.start, tc.end, tc.input, 0, 0)
+ require.Equal(t, tc.expected, actual)
+ })
+ }
+}
+
+func TestMerge(t *testing.T) {
+ for _, tc := range []struct {
+ name string
+ input []*logproto.FilterChunkRefResponse
+ expected *logproto.FilterChunkRefResponse
+ }{
+ {
+ name: "empty input",
+ input: []*logproto.FilterChunkRefResponse{},
+ expected: &logproto.FilterChunkRefResponse{
+ ChunkRefs: []*logproto.GroupedChunkRefs{},
+ },
+ },
+ {
+ name: "single input",
+ input: []*logproto.FilterChunkRefResponse{templateResponse},
+ expected: templateResponse,
+ },
+ {
+ name: "repeating and non-repeating fingerprint with repeating and non-repeating chunks",
+ input: []*logproto.FilterChunkRefResponse{
+ {
+ ChunkRefs: []*logproto.GroupedChunkRefs{
+ {
+ Fingerprint: 1,
+ Tenant: "fake",
+ Refs: []*logproto.ShortRef{
+ {
+ From: 1000,
+ Through: 1500,
+ Checksum: 10,
+ },
+ {
+ From: 1500,
+ Through: 2500,
+ Checksum: 20,
+ },
+ },
+ },
+ {
+ Fingerprint: 2,
+ Tenant: "fake",
+ Refs: []*logproto.ShortRef{
+ {
+ From: 1000,
+ Through: 1500,
+ Checksum: 10,
+ },
+ {
+ From: 1500,
+ Through: 2500,
+ Checksum: 20,
+ },
+ },
+ },
+ },
+ },
+ {
+ ChunkRefs: []*logproto.GroupedChunkRefs{
+ // Same FP as in previous input and same chunks
+ {
+ Fingerprint: 1,
+ Tenant: "fake",
+ Refs: []*logproto.ShortRef{
+ {
+ From: 1000,
+ Through: 1500,
+ Checksum: 10,
+ },
+ {
+ From: 1500,
+ Through: 2500,
+ Checksum: 20,
+ },
+ },
+ },
+ // Same FP as in previous input, but different chunks
+ {
+ Fingerprint: 2,
+ Tenant: "fake",
+ Refs: []*logproto.ShortRef{
+ // Same chunk as in previous input
+ {
+ From: 1500,
+ Through: 2500,
+ Checksum: 20,
+ },
+ // New chunk
+ {
+ From: 2000,
+ Through: 2500,
+ Checksum: 30,
+ },
+ },
+ },
+ // New FP
+ {
+ Fingerprint: 3,
+ Tenant: "fake",
+ Refs: []*logproto.ShortRef{
+ {
+ From: 1000,
+ Through: 1500,
+ Checksum: 10,
+ },
+ {
+ From: 1500,
+ Through: 2500,
+ Checksum: 20,
+ },
+ },
+ },
+ },
+ },
+ {
+ ChunkRefs: []*logproto.GroupedChunkRefs{
+ // Same FP as in previous input and diff chunks
+ {
+ Fingerprint: 2,
+ Tenant: "fake",
+ Refs: []*logproto.ShortRef{
+ {
+ From: 700,
+ Through: 1000,
+ Checksum: 40,
+ },
+ {
+ From: 2000,
+ Through: 2700,
+ Checksum: 50,
+ },
+ },
+ },
+ },
+ },
+ },
+ expected: &logproto.FilterChunkRefResponse{
+ ChunkRefs: []*logproto.GroupedChunkRefs{
+ {
+ Fingerprint: 1,
+ Tenant: "fake",
+ Refs: []*logproto.ShortRef{
+ {
+ From: 1000,
+ Through: 1500,
+ Checksum: 10,
+ },
+ {
+ From: 1500,
+ Through: 2500,
+ Checksum: 20,
+ },
+ },
+ },
+ {
+ Fingerprint: 2,
+ Tenant: "fake",
+ Refs: []*logproto.ShortRef{
+ {
+ From: 1000,
+ Through: 1500,
+ Checksum: 10,
+ },
+ {
+ From: 1500,
+ Through: 2500,
+ Checksum: 20,
+ },
+ {
+ From: 2000,
+ Through: 2500,
+ Checksum: 30,
+ },
+ {
+ From: 700,
+ Through: 1000,
+ Checksum: 40,
+ },
+ {
+ From: 2000,
+ Through: 2700,
+ Checksum: 50,
+ },
+ },
+ },
+ {
+ Fingerprint: 3,
+ Tenant: "fake",
+ Refs: []*logproto.ShortRef{
+ {
+ From: 1000,
+ Through: 1500,
+ Checksum: 10,
+ },
+ {
+ From: 1500,
+ Through: 2500,
+ Checksum: 20,
+ },
+ },
+ },
+ },
+ },
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ input := make([]resultscache.Response, 0, len(tc.input))
+ for _, i := range tc.input {
+ input = append(input, i)
+ }
+
+ m := newMerger()
+ actual, err := m.MergeResponse(input...)
+ require.NoError(t, err)
+ require.Equal(t, tc.expected, actual)
+ })
+ }
+}
+
+func TestCache(t *testing.T) {
+ ctx := user.InjectOrgID(context.Background(), "fake")
+
+ limits := mockLimits{
+ cacheInterval: 15 * time.Minute,
+ }
+
+ cfg := CacheConfig{
+ Config: resultscache.Config{
+ CacheConfig: cache.Config{
+ Cache: cache.NewMockCache(),
+ },
+ },
+ }
+ c, err := cache.New(cfg.CacheConfig, nil, log.NewNopLogger(), stats.BloomFilterCache, constants.Loki)
+ require.NoError(t, err)
+ defer c.Stop()
+
+ chunkRefs := []*logproto.ChunkRef{
+ {
+ Fingerprint: 2,
+ UserID: "fake",
+ From: 1500,
+ Through: 2500,
+ Checksum: 30,
+ },
+ {
+ Fingerprint: 3,
+ UserID: "fake",
+ From: 2500,
+ Through: 3500,
+ },
+ }
+ req := &logproto.FilterChunkRefRequest{
+ From: model.Time(2000),
+ Through: model.Time(3000),
+ Refs: groupRefs(t, chunkRefs),
+ Filters: []*logproto.LineFilterExpression{
+ {Operator: 1, Match: "foo"},
+ },
+ }
+ expectedRes := &logproto.FilterChunkRefResponse{
+ ChunkRefs: groupRefs(t, chunkRefs),
+ }
+
+ server, calls := newMockServer(expectedRes)
+
+ cacheMiddleware := NewBloomGatewayClientCacheMiddleware(
+ log.NewNopLogger(),
+ server,
+ c,
+ limits,
+ nil,
+ false,
+ )
+
+ // First call should go to the server
+ *calls = 0
+ res, err := cacheMiddleware.FilterChunkRefs(ctx, req)
+ require.NoError(t, err)
+ require.Equal(t, 1, *calls)
+ require.Equal(t, expectedRes, res)
+
+ // Second call should go to the cache
+ *calls = 0
+ res, err = cacheMiddleware.FilterChunkRefs(ctx, req)
+ require.NoError(t, err)
+ require.Equal(t, 0, *calls)
+ require.Equal(t, expectedRes, res)
+
+ // Doing a request with new start and end should:
+ // 1. hit the server for the leading time
+ // 2. hit the cache for the cached span
+ // 3. hit the server for the trailing time
+ newChunkRefs := []*logproto.ChunkRef{
+ {
+ Fingerprint: 1,
+ UserID: "fake",
+ From: 1000,
+ Through: 1500,
+ Checksum: 10,
+ },
+ {
+ Fingerprint: 4,
+ UserID: "fake",
+ From: 3500,
+ Through: 4500,
+ },
+ }
+ server.SetResponse(&logproto.FilterChunkRefResponse{
+ ChunkRefs: groupRefs(t, newChunkRefs),
+ })
+ expectedRes = &logproto.FilterChunkRefResponse{
+ ChunkRefs: groupRefs(t, append(chunkRefs, newChunkRefs...)),
+ }
+ req.From = model.Time(100)
+ req.Through = model.Time(5000)
+ *calls = 0
+ res, err = cacheMiddleware.FilterChunkRefs(ctx, req)
+ require.NoError(t, err)
+ require.Equal(t, 2, *calls)
+ require.Equal(t, expectedRes, res)
+
+ // Doing a request again should only hit the cache
+ *calls = 0
+ res, err = cacheMiddleware.FilterChunkRefs(ctx, req)
+ require.NoError(t, err)
+ require.Equal(t, 0, *calls)
+ require.Equal(t, expectedRes, res)
+}
+
+type mockServer struct {
+ calls *int
+ res *logproto.FilterChunkRefResponse
+}
+
+func newMockServer(res *logproto.FilterChunkRefResponse) (*mockServer, *int) {
+ var calls int
+ return &mockServer{
+ calls: &calls,
+ res: res,
+ }, &calls
+}
+
+func (s *mockServer) SetResponse(res *logproto.FilterChunkRefResponse) {
+ s.res = res
+}
+
+func (s *mockServer) FilterChunkRefs(_ context.Context, _ *logproto.FilterChunkRefRequest, _ ...grpc.CallOption) (*logproto.FilterChunkRefResponse, error) {
+ *s.calls++
+ return s.res, nil
+}
+
+type mockLimits struct {
+ cacheFreshness time.Duration
+ cacheInterval time.Duration
+}
+
+func (m mockLimits) MaxCacheFreshness(_ context.Context, _ string) time.Duration {
+ return m.cacheFreshness
+}
+
+func (m mockLimits) BloomGatewayCacheKeyInterval(_ string) time.Duration {
+ return m.cacheInterval
+}
diff --git a/pkg/bloomgateway/client.go b/pkg/bloomgateway/client.go
index 9e43a32d08e76..e1bd59a0e8e57 100644
--- a/pkg/bloomgateway/client.go
+++ b/pkg/bloomgateway/client.go
@@ -7,6 +7,8 @@ import (
"io"
"math"
"math/rand"
+ "sort"
+ "sync"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
@@ -21,12 +23,45 @@ import (
"google.golang.org/grpc"
"google.golang.org/grpc/health/grpc_health_v1"
+ "github.com/grafana/loki/pkg/bloomutils"
"github.com/grafana/loki/pkg/distributor/clientpool"
"github.com/grafana/loki/pkg/logproto"
- "github.com/grafana/loki/pkg/util"
+ "github.com/grafana/loki/pkg/logqlmodel/stats"
+ "github.com/grafana/loki/pkg/queue"
+ v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
+ "github.com/grafana/loki/pkg/storage/chunk/cache"
+ "github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
"github.com/grafana/loki/pkg/util/constants"
)
+var (
+ // groupedChunksRefPool pools slices of logproto.GroupedChunkRefs with capacities [64, 128, 256, ..., 65536]
+ groupedChunksRefPool = queue.NewSlicePool[*logproto.GroupedChunkRefs](1<<6, 1<<16, 2)
+ // ringGetBuffersPool pools ringGetBuffers so ring.MakeBuffersForGet() does not need to be called for each request
+ ringGetBuffersPool = sync.Pool{
+ New: func() interface{} {
+ descs, hosts, zones := ring.MakeBuffersForGet()
+ return &ringGetBuffers{
+ Descs: descs,
+ Hosts: hosts,
+ Zones: zones,
+ }
+ },
+ }
+)
+
+type ringGetBuffers struct {
+ Descs []ring.InstanceDesc
+ Hosts []string
+ Zones []string
+}
+
+func (buf *ringGetBuffers) Reset() {
+ buf.Descs = buf.Descs[:0]
+ buf.Hosts = buf.Hosts[:0]
+ buf.Zones = buf.Zones[:0]
+}
+
// GRPCPool represents a pool of gRPC connections to different bloom gateway instances.
// Interfaces are inlined for simplicity to automatically satisfy interface functions.
type GRPCPool struct {
@@ -68,6 +103,10 @@ type ClientConfig struct {
// Ring is the Bloom Gateway ring used to find the appropriate Bloom Gateway instance
// this client should talk to.
Ring ring.ReadRing `yaml:"-"`
+
+ // Cache configures the cache used to store the results of the Bloom Gateway server.
+ Cache CacheConfig `yaml:"results_cache,omitempty"`
+ CacheResults bool `yaml:"cache_results"`
}
// RegisterFlags registers flags for the Bloom Gateway client configuration.
@@ -78,9 +117,25 @@ func (i *ClientConfig) RegisterFlags(f *flag.FlagSet) {
// RegisterFlagsWithPrefix registers flags for the Bloom Gateway client configuration with a common prefix.
func (i *ClientConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
i.GRPCClientConfig.RegisterFlagsWithPrefix(prefix+"grpc", f)
+ i.Cache.RegisterFlagsWithPrefix(prefix+"cache.", f)
+ f.BoolVar(&i.CacheResults, prefix+"cache_results", false, "Flag to control whether to cache bloom gateway client requests/responses.")
f.BoolVar(&i.LogGatewayRequests, prefix+"log-gateway-requests", false, "Flag to control whether requests sent to the gateway should be logged or not.")
}
+func (i *ClientConfig) Validate() error {
+ if err := i.GRPCClientConfig.Validate(); err != nil {
+ return errors.Wrap(err, "grpc client config")
+ }
+
+ if i.CacheResults {
+ if err := i.Cache.Validate(); err != nil {
+ return errors.Wrap(err, "cache config")
+ }
+ }
+
+ return nil
+}
+
type Client interface {
FilterChunks(ctx context.Context, tenant string, from, through model.Time, groups []*logproto.GroupedChunkRefs, filters ...*logproto.LineFilterExpression) ([]*logproto.GroupedChunkRefs, error)
}
@@ -93,7 +148,15 @@ type GatewayClient struct {
ring ring.ReadRing
}
-func NewGatewayClient(cfg ClientConfig, limits Limits, registerer prometheus.Registerer, logger log.Logger, metricsNamespace string) (*GatewayClient, error) {
+func NewGatewayClient(
+ cfg ClientConfig,
+ limits Limits,
+ registerer prometheus.Registerer,
+ logger log.Logger,
+ metricsNamespace string,
+ cacheGen resultscache.CacheGenNumberLoader,
+ retentionEnabled bool,
+) (*GatewayClient, error) {
latency := promauto.With(registerer).NewHistogramVec(prometheus.HistogramOpts{
Namespace: constants.Loki,
Subsystem: "bloom_gateway",
@@ -107,22 +170,43 @@ func NewGatewayClient(cfg ClientConfig, limits Limits, registerer prometheus.Reg
return nil, err
}
+ var c cache.Cache
+ if cfg.CacheResults {
+ c, err = cache.New(cfg.Cache.CacheConfig, registerer, logger, stats.BloomFilterCache, constants.Loki)
+ if err != nil {
+ return nil, errors.Wrap(err, "new bloom gateway cache")
+ }
+ if cfg.Cache.Compression == "snappy" {
+ c = cache.NewSnappy(c, logger)
+ }
+ }
+
poolFactory := func(addr string) (ringclient.PoolClient, error) {
pool, err := NewBloomGatewayGRPCPool(addr, dialOpts)
if err != nil {
return nil, errors.Wrap(err, "new bloom gateway grpc pool")
}
+
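+ // when caching is enabled, wrap each per-address client with the cache
+ // middleware so repeated requests can be answered from the cache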
+ if cfg.CacheResults {
+ pool.BloomGatewayClient = NewBloomGatewayClientCacheMiddleware(
+ logger,
+ pool.BloomGatewayClient,
+ c,
+ limits,
+ cacheGen,
+ retentionEnabled,
+ )
+ }
+
return pool, nil
}
- c := &GatewayClient{
+ return &GatewayClient{
cfg: cfg,
logger: logger,
limits: limits,
pool: clientpool.NewPool("bloom-gateway", cfg.PoolConfig, cfg.Ring, ringclient.PoolAddrFunc(poolFactory), logger, metricsNamespace),
- }
-
- return c, nil
+ }, nil
}
func shuffleAddrs(addrs []string) []string {
@@ -138,27 +222,28 @@ func (c *GatewayClient) FilterChunks(ctx context.Context, tenant string, from, t
return groups, nil
}
- // Get the addresses of corresponding bloom gateways for each series.
- fingerprints, addrs, err := c.serverAddrsForFingerprints(tenant, groups)
+ subRing := GetShuffleShardingSubring(c.ring, tenant, c.limits)
+ rs, err := subRing.GetAllHealthy(BlocksRead)
if err != nil {
- return nil, err
+ return nil, errors.Wrap(err, "bloom gateway get healthy instances")
}
- // Group chunk refs by addresses of one or more bloom gateways.
- // All chunk refs of series that belong to one and the same bloom gateway are set in one batch.
- streamsByAddr := c.groupStreamsByAddr(groups, addrs)
+ streamsByInst, err := c.groupFingerprintsByServer(groups, subRing, rs.Instances)
+ if err != nil {
+ return nil, err
+ }
- // TODO(chaudum): We might over-allocate for the filtered responses here?
- filteredChunkRefs := make([]*logproto.GroupedChunkRefs, 0, len(fingerprints))
+ filteredChunkRefs := groupedChunksRefPool.Get(len(groups))
+ defer groupedChunksRefPool.Put(filteredChunkRefs)
- for _, item := range streamsByAddr {
+ for _, item := range streamsByInst {
// randomize order of addresses so we don't hotspot the first server in the list
- addrs := shuffleAddrs(item.addrs)
+ addrs := shuffleAddrs(item.instance.addrs)
err := c.doForAddrs(addrs, func(client logproto.BloomGatewayClient) error {
req := &logproto.FilterChunkRefRequest{
From: from,
Through: through,
- Refs: item.refs,
+ Refs: item.fingerprints,
Filters: filters,
}
resp, err := client.FilterChunkRefs(ctx, req)
@@ -175,53 +260,6 @@ func (c *GatewayClient) FilterChunks(ctx context.Context, tenant string, from, t
return filteredChunkRefs, nil
}
-// isEqualStringElements checks if two string slices contain the same elements.
-// The order of the elements is ignored.
-func isEqualStringElements(a, b []string) bool {
- if len(a) != len(b) {
- return false
- }
- for _, s := range a {
- if !util.StringsContain(b, s) {
- return false
- }
- }
- return true
-}
-
-// listContainsAddrs checks if a slice of chunkRefAddrs contains an element
-// whos field addrs contains the same addresses as the given slice of
-// addresses.
-// It returns the index of the element, if found, and a boolean whether the
-// given list contains the given addrs.
-func listContainsAddrs(list []chunkRefsByAddrs, addrs []string) (int, bool) {
- for i, r := range list {
- if isEqualStringElements(r.addrs, addrs) {
- return i, true
- }
- }
- return -1, false
-}
-
-type chunkRefsByAddrs struct {
- addrs []string
- refs []*logproto.GroupedChunkRefs
-}
-
-func (c *GatewayClient) groupStreamsByAddr(groups []*logproto.GroupedChunkRefs, addresses [][]string) []chunkRefsByAddrs {
- res := make([]chunkRefsByAddrs, 0, len(addresses))
- for i := 0; i < len(addresses); i++ {
- addrs := addresses[i]
- refs := groups[i]
- if idx, ok := listContainsAddrs(res, addrs); ok {
- res[idx].refs = append(res[idx].refs, refs)
- } else {
- res = append(res, chunkRefsByAddrs{addrs: addrs, refs: []*logproto.GroupedChunkRefs{refs}})
- }
- }
- return res
-}
-
// doForAddrs sequetially calls the provided callback function fn for each
// address in given slice addrs until the callback function does not return an
// error.
@@ -245,47 +283,127 @@ func (c *GatewayClient) doForAddrs(addrs []string, fn func(logproto.BloomGateway
return err
}
-// serverAddrsForFingerprints returns a slices of server address slices for
-// each fingerprint of given fingerprints.
-// The indexes of the returned slices correspond to each other.
-// Returns an error in case the bloom gateway ring could not get the
-// corresponding replica set for a given fingerprint.
-// Warning: This function becomes inefficient when the number of fingerprints is very large.
-func (c *GatewayClient) serverAddrsForFingerprints(tenantID string, groups []*logproto.GroupedChunkRefs) ([]uint64, [][]string, error) {
- subRing := GetShuffleShardingSubring(c.ring, tenantID, c.limits)
-
- rs, err := subRing.GetAllHealthy(BlocksRead)
+func (c *GatewayClient) groupFingerprintsByServer(groups []*logproto.GroupedChunkRefs, subRing ring.ReadRing, instances []ring.InstanceDesc) ([]instanceWithFingerprints, error) {
+ servers, err := serverAddressesWithTokenRanges(subRing, instances)
if err != nil {
- return nil, nil, errors.Wrap(err, "bloom gateway get healthy instances")
+ return nil, err
}
+ boundedFingerprints := partitionFingerprintsByAddresses(groups, servers)
+ return groupByInstance(boundedFingerprints), nil
+}
+
+func serverAddressesWithTokenRanges(subRing ring.ReadRing, instances []ring.InstanceDesc) ([]addrsWithTokenRange, error) {
+ bufDescs, bufHosts, bufZones := ring.MakeBuffersForGet()
- var numTokens int
- for _, instanceDesc := range rs.Instances {
- numTokens += len(instanceDesc.Tokens)
+ servers := make([]addrsWithTokenRange, 0, len(instances))
+ it := bloomutils.NewInstanceSortMergeIterator(instances)
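+ // the iterator yields the instances in ascending token order, each annotated
+ // with the token range it owns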
+ for it.Next() {
+ // We can use one of the tokens from the token range
+ // to obtain all addresses for that token.
+ rs, err := subRing.Get(it.At().MaxToken, BlocksRead, bufDescs, bufHosts, bufZones)
+ if err != nil {
+ return nil, errors.Wrap(err, "bloom gateway get ring")
+ }
+ servers = append(servers, addrsWithTokenRange{
+ id: it.At().Instance.Id,
+ addrs: rs.GetAddresses(),
+ minToken: it.At().MinToken,
+ maxToken: it.At().MaxToken,
+ })
}
- numFingerprints := len(groups)
- if numFingerprints > int(float64(numTokens)*math.Log2(float64(numFingerprints))) {
- // TODO(chaudum): Implement algorithm in O(n * m * log(k) + n) instead of O(k) by iterating over ring tokens
- // and finding corresponding fingerprint ranges using binary search.
- // n .. number of instances
- // m .. number of tokens per instance
- // k .. number of fingerprints
- level.Warn(c.logger).Log("msg", "using an inefficient algorithm to determin server addresses for fingerprints", "fingerprints", numFingerprints, "tokens", numTokens)
+ if len(servers) > 0 && servers[len(servers)-1].maxToken < math.MaxUint32 {
+ // append the instance for the token range between the greatest token and MaxUint32
+ servers = append(servers, addrsWithTokenRange{
+ id: servers[0].id,
+ addrs: servers[0].addrs,
+ minToken: servers[len(servers)-1].maxToken + 1,
+ maxToken: math.MaxUint32,
+ })
}
+ return servers, nil
+}
- fingerprints := make([]uint64, numFingerprints)
- addresses := make([][]string, numFingerprints)
- bufDescs, bufHosts, bufZones := ring.MakeBuffersForGet()
+type instanceWithToken struct {
+ instance ring.InstanceDesc
+ token uint32
+}
- for idx, key := range groups {
- rs, err = subRing.Get(uint32(key.Fingerprint), BlocksRead, bufDescs, bufHosts, bufZones)
- if err != nil {
- return nil, nil, errors.Wrap(err, "bloom gateway get ring")
+type addrsWithTokenRange struct {
+ id string
+ addrs []string
+ minToken, maxToken uint32
+}
+
+func (s addrsWithTokenRange) cmp(token uint32) v1.BoundsCheck {
+ if token < s.minToken {
+ return v1.Before
+ } else if token > s.maxToken {
+ return v1.After
+ }
+ return v1.Overlap
+}
+
+type instanceWithFingerprints struct {
+ instance addrsWithTokenRange
+ fingerprints []*logproto.GroupedChunkRefs
+}
+
+func partitionFingerprintsByAddresses(fingerprints []*logproto.GroupedChunkRefs, addresses []addrsWithTokenRange) (result []instanceWithFingerprints) {
+ for _, instance := range addresses {
+
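+ // min is the index of the first fingerprint that is not before the instance's token range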
+ min := sort.Search(len(fingerprints), func(i int) bool {
+ return instance.cmp(uint32(fingerprints[i].Fingerprint)) > v1.Before
+ })
+
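+ // max is the index of the first fingerprint that is after the instance's token range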
+ max := sort.Search(len(fingerprints), func(i int) bool {
+ return instance.cmp(uint32(fingerprints[i].Fingerprint)) == v1.After
+ })
+
+ // no fingerprints fall within this instance's token range
+ if min == len(fingerprints) || max == 0 {
+ continue
+ }
+
+ result = append(result, instanceWithFingerprints{instance: instance, fingerprints: fingerprints[min:max]})
+ }
+
+ return result
+}
+
+// groupByInstance groups fingerprints by server instance
+func groupByInstance(boundedFingerprints []instanceWithFingerprints) []instanceWithFingerprints {
+ if len(boundedFingerprints) == 0 {
+ return []instanceWithFingerprints{}
+ }
+
+ result := make([]instanceWithFingerprints, 0, len(boundedFingerprints))
+ pos := make(map[string]int, len(boundedFingerprints))
+
+ for _, cur := range boundedFingerprints {
+ if len(cur.fingerprints) == 0 {
+ continue
+ }
+ // Copy fingerprint slice, otherwise we mutate the original
+ // TODO(chaudum): Use SlicePool
+ tmp := make([]*logproto.GroupedChunkRefs, len(cur.fingerprints))
+ _ = copy(tmp, cur.fingerprints)
+
+ idx, ok := pos[cur.instance.id]
+ if ok {
+ result[idx].fingerprints = append(result[idx].fingerprints, tmp...)
+ continue
}
- fingerprints[idx] = key.Fingerprint
- addresses[idx] = rs.GetAddresses()
+
+ pos[cur.instance.id] = len(result)
+ result = append(result, instanceWithFingerprints{
+ instance: addrsWithTokenRange{
+ id: cur.instance.id,
+ addrs: cur.instance.addrs,
+ },
+ fingerprints: tmp,
+ })
}
- return fingerprints, addresses, nil
+ return result
}
diff --git a/pkg/bloomgateway/client_test.go b/pkg/bloomgateway/client_test.go
index 670c050517163..6edd8fcb406ea 100644
--- a/pkg/bloomgateway/client_test.go
+++ b/pkg/bloomgateway/client_test.go
@@ -1,19 +1,23 @@
package bloomgateway
import (
+ "math"
+ "sort"
"testing"
+ "time"
"github.com/go-kit/log"
"github.com/grafana/dskit/flagext"
+ "github.com/grafana/dskit/ring"
"github.com/prometheus/client_golang/prometheus"
"github.com/stretchr/testify/require"
+ "github.com/grafana/loki/pkg/bloomutils"
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/validation"
)
func TestBloomGatewayClient(t *testing.T) {
-
logger := log.NewNopLogger()
reg := prometheus.NewRegistry()
@@ -24,12 +28,180 @@ func TestBloomGatewayClient(t *testing.T) {
flagext.DefaultValues(&cfg)
t.Run("", func(t *testing.T) {
- _, err := NewGatewayClient(cfg, l, reg, logger, "loki")
+ _, err := NewGatewayClient(cfg, l, reg, logger, "loki", nil, false)
require.NoError(t, err)
})
}
-func TestBloomGatewayClient_GroupStreamsByAddresses(t *testing.T) {
+func TestBloomGatewayClient_PartitionFingerprintsByAddresses(t *testing.T) {
+ // instance token ranges do not overlap
+ t.Run("non-overlapping", func(t *testing.T) {
+ groups := []*logproto.GroupedChunkRefs{
+ {Fingerprint: 0},
+ {Fingerprint: 100},
+ {Fingerprint: 101},
+ {Fingerprint: 200},
+ {Fingerprint: 201},
+ {Fingerprint: 300},
+ {Fingerprint: 301},
+ {Fingerprint: 400},
+ {Fingerprint: 401}, // out of bounds, will be dismissed
+ }
+ servers := []addrsWithTokenRange{
+ {id: "instance-1", addrs: []string{"10.0.0.1"}, minToken: 0, maxToken: 100},
+ {id: "instance-2", addrs: []string{"10.0.0.2"}, minToken: 101, maxToken: 200},
+ {id: "instance-3", addrs: []string{"10.0.0.3"}, minToken: 201, maxToken: 300},
+ {id: "instance-2", addrs: []string{"10.0.0.2"}, minToken: 301, maxToken: 400},
+ }
+
+ // partition fingerprints
+
+ expected := []instanceWithFingerprints{
+ {
+ instance: servers[0],
+ fingerprints: []*logproto.GroupedChunkRefs{
+ {Fingerprint: 0},
+ {Fingerprint: 100},
+ },
+ },
+ {
+ instance: servers[1],
+ fingerprints: []*logproto.GroupedChunkRefs{
+ {Fingerprint: 101},
+ {Fingerprint: 200},
+ },
+ },
+ {
+ instance: servers[2],
+ fingerprints: []*logproto.GroupedChunkRefs{
+ {Fingerprint: 201},
+ {Fingerprint: 300},
+ },
+ },
+ {
+ instance: servers[3],
+ fingerprints: []*logproto.GroupedChunkRefs{
+ {Fingerprint: 301},
+ {Fingerprint: 400},
+ },
+ },
+ }
+
+ bounded := partitionFingerprintsByAddresses(groups, servers)
+ require.Equal(t, expected, bounded)
+
+ // group fingerprints by instance
+
+ expected = []instanceWithFingerprints{
+ {
+ instance: addrsWithTokenRange{id: "instance-1", addrs: []string{"10.0.0.1"}},
+ fingerprints: []*logproto.GroupedChunkRefs{
+ {Fingerprint: 0},
+ {Fingerprint: 100},
+ },
+ },
+ {
+ instance: addrsWithTokenRange{id: "instance-2", addrs: []string{"10.0.0.2"}},
+ fingerprints: []*logproto.GroupedChunkRefs{
+ {Fingerprint: 101},
+ {Fingerprint: 200},
+ {Fingerprint: 301},
+ {Fingerprint: 400},
+ },
+ },
+ {
+ instance: addrsWithTokenRange{id: "instance-3", addrs: []string{"10.0.0.3"}},
+ fingerprints: []*logproto.GroupedChunkRefs{
+ {Fingerprint: 201},
+ {Fingerprint: 300},
+ },
+ },
+ }
+ result := groupByInstance(bounded)
+ require.Equal(t, expected, result)
+ })
+
+ // instance token ranges overlap
+ t.Run("overlapping", func(t *testing.T) {
+ groups := []*logproto.GroupedChunkRefs{
+ {Fingerprint: 50},
+ {Fingerprint: 150},
+ {Fingerprint: 250},
+ {Fingerprint: 350},
+ }
+ servers := []addrsWithTokenRange{
+ {id: "instance-1", addrs: []string{"10.0.0.1"}, minToken: 0, maxToken: 200},
+ {id: "instance-2", addrs: []string{"10.0.0.2"}, minToken: 100, maxToken: 300},
+ {id: "instance-3", addrs: []string{"10.0.0.3"}, minToken: 200, maxToken: 400},
+ }
+
+ // partition fingerprints
+
+ expected := []instanceWithFingerprints{
+ {instance: servers[0], fingerprints: []*logproto.GroupedChunkRefs{
+ {Fingerprint: 50},
+ {Fingerprint: 150},
+ }},
+ {instance: servers[1], fingerprints: []*logproto.GroupedChunkRefs{
+ {Fingerprint: 150},
+ {Fingerprint: 250},
+ }},
+ {instance: servers[2], fingerprints: []*logproto.GroupedChunkRefs{
+ {Fingerprint: 250},
+ {Fingerprint: 350},
+ }},
+ }
+
+ bounded := partitionFingerprintsByAddresses(groups, servers)
+ require.Equal(t, expected, bounded)
+ })
+}
+
+func TestBloomGatewayClient_ServerAddressesWithTokenRanges(t *testing.T) {
+ testCases := map[string]struct {
+ instances []ring.InstanceDesc
+ expected []addrsWithTokenRange
+ }{
+ "one token per instance": {
+ instances: []ring.InstanceDesc{
+ {Id: "instance-1", Addr: "10.0.0.1", Tokens: []uint32{math.MaxUint32 / 6 * 1}},
+ {Id: "instance-2", Addr: "10.0.0.2", Tokens: []uint32{math.MaxUint32 / 6 * 3}},
+ {Id: "instance-3", Addr: "10.0.0.3", Tokens: []uint32{math.MaxUint32 / 6 * 5}},
+ },
+ expected: []addrsWithTokenRange{
+ {id: "instance-1", addrs: []string{"10.0.0.1"}, minToken: 0, maxToken: math.MaxUint32 / 6 * 1},
+ {id: "instance-2", addrs: []string{"10.0.0.2"}, minToken: math.MaxUint32/6*1 + 1, maxToken: math.MaxUint32 / 6 * 3},
+ {id: "instance-3", addrs: []string{"10.0.0.3"}, minToken: math.MaxUint32/6*3 + 1, maxToken: math.MaxUint32 / 6 * 5},
+ {id: "instance-1", addrs: []string{"10.0.0.1"}, minToken: math.MaxUint32/6*5 + 1, maxToken: math.MaxUint32},
+ },
+ },
+ "MinUint32 and MaxUint32 are tokens in the ring": {
+ instances: []ring.InstanceDesc{
+ {Id: "instance-1", Addr: "10.0.0.1", Tokens: []uint32{0, math.MaxUint32 / 3 * 2}},
+ {Id: "instance-2", Addr: "10.0.0.2", Tokens: []uint32{math.MaxUint32 / 3 * 1, math.MaxUint32}},
+ },
+ expected: []addrsWithTokenRange{
+ {id: "instance-1", addrs: []string{"10.0.0.1"}, minToken: 0, maxToken: 0},
+ {id: "instance-2", addrs: []string{"10.0.0.2"}, minToken: 1, maxToken: math.MaxUint32 / 3},
+ {id: "instance-1", addrs: []string{"10.0.0.1"}, minToken: math.MaxUint32/3*1 + 1, maxToken: math.MaxUint32 / 3 * 2},
+ {id: "instance-2", addrs: []string{"10.0.0.2"}, minToken: math.MaxUint32/3*2 + 1, maxToken: math.MaxUint32},
+ },
+ },
+ }
+
+ for name, tc := range testCases {
+ tc := tc
+ t.Run(name, func(t *testing.T) {
+ subRing := newMockRing(tc.instances)
+ res, err := serverAddressesWithTokenRanges(subRing, tc.instances)
+ require.NoError(t, err)
+ require.Equal(t, tc.expected, res)
+ })
+ }
+
+}
+
+func TestBloomGatewayClient_GroupFingerprintsByServer(t *testing.T) {
logger := log.NewNopLogger()
reg := prometheus.NewRegistry()
@@ -40,75 +212,212 @@ func TestBloomGatewayClient_GroupStreamsByAddresses(t *testing.T) {
cfg := ClientConfig{}
flagext.DefaultValues(&cfg)
- c, err := NewGatewayClient(cfg, l, reg, logger, "loki")
+ c, err := NewGatewayClient(cfg, l, reg, logger, "loki", nil, false)
require.NoError(t, err)
+ instances := []ring.InstanceDesc{
+ {Id: "instance-1", Addr: "10.0.0.1", Tokens: []uint32{2146405214, 1029997044, 678878693}},
+ {Id: "instance-2", Addr: "10.0.0.2", Tokens: []uint32{296463531, 1697323986, 800258284}},
+ {Id: "instance-3", Addr: "10.0.0.3", Tokens: []uint32{2014002871, 315617625, 1036168527}},
+ }
+
+ it := bloomutils.NewInstanceSortMergeIterator(instances)
+ for it.Next() {
+ t.Log(it.At().MaxToken, it.At().Instance.Addr)
+ }
+
testCases := []struct {
- name string
- chunks []*logproto.GroupedChunkRefs
- addresses [][]string
- expected []chunkRefsByAddrs
+ name string
+ chunks []*logproto.GroupedChunkRefs
+ expected []instanceWithFingerprints
}{
{
- name: "empty input yields empty result",
- chunks: []*logproto.GroupedChunkRefs{},
- addresses: [][]string{},
- expected: []chunkRefsByAddrs{},
+ name: "empty input yields empty result",
+ chunks: []*logproto.GroupedChunkRefs{},
+ expected: []instanceWithFingerprints{},
},
{
- name: "addresses with same elements are grouped into single item",
+ name: "fingerprints within a single token range are grouped",
chunks: []*logproto.GroupedChunkRefs{
- {Fingerprint: 1, Refs: []*logproto.ShortRef{{Checksum: 1}}},
- {Fingerprint: 2, Refs: []*logproto.ShortRef{{Checksum: 2}}},
- {Fingerprint: 3, Refs: []*logproto.ShortRef{{Checksum: 3}}},
- },
- addresses: [][]string{
- {"10.0.0.1", "10.0.0.2", "10.0.0.3"},
- {"10.0.0.2", "10.0.0.3", "10.0.0.1"},
- {"10.0.0.3", "10.0.0.1", "10.0.0.2"},
+ {Fingerprint: 1000000000, Refs: []*logproto.ShortRef{{Checksum: 1}}},
+ {Fingerprint: 1000000001, Refs: []*logproto.ShortRef{{Checksum: 2}}},
},
- expected: []chunkRefsByAddrs{
+ expected: []instanceWithFingerprints{
{
- addrs: []string{"10.0.0.1", "10.0.0.2", "10.0.0.3"},
- refs: []*logproto.GroupedChunkRefs{
- {Fingerprint: 1, Refs: []*logproto.ShortRef{{Checksum: 1}}},
- {Fingerprint: 2, Refs: []*logproto.ShortRef{{Checksum: 2}}},
- {Fingerprint: 3, Refs: []*logproto.ShortRef{{Checksum: 3}}},
+ instance: addrsWithTokenRange{
+ id: "instance-1",
+ addrs: []string{"10.0.0.1"},
+ },
+ fingerprints: []*logproto.GroupedChunkRefs{
+ {Fingerprint: 1000000000, Refs: []*logproto.ShortRef{{Checksum: 1}}},
+ {Fingerprint: 1000000001, Refs: []*logproto.ShortRef{{Checksum: 2}}},
},
},
},
},
{
- name: "partially overlapping addresses are not grouped together",
+ name: "fingerprints within multiple token ranges of a single instance are grouped",
chunks: []*logproto.GroupedChunkRefs{
- {Fingerprint: 1, Refs: []*logproto.ShortRef{{Checksum: 1}}},
- {Fingerprint: 2, Refs: []*logproto.ShortRef{{Checksum: 2}}},
+ {Fingerprint: 1000000000, Refs: []*logproto.ShortRef{{Checksum: 1}}},
+ {Fingerprint: 2100000000, Refs: []*logproto.ShortRef{{Checksum: 2}}},
+ },
+ expected: []instanceWithFingerprints{
+ {
+ instance: addrsWithTokenRange{
+ id: "instance-1",
+ addrs: []string{"10.0.0.1"},
+ },
+ fingerprints: []*logproto.GroupedChunkRefs{
+ {Fingerprint: 1000000000, Refs: []*logproto.ShortRef{{Checksum: 1}}},
+ {Fingerprint: 2100000000, Refs: []*logproto.ShortRef{{Checksum: 2}}},
+ },
+ },
},
- addresses: [][]string{
- {"10.0.0.1", "10.0.0.2"},
- {"10.0.0.2", "10.0.0.3"},
+ },
+ {
+ name: "fingerprints with token ranges of multiple instances are grouped",
+ chunks: []*logproto.GroupedChunkRefs{
+ // instance 1
+ {Fingerprint: 1000000000, Refs: []*logproto.ShortRef{{Checksum: 1}}},
+ // instance 1
+ {Fingerprint: 2100000000, Refs: []*logproto.ShortRef{{Checksum: 2}}},
+ // instance 2
+ {Fingerprint: 290000000, Refs: []*logproto.ShortRef{{Checksum: 3}}},
+ // instance 2 (fingerprint equals instance token)
+ {Fingerprint: 800258284, Refs: []*logproto.ShortRef{{Checksum: 4}}},
+ // instance 2 (fingerprint greater than greatest token)
+ {Fingerprint: 2147483648, Refs: []*logproto.ShortRef{{Checksum: 5}}},
+ // instance 3
+ {Fingerprint: 1029997045, Refs: []*logproto.ShortRef{{Checksum: 6}}},
},
- expected: []chunkRefsByAddrs{
+ expected: []instanceWithFingerprints{
+ {
+ instance: addrsWithTokenRange{
+ id: "instance-2",
+ addrs: []string{"10.0.0.2"},
+ },
+ fingerprints: []*logproto.GroupedChunkRefs{
+ {Fingerprint: 290000000, Refs: []*logproto.ShortRef{{Checksum: 3}}},
+ {Fingerprint: 800258284, Refs: []*logproto.ShortRef{{Checksum: 4}}},
+ {Fingerprint: 2147483648, Refs: []*logproto.ShortRef{{Checksum: 5}}},
+ },
+ },
{
- addrs: []string{"10.0.0.1", "10.0.0.2"},
- refs: []*logproto.GroupedChunkRefs{
- {Fingerprint: 1, Refs: []*logproto.ShortRef{{Checksum: 1}}},
+ instance: addrsWithTokenRange{
+ id: "instance-1",
+ addrs: []string{"10.0.0.1"},
+ },
+ fingerprints: []*logproto.GroupedChunkRefs{
+ {Fingerprint: 1000000000, Refs: []*logproto.ShortRef{{Checksum: 1}}},
+ {Fingerprint: 2100000000, Refs: []*logproto.ShortRef{{Checksum: 2}}},
},
},
{
- addrs: []string{"10.0.0.2", "10.0.0.3"},
- refs: []*logproto.GroupedChunkRefs{
- {Fingerprint: 2, Refs: []*logproto.ShortRef{{Checksum: 2}}},
+ instance: addrsWithTokenRange{
+ id: "instance-3",
+ addrs: []string{"10.0.0.3"},
+ },
+ fingerprints: []*logproto.GroupedChunkRefs{
+ {Fingerprint: 1029997045, Refs: []*logproto.ShortRef{{Checksum: 6}}},
},
},
},
},
}
+
+ subRing := newMockRing(instances)
for _, tc := range testCases {
tc := tc
t.Run(tc.name, func(t *testing.T) {
- res := c.groupStreamsByAddr(tc.chunks, tc.addresses)
+ // sort chunks here to be able to write more human-readable test input
+ sort.Slice(tc.chunks, func(i, j int) bool {
+ return tc.chunks[i].Fingerprint < tc.chunks[j].Fingerprint
+ })
+
+ res, err := c.groupFingerprintsByServer(tc.chunks, subRing, instances)
+ require.NoError(t, err)
require.Equal(t, tc.expected, res)
})
}
}
+
+// make sure mockRing implements the ring.ReadRing interface
+var _ ring.ReadRing = &mockRing{}
+
+func newMockRing(instances []ring.InstanceDesc) *mockRing {
+ it := bloomutils.NewInstanceSortMergeIterator(instances)
+ ranges := make([]bloomutils.InstanceWithTokenRange, 0)
+ for it.Next() {
+ ranges = append(ranges, it.At())
+ }
+ return &mockRing{
+ instances: instances,
+ ranges: ranges,
+ }
+}
+
+type mockRing struct {
+ instances []ring.InstanceDesc
+ ranges []bloomutils.InstanceWithTokenRange
+}
+
+// Get implements ring.ReadRing.
+func (r *mockRing) Get(key uint32, _ ring.Operation, _ []ring.InstanceDesc, _ []string, _ []string) (ring.ReplicationSet, error) {
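+ // return the instance owning the first token range whose MaxToken is >= key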
+ idx, _ := sort.Find(len(r.ranges), func(i int) int {
+ if r.ranges[i].MaxToken < key {
+ return 1
+ }
+ if r.ranges[i].MaxToken > key {
+ return -1
+ }
+ return 0
+ })
+ return ring.ReplicationSet{Instances: []ring.InstanceDesc{r.ranges[idx].Instance}}, nil
+}
+
+// GetAllHealthy implements ring.ReadRing.
+func (r *mockRing) GetAllHealthy(_ ring.Operation) (ring.ReplicationSet, error) {
+ return ring.ReplicationSet{
+ Instances: r.instances,
+ }, nil
+}
+
+// GetInstanceState implements ring.ReadRing.
+func (*mockRing) GetInstanceState(_ string) (ring.InstanceState, error) {
+ panic("unimplemented")
+}
+
+// GetReplicationSetForOperation implements ring.ReadRing.
+func (*mockRing) GetReplicationSetForOperation(_ ring.Operation) (ring.ReplicationSet, error) {
+ panic("unimplemented")
+}
+
+// HasInstance implements ring.ReadRing.
+func (*mockRing) HasInstance(_ string) bool {
+ panic("unimplemented")
+}
+
+// InstancesCount implements ring.ReadRing.
+func (r *mockRing) InstancesCount() int {
+ return len(r.instances)
+}
+
+// ReplicationFactor implements ring.ReadRing.
+func (*mockRing) ReplicationFactor() int {
+ return 1
+}
+
+// ShuffleShard implements ring.ReadRing.
+func (*mockRing) ShuffleShard(_ string, _ int) ring.ReadRing {
+ panic("unimplemented")
+}
+
+// ShuffleShardWithLookback implements ring.ReadRing.
+func (*mockRing) ShuffleShardWithLookback(_ string, _ int, _ time.Duration, _ time.Time) ring.ReadRing {
+ panic("unimplemented")
+}
+
+// CleanupShuffleShardCache implements ring.ReadRing.
+func (*mockRing) CleanupShuffleShardCache(_ string) {
+ panic("unimplemented")
+}
diff --git a/pkg/bloomgateway/config.go b/pkg/bloomgateway/config.go
index 68856a45d4c21..3eb94324bd7e8 100644
--- a/pkg/bloomgateway/config.go
+++ b/pkg/bloomgateway/config.go
@@ -16,6 +16,9 @@ type Config struct {
Enabled bool `yaml:"enabled"`
// Client configures the Bloom Gateway client
Client ClientConfig `yaml:"client,omitempty" doc:""`
+
+ WorkerConcurrency int `yaml:"worker_concurrency"`
+ MaxOutstandingPerTenant int `yaml:"max_outstanding_per_tenant"`
}
// RegisterFlags registers flags for the Bloom Gateway configuration.
@@ -27,7 +30,16 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
cfg.Ring.RegisterFlagsWithPrefix(prefix, "collectors/", f)
f.BoolVar(&cfg.Enabled, prefix+"enabled", false, "Flag to enable or disable the bloom gateway component globally.")
+ f.IntVar(&cfg.WorkerConcurrency, prefix+"worker-concurrency", 4, "Number of workers to use for filtering chunks concurrently.")
+ f.IntVar(&cfg.MaxOutstandingPerTenant, prefix+"max-outstanding-per-tenant", 1024, "Maximum number of outstanding tasks per tenant.")
// TODO(chaudum): Figure out what the better place is for registering flags
// -bloom-gateway.client.* or -bloom-gateway-client.*
cfg.Client.RegisterFlags(f)
}
+
+type Limits interface {
+ CacheLimits
+ BloomGatewayShardSize(tenantID string) int
+ BloomGatewayEnabled(tenantID string) bool
+ BloomGatewayBlocksDownloadingParallelism(tenantID string) int
+}
diff --git a/pkg/bloomgateway/multiplexing.go b/pkg/bloomgateway/multiplexing.go
new file mode 100644
index 0000000000000..c5c6964038931
--- /dev/null
+++ b/pkg/bloomgateway/multiplexing.go
@@ -0,0 +1,221 @@
+package bloomgateway
+
+import (
+ "sort"
+ "time"
+
+ "github.com/oklog/ulid"
+ "github.com/prometheus/common/model"
+
+ "github.com/grafana/loki/pkg/logproto"
+ v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
+)
+
+const (
+ Day = 24 * time.Hour
+)
+
+// Task is the data structure that is enqueued to the internal queue and dequeued by query workers
+type Task struct {
+ // ID is a lexicographically sortable unique identifier of the task
+ ID ulid.ULID
+ // Tenant is the tenant ID
+ Tenant string
+ // Request is the original request
+ Request *logproto.FilterChunkRefRequest
+ // ErrCh is a send-only channel to write an error to
+ ErrCh chan<- error
+ // ResCh is a send-only channel to write partial responses to
+ ResCh chan<- v1.Output
+}
+
+// NewTask returns a new Task that can be enqueued to the task queue.
+// In addition, it returns a result and an error channel, as well
+// as an error if the instantiation fails.
+func NewTask(tenantID string, req *logproto.FilterChunkRefRequest) (Task, chan v1.Output, chan error, error) {
+ key, err := ulid.New(ulid.Now(), nil)
+ if err != nil {
+ return Task{}, nil, nil, err
+ }
+ errCh := make(chan error, 1)
+ resCh := make(chan v1.Output, 1)
+ task := Task{
+ ID: key,
+ Tenant: tenantID,
+ Request: req,
+ ErrCh: errCh,
+ ResCh: resCh,
+ }
+ return task, resCh, errCh, nil
+}
+
+// Copy returns a copy of the existing task but with a new slice of chunks
+func (t Task) Copy(refs []*logproto.GroupedChunkRefs) Task {
+ return Task{
+ ID: t.ID,
+ Tenant: t.Tenant,
+ Request: &logproto.FilterChunkRefRequest{
+ From: t.Request.From,
+ Through: t.Request.Through,
+ Filters: t.Request.Filters,
+ Refs: refs,
+ },
+ ErrCh: t.ErrCh,
+ ResCh: t.ResCh,
+ }
+}
+
+// Bounds returns the day boundaries of the task
+func (t Task) Bounds() (time.Time, time.Time) {
+ return getDayTime(t.Request.From), getDayTime(t.Request.Through)
+}
+
+func (t Task) ChunkIterForDay(day time.Time) v1.Iterator[*logproto.GroupedChunkRefs] {
+ cf := filterGroupedChunkRefsByDay{day: day}
+ return &FilterIter[*logproto.GroupedChunkRefs]{
+ iter: v1.NewSliceIter(t.Request.Refs),
+ matches: cf.contains,
+ transform: cf.filter,
+ }
+}
+
+type filterGroupedChunkRefsByDay struct {
+ day time.Time
+}
+
+func (cf filterGroupedChunkRefsByDay) contains(a *logproto.GroupedChunkRefs) bool {
+ from, through := getFromThrough(a.Refs)
+ if from.Time().After(cf.day.Add(Day)) || through.Time().Before(cf.day) {
+ return false
+ }
+ return true
+}
+
+func (cf filterGroupedChunkRefsByDay) filter(a *logproto.GroupedChunkRefs) *logproto.GroupedChunkRefs {
+ minTs, maxTs := getFromThrough(a.Refs)
+
+ // in most cases, all chunks are within day range
+ if minTs.Time().Compare(cf.day) >= 0 && maxTs.Time().Before(cf.day.Add(Day)) {
+ return a
+ }
+
+ // handle the case where some chunks fall outside of the day range,
+ // using binary search to get the min and max indexes of chunks that fall into the day range
+ min := sort.Search(len(a.Refs), func(i int) bool {
+ start := a.Refs[i].From.Time()
+ end := a.Refs[i].Through.Time()
+ return start.Compare(cf.day) >= 0 || end.Compare(cf.day) >= 0
+ })
+
+ max := sort.Search(len(a.Refs), func(i int) bool {
+ start := a.Refs[i].From.Time()
+ return start.Compare(cf.day.Add(Day)) > 0
+ })
+
+ return &logproto.GroupedChunkRefs{
+ Tenant: a.Tenant,
+ Fingerprint: a.Fingerprint,
+ Refs: a.Refs[min:max],
+ }
+}
+
+type Predicate[T any] func(a T) bool
+type Transform[T any] func(a T) T
+
+type FilterIter[T any] struct {
+ iter v1.Iterator[T]
+ matches Predicate[T]
+ transform Transform[T]
+ cache T
+ zero T // zero value of the return type of Next()
+}
+
+func (it *FilterIter[T]) Next() bool {
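+ // advance the underlying iterator until an element matches the predicate,
+ // then cache the transformed element so it can be returned by At()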
+ next := it.iter.Next()
+ if !next {
+ it.cache = it.zero
+ return false
+ }
+ for next && !it.matches(it.iter.At()) {
+ next = it.iter.Next()
+ if !next {
+ it.cache = it.zero
+ return false
+ }
+ }
+ it.cache = it.transform(it.iter.At())
+ return true
+}
+
+func (it *FilterIter[T]) At() T {
+ return it.cache
+}
+
+func (it *FilterIter[T]) Err() error {
+ return nil
+}
+
+// FilterRequest extends v1.Request with an error channel
+type FilterRequest struct {
+ v1.Request
+ Error chan<- error
+}
+
+// taskMergeIterator implements v1.Iterator
+type taskMergeIterator struct {
+ curr FilterRequest
+ heap *v1.HeapIterator[v1.IndexedValue[*logproto.GroupedChunkRefs]]
+ tasks []Task
+ day time.Time
+ err error
+}
+
+func newTaskMergeIterator(day time.Time, tasks ...Task) v1.PeekingIterator[v1.Request] {
+ it := &taskMergeIterator{
+ tasks: tasks,
+ curr: FilterRequest{},
+ day: day,
+ }
+ it.init()
+ return v1.NewPeekingIter[v1.Request](it)
+}
+
+func (it *taskMergeIterator) init() {
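+ // build one chunk iterator per task for the given day and merge them into a
+ // single heap iterator ordered by fingerprint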
+ sequences := make([]v1.PeekingIterator[v1.IndexedValue[*logproto.GroupedChunkRefs]], 0, len(it.tasks))
+ for i := range it.tasks {
+ iter := v1.NewIterWithIndex(it.tasks[i].ChunkIterForDay(it.day), i)
+ sequences = append(sequences, v1.NewPeekingIter(iter))
+ }
+ it.heap = v1.NewHeapIterator(
+ func(i, j v1.IndexedValue[*logproto.GroupedChunkRefs]) bool {
+ return i.Value().Fingerprint < j.Value().Fingerprint
+ },
+ sequences...,
+ )
+ it.err = nil
+}
+
+func (it *taskMergeIterator) Next() bool {
+ ok := it.heap.Next()
+ if !ok {
+ return false
+ }
+
+ group := it.heap.At()
+ task := it.tasks[group.Index()]
+
+ it.curr.Fp = model.Fingerprint(group.Value().Fingerprint)
+ it.curr.Chks = convertToChunkRefs(group.Value().Refs)
+ it.curr.Searches = convertToSearches(task.Request.Filters)
+ it.curr.Response = task.ResCh
+ it.curr.Error = task.ErrCh
+ return true
+}
+
+func (it *taskMergeIterator) At() v1.Request {
+ return it.curr.Request
+}
+
+func (it *taskMergeIterator) Err() error {
+ return it.err
+}
diff --git a/pkg/bloomgateway/multiplexing_test.go b/pkg/bloomgateway/multiplexing_test.go
new file mode 100644
index 0000000000000..93e5e5686fdaf
--- /dev/null
+++ b/pkg/bloomgateway/multiplexing_test.go
@@ -0,0 +1,203 @@
+package bloomgateway
+
+import (
+ "testing"
+ "time"
+
+ "github.com/prometheus/common/model"
+ "github.com/stretchr/testify/require"
+
+ "github.com/grafana/loki/pkg/logproto"
+)
+
+func TestTask(t *testing.T) {
+ t.Run("bounds returns request boundaries", func(t *testing.T) {
+ ts := model.Now()
+ req := &logproto.FilterChunkRefRequest{
+ From: ts.Add(-1 * time.Hour),
+ Through: ts,
+ }
+ task, _, _, err := NewTask("tenant", req)
+ require.NoError(t, err)
+ from, through := task.Bounds()
+ require.Equal(t, getDayTime(req.From), from)
+ require.Equal(t, getDayTime(req.Through), through)
+ })
+}
+
+func TestTaskMergeIterator(t *testing.T) {
+ // Thu Nov 09 2023 10:56:50 UTC
+ ts := model.TimeFromUnix(1699523810)
+ day := getDayTime(ts)
+ tenant := "fake"
+
+ t.Run("empty requests result in empty iterator", func(t *testing.T) {
+ r1 := &logproto.FilterChunkRefRequest{
+ From: ts.Add(-3 * time.Hour),
+ Through: ts.Add(-2 * time.Hour),
+ Refs: []*logproto.GroupedChunkRefs{},
+ }
+ t1, _, _, err := NewTask(tenant, r1)
+ require.NoError(t, err)
+
+ r2 := &logproto.FilterChunkRefRequest{
+ From: ts.Add(-1 * time.Hour),
+ Through: ts,
+ Refs: []*logproto.GroupedChunkRefs{},
+ }
+ t2, _, _, err := NewTask(tenant, r2)
+ require.NoError(t, err)
+
+ r3 := &logproto.FilterChunkRefRequest{
+ From: ts.Add(-1 * time.Hour),
+ Through: ts,
+ Refs: []*logproto.GroupedChunkRefs{},
+ }
+ t3, _, _, err := NewTask(tenant, r3)
+ require.NoError(t, err)
+
+ it := newTaskMergeIterator(day, t1, t2, t3)
+ // nothing to iterate over
+ require.False(t, it.Next())
+ })
+
+ t.Run("merge multiple tasks in ascending fingerprint order", func(t *testing.T) {
+ r1 := &logproto.FilterChunkRefRequest{
+ From: ts.Add(-3 * time.Hour),
+ Through: ts.Add(-2 * time.Hour),
+ Refs: []*logproto.GroupedChunkRefs{
+ {Fingerprint: 100, Tenant: tenant, Refs: []*logproto.ShortRef{
+ {From: ts.Add(-3 * time.Hour), Through: ts.Add(-2 * time.Hour), Checksum: 100},
+ }},
+ },
+ }
+ t1, _, _, err := NewTask(tenant, r1)
+ require.NoError(t, err)
+
+ r2 := &logproto.FilterChunkRefRequest{
+ From: ts.Add(-1 * time.Hour),
+ Through: ts,
+ Refs: []*logproto.GroupedChunkRefs{
+ {Fingerprint: 100, Tenant: tenant, Refs: []*logproto.ShortRef{
+ {From: ts.Add(-1 * time.Hour), Through: ts, Checksum: 200},
+ }},
+ {Fingerprint: 200, Tenant: tenant, Refs: []*logproto.ShortRef{
+ {From: ts.Add(-1 * time.Hour), Through: ts, Checksum: 300},
+ }},
+ },
+ }
+ t2, _, _, err := NewTask(tenant, r2)
+ require.NoError(t, err)
+
+ r3 := &logproto.FilterChunkRefRequest{
+ From: ts.Add(-1 * time.Hour),
+ Through: ts,
+ Refs: []*logproto.GroupedChunkRefs{
+ {Fingerprint: 200, Tenant: tenant, Refs: []*logproto.ShortRef{
+ {From: ts.Add(-1 * time.Hour), Through: ts, Checksum: 400},
+ }},
+ },
+ }
+ t3, _, _, err := NewTask(tenant, r3)
+ require.NoError(t, err)
+
+ it := newTaskMergeIterator(day, t1, t2, t3)
+
+ // first item
+ require.True(t, it.Next())
+ r := it.At()
+ require.Equal(t, model.Fingerprint(100), r.Fp)
+ require.Equal(t, uint32(100), r.Chks[0].Checksum)
+
+ // second item
+ require.True(t, it.Next())
+ r = it.At()
+ require.Equal(t, model.Fingerprint(100), r.Fp)
+ require.Equal(t, uint32(200), r.Chks[0].Checksum)
+
+ // third item
+ require.True(t, it.Next())
+ r = it.At()
+ require.Equal(t, model.Fingerprint(200), r.Fp)
+ require.Equal(t, uint32(300), r.Chks[0].Checksum)
+
+ // fourth item
+ require.True(t, it.Next())
+ r = it.At()
+ require.Equal(t, model.Fingerprint(200), r.Fp)
+ require.Equal(t, uint32(400), r.Chks[0].Checksum)
+
+ // no more items
+ require.False(t, it.Next())
+ })
+}
+
+func TestChunkIterForDay(t *testing.T) {
+ tenant := "fake"
+
+ // Thu Nov 09 2023 10:56:50 UTC
+ ts := model.TimeFromUnix(1699523810)
+
+ t.Run("filter chunk refs that fall into the day range", func(t *testing.T) {
+ input := &logproto.FilterChunkRefRequest{
+ From: ts.Add(-168 * time.Hour), // 1w ago
+ Through: ts,
+ Refs: []*logproto.GroupedChunkRefs{
+ {Fingerprint: 100, Tenant: tenant, Refs: []*logproto.ShortRef{
+ {From: ts.Add(-168 * time.Hour), Through: ts.Add(-167 * time.Hour), Checksum: 100},
+ {From: ts.Add(-143 * time.Hour), Through: ts.Add(-142 * time.Hour), Checksum: 101},
+ }},
+ {Fingerprint: 200, Tenant: tenant, Refs: []*logproto.ShortRef{
+ {From: ts.Add(-144 * time.Hour), Through: ts.Add(-143 * time.Hour), Checksum: 200},
+ {From: ts.Add(-119 * time.Hour), Through: ts.Add(-118 * time.Hour), Checksum: 201},
+ }},
+ {Fingerprint: 300, Tenant: tenant, Refs: []*logproto.ShortRef{
+ {From: ts.Add(-120 * time.Hour), Through: ts.Add(-119 * time.Hour), Checksum: 300},
+ {From: ts.Add(-95 * time.Hour), Through: ts.Add(-94 * time.Hour), Checksum: 301},
+ }},
+ {Fingerprint: 400, Tenant: tenant, Refs: []*logproto.ShortRef{
+ {From: ts.Add(-96 * time.Hour), Through: ts.Add(-95 * time.Hour), Checksum: 400},
+ {From: ts.Add(-71 * time.Hour), Through: ts.Add(-70 * time.Hour), Checksum: 401},
+ }},
+ {Fingerprint: 500, Tenant: tenant, Refs: []*logproto.ShortRef{
+ {From: ts.Add(-72 * time.Hour), Through: ts.Add(-71 * time.Hour), Checksum: 500},
+ {From: ts.Add(-47 * time.Hour), Through: ts.Add(-46 * time.Hour), Checksum: 501},
+ }},
+ {Fingerprint: 600, Tenant: tenant, Refs: []*logproto.ShortRef{
+ {From: ts.Add(-48 * time.Hour), Through: ts.Add(-47 * time.Hour), Checksum: 600},
+ {From: ts.Add(-23 * time.Hour), Through: ts.Add(-22 * time.Hour), Checksum: 601},
+ }},
+ {Fingerprint: 700, Tenant: tenant, Refs: []*logproto.ShortRef{
+ {From: ts.Add(-24 * time.Hour), Through: ts.Add(-23 * time.Hour), Checksum: 700},
+ {From: ts.Add(-1 * time.Hour), Through: ts, Checksum: 701},
+ }},
+ },
+ Filters: []*logproto.LineFilterExpression{
+ {Operator: 1, Match: "foo"},
+ {Operator: 1, Match: "bar"},
+ },
+ }
+
+ // day ranges from ts-48h to ts-24h
+ day := getDayTime(ts.Add(-36 * time.Hour))
+
+ expected := []*logproto.GroupedChunkRefs{
+ {Fingerprint: 500, Tenant: tenant, Refs: []*logproto.ShortRef{
+ {From: ts.Add(-47 * time.Hour), Through: ts.Add(-46 * time.Hour), Checksum: 501},
+ }},
+ {Fingerprint: 600, Tenant: tenant, Refs: []*logproto.ShortRef{
+ {From: ts.Add(-48 * time.Hour), Through: ts.Add(-47 * time.Hour), Checksum: 600},
+ }},
+ }
+
+ task, _, _, _ := NewTask(tenant, input)
+ it := task.ChunkIterForDay(day)
+
+ output := make([]*logproto.GroupedChunkRefs, 0, len(input.Refs))
+ for it.Next() {
+ output = append(output, it.At())
+ }
+
+ require.Equal(t, expected, output)
+ })
+}
diff --git a/pkg/bloomgateway/querier.go b/pkg/bloomgateway/querier.go
index ab7b9eb40500e..ec9e2a45842d6 100644
--- a/pkg/bloomgateway/querier.go
+++ b/pkg/bloomgateway/querier.go
@@ -31,9 +31,9 @@ func (bq *BloomQuerier) FilterChunkRefs(ctx context.Context, tenant string, from
return chunkRefs, nil
}
- // TODO(chaudum): Make buffer pool to reduce allocations.
// The indexes of the chunks slice correspond to the indexes of the fingerprint slice.
- grouped := make([]*logproto.GroupedChunkRefs, 0, len(chunkRefs))
+ grouped := groupedChunksRefPool.Get(len(chunkRefs))
+ defer groupedChunksRefPool.Put(grouped)
grouped = groupChunkRefs(chunkRefs, grouped)
refs, err := bq.c.FilterChunks(ctx, tenant, from, through, grouped, filters...)
@@ -41,8 +41,6 @@ func (bq *BloomQuerier) FilterChunkRefs(ctx context.Context, tenant string, from
return nil, err
}
- // TODO(chaudum): Cache response
-
// Flatten response from client and return
result := make([]*logproto.ChunkRef, 0, len(chunkRefs))
for i := range refs {
diff --git a/pkg/bloomgateway/sharding.go b/pkg/bloomgateway/sharding.go
index 4bd288ccfe43b..5dfb9f11732a0 100644
--- a/pkg/bloomgateway/sharding.go
+++ b/pkg/bloomgateway/sharding.go
@@ -35,11 +35,6 @@ var (
})
)
-type Limits interface {
- BloomGatewayShardSize(tenantID string) int
- BloomGatewayEnabled(tenantID string) bool
-}
-
type ShardingStrategy interface {
// FilterTenants whose indexes should be loaded by the index gateway.
// Returns the list of user IDs that should be synced by the index gateway.
diff --git a/pkg/bloomgateway/util.go b/pkg/bloomgateway/util.go
new file mode 100644
index 0000000000000..33477e9052fb0
--- /dev/null
+++ b/pkg/bloomgateway/util.go
@@ -0,0 +1,114 @@
+package bloomgateway
+
+import (
+ "sort"
+ "time"
+
+ "github.com/prometheus/common/model"
+ "golang.org/x/exp/slices"
+
+ "github.com/grafana/loki/pkg/logproto"
+ v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
+ "github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper"
+)
+
+func getDayTime(ts model.Time) time.Time {
+ return time.Date(ts.Time().Year(), ts.Time().Month(), ts.Time().Day(), 0, 0, 0, 0, time.UTC)
+}
+
+// getFromThrough assumes a list of ShortRefs sorted by From time
+func getFromThrough(refs []*logproto.ShortRef) (model.Time, model.Time) {
+ if len(refs) == 0 {
+ return model.Earliest, model.Latest
+ }
+
+ maxItem := slices.MaxFunc(refs, func(a, b *logproto.ShortRef) int {
+ if a.Through > b.Through {
+ return 1
+ } else if a.Through < b.Through {
+ return -1
+ }
+ return 0
+ })
+
+ return refs[0].From, maxItem.Through
+}
+
+// convertToSearches converts a list of line filter expressions to a list of
+// byte slices that can be used with the bloom filters.
+// TODO(chaudum): Currently this function only supports equality matchers,
+// but we eventually also want to support regex matchers.
+func convertToSearches(filters []*logproto.LineFilterExpression) [][]byte {
+ searches := make([][]byte, 0, len(filters))
+ for _, f := range filters {
+ searches = append(searches, []byte(f.Match))
+ }
+ return searches
+}
+
+// convertToShortRefs converts a v1.ChunkRefs into []*logproto.ShortRef
+// TODO(chaudum): Avoid conversion by transferring v1.ChunkRefs in gRPC request.
+func convertToShortRefs(refs v1.ChunkRefs) []*logproto.ShortRef {
+ result := make([]*logproto.ShortRef, 0, len(refs))
+ for _, ref := range refs {
+ result = append(result, &logproto.ShortRef{From: ref.Start, Through: ref.End, Checksum: ref.Checksum})
+ }
+ return result
+}
+
+// convertToChunkRefs converts a []*logproto.ShortRef into v1.ChunkRefs
+// TODO(chaudum): Avoid conversion by transferring v1.ChunkRefs in gRPC request.
+func convertToChunkRefs(refs []*logproto.ShortRef) v1.ChunkRefs {
+ result := make(v1.ChunkRefs, 0, len(refs))
+ for _, ref := range refs {
+ result = append(result, v1.ChunkRef{Start: ref.From, End: ref.Through, Checksum: ref.Checksum})
+ }
+ return result
+}
+
+// getFirstLast returns the first and last item of a fingerprint slice
+// It assumes an ascending sorted list of fingerprints.
+func getFirstLast[T any](s []T) (T, T) {
+ var zero T
+ if len(s) == 0 {
+ return zero, zero
+ }
+ return s[0], s[len(s)-1]
+}
+
+type boundedTasks struct {
+ blockRef bloomshipper.BlockRef
+ tasks []Task
+}
+
+func partitionFingerprintRange(tasks []Task, blocks []bloomshipper.BlockRef) (result []boundedTasks) {
+ for _, block := range blocks {
+ bounded := boundedTasks{
+ blockRef: block,
+ }
+
+ for _, task := range tasks {
+ refs := task.Request.Refs
+ min := sort.Search(len(refs), func(i int) bool {
+ return block.Cmp(refs[i].Fingerprint) > v1.Before
+ })
+
+ max := sort.Search(len(refs), func(i int) bool {
+ return block.Cmp(refs[i].Fingerprint) == v1.After
+ })
+
+			// All fingerprints fall outside of the block's range
+ if min == len(refs) || max == 0 {
+ continue
+ }
+
+ bounded.tasks = append(bounded.tasks, task.Copy(refs[min:max]))
+ }
+
+ if len(bounded.tasks) > 0 {
+ result = append(result, bounded)
+ }
+
+ }
+ return result
+}
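
The two `sort.Search` calls in `partitionFingerprintRange` find the half-open index range of task fingerprints that overlap a block's bounds. A small worked sketch, using plain uint64 comparisons in place of `block.Cmp` and made-up values:

```go
package bloomgateway

import (
	"fmt"
	"sort"

	"github.com/grafana/loki/pkg/logproto"
)

// exampleFingerprintBounds mirrors the two sort.Search calls in
// partitionFingerprintRange, using plain uint64 comparisons instead of
// block.Cmp. Fingerprints and bounds are made up.
func exampleFingerprintBounds() {
	// refs must be sorted ascending by fingerprint; block bounds are [100, 199].
	refs := []*logproto.GroupedChunkRefs{
		{Fingerprint: 50}, {Fingerprint: 120}, {Fingerprint: 180}, {Fingerprint: 250},
	}
	minFp, maxFp := uint64(100), uint64(199)

	// first index that is not before the block -> 1
	minIdx := sort.Search(len(refs), func(i int) bool { return refs[i].Fingerprint >= minFp })
	// first index that is after the block -> 3
	maxIdx := sort.Search(len(refs), func(i int) bool { return refs[i].Fingerprint > maxFp })

	// refs[minIdx:maxIdx] (fingerprints 120 and 180) overlap the block.
	fmt.Println(minIdx, maxIdx) // 1 3
}
```
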
diff --git a/pkg/bloomgateway/util_test.go b/pkg/bloomgateway/util_test.go
new file mode 100644
index 0000000000000..08c6d2a1306a4
--- /dev/null
+++ b/pkg/bloomgateway/util_test.go
@@ -0,0 +1,74 @@
+package bloomgateway
+
+import (
+ "testing"
+
+ "github.com/prometheus/common/model"
+ "github.com/stretchr/testify/require"
+
+ "github.com/grafana/loki/pkg/logproto"
+ "github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper"
+)
+
+func TestGetFromThrough(t *testing.T) {
+ chunks := []*logproto.ShortRef{
+ {From: 0, Through: 6},
+ {From: 1, Through: 5},
+ {From: 2, Through: 9},
+ {From: 3, Through: 8},
+ {From: 4, Through: 7},
+ }
+ from, through := getFromThrough(chunks)
+ require.Equal(t, model.Time(0), from)
+ require.Equal(t, model.Time(9), through)
+
+ // assert that slice order did not change
+ require.Equal(t, model.Time(0), chunks[0].From)
+ require.Equal(t, model.Time(4), chunks[len(chunks)-1].From)
+}
+
+func mkBlockRef(minFp, maxFp uint64) bloomshipper.BlockRef {
+ return bloomshipper.BlockRef{
+ Ref: bloomshipper.Ref{
+ MinFingerprint: minFp,
+ MaxFingerprint: maxFp,
+ },
+ }
+}
+
+func TestPartitionFingerprintRange(t *testing.T) {
+ seriesPerBound := 100
+ bounds := []bloomshipper.BlockRef{
+ mkBlockRef(0, 99),
+ mkBlockRef(100, 199),
+ mkBlockRef(200, 299),
+ mkBlockRef(300, 399), // one out of bounds block
+ }
+
+ nTasks := 4
+ nSeries := 300
+ tasks := make([]Task, nTasks)
+ for i := 0; i < nSeries; i++ {
+ if tasks[i%4].Request == nil {
+ tasks[i%4].Request = &logproto.FilterChunkRefRequest{}
+ }
+ tasks[i%4].Request.Refs = append(tasks[i%nTasks].Request.Refs, &logproto.GroupedChunkRefs{Fingerprint: uint64(i)})
+ }
+
+ results := partitionFingerprintRange(tasks, bounds)
+ require.Equal(t, 3, len(results)) // ensure we only return bounds in range
+ for _, res := range results {
+ // ensure we have the right number of tasks per bound
+ for i := 0; i < nTasks; i++ {
+ require.Equal(t, seriesPerBound/nTasks, len(res.tasks[i].Request.Refs))
+ }
+ }
+
+ // ensure bound membership
+ for i := 0; i < nSeries; i++ {
+ require.Equal(t,
+ &logproto.GroupedChunkRefs{Fingerprint: uint64(i)},
+ results[i/seriesPerBound].tasks[i%nTasks].Request.Refs[i%seriesPerBound/nTasks],
+ )
+ }
+}
diff --git a/pkg/bloomgateway/worker.go b/pkg/bloomgateway/worker.go
new file mode 100644
index 0000000000000..9de580166ea4d
--- /dev/null
+++ b/pkg/bloomgateway/worker.go
@@ -0,0 +1,253 @@
+package bloomgateway
+
+import (
+ "context"
+ "time"
+
+ "github.com/go-kit/log"
+ "github.com/go-kit/log/level"
+ "github.com/grafana/dskit/services"
+ "github.com/pkg/errors"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promauto"
+ "github.com/prometheus/common/model"
+
+ "github.com/grafana/loki/pkg/queue"
+ v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
+ "github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper"
+)
+
+type workerConfig struct {
+ maxWaitTime time.Duration
+ maxItems int
+
+ processBlocksSequentially bool
+}
+
+type workerMetrics struct {
+ dequeuedTasks *prometheus.CounterVec
+ dequeueErrors *prometheus.CounterVec
+ dequeueWaitTime *prometheus.SummaryVec
+ storeAccessLatency *prometheus.HistogramVec
+}
+
+func newWorkerMetrics(registerer prometheus.Registerer, namespace, subsystem string) *workerMetrics {
+ labels := []string{"worker"}
+ return &workerMetrics{
+ dequeuedTasks: promauto.With(registerer).NewCounterVec(prometheus.CounterOpts{
+ Namespace: namespace,
+ Subsystem: subsystem,
+ Name: "dequeued_tasks_total",
+ Help: "Total amount of tasks that the worker dequeued from the bloom query queue",
+ }, labels),
+ dequeueErrors: promauto.With(registerer).NewCounterVec(prometheus.CounterOpts{
+ Namespace: namespace,
+ Subsystem: subsystem,
+ Name: "dequeue_errors_total",
+ Help: "Total amount of failed dequeue operations",
+ }, labels),
+ dequeueWaitTime: promauto.With(registerer).NewSummaryVec(prometheus.SummaryOpts{
+ Namespace: namespace,
+ Subsystem: subsystem,
+ Name: "dequeue_wait_time",
+ Help: "Time spent waiting for dequeuing tasks from queue",
+ }, labels),
+ storeAccessLatency: promauto.With(registerer).NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: namespace,
+ Subsystem: subsystem,
+ Name: "store_latency",
+ Help: "Latency in seconds of accessing the bloom store component",
+ }, append(labels, "operation")),
+ }
+}
+
+// worker is a data structure that consumes tasks from the request queue,
+// processes them, and returns the result/error back to the response channels of
+// the tasks.
+// It is responsible for multiplexing tasks so they can be processed in a more
+// efficient way.
+type worker struct {
+ services.Service
+
+ id string
+ cfg workerConfig
+ queue *queue.RequestQueue
+ store bloomshipper.Store
+ tasks *pendingTasks
+ logger log.Logger
+ metrics *workerMetrics
+}
+
+func newWorker(id string, cfg workerConfig, queue *queue.RequestQueue, store bloomshipper.Store, tasks *pendingTasks, logger log.Logger, metrics *workerMetrics) *worker {
+ w := &worker{
+ id: id,
+ cfg: cfg,
+ queue: queue,
+ store: store,
+ tasks: tasks,
+ logger: log.With(logger, "worker", id),
+ metrics: metrics,
+ }
+ w.Service = services.NewBasicService(w.starting, w.running, w.stopping).WithName(id)
+ return w
+}
+
+func (w *worker) starting(_ context.Context) error {
+ level.Debug(w.logger).Log("msg", "starting worker")
+ w.queue.RegisterConsumerConnection(w.id)
+ return nil
+}
+
+func (w *worker) running(ctx context.Context) error {
+ idx := queue.StartIndexWithLocalQueue
+
+ for {
+ select {
+
+ case <-ctx.Done():
+ return ctx.Err()
+
+ default:
+ taskCtx := context.Background()
+ dequeueStart := time.Now()
+ items, newIdx, err := w.queue.DequeueMany(taskCtx, idx, w.id, w.cfg.maxItems, w.cfg.maxWaitTime)
+ w.metrics.dequeueWaitTime.WithLabelValues(w.id).Observe(time.Since(dequeueStart).Seconds())
+ if err != nil {
+ // We only return an error if the queue is stopped and dequeuing did not yield any items
+ if err == queue.ErrStopped && len(items) == 0 {
+ return err
+ }
+ w.metrics.dequeueErrors.WithLabelValues(w.id).Inc()
+ level.Error(w.logger).Log("msg", "failed to dequeue tasks", "err", err, "items", len(items))
+ }
+ idx = newIdx
+
+ if len(items) == 0 {
+ w.queue.ReleaseRequests(items)
+ continue
+ }
+ w.metrics.dequeuedTasks.WithLabelValues(w.id).Add(float64(len(items)))
+
+ tasksPerDay := make(map[time.Time][]Task)
+
+ for _, item := range items {
+ task, ok := item.(Task)
+ if !ok {
+ // This really should never happen, because only the bloom gateway itself can enqueue tasks.
+ w.queue.ReleaseRequests(items)
+ return errors.Errorf("failed to cast dequeued item to Task: %v", item)
+ }
+ level.Debug(w.logger).Log("msg", "dequeued task", "task", task.ID)
+ w.tasks.Delete(task.ID)
+
+ fromDay, throughDay := task.Bounds()
+
+ if fromDay.Equal(throughDay) {
+ tasksPerDay[fromDay] = append(tasksPerDay[fromDay], task)
+ } else {
+ for i := fromDay; i.Before(throughDay); i = i.Add(24 * time.Hour) {
+ tasksPerDay[i] = append(tasksPerDay[i], task)
+ }
+ }
+ }
+
+ for day, tasks := range tasksPerDay {
+ logger := log.With(w.logger, "day", day)
+ level.Debug(logger).Log("msg", "process tasks", "tasks", len(tasks))
+
+ storeFetchStart := time.Now()
+ blockRefs, err := w.store.GetBlockRefs(taskCtx, tasks[0].Tenant, day, day.Add(Day).Add(-1*time.Nanosecond))
+ w.metrics.storeAccessLatency.WithLabelValues(w.id, "GetBlockRefs").Observe(time.Since(storeFetchStart).Seconds())
+ if err != nil {
+ for _, t := range tasks {
+ t.ErrCh <- err
+ }
+ // continue with tasks of next day
+ continue
+ }
+ // No blocks found.
+ // Since there are no blocks for the given tasks, we need to return the
+ // unfiltered list of chunk refs.
+ if len(blockRefs) == 0 {
+ level.Warn(logger).Log("msg", "no blocks found")
+ for _, t := range tasks {
+ for _, ref := range t.Request.Refs {
+ t.ResCh <- v1.Output{
+ Fp: model.Fingerprint(ref.Fingerprint),
+ Removals: nil,
+ }
+ }
+ }
+ // continue with tasks of next day
+ continue
+ }
+
+ boundedRefs := partitionFingerprintRange(tasks, blockRefs)
+ blockRefs = blockRefs[:0]
+ for _, b := range boundedRefs {
+ blockRefs = append(blockRefs, b.blockRef)
+ }
+
+ if w.cfg.processBlocksSequentially {
+ err = w.processBlocksSequentially(taskCtx, tasks[0].Tenant, day, blockRefs, boundedRefs)
+ } else {
+ err = w.processBlocksWithCallback(taskCtx, tasks[0].Tenant, day, blockRefs, boundedRefs)
+ }
+ if err != nil {
+ for _, t := range tasks {
+ t.ErrCh <- err
+ }
+ // continue with tasks of next day
+ continue
+ }
+ }
+
+ // return dequeued items back to the pool
+ w.queue.ReleaseRequests(items)
+
+ }
+ }
+}
+
+func (w *worker) stopping(err error) error {
+ level.Debug(w.logger).Log("msg", "stopping worker", "err", err)
+ w.queue.UnregisterConsumerConnection(w.id)
+ return nil
+}
+
+func (w *worker) processBlocksWithCallback(taskCtx context.Context, tenant string, day time.Time, blockRefs []bloomshipper.BlockRef, boundedRefs []boundedTasks) error {
+ return w.store.ForEach(taskCtx, tenant, blockRefs, func(bq *v1.BlockQuerier, minFp, maxFp uint64) error {
+ for _, b := range boundedRefs {
+ if b.blockRef.MinFingerprint == minFp && b.blockRef.MaxFingerprint == maxFp {
+ processBlock(bq, day, b.tasks)
+ return nil
+ }
+ }
+ return nil
+ })
+}
+
+func (w *worker) processBlocksSequentially(taskCtx context.Context, tenant string, day time.Time, blockRefs []bloomshipper.BlockRef, boundedRefs []boundedTasks) error {
+ storeFetchStart := time.Now()
+ blockQueriers, err := w.store.GetBlockQueriersForBlockRefs(taskCtx, tenant, blockRefs)
+ w.metrics.storeAccessLatency.WithLabelValues(w.id, "GetBlockQueriersForBlockRefs").Observe(time.Since(storeFetchStart).Seconds())
+ if err != nil {
+ return err
+ }
+
+ for i := range blockQueriers {
+ processBlock(blockQueriers[i].BlockQuerier, day, boundedRefs[i].tasks)
+ }
+ return nil
+}
+
+func processBlock(blockQuerier *v1.BlockQuerier, day time.Time, tasks []Task) {
+ it := newTaskMergeIterator(day, tasks...)
+ fq := blockQuerier.Fuse([]v1.PeekingIterator[v1.Request]{it})
+ err := fq.Run()
+ if err != nil {
+ for _, t := range tasks {
+ t.ErrCh <- errors.Wrap(err, "failed to run chunk check")
+ }
+ }
+}
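
The `running` loop above buckets dequeued tasks by day: a task whose bounds span several days is added to every 24h bucket it touches. A minimal standalone sketch of that expansion (illustrative only, mirroring the `tasksPerDay` loop including its exclusive upper bound):

```go
package bloomgateway

import (
	"fmt"
	"time"
)

// exampleSplitByDay expands a [from, through] day range into 24h buckets the
// same way the tasksPerDay loop does: if the task spans multiple days, through
// itself is excluded because of the i.Before(through) condition.
func exampleSplitByDay(from, through time.Time) []time.Time {
	if from.Equal(through) {
		return []time.Time{from}
	}
	var days []time.Time
	for i := from; i.Before(through); i = i.Add(24 * time.Hour) {
		days = append(days, i)
	}
	return days
}

func exampleSplitByDayUsage() {
	from := time.Date(2023, 11, 20, 0, 0, 0, 0, time.UTC)
	through := from.Add(48 * time.Hour)
	fmt.Println(exampleSplitByDay(from, through)) // buckets for Nov 20 and Nov 21
}
```
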
diff --git a/pkg/bloomutils/iter.go b/pkg/bloomutils/iter.go
new file mode 100644
index 0000000000000..fdbe4a5e62587
--- /dev/null
+++ b/pkg/bloomutils/iter.go
@@ -0,0 +1,37 @@
+package bloomutils
+
+import (
+ "io"
+
+ v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
+)
+
+// sortMergeIterator implements v1.Iterator
+type sortMergeIterator[T any, C comparable, R any] struct {
+ curr *R
+ heap *v1.HeapIterator[v1.IndexedValue[C]]
+ items []T
+ transform func(T, C, *R) *R
+ err error
+}
+
+func (it *sortMergeIterator[T, C, R]) Next() bool {
+ ok := it.heap.Next()
+ if !ok {
+ it.err = io.EOF
+ return false
+ }
+
+ group := it.heap.At()
+ it.curr = it.transform(it.items[group.Index()], group.Value(), it.curr)
+
+ return true
+}
+
+func (it *sortMergeIterator[T, C, R]) At() R {
+ return *it.curr
+}
+
+func (it *sortMergeIterator[T, C, R]) Err() error {
+ return it.err
+}
diff --git a/pkg/bloomutils/ring.go b/pkg/bloomutils/ring.go
new file mode 100644
index 0000000000000..08e62a13acb71
--- /dev/null
+++ b/pkg/bloomutils/ring.go
@@ -0,0 +1,146 @@
+// This file contains a bunch of utility functions for bloom components.
+// TODO: Find a better location for this package
+
+package bloomutils
+
+import (
+ "math"
+ "sort"
+
+ "github.com/grafana/dskit/ring"
+ "golang.org/x/exp/slices"
+
+ v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
+)
+
+type InstanceWithTokenRange struct {
+ Instance ring.InstanceDesc
+ MinToken, MaxToken uint32
+}
+
+func (i InstanceWithTokenRange) Cmp(token uint32) v1.BoundsCheck {
+ if token < i.MinToken {
+ return v1.Before
+ } else if token > i.MaxToken {
+ return v1.After
+ }
+ return v1.Overlap
+}
+
+type InstancesWithTokenRange []InstanceWithTokenRange
+
+func (i InstancesWithTokenRange) Contains(token uint32) bool {
+ for _, instance := range i {
+ if instance.Cmp(token) == v1.Overlap {
+ return true
+ }
+ }
+ return false
+}
+
+// GetInstanceWithTokenRange calculates the token range for the instance
+// with the given id, based on the first token in the ring.
+// This assumes that each instance in the ring is configured with only a single
+// token.
+func GetInstanceWithTokenRange(id string, instances []ring.InstanceDesc) InstancesWithTokenRange {
+
+	// Sorting the tokens of the instances would not be necessary if there were
+	// only a single token per instance; however, since we only assume a single
+	// token but do not enforce it, we keep the sorting.
+ for _, inst := range instances {
+ sort.Slice(inst.Tokens, func(i, j int) bool {
+ return inst.Tokens[i] < inst.Tokens[j]
+ })
+ }
+
+ // Sort instances
+ sort.Slice(instances, func(i, j int) bool {
+ return instances[i].Tokens[0] < instances[j].Tokens[0]
+ })
+
+ idx := slices.IndexFunc(instances, func(inst ring.InstanceDesc) bool {
+ return inst.Id == id
+ })
+
+ // instance with Id == id not found
+ if idx == -1 {
+ return InstancesWithTokenRange{}
+ }
+
+ i := uint32(idx)
+ n := uint32(len(instances))
+ step := math.MaxUint32 / n
+
+ minToken := step * i
+ maxToken := step*i + step - 1
+ if i == n-1 {
+		// extend the last token range to MaxUint32
+ maxToken = math.MaxUint32
+ }
+
+ return InstancesWithTokenRange{
+ {MinToken: minToken, MaxToken: maxToken, Instance: instances[i]},
+ }
+}
+
+// GetInstancesWithTokenRanges calculates the token ranges for a specific
+// instance with given id based on all tokens in the ring.
+// If the instances in the ring are configured with a single token, such as the
+// bloom compactor, use GetInstanceWithTokenRange() instead.
+func GetInstancesWithTokenRanges(id string, instances []ring.InstanceDesc) InstancesWithTokenRange {
+ servers := make([]InstanceWithTokenRange, 0, len(instances))
+ it := NewInstanceSortMergeIterator(instances)
+ var firstInst ring.InstanceDesc
+ var lastToken uint32
+ for it.Next() {
+ if firstInst.Id == "" {
+ firstInst = it.At().Instance
+ }
+ if it.At().Instance.Id == id {
+ servers = append(servers, it.At())
+ }
+ lastToken = it.At().MaxToken
+ }
+ // append token range from lastToken+1 to MaxUint32
+ // only if the instance with the first token is the current one
+ if len(servers) > 0 && firstInst.Id == id {
+ servers = append(servers, InstanceWithTokenRange{
+ MinToken: lastToken + 1,
+ MaxToken: math.MaxUint32,
+ Instance: servers[0].Instance,
+ })
+ }
+ return servers
+}
+
+// NewInstanceSortMergeIterator creates an iterator that yields InstanceWithTokenRange elements
+// whose tokens are sorted in ascending order.
+func NewInstanceSortMergeIterator(instances []ring.InstanceDesc) v1.Iterator[InstanceWithTokenRange] {
+ it := &sortMergeIterator[ring.InstanceDesc, uint32, InstanceWithTokenRange]{
+ items: instances,
+ transform: func(item ring.InstanceDesc, val uint32, prev *InstanceWithTokenRange) *InstanceWithTokenRange {
+ var prevToken uint32
+ if prev != nil {
+ prevToken = prev.MaxToken + 1
+ }
+ return &InstanceWithTokenRange{Instance: item, MinToken: prevToken, MaxToken: val}
+ },
+ }
+ sequences := make([]v1.PeekingIterator[v1.IndexedValue[uint32]], 0, len(instances))
+ for i := range instances {
+ sort.Slice(instances[i].Tokens, func(a, b int) bool {
+ return instances[i].Tokens[a] < instances[i].Tokens[b]
+ })
+ iter := v1.NewIterWithIndex[uint32](v1.NewSliceIter(instances[i].Tokens), i)
+ sequences = append(sequences, v1.NewPeekingIter[v1.IndexedValue[uint32]](iter))
+ }
+ it.heap = v1.NewHeapIterator(
+ func(i, j v1.IndexedValue[uint32]) bool {
+ return i.Value() < j.Value()
+ },
+ sequences...,
+ )
+ it.err = nil
+
+ return it
+}
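
`GetInstanceWithTokenRange` splits the uint32 token space evenly into n ranges and pads the last range up to `MaxUint32` to absorb any rounding remainder. A small worked example of the arithmetic for a ring of three single-token instances:

```go
package bloomutils

import (
	"fmt"
	"math"
)

// exampleEvenTokenSplit reproduces the arithmetic used in
// GetInstanceWithTokenRange for a ring of three single-token instances.
func exampleEvenTokenSplit() {
	n := uint32(3)
	step := uint32(math.MaxUint32) / n // 1431655765

	for i := uint32(0); i < n; i++ {
		minToken := step * i
		maxToken := step*i + step - 1
		if i == n-1 {
			// the last instance absorbs the rounding remainder
			maxToken = math.MaxUint32
		}
		fmt.Printf("instance %d: [%d, %d]\n", i, minToken, maxToken)
	}
	// instance 0: [0, 1431655764]
	// instance 1: [1431655765, 2863311529]
	// instance 2: [2863311530, 4294967295]
}
```
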
diff --git a/pkg/bloomutils/ring_test.go b/pkg/bloomutils/ring_test.go
new file mode 100644
index 0000000000000..30da072021edf
--- /dev/null
+++ b/pkg/bloomutils/ring_test.go
@@ -0,0 +1,112 @@
+package bloomutils
+
+import (
+ "math"
+ "testing"
+
+ "github.com/grafana/dskit/ring"
+ "github.com/stretchr/testify/require"
+)
+
+func TestBloomGatewayClient_SortInstancesByToken(t *testing.T) {
+ input := []ring.InstanceDesc{
+ {Id: "1", Tokens: []uint32{5, 9}},
+ {Id: "2", Tokens: []uint32{3, 7}},
+ {Id: "3", Tokens: []uint32{1}},
+ }
+ expected := []InstanceWithTokenRange{
+ {Instance: input[2], MinToken: 0, MaxToken: 1},
+ {Instance: input[1], MinToken: 2, MaxToken: 3},
+ {Instance: input[0], MinToken: 4, MaxToken: 5},
+ {Instance: input[1], MinToken: 6, MaxToken: 7},
+ {Instance: input[0], MinToken: 8, MaxToken: 9},
+ }
+
+ var i int
+ it := NewInstanceSortMergeIterator(input)
+ for it.Next() {
+ t.Log(expected[i], it.At())
+ require.Equal(t, expected[i], it.At())
+ i++
+ }
+}
+
+func TestBloomGatewayClient_GetInstancesWithTokenRanges(t *testing.T) {
+ t.Run("instance does not own first token in the ring", func(t *testing.T) {
+ input := []ring.InstanceDesc{
+ {Id: "1", Tokens: []uint32{5, 9}},
+ {Id: "2", Tokens: []uint32{3, 7}},
+ {Id: "3", Tokens: []uint32{1}},
+ }
+ expected := InstancesWithTokenRange{
+ {Instance: input[1], MinToken: 2, MaxToken: 3},
+ {Instance: input[1], MinToken: 6, MaxToken: 7},
+ }
+
+ result := GetInstancesWithTokenRanges("2", input)
+ require.Equal(t, expected, result)
+ })
+
+ t.Run("instance owns first token in the ring", func(t *testing.T) {
+ input := []ring.InstanceDesc{
+ {Id: "1", Tokens: []uint32{5, 9}},
+ {Id: "2", Tokens: []uint32{3, 7}},
+ {Id: "3", Tokens: []uint32{1}},
+ }
+ expected := InstancesWithTokenRange{
+ {Instance: input[2], MinToken: 0, MaxToken: 1},
+ {Instance: input[2], MinToken: 10, MaxToken: math.MaxUint32},
+ }
+
+ result := GetInstancesWithTokenRanges("3", input)
+ require.Equal(t, expected, result)
+ })
+}
+
+func TestBloomGatewayClient_GetInstanceWithTokenRange(t *testing.T) {
+ for name, tc := range map[string]struct {
+ id string
+ input []ring.InstanceDesc
+ expected InstancesWithTokenRange
+ }{
+ "first instance includes 0 token": {
+ id: "3",
+ input: []ring.InstanceDesc{
+ {Id: "1", Tokens: []uint32{3}},
+ {Id: "2", Tokens: []uint32{5}},
+ {Id: "3", Tokens: []uint32{1}},
+ },
+ expected: InstancesWithTokenRange{
+ {Instance: ring.InstanceDesc{Id: "3", Tokens: []uint32{1}}, MinToken: 0, MaxToken: math.MaxUint32/3 - 1},
+ },
+ },
+ "middle instance": {
+ id: "1",
+ input: []ring.InstanceDesc{
+ {Id: "1", Tokens: []uint32{3}},
+ {Id: "2", Tokens: []uint32{5}},
+ {Id: "3", Tokens: []uint32{1}},
+ },
+ expected: InstancesWithTokenRange{
+ {Instance: ring.InstanceDesc{Id: "1", Tokens: []uint32{3}}, MinToken: math.MaxUint32 / 3, MaxToken: math.MaxUint32/3*2 - 1},
+ },
+ },
+ "last instance includes MaxUint32 token": {
+ id: "2",
+ input: []ring.InstanceDesc{
+ {Id: "1", Tokens: []uint32{3}},
+ {Id: "2", Tokens: []uint32{5}},
+ {Id: "3", Tokens: []uint32{1}},
+ },
+ expected: InstancesWithTokenRange{
+ {Instance: ring.InstanceDesc{Id: "2", Tokens: []uint32{5}}, MinToken: math.MaxUint32 / 3 * 2, MaxToken: math.MaxUint32},
+ },
+ },
+ } {
+ tc := tc
+ t.Run(name, func(t *testing.T) {
+ result := GetInstanceWithTokenRange(tc.id, tc.input)
+ require.Equal(t, tc.expected, result)
+ })
+ }
+}
diff --git a/pkg/compactor/compactor.go b/pkg/compactor/compactor.go
index a45248af6bbd3..07e8389c5b843 100644
--- a/pkg/compactor/compactor.go
+++ b/pkg/compactor/compactor.go
@@ -124,8 +124,13 @@ func (cfg *Config) Validate() error {
return fmt.Errorf("compactor.delete-request-store should be configured when retention is enabled")
}
- if cfg.ApplyRetentionInterval != 0 && cfg.ApplyRetentionInterval%cfg.CompactionInterval != 0 {
- return fmt.Errorf("interval for applying retention should either be set to a 0 or a multiple of compaction interval")
+ if cfg.ApplyRetentionInterval == 0 {
+ cfg.ApplyRetentionInterval = cfg.CompactionInterval
+ }
+
+ if cfg.ApplyRetentionInterval == cfg.CompactionInterval {
+		// add some jitter to avoid running retention and compaction at the same time
+ cfg.ApplyRetentionInterval += minDuration(10*time.Minute, cfg.ApplyRetentionInterval/2)
}
if err := config.ValidatePathPrefix(cfg.DeleteRequestStoreKeyPrefix); err != nil {
@@ -153,6 +158,7 @@ type Compactor struct {
wg sync.WaitGroup
indexCompactors map[string]IndexCompactor
schemaConfig config.SchemaConfig
+ tableLocker *tableLocker
// Ring used for running a single compactor
ringLifecycler *ring.BasicLifecycler
@@ -193,6 +199,7 @@ func NewCompactor(cfg Config, objectStoreClients map[config.DayTime]client.Objec
ringPollPeriod: 5 * time.Second,
indexCompactors: map[string]IndexCompactor{},
schemaConfig: schemaConfig,
+ tableLocker: newTableLocker(),
}
ringStore, err := kv.NewClient(
@@ -503,28 +510,14 @@ func (c *Compactor) runCompactions(ctx context.Context) {
}
}()
- lastRetentionRunAt := time.Unix(0, 0)
- runCompaction := func() {
- applyRetention := false
- if c.cfg.RetentionEnabled && time.Since(lastRetentionRunAt) >= c.cfg.ApplyRetentionInterval {
- level.Info(util_log.Logger).Log("msg", "applying retention with compaction")
- applyRetention = true
- }
-
- err := c.RunCompaction(ctx, applyRetention)
- if err != nil {
- level.Error(util_log.Logger).Log("msg", "failed to run compaction", "err", err)
- }
-
- if applyRetention {
- lastRetentionRunAt = time.Now()
- }
+ // do the initial compaction
+ if err := c.RunCompaction(ctx, false); err != nil {
+ level.Error(util_log.Logger).Log("msg", "failed to run compaction", err)
}
c.wg.Add(1)
go func() {
defer c.wg.Done()
- runCompaction()
ticker := time.NewTicker(c.cfg.CompactionInterval)
defer ticker.Stop()
@@ -532,13 +525,38 @@ func (c *Compactor) runCompactions(ctx context.Context) {
for {
select {
case <-ticker.C:
- runCompaction()
+ if err := c.RunCompaction(ctx, false); err != nil {
+ level.Error(util_log.Logger).Log("msg", "failed to run compaction", err)
+ }
case <-ctx.Done():
return
}
}
}()
+
if c.cfg.RetentionEnabled {
+ c.wg.Add(1)
+ go func() {
+ defer c.wg.Done()
+ if err := c.RunCompaction(ctx, true); err != nil {
+ level.Error(util_log.Logger).Log("msg", "failed to apply retention", err)
+ }
+
+ ticker := time.NewTicker(c.cfg.ApplyRetentionInterval)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ if err := c.RunCompaction(ctx, true); err != nil {
+ level.Error(util_log.Logger).Log("msg", "failed to apply retention", err)
+ }
+ case <-ctx.Done():
+ return
+ }
+ }
+ }()
+
for _, container := range c.storeContainers {
c.wg.Add(1)
go func(sc storeContainer) {
@@ -576,6 +594,37 @@ func (c *Compactor) CompactTable(ctx context.Context, tableName string, applyRet
return fmt.Errorf("index store client not found for period starting at %s", schemaCfg.From.String())
}
+ for {
+ locked, lockWaiterChan := c.tableLocker.lockTable(tableName)
+ if locked {
+ break
+ }
+		// do not wait for the lock to be released if we are only compacting the table, since
+		// compaction should happen more frequently than retention, and retention compacts un-compacted files anyway.
+ if !applyRetention {
+ hasUncompactedIndex, err := tableHasUncompactedIndex(ctx, tableName, sc.indexStorageClient)
+ if err != nil {
+ level.Error(util_log.Logger).Log("msg", "failed to check if table has uncompacted index", "table_name", tableName)
+ hasUncompactedIndex = true
+ }
+
+ if hasUncompactedIndex {
+ c.metrics.skippedCompactingLockedTables.WithLabelValues(tableName).Inc()
+ level.Warn(util_log.Logger).Log("msg", "skipped compacting table which likely has uncompacted index since it is locked by retention", "table_name", tableName)
+ }
+ return nil
+ }
+
+		// we are applying retention and processing delete requests, so
+		// wait for the lock to be released since we can't mark delete requests as processed without checking all the tables
+ select {
+ case <-lockWaiterChan:
+ case <-ctx.Done():
+ return nil
+ }
+ }
+ defer c.tableLocker.unlockTable(tableName)
+
table, err := newTable(ctx, filepath.Join(c.cfg.WorkingDirectory, tableName), sc.indexStorageClient, indexCompactor,
schemaCfg, sc.tableMarker, c.expirationChecker, c.cfg.UploadParallelism)
if err != nil {
@@ -601,7 +650,7 @@ func (c *Compactor) RegisterIndexCompactor(indexType string, indexCompactor Inde
c.indexCompactors[indexType] = indexCompactor
}
-func (c *Compactor) RunCompaction(ctx context.Context, applyRetention bool) error {
+func (c *Compactor) RunCompaction(ctx context.Context, applyRetention bool) (err error) {
status := statusSuccess
start := time.Now()
@@ -610,13 +659,22 @@ func (c *Compactor) RunCompaction(ctx context.Context, applyRetention bool) erro
}
defer func() {
- c.metrics.compactTablesOperationTotal.WithLabelValues(status).Inc()
+ if err != nil {
+ status = statusFailure
+ }
+ if applyRetention {
+ c.metrics.applyRetentionOperationTotal.WithLabelValues(status).Inc()
+ } else {
+ c.metrics.compactTablesOperationTotal.WithLabelValues(status).Inc()
+ }
runtime := time.Since(start)
if status == statusSuccess {
- c.metrics.compactTablesOperationDurationSeconds.Set(runtime.Seconds())
- c.metrics.compactTablesOperationLastSuccess.SetToCurrentTime()
if applyRetention {
+ c.metrics.applyRetentionOperationDurationSeconds.Set(runtime.Seconds())
c.metrics.applyRetentionLastSuccess.SetToCurrentTime()
+ } else {
+ c.metrics.compactTablesOperationDurationSeconds.Set(runtime.Seconds())
+ c.metrics.compactTablesOperationLastSuccess.SetToCurrentTime()
}
}
@@ -627,7 +685,7 @@ func (c *Compactor) RunCompaction(ctx context.Context, applyRetention bool) erro
c.expirationChecker.MarkPhaseFailed()
}
}
- if runtime > c.cfg.CompactionInterval {
+ if !applyRetention && runtime > c.cfg.CompactionInterval {
level.Warn(util_log.Logger).Log("msg", fmt.Sprintf("last compaction took %s which is longer than the compaction interval of %s, this can lead to duplicate compactors running if not running a standalone compactor instance.", runtime, c.cfg.CompactionInterval))
}
}()
@@ -644,7 +702,6 @@ func (c *Compactor) RunCompaction(ctx context.Context, applyRetention bool) erro
sc.indexStorageClient.RefreshIndexTableNamesCache(ctx)
tbls, err := sc.indexStorageClient.ListTables(ctx)
if err != nil {
- status = statusFailure
return fmt.Errorf("failed to list tables: %w", err)
}
@@ -721,12 +778,15 @@ func (c *Compactor) RunCompaction(ctx context.Context, applyRetention bool) erro
for i := 0; i < c.cfg.MaxCompactionParallelism; i++ {
err := <-errChan
if err != nil && firstErr == nil {
- status = statusFailure
firstErr = err
}
}
- return firstErr
+ if firstErr != nil {
+ return firstErr
+ }
+
+ return ctx.Err()
}
type expirationChecker struct {
@@ -824,3 +884,11 @@ func schemaPeriodForTable(cfg config.SchemaConfig, tableName string) (config.Per
return schemaCfg, true
}
+
+func minDuration(x time.Duration, y time.Duration) time.Duration {
+ if x < y {
+ return x
+ }
+
+ return y
+}
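
With the validation change above, leaving `compactor.apply-retention-interval` at 0 defaults it to the compaction interval, and when the two intervals are equal a bit of jitter is added so the compaction and retention loops do not fire at the same instant. A quick sketch of the resulting value, assuming a 10m compaction interval for illustration:

```go
package compactor

import (
	"fmt"
	"time"
)

// exampleRetentionJitter shows the effective apply-retention interval when it
// is left unset and the compaction interval is 10m (assumed for illustration).
func exampleRetentionJitter() {
	compactionInterval := 10 * time.Minute
	applyRetentionInterval := time.Duration(0) // not configured

	if applyRetentionInterval == 0 {
		applyRetentionInterval = compactionInterval
	}
	if applyRetentionInterval == compactionInterval {
		// jitter is the smaller of 10 minutes or half the interval
		applyRetentionInterval += minDuration(10*time.Minute, applyRetentionInterval/2)
	}

	fmt.Println(applyRetentionInterval) // 15m0s
}
```
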
diff --git a/pkg/compactor/compactor_test.go b/pkg/compactor/compactor_test.go
index 854339ca6ecaf..17df040290732 100644
--- a/pkg/compactor/compactor_test.go
+++ b/pkg/compactor/compactor_test.go
@@ -10,6 +10,8 @@ import (
"time"
"github.com/grafana/dskit/flagext"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/testutil"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
@@ -18,6 +20,7 @@ import (
"github.com/grafana/loki/pkg/storage/config"
"github.com/grafana/loki/pkg/util/constants"
loki_net "github.com/grafana/loki/pkg/util/net"
+ "github.com/grafana/loki/pkg/validation"
)
const indexTablePrefix = "table_"
@@ -41,7 +44,8 @@ func setupTestCompactor(t *testing.T, objectClients map[config.DayTime]client.Ob
cfg := Config{}
flagext.DefaultValues(&cfg)
cfg.WorkingDirectory = filepath.Join(tempDir, workingDirName)
- cfg.RetentionEnabled = false
+ cfg.RetentionEnabled = true
+ cfg.DeleteRequestStore = periodConfigs[len(periodConfigs)-1].ObjectType
cfg.CompactorRing.InstanceAddr = localhost
if loopbackIFace, err := loki_net.LoopbackInterfaceName(); err == nil {
@@ -50,9 +54,16 @@ func setupTestCompactor(t *testing.T, objectClients map[config.DayTime]client.Ob
require.NoError(t, cfg.Validate())
- c, err := NewCompactor(cfg, objectClients, nil, config.SchemaConfig{
+ defaultLimits := validation.Limits{}
+ flagext.DefaultValues(&defaultLimits)
+ require.NoError(t, defaultLimits.RetentionPeriod.Set("30d"))
+
+ overrides, err := validation.NewOverrides(defaultLimits, nil)
+ require.NoError(t, err)
+
+ c, err := NewCompactor(cfg, objectClients, objectClients[periodConfigs[len(periodConfigs)-1].From], config.SchemaConfig{
Configs: periodConfigs,
- }, nil, nil, constants.Loki)
+ }, overrides, prometheus.NewPedanticRegistry(), constants.Loki)
require.NoError(t, err)
c.RegisterIndexCompactor("dummy", testIndexCompactor{})
@@ -292,3 +303,144 @@ func Test_tableSort(t *testing.T) {
sortTablesByRange(intervals)
require.Equal(t, []string{"index_19195", "index_19192", "index_19191"}, intervals)
}
+
+func TestCompactor_TableLocking(t *testing.T) {
+ commonDBsConfig := IndexesConfig{NumUnCompactedFiles: 5}
+ perUserDBsConfig := PerUserIndexesConfig{}
+
+ daySeconds := int64(24 * time.Hour / time.Second)
+ tableNumEnd := time.Now().Unix() / daySeconds
+ tableNumStart := tableNumEnd - 5
+
+ setupCompactorAndIndex := func(tempDir string) *Compactor {
+ tablesPath := filepath.Join(tempDir, "index")
+
+ periodConfigs := []config.PeriodConfig{
+ {
+ From: config.DayTime{Time: model.Time(0)},
+ IndexType: "dummy",
+ ObjectType: "fs_01",
+ IndexTables: config.IndexPeriodicTableConfig{
+ PathPrefix: "index/",
+ PeriodicTableConfig: config.PeriodicTableConfig{
+ Prefix: indexTablePrefix,
+ Period: config.ObjectStorageIndexRequiredPeriod,
+ }},
+ },
+ }
+
+ for i := tableNumStart; i <= tableNumEnd; i++ {
+ SetupTable(t, filepath.Join(tablesPath, fmt.Sprintf("%s%d", indexTablePrefix, i)), IndexesConfig{NumUnCompactedFiles: 5}, PerUserIndexesConfig{})
+ }
+
+ var (
+ objectClients = map[config.DayTime]client.ObjectClient{}
+ err error
+ )
+ objectClients[periodConfigs[0].From], err = local.NewFSObjectClient(local.FSConfig{Directory: tempDir})
+ require.NoError(t, err)
+
+ return setupTestCompactor(t, objectClients, periodConfigs, tempDir)
+ }
+
+ for _, tc := range []struct {
+ name string
+ lockTable string
+ applyRetention bool
+
+ retentionShouldTimeout bool
+ }{
+ {
+ name: "no table locked - not applying retention",
+ },
+ {
+ name: "no table locked - applying retention",
+ applyRetention: true,
+ },
+ {
+ name: "first table locked - not applying retention",
+ lockTable: fmt.Sprintf("%s%d", indexTablePrefix, tableNumEnd),
+ },
+ {
+ name: "first table locked - applying retention",
+ lockTable: fmt.Sprintf("%s%d", indexTablePrefix, tableNumEnd),
+ applyRetention: true,
+ retentionShouldTimeout: true,
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ tempDir := t.TempDir()
+ tablesPath := filepath.Join(tempDir, "index")
+ compactor := setupCompactorAndIndex(tempDir)
+
+ // run the compaction twice, 2nd time without any table locking
+ for n := 1; n <= 2; n++ {
+ t.Run(fmt.Sprintf("%d", n), func(t *testing.T) {
+ // lock table only for the first run
+ if n == 1 && tc.lockTable != "" {
+ locked, _ := compactor.tableLocker.lockTable(tc.lockTable)
+ require.True(t, locked)
+
+ defer compactor.tableLocker.unlockTable(tc.lockTable)
+ }
+
+				// set a timeout so that retention does not get blocked forever on acquiring the table lock.
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+ defer cancel()
+
+ err := compactor.RunCompaction(ctx, tc.applyRetention)
+				// retention should not time out after the first run since we won't be locking the table
+ if n == 1 && tc.retentionShouldTimeout {
+ require.ErrorIs(t, err, context.DeadlineExceeded)
+ require.Equal(t, float64(1), testutil.ToFloat64(compactor.metrics.applyRetentionOperationTotal.WithLabelValues(statusFailure)))
+ require.Equal(t, float64(0), testutil.ToFloat64(compactor.metrics.compactTablesOperationTotal.WithLabelValues(statusFailure)))
+ return
+ }
+ require.NoError(t, err)
+
+ if n > 1 && tc.applyRetention && tc.retentionShouldTimeout {
+					// this should be the first successful run if retention was expected to time out during the first run
+ require.Equal(t, float64(1), testutil.ToFloat64(compactor.metrics.applyRetentionOperationTotal.WithLabelValues(statusSuccess)))
+ } else {
+ // else it should have succeeded during all the n runs
+ if tc.applyRetention {
+ require.Equal(t, float64(n), testutil.ToFloat64(compactor.metrics.applyRetentionOperationTotal.WithLabelValues(statusSuccess)))
+ } else {
+ require.Equal(t, float64(n), testutil.ToFloat64(compactor.metrics.compactTablesOperationTotal.WithLabelValues(statusSuccess)))
+ }
+ }
+ if tc.applyRetention {
+ require.Equal(t, float64(0), testutil.ToFloat64(compactor.metrics.compactTablesOperationTotal.WithLabelValues(statusSuccess)))
+ } else {
+ require.Equal(t, float64(0), testutil.ToFloat64(compactor.metrics.applyRetentionOperationTotal.WithLabelValues(statusSuccess)))
+ }
+
+				// if the table was locked and compaction ran without retention then only the locked table should have been skipped
+ if tc.lockTable != "" {
+ if tc.applyRetention {
+ require.Equal(t, float64(0), testutil.ToFloat64(compactor.metrics.skippedCompactingLockedTables.WithLabelValues(tc.lockTable)))
+ } else {
+ require.Equal(t, float64(1), testutil.ToFloat64(compactor.metrics.skippedCompactingLockedTables.WithLabelValues(tc.lockTable)))
+ }
+ }
+
+ for tableNum := tableNumStart; tableNum <= tableNumEnd; tableNum++ {
+ name := fmt.Sprintf("%s%d", indexTablePrefix, tableNum)
+ files, err := os.ReadDir(filepath.Join(tablesPath, name))
+ require.NoError(t, err)
+
+ if n == 1 && name == tc.lockTable {
+ // locked table should not be compacted during first run
+ require.Len(t, files, 5)
+ } else {
+ require.Len(t, files, 1)
+ require.True(t, strings.HasSuffix(files[0].Name(), ".gz"))
+
+ verifyCompactedIndexTable(t, commonDBsConfig, perUserDBsConfig, filepath.Join(tablesPath, name))
+ }
+ }
+ })
+ }
+ })
+ }
+}
diff --git a/pkg/compactor/metrics.go b/pkg/compactor/metrics.go
index b81ae2ab51da4..96fc9b16541e1 100644
--- a/pkg/compactor/metrics.go
+++ b/pkg/compactor/metrics.go
@@ -11,11 +11,14 @@ const (
)
type metrics struct {
- compactTablesOperationTotal *prometheus.CounterVec
- compactTablesOperationDurationSeconds prometheus.Gauge
- compactTablesOperationLastSuccess prometheus.Gauge
- applyRetentionLastSuccess prometheus.Gauge
- compactorRunning prometheus.Gauge
+ compactTablesOperationTotal *prometheus.CounterVec
+ compactTablesOperationDurationSeconds prometheus.Gauge
+ compactTablesOperationLastSuccess prometheus.Gauge
+ applyRetentionOperationTotal *prometheus.CounterVec
+ applyRetentionOperationDurationSeconds prometheus.Gauge
+ applyRetentionLastSuccess prometheus.Gauge
+ compactorRunning prometheus.Gauge
+ skippedCompactingLockedTables *prometheus.CounterVec
}
func newMetrics(r prometheus.Registerer) *metrics {
@@ -35,8 +38,18 @@ func newMetrics(r prometheus.Registerer) *metrics {
Name: "compact_tables_operation_last_successful_run_timestamp_seconds",
Help: "Unix timestamp of the last successful compaction run",
}),
+ applyRetentionOperationTotal: promauto.With(r).NewCounterVec(prometheus.CounterOpts{
+ Namespace: "loki_compactor",
+ Name: "apply_retention_operation_total",
+			Help:      "Total number of attempts to apply retention, with status",
+ }, []string{"status"}),
+ applyRetentionOperationDurationSeconds: promauto.With(r).NewGauge(prometheus.GaugeOpts{
+ Namespace: "loki_compactor",
+ Name: "apply_retention_operation_duration_seconds",
+ Help: "Time (in seconds) spent in applying retention",
+ }),
applyRetentionLastSuccess: promauto.With(r).NewGauge(prometheus.GaugeOpts{
- Namespace: "loki_boltdb_shipper",
+ Namespace: "loki_compactor",
Name: "apply_retention_last_successful_run_timestamp_seconds",
Help: "Unix timestamp of the last successful retention run",
}),
@@ -45,6 +58,11 @@ func newMetrics(r prometheus.Registerer) *metrics {
Name: "compactor_running",
Help: "Value will be 1 if compactor is currently running on this instance",
}),
+ skippedCompactingLockedTables: promauto.With(r).NewCounterVec(prometheus.CounterOpts{
+ Namespace: "loki_compactor",
+ Name: "skipped_compacting_locked_table_total",
+			Help:      "Count of uncompacted tables skipped because they are locked by retention",
+ }, []string{"table_name"}),
}
return &m
diff --git a/pkg/compactor/table.go b/pkg/compactor/table.go
index 92059a7c15e29..b7b94627c7415 100644
--- a/pkg/compactor/table.go
+++ b/pkg/compactor/table.go
@@ -265,3 +265,11 @@ func (t *table) openCompactedIndexForRetention(idxSet *indexSet) error {
return nil
}
+
+// tableHasUncompactedIndex returns true if the table has more than one common index file.
+// We check for more than one because the earlier boltdb-shipper index type had no per-tenant index, so there would only be common index files.
+// In case of a per-tenant index, it is okay to consider the table compacted since having just one uncompacted index file for a while should be fine.
+func tableHasUncompactedIndex(ctx context.Context, tableName string, indexStorageClient storage.Client) (bool, error) {
+ commonIndexFiles, _, err := indexStorageClient.ListFiles(ctx, tableName, false)
+ return len(commonIndexFiles) > 1, err
+}
diff --git a/pkg/compactor/table_locker.go b/pkg/compactor/table_locker.go
new file mode 100644
index 0000000000000..bce818a5d2b62
--- /dev/null
+++ b/pkg/compactor/table_locker.go
@@ -0,0 +1,52 @@
+package compactor
+
+import "sync"
+
+type lockWaiterChan chan struct{}
+
+type tableLocker struct {
+ lockedTables map[string]lockWaiterChan
+ lockedTablesMtx sync.RWMutex
+}
+
+func newTableLocker() *tableLocker {
+ return &tableLocker{
+ lockedTables: map[string]lockWaiterChan{},
+ }
+}
+
+// lockTable attempts to lock a table. It returns true if the lock was acquired for the caller.
+// It also returns a channel which the caller can watch to detect when the table gets unlocked, in case it was already locked by some other caller.
+func (t *tableLocker) lockTable(tableName string) (bool, <-chan struct{}) {
+ locked := false
+
+ t.lockedTablesMtx.RLock()
+ c, ok := t.lockedTables[tableName]
+ t.lockedTablesMtx.RUnlock()
+ if ok {
+ return false, c
+ }
+
+ t.lockedTablesMtx.Lock()
+ defer t.lockedTablesMtx.Unlock()
+
+ c, ok = t.lockedTables[tableName]
+ if !ok {
+ t.lockedTables[tableName] = make(chan struct{})
+ c = t.lockedTables[tableName]
+ locked = true
+ }
+
+ return locked, c
+}
+
+func (t *tableLocker) unlockTable(tableName string) {
+ t.lockedTablesMtx.Lock()
+ defer t.lockedTablesMtx.Unlock()
+
+ c, ok := t.lockedTables[tableName]
+ if ok {
+ close(c)
+ }
+ delete(t.lockedTables, tableName)
+}
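
A minimal sketch of how a caller is expected to use the locker, mirroring the loop added to `CompactTable` above: try to take the lock, otherwise wait on the returned channel (or the context), and unlock when done.

```go
package compactor

import "context"

// exampleWithTableLock runs fn while holding the lock for tableName, waiting
// for any current holder to release it first (illustrative only).
func exampleWithTableLock(ctx context.Context, locker *tableLocker, tableName string, fn func() error) error {
	for {
		locked, lockWaiter := locker.lockTable(tableName)
		if locked {
			break
		}
		select {
		case <-lockWaiter: // previous holder called unlockTable, try again
		case <-ctx.Done():
			return ctx.Err()
		}
	}
	defer locker.unlockTable(tableName)

	return fn()
}
```
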
diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go
index 963b5cc4302b5..98dde0b915154 100644
--- a/pkg/distributor/distributor.go
+++ b/pkg/distributor/distributor.go
@@ -99,6 +99,7 @@ type Distributor struct {
ingestersRing ring.ReadRing
validator *Validator
pool *ring_client.Pool
+ tee Tee
rateStore RateStore
shardTracker *ShardTracker
@@ -136,6 +137,7 @@ func New(
overrides Limits,
registerer prometheus.Registerer,
metricsNamespace string,
+ tee Tee,
logger log.Logger,
) (*Distributor, error) {
factory := cfg.factory
@@ -182,6 +184,7 @@ func New(
shardTracker: NewShardTracker(),
healthyInstancesCount: atomic.NewUint32(0),
rateLimitStrat: rateLimitStrat,
+ tee: tee,
ingesterAppends: promauto.With(registerer).NewCounterVec(prometheus.CounterOpts{
Namespace: constants.Loki,
Name: "distributor_ingester_appends_total",
@@ -272,9 +275,14 @@ func (d *Distributor) stopping(_ error) error {
return services.StopManagerAndAwaitStopped(context.Background(), d.subservices)
}
+type KeyedStream struct {
+ HashKey uint32
+ Stream logproto.Stream
+}
+
// TODO taken from Cortex, see if we can refactor out an usable interface.
type streamTracker struct {
- stream logproto.Stream
+ KeyedStream
minSuccess int
maxFailures int
succeeded atomic.Int32
@@ -305,8 +313,7 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log
// First we flatten out the request into a list of samples.
// We use the heuristic of 1 sample per TS to size the array.
// We also work out the hash value at the same time.
- streams := make([]streamTracker, 0, len(req.Streams))
- keys := make([]uint32, 0, len(req.Streams))
+ streams := make([]KeyedStream, 0, len(req.Streams))
validatedLineSize := 0
validatedLineCount := 0
@@ -379,12 +386,12 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log
shardStreamsCfg := d.validator.Limits.ShardStreams(tenantID)
if shardStreamsCfg.Enabled {
- derivedKeys, derivedStreams := d.shardStream(stream, pushSize, tenantID)
- keys = append(keys, derivedKeys...)
- streams = append(streams, derivedStreams...)
+ streams = append(streams, d.shardStream(stream, pushSize, tenantID)...)
} else {
- keys = append(keys, lokiring.TokenFor(tenantID, stream.Labels))
- streams = append(streams, streamTracker{stream: stream})
+ streams = append(streams, KeyedStream{
+ HashKey: lokiring.TokenFor(tenantID, stream.Labels),
+ Stream: stream,
+ })
}
}
}()
@@ -410,9 +417,16 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log
return nil, httpgrpc.Errorf(http.StatusTooManyRequests, err.Error())
}
+ // Nil check for performance reasons, to avoid dynamic lookup and/or no-op
+ // function calls that cannot be inlined.
+ if d.tee != nil {
+ d.tee.Duplicate(streams)
+ }
+
const maxExpectedReplicationSet = 5 // typical replication factor 3 plus one for inactive plus one for luck
var descs [maxExpectedReplicationSet]ring.InstanceDesc
+ streamTrackers := make([]streamTracker, len(streams))
streamsByIngester := map[string][]*streamTracker{}
ingesterDescs := map[string]ring.InstanceDesc{}
@@ -425,16 +439,19 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log
}()
}
- for i, key := range keys {
- replicationSet, err := d.ingestersRing.Get(key, ring.WriteNoExtend, descs[:0], nil, nil)
+ for i, stream := range streams {
+ replicationSet, err := d.ingestersRing.Get(stream.HashKey, ring.WriteNoExtend, descs[:0], nil, nil)
if err != nil {
return err
}
- streams[i].minSuccess = len(replicationSet.Instances) - replicationSet.MaxErrors
- streams[i].maxFailures = replicationSet.MaxErrors
+ streamTrackers[i] = streamTracker{
+ KeyedStream: stream,
+ minSuccess: len(replicationSet.Instances) - replicationSet.MaxErrors,
+ maxFailures: replicationSet.MaxErrors,
+ }
for _, ingester := range replicationSet.Instances {
- streamsByIngester[ingester.Addr] = append(streamsByIngester[ingester.Addr], &streams[i])
+ streamsByIngester[ingester.Addr] = append(streamsByIngester[ingester.Addr], &streamTrackers[i])
ingesterDescs[ingester.Addr] = ingester
}
}
@@ -475,13 +492,13 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log
// streams and their associated keys for hashing to ingesters.
//
// The number of shards is limited by the number of entries.
-func (d *Distributor) shardStream(stream logproto.Stream, pushSize int, tenantID string) ([]uint32, []streamTracker) {
+func (d *Distributor) shardStream(stream logproto.Stream, pushSize int, tenantID string) []KeyedStream {
shardStreamsCfg := d.validator.Limits.ShardStreams(tenantID)
logger := log.With(util_log.WithUserID(tenantID, d.logger), "stream", stream.Labels)
shardCount := d.shardCountFor(logger, &stream, pushSize, tenantID, shardStreamsCfg)
if shardCount <= 1 {
- return []uint32{lokiring.TokenFor(tenantID, stream.Labels)}, []streamTracker{{stream: stream}}
+ return []KeyedStream{{HashKey: lokiring.TokenFor(tenantID, stream.Labels), Stream: stream}}
}
d.streamShardCount.Inc()
@@ -492,31 +509,30 @@ func (d *Distributor) shardStream(stream logproto.Stream, pushSize int, tenantID
return d.divideEntriesBetweenShards(tenantID, shardCount, shardStreamsCfg, stream)
}
-func (d *Distributor) divideEntriesBetweenShards(tenantID string, totalShards int, shardStreamsCfg *shardstreams.Config, stream logproto.Stream) ([]uint32, []streamTracker) {
- derivedKeys, derivedStreams := d.createShards(stream, totalShards, tenantID, shardStreamsCfg)
+func (d *Distributor) divideEntriesBetweenShards(tenantID string, totalShards int, shardStreamsCfg *shardstreams.Config, stream logproto.Stream) []KeyedStream {
+ derivedStreams := d.createShards(stream, totalShards, tenantID, shardStreamsCfg)
for i := 0; i < len(stream.Entries); i++ {
streamIndex := i % len(derivedStreams)
- entries := append(derivedStreams[streamIndex].stream.Entries, stream.Entries[i])
- derivedStreams[streamIndex].stream.Entries = entries
+ entries := append(derivedStreams[streamIndex].Stream.Entries, stream.Entries[i])
+ derivedStreams[streamIndex].Stream.Entries = entries
}
- return derivedKeys, derivedStreams
+ return derivedStreams
}
-func (d *Distributor) createShards(stream logproto.Stream, totalShards int, tenantID string, shardStreamsCfg *shardstreams.Config) ([]uint32, []streamTracker) {
+func (d *Distributor) createShards(stream logproto.Stream, totalShards int, tenantID string, shardStreamsCfg *shardstreams.Config) []KeyedStream {
var (
streamLabels = labelTemplate(stream.Labels, d.logger)
streamPattern = streamLabels.String()
- derivedKeys = make([]uint32, 0, totalShards)
- derivedStreams = make([]streamTracker, 0, totalShards)
+ derivedStreams = make([]KeyedStream, 0, totalShards)
streamCount = streamCount(totalShards, stream)
)
if totalShards <= 0 {
level.Error(d.logger).Log("msg", "attempt to create shard with zeroed total shards", "org_id", tenantID, "stream", stream.Labels, "entries_len", len(stream.Entries))
- return derivedKeys, derivedStreams
+ return derivedStreams
}
entriesPerShard := int(math.Ceil(float64(len(stream.Entries)) / float64(totalShards)))
@@ -525,8 +541,10 @@ func (d *Distributor) createShards(stream logproto.Stream, totalShards int, tena
shardNum := (startShard + i) % totalShards
shard := d.createShard(streamLabels, streamPattern, shardNum, entriesPerShard)
- derivedKeys = append(derivedKeys, lokiring.TokenFor(tenantID, shard.Labels))
- derivedStreams = append(derivedStreams, streamTracker{stream: shard})
+ derivedStreams = append(derivedStreams, KeyedStream{
+ HashKey: lokiring.TokenFor(tenantID, shard.Labels),
+ Stream: shard,
+ })
if shardStreamsCfg.LoggingEnabled {
level.Info(d.logger).Log("msg", "stream derived from sharding", "src-stream", stream.Labels, "derived-stream", shard.Labels)
@@ -534,7 +552,7 @@ func (d *Distributor) createShards(stream logproto.Stream, totalShards int, tena
}
d.shardTracker.SetLastShardNum(tenantID, stream.Hash, startShard+streamCount)
- return derivedKeys, derivedStreams
+ return derivedStreams
}
func streamCount(totalShards int, stream logproto.Stream) int {
@@ -649,7 +667,7 @@ func (d *Distributor) sendStreamsErr(ctx context.Context, ingester ring.Instance
Streams: make([]logproto.Stream, len(streams)),
}
for i, s := range streams {
- req.Streams[i] = s.stream
+ req.Streams[i] = s.Stream
}
_, err = c.(logproto.PusherClient).Push(ctx, req)
diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go
index e7899f7ea593c..5a03fe98e94cc 100644
--- a/pkg/distributor/distributor_test.go
+++ b/pkg/distributor/distributor_test.go
@@ -616,16 +616,16 @@ func TestStreamShard(t *testing.T) {
shardTracker: NewShardTracker(),
}
- _, derivedStreams := d.shardStream(baseStream, tc.streamSize, "fake")
+ derivedStreams := d.shardStream(baseStream, tc.streamSize, "fake")
require.Len(t, derivedStreams, tc.wantDerivedStreamSize)
for _, s := range derivedStreams {
// Generate sorted labels
- lbls, err := syntax.ParseLabels(s.stream.Labels)
+ lbls, err := syntax.ParseLabels(s.Stream.Labels)
require.NoError(t, err)
- require.Equal(t, lbls.Hash(), s.stream.Hash)
- require.Equal(t, lbls.String(), s.stream.Labels)
+ require.Equal(t, lbls.Hash(), s.Stream.Hash)
+ require.Equal(t, lbls.String(), s.Stream.Labels)
}
})
}
@@ -661,23 +661,23 @@ func TestStreamShardAcrossCalls(t *testing.T) {
shardTracker: NewShardTracker(),
}
- _, derivedStreams := d.shardStream(baseStream, streamRate, "fake")
+ derivedStreams := d.shardStream(baseStream, streamRate, "fake")
require.Len(t, derivedStreams, 2)
for i, s := range derivedStreams {
- require.Len(t, s.stream.Entries, 1)
- lbls, err := syntax.ParseLabels(s.stream.Labels)
+ require.Len(t, s.Stream.Entries, 1)
+ lbls, err := syntax.ParseLabels(s.Stream.Labels)
require.NoError(t, err)
require.Equal(t, lbls[0].Value, fmt.Sprint(i))
}
- _, derivedStreams = d.shardStream(baseStream, streamRate, "fake")
+ derivedStreams = d.shardStream(baseStream, streamRate, "fake")
require.Len(t, derivedStreams, 2)
for i, s := range derivedStreams {
- require.Len(t, s.stream.Entries, 1)
- lbls, err := syntax.ParseLabels(s.stream.Labels)
+ require.Len(t, s.Stream.Entries, 1)
+ lbls, err := syntax.ParseLabels(s.Stream.Labels)
require.NoError(t, err)
require.Equal(t, lbls[0].Value, fmt.Sprint(i+2))
@@ -1153,7 +1153,7 @@ func prepare(t *testing.T, numDistributors, numIngesters int, limits *validation
overrides, err := validation.NewOverrides(*limits, nil)
require.NoError(t, err)
- d, err := New(distributorConfig, clientConfig, runtime.DefaultTenantConfigs(), ingestersRing, overrides, prometheus.NewPedanticRegistry(), constants.Loki, log.NewNopLogger())
+ d, err := New(distributorConfig, clientConfig, runtime.DefaultTenantConfigs(), ingestersRing, overrides, prometheus.NewPedanticRegistry(), constants.Loki, nil, log.NewNopLogger())
require.NoError(t, err)
require.NoError(t, services.StartAndAwaitRunning(context.Background(), d))
distributors[i] = d
@@ -1247,3 +1247,65 @@ type fakeRateStore struct {
func (s *fakeRateStore) RateFor(_ string, _ uint64) (int64, float64) {
return s.rate, s.pushRate
}
+
+type mockTee struct {
+ mu sync.Mutex
+ duplicated [][]KeyedStream
+}
+
+func (mt *mockTee) Duplicate(streams []KeyedStream) {
+ mt.mu.Lock()
+ defer mt.mu.Unlock()
+ mt.duplicated = append(mt.duplicated, streams)
+}
+
+func TestDistributorTee(t *testing.T) {
+ data := []*logproto.PushRequest{
+ {
+ Streams: []logproto.Stream{
+ {
+ Labels: "{job=\"foo\"}",
+ Entries: []logproto.Entry{
+ {Timestamp: time.Unix(123456, 0), Line: "line 1"},
+ {Timestamp: time.Unix(123457, 0), Line: "line 2"},
+ },
+ },
+ },
+ },
+ {
+ Streams: []logproto.Stream{
+ {
+ Labels: "{job=\"foo\"}",
+ Entries: []logproto.Entry{
+ {Timestamp: time.Unix(123458, 0), Line: "line 3"},
+ {Timestamp: time.Unix(123459, 0), Line: "line 4"},
+ },
+ },
+ {
+ Labels: "{job=\"bar\"}",
+ Entries: []logproto.Entry{
+ {Timestamp: time.Unix(123458, 0), Line: "line 5"},
+ {Timestamp: time.Unix(123459, 0), Line: "line 6"},
+ },
+ },
+ },
+ },
+ }
+
+ limits := &validation.Limits{}
+ flagext.DefaultValues(limits)
+ limits.RejectOldSamples = false
+ distributors, _ := prepare(t, 1, 3, limits, nil)
+
+ tee := mockTee{}
+ distributors[0].tee = &tee
+
+ for i, td := range data {
+ _, err := distributors[0].Push(ctx, td)
+ require.NoError(t, err)
+
+ for j, streams := range td.Streams {
+ assert.Equal(t, tee.duplicated[i][j].Stream.Entries, streams.Entries)
+ }
+ }
+}
diff --git a/pkg/distributor/tee.go b/pkg/distributor/tee.go
new file mode 100644
index 0000000000000..9ac48083956e1
--- /dev/null
+++ b/pkg/distributor/tee.go
@@ -0,0 +1,6 @@
+package distributor
+
+// Tee implementations can duplicate the log streams to another endpoint.
+type Tee interface {
+ Duplicate([]KeyedStream)
+}
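
A minimal, hypothetical sketch of a Tee implementation: it receives the same KeyedStream slice the distributor is about to send to the ingesters. Since Duplicate is called synchronously on the push path, a real implementation should hand the work off quickly (for example to a queue or goroutine); the example below only logs what it would forward.

```go
package distributor

import (
	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
)

// loggingTee is an illustrative Tee implementation that only logs what it
// would duplicate; a real implementation would forward the streams to another
// write path without blocking the push request.
type loggingTee struct {
	logger log.Logger
}

func (t *loggingTee) Duplicate(streams []KeyedStream) {
	for _, s := range streams {
		_ = level.Debug(t.logger).Log(
			"msg", "duplicating stream",
			"hash_key", s.HashKey,
			"labels", s.Stream.Labels,
			"entries", len(s.Stream.Entries),
		)
	}
}
```
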
diff --git a/pkg/ingester/checkpoint_test.go b/pkg/ingester/checkpoint_test.go
index 8286b66cb12fd..2cf46d921ce94 100644
--- a/pkg/ingester/checkpoint_test.go
+++ b/pkg/ingester/checkpoint_test.go
@@ -452,7 +452,7 @@ func Test_SeriesIterator(t *testing.T) {
limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1)
for i := 0; i < 3; i++ {
- inst, err := newInstance(defaultConfig(), defaultPeriodConfigs, fmt.Sprintf("%d", i), limiter, runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, nil, nil, NewStreamRateCalculator(), nil)
+ inst, err := newInstance(defaultConfig(), defaultPeriodConfigs, fmt.Sprintf("%d", i), limiter, runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, nil, nil, nil, nil, NewStreamRateCalculator(), nil)
require.Nil(t, err)
require.NoError(t, inst.Push(context.Background(), &logproto.PushRequest{Streams: []logproto.Stream{stream1}}))
require.NoError(t, inst.Push(context.Background(), &logproto.PushRequest{Streams: []logproto.Stream{stream2}}))
@@ -499,7 +499,7 @@ func Benchmark_SeriesIterator(b *testing.B) {
limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1)
for i := range instances {
- inst, _ := newInstance(defaultConfig(), defaultPeriodConfigs, fmt.Sprintf("instance %d", i), limiter, runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, nil, nil, NewStreamRateCalculator(), nil)
+ inst, _ := newInstance(defaultConfig(), defaultPeriodConfigs, fmt.Sprintf("instance %d", i), limiter, runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, nil, nil, nil, nil, NewStreamRateCalculator(), nil)
require.NoError(b,
inst.Push(context.Background(), &logproto.PushRequest{
diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go
index 147262ff22e51..f5215971ba39b 100644
--- a/pkg/ingester/ingester.go
+++ b/pkg/ingester/ingester.go
@@ -12,6 +12,8 @@ import (
"sync"
"time"
+ lokilog "github.com/grafana/loki/pkg/logql/log"
+
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/grafana/dskit/concurrency"
@@ -37,6 +39,7 @@ import (
"github.com/grafana/loki/pkg/logql"
"github.com/grafana/loki/pkg/logql/syntax"
"github.com/grafana/loki/pkg/logqlmodel/stats"
+ "github.com/grafana/loki/pkg/querier/plan"
"github.com/grafana/loki/pkg/runtime"
"github.com/grafana/loki/pkg/storage"
"github.com/grafana/loki/pkg/storage/chunk"
@@ -98,7 +101,10 @@ type Config struct {
WAL WALConfig `yaml:"wal,omitempty" doc:"description=The ingester WAL (Write Ahead Log) records incoming logs and stores them on the local file systems in order to guarantee persistence of acknowledged data in the event of a process crash."`
- ChunkFilterer chunk.RequestChunkFilterer `yaml:"-"`
+ ChunkFilterer chunk.RequestChunkFilterer `yaml:"-"`
+ PipelineWrapper lokilog.PipelineWrapper `yaml:"-"`
+ SampleExtractorWrapper lokilog.SampleExtractorWrapper `yaml:"-"`
+
// Optional wrapper that can be used to modify the behaviour of the ingester
Wrapper Wrapper `yaml:"-"`
@@ -226,7 +232,9 @@ type Ingester struct {
wal WAL
- chunkFilter chunk.RequestChunkFilterer
+ chunkFilter chunk.RequestChunkFilterer
+ extractorWrapper lokilog.SampleExtractorWrapper
+ pipelineWrapper lokilog.PipelineWrapper
streamRateCalculator *StreamRateCalculator
@@ -303,6 +311,14 @@ func New(cfg Config, clientConfig client.Config, store Store, limits Limits, con
i.SetChunkFilterer(i.cfg.ChunkFilterer)
}
+ if i.cfg.PipelineWrapper != nil {
+ i.SetPipelineWrapper(i.cfg.PipelineWrapper)
+ }
+
+ if i.cfg.SampleExtractorWrapper != nil {
+ i.SetExtractorWrapper(i.cfg.SampleExtractorWrapper)
+ }
+
return i, nil
}
@@ -310,6 +326,14 @@ func (i *Ingester) SetChunkFilterer(chunkFilter chunk.RequestChunkFilterer) {
i.chunkFilter = chunkFilter
}
+func (i *Ingester) SetExtractorWrapper(wrapper lokilog.SampleExtractorWrapper) {
+ i.extractorWrapper = wrapper
+}
+
+func (i *Ingester) SetPipelineWrapper(wrapper lokilog.PipelineWrapper) {
+ i.pipelineWrapper = wrapper
+}
+
// setupAutoForget looks for ring status if `AutoForgetUnhealthy` is enabled
// when enabled, unhealthy ingesters that reach `ring.kvstore.heartbeat_timeout` are removed from the ring every `HeartbeatPeriod`
func (i *Ingester) setupAutoForget() {
@@ -836,7 +860,7 @@ func (i *Ingester) GetOrCreateInstance(instanceID string) (*instance, error) { /
inst, ok = i.instances[instanceID]
if !ok {
var err error
- inst, err = newInstance(&i.cfg, i.periodicConfigs, instanceID, i.limiter, i.tenantConfigs, i.wal, i.metrics, i.flushOnShutdownSwitch, i.chunkFilter, i.streamRateCalculator, i.writeLogManager)
+ inst, err = newInstance(&i.cfg, i.periodicConfigs, instanceID, i.limiter, i.tenantConfigs, i.wal, i.metrics, i.flushOnShutdownSwitch, i.chunkFilter, i.pipelineWrapper, i.extractorWrapper, i.streamRateCalculator, i.writeLogManager)
if err != nil {
return nil, err
}
@@ -851,6 +875,16 @@ func (i *Ingester) Query(req *logproto.QueryRequest, queryServer logproto.Querie
// initialize stats collection for ingester queries.
_, ctx := stats.NewContext(queryServer.Context())
+ if req.Plan == nil {
+ parsed, err := syntax.ParseLogSelector(req.Selector, true)
+ if err != nil {
+ return err
+ }
+ req.Plan = &plan.QueryPlan{
+ AST: parsed,
+ }
+ }
+
instanceID, err := tenant.TenantID(ctx)
if err != nil {
return err
@@ -874,6 +908,7 @@ func (i *Ingester) Query(req *logproto.QueryRequest, queryServer logproto.Querie
Limit: req.Limit,
Shards: req.Shards,
Deletes: req.Deletes,
+ Plan: req.Plan,
}}
storeItr, err := i.store.SelectLogs(ctx, storeReq)
if err != nil {
@@ -900,6 +935,17 @@ func (i *Ingester) QuerySample(req *logproto.SampleQueryRequest, queryServer log
_, ctx := stats.NewContext(queryServer.Context())
sp := opentracing.SpanFromContext(ctx)
+ // If the plan is empty, fall back to parsing the (deprecated) selector.
+ if req.Plan == nil {
+ parsed, err := syntax.ParseSampleExpr(req.Selector)
+ if err != nil {
+ return err
+ }
+ req.Plan = &plan.QueryPlan{
+ AST: parsed,
+ }
+ }
+
instanceID, err := tenant.TenantID(ctx)
if err != nil {
return err
@@ -925,6 +971,7 @@ func (i *Ingester) QuerySample(req *logproto.SampleQueryRequest, queryServer log
Selector: req.Selector,
Shards: req.Shards,
Deletes: req.Deletes,
+ Plan: req.Plan,
}}
storeItr, err := i.store.SelectSamples(ctx, storeReq)
if err != nil {
@@ -1234,6 +1281,16 @@ func (i *Ingester) Tail(req *logproto.TailRequest, queryServer logproto.Querier_
default:
}
+ if req.Plan == nil {
+ parsed, err := syntax.ParseLogSelector(req.Query, true)
+ if err != nil {
+ return err
+ }
+ req.Plan = &plan.QueryPlan{
+ AST: parsed,
+ }
+ }
+
instanceID, err := tenant.TenantID(queryServer.Context())
if err != nil {
return err
@@ -1243,7 +1300,13 @@ func (i *Ingester) Tail(req *logproto.TailRequest, queryServer logproto.Querier_
if err != nil {
return err
}
- tailer, err := newTailer(instanceID, req.Query, queryServer, i.cfg.MaxDroppedStreams)
+
+ expr, ok := req.Plan.AST.(syntax.LogSelectorExpr)
+ if !ok {
+ return fmt.Errorf("unsupported query expression: want (LogSelectorExpr), got (%T)", req.Plan.AST)
+ }
+
+ tailer, err := newTailer(instanceID, expr, queryServer, i.cfg.MaxDroppedStreams)
if err != nil {
return err
}
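Both wrappers are code-only configuration (`yaml:"-"`), so embedders set them programmatically. A minimal sketch, assuming the `Wrap` signature exercised by the tests further down in this diff; `queryLoggingWrapper` and `configureIngester` are hypothetical names:

```go
package main

import (
	"context"
	stdlog "log"

	"github.com/grafana/loki/pkg/ingester"
	lokilog "github.com/grafana/loki/pkg/logql/log"
)

// queryLoggingWrapper is a hypothetical PipelineWrapper that records which
// tenant ran which query and returns the pipeline unchanged (an identity wrap).
type queryLoggingWrapper struct{}

func (queryLoggingWrapper) Wrap(_ context.Context, p lokilog.Pipeline, query, tenant string) lokilog.Pipeline {
	stdlog.Printf("tenant=%s query=%s", tenant, query)
	return p
}

func configureIngester(cfg *ingester.Config) {
	// Must be set before calling ingester.New, which copies the wrappers onto
	// the Ingester via SetPipelineWrapper / SetExtractorWrapper.
	cfg.PipelineWrapper = queryLoggingWrapper{}
}
```

The `SampleExtractorWrapper` side follows the same pattern, wrapping a `lokilog.SampleExtractor` instead of a `lokilog.Pipeline`.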
diff --git a/pkg/ingester/ingester_test.go b/pkg/ingester/ingester_test.go
index 3da4cd356daab..993ae4f10fe26 100644
--- a/pkg/ingester/ingester_test.go
+++ b/pkg/ingester/ingester_test.go
@@ -35,6 +35,8 @@ import (
"github.com/grafana/loki/pkg/iter"
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/logql"
+ "github.com/grafana/loki/pkg/logql/syntax"
+ "github.com/grafana/loki/pkg/querier/plan"
"github.com/grafana/loki/pkg/runtime"
"github.com/grafana/loki/pkg/storage/chunk"
"github.com/grafana/loki/pkg/storage/chunk/fetcher"
@@ -277,16 +279,16 @@ func TestIngester(t *testing.T) {
require.Nil(t, err)
require.ElementsMatch(t, []logproto.SeriesIdentifier{
{
- Labels: map[string]string{
- "foo": "bar",
- "bar": "baz1",
- },
+ Labels: logproto.MustNewSeriesEntries(
+ "bar", "baz1",
+ "foo", "bar",
+ ),
},
{
- Labels: map[string]string{
- "foo": "bar",
- "bar": "baz2",
- },
+ Labels: logproto.MustNewSeriesEntries(
+ "bar", "baz2",
+ "foo", "bar",
+ ),
},
}, resp.GetSeries())
@@ -315,16 +317,16 @@ func TestIngester(t *testing.T) {
require.Nil(t, err)
require.ElementsMatch(t, []logproto.SeriesIdentifier{
{
- Labels: map[string]string{
- "foo": "bar",
- "bar": "baz1",
- },
+ Labels: logproto.MustNewSeriesEntries(
+ "bar", "baz1",
+ "foo", "bar",
+ ),
},
{
- Labels: map[string]string{
- "foo": "bar",
- "bar": "baz2",
- },
+ Labels: logproto.MustNewSeriesEntries(
+ "bar", "baz2",
+ "foo", "bar",
+ ),
},
}, resp.GetSeries())
@@ -337,10 +339,10 @@ func TestIngester(t *testing.T) {
require.Nil(t, err)
require.ElementsMatch(t, []logproto.SeriesIdentifier{
{
- Labels: map[string]string{
- "foo": "bar",
- "bar": "baz2",
- },
+ Labels: logproto.MustNewSeriesEntries(
+ "bar", "baz2",
+ "foo", "bar",
+ ),
},
}, resp.GetSeries())
@@ -353,16 +355,16 @@ func TestIngester(t *testing.T) {
require.Nil(t, err)
require.ElementsMatch(t, []logproto.SeriesIdentifier{
{
- Labels: map[string]string{
- "foo": "bar",
- "bar": "baz1",
- },
+ Labels: logproto.MustNewSeriesEntries(
+ "bar", "baz1",
+ "foo", "bar",
+ ),
},
{
- Labels: map[string]string{
- "foo": "bar",
- "bar": "baz2",
- },
+ Labels: logproto.MustNewSeriesEntries(
+ "bar", "baz2",
+ "foo", "bar",
+ ),
},
}, resp.GetSeries())
}
@@ -812,6 +814,9 @@ func Test_DedupeIngester(t *testing.T) {
End: time.Unix(0, requests+1),
Limit: uint32(requests * streamCount),
Direction: logproto.BACKWARD,
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`{foo="bar"} | label_format bar=""`),
+ },
})
require.NoError(t, err)
iterators = append(iterators, iter.NewQueryClientIterator(stream, logproto.BACKWARD))
@@ -870,6 +875,9 @@ func Test_DedupeIngester(t *testing.T) {
Selector: `sum(rate({foo="bar"}[1m])) by (bar)`,
Start: time.Unix(0, 0),
End: time.Unix(0, requests+1),
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`sum(rate({foo="bar"}[1m])) by (bar)`),
+ },
})
require.NoError(t, err)
iterators = append(iterators, iter.NewSampleQueryClientIterator(stream))
@@ -905,6 +913,9 @@ func Test_DedupeIngester(t *testing.T) {
Selector: `sum(rate({foo="bar"}[1m]))`,
Start: time.Unix(0, 0),
End: time.Unix(0, requests+1),
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`sum(rate({foo="bar"}[1m]))`),
+ },
})
require.NoError(t, err)
iterators = append(iterators, iter.NewSampleQueryClientIterator(stream))
@@ -965,6 +976,9 @@ func Test_DedupeIngesterParser(t *testing.T) {
End: time.Unix(0, int64(requests+1)),
Limit: uint32(requests * streamCount * 2),
Direction: logproto.BACKWARD,
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`{foo="bar"} | json`),
+ },
})
require.NoError(t, err)
iterators = append(iterators, iter.NewQueryClientIterator(stream, logproto.BACKWARD))
@@ -992,6 +1006,9 @@ func Test_DedupeIngesterParser(t *testing.T) {
End: time.Unix(0, int64(requests+1)),
Limit: uint32(requests * streamCount * 2),
Direction: logproto.FORWARD,
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`{foo="bar"} | json`),
+ },
})
require.NoError(t, err)
iterators = append(iterators, iter.NewQueryClientIterator(stream, logproto.FORWARD))
@@ -1016,6 +1033,9 @@ func Test_DedupeIngesterParser(t *testing.T) {
Selector: `rate({foo="bar"} | json [1m])`,
Start: time.Unix(0, 0),
End: time.Unix(0, int64(requests+1)),
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`rate({foo="bar"} | json [1m])`),
+ },
})
require.NoError(t, err)
iterators = append(iterators, iter.NewSampleQueryClientIterator(stream))
@@ -1041,6 +1061,9 @@ func Test_DedupeIngesterParser(t *testing.T) {
Selector: `sum by (c,d,e,foo) (rate({foo="bar"} | json [1m]))`,
Start: time.Unix(0, 0),
End: time.Unix(0, int64(requests+1)),
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`sum by (c,d,e,foo) (rate({foo="bar"} | json [1m]))`),
+ },
})
require.NoError(t, err)
iterators = append(iterators, iter.NewSampleQueryClientIterator(stream))
diff --git a/pkg/ingester/instance.go b/pkg/ingester/instance.go
index 14306b01dc4af..f29628d85eeb8 100644
--- a/pkg/ingester/instance.go
+++ b/pkg/ingester/instance.go
@@ -22,6 +22,8 @@ import (
tsdb_record "github.com/prometheus/prometheus/tsdb/record"
"go.uber.org/atomic"
+ "github.com/grafana/dskit/tenant"
+
"github.com/grafana/loki/pkg/analytics"
"github.com/grafana/loki/pkg/chunkenc"
"github.com/grafana/loki/pkg/distributor/writefailures"
@@ -30,6 +32,7 @@ import (
"github.com/grafana/loki/pkg/iter"
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/logql"
+ "github.com/grafana/loki/pkg/logql/log"
"github.com/grafana/loki/pkg/logql/syntax"
"github.com/grafana/loki/pkg/logqlmodel/stats"
"github.com/grafana/loki/pkg/querier/astmapper"
@@ -109,6 +112,8 @@ type instance struct {
metrics *ingesterMetrics
chunkFilter chunk.RequestChunkFilterer
+ pipelineWrapper log.PipelineWrapper
+ extractorWrapper log.SampleExtractorWrapper
streamRateCalculator *StreamRateCalculator
writeFailures *writefailures.Manager
@@ -126,6 +131,8 @@ func newInstance(
metrics *ingesterMetrics,
flushOnShutdownSwitch *OnceSwitch,
chunkFilter chunk.RequestChunkFilterer,
+ pipelineWrapper log.PipelineWrapper,
+ extractorWrapper log.SampleExtractorWrapper,
streamRateCalculator *StreamRateCalculator,
writeFailures *writefailures.Manager,
) (*instance, error) {
@@ -153,7 +160,9 @@ func newInstance(
metrics: metrics,
flushOnShutdownSwitch: flushOnShutdownSwitch,
- chunkFilter: chunkFilter,
+ chunkFilter: chunkFilter,
+ pipelineWrapper: pipelineWrapper,
+ extractorWrapper: extractorWrapper,
streamRateCalculator: streamRateCalculator,
@@ -419,6 +428,15 @@ func (i *instance) Query(ctx context.Context, req logql.SelectLogParams) (iter.E
return nil, err
}
+ if i.pipelineWrapper != nil {
+ userID, err := tenant.TenantID(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ pipeline = i.pipelineWrapper.Wrap(ctx, pipeline, expr.String(), userID)
+ }
+
stats := stats.FromContext(ctx)
var iters []iter.EntryIterator
@@ -464,6 +482,15 @@ func (i *instance) QuerySample(ctx context.Context, req logql.SelectSampleParams
return nil, err
}
+ if i.extractorWrapper != nil {
+ userID, err := tenant.TenantID(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ extractor = i.extractorWrapper.Wrap(ctx, extractor, expr.String(), userID)
+ }
+
stats := stats.FromContext(ctx)
var iters []iter.SampleIterator
@@ -572,9 +599,7 @@ func (i *instance) Series(ctx context.Context, req *logproto.SeriesRequest) (*lo
err = i.forMatchingStreams(ctx, req.Start, nil, shard, func(stream *stream) error {
// consider the stream only if it overlaps the request time range
if shouldConsiderStream(stream, req.Start, req.End) {
- series = append(series, logproto.SeriesIdentifier{
- Labels: stream.labels.Map(),
- })
+ series = append(series, logproto.SeriesIdentifierFromLabels(stream.labels))
}
return nil
})
@@ -597,9 +622,7 @@ func (i *instance) Series(ctx context.Context, req *logproto.SeriesRequest) (*lo
return nil
}
- dedupedSeries[key] = logproto.SeriesIdentifier{
- Labels: stream.labels.Map(),
- }
+ dedupedSeries[key] = logproto.SeriesIdentifierFromLabels(stream.labels)
}
return nil
})
diff --git a/pkg/ingester/instance_test.go b/pkg/ingester/instance_test.go
index ac29f3516df45..d96960da6cda7 100644
--- a/pkg/ingester/instance_test.go
+++ b/pkg/ingester/instance_test.go
@@ -10,6 +10,11 @@ import (
"testing"
"time"
+ "github.com/grafana/dskit/tenant"
+ "github.com/grafana/dskit/user"
+
+ "github.com/grafana/loki/pkg/logql/log"
+
"github.com/grafana/dskit/flagext"
"github.com/pkg/errors"
"github.com/prometheus/common/model"
@@ -21,6 +26,7 @@ import (
"github.com/grafana/loki/pkg/logql"
"github.com/grafana/loki/pkg/logql/syntax"
"github.com/grafana/loki/pkg/querier/astmapper"
+ "github.com/grafana/loki/pkg/querier/plan"
loki_runtime "github.com/grafana/loki/pkg/runtime"
"github.com/grafana/loki/pkg/storage/chunk"
"github.com/grafana/loki/pkg/storage/config"
@@ -64,7 +70,7 @@ func TestLabelsCollisions(t *testing.T) {
require.NoError(t, err)
limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1)
- i, err := newInstance(defaultConfig(), defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, NewStreamRateCalculator(), nil)
+ i, err := newInstance(defaultConfig(), defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil)
require.Nil(t, err)
// avoid entries from the future.
@@ -92,7 +98,7 @@ func TestConcurrentPushes(t *testing.T) {
require.NoError(t, err)
limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1)
- inst, err := newInstance(defaultConfig(), defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, NewStreamRateCalculator(), nil)
+ inst, err := newInstance(defaultConfig(), defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil)
require.Nil(t, err)
const (
@@ -144,7 +150,7 @@ func TestGetStreamRates(t *testing.T) {
require.NoError(t, err)
limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1)
- inst, err := newInstance(defaultConfig(), defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, NewStreamRateCalculator(), nil)
+ inst, err := newInstance(defaultConfig(), defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil)
require.NoError(t, err)
const (
@@ -238,7 +244,7 @@ func TestSyncPeriod(t *testing.T) {
minUtil = 0.20
)
- inst, err := newInstance(defaultConfig(), defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, NewStreamRateCalculator(), nil)
+ inst, err := newInstance(defaultConfig(), defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil)
require.Nil(t, err)
lbls := makeRandomLabels()
@@ -283,7 +289,7 @@ func setupTestStreams(t *testing.T) (*instance, time.Time, int) {
cfg.SyncMinUtilization = 0.20
cfg.IndexShards = indexShards
- instance, err := newInstance(cfg, defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, NewStreamRateCalculator(), nil)
+ instance, err := newInstance(cfg, defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil)
require.Nil(t, err)
currentTime := time.Now()
@@ -408,8 +414,8 @@ func Test_SeriesQuery(t *testing.T) {
Groups: []string{`{job="varlogs"}`},
},
[]logproto.SeriesIdentifier{
- {Labels: map[string]string{"app": "test", "job": "varlogs"}},
- {Labels: map[string]string{"app": "test2", "job": "varlogs"}},
+ {Labels: logproto.MustNewSeriesEntries("app", "test", "job", "varlogs")},
+ {Labels: logproto.MustNewSeriesEntries("app", "test2", "job", "varlogs")},
},
},
{
@@ -425,7 +431,7 @@ func Test_SeriesQuery(t *testing.T) {
},
[]logproto.SeriesIdentifier{
// Separated by shard number
- {Labels: map[string]string{"app": "test2", "job": "varlogs"}},
+ {Labels: logproto.MustNewSeriesEntries("app", "test2", "job", "varlogs")},
},
},
{
@@ -436,7 +442,7 @@ func Test_SeriesQuery(t *testing.T) {
Groups: []string{`{job="varlogs"}`},
},
[]logproto.SeriesIdentifier{
- {Labels: map[string]string{"app": "test", "job": "varlogs"}},
+ {Labels: logproto.MustNewSeriesEntries("app", "test", "job", "varlogs")},
},
},
{
@@ -447,7 +453,7 @@ func Test_SeriesQuery(t *testing.T) {
Groups: []string{`{job="varlogs"}`},
},
[]logproto.SeriesIdentifier{
- {Labels: map[string]string{"app": "test2", "job": "varlogs"}},
+ {Labels: logproto.MustNewSeriesEntries("app", "test2", "job", "varlogs")},
},
},
}
@@ -492,7 +498,7 @@ func Benchmark_PushInstance(b *testing.B) {
require.NoError(b, err)
limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1)
- i, _ := newInstance(&Config{IndexShards: 1}, defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, NewStreamRateCalculator(), nil)
+ i, _ := newInstance(&Config{IndexShards: 1}, defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil)
ctx := context.Background()
for n := 0; n < b.N; n++ {
@@ -536,8 +542,10 @@ func Benchmark_instance_addNewTailer(b *testing.B) {
ctx := context.Background()
- inst, _ := newInstance(&Config{}, defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, NewStreamRateCalculator(), nil)
- t, err := newTailer("foo", `{namespace="foo",pod="bar",instance=~"10.*"}`, nil, 10)
+ inst, _ := newInstance(&Config{}, defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil)
+ expr, err := syntax.ParseLogSelector(`{namespace="foo",pod="bar",instance=~"10.*"}`, true)
+ require.NoError(b, err)
+ t, err := newTailer("foo", expr, nil, 10)
require.NoError(b, err)
for i := 0; i < 10000; i++ {
require.NoError(b, inst.Push(ctx, &logproto.PushRequest{
@@ -596,6 +604,9 @@ func Test_Iterator(t *testing.T) {
Start: time.Unix(0, 0),
End: time.Unix(0, 100000000),
Direction: logproto.BACKWARD,
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`{job="3"} | logfmt`),
+ },
},
},
)
@@ -648,6 +659,9 @@ func Test_ChunkFilter(t *testing.T) {
Start: time.Unix(0, 0),
End: time.Unix(0, 100000000),
Direction: logproto.BACKWARD,
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`{job="3"}`),
+ },
},
},
)
@@ -662,6 +676,183 @@ func Test_ChunkFilter(t *testing.T) {
}
}
+func Test_PipelineWrapper(t *testing.T) {
+ instance := defaultInstance(t)
+
+ wrapper := &testPipelineWrapper{
+ pipeline: newMockPipeline(),
+ }
+ instance.pipelineWrapper = wrapper
+
+ ctx := user.InjectOrgID(context.Background(), "test-user")
+
+ _, err := tenant.TenantID(ctx)
+ require.NoError(t, err)
+
+ it, err := instance.Query(ctx,
+ logql.SelectLogParams{
+ QueryRequest: &logproto.QueryRequest{
+ Selector: `{job="3"}`,
+ Limit: uint32(2),
+ Start: time.Unix(0, 0),
+ End: time.Unix(0, 100000000),
+ Direction: logproto.BACKWARD,
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`{job="3"}`),
+ },
+ },
+ },
+ )
+ require.NoError(t, err)
+ defer it.Close()
+
+ for it.Next() {
+ // Consume the iterator
+ require.NoError(t, it.Error())
+ }
+
+ require.Equal(t, "test-user", wrapper.tenant)
+ require.Equal(t, `{job="3"}`, wrapper.query)
+ require.Equal(t, 10, wrapper.pipeline.sp.called) // we've passed every log line through the wrapper
+}
+
+type testPipelineWrapper struct {
+ query string
+ tenant string
+ pipeline *mockPipeline
+}
+
+func (t *testPipelineWrapper) Wrap(_ context.Context, pipeline log.Pipeline, query, tenant string) log.Pipeline {
+ t.tenant = tenant
+ t.query = query
+ t.pipeline.wrappedExtractor = pipeline
+ return t.pipeline
+}
+
+func newMockPipeline() *mockPipeline {
+ return &mockPipeline{
+ sp: &mockStreamPipeline{},
+ }
+}
+
+type mockPipeline struct {
+ wrappedExtractor log.Pipeline
+ sp *mockStreamPipeline
+}
+
+func (p *mockPipeline) ForStream(l labels.Labels) log.StreamPipeline {
+ sp := p.wrappedExtractor.ForStream(l)
+ p.sp.wrappedSP = sp
+ return p.sp
+}
+
+func (p *mockPipeline) Reset() {}
+
+// mockStreamPipeline wraps a stream pipeline and counts how many log lines pass through it.
+type mockStreamPipeline struct {
+ wrappedSP log.StreamPipeline
+ called int
+}
+
+func (p *mockStreamPipeline) BaseLabels() log.LabelsResult {
+ return p.wrappedSP.BaseLabels()
+}
+
+func (p *mockStreamPipeline) Process(ts int64, line []byte, lbs ...labels.Label) ([]byte, log.LabelsResult, bool) {
+ p.called++
+ return p.wrappedSP.Process(ts, line, lbs...)
+}
+
+func (p *mockStreamPipeline) ProcessString(ts int64, line string, lbs ...labels.Label) (string, log.LabelsResult, bool) {
+ p.called++
+ return p.wrappedSP.ProcessString(ts, line, lbs...)
+}
+
+func Test_ExtractorWrapper(t *testing.T) {
+ instance := defaultInstance(t)
+
+ wrapper := &testExtractorWrapper{
+ extractor: newMockExtractor(),
+ }
+ instance.extractorWrapper = wrapper
+
+ ctx := user.InjectOrgID(context.Background(), "test-user")
+ it, err := instance.QuerySample(ctx,
+ logql.SelectSampleParams{
+ SampleQueryRequest: &logproto.SampleQueryRequest{
+ Selector: `sum(count_over_time({job="3"}[1m]))`,
+ Start: time.Unix(0, 0),
+ End: time.Unix(0, 100000000),
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`sum(count_over_time({job="3"}[1m]))`),
+ },
+ },
+ },
+ )
+ require.NoError(t, err)
+ defer it.Close()
+
+ for it.Next() {
+ // Consume the iterator
+ require.NoError(t, it.Error())
+ }
+
+ require.Equal(t, `sum(count_over_time({job="3"}[1m]))`, wrapper.query)
+ require.Equal(t, 10, wrapper.extractor.sp.called) // we've passed every log line through the wrapper
+}
+
+type testExtractorWrapper struct {
+ query string
+ tenant string
+ extractor *mockExtractor
+}
+
+func (t *testExtractorWrapper) Wrap(_ context.Context, extractor log.SampleExtractor, query, tenant string) log.SampleExtractor {
+ t.tenant = tenant
+ t.query = query
+ t.extractor.wrappedExtractor = extractor
+ return t.extractor
+}
+
+func newMockExtractor() *mockExtractor {
+ return &mockExtractor{
+ sp: &mockStreamExtractor{},
+ }
+}
+
+type mockExtractor struct {
+ wrappedExtractor log.SampleExtractor
+ sp *mockStreamExtractor
+}
+
+func (p *mockExtractor) ForStream(l labels.Labels) log.StreamSampleExtractor {
+ sp := p.wrappedExtractor.ForStream(l)
+ p.sp.wrappedSP = sp
+ return p.sp
+}
+
+func (p *mockExtractor) Reset() {}
+
+// mockStreamExtractor wraps a stream sample extractor and counts how many log lines pass through it.
+type mockStreamExtractor struct {
+ wrappedSP log.StreamSampleExtractor
+ called int
+}
+
+func (p *mockStreamExtractor) BaseLabels() log.LabelsResult {
+ return p.wrappedSP.BaseLabels()
+}
+
+func (p *mockStreamExtractor) Process(ts int64, line []byte, lbs ...labels.Label) (float64, log.LabelsResult, bool) {
+ p.called++
+ return p.wrappedSP.Process(ts, line, lbs...)
+}
+
+func (p *mockStreamExtractor) ProcessString(ts int64, line string, lbs ...labels.Label) (float64, log.LabelsResult, bool) {
+ p.called++
+ return p.wrappedSP.ProcessString(ts, line, lbs...)
+}
+
func Test_QueryWithDelete(t *testing.T) {
instance := defaultInstance(t)
@@ -690,6 +881,9 @@ func Test_QueryWithDelete(t *testing.T) {
End: 10 * 1e6,
},
},
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`{job="3"}`),
+ },
},
},
)
@@ -730,6 +924,9 @@ func Test_QuerySampleWithDelete(t *testing.T) {
End: 10 * 1e6,
},
},
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`count_over_time({job="3"}[5m])`),
+ },
},
},
)
@@ -809,7 +1006,7 @@ func TestStreamShardingUsage(t *testing.T) {
})
t.Run("invalid push returns error", func(t *testing.T) {
- i, _ := newInstance(&Config{IndexShards: 1}, defaultPeriodConfigs, customTenant1, limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, NewStreamRateCalculator(), nil)
+ i, _ := newInstance(&Config{IndexShards: 1}, defaultPeriodConfigs, customTenant1, limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil)
ctx := context.Background()
err = i.Push(ctx, &logproto.PushRequest{
@@ -828,7 +1025,7 @@ func TestStreamShardingUsage(t *testing.T) {
})
t.Run("valid push returns no error", func(t *testing.T) {
- i, _ := newInstance(&Config{IndexShards: 1}, defaultPeriodConfigs, customTenant2, limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, NewStreamRateCalculator(), nil)
+ i, _ := newInstance(&Config{IndexShards: 1}, defaultPeriodConfigs, customTenant2, limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil)
ctx := context.Background()
err = i.Push(ctx, &logproto.PushRequest{
@@ -1159,6 +1356,8 @@ func defaultInstance(t *testing.T) *instance {
NilMetrics,
nil,
nil,
+ nil,
+ nil,
NewStreamRateCalculator(),
nil,
)
diff --git a/pkg/ingester/stream_test.go b/pkg/ingester/stream_test.go
index 641fd1c926523..d1b01f22746c2 100644
--- a/pkg/ingester/stream_test.go
+++ b/pkg/ingester/stream_test.go
@@ -18,6 +18,7 @@ import (
"github.com/grafana/loki/pkg/iter"
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/logql/log"
+ "github.com/grafana/loki/pkg/logql/syntax"
"github.com/grafana/loki/pkg/util/flagext"
"github.com/grafana/loki/pkg/validation"
)
@@ -524,7 +525,9 @@ func Benchmark_PushStream(b *testing.B) {
chunkfmt, headfmt := defaultChunkFormat(b)
s := newStream(chunkfmt, headfmt, &Config{MaxChunkAge: 24 * time.Hour}, limiter, "fake", model.Fingerprint(0), ls, true, NewStreamRateCalculator(), NilMetrics, nil)
- t, err := newTailer("foo", `{namespace="loki-dev"}`, &fakeTailServer{}, 10)
+ expr, err := syntax.ParseLogSelector(`{namespace="loki-dev"}`, true)
+ require.NoError(b, err)
+ t, err := newTailer("foo", expr, &fakeTailServer{}, 10)
require.NoError(b, err)
go t.loop()
diff --git a/pkg/ingester/tailer.go b/pkg/ingester/tailer.go
index 72e7026e810e7..3e9a8a64cfd88 100644
--- a/pkg/ingester/tailer.go
+++ b/pkg/ingester/tailer.go
@@ -46,11 +46,7 @@ type tailer struct {
conn TailServer
}
-func newTailer(orgID, query string, conn TailServer, maxDroppedStreams int) (*tailer, error) {
- expr, err := syntax.ParseLogSelector(query, true)
- if err != nil {
- return nil, err
- }
+func newTailer(orgID string, expr syntax.LogSelectorExpr, conn TailServer, maxDroppedStreams int) (*tailer, error) {
// Make sure we can build a pipeline. The stream processing code doesn't have a place to handle
// this error so make sure we handle it here.
pipeline, err := expr.Pipeline()
@@ -66,7 +62,7 @@ func newTailer(orgID, query string, conn TailServer, maxDroppedStreams int) (*ta
conn: conn,
droppedStreams: make([]*logproto.DroppedStream, 0, maxDroppedStreams),
maxDroppedStreams: maxDroppedStreams,
- id: generateUniqueID(orgID, query),
+ id: generateUniqueID(orgID, expr.String()),
closeChan: make(chan struct{}),
pipeline: pipeline,
}, nil
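`newTailer` no longer parses the selector itself, so every call site now parses first and passes the typed expression through. A sketch of the migration, expressed as a hypothetical helper inside the `ingester` package:

```go
package ingester

import "github.com/grafana/loki/pkg/logql/syntax"

// newTailerFromQuery is a hypothetical helper showing the new call shape:
// parse the selector at the call site, then hand the LogSelectorExpr to newTailer.
func newTailerFromQuery(orgID, query string, conn TailServer, maxDroppedStreams int) (*tailer, error) {
	expr, err := syntax.ParseLogSelector(query, true)
	if err != nil {
		return nil, err
	}
	return newTailer(orgID, expr, conn, maxDroppedStreams)
}
```

This is the same pattern the updated tests below use, and it is why `Ingester.Tail` now asserts that `req.Plan.AST` is a `syntax.LogSelectorExpr` before building the tailer.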
diff --git a/pkg/ingester/tailer_test.go b/pkg/ingester/tailer_test.go
index 59293352030df..674dde3df8af0 100644
--- a/pkg/ingester/tailer_test.go
+++ b/pkg/ingester/tailer_test.go
@@ -12,6 +12,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/grafana/loki/pkg/logproto"
+ "github.com/grafana/loki/pkg/logql/syntax"
)
func TestTailer_sendRaceConditionOnSendWhileClosing(t *testing.T) {
@@ -26,7 +27,9 @@ func TestTailer_sendRaceConditionOnSendWhileClosing(t *testing.T) {
}
for run := 0; run < runs; run++ {
- tailer, err := newTailer("org-id", stream.Labels, nil, 10)
+ expr, err := syntax.ParseLogSelector(stream.Labels, true)
+ require.NoError(t, err)
+ tailer, err := newTailer("org-id", expr, nil, 10)
require.NoError(t, err)
require.NotNil(t, tailer)
@@ -78,7 +81,9 @@ func Test_dropstream(t *testing.T) {
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
- tail, err := newTailer("foo", `{app="foo"} |= "foo"`, &fakeTailServer{}, maxDroppedStreams)
+ expr, err := syntax.ParseLogSelector(`{app="foo"} |= "foo"`, true)
+ require.NoError(t, err)
+ tail, err := newTailer("foo", expr, &fakeTailServer{}, maxDroppedStreams)
require.NoError(t, err)
for i := 0; i < c.drop; i++ {
@@ -114,7 +119,9 @@ func (f *fakeTailServer) Reset() {
}
func Test_TailerSendRace(t *testing.T) {
- tail, err := newTailer("foo", `{app="foo"} |= "foo"`, &fakeTailServer{}, 10)
+ expr, err := syntax.ParseLogSelector(`{app="foo"} |= "foo"`, true)
+ require.NoError(t, err)
+ tail, err := newTailer("foo", expr, &fakeTailServer{}, 10)
require.NoError(t, err)
var wg sync.WaitGroup
@@ -250,7 +257,9 @@ func Test_StructuredMetadata(t *testing.T) {
} {
t.Run(tc.name, func(t *testing.T) {
var server fakeTailServer
- tail, err := newTailer("foo", tc.query, &server, 10)
+ expr, err := syntax.ParseLogSelector(tc.query, true)
+ require.NoError(t, err)
+ tail, err := newTailer("foo", expr, &server, 10)
require.NoError(t, err)
var wg sync.WaitGroup
diff --git a/pkg/logcli/client/file.go b/pkg/logcli/client/file.go
index 45681c36c2c8f..82274ef79fb8d 100644
--- a/pkg/logcli/client/file.go
+++ b/pkg/logcli/client/file.go
@@ -69,7 +69,7 @@ func (f *FileClient) Query(q string, limit int, t time.Time, direction logproto.
ctx = user.InjectOrgID(ctx, f.orgID)
- params := logql.NewLiteralParams(
+ params, err := logql.NewLiteralParams(
q,
t, t,
0,
@@ -78,6 +78,9 @@ func (f *FileClient) Query(q string, limit int, t time.Time, direction logproto.
uint32(limit),
nil,
)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse query: %w", err)
+ }
query := f.engine.Query(params)
@@ -106,7 +109,7 @@ func (f *FileClient) QueryRange(queryStr string, limit int, start, end time.Time
ctx = user.InjectOrgID(ctx, f.orgID)
- params := logql.NewLiteralParams(
+ params, err := logql.NewLiteralParams(
queryStr,
start,
end,
@@ -116,6 +119,9 @@ func (f *FileClient) QueryRange(queryStr string, limit int, start, end time.Time
uint32(limit),
nil,
)
+ if err != nil {
+ return nil, err
+ }
query := f.engine.Query(params)
diff --git a/pkg/logcli/query/query.go b/pkg/logcli/query/query.go
index 6a71f0979abcf..fc5be5f393cb2 100644
--- a/pkg/logcli/query/query.go
+++ b/pkg/logcli/query/query.go
@@ -451,7 +451,7 @@ func (q *Query) DoLocalQuery(out output.LogOutput, statistics bool, orgID string
var query logql.Query
if q.isInstant() {
- query = eng.Query(logql.NewLiteralParams(
+ params, err := logql.NewLiteralParams(
q.QueryString,
q.Start,
q.Start,
@@ -460,9 +460,14 @@ func (q *Query) DoLocalQuery(out output.LogOutput, statistics bool, orgID string
q.resultsDirection(),
uint32(q.Limit),
nil,
- ))
+ )
+ if err != nil {
+ return err
+ }
+
+ query = eng.Query(params)
} else {
- query = eng.Query(logql.NewLiteralParams(
+ params, err := logql.NewLiteralParams(
q.QueryString,
q.Start,
q.End,
@@ -471,7 +476,16 @@ func (q *Query) DoLocalQuery(out output.LogOutput, statistics bool, orgID string
q.resultsDirection(),
uint32(q.Limit),
nil,
- ))
+ )
+ if err != nil {
+ return err
+ }
+
+ query = eng.Query(params)
+ }
+
+ if err != nil {
+ return err
}
// execute the query
diff --git a/pkg/logcli/query/query_test.go b/pkg/logcli/query/query_test.go
index 72886fb84668d..1b4c18f5265e0 100644
--- a/pkg/logcli/query/query_test.go
+++ b/pkg/logcli/query/query_test.go
@@ -425,7 +425,10 @@ func (t *testQueryClient) Query(_ string, _ int, _ time.Time, _ logproto.Directi
func (t *testQueryClient) QueryRange(queryStr string, limit int, from, through time.Time, direction logproto.Direction, step, interval time.Duration, _ bool) (*loghttp.QueryResponse, error) {
ctx := user.InjectOrgID(context.Background(), "fake")
- params := logql.NewLiteralParams(queryStr, from, through, step, interval, direction, uint32(limit), nil)
+ params, err := logql.NewLiteralParams(queryStr, from, through, step, interval, direction, uint32(limit), nil)
+ if err != nil {
+ return nil, err
+ }
v, err := t.engine.Query(params).Exec(ctx)
if err != nil {
diff --git a/pkg/loghttp/tail.go b/pkg/loghttp/tail.go
index 6b9b5ad7d131d..9ad2219b10979 100644
--- a/pkg/loghttp/tail.go
+++ b/pkg/loghttp/tail.go
@@ -11,6 +11,8 @@ import (
"github.com/grafana/dskit/httpgrpc"
"github.com/grafana/loki/pkg/logproto"
+ "github.com/grafana/loki/pkg/logql/syntax"
+ "github.com/grafana/loki/pkg/querier/plan"
)
const (
@@ -67,8 +69,16 @@ func (s *DroppedStream) UnmarshalJSON(data []byte) error {
// ParseTailQuery parses a TailRequest request from an http request.
func ParseTailQuery(r *http.Request) (*logproto.TailRequest, error) {
var err error
+ qs := query(r)
+ parsed, err := syntax.ParseExpr(qs)
+ if err != nil {
+ return nil, err
+ }
req := logproto.TailRequest{
- Query: query(r),
+ Query: qs,
+ Plan: &plan.QueryPlan{
+ AST: parsed,
+ },
}
req.Query, err = parseRegexQuery(r)
diff --git a/pkg/loghttp/tail_test.go b/pkg/loghttp/tail_test.go
index f5b2039723699..6fe7163116675 100644
--- a/pkg/loghttp/tail_test.go
+++ b/pkg/loghttp/tail_test.go
@@ -9,6 +9,8 @@ import (
"github.com/stretchr/testify/require"
"github.com/grafana/loki/pkg/logproto"
+ "github.com/grafana/loki/pkg/logql/syntax"
+ "github.com/grafana/loki/pkg/querier/plan"
)
func TestParseTailQuery(t *testing.T) {
@@ -38,6 +40,9 @@ func TestParseTailQuery(t *testing.T) {
DelayFor: 5,
Start: time.Date(2017, 06, 10, 21, 42, 24, 760738998, time.UTC),
Limit: 1000,
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`{foo="bar"}`),
+ },
}, false},
}
for _, tt := range tests {
diff --git a/pkg/logproto/compat.go b/pkg/logproto/compat.go
index fdf6f6b169cd7..268e588d3455c 100644
--- a/pkg/logproto/compat.go
+++ b/pkg/logproto/compat.go
@@ -1,6 +1,7 @@
package logproto
import (
+ "encoding/binary"
stdjson "encoding/json"
"fmt"
"math"
@@ -10,6 +11,7 @@ import (
"time"
"unsafe"
+ "github.com/cespare/xxhash/v2"
jsoniter "github.com/json-iterator/go"
"github.com/opentracing/opentracing-go"
otlog "github.com/opentracing/opentracing-go/log"
@@ -18,6 +20,7 @@ import (
"github.com/prometheus/prometheus/model/timestamp"
"github.com/grafana/loki/pkg/querier/queryrange/queryrangebase/definitions"
+ "github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
"github.com/grafana/loki/pkg/util"
)
@@ -260,6 +263,11 @@ func (m *IndexStatsRequest) WithStartEnd(start, end time.Time) definitions.Reque
return &clone
}
+// WithStartEndForCache implements resultscache.Request.
+func (m *IndexStatsRequest) WithStartEndForCache(start, end time.Time) resultscache.Request {
+ return m.WithStartEnd(start, end).(resultscache.Request)
+}
+
// WithQuery clone the current request with a different query.
func (m *IndexStatsRequest) WithQuery(query string) definitions.Request {
clone := *m
@@ -308,6 +316,11 @@ func (m *VolumeRequest) WithStartEnd(start, end time.Time) definitions.Request {
return &clone
}
+// WithStartEndForCache implements resultscache.Request.
+func (m *VolumeRequest) WithStartEndForCache(start, end time.Time) resultscache.Request {
+ return m.WithStartEnd(start, end).(resultscache.Request)
+}
+
// WithQuery clone the current request with a different query.
func (m *VolumeRequest) WithQuery(query string) definitions.Request {
clone := *m
@@ -323,3 +336,83 @@ func (m *VolumeRequest) LogToSpan(sp opentracing.Span) {
otlog.String("end", timestamp.Time(int64(m.Through)).String()),
)
}
+
+// Satisfy definitions.Request for FilterChunkRefRequest
+
+// GetStart returns the start timestamp of the request (m.From is stored in milliseconds).
+func (m *FilterChunkRefRequest) GetStart() time.Time {
+ return time.UnixMilli(int64(m.From))
+}
+
+// GetEnd returns the end timestamp of the request (m.Through is stored in milliseconds).
+func (m *FilterChunkRefRequest) GetEnd() time.Time {
+ return time.UnixMilli(int64(m.Through))
+}
+
+// GetStep returns the step of the request in milliseconds. Always 0.
+func (m *FilterChunkRefRequest) GetStep() int64 {
+ return 0
+}
+
+// GetQuery returns the query of the request.
+// The query is a hash of the input chunk refs combined with the filter expressions.
+func (m *FilterChunkRefRequest) GetQuery() string {
+ var encodeBuf []byte
+ var chunksHash uint64
+ if len(m.Refs) > 0 {
+ h := xxhash.New()
+ for _, ref := range m.Refs {
+ _, _ = h.Write(binary.AppendUvarint(encodeBuf[:0], ref.Fingerprint))
+ }
+ chunksHash = h.Sum64()
+ }
+
+ // Short circuit if there are no filters.
+ if len(m.Filters) == 0 {
+ return fmt.Sprintf("%d", chunksHash)
+ }
+
+ var sb strings.Builder
+ for i, filter := range m.Filters {
+ if i > 0 {
+ sb.WriteString(",")
+ }
+ sb.Write(fmt.Appendf(encodeBuf[:0], "%d", filter.Operator))
+ sb.WriteString("-")
+ sb.WriteString(filter.Match)
+ }
+
+ return fmt.Sprintf("%d/%s", chunksHash, sb.String())
+}
+
+// GetCachingOptions returns the caching options.
+func (m *FilterChunkRefRequest) GetCachingOptions() (res resultscache.CachingOptions) { return }
+
+// WithStartEndForCache implements resultscache.Request.
+func (m *FilterChunkRefRequest) WithStartEndForCache(start, end time.Time) resultscache.Request {
+ // Remove the chunks that are not within the given time range.
+ chunkRefs := make([]*GroupedChunkRefs, 0, len(m.Refs))
+ for _, chunkRef := range m.Refs {
+ refs := make([]*ShortRef, 0, len(chunkRef.Refs))
+ for _, ref := range chunkRef.Refs {
+ if end.Before(ref.From.Time()) || ref.Through.Time().Before(start) {
+ continue
+ }
+ refs = append(refs, ref)
+ }
+ if len(refs) > 0 {
+ chunkRefs = append(chunkRefs, &GroupedChunkRefs{
+ Fingerprint: chunkRef.Fingerprint,
+ Tenant: chunkRef.Tenant,
+ Refs: refs,
+ })
+ }
+ }
+
+ clone := *m
+ clone.From = model.TimeFromUnixNano(start.UnixNano())
+ clone.Through = model.TimeFromUnixNano(end.UnixNano())
+ clone.Refs = chunkRefs
+
+ return &clone
+}
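A quick illustration of the cache key format produced by `GetQuery`: the xxhash of the chunk-ref fingerprints, then `/`, then `operator-match` pairs joined by commas. The expected strings follow directly from the implementation above and match the new test cases below:

```go
package main

import (
	"fmt"

	"github.com/grafana/loki/pkg/logproto"
)

func main() {
	// No refs and no filters: the key collapses to the zero chunk hash.
	empty := logproto.FilterChunkRefRequest{}
	fmt.Println(empty.GetQuery()) // "0"

	// Filters only: "<chunk hash>/<operator>-<match>,..." with a zero hash.
	filtered := logproto.FilterChunkRefRequest{
		Filters: []*logproto.LineFilterExpression{
			{Operator: 0, Match: "uuid"},
			{Operator: 1, Match: "trace"},
		},
	}
	fmt.Println(filtered.GetQuery()) // "0/0-uuid,1-trace"
}
```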
diff --git a/pkg/logproto/compat_test.go b/pkg/logproto/compat_test.go
index 84afa501b68dd..d2ac3c4590780 100644
--- a/pkg/logproto/compat_test.go
+++ b/pkg/logproto/compat_test.go
@@ -213,7 +213,7 @@ func TestMergeLabelResponses(t *testing.T) {
}
func TestMergeSeriesResponses(t *testing.T) {
- mockSeriesResponse := func(series []map[string]string) *SeriesResponse {
+ mockSeriesResponse := func(series [][]SeriesIdentifier_LabelsEntry) *SeriesResponse {
resp := &SeriesResponse{}
for _, s := range series {
resp.Series = append(resp.Series, SeriesIdentifier{
@@ -232,31 +232,31 @@ func TestMergeSeriesResponses(t *testing.T) {
{
desc: "merge one series response and expect one",
responses: []*SeriesResponse{
- {Series: []SeriesIdentifier{{Labels: map[string]string{"test": "test"}}}},
+ {Series: []SeriesIdentifier{{Labels: MustNewSeriesEntries("test", "test")}}},
},
expected: []*SeriesResponse{
- mockSeriesResponse([]map[string]string{{"test": "test"}}),
+ mockSeriesResponse([][]SeriesIdentifier_LabelsEntry{{{"test", "test"}}}),
},
},
{
desc: "merge two series responses",
responses: []*SeriesResponse{
- {Series: []SeriesIdentifier{{Labels: map[string]string{"test": "test"}}}},
- {Series: []SeriesIdentifier{{Labels: map[string]string{"test2": "test2"}}}},
+ {Series: []SeriesIdentifier{{Labels: MustNewSeriesEntries("test", "test")}}},
+ {Series: []SeriesIdentifier{{Labels: MustNewSeriesEntries("test2", "test2")}}},
},
expected: []*SeriesResponse{
- mockSeriesResponse([]map[string]string{{"test": "test"}, {"test2": "test2"}}),
+ mockSeriesResponse([][]SeriesIdentifier_LabelsEntry{{{"test", "test"}}, {{"test2", "test2"}}}),
},
},
{
desc: "merge three series responses",
responses: []*SeriesResponse{
- {Series: []SeriesIdentifier{{Labels: map[string]string{"test": "test"}}}},
- {Series: []SeriesIdentifier{{Labels: map[string]string{"test2": "test2"}}}},
- {Series: []SeriesIdentifier{{Labels: map[string]string{"test3": "test3"}}}},
+ {Series: []SeriesIdentifier{{Labels: MustNewSeriesEntries("test", "test")}}},
+ {Series: []SeriesIdentifier{{Labels: MustNewSeriesEntries("test2", "test2")}}},
+ {Series: []SeriesIdentifier{{Labels: MustNewSeriesEntries("test3", "test3")}}},
},
expected: []*SeriesResponse{
- mockSeriesResponse([]map[string]string{{"test": "test"}, {"test2": "test2"}, {"test3": "test3"}}),
+ mockSeriesResponse([][]SeriesIdentifier_LabelsEntry{{{"test", "test"}}, {{"test2", "test2"}}, {{"test3", "test3"}}}),
},
},
{
@@ -278,6 +278,74 @@ func TestMergeSeriesResponses(t *testing.T) {
}
}
+func TestFilterChunkRefRequestGetQuery(t *testing.T) {
+ for _, tc := range []struct {
+ desc string
+ request FilterChunkRefRequest
+ expected string
+ }{
+ {
+ desc: "empty request",
+ expected: `0`,
+ },
+ {
+ desc: "request no filters",
+ request: FilterChunkRefRequest{
+ Refs: []*GroupedChunkRefs{
+ {
+ Fingerprint: 1,
+ Tenant: "test",
+ },
+ },
+ },
+ expected: `9962287286179718960`,
+ },
+ {
+ desc: "request with filters but no chunks",
+ request: FilterChunkRefRequest{
+ Filters: []*LineFilterExpression{
+ {
+ Operator: 0,
+ Match: "uuid",
+ },
+ },
+ },
+ expected: `0/0-uuid`,
+ },
+ {
+ desc: "request with filters and chunks",
+ request: FilterChunkRefRequest{
+ Refs: []*GroupedChunkRefs{
+ {
+ Fingerprint: 1,
+ Tenant: "test",
+ },
+ {
+ Fingerprint: 2,
+ Tenant: "test",
+ },
+ },
+ Filters: []*LineFilterExpression{
+ {
+ Operator: 0,
+ Match: "uuid",
+ },
+ {
+ Operator: 1,
+ Match: "trace",
+ },
+ },
+ },
+ expected: `8827404902424034886/0-uuid,1-trace`,
+ },
+ } {
+ t.Run(tc.desc, func(t *testing.T) {
+ actual := tc.request.GetQuery()
+ require.Equal(t, tc.expected, actual)
+ })
+ }
+}
+
func benchmarkMergeLabelResponses(b *testing.B, responses []*LabelResponse) {
b.ReportAllocs()
for n := 0; n < b.N; n++ {
@@ -298,7 +366,7 @@ func BenchmarkMergeALabelResponse(b *testing.B) {
}
func BenchmarkMergeASeriesResponse(b *testing.B) {
- response := []*SeriesResponse{{Series: []SeriesIdentifier{{Labels: map[string]string{"test": "test"}}}}}
+ response := []*SeriesResponse{{Series: []SeriesIdentifier{{Labels: MustNewSeriesEntries("test", "test")}}}}
benchmarkMergeSeriesResponses(b, response)
}
@@ -313,9 +381,9 @@ func BenchmarkMergeSomeLabelResponses(b *testing.B) {
func BenchmarkMergeSomeSeriesResponses(b *testing.B) {
responses := []*SeriesResponse{
- {Series: []SeriesIdentifier{{Labels: map[string]string{"test": "test"}}}},
- {Series: []SeriesIdentifier{{Labels: map[string]string{"test2": "test2"}}}},
- {Series: []SeriesIdentifier{{Labels: map[string]string{"test3": "test3"}}}},
+ {Series: []SeriesIdentifier{{Labels: MustNewSeriesEntries("test", "test")}}},
+ {Series: []SeriesIdentifier{{Labels: MustNewSeriesEntries("test2", "test2")}}},
+ {Series: []SeriesIdentifier{{Labels: MustNewSeriesEntries("test3", "test3")}}},
}
benchmarkMergeSeriesResponses(b, responses)
}
@@ -332,7 +400,7 @@ func BenchmarkMergeManySeriesResponses(b *testing.B) {
responses := []*SeriesResponse{}
for i := 0; i < 20; i++ {
test := fmt.Sprintf("test%d", i)
- responses = append(responses, &SeriesResponse{Series: []SeriesIdentifier{{Labels: map[string]string{test: test}}}})
+ responses = append(responses, &SeriesResponse{Series: []SeriesIdentifier{{Labels: MustNewSeriesEntries(test, test)}}})
}
benchmarkMergeSeriesResponses(b, responses)
}
diff --git a/pkg/logproto/extensions.go b/pkg/logproto/extensions.go
index ee4e930941abb..9a5f3f8e61af6 100644
--- a/pkg/logproto/extensions.go
+++ b/pkg/logproto/extensions.go
@@ -8,6 +8,7 @@ import (
"github.com/cespare/xxhash/v2"
"github.com/dustin/go-humanize"
"github.com/prometheus/common/model"
+ "github.com/prometheus/prometheus/model/labels"
"github.com/grafana/loki/pkg/storage/stores/shipper/indexshipper/tsdb/index"
)
@@ -16,31 +17,28 @@ import (
var seps = []byte{'\xff'}
// Hash returns hash of the labels according to Prometheus' Labels.Hash function.
-// `b` and `keysForLabels` are buffers that should be reused to avoid
-// allocations.
-func (id SeriesIdentifier) Hash(b []byte, keysForLabels []string) (uint64, []string) {
- keysForLabels = keysForLabels[:0]
- for k := range id.Labels {
- keysForLabels = append(keysForLabels, k)
- }
- sort.Strings(keysForLabels)
+// `b` is a buffer that should be reused to avoid allocations.
+func (id SeriesIdentifier) Hash(b []byte) uint64 {
+ sort.Sort(id)
// Use xxhash.Sum64(b) for fast path as it's faster.
b = b[:0]
- for i, name := range keysForLabels {
- value := id.Labels[name]
+ for i, pair := range id.Labels {
+ name := pair.Key
+ value := pair.Value
if len(b)+len(name)+len(value)+2 >= cap(b) {
// If labels entry is 1KB+ do not allocate whole entry.
h := xxhash.New()
_, _ = h.Write(b)
- for _, name := range keysForLabels[i:] {
- value := id.Labels[name]
+ for _, pair := range id.Labels[i:] {
+ name := pair.Key
+ value := pair.Value
_, _ = h.WriteString(name)
_, _ = h.Write(seps)
_, _ = h.WriteString(value)
_, _ = h.Write(seps)
}
- return h.Sum64(), keysForLabels
+ return h.Sum64()
}
b = append(b, name...)
@@ -48,9 +46,54 @@ func (id SeriesIdentifier) Hash(b []byte, keysForLabels []string) (uint64, []str
b = append(b, value...)
b = append(b, seps[0])
}
- return xxhash.Sum64(b), keysForLabels
+ return xxhash.Sum64(b)
+}
+
+func (id SeriesIdentifier) Get(key string) string {
+ for _, entry := range id.Labels {
+ if entry.Key == key {
+ return entry.Value
+ }
+ }
+
+ return ""
+}
+
+func SeriesIdentifierFromMap(in map[string]string) SeriesIdentifier {
+ id := SeriesIdentifier{
+ Labels: make([]SeriesIdentifier_LabelsEntry, 0, len(in)),
+ }
+ for k, v := range in {
+ id.Labels = append(id.Labels, SeriesIdentifier_LabelsEntry{Key: k, Value: v})
+ }
+ return id
}
+func SeriesIdentifierFromLabels(in labels.Labels) SeriesIdentifier {
+ id := SeriesIdentifier{
+ Labels: make([]SeriesIdentifier_LabelsEntry, len(in)),
+ }
+ for i, l := range in {
+ id.Labels[i] = SeriesIdentifier_LabelsEntry{Key: l.Name, Value: l.Value}
+ }
+ return id
+}
+
+func MustNewSeriesEntries(labels ...string) []SeriesIdentifier_LabelsEntry {
+ if len(labels)%2 != 0 {
+ panic("invalid number of labels")
+ }
+ r := make([]SeriesIdentifier_LabelsEntry, 0, len(labels)/2)
+ for i := 0; i < len(labels); i += 2 {
+ r = append(r, SeriesIdentifier_LabelsEntry{Key: labels[i], Value: labels[i+1]})
+ }
+ return r
+}
+
+func (id SeriesIdentifier) Len() int { return len(id.Labels) }
+func (id SeriesIdentifier) Swap(i, j int) { id.Labels[i], id.Labels[j] = id.Labels[j], id.Labels[i] }
+func (id SeriesIdentifier) Less(i, j int) bool { return id.Labels[i].Key < id.Labels[j].Key }
+
type Streams []Stream
func (xs Streams) Len() int { return len(xs) }
diff --git a/pkg/logproto/indexgateway.pb.go b/pkg/logproto/indexgateway.pb.go
index e8b569ea07323..86b2665e86b17 100644
--- a/pkg/logproto/indexgateway.pb.go
+++ b/pkg/logproto/indexgateway.pb.go
@@ -6,7 +6,6 @@ package logproto
import (
context "context"
fmt "fmt"
- _ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
@@ -28,31 +27,30 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
func init() { proto.RegisterFile("pkg/logproto/indexgateway.proto", fileDescriptor_d27585148d0a52c8) }
var fileDescriptor_d27585148d0a52c8 = []byte{
- // 372 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xc1, 0x4a, 0xfb, 0x30,
- 0x1c, 0xc7, 0x1b, 0xf8, 0xf3, 0x47, 0xa3, 0x78, 0x08, 0xc2, 0x46, 0xa7, 0x11, 0xc4, 0x83, 0x5e,
- 0x56, 0xd1, 0x17, 0x10, 0x85, 0x95, 0xc1, 0x14, 0x9c, 0xb0, 0xc3, 0x0e, 0x62, 0x3a, 0x7f, 0xeb,
- 0xca, 0xba, 0xa6, 0xb6, 0x29, 0xba, 0x9b, 0x8f, 0xe0, 0x63, 0xf8, 0x10, 0x3e, 0x80, 0xc7, 0x1d,
- 0x77, 0x74, 0xdd, 0xc5, 0xe3, 0x1e, 0x41, 0x9a, 0xd0, 0x2d, 0x9b, 0x1d, 0x78, 0x6a, 0xfa, 0xf9,
- 0x7e, 0xf3, 0xf9, 0xd1, 0xa4, 0xf8, 0x20, 0xec, 0xbb, 0x96, 0xcf, 0xdd, 0x30, 0xe2, 0x82, 0x5b,
- 0x5e, 0xf0, 0x08, 0x2f, 0x2e, 0x13, 0xf0, 0xcc, 0x86, 0x55, 0x89, 0xc8, 0x8e, 0xce, 0x42, 0xc7,
- 0xdc, 0x75, 0xb9, 0xcb, 0x55, 0x3b, 0x5b, 0xa9, 0x96, 0x59, 0x59, 0xd2, 0xe4, 0x0b, 0x15, 0x9e,
- 0x7d, 0xfc, 0xc3, 0xdb, 0xf5, 0xcc, 0x62, 0x2b, 0x0b, 0xa9, 0x63, 0x7c, 0x9b, 0x40, 0x34, 0x94,
- 0x90, 0x54, 0xaa, 0xf3, 0xfe, 0x82, 0x36, 0xe1, 0x29, 0x81, 0x58, 0x98, 0x7b, 0xc5, 0x61, 0x1c,
- 0xf2, 0x20, 0x86, 0x53, 0x44, 0x1a, 0x78, 0xcb, 0x06, 0x71, 0xd5, 0x4b, 0x82, 0x7e, 0x13, 0xba,
- 0x44, 0xab, 0x6b, 0x38, 0x97, 0xed, 0xaf, 0x49, 0x95, 0xed, 0xd0, 0x20, 0x35, 0xbc, 0x69, 0x83,
- 0xb8, 0x83, 0xc8, 0x83, 0x98, 0x98, 0x4b, 0x6d, 0x05, 0x73, 0x53, 0xa5, 0x30, 0x9b, 0x7b, 0xee,
- 0x71, 0xa9, 0xc1, 0x1c, 0xf0, 0x6f, 0xd8, 0x00, 0xe2, 0x1a, 0x8f, 0xae, 0x41, 0x44, 0x5e, 0x27,
- 0x7b, 0x23, 0xc7, 0x8b, 0x9d, 0x6b, 0x2a, 0xf9, 0x8c, 0xd2, 0x4a, 0x53, 0xf3, 0x3f, 0xe0, 0xb2,
- 0x44, 0x2d, 0xe6, 0x27, 0xab, 0x03, 0x4e, 0x56, 0xb6, 0x15, 0x74, 0xfe, 0x30, 0xc1, 0xc6, 0x1b,
- 0xd9, 0x87, 0x09, 0x26, 0x62, 0xfd, 0x82, 0xe4, 0xf1, 0x4b, 0x5a, 0x70, 0x41, 0x7a, 0x38, 0x17,
- 0x5d, 0xc8, 0x23, 0x6d, 0x71, 0x3f, 0x19, 0x00, 0xd1, 0x06, 0x2a, 0x92, 0x5b, 0xca, 0xbf, 0x83,
- 0xdc, 0x70, 0xd9, 0x1e, 0x4d, 0xa8, 0x31, 0x9e, 0x50, 0x63, 0x36, 0xa1, 0xe8, 0x35, 0xa5, 0xe8,
- 0x3d, 0xa5, 0xe8, 0x33, 0xa5, 0x68, 0x94, 0x52, 0xf4, 0x95, 0x52, 0xf4, 0x9d, 0x52, 0x63, 0x96,
- 0x52, 0xf4, 0x36, 0xa5, 0xc6, 0x68, 0x4a, 0x8d, 0xf1, 0x94, 0x1a, 0xed, 0x23, 0xd7, 0x13, 0xbd,
- 0xc4, 0xa9, 0x76, 0xf8, 0xc0, 0x72, 0x23, 0xd6, 0x65, 0x01, 0xb3, 0x7c, 0xde, 0xf7, 0x2c, 0xfd,
- 0x4f, 0x75, 0xfe, 0xcb, 0xc7, 0xf9, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x79, 0xe4, 0x24, 0x34,
- 0x07, 0x03, 0x00, 0x00,
+ // 361 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xb1, 0x4e, 0xc2, 0x40,
+ 0x18, 0xc7, 0xef, 0x12, 0x63, 0xf4, 0x34, 0x0e, 0xb7, 0x40, 0x40, 0xcf, 0xc4, 0x38, 0xe8, 0x42,
+ 0x8d, 0xbe, 0x80, 0xd1, 0x84, 0x86, 0x04, 0x4d, 0xc4, 0x84, 0x81, 0xc1, 0x78, 0xc5, 0x8f, 0xd2,
+ 0x50, 0x7a, 0xb5, 0xbd, 0x46, 0xd9, 0x7c, 0x04, 0x1f, 0xc3, 0x87, 0xf0, 0x01, 0x1c, 0x19, 0x19,
+ 0xe5, 0x58, 0x1c, 0x79, 0x04, 0xc3, 0x35, 0x85, 0x03, 0x4b, 0xe2, 0x04, 0xfd, 0xfd, 0x7f, 0xdf,
+ 0xff, 0x4b, 0xef, 0x4a, 0x0e, 0xc3, 0x9e, 0x6b, 0xf9, 0xc2, 0x0d, 0x23, 0x21, 0x85, 0xe5, 0x05,
+ 0x4f, 0xf0, 0xea, 0x72, 0x09, 0x2f, 0x7c, 0x50, 0xd1, 0x88, 0xee, 0x99, 0x2c, 0x74, 0x4a, 0xe5,
+ 0xa5, 0x81, 0xec, 0x4f, 0x2a, 0x9f, 0x7f, 0x6e, 0x90, 0xdd, 0xda, 0xcc, 0xb7, 0x53, 0x9f, 0xd6,
+ 0x08, 0xb9, 0x4b, 0x20, 0x1a, 0x68, 0x48, 0xcb, 0x95, 0xb9, 0xbf, 0xa0, 0x0d, 0x78, 0x4e, 0x20,
+ 0x96, 0xa5, 0xfd, 0xfc, 0x30, 0x0e, 0x45, 0x10, 0xc3, 0x19, 0xa6, 0x75, 0xb2, 0x63, 0x83, 0xbc,
+ 0xee, 0x26, 0x41, 0xaf, 0x01, 0x1d, 0x6a, 0xe8, 0x06, 0xce, 0xca, 0x0e, 0xd6, 0xa4, 0x69, 0xdb,
+ 0x11, 0xa2, 0x55, 0xb2, 0x6d, 0x83, 0xbc, 0x87, 0xc8, 0x83, 0x98, 0x96, 0x96, 0xec, 0x14, 0x66,
+ 0x4d, 0xe5, 0xdc, 0x6c, 0xde, 0xf3, 0x40, 0x0a, 0x75, 0xee, 0x80, 0x7f, 0xcb, 0xfb, 0x10, 0x57,
+ 0x45, 0x74, 0x03, 0x32, 0xf2, 0xda, 0xb3, 0x27, 0x7a, 0xb2, 0x98, 0x5c, 0xa3, 0x64, 0x3b, 0x0a,
+ 0x2b, 0xa6, 0xd1, 0xff, 0x48, 0x8a, 0x1a, 0x35, 0xb9, 0x9f, 0xac, 0x2e, 0x38, 0x5d, 0x19, 0xcb,
+ 0x71, 0xfe, 0xb1, 0xc1, 0x26, 0x5b, 0xb3, 0x17, 0x93, 0x5c, 0xc6, 0xe6, 0x05, 0xe9, 0xe3, 0xd7,
+ 0x34, 0xe7, 0x82, 0xcc, 0x70, 0x5e, 0x74, 0xa9, 0x8f, 0xb4, 0x29, 0xfc, 0xa4, 0x0f, 0xd4, 0x58,
+ 0x98, 0x92, 0xac, 0xa5, 0xf8, 0x37, 0xc8, 0x1a, 0xae, 0x5a, 0xc3, 0x31, 0x43, 0xa3, 0x31, 0x43,
+ 0xd3, 0x31, 0xc3, 0x6f, 0x8a, 0xe1, 0x0f, 0xc5, 0xf0, 0x97, 0x62, 0x78, 0xa8, 0x18, 0xfe, 0x56,
+ 0x0c, 0xff, 0x28, 0x86, 0xa6, 0x8a, 0xe1, 0xf7, 0x09, 0x43, 0xc3, 0x09, 0x43, 0xa3, 0x09, 0x43,
+ 0xad, 0x63, 0xd7, 0x93, 0xdd, 0xc4, 0xa9, 0xb4, 0x45, 0xdf, 0x72, 0x23, 0xde, 0xe1, 0x01, 0xb7,
+ 0x7c, 0xd1, 0xf3, 0x2c, 0xf3, 0x4b, 0x75, 0x36, 0xf5, 0xcf, 0xc5, 0x6f, 0x00, 0x00, 0x00, 0xff,
+ 0xff, 0x7a, 0x1a, 0x28, 0xb4, 0xf1, 0x02, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
diff --git a/pkg/logproto/indexgateway.proto b/pkg/logproto/indexgateway.proto
index 9271ee9b2b5f4..af34e03a279cb 100644
--- a/pkg/logproto/indexgateway.proto
+++ b/pkg/logproto/indexgateway.proto
@@ -2,7 +2,6 @@ syntax = "proto3";
package indexgatewaypb;
-import "gogoproto/gogo.proto";
import "pkg/logproto/logproto.proto";
option go_package = "github.com/grafana/loki/pkg/logproto";
diff --git a/pkg/logproto/logproto.pb.go b/pkg/logproto/logproto.pb.go
index 6aa905ab98a82..4745506a16db6 100644
--- a/pkg/logproto/logproto.pb.go
+++ b/pkg/logproto/logproto.pb.go
@@ -10,12 +10,12 @@ import (
fmt "fmt"
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto"
- github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
_ "github.com/gogo/protobuf/types"
github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
stats "github.com/grafana/loki/pkg/logqlmodel/stats"
_ "github.com/grafana/loki/pkg/push"
github_com_grafana_loki_pkg_push "github.com/grafana/loki/pkg/push"
+ github_com_grafana_loki_pkg_querier_plan "github.com/grafana/loki/pkg/querier/plan"
github_com_prometheus_common_model "github.com/prometheus/common/model"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
@@ -216,13 +216,14 @@ func (m *StreamRate) GetPushes() uint32 {
}
type QueryRequest struct {
- Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"`
- Limit uint32 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"`
- Start time.Time `protobuf:"bytes,3,opt,name=start,proto3,stdtime" json:"start"`
- End time.Time `protobuf:"bytes,4,opt,name=end,proto3,stdtime" json:"end"`
- Direction Direction `protobuf:"varint,5,opt,name=direction,proto3,enum=logproto.Direction" json:"direction,omitempty"`
- Shards []string `protobuf:"bytes,7,rep,name=shards,proto3" json:"shards,omitempty"`
- Deletes []*Delete `protobuf:"bytes,8,rep,name=deletes,proto3" json:"deletes,omitempty"`
+ Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"` // Deprecated: Do not use.
+ Limit uint32 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"`
+ Start time.Time `protobuf:"bytes,3,opt,name=start,proto3,stdtime" json:"start"`
+ End time.Time `protobuf:"bytes,4,opt,name=end,proto3,stdtime" json:"end"`
+ Direction Direction `protobuf:"varint,5,opt,name=direction,proto3,enum=logproto.Direction" json:"direction,omitempty"`
+ Shards []string `protobuf:"bytes,7,rep,name=shards,proto3" json:"shards,omitempty"`
+ Deletes []*Delete `protobuf:"bytes,8,rep,name=deletes,proto3" json:"deletes,omitempty"`
+ Plan *github_com_grafana_loki_pkg_querier_plan.QueryPlan `protobuf:"bytes,9,opt,name=plan,proto3,customtype=github.com/grafana/loki/pkg/querier/plan.QueryPlan" json:"plan,omitempty"`
}
func (m *QueryRequest) Reset() { *m = QueryRequest{} }
@@ -257,6 +258,7 @@ func (m *QueryRequest) XXX_DiscardUnknown() {
var xxx_messageInfo_QueryRequest proto.InternalMessageInfo
+// Deprecated: Do not use.
func (m *QueryRequest) GetSelector() string {
if m != nil {
return m.Selector
@@ -307,11 +309,12 @@ func (m *QueryRequest) GetDeletes() []*Delete {
}
type SampleQueryRequest struct {
- Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"`
- Start time.Time `protobuf:"bytes,2,opt,name=start,proto3,stdtime" json:"start"`
- End time.Time `protobuf:"bytes,3,opt,name=end,proto3,stdtime" json:"end"`
- Shards []string `protobuf:"bytes,4,rep,name=shards,proto3" json:"shards,omitempty"`
- Deletes []*Delete `protobuf:"bytes,5,rep,name=deletes,proto3" json:"deletes,omitempty"`
+ Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"` // Deprecated: Do not use.
+ Start time.Time `protobuf:"bytes,2,opt,name=start,proto3,stdtime" json:"start"`
+ End time.Time `protobuf:"bytes,3,opt,name=end,proto3,stdtime" json:"end"`
+ Shards []string `protobuf:"bytes,4,rep,name=shards,proto3" json:"shards,omitempty"`
+ Deletes []*Delete `protobuf:"bytes,5,rep,name=deletes,proto3" json:"deletes,omitempty"`
+ Plan *github_com_grafana_loki_pkg_querier_plan.QueryPlan `protobuf:"bytes,6,opt,name=plan,proto3,customtype=github.com/grafana/loki/pkg/querier/plan.QueryPlan" json:"plan,omitempty"`
}
func (m *SampleQueryRequest) Reset() { *m = SampleQueryRequest{} }
@@ -346,6 +349,7 @@ func (m *SampleQueryRequest) XXX_DiscardUnknown() {
var xxx_messageInfo_SampleQueryRequest proto.InternalMessageInfo
+// Deprecated: Do not use.
func (m *SampleQueryRequest) GetSelector() string {
if m != nil {
return m.Selector
@@ -381,6 +385,49 @@ func (m *SampleQueryRequest) GetDeletes() []*Delete {
return nil
}
+type Plan struct {
+ Raw []byte `protobuf:"bytes,1,opt,name=raw,proto3" json:"raw,omitempty"`
+}
+
+func (m *Plan) Reset() { *m = Plan{} }
+func (*Plan) ProtoMessage() {}
+func (*Plan) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c28a5f14f1f4c79a, []int{5}
+}
+func (m *Plan) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Plan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Plan.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Plan) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Plan.Merge(m, src)
+}
+func (m *Plan) XXX_Size() int {
+ return m.Size()
+}
+func (m *Plan) XXX_DiscardUnknown() {
+ xxx_messageInfo_Plan.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Plan proto.InternalMessageInfo
+
+func (m *Plan) GetRaw() []byte {
+ if m != nil {
+ return m.Raw
+ }
+ return nil
+}
+
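The new top-level `Plan` message added above is a thin wrapper around raw bytes; note it is distinct from the `plan.QueryPlan` custom type used on the request structs. A small round-trip through the generated accessors (the `Marshal` side appears further down in this diff; `Unmarshal` is the usual gogo companion and is assumed here), with an arbitrary placeholder payload:

```go
package main

import (
	"fmt"

	"github.com/grafana/loki/pkg/logproto"
)

func main() {
	// Round-trip the new wrapper message through its gogo-generated methods.
	// In Loki the Raw bytes carry a serialized query plan; this literal is
	// just a placeholder for illustration.
	in := &logproto.Plan{Raw: []byte("placeholder")}

	buf, err := in.Marshal()
	if err != nil {
		panic(err)
	}

	out := &logproto.Plan{}
	if err := out.Unmarshal(buf); err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", out.GetRaw()) // "placeholder"
}
```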
type Delete struct {
Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"`
Start int64 `protobuf:"varint,2,opt,name=start,proto3" json:"start,omitempty"`
@@ -390,7 +437,7 @@ type Delete struct {
func (m *Delete) Reset() { *m = Delete{} }
func (*Delete) ProtoMessage() {}
func (*Delete) Descriptor() ([]byte, []int) {
- return fileDescriptor_c28a5f14f1f4c79a, []int{5}
+ return fileDescriptor_c28a5f14f1f4c79a, []int{6}
}
func (m *Delete) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -448,7 +495,7 @@ type QueryResponse struct {
func (m *QueryResponse) Reset() { *m = QueryResponse{} }
func (*QueryResponse) ProtoMessage() {}
func (*QueryResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_c28a5f14f1f4c79a, []int{6}
+ return fileDescriptor_c28a5f14f1f4c79a, []int{7}
}
func (m *QueryResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -492,7 +539,7 @@ type SampleQueryResponse struct {
func (m *SampleQueryResponse) Reset() { *m = SampleQueryResponse{} }
func (*SampleQueryResponse) ProtoMessage() {}
func (*SampleQueryResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_c28a5f14f1f4c79a, []int{7}
+ return fileDescriptor_c28a5f14f1f4c79a, []int{8}
}
func (m *SampleQueryResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -539,7 +586,7 @@ type LabelRequest struct {
func (m *LabelRequest) Reset() { *m = LabelRequest{} }
func (*LabelRequest) ProtoMessage() {}
func (*LabelRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_c28a5f14f1f4c79a, []int{8}
+ return fileDescriptor_c28a5f14f1f4c79a, []int{9}
}
func (m *LabelRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -610,7 +657,7 @@ type LabelResponse struct {
func (m *LabelResponse) Reset() { *m = LabelResponse{} }
func (*LabelResponse) ProtoMessage() {}
func (*LabelResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_c28a5f14f1f4c79a, []int{9}
+ return fileDescriptor_c28a5f14f1f4c79a, []int{10}
}
func (m *LabelResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -655,7 +702,7 @@ type Sample struct {
func (m *Sample) Reset() { *m = Sample{} }
func (*Sample) ProtoMessage() {}
func (*Sample) Descriptor() ([]byte, []int) {
- return fileDescriptor_c28a5f14f1f4c79a, []int{10}
+ return fileDescriptor_c28a5f14f1f4c79a, []int{11}
}
func (m *Sample) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -714,7 +761,7 @@ type LegacySample struct {
func (m *LegacySample) Reset() { *m = LegacySample{} }
func (*LegacySample) ProtoMessage() {}
func (*LegacySample) Descriptor() ([]byte, []int) {
- return fileDescriptor_c28a5f14f1f4c79a, []int{11}
+ return fileDescriptor_c28a5f14f1f4c79a, []int{12}
}
func (m *LegacySample) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -766,7 +813,7 @@ type Series struct {
func (m *Series) Reset() { *m = Series{} }
func (*Series) ProtoMessage() {}
func (*Series) Descriptor() ([]byte, []int) {
- return fileDescriptor_c28a5f14f1f4c79a, []int{12}
+ return fileDescriptor_c28a5f14f1f4c79a, []int{13}
}
func (m *Series) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -817,16 +864,17 @@ func (m *Series) GetStreamHash() uint64 {
}
type TailRequest struct {
- Query string `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"`
- DelayFor uint32 `protobuf:"varint,3,opt,name=delayFor,proto3" json:"delayFor,omitempty"`
- Limit uint32 `protobuf:"varint,4,opt,name=limit,proto3" json:"limit,omitempty"`
- Start time.Time `protobuf:"bytes,5,opt,name=start,proto3,stdtime" json:"start"`
+ Query string `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"` // Deprecated: Do not use.
+ DelayFor uint32 `protobuf:"varint,3,opt,name=delayFor,proto3" json:"delayFor,omitempty"`
+ Limit uint32 `protobuf:"varint,4,opt,name=limit,proto3" json:"limit,omitempty"`
+ Start time.Time `protobuf:"bytes,5,opt,name=start,proto3,stdtime" json:"start"`
+ Plan *github_com_grafana_loki_pkg_querier_plan.QueryPlan `protobuf:"bytes,6,opt,name=plan,proto3,customtype=github.com/grafana/loki/pkg/querier/plan.QueryPlan" json:"plan,omitempty"`
}
func (m *TailRequest) Reset() { *m = TailRequest{} }
func (*TailRequest) ProtoMessage() {}
func (*TailRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_c28a5f14f1f4c79a, []int{13}
+ return fileDescriptor_c28a5f14f1f4c79a, []int{14}
}
func (m *TailRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -855,6 +903,7 @@ func (m *TailRequest) XXX_DiscardUnknown() {
var xxx_messageInfo_TailRequest proto.InternalMessageInfo
+// Deprecated: Do not use.
func (m *TailRequest) GetQuery() string {
if m != nil {
return m.Query
@@ -891,7 +940,7 @@ type TailResponse struct {
func (m *TailResponse) Reset() { *m = TailResponse{} }
func (*TailResponse) ProtoMessage() {}
func (*TailResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_c28a5f14f1f4c79a, []int{14}
+ return fileDescriptor_c28a5f14f1f4c79a, []int{15}
}
func (m *TailResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -937,7 +986,7 @@ type SeriesRequest struct {
func (m *SeriesRequest) Reset() { *m = SeriesRequest{} }
func (*SeriesRequest) ProtoMessage() {}
func (*SeriesRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_c28a5f14f1f4c79a, []int{15}
+ return fileDescriptor_c28a5f14f1f4c79a, []int{16}
}
func (m *SeriesRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1001,7 +1050,7 @@ type SeriesResponse struct {
func (m *SeriesResponse) Reset() { *m = SeriesResponse{} }
func (*SeriesResponse) ProtoMessage() {}
func (*SeriesResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_c28a5f14f1f4c79a, []int{16}
+ return fileDescriptor_c28a5f14f1f4c79a, []int{17}
}
func (m *SeriesResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1038,13 +1087,13 @@ func (m *SeriesResponse) GetSeries() []SeriesIdentifier {
}
type SeriesIdentifier struct {
- Labels map[string]string `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ Labels []SeriesIdentifier_LabelsEntry `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"`
}
func (m *SeriesIdentifier) Reset() { *m = SeriesIdentifier{} }
func (*SeriesIdentifier) ProtoMessage() {}
func (*SeriesIdentifier) Descriptor() ([]byte, []int) {
- return fileDescriptor_c28a5f14f1f4c79a, []int{17}
+ return fileDescriptor_c28a5f14f1f4c79a, []int{18}
}
func (m *SeriesIdentifier) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1073,13 +1122,64 @@ func (m *SeriesIdentifier) XXX_DiscardUnknown() {
var xxx_messageInfo_SeriesIdentifier proto.InternalMessageInfo
-func (m *SeriesIdentifier) GetLabels() map[string]string {
+func (m *SeriesIdentifier) GetLabels() []SeriesIdentifier_LabelsEntry {
if m != nil {
return m.Labels
}
return nil
}
+type SeriesIdentifier_LabelsEntry struct {
+ Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *SeriesIdentifier_LabelsEntry) Reset() { *m = SeriesIdentifier_LabelsEntry{} }
+func (*SeriesIdentifier_LabelsEntry) ProtoMessage() {}
+func (*SeriesIdentifier_LabelsEntry) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c28a5f14f1f4c79a, []int{18, 0}
+}
+func (m *SeriesIdentifier_LabelsEntry) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SeriesIdentifier_LabelsEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_SeriesIdentifier_LabelsEntry.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *SeriesIdentifier_LabelsEntry) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SeriesIdentifier_LabelsEntry.Merge(m, src)
+}
+func (m *SeriesIdentifier_LabelsEntry) XXX_Size() int {
+ return m.Size()
+}
+func (m *SeriesIdentifier_LabelsEntry) XXX_DiscardUnknown() {
+ xxx_messageInfo_SeriesIdentifier_LabelsEntry.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SeriesIdentifier_LabelsEntry proto.InternalMessageInfo
+
+func (m *SeriesIdentifier_LabelsEntry) GetKey() string {
+ if m != nil {
+ return m.Key
+ }
+ return ""
+}
+
+func (m *SeriesIdentifier_LabelsEntry) GetValue() string {
+ if m != nil {
+ return m.Value
+ }
+ return ""
+}
+
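`SeriesIdentifier.Labels` switches from a `map[string]string` to a repeated `SeriesIdentifier_LabelsEntry` message. The wire format is unchanged, because proto3 maps are encoded exactly as a repeated entry message with the key in field 1 and the value in field 2; only the Go API changes. A sketch of converting an existing label map into the new representation (the helper name and the sort for deterministic output are assumptions):

```go
package example

import (
	"sort"

	"github.com/grafana/loki/pkg/logproto"
)

// labelsFromMap converts a label map into the new repeated-entry form.
// Map iteration order is random, so the result is sorted by key to keep
// the output deterministic.
func labelsFromMap(m map[string]string) []logproto.SeriesIdentifier_LabelsEntry {
	entries := make([]logproto.SeriesIdentifier_LabelsEntry, 0, len(m))
	for k, v := range m {
		entries = append(entries, logproto.SeriesIdentifier_LabelsEntry{Key: k, Value: v})
	}
	sort.Slice(entries, func(i, j int) bool { return entries[i].Key < entries[j].Key })
	return entries
}
```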
type DroppedStream struct {
From time.Time `protobuf:"bytes,1,opt,name=from,proto3,stdtime" json:"from"`
To time.Time `protobuf:"bytes,2,opt,name=to,proto3,stdtime" json:"to"`
@@ -1089,7 +1189,7 @@ type DroppedStream struct {
func (m *DroppedStream) Reset() { *m = DroppedStream{} }
func (*DroppedStream) ProtoMessage() {}
func (*DroppedStream) Descriptor() ([]byte, []int) {
- return fileDescriptor_c28a5f14f1f4c79a, []int{18}
+ return fileDescriptor_c28a5f14f1f4c79a, []int{19}
}
func (m *DroppedStream) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1147,7 +1247,7 @@ type LabelPair struct {
func (m *LabelPair) Reset() { *m = LabelPair{} }
func (*LabelPair) ProtoMessage() {}
func (*LabelPair) Descriptor() ([]byte, []int) {
- return fileDescriptor_c28a5f14f1f4c79a, []int{19}
+ return fileDescriptor_c28a5f14f1f4c79a, []int{20}
}
func (m *LabelPair) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1200,7 +1300,7 @@ type LegacyLabelPair struct {
func (m *LegacyLabelPair) Reset() { *m = LegacyLabelPair{} }
func (*LegacyLabelPair) ProtoMessage() {}
func (*LegacyLabelPair) Descriptor() ([]byte, []int) {
- return fileDescriptor_c28a5f14f1f4c79a, []int{20}
+ return fileDescriptor_c28a5f14f1f4c79a, []int{21}
}
func (m *LegacyLabelPair) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1250,7 +1350,7 @@ type Chunk struct {
func (m *Chunk) Reset() { *m = Chunk{} }
func (*Chunk) ProtoMessage() {}
func (*Chunk) Descriptor() ([]byte, []int) {
- return fileDescriptor_c28a5f14f1f4c79a, []int{21}
+ return fileDescriptor_c28a5f14f1f4c79a, []int{22}
}
func (m *Chunk) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1292,7 +1392,7 @@ type TailersCountRequest struct {
func (m *TailersCountRequest) Reset() { *m = TailersCountRequest{} }
func (*TailersCountRequest) ProtoMessage() {}
func (*TailersCountRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_c28a5f14f1f4c79a, []int{22}
+ return fileDescriptor_c28a5f14f1f4c79a, []int{23}
}
func (m *TailersCountRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1328,7 +1428,7 @@ type TailersCountResponse struct {
func (m *TailersCountResponse) Reset() { *m = TailersCountResponse{} }
func (*TailersCountResponse) ProtoMessage() {}
func (*TailersCountResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_c28a5f14f1f4c79a, []int{23}
+ return fileDescriptor_c28a5f14f1f4c79a, []int{24}
}
func (m *TailersCountResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1373,7 +1473,7 @@ type GetChunkIDsRequest struct {
func (m *GetChunkIDsRequest) Reset() { *m = GetChunkIDsRequest{} }
func (*GetChunkIDsRequest) ProtoMessage() {}
func (*GetChunkIDsRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_c28a5f14f1f4c79a, []int{24}
+ return fileDescriptor_c28a5f14f1f4c79a, []int{25}
}
func (m *GetChunkIDsRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1430,7 +1530,7 @@ type GetChunkIDsResponse struct {
func (m *GetChunkIDsResponse) Reset() { *m = GetChunkIDsResponse{} }
func (*GetChunkIDsResponse) ProtoMessage() {}
func (*GetChunkIDsResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_c28a5f14f1f4c79a, []int{25}
+ return fileDescriptor_c28a5f14f1f4c79a, []int{26}
}
func (m *GetChunkIDsResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1483,7 +1583,7 @@ type ChunkRef struct {
func (m *ChunkRef) Reset() { *m = ChunkRef{} }
func (*ChunkRef) ProtoMessage() {}
func (*ChunkRef) Descriptor() ([]byte, []int) {
- return fileDescriptor_c28a5f14f1f4c79a, []int{26}
+ return fileDescriptor_c28a5f14f1f4c79a, []int{27}
}
func (m *ChunkRef) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1544,7 +1644,7 @@ type LabelValuesForMetricNameRequest struct {
func (m *LabelValuesForMetricNameRequest) Reset() { *m = LabelValuesForMetricNameRequest{} }
func (*LabelValuesForMetricNameRequest) ProtoMessage() {}
func (*LabelValuesForMetricNameRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_c28a5f14f1f4c79a, []int{27}
+ return fileDescriptor_c28a5f14f1f4c79a, []int{28}
}
func (m *LabelValuesForMetricNameRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1603,7 +1703,7 @@ type LabelNamesForMetricNameRequest struct {
func (m *LabelNamesForMetricNameRequest) Reset() { *m = LabelNamesForMetricNameRequest{} }
func (*LabelNamesForMetricNameRequest) ProtoMessage() {}
func (*LabelNamesForMetricNameRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_c28a5f14f1f4c79a, []int{28}
+ return fileDescriptor_c28a5f14f1f4c79a, []int{29}
}
func (m *LabelNamesForMetricNameRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1647,7 +1747,7 @@ type LineFilterExpression struct {
func (m *LineFilterExpression) Reset() { *m = LineFilterExpression{} }
func (*LineFilterExpression) ProtoMessage() {}
func (*LineFilterExpression) Descriptor() ([]byte, []int) {
- return fileDescriptor_c28a5f14f1f4c79a, []int{29}
+ return fileDescriptor_c28a5f14f1f4c79a, []int{30}
}
func (m *LineFilterExpression) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1700,7 +1800,7 @@ type GetChunkRefRequest struct {
func (m *GetChunkRefRequest) Reset() { *m = GetChunkRefRequest{} }
func (*GetChunkRefRequest) ProtoMessage() {}
func (*GetChunkRefRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_c28a5f14f1f4c79a, []int{30}
+ return fileDescriptor_c28a5f14f1f4c79a, []int{31}
}
func (m *GetChunkRefRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1750,7 +1850,7 @@ type GetChunkRefResponse struct {
func (m *GetChunkRefResponse) Reset() { *m = GetChunkRefResponse{} }
func (*GetChunkRefResponse) ProtoMessage() {}
func (*GetChunkRefResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_c28a5f14f1f4c79a, []int{31}
+ return fileDescriptor_c28a5f14f1f4c79a, []int{32}
}
func (m *GetChunkRefResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1795,7 +1895,7 @@ type GetSeriesRequest struct {
func (m *GetSeriesRequest) Reset() { *m = GetSeriesRequest{} }
func (*GetSeriesRequest) ProtoMessage() {}
func (*GetSeriesRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_c28a5f14f1f4c79a, []int{32}
+ return fileDescriptor_c28a5f14f1f4c79a, []int{33}
}
func (m *GetSeriesRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1838,7 +1938,7 @@ type GetSeriesResponse struct {
func (m *GetSeriesResponse) Reset() { *m = GetSeriesResponse{} }
func (*GetSeriesResponse) ProtoMessage() {}
func (*GetSeriesResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_c28a5f14f1f4c79a, []int{33}
+ return fileDescriptor_c28a5f14f1f4c79a, []int{34}
}
func (m *GetSeriesResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1882,7 +1982,7 @@ type IndexSeries struct {
func (m *IndexSeries) Reset() { *m = IndexSeries{} }
func (*IndexSeries) ProtoMessage() {}
func (*IndexSeries) Descriptor() ([]byte, []int) {
- return fileDescriptor_c28a5f14f1f4c79a, []int{34}
+ return fileDescriptor_c28a5f14f1f4c79a, []int{35}
}
func (m *IndexSeries) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1919,7 +2019,7 @@ type QueryIndexResponse struct {
func (m *QueryIndexResponse) Reset() { *m = QueryIndexResponse{} }
func (*QueryIndexResponse) ProtoMessage() {}
func (*QueryIndexResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_c28a5f14f1f4c79a, []int{35}
+ return fileDescriptor_c28a5f14f1f4c79a, []int{36}
}
func (m *QueryIndexResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1970,7 +2070,7 @@ type Row struct {
func (m *Row) Reset() { *m = Row{} }
func (*Row) ProtoMessage() {}
func (*Row) Descriptor() ([]byte, []int) {
- return fileDescriptor_c28a5f14f1f4c79a, []int{36}
+ return fileDescriptor_c28a5f14f1f4c79a, []int{37}
}
func (m *Row) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2020,7 +2120,7 @@ type QueryIndexRequest struct {
func (m *QueryIndexRequest) Reset() { *m = QueryIndexRequest{} }
func (*QueryIndexRequest) ProtoMessage() {}
func (*QueryIndexRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_c28a5f14f1f4c79a, []int{37}
+ return fileDescriptor_c28a5f14f1f4c79a, []int{38}
}
func (m *QueryIndexRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2067,7 +2167,7 @@ type IndexQuery struct {
func (m *IndexQuery) Reset() { *m = IndexQuery{} }
func (*IndexQuery) ProtoMessage() {}
func (*IndexQuery) Descriptor() ([]byte, []int) {
- return fileDescriptor_c28a5f14f1f4c79a, []int{38}
+ return fileDescriptor_c28a5f14f1f4c79a, []int{39}
}
func (m *IndexQuery) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2140,7 +2240,7 @@ type IndexStatsRequest struct {
func (m *IndexStatsRequest) Reset() { *m = IndexStatsRequest{} }
func (*IndexStatsRequest) ProtoMessage() {}
func (*IndexStatsRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_c28a5f14f1f4c79a, []int{39}
+ return fileDescriptor_c28a5f14f1f4c79a, []int{40}
}
func (m *IndexStatsRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2186,7 +2286,7 @@ type IndexStatsResponse struct {
func (m *IndexStatsResponse) Reset() { *m = IndexStatsResponse{} }
func (*IndexStatsResponse) ProtoMessage() {}
func (*IndexStatsResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_c28a5f14f1f4c79a, []int{40}
+ return fileDescriptor_c28a5f14f1f4c79a, []int{41}
}
func (m *IndexStatsResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2256,7 +2356,7 @@ type VolumeRequest struct {
func (m *VolumeRequest) Reset() { *m = VolumeRequest{} }
func (*VolumeRequest) ProtoMessage() {}
func (*VolumeRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_c28a5f14f1f4c79a, []int{41}
+ return fileDescriptor_c28a5f14f1f4c79a, []int{42}
}
func (m *VolumeRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2328,7 +2428,7 @@ type VolumeResponse struct {
func (m *VolumeResponse) Reset() { *m = VolumeResponse{} }
func (*VolumeResponse) ProtoMessage() {}
func (*VolumeResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_c28a5f14f1f4c79a, []int{42}
+ return fileDescriptor_c28a5f14f1f4c79a, []int{43}
}
func (m *VolumeResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2379,7 +2479,7 @@ type Volume struct {
func (m *Volume) Reset() { *m = Volume{} }
func (*Volume) ProtoMessage() {}
func (*Volume) Descriptor() ([]byte, []int) {
- return fileDescriptor_c28a5f14f1f4c79a, []int{43}
+ return fileDescriptor_c28a5f14f1f4c79a, []int{44}
}
func (m *Volume) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2429,6 +2529,7 @@ func init() {
proto.RegisterType((*StreamRate)(nil), "logproto.StreamRate")
proto.RegisterType((*QueryRequest)(nil), "logproto.QueryRequest")
proto.RegisterType((*SampleQueryRequest)(nil), "logproto.SampleQueryRequest")
+ proto.RegisterType((*Plan)(nil), "logproto.Plan")
proto.RegisterType((*Delete)(nil), "logproto.Delete")
proto.RegisterType((*QueryResponse)(nil), "logproto.QueryResponse")
proto.RegisterType((*SampleQueryResponse)(nil), "logproto.SampleQueryResponse")
@@ -2442,7 +2543,7 @@ func init() {
proto.RegisterType((*SeriesRequest)(nil), "logproto.SeriesRequest")
proto.RegisterType((*SeriesResponse)(nil), "logproto.SeriesResponse")
proto.RegisterType((*SeriesIdentifier)(nil), "logproto.SeriesIdentifier")
- proto.RegisterMapType((map[string]string)(nil), "logproto.SeriesIdentifier.LabelsEntry")
+ proto.RegisterType((*SeriesIdentifier_LabelsEntry)(nil), "logproto.SeriesIdentifier.LabelsEntry")
proto.RegisterType((*DroppedStream)(nil), "logproto.DroppedStream")
proto.RegisterType((*LabelPair)(nil), "logproto.LabelPair")
proto.RegisterType((*LegacyLabelPair)(nil), "logproto.LegacyLabelPair")
@@ -2474,145 +2575,150 @@ func init() {
func init() { proto.RegisterFile("pkg/logproto/logproto.proto", fileDescriptor_c28a5f14f1f4c79a) }
var fileDescriptor_c28a5f14f1f4c79a = []byte{
- // 2202 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x19, 0x4d, 0x8f, 0x1b, 0x49,
- 0xd5, 0x6d, 0xb7, 0xbf, 0x9e, 0x3d, 0x93, 0x49, 0x8d, 0x37, 0xb1, 0x9c, 0xc4, 0x9e, 0x94, 0x96,
- 0xec, 0x28, 0x9b, 0xb5, 0x37, 0xb3, 0xb0, 0x64, 0x13, 0x16, 0x88, 0x67, 0xf2, 0x31, 0xc9, 0xe4,
- 0x83, 0x9a, 0x10, 0xd0, 0x0a, 0x14, 0xf5, 0xd8, 0x65, 0xbb, 0x15, 0xb7, 0xdb, 0xe9, 0x2e, 0x6f,
- 0x32, 0x12, 0x07, 0xfe, 0xc0, 0x4a, 0x7b, 0x43, 0x5c, 0x10, 0x07, 0x24, 0x90, 0x10, 0x17, 0x7e,
- 0x00, 0x5c, 0x90, 0x08, 0xb7, 0x70, 0x5b, 0x71, 0x30, 0x64, 0x72, 0x41, 0x73, 0xda, 0x1b, 0x12,
- 0x07, 0x84, 0xea, 0xab, 0xbb, 0xec, 0xf1, 0xec, 0xae, 0x43, 0x24, 0x94, 0x8b, 0xbb, 0xde, 0xab,
- 0x57, 0xaf, 0xde, 0x77, 0xd5, 0x2b, 0xc3, 0x89, 0xe1, 0xc3, 0x6e, 0xa3, 0xef, 0x77, 0x87, 0x81,
- 0xcf, 0xfc, 0x68, 0x50, 0x17, 0xbf, 0x28, 0xa7, 0xe1, 0x4a, 0xa9, 0xeb, 0x77, 0x7d, 0x49, 0xc3,
- 0x47, 0x72, 0xbe, 0x52, 0xeb, 0xfa, 0x7e, 0xb7, 0x4f, 0x1b, 0x02, 0xda, 0x19, 0x75, 0x1a, 0xcc,
- 0xf5, 0x68, 0xc8, 0x1c, 0x6f, 0xa8, 0x08, 0x56, 0x14, 0xf7, 0x47, 0x7d, 0xcf, 0x6f, 0xd3, 0x7e,
- 0x23, 0x64, 0x0e, 0x0b, 0xe5, 0xaf, 0xa2, 0x58, 0xe6, 0x14, 0xc3, 0x51, 0xd8, 0x13, 0x3f, 0x12,
- 0x89, 0x4b, 0x80, 0xb6, 0x59, 0x40, 0x1d, 0x8f, 0x38, 0x8c, 0x86, 0x84, 0x3e, 0x1a, 0xd1, 0x90,
- 0xe1, 0x5b, 0xb0, 0x3c, 0x81, 0x0d, 0x87, 0xfe, 0x20, 0xa4, 0xe8, 0x7d, 0x28, 0x84, 0x31, 0xba,
- 0x6c, 0xad, 0xa4, 0x56, 0x0b, 0x6b, 0xa5, 0x7a, 0xa4, 0x4a, 0xbc, 0x86, 0x98, 0x84, 0xf8, 0x17,
- 0x16, 0x40, 0x3c, 0x87, 0xaa, 0x00, 0x72, 0xf6, 0xba, 0x13, 0xf6, 0xca, 0xd6, 0x8a, 0xb5, 0x6a,
- 0x13, 0x03, 0x83, 0xce, 0xc1, 0xd1, 0x18, 0xba, 0xed, 0x6f, 0xf7, 0x9c, 0xa0, 0x5d, 0x4e, 0x0a,
- 0xb2, 0x83, 0x13, 0x08, 0x81, 0x1d, 0x38, 0x8c, 0x96, 0x53, 0x2b, 0xd6, 0x6a, 0x8a, 0x88, 0x31,
- 0x3a, 0x06, 0x19, 0x46, 0x07, 0xce, 0x80, 0x95, 0xed, 0x15, 0x6b, 0x35, 0x4f, 0x14, 0xc4, 0xf1,
- 0x5c, 0x77, 0x1a, 0x96, 0xd3, 0x2b, 0xd6, 0xea, 0x02, 0x51, 0x10, 0xfe, 0x73, 0x12, 0x8a, 0xdf,
- 0x1b, 0xd1, 0x60, 0x57, 0x19, 0x00, 0x55, 0x20, 0x17, 0xd2, 0x3e, 0x6d, 0x31, 0x3f, 0x10, 0x02,
- 0xe6, 0x49, 0x04, 0xa3, 0x12, 0xa4, 0xfb, 0xae, 0xe7, 0x32, 0x21, 0xd2, 0x02, 0x91, 0x00, 0xba,
- 0x08, 0xe9, 0x90, 0x39, 0x01, 0x13, 0x72, 0x14, 0xd6, 0x2a, 0x75, 0xe9, 0xb0, 0xba, 0x76, 0x58,
- 0xfd, 0x9e, 0x76, 0x58, 0x33, 0xf7, 0x74, 0x5c, 0x4b, 0x7c, 0xfa, 0xf7, 0x9a, 0x45, 0xe4, 0x12,
- 0xf4, 0x3e, 0xa4, 0xe8, 0xa0, 0x2d, 0x64, 0xfd, 0xaa, 0x2b, 0xf9, 0x02, 0x74, 0x1e, 0xf2, 0x6d,
- 0x37, 0xa0, 0x2d, 0xe6, 0xfa, 0x03, 0xa1, 0xd1, 0xe2, 0xda, 0x72, 0xec, 0x8d, 0x0d, 0x3d, 0x45,
- 0x62, 0x2a, 0x74, 0x0e, 0x32, 0x21, 0x37, 0x5b, 0x58, 0xce, 0xae, 0xa4, 0x56, 0xf3, 0xcd, 0xd2,
- 0xfe, 0xb8, 0xb6, 0x24, 0x31, 0xe7, 0x7c, 0xcf, 0x65, 0xd4, 0x1b, 0xb2, 0x5d, 0xa2, 0x68, 0xd0,
- 0x59, 0xc8, 0xb6, 0x69, 0x9f, 0x72, 0x67, 0xe7, 0x84, 0xb3, 0x97, 0x0c, 0xf6, 0x62, 0x82, 0x68,
- 0x82, 0x1b, 0x76, 0x2e, 0xb3, 0x94, 0xc5, 0xff, 0xb1, 0x00, 0x6d, 0x3b, 0xde, 0xb0, 0x4f, 0xbf,
- 0xb2, 0x3d, 0x23, 0xcb, 0x25, 0x5f, 0xda, 0x72, 0xa9, 0x79, 0x2d, 0x17, 0x9b, 0xc1, 0x9e, 0xcf,
- 0x0c, 0xe9, 0x2f, 0x31, 0x03, 0xde, 0x82, 0x8c, 0x44, 0x7d, 0x59, 0x0c, 0xc5, 0x3a, 0xa7, 0xb4,
- 0x36, 0x4b, 0xb1, 0x36, 0x29, 0x21, 0x27, 0xfe, 0xa5, 0x05, 0x0b, 0xca, 0x90, 0x2a, 0x07, 0x77,
- 0x20, 0x2b, 0x73, 0x40, 0xe7, 0xdf, 0xf1, 0xe9, 0xfc, 0xbb, 0xdc, 0x76, 0x86, 0x8c, 0x06, 0xcd,
- 0xc6, 0xd3, 0x71, 0xcd, 0xfa, 0xdb, 0xb8, 0xf6, 0x56, 0xd7, 0x65, 0xbd, 0xd1, 0x4e, 0xbd, 0xe5,
- 0x7b, 0x8d, 0x6e, 0xe0, 0x74, 0x9c, 0x81, 0xd3, 0xe8, 0xfb, 0x0f, 0xdd, 0x86, 0xae, 0x07, 0x3a,
- 0x6f, 0x35, 0x63, 0xf4, 0xb6, 0x90, 0x8e, 0x85, 0xca, 0x23, 0x47, 0xea, 0xb2, 0x8c, 0x6c, 0x0e,
- 0xba, 0x34, 0xe4, 0x9c, 0x6d, 0x6e, 0x4c, 0x22, 0x69, 0xf0, 0x4f, 0x60, 0x79, 0xc2, 0xe1, 0x4a,
- 0xce, 0x0b, 0x90, 0x09, 0x69, 0xe0, 0x46, 0x65, 0xc2, 0x30, 0xd9, 0xb6, 0xc0, 0x37, 0x17, 0x95,
- 0x7c, 0x19, 0x09, 0x13, 0x45, 0x3f, 0xdf, 0xee, 0x7f, 0xb2, 0xa0, 0xb8, 0xe5, 0xec, 0xd0, 0xbe,
- 0x8e, 0x34, 0x04, 0xf6, 0xc0, 0xf1, 0xa8, 0xb2, 0xb8, 0x18, 0xf3, 0xb4, 0xff, 0xd8, 0xe9, 0x8f,
- 0xa8, 0x64, 0x99, 0x23, 0x0a, 0x9a, 0x37, 0x67, 0xad, 0x97, 0xce, 0x59, 0x2b, 0x8e, 0xbc, 0x12,
- 0xa4, 0x1f, 0x71, 0x43, 0x89, 0x7c, 0xcd, 0x13, 0x09, 0xe0, 0xb7, 0x60, 0x41, 0x69, 0xa1, 0xcc,
- 0x17, 0x8b, 0xcc, 0xcd, 0x97, 0xd7, 0x22, 0x63, 0x0f, 0x32, 0xd2, 0xda, 0xe8, 0x4d, 0xc8, 0x47,
- 0x67, 0x80, 0xd0, 0x36, 0xd5, 0xcc, 0xec, 0x8f, 0x6b, 0x49, 0x16, 0x92, 0x78, 0x02, 0xd5, 0x20,
- 0x2d, 0x56, 0x0a, 0xcd, 0xad, 0x66, 0x7e, 0x7f, 0x5c, 0x93, 0x08, 0x22, 0x3f, 0xe8, 0x24, 0xd8,
- 0x3d, 0x5e, 0x86, 0xb9, 0x09, 0xec, 0x66, 0x6e, 0x7f, 0x5c, 0x13, 0x30, 0x11, 0xbf, 0xf8, 0x1a,
- 0x14, 0xb7, 0x68, 0xd7, 0x69, 0xed, 0xaa, 0x4d, 0x4b, 0x9a, 0x1d, 0xdf, 0xd0, 0xd2, 0x3c, 0x4e,
- 0x43, 0x31, 0xda, 0xf1, 0x81, 0x17, 0xaa, 0xa0, 0x2e, 0x44, 0xb8, 0x5b, 0x21, 0xfe, 0xb9, 0x05,
- 0xca, 0xcf, 0x08, 0x43, 0xa6, 0xcf, 0x75, 0x0d, 0xa5, 0x8f, 0x9a, 0xb0, 0x3f, 0xae, 0x29, 0x0c,
- 0x51, 0x5f, 0x74, 0x09, 0xb2, 0xa1, 0xd8, 0x91, 0x33, 0x9b, 0x0e, 0x1f, 0x31, 0xd1, 0x3c, 0xc2,
- 0xc3, 0x60, 0x7f, 0x5c, 0xd3, 0x84, 0x44, 0x0f, 0x50, 0x7d, 0xe2, 0x7c, 0x91, 0x8a, 0x2d, 0xee,
- 0x8f, 0x6b, 0x06, 0xd6, 0x3c, 0x6f, 0xf0, 0xcf, 0x2c, 0x28, 0xdc, 0x73, 0xdc, 0x28, 0x84, 0x22,
- 0x17, 0x59, 0x86, 0x8b, 0x78, 0x3a, 0xb7, 0x69, 0xdf, 0xd9, 0xbd, 0xea, 0x07, 0x82, 0xe7, 0x02,
- 0x89, 0xe0, 0xf8, 0x48, 0xb0, 0x67, 0x1e, 0x09, 0xe9, 0xb9, 0x0b, 0xdb, 0x0d, 0x3b, 0x97, 0x5c,
- 0x4a, 0xe1, 0xdf, 0x59, 0x50, 0x94, 0x92, 0xa9, 0xb0, 0xf8, 0x11, 0x64, 0xa4, 0xe0, 0x42, 0xb6,
- 0x2f, 0x48, 0xfe, 0xb7, 0xe7, 0x49, 0x7c, 0xc5, 0x13, 0x7d, 0x07, 0x16, 0xdb, 0x81, 0x3f, 0x1c,
- 0xd2, 0xf6, 0xb6, 0x2a, 0x31, 0xc9, 0xe9, 0x12, 0xb3, 0x61, 0xce, 0x93, 0x29, 0x72, 0xfc, 0x17,
- 0x0b, 0x16, 0x54, 0x36, 0x2b, 0x5b, 0x46, 0x36, 0xb0, 0x5e, 0xba, 0xb8, 0x27, 0xe7, 0x2d, 0xee,
- 0xc7, 0x20, 0xd3, 0x0d, 0xfc, 0xd1, 0x30, 0x2c, 0xa7, 0x64, 0xee, 0x48, 0x68, 0xbe, 0xa2, 0x8f,
- 0x6f, 0xc0, 0xa2, 0x56, 0xe5, 0x90, 0x92, 0x56, 0x99, 0x2e, 0x69, 0x9b, 0x6d, 0x3a, 0x60, 0x6e,
- 0xc7, 0x8d, 0x8a, 0x94, 0xa2, 0xc7, 0x9f, 0x58, 0xb0, 0x34, 0x4d, 0x82, 0xbe, 0x6d, 0xe4, 0x01,
- 0x67, 0x77, 0xe6, 0x70, 0x76, 0x75, 0x51, 0x1c, 0xc2, 0x2b, 0x03, 0x16, 0xec, 0xea, 0x1c, 0xa9,
- 0x7c, 0x00, 0x05, 0x03, 0xcd, 0x0f, 0x8f, 0x87, 0x54, 0xc7, 0x2c, 0x1f, 0xc6, 0xc9, 0x9a, 0x94,
- 0x71, 0x2c, 0x80, 0x8b, 0xc9, 0x0b, 0x16, 0x8f, 0xf8, 0x85, 0x09, 0x4f, 0xa2, 0x0b, 0x60, 0x77,
- 0x02, 0xdf, 0x9b, 0xcb, 0x4d, 0x62, 0x05, 0xfa, 0x3a, 0x24, 0x99, 0x3f, 0x97, 0x93, 0x92, 0xcc,
- 0xe7, 0x3e, 0x52, 0xca, 0xa7, 0xe4, 0x0d, 0x4d, 0x42, 0xf8, 0x1b, 0x90, 0x17, 0x4a, 0xdd, 0x75,
- 0xdc, 0x60, 0x66, 0x2d, 0x9f, 0xa9, 0x14, 0xbe, 0x04, 0x47, 0x64, 0x9d, 0x9a, 0xbd, 0xb8, 0x38,
- 0x6b, 0x71, 0x51, 0x2f, 0x3e, 0x01, 0xe9, 0xf5, 0xde, 0x68, 0xf0, 0x90, 0x2f, 0x69, 0x3b, 0xcc,
- 0xd1, 0x4b, 0xf8, 0x18, 0xbf, 0x01, 0xcb, 0x3c, 0x03, 0x69, 0x10, 0xae, 0xfb, 0xa3, 0x01, 0xd3,
- 0x37, 0xe4, 0x73, 0x50, 0x9a, 0x44, 0xab, 0x18, 0x29, 0x41, 0xba, 0xc5, 0x11, 0x82, 0xc7, 0x02,
- 0x91, 0x00, 0xfe, 0x95, 0x05, 0xe8, 0x1a, 0x65, 0x62, 0x97, 0xcd, 0x8d, 0xd0, 0xb8, 0x15, 0x79,
- 0x0e, 0x6b, 0xf5, 0x68, 0x10, 0xea, 0x1b, 0x82, 0x86, 0xff, 0x1f, 0xb7, 0x22, 0x7c, 0x1e, 0x96,
- 0x27, 0xa4, 0x54, 0x3a, 0x55, 0x20, 0xd7, 0x52, 0x38, 0x75, 0x1a, 0x45, 0x30, 0xfe, 0x7d, 0x12,
- 0x72, 0x62, 0x01, 0xa1, 0x1d, 0x74, 0x1e, 0x0a, 0x1d, 0x77, 0xd0, 0xa5, 0xc1, 0x30, 0x70, 0x95,
- 0x09, 0xec, 0xe6, 0x91, 0xfd, 0x71, 0xcd, 0x44, 0x13, 0x13, 0x40, 0xef, 0x40, 0x76, 0x14, 0xd2,
- 0xe0, 0x81, 0x2b, 0xf3, 0x3c, 0xdf, 0x2c, 0xed, 0x8d, 0x6b, 0x99, 0xef, 0x87, 0x34, 0xd8, 0xdc,
- 0xe0, 0xe7, 0xc2, 0x48, 0x8c, 0x88, 0xfc, 0xb6, 0xd1, 0x4d, 0x15, 0xa6, 0xe2, 0x8a, 0xd4, 0xfc,
- 0x26, 0x17, 0x7f, 0xaa, 0xd0, 0x0d, 0x03, 0xdf, 0xa3, 0xac, 0x47, 0x47, 0x61, 0xa3, 0xe5, 0x7b,
- 0x9e, 0x3f, 0x68, 0x88, 0x7e, 0x48, 0x28, 0xcd, 0x0f, 0x37, 0xbe, 0x5c, 0x45, 0xee, 0x3d, 0xc8,
- 0xb2, 0x5e, 0xe0, 0x8f, 0xba, 0x3d, 0x51, 0xb7, 0x53, 0xcd, 0x8b, 0xf3, 0xf3, 0xd3, 0x1c, 0x88,
- 0x1e, 0xa0, 0xd3, 0xdc, 0x5a, 0xb4, 0xf5, 0x30, 0x1c, 0x79, 0xb2, 0xcb, 0x68, 0xa6, 0xf7, 0xc7,
- 0x35, 0xeb, 0x1d, 0x12, 0xa1, 0xf1, 0x27, 0x49, 0xa8, 0x89, 0x40, 0xbd, 0x2f, 0x0e, 0xf5, 0xab,
- 0x7e, 0x70, 0x8b, 0xb2, 0xc0, 0x6d, 0xdd, 0x76, 0x3c, 0xaa, 0x63, 0xa3, 0x06, 0x05, 0x4f, 0x20,
- 0x1f, 0x18, 0x29, 0x00, 0x5e, 0x44, 0x87, 0x4e, 0x01, 0x88, 0x9c, 0x91, 0xf3, 0x32, 0x1b, 0xf2,
- 0x02, 0x23, 0xa6, 0xd7, 0x27, 0x2c, 0xd5, 0x98, 0x53, 0x33, 0x65, 0xa1, 0xcd, 0x69, 0x0b, 0xcd,
- 0xcd, 0x27, 0x32, 0x8b, 0x19, 0xeb, 0xe9, 0xc9, 0x58, 0xc7, 0x7f, 0xb5, 0xa0, 0xba, 0xa5, 0x25,
- 0x7f, 0x49, 0x73, 0x68, 0x7d, 0x93, 0xaf, 0x48, 0xdf, 0xd4, 0xff, 0xa6, 0x2f, 0xbe, 0x0e, 0xa5,
- 0x2d, 0x77, 0x40, 0xaf, 0xba, 0x7d, 0x46, 0x83, 0x2b, 0x4f, 0x86, 0x01, 0x0d, 0x43, 0xde, 0x80,
- 0x55, 0x20, 0xe7, 0x0f, 0x69, 0xe0, 0xe8, 0xae, 0x20, 0x45, 0x22, 0x98, 0x17, 0x0f, 0x61, 0x13,
- 0x5d, 0xdb, 0x04, 0x80, 0xff, 0x6d, 0x14, 0x0f, 0x42, 0x3b, 0xda, 0x22, 0xeb, 0x46, 0xc5, 0x7e,
- 0x15, 0x0a, 0x27, 0x5f, 0xa1, 0x83, 0x53, 0x53, 0xc5, 0xec, 0x02, 0x64, 0x3b, 0xc2, 0x10, 0xf2,
- 0xe8, 0x2d, 0xac, 0x55, 0xe3, 0xb3, 0x6e, 0x96, 0x95, 0x88, 0x26, 0xc7, 0x1f, 0xc6, 0x25, 0x49,
- 0xe8, 0xae, 0x4a, 0xd2, 0x19, 0xb0, 0x03, 0xda, 0xd1, 0x27, 0x27, 0x8a, 0xb9, 0x45, 0x94, 0x62,
- 0x1e, 0xff, 0xc1, 0x82, 0xa5, 0x6b, 0x94, 0x4d, 0xde, 0x49, 0x5e, 0x23, 0xcb, 0xe1, 0xeb, 0x70,
- 0xd4, 0x90, 0x5f, 0x69, 0xff, 0xde, 0xd4, 0x45, 0xe4, 0x8d, 0x58, 0xff, 0xcd, 0x41, 0x9b, 0x3e,
- 0x51, 0x0d, 0xd6, 0xe4, 0x1d, 0xe4, 0x2e, 0x14, 0x8c, 0x49, 0x74, 0x79, 0xea, 0xf6, 0x61, 0x3c,
- 0x1c, 0x44, 0x67, 0x68, 0xb3, 0xa4, 0x74, 0x92, 0x2d, 0x96, 0xba, 0x5b, 0x46, 0x67, 0xf5, 0x36,
- 0x20, 0xd1, 0xf3, 0x09, 0xb6, 0xe6, 0x69, 0x21, 0xb0, 0x37, 0xa3, 0xcb, 0x48, 0x04, 0xa3, 0xd3,
- 0x60, 0x07, 0xfe, 0x63, 0x7d, 0xad, 0x5c, 0x88, 0xb7, 0x24, 0xfe, 0x63, 0x22, 0xa6, 0xf0, 0x25,
- 0x48, 0x11, 0xff, 0x31, 0xaa, 0x02, 0x04, 0xce, 0xa0, 0x4b, 0xef, 0x47, 0xdd, 0x46, 0x91, 0x18,
- 0x98, 0x43, 0x4e, 0xf2, 0x75, 0x38, 0x6a, 0x4a, 0x24, 0xdd, 0x5d, 0x87, 0x2c, 0x47, 0xba, 0xb3,
- 0x5e, 0xac, 0x04, 0xa1, 0x6c, 0x5c, 0x35, 0x11, 0x8f, 0x19, 0x88, 0xf1, 0xe8, 0x24, 0xe4, 0x99,
- 0xb3, 0xd3, 0xa7, 0xb7, 0xe3, 0xba, 0x13, 0x23, 0xf8, 0x2c, 0x6f, 0x94, 0xee, 0x1b, 0x57, 0x92,
- 0x18, 0x81, 0xce, 0xc2, 0x52, 0x2c, 0xf3, 0xdd, 0x80, 0x76, 0xdc, 0x27, 0xc2, 0xc3, 0x45, 0x72,
- 0x00, 0x8f, 0x56, 0xe1, 0x48, 0x8c, 0xdb, 0x16, 0x47, 0xbf, 0x2d, 0x48, 0xa7, 0xd1, 0xdc, 0x36,
- 0x42, 0xdd, 0x2b, 0x8f, 0x46, 0x4e, 0x5f, 0x14, 0xd3, 0x22, 0x31, 0x30, 0xf8, 0x8f, 0x16, 0x1c,
- 0x95, 0xae, 0xe6, 0x2d, 0xf2, 0xeb, 0x18, 0xf5, 0xbf, 0xb6, 0x00, 0x99, 0x1a, 0xa8, 0xd0, 0xfa,
- 0x9a, 0xf9, 0xf6, 0xc1, 0xef, 0x16, 0x05, 0xd1, 0xff, 0x49, 0x54, 0xfc, 0x7c, 0x81, 0x21, 0x23,
- 0xee, 0x27, 0xb2, 0x11, 0xb5, 0x65, 0x83, 0x29, 0x31, 0x44, 0x7d, 0x79, 0x5f, 0xbc, 0xb3, 0xcb,
- 0x68, 0xa8, 0xda, 0x43, 0xd1, 0x17, 0x0b, 0x04, 0x91, 0x1f, 0xbe, 0x17, 0x1d, 0x30, 0x11, 0x35,
- 0x76, 0xbc, 0x97, 0x42, 0x11, 0x3d, 0xc0, 0xbf, 0x4d, 0xc2, 0xc2, 0x7d, 0xbf, 0x3f, 0x8a, 0x4f,
- 0xaa, 0xd7, 0xa9, 0x2e, 0x4f, 0xf4, 0xad, 0x69, 0xdd, 0xb7, 0x22, 0xb0, 0x43, 0x46, 0x87, 0x22,
- 0xb2, 0x52, 0x44, 0x8c, 0x11, 0x86, 0x22, 0x73, 0x82, 0x2e, 0x65, 0xb2, 0xe5, 0x28, 0x67, 0xc4,
- 0x3d, 0x70, 0x02, 0x87, 0x56, 0xa0, 0xe0, 0x74, 0xbb, 0x01, 0xed, 0x3a, 0x8c, 0x36, 0x77, 0xcb,
- 0x59, 0xb1, 0x99, 0x89, 0xc2, 0x3f, 0x84, 0x45, 0x6d, 0x2c, 0xe5, 0xd2, 0x77, 0x21, 0xfb, 0xb1,
- 0xc0, 0xcc, 0x78, 0x27, 0x92, 0xa4, 0xaa, 0x8c, 0x69, 0xb2, 0xc9, 0xe7, 0x57, 0x2d, 0x33, 0xbe,
- 0x01, 0x19, 0x49, 0x8e, 0x4e, 0x9a, 0x4d, 0x83, 0x7c, 0xd0, 0xe0, 0xb0, 0xea, 0x00, 0x30, 0x64,
- 0x24, 0x23, 0xe5, 0x78, 0x11, 0x1b, 0x12, 0x43, 0xd4, 0xf7, 0xec, 0x19, 0xc8, 0x47, 0x6f, 0xa7,
- 0xa8, 0x00, 0xd9, 0xab, 0x77, 0xc8, 0x0f, 0x2e, 0x93, 0x8d, 0xa5, 0x04, 0x2a, 0x42, 0xae, 0x79,
- 0x79, 0xfd, 0xa6, 0x80, 0xac, 0xb5, 0x7f, 0xd9, 0xba, 0xb2, 0x04, 0xe8, 0x5b, 0x90, 0x96, 0xe5,
- 0xe2, 0x58, 0x2c, 0xbf, 0xf9, 0x02, 0x5a, 0x39, 0x7e, 0x00, 0x2f, 0x2d, 0x80, 0x13, 0xef, 0x5a,
- 0xe8, 0x36, 0x14, 0x04, 0x52, 0xbd, 0xb2, 0x9c, 0x9c, 0x7e, 0xec, 0x98, 0xe0, 0x74, 0xea, 0x90,
- 0x59, 0x83, 0xdf, 0x45, 0x48, 0x0b, 0x9f, 0x98, 0xd2, 0x98, 0xaf, 0x64, 0xa6, 0x34, 0x13, 0xef,
- 0x4e, 0x38, 0x81, 0x3e, 0x00, 0x9b, 0x77, 0x36, 0xc8, 0x38, 0x54, 0x8c, 0xc7, 0x91, 0xca, 0xb1,
- 0x69, 0xb4, 0xb1, 0xed, 0x87, 0xd1, 0x1b, 0xcf, 0xf1, 0xe9, 0x5e, 0x56, 0x2f, 0x2f, 0x1f, 0x9c,
- 0x88, 0x76, 0xbe, 0x23, 0x1f, 0x3b, 0x74, 0x4f, 0x85, 0x4e, 0x4d, 0x6e, 0x35, 0xd5, 0x82, 0x55,
- 0xaa, 0x87, 0x4d, 0x47, 0x0c, 0xb7, 0xa0, 0x60, 0xf4, 0x33, 0xa6, 0x59, 0x0f, 0x36, 0x63, 0xa6,
- 0x59, 0x67, 0x34, 0x41, 0x38, 0x81, 0xae, 0x41, 0x8e, 0x1f, 0xc5, 0xbc, 0x22, 0xa1, 0x13, 0xd3,
- 0x27, 0xae, 0x51, 0x69, 0x2b, 0x27, 0x67, 0x4f, 0x46, 0x8c, 0xbe, 0x0b, 0xf9, 0x6b, 0x94, 0xa9,
- 0x70, 0x3d, 0x3e, 0x1d, 0xef, 0x33, 0x2c, 0x35, 0x99, 0x33, 0x38, 0xb1, 0xf6, 0x63, 0xfd, 0x7f,
- 0xca, 0x86, 0xc3, 0x1c, 0x74, 0x07, 0x16, 0x85, 0x60, 0xd1, 0x1f, 0x2e, 0x13, 0x01, 0x74, 0xe0,
- 0xdf, 0x9d, 0x89, 0x00, 0x3a, 0xf8, 0x2f, 0x0f, 0x4e, 0x34, 0x3f, 0x7a, 0xf6, 0xbc, 0x9a, 0xf8,
- 0xec, 0x79, 0x35, 0xf1, 0xf9, 0xf3, 0xaa, 0xf5, 0xd3, 0xbd, 0xaa, 0xf5, 0x9b, 0xbd, 0xaa, 0xf5,
- 0x74, 0xaf, 0x6a, 0x3d, 0xdb, 0xab, 0x5a, 0xff, 0xd8, 0xab, 0x5a, 0xff, 0xdc, 0xab, 0x26, 0x3e,
- 0xdf, 0xab, 0x5a, 0x9f, 0xbe, 0xa8, 0x26, 0x9e, 0xbd, 0xa8, 0x26, 0x3e, 0x7b, 0x51, 0x4d, 0x7c,
- 0xf4, 0xe6, 0x17, 0x3d, 0x37, 0xe9, 0x1d, 0x77, 0x32, 0xe2, 0xf3, 0xde, 0x7f, 0x03, 0x00, 0x00,
- 0xff, 0xff, 0xc7, 0xff, 0x87, 0xf1, 0x0e, 0x1b, 0x00, 0x00,
+ // 2276 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x19, 0x4d, 0x6f, 0x1b, 0xc7,
+ 0x95, 0x4b, 0x2e, 0xbf, 0x1e, 0x29, 0x59, 0x1e, 0x31, 0x36, 0xc1, 0xd8, 0xa4, 0x3c, 0x48, 0x1d,
+ 0xc1, 0x71, 0xc8, 0x58, 0x69, 0x5c, 0xd7, 0x6e, 0xd0, 0x9a, 0x92, 0x3f, 0x64, 0xcb, 0x1f, 0x19,
+ 0xb9, 0x6e, 0x61, 0xb4, 0x30, 0x56, 0xe2, 0x88, 0x22, 0xcc, 0xdd, 0xa5, 0x77, 0x87, 0xb1, 0x05,
+ 0xf4, 0xd0, 0x3f, 0x50, 0x34, 0xb7, 0xa2, 0x97, 0xa2, 0x87, 0x02, 0x29, 0x50, 0xf4, 0xd2, 0x1f,
+ 0xd0, 0x5e, 0x7a, 0x70, 0x6f, 0xee, 0x2d, 0xc8, 0x81, 0xad, 0xe5, 0x4b, 0xa1, 0x53, 0x6e, 0x01,
+ 0x7a, 0x2a, 0xe6, 0x6b, 0x77, 0x76, 0x25, 0x1b, 0xa1, 0xeb, 0xa2, 0xf0, 0x85, 0x3b, 0xef, 0xcd,
+ 0x9b, 0x37, 0xef, 0x6b, 0xde, 0x9b, 0x37, 0x84, 0xb7, 0x47, 0x0f, 0xfa, 0x9d, 0xa1, 0xdf, 0x1f,
+ 0x05, 0x3e, 0xf3, 0xa3, 0x41, 0x5b, 0xfc, 0xa2, 0x92, 0x86, 0x1b, 0xb5, 0xbe, 0xdf, 0xf7, 0x25,
+ 0x0d, 0x1f, 0xc9, 0xf9, 0x46, 0xab, 0xef, 0xfb, 0xfd, 0x21, 0xed, 0x08, 0x68, 0x63, 0xbc, 0xd5,
+ 0x61, 0x03, 0x97, 0x86, 0xcc, 0x71, 0x47, 0x8a, 0x60, 0x41, 0x71, 0x7f, 0x38, 0x74, 0xfd, 0x1e,
+ 0x1d, 0x76, 0x42, 0xe6, 0xb0, 0x50, 0xfe, 0x2a, 0x8a, 0x79, 0x4e, 0x31, 0x1a, 0x87, 0xdb, 0xe2,
+ 0x47, 0x22, 0x71, 0x0d, 0xd0, 0x3a, 0x0b, 0xa8, 0xe3, 0x12, 0x87, 0xd1, 0x90, 0xd0, 0x87, 0x63,
+ 0x1a, 0x32, 0x7c, 0x03, 0xe6, 0x13, 0xd8, 0x70, 0xe4, 0x7b, 0x21, 0x45, 0x67, 0xa1, 0x12, 0xc6,
+ 0xe8, 0xba, 0xb5, 0x90, 0x5b, 0xac, 0x2c, 0xd5, 0xda, 0x91, 0x2a, 0xf1, 0x1a, 0x62, 0x12, 0xe2,
+ 0xdf, 0x58, 0x00, 0xf1, 0x1c, 0x6a, 0x02, 0xc8, 0xd9, 0xab, 0x4e, 0xb8, 0x5d, 0xb7, 0x16, 0xac,
+ 0x45, 0x9b, 0x18, 0x18, 0x74, 0x1a, 0x0e, 0xc7, 0xd0, 0x4d, 0x7f, 0x7d, 0xdb, 0x09, 0x7a, 0xf5,
+ 0xac, 0x20, 0xdb, 0x3f, 0x81, 0x10, 0xd8, 0x81, 0xc3, 0x68, 0x3d, 0xb7, 0x60, 0x2d, 0xe6, 0x88,
+ 0x18, 0xa3, 0x23, 0x50, 0x60, 0xd4, 0x73, 0x3c, 0x56, 0xb7, 0x17, 0xac, 0xc5, 0x32, 0x51, 0x10,
+ 0xc7, 0x73, 0xdd, 0x69, 0x58, 0xcf, 0x2f, 0x58, 0x8b, 0x33, 0x44, 0x41, 0xf8, 0xf3, 0x1c, 0x54,
+ 0x3f, 0x19, 0xd3, 0x60, 0x47, 0x19, 0x00, 0x35, 0xa1, 0x14, 0xd2, 0x21, 0xdd, 0x64, 0x7e, 0x20,
+ 0x04, 0x2c, 0x77, 0xb3, 0x75, 0x8b, 0x44, 0x38, 0x54, 0x83, 0xfc, 0x70, 0xe0, 0x0e, 0x98, 0x10,
+ 0x6b, 0x86, 0x48, 0x00, 0x9d, 0x87, 0x7c, 0xc8, 0x9c, 0x80, 0x09, 0x59, 0x2a, 0x4b, 0x8d, 0xb6,
+ 0x74, 0x5a, 0x5b, 0x3b, 0xad, 0x7d, 0x47, 0x3b, 0xad, 0x5b, 0x7a, 0x32, 0x69, 0x65, 0x3e, 0xfb,
+ 0x47, 0xcb, 0x22, 0x72, 0x09, 0x3a, 0x0b, 0x39, 0xea, 0xf5, 0x84, 0xbc, 0xdf, 0x74, 0x25, 0x5f,
+ 0x80, 0xce, 0x40, 0xb9, 0x37, 0x08, 0xe8, 0x26, 0x1b, 0xf8, 0x9e, 0xd0, 0x6a, 0x76, 0x69, 0x3e,
+ 0xf6, 0xc8, 0x8a, 0x9e, 0x22, 0x31, 0x15, 0x3a, 0x0d, 0x85, 0x90, 0x9b, 0x2e, 0xac, 0x17, 0x17,
+ 0x72, 0x8b, 0xe5, 0x6e, 0x6d, 0x6f, 0xd2, 0x9a, 0x93, 0x98, 0xd3, 0xbe, 0x3b, 0x60, 0xd4, 0x1d,
+ 0xb1, 0x1d, 0xa2, 0x68, 0xd0, 0x29, 0x28, 0xf6, 0xe8, 0x90, 0x72, 0x87, 0x97, 0x84, 0xc3, 0xe7,
+ 0x0c, 0xf6, 0x62, 0x82, 0x68, 0x02, 0x74, 0x0f, 0xec, 0xd1, 0xd0, 0xf1, 0xea, 0x65, 0xa1, 0xc5,
+ 0x6c, 0x4c, 0x78, 0x7b, 0xe8, 0x78, 0xdd, 0xb3, 0x5f, 0x4e, 0x5a, 0x4b, 0xfd, 0x01, 0xdb, 0x1e,
+ 0x6f, 0xb4, 0x37, 0x7d, 0xb7, 0xd3, 0x0f, 0x9c, 0x2d, 0xc7, 0x73, 0x3a, 0x43, 0xff, 0xc1, 0xa0,
+ 0xc3, 0x83, 0xf3, 0xe1, 0x98, 0x06, 0x03, 0x1a, 0x74, 0x38, 0x8f, 0xb6, 0xf0, 0x07, 0x5f, 0x47,
+ 0x04, 0xcf, 0x6b, 0x76, 0xa9, 0x30, 0x57, 0xc4, 0x93, 0x2c, 0xa0, 0x75, 0xc7, 0x1d, 0x0d, 0xe9,
+ 0x54, 0xfe, 0x8a, 0x3c, 0x93, 0x7d, 0x65, 0xcf, 0xe4, 0xa6, 0xf5, 0x4c, 0x6c, 0x66, 0x7b, 0x3a,
+ 0x33, 0xe7, 0xbf, 0xa9, 0x99, 0x0b, 0xaf, 0xdf, 0xcc, 0xb8, 0x0e, 0x36, 0x87, 0xd0, 0x1c, 0xe4,
+ 0x02, 0xe7, 0x91, 0x30, 0x66, 0x95, 0xf0, 0x21, 0x5e, 0x83, 0x82, 0x14, 0x04, 0x35, 0xd2, 0xd6,
+ 0x4e, 0x9e, 0x8c, 0xd8, 0xd2, 0x39, 0x6d, 0xc3, 0xb9, 0xd8, 0x86, 0x39, 0x61, 0x1d, 0xfc, 0x5b,
+ 0x0b, 0x66, 0x94, 0x0b, 0x55, 0x76, 0xd9, 0x80, 0xa2, 0x3c, 0xdd, 0x3a, 0xb3, 0x1c, 0x4d, 0x67,
+ 0x96, 0x8b, 0x3d, 0x67, 0xc4, 0x68, 0xd0, 0xed, 0x3c, 0x99, 0xb4, 0xac, 0x2f, 0x27, 0xad, 0x77,
+ 0x5f, 0xa6, 0xa5, 0x48, 0x72, 0x2a, 0xeb, 0x68, 0xc6, 0xe8, 0x3d, 0x21, 0x1d, 0x0b, 0x55, 0x1c,
+ 0x1c, 0x6a, 0xcb, 0x04, 0xb9, 0xea, 0xf5, 0x69, 0xc8, 0x39, 0xdb, 0xdc, 0x85, 0x44, 0xd2, 0xe0,
+ 0x9f, 0xc1, 0x7c, 0x22, 0xd4, 0x94, 0x9c, 0xe7, 0xa0, 0x10, 0x72, 0x03, 0x6a, 0x31, 0x0d, 0x47,
+ 0xad, 0x0b, 0x7c, 0x77, 0x56, 0xc9, 0x57, 0x90, 0x30, 0x51, 0xf4, 0xd3, 0xed, 0xfe, 0x57, 0x0b,
+ 0xaa, 0x6b, 0xce, 0x06, 0x1d, 0xea, 0x18, 0x47, 0x60, 0x7b, 0x8e, 0x4b, 0x95, 0xc5, 0xc5, 0x98,
+ 0x27, 0xb4, 0x4f, 0x9d, 0xe1, 0x98, 0x4a, 0x96, 0x25, 0xa2, 0xa0, 0x69, 0x33, 0x91, 0xf5, 0xca,
+ 0x99, 0xc8, 0x8a, 0xe3, 0xbd, 0x06, 0x79, 0x1e, 0x59, 0x3b, 0x22, 0x0b, 0x95, 0x89, 0x04, 0xf0,
+ 0xbb, 0x30, 0xa3, 0xb4, 0x50, 0xe6, 0x8b, 0x45, 0xe6, 0xe6, 0x2b, 0x6b, 0x91, 0xb1, 0x0b, 0x05,
+ 0x69, 0x6d, 0xf4, 0x0e, 0x94, 0xa3, 0xea, 0x26, 0xb4, 0xcd, 0x75, 0x0b, 0x7b, 0x93, 0x56, 0x96,
+ 0x85, 0x24, 0x9e, 0x40, 0x2d, 0xc8, 0x8b, 0x95, 0x42, 0x73, 0xab, 0x5b, 0xde, 0x9b, 0xb4, 0x24,
+ 0x82, 0xc8, 0x0f, 0x3a, 0x06, 0xf6, 0x36, 0x2f, 0x30, 0xdc, 0x04, 0x76, 0xb7, 0xb4, 0x37, 0x69,
+ 0x09, 0x98, 0x88, 0x5f, 0x7c, 0x05, 0xaa, 0x6b, 0xb4, 0xef, 0x6c, 0xee, 0xa8, 0x4d, 0x6b, 0x9a,
+ 0x1d, 0xdf, 0xd0, 0xd2, 0x3c, 0x4e, 0x40, 0x35, 0xda, 0xf1, 0xbe, 0x1b, 0xaa, 0xa0, 0xae, 0x44,
+ 0xb8, 0x1b, 0x21, 0xfe, 0xb5, 0x05, 0xca, 0xcf, 0x08, 0x43, 0x61, 0xc8, 0x75, 0x0d, 0x55, 0x0e,
+ 0x82, 0xbd, 0x49, 0x4b, 0x61, 0x88, 0xfa, 0xa2, 0x0b, 0x50, 0x0c, 0xc5, 0x8e, 0x9c, 0x59, 0x3a,
+ 0x7c, 0xc4, 0x44, 0xf7, 0x10, 0x0f, 0x83, 0xbd, 0x49, 0x4b, 0x13, 0x12, 0x3d, 0x40, 0xed, 0x44,
+ 0xe5, 0x94, 0x8a, 0xcd, 0xee, 0x4d, 0x5a, 0x06, 0xd6, 0xac, 0xa4, 0xf8, 0x6b, 0x0b, 0x2a, 0x77,
+ 0x9c, 0x41, 0x14, 0x42, 0x75, 0xed, 0xa2, 0x38, 0x47, 0x4a, 0x04, 0x3f, 0xd2, 0x3d, 0x3a, 0x74,
+ 0x76, 0x2e, 0xfb, 0x81, 0xe0, 0x3b, 0x43, 0x22, 0x38, 0x2e, 0x76, 0xf6, 0x81, 0xc5, 0x2e, 0x3f,
+ 0x7d, 0x4a, 0xfd, 0x1f, 0x26, 0xb0, 0x6b, 0x76, 0x29, 0x3b, 0x97, 0xc3, 0x7f, 0xb4, 0xa0, 0x2a,
+ 0x35, 0x57, 0x61, 0xf7, 0x13, 0x28, 0x48, 0xc3, 0x08, 0xdd, 0x5f, 0x92, 0x5c, 0xde, 0x9b, 0x26,
+ 0xb1, 0x28, 0x9e, 0xe8, 0xfb, 0x30, 0xdb, 0x0b, 0xfc, 0xd1, 0x88, 0xf6, 0xd6, 0x55, 0x0a, 0xcb,
+ 0xa6, 0x53, 0xd8, 0x8a, 0x39, 0x4f, 0x52, 0xe4, 0xf8, 0x6f, 0x16, 0xcc, 0xa8, 0x6c, 0xa1, 0x7c,
+ 0x15, 0xd9, 0xd7, 0x7a, 0xe5, 0x92, 0x95, 0x9d, 0xb6, 0x64, 0x1d, 0x81, 0x42, 0x3f, 0xf0, 0xc7,
+ 0xa3, 0xb0, 0x9e, 0x93, 0x67, 0x53, 0x42, 0xd3, 0x95, 0x32, 0x7c, 0x0d, 0x66, 0xb5, 0x2a, 0x2f,
+ 0x48, 0x99, 0x8d, 0x74, 0xca, 0x5c, 0xed, 0x51, 0x8f, 0x0d, 0xb6, 0x06, 0x51, 0x12, 0x54, 0xf4,
+ 0xf8, 0x97, 0x16, 0xcc, 0xa5, 0x49, 0xd0, 0x8a, 0x71, 0xce, 0x38, 0xbb, 0x93, 0x2f, 0x66, 0xd7,
+ 0x16, 0xc9, 0x27, 0xbc, 0xe4, 0xb1, 0x60, 0x47, 0xb3, 0x96, 0x6b, 0x1b, 0x1f, 0x41, 0xc5, 0x98,
+ 0xe4, 0x25, 0xea, 0x01, 0x55, 0x27, 0x83, 0xf0, 0x61, 0x9c, 0x12, 0xb2, 0x32, 0xa1, 0x09, 0x00,
+ 0xff, 0xca, 0x82, 0x99, 0x84, 0x2f, 0xd1, 0x39, 0xb0, 0xb7, 0x02, 0xdf, 0x9d, 0xca, 0x51, 0x62,
+ 0x05, 0xfa, 0x36, 0x64, 0x99, 0x3f, 0x95, 0x9b, 0xb2, 0xcc, 0xe7, 0x5e, 0x52, 0xea, 0xe7, 0xe4,
+ 0xed, 0x56, 0x42, 0xf8, 0x23, 0x28, 0x0b, 0x85, 0x6e, 0x3b, 0x83, 0xe0, 0xc0, 0x6a, 0x71, 0xb0,
+ 0x42, 0x17, 0xe0, 0x90, 0xcc, 0x84, 0x07, 0x2f, 0xae, 0x1e, 0xb4, 0xb8, 0xaa, 0x17, 0xbf, 0x0d,
+ 0xf9, 0xe5, 0xed, 0xb1, 0xf7, 0x80, 0x2f, 0xe9, 0x39, 0xcc, 0xd1, 0x4b, 0xf8, 0x18, 0xbf, 0x05,
+ 0xf3, 0xfc, 0x0c, 0xd2, 0x20, 0x5c, 0xf6, 0xc7, 0x1e, 0xd3, 0xdd, 0xc5, 0x69, 0xa8, 0x25, 0xd1,
+ 0x2a, 0x4a, 0x6a, 0x90, 0xdf, 0xe4, 0x08, 0xc1, 0x63, 0x86, 0x48, 0x00, 0xff, 0xce, 0x02, 0x74,
+ 0x85, 0x32, 0xb1, 0xcb, 0xea, 0x4a, 0x74, 0x3c, 0x1a, 0x50, 0x72, 0x1d, 0xb6, 0xb9, 0x4d, 0x83,
+ 0x50, 0xdf, 0x41, 0x34, 0xfc, 0xff, 0xb8, 0xed, 0xe1, 0x33, 0x30, 0x9f, 0x90, 0x52, 0xe9, 0xd4,
+ 0x80, 0xd2, 0xa6, 0xc2, 0xa9, 0x7a, 0x17, 0xc1, 0xf8, 0x4f, 0x59, 0x28, 0x89, 0x05, 0x84, 0x6e,
+ 0xa1, 0x33, 0x50, 0xd9, 0x1a, 0x78, 0x7d, 0x1a, 0x8c, 0x82, 0x81, 0x32, 0x81, 0xdd, 0x3d, 0xb4,
+ 0x37, 0x69, 0x99, 0x68, 0x62, 0x02, 0xe8, 0x7d, 0x28, 0x8e, 0x43, 0x1a, 0xdc, 0x1f, 0xc8, 0x93,
+ 0x5e, 0xee, 0xd6, 0x76, 0x27, 0xad, 0xc2, 0x0f, 0x43, 0x1a, 0xac, 0xae, 0xf0, 0xca, 0x33, 0x16,
+ 0x23, 0x22, 0xbf, 0x3d, 0x74, 0x5d, 0x85, 0xa9, 0xb8, 0x84, 0x75, 0xbf, 0xc3, 0xc5, 0x4f, 0xa5,
+ 0xba, 0x51, 0xe0, 0xbb, 0x94, 0x6d, 0xd3, 0x71, 0xd8, 0xd9, 0xf4, 0x5d, 0xd7, 0xf7, 0x3a, 0xa2,
+ 0x97, 0x14, 0x4a, 0xf3, 0xf2, 0xc9, 0x97, 0xab, 0xc8, 0xbd, 0x03, 0x45, 0xb6, 0x1d, 0xf8, 0xe3,
+ 0xfe, 0xb6, 0xa8, 0x0a, 0xb9, 0xee, 0xf9, 0xe9, 0xf9, 0x69, 0x0e, 0x44, 0x0f, 0xd0, 0x09, 0x6e,
+ 0x2d, 0xba, 0xf9, 0x20, 0x1c, 0xbb, 0xb2, 0x43, 0xeb, 0xe6, 0xf7, 0x26, 0x2d, 0xeb, 0x7d, 0x12,
+ 0xa1, 0xf1, 0x2f, 0xb2, 0xd0, 0x12, 0x81, 0x7a, 0x57, 0x5c, 0x1b, 0x2e, 0xfb, 0xc1, 0x0d, 0xca,
+ 0x82, 0xc1, 0xe6, 0x4d, 0xc7, 0xa5, 0x3a, 0x36, 0x5a, 0x50, 0x71, 0x05, 0xf2, 0xbe, 0x71, 0x04,
+ 0xc0, 0x8d, 0xe8, 0xd0, 0x71, 0x00, 0x71, 0x66, 0xe4, 0xbc, 0x3c, 0x0d, 0x65, 0x81, 0x11, 0xd3,
+ 0xcb, 0x09, 0x4b, 0x75, 0xa6, 0xd4, 0x4c, 0x59, 0x68, 0x35, 0x6d, 0xa1, 0xa9, 0xf9, 0x44, 0x66,
+ 0x31, 0x63, 0x3d, 0x9f, 0x8c, 0x75, 0xfc, 0x77, 0x0b, 0x9a, 0x6b, 0x5a, 0xf2, 0x57, 0x34, 0x87,
+ 0xd6, 0x37, 0xfb, 0x9a, 0xf4, 0xcd, 0xfd, 0x77, 0xfa, 0xe2, 0xab, 0x50, 0x5b, 0x1b, 0x78, 0xf4,
+ 0xf2, 0x60, 0xc8, 0x68, 0x70, 0xe9, 0xf1, 0x28, 0xa0, 0x61, 0xc8, 0x1b, 0xd7, 0x06, 0x94, 0xfc,
+ 0x11, 0x0d, 0x1c, 0xdd, 0x77, 0xe4, 0x48, 0x04, 0xf3, 0xe4, 0x21, 0x6c, 0xa2, 0x73, 0x9b, 0x00,
+ 0xf0, 0xbf, 0x8d, 0xe4, 0x41, 0xe8, 0x96, 0xb6, 0xc8, 0xb2, 0x91, 0xb1, 0x5f, 0x87, 0xc2, 0xd9,
+ 0xd7, 0xe8, 0xe0, 0x5c, 0x2a, 0x99, 0x9d, 0x83, 0xe2, 0x96, 0x30, 0x84, 0x2c, 0xbe, 0x95, 0xa5,
+ 0x66, 0x5c, 0xed, 0x0e, 0xb2, 0x12, 0xd1, 0xe4, 0xf8, 0xe3, 0x38, 0x25, 0x09, 0xdd, 0x55, 0x4a,
+ 0x3a, 0x09, 0x76, 0x40, 0xb7, 0x74, 0xed, 0x44, 0x31, 0xb7, 0x88, 0x52, 0xcc, 0xe3, 0x3f, 0x5b,
+ 0x30, 0x77, 0x85, 0xb2, 0xe4, 0xad, 0xe4, 0x0d, 0xb2, 0x1c, 0xbe, 0x0a, 0x87, 0x0d, 0xf9, 0x95,
+ 0xf6, 0x1f, 0xa6, 0xae, 0x22, 0x6f, 0xc5, 0xfa, 0xaf, 0x7a, 0x3d, 0xfa, 0x58, 0xb5, 0x70, 0xc9,
+ 0x5b, 0xc8, 0x6d, 0xa8, 0x18, 0x93, 0xe8, 0x62, 0xea, 0xfe, 0x61, 0x3c, 0xb8, 0x44, 0x35, 0xb4,
+ 0x5b, 0x53, 0x3a, 0xc9, 0x26, 0x4e, 0xdd, 0x2e, 0xa3, 0x5a, 0xbd, 0x0e, 0x48, 0x5c, 0x5c, 0x05,
+ 0x5b, 0xb3, 0x5a, 0x08, 0xec, 0xf5, 0xe8, 0x22, 0x12, 0xc1, 0xe8, 0x04, 0xd8, 0x81, 0xff, 0x48,
+ 0x5f, 0x2c, 0x67, 0xe2, 0x2d, 0x89, 0xff, 0x88, 0x88, 0x29, 0x7c, 0x01, 0x72, 0xc4, 0x7f, 0x84,
+ 0x9a, 0x00, 0x81, 0xe3, 0xf5, 0xe9, 0xdd, 0xa8, 0x9f, 0xa9, 0x12, 0x03, 0xf3, 0x82, 0x4a, 0xbe,
+ 0x0c, 0x87, 0x4d, 0x89, 0xa4, 0xbb, 0xdb, 0x50, 0xfc, 0x64, 0x6c, 0x9a, 0xab, 0x96, 0x32, 0x97,
+ 0x6c, 0x8d, 0x35, 0x11, 0x8f, 0x19, 0x88, 0xf1, 0xe8, 0x18, 0x94, 0x99, 0xb3, 0x31, 0xa4, 0x37,
+ 0xe3, 0xbc, 0x13, 0x23, 0xf8, 0x2c, 0x6f, 0xc5, 0xee, 0x1a, 0x57, 0x92, 0x18, 0x81, 0x4e, 0xc1,
+ 0x5c, 0x2c, 0xf3, 0xed, 0x80, 0x6e, 0x0d, 0x1e, 0x0b, 0x0f, 0x57, 0xc9, 0x3e, 0x3c, 0x5a, 0x84,
+ 0x43, 0x31, 0x6e, 0x5d, 0x94, 0x7e, 0x5b, 0x90, 0xa6, 0xd1, 0xdc, 0x36, 0x42, 0xdd, 0x4b, 0x0f,
+ 0xc7, 0xce, 0x50, 0x24, 0xd3, 0x2a, 0x31, 0x30, 0xf8, 0x2f, 0x16, 0x1c, 0x96, 0xae, 0xe6, 0x4d,
+ 0xf8, 0x9b, 0x18, 0xf5, 0x9f, 0x5b, 0x80, 0x4c, 0x0d, 0x54, 0x68, 0x7d, 0xcb, 0x7c, 0x5d, 0xe1,
+ 0x77, 0x8b, 0x8a, 0xe8, 0x30, 0x25, 0x2a, 0x7e, 0x20, 0xc1, 0x50, 0x10, 0xf7, 0x13, 0xd9, 0xea,
+ 0xda, 0xb2, 0x85, 0x95, 0x18, 0xa2, 0xbe, 0xbc, 0xf3, 0xde, 0xd8, 0x61, 0x34, 0x54, 0x0d, 0xa8,
+ 0xe8, 0xbc, 0x05, 0x82, 0xc8, 0x0f, 0xdf, 0x8b, 0x7a, 0x4c, 0x44, 0x8d, 0x1d, 0xef, 0xa5, 0x50,
+ 0x44, 0x0f, 0xf0, 0x1f, 0xb2, 0x30, 0x73, 0xd7, 0x1f, 0x8e, 0xe3, 0x4a, 0xf5, 0x26, 0xe5, 0xe5,
+ 0x44, 0x57, 0x9c, 0xd7, 0x5d, 0x31, 0x02, 0x3b, 0x64, 0x74, 0x24, 0x22, 0x2b, 0x47, 0xc4, 0x18,
+ 0x61, 0xa8, 0x32, 0x27, 0xe8, 0x53, 0x26, 0xdb, 0x8d, 0x7a, 0x41, 0xdc, 0x03, 0x13, 0x38, 0xb4,
+ 0x00, 0x15, 0xa7, 0xdf, 0x0f, 0x68, 0xdf, 0x61, 0xb4, 0xbb, 0x53, 0x2f, 0x8a, 0xcd, 0x4c, 0x14,
+ 0xfe, 0x31, 0xcc, 0x6a, 0x63, 0x29, 0x97, 0x7e, 0x00, 0xc5, 0x4f, 0x05, 0xe6, 0x80, 0x97, 0x28,
+ 0x49, 0xaa, 0xd2, 0x98, 0x26, 0x4b, 0x3e, 0x5b, 0x6b, 0x99, 0xf1, 0x35, 0x28, 0x48, 0x72, 0x74,
+ 0xcc, 0x6c, 0x1a, 0xe4, 0x93, 0x09, 0x87, 0x55, 0x07, 0x80, 0xa1, 0x20, 0x19, 0x29, 0xc7, 0x8b,
+ 0xd8, 0x90, 0x18, 0xa2, 0xbe, 0xa7, 0x4e, 0x42, 0x39, 0x7a, 0x73, 0x46, 0x15, 0x28, 0x5e, 0xbe,
+ 0x45, 0x7e, 0x74, 0x91, 0xac, 0xcc, 0x65, 0x50, 0x15, 0x4a, 0xdd, 0x8b, 0xcb, 0xd7, 0x05, 0x64,
+ 0x2d, 0x7d, 0x6d, 0xeb, 0xcc, 0x12, 0xa0, 0xef, 0x41, 0x5e, 0xa6, 0x8b, 0x23, 0xb1, 0xfc, 0xe6,
+ 0xeb, 0x6e, 0xe3, 0xe8, 0x3e, 0xbc, 0xb4, 0x00, 0xce, 0x7c, 0x60, 0xa1, 0x9b, 0x50, 0x11, 0x48,
+ 0xf5, 0x8e, 0x73, 0x2c, 0xfd, 0x9c, 0x92, 0xe0, 0x74, 0xfc, 0x05, 0xb3, 0x06, 0xbf, 0xf3, 0x90,
+ 0x17, 0x3e, 0x31, 0xa5, 0x31, 0xdf, 0xe1, 0x4c, 0x69, 0x12, 0x2f, 0x5b, 0x38, 0x83, 0xbe, 0x0b,
+ 0x36, 0xef, 0x6c, 0x90, 0x51, 0x54, 0x8c, 0xe7, 0x97, 0xc6, 0x91, 0x34, 0xda, 0xd8, 0xf6, 0xe3,
+ 0xe8, 0x15, 0xe9, 0x68, 0xba, 0x9b, 0xd5, 0xcb, 0xeb, 0xfb, 0x27, 0xa2, 0x9d, 0x6f, 0xc9, 0xe7,
+ 0x0e, 0xdd, 0x53, 0xa1, 0xe3, 0xc9, 0xad, 0x52, 0x2d, 0x58, 0xa3, 0xf9, 0xa2, 0xe9, 0x88, 0xe1,
+ 0x1a, 0x54, 0x8c, 0x7e, 0xc6, 0x34, 0xeb, 0xfe, 0x66, 0xcc, 0x34, 0xeb, 0x01, 0x4d, 0x10, 0xce,
+ 0xa0, 0x2b, 0x50, 0xe2, 0xa5, 0x98, 0x67, 0x24, 0xf4, 0x76, 0xba, 0xe2, 0x1a, 0x99, 0xb6, 0x71,
+ 0xec, 0xe0, 0xc9, 0x88, 0xd1, 0x0f, 0xa0, 0x7c, 0x85, 0x32, 0x15, 0xae, 0x47, 0xd3, 0xf1, 0x7e,
+ 0x80, 0xa5, 0x92, 0x67, 0x06, 0x67, 0x96, 0x7e, 0xaa, 0xff, 0x8b, 0x5a, 0x71, 0x98, 0x83, 0x6e,
+ 0xc1, 0xac, 0x10, 0x2c, 0xfa, 0xb3, 0x2a, 0x11, 0x40, 0xfb, 0xfe, 0x19, 0x4b, 0x04, 0xd0, 0xfe,
+ 0x7f, 0xc8, 0x70, 0xa6, 0x7b, 0xef, 0xe9, 0xb3, 0x66, 0xe6, 0x8b, 0x67, 0xcd, 0xcc, 0x57, 0xcf,
+ 0x9a, 0xd6, 0xcf, 0x77, 0x9b, 0xd6, 0xef, 0x77, 0x9b, 0xd6, 0x93, 0xdd, 0xa6, 0xf5, 0x74, 0xb7,
+ 0x69, 0xfd, 0x73, 0xb7, 0x69, 0xfd, 0x6b, 0xb7, 0x99, 0xf9, 0x6a, 0xb7, 0x69, 0x7d, 0xf6, 0xbc,
+ 0x99, 0x79, 0xfa, 0xbc, 0x99, 0xf9, 0xe2, 0x79, 0x33, 0x73, 0xef, 0x9d, 0x97, 0x3d, 0x38, 0xe9,
+ 0x1d, 0x37, 0x0a, 0xe2, 0xf3, 0xe1, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xf4, 0x39, 0x4b, 0xe2,
+ 0x4a, 0x1c, 0x00, 0x00,
}
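The hex blocks above are not hand-written: they are the gzip-compressed `FileDescriptorProto` that protoc-gen-gogo regenerates whenever the schema changes, growing here from 2202 to 2276 bytes. If you ever need to inspect it, it can be decompressed and decoded from inside the package; a sketch, assuming the standard protobuf-go packages are available as dependencies:

```go
package logproto

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

// dumpDescriptor is an illustrative helper (e.g. for a test file); it has to
// live in package logproto because the descriptor variable is unexported.
func dumpDescriptor() error {
	r, err := gzip.NewReader(bytes.NewReader(fileDescriptor_c28a5f14f1f4c79a))
	if err != nil {
		return err
	}
	raw, err := io.ReadAll(r)
	if err != nil {
		return err
	}
	fd := &descriptorpb.FileDescriptorProto{}
	if err := proto.Unmarshal(raw, fd); err != nil {
		return err
	}
	fmt.Println(fd.GetName()) // expected: "pkg/logproto/logproto.proto"
	return nil
}
```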
func (x Direction) String() string {
@@ -2758,6 +2864,13 @@ func (this *QueryRequest) Equal(that interface{}) bool {
return false
}
}
+ if that1.Plan == nil {
+ if this.Plan != nil {
+ return false
+ }
+ } else if !this.Plan.Equal(*that1.Plan) {
+ return false
+ }
return true
}
func (this *SampleQueryRequest) Equal(that interface{}) bool {
@@ -2804,6 +2917,37 @@ func (this *SampleQueryRequest) Equal(that interface{}) bool {
return false
}
}
+ if that1.Plan == nil {
+ if this.Plan != nil {
+ return false
+ }
+ } else if !this.Plan.Equal(*that1.Plan) {
+ return false
+ }
+ return true
+}
+func (this *Plan) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*Plan)
+ if !ok {
+ that2, ok := that.(Plan)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ if !bytes.Equal(this.Raw, that1.Raw) {
+ return false
+ }
return true
}
func (this *Delete) Equal(that interface{}) bool {
@@ -3096,6 +3240,13 @@ func (this *TailRequest) Equal(that interface{}) bool {
if !this.Start.Equal(that1.Start) {
return false
}
+ if that1.Plan == nil {
+ if this.Plan != nil {
+ return false
+ }
+ } else if !this.Plan.Equal(*that1.Plan) {
+ return false
+ }
return true
}
func (this *TailResponse) Equal(that interface{}) bool {
@@ -3229,10 +3380,37 @@ func (this *SeriesIdentifier) Equal(that interface{}) bool {
return false
}
for i := range this.Labels {
- if this.Labels[i] != that1.Labels[i] {
+ if !this.Labels[i].Equal(&that1.Labels[i]) {
+ return false
+ }
+ }
+ return true
+}
+func (this *SeriesIdentifier_LabelsEntry) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*SeriesIdentifier_LabelsEntry)
+ if !ok {
+ that2, ok := that.(SeriesIdentifier_LabelsEntry)
+ if ok {
+ that1 = &that2
+ } else {
return false
}
}
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ if this.Key != that1.Key {
+ return false
+ }
+ if this.Value != that1.Value {
+ return false
+ }
return true
}
func (this *DroppedStream) Equal(that interface{}) bool {
@@ -4063,7 +4241,7 @@ func (this *QueryRequest) GoString() string {
if this == nil {
return "nil"
}
- s := make([]string, 0, 11)
+ s := make([]string, 0, 12)
s = append(s, "&logproto.QueryRequest{")
s = append(s, "Selector: "+fmt.Sprintf("%#v", this.Selector)+",\n")
s = append(s, "Limit: "+fmt.Sprintf("%#v", this.Limit)+",\n")
@@ -4074,6 +4252,7 @@ func (this *QueryRequest) GoString() string {
if this.Deletes != nil {
s = append(s, "Deletes: "+fmt.Sprintf("%#v", this.Deletes)+",\n")
}
+ s = append(s, "Plan: "+fmt.Sprintf("%#v", this.Plan)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
@@ -4081,7 +4260,7 @@ func (this *SampleQueryRequest) GoString() string {
if this == nil {
return "nil"
}
- s := make([]string, 0, 9)
+ s := make([]string, 0, 10)
s = append(s, "&logproto.SampleQueryRequest{")
s = append(s, "Selector: "+fmt.Sprintf("%#v", this.Selector)+",\n")
s = append(s, "Start: "+fmt.Sprintf("%#v", this.Start)+",\n")
@@ -4090,6 +4269,17 @@ func (this *SampleQueryRequest) GoString() string {
if this.Deletes != nil {
s = append(s, "Deletes: "+fmt.Sprintf("%#v", this.Deletes)+",\n")
}
+ s = append(s, "Plan: "+fmt.Sprintf("%#v", this.Plan)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *Plan) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&logproto.Plan{")
+ s = append(s, "Raw: "+fmt.Sprintf("%#v", this.Raw)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
@@ -4196,12 +4386,13 @@ func (this *TailRequest) GoString() string {
if this == nil {
return "nil"
}
- s := make([]string, 0, 8)
+ s := make([]string, 0, 9)
s = append(s, "&logproto.TailRequest{")
s = append(s, "Query: "+fmt.Sprintf("%#v", this.Query)+",\n")
s = append(s, "DelayFor: "+fmt.Sprintf("%#v", this.DelayFor)+",\n")
s = append(s, "Limit: "+fmt.Sprintf("%#v", this.Limit)+",\n")
s = append(s, "Start: "+fmt.Sprintf("%#v", this.Start)+",\n")
+ s = append(s, "Plan: "+fmt.Sprintf("%#v", this.Plan)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
@@ -4253,19 +4444,24 @@ func (this *SeriesIdentifier) GoString() string {
}
s := make([]string, 0, 5)
s = append(s, "&logproto.SeriesIdentifier{")
- keysForLabels := make([]string, 0, len(this.Labels))
- for k, _ := range this.Labels {
- keysForLabels = append(keysForLabels, k)
- }
- github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
- mapStringForLabels := "map[string]string{"
- for _, k := range keysForLabels {
- mapStringForLabels += fmt.Sprintf("%#v: %#v,", k, this.Labels[k])
- }
- mapStringForLabels += "}"
if this.Labels != nil {
- s = append(s, "Labels: "+mapStringForLabels+",\n")
+ vs := make([]*SeriesIdentifier_LabelsEntry, len(this.Labels))
+ for i := range vs {
+ vs[i] = &this.Labels[i]
+ }
+ s = append(s, "Labels: "+fmt.Sprintf("%#v", vs)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *SeriesIdentifier_LabelsEntry) GoString() string {
+ if this == nil {
+ return "nil"
}
+ s := make([]string, 0, 6)
+ s = append(s, "&logproto.SeriesIdentifier_LabelsEntry{")
+ s = append(s, "Key: "+fmt.Sprintf("%#v", this.Key)+",\n")
+ s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
@@ -5258,6 +5454,18 @@ func (m *QueryRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ if m.Plan != nil {
+ {
+ size := m.Plan.Size()
+ i -= size
+ if _, err := m.Plan.MarshalTo(dAtA[i:]); err != nil {
+ return 0, err
+ }
+ i = encodeVarintLogproto(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x4a
+ }
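On the encode path the new field is written with tag byte `0x4a`, i.e. field number 9 with wire type 2 (length-delimited): `(9 << 3) | 2 == 0x4a`, matching the `protobuf:"bytes,9,opt,..."` tag on the struct (the `SampleQueryRequest` hunk below uses `0x32` for field 6 in the same way). Because `Plan` is a gogo `customtype`, the generated code delegates to methods on the custom type itself. A stand-in illustrating the surface those calls imply; this is not the real `plan.QueryPlan`, and the `Unmarshal` method is assumed from the decode path, which is not shown in this excerpt:

```go
package example

import "bytes"

// rawPlan is a hypothetical stand-in showing the methods the generated code
// relies on for a customtype field: Size and MarshalTo on the encode path
// (seen in the hunk above), Equal in the generated Equal methods, and
// Unmarshal for decoding.
type rawPlan struct{ raw []byte }

func (p rawPlan) Size() int                       { return len(p.raw) }
func (p rawPlan) MarshalTo(b []byte) (int, error) { return copy(b, p.raw), nil }
func (p rawPlan) Equal(other rawPlan) bool        { return bytes.Equal(p.raw, other.raw) }

func (p *rawPlan) Unmarshal(b []byte) error {
	p.raw = append(p.raw[:0], b...)
	return nil
}
```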
if len(m.Deletes) > 0 {
for iNdEx := len(m.Deletes) - 1; iNdEx >= 0; iNdEx-- {
{
@@ -5286,21 +5494,21 @@ func (m *QueryRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i--
dAtA[i] = 0x28
}
- n1, err1 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.End):])
- if err1 != nil {
- return 0, err1
- }
- i -= n1
- i = encodeVarintLogproto(dAtA, i, uint64(n1))
- i--
- dAtA[i] = 0x22
- n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Start):])
+ n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.End):])
if err2 != nil {
return 0, err2
}
i -= n2
i = encodeVarintLogproto(dAtA, i, uint64(n2))
i--
+ dAtA[i] = 0x22
+ n3, err3 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Start):])
+ if err3 != nil {
+ return 0, err3
+ }
+ i -= n3
+ i = encodeVarintLogproto(dAtA, i, uint64(n3))
+ i--
dAtA[i] = 0x1a
if m.Limit != 0 {
i = encodeVarintLogproto(dAtA, i, uint64(m.Limit))
@@ -5337,6 +5545,18 @@ func (m *SampleQueryRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ if m.Plan != nil {
+ {
+ size := m.Plan.Size()
+ i -= size
+ if _, err := m.Plan.MarshalTo(dAtA[i:]); err != nil {
+ return 0, err
+ }
+ i = encodeVarintLogproto(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
if len(m.Deletes) > 0 {
for iNdEx := len(m.Deletes) - 1; iNdEx >= 0; iNdEx-- {
{
@@ -5360,20 +5580,20 @@ func (m *SampleQueryRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
dAtA[i] = 0x22
}
}
- n3, err3 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.End):])
- if err3 != nil {
- return 0, err3
+ n5, err5 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.End):])
+ if err5 != nil {
+ return 0, err5
}
- i -= n3
- i = encodeVarintLogproto(dAtA, i, uint64(n3))
+ i -= n5
+ i = encodeVarintLogproto(dAtA, i, uint64(n5))
i--
dAtA[i] = 0x1a
- n4, err4 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Start):])
- if err4 != nil {
- return 0, err4
+ n6, err6 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Start):])
+ if err6 != nil {
+ return 0, err6
}
- i -= n4
- i = encodeVarintLogproto(dAtA, i, uint64(n4))
+ i -= n6
+ i = encodeVarintLogproto(dAtA, i, uint64(n6))
i--
dAtA[i] = 0x12
if len(m.Selector) > 0 {
@@ -5386,7 +5606,7 @@ func (m *SampleQueryRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
-func (m *Delete) Marshal() (dAtA []byte, err error) {
+func (m *Plan) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -5396,23 +5616,53 @@ func (m *Delete) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *Delete) MarshalTo(dAtA []byte) (int, error) {
+func (m *Plan) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *Delete) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *Plan) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
- if m.End != 0 {
- i = encodeVarintLogproto(dAtA, i, uint64(m.End))
+ if len(m.Raw) > 0 {
+ i -= len(m.Raw)
+ copy(dAtA[i:], m.Raw)
+ i = encodeVarintLogproto(dAtA, i, uint64(len(m.Raw)))
i--
- dAtA[i] = 0x18
+ dAtA[i] = 0xa
}
- if m.Start != 0 {
- i = encodeVarintLogproto(dAtA, i, uint64(m.Start))
+ return len(dAtA) - i, nil
+}
+
+func (m *Delete) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Delete) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Delete) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.End != 0 {
+ i = encodeVarintLogproto(dAtA, i, uint64(m.End))
+ i--
+ dAtA[i] = 0x18
+ }
+ if m.Start != 0 {
+ i = encodeVarintLogproto(dAtA, i, uint64(m.Start))
i--
dAtA[i] = 0x10
}
@@ -5548,22 +5798,22 @@ func (m *LabelRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
dAtA[i] = 0x2a
}
if m.End != nil {
- n7, err7 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.End, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.End):])
- if err7 != nil {
- return 0, err7
+ n9, err9 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.End, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.End):])
+ if err9 != nil {
+ return 0, err9
}
- i -= n7
- i = encodeVarintLogproto(dAtA, i, uint64(n7))
+ i -= n9
+ i = encodeVarintLogproto(dAtA, i, uint64(n9))
i--
dAtA[i] = 0x22
}
if m.Start != nil {
- n8, err8 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.Start):])
- if err8 != nil {
- return 0, err8
+ n10, err10 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.Start):])
+ if err10 != nil {
+ return 0, err10
}
- i -= n8
- i = encodeVarintLogproto(dAtA, i, uint64(n8))
+ i -= n10
+ i = encodeVarintLogproto(dAtA, i, uint64(n10))
i--
dAtA[i] = 0x1a
}
@@ -5761,12 +6011,24 @@ func (m *TailRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
- n9, err9 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Start):])
- if err9 != nil {
- return 0, err9
+ if m.Plan != nil {
+ {
+ size := m.Plan.Size()
+ i -= size
+ if _, err := m.Plan.MarshalTo(dAtA[i:]); err != nil {
+ return 0, err
+ }
+ i = encodeVarintLogproto(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
}
- i -= n9
- i = encodeVarintLogproto(dAtA, i, uint64(n9))
+ n12, err12 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Start):])
+ if err12 != nil {
+ return 0, err12
+ }
+ i -= n12
+ i = encodeVarintLogproto(dAtA, i, uint64(n12))
i--
dAtA[i] = 0x2a
if m.Limit != 0 {
@@ -5876,20 +6138,20 @@ func (m *SeriesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
dAtA[i] = 0x1a
}
}
- n11, err11 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.End):])
- if err11 != nil {
- return 0, err11
+ n14, err14 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.End):])
+ if err14 != nil {
+ return 0, err14
}
- i -= n11
- i = encodeVarintLogproto(dAtA, i, uint64(n11))
+ i -= n14
+ i = encodeVarintLogproto(dAtA, i, uint64(n14))
i--
dAtA[i] = 0x12
- n12, err12 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Start):])
- if err12 != nil {
- return 0, err12
+ n15, err15 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Start):])
+ if err15 != nil {
+ return 0, err15
}
- i -= n12
- i = encodeVarintLogproto(dAtA, i, uint64(n12))
+ i -= n15
+ i = encodeVarintLogproto(dAtA, i, uint64(n15))
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
@@ -5953,20 +6215,15 @@ func (m *SeriesIdentifier) MarshalToSizedBuffer(dAtA []byte) (int, error) {
var l int
_ = l
if len(m.Labels) > 0 {
- for k := range m.Labels {
- v := m.Labels[k]
- baseI := i
- i -= len(v)
- copy(dAtA[i:], v)
- i = encodeVarintLogproto(dAtA, i, uint64(len(v)))
- i--
- dAtA[i] = 0x12
- i -= len(k)
- copy(dAtA[i:], k)
- i = encodeVarintLogproto(dAtA, i, uint64(len(k)))
- i--
- dAtA[i] = 0xa
- i = encodeVarintLogproto(dAtA, i, uint64(baseI-i))
+ for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintLogproto(dAtA, i, uint64(size))
+ }
i--
dAtA[i] = 0xa
}
@@ -5974,6 +6231,43 @@ func (m *SeriesIdentifier) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
+func (m *SeriesIdentifier_LabelsEntry) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SeriesIdentifier_LabelsEntry) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SeriesIdentifier_LabelsEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Value) > 0 {
+ i -= len(m.Value)
+ copy(dAtA[i:], m.Value)
+ i = encodeVarintLogproto(dAtA, i, uint64(len(m.Value)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Key) > 0 {
+ i -= len(m.Key)
+ copy(dAtA[i:], m.Key)
+ i = encodeVarintLogproto(dAtA, i, uint64(len(m.Key)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
func (m *DroppedStream) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -6001,20 +6295,20 @@ func (m *DroppedStream) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i--
dAtA[i] = 0x1a
}
- n13, err13 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.To, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.To):])
- if err13 != nil {
- return 0, err13
+ n16, err16 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.To, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.To):])
+ if err16 != nil {
+ return 0, err16
}
- i -= n13
- i = encodeVarintLogproto(dAtA, i, uint64(n13))
+ i -= n16
+ i = encodeVarintLogproto(dAtA, i, uint64(n16))
i--
dAtA[i] = 0x12
- n14, err14 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.From, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.From):])
- if err14 != nil {
- return 0, err14
+ n17, err17 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.From, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.From):])
+ if err17 != nil {
+ return 0, err17
}
- i -= n14
- i = encodeVarintLogproto(dAtA, i, uint64(n14))
+ i -= n17
+ i = encodeVarintLogproto(dAtA, i, uint64(n17))
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
@@ -6195,20 +6489,20 @@ func (m *GetChunkIDsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
- n15, err15 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.End):])
- if err15 != nil {
- return 0, err15
+ n18, err18 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.End):])
+ if err18 != nil {
+ return 0, err18
}
- i -= n15
- i = encodeVarintLogproto(dAtA, i, uint64(n15))
+ i -= n18
+ i = encodeVarintLogproto(dAtA, i, uint64(n18))
i--
dAtA[i] = 0x1a
- n16, err16 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Start):])
- if err16 != nil {
- return 0, err16
+ n19, err19 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Start):])
+ if err19 != nil {
+ return 0, err19
}
- i -= n16
- i = encodeVarintLogproto(dAtA, i, uint64(n16))
+ i -= n19
+ i = encodeVarintLogproto(dAtA, i, uint64(n19))
i--
dAtA[i] = 0x12
if len(m.Matchers) > 0 {
@@ -7131,6 +7425,10 @@ func (m *QueryRequest) Size() (n int) {
n += 1 + l + sovLogproto(uint64(l))
}
}
+ if m.Plan != nil {
+ l = m.Plan.Size()
+ n += 1 + l + sovLogproto(uint64(l))
+ }
return n
}
@@ -7160,6 +7458,23 @@ func (m *SampleQueryRequest) Size() (n int) {
n += 1 + l + sovLogproto(uint64(l))
}
}
+ if m.Plan != nil {
+ l = m.Plan.Size()
+ n += 1 + l + sovLogproto(uint64(l))
+ }
+ return n
+}
+
+func (m *Plan) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Raw)
+ if l > 0 {
+ n += 1 + l + sovLogproto(uint64(l))
+ }
return n
}
@@ -7332,6 +7647,10 @@ func (m *TailRequest) Size() (n int) {
}
l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Start)
n += 1 + l + sovLogproto(uint64(l))
+ if m.Plan != nil {
+ l = m.Plan.Size()
+ n += 1 + l + sovLogproto(uint64(l))
+ }
return n
}
@@ -7401,16 +7720,31 @@ func (m *SeriesIdentifier) Size() (n int) {
var l int
_ = l
if len(m.Labels) > 0 {
- for k, v := range m.Labels {
- _ = k
- _ = v
- mapEntrySize := 1 + len(k) + sovLogproto(uint64(len(k))) + 1 + len(v) + sovLogproto(uint64(len(v)))
- n += mapEntrySize + 1 + sovLogproto(uint64(mapEntrySize))
+ for _, e := range m.Labels {
+ l = e.Size()
+ n += 1 + l + sovLogproto(uint64(l))
}
}
return n
}
+func (m *SeriesIdentifier_LabelsEntry) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Key)
+ if l > 0 {
+ n += 1 + l + sovLogproto(uint64(l))
+ }
+ l = len(m.Value)
+ if l > 0 {
+ n += 1 + l + sovLogproto(uint64(l))
+ }
+ return n
+}
+
func (m *DroppedStream) Size() (n int) {
if m == nil {
return 0
@@ -7954,6 +8288,7 @@ func (this *QueryRequest) String() string {
`Direction:` + fmt.Sprintf("%v", this.Direction) + `,`,
`Shards:` + fmt.Sprintf("%v", this.Shards) + `,`,
`Deletes:` + repeatedStringForDeletes + `,`,
+ `Plan:` + fmt.Sprintf("%v", this.Plan) + `,`,
`}`,
}, "")
return s
@@ -7973,6 +8308,17 @@ func (this *SampleQueryRequest) String() string {
`End:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.End), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
`Shards:` + fmt.Sprintf("%v", this.Shards) + `,`,
`Deletes:` + repeatedStringForDeletes + `,`,
+ `Plan:` + fmt.Sprintf("%v", this.Plan) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Plan) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Plan{`,
+ `Raw:` + fmt.Sprintf("%v", this.Raw) + `,`,
`}`,
}, "")
return s
@@ -8084,6 +8430,7 @@ func (this *TailRequest) String() string {
`DelayFor:` + fmt.Sprintf("%v", this.DelayFor) + `,`,
`Limit:` + fmt.Sprintf("%v", this.Limit) + `,`,
`Start:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Start), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
+ `Plan:` + fmt.Sprintf("%v", this.Plan) + `,`,
`}`,
}, "")
return s
@@ -8136,18 +8483,24 @@ func (this *SeriesIdentifier) String() string {
if this == nil {
return "nil"
}
- keysForLabels := make([]string, 0, len(this.Labels))
- for k, _ := range this.Labels {
- keysForLabels = append(keysForLabels, k)
+ repeatedStringForLabels := "[]SeriesIdentifier_LabelsEntry{"
+ for _, f := range this.Labels {
+ repeatedStringForLabels += fmt.Sprintf("%v", f) + ","
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
- mapStringForLabels := "map[string]string{"
- for _, k := range keysForLabels {
- mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
- }
- mapStringForLabels += "}"
+ repeatedStringForLabels += "}"
s := strings.Join([]string{`&SeriesIdentifier{`,
- `Labels:` + mapStringForLabels + `,`,
+ `Labels:` + repeatedStringForLabels + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SeriesIdentifier_LabelsEntry) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SeriesIdentifier_LabelsEntry{`,
+ `Key:` + fmt.Sprintf("%v", this.Key) + `,`,
+ `Value:` + fmt.Sprintf("%v", this.Value) + `,`,
`}`,
}, "")
return s
@@ -9022,6 +9375,42 @@ func (m *QueryRequest) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Plan", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Plan == nil {
+ m.Plan = &github_com_grafana_loki_pkg_querier_plan.QueryPlan{}
+ }
+ if err := m.Plan.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipLogproto(dAtA[iNdEx:])
@@ -9239,6 +9628,129 @@ func (m *SampleQueryRequest) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Plan", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Plan == nil {
+ m.Plan = &github_com_grafana_loki_pkg_querier_plan.QueryPlan{}
+ }
+ if err := m.Plan.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipLogproto(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Plan) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Plan: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Plan: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Raw", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Raw = append(m.Raw[:0], dAtA[iNdEx:postIndex]...)
+ if m.Raw == nil {
+ m.Raw = []byte{}
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipLogproto(dAtA[iNdEx:])
@@ -10375,6 +10887,42 @@ func (m *TailRequest) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Plan", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Plan == nil {
+ m.Plan = &github_com_grafana_loki_pkg_querier_plan.QueryPlan{}
+ }
+ if err := m.Plan.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipLogproto(dAtA[iNdEx:])
@@ -10850,103 +11398,127 @@ func (m *SeriesIdentifier) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.Labels == nil {
- m.Labels = make(map[string]string)
+ m.Labels = append(m.Labels, SeriesIdentifier_LabelsEntry{})
+ if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
}
- var mapkey string
- var mapvalue string
- for iNdEx < postIndex {
- entryPreIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogproto
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipLogproto(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SeriesIdentifier_LabelsEntry) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LabelsEntry: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LabelsEntry: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
}
- fieldNum := int32(wire >> 3)
- if fieldNum == 1 {
- var stringLenmapkey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogproto
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapkey |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapkey := int(stringLenmapkey)
- if intStringLenmapkey < 0 {
- return ErrInvalidLengthLogproto
- }
- postStringIndexmapkey := iNdEx + intStringLenmapkey
- if postStringIndexmapkey < 0 {
- return ErrInvalidLengthLogproto
- }
- if postStringIndexmapkey > l {
- return io.ErrUnexpectedEOF
- }
- mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
- iNdEx = postStringIndexmapkey
- } else if fieldNum == 2 {
- var stringLenmapvalue uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogproto
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapvalue |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapvalue := int(stringLenmapvalue)
- if intStringLenmapvalue < 0 {
- return ErrInvalidLengthLogproto
- }
- postStringIndexmapvalue := iNdEx + intStringLenmapvalue
- if postStringIndexmapvalue < 0 {
- return ErrInvalidLengthLogproto
- }
- if postStringIndexmapvalue > l {
- return io.ErrUnexpectedEOF
- }
- mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
- iNdEx = postStringIndexmapvalue
- } else {
- iNdEx = entryPreIndex
- skippy, err := skipLogproto(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthLogproto
- }
- if (iNdEx + skippy) > postIndex {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
}
}
- m.Labels[mapkey] = mapvalue
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Value = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
diff --git a/pkg/logproto/logproto.proto b/pkg/logproto/logproto.proto
index 35824a78e202e..b2beb25d050ff 100644
--- a/pkg/logproto/logproto.proto
+++ b/pkg/logproto/logproto.proto
@@ -51,7 +51,7 @@ message StreamRate {
}
message QueryRequest {
- string selector = 1;
+ string selector = 1 [deprecated = true];
uint32 limit = 2;
google.protobuf.Timestamp start = 3 [
(gogoproto.stdtime) = true,
@@ -65,10 +65,11 @@ message QueryRequest {
reserved 6;
repeated string shards = 7 [(gogoproto.jsontag) = "shards,omitempty"];
repeated Delete deletes = 8;
+ Plan plan = 9 [(gogoproto.customtype) = "github.com/grafana/loki/pkg/querier/plan.QueryPlan"];
}
message SampleQueryRequest {
- string selector = 1;
+ string selector = 1 [deprecated = true]; // mark as reserved once we've fully migrated to plan.
google.protobuf.Timestamp start = 2 [
(gogoproto.stdtime) = true,
(gogoproto.nullable) = false
@@ -79,6 +80,11 @@ message SampleQueryRequest {
];
repeated string shards = 4 [(gogoproto.jsontag) = "shards,omitempty"];
repeated Delete deletes = 5;
+ Plan plan = 6 [(gogoproto.customtype) = "github.com/grafana/loki/pkg/querier/plan.QueryPlan"];
+}
+
+message Plan {
+ bytes raw = 1;
}
message Delete {
@@ -148,7 +154,7 @@ message Series {
}
message TailRequest {
- string query = 1;
+ string query = 1 [deprecated = true];
reserved 2;
uint32 delayFor = 3;
uint32 limit = 4;
@@ -156,6 +162,7 @@ message TailRequest {
(gogoproto.stdtime) = true,
(gogoproto.nullable) = false
];
+ Plan plan = 6 [(gogoproto.customtype) = "github.com/grafana/loki/pkg/querier/plan.QueryPlan"];
}
message TailResponse {
@@ -181,7 +188,11 @@ message SeriesResponse {
}
message SeriesIdentifier {
- map&lt;string, string&gt; labels = 1;
+ message LabelsEntry {
+ string key = 1;
+ string value = 2;
+ }
+ repeated LabelsEntry labels = 1 [(gogoproto.nullable) = false];
}
message DroppedStream {
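
The new `Plan` field on `QueryRequest`, `SampleQueryRequest` and `TailRequest` is declared with `(gogoproto.customtype)`, so the generated marshalling code above only ever calls `Size`, `MarshalTo` and `Unmarshal` on it. Below is a minimal sketch of a type satisfying that surface; the `AST` field name matches the accesses in `engine.go`, but the string-based encoding is an assumption for illustration, not necessarily how `plan.QueryPlan` is actually implemented.

```go
package plan

import "github.com/grafana/loki/pkg/logql/syntax"

// QueryPlan sketch: just enough surface for the gogoproto customtype
// references in logproto.pb.go (Size, MarshalTo, Unmarshal). The
// serialization below simply round-trips the LogQL string; the real
// type may encode the AST differently.
type QueryPlan struct {
	AST syntax.Expr
}

func (t QueryPlan) Size() int {
	if t.AST == nil {
		return 0
	}
	return len(t.AST.String())
}

func (t QueryPlan) MarshalTo(data []byte) (int, error) {
	if t.AST == nil {
		return 0, nil
	}
	return copy(data, t.AST.String()), nil
}

func (t *QueryPlan) Unmarshal(data []byte) error {
	if len(data) == 0 {
		return nil
	}
	expr, err := syntax.ParseExpr(string(data))
	if err != nil {
		return err
	}
	t.AST = expr
	return nil
}
```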
diff --git a/pkg/logproto/sketch.pb.go b/pkg/logproto/sketch.pb.go
index 4a56552d984e8..c555d64d55970 100644
--- a/pkg/logproto/sketch.pb.go
+++ b/pkg/logproto/sketch.pb.go
@@ -7,7 +7,6 @@ import (
bytes "bytes"
encoding_binary "encoding/binary"
fmt "fmt"
- _ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto"
io "io"
math "math"
@@ -657,47 +656,46 @@ func init() {
func init() { proto.RegisterFile("pkg/logproto/sketch.proto", fileDescriptor_7f9fd40e59b87ff3) }
var fileDescriptor_7f9fd40e59b87ff3 = []byte{
- // 632 bytes of a gzipped FileDescriptorProto
+ // 623 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0x41, 0x4f, 0xd4, 0x4e,
- 0x14, 0xef, 0xfc, 0x77, 0xff, 0xcb, 0xf2, 0x16, 0x88, 0x8e, 0xc4, 0xac, 0xc5, 0x4c, 0xd6, 0xc6,
+ 0x14, 0xef, 0xfc, 0x77, 0xff, 0xcb, 0xf2, 0x16, 0x88, 0x8e, 0xc4, 0xd4, 0xc5, 0x4c, 0xd6, 0xc6,
0x28, 0xd1, 0xb8, 0x9b, 0x40, 0x42, 0x38, 0x83, 0x07, 0x12, 0x45, 0x71, 0x20, 0xc6, 0x70, 0x31,
- 0xa5, 0x1d, 0xba, 0x93, 0x6d, 0x3b, 0x4d, 0x67, 0x16, 0xf0, 0xe6, 0x27, 0x30, 0xc6, 0x4f, 0xe1,
- 0xd5, 0x8f, 0xe0, 0xcd, 0x23, 0x47, 0x8e, 0x52, 0x2e, 0x1e, 0xf9, 0x08, 0x66, 0x66, 0xda, 0x85,
- 0x2e, 0x31, 0x7a, 0xda, 0xf7, 0x7e, 0xef, 0xf7, 0x7e, 0xf3, 0x9b, 0x79, 0x7d, 0x0b, 0xf7, 0xb2,
- 0x51, 0x34, 0x88, 0x45, 0x94, 0xe5, 0x42, 0x89, 0x81, 0x1c, 0x31, 0x15, 0x0c, 0xfb, 0x26, 0xc1,
- 0xed, 0x0a, 0x76, 0x17, 0x23, 0x11, 0x09, 0xcb, 0xd0, 0x91, 0xad, 0xbb, 0x4b, 0xb5, 0xd6, 0x2a,
- 0xb0, 0x45, 0xef, 0x15, 0x2c, 0xbe, 0x19, 0xfb, 0xa9, 0xe2, 0x31, 0xdb, 0x35, 0xa2, 0xdb, 0xbe,
- 0xca, 0xf9, 0x09, 0x5e, 0x83, 0xd6, 0x91, 0x1f, 0x8f, 0x99, 0xec, 0xa2, 0x5e, 0x63, 0xb9, 0xb3,
- 0x42, 0xfa, 0x93, 0xc6, 0x3a, 0xff, 0x2d, 0x0b, 0x94, 0xc8, 0x69, 0xc9, 0xf6, 0x76, 0xa6, 0xf5,
- 0x6c, 0x1d, 0xaf, 0xc3, 0x8c, 0xf4, 0x93, 0x2c, 0xfe, 0xbb, 0xe0, 0xae, 0xa1, 0xd1, 0x8a, 0xee,
- 0x7d, 0x42, 0xd3, 0x92, 0x96, 0x81, 0x1f, 0x01, 0x3a, 0xec, 0xa2, 0x1e, 0x5a, 0xee, 0xac, 0x74,
- 0xff, 0x24, 0x46, 0xd1, 0x21, 0x7e, 0x00, 0x73, 0x8a, 0x27, 0x4c, 0x2a, 0x3f, 0xc9, 0xde, 0x27,
- 0xb2, 0xfb, 0x5f, 0x0f, 0x2d, 0x37, 0x68, 0x67, 0x82, 0x6d, 0x4b, 0xfc, 0x14, 0x5a, 0x09, 0x53,
- 0x39, 0x0f, 0xba, 0x0d, 0x63, 0xee, 0xce, 0x95, 0xde, 0x4b, 0xff, 0x80, 0xc5, 0x3b, 0x3e, 0xcf,
- 0x69, 0x49, 0xf1, 0x22, 0x58, 0xa8, 0x1f, 0x82, 0x9f, 0xc1, 0x8c, 0x0a, 0x79, 0xc4, 0xa4, 0x2a,
- 0xfd, 0xdc, 0xbe, 0xea, 0xdf, 0x7b, 0x6e, 0x0a, 0x5b, 0x0e, 0xad, 0x38, 0xf8, 0x3e, 0xb4, 0xc3,
- 0xd0, 0x8e, 0xd0, 0x98, 0x99, 0xdb, 0x72, 0xe8, 0x04, 0xd9, 0x68, 0x43, 0xcb, 0x46, 0xde, 0x77,
- 0x04, 0x33, 0x65, 0x3b, 0xbe, 0x05, 0x8d, 0x84, 0xa7, 0x46, 0x1e, 0x51, 0x1d, 0x1a, 0xc4, 0x3f,
- 0x31, 0x02, 0x1a, 0xf1, 0x4f, 0x70, 0x0f, 0x3a, 0x81, 0x48, 0xb2, 0x9c, 0x49, 0xc9, 0x45, 0xda,
- 0x6d, 0x98, 0xca, 0x75, 0x08, 0xaf, 0xc3, 0x6c, 0x96, 0x8b, 0x80, 0x49, 0xc9, 0xc2, 0x6e, 0xd3,
- 0x5c, 0xd5, 0xbd, 0x61, 0xb5, 0xbf, 0xc9, 0x52, 0x95, 0x0b, 0x1e, 0xd2, 0x2b, 0xb2, 0xbb, 0x06,
- 0xed, 0x0a, 0xc6, 0x18, 0x9a, 0x09, 0xf3, 0x2b, 0x33, 0x26, 0xc6, 0x77, 0xa1, 0x75, 0xcc, 0x78,
- 0x34, 0x54, 0xa5, 0xa1, 0x32, 0xf3, 0xde, 0xc1, 0xc2, 0xa6, 0x18, 0xa7, 0x6a, 0x9b, 0xa7, 0xe5,
- 0x63, 0x2d, 0xc2, 0xff, 0x21, 0xcb, 0xd4, 0xd0, 0xb4, 0xcf, 0x53, 0x9b, 0x68, 0xf4, 0x98, 0x87,
- 0xca, 0x3e, 0xc8, 0x3c, 0xb5, 0x09, 0x76, 0xa1, 0x1d, 0xe8, 0x6e, 0x96, 0x4b, 0x33, 0x99, 0x79,
- 0x3a, 0xc9, 0xbd, 0x6f, 0x08, 0x9a, 0x7b, 0x22, 0x7b, 0x81, 0x9f, 0x40, 0x23, 0x48, 0xe4, 0xcd,
- 0x2f, 0xa1, 0x7e, 0x2e, 0xd5, 0x24, 0xfc, 0x18, 0x9a, 0x31, 0x97, 0xda, 0xe4, 0xd4, 0x98, 0xb5,
- 0x52, 0xdf, 0x8c, 0xd9, 0x10, 0xf4, 0x5b, 0x0e, 0x3f, 0x64, 0x2c, 0x8f, 0x45, 0x14, 0x8b, 0xc8,
- 0xbc, 0xe5, 0x1c, 0xbd, 0x0e, 0xb9, 0x2b, 0xd0, 0xd4, 0x7c, 0xed, 0x9c, 0x1d, 0xb1, 0xd4, 0x8e,
- 0x7e, 0x96, 0xda, 0x44, 0xa3, 0xc6, 0x69, 0x75, 0x1f, 0x93, 0x78, 0x5f, 0x10, 0x80, 0x3e, 0xa9,
- 0x5c, 0xb2, 0xd5, 0xa9, 0x25, 0x5b, 0xaa, 0xfb, 0xb1, 0xac, 0x7e, 0x7d, 0xc3, 0xdc, 0xd7, 0xd0,
- 0x2a, 0x77, 0xca, 0x83, 0xa6, 0x12, 0xd9, 0xa8, 0xbc, 0xf9, 0x42, 0xbd, 0x99, 0x9a, 0xda, 0x3f,
- 0x7c, 0xfc, 0x1b, 0xfb, 0xa7, 0xe7, 0xc4, 0x39, 0x3b, 0x27, 0xce, 0xe5, 0x39, 0x41, 0x1f, 0x0b,
- 0x82, 0xbe, 0x16, 0x04, 0xfd, 0x28, 0x08, 0x3a, 0x2d, 0x08, 0xfa, 0x59, 0x10, 0xf4, 0xab, 0x20,
- 0xce, 0x65, 0x41, 0xd0, 0xe7, 0x0b, 0xe2, 0x9c, 0x5e, 0x10, 0xe7, 0xec, 0x82, 0x38, 0xfb, 0x0f,
- 0x23, 0xae, 0x86, 0xe3, 0x83, 0x7e, 0x20, 0x92, 0x41, 0x94, 0xfb, 0x87, 0x7e, 0xea, 0x0f, 0x62,
- 0x31, 0xe2, 0x83, 0xeb, 0xff, 0x36, 0x07, 0x2d, 0xf3, 0xb3, 0xfa, 0x3b, 0x00, 0x00, 0xff, 0xff,
- 0xa9, 0x7c, 0xb5, 0x30, 0xbf, 0x04, 0x00, 0x00,
+ 0x43, 0x3b, 0x74, 0x27, 0xdb, 0x76, 0x9a, 0xce, 0x2c, 0xe0, 0xcd, 0x4f, 0x60, 0x8c, 0x9f, 0xc2,
+ 0xab, 0x1f, 0xc1, 0x9b, 0x47, 0x8e, 0x1c, 0xa5, 0x5c, 0x3c, 0xf2, 0x11, 0xcc, 0x4c, 0xdb, 0x85,
+ 0x2e, 0x31, 0x7a, 0xda, 0x79, 0xbf, 0xf7, 0x7b, 0xbf, 0xf9, 0xcd, 0x7b, 0x7d, 0x0b, 0xf7, 0xd2,
+ 0x51, 0x38, 0x88, 0x64, 0x98, 0x66, 0x52, 0xcb, 0x81, 0x1a, 0x71, 0xed, 0x0f, 0xfb, 0x36, 0xc0,
+ 0xed, 0x0a, 0xee, 0x2e, 0xd5, 0x48, 0xd5, 0xa1, 0xa0, 0x79, 0xaf, 0x60, 0xf1, 0xcd, 0x98, 0x25,
+ 0x5a, 0x44, 0x7c, 0xd7, 0x96, 0x6f, 0x33, 0x9d, 0x89, 0x13, 0xbc, 0x06, 0xad, 0x23, 0x16, 0x8d,
+ 0xb9, 0x72, 0x51, 0xaf, 0xb1, 0xdc, 0x59, 0x21, 0xfd, 0x49, 0x61, 0x9d, 0xff, 0x96, 0xfb, 0x5a,
+ 0x66, 0xb4, 0x64, 0x7b, 0x3b, 0xd3, 0x7a, 0x45, 0x1e, 0xaf, 0xc3, 0x8c, 0x62, 0x71, 0x1a, 0xfd,
+ 0x5d, 0x70, 0xd7, 0xd2, 0x68, 0x45, 0xf7, 0x3e, 0xa1, 0x69, 0xc9, 0x82, 0x81, 0x1f, 0x01, 0x3a,
+ 0x74, 0x51, 0x0f, 0x2d, 0x77, 0x56, 0xdc, 0x3f, 0x89, 0x51, 0x74, 0x88, 0x1f, 0xc0, 0x9c, 0x16,
+ 0x31, 0x57, 0x9a, 0xc5, 0xe9, 0xfb, 0x58, 0xb9, 0xff, 0xf5, 0xd0, 0x72, 0x83, 0x76, 0x26, 0xd8,
+ 0xb6, 0xc2, 0x4f, 0xa1, 0x15, 0x73, 0x9d, 0x09, 0xdf, 0x6d, 0x58, 0x73, 0x77, 0xae, 0xf4, 0x5e,
+ 0xb2, 0x03, 0x1e, 0xed, 0x30, 0x91, 0xd1, 0x92, 0xe2, 0x85, 0xb0, 0x50, 0xbf, 0x04, 0x3f, 0x83,
+ 0x19, 0x1d, 0x88, 0x90, 0x2b, 0x5d, 0xfa, 0xb9, 0x7d, 0x55, 0xbf, 0xf7, 0xdc, 0x26, 0xb6, 0x1c,
+ 0x5a, 0x71, 0xf0, 0x7d, 0x68, 0x07, 0x41, 0x31, 0x2c, 0x6b, 0x66, 0x6e, 0xcb, 0xa1, 0x13, 0x64,
+ 0xa3, 0x0d, 0xad, 0xe2, 0xe4, 0x7d, 0x47, 0x30, 0x53, 0x96, 0xe3, 0x5b, 0xd0, 0x88, 0x45, 0x62,
+ 0xe5, 0x11, 0x35, 0x47, 0x8b, 0xb0, 0x13, 0x2b, 0x60, 0x10, 0x76, 0x82, 0x7b, 0xd0, 0xf1, 0x65,
+ 0x9c, 0x66, 0x5c, 0x29, 0x21, 0x13, 0xb7, 0x61, 0x33, 0xd7, 0x21, 0xbc, 0x0e, 0xb3, 0x69, 0x26,
+ 0x7d, 0xae, 0x14, 0x0f, 0xdc, 0xa6, 0x7d, 0x6a, 0xf7, 0x86, 0xd5, 0xfe, 0x26, 0x4f, 0x74, 0x26,
+ 0x45, 0x40, 0xaf, 0xc8, 0xdd, 0x35, 0x68, 0x57, 0x30, 0xc6, 0xd0, 0x8c, 0x39, 0xab, 0xcc, 0xd8,
+ 0x33, 0xbe, 0x0b, 0xad, 0x63, 0x2e, 0xc2, 0xa1, 0x2e, 0x0d, 0x95, 0x91, 0xf7, 0x0e, 0x16, 0x36,
+ 0xe5, 0x38, 0xd1, 0xdb, 0x22, 0x29, 0x9b, 0xb5, 0x08, 0xff, 0x07, 0x3c, 0xd5, 0x43, 0x5b, 0x3e,
+ 0x4f, 0x8b, 0xc0, 0xa0, 0xc7, 0x22, 0xd0, 0x45, 0x43, 0xe6, 0x69, 0x11, 0xe0, 0x2e, 0xb4, 0x7d,
+ 0x53, 0xcd, 0x33, 0x65, 0x27, 0x33, 0x4f, 0x27, 0xb1, 0xf7, 0x0d, 0x41, 0x73, 0x4f, 0xa6, 0x2f,
+ 0xf0, 0x13, 0x68, 0xf8, 0xb1, 0xba, 0xf9, 0x25, 0xd4, 0xef, 0xa5, 0x86, 0x84, 0x1f, 0x43, 0x33,
+ 0x12, 0xca, 0x98, 0x9c, 0x1a, 0xb3, 0x51, 0xea, 0xdb, 0x31, 0x5b, 0x82, 0xe9, 0xe5, 0xf0, 0x43,
+ 0xca, 0xb3, 0x48, 0x86, 0x91, 0x0c, 0x6d, 0x2f, 0xe7, 0xe8, 0x75, 0xa8, 0xbb, 0x02, 0x4d, 0xc3,
+ 0x37, 0xce, 0xf9, 0x11, 0x4f, 0x8a, 0xd1, 0xcf, 0xd2, 0x22, 0x30, 0xa8, 0x75, 0x5a, 0xbd, 0xc7,
+ 0x06, 0xde, 0x17, 0x04, 0x60, 0x6e, 0x2a, 0x97, 0x6c, 0x75, 0x6a, 0xc9, 0x96, 0xea, 0x7e, 0x0a,
+ 0x56, 0xbf, 0xbe, 0x61, 0xdd, 0xd7, 0xd0, 0x2a, 0x77, 0xca, 0x83, 0xa6, 0x96, 0xe9, 0xa8, 0x7c,
+ 0xf9, 0x42, 0xbd, 0x98, 0xda, 0xdc, 0x3f, 0x7c, 0xfc, 0x1b, 0xfb, 0xa7, 0xe7, 0xc4, 0x39, 0x3b,
+ 0x27, 0xce, 0xe5, 0x39, 0x41, 0x1f, 0x73, 0x82, 0xbe, 0xe6, 0x04, 0xfd, 0xc8, 0x09, 0x3a, 0xcd,
+ 0x09, 0xfa, 0x99, 0x13, 0xf4, 0x2b, 0x27, 0xce, 0x65, 0x4e, 0xd0, 0xe7, 0x0b, 0xe2, 0x9c, 0x5e,
+ 0x10, 0xe7, 0xec, 0x82, 0x38, 0xfb, 0x0f, 0x43, 0xa1, 0x87, 0xe3, 0x83, 0xbe, 0x2f, 0xe3, 0x41,
+ 0x98, 0xb1, 0x43, 0x96, 0xb0, 0x41, 0x24, 0x47, 0x62, 0x70, 0xfd, 0xdf, 0xe6, 0xa0, 0x65, 0x7f,
+ 0x56, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff, 0x24, 0x9c, 0x74, 0xb7, 0xa9, 0x04, 0x00, 0x00,
}
func (this *QuantileSketchMatrix) Equal(that interface{}) bool {
diff --git a/pkg/logproto/sketch.proto b/pkg/logproto/sketch.proto
index e84deaf20d4c8..d8ffeb0110340 100644
--- a/pkg/logproto/sketch.proto
+++ b/pkg/logproto/sketch.proto
@@ -2,7 +2,6 @@ syntax = "proto3";
package logproto;
-import "gogoproto/gogo.proto";
import "pkg/logproto/logproto.proto";
option go_package = "github.com/grafana/loki/pkg/logproto";
diff --git a/pkg/logql/blocker.go b/pkg/logql/blocker.go
index d38a640456c30..9a07113c40dd3 100644
--- a/pkg/logql/blocker.go
+++ b/pkg/logql/blocker.go
@@ -8,6 +8,7 @@ import (
"github.com/go-kit/log/level"
"github.com/grafana/regexp"
+ "github.com/grafana/loki/pkg/util"
logutil "github.com/grafana/loki/pkg/util/log"
"github.com/grafana/loki/pkg/util/validation"
)
@@ -32,8 +33,8 @@ func (qb *queryBlocker) isBlocked(ctx context.Context, tenant string) bool {
return false
}
- query := qb.q.params.Query()
- typ, err := QueryType(query)
+ query := qb.q.params.QueryString()
+ typ, err := QueryType(qb.q.params.GetExpression())
if err != nil {
typ = "unknown"
}
@@ -43,7 +44,7 @@ func (qb *queryBlocker) isBlocked(ctx context.Context, tenant string) bool {
for _, b := range blocks {
if b.Hash > 0 {
- if b.Hash == HashedQuery(query) {
+ if b.Hash == util.HashedQuery(query) {
level.Warn(logger).Log("msg", "query blocker matched with hash policy", "hash", b.Hash, "query", query)
return qb.block(b, typ, logger)
}
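
`HashedQuery` now lives in `pkg/util` and is fed the raw query string. Going by the test names below ("correct FNV32 hash matches"), the helper is expected to be an FNV-32 digest of the query text; a sketch under that assumption:

```go
package util

import "hash/fnv"

// HashedQuery returns an FNV-32 digest of the query string. This is a
// sketch matching the "FNV32 hash" wording in the blocker tests; the
// actual helper may differ in detail.
func HashedQuery(query string) uint32 {
	h := fnv.New32()
	_, _ = h.Write([]byte(query)) // fnv hashes never return a write error
	return h.Sum32()
}
```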
diff --git a/pkg/logql/blocker_test.go b/pkg/logql/blocker_test.go
index 3dc3b72c81599..9fa586a02db80 100644
--- a/pkg/logql/blocker_test.go
+++ b/pkg/logql/blocker_test.go
@@ -12,6 +12,7 @@ import (
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/logqlmodel"
+ "github.com/grafana/loki/pkg/util"
"github.com/grafana/loki/pkg/util/validation"
)
@@ -124,7 +125,7 @@ func TestEngine_ExecWithBlockedQueries(t *testing.T) {
"correct FNV32 hash matches",
defaultQuery, []*validation.BlockedQuery{
{
- Hash: HashedQuery(defaultQuery),
+ Hash: util.HashedQuery(defaultQuery),
},
}, logqlmodel.ErrBlocked,
},
@@ -132,7 +133,7 @@ func TestEngine_ExecWithBlockedQueries(t *testing.T) {
"incorrect FNV32 hash does not match",
defaultQuery, []*validation.BlockedQuery{
{
- Hash: HashedQuery(defaultQuery) + 1,
+ Hash: util.HashedQuery(defaultQuery) + 1,
},
}, nil,
},
@@ -144,15 +145,10 @@ func TestEngine_ExecWithBlockedQueries(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
limits.blockedQueries = test.blocked
- q := eng.Query(LiteralParams{
- qs: test.q,
- start: time.Unix(0, 0),
- end: time.Unix(100000, 0),
- step: 60 * time.Second,
- direction: logproto.FORWARD,
- limit: 1000,
- })
- _, err := q.Exec(user.InjectOrgID(context.Background(), "fake"))
+ params, err := NewLiteralParams(test.q, time.Unix(0, 0), time.Unix(100000, 0), 60*time.Second, 0, logproto.FORWARD, 1000, nil)
+ require.NoError(t, err)
+ q := eng.Query(params)
+ _, err = q.Exec(user.InjectOrgID(context.Background(), "fake"))
if test.expectedErr == nil {
require.NoError(t, err)
diff --git a/pkg/logql/downstream.go b/pkg/logql/downstream.go
index 3944b4fc492a7..27cb3e849fa2c 100644
--- a/pkg/logql/downstream.go
+++ b/pkg/logql/downstream.go
@@ -4,12 +4,14 @@ import (
"context"
"errors"
"fmt"
+ "strings"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/prometheus/promql"
"github.com/grafana/loki/pkg/iter"
+ "github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/logql/syntax"
"github.com/grafana/loki/pkg/logqlmodel"
"github.com/grafana/loki/pkg/logqlmodel/metadata"
@@ -62,15 +64,12 @@ func NewDownstreamEngine(opts EngineOpts, downstreamable Downstreamable, limits
func (ng *DownstreamEngine) Opts() EngineOpts { return ng.opts }
// Query constructs a Query
-func (ng *DownstreamEngine) Query(ctx context.Context, p Params, mapped syntax.Expr) Query {
+func (ng *DownstreamEngine) Query(ctx context.Context, p Params) Query {
return &query{
logger: ng.logger,
params: p,
evaluator: NewDownstreamEvaluator(ng.downstreamable.Downstreamer(ctx)),
- parse: func(_ context.Context, _ string) (syntax.Expr, error) {
- return mapped, nil
- },
- limits: ng.limits,
+ limits: ng.limits,
}
}
@@ -157,6 +156,50 @@ func (c ConcatLogSelectorExpr) string(maxDepth int) string {
return fmt.Sprintf("%s ++ %s", c.DownstreamLogSelectorExpr.String(), c.next.string(maxDepth-1))
}
+// QuantileSketchEvalExpr evaluates a quantile sketch to the actual quantile.
+type QuantileSketchEvalExpr struct {
+ syntax.SampleExpr
+ quantileMergeExpr *QuantileSketchMergeExpr
+ quantile *float64
+}
+
+func (e QuantileSketchEvalExpr) String() string {
+ return fmt.Sprintf("quantileSketchEval<%s>", e.quantileMergeExpr.String())
+}
+
+func (e *QuantileSketchEvalExpr) Walk(f syntax.WalkFn) {
+ f(e)
+ e.quantileMergeExpr.Walk(f)
+}
+
+type QuantileSketchMergeExpr struct {
+ syntax.SampleExpr
+ downstreams []DownstreamSampleExpr
+}
+
+func (e QuantileSketchMergeExpr) String() string {
+ var sb strings.Builder
+ for i, d := range e.downstreams {
+ if i >= defaultMaxDepth {
+ break
+ }
+
+ if i > 0 {
+ sb.WriteString(" ++ ")
+ }
+
+ sb.WriteString(d.String())
+ }
+ return fmt.Sprintf("quantileSketchMerge<%s>", sb.String())
+}
+
+func (e *QuantileSketchMergeExpr) Walk(f syntax.WalkFn) {
+ f(e)
+ for _, d := range e.downstreams {
+ d.Walk(f)
+ }
+}
+
type Shards []astmapper.ShardAnnotation
func (xs Shards) Encode() (encoded []string) {
@@ -189,9 +232,7 @@ type Downstreamable interface {
}
type DownstreamQuery struct {
- Expr syntax.Expr
Params Params
- Shards Shards
}
// Downstreamer is an interface for deferring responsibility for query execution.
@@ -268,9 +309,10 @@ func (ev *DownstreamEvaluator) NewStepEvaluator(
shards = append(shards, *e.shard)
}
results, err := ev.Downstream(ctx, []DownstreamQuery{{
- Expr: e.SampleExpr,
- Params: params,
- Shards: shards,
+ Params: ParamsWithShardsOverride{
+ Params: ParamsWithExpressionOverride{Params: params, ExpressionOverride: e.SampleExpr},
+ ShardsOverride: Shards(shards).Encode(),
+ },
}})
if err != nil {
return nil, err
@@ -282,11 +324,10 @@ func (ev *DownstreamEvaluator) NewStepEvaluator(
var queries []DownstreamQuery
for cur != nil {
qry := DownstreamQuery{
- Expr: cur.DownstreamSampleExpr.SampleExpr,
- Params: params,
+ Params: ParamsWithExpressionOverride{Params: params, ExpressionOverride: cur.DownstreamSampleExpr.SampleExpr},
}
if shard := cur.DownstreamSampleExpr.shard; shard != nil {
- qry.Shards = Shards{*shard}
+ qry.Params = ParamsWithShardsOverride{Params: qry.Params, ShardsOverride: Shards{*shard}.Encode()}
}
queries = append(queries, qry)
cur = cur.next
@@ -304,7 +345,7 @@ func (ev *DownstreamEvaluator) NewStepEvaluator(
level.Warn(util_log.Logger).Log(
"msg", "could not extract StepEvaluator",
"err", err,
- "expr", queries[i].Expr.String(),
+ "expr", queries[i].Params.GetExpression().String(),
)
return nil, err
}
@@ -312,6 +353,47 @@ func (ev *DownstreamEvaluator) NewStepEvaluator(
}
return NewConcatStepEvaluator(xs), nil
+ case *QuantileSketchEvalExpr:
+ var queries []DownstreamQuery
+ if e.quantileMergeExpr != nil {
+ for _, d := range e.quantileMergeExpr.downstreams {
+ qry := DownstreamQuery{
+ Params: ParamsWithExpressionOverride{
+ Params: params,
+ ExpressionOverride: d.SampleExpr,
+ },
+ }
+ if shard := d.shard; shard != nil {
+ qry.Params = ParamsWithShardsOverride{
+ Params: qry.Params,
+ ShardsOverride: Shards{*shard}.Encode(),
+ }
+ }
+ queries = append(queries, qry)
+ }
+ }
+
+ results, err := ev.Downstream(ctx, queries)
+ if err != nil {
+ return nil, fmt.Errorf("error running quantile sketch downstream query: %w", err)
+ }
+
+ xs := make([]StepEvaluator, 0, len(queries))
+ for _, res := range results {
+ if res.Data.Type() != QuantileSketchMatrixType {
+ return nil, fmt.Errorf("unexpected matrix data type: got (%s), want (%s)", res.Data.Type(), QuantileSketchMatrixType)
+ }
+ data, ok := res.Data.(ProbabilisticQuantileMatrix)
+ if !ok {
+ return nil, fmt.Errorf("unexpected matrix type: got (%T), want (ProbabilisticQuantileMatrix)", res.Data)
+ }
+ stepper := NewQuantileSketchMatrixStepEvaluator(data, params)
+ xs = append(xs, stepper)
+ }
+
+ inner := NewQuantileSketchMergeStepEvaluator(xs)
+
+ return NewQuantileSketchVectorStepEvaluator(inner, *e.quantile), nil
default:
return ev.defaultEvaluator.NewStepEvaluator(ctx, nextEvFactory, e, params)
@@ -332,25 +414,25 @@ func (ev *DownstreamEvaluator) NewIterator(
shards = append(shards, *e.shard)
}
results, err := ev.Downstream(ctx, []DownstreamQuery{{
- Expr: e.LogSelectorExpr,
- Params: params,
- Shards: shards,
+ Params: ParamsWithShardsOverride{
+ Params: ParamsWithExpressionOverride{Params: params, ExpressionOverride: e.LogSelectorExpr},
+ ShardsOverride: shards.Encode(),
+ },
}})
if err != nil {
return nil, err
}
- return ResultIterator(results[0], params)
+ return ResultIterator(results[0], params.Direction())
case *ConcatLogSelectorExpr:
cur := e
var queries []DownstreamQuery
for cur != nil {
qry := DownstreamQuery{
- Expr: cur.DownstreamLogSelectorExpr.LogSelectorExpr,
- Params: params,
+ Params: ParamsWithExpressionOverride{Params: params, ExpressionOverride: cur.DownstreamLogSelectorExpr.LogSelectorExpr},
}
if shard := cur.DownstreamLogSelectorExpr.shard; shard != nil {
- qry.Shards = Shards{*shard}
+ qry.Params = ParamsWithShardsOverride{Params: qry.Params, ShardsOverride: Shards{*shard}.Encode()}
}
queries = append(queries, qry)
cur = cur.next
@@ -363,12 +445,12 @@ func (ev *DownstreamEvaluator) NewIterator(
xs := make([]iter.EntryIterator, 0, len(results))
for i, res := range results {
- iter, err := ResultIterator(res, params)
+ iter, err := ResultIterator(res, params.Direction())
if err != nil {
level.Warn(util_log.Logger).Log(
"msg", "could not extract Iterator",
"err", err,
- "expr", queries[i].Expr.String(),
+ "expr", queries[i].Params.GetExpression().String(),
)
}
xs = append(xs, iter)
@@ -452,10 +534,10 @@ func NewResultStepEvaluator(res logqlmodel.Result, params Params) (StepEvaluator
}
// ResultIterator coerces a downstream streams result into an iter.EntryIterator
-func ResultIterator(res logqlmodel.Result, params Params) (iter.EntryIterator, error) {
+func ResultIterator(res logqlmodel.Result, direction logproto.Direction) (iter.EntryIterator, error) {
streams, ok := res.Data.(logqlmodel.Streams)
if !ok {
return nil, fmt.Errorf("unexpected type (%s) for ResultIterator; expected %s", res.Data.Type(), logqlmodel.ValueTypeStreams)
}
- return iter.NewStreamsIterator(streams, params.Direction()), nil
+ return iter.NewStreamsIterator(streams, direction), nil
}
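
Since `Expr` and `Shards` were dropped from `DownstreamQuery`, both overrides now travel inside `Params`. The schematic helper below (not part of the change) condenses the pattern the evaluator cases above repeat; all names come from this diff, only the helper itself is hypothetical.

```go
// downstreamOne shows how a single sharded sub-query is now expressed purely
// through Params wrappers: the expression override carries the mapped
// sub-expression, and an optional shards override narrows execution.
func downstreamOne(ctx context.Context, ev *DownstreamEvaluator, params Params,
	expr syntax.SampleExpr, shard *astmapper.ShardAnnotation) ([]logqlmodel.Result, error) {
	qry := DownstreamQuery{
		Params: ParamsWithExpressionOverride{Params: params, ExpressionOverride: expr},
	}
	if shard != nil {
		qry.Params = ParamsWithShardsOverride{
			Params:         qry.Params,
			ShardsOverride: Shards{*shard}.Encode(),
		}
	}
	return ev.Downstream(ctx, []DownstreamQuery{qry})
}
```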
diff --git a/pkg/logql/downstream_test.go b/pkg/logql/downstream_test.go
index c5f54b9e1c056..218957f862bb1 100644
--- a/pkg/logql/downstream_test.go
+++ b/pkg/logql/downstream_test.go
@@ -13,6 +13,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/grafana/loki/pkg/logproto"
+ "github.com/grafana/loki/pkg/logql/syntax"
)
var nilShardMetrics = NewShardMapperMetrics(nil)
@@ -53,6 +54,7 @@ func TestMappingEquivalence(t *testing.T) {
{`sum(rate({a=~".+"} |= "foo" != "foo"[1s]) or vector(1))`, false},
{`avg_over_time({a=~".+"} | logfmt | unwrap value [1s])`, false},
{`avg_over_time({a=~".+"} | logfmt | unwrap value [1s]) by (a)`, true},
+ {`quantile_over_time(0.99, {a=~".+"} | logfmt | unwrap value [1s])`, true},
// topk prefers already-seen values in tiebreakers. Since the test data generates
// the same log lines for each series & the resulting promql.Vectors aren't deterministically
// sorted by labels, we don't expect this to pass.
@@ -69,7 +71,7 @@ func TestMappingEquivalence(t *testing.T) {
sharded := NewDownstreamEngine(opts, MockDownstreamer{regular}, NoLimits, log.NewNopLogger())
t.Run(tc.query, func(t *testing.T) {
- params := NewLiteralParams(
+ params, err := NewLiteralParams(
tc.query,
start,
end,
@@ -79,20 +81,22 @@ func TestMappingEquivalence(t *testing.T) {
uint32(limit),
nil,
)
+ require.NoError(t, err)
+
qry := regular.Query(params)
ctx := user.InjectOrgID(context.Background(), "fake")
- mapper := NewShardMapper(ConstantShards(shards), nilShardMetrics)
- _, _, mapped, err := mapper.Parse(tc.query)
- require.Nil(t, err)
+ mapper := NewShardMapper(ConstantShards(shards), nilShardMetrics, []string{})
+ _, _, mapped, err := mapper.Parse(params.GetExpression())
+ require.NoError(t, err)
- shardedQry := sharded.Query(ctx, params, mapped)
+ shardedQry := sharded.Query(ctx, ParamsWithExpressionOverride{Params: params, ExpressionOverride: mapped})
res, err := qry.Exec(ctx)
- require.Nil(t, err)
+ require.NoError(t, err)
shardedRes, err := shardedQry.Exec(ctx)
- require.Nil(t, err)
+ require.NoError(t, err)
if tc.approximate {
approximatelyEquals(t, res.Data.(promql.Matrix), shardedRes.Data.(promql.Matrix))
@@ -103,6 +107,70 @@ func TestMappingEquivalence(t *testing.T) {
}
}
+func TestMappingEquivalenceSketches(t *testing.T) {
+ var (
+ shards = 3
+ nStreams = 10_000
+ rounds = 20
+ streams = randomStreams(nStreams, rounds+1, shards, []string{"a", "b", "c", "d"}, true)
+ start = time.Unix(0, 0)
+ end = time.Unix(0, int64(time.Second*time.Duration(rounds)))
+ step = time.Second
+ interval = time.Duration(0)
+ limit = 100
+ )
+
+ for _, tc := range []struct {
+ query string
+ relativeError float64
+ }{
+ {`quantile_over_time(0.70, {a=~".+"} | logfmt | unwrap value [1s]) by (a)`, 0.03},
+ {`quantile_over_time(0.99, {a=~".+"} | logfmt | unwrap value [1s]) by (a)`, 0.02},
+ } {
+ q := NewMockQuerier(
+ shards,
+ streams,
+ )
+
+ opts := EngineOpts{}
+ regular := NewEngine(opts, q, NoLimits, log.NewNopLogger())
+ sharded := NewDownstreamEngine(opts, MockDownstreamer{regular}, NoLimits, log.NewNopLogger())
+
+ t.Run(tc.query, func(t *testing.T) {
+ params, err := NewLiteralParams(
+ tc.query,
+ start,
+ end,
+ step,
+ interval,
+ logproto.FORWARD,
+ uint32(limit),
+ nil,
+ )
+ require.NoError(t, err)
+ qry := regular.Query(params)
+ ctx := user.InjectOrgID(context.Background(), "fake")
+
+ mapper := NewShardMapper(ConstantShards(shards), nilShardMetrics, []string{ShardQuantileOverTime})
+ _, _, mapped, err := mapper.Parse(params.GetExpression())
+ require.NoError(t, err)
+
+ shardedQry := sharded.Query(ctx, ParamsWithExpressionOverride{
+ Params: params,
+ ExpressionOverride: mapped,
+ })
+
+ res, err := qry.Exec(ctx)
+ require.NoError(t, err)
+
+ shardedRes, err := shardedQry.Exec(ctx)
+ require.NoError(t, err)
+
+ relativeError(t, res.Data.(promql.Matrix), shardedRes.Data.(promql.Matrix), tc.relativeError)
+ })
+ }
+}
+
func TestShardCounter(t *testing.T) {
var (
shards = 3
@@ -135,7 +203,7 @@ func TestShardCounter(t *testing.T) {
sharded := NewDownstreamEngine(opts, MockDownstreamer{regular}, NoLimits, log.NewNopLogger())
t.Run(tc.query, func(t *testing.T) {
- params := NewLiteralParams(
+ params, err := NewLiteralParams(
tc.query,
start,
end,
@@ -145,13 +213,14 @@ func TestShardCounter(t *testing.T) {
uint32(limit),
nil,
)
+ require.NoError(t, err)
ctx := user.InjectOrgID(context.Background(), "fake")
- mapper := NewShardMapper(ConstantShards(shards), nilShardMetrics)
- noop, _, mapped, err := mapper.Parse(tc.query)
- require.Nil(t, err)
+ mapper := NewShardMapper(ConstantShards(shards), nilShardMetrics, []string{ShardQuantileOverTime})
+ noop, _, mapped, err := mapper.Parse(params.GetExpression())
+ require.NoError(t, err)
- shardedQry := sharded.Query(ctx, params, mapped)
+ shardedQry := sharded.Query(ctx, ParamsWithExpressionOverride{Params: params, ExpressionOverride: mapped})
shardedRes, err := shardedQry.Exec(ctx)
require.Nil(t, err)
@@ -393,7 +462,7 @@ func TestRangeMappingEquivalence(t *testing.T) {
t.Run(tc.query, func(t *testing.T) {
ctx := user.InjectOrgID(context.Background(), "fake")
- params := NewLiteralParams(
+ params, err := NewLiteralParams(
tc.query,
start,
end,
@@ -403,21 +472,22 @@ func TestRangeMappingEquivalence(t *testing.T) {
uint32(limit),
nil,
)
+ require.NoError(t, err)
// Regular engine
qry := regularEngine.Query(params)
res, err := qry.Exec(ctx)
- require.Nil(t, err)
+ require.NoError(t, err)
// Downstream engine - split by range
rangeMapper, err := NewRangeMapper(tc.splitByInterval, nilRangeMetrics, NewMapperStats())
- require.Nil(t, err)
- noop, rangeExpr, err := rangeMapper.Parse(tc.query)
- require.Nil(t, err)
+ require.NoError(t, err)
+ noop, rangeExpr, err := rangeMapper.Parse(syntax.MustParseExpr(tc.query))
+ require.NoError(t, err)
require.False(t, noop, "downstream engine cannot execute noop")
- rangeQry := downstreamEngine.Query(ctx, params, rangeExpr)
+ rangeQry := downstreamEngine.Query(ctx, ParamsWithExpressionOverride{Params: params, ExpressionOverride: rangeExpr})
rangeRes, err := rangeQry.Exec(ctx)
require.Nil(t, err)
@@ -446,3 +516,22 @@ func approximatelyEquals(t *testing.T, as, bs promql.Matrix) {
require.Equalf(t, a, b, "metric %s differs from %s at %d", a.Metric, b.Metric, i)
}
}
+
+func relativeError(t *testing.T, expected, actual promql.Matrix, alpha float64) {
+ require.Len(t, actual, len(expected))
+
+ for i := 0; i < len(expected); i++ {
+ expectedSeries := expected[i]
+ actualSeries := actual[i]
+ require.Equal(t, expectedSeries.Metric, actualSeries.Metric)
+ require.Lenf(t, actualSeries.Floats, len(expectedSeries.Floats), "for series %s", expectedSeries.Metric)
+
+ e := make([]float64, len(expectedSeries.Floats))
+ a := make([]float64, len(expectedSeries.Floats))
+ for j := 0; j < len(expectedSeries.Floats); j++ {
+ e[j] = expectedSeries.Floats[j].F
+ a[j] = actualSeries.Floats[j].F
+ }
+ require.InEpsilonSlice(t, e, a, alpha)
+ }
+}
diff --git a/pkg/logql/engine.go b/pkg/logql/engine.go
index 1b85ea05ea760..89490d479e34e 100644
--- a/pkg/logql/engine.go
+++ b/pkg/logql/engine.go
@@ -2,6 +2,7 @@ package logql
import (
"context"
+ "errors"
"flag"
"fmt"
"math"
@@ -83,7 +84,14 @@ func (s SelectLogParams) String() string {
// LogSelector returns the LogSelectorExpr from the SelectParams.
// The `LogSelectorExpr` can then returns all matchers and filters to use for that request.
func (s SelectLogParams) LogSelector() (syntax.LogSelectorExpr, error) {
- return syntax.ParseLogSelector(s.Selector, true)
+ if s.QueryRequest.Plan == nil {
+ return nil, errors.New("query plan is empty")
+ }
+ expr, ok := s.QueryRequest.Plan.AST.(syntax.LogSelectorExpr)
+ if !ok {
+ return nil, errors.New("only log selector is supported")
+ }
+ return expr, nil
}
type SelectSampleParams struct {
@@ -93,13 +101,20 @@ type SelectSampleParams struct {
// Expr returns the SampleExpr from the SelectSampleParams.
// The `LogSelectorExpr` can then returns all matchers and filters to use for that request.
func (s SelectSampleParams) Expr() (syntax.SampleExpr, error) {
- return syntax.ParseSampleExpr(s.Selector)
+ if s.SampleQueryRequest.Plan == nil {
+ return nil, errors.New("query plan is empty")
+ }
+ expr, ok := s.SampleQueryRequest.Plan.AST.(syntax.SampleExpr)
+ if !ok {
+ return nil, errors.New("only sample expression supported")
+ }
+ return expr, nil
}
// LogSelector returns the LogSelectorExpr from the SelectParams.
// The `LogSelectorExpr` can then returns all matchers and filters to use for that request.
func (s SelectSampleParams) LogSelector() (syntax.LogSelectorExpr, error) {
- expr, err := syntax.ParseSampleExpr(s.Selector)
+ expr, err := s.Expr()
if err != nil {
return nil, err
}
@@ -160,12 +175,9 @@ func NewEngine(opts EngineOpts, q Querier, l Limits, logger log.Logger) *Engine
// Query creates a new LogQL query. Instant/Range type is derived from the parameters.
func (ng *Engine) Query(params Params) Query {
return &query{
- logger: ng.logger,
- params: params,
- evaluator: ng.evaluatorFactory,
- parse: func(_ context.Context, query string) (syntax.Expr, error) {
- return syntax.ParseExpr(query)
- },
+ logger: ng.logger,
+ params: params,
+ evaluator: ng.evaluatorFactory,
record: true,
logExecQuery: ng.opts.LogExecutingQuery,
limits: ng.limits,
@@ -181,7 +193,6 @@ type Query interface {
type query struct {
logger log.Logger
params Params
- parse func(context.Context, string) (syntax.Expr, error)
limits Limits
evaluator EvaluatorFactory
record bool
@@ -211,7 +222,7 @@ func (q *query) Exec(ctx context.Context) (logqlmodel.Result, error) {
sp.LogKV(
"type", GetRangeType(q.params),
- "query", q.params.Query(),
+ "query", q.params.QueryString(),
"start", q.params.Start(),
"end", q.params.End(),
"step", q.params.Step(),
@@ -219,11 +230,11 @@ func (q *query) Exec(ctx context.Context) (logqlmodel.Result, error) {
)
if q.logExecQuery {
- queryHash := HashedQuery(q.params.Query())
+ queryHash := util.HashedQuery(q.params.QueryString())
if GetRangeType(q.params) == InstantType {
- level.Info(logutil.WithContext(ctx, q.logger)).Log("msg", "executing query", "type", "instant", "query", q.params.Query(), "query_hash", queryHash)
+ level.Info(logutil.WithContext(ctx, q.logger)).Log("msg", "executing query", "type", "instant", "query", q.params.QueryString(), "query_hash", queryHash)
} else {
- level.Info(logutil.WithContext(ctx, q.logger)).Log("msg", "executing query", "type", "range", "query", q.params.Query(), "length", q.params.End().Sub(q.params.Start()), "step", q.params.Step(), "query_hash", queryHash)
+ level.Info(logutil.WithContext(ctx, q.logger)).Log("msg", "executing query", "type", "range", "query", q.params.QueryString(), "length", q.params.End().Sub(q.params.Start()), "step", q.params.Step(), "query_hash", queryHash)
}
}
@@ -263,16 +274,11 @@ func (q *query) Eval(ctx context.Context) (promql_parser.Value, error) {
ctx, cancel := context.WithTimeout(ctx, queryTimeout)
defer cancel()
- expr, err := q.parse(ctx, q.params.Query())
- if err != nil {
- return nil, err
- }
-
if q.checkBlocked(ctx, tenants) {
return nil, logqlmodel.ErrBlocked
}
- switch e := expr.(type) {
+ switch e := q.params.GetExpression().(type) {
case syntax.SampleExpr:
value, err := q.evalSample(ctx, e)
return value, err
@@ -336,21 +342,37 @@ func (q *query) evalSample(ctx context.Context, expr syntax.SampleExpr) (promql_
if err != nil {
return nil, err
}
+
stepEvaluator, err := q.evaluator.NewStepEvaluator(ctx, q.evaluator, expr, q.params)
if err != nil {
return nil, err
}
defer util.LogErrorWithContext(ctx, "closing SampleExpr", stepEvaluator.Close)
- maxSeriesCapture := func(id string) int { return q.limits.MaxQuerySeries(ctx, id) }
- maxSeries := validation.SmallestPositiveIntPerTenant(tenantIDs, maxSeriesCapture)
-
- seriesIndex := map[uint64]*promql.Series{}
-
next, ts, r := stepEvaluator.Next()
if stepEvaluator.Error() != nil {
return nil, stepEvaluator.Error()
}
+
+ if next && r != nil {
+ switch vec := r.(type) {
+ case SampleVector:
+ maxSeriesCapture := func(id string) int { return q.limits.MaxQuerySeries(ctx, id) }
+ maxSeries := validation.SmallestPositiveIntPerTenant(tenantIDs, maxSeriesCapture)
+ return q.JoinSampleVector(next, ts, vec, stepEvaluator, maxSeries)
+ case ProbabilisticQuantileVector:
+ return JoinQuantileSketchVector(next, vec, stepEvaluator)
+ default:
+ return nil, fmt.Errorf("unsupported result type: %T", r)
+ }
+ }
+ return nil, nil
+}
+
+func (q *query) JoinSampleVector(next bool, ts int64, r StepResult, stepEvaluator StepEvaluator, maxSeries int) (promql_parser.Value, error) {
+
+ seriesIndex := map[uint64]*promql.Series{}
+
vec := promql.Vector{}
if next {
vec = r.SampleVector()
@@ -364,7 +386,7 @@ func (q *query) evalSample(ctx context.Context, expr syntax.SampleExpr) (promql_
if GetRangeType(q.params) == InstantType {
sortByValue, err := Sortable(q.params)
if err != nil {
- return nil, fmt.Errorf("fail to check Sortable, logql: %s ,err: %s", q.params.Query(), err)
+ return nil, fmt.Errorf("fail to check Sortable, logql: %s ,err: %s", q.params.QueryString(), err)
}
if !sortByValue {
sort.Slice(vec, func(i, j int) bool { return labels.Compare(vec[i].Metric, vec[j].Metric) < 0 })
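
With the `parse` callback gone, the expression is fixed at the moment the params are built: `NewLiteralParams` parses the query (and can now fail), and `Query` simply reads the expression back via `GetExpression`. A schematic of the resulting call flow, mirroring the test updates in this change (`start`, `end`, `eng` and `ctx` are placeholders):

```go
// Build params once; parsing happens here, so an invalid query fails early.
params, err := NewLiteralParams(
	`rate({app="foo"}[1m])`, // query string
	start, end,              // time range
	time.Minute,             // step
	0,                       // interval
	logproto.FORWARD,        // direction
	1000,                    // limit
	nil,                     // shards
)
if err != nil {
	return err
}

// The engine no longer re-parses: it evaluates params.GetExpression().
q := eng.Query(params)
res, err := q.Exec(ctx)
```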
diff --git a/pkg/logql/engine_test.go b/pkg/logql/engine_test.go
index ef7d5e0538e3d..2dce4ba57ed41 100644
--- a/pkg/logql/engine_test.go
+++ b/pkg/logql/engine_test.go
@@ -12,6 +12,7 @@ import (
"time"
"github.com/grafana/loki/pkg/logqlmodel/metadata"
+ "github.com/grafana/loki/pkg/querier/plan"
"github.com/grafana/loki/pkg/querier/queryrange/queryrangebase/definitions"
"github.com/go-kit/log"
@@ -64,8 +65,15 @@ func TestEngine_LogsRateUnwrap(t *testing.T) {
{newSeries(testSize, offset(46, constantValue(1)), `{app="foo"}`)},
},
[]SelectSampleParams{
- {&logproto.SampleQueryRequest{Start: time.Unix(30, 0), End: time.Unix(60, 0), Selector: `rate({app="foo"} | unwrap foo[30s])`}},
- },
+ {&logproto.SampleQueryRequest{
+ Start: time.Unix(30, 0),
+ End: time.Unix(60, 0),
+ Selector: `rate({app="foo"} | unwrap foo[30s])`,
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`rate({app="foo"} | unwrap foo[30s])`),
+ },
+ },
+ }},
// there are 15 samples (from 47 to 61) matched from the generated series
// SUM(n=47, 61, 1) = 15
// 15 / 30 = 0.5
@@ -82,7 +90,14 @@ func TestEngine_LogsRateUnwrap(t *testing.T) {
{newSeries(testSize, offset(46, incValue(1)), `{app="foo"}`)},
},
[]SelectSampleParams{
- {&logproto.SampleQueryRequest{Start: time.Unix(30, 0), End: time.Unix(60, 0), Selector: `rate({app="foo"} | unwrap foo[30s])`}},
+ {&logproto.SampleQueryRequest{
+ Start: time.Unix(30, 0),
+ End: time.Unix(60, 0),
+ Selector: `rate({app="foo"} | unwrap foo[30s])`,
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`rate({app="foo"} | unwrap foo[30s])`),
+ },
+ }},
},
// there are 15 samples (from 47 to 61) matched from the generated series
// SUM(n=47, 61, n) = (47+48+...+61) = 810
@@ -100,7 +115,14 @@ func TestEngine_LogsRateUnwrap(t *testing.T) {
{newSeries(testSize, offset(46, constantValue(1)), `{app="foo"}`)},
},
[]SelectSampleParams{
- {&logproto.SampleQueryRequest{Start: time.Unix(30, 0), End: time.Unix(60, 0), Selector: `rate_counter({app="foo"} | unwrap foo[30s])`}},
+ {&logproto.SampleQueryRequest{
+ Start: time.Unix(30, 0),
+ End: time.Unix(60, 0),
+ Selector: `rate_counter({app="foo"} | unwrap foo[30s])`,
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`rate_counter({app="foo"} | unwrap foo[30s])`),
+ },
+ }},
},
// there are 15 samples (from 47 to 61) matched from the generated series
// (1 - 1) / 30 = 0
@@ -129,13 +151,9 @@ func TestEngine_LogsRateUnwrap(t *testing.T) {
t.Parallel()
eng := NewEngine(EngineOpts{}, newQuerierRecorder(t, test.data, test.params), NoLimits, log.NewNopLogger())
- q := eng.Query(LiteralParams{
- qs: test.qs,
- start: test.ts,
- end: test.ts,
- direction: test.direction,
- limit: test.limit,
- })
+ params, err := NewLiteralParams(test.qs, test.ts, test.ts, 0, 0, test.direction, test.limit, nil)
+ require.NoError(t, err)
+ q := eng.Query(params)
res, err := q.Exec(user.InjectOrgID(context.Background(), "fake"))
if expectedError, ok := test.expected.(error); ok {
assert.Equal(t, expectedError.Error(), err.Error())
@@ -960,13 +978,10 @@ func TestEngine_LogsInstantQuery(t *testing.T) {
t.Parallel()
eng := NewEngine(EngineOpts{}, newQuerierRecorder(t, test.data, test.params), NoLimits, log.NewNopLogger())
- q := eng.Query(LiteralParams{
- qs: test.qs,
- start: test.ts,
- end: test.ts,
- direction: test.direction,
- limit: test.limit,
- })
+
+ params, err := NewLiteralParams(test.qs, test.ts, test.ts, 0, 0, test.direction, test.limit, nil)
+ require.NoError(t, err)
+ q := eng.Query(params)
res, err := q.Exec(user.InjectOrgID(context.Background(), "fake"))
if expectedError, ok := test.expected.(error); ok {
assert.Equal(t, expectedError.Error(), err.Error())
@@ -2266,15 +2281,9 @@ func TestEngine_RangeQuery(t *testing.T) {
eng := NewEngine(EngineOpts{}, newQuerierRecorder(t, test.data, test.params), NoLimits, log.NewNopLogger())
- q := eng.Query(LiteralParams{
- qs: test.qs,
- start: test.start,
- end: test.end,
- step: test.step,
- interval: test.interval,
- direction: test.direction,
- limit: test.limit,
- })
+ params, err := NewLiteralParams(test.qs, test.start, test.end, test.step, test.interval, test.direction, test.limit, nil)
+ require.NoError(t, err)
+ q := eng.Query(params)
res, err := q.Exec(user.InjectOrgID(context.Background(), "fake"))
if err != nil {
t.Fatal(err)
@@ -2302,13 +2311,11 @@ func TestEngine_Stats(t *testing.T) {
eng := NewEngine(EngineOpts{}, &statsQuerier{}, NoLimits, log.NewNopLogger())
queueTime := 2 * time.Nanosecond
- q := eng.Query(LiteralParams{
- qs: `{foo="bar"}`,
- start: time.Now(),
- end: time.Now(),
- direction: logproto.BACKWARD,
- limit: 1000,
- })
+
+ params, err := NewLiteralParams(`{foo="bar"}`, time.Now(), time.Now(), 0, 0, logproto.FORWARD, 1000, nil)
+ require.NoError(t, err)
+ q := eng.Query(params)
+
ctx := context.WithValue(context.Background(), httpreq.QueryQueueTimeHTTPHeader, queueTime)
r, err := q.Exec(user.InjectOrgID(ctx, "fake"))
require.NoError(t, err)
@@ -2338,13 +2345,9 @@ func (metaQuerier) SelectSamples(ctx context.Context, _ SelectSampleParams) (ite
func TestEngine_Metadata(t *testing.T) {
eng := NewEngine(EngineOpts{}, &metaQuerier{}, NoLimits, log.NewNopLogger())
- q := eng.Query(LiteralParams{
- qs: `{foo="bar"}`,
- start: time.Now(),
- end: time.Now(),
- direction: logproto.BACKWARD,
- limit: 1000,
- })
+ params, err := NewLiteralParams(`{foo="bar"}`, time.Now(), time.Now(), 0, 0, logproto.BACKWARD, 1000, nil)
+ require.NoError(t, err)
+ q := eng.Query(params)
r, err := q.Exec(user.InjectOrgID(context.Background(), "fake"))
require.NoError(t, err)
@@ -2353,51 +2356,17 @@ func TestEngine_Metadata(t *testing.T) {
}, r.Headers)
}
-func TestEngine_LogsInstantQuery_IllegalLogql(t *testing.T) {
- eng := NewEngine(EngineOpts{}, &statsQuerier{}, NoLimits, log.NewNopLogger())
-
- queueTime := 2 * time.Nanosecond
- illegalVector := `vector(abc)`
- q := eng.Query(LiteralParams{
- qs: illegalVector,
- start: time.Now(),
- end: time.Now(),
- step: time.Second * 30,
- interval: time.Second * 30,
- direction: logproto.BACKWARD,
- limit: 1000,
- })
- expectErr := logqlmodel.NewParseError("syntax error: unexpected IDENTIFIER, expecting NUMBER", 1, 8)
- ctx := context.WithValue(context.Background(), httpreq.QueryQueueTimeHTTPHeader, queueTime)
- _, err := q.Exec(user.InjectOrgID(ctx, "fake"))
-
- require.EqualError(t, err, expectErr.Error())
-
- qry, ok := q.(*query)
- require.Equal(t, ok, true)
- vectorExpr := syntax.NewVectorExpr(illegalVector)
-
- _, err = qry.evalSample(ctx, vectorExpr)
- expectEvalSampleErr := logqlmodel.NewParseError("unable to parse vectorExpr as a float: strconv.ParseFloat: parsing \"vector(abc)\": invalid syntax", 0, 0)
- require.EqualError(t, err, expectEvalSampleErr.Error())
-}
-
func TestEngine_LogsInstantQuery_Vector(t *testing.T) {
eng := NewEngine(EngineOpts{}, &statsQuerier{}, NoLimits, log.NewNopLogger())
now := time.Now()
queueTime := 2 * time.Nanosecond
logqlVector := `vector(5)`
- q := eng.Query(LiteralParams{
- qs: logqlVector,
- start: now,
- end: now,
- step: 0,
- interval: time.Second * 30,
- direction: logproto.BACKWARD,
- limit: 1000,
- })
+
+ params, err := NewLiteralParams(logqlVector, now, now, 0, time.Second*30, logproto.BACKWARD, 1000, nil)
+ require.NoError(t, err)
+ q := eng.Query(params)
ctx := context.WithValue(context.Background(), httpreq.QueryQueueTimeHTTPHeader, queueTime)
- _, err := q.Exec(user.InjectOrgID(ctx, "fake"))
+ _, err = q.Exec(user.InjectOrgID(ctx, "fake"))
require.NoError(t, err)
@@ -2472,14 +2441,11 @@ func TestStepEvaluator_Error(t *testing.T) {
tc := tc
t.Run(tc.name, func(t *testing.T) {
eng := NewEngine(EngineOpts{}, tc.querier, NoLimits, log.NewNopLogger())
- q := eng.Query(LiteralParams{
- qs: tc.qs,
- start: time.Unix(0, 0),
- end: time.Unix(180, 0),
- step: 1 * time.Second,
- limit: 1,
- })
- _, err := q.Exec(user.InjectOrgID(context.Background(), "fake"))
+
+ params, err := NewLiteralParams(tc.qs, time.Unix(0, 0), time.Unix(180, 0), 1*time.Second, 0, logproto.BACKWARD, 1, nil)
+ require.NoError(t, err)
+ q := eng.Query(params)
+ _, err = q.Exec(user.InjectOrgID(context.Background(), "fake"))
require.Equal(t, tc.err, err)
})
}
@@ -2502,15 +2468,10 @@ func TestEngine_MaxSeries(t *testing.T) {
{`avg(count_over_time({app=~"foo|bar"} |~".+bar" [1m]))`, logproto.FORWARD, false},
} {
t.Run(test.qs, func(t *testing.T) {
- q := eng.Query(LiteralParams{
- qs: test.qs,
- start: time.Unix(0, 0),
- end: time.Unix(100000, 0),
- step: 60 * time.Second,
- direction: test.direction,
- limit: 1000,
- })
- _, err := q.Exec(user.InjectOrgID(context.Background(), "fake"))
+ params, err := NewLiteralParams(test.qs, time.Unix(0, 0), time.Unix(100000, 0), 60*time.Second, 0, test.direction, 1000, nil)
+ require.NoError(t, err)
+ q := eng.Query(params)
+ _, err = q.Exec(user.InjectOrgID(context.Background(), "fake"))
if test.expectLimitErr {
require.NotNil(t, err)
require.True(t, errors.Is(err, logqlmodel.ErrLimit))
@@ -2534,15 +2495,11 @@ func TestEngine_MaxRangeInterval(t *testing.T) {
{`topk(1,rate({app=~"foo|bar"}[12h]) / (rate({app="baz"}[23h]) + rate({app="fiz"}[25h])))`, logproto.FORWARD, true},
} {
t.Run(test.qs, func(t *testing.T) {
- q := eng.Query(LiteralParams{
- qs: test.qs,
- start: time.Unix(0, 0),
- end: time.Unix(100000, 0),
- step: 60 * time.Second,
- direction: test.direction,
- limit: 1000,
- })
- _, err := q.Exec(user.InjectOrgID(context.Background(), "fake"))
+ params, err := NewLiteralParams(test.qs, time.Unix(0, 0), time.Unix(100000, 0), 60*time.Second, 0, test.direction, 1000, nil)
+ require.NoError(t, err)
+ q := eng.Query(params)
+
+ _, err = q.Exec(user.InjectOrgID(context.Background(), "fake"))
if test.expectLimitErr {
require.Error(t, err)
require.ErrorIs(t, err, logqlmodel.ErrIntervalLimit)
@@ -2605,14 +2562,10 @@ func benchmarkRangeQuery(testsize int64, b *testing.B) {
{`bottomk(2,rate(({app=~"foo|bar"} |~".+bar")[1m]))`, logproto.FORWARD},
{`bottomk(3,rate(({app=~"foo|bar"} |~".+bar")[1m])) without (app)`, logproto.FORWARD},
} {
- q := eng.Query(LiteralParams{
- qs: test.qs,
- start: start,
- end: end,
- step: 60 * time.Second,
- direction: test.direction,
- limit: 1000,
- })
+ params, err := NewLiteralParams(test.qs, start, end, 60*time.Second, 0, logproto.BACKWARD, 1000, nil)
+ require.NoError(b, err)
+ q := eng.Query(params)
+
res, err := q.Exec(user.InjectOrgID(context.Background(), "fake"))
if err != nil {
b.Fatal(err)
@@ -2640,8 +2593,13 @@ func TestHashingStability(t *testing.T) {
buf := bytes.NewBufferString("")
logger := log.NewLogfmtLogger(buf)
eng := NewEngine(EngineOpts{LogExecutingQuery: true}, getLocalQuerier(4), NoLimits, logger)
+
+ parsed, err := syntax.ParseExpr(params.QueryString())
+ require.NoError(t, err)
+ params.queryExpr = parsed
+
query := eng.Query(params)
- _, err := query.Exec(ctx)
+ _, err = query.Exec(ctx)
require.NoError(t, err)
return buf.String()
}
@@ -2668,8 +2626,8 @@ func TestHashingStability(t *testing.T) {
{`sum by(query_hash) (count_over_time({app="myapp",env="myenv"} |= "error" |= "metrics.go" | logfmt [10s]))`},
{`sum (count_over_time({app="myapp",env="myenv"} |= "error" |= "metrics.go" | logfmt [10s])) by(query_hash)`},
} {
- params.qs = test.qs
- expectedQueryHash := HashedQuery(test.qs)
+ params.queryString = test.qs
+ expectedQueryHash := util.HashedQuery(test.qs)
// check that both places will end up having the same query hash, even though they're emitting different log lines.
require.Regexp(t,
@@ -2733,6 +2691,9 @@ func newQuerierRecorder(t *testing.T, data interface{}, params interface{}) *que
if streamsIn, ok := data.([][]logproto.Stream); ok {
if paramsIn, ok2 := params.([]SelectLogParams); ok2 {
for i, p := range paramsIn {
+ p.Plan = &plan.QueryPlan{
+ AST: syntax.MustParseExpr(p.Selector),
+ }
streams[paramsID(p)] = streamsIn[i]
}
}
@@ -2742,6 +2703,9 @@ func newQuerierRecorder(t *testing.T, data interface{}, params interface{}) *que
if seriesIn, ok := data.([][]logproto.Series); ok {
if paramsIn, ok2 := params.([]SelectSampleParams); ok2 {
for i, p := range paramsIn {
+ p.Plan = &plan.QueryPlan{
+ AST: syntax.MustParseExpr(p.Selector),
+ }
series[paramsID(p)] = seriesIn[i]
}
}
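
The test updates above replace literal struct construction with `NewLiteralParams`, which parses the query string once and carries the resulting AST alongside the other parameters, so evaluation never re-parses. Below is a minimal standalone sketch of that parse-once pattern; the types and the `parse` helper are illustrative stand-ins, not Loki's actual API.

```go
package main

import (
	"errors"
	"fmt"
	"strings"
	"time"
)

// expr stands in for a parsed AST node; in the diff above this role is played
// by syntax.Expr.
type expr struct{ selector string }

// parse is a stand-in for a real parser: it fails fast on malformed input.
func parse(q string) (expr, error) {
	if !strings.HasPrefix(q, "{") {
		return expr{}, errors.New("queries must start with a stream selector")
	}
	return expr{selector: q}, nil
}

// literalParams mirrors the idea behind LiteralParams: the query string and
// its parsed expression travel together.
type literalParams struct {
	queryString string
	start, end  time.Time
	queryExpr   expr
}

func newLiteralParams(q string, start, end time.Time) (literalParams, error) {
	p := literalParams{queryString: q, start: start, end: end}
	var err error
	p.queryExpr, err = parse(q)
	return p, err
}

func main() {
	now := time.Now()
	if _, err := newLiteralParams(`{app="foo"} |= "error"`, now, now); err != nil {
		fmt.Println("rejected at construction time:", err)
		return
	}
	fmt.Println("query parsed once, AST stored with the params")
}
```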
diff --git a/pkg/logql/evaluator.go b/pkg/logql/evaluator.go
index 0c0dba2cad3d5..2d6837ef6a78a 100644
--- a/pkg/logql/evaluator.go
+++ b/pkg/logql/evaluator.go
@@ -17,6 +17,7 @@ import (
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/logql/syntax"
"github.com/grafana/loki/pkg/logqlmodel"
+ "github.com/grafana/loki/pkg/querier/plan"
"github.com/grafana/loki/pkg/util"
)
@@ -31,7 +32,7 @@ var (
// Params details the parameters associated with a loki request
type Params interface {
- Query() string
+ QueryString() string
Start() time.Time
End() time.Time
Step() time.Duration
@@ -39,6 +40,7 @@ type Params interface {
Limit() uint32
Direction() logproto.Direction
Shards() []string
+ GetExpression() syntax.Expr
}
func NewLiteralParams(
@@ -48,33 +50,41 @@ func NewLiteralParams(
direction logproto.Direction,
limit uint32,
shards []string,
-) LiteralParams {
- return LiteralParams{
- qs: qs,
- start: start,
- end: end,
- step: step,
- interval: interval,
- direction: direction,
- limit: limit,
- shards: shards,
- }
+) (LiteralParams, error) {
+ p := LiteralParams{
+ queryString: qs,
+ start: start,
+ end: end,
+ step: step,
+ interval: interval,
+ direction: direction,
+ limit: limit,
+ shards: shards,
+ }
+ var err error
+ p.queryExpr, err = syntax.ParseExpr(qs)
+ return p, err
+
}
// LiteralParams impls Params
type LiteralParams struct {
- qs string
+ queryString string
start, end time.Time
step, interval time.Duration
direction logproto.Direction
limit uint32
shards []string
+ queryExpr syntax.Expr
}
func (p LiteralParams) Copy() LiteralParams { return p }
// String impls Params
-func (p LiteralParams) Query() string { return p.qs }
+func (p LiteralParams) QueryString() string { return p.queryString }
+
+// GetExpression impls Params
+func (p LiteralParams) GetExpression() syntax.Expr { return p.queryExpr }
// Start impls Params
func (p LiteralParams) Start() time.Time { return p.start }
@@ -105,12 +115,38 @@ func GetRangeType(q Params) QueryRangeType {
return RangeType
}
+// ParamsWithExpressionOverride overrides the query expression so that the query
+// string and the expression can differ. This is useful for query planning,
+// where the plan may not match externally available LogQL syntax.
+type ParamsWithExpressionOverride struct {
+ Params
+ ExpressionOverride syntax.Expr
+}
+
+// GetExpression returns the parsed expression of the query.
+func (p ParamsWithExpressionOverride) GetExpression() syntax.Expr {
+ return p.ExpressionOverride
+}
+
+// ParamsWithShardsOverride overrides the shards. Since the backing
+// implementation of the Params interface is unknown, it is embedded and the
+// original shards are shadowed.
+type ParamsWithShardsOverride struct {
+ Params
+ ShardsOverride []string
+}
+
+// Shards returns the overriding shards.
+func (p ParamsWithShardsOverride) Shards() []string {
+ return p.ShardsOverride
+}
+
// Sortable logql contain sort or sort_desc.
func Sortable(q Params) (bool, error) {
var sortable bool
- expr, err := syntax.ParseSampleExpr(q.Query())
- if err != nil {
- return false, err
+ expr, ok := q.GetExpression().(syntax.SampleExpr)
+ if !ok {
+ return false, errors.New("only sample expression supported")
}
expr.Walk(func(e syntax.Expr) {
rangeExpr, ok := e.(*syntax.VectorAggregationExpr)
@@ -175,6 +211,9 @@ func (ev *DefaultEvaluator) NewIterator(ctx context.Context, expr syntax.LogSele
Direction: q.Direction(),
Selector: expr.String(),
Shards: q.Shards(),
+ Plan: &plan.QueryPlan{
+ AST: expr,
+ },
},
}
@@ -203,6 +242,9 @@ func (ev *DefaultEvaluator) NewStepEvaluator(
End: q.End().Add(-rangExpr.Left.Offset),
Selector: e.String(), // intentionally send the vector for reducing labels.
Shards: q.Shards(),
+ Plan: &plan.QueryPlan{
+ AST: expr,
+ },
},
})
if err != nil {
@@ -219,6 +261,9 @@ func (ev *DefaultEvaluator) NewStepEvaluator(
End: q.End().Add(-e.Left.Offset),
Selector: expr.String(),
Shards: q.Shards(),
+ Plan: &plan.QueryPlan{
+ AST: expr,
+ },
},
})
if err != nil {
@@ -480,17 +525,18 @@ func newRangeAggEvaluator(
q Params,
o time.Duration,
) (StepEvaluator, error) {
+ switch expr.Operation {
+ case syntax.OpRangeTypeAbsent:
+ iter, err := newRangeVectorIterator(
+ it, expr,
+ expr.Left.Interval.Nanoseconds(),
+ q.Step().Nanoseconds(),
+ q.Start().UnixNano(), q.End().UnixNano(), o.Nanoseconds(),
+ )
+ if err != nil {
+ return nil, err
+ }
- iter, err := newRangeVectorIterator(
- it, expr,
- expr.Left.Interval.Nanoseconds(),
- q.Step().Nanoseconds(),
- q.Start().UnixNano(), q.End().UnixNano(), o.Nanoseconds(),
- )
- if err != nil {
- return nil, err
- }
- if expr.Operation == syntax.OpRangeTypeAbsent {
absentLabels, err := absentLabels(expr)
if err != nil {
return nil, err
@@ -499,10 +545,32 @@ func newRangeAggEvaluator(
iter: iter,
lbs: absentLabels,
}, nil
+ case syntax.OpRangeTypeQuantileSketch:
+ iter := newQuantileSketchIterator(
+ it,
+ expr.Left.Interval.Nanoseconds(),
+ q.Step().Nanoseconds(),
+ q.Start().UnixNano(), q.End().UnixNano(), o.Nanoseconds(),
+ )
+
+ return &QuantileSketchStepEvaluator{
+ iter: iter,
+ }, nil
+ default:
+ iter, err := newRangeVectorIterator(
+ it, expr,
+ expr.Left.Interval.Nanoseconds(),
+ q.Step().Nanoseconds(),
+ q.Start().UnixNano(), q.End().UnixNano(), o.Nanoseconds(),
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ return &RangeVectorEvaluator{
+ iter: iter,
+ }, nil
}
- return &RangeVectorEvaluator{
- iter: iter,
- }, nil
}
type RangeVectorEvaluator struct {
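
The two override types added above lean on plain interface embedding: the wrapper satisfies `Params` through the embedded value and shadows exactly one method. A self-contained sketch of that shadowing technique follows; the interface here is a cut-down stand-in, not the real `Params`.

```go
package main

import "fmt"

type Params interface {
	QueryString() string
	Shards() []string
}

type base struct{ q string }

func (b base) QueryString() string { return b.q }
func (b base) Shards() []string    { return nil }

// shardsOverride embeds Params, so every method it does not redefine is
// forwarded to the wrapped value; only Shards is shadowed.
type shardsOverride struct {
	Params
	shards []string
}

func (o shardsOverride) Shards() []string { return o.shards }

func main() {
	var p Params = base{q: `{app="foo"}`}
	p = shardsOverride{Params: p, shards: []string{"0_of_2", "1_of_2"}}
	fmt.Println(p.QueryString(), p.Shards()) // {app="foo"} [0_of_2 1_of_2]
}
```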
diff --git a/pkg/logql/evaluator_test.go b/pkg/logql/evaluator_test.go
index 1bec3d9c67d68..e31d587252066 100644
--- a/pkg/logql/evaluator_test.go
+++ b/pkg/logql/evaluator_test.go
@@ -44,14 +44,14 @@ func TestDefaultEvaluator_DivideByZero(t *testing.T) {
}
func TestDefaultEvaluator_Sortable(t *testing.T) {
logqlSort := `sort(rate(({app=~"foo|bar"} |~".+bar")[1m])) `
- sortable, err := Sortable(LiteralParams{qs: logqlSort})
+ sortable, err := Sortable(LiteralParams{queryString: logqlSort, queryExpr: syntax.MustParseExpr(logqlSort)})
if err != nil {
t.Fatal(err)
}
require.Equal(t, true, sortable)
logqlSum := `sum(rate(({app=~"foo|bar"} |~".+bar")[1m])) `
- sortableSum, err := Sortable(LiteralParams{qs: logqlSum})
+ sortableSum, err := Sortable(LiteralParams{queryString: logqlSum, queryExpr: syntax.MustParseExpr(logqlSum)})
if err != nil {
t.Fatal(err)
}
diff --git a/pkg/logql/explain.go b/pkg/logql/explain.go
index ef161b38c8f97..4890d150f0a61 100644
--- a/pkg/logql/explain.go
+++ b/pkg/logql/explain.go
@@ -1,5 +1,9 @@
package logql
+// MaxChildrenDisplay defines the maximum number of children that should be
+// shown by explain.
+const MaxChildrenDisplay = 3
+
func (e *LiteralStepEvaluator) Explain(parent Node) {
b := parent.Child("Literal")
e.nextEv.Explain(b)
@@ -25,7 +29,7 @@ func (e *VectorStepEvaluator) Explain(parent Node) {
func (e *ConcatStepEvaluator) Explain(parent Node) {
b := parent.Child("Concat")
- if len(e.evaluators) < 3 {
+ if len(e.evaluators) < MaxChildrenDisplay {
for _, child := range e.evaluators {
child.Explain(b)
}
diff --git a/pkg/logql/explain_test.go b/pkg/logql/explain_test.go
index a54ffa5916f2c..307aa10cfa98d 100644
--- a/pkg/logql/explain_test.go
+++ b/pkg/logql/explain_test.go
@@ -28,15 +28,15 @@ func TestExplain(t *testing.T) {
defaultEv := NewDefaultEvaluator(querier, 30*time.Second)
downEv := &DownstreamEvaluator{Downstreamer: MockDownstreamer{regular}, defaultEvaluator: defaultEv}
- mapper := NewShardMapper(ConstantShards(4), nilShardMetrics)
- _, _, expr, err := mapper.Parse(query)
+ mapper := NewShardMapper(ConstantShards(4), nilShardMetrics, []string{ShardQuantileOverTime})
+ _, _, expr, err := mapper.Parse(syntax.MustParseExpr(query))
require.NoError(t, err)
params := LiteralParams{
- qs: query,
- start: time.Unix(60, 0),
- end: time.Unix(60, 0),
- limit: 1000,
+ queryString: query,
+ start: time.Unix(60, 0),
+ end: time.Unix(60, 0),
+ limit: 1000,
}
ev, err := downEv.NewStepEvaluator(ctx, downEv, expr.(syntax.SampleExpr), params)
diff --git a/pkg/logql/log/fmt.go b/pkg/logql/log/fmt.go
index e28f5a119a48b..9257834eee345 100644
--- a/pkg/logql/log/fmt.go
+++ b/pkg/logql/log/fmt.go
@@ -221,7 +221,10 @@ func (lf *LineFormatter) Process(ts int64, line []byte, lbs *LabelsBuilder) ([]b
lf.currentLine = line
lf.currentTs = ts
- if err := lf.Template.Execute(lf.buf, lbs.Map()); err != nil {
+ // the map is now taken from a pool
+ m := lbs.Map()
+ defer smp.Put(m)
+ if err := lf.Template.Execute(lf.buf, m); err != nil {
lbs.SetErr(errTemplateFormat)
lbs.SetErrorDetails(err.Error())
return line, true
@@ -380,7 +383,8 @@ func (lf *LabelsFormatter) Process(ts int64, l []byte, lbs *LabelsBuilder) ([]by
lf.currentLine = l
lf.currentTs = ts
- var data interface{}
+ var m = smp.Get()
+ defer smp.Put(m)
for _, f := range lf.formats {
if f.Rename {
v, category, ok := lbs.GetWithCategory(f.Value)
@@ -391,10 +395,10 @@ func (lf *LabelsFormatter) Process(ts int64, l []byte, lbs *LabelsBuilder) ([]by
continue
}
lf.buf.Reset()
- if data == nil {
- data = lbs.Map()
+ if len(m) == 0 {
+ lbs.IntoMap(m)
}
- if err := f.tmpl.Execute(lf.buf, data); err != nil {
+ if err := f.tmpl.Execute(lf.buf, m); err != nil {
lbs.SetErr(errTemplateFormat)
lbs.SetErrorDetails(err.Error())
continue
diff --git a/pkg/logql/log/label_filter.go b/pkg/logql/log/label_filter.go
index e3bb1a4bcd5b8..a89f324008e16 100644
--- a/pkg/logql/log/label_filter.go
+++ b/pkg/logql/log/label_filter.go
@@ -366,7 +366,7 @@ func NewStringLabelFilter(m *labels.Matcher) LabelFilterer {
return &LineFilterLabelFilter{
Matcher: m,
- filter: f,
+ Filter: f,
}
}
@@ -383,12 +383,12 @@ func (s *StringLabelFilter) RequiredLabelNames() []string {
// LineFilterLabelFilter filters the desired label using an optimized line filter
type LineFilterLabelFilter struct {
*labels.Matcher
- filter Filterer
+ Filter Filterer
}
// overrides the matcher.String() function in case there is a regexpFilter
func (s *LineFilterLabelFilter) String() string {
- if unwrappedFilter, ok := s.filter.(regexpFilter); ok {
+ if unwrappedFilter, ok := s.Filter.(regexpFilter); ok {
rStr := unwrappedFilter.String()
str := fmt.Sprintf("%s%s`%s`", s.Matcher.Name, s.Matcher.Type, rStr)
return str
@@ -398,7 +398,7 @@ func (s *LineFilterLabelFilter) String() string {
func (s *LineFilterLabelFilter) Process(_ int64, line []byte, lbs *LabelsBuilder) ([]byte, bool) {
v := labelValue(s.Name, lbs)
- return line, s.filter.Filter(unsafeGetBytes(v))
+ return line, s.Filter.Filter(unsafeGetBytes(v))
}
func (s *LineFilterLabelFilter) isLabelFilterer() {}
diff --git a/pkg/logql/log/labels.go b/pkg/logql/log/labels.go
index 7bc313c8c302c..567c446b8c008 100644
--- a/pkg/logql/log/labels.go
+++ b/pkg/logql/log/labels.go
@@ -3,6 +3,7 @@ package log
import (
"fmt"
"sort"
+ "sync"
"github.com/prometheus/prometheus/model/labels"
@@ -437,6 +438,52 @@ func (b *LabelsBuilder) UnsortedLabels(buf labels.Labels, categories ...LabelCat
return buf
}
+type stringMapPool struct {
+ pool sync.Pool
+}
+
+func newStringMapPool() *stringMapPool {
+ return &stringMapPool{
+ pool: sync.Pool{
+ New: func() interface{} {
+ return make(map[string]string)
+ },
+ },
+ }
+}
+
+func (s *stringMapPool) Get() map[string]string {
+ m := s.pool.Get().(map[string]string)
+ clear(m)
+ return m
+}
+
+func (s *stringMapPool) Put(m map[string]string) {
+ s.pool.Put(m)
+}
+
+var smp = newStringMapPool()
+
+// IntoMap puts label entries into an existing map; it is up to the caller to
+// properly clear the map if it is going to be reused.
+func (b *LabelsBuilder) IntoMap(m map[string]string) {
+ if !b.hasDel() && !b.hasAdd() && !b.HasErr() {
+ if b.baseMap == nil {
+ b.baseMap = b.base.Map()
+ for k, v := range b.baseMap {
+ m[k] = v
+ }
+ }
+ return
+ }
+ b.buf = b.UnsortedLabels(b.buf)
+ // todo should we also cache maps since limited by the result ?
+ // Maps also don't create a copy of the labels.
+ for _, l := range b.buf {
+ m[l.Name] = l.Value
+ }
+}
+
func (b *LabelsBuilder) Map() map[string]string {
if !b.hasDel() && !b.hasAdd() && !b.HasErr() {
if b.baseMap == nil {
@@ -447,7 +494,8 @@ func (b *LabelsBuilder) Map() map[string]string {
b.buf = b.UnsortedLabels(b.buf)
// todo should we also cache maps since limited by the result ?
// Maps also don't create a copy of the labels.
- res := make(map[string]string, len(b.buf))
+ res := smp.Get()
+ clear(res)
for _, l := range b.buf {
res[l.Name] = l.Value
}
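
The `stringMapPool` introduced in labels.go amortizes map allocations across `Process` calls by handing out cleared maps from a `sync.Pool`; callers are expected to return them with `Put`. A standalone sketch of the same idea using only the standard library (`clear` on maps requires Go 1.21+):

```go
package main

import (
	"fmt"
	"sync"
)

type stringMapPool struct{ pool sync.Pool }

func newStringMapPool() *stringMapPool {
	return &stringMapPool{pool: sync.Pool{
		New: func() interface{} { return make(map[string]string) },
	}}
}

// Get returns an empty map; clear keeps previously pooled entries from
// leaking into the next caller.
func (s *stringMapPool) Get() map[string]string {
	m := s.pool.Get().(map[string]string)
	clear(m)
	return m
}

func (s *stringMapPool) Put(m map[string]string) { s.pool.Put(m) }

func main() {
	p := newStringMapPool()
	m := p.Get()
	m["app"] = "foo"
	p.Put(m) // the caller is responsible for returning the map when done
	fmt.Println(len(p.Get())) // 0: maps come back cleared
}
```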
diff --git a/pkg/logql/log/metrics_extraction.go b/pkg/logql/log/metrics_extraction.go
index cd4ef3b8e7af7..5dce57af8222c 100644
--- a/pkg/logql/log/metrics_extraction.go
+++ b/pkg/logql/log/metrics_extraction.go
@@ -1,6 +1,7 @@
package log
import (
+ "context"
"sort"
"strconv"
"time"
@@ -38,6 +39,12 @@ type StreamSampleExtractor interface {
ProcessString(ts int64, line string, structuredMetadata ...labels.Label) (float64, LabelsResult, bool)
}
+// SampleExtractorWrapper takes an extractor, wraps it in some desired
+// functionality, and returns a new sample extractor.
+type SampleExtractorWrapper interface {
+ Wrap(ctx context.Context, extractor SampleExtractor, query, tenant string) SampleExtractor
+}
+
type lineSampleExtractor struct {
Stage
LineExtractor
diff --git a/pkg/logql/log/pipeline.go b/pkg/logql/log/pipeline.go
index 31665e7b303ae..df7f4ba65761c 100644
--- a/pkg/logql/log/pipeline.go
+++ b/pkg/logql/log/pipeline.go
@@ -1,6 +1,7 @@
package log
import (
+ "context"
"reflect"
"sync"
"unsafe"
@@ -35,6 +36,12 @@ type Stage interface {
RequiredLabelNames() []string
}
+// PipelineWrapper takes a pipeline, wraps it in some desired functionality and
+// returns a new pipeline
+type PipelineWrapper interface {
+ Wrap(ctx context.Context, pipeline Pipeline, query, tenant string) Pipeline
+}
+
// NewNoopPipeline creates a pipelines that does not process anything and returns log streams as is.
func NewNoopPipeline() Pipeline {
return &noopPipeline{
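
`PipelineWrapper` and `SampleExtractorWrapper` are decorator hooks: an implementation receives the built pipeline plus request context and returns a replacement that typically delegates to the original. The following sketch shows that decorator shape under those assumptions; the one-method `Pipeline` interface and the counting wrapper are illustrative only.

```go
package main

import (
	"context"
	"fmt"
	"strings"
)

type Pipeline interface {
	Process(line string) (string, bool)
}

type PipelineWrapper interface {
	Wrap(ctx context.Context, p Pipeline, query, tenant string) Pipeline
}

type upper struct{}

func (upper) Process(line string) (string, bool) { return strings.ToUpper(line), true }

// countingWrapper decorates any Pipeline and counts processed lines.
type countingWrapper struct{ seen *int }

func (w countingWrapper) Wrap(_ context.Context, p Pipeline, _, _ string) Pipeline {
	return countedPipeline{inner: p, seen: w.seen}
}

type countedPipeline struct {
	inner Pipeline
	seen  *int
}

func (c countedPipeline) Process(line string) (string, bool) {
	*c.seen++
	return c.inner.Process(line)
}

func main() {
	var n int
	var p Pipeline = upper{}
	p = countingWrapper{seen: &n}.Wrap(context.Background(), p, `{app="foo"}`, "tenant-a")
	out, _ := p.Process("error: disk full")
	fmt.Println(out, n) // ERROR: DISK FULL 1
}
```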
diff --git a/pkg/logql/metrics.go b/pkg/logql/metrics.go
index 3ba3a9c61535d..9db8ee96e4ed4 100644
--- a/pkg/logql/metrics.go
+++ b/pkg/logql/metrics.go
@@ -2,7 +2,6 @@ package logql
import (
"context"
- "hash/fnv"
"strconv"
"strings"
"time"
@@ -19,6 +18,7 @@ import (
"github.com/grafana/loki/pkg/logqlmodel"
logql_stats "github.com/grafana/loki/pkg/logqlmodel/stats"
"github.com/grafana/loki/pkg/querier/astmapper"
+ "github.com/grafana/loki/pkg/util"
"github.com/grafana/loki/pkg/util/constants"
"github.com/grafana/loki/pkg/util/httpreq"
util_log "github.com/grafana/loki/pkg/util/log"
@@ -98,7 +98,7 @@ func RecordRangeAndInstantQueryMetrics(
latencyType = latencyTypeFast
returnedLines = 0
)
- queryType, err := QueryType(p.Query())
+ queryType, err := QueryType(p.GetExpression())
if err != nil {
level.Warn(logger).Log("msg", "error parsing query type", "err", err)
}
@@ -119,8 +119,8 @@ func RecordRangeAndInstantQueryMetrics(
logValues = append(logValues, []interface{}{
"latency", latencyType, // this can be used to filter log lines.
- "query", p.Query(),
- "query_hash", HashedQuery(p.Query()),
+ "query", p.QueryString(),
+ "query_hash", util.HashedQuery(p.QueryString()),
"query_type", queryType,
"range_type", rt,
"length", p.End().Sub(p.Start()),
@@ -187,12 +187,6 @@ func RecordRangeAndInstantQueryMetrics(
recordUsageStats(queryType, stats)
}
-func HashedQuery(query string) uint32 {
- h := fnv.New32()
- _, _ = h.Write([]byte(query))
- return h.Sum32()
-}
-
func RecordLabelQueryMetrics(
ctx context.Context,
log log.Logger,
@@ -225,7 +219,7 @@ func RecordLabelQueryMetrics(
"status", status,
"label", label,
"query", query,
- "query_hash", HashedQuery(query),
+ "query_hash", util.HashedQuery(query),
"total_entries", stats.Summary.TotalEntriesReturned,
)
@@ -276,7 +270,7 @@ func RecordSeriesQueryMetrics(ctx context.Context, log log.Logger, start, end ti
"duration", time.Duration(int64(stats.Summary.ExecTime*float64(time.Second))),
"status", status,
"match", PrintMatches(match),
- "query_hash", HashedQuery(PrintMatches(match)),
+ "query_hash", util.HashedQuery(PrintMatches(match)),
"total_entries", stats.Summary.TotalEntriesReturned)
if shard != nil {
@@ -316,7 +310,7 @@ func RecordStatsQueryMetrics(ctx context.Context, log log.Logger, start, end tim
"duration", time.Duration(int64(stats.Summary.ExecTime*float64(time.Second))),
"status", status,
"query", query,
- "query_hash", HashedQuery(query),
+ "query_hash", util.HashedQuery(query),
"total_entries", stats.Summary.TotalEntriesReturned)
level.Info(logger).Log(logValues...)
@@ -346,7 +340,7 @@ func RecordVolumeQueryMetrics(ctx context.Context, log log.Logger, start, end ti
"latency", latencyType,
"query_type", queryType,
"query", query,
- "query_hash", HashedQuery(query),
+ "query_hash", util.HashedQuery(query),
"start", start.Format(time.RFC3339Nano),
"end", end.Format(time.RFC3339Nano),
"start_delta", time.Since(start),
@@ -379,11 +373,7 @@ func recordUsageStats(queryType string, stats logql_stats.Result) {
}
}
-func QueryType(query string) (string, error) {
- expr, err := syntax.ParseExpr(query)
- if err != nil {
- return "", err
- }
+func QueryType(expr syntax.Expr) (string, error) {
switch e := expr.(type) {
case syntax.SampleExpr:
return QueryTypeMetric, nil
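
`HashedQuery` moves out of `logql` into the shared `util` package so other call sites can reuse it; the hash itself is unchanged. As the removed lines above show, it is a 32-bit FNV-1 digest of the raw query string, used only to correlate log lines:

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// hashedQuery mirrors the relocated helper: a 32-bit FNV-1 hash of the query.
func hashedQuery(query string) uint32 {
	h := fnv.New32()
	_, _ = h.Write([]byte(query))
	return h.Sum32()
}

func main() {
	a := hashedQuery(`{app="foo"} |= "error"`)
	b := hashedQuery(`{app="foo"} |= "metrics.go"`)
	fmt.Println(a != b) // different queries hash to different values
}
```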
diff --git a/pkg/logql/metrics_test.go b/pkg/logql/metrics_test.go
index 950a16bb39a73..6d07040bb802a 100644
--- a/pkg/logql/metrics_test.go
+++ b/pkg/logql/metrics_test.go
@@ -16,38 +16,35 @@ import (
"github.com/uber/jaeger-client-go"
"github.com/grafana/loki/pkg/logproto"
+ "github.com/grafana/loki/pkg/logql/syntax"
"github.com/grafana/loki/pkg/logqlmodel"
"github.com/grafana/loki/pkg/logqlmodel/stats"
+ "github.com/grafana/loki/pkg/util"
"github.com/grafana/loki/pkg/util/httpreq"
util_log "github.com/grafana/loki/pkg/util/log"
)
func TestQueryType(t *testing.T) {
tests := []struct {
- name string
- query string
- want string
- wantErr bool
+ name string
+ query string
+ want string
}{
- {"bad", "ddd", "", true},
- {"limited", `{app="foo"}`, QueryTypeLimited, false},
- {"limited multi label", `{app="foo" ,fuzz=~"foo"}`, QueryTypeLimited, false},
- {"limited with parser", `{app="foo" ,fuzz=~"foo"} | logfmt`, QueryTypeLimited, false},
- {"filter", `{app="foo"} |= "foo"`, QueryTypeFilter, false},
- {"filter string extracted label", `{app="foo"} | json | foo="a"`, QueryTypeFilter, false},
- {"filter duration", `{app="foo"} | json | duration > 5s`, QueryTypeFilter, false},
- {"metrics", `rate({app="foo"} |= "foo"[5m])`, QueryTypeMetric, false},
- {"metrics binary", `rate({app="foo"} |= "foo"[5m]) + count_over_time({app="foo"} |= "foo"[5m]) / rate({app="foo"} |= "foo"[5m]) `, QueryTypeMetric, false},
- {"filters", `{app="foo"} |= "foo" |= "f" != "b"`, QueryTypeFilter, false},
- {"filters and labels filters", `{app="foo"} |= "foo" |= "f" != "b" | json | a > 5`, QueryTypeFilter, false},
+ {"limited", `{app="foo"}`, QueryTypeLimited},
+ {"limited multi label", `{app="foo" ,fuzz=~"foo"}`, QueryTypeLimited},
+ {"limited with parser", `{app="foo" ,fuzz=~"foo"} | logfmt`, QueryTypeLimited},
+ {"filter", `{app="foo"} |= "foo"`, QueryTypeFilter},
+ {"filter string extracted label", `{app="foo"} | json | foo="a"`, QueryTypeFilter},
+ {"filter duration", `{app="foo"} | json | duration > 5s`, QueryTypeFilter},
+ {"metrics", `rate({app="foo"} |= "foo"[5m])`, QueryTypeMetric},
+ {"metrics binary", `rate({app="foo"} |= "foo"[5m]) + count_over_time({app="foo"} |= "foo"[5m]) / rate({app="foo"} |= "foo"[5m]) `, QueryTypeMetric},
+ {"filters", `{app="foo"} |= "foo" |= "f" != "b"`, QueryTypeFilter},
+ {"filters and labels filters", `{app="foo"} |= "foo" |= "f" != "b" | json | a > 5`, QueryTypeFilter},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- got, err := QueryType(tt.query)
- if (err != nil) != tt.wantErr {
- t.Errorf("QueryType() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
+ got, err := QueryType(syntax.MustParseExpr(tt.query))
+ require.NoError(t, err)
if got != tt.want {
t.Errorf("QueryType() = %v, want %v", got, tt.want)
}
@@ -68,12 +65,13 @@ func TestLogSlowQuery(t *testing.T) {
ctx = context.WithValue(ctx, httpreq.QueryTagsHTTPHeader, "Source=logvolhist,Feature=Beta")
RecordRangeAndInstantQueryMetrics(ctx, util_log.Logger, LiteralParams{
- qs: `{foo="bar"} |= "buzz"`,
- direction: logproto.BACKWARD,
- end: now,
- start: now.Add(-1 * time.Hour),
- limit: 1000,
- step: time.Minute,
+ queryString: `{foo="bar"} |= "buzz"`,
+ direction: logproto.BACKWARD,
+ end: now,
+ start: now.Add(-1 * time.Hour),
+ limit: 1000,
+ step: time.Minute,
+ queryExpr: syntax.MustParseExpr(`{foo="bar"} |= "buzz"`),
}, "200", stats.Result{
Summary: stats.Summary{
BytesProcessedPerSecond: 100000,
@@ -191,11 +189,11 @@ func Test_testToKeyValues(t *testing.T) {
}
func TestQueryHashing(t *testing.T) {
- h1 := HashedQuery(`{app="myapp",env="myenv"} |= "error" |= "metrics.go" |= logfmt`)
- h2 := HashedQuery(`{app="myapp",env="myenv"} |= "error" |= logfmt |= "metrics.go"`)
+ h1 := util.HashedQuery(`{app="myapp",env="myenv"} |= "error" |= "metrics.go" |= logfmt`)
+ h2 := util.HashedQuery(`{app="myapp",env="myenv"} |= "error" |= logfmt |= "metrics.go"`)
// check that it capture differences of order.
require.NotEqual(t, h1, h2)
- h3 := HashedQuery(`{app="myapp",env="myenv"} |= "error" |= "metrics.go" |= logfmt`)
+ h3 := util.HashedQuery(`{app="myapp",env="myenv"} |= "error" |= "metrics.go" |= logfmt`)
// check that it evaluate same queries as same hashes, even if evaluated at different timestamps.
require.Equal(t, h1, h3)
}
diff --git a/pkg/logql/optimize.go b/pkg/logql/optimize.go
index 1f00153e18b87..2f9c80a64f918 100644
--- a/pkg/logql/optimize.go
+++ b/pkg/logql/optimize.go
@@ -8,7 +8,7 @@ func optimizeSampleExpr(expr syntax.SampleExpr) (syntax.SampleExpr, error) {
// we skip sharding AST for now, it's not easy to clone them since they are not part of the language.
expr.Walk(func(e syntax.Expr) {
switch e.(type) {
- case *ConcatSampleExpr, *DownstreamSampleExpr:
+ case *ConcatSampleExpr, *DownstreamSampleExpr, *QuantileSketchEvalExpr, *QuantileSketchMergeExpr:
skip = true
return
}
@@ -16,9 +16,7 @@ func optimizeSampleExpr(expr syntax.SampleExpr) (syntax.SampleExpr, error) {
if skip {
return expr, nil
}
- // clone the expr.
- q := expr.String()
- expr, err := syntax.ParseSampleExpr(q)
+ expr, err := syntax.Clone[syntax.SampleExpr](expr)
if err != nil {
return nil, err
}
diff --git a/pkg/logql/quantile_over_time_sketch.go b/pkg/logql/quantile_over_time_sketch.go
new file mode 100644
index 0000000000000..94aea83dcd90e
--- /dev/null
+++ b/pkg/logql/quantile_over_time_sketch.go
@@ -0,0 +1,413 @@
+package logql
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/prometheus/prometheus/promql"
+ promql_parser "github.com/prometheus/prometheus/promql/parser"
+
+ "github.com/grafana/loki/pkg/iter"
+ "github.com/grafana/loki/pkg/logproto"
+ "github.com/grafana/loki/pkg/logql/sketch"
+ "github.com/grafana/loki/pkg/logqlmodel"
+)
+
+const (
+ QuantileSketchMatrixType = "QuantileSketchMatrix"
+)
+
+type ProbabilisticQuantileVector []ProbabilisticQuantileSample
+type ProbabilisticQuantileMatrix []ProbabilisticQuantileVector
+
+func (q ProbabilisticQuantileVector) Merge(right ProbabilisticQuantileVector) (ProbabilisticQuantileVector, error) {
+ // labels hash to vector index map
+ groups := make(map[uint64]int)
+ for i, sample := range q {
+ groups[sample.Metric.Hash()] = i
+ }
+
+ for _, sample := range right {
+ i, ok := groups[sample.Metric.Hash()]
+ if !ok {
+ q = append(q, sample)
+ continue
+ }
+
+ _, err := q[i].F.Merge(sample.F)
+ if err != nil {
+ return q, err
+ }
+ }
+
+ return q, nil
+}
+
+func (ProbabilisticQuantileVector) SampleVector() promql.Vector {
+ return promql.Vector{}
+}
+
+func (q ProbabilisticQuantileVector) QuantileSketchVec() ProbabilisticQuantileVector {
+ return q
+}
+
+func (q ProbabilisticQuantileVector) ToProto() *logproto.QuantileSketchVector {
+ samples := make([]*logproto.QuantileSketchSample, len(q))
+ for i, sample := range q {
+ samples[i] = sample.ToProto()
+ }
+ return &logproto.QuantileSketchVector{Samples: samples}
+}
+
+func ProbabilisticQuantileVectorFromProto(proto *logproto.QuantileSketchVector) (ProbabilisticQuantileVector, error) {
+ out := make([]ProbabilisticQuantileSample, len(proto.Samples))
+ var s ProbabilisticQuantileSample
+ var err error
+ for i, sample := range proto.Samples {
+ s, err = probabilisticQuantileSampleFromProto(sample)
+ if err != nil {
+ return ProbabilisticQuantileVector{}, err
+ }
+ out[i] = s
+ }
+ return out, nil
+}
+
+func (ProbabilisticQuantileMatrix) String() string {
+ return "QuantileSketchMatrix()"
+}
+
+func (ProbabilisticQuantileMatrix) Type() promql_parser.ValueType { return QuantileSketchMatrixType }
+
+func (m ProbabilisticQuantileMatrix) ToProto() *logproto.QuantileSketchMatrix {
+ values := make([]*logproto.QuantileSketchVector, len(m))
+ for i, vec := range m {
+ values[i] = vec.ToProto()
+ }
+ return &logproto.QuantileSketchMatrix{Values: values}
+}
+
+func ProbabilisticQuantileMatrixFromProto(proto *logproto.QuantileSketchMatrix) (ProbabilisticQuantileMatrix, error) {
+ out := make([]ProbabilisticQuantileVector, len(proto.Values))
+ var s ProbabilisticQuantileVector
+ var err error
+ for i, v := range proto.Values {
+ s, err = ProbabilisticQuantileVectorFromProto(v)
+ if err != nil {
+ return ProbabilisticQuantileMatrix{}, err
+ }
+ out[i] = s
+ }
+ return out, nil
+}
+
+type QuantileSketchStepEvaluator struct {
+ iter RangeVectorIterator
+
+ err error
+}
+
+func (e *QuantileSketchStepEvaluator) Next() (bool, int64, StepResult) {
+ next := e.iter.Next()
+ if !next {
+ return false, 0, ProbabilisticQuantileVector{}
+ }
+ ts, r := e.iter.At()
+ vec := r.QuantileSketchVec()
+ for _, s := range vec {
+ // Errors are not allowed in metrics unless they've been specifically requested.
+ if s.Metric.Has(logqlmodel.ErrorLabel) && s.Metric.Get(logqlmodel.PreserveErrorLabel) != "true" {
+ e.err = logqlmodel.NewPipelineErr(s.Metric)
+ return false, 0, ProbabilisticQuantileVector{}
+ }
+ }
+ return true, ts, vec
+}
+
+func (e *QuantileSketchStepEvaluator) Close() error { return e.iter.Close() }
+
+func (e *QuantileSketchStepEvaluator) Error() error {
+ if e.err != nil {
+ return e.err
+ }
+ return e.iter.Error()
+}
+
+func (e *QuantileSketchStepEvaluator) Explain(parent Node) {
+ parent.Child("QuantileSketch")
+}
+
+func newQuantileSketchIterator(
+ it iter.PeekingSampleIterator,
+ selRange, step, start, end, offset int64) RangeVectorIterator {
+ inner := &batchRangeVectorIterator{
+ iter: it,
+ step: step,
+ end: end,
+ selRange: selRange,
+ metrics: map[string]labels.Labels{},
+ window: map[string]*promql.Series{},
+ agg: nil,
+ current: start - step, // first loop iteration will set it to start
+ offset: offset,
+ }
+ return &quantileSketchBatchRangeVectorIterator{
+ batchRangeVectorIterator: inner,
+ }
+}
+
+//batch
+
+type ProbabilisticQuantileSample struct {
+ T int64
+ F sketch.QuantileSketch
+
+ Metric labels.Labels
+}
+
+func (q ProbabilisticQuantileSample) ToProto() *logproto.QuantileSketchSample {
+ metric := make([]*logproto.LabelPair, len(q.Metric))
+ for i, m := range q.Metric {
+ metric[i] = &logproto.LabelPair{Name: m.Name, Value: m.Value}
+ }
+
+ sketch := q.F.ToProto()
+
+ return &logproto.QuantileSketchSample{
+ F: sketch,
+ TimestampMs: q.T,
+ Metric: metric,
+ }
+}
+
+func probabilisticQuantileSampleFromProto(proto *logproto.QuantileSketchSample) (ProbabilisticQuantileSample, error) {
+ s, err := sketch.QuantileSketchFromProto(proto.F)
+ if err != nil {
+ return ProbabilisticQuantileSample{}, err
+ }
+ out := ProbabilisticQuantileSample{
+ T: proto.TimestampMs,
+ F: s,
+ Metric: make(labels.Labels, len(proto.Metric)),
+ }
+
+ for i, p := range proto.Metric {
+ out.Metric[i] = labels.Label{Name: p.Name, Value: p.Value}
+ }
+
+ return out, nil
+}
+
+type quantileSketchBatchRangeVectorIterator struct {
+ *batchRangeVectorIterator
+ at []ProbabilisticQuantileSample
+}
+
+func (r *quantileSketchBatchRangeVectorIterator) At() (int64, StepResult) {
+ if r.at == nil {
+ r.at = make([]ProbabilisticQuantileSample, 0, len(r.window))
+ }
+ r.at = r.at[:0]
+ // convert ts from nanoseconds to milliseconds as the iterator works with nanoseconds
+ ts := r.current/1e+6 + r.offset/1e+6
+ for _, series := range r.window {
+ r.at = append(r.at, ProbabilisticQuantileSample{
+ F: r.agg(series.Floats),
+ T: ts,
+ Metric: series.Metric,
+ })
+ }
+ return ts, ProbabilisticQuantileVector(r.at)
+}
+
+func (r *quantileSketchBatchRangeVectorIterator) agg(samples []promql.FPoint) sketch.QuantileSketch {
+ s := sketch.NewDDSketch()
+ for _, v := range samples {
+ // The sketch from the underlying sketch package we are using
+ // cannot return an error when calling Add.
+ s.Add(v.F) //nolint:errcheck
+ }
+ return s
+}
+
+// JoinQuantileSketchVector joins the results from stepEvaluator into a ProbabilisticQuantileMatrix.
+func JoinQuantileSketchVector(next bool, r StepResult, stepEvaluator StepEvaluator) (promql_parser.Value, error) {
+ vec := r.QuantileSketchVec()
+ if stepEvaluator.Error() != nil {
+ return nil, stepEvaluator.Error()
+ }
+
+ result := make([]ProbabilisticQuantileVector, 0)
+
+ for next {
+ result = append(result, vec)
+
+ next, _, r = stepEvaluator.Next()
+ vec = r.QuantileSketchVec()
+ if stepEvaluator.Error() != nil {
+ return nil, stepEvaluator.Error()
+ }
+ }
+
+ return ProbabilisticQuantileMatrix(result), stepEvaluator.Error()
+}
+
+// QuantileSketchMatrixStepEvaluator steps through a matrix of quantile sketch
+// vectors, i.e. t-digest or DDSketch structures, one per time step.
+type QuantileSketchMatrixStepEvaluator struct {
+ start, end, ts time.Time
+ step time.Duration
+ m ProbabilisticQuantileMatrix
+}
+
+func NewQuantileSketchMatrixStepEvaluator(m ProbabilisticQuantileMatrix, params Params) *QuantileSketchMatrixStepEvaluator {
+ var (
+ start = params.Start()
+ end = params.End()
+ step = params.Step()
+ )
+ return &QuantileSketchMatrixStepEvaluator{
+ start: start,
+ end: end,
+ ts: start.Add(-step), // will be corrected on first Next() call
+ step: step,
+ m: m,
+ }
+}
+
+func (m *QuantileSketchMatrixStepEvaluator) Next() (bool, int64, StepResult) {
+ m.ts = m.ts.Add(m.step)
+ if m.ts.After(m.end) {
+ return false, 0, nil
+ }
+
+ ts := m.ts.UnixNano() / int64(time.Millisecond)
+
+ if len(m.m) == 0 {
+ return false, 0, nil
+ }
+
+ vec := m.m[0]
+
+ // Reset for next step
+ m.m = m.m[1:]
+
+ return true, ts, vec
+}
+
+func (*QuantileSketchMatrixStepEvaluator) Close() error { return nil }
+
+func (*QuantileSketchMatrixStepEvaluator) Error() error { return nil }
+
+func (*QuantileSketchMatrixStepEvaluator) Explain(parent Node) {
+ parent.Child("QuantileSketchMatrix")
+}
+
+// QuantileSketchMergeStepEvaluator merges multiple quantile sketches into one for each
+// step.
+type QuantileSketchMergeStepEvaluator struct {
+ evaluators []StepEvaluator
+ err error
+}
+
+func NewQuantileSketchMergeStepEvaluator(evaluators []StepEvaluator) *QuantileSketchMergeStepEvaluator {
+ return &QuantileSketchMergeStepEvaluator{
+ evaluators: evaluators,
+ err: nil,
+ }
+}
+
+func (e *QuantileSketchMergeStepEvaluator) Next() (bool, int64, StepResult) {
+ ok, ts, r := e.evaluators[0].Next()
+ var cur ProbabilisticQuantileVector
+ if ok {
+ cur = r.QuantileSketchVec()
+ }
+
+ if len(e.evaluators) == 1 {
+ return ok, ts, cur
+ }
+
+ for _, eval := range e.evaluators[1:] {
+ ok, nextTs, vec := eval.Next()
+ if ok {
+ if cur == nil {
+ cur = vec.QuantileSketchVec()
+ } else {
+ if ts != nextTs {
+ e.err = fmt.Errorf("timestamps of sketches differ: %d!=%d", ts, nextTs)
+ return false, 0, nil
+ }
+
+ _, e.err = cur.Merge(vec.QuantileSketchVec())
+ if e.err != nil {
+ return false, 0, nil
+ }
+ }
+ }
+ }
+
+ return ok, ts, cur
+}
+
+func (*QuantileSketchMergeStepEvaluator) Close() error { return nil }
+
+func (e *QuantileSketchMergeStepEvaluator) Error() error { return e.err }
+
+func (e *QuantileSketchMergeStepEvaluator) Explain(parent Node) {
+ b := parent.Child("QuantileSketchMerge")
+ if len(e.evaluators) < MaxChildrenDisplay {
+ for _, child := range e.evaluators {
+ child.Explain(b)
+ }
+ } else {
+ e.evaluators[0].Explain(b)
+ b.Child("...")
+ e.evaluators[len(e.evaluators)-1].Explain(b)
+ }
+}
+
+// QuantileSketchVectorStepEvaluator evaluates a quantile sketch into a
+// promql.Vector.
+type QuantileSketchVectorStepEvaluator struct {
+ inner StepEvaluator
+ quantile float64
+}
+
+var _ StepEvaluator = NewQuantileSketchVectorStepEvaluator(nil, 0)
+
+func NewQuantileSketchVectorStepEvaluator(inner StepEvaluator, quantile float64) *QuantileSketchVectorStepEvaluator {
+ return &QuantileSketchVectorStepEvaluator{
+ inner: inner,
+ quantile: quantile,
+ }
+}
+
+func (e *QuantileSketchVectorStepEvaluator) Next() (bool, int64, StepResult) {
+ ok, ts, r := e.inner.Next()
+ quantileSketchVec := r.QuantileSketchVec()
+
+ vec := make(promql.Vector, len(quantileSketchVec))
+
+ for i, quantileSketch := range quantileSketchVec {
+ f, _ := quantileSketch.F.Quantile(e.quantile)
+
+ vec[i] = promql.Sample{
+ T: quantileSketch.T,
+ F: f,
+ Metric: quantileSketch.Metric,
+ }
+ }
+
+ return ok, ts, SampleVector(vec)
+}
+
+func (*QuantileSketchVectorStepEvaluator) Close() error { return nil }
+
+func (*QuantileSketchVectorStepEvaluator) Error() error { return nil }
+
+func (e *QuantileSketchVectorStepEvaluator) Explain(parent Node) {
+ b := parent.Child("QuantileSketchVector")
+ e.inner.Explain(b)
+}
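
`ProbabilisticQuantileVector.Merge` above pairs up samples from two shards by label hash, folds the right-hand sketch into the matching left-hand one, and appends anything without a match. Here is a self-contained sketch of that merge-by-key pattern with a trivial additive "sketch" standing in for DDSketch; names and types are illustrative.

```go
package main

import "fmt"

// counter stands in for a mergeable sketch: merging just sums the counts.
type counter struct{ n int }

func (c *counter) Merge(other *counter) { c.n += other.n }

type sample struct {
	key string // stands in for the label set's hash
	f   *counter
}

// merge mirrors the Merge method: index the left vector by key, fold in
// matching right-hand samples, and append the rest.
func merge(left, right []sample) []sample {
	idx := make(map[string]int, len(left))
	for i, s := range left {
		idx[s.key] = i
	}
	for _, s := range right {
		if i, ok := idx[s.key]; ok {
			left[i].f.Merge(s.f)
			continue
		}
		left = append(left, s)
	}
	return left
}

func main() {
	left := []sample{{key: `{app="foo"}`, f: &counter{n: 3}}}
	right := []sample{
		{key: `{app="foo"}`, f: &counter{n: 2}},
		{key: `{app="bar"}`, f: &counter{n: 1}},
	}
	for _, s := range merge(left, right) {
		fmt.Println(s.key, s.f.n) // {app="foo"} 5, then {app="bar"} 1
	}
}
```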
diff --git a/pkg/logql/quantile_over_time_sketch_test.go b/pkg/logql/quantile_over_time_sketch_test.go
new file mode 100644
index 0000000000000..9a9ff1b603ebf
--- /dev/null
+++ b/pkg/logql/quantile_over_time_sketch_test.go
@@ -0,0 +1,109 @@
+package logql
+
+import (
+ "errors"
+ "testing"
+
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/stretchr/testify/require"
+
+ "github.com/grafana/loki/pkg/logproto"
+ "github.com/grafana/loki/pkg/logql/sketch"
+ "github.com/grafana/loki/pkg/logqlmodel"
+)
+
+func TestProbabilisticMQuantileMatrixSerialization(t *testing.T) {
+ emptySketch := sketch.NewDDSketch()
+ ddsketchBytes := make([]byte, 0)
+ emptySketch.Encode(&ddsketchBytes, false)
+
+ matrix := ProbabilisticQuantileMatrix([]ProbabilisticQuantileVector{
+ []ProbabilisticQuantileSample{
+ {T: 0, F: emptySketch, Metric: []labels.Label{{Name: "foo", Value: "bar"}}},
+ },
+ })
+
+ proto := &logproto.QuantileSketchMatrix{
+ Values: []*logproto.QuantileSketchVector{
+ {
+ Samples: []*logproto.QuantileSketchSample{
+ {
+ TimestampMs: 0,
+ F: &logproto.QuantileSketch{Sketch: &logproto.QuantileSketch_Ddsketch{Ddsketch: ddsketchBytes}},
+ Metric: []*logproto.LabelPair{{Name: "foo", Value: "bar"}},
+ },
+ },
+ },
+ },
+ }
+
+ actual := matrix.ToProto()
+ require.Equal(t, proto, actual)
+
+ _, err := ProbabilisticQuantileMatrixFromProto(actual)
+ require.NoError(t, err)
+}
+
+func TestQuantileSketchStepEvaluatorError(t *testing.T) {
+ iter := errorRangeVectorIterator{
+ result: ProbabilisticQuantileVector([]ProbabilisticQuantileSample{
+ {T: 43, F: nil, Metric: labels.Labels{{Name: logqlmodel.ErrorLabel, Value: "my error"}}},
+ }),
+ }
+ ev := QuantileSketchStepEvaluator{
+ iter: iter,
+ }
+ ok, _, _ := ev.Next()
+ require.False(t, ok)
+
+ err := ev.Error()
+ require.ErrorContains(t, err, "my error")
+}
+
+func TestJoinQuantileSketchVectorError(t *testing.T) {
+ result := ProbabilisticQuantileVector{}
+ ev := errorStepEvaluator{
+ err: errors.New("could not evaluate"),
+ }
+ _, err := JoinQuantileSketchVector(true, result, ev)
+ require.ErrorContains(t, err, "could not evaluate")
+}
+
+type errorRangeVectorIterator struct {
+ err error
+ result StepResult
+}
+
+func (e errorRangeVectorIterator) Next() bool {
+ return e.result != nil
+}
+
+func (e errorRangeVectorIterator) At() (int64, StepResult) {
+ return 0, e.result
+}
+
+func (errorRangeVectorIterator) Close() error {
+ return nil
+}
+
+func (e errorRangeVectorIterator) Error() error {
+ return e.err
+}
+
+type errorStepEvaluator struct {
+ err error
+}
+
+func (errorStepEvaluator) Next() (ok bool, ts int64, r StepResult) {
+ return false, 0, nil
+}
+
+func (errorStepEvaluator) Close() error {
+ return nil
+}
+
+func (e errorStepEvaluator) Error() error {
+ return e.err
+}
+
+func (e errorStepEvaluator) Explain(Node) {}
diff --git a/pkg/logql/range_vector_test.go b/pkg/logql/range_vector_test.go
index 089bcff9e266a..c7176bed2ab90 100644
--- a/pkg/logql/range_vector_test.go
+++ b/pkg/logql/range_vector_test.go
@@ -3,6 +3,8 @@ package logql
import (
"context"
"fmt"
+ "math/rand"
+ "sort"
"testing"
"time"
@@ -13,7 +15,9 @@ import (
"github.com/grafana/loki/pkg/iter"
"github.com/grafana/loki/pkg/logproto"
+ "github.com/grafana/loki/pkg/logql/sketch"
"github.com/grafana/loki/pkg/logql/syntax"
+ "github.com/grafana/loki/pkg/logql/vector"
)
var samples = []logproto.Sample{
@@ -442,3 +446,69 @@ func value(value float64, negative bool) float64 {
}
return value
}
+
+func TestQuantiles(t *testing.T) {
+ // v controls the distribution of values along the curve, a greater v
+ // value means there's a large distance between generated values
+ vs := []float64{1.0, 5.0, 10.0}
+ // s controls the exponential curve of the distribution
+ // the higher the s values the faster the drop off from max value to lesser values
+ // s must be > 1.0
+ ss := []float64{1.01, 2.0, 3.0, 4.0}
+
+ // T-Digest is too big for 1_000 samples. However, we did not optimize
+ // the format for size.
+ nSamples := []int{5_000, 10_000, 100_000, 1_000_000}
+
+ factories := []struct {
+ newSketch sketch.QuantileSketchFactory
+ name string
+ relativeError float64
+ }{
+ {newSketch: func() sketch.QuantileSketch { return sketch.NewDDSketch() }, name: "DDSketch", relativeError: 0.02},
+ {newSketch: sketch.NewTDigestSketch, name: "T-Digest", relativeError: 0.05},
+ }
+
+ for _, tc := range factories {
+ for _, samplesCount := range nSamples {
+ for _, s := range ss {
+ for _, v := range vs {
+ t.Run(fmt.Sprintf("sketch=%s, s=%.2f, v=%.2f, events=%d", tc.name, s, v, samplesCount), func(t *testing.T) {
+ sk := tc.newSketch()
+
+ r := rand.New(rand.NewSource(42))
+ z := rand.NewZipf(r, s, v, 1_000)
+ values := make(vector.HeapByMaxValue, 0)
+ for i := 0; i < samplesCount; i++ {
+
+ value := float64(z.Uint64())
+ values = append(values, promql.Sample{F: value})
+ err := sk.Add(value)
+ require.NoError(t, err)
+ }
+ sort.Sort(values)
+
+ // Size
+ var buf []byte
+ var err error
+ switch s := sk.(type) {
+ case *sketch.DDSketchQuantile:
+ buf, err = proto.Marshal(s.DDSketch.ToProto())
+ require.NoError(t, err)
+ case *sketch.TDigestQuantile:
+ buf, err = proto.Marshal(s.ToProto())
+ require.NoError(t, err)
+ }
+ require.Less(t, len(buf), samplesCount*8)
+
+ // Accuracy
+ expected := Quantile(0.99, values)
+ actual, err := sk.Quantile(0.99)
+ require.NoError(t, err)
+ require.InEpsilonf(t, expected, actual, tc.relativeError, "expected quantile %f, actual quantile %f", expected, actual)
+ })
+ }
+ }
+ }
+ }
+}
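
The accuracy check above compares each sketch's 0.99 estimate against the exact quantile of the sorted sample set drawn from a Zipf distribution. A standalone version of that exact-quantile baseline is sketched below using nearest-rank selection; Loki's own `Quantile` helper may interpolate differently, so treat this as an approximation of the test's reference value.

```go
package main

import (
	"fmt"
	"math"
	"math/rand"
	"sort"
)

// exactQuantile returns the nearest-rank q-quantile of values (0 < q <= 1).
func exactQuantile(q float64, values []float64) float64 {
	sorted := append([]float64(nil), values...)
	sort.Float64s(sorted)
	rank := int(math.Ceil(q*float64(len(sorted)))) - 1
	if rank < 0 {
		rank = 0
	}
	return sorted[rank]
}

func main() {
	r := rand.New(rand.NewSource(42))
	z := rand.NewZipf(r, 2.0, 5.0, 1_000)
	values := make([]float64, 0, 10_000)
	for i := 0; i < 10_000; i++ {
		values = append(values, float64(z.Uint64()))
	}
	fmt.Println("p99 of the Zipf sample:", exactQuantile(0.99, values))
}
```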
diff --git a/pkg/logql/rangemapper.go b/pkg/logql/rangemapper.go
index cc63944bc07e9..250f586603b7e 100644
--- a/pkg/logql/rangemapper.go
+++ b/pkg/logql/rangemapper.go
@@ -81,10 +81,10 @@ func NewRangeMapperMetrics(registerer prometheus.Registerer) *MapperMetrics {
// be executed by the downstream engine.
// It returns a boolean indicating whether a rewrite was possible, the
// rewritten sample expression, and an error in case the rewrite failed.
-func (m RangeMapper) Parse(query string) (bool, syntax.Expr, error) {
- origExpr, err := syntax.ParseSampleExpr(query)
- if err != nil {
- return true, nil, err
+func (m RangeMapper) Parse(expr syntax.Expr) (bool, syntax.Expr, error) {
+ origExpr, ok := expr.(syntax.SampleExpr)
+ if !ok {
+ return true, nil, errors.New("only sample expression supported")
}
recorder := m.metrics.downstreamRecorder()
diff --git a/pkg/logql/rangemapper_test.go b/pkg/logql/rangemapper_test.go
index 1c2f827867f93..48394d219be1a 100644
--- a/pkg/logql/rangemapper_test.go
+++ b/pkg/logql/rangemapper_test.go
@@ -6,7 +6,7 @@ import (
"github.com/stretchr/testify/require"
- "github.com/grafana/loki/pkg/logqlmodel"
+ "github.com/grafana/loki/pkg/logql/syntax"
)
func Test_SplitRangeInterval(t *testing.T) {
@@ -83,7 +83,7 @@ func Test_SplitRangeInterval(t *testing.T) {
rvm, err := NewRangeMapper(2*time.Second, nilShardMetrics, mapperStats)
require.NoError(t, err)
- noop, mappedExpr, err := rvm.Parse(tc.expr)
+ noop, mappedExpr, err := rvm.Parse(syntax.MustParseExpr(tc.expr))
require.NoError(t, err)
require.Equal(t, removeWhiteSpace(tc.expected), removeWhiteSpace(mappedExpr.String()))
@@ -1741,7 +1741,7 @@ func Test_SplitRangeVectorMapping(t *testing.T) {
rvm, err := NewRangeMapper(time.Minute, nilShardMetrics, mapperStats)
require.NoError(t, err)
- noop, mappedExpr, err := rvm.Parse(tc.expr)
+ noop, mappedExpr, err := rvm.Parse(syntax.MustParseExpr(tc.expr))
require.NoError(t, err)
require.Equal(t, removeWhiteSpace(tc.expected), removeWhiteSpace(mappedExpr.String()))
@@ -1932,7 +1932,7 @@ func Test_SplitRangeVectorMapping_Noop(t *testing.T) {
rvm, err := NewRangeMapper(time.Minute, nilShardMetrics, mapperStats)
require.NoError(t, err)
- noop, mappedExpr, err := rvm.Parse(tc.expr)
+ noop, mappedExpr, err := rvm.Parse(syntax.MustParseExpr(tc.expr))
require.NoError(t, err)
require.Equal(t, removeWhiteSpace(tc.expected), removeWhiteSpace(mappedExpr.String()))
@@ -1945,21 +1945,9 @@ func Test_SplitRangeVectorMapping_Noop(t *testing.T) {
func Test_FailQuery(t *testing.T) {
rvm, err := NewRangeMapper(2*time.Minute, nilShardMetrics, NewMapperStats())
require.NoError(t, err)
- _, _, err = rvm.Parse(`{app="foo"} |= "err"`)
+ _, _, err = rvm.Parse(syntax.MustParseExpr(`{app="foo"} |= "err"`))
require.Error(t, err)
- _, _, err = rvm.Parse(`topk(0, sum(count_over_time({app="foo"} | json | __error__="" [15m])))`)
- require.Error(t, err)
- // Check fixes for bug where missing or empty parameters for regexp and pattern parsers threw a panic
- // Missing parameter to regexp parser
- _, _, err = rvm.Parse(`topk(10,sum by(namespace)(count_over_time({application="nginx", site!="eu-west-1-dev"} |= "/artifactory/" != "api" != "binarystore" | regexp [1d])))`)
- require.ErrorIs(t, err, logqlmodel.ErrParse)
- // Empty parameter to regexp parser
- _, _, err = rvm.Parse(`topk(10,sum by(namespace)(count_over_time({application="nginx", site!="eu-west-1-dev"} |= "/artifactory/" != "api" != "binarystore" | regexp ` + "``" + ` [1d])))`)
- require.ErrorIs(t, err, logqlmodel.ErrParse)
- // Empty parameter to pattern parser
- _, _, err = rvm.Parse(`topk(10,sum by(namespace)(count_over_time({application="nginx", site!="eu-west-1-dev"} |= "/artifactory/" != "api" != "binarystore" | pattern ` + `""` + ` [1d])))`)
- require.ErrorIs(t, err, logqlmodel.ErrParse)
// Empty parameter to json parser
- _, _, err = rvm.Parse(`topk(10,sum by(namespace)(count_over_time({application="nginx", site!="eu-west-1-dev"} |= "/artifactory/" != "api" != "binarystore" | json [1d])))`)
+ _, _, err = rvm.Parse(syntax.MustParseExpr(`topk(10,sum by(namespace)(count_over_time({application="nginx", site!="eu-west-1-dev"} |= "/artifactory/" != "api" != "binarystore" | json [1d])))`))
require.NoError(t, err)
}
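
Sharding of `quantile_over_time` is opt-in: `NewShardMapper` only enables the sketch rewrite when the configured shard-aggregation list contains the new `ShardQuantileOverTime` constant, as the updated tests below show. A small sketch of that feature-gate check follows; the constant name is taken from the diff, everything else is illustrative.

```go
package main

import "fmt"

const ShardQuantileOverTime = "quantile_over_time"

type shardMapper struct {
	quantileOverTimeSharding bool
}

// newShardMapper mirrors the gate in NewShardMapper: the rewrite is enabled
// only when the aggregation is explicitly listed.
func newShardMapper(shardAggregation []string) shardMapper {
	m := shardMapper{}
	for _, a := range shardAggregation {
		if a == ShardQuantileOverTime {
			m.quantileOverTimeSharding = true
		}
	}
	return m
}

func main() {
	fmt.Println(newShardMapper(nil).quantileOverTimeSharding)                             // false: off by default
	fmt.Println(newShardMapper([]string{ShardQuantileOverTime}).quantileOverTimeSharding) // true
}
```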
diff --git a/pkg/logql/shardmapper.go b/pkg/logql/shardmapper.go
index 6409cdbf0860c..4bee2616bf036 100644
--- a/pkg/logql/shardmapper.go
+++ b/pkg/logql/shardmapper.go
@@ -25,15 +25,27 @@ type ConstantShards int
func (s ConstantShards) Shards(_ syntax.Expr) (int, uint64, error) { return int(s), 0, nil }
func (s ConstantShards) GetStats(_ syntax.Expr) (stats.Stats, error) { return stats.Stats{}, nil }
+const (
+ ShardQuantileOverTime = "quantile_over_time"
+)
+
type ShardMapper struct {
- shards ShardResolver
- metrics *MapperMetrics
+ shards ShardResolver
+ metrics *MapperMetrics
+ quantileOverTimeSharding bool
}
-func NewShardMapper(resolver ShardResolver, metrics *MapperMetrics) ShardMapper {
+func NewShardMapper(resolver ShardResolver, metrics *MapperMetrics, shardAggregation []string) ShardMapper {
+ quantileOverTimeSharding := false
+ for _, a := range shardAggregation {
+ if a == ShardQuantileOverTime {
+ quantileOverTimeSharding = true
+ }
+ }
return ShardMapper{
- shards: resolver,
- metrics: metrics,
+ shards: resolver,
+ metrics: metrics,
+ quantileOverTimeSharding: quantileOverTimeSharding,
}
}
@@ -41,12 +53,7 @@ func NewShardMapperMetrics(registerer prometheus.Registerer) *MapperMetrics {
return newMapperMetrics(registerer, "shard")
}
-func (m ShardMapper) Parse(query string) (noop bool, bytesPerShard uint64, expr syntax.Expr, err error) {
- parsed, err := syntax.ParseExpr(query)
- if err != nil {
- return false, 0, nil, err
- }
-
+func (m ShardMapper) Parse(parsed syntax.Expr) (noop bool, bytesPerShard uint64, expr syntax.Expr, err error) {
recorder := m.metrics.downstreamRecorder()
mapped, bytesPerShard, err := m.Map(parsed, recorder)
@@ -163,11 +170,11 @@ func (m ShardMapper) mapSampleExpr(expr syntax.SampleExpr, r *downstreamRecorder
},
}, bytesPerShard, nil
}
- for i := shards - 1; i >= 0; i-- {
+ for shard := shards - 1; shard >= 0; shard-- {
head = &ConcatSampleExpr{
DownstreamSampleExpr: DownstreamSampleExpr{
shard: &astmapper.ShardAnnotation{
- Shard: i,
+ Shard: shard,
Of: shards,
},
SampleExpr: expr,
@@ -379,7 +386,7 @@ func (m ShardMapper) mapRangeAggregationExpr(expr *syntax.RangeAggregationExpr,
return m.mapSampleExpr(expr, r)
}
- // avg_overtime() by (foo) -> sum by (foo) (sum_over_time()) / sum by (foo) (count_over_time())
+ // avg_over_time() by (foo) -> sum by (foo) (sum_over_time()) / sum by (foo) (count_over_time())
lhs, lhsBytesPerShard, err := m.mapVectorAggregationExpr(&syntax.VectorAggregationExpr{
Left: &syntax.RangeAggregationExpr{
Left: expr.Left,
@@ -419,6 +426,43 @@ func (m ShardMapper) mapRangeAggregationExpr(expr *syntax.RangeAggregationExpr,
Op: syntax.OpTypeDiv,
}, bytesPerShard, nil
+ case syntax.OpRangeTypeQuantile:
+ potentialConflict := syntax.ReducesLabels(expr)
+ if !potentialConflict && (expr.Grouping == nil || expr.Grouping.Noop()) {
+ return m.mapSampleExpr(expr, r)
+ }
+
+ shards, bytesPerShard, err := m.shards.Shards(expr)
+ if err != nil {
+ return nil, 0, err
+ }
+ if shards == 0 || !m.quantileOverTimeSharding {
+ return m.mapSampleExpr(expr, r)
+ }
+
+ // quantile_over_time() by (foo) ->
+ // quantile_sketch_eval(quantile_merge by (foo)
+ // (__quantile_sketch_over_time__() by (foo)))
+
+ downstreams := make([]DownstreamSampleExpr, 0, shards)
+ expr.Operation = syntax.OpRangeTypeQuantileSketch
+ for shard := shards - 1; shard >= 0; shard-- {
+ downstreams = append(downstreams, DownstreamSampleExpr{
+ shard: &astmapper.ShardAnnotation{
+ Shard: shard,
+ Of: shards,
+ },
+ SampleExpr: expr,
+ })
+ }
+
+ return &QuantileSketchEvalExpr{
+ quantileMergeExpr: &QuantileSketchMergeExpr{
+ downstreams: downstreams,
+ },
+ quantile: expr.Params,
+ }, bytesPerShard, nil
+
default:
// don't shard if there's not an appropriate optimization
exprStats, err := m.shards.GetStats(expr)
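
For orientation, a short usage sketch of the new mapper API (illustration only, not part of the patch; it assumes a file in package `logql` and reuses `ConstantShards` and `NewShardMapperMetrics` from this package):

```go
package logql

import (
	"fmt"

	"github.com/grafana/loki/pkg/logql/syntax"
)

// Illustration only: map a grouped quantile_over_time with probabilistic
// sharding switched on via the new shardAggregation argument.
func exampleShardQuantileOverTime() {
	m := NewShardMapper(ConstantShards(2), NewShardMapperMetrics(nil), []string{ShardQuantileOverTime})

	// The quantile-sketch rewrite only happens because ShardQuantileOverTime was
	// passed above; without it the expression takes the plain mapSampleExpr path.
	expr := syntax.MustParseExpr(
		`quantile_over_time(0.99, {app="foo"} | unwrap latency [1m]) by (namespace)`)

	// Parse now takes an already-parsed expression instead of a query string.
	noop, bytesPerShard, mapped, err := m.Parse(expr)
	if err != nil {
		panic(err)
	}
	fmt.Println(noop, bytesPerShard, mapped.String())
}
```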
diff --git a/pkg/logql/shardmapper_test.go b/pkg/logql/shardmapper_test.go
index bdfd8a6c42d41..4dc4aac0fb449 100644
--- a/pkg/logql/shardmapper_test.go
+++ b/pkg/logql/shardmapper_test.go
@@ -51,7 +51,7 @@ func TestShardedStringer(t *testing.T) {
}
func TestMapSampleExpr(t *testing.T) {
- m := NewShardMapper(ConstantShards(2), nilShardMetrics)
+ m := NewShardMapper(ConstantShards(2), nilShardMetrics, []string{ShardQuantileOverTime})
for _, tc := range []struct {
in syntax.SampleExpr
@@ -113,7 +113,7 @@ func TestMapSampleExpr(t *testing.T) {
}
func TestMappingStrings(t *testing.T) {
- m := NewShardMapper(ConstantShards(2), nilShardMetrics)
+ m := NewShardMapper(ConstantShards(2), nilShardMetrics, []string{ShardQuantileOverTime})
for _, tc := range []struct {
in string
out string
@@ -418,7 +418,7 @@ func TestMappingStrings(t *testing.T) {
}
func TestMapping(t *testing.T) {
- m := NewShardMapper(ConstantShards(2), nilShardMetrics)
+ m := NewShardMapper(ConstantShards(2), nilShardMetrics, []string{ShardQuantileOverTime})
for _, tc := range []struct {
in string
@@ -1361,14 +1361,14 @@ func mustNewMatcher(t labels.MatchType, n, v string) *labels.Matcher {
func TestStringTrimming(t *testing.T) {
for _, tc := range []struct {
- expr string
+ expr syntax.Expr
expected string
shards int
}{
{
// sample expr in entirety for low shard count
shards: 2,
- expr: `count_over_time({app="foo"}[1m])`,
+ expr: syntax.MustParseExpr(`count_over_time({app="foo"}[1m])`),
expected: `
downstream ++
downstream
@@ -1377,7 +1377,7 @@ func TestStringTrimming(t *testing.T) {
{
// sample expr doesnt display infinite shards
shards: 5,
- expr: `count_over_time({app="foo"}[1m])`,
+ expr: syntax.MustParseExpr(`count_over_time({app="foo"}[1m])`),
expected: `
downstream ++
downstream ++
@@ -1389,7 +1389,7 @@ func TestStringTrimming(t *testing.T) {
{
// log selector expr in entirety for low shard count
shards: 2,
- expr: `{app="foo"}`,
+ expr: syntax.MustParseExpr(`{app="foo"}`),
expected: `
downstream<{app="foo"},shard=0_of_2> ++
downstream<{app="foo"},shard=1_of_2>
@@ -1398,7 +1398,7 @@ func TestStringTrimming(t *testing.T) {
{
// log selector expr doesnt display infinite shards
shards: 5,
- expr: `{app="foo"}`,
+ expr: syntax.MustParseExpr(`{app="foo"}`),
expected: `
downstream<{app="foo"},shard=0_of_5> ++
downstream<{app="foo"},shard=1_of_5> ++
@@ -1408,8 +1408,8 @@ func TestStringTrimming(t *testing.T) {
`,
},
} {
- t.Run(tc.expr, func(t *testing.T) {
- m := NewShardMapper(ConstantShards(tc.shards), nilShardMetrics)
+ t.Run(tc.expr.String(), func(t *testing.T) {
+ m := NewShardMapper(ConstantShards(tc.shards), nilShardMetrics, []string{ShardQuantileOverTime})
_, _, mappedExpr, err := m.Parse(tc.expr)
require.Nil(t, err)
require.Equal(t, removeWhiteSpace(tc.expected), removeWhiteSpace(mappedExpr.String()))
diff --git a/pkg/logql/sketch/quantile.go b/pkg/logql/sketch/quantile.go
index 14b44e69f51c7..3a0526fcfc137 100644
--- a/pkg/logql/sketch/quantile.go
+++ b/pkg/logql/sketch/quantile.go
@@ -6,107 +6,10 @@ import (
"github.com/DataDog/sketches-go/ddsketch"
"github.com/influxdata/tdigest"
- "github.com/prometheus/prometheus/model/labels"
- promql_parser "github.com/prometheus/prometheus/promql/parser"
"github.com/grafana/loki/pkg/logproto"
)
-// QuantileSketchVector represents multiple qunatile sketches at the same point in
-// time.
-type QuantileSketchVector []quantileSketchSample
-
-// QuantileSketchMatrix contains multiples QuantileSketchVectors across many
-// points in time.
-type QuantileSketchMatrix []QuantileSketchVector
-
-// ToProto converts a quantile sketch vector to its protobuf definition.
-func (q QuantileSketchVector) ToProto() *logproto.QuantileSketchVector {
- samples := make([]*logproto.QuantileSketchSample, len(q))
- for i, sample := range q {
- samples[i] = sample.ToProto()
- }
- return &logproto.QuantileSketchVector{Samples: samples}
-}
-
-func QuantileSketchVectorFromProto(proto *logproto.QuantileSketchVector) (QuantileSketchVector, error) {
- out := make([]quantileSketchSample, len(proto.Samples))
- var err error
- for i, s := range proto.Samples {
- out[i], err = quantileSketchSampleFromProto(s)
- if err != nil {
- return nil, err
- }
- }
- return out, nil
-}
-
-func (QuantileSketchMatrix) String() string {
- return "QuantileSketchMatrix()"
-}
-
-func (QuantileSketchMatrix) Type() promql_parser.ValueType { return "QuantileSketchMatrix" }
-
-func (m QuantileSketchMatrix) ToProto() *logproto.QuantileSketchMatrix {
- values := make([]*logproto.QuantileSketchVector, len(m))
- for i, vec := range m {
- values[i] = vec.ToProto()
- }
- return &logproto.QuantileSketchMatrix{Values: values}
-}
-
-func QuantileSketchMatrixFromProto(proto *logproto.QuantileSketchMatrix) (QuantileSketchMatrix, error) {
- out := make([]QuantileSketchVector, len(proto.Values))
- var err error
- for i, v := range proto.Values {
- out[i], err = QuantileSketchVectorFromProto(v)
- if err != nil {
- return nil, err
- }
- }
- return out, nil
-}
-
-type quantileSketchSample struct {
- T int64
- F QuantileSketch
-
- Metric labels.Labels
-}
-
-func (q quantileSketchSample) ToProto() *logproto.QuantileSketchSample {
- metric := make([]*logproto.LabelPair, len(q.Metric))
- for i, m := range q.Metric {
- metric[i] = &logproto.LabelPair{Name: m.Name, Value: m.Value}
- }
-
- sketch := q.F.ToProto()
-
- return &logproto.QuantileSketchSample{
- F: sketch,
- TimestampMs: q.T,
- Metric: metric,
- }
-}
-
-func quantileSketchSampleFromProto(proto *logproto.QuantileSketchSample) (quantileSketchSample, error) {
- sketch, err := QuantileSketchFromProto(proto.F)
- if err != nil {
- return quantileSketchSample{}, err
- }
- out := quantileSketchSample{
- T: proto.TimestampMs,
- F: sketch,
- Metric: make(labels.Labels, len(proto.Metric)),
- }
-
- for i, p := range proto.Metric {
- out.Metric[i] = labels.Label{Name: p.Name, Value: p.Value}
- }
-
- return out, nil
-}
-
// QuantileSketch estimates quantiles over time.
type QuantileSketch interface {
Add(float64) error
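
With the proto/vector plumbing moved out of this file, pkg/logql/sketch keeps only the sketch abstraction itself. A hedged usage sketch, with the constructor and method names taken from the test removed below:

```go
package main

import (
	"fmt"

	"github.com/grafana/loki/pkg/logql/sketch"
)

func main() {
	// NewDDSketch, Add and Quantile are used exactly as in the deleted
	// quantile_test.go; error handling kept minimal for brevity.
	s := sketch.NewDDSketch()
	for _, v := range []float64{12, 7, 3, 250, 42} {
		if err := s.Add(v); err != nil {
			panic(err)
		}
	}
	p99, err := s.Quantile(0.99)
	if err != nil {
		panic(err)
	}
	fmt.Println("approximate p99:", p99)
}
```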
diff --git a/pkg/logql/sketch/quantile_test.go b/pkg/logql/sketch/quantile_test.go
deleted file mode 100644
index 3b2f34c0e87c8..0000000000000
--- a/pkg/logql/sketch/quantile_test.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package sketch
-
-import (
- "fmt"
- "math/rand"
- "sort"
- "testing"
-
- "github.com/gogo/protobuf/proto"
- "github.com/prometheus/prometheus/promql"
- "github.com/stretchr/testify/require"
-
- "github.com/grafana/loki/pkg/logql"
- "github.com/grafana/loki/pkg/logql/vector"
-)
-
-func TestQuantiles(t *testing.T) {
- // v controls the distribution of values along the curve, a greater v
- // value means there's a large distance between generated values
- vs := []float64{1.0, 5.0, 10.0}
- // s controls the exponential curve of the distribution
- // the higher the s values the faster the drop off from max value to lesser values
- // s must be > 1.0
- ss := []float64{1.01, 2.0, 3.0, 4.0}
-
- // T-Digest is too big for 1_000 samples. However, we did not optimize
- // the format for size.
- nSamples := []int{5_000, 10_000, 100_000, 1_000_000}
-
- factories := []struct {
- newSketch QuantileSketchFactory
- name string
- relativeError float64
- }{
- {newSketch: func() QuantileSketch { return NewDDSketch() }, name: "DDSketch", relativeError: 0.02},
- {newSketch: NewTDigestSketch, name: "T-Digest", relativeError: 0.05},
- }
-
- for _, tc := range factories {
- for _, samplesCount := range nSamples {
- for _, s := range ss {
- for _, v := range vs {
- t.Run(fmt.Sprintf("sketch=%s, s=%.2f, v=%.2f, events=%d", tc.name, s, v, samplesCount), func(t *testing.T) {
- sketch := tc.newSketch()
-
- r := rand.New(rand.NewSource(42))
- z := rand.NewZipf(r, s, v, 1_000)
- values := make(vector.HeapByMaxValue, 0)
- for i := 0; i < samplesCount; i++ {
-
- value := float64(z.Uint64())
- values = append(values, promql.Sample{F: value})
- err := sketch.Add(value)
- require.NoError(t, err)
- }
- sort.Sort(values)
-
- // Size
- var buf []byte
- var err error
- switch s := sketch.(type) {
- case *DDSketchQuantile:
- buf, err = proto.Marshal(s.DDSketch.ToProto())
- require.NoError(t, err)
- case *TDigestQuantile:
- buf, err = proto.Marshal(s.ToProto())
- require.NoError(t, err)
- }
- require.Less(t, len(buf), samplesCount*8)
-
- // Accuracy
- expected := logql.Quantile(0.99, values)
- actual, err := sketch.Quantile(0.99)
- require.NoError(t, err)
- require.InEpsilonf(t, expected, actual, tc.relativeError, "expected quantile %f, actual quantile %f", expected, actual)
- })
- }
- }
- }
- }
-}
diff --git a/pkg/logql/step_evaluator.go b/pkg/logql/step_evaluator.go
index 3831c8babdf27..955f9e2b97f86 100644
--- a/pkg/logql/step_evaluator.go
+++ b/pkg/logql/step_evaluator.go
@@ -6,6 +6,7 @@ import (
type StepResult interface {
SampleVector() promql.Vector
+ QuantileSketchVec() ProbabilisticQuantileVector
}
type SampleVector promql.Vector
@@ -16,6 +17,10 @@ func (p SampleVector) SampleVector() promql.Vector {
return promql.Vector(p)
}
+func (p SampleVector) QuantileSketchVec() ProbabilisticQuantileVector {
+ return ProbabilisticQuantileVector{}
+}
+
// StepEvaluator evaluate a single step of a query.
type StepEvaluator interface {
// while Next returns a promql.Value, the only acceptable types are Scalar and Vector.
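
StepResult now exposes both accessors, so step evaluators can hand back either a plain Prometheus vector or a probabilistic quantile vector through the same interface; a plain SampleVector simply yields an empty sketch vector. A minimal sketch (illustration only, assumed to sit in package logql):

```go
package logql

// Illustration only: both accessors are available on every StepResult.
func inspectStep(r StepResult) {
	samples := r.SampleVector()       // promql.Vector
	sketches := r.QuantileSketchVec() // empty ProbabilisticQuantileVector for SampleVector results
	_, _ = samples, sketches
}
```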
diff --git a/pkg/logql/syntax/ast.go b/pkg/logql/syntax/ast.go
index 95009df3a4689..e110b37236c2f 100644
--- a/pkg/logql/syntax/ast.go
+++ b/pkg/logql/syntax/ast.go
@@ -37,17 +37,23 @@ type Expr interface {
func Clone[T Expr](e T) (T, error) {
var empty T
- copied, err := ParseExpr(e.String())
- if err != nil {
- return empty, err
- }
- cast, ok := copied.(T)
+ v := &cloneVisitor{}
+ e.Accept(v)
+ cast, ok := v.cloned.(T)
if !ok {
- return empty, fmt.Errorf("unpexpected type of cloned expression: want %T, got %T", empty, copied)
+ return empty, fmt.Errorf("unexpected type of cloned expression: want %T, got %T", empty, v.cloned)
}
return cast, nil
}
+func MustClone[T Expr](e T) T {
+ copied, err := Clone[T](e)
+ if err != nil {
+ panic(err)
+ }
+ return copied
+}
+
// implicit holds default implementations
type implicit struct{}
@@ -307,11 +313,12 @@ func (e *PipelineExpr) HasFilter() bool {
}
type LineFilterExpr struct {
- Left *LineFilterExpr
- Or *LineFilterExpr
- Ty labels.MatchType
- Match string
- Op string
+ Left *LineFilterExpr
+ Or *LineFilterExpr
+ IsOrChild bool
+ Ty labels.MatchType
+ Match string
+ Op string
implicit
}
@@ -328,6 +335,7 @@ func newOrLineFilter(left, right *LineFilterExpr) *LineFilterExpr {
if left.Ty == labels.MatchEqual || left.Ty == labels.MatchRegexp {
left.Or = right
+ right.IsOrChild = true
return left
}
@@ -380,52 +388,66 @@ func (e *LineFilterExpr) String() string {
sb.WriteString(e.Left.String())
sb.WriteString(" ")
}
- switch e.Ty {
- case labels.MatchRegexp:
- sb.WriteString("|~")
- case labels.MatchNotRegexp:
- sb.WriteString("!~")
- case labels.MatchEqual:
- sb.WriteString("|=")
- case labels.MatchNotEqual:
- sb.WriteString("!=")
+
+ if !e.IsOrChild { // Only write the type when we're not chaining "or" filters
+ switch e.Ty {
+ case labels.MatchRegexp:
+ sb.WriteString("|~")
+ case labels.MatchNotRegexp:
+ sb.WriteString("!~")
+ case labels.MatchEqual:
+ sb.WriteString("|=")
+ case labels.MatchNotEqual:
+ sb.WriteString("!=")
+ }
+ sb.WriteString(" ")
}
- sb.WriteString(" ")
+
if e.Op == "" {
sb.WriteString(strconv.Quote(e.Match))
- return sb.String()
+ } else {
+ sb.WriteString(e.Op)
+ sb.WriteString("(")
+ sb.WriteString(strconv.Quote(e.Match))
+ sb.WriteString(")")
}
- sb.WriteString(e.Op)
- sb.WriteString("(")
- sb.WriteString(strconv.Quote(e.Match))
- sb.WriteString(")")
+
+ if e.Or != nil {
+ sb.WriteString(" or ")
+ // This is dirty but removes the leading MatchType from the or expression.
+ sb.WriteString(e.Or.String())
+ }
+
return sb.String()
}
func (e *LineFilterExpr) Filter() (log.Filterer, error) {
acc := make([]log.Filterer, 0)
for curr := e; curr != nil; curr = curr.Left {
- switch curr.Op {
- case OpFilterIP:
- var err error
- next, err := log.NewIPLineFilter(curr.Match, curr.Ty)
+ var next log.Filterer
+ var err error
+ if curr.Or != nil {
+ next, err = newOrFilter(curr)
if err != nil {
return nil, err
}
acc = append(acc, next)
- default:
- var next log.Filterer
- var err error
- if curr.Or != nil {
- next, err = newOrFilter(curr)
- } else {
+ } else {
+ switch curr.Op {
+ case OpFilterIP:
+ next, err := log.NewIPLineFilter(curr.Match, curr.Ty)
+ if err != nil {
+ return nil, err
+ }
+ acc = append(acc, next)
+ default:
next, err = log.NewFilter(curr.Match, curr.Ty)
- }
- if err != nil {
- return nil, err
- }
+ if err != nil {
+ return nil, err
+ }
- acc = append(acc, next)
+ acc = append(acc, next)
+ }
}
}
@@ -1140,6 +1162,11 @@ const (
// parser flags
OpStrict = "--strict"
OpKeepEmpty = "--keep-empty"
+
+	// Internal expressions that are not representable in LogQL. They change how an
+	// expression is evaluated, producing intermediate results that LogQL clients
+	// cannot consume directly; they exist only to support sharding.
+ OpRangeTypeQuantileSketch = "__quantile_sketch_over_time__"
)
func IsComparisonOperator(op string) bool {
@@ -1188,7 +1215,7 @@ type RangeAggregationExpr struct {
func newRangeAggregationExpr(left *LogRange, operation string, gr *Grouping, stringParams *string) SampleExpr {
var params *float64
if stringParams != nil {
- if operation != OpRangeTypeQuantile {
+ if operation != OpRangeTypeQuantile && operation != OpRangeTypeQuantileSketch {
return &RangeAggregationExpr{err: logqlmodel.NewParseError(fmt.Sprintf("parameter %s not supported for operation %s", *stringParams, operation), 0, 0)}
}
var err error
@@ -1243,7 +1270,7 @@ func (e *RangeAggregationExpr) MatcherGroups() ([]MatcherRange, error) {
func (e RangeAggregationExpr) validate() error {
if e.Grouping != nil {
switch e.Operation {
- case OpRangeTypeAvg, OpRangeTypeStddev, OpRangeTypeStdvar, OpRangeTypeQuantile, OpRangeTypeMax, OpRangeTypeMin, OpRangeTypeFirst, OpRangeTypeLast:
+ case OpRangeTypeAvg, OpRangeTypeStddev, OpRangeTypeStdvar, OpRangeTypeQuantile, OpRangeTypeQuantileSketch, OpRangeTypeMax, OpRangeTypeMin, OpRangeTypeFirst, OpRangeTypeLast:
default:
return fmt.Errorf("grouping not allowed for %s aggregation", e.Operation)
}
@@ -1252,7 +1279,7 @@ func (e RangeAggregationExpr) validate() error {
switch e.Operation {
case OpRangeTypeAvg, OpRangeTypeSum, OpRangeTypeMax, OpRangeTypeMin, OpRangeTypeStddev,
OpRangeTypeStdvar, OpRangeTypeQuantile, OpRangeTypeRate, OpRangeTypeRateCounter,
- OpRangeTypeAbsent, OpRangeTypeFirst, OpRangeTypeLast:
+ OpRangeTypeAbsent, OpRangeTypeFirst, OpRangeTypeLast, OpRangeTypeQuantileSketch:
return nil
default:
return fmt.Errorf("invalid aggregation %s with unwrap", e.Operation)
@@ -2112,6 +2139,7 @@ var shardableOps = map[string]bool{
OpRangeTypeSum: true,
OpRangeTypeMax: true,
OpRangeTypeMin: true,
+ OpRangeTypeQuantile: true,
// binops - arith
OpTypeAdd: true,
diff --git a/pkg/logql/syntax/ast_test.go b/pkg/logql/syntax/ast_test.go
index e1570e07e8c1f..8767651eaae75 100644
--- a/pkg/logql/syntax/ast_test.go
+++ b/pkg/logql/syntax/ast_test.go
@@ -404,6 +404,20 @@ func Test_FilterMatcher(t *testing.T) {
},
[]linecheck{{"foo", false}, {"bar", false}, {"none", true}},
},
+ {
+ `{app="foo"} |= ip("127.0.0.1") or "foo"`,
+ []*labels.Matcher{
+ mustNewMatcher(labels.MatchEqual, "app", "foo"),
+ },
+ []linecheck{{"foo", true}, {"bar", false}, {"127.0.0.2", false}, {"127.0.0.1", true}},
+ },
+ {
+ `{app="foo"} != ip("127.0.0.1") or "foo"`,
+ []*labels.Matcher{
+ mustNewMatcher(labels.MatchEqual, "app", "foo"),
+ },
+ []linecheck{{"foo", false}, {"bar", true}, {"127.0.0.2", true}, {"127.0.0.1", false}},
+ },
} {
tt := tt
t.Run(tt.q, func(t *testing.T) {
@@ -474,6 +488,42 @@ func TestStringer(t *testing.T) {
in: `0 > count_over_time({foo="bar"}[1m])`,
out: `(0 > count_over_time({foo="bar"}[1m]))`,
},
+ {
+ in: `{app="foo"} |= "foo" or "bar"`,
+ out: `{app="foo"} |= "foo" or "bar"`,
+ },
+ {
+ in: `{app="foo"} |~ "foo" or "bar" or "baz"`,
+ out: `{app="foo"} |~ "foo" or "bar" or "baz"`,
+ },
+ {
+ in: `{app="foo"} |= ip("127.0.0.1") or "foo"`,
+ out: `{app="foo"} |= ip("127.0.0.1") or "foo"`,
+ },
+ {
+ in: `{app="foo"} |= "foo" or ip("127.0.0.1")`,
+ out: `{app="foo"} |= "foo" or ip("127.0.0.1")`,
+ },
+ {
+ in: `{app="foo"} |~ ip("127.0.0.1") or "foo"`,
+ out: `{app="foo"} |~ ip("127.0.0.1") or "foo"`,
+ },
+ { // !(A || B) == !A && !B
+ in: `{app="foo"} != "foo" or "bar"`,
+ out: `{app="foo"} != "foo" != "bar"`,
+ },
+ {
+ in: `{app="foo"} !~ "foo" or "bar"`,
+ out: `{app="foo"} !~ "foo" !~ "bar"`,
+ },
+ {
+ in: `{app="foo"} != ip("127.0.0.1") or "foo"`,
+ out: `{app="foo"} != ip("127.0.0.1") != "foo"`,
+ },
+ {
+ in: `{app="foo"} !~ ip("127.0.0.1") or "foo"`,
+ out: `{app="foo"} !~ ip("127.0.0.1") !~ "foo"`,
+ },
} {
t.Run(tc.in, func(t *testing.T) {
expr, err := ParseExpr(tc.in)
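
The new IsOrChild flag and the Stringer changes in ast.go drive the round-trip behaviour exercised in the test cases above: positive `or` filters render back with `or`, while negated ones are expanded into a chained filter (De Morgan, as the test comment notes). A quick sketch, with the expected output copied from the TestStringer cases:

```go
package main

import (
	"fmt"

	"github.com/grafana/loki/pkg/logql/syntax"
)

func main() {
	// Positive filters keep the `or`.
	fmt.Println(syntax.MustParseExpr(`{app="foo"} |= "foo" or "bar"`).String())
	// {app="foo"} |= "foo" or "bar"

	// Negated filters are expanded: !(A || B) == !A && !B.
	fmt.Println(syntax.MustParseExpr(`{app="foo"} != "foo" or "bar"`).String())
	// {app="foo"} != "foo" != "bar"
}
```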
diff --git a/pkg/logql/syntax/clone.go b/pkg/logql/syntax/clone.go
new file mode 100644
index 0000000000000..3c4cf51a2ac93
--- /dev/null
+++ b/pkg/logql/syntax/clone.go
@@ -0,0 +1,300 @@
+package syntax
+
+import (
+ "github.com/prometheus/prometheus/model/labels"
+
+ "github.com/grafana/loki/pkg/logql/log"
+)
+
+type cloneVisitor struct {
+ cloned Expr
+}
+
+var _ RootVisitor = &cloneVisitor{}
+
+func cloneGrouping(g *Grouping) *Grouping {
+ copied := &Grouping{
+ Without: g.Without,
+ }
+ if g.Groups != nil {
+ copied.Groups = make([]string, len(g.Groups))
+ copy(copied.Groups, g.Groups)
+ }
+ return copied
+}
+
+func cloneVectorMatching(v *VectorMatching) *VectorMatching {
+ copied := *v
+ copy(copied.Include, v.Include)
+ copy(copied.MatchingLabels, v.MatchingLabels)
+
+ return &copied
+}
+
+func (v *cloneVisitor) VisitBinOp(e *BinOpExpr) {
+ lhs := MustClone[SampleExpr](e.SampleExpr)
+ rhs := MustClone[SampleExpr](e.RHS)
+ copied := &BinOpExpr{
+ SampleExpr: lhs,
+ RHS: rhs,
+ Op: e.Op,
+ }
+
+ if e.Opts != nil {
+ copied.Opts = &BinOpOptions{
+ ReturnBool: e.Opts.ReturnBool,
+ VectorMatching: cloneVectorMatching(e.Opts.VectorMatching),
+ }
+ }
+
+ v.cloned = copied
+}
+
+func (v *cloneVisitor) VisitVectorAggregation(e *VectorAggregationExpr) {
+ copied := &VectorAggregationExpr{
+ Left: MustClone[SampleExpr](e.Left),
+ Params: e.Params,
+ Operation: e.Operation,
+ }
+
+ if e.Grouping != nil {
+ copied.Grouping = cloneGrouping(e.Grouping)
+ }
+
+ v.cloned = copied
+}
+
+func (v *cloneVisitor) VisitRangeAggregation(e *RangeAggregationExpr) {
+ copied := &RangeAggregationExpr{
+ Left: MustClone[*LogRange](e.Left),
+ Operation: e.Operation,
+ }
+
+ if e.Grouping != nil {
+ copied.Grouping = cloneGrouping(e.Grouping)
+ }
+
+ if e.Params != nil {
+ tmp := *e.Params
+ copied.Params = &tmp
+ }
+
+ v.cloned = copied
+}
+
+func (v *cloneVisitor) VisitLabelReplace(e *LabelReplaceExpr) {
+ left := MustClone[SampleExpr](e.Left)
+ v.cloned = mustNewLabelReplaceExpr(left, e.Dst, e.Replacement, e.Src, e.Regex)
+}
+
+func (v *cloneVisitor) VisitLiteral(e *LiteralExpr) {
+ v.cloned = &LiteralExpr{Val: e.Val}
+}
+
+func (v *cloneVisitor) VisitVector(e *VectorExpr) {
+ v.cloned = &VectorExpr{Val: e.Val}
+}
+
+func (v *cloneVisitor) VisitLogRange(e *LogRange) {
+ copied := &LogRange{
+ Left: MustClone[LogSelectorExpr](e.Left),
+ Interval: e.Interval,
+ Offset: e.Offset,
+ }
+ if e.Unwrap != nil {
+ copied.Unwrap = &UnwrapExpr{
+ Identifier: e.Unwrap.Identifier,
+ Operation: e.Unwrap.Operation,
+ }
+ if e.Unwrap.PostFilters != nil {
+ copied.Unwrap.PostFilters = make([]log.LabelFilterer, len(e.Unwrap.PostFilters))
+ for i, f := range e.Unwrap.PostFilters {
+ copied.Unwrap.PostFilters[i] = cloneLabelFilterer(f)
+ }
+ }
+ }
+
+ v.cloned = copied
+}
+
+func (v *cloneVisitor) VisitMatchers(e *MatchersExpr) {
+ copied := &MatchersExpr{
+ Mts: make([]*labels.Matcher, len(e.Mts)),
+ }
+ for i, m := range e.Mts {
+ copied.Mts[i] = labels.MustNewMatcher(m.Type, m.Name, m.Value)
+ }
+
+ v.cloned = copied
+}
+
+func (v *cloneVisitor) VisitPipeline(e *PipelineExpr) {
+ copied := &PipelineExpr{
+ Left: MustClone[*MatchersExpr](e.Left),
+ MultiStages: make(MultiStageExpr, len(e.MultiStages)),
+ }
+ for i, s := range e.MultiStages {
+ copied.MultiStages[i] = MustClone[StageExpr](s)
+ }
+
+ v.cloned = copied
+}
+
+func (v *cloneVisitor) VisitDecolorize(*DecolorizeExpr) {
+ v.cloned = &DecolorizeExpr{}
+}
+
+func (v *cloneVisitor) VisitDropLabels(e *DropLabelsExpr) {
+ copied := &DropLabelsExpr{
+ dropLabels: make([]log.DropLabel, len(e.dropLabels)),
+ }
+ for i, l := range e.dropLabels {
+ var matcher *labels.Matcher
+ if l.Matcher != nil {
+ matcher = labels.MustNewMatcher(l.Matcher.Type, l.Matcher.Name, l.Matcher.Value)
+ }
+ copied.dropLabels[i] = log.NewDropLabel(matcher, l.Name)
+ }
+
+ v.cloned = copied
+}
+
+func (v *cloneVisitor) VisitJSONExpressionParser(e *JSONExpressionParser) {
+ copied := &JSONExpressionParser{
+ Expressions: make([]log.LabelExtractionExpr, len(e.Expressions)),
+ }
+ copy(copied.Expressions, e.Expressions)
+
+ v.cloned = copied
+}
+
+func (v *cloneVisitor) VisitKeepLabel(e *KeepLabelsExpr) {
+ copied := &KeepLabelsExpr{
+ keepLabels: make([]log.KeepLabel, len(e.keepLabels)),
+ }
+ for i, k := range e.keepLabels {
+ copied.keepLabels[i] = log.KeepLabel{
+ Name: k.Name,
+ }
+ if k.Matcher != nil {
+ copied.keepLabels[i].Matcher = labels.MustNewMatcher(k.Matcher.Type, k.Matcher.Name, k.Matcher.Value)
+ }
+ }
+
+ v.cloned = copied
+}
+
+func (v *cloneVisitor) VisitLabelFilter(e *LabelFilterExpr) {
+ v.cloned = &LabelFilterExpr{
+ LabelFilterer: cloneLabelFilterer(e.LabelFilterer),
+ }
+}
+
+func cloneLabelFilterer(filter log.LabelFilterer) log.LabelFilterer {
+ switch concrete := filter.(type) {
+ case *log.BinaryLabelFilter:
+ return &log.BinaryLabelFilter{
+ Left: cloneLabelFilterer(concrete.Left),
+ Right: cloneLabelFilterer(concrete.Right),
+ And: concrete.And,
+ }
+ case *log.NoopLabelFilter:
+ copied := &log.NoopLabelFilter{}
+ if concrete.Matcher != nil {
+ copied.Matcher = mustNewMatcher(concrete.Type, concrete.Name, concrete.Value)
+ }
+
+ return copied
+ case *log.BytesLabelFilter:
+ return &log.BytesLabelFilter{
+ Name: concrete.Name,
+ Value: concrete.Value,
+ Type: concrete.Type,
+ }
+ case *log.DurationLabelFilter:
+ return &log.DurationLabelFilter{
+ Name: concrete.Name,
+ Value: concrete.Value,
+ Type: concrete.Type,
+ }
+ case *log.NumericLabelFilter:
+ return &log.NumericLabelFilter{
+ Name: concrete.Name,
+ Value: concrete.Value,
+ Type: concrete.Type,
+ }
+ case *log.StringLabelFilter:
+ copied := &log.StringLabelFilter{}
+ if concrete.Matcher != nil {
+ copied.Matcher = mustNewMatcher(concrete.Type, concrete.Name, concrete.Value)
+ }
+ return copied
+ case *log.LineFilterLabelFilter:
+ copied := &log.LineFilterLabelFilter{
+ Filter: concrete.Filter,
+ }
+ if concrete.Matcher != nil {
+ copied.Matcher = mustNewMatcher(concrete.Type, concrete.Name, concrete.Value)
+ }
+ return copied
+ case *log.IPLabelFilter:
+ return log.NewIPLabelFilter(concrete.Pattern, concrete.Label, concrete.Ty)
+ }
+ return nil
+}
+
+func (v *cloneVisitor) VisitLabelFmt(e *LabelFmtExpr) {
+ copied := &LabelFmtExpr{
+ Formats: make([]log.LabelFmt, len(e.Formats)),
+ }
+ copy(copied.Formats, e.Formats)
+ v.cloned = copied
+}
+
+func (v *cloneVisitor) VisitLabelParser(e *LabelParserExpr) {
+ v.cloned = &LabelParserExpr{
+ Op: e.Op,
+ Param: e.Param,
+ }
+}
+
+func (v *cloneVisitor) VisitLineFilter(e *LineFilterExpr) {
+ copied := &LineFilterExpr{
+ Ty: e.Ty,
+ Match: e.Match,
+ Op: e.Op,
+ IsOrChild: e.IsOrChild,
+ }
+
+ if e.Left != nil {
+ copied.Left = MustClone[*LineFilterExpr](e.Left)
+ }
+
+ if e.Or != nil {
+ copied.Or = MustClone[*LineFilterExpr](e.Or)
+ }
+
+ v.cloned = copied
+}
+
+func (v *cloneVisitor) VisitLineFmt(e *LineFmtExpr) {
+ v.cloned = &LineFmtExpr{Value: e.Value}
+}
+
+func (v *cloneVisitor) VisitLogfmtExpressionParser(e *LogfmtExpressionParser) {
+ copied := &LogfmtExpressionParser{
+ Expressions: make([]log.LabelExtractionExpr, len(e.Expressions)),
+ Strict: e.Strict,
+ KeepEmpty: e.KeepEmpty,
+ }
+ copy(copied.Expressions, e.Expressions)
+
+ v.cloned = copied
+}
+
+func (v *cloneVisitor) VisitLogfmtParser(e *LogfmtParserExpr) {
+ v.cloned = &LogfmtParserExpr{
+ Strict: e.Strict,
+ KeepEmpty: e.KeepEmpty,
+ }
+}
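
Clone no longer round-trips through e.String() and ParseExpr; the cloneVisitor above copies the AST node by node, and ast.go adds a panicking MustClone wrapper. A minimal call-site sketch (illustration only):

```go
package main

import (
	"fmt"

	"github.com/grafana/loki/pkg/logql/syntax"
)

func main() {
	expr := syntax.MustParseExpr(`sum by (namespace) (rate({app="foo"} |= "bar" [1m]))`)

	// Clone returns the same interface type it was given, or an error.
	copied, err := syntax.Clone[syntax.Expr](expr)
	if err != nil {
		panic(err)
	}

	// MustClone panics instead of returning the error.
	sample := syntax.MustClone[syntax.SampleExpr](expr.(syntax.SampleExpr))

	fmt.Println(copied.String())
	fmt.Println(sample.String())
}
```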
diff --git a/pkg/logql/syntax/clone_test.go b/pkg/logql/syntax/clone_test.go
new file mode 100644
index 0000000000000..58dc6efb03e2c
--- /dev/null
+++ b/pkg/logql/syntax/clone_test.go
@@ -0,0 +1,132 @@
+package syntax
+
+import (
+ "strings"
+ "testing"
+
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/stretchr/testify/require"
+
+ "github.com/grafana/loki/pkg/logql/log"
+)
+
+func TestClone(t *testing.T) {
+ tests := map[string]struct {
+ query string
+ }{
+ "simple matchers": {
+ query: `{env="prod", app=~"loki.*"}`,
+ },
+ "simple aggregation": {
+ query: `count_over_time({env="prod", app=~"loki.*"}[5m])`,
+ },
+ "simple aggregation with unwrap": {
+ query: `sum_over_time({env="prod", app=~"loki.*"} | unwrap bytes[5m])`,
+ },
+ "bin op": {
+ query: `(count_over_time({env="prod", app=~"loki.*"}[5m]) >= 0)`,
+ },
+ "label filter": {
+ query: `{app="foo"} |= "bar" | json | ( latency>=250ms or ( status_code<500 , status_code>200 ) )`,
+ },
+ "line filter": {
+ query: `{app="foo"} |= "bar" | json |= "500" or "200"`,
+ },
+ "drop label": {
+ query: `{app="foo"} |= "bar" | json | drop latency, status_code="200"`,
+ },
+ "keep label": {
+ query: `{app="foo"} |= "bar" | json | keep latency, status_code="200"`,
+ },
+ "regexp": {
+ query: `{env="prod", app=~"loki.*"} |~ ".*foo.*"`,
+ },
+ "vector matching": {
+ query: `(sum by (cluster)(rate({foo="bar"}[5m])) / ignoring (cluster) count(rate({foo="bar"}[5m])))`,
+ },
+ "sum over or vector": {
+ query: `(sum(count_over_time({foo="bar"}[5m])) or vector(1.000000))`,
+ },
+ "label replace": {
+ query: `label_replace(vector(0.000000),"foo","bar","","")`,
+ },
+ "filters with bytes": {
+ query: `{app="foo"} |= "bar" | json | ( status_code <500 or ( status_code>200 , size>=2.5KiB ) )`,
+ },
+ "post filter": {
+ query: `quantile_over_time(0.99998,{app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
+ | line_format "blip{{ .foo }}blop {{.status_code}}" | label_format foo=bar,status_code="buzz{{.bar}}" | unwrap foo
+ | __error__ !~".+"[5m]) by (namespace,instance)`,
+ },
+ "multiple post filters": {
+ query: `rate({app="foo"} | json | unwrap foo | latency >= 250ms or bytes > 42B or ( status_code < 500 and status_code > 200) or source = ip("") and user = "me" [1m])`,
+ },
+ "true filter": {
+ query: `{ foo = "bar" } | foo =~".*"`,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+
+ expr, err := ParseExpr(test.query)
+ require.NoError(t, err)
+
+ actual, err := Clone[Expr](expr)
+ require.NoError(t, err)
+
+ require.Equal(t, expr.Pretty(0), actual.Pretty(0))
+ })
+ }
+}
+
+func TestCloneStringLabelFilter(t *testing.T) {
+ for name, tc := range map[string]struct {
+ expr Expr
+ }{
+ "pipeline": {
+ expr: newPipelineExpr(
+ newMatcherExpr([]*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}),
+ MultiStageExpr{
+ newLogfmtParserExpr(nil),
+ newLabelFilterExpr(&log.StringLabelFilter{Matcher: labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")}),
+ },
+ ),
+ },
+ "filterer": {
+ expr: &LabelFilterExpr{
+ LabelFilterer: &log.LineFilterLabelFilter{
+ Matcher: mustNewMatcher(labels.MatchEqual, "foo", "bar"),
+ Filter: log.ExistsFilter,
+ },
+ },
+ },
+ } {
+ t.Run(name, func(t *testing.T) {
+ actual, err := Clone[Expr](tc.expr)
+ require.NoError(t, err)
+
+ require.Equal(t, tc.expr.Pretty(0), actual.Pretty(0))
+ require.Equal(t, tc.expr, actual)
+ })
+ }
+}
+
+func TestCloneParseTestCases(t *testing.T) {
+ for _, tc := range ParseTestCases {
+ if tc.err == nil {
+ t.Run(tc.in, func(t *testing.T) {
+ ast, err := ParseExpr(tc.in)
+ require.NoError(t, err)
+ if strings.Contains(tc.in, "KiB") {
+ t.Skipf("Byte roundtrip conversion is broken. '%s' vs '%s'", tc.in, ast.String())
+ }
+
+ actual, err := Clone[Expr](ast)
+ require.NoError(t, err)
+
+ require.Equal(t, ast.Pretty(0), actual.Pretty(0))
+ })
+ }
+ }
+}
diff --git a/pkg/logql/syntax/parser.go b/pkg/logql/syntax/parser.go
index e1fe5971ff3a2..710bf7132c4c8 100644
--- a/pkg/logql/syntax/parser.go
+++ b/pkg/logql/syntax/parser.go
@@ -99,6 +99,14 @@ func ParseExprWithoutValidation(input string) (expr Expr, err error) {
return p.Parse()
}
+func MustParseExpr(input string) Expr {
+ expr, err := ParseExpr(input)
+ if err != nil {
+ panic(err)
+ }
+ return expr
+}
+
func validateExpr(expr Expr) error {
switch e := expr.(type) {
case SampleExpr:
@@ -138,7 +146,7 @@ func ParseMatchers(input string, validate bool) ([]*labels.Matcher, error) {
}
matcherExpr, ok := expr.(*MatchersExpr)
if !ok {
- return nil, errors.New("only label matchers is supported")
+ return nil, logqlmodel.ErrParseMatchers
}
return matcherExpr.Mts, nil
}
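
parser.go also swaps the ad-hoc error for the new logqlmodel.ErrParseMatchers sentinel (added in pkg/logqlmodel/error.go further down), so callers can detect the condition with errors.Is instead of matching message text. A sketch, using the ParseMatchers signature shown in the hunk above:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/grafana/loki/pkg/logql/syntax"
	"github.com/grafana/loki/pkg/logqlmodel"
)

func main() {
	// A full pipeline is more than plain label matchers, so ParseMatchers rejects it.
	_, err := syntax.ParseMatchers(`{app="foo"} |= "bar"`, true)
	if errors.Is(err, logqlmodel.ErrParseMatchers) {
		fmt.Println("input is not a pure label-matcher expression")
	}
}
```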
diff --git a/pkg/logql/syntax/serialize.go b/pkg/logql/syntax/serialize.go
index 2d7a1d786fda7..53c4bef37d290 100644
--- a/pkg/logql/syntax/serialize.go
+++ b/pkg/logql/syntax/serialize.go
@@ -623,9 +623,7 @@ func decodeLabelFilter(iter *jsoniter.Iterator) log.LabelFilterer {
}
var matcher *labels.Matcher
- if name != "" && value != "" {
- matcher = labels.MustNewMatcher(t, name, value)
- }
+ matcher = labels.MustNewMatcher(t, name, value)
filter = log.NewStringLabelFilter(matcher)
diff --git a/pkg/logql/syntax/serialize_test.go b/pkg/logql/syntax/serialize_test.go
index 846e3988b852b..f4051caaf7ea1 100644
--- a/pkg/logql/syntax/serialize_test.go
+++ b/pkg/logql/syntax/serialize_test.go
@@ -50,6 +50,9 @@ func TestJSONSerializationRoundTrip(t *testing.T) {
"multiple post filters": {
query: `rate({app="foo"} | json | unwrap foo | latency >= 250ms or bytes > 42B or ( status_code < 500 and status_code > 200) or source = ip("") and user = "me" [1m])`,
},
+ "empty label filter string": {
+ query: `rate({app="foo"} |= "bar" | json | unwrap latency | path!="" [5m])`,
+ },
}
for name, test := range tests {
diff --git a/pkg/logql/syntax/visit.go b/pkg/logql/syntax/visit.go
new file mode 100644
index 0000000000000..70c931ad49467
--- /dev/null
+++ b/pkg/logql/syntax/visit.go
@@ -0,0 +1,285 @@
+package syntax
+
+type AcceptVisitor interface {
+ Accept(RootVisitor)
+}
+
+type RootVisitor interface {
+ SampleExprVisitor
+ LogSelectorExprVisitor
+ StageExprVisitor
+
+ VisitLogRange(*LogRange)
+}
+
+type SampleExprVisitor interface {
+ VisitBinOp(*BinOpExpr)
+ VisitVectorAggregation(*VectorAggregationExpr)
+ VisitRangeAggregation(*RangeAggregationExpr)
+ VisitLabelReplace(*LabelReplaceExpr)
+ VisitLiteral(*LiteralExpr)
+ VisitVector(*VectorExpr)
+}
+
+type LogSelectorExprVisitor interface {
+ VisitMatchers(*MatchersExpr)
+ VisitPipeline(*PipelineExpr)
+ VisitLiteral(*LiteralExpr)
+ VisitVector(*VectorExpr)
+}
+
+type StageExprVisitor interface {
+ VisitDecolorize(*DecolorizeExpr)
+ VisitDropLabels(*DropLabelsExpr)
+ VisitJSONExpressionParser(*JSONExpressionParser)
+ VisitKeepLabel(*KeepLabelsExpr)
+ VisitLabelFilter(*LabelFilterExpr)
+ VisitLabelFmt(*LabelFmtExpr)
+ VisitLabelParser(*LabelParserExpr)
+ VisitLineFilter(*LineFilterExpr)
+ VisitLineFmt(*LineFmtExpr)
+ VisitLogfmtExpressionParser(*LogfmtExpressionParser)
+ VisitLogfmtParser(*LogfmtParserExpr)
+}
+
+var _ RootVisitor = &DepthFirstTraversal{}
+
+type DepthFirstTraversal struct {
+ VisitBinOpFn func(v RootVisitor, e *BinOpExpr)
+ VisitDecolorizeFn func(v RootVisitor, e *DecolorizeExpr)
+ VisitDropLabelsFn func(v RootVisitor, e *DropLabelsExpr)
+ VisitJSONExpressionParserFn func(v RootVisitor, e *JSONExpressionParser)
+ VisitKeepLabelFn func(v RootVisitor, e *KeepLabelsExpr)
+ VisitLabelFilterFn func(v RootVisitor, e *LabelFilterExpr)
+ VisitLabelFmtFn func(v RootVisitor, e *LabelFmtExpr)
+ VisitLabelParserFn func(v RootVisitor, e *LabelParserExpr)
+ VisitLabelReplaceFn func(v RootVisitor, e *LabelReplaceExpr)
+ VisitLineFilterFn func(v RootVisitor, e *LineFilterExpr)
+ VisitLineFmtFn func(v RootVisitor, e *LineFmtExpr)
+ VisitLiteralFn func(v RootVisitor, e *LiteralExpr)
+ VisitLogRangeFn func(v RootVisitor, e *LogRange)
+ VisitLogfmtExpressionParserFn func(v RootVisitor, e *LogfmtExpressionParser)
+ VisitLogfmtParserFn func(v RootVisitor, e *LogfmtParserExpr)
+ VisitMatchersFn func(v RootVisitor, e *MatchersExpr)
+ VisitPipelineFn func(v RootVisitor, e *PipelineExpr)
+ VisitRangeAggregationFn func(v RootVisitor, e *RangeAggregationExpr)
+ VisitVectorFn func(v RootVisitor, e *VectorExpr)
+ VisitVectorAggregationFn func(v RootVisitor, e *VectorAggregationExpr)
+}
+
+// VisitBinOp implements RootVisitor.
+func (v *DepthFirstTraversal) VisitBinOp(e *BinOpExpr) {
+ if e == nil {
+ return
+ }
+ if v.VisitBinOpFn != nil {
+ v.VisitBinOpFn(v, e)
+ } else {
+ e.SampleExpr.Accept(v)
+ e.RHS.Accept(v)
+ }
+}
+
+// VisitDecolorize implements RootVisitor.
+func (v *DepthFirstTraversal) VisitDecolorize(e *DecolorizeExpr) {
+ if e == nil {
+ return
+ }
+ if v.VisitDecolorizeFn != nil {
+ v.VisitDecolorizeFn(v, e)
+ }
+}
+
+// VisitDropLabels implements RootVisitor.
+func (v *DepthFirstTraversal) VisitDropLabels(e *DropLabelsExpr) {
+ if e == nil {
+ return
+ }
+ if v.VisitDropLabelsFn != nil {
+ v.VisitDropLabelsFn(v, e)
+ }
+}
+
+// VisitJSONExpressionParser implements RootVisitor.
+func (v *DepthFirstTraversal) VisitJSONExpressionParser(e *JSONExpressionParser) {
+ if e == nil {
+ return
+ }
+ if v.VisitJSONExpressionParserFn != nil {
+ v.VisitJSONExpressionParserFn(v, e)
+ }
+}
+
+// VisitKeepLabel implements RootVisitor.
+func (v *DepthFirstTraversal) VisitKeepLabel(e *KeepLabelsExpr) {
+ if e == nil {
+ return
+ }
+ if v.VisitKeepLabelFn != nil {
+ v.VisitKeepLabelFn(v, e)
+ }
+}
+
+// VisitLabelFilter implements RootVisitor.
+func (v *DepthFirstTraversal) VisitLabelFilter(e *LabelFilterExpr) {
+ if e == nil {
+ return
+ }
+ if v.VisitLabelFilterFn != nil {
+ v.VisitLabelFilterFn(v, e)
+ }
+}
+
+// VisitLabelFmt implements RootVisitor.
+func (v *DepthFirstTraversal) VisitLabelFmt(e *LabelFmtExpr) {
+ if e == nil {
+ return
+ }
+ if v.VisitLabelFmtFn != nil {
+ v.VisitLabelFmtFn(v, e)
+ }
+}
+
+// VisitLabelParser implements RootVisitor.
+func (v *DepthFirstTraversal) VisitLabelParser(e *LabelParserExpr) {
+ if e == nil {
+ return
+ }
+ if v.VisitLabelParserFn != nil {
+ v.VisitLabelParserFn(v, e)
+ }
+}
+
+// VisitLabelReplace implements RootVisitor.
+func (v *DepthFirstTraversal) VisitLabelReplace(e *LabelReplaceExpr) {
+ if e == nil {
+ return
+ }
+ if v.VisitLabelReplaceFn != nil {
+ v.VisitLabelReplaceFn(v, e)
+ }
+}
+
+// VisitLineFilter implements RootVisitor.
+func (v *DepthFirstTraversal) VisitLineFilter(e *LineFilterExpr) {
+ if e == nil {
+ return
+ }
+ if v.VisitLineFilterFn != nil {
+ v.VisitLineFilterFn(v, e)
+ } else {
+ e.Left.Accept(v)
+ e.Or.Accept(v)
+ }
+}
+
+// VisitLineFmt implements RootVisitor.
+func (v *DepthFirstTraversal) VisitLineFmt(e *LineFmtExpr) {
+ if e == nil {
+ return
+ }
+ if v.VisitLineFmtFn != nil {
+ v.VisitLineFmtFn(v, e)
+ }
+}
+
+// VisitLiteral implements RootVisitor.
+func (v *DepthFirstTraversal) VisitLiteral(e *LiteralExpr) {
+ if e == nil {
+ return
+ }
+ if v.VisitLiteralFn != nil {
+ v.VisitLiteralFn(v, e)
+ }
+}
+
+// VisitLogRange implements RootVisitor.
+func (v *DepthFirstTraversal) VisitLogRange(e *LogRange) {
+ if e == nil {
+ return
+ }
+ if v.VisitLogRangeFn != nil {
+ v.VisitLogRangeFn(v, e)
+ } else {
+ e.Left.Accept(v)
+ }
+}
+
+// VisitLogfmtExpressionParser implements RootVisitor.
+func (v *DepthFirstTraversal) VisitLogfmtExpressionParser(e *LogfmtExpressionParser) {
+ if e == nil {
+ return
+ }
+ if v.VisitLogfmtExpressionParserFn != nil {
+ v.VisitLogfmtExpressionParserFn(v, e)
+ }
+}
+
+// VisitLogfmtParser implements RootVisitor.
+func (v *DepthFirstTraversal) VisitLogfmtParser(e *LogfmtParserExpr) {
+ if e == nil {
+ return
+ }
+ if v.VisitLogfmtParserFn != nil {
+ v.VisitLogfmtParserFn(v, e)
+ }
+}
+
+// VisitMatchers implements RootVisitor.
+func (v *DepthFirstTraversal) VisitMatchers(e *MatchersExpr) {
+ if e == nil {
+ return
+ }
+ if v.VisitMatchersFn != nil {
+ v.VisitMatchersFn(v, e)
+ }
+}
+
+// VisitPipeline implements RootVisitor.
+func (v *DepthFirstTraversal) VisitPipeline(e *PipelineExpr) {
+ if e == nil {
+ return
+ }
+ if v.VisitPipelineFn != nil {
+ v.VisitPipelineFn(v, e)
+ } else {
+ e.Left.Accept(v)
+ for i := range e.MultiStages {
+ e.MultiStages[i].Accept(v)
+ }
+ }
+}
+
+// VisitRangeAggregation implements RootVisitor.
+func (v *DepthFirstTraversal) VisitRangeAggregation(e *RangeAggregationExpr) {
+ if e == nil {
+ return
+ }
+ if v.VisitRangeAggregationFn != nil {
+ v.VisitRangeAggregationFn(v, e)
+ } else {
+ e.Left.Accept(v)
+ }
+}
+
+// VisitVector implements RootVisitor.
+func (v *DepthFirstTraversal) VisitVector(e *VectorExpr) {
+ if e == nil {
+ return
+ }
+ if v.VisitVectorFn != nil {
+ v.VisitVectorFn(v, e)
+ }
+}
+
+// VisitVectorAggregation implements RootVisitor.
+func (v *DepthFirstTraversal) VisitVectorAggregation(e *VectorAggregationExpr) {
+ if e == nil {
+ return
+ }
+ if v.VisitVectorAggregationFn != nil {
+ v.VisitVectorAggregationFn(v, e)
+ } else {
+ e.Left.Accept(v)
+ }
+}
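
visit.go replaces the old Dispatch helpers with a configurable depth-first traversal: any Visit*Fn left nil falls back to the default descent, so a visitor only needs hooks for the node types it cares about. A small sketch complementing the test below (illustration only):

```go
package main

import (
	"fmt"

	"github.com/grafana/loki/pkg/logql/syntax"
)

func main() {
	var selectors []string
	v := &syntax.DepthFirstTraversal{
		// Only the matcher hook is set; every other node type uses the default descent.
		VisitMatchersFn: func(_ syntax.RootVisitor, e *syntax.MatchersExpr) {
			selectors = append(selectors, e.String())
		},
	}

	expr := syntax.MustParseExpr(`sum(rate({app="foo"}[1m])) / sum(rate({app="bar"}[1m]))`)
	expr.Accept(v)

	fmt.Println(selectors) // [{app="foo"} {app="bar"}]
}
```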
diff --git a/pkg/logql/syntax/visit_test.go b/pkg/logql/syntax/visit_test.go
new file mode 100644
index 0000000000000..eeb040ce83a1a
--- /dev/null
+++ b/pkg/logql/syntax/visit_test.go
@@ -0,0 +1,48 @@
+package syntax
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestDepthFirstTraversalVisitor(t *testing.T) {
+
+ visited := [][2]string{}
+
+ visitor := &DepthFirstTraversal{
+ VisitLabelParserFn: func(v RootVisitor, e *LabelParserExpr) {
+ visited = append(visited, [2]string{fmt.Sprintf("%T", e), e.String()})
+ },
+ VisitLineFilterFn: func(v RootVisitor, e *LineFilterExpr) {
+ visited = append(visited, [2]string{fmt.Sprintf("%T", e), e.String()})
+ },
+ VisitLogfmtParserFn: func(v RootVisitor, e *LogfmtParserExpr) {
+ visited = append(visited, [2]string{fmt.Sprintf("%T", e), e.String()})
+ },
+ VisitMatchersFn: func(v RootVisitor, e *MatchersExpr) {
+ visited = append(visited, [2]string{fmt.Sprintf("%T", e), e.String()})
+ },
+ }
+
+ // Only expressions that have a Visit function defined are added to the list
+ expected := [][2]string{
+ {"*syntax.MatchersExpr", `{env="prod"}`},
+ {"*syntax.LineFilterExpr", `|= "foo" or "bar"`},
+ {"*syntax.LogfmtParserExpr", `| logfmt`},
+ {"*syntax.MatchersExpr", `{env="dev"}`},
+ {"*syntax.LineFilterExpr", `|~ "(foo|bar)"`},
+ {"*syntax.LabelParserExpr", `| json`},
+ }
+
+ query := `
+ sum by (container) (min_over_time({env="prod"} |= "foo" or "bar" | logfmt | unwrap duration [1m]))
+ /
+ sum by (container) (max_over_time({env="dev"} |~ "(foo|bar)" | json | unwrap duration [1m]))
+ `
+ expr, err := ParseExpr(query)
+ require.NoError(t, err)
+ expr.Accept(visitor)
+ require.Equal(t, expected, visited)
+}
diff --git a/pkg/logql/syntax/walk.go b/pkg/logql/syntax/walk.go
index c528c9ca63437..291ec8b31036f 100644
--- a/pkg/logql/syntax/walk.go
+++ b/pkg/logql/syntax/walk.go
@@ -1,7 +1,5 @@
package syntax
-import "fmt"
-
type WalkFn = func(e Expr)
func walkAll(f WalkFn, xs ...Walkable) {
@@ -13,120 +11,3 @@ func walkAll(f WalkFn, xs ...Walkable) {
type Walkable interface {
Walk(f WalkFn)
}
-
-type AcceptVisitor interface {
- Accept(RootVisitor)
-}
-
-type RootVisitor interface {
- SampleExprVisitor
- LogSelectorExprVisitor
- StageExprVisitor
-
- VisitLogRange(*LogRange)
-}
-
-type SampleExprVisitor interface {
- VisitBinOp(*BinOpExpr)
- VisitVectorAggregation(*VectorAggregationExpr)
- VisitRangeAggregation(*RangeAggregationExpr)
- VisitLabelReplace(*LabelReplaceExpr)
- VisitLiteral(*LiteralExpr)
- VisitVector(*VectorExpr)
-}
-
-type LogSelectorExprVisitor interface {
- VisitMatchers(*MatchersExpr)
- VisitPipeline(*PipelineExpr)
- VisitLiteral(*LiteralExpr)
- VisitVector(*VectorExpr)
-}
-
-type StageExprVisitor interface {
- VisitDecolorize(*DecolorizeExpr)
- VisitDropLabels(*DropLabelsExpr)
- VisitJSONExpressionParser(*JSONExpressionParser)
- VisitKeepLabel(*KeepLabelsExpr)
- VisitLabelFilter(*LabelFilterExpr)
- VisitLabelFmt(*LabelFmtExpr)
- VisitLabelParser(*LabelParserExpr)
- VisitLineFilter(*LineFilterExpr)
- VisitLineFmt(*LineFmtExpr)
- VisitLogfmtExpressionParser(*LogfmtExpressionParser)
- VisitLogfmtParser(*LogfmtParserExpr)
-}
-
-func Dispatch(root Expr, v RootVisitor) error {
- switch e := root.(type) {
- case SampleExpr:
- DispatchSampleExpr(e, v)
- case LogSelectorExpr:
- DispatchLogSelectorExpr(e, v)
- case StageExpr:
- DispatchStageExpr(e, v)
- case *LogRange:
- v.VisitLogRange(e)
- default:
- return fmt.Errorf("unpexpected root expression type: got (%T)", e)
- }
-
- return nil
-}
-
-func DispatchSampleExpr(expr SampleExpr, v SampleExprVisitor) {
- switch e := expr.(type) {
- case *BinOpExpr:
- v.VisitBinOp(e)
- case *VectorAggregationExpr:
- v.VisitVectorAggregation(e)
- case *RangeAggregationExpr:
- v.VisitRangeAggregation(e)
- case *LabelReplaceExpr:
- v.VisitLabelReplace(e)
- case *LiteralExpr:
- v.VisitLiteral(e)
- case *VectorExpr:
- v.VisitVector(e)
- }
-}
-
-func DispatchLogSelectorExpr(expr LogSelectorExpr, v LogSelectorExprVisitor) {
- switch e := expr.(type) {
- case *PipelineExpr:
- v.VisitPipeline(e)
- case *MatchersExpr:
- v.VisitMatchers(e)
- case *VectorExpr:
- v.VisitVector(e)
- case *LiteralExpr:
- v.VisitLiteral(e)
- }
-}
-
-func DispatchStageExpr(expr StageExpr, v StageExprVisitor) {
- switch e := expr.(type) {
- case *DecolorizeExpr:
- v.VisitDecolorize(e)
- case *DropLabelsExpr:
- v.VisitDropLabels(e)
- case *JSONExpressionParser:
- v.VisitJSONExpressionParser(e)
- case *KeepLabelsExpr:
- v.VisitKeepLabel(e)
- case *LabelFilterExpr:
- v.VisitLabelFilter(e)
- case *LabelFmtExpr:
- v.VisitLabelFmt(e)
- case *LabelParserExpr:
- v.VisitLabelParser(e)
- case *LineFilterExpr:
- v.VisitLineFilter(e)
- case *LineFmtExpr:
- v.VisitLineFmt(e)
- case *LogfmtExpressionParser:
- v.VisitLogfmtExpressionParser(e)
- case *LogfmtParserExpr:
- v.VisitLogfmtParser(e)
- }
-
-}
diff --git a/pkg/logql/test_utils.go b/pkg/logql/test_utils.go
index 982fa7f5f16d0..b979dedb42327 100644
--- a/pkg/logql/test_utils.go
+++ b/pkg/logql/test_utils.go
@@ -218,17 +218,7 @@ func (m MockDownstreamer) Downstreamer(_ context.Context) Downstreamer { return
func (m MockDownstreamer) Downstream(ctx context.Context, queries []DownstreamQuery) ([]logqlmodel.Result, error) {
results := make([]logqlmodel.Result, 0, len(queries))
for _, query := range queries {
- params := NewLiteralParams(
- query.Expr.String(),
- query.Params.Start(),
- query.Params.End(),
- query.Params.Step(),
- query.Params.Interval(),
- query.Params.Direction(),
- query.Params.Limit(),
- query.Shards.Encode(),
- )
- res, err := m.Query(params).Exec(ctx)
+ res, err := m.Query(query.Params).Exec(ctx)
if err != nil {
return nil, err
}
diff --git a/pkg/logqlmodel/error.go b/pkg/logqlmodel/error.go
index 9491a8f3342c1..68ddf72cc2f2d 100644
--- a/pkg/logqlmodel/error.go
+++ b/pkg/logqlmodel/error.go
@@ -15,6 +15,7 @@ var (
ErrLimit = errors.New("limit reached while evaluating the query")
ErrIntervalLimit = errors.New("[interval] value exceeds limit")
ErrBlocked = errors.New("query blocked by policy")
+ ErrParseMatchers = errors.New("only label matchers are supported")
ErrorLabel = "__error__"
PreserveErrorLabel = "__preserve_error__"
ErrorDetailsLabel = "__error_details__"
diff --git a/pkg/logqlmodel/logqlmodel.go b/pkg/logqlmodel/logqlmodel.go
index da9d7f083f22e..8ba0e198c403a 100644
--- a/pkg/logqlmodel/logqlmodel.go
+++ b/pkg/logqlmodel/logqlmodel.go
@@ -5,8 +5,8 @@ import (
"github.com/grafana/loki/pkg/querier/queryrange/queryrangebase/definitions"
- "github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/logqlmodel/stats"
+ "github.com/grafana/loki/pkg/push"
)
// ValueTypeStreams promql.ValueType for log streams
@@ -23,7 +23,7 @@ type Result struct {
}
// Streams is promql.Value
-type Streams []logproto.Stream
+type Streams []push.Stream
func (streams Streams) Len() int { return len(streams) }
func (streams Streams) Swap(i, j int) { streams[i], streams[j] = streams[j], streams[i] }
diff --git a/pkg/logqlmodel/stats/context.go b/pkg/logqlmodel/stats/context.go
index 339d934c10eb5..33517d6cce905 100644
--- a/pkg/logqlmodel/stats/context.go
+++ b/pkg/logqlmodel/stats/context.go
@@ -61,6 +61,8 @@ const (
StatsResultCache = "stats-result"
VolumeResultCache = "volume-result"
WriteDedupeCache = "write-dedupe"
+ BloomFilterCache = "bloom-filter"
+ BloomBlocksCache = "bloom-blocks"
)
// NewContext creates a new statistics context
diff --git a/pkg/loki/config_compat.go b/pkg/loki/config_compat.go
index cd15b05f2da7e..1e4f800c46476 100644
--- a/pkg/loki/config_compat.go
+++ b/pkg/loki/config_compat.go
@@ -1,15 +1,18 @@
package loki
import (
+ "errors"
"fmt"
"github.com/grafana/loki/pkg/ingester/index"
+ frontend "github.com/grafana/loki/pkg/lokifrontend/frontend/v2"
"github.com/grafana/loki/pkg/storage/config"
)
func ValidateConfigCompatibility(c Config) error {
for _, fn := range []func(Config) error{
ensureInvertedIndexShardingCompatibility,
+ ensureProtobufEncodingForAggregationSharding,
} {
if err := fn(c); err != nil {
return err
@@ -40,3 +43,10 @@ func ensureInvertedIndexShardingCompatibility(c Config) error {
}
return nil
}
+
+func ensureProtobufEncodingForAggregationSharding(c Config) error {
+ if len(c.QueryRange.ShardAggregations) > 0 && c.Frontend.FrontendV2.Encoding != frontend.EncodingProtobuf {
+ return errors.New("shard_aggregation requires frontend.encoding=protobuf")
+ }
+ return nil
+}
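
The new check ties the shard_aggregations query-range option to protobuf frontend encoding. A hedged illustration of what it rejects (field names as used in the function above; the zero-value encoding standing in for "anything but protobuf" is an assumption):

```go
package loki

// Illustration only, assumed to sit next to config_compat.go.
func exampleShardAggregationValidation() error {
	var cfg Config
	cfg.QueryRange.ShardAggregations = []string{"quantile_over_time"}
	// Frontend encoding left at its (non-protobuf) zero value, so this returns
	// "shard_aggregation requires frontend.encoding=protobuf".
	return ensureProtobufEncodingForAggregationSharding(cfg)
}
```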
diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go
index 09f95d794ddc3..c3cdea5392d80 100644
--- a/pkg/loki/loki.go
+++ b/pkg/loki/loki.go
@@ -323,6 +323,7 @@ type Loki struct {
clientMetrics storage.ClientMetrics
deleteClientMetrics *deletion.DeleteRequestClientMetrics
+ Tee distributor.Tee
HTTPAuthMiddleware middleware.Interface
Codec Codec
diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go
index 4c14a4872655f..797e01a098d94 100644
--- a/pkg/loki/modules.go
+++ b/pkg/loki/modules.go
@@ -6,6 +6,7 @@ import (
"fmt"
"hash/fnv"
"math"
+ "net"
"net/http"
"net/http/httputil"
"net/url"
@@ -318,6 +319,7 @@ func (t *Loki) initDistributor() (services.Service, error) {
t.Overrides,
prometheus.DefaultRegisterer,
t.Cfg.MetricsNamespace,
+ t.Tee,
logger,
)
if err != nil {
@@ -519,6 +521,7 @@ func (t *Loki) initQuerier() (services.Service, error) {
internalMiddlewares := []queryrangebase.Middleware{
serverutil.RecoveryMiddleware,
queryrange.Instrument{Metrics: t.Metrics},
+ queryrange.Tracer{},
}
if t.supportIndexDeleteRequest() && t.Cfg.CompactorConfig.RetentionEnabled {
internalMiddlewares = append(
@@ -529,6 +532,7 @@ func (t *Loki) initQuerier() (services.Service, error) {
internalHandler := queryrangebase.MergeMiddlewares(internalMiddlewares...).Wrap(handler)
svc, err := querier.InitWorkerService(
+ logger,
querierWorkerServiceConfig,
prometheus.DefaultRegisterer,
internalHandler,
@@ -577,7 +581,7 @@ func (t *Loki) initIngester() (_ services.Service, err error) {
t.Server.HTTP.Methods("POST", "GET", "DELETE").Path("/ingester/prepare_shutdown").Handler(
httpMiddleware.Wrap(http.HandlerFunc(t.Ingester.PrepareShutdown)),
)
- t.Server.HTTP.Methods("POST").Path("/ingester/shutdown").Handler(
+ t.Server.HTTP.Methods("POST", "GET").Path("/ingester/shutdown").Handler(
httpMiddleware.Wrap(http.HandlerFunc(t.Ingester.ShutdownHandler)),
)
return t.Ingester, nil
@@ -800,7 +804,9 @@ func (t *Loki) initIngesterQuerier() (_ services.Service, err error) {
// Placeholder limits type to pass to cortex frontend
type disabledShuffleShardingLimits struct{}
-func (disabledShuffleShardingLimits) MaxQueriersPerUser(_ string) int { return 0 }
+func (disabledShuffleShardingLimits) MaxQueriersPerUser(_ string) uint { return 0 }
+
+func (disabledShuffleShardingLimits) MaxQueryCapacity(_ string) float64 { return 0 }
func (t *Loki) initQueryFrontendMiddleware() (_ services.Service, err error) {
level.Debug(util_log.Logger).Log("msg", "initializing query frontend tripperware")
@@ -863,7 +869,7 @@ func (t *Loki) compactorAddress() (string, bool, error) {
legacyReadMode := t.Cfg.LegacyReadTarget && t.Cfg.isModuleEnabled(Read)
if t.Cfg.isModuleEnabled(All) || legacyReadMode || t.Cfg.isModuleEnabled(Backend) {
// In single binary or read modes, this module depends on Server
- return fmt.Sprintf("%s:%d", t.Cfg.Server.GRPCListenAddress, t.Cfg.Server.GRPCListenPort), true, nil
+ return net.JoinHostPort(t.Cfg.Server.GRPCListenAddress, strconv.Itoa(t.Cfg.Server.GRPCListenPort)), true, nil
}
if t.Cfg.Common.CompactorAddress == "" && t.Cfg.Common.CompactorGRPCAddress == "" {
@@ -1262,7 +1268,7 @@ func (t *Loki) initBloomGateway() (services.Service, error) {
shuffleSharding := bloomgateway.NewShuffleShardingStrategy(t.bloomGatewayRingManager.Ring, t.bloomGatewayRingManager.RingLifecycler, t.Overrides, logger)
- gateway, err := bloomgateway.New(t.Cfg.BloomGateway, t.Cfg.SchemaConfig, t.Cfg.StorageConfig, shuffleSharding, t.clientMetrics, logger, prometheus.DefaultRegisterer)
+ gateway, err := bloomgateway.New(t.Cfg.BloomGateway, t.Cfg.SchemaConfig, t.Cfg.StorageConfig, t.Overrides, shuffleSharding, t.clientMetrics, logger, prometheus.DefaultRegisterer)
if err != nil {
return nil, err
}
@@ -1331,7 +1337,15 @@ func (t *Loki) initIndexGateway() (services.Service, error) {
var bloomQuerier indexgateway.BloomQuerier
if t.Cfg.BloomGateway.Enabled {
- bloomGatewayClient, err := bloomgateway.NewGatewayClient(t.Cfg.BloomGateway.Client, t.Overrides, prometheus.DefaultRegisterer, logger, t.Cfg.MetricsNamespace)
+ bloomGatewayClient, err := bloomgateway.NewGatewayClient(
+ t.Cfg.BloomGateway.Client,
+ t.Overrides,
+ prometheus.DefaultRegisterer,
+ logger,
+ t.Cfg.MetricsNamespace,
+ t.cacheGenerationLoader,
+ t.Cfg.CompactorConfig.RetentionEnabled,
+ )
if err != nil {
return nil, err
}
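
Several addresses in modules.go above (and in frontend/v2 further down) switch from fmt.Sprintf("%s:%d", ...) to net.JoinHostPort, which brackets IPv6 literals so the result stays dialable:

```go
package main

import (
	"fmt"
	"net"
	"strconv"
)

func main() {
	fmt.Printf("%s:%d\n", "::1", 9095)                        // "::1:9095"   (ambiguous for IPv6)
	fmt.Println(net.JoinHostPort("::1", strconv.Itoa(9095))) // "[::1]:9095" (unambiguous, dialable)
}
```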
diff --git a/pkg/lokifrontend/frontend/transport/handler.go b/pkg/lokifrontend/frontend/transport/handler.go
index 06f1ebe1c7b63..03332ee046771 100644
--- a/pkg/lokifrontend/frontend/transport/handler.go
+++ b/pkg/lokifrontend/frontend/transport/handler.go
@@ -15,7 +15,6 @@ import (
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/grafana/dskit/httpgrpc"
- "github.com/grafana/dskit/httpgrpc/server"
"github.com/grafana/dskit/user"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
@@ -26,6 +25,7 @@ import (
querier_stats "github.com/grafana/loki/pkg/querier/stats"
"github.com/grafana/loki/pkg/util"
util_log "github.com/grafana/loki/pkg/util/log"
+ "github.com/grafana/loki/pkg/util/server"
)
const (
@@ -134,7 +134,7 @@ func (f *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
queryResponseTime := time.Since(startTime)
if err != nil {
- writeError(w, err)
+ server.WriteError(err, w)
return
}
@@ -230,20 +230,6 @@ func formatQueryString(queryString url.Values) (fields []interface{}) {
return fields
}
-func writeError(w http.ResponseWriter, err error) {
- switch err {
- case context.Canceled:
- err = errCanceled
- case context.DeadlineExceeded:
- err = errDeadlineExceeded
- default:
- if util.IsRequestBodyTooLarge(err) {
- err = errRequestEntityTooLarge
- }
- }
- server.WriteError(w, err)
-}
-
func writeServiceTimingHeader(queryResponseTime time.Duration, headers http.Header, stats *querier_stats.Stats) {
if stats != nil {
parts := make([]string, 0)
@@ -277,7 +263,7 @@ func (a *grpcRoundTripperToHandlerAdapter) Do(ctx context.Context, req queryrang
return nil, err
}
- grpcReq, err := server.HTTPRequest(httpReq)
+ grpcReq, err := httpgrpc.FromHTTPRequest(httpReq)
if err != nil {
return nil, fmt.Errorf("cannot convert HTTP request to gRPC request: %w", err)
}
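
handler.go drops its local writeError (and its test, removed below) in favour of the shared util/server helper; note the argument order, WriteError(err, w). A hedged sketch of a caller (doQuery is hypothetical, and the status-mapping claim is an assumption inferred from the deletion):

```go
package transport

import (
	"net/http"

	"github.com/grafana/loki/pkg/util/server"
)

// Illustration only; doQuery stands in for real request handling.
func handleExample(w http.ResponseWriter, r *http.Request, doQuery func(*http.Request) error) {
	if err := doQuery(r); err != nil {
		// server.WriteError(err, w) presumably performs the status mapping the
		// deleted local writeError did (cancellation, deadline, body too large).
		server.WriteError(err, w)
		return
	}
	w.WriteHeader(http.StatusOK)
}
```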
diff --git a/pkg/lokifrontend/frontend/transport/handler_test.go b/pkg/lokifrontend/frontend/transport/handler_test.go
deleted file mode 100644
index 6f25963626712..0000000000000
--- a/pkg/lokifrontend/frontend/transport/handler_test.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package transport
-
-import (
- "context"
- "net/http"
- "net/http/httptest"
- "testing"
-
- "github.com/grafana/dskit/httpgrpc"
- "github.com/pkg/errors"
- "github.com/stretchr/testify/require"
-)
-
-func TestWriteError(t *testing.T) {
- for _, test := range []struct {
- status int
- err error
- }{
- {http.StatusInternalServerError, errors.New("unknown")},
- {http.StatusGatewayTimeout, context.DeadlineExceeded},
- {StatusClientClosedRequest, context.Canceled},
- {http.StatusBadRequest, httpgrpc.Errorf(http.StatusBadRequest, "")},
- } {
- t.Run(test.err.Error(), func(t *testing.T) {
- w := httptest.NewRecorder()
- writeError(w, test.err)
- require.Equal(t, test.status, w.Result().StatusCode)
- })
- }
-}
diff --git a/pkg/lokifrontend/frontend/v1/frontend.go b/pkg/lokifrontend/frontend/v1/frontend.go
index ff32cbf7b98f0..cf17b62b03186 100644
--- a/pkg/lokifrontend/frontend/v1/frontend.go
+++ b/pkg/lokifrontend/frontend/v1/frontend.go
@@ -21,9 +21,9 @@ import (
"github.com/grafana/loki/pkg/lokifrontend/frontend/v1/frontendv1pb"
"github.com/grafana/loki/pkg/querier/stats"
"github.com/grafana/loki/pkg/queue"
+ "github.com/grafana/loki/pkg/scheduler/limits"
"github.com/grafana/loki/pkg/util"
lokigrpc "github.com/grafana/loki/pkg/util/httpgrpc"
- "github.com/grafana/loki/pkg/util/validation"
)
var errTooManyRequest = httpgrpc.Errorf(http.StatusTooManyRequests, "too many outstanding requests")
@@ -42,7 +42,10 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
type Limits interface {
// Returns max queriers to use per tenant, or 0 if shuffle sharding is disabled.
- MaxQueriersPerUser(user string) int
+ MaxQueriersPerUser(user string) uint
+
+ // MaxQueryCapacity returns how much of the available query capacity can be used by this user.
+ MaxQueryCapacity(user string) float64
}
// Frontend queues HTTP requests, dispatches them to backends, and handles retries
@@ -80,12 +83,12 @@ type request struct {
}
// New creates a new frontend. Frontend implements service, and must be started and stopped.
-func New(cfg Config, limits Limits, log log.Logger, registerer prometheus.Registerer, metricsNamespace string) (*Frontend, error) {
+func New(cfg Config, frontendLimits Limits, log log.Logger, registerer prometheus.Registerer, metricsNamespace string) (*Frontend, error) {
queueMetrics := queue.NewMetrics(registerer, metricsNamespace, "query_frontend")
f := &Frontend{
cfg: cfg,
log: log,
- limits: limits,
+ limits: frontendLimits,
queueMetrics: queueMetrics,
queueDuration: promauto.With(registerer).NewHistogram(prometheus.HistogramOpts{
Namespace: metricsNamespace,
@@ -95,7 +98,7 @@ func New(cfg Config, limits Limits, log log.Logger, registerer prometheus.Regist
}),
}
- f.requestQueue = queue.NewRequestQueue(cfg.MaxOutstandingPerTenant, cfg.QuerierForgetDelay, queueMetrics)
+ f.requestQueue = queue.NewRequestQueue(cfg.MaxOutstandingPerTenant, cfg.QuerierForgetDelay, limits.NewQueueLimits(frontendLimits), queueMetrics)
f.activeUsers = util.NewActiveUsersCleanupWithDefaultValues(f.cleanupInactiveUserMetrics)
var err error
@@ -312,13 +315,10 @@ func (f *Frontend) queueRequest(ctx context.Context, req *request) error {
req.enqueueTime = now
req.queueSpan, _ = opentracing.StartSpanFromContext(ctx, "queued")
- // aggregate the max queriers limit in the case of a multi tenant query
- maxQueriers := validation.SmallestPositiveNonZeroIntPerTenant(tenantIDs, f.limits.MaxQueriersPerUser)
-
joinedTenantID := tenant.JoinTenantIDs(tenantIDs)
f.activeUsers.UpdateUserTimestamp(joinedTenantID, now)
- err = f.requestQueue.Enqueue(joinedTenantID, nil, req, maxQueriers, nil)
+ err = f.requestQueue.Enqueue(joinedTenantID, nil, req, nil)
if err == queue.ErrTooManyRequests {
return errTooManyRequest
}
diff --git a/pkg/lokifrontend/frontend/v1/frontend_test.go b/pkg/lokifrontend/frontend/v1/frontend_test.go
index f715d3e8f5fd0..a10a55b37984f 100644
--- a/pkg/lokifrontend/frontend/v1/frontend_test.go
+++ b/pkg/lokifrontend/frontend/v1/frontend_test.go
@@ -35,6 +35,7 @@ import (
"github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"
querier_worker "github.com/grafana/loki/pkg/querier/worker"
"github.com/grafana/loki/pkg/queue"
+ "github.com/grafana/loki/pkg/scheduler/limits"
"github.com/grafana/loki/pkg/util/constants"
)
@@ -135,7 +136,7 @@ func TestFrontendCheckReady(t *testing.T) {
qm := queue.NewMetrics(nil, constants.Loki, "query_frontend")
f := &Frontend{
log: log.NewNopLogger(),
- requestQueue: queue.NewRequestQueue(5, 0, qm),
+ requestQueue: queue.NewRequestQueue(5, 0, limits.NewQueueLimits(nil), qm),
}
for i := 0; i < tt.connectedClients; i++ {
f.requestQueue.RegisterConsumerConnection("test")
@@ -243,7 +244,7 @@ func testFrontend(t *testing.T, config Config, handler queryrangebase.Handler, t
httpListen, err := net.Listen("tcp", "localhost:0")
require.NoError(t, err)
- v1, err := New(config, limits{}, logger, reg, constants.Loki)
+ v1, err := New(config, mockLimits{}, logger, reg, constants.Loki)
require.NoError(t, err)
require.NotNil(t, v1)
require.NoError(t, services.StartAndAwaitRunning(context.Background(), v1))
@@ -293,10 +294,15 @@ func defaultFrontendConfig() Config {
return config
}
-type limits struct {
- queriers int
+type mockLimits struct {
+ queriers uint
+ queryCapacity float64
}
-func (l limits) MaxQueriersPerUser(_ string) int {
+func (l mockLimits) MaxQueriersPerUser(_ string) uint {
return l.queriers
}
+
+func (l mockLimits) MaxQueryCapacity(_ string) float64 {
+ return l.queryCapacity
+}
diff --git a/pkg/lokifrontend/frontend/v1/queue_test.go b/pkg/lokifrontend/frontend/v1/queue_test.go
index efc04e338981f..a6f380afd492d 100644
--- a/pkg/lokifrontend/frontend/v1/queue_test.go
+++ b/pkg/lokifrontend/frontend/v1/queue_test.go
@@ -24,7 +24,7 @@ import (
func setupFrontend(t *testing.T, config Config) *Frontend {
logger := log.NewNopLogger()
- frontend, err := New(config, limits{queriers: 3}, logger, nil, constants.Loki)
+ frontend, err := New(config, mockLimits{queriers: 3}, logger, nil, constants.Loki)
require.NoError(t, err)
t.Cleanup(func() {
diff --git a/pkg/lokifrontend/frontend/v2/frontend.go b/pkg/lokifrontend/frontend/v2/frontend.go
index 4fe591a346a9b..99e3e05ad83c9 100644
--- a/pkg/lokifrontend/frontend/v2/frontend.go
+++ b/pkg/lokifrontend/frontend/v2/frontend.go
@@ -5,7 +5,9 @@ import (
"flag"
"fmt"
"math/rand"
+ "net"
"net/http"
+ "strconv"
"sync"
"time"
@@ -14,7 +16,6 @@ import (
"github.com/grafana/dskit/flagext"
"github.com/grafana/dskit/grpcclient"
"github.com/grafana/dskit/httpgrpc"
- "github.com/grafana/dskit/httpgrpc/server"
"github.com/grafana/dskit/netutil"
"github.com/grafana/dskit/ring"
"github.com/grafana/dskit/services"
@@ -137,7 +138,7 @@ type enqueueResult struct {
func NewFrontend(cfg Config, ring ring.ReadRing, log log.Logger, reg prometheus.Registerer, codec transport.Codec, metricsNamespace string) (*Frontend, error) {
requestsCh := make(chan *frontendRequest)
- schedulerWorkers, err := newFrontendSchedulerWorkers(cfg, fmt.Sprintf("%s:%d", cfg.Addr, cfg.Port), ring, requestsCh, log)
+ schedulerWorkers, err := newFrontendSchedulerWorkers(cfg, net.JoinHostPort(cfg.Addr, strconv.Itoa(cfg.Port)), ring, requestsCh, log)
if err != nil {
return nil, err
}
@@ -317,7 +318,7 @@ func (f *Frontend) Do(ctx context.Context, req queryrangebase.Request) (queryran
return nil, fmt.Errorf("cannot convert request to HTTP request: %w", err)
}
- freq.request, err = server.HTTPRequest(httpReq)
+ freq.request, err = httpgrpc.FromHTTPRequest(httpReq)
if err != nil {
return nil, fmt.Errorf("cannot convert HTTP request to gRPC request: %w", err)
}
diff --git a/pkg/lokifrontend/frontend/v2/frontend_test.go b/pkg/lokifrontend/frontend/v2/frontend_test.go
index 3ab1028e96138..9a87c5ff1c7cc 100644
--- a/pkg/lokifrontend/frontend/v2/frontend_test.go
+++ b/pkg/lokifrontend/frontend/v2/frontend_test.go
@@ -19,7 +19,9 @@ import (
"go.uber.org/atomic"
"google.golang.org/grpc"
+ "github.com/grafana/loki/pkg/logql/syntax"
"github.com/grafana/loki/pkg/lokifrontend/frontend/v2/frontendv2pb"
+ "github.com/grafana/loki/pkg/querier/plan"
"github.com/grafana/loki/pkg/querier/queryrange"
"github.com/grafana/loki/pkg/querier/stats"
"github.com/grafana/loki/pkg/scheduler/schedulerpb"
@@ -29,7 +31,7 @@ import (
const testFrontendWorkerConcurrency = 5
-func setupFrontend(t *testing.T, schedulerReplyFunc func(f *Frontend, msg *schedulerpb.FrontendToScheduler) *schedulerpb.SchedulerToFrontend) (*Frontend, *mockScheduler) {
+func setupFrontend(t *testing.T, cfg Config, schedulerReplyFunc func(f *Frontend, msg *schedulerpb.FrontendToScheduler) *schedulerpb.SchedulerToFrontend) (*Frontend, *mockScheduler) {
l, err := net.Listen("tcp", "")
require.NoError(t, err)
@@ -41,8 +43,6 @@ func setupFrontend(t *testing.T, schedulerReplyFunc func(f *Frontend, msg *sched
grpcPort, err := strconv.Atoi(p)
require.NoError(t, err)
- cfg := Config{}
- flagext.DefaultValues(&cfg)
cfg.SchedulerAddress = l.Addr().String()
cfg.WorkerConcurrency = testFrontendWorkerConcurrency
cfg.Addr = h
@@ -102,7 +102,9 @@ func TestFrontendBasicWorkflow(t *testing.T) {
userID = "test"
)
- f, _ := setupFrontend(t, func(f *Frontend, msg *schedulerpb.FrontendToScheduler) *schedulerpb.SchedulerToFrontend {
+ cfg := Config{}
+ flagext.DefaultValues(&cfg)
+ f, _ := setupFrontend(t, cfg, func(f *Frontend, msg *schedulerpb.FrontendToScheduler) *schedulerpb.SchedulerToFrontend {
// We cannot call QueryResult directly, as Frontend is not yet waiting for the response.
// It first needs to be told that enqueuing has succeeded.
go sendResponseWithDelay(f, 100*time.Millisecond, userID, msg.QueryID, &httpgrpc.HTTPResponse{
@@ -119,6 +121,41 @@ func TestFrontendBasicWorkflow(t *testing.T) {
require.Equal(t, []byte(body), resp.Body)
}
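+
+// TestFrontendBasicWorkflowProto exercises the frontend request/response round trip with the protobuf response encoding enabled.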
+func TestFrontendBasicWorkflowProto(t *testing.T) {
+ const (
+ userID = "test"
+ )
+
+ ctx := user.InjectOrgID(context.Background(), userID)
+
+ req := &queryrange.LokiRequest{
+ Query: `{foo="bar"} | json`,
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`{foo="bar"} | json`),
+ },
+ }
+
+ resp, err := queryrange.NewEmptyResponse(req)
+ require.NoError(t, err)
+ httpReq := &httpgrpc.HTTPRequest{Url: "/loki/api/v1/query_range"}
+ httpResp, err := queryrange.DefaultCodec.EncodeHTTPGrpcResponse(ctx, httpReq, resp)
+ require.NoError(t, err)
+
+ cfg := Config{}
+ flagext.DefaultValues(&cfg)
+ cfg.Encoding = EncodingProtobuf
+ f, _ := setupFrontend(t, cfg, func(f *Frontend, msg *schedulerpb.FrontendToScheduler) *schedulerpb.SchedulerToFrontend {
+ // We cannot call QueryResult directly, as Frontend is not yet waiting for the response.
+ // It first needs to be told that enqueuing has succeeded.
+ go sendResponseWithDelay(f, 100*time.Millisecond, userID, msg.QueryID, httpResp)
+
+ return &schedulerpb.SchedulerToFrontend{Status: schedulerpb.OK}
+ })
+ actualResp, err := f.Do(ctx, req)
+ require.NoError(t, err)
+ require.Equal(t, resp.(*queryrange.LokiResponse).Data, actualResp.(*queryrange.LokiResponse).Data)
+}
+
func TestFrontendRetryEnqueue(t *testing.T) {
// Frontend uses worker concurrency to compute number of retries. We use one less failure.
failures := atomic.NewInt64(testFrontendWorkerConcurrency - 1)
@@ -127,7 +164,9 @@ func TestFrontendRetryEnqueue(t *testing.T) {
userID = "test"
)
- f, _ := setupFrontend(t, func(f *Frontend, msg *schedulerpb.FrontendToScheduler) *schedulerpb.SchedulerToFrontend {
+ cfg := Config{}
+ flagext.DefaultValues(&cfg)
+ f, _ := setupFrontend(t, cfg, func(f *Frontend, msg *schedulerpb.FrontendToScheduler) *schedulerpb.SchedulerToFrontend {
fail := failures.Dec()
if fail >= 0 {
return &schedulerpb.SchedulerToFrontend{Status: schedulerpb.SHUTTING_DOWN}
@@ -145,7 +184,9 @@ func TestFrontendRetryEnqueue(t *testing.T) {
}
func TestFrontendEnqueueFailure(t *testing.T) {
- f, _ := setupFrontend(t, func(f *Frontend, msg *schedulerpb.FrontendToScheduler) *schedulerpb.SchedulerToFrontend {
+ cfg := Config{}
+ flagext.DefaultValues(&cfg)
+ f, _ := setupFrontend(t, cfg, func(f *Frontend, msg *schedulerpb.FrontendToScheduler) *schedulerpb.SchedulerToFrontend {
return &schedulerpb.SchedulerToFrontend{Status: schedulerpb.SHUTTING_DOWN}
})
@@ -155,7 +196,9 @@ func TestFrontendEnqueueFailure(t *testing.T) {
}
func TestFrontendCancellation(t *testing.T) {
- f, ms := setupFrontend(t, nil)
+ cfg := Config{}
+ flagext.DefaultValues(&cfg)
+ f, ms := setupFrontend(t, cfg, nil)
ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
defer cancel()
@@ -184,7 +227,9 @@ func TestFrontendCancellation(t *testing.T) {
// all the frontend workers thus not reaching the scheduler as well.
// Issue: https://github.com/grafana/loki/issues/5132
func TestFrontendWorkerCancellation(t *testing.T) {
- f, ms := setupFrontend(t, nil)
+ cfg := Config{}
+ flagext.DefaultValues(&cfg)
+ f, ms := setupFrontend(t, cfg, nil)
ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
defer cancel()
@@ -219,7 +264,9 @@ func TestFrontendWorkerCancellation(t *testing.T) {
}
func TestFrontendFailedCancellation(t *testing.T) {
- f, ms := setupFrontend(t, nil)
+ cfg := Config{}
+ flagext.DefaultValues(&cfg)
+ f, ms := setupFrontend(t, cfg, nil)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@@ -258,7 +305,9 @@ func TestFrontendFailedCancellation(t *testing.T) {
func TestFrontendStoppingWaitsForEmptyInflightRequests(t *testing.T) {
delayResponse := 10 * time.Millisecond
- f, _ := setupFrontend(t, func(f *Frontend, msg *schedulerpb.FrontendToScheduler) *schedulerpb.SchedulerToFrontend {
+ cfg := Config{}
+ flagext.DefaultValues(&cfg)
+ f, _ := setupFrontend(t, cfg, func(f *Frontend, msg *schedulerpb.FrontendToScheduler) *schedulerpb.SchedulerToFrontend {
// We cannot call QueryResult directly, as Frontend is not yet waiting for the response.
// It first needs to be told that enqueuing has succeeded.
go sendResponseWithDelay(f, 2*delayResponse, "test", msg.QueryID, &httpgrpc.HTTPResponse{
@@ -296,7 +345,9 @@ func TestFrontendStoppingWaitsForEmptyInflightRequests(t *testing.T) {
func TestFrontendShuttingDownLetsSubRequestsPass(t *testing.T) {
delayResponse := 100 * time.Millisecond
- f, _ := setupFrontend(t, func(f *Frontend, msg *schedulerpb.FrontendToScheduler) *schedulerpb.SchedulerToFrontend {
+ cfg := Config{}
+ flagext.DefaultValues(&cfg)
+ f, _ := setupFrontend(t, cfg, func(f *Frontend, msg *schedulerpb.FrontendToScheduler) *schedulerpb.SchedulerToFrontend {
// We cannot call QueryResult directly, as Frontend is not yet waiting for the response.
// It first needs to be told that enqueuing has succeeded.
go sendResponseWithDelay(f, delayResponse, "test", msg.QueryID, &httpgrpc.HTTPResponse{
diff --git a/pkg/querier/http.go b/pkg/querier/http.go
index 1ecde15626ecb..dc29c2f61e04f 100644
--- a/pkg/querier/http.go
+++ b/pkg/querier/http.go
@@ -69,7 +69,7 @@ func NewQuerierAPI(cfg Config, querier Querier, limits Limits, logger log.Logger
// RangeQueryHandler is a http.HandlerFunc for range queries and legacy log queries
func (q *QuerierAPI) RangeQueryHandler(ctx context.Context, req *queryrange.LokiRequest) (logqlmodel.Result, error) {
- if err := q.validateMaxEntriesLimits(ctx, req.Query, req.Limit); err != nil {
+ if err := q.validateMaxEntriesLimits(ctx, req.Plan.AST, req.Limit); err != nil {
return logqlmodel.Result{}, err
}
@@ -84,7 +84,7 @@ func (q *QuerierAPI) RangeQueryHandler(ctx context.Context, req *queryrange.Loki
// InstantQueryHandler is a http.HandlerFunc for instant queries.
func (q *QuerierAPI) InstantQueryHandler(ctx context.Context, req *queryrange.LokiInstantRequest) (logqlmodel.Result, error) {
- if err := q.validateMaxEntriesLimits(ctx, req.Query, req.Limit); err != nil {
+ if err := q.validateMaxEntriesLimits(ctx, req.Plan.AST, req.Limit); err != nil {
return logqlmodel.Result{}, err
}
@@ -343,17 +343,12 @@ func (q *QuerierAPI) VolumeHandler(ctx context.Context, req *logproto.VolumeRequ
return resp, nil
}
-func (q *QuerierAPI) validateMaxEntriesLimits(ctx context.Context, query string, limit uint32) error {
+func (q *QuerierAPI) validateMaxEntriesLimits(ctx context.Context, expr syntax.Expr, limit uint32) error {
tenantIDs, err := tenant.TenantIDs(ctx)
if err != nil {
return httpgrpc.Errorf(http.StatusBadRequest, err.Error())
}
- expr, err := syntax.ParseExpr(query)
- if err != nil {
- return err
- }
-
// entry limit does not apply to metric queries.
if _, ok := expr.(syntax.SampleExpr); ok {
return nil
diff --git a/pkg/querier/http_test.go b/pkg/querier/http_test.go
index 5b121ad891949..8e3ce9b90694d 100644
--- a/pkg/querier/http_test.go
+++ b/pkg/querier/http_test.go
@@ -31,7 +31,14 @@ func TestTailHandler(t *testing.T) {
api := NewQuerierAPI(mockQuerierConfig(), nil, limits, log.NewNopLogger())
- req, err := http.NewRequest("GET", "/", nil)
+ req, err := http.NewRequest("GET", `/`, nil)
+ require.NoError(t, err)
+ q := req.URL.Query()
+ q.Add("query", `{app="loki"}`)
+ req.URL.RawQuery = q.Encode()
+ err = req.ParseForm()
+ require.NoError(t, err)
+
ctx := user.InjectOrgID(req.Context(), "1|2")
req = req.WithContext(ctx)
require.NoError(t, err)
@@ -156,15 +163,15 @@ func TestSeriesHandler(t *testing.T) {
return &logproto.SeriesResponse{
Series: []logproto.SeriesIdentifier{
{
- Labels: map[string]string{
- "a": "1",
- "b": "2",
+ Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "a", Value: "1"},
+ {Key: "b", Value: "2"},
},
},
{
- Labels: map[string]string{
- "c": "3",
- "d": "4",
+ Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "c", Value: "3"},
+ {Key: "d", Value: "4"},
},
},
},
diff --git a/pkg/querier/multi_tenant_querier.go b/pkg/querier/multi_tenant_querier.go
index f4881df48a6d7..2849830141167 100644
--- a/pkg/querier/multi_tenant_querier.go
+++ b/pkg/querier/multi_tenant_querier.go
@@ -2,7 +2,9 @@ package querier
import (
"context"
+ "fmt"
+ "github.com/grafana/loki/pkg/querier/plan"
"github.com/grafana/loki/pkg/storage/stores/index/seriesvolume"
"github.com/go-kit/log"
@@ -53,6 +55,14 @@ func (q *MultiTenantQuerier) SelectLogs(ctx context.Context, params logql.Select
matchedTenants, filteredMatchers := filterValuesByMatchers(defaultTenantLabel, tenantIDs, selector.Matchers()...)
params.Selector = replaceMatchers(selector, filteredMatchers).String()
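+ // Re-parse the rewritten selector so the query plan reflects the filtered matchers.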
+ parsed, err := syntax.ParseLogSelector(params.Selector, true)
+ if err != nil {
+ return nil, fmt.Errorf("log selector is invalid after matcher update: %w", err)
+ }
+ params.Plan = &plan.QueryPlan{
+ AST: parsed,
+ }
+
iters := make([]iter.EntryIterator, len(matchedTenants))
i := 0
for id := range matchedTenants {
@@ -150,9 +160,10 @@ func (q *MultiTenantQuerier) Series(ctx context.Context, req *logproto.SeriesReq
return nil, err
}
- for _, s := range resp.GetSeries() {
- if _, ok := s.Labels[defaultTenantLabel]; !ok {
- s.Labels[defaultTenantLabel] = id
+ for i := range resp.GetSeries() {
+ s := &resp.Series[i]
+ if s.Get(defaultTenantLabel) == "" {
+ s.Labels = append(s.Labels, logproto.SeriesIdentifier_LabelsEntry{Key: defaultTenantLabel, Value: id})
}
}
diff --git a/pkg/querier/multi_tenant_querier_test.go b/pkg/querier/multi_tenant_querier_test.go
index 0a74fe957677b..48d9074106a42 100644
--- a/pkg/querier/multi_tenant_querier_test.go
+++ b/pkg/querier/multi_tenant_querier_test.go
@@ -21,6 +21,7 @@ import (
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/logql"
"github.com/grafana/loki/pkg/logql/syntax"
+ "github.com/grafana/loki/pkg/querier/plan"
)
func TestMultiTenantQuerier_SelectLogs(t *testing.T) {
@@ -90,6 +91,9 @@ func TestMultiTenantQuerier_SelectLogs(t *testing.T) {
Shards: nil,
Start: time.Unix(0, 1),
End: time.Unix(0, time.Now().UnixNano()),
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(tc.selector),
+ },
}}
iter, err := multiTenantQuerier.SelectLogs(ctx, params)
require.NoError(t, err)
@@ -161,6 +165,9 @@ func TestMultiTenantQuerier_SelectSamples(t *testing.T) {
ctx := user.InjectOrgID(context.Background(), tc.orgID)
params := logql.SelectSampleParams{SampleQueryRequest: &logproto.SampleQueryRequest{
Selector: tc.selector,
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(tc.selector),
+ },
}}
iter, err := multiTenantQuerier.SelectSamples(ctx, params)
require.NoError(t, err)
@@ -191,6 +198,9 @@ func TestMultiTenantQuerier_TenantFilter(t *testing.T) {
t.Run(tc.selector, func(t *testing.T) {
params := logql.SelectSampleParams{SampleQueryRequest: &logproto.SampleQueryRequest{
Selector: tc.selector,
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(tc.selector),
+ },
}}
_, updatedSelector, err := removeTenantSelector(params, []string{})
require.NoError(t, err)
@@ -355,42 +365,42 @@ func TestMultiTenantQuerierSeries(t *testing.T) {
desc: "two tenantIDs",
orgID: "1|2",
expectedSeries: []logproto.SeriesIdentifier{
- {Labels: map[string]string{"__tenant_id__": "1", "a": "1", "b": "2"}},
- {Labels: map[string]string{"__tenant_id__": "1", "a": "1", "b": "3"}},
- {Labels: map[string]string{"__tenant_id__": "1", "a": "1", "b": "4"}},
- {Labels: map[string]string{"__tenant_id__": "1", "a": "1", "b": "5"}},
- {Labels: map[string]string{"__tenant_id__": "2", "a": "1", "b": "2"}},
- {Labels: map[string]string{"__tenant_id__": "2", "a": "1", "b": "3"}},
- {Labels: map[string]string{"__tenant_id__": "2", "a": "1", "b": "4"}},
- {Labels: map[string]string{"__tenant_id__": "2", "a": "1", "b": "5"}},
+ {Labels: logproto.MustNewSeriesEntries("a", "1", "b", "2", "__tenant_id__", "1")},
+ {Labels: logproto.MustNewSeriesEntries("a", "1", "b", "3", "__tenant_id__", "1")},
+ {Labels: logproto.MustNewSeriesEntries("a", "1", "b", "4", "__tenant_id__", "1")},
+ {Labels: logproto.MustNewSeriesEntries("a", "1", "b", "5", "__tenant_id__", "1")},
+ {Labels: logproto.MustNewSeriesEntries("a", "1", "b", "2", "__tenant_id__", "2")},
+ {Labels: logproto.MustNewSeriesEntries("a", "1", "b", "3", "__tenant_id__", "2")},
+ {Labels: logproto.MustNewSeriesEntries("a", "1", "b", "4", "__tenant_id__", "2")},
+ {Labels: logproto.MustNewSeriesEntries("a", "1", "b", "5", "__tenant_id__", "2")},
},
},
{
desc: "three tenantIDs",
orgID: "1|2|3",
expectedSeries: []logproto.SeriesIdentifier{
- {Labels: map[string]string{"__tenant_id__": "1", "a": "1", "b": "2"}},
- {Labels: map[string]string{"__tenant_id__": "1", "a": "1", "b": "3"}},
- {Labels: map[string]string{"__tenant_id__": "1", "a": "1", "b": "4"}},
- {Labels: map[string]string{"__tenant_id__": "1", "a": "1", "b": "5"}},
- {Labels: map[string]string{"__tenant_id__": "2", "a": "1", "b": "2"}},
- {Labels: map[string]string{"__tenant_id__": "2", "a": "1", "b": "3"}},
- {Labels: map[string]string{"__tenant_id__": "2", "a": "1", "b": "4"}},
- {Labels: map[string]string{"__tenant_id__": "2", "a": "1", "b": "5"}},
- {Labels: map[string]string{"__tenant_id__": "3", "a": "1", "b": "2"}},
- {Labels: map[string]string{"__tenant_id__": "3", "a": "1", "b": "3"}},
- {Labels: map[string]string{"__tenant_id__": "3", "a": "1", "b": "4"}},
- {Labels: map[string]string{"__tenant_id__": "3", "a": "1", "b": "5"}},
+ {Labels: logproto.MustNewSeriesEntries("a", "1", "b", "2", "__tenant_id__", "1")},
+ {Labels: logproto.MustNewSeriesEntries("a", "1", "b", "3", "__tenant_id__", "1")},
+ {Labels: logproto.MustNewSeriesEntries("a", "1", "b", "4", "__tenant_id__", "1")},
+ {Labels: logproto.MustNewSeriesEntries("a", "1", "b", "5", "__tenant_id__", "1")},
+ {Labels: logproto.MustNewSeriesEntries("a", "1", "b", "2", "__tenant_id__", "2")},
+ {Labels: logproto.MustNewSeriesEntries("a", "1", "b", "3", "__tenant_id__", "2")},
+ {Labels: logproto.MustNewSeriesEntries("a", "1", "b", "4", "__tenant_id__", "2")},
+ {Labels: logproto.MustNewSeriesEntries("a", "1", "b", "5", "__tenant_id__", "2")},
+ {Labels: logproto.MustNewSeriesEntries("a", "1", "b", "2", "__tenant_id__", "3")},
+ {Labels: logproto.MustNewSeriesEntries("a", "1", "b", "3", "__tenant_id__", "3")},
+ {Labels: logproto.MustNewSeriesEntries("a", "1", "b", "4", "__tenant_id__", "3")},
+ {Labels: logproto.MustNewSeriesEntries("a", "1", "b", "5", "__tenant_id__", "3")},
},
},
{
desc: "single tenantID; behaves like a normal `Series` call",
orgID: "2",
expectedSeries: []logproto.SeriesIdentifier{
- {Labels: map[string]string{"a": "1", "b": "2"}},
- {Labels: map[string]string{"a": "1", "b": "3"}},
- {Labels: map[string]string{"a": "1", "b": "4"}},
- {Labels: map[string]string{"a": "1", "b": "5"}},
+ {Labels: logproto.MustNewSeriesEntries("a", "1", "b", "2")},
+ {Labels: logproto.MustNewSeriesEntries("a", "1", "b", "3")},
+ {Labels: logproto.MustNewSeriesEntries("a", "1", "b", "4")},
+ {Labels: logproto.MustNewSeriesEntries("a", "1", "b", "5")},
},
},
} {
@@ -455,16 +465,16 @@ func mockSeriesResponse() *logproto.SeriesResponse {
return &logproto.SeriesResponse{
Series: []logproto.SeriesIdentifier{
{
- Labels: map[string]string{"a": "1", "b": "2"},
+ Labels: logproto.MustNewSeriesEntries("a", "1", "b", "2"),
},
{
- Labels: map[string]string{"a": "1", "b": "3"},
+ Labels: logproto.MustNewSeriesEntries("a", "1", "b", "3"),
},
{
- Labels: map[string]string{"a": "1", "b": "4"},
+ Labels: logproto.MustNewSeriesEntries("a", "1", "b", "4"),
},
{
- Labels: map[string]string{"a": "1", "b": "5"},
+ Labels: logproto.MustNewSeriesEntries("a", "1", "b", "5"),
},
},
}
diff --git a/pkg/querier/plan/plan.go b/pkg/querier/plan/plan.go
new file mode 100644
index 0000000000000..6822932d7b241
--- /dev/null
+++ b/pkg/querier/plan/plan.go
@@ -0,0 +1,101 @@
+package plan
+
+import (
+ "bytes"
+
+ "github.com/grafana/loki/pkg/logql/syntax"
+)
+
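+// QueryPlan holds the parsed LogQL expression (AST) of a query. Its
+// marshalling methods encode the AST as JSON so the plan can travel with
+// query requests instead of being re-parsed downstream.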
+type QueryPlan struct {
+ AST syntax.Expr
+}
+
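+// Marshal returns the JSON encoding of the AST.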
+func (t QueryPlan) Marshal() ([]byte, error) {
+ return t.MarshalJSON()
+}
+
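+// MarshalTo encodes the AST as JSON into data and returns the number of bytes written.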
+func (t *QueryPlan) MarshalTo(data []byte) (int, error) {
+ appender := &appendWriter{
+ slice: data[:0],
+ }
+ err := syntax.EncodeJSON(t.AST, appender)
+ if err != nil {
+ return 0, err
+ }
+
+ return len(appender.slice), nil
+}
+
+func (t *QueryPlan) Unmarshal(data []byte) error {
+ return t.UnmarshalJSON(data)
+}
+
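+// Size returns the length in bytes of the JSON encoding of the AST, or 0 if the AST cannot be encoded.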
+func (t *QueryPlan) Size() int {
+ counter := &countWriter{}
+ err := syntax.EncodeJSON(t.AST, counter)
+ if err != nil {
+ return 0
+ }
+
+ return counter.bytes
+}
+
+func (t QueryPlan) MarshalJSON() ([]byte, error) {
+ var buf bytes.Buffer
+ err := syntax.EncodeJSON(t.AST, &buf)
+ if err != nil {
+ return nil, err
+ }
+
+ return buf.Bytes(), nil
+}
+
+func (t *QueryPlan) UnmarshalJSON(data []byte) error {
+ // An empty query plan is ignored for backwards compatibility.
+ if len(data) == 0 {
+ return nil
+ }
+
+ expr, err := syntax.DecodeJSON(string(data))
+ if err != nil {
+ return err
+ }
+
+ t.AST = expr
+ return nil
+}
+
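+// Equal reports whether both plans marshal to the same JSON encoding.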
+func (t QueryPlan) Equal(other QueryPlan) bool {
+ left, err := t.Marshal()
+ if err != nil {
+ return false
+ }
+
+ right, err := other.Marshal()
+ if err != nil {
+ return false
+ }
+ return bytes.Equal(left, right)
+}
+
+// countWriter does not write any bytes; it only counts the bytes that would
+// be written.
+type countWriter struct {
+ bytes int
+}
+
+// Write implements io.Writer.
+func (w *countWriter) Write(p []byte) (int, error) {
+ w.bytes += len(p)
+ return len(p), nil
+}
+
+// appendWriter appends to a slice.
+type appendWriter struct {
+ slice []byte
+}
+
+func (w *appendWriter) Write(p []byte) (int, error) {
+ w.slice = append(w.slice, p...)
+ return len(p), nil
+}
diff --git a/pkg/querier/plan/plan_test.go b/pkg/querier/plan/plan_test.go
new file mode 100644
index 0000000000000..60f7d3fad1806
--- /dev/null
+++ b/pkg/querier/plan/plan_test.go
@@ -0,0 +1,26 @@
+package plan
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/grafana/loki/pkg/logql/syntax"
+)
+
+func TestMarshalTo(t *testing.T) {
+ plan := QueryPlan{
+ AST: syntax.MustParseExpr(`sum by (foo) (bytes_over_time({app="loki"} [1m]))`),
+ }
+
+ data := make([]byte, plan.Size())
+ _, err := plan.MarshalTo(data)
+ require.NoError(t, err)
+
+ var buf bytes.Buffer
+ err = syntax.EncodeJSON(plan.AST, &buf)
+ require.NoError(t, err)
+
+ require.JSONEq(t, buf.String(), string(data))
+}
diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go
index 964d92d58c10a..a91293c977968 100644
--- a/pkg/querier/querier.go
+++ b/pkg/querier/querier.go
@@ -29,6 +29,7 @@ import (
"github.com/grafana/loki/pkg/logql"
"github.com/grafana/loki/pkg/logql/syntax"
querier_limits "github.com/grafana/loki/pkg/querier/limits"
+ "github.com/grafana/loki/pkg/querier/plan"
"github.com/grafana/loki/pkg/storage"
"github.com/grafana/loki/pkg/storage/stores/index/stats"
listutil "github.com/grafana/loki/pkg/util"
@@ -443,6 +444,16 @@ func (q *SingleTenantQuerier) Tail(ctx context.Context, req *logproto.TailReques
return nil, err
}
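+ // Fall back to parsing the query string when the request carries no plan.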
+ if req.Plan == nil {
+ parsed, err := syntax.ParseExpr(req.Query)
+ if err != nil {
+ return nil, err
+ }
+ req.Plan = &plan.QueryPlan{
+ AST: parsed,
+ }
+ }
+
deletes, err := q.deletesForUser(ctx, req.Start, time.Now())
if err != nil {
level.Error(spanlogger.FromContext(ctx)).Log("msg", "failed loading deletes for user", "err", err)
@@ -456,6 +467,7 @@ func (q *SingleTenantQuerier) Tail(ctx context.Context, req *logproto.TailReques
Limit: req.Limit,
Direction: logproto.BACKWARD,
Deletes: deletes,
+ Plan: req.Plan,
},
}
@@ -577,24 +589,21 @@ func (q *SingleTenantQuerier) awaitSeries(ctx context.Context, req *logproto.Ser
}
}
- deduped := make(map[string]logproto.SeriesIdentifier)
+ response := &logproto.SeriesResponse{
+ Series: make([]logproto.SeriesIdentifier, 0),
+ }
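+ // Deduplicate series across responses by their label hash; b is a reusable scratch buffer for Hash.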
+ seen := make(map[uint64]struct{})
+ b := make([]byte, 0, 1024)
for _, set := range sets {
for _, s := range set {
- key := loghttp.LabelSet(s.Labels).String()
- if _, exists := deduped[key]; !exists {
- deduped[key] = s
+ key := s.Hash(b)
+ if _, exists := seen[key]; !exists {
+ seen[key] = struct{}{}
+ response.Series = append(response.Series, s)
}
}
}
- response := &logproto.SeriesResponse{
- Series: make([]logproto.SeriesIdentifier, 0, len(deduped)),
- }
-
- for _, s := range deduped {
- response.Series = append(response.Series, s)
- }
-
return response, nil
}
@@ -629,6 +638,15 @@ func (q *SingleTenantQuerier) seriesForMatchers(
// seriesForMatcher fetches series from the store for a given matcher
func (q *SingleTenantQuerier) seriesForMatcher(ctx context.Context, from, through time.Time, matcher string, shards []string) ([]logproto.SeriesIdentifier, error) {
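+ // An empty matcher selects all series, so there is nothing to parse and the plan's AST stays nil.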
+ var parsed syntax.Expr
+ var err error
+ if matcher != "" {
+ parsed, err = syntax.ParseExpr(matcher)
+ if err != nil {
+ return nil, err
+ }
+ }
+
ids, err := q.store.SelectSeries(ctx, logql.SelectLogParams{
QueryRequest: &logproto.QueryRequest{
Selector: matcher,
@@ -637,6 +655,9 @@ func (q *SingleTenantQuerier) seriesForMatcher(ctx context.Context, from, throug
End: through,
Direction: logproto.FORWARD,
Shards: shards,
+ Plan: &plan.QueryPlan{
+ AST: parsed,
+ },
},
})
if err != nil {
diff --git a/pkg/querier/querier_mock_test.go b/pkg/querier/querier_mock_test.go
index fa2de7590465f..b329e2e1dc54c 100644
--- a/pkg/querier/querier_mock_test.go
+++ b/pkg/querier/querier_mock_test.go
@@ -6,6 +6,8 @@ import (
"fmt"
"time"
+ "github.com/grafana/loki/pkg/logql/log"
+
"github.com/grafana/loki/pkg/loghttp"
"github.com/grafana/dskit/grpcclient"
@@ -298,8 +300,9 @@ type storeMock struct {
func newStoreMock() *storeMock {
return &storeMock{}
}
-
-func (s *storeMock) SetChunkFilterer(chunk.RequestChunkFilterer) {}
+func (s *storeMock) SetChunkFilterer(chunk.RequestChunkFilterer) {}
+func (s *storeMock) SetExtractorWrapper(log.SampleExtractorWrapper) {}
+func (s *storeMock) SetPipelineWrapper(log.PipelineWrapper) {}
func (s *storeMock) SelectLogs(ctx context.Context, req logql.SelectLogParams) (iter.EntryIterator, error) {
args := s.Called(ctx, req)
diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go
index 4c8ee491cde61..e9c36f7ae91e8 100644
--- a/pkg/querier/querier_test.go
+++ b/pkg/querier/querier_test.go
@@ -23,6 +23,8 @@ import (
"github.com/grafana/loki/pkg/ingester/client"
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/logql"
+ "github.com/grafana/loki/pkg/logql/syntax"
+ "github.com/grafana/loki/pkg/querier/plan"
"github.com/grafana/loki/pkg/storage"
"github.com/grafana/loki/pkg/util/constants"
"github.com/grafana/loki/pkg/validation"
@@ -84,10 +86,13 @@ func TestQuerier_Label_QueryTimeoutConfigFlag(t *testing.T) {
func TestQuerier_Tail_QueryTimeoutConfigFlag(t *testing.T) {
request := logproto.TailRequest{
- Query: "{type=\"test\"}",
+ Query: `{type="test"}`,
DelayFor: 0,
Limit: 10,
Start: time.Now(),
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`{type="test"}`),
+ },
}
store := newStoreMock()
@@ -168,11 +173,14 @@ func defaultLimitsTestConfig() validation.Limits {
func TestQuerier_validateQueryRequest(t *testing.T) {
request := logproto.QueryRequest{
- Selector: "{type=\"test\", fail=\"yes\"} |= \"foo\"",
+ Selector: `{type="test", fail="yes"} |= "foo"`,
Limit: 10,
Start: time.Now().Add(-1 * time.Minute),
End: time.Now(),
Direction: logproto.FORWARD,
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`{type="test", fail="yes"} |= "foo"`),
+ },
}
store := newStoreMock()
@@ -205,7 +213,10 @@ func TestQuerier_validateQueryRequest(t *testing.T) {
_, err = q.SelectLogs(ctx, logql.SelectLogParams{QueryRequest: &request})
require.Equal(t, httpgrpc.Errorf(http.StatusBadRequest, "max streams matchers per query exceeded, matchers-count > limit (2 > 1)"), err)
- request.Selector = "{type=\"test\"}"
+ request.Selector = `{type="test"}`
+ request.Plan = &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`{type="test"}`),
+ }
_, err = q.SelectLogs(ctx, logql.SelectLogParams{QueryRequest: &request})
require.NoError(t, err)
@@ -226,9 +237,7 @@ func TestQuerier_SeriesAPI(t *testing.T) {
mockSeriesResponse := func(series []map[string]string) *logproto.SeriesResponse {
resp := &logproto.SeriesResponse{}
for _, s := range series {
- resp.Series = append(resp.Series, logproto.SeriesIdentifier{
- Labels: s,
- })
+ resp.Series = append(resp.Series, logproto.SeriesIdentifierFromMap(s))
}
return resp
}
@@ -293,8 +302,14 @@ func TestQuerier_SeriesAPI(t *testing.T) {
}), nil)
store.On("SelectSeries", mock.Anything, mock.Anything).Return([]logproto.SeriesIdentifier{
- {Labels: map[string]string{"a": "1", "b": "4"}},
- {Labels: map[string]string{"a": "1", "b": "5"}},
+ {Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "a", Value: "1"},
+ {Key: "b", Value: "4"},
+ }},
+ {Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "a", Value: "1"},
+ {Key: "b", Value: "5"},
+ }},
}, nil)
},
func(t *testing.T, q *SingleTenantQuerier, req *logproto.SeriesRequest) {
@@ -302,10 +317,22 @@ func TestQuerier_SeriesAPI(t *testing.T) {
resp, err := q.Series(ctx, req)
require.Nil(t, err)
require.ElementsMatch(t, []logproto.SeriesIdentifier{
- {Labels: map[string]string{"a": "1", "b": "2"}},
- {Labels: map[string]string{"a": "1", "b": "3"}},
- {Labels: map[string]string{"a": "1", "b": "4"}},
- {Labels: map[string]string{"a": "1", "b": "5"}},
+ {Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "a", Value: "1"},
+ {Key: "b", Value: "2"},
+ }},
+ {Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "a", Value: "1"},
+ {Key: "b", Value: "3"}},
+ },
+ {Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "a", Value: "1"},
+ {Key: "b", Value: "4"},
+ }},
+ {Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "a", Value: "1"},
+ {Key: "b", Value: "5"},
+ }},
}, resp.GetSeries())
},
},
@@ -318,8 +345,14 @@ func TestQuerier_SeriesAPI(t *testing.T) {
}), nil)
store.On("SelectSeries", mock.Anything, mock.Anything).Return([]logproto.SeriesIdentifier{
- {Labels: map[string]string{"a": "1", "b": "2"}},
- {Labels: map[string]string{"a": "1", "b": "3"}},
+ {Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "a", Value: "1"},
+ {Key: "b", Value: "2"},
+ }},
+ {Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "a", Value: "1"},
+ {Key: "b", Value: "3"},
+ }},
}, nil)
},
func(t *testing.T, q *SingleTenantQuerier, req *logproto.SeriesRequest) {
@@ -327,8 +360,14 @@ func TestQuerier_SeriesAPI(t *testing.T) {
resp, err := q.Series(ctx, req)
require.Nil(t, err)
require.ElementsMatch(t, []logproto.SeriesIdentifier{
- {Labels: map[string]string{"a": "1", "b": "2"}},
- {Labels: map[string]string{"a": "1", "b": "3"}},
+ {Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "a", Value: "1"},
+ {Key: "b", Value: "2"},
+ }},
+ {Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "a", Value: "1"},
+ {Key: "b", Value: "3"},
+ }},
}, resp.GetSeries())
},
},
@@ -395,6 +434,9 @@ func TestQuerier_IngesterMaxQueryLookback(t *testing.T) {
Start: tc.end.Add(-6 * time.Hour),
End: tc.end,
Direction: logproto.FORWARD,
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`{app="foo"}`),
+ },
}
queryClient := newQueryClientMock()
@@ -442,6 +484,9 @@ func TestQuerier_concurrentTailLimits(t *testing.T) {
DelayFor: 0,
Limit: 10,
Start: time.Now(),
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr("{type=\"test\"}"),
+ },
}
t.Parallel()
@@ -879,11 +924,14 @@ func TestQuerier_RequestingIngesters(t *testing.T) {
do: func(querier *SingleTenantQuerier, start, end time.Time) error {
_, err := querier.SelectLogs(ctx, logql.SelectLogParams{
QueryRequest: &logproto.QueryRequest{
- Selector: "{type=\"test\", fail=\"yes\"} |= \"foo\"",
+ Selector: `{type="test", fail="yes"} |= "foo"`,
Limit: 10,
Start: start,
End: end,
Direction: logproto.FORWARD,
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`{type="test", fail="yes"} |= "foo"`),
+ },
},
})
@@ -895,9 +943,12 @@ func TestQuerier_RequestingIngesters(t *testing.T) {
do: func(querier *SingleTenantQuerier, start, end time.Time) error {
_, err := querier.SelectSamples(ctx, logql.SelectSampleParams{
SampleQueryRequest: &logproto.SampleQueryRequest{
- Selector: "count_over_time({foo=\"bar\"}[5m])",
+ Selector: `count_over_time({foo="bar"}[5m])`,
Start: start,
End: end,
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`count_over_time({foo="bar"}[5m])`),
+ },
},
})
return err
@@ -1093,7 +1144,7 @@ func setupIngesterQuerierMocks(conf Config, limits *validation.Overrides) (*quer
ingesterClient.On("Series", mock.Anything, mock.Anything, mock.Anything).Return(&logproto.SeriesResponse{
Series: []logproto.SeriesIdentifier{
{
- Labels: map[string]string{"bar": "1"},
+ Labels: []logproto.SeriesIdentifier_LabelsEntry{{Key: "bar", Value: "1"}},
},
},
}, nil)
@@ -1104,7 +1155,7 @@ func setupIngesterQuerierMocks(conf Config, limits *validation.Overrides) (*quer
store.On("LabelValuesForMetricName", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]string{"1", "2", "3"}, nil)
store.On("LabelNamesForMetricName", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]string{"foo"}, nil)
store.On("SelectSeries", mock.Anything, mock.Anything).Return([]logproto.SeriesIdentifier{
- {Labels: map[string]string{"foo": "1"}},
+ {Labels: []logproto.SeriesIdentifier_LabelsEntry{{Key: "foo", Value: "1"}}},
}, nil)
querier, err := newQuerier(
@@ -1204,6 +1255,9 @@ func TestQuerier_SelectLogWithDeletes(t *testing.T) {
Start: time.Unix(0, 300000000),
End: time.Unix(0, 600000000),
Direction: logproto.FORWARD,
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`{type="test"} |= "foo"`),
+ },
}
_, err = q.SelectLogs(ctx, logql.SelectLogParams{QueryRequest: &request})
@@ -1220,6 +1274,9 @@ func TestQuerier_SelectLogWithDeletes(t *testing.T) {
{Selector: "2", Start: 400000000, End: 500000000},
{Selector: "3", Start: 500000000, End: 700000000},
},
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(request.Selector),
+ },
}
require.Contains(t, store.Calls[0].Arguments, logql.SelectLogParams{QueryRequest: expectedRequest})
@@ -1264,6 +1321,9 @@ func TestQuerier_SelectSamplesWithDeletes(t *testing.T) {
Selector: `count_over_time({foo="bar"}[5m])`,
Start: time.Unix(0, 300000000),
End: time.Unix(0, 600000000),
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`count_over_time({foo="bar"}[5m])`),
+ },
}
_, err = q.SelectSamples(ctx, logql.SelectSampleParams{SampleQueryRequest: &request})
@@ -1279,6 +1339,9 @@ func TestQuerier_SelectSamplesWithDeletes(t *testing.T) {
{Selector: "2", Start: 400000000, End: 500000000},
{Selector: "3", Start: 500000000, End: 700000000},
},
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(request.Selector),
+ },
},
}
diff --git a/pkg/querier/queryrange/codec.go b/pkg/querier/queryrange/codec.go
index 0cf3c06c22639..5e4a322418a35 100644
--- a/pkg/querier/queryrange/codec.go
+++ b/pkg/querier/queryrange/codec.go
@@ -14,6 +14,7 @@ import (
strings "strings"
"time"
+ "github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
"github.com/grafana/loki/pkg/storage/stores/index/seriesvolume"
"github.com/grafana/dskit/httpgrpc"
@@ -29,6 +30,7 @@ import (
"github.com/grafana/loki/pkg/logql/syntax"
"github.com/grafana/loki/pkg/logqlmodel"
"github.com/grafana/loki/pkg/logqlmodel/stats"
+ "github.com/grafana/loki/pkg/querier/plan"
"github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"
indexStats "github.com/grafana/loki/pkg/storage/stores/index/stats"
"github.com/grafana/loki/pkg/util"
@@ -61,11 +63,9 @@ func (r *LokiRequest) WithStartEnd(s time.Time, e time.Time) queryrangebase.Requ
return &clone
}
-func (r *LokiRequest) WithStartEndTime(s time.Time, e time.Time) *LokiRequest {
- clone := *r
- clone.StartTs = s
- clone.EndTs = e
- return &clone
+// WithStartEndForCache implements resultscache.Request.
+func (r *LokiRequest) WithStartEndForCache(s time.Time, e time.Time) resultscache.Request {
+ return r.WithStartEnd(s, e).(resultscache.Request)
}
func (r *LokiRequest) WithQuery(query string) queryrangebase.Request {
@@ -113,6 +113,11 @@ func (r *LokiInstantRequest) WithStartEnd(s time.Time, _ time.Time) queryrangeba
return &clone
}
+// WithStartEndForCache implements resultscache.Request.
+func (r *LokiInstantRequest) WithStartEndForCache(s time.Time, e time.Time) resultscache.Request {
+ return r.WithStartEnd(s, e).(resultscache.Request)
+}
+
func (r *LokiInstantRequest) WithQuery(query string) queryrangebase.Request {
clone := *r
clone.Query = query
@@ -152,6 +157,11 @@ func (r *LokiSeriesRequest) WithStartEnd(s, e time.Time) queryrangebase.Request
return &clone
}
+// WithStartEndForCache implements resultscache.Request.
+func (r *LokiSeriesRequest) WithStartEndForCache(s time.Time, e time.Time) resultscache.Request {
+ return r.WithStartEnd(s, e).(resultscache.Request)
+}
+
func (r *LokiSeriesRequest) WithQuery(_ string) queryrangebase.Request {
clone := *r
return &clone
@@ -228,6 +238,11 @@ func (r *LabelRequest) WithStartEnd(s, e time.Time) queryrangebase.Request {
return &clone
}
+// WithStartEndForCache implements resultscache.Request.
+func (r *LabelRequest) WithStartEndForCache(s time.Time, e time.Time) resultscache.Request {
+ return r.WithStartEnd(s, e).(resultscache.Request)
+}
+
func (r *LabelRequest) WithQuery(query string) queryrangebase.Request {
clone := *r
clone.Query = query
@@ -259,6 +274,11 @@ func (Codec) DecodeRequest(_ context.Context, r *http.Request, _ []string) (quer
return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error())
}
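+ // Parse the query up front so the AST can be attached to the request as a query plan.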
+ parsed, err := syntax.ParseExpr(rangeQuery.Query)
+ if err != nil {
+ return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error())
+ }
+
return &LokiRequest{
Query: rangeQuery.Query,
Limit: rangeQuery.Limit,
@@ -269,12 +289,21 @@ func (Codec) DecodeRequest(_ context.Context, r *http.Request, _ []string) (quer
Interval: rangeQuery.Interval.Milliseconds(),
Path: r.URL.Path,
Shards: rangeQuery.Shards,
+ Plan: &plan.QueryPlan{
+ AST: parsed,
+ },
}, nil
case InstantQueryOp:
req, err := loghttp.ParseInstantQuery(r)
if err != nil {
return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error())
}
+
+ parsed, err := syntax.ParseExpr(req.Query)
+ if err != nil {
+ return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error())
+ }
+
return &LokiInstantRequest{
Query: req.Query,
Limit: req.Limit,
@@ -282,6 +311,9 @@ func (Codec) DecodeRequest(_ context.Context, r *http.Request, _ []string) (quer
TimeTs: req.Ts.UTC(),
Path: r.URL.Path,
Shards: req.Shards,
+ Plan: &plan.QueryPlan{
+ AST: parsed,
+ },
}, nil
case SeriesOp:
req, err := loghttp.ParseAndValidateSeriesQuery(r)
@@ -409,6 +441,12 @@ func (Codec) DecodeHTTPGrpcRequest(ctx context.Context, r *httpgrpc.HTTPRequest)
if err != nil {
return nil, ctx, httpgrpc.Errorf(http.StatusBadRequest, err.Error())
}
+
+ parsed, err := syntax.ParseExpr(req.Query)
+ if err != nil {
+ return nil, ctx, httpgrpc.Errorf(http.StatusBadRequest, err.Error())
+ }
+
return &LokiRequest{
Query: req.Query,
Limit: req.Limit,
@@ -419,12 +457,21 @@ func (Codec) DecodeHTTPGrpcRequest(ctx context.Context, r *httpgrpc.HTTPRequest)
Interval: req.Interval.Milliseconds(),
Path: r.Url,
Shards: req.Shards,
+ Plan: &plan.QueryPlan{
+ AST: parsed,
+ },
}, ctx, nil
case InstantQueryOp:
req, err := loghttp.ParseInstantQuery(httpReq)
if err != nil {
return nil, ctx, httpgrpc.Errorf(http.StatusBadRequest, err.Error())
}
+
+ parsed, err := syntax.ParseExpr(req.Query)
+ if err != nil {
+ return nil, ctx, httpgrpc.Errorf(http.StatusBadRequest, err.Error())
+ }
+
return &LokiInstantRequest{
Query: req.Query,
Limit: req.Limit,
@@ -432,6 +479,9 @@ func (Codec) DecodeHTTPGrpcRequest(ctx context.Context, r *httpgrpc.HTTPRequest)
TimeTs: req.Ts.UTC(),
Path: r.Url,
Shards: req.Shards,
+ Plan: &plan.QueryPlan{
+ AST: parsed,
+ },
}, ctx, nil
case SeriesOp:
req, err := loghttp.ParseAndValidateSeriesQuery(httpReq)
@@ -504,7 +554,7 @@ func (Codec) DecodeHTTPGrpcRequest(ctx context.Context, r *httpgrpc.HTTPRequest)
AggregateBy: req.AggregateBy,
}, ctx, err
default:
- return nil, ctx, httpgrpc.Errorf(http.StatusBadRequest, fmt.Sprintf("unknown request path: %s", r.Url))
+ return nil, ctx, httpgrpc.Errorf(http.StatusBadRequest, fmt.Sprintf("unknown request path in HTTP gRPC decode: %s", r.Url))
}
}
@@ -579,6 +629,15 @@ func (c Codec) EncodeRequest(ctx context.Context, r queryrangebase.Request) (*ht
}
header.Set(user.OrgIDHeaderName, orgID)
+ // Propagate the trace context in the request headers.
+ tracer, span := opentracing.GlobalTracer(), opentracing.SpanFromContext(ctx)
+ if tracer != nil && span != nil {
+ carrier := opentracing.HTTPHeadersCarrier(header)
+ if err := tracer.Inject(span.Context(), opentracing.HTTPHeaders, carrier); err != nil {
+ return nil, err
+ }
+ }
+
switch request := r.(type) {
case *LokiRequest:
params := url.Values{
@@ -799,25 +858,15 @@ func decodeResponseJSONFrom(buf []byte, req queryrangebase.Request, headers http
switch req := req.(type) {
case *LokiSeriesRequest:
- var resp loghttp.SeriesResponse
+ resp := &LokiSeriesResponse{
+ Version: uint32(loghttp.GetVersion(req.Path)),
+ Headers: httpResponseHeadersToPromResponseHeaders(headers),
+ }
if err := json.Unmarshal(buf, &resp); err != nil {
return nil, httpgrpc.Errorf(http.StatusInternalServerError, "error decoding response: %v", err)
}
- data := make([]logproto.SeriesIdentifier, 0, len(resp.Data))
- for _, label := range resp.Data {
- d := logproto.SeriesIdentifier{
- Labels: label.Map(),
- }
- data = append(data, d)
- }
-
- return &LokiSeriesResponse{
- Status: resp.Status,
- Version: uint32(loghttp.GetVersion(req.Path)),
- Data: data,
- Headers: httpResponseHeadersToPromResponseHeaders(headers),
- }, nil
+ return resp, nil
case *LabelRequest:
var resp loghttp.LabelResponse
if err := json.Unmarshal(buf, &resp); err != nil {
@@ -1125,7 +1174,6 @@ func (Codec) MergeResponse(responses ...queryrangebase.Response) (queryrangebase
// little overhead. A run with 4MB should see the same speedup but
// much much more overhead.
b := make([]byte, 0, 1024)
- keyBuffer := make([]string, 0, 32)
var key uint64
// only unique series should be merged
@@ -1133,9 +1181,8 @@ func (Codec) MergeResponse(responses ...queryrangebase.Response) (queryrangebase
lokiResult := res.(*LokiSeriesResponse)
mergedStats.MergeSplit(lokiResult.Statistics)
for _, series := range lokiResult.Data {
- // Use series hash as the key and reuse key
- // buffer to avoid extra allocations.
- key, keyBuffer = series.Hash(b, keyBuffer)
+ // Use series hash as the key.
+ key = series.Hash(b)
// TODO(karsten): There is a chance that the
// keys match but not the labels due to hash
@@ -1420,10 +1467,14 @@ type paramsRangeWrapper struct {
*LokiRequest
}
-func (p paramsRangeWrapper) Query() string {
+func (p paramsRangeWrapper) QueryString() string {
return p.GetQuery()
}
+func (p paramsRangeWrapper) GetExpression() syntax.Expr {
+ return p.LokiRequest.Plan.AST
+}
+
func (p paramsRangeWrapper) Start() time.Time {
return p.GetStartTs()
}
@@ -1450,10 +1501,14 @@ type paramsInstantWrapper struct {
*LokiInstantRequest
}
-func (p paramsInstantWrapper) Query() string {
+func (p paramsInstantWrapper) QueryString() string {
return p.GetQuery()
}
+func (p paramsInstantWrapper) GetExpression() syntax.Expr {
+ return p.LokiInstantRequest.Plan.AST
+}
+
func (p paramsInstantWrapper) Start() time.Time {
return p.LokiInstantRequest.GetTimeTs()
}
@@ -1478,10 +1533,14 @@ type paramsSeriesWrapper struct {
*LokiSeriesRequest
}
-func (p paramsSeriesWrapper) Query() string {
+func (p paramsSeriesWrapper) QueryString() string {
return p.GetQuery()
}
+func (p paramsSeriesWrapper) GetExpression() syntax.Expr {
+ return nil
+}
+
func (p paramsSeriesWrapper) Start() time.Time {
return p.LokiSeriesRequest.GetStartTs()
}
@@ -1506,10 +1565,14 @@ type paramsLabelWrapper struct {
*LabelRequest
}
-func (p paramsLabelWrapper) Query() string {
+func (p paramsLabelWrapper) QueryString() string {
return p.GetQuery()
}
+func (p paramsLabelWrapper) GetExpression() syntax.Expr {
+ return nil
+}
+
func (p paramsLabelWrapper) Start() time.Time {
return p.LabelRequest.GetStartTs()
}
@@ -1534,10 +1597,14 @@ type paramsStatsWrapper struct {
*logproto.IndexStatsRequest
}
-func (p paramsStatsWrapper) Query() string {
+func (p paramsStatsWrapper) QueryString() string {
return p.GetQuery()
}
+func (p paramsStatsWrapper) GetExpression() syntax.Expr {
+ return nil
+}
+
func (p paramsStatsWrapper) Start() time.Time {
return p.From.Time()
}
diff --git a/pkg/querier/queryrange/codec_test.go b/pkg/querier/queryrange/codec_test.go
index 77df78aca6958..4f7a88edec6c2 100644
--- a/pkg/querier/queryrange/codec_test.go
+++ b/pkg/querier/queryrange/codec_test.go
@@ -25,8 +25,10 @@ import (
"github.com/grafana/loki/pkg/loghttp"
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/logql"
+ "github.com/grafana/loki/pkg/logql/syntax"
"github.com/grafana/loki/pkg/logqlmodel"
"github.com/grafana/loki/pkg/logqlmodel/stats"
+ "github.com/grafana/loki/pkg/querier/plan"
"github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"
"github.com/grafana/loki/pkg/util"
"github.com/grafana/loki/pkg/util/httpreq"
@@ -63,6 +65,9 @@ func Test_codec_EncodeDecodeRequest(t *testing.T) {
Path: "/query_range",
StartTs: start,
EndTs: end,
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`{foo="bar"}`),
+ },
}, false},
{"query_range", func() (*http.Request, error) {
return http.NewRequest(http.MethodGet,
@@ -76,6 +81,9 @@ func Test_codec_EncodeDecodeRequest(t *testing.T) {
Path: "/query_range",
StartTs: start,
EndTs: end,
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`{foo="bar"}`),
+ },
}, false},
{"legacy query_range with refexp", func() (*http.Request, error) {
return http.NewRequest(http.MethodGet,
@@ -89,6 +97,9 @@ func Test_codec_EncodeDecodeRequest(t *testing.T) {
Path: "/api/prom/query",
StartTs: start,
EndTs: end,
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`{foo="bar"} |~ "foo"`),
+ },
}, false},
{"series", func() (*http.Request, error) {
return http.NewRequest(http.MethodGet,
@@ -211,13 +222,13 @@ func Test_codec_DecodeResponse(t *testing.T) {
res *http.Response
req queryrangebase.Request
want queryrangebase.Response
- wantErr bool
+ wantErr string
}{
- {"500", &http.Response{StatusCode: 500, Body: io.NopCloser(strings.NewReader("some error"))}, nil, nil, true},
- {"no body", &http.Response{StatusCode: 200, Body: io.NopCloser(badReader{})}, nil, nil, true},
- {"bad json", &http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(""))}, nil, nil, true},
- {"not success", &http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(`{"status":"fail"}`))}, nil, nil, true},
- {"unknown", &http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(`{"status":"success"}`))}, nil, nil, true},
+ {"500", &http.Response{StatusCode: 500, Body: io.NopCloser(strings.NewReader("some error"))}, nil, nil, "some error"},
+ {"no body", &http.Response{StatusCode: 200, Body: io.NopCloser(badReader{})}, nil, nil, "error decoding response"},
+ {"bad json", &http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(""))}, nil, nil, "Value looks like object, but can't find closing"},
+ {"not success", &http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(`{"status":"fail"}`))}, nil, nil, "unsupported response type"},
+ {"unknown", &http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(`{"status":"success"}`))}, nil, nil, "unsupported response type"},
{
"matrix", &http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(matrixString))}, nil,
&LokiPromResponse{
@@ -229,7 +240,7 @@ func Test_codec_DecodeResponse(t *testing.T) {
},
},
Statistics: statsResult,
- }, false,
+ }, "",
},
{
"matrix-empty-streams",
@@ -244,7 +255,7 @@ func Test_codec_DecodeResponse(t *testing.T) {
},
},
Statistics: statsResult,
- }, false,
+ }, "",
},
{
"vector-empty-streams",
@@ -259,7 +270,7 @@ func Test_codec_DecodeResponse(t *testing.T) {
},
},
Statistics: statsResult,
- }, false,
+ }, "",
},
{
"streams v1", &http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(streamsString))},
@@ -274,7 +285,7 @@ func Test_codec_DecodeResponse(t *testing.T) {
Result: logStreams,
},
Statistics: statsResult,
- }, false,
+ }, "",
},
{
"streams v1 with structured metadata", &http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(streamsStringWithStructuredMetdata))},
@@ -289,7 +300,7 @@ func Test_codec_DecodeResponse(t *testing.T) {
Result: logStreamsWithStructuredMetadata,
},
Statistics: statsResult,
- }, false,
+ }, "",
},
{
"streams v1 with categorized labels", &http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(streamsStringWithCategories))},
@@ -304,7 +315,7 @@ func Test_codec_DecodeResponse(t *testing.T) {
Result: logStreamsWithCategories,
},
Statistics: statsResult,
- }, false,
+ }, "",
},
{
"streams legacy", &http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(streamsString))},
@@ -319,7 +330,7 @@ func Test_codec_DecodeResponse(t *testing.T) {
Result: logStreams,
},
Statistics: statsResult,
- }, false,
+ }, "",
},
{
"series", &http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(seriesString))},
@@ -328,7 +339,7 @@ func Test_codec_DecodeResponse(t *testing.T) {
Status: "success",
Version: uint32(loghttp.VersionV1),
Data: seriesData,
- }, false,
+ }, "",
},
{
"labels legacy", &http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(labelsString))},
@@ -337,7 +348,7 @@ func Test_codec_DecodeResponse(t *testing.T) {
Status: "success",
Version: uint32(loghttp.VersionLegacy),
Data: labelsData,
- }, false,
+ }, "",
},
{
"index stats", &http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(indexStatsString))},
@@ -349,7 +360,7 @@ func Test_codec_DecodeResponse(t *testing.T) {
Bytes: 3,
Entries: 4,
},
- }, false,
+ }, "",
},
{
"volume", &http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(seriesVolumeString))},
@@ -361,16 +372,53 @@ func Test_codec_DecodeResponse(t *testing.T) {
},
Limit: 100,
},
- }, false,
+ }, "",
+ },
+ {
+ "series error", &http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(`{"status":"success","data":"not an array"}`))},
+ &LokiSeriesRequest{Path: "/loki/api/v1/series"},
+ nil, "Value is array",
+ },
+ {
+ "series error wrong status type", &http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(`{"status":42}`))},
+ &LokiSeriesRequest{Path: "/loki/api/v1/series"},
+ nil, "Value is not a string",
+ },
+ {
+ "series error no object", &http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(`{"status":"success","data": ["not an object"]}`))},
+ &LokiSeriesRequest{Path: "/loki/api/v1/series"},
+ nil, "unexpected data type: got(string), expected (object)",
+ },
+ {
+ "series error wrong value type", &http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(`{"status":"success","data": [{"some": 42}]}`))},
+ &LokiSeriesRequest{Path: "/loki/api/v1/series"},
+ nil, "unexpected label value type: got(number), expected (string)",
+ },
+ {
+ "series error wrong key type", &http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(`{"status":"success","data": [{42: "some string"}]}`))},
+ &LokiSeriesRequest{Path: "/loki/api/v1/series"},
+ nil, "error decoding response: ReadObjectCB",
+ },
+ {
+ "series error key decode", &http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(`{"status":"success","data": [{"\x": "some string"}]}`))},
+ &LokiSeriesRequest{Path: "/loki/api/v1/series"},
+ nil, "invalid escape char after",
+ },
+ {
+ "series error value decode", &http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(`{"status":"success","data": [{"label": "some string\x"}]}`))},
+ &LokiSeriesRequest{Path: "/loki/api/v1/series"},
+ nil, "invalid escape char after",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := DefaultCodec.DecodeResponse(context.TODO(), tt.res, tt.req)
- if (err != nil) != tt.wantErr {
- t.Errorf("codec.DecodeResponse() error = %v, wantErr %v", err, tt.wantErr)
+ if tt.wantErr != "" {
+ require.ErrorContains(t, err, tt.wantErr)
return
}
+
+ require.NoError(t, err)
require.Equal(t, tt.want, got)
})
}
@@ -559,7 +607,13 @@ func Test_codec_DecodeProtobufResponseParity(t *testing.T) {
}
codec := RequestProtobufCodec{}
for i, queryTest := range queryTests {
- u := &url.URL{Path: "/loki/api/v1/query_range"}
+ params := url.Values{
+ "query": []string{`{app="foo"}`},
+ }
+ u := &url.URL{
+ Path: "/loki/api/v1/query_range",
+ RawQuery: params.Encode(),
+ }
httpReq := &http.Request{
Method: "GET",
RequestURI: u.String(),
@@ -1369,10 +1423,16 @@ func Test_codec_MergeResponse(t *testing.T) {
Version: 1,
Data: []logproto.SeriesIdentifier{
{
- Labels: map[string]string{"filename": "/var/hostlog/apport.log", "job": "varlogs"},
+ Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "filename", Value: "/var/hostlog/apport.log"},
+ {Key: "job", Value: "varlogs"},
+ },
},
{
- Labels: map[string]string{"filename": "/var/hostlog/test.log", "job": "varlogs"},
+ Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "filename", Value: "/var/hostlog/test.log"},
+ {Key: "job", Value: "varlogs"},
+ },
},
},
},
@@ -1381,10 +1441,16 @@ func Test_codec_MergeResponse(t *testing.T) {
Version: 1,
Data: []logproto.SeriesIdentifier{
{
- Labels: map[string]string{"filename": "/var/hostlog/apport.log", "job": "varlogs"},
+ Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "filename", Value: "/var/hostlog/apport.log"},
+ {Key: "job", Value: "varlogs"},
+ },
},
{
- Labels: map[string]string{"filename": "/var/hostlog/other.log", "job": "varlogs"},
+ Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "filename", Value: "/var/hostlog/other.log"},
+ {Key: "job", Value: "varlogs"},
+ },
},
},
},
@@ -1395,13 +1461,22 @@ func Test_codec_MergeResponse(t *testing.T) {
Version: 1,
Data: []logproto.SeriesIdentifier{
{
- Labels: map[string]string{"filename": "/var/hostlog/apport.log", "job": "varlogs"},
+ Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "filename", Value: "/var/hostlog/apport.log"},
+ {Key: "job", Value: "varlogs"},
+ },
},
{
- Labels: map[string]string{"filename": "/var/hostlog/test.log", "job": "varlogs"},
+ Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "filename", Value: "/var/hostlog/test.log"},
+ {Key: "job", Value: "varlogs"},
+ },
},
{
- Labels: map[string]string{"filename": "/var/hostlog/other.log", "job": "varlogs"},
+ Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "filename", Value: "/var/hostlog/other.log"},
+ {Key: "job", Value: "varlogs"},
+ },
},
},
},
@@ -1721,7 +1796,7 @@ var (
"test": "test"
},
"values":[
- [ "123456789012345", "super line"],
+ [ "123456789012345", "super line", {}],
[ "123456789012346", "super line2", {
"structuredMetadata": {
"x": "a",
@@ -1835,10 +1910,16 @@ var (
}`
seriesData = []logproto.SeriesIdentifier{
{
- Labels: map[string]string{"filename": "/var/hostlog/apport.log", "job": "varlogs"},
+ Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "filename", Value: "/var/hostlog/apport.log"},
+ {Key: "job", Value: "varlogs"},
+ },
},
{
- Labels: map[string]string{"filename": "/var/hostlog/test.log", "job": "varlogs"},
+ Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "filename", Value: "/var/hostlog/test.log"},
+ {Key: "job", Value: "varlogs"},
+ },
},
}
labelsString = `{
@@ -2191,9 +2272,9 @@ func generateStream() (res []logproto.Stream) {
func generateSeries() (res []logproto.SeriesIdentifier) {
for i := 0; i < 1000; i++ {
- labels := make(map[string]string)
+ labels := make([]logproto.SeriesIdentifier_LabelsEntry, 100)
for l := 0; l < 100; l++ {
- labels[fmt.Sprintf("%d-%d", i, l)] = strconv.Itoa(l)
+ labels[l] = logproto.SeriesIdentifier_LabelsEntry{Key: fmt.Sprintf("%d-%d", i, l), Value: strconv.Itoa(l)}
}
res = append(res, logproto.SeriesIdentifier{Labels: labels})
}
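
The codec test changes above track a representation change in `logproto.SeriesIdentifier`: labels move from a `map[string]string` to an ordered slice of `SeriesIdentifier_LabelsEntry` pairs. A minimal sketch of converting the old map form into the new slice form, using only the fields visible in this diff (`Key`, `Value`); the helper is hypothetical and not part of the change:

```go
package main

import (
	"fmt"

	"github.com/grafana/loki/pkg/logproto"
)

// entriesFromMap is a hypothetical helper that converts the old map form into
// the new ordered slice form. Map iteration order is not deterministic, so
// callers that need stable output should sort the entries afterwards.
func entriesFromMap(labels map[string]string) []logproto.SeriesIdentifier_LabelsEntry {
	entries := make([]logproto.SeriesIdentifier_LabelsEntry, 0, len(labels))
	for k, v := range labels {
		entries = append(entries, logproto.SeriesIdentifier_LabelsEntry{Key: k, Value: v})
	}
	return entries
}

func main() {
	s := logproto.SeriesIdentifier{
		Labels: entriesFromMap(map[string]string{
			"filename": "/var/hostlog/apport.log",
			"job":      "varlogs",
		}),
	}
	fmt.Println(len(s.Labels)) // 2
}
```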
diff --git a/pkg/querier/queryrange/downstreamer.go b/pkg/querier/queryrange/downstreamer.go
index b7a3d2f57a3ff..860aa980fb30b 100644
--- a/pkg/querier/queryrange/downstreamer.go
+++ b/pkg/querier/queryrange/downstreamer.go
@@ -20,6 +20,7 @@ import (
"github.com/grafana/loki/pkg/logqlmodel"
"github.com/grafana/loki/pkg/logqlmodel/metadata"
"github.com/grafana/loki/pkg/logqlmodel/stats"
+ "github.com/grafana/loki/pkg/querier/plan"
"github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"
"github.com/grafana/loki/pkg/querier/queryrange/queryrangebase/definitions"
"github.com/grafana/loki/pkg/util/spanlogger"
@@ -34,19 +35,22 @@ type DownstreamHandler struct {
next queryrangebase.Handler
}
-func ParamsToLokiRequest(params logql.Params, shards logql.Shards) queryrangebase.Request {
+func ParamsToLokiRequest(params logql.Params) queryrangebase.Request {
if logql.GetRangeType(params) == logql.InstantType {
return &LokiInstantRequest{
- Query: params.Query(),
+ Query: params.QueryString(),
Limit: params.Limit(),
TimeTs: params.Start(),
Direction: params.Direction(),
Path: "/loki/api/v1/query", // TODO(owen-d): make this derivable
- Shards: shards.Encode(),
+ Shards: params.Shards(),
+ Plan: &plan.QueryPlan{
+ AST: params.GetExpression(),
+ },
}
}
return &LokiRequest{
- Query: params.Query(),
+ Query: params.QueryString(),
Limit: params.Limit(),
Step: params.Step().Milliseconds(),
Interval: params.Interval().Milliseconds(),
@@ -54,7 +58,10 @@ func ParamsToLokiRequest(params logql.Params, shards logql.Shards) queryrangebas
EndTs: params.End(),
Direction: params.Direction(),
Path: "/loki/api/v1/query_range", // TODO(owen-d): make this derivable
- Shards: shards.Encode(),
+ Shards: params.Shards(),
+ Plan: &plan.QueryPlan{
+ AST: params.GetExpression(),
+ },
}
}
@@ -97,12 +104,12 @@ type instance struct {
func (in instance) Downstream(ctx context.Context, queries []logql.DownstreamQuery) ([]logqlmodel.Result, error) {
return in.For(ctx, queries, func(qry logql.DownstreamQuery) (logqlmodel.Result, error) {
- req := ParamsToLokiRequest(qry.Params, qry.Shards).WithQuery(qry.Expr.String())
+ req := ParamsToLokiRequest(qry.Params).WithQuery(qry.Params.GetExpression().String())
sp, ctx := opentracing.StartSpanFromContext(ctx, "DownstreamHandler.instance")
defer sp.Finish()
logger := spanlogger.FromContext(ctx)
defer logger.Finish()
- level.Debug(logger).Log("shards", fmt.Sprintf("%+v", qry.Shards), "query", req.GetQuery(), "step", req.GetStep(), "handler", reflect.TypeOf(in.handler))
+ level.Debug(logger).Log("shards", fmt.Sprintf("%+v", qry.Params.Shards()), "query", req.GetQuery(), "step", req.GetStep(), "handler", reflect.TypeOf(in.handler))
res, err := in.handler.Do(ctx, req)
if err != nil {
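
With the change above, `ParamsToLokiRequest` no longer takes a separate shards argument; the shards and the parsed query plan are both derived from `logql.Params`. A minimal sketch of the new call shape, assuming the `NewLiteralParams` argument order used by the tests in this diff (query, start, end, step, interval, direction, limit, shards):

```go
package main

import (
	"fmt"
	"time"

	"github.com/grafana/loki/pkg/logproto"
	"github.com/grafana/loki/pkg/logql"
	"github.com/grafana/loki/pkg/querier/queryrange"
)

func main() {
	// NewLiteralParams parses the query up front, so the resulting Params can
	// answer QueryString(), Shards() and GetExpression() on its own.
	params, err := logql.NewLiteralParams(
		`{app="foo"}`,
		time.Now().Add(-time.Hour), time.Now(),
		0, 0,
		logproto.FORWARD,
		1000,
		nil,
	)
	if err != nil {
		panic(err)
	}

	// The request now carries the shards and the query plan derived from params.
	req := queryrange.ParamsToLokiRequest(params)
	fmt.Println(req.GetQuery())
}
```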
diff --git a/pkg/querier/queryrange/downstreamer_test.go b/pkg/querier/queryrange/downstreamer_test.go
index 552c0c53aa056..e453f03d9a3ee 100644
--- a/pkg/querier/queryrange/downstreamer_test.go
+++ b/pkg/querier/queryrange/downstreamer_test.go
@@ -4,6 +4,8 @@ import (
"context"
"errors"
"fmt"
+ "strconv"
+ "strings"
"sync"
"testing"
"time"
@@ -223,8 +225,8 @@ func TestInstanceFor(t *testing.T) {
}
in := mkIn()
newParams := func() logql.Params {
- return logql.NewLiteralParams(
- "",
+ params, err := logql.NewLiteralParams(
+ `{app="foo"}`,
time.Now(),
time.Now(),
0,
@@ -233,6 +235,8 @@ func TestInstanceFor(t *testing.T) {
1000,
nil,
)
+ require.NoError(t, err)
+ return params
}
var queries []logql.DownstreamQuery
@@ -280,22 +284,32 @@ func TestInstanceFor(t *testing.T) {
context.TODO(),
[]logql.DownstreamQuery{
{
- Params: newParams(),
- Shards: logql.Shards{
- {Shard: 0, Of: 2},
+ Params: logql.ParamsWithShardsOverride{
+ Params: newParams(),
+ ShardsOverride: logql.Shards{
+ {Shard: 0, Of: 2},
+ }.Encode(),
},
},
{
- Params: newParams(),
- Shards: logql.Shards{
- {Shard: 1, Of: 2},
+ Params: logql.ParamsWithShardsOverride{
+ Params: newParams(),
+ ShardsOverride: logql.Shards{
+ {Shard: 1, Of: 2},
+ }.Encode(),
},
},
},
func(qry logql.DownstreamQuery) (logqlmodel.Result, error) {
+ // Decode shard
+ s := strings.Split(qry.Params.Shards()[0], "_")
+ shard, err := strconv.Atoi(s[0])
+ if err != nil {
+ return logqlmodel.Result{}, err
+ }
return logqlmodel.Result{
Data: promql.Scalar{
- V: float64(qry.Shards[0].Shard),
+ V: float64(shard),
},
}, nil
},
@@ -309,8 +323,8 @@ func TestInstanceFor(t *testing.T) {
}
func TestInstanceDownstream(t *testing.T) {
- params := logql.NewLiteralParams(
- "",
+ params, err := logql.NewLiteralParams(
+ `{foo="bar"}`,
time.Now(),
time.Now(),
0,
@@ -319,8 +333,9 @@ func TestInstanceDownstream(t *testing.T) {
1000,
nil,
)
+ require.NoError(t, err)
expr, err := syntax.ParseExpr(`{foo="bar"}`)
- require.Nil(t, err)
+ require.NoError(t, err)
expectedResp := func() *LokiResponse {
return &LokiResponse{
@@ -340,9 +355,10 @@ func TestInstanceDownstream(t *testing.T) {
queries := []logql.DownstreamQuery{
{
- Expr: expr,
- Params: params,
- Shards: logql.Shards{{Shard: 0, Of: 2}},
+ Params: logql.ParamsWithShardsOverride{
+ Params: logql.ParamsWithExpressionOverride{Params: params, ExpressionOverride: expr},
+ ShardsOverride: logql.Shards{{Shard: 0, Of: 2}}.Encode(),
+ },
},
}
@@ -353,7 +369,7 @@ func TestInstanceDownstream(t *testing.T) {
// for some reason these seemingly can't be checked in their own goroutines,
// so we assign them to scoped variables for later comparison.
got = req
- want = ParamsToLokiRequest(params, queries[0].Shards).WithQuery(expr.String())
+ want = ParamsToLokiRequest(queries[0].Params).WithQuery(expr.String())
return expectedResp(), nil
},
@@ -484,9 +500,10 @@ func TestDownstreamAccumulatorSimple(t *testing.T) {
x = append(x, *s)
}
// dummy params. Only need to populate direction & limit
- params := logql.NewLiteralParams(
- "", time.Time{}, time.Time{}, 0, 0, direction, uint32(lim), nil,
+ params, err := logql.NewLiteralParams(
+ `{app="foo"}`, time.Time{}, time.Time{}, 0, 0, direction, uint32(lim), nil,
)
+ require.NoError(t, err)
acc := newDownstreamAccumulator(params, 1)
result := logqlmodel.Result{
@@ -542,9 +559,10 @@ func TestDownstreamAccumulatorMultiMerge(t *testing.T) {
}
// dummy params. Only need to populate direction & limit
- params := logql.NewLiteralParams(
- "", time.Time{}, time.Time{}, 0, 0, direction, uint32(lim), nil,
+ params, err := logql.NewLiteralParams(
+ `{app="foo"}`, time.Time{}, time.Time{}, 0, 0, direction, uint32(lim), nil,
)
+ require.NoError(t, err)
acc := newDownstreamAccumulator(params, 1)
for i := 0; i < nQueries; i++ {
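
The test changes above drop the dedicated `Shards` and `Expr` fields of `logql.DownstreamQuery` in favour of wrapper params. A minimal sketch of composing the two overrides, mirroring the test setup (the helper itself is hypothetical):

```go
package main

import (
	"fmt"
	"time"

	"github.com/grafana/loki/pkg/logproto"
	"github.com/grafana/loki/pkg/logql"
	"github.com/grafana/loki/pkg/logql/syntax"
)

// buildShardedQuery mirrors the test setup above: the expression override
// wraps the base params first, then the shard override, so the downstreamer
// reads both through the logql.Params interface instead of struct fields.
func buildShardedQuery(params logql.Params, expr syntax.Expr) logql.DownstreamQuery {
	return logql.DownstreamQuery{
		Params: logql.ParamsWithShardsOverride{
			Params:         logql.ParamsWithExpressionOverride{Params: params, ExpressionOverride: expr},
			ShardsOverride: logql.Shards{{Shard: 0, Of: 2}}.Encode(),
		},
	}
}

func main() {
	params, err := logql.NewLiteralParams(
		`{foo="bar"}`, time.Now().Add(-time.Hour), time.Now(), 0, 0, logproto.FORWARD, 100, nil,
	)
	if err != nil {
		panic(err)
	}
	expr, err := syntax.ParseExpr(`{foo="bar"}`)
	if err != nil {
		panic(err)
	}

	q := buildShardedQuery(params, expr)
	fmt.Println(q.Params.Shards()) // encoded shard, e.g. [0_of_2]
}
```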
diff --git a/pkg/querier/queryrange/extensions.go b/pkg/querier/queryrange/extensions.go
index 5b138355218f3..b8a0ca7f41935 100644
--- a/pkg/querier/queryrange/extensions.go
+++ b/pkg/querier/queryrange/extensions.go
@@ -1,6 +1,13 @@
package queryrange
-import "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"
+import (
+ "fmt"
+
+ "github.com/buger/jsonparser"
+
+ "github.com/grafana/loki/pkg/logproto"
+ "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"
+)
// To satisfy queryrange.Response interface(https://github.com/cortexproject/cortex/blob/21bad57b346c730d684d6d0205efef133422ab28/pkg/querier/queryrange/query_range.go#L88)
// we need to have following method as well on response types:
@@ -43,6 +50,51 @@ func (m *LokiSeriesResponse) WithHeaders(h []queryrangebase.PrometheusResponseHe
return m
}
+// UnmarshalJSON decodes from loghttpSeriesResponse JSON format directly into
+// the protobuf LokiSeriesResponse.
+func (m *LokiSeriesResponse) UnmarshalJSON(data []byte) error {
+ var err error
+ m.Status, err = jsonparser.GetString(data, "status")
+ if err != nil {
+ return err
+ }
+
+ var parseErr error
+ _, err = jsonparser.ArrayEach(data, func(value []byte, vt jsonparser.ValueType, _ int, _ error) {
+ if vt != jsonparser.Object {
+ parseErr = fmt.Errorf("unexpected data type: got(%s), expected (object)", vt)
+ return
+ }
+
+ identifier := logproto.SeriesIdentifier{}
+ parseErr = jsonparser.ObjectEach(value, func(key, val []byte, vt jsonparser.ValueType, _ int) error {
+ if vt != jsonparser.String {
+ return fmt.Errorf("unexpected label value type: got(%s), expected (string)", vt)
+ }
+ v, err := jsonparser.ParseString(val)
+ if err != nil {
+ return err
+ }
+ k, err := jsonparser.ParseString(key)
+ if err != nil {
+ return err
+ }
+
+ identifier.Labels = append(identifier.Labels, logproto.SeriesIdentifier_LabelsEntry{Key: k, Value: v})
+ return nil
+ })
+
+ if parseErr != nil {
+ return
+ }
+ m.Data = append(m.Data, identifier)
+ }, "data")
+ if parseErr != nil {
+ return parseErr
+ }
+ return err
+}
+
func (m *LokiPromResponse) GetHeaders() []*queryrangebase.PrometheusResponseHeader {
if m != nil {
return m.Response.GetHeaders()
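
The custom `UnmarshalJSON` added above lets a `LokiSeriesResponse` be decoded straight from the loghttp series payload without an intermediate map. A minimal usage sketch with a hand-written payload in the shape the parser expects:

```go
package main

import (
	"fmt"

	"github.com/grafana/loki/pkg/querier/queryrange"
)

func main() {
	payload := []byte(`{
		"status": "success",
		"data": [
			{"filename": "/var/hostlog/apport.log", "job": "varlogs"}
		]
	}`)

	var resp queryrange.LokiSeriesResponse
	if err := resp.UnmarshalJSON(payload); err != nil {
		panic(err)
	}

	// Each series object becomes a SeriesIdentifier whose labels are the
	// ordered key/value entries introduced elsewhere in this diff.
	fmt.Println(resp.Status, len(resp.Data), len(resp.Data[0].Labels)) // success 1 2
}
```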
diff --git a/pkg/querier/queryrange/index_stats_cache.go b/pkg/querier/queryrange/index_stats_cache.go
index 4814394fd47ab..a985167456a76 100644
--- a/pkg/querier/queryrange/index_stats_cache.go
+++ b/pkg/querier/queryrange/index_stats_cache.go
@@ -14,6 +14,7 @@ import (
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"
"github.com/grafana/loki/pkg/storage/chunk/cache"
+ "github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
"github.com/grafana/loki/pkg/util"
"github.com/grafana/loki/pkg/util/validation"
)
@@ -23,7 +24,7 @@ type IndexStatsSplitter struct {
}
// GenerateCacheKey generates a cache key based on the userID, Request and interval.
-func (i IndexStatsSplitter) GenerateCacheKey(ctx context.Context, userID string, r queryrangebase.Request) string {
+func (i IndexStatsSplitter) GenerateCacheKey(ctx context.Context, userID string, r resultscache.Request) string {
cacheKey := i.cacheKeyLimits.GenerateCacheKey(ctx, userID, r)
return fmt.Sprintf("indexStats:%s", cacheKey)
}
@@ -32,7 +33,7 @@ type IndexStatsExtractor struct{}
// Extract favors the ability to cache over exactness of results. It assumes a constant distribution
// of log volumes over a range and will extract subsets proportionally.
-func (p IndexStatsExtractor) Extract(start, end int64, res queryrangebase.Response, resStart, resEnd int64) queryrangebase.Response {
+func (p IndexStatsExtractor) Extract(start, end int64, res resultscache.Response, resStart, resEnd int64) resultscache.Response {
factor := util.GetFactorOfTime(start, end, resStart, resEnd)
statsRes := res.(*IndexStatsResponse)
@@ -93,7 +94,7 @@ func NewIndexStatsCacheMiddleware(
c cache.Cache,
cacheGenNumberLoader queryrangebase.CacheGenNumberLoader,
shouldCache queryrangebase.ShouldCacheFn,
- parallelismForReq func(ctx context.Context, tenantIDs []string, r queryrangebase.Request) int,
+ parallelismForReq queryrangebase.ParallelismForReqFn,
retentionEnabled bool,
transformer UserIDTransformer,
metrics *queryrangebase.ResultsCacheMetrics,
diff --git a/pkg/querier/queryrange/index_stats_cache_test.go b/pkg/querier/queryrange/index_stats_cache_test.go
index 72b24757aef5c..c8119c6b9fe25 100644
--- a/pkg/querier/queryrange/index_stats_cache_test.go
+++ b/pkg/querier/queryrange/index_stats_cache_test.go
@@ -15,14 +15,17 @@ import (
"github.com/grafana/loki/pkg/logqlmodel/stats"
"github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"
"github.com/grafana/loki/pkg/storage/chunk/cache"
+ "github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
"github.com/grafana/loki/pkg/util"
"github.com/grafana/loki/pkg/util/constants"
)
func TestIndexStatsCache(t *testing.T) {
cfg := queryrangebase.ResultsCacheConfig{
- CacheConfig: cache.Config{
- Cache: cache.NewMockCache(),
+ Config: resultscache.Config{
+ CacheConfig: cache.Config{
+ Cache: cache.NewMockCache(),
+ },
},
}
c, err := cache.New(cfg.CacheConfig, nil, log.NewNopLogger(), stats.ResultCache, constants.Loki)
@@ -158,8 +161,10 @@ func TestIndexStatsCache_RecentData(t *testing.T) {
} {
t.Run(tc.name, func(t *testing.T) {
cfg := queryrangebase.ResultsCacheConfig{
- CacheConfig: cache.Config{
- Cache: cache.NewMockCache(),
+ Config: resultscache.Config{
+ CacheConfig: cache.Config{
+ Cache: cache.NewMockCache(),
+ },
},
}
c, err := cache.New(cfg.CacheConfig, nil, log.NewNopLogger(), stats.ResultCache, constants.Loki)
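
The test changes above reflect that the results-cache cache settings now live in the shared `resultscache.Config`. A minimal sketch of the new config shape; the composite-literal key `Config` together with the unchanged `cfg.CacheConfig` access in the tests suggests the field is embedded, so promoted fields keep existing call sites compiling:

```go
package main

import (
	"fmt"

	"github.com/go-kit/log"

	"github.com/grafana/loki/pkg/logqlmodel/stats"
	"github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"
	"github.com/grafana/loki/pkg/storage/chunk/cache"
	"github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
	"github.com/grafana/loki/pkg/util/constants"
)

func main() {
	// The cache backend config is now nested under resultscache.Config.
	cfg := queryrangebase.ResultsCacheConfig{
		Config: resultscache.Config{
			CacheConfig: cache.Config{
				Cache: cache.NewMockCache(),
			},
		},
	}

	// Promoted field access stays as before, exactly as in the tests above.
	c, err := cache.New(cfg.CacheConfig, nil, log.NewNopLogger(), stats.ResultCache, constants.Loki)
	if err != nil {
		panic(err)
	}
	fmt.Println(c != nil)
}
```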
diff --git a/pkg/querier/queryrange/instrument.go b/pkg/querier/queryrange/instrument.go
index 8c32fad4ca304..497cfb2dd8a1a 100644
--- a/pkg/querier/queryrange/instrument.go
+++ b/pkg/querier/queryrange/instrument.go
@@ -8,6 +8,7 @@ import (
"github.com/grafana/dskit/httpgrpc"
"github.com/grafana/dskit/instrument"
"github.com/grafana/dskit/middleware"
+ "github.com/opentracing/opentracing-go"
"github.com/grafana/dskit/server"
@@ -52,3 +53,18 @@ func (i Instrument) observe(ctx context.Context, route string, err error, durati
}
instrument.ObserveWithExemplar(ctx, i.RequestDuration.WithLabelValues(method, route, respStatus, "false"), duration.Seconds())
}
+
+type Tracer struct{}
+
+var _ queryrangebase.Middleware = Tracer{}
+
+// Wrap implements the queryrangebase.Middleware
+func (t Tracer) Wrap(next queryrangebase.Handler) queryrangebase.Handler {
+ return queryrangebase.HandlerFunc(func(ctx context.Context, r queryrangebase.Request) (queryrangebase.Response, error) {
+ route := DefaultCodec.Path(r)
+ route = middleware.MakeLabelValue(route)
+ span, ctx := opentracing.StartSpanFromContext(ctx, route)
+ defer span.Finish()
+ return next.Do(ctx, r)
+ })
+}
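
The new `Tracer` middleware above opens a tracing span named after the request's route for every downstream call. A minimal wiring sketch, assuming a stand-in no-op handler in place of the real pipeline and relying on the default no-op tracer when none is registered:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/grafana/loki/pkg/querier/queryrange"
	"github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"
)

func main() {
	// Stand-in handler; in the real pipeline this is the next middleware.
	next := queryrangebase.HandlerFunc(func(_ context.Context, _ queryrangebase.Request) (queryrangebase.Response, error) {
		return &queryrange.LokiResponse{Status: "success"}, nil
	})

	// Tracer.Wrap derives the span name from DefaultCodec.Path(r), so the
	// request needs a path the codec can map to a route.
	traced := queryrange.Tracer{}.Wrap(next)

	resp, err := traced.Do(context.Background(), &queryrange.LokiRequest{
		Query:   `{app="foo"}`,
		Path:    "/loki/api/v1/query_range",
		StartTs: time.Now().Add(-time.Hour),
		EndTs:   time.Now(),
	})
	fmt.Println(resp, err)
}
```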
diff --git a/pkg/querier/queryrange/limits.go b/pkg/querier/queryrange/limits.go
index b6f5c4d51fb33..673c995a600b9 100644
--- a/pkg/querier/queryrange/limits.go
+++ b/pkg/querier/queryrange/limits.go
@@ -14,7 +14,6 @@ import (
"github.com/go-kit/log/level"
"github.com/grafana/dskit/httpgrpc"
"github.com/grafana/dskit/tenant"
-
"github.com/opentracing/opentracing-go"
otlog "github.com/opentracing/opentracing-go/log"
"github.com/pkg/errors"
@@ -28,6 +27,7 @@ import (
"github.com/grafana/loki/pkg/logql/syntax"
queryrange_limits "github.com/grafana/loki/pkg/querier/queryrange/limits"
"github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"
+ "github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
"github.com/grafana/loki/pkg/storage/config"
"github.com/grafana/loki/pkg/storage/stores/index/stats"
util_log "github.com/grafana/loki/pkg/util/log"
@@ -104,7 +104,7 @@ type cacheKeyLimits struct {
transformer UserIDTransformer
}
-func (l cacheKeyLimits) GenerateCacheKey(ctx context.Context, userID string, r queryrangebase.Request) string {
+func (l cacheKeyLimits) GenerateCacheKey(ctx context.Context, userID string, r resultscache.Request) string {
split := l.QuerySplitDuration(userID)
var currentInterval int64
@@ -304,7 +304,7 @@ func (q *querySizeLimiter) getBytesReadForRequest(ctx context.Context, r queryra
}
func (q *querySizeLimiter) getSchemaCfg(r queryrangebase.Request) (config.PeriodConfig, error) {
- maxRVDuration, maxOffset, err := maxRangeVectorAndOffsetDuration(r.GetQuery())
+ maxRVDuration, maxOffset, err := maxRangeVectorAndOffsetDurationFromQueryString(r.GetQuery())
if err != nil {
return config.PeriodConfig{}, errors.New("failed to get range-vector and offset duration: " + err.Error())
}
diff --git a/pkg/querier/queryrange/limits_test.go b/pkg/querier/queryrange/limits_test.go
index b4bff9f96d08d..efc9b030f7f84 100644
--- a/pkg/querier/queryrange/limits_test.go
+++ b/pkg/querier/queryrange/limits_test.go
@@ -17,7 +17,9 @@ import (
"gopkg.in/yaml.v2"
"github.com/grafana/loki/pkg/logproto"
+ "github.com/grafana/loki/pkg/logql/syntax"
"github.com/grafana/loki/pkg/logqlmodel"
+ "github.com/grafana/loki/pkg/querier/plan"
base "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"
"github.com/grafana/loki/pkg/storage/config"
"github.com/grafana/loki/pkg/util/constants"
@@ -72,6 +74,9 @@ func Test_seriesLimiter(t *testing.T) {
EndTs: testTime,
Direction: logproto.FORWARD,
Path: "/query_range",
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`rate({app="foo"} |= "foo"[1m])`),
+ },
}
ctx := user.InjectOrgID(context.Background(), "1")
@@ -241,6 +246,9 @@ func Test_MaxQueryLookBack(t *testing.T) {
EndTs: testTime,
Direction: logproto.FORWARD,
Path: "/loki/api/v1/query_range",
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`{app="foo"} |= "foo"`),
+ },
}
ctx := user.InjectOrgID(context.Background(), "1")
@@ -589,6 +597,9 @@ func Test_MaxQuerySize(t *testing.T) {
EndTs: tc.queryEnd,
Direction: logproto.FORWARD,
Path: "/query_range",
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(tc.query),
+ },
}
ctx := user.InjectOrgID(context.Background(), "foo")
diff --git a/pkg/querier/queryrange/log_result_cache.go b/pkg/querier/queryrange/log_result_cache.go
index ee29e385e0d28..c15568d9075ac 100644
--- a/pkg/querier/queryrange/log_result_cache.go
+++ b/pkg/querier/queryrange/log_result_cache.go
@@ -10,14 +10,13 @@ import (
"github.com/go-kit/log/level"
"github.com/gogo/protobuf/proto"
"github.com/grafana/dskit/httpgrpc"
+ "github.com/grafana/dskit/tenant"
"github.com/opentracing/opentracing-go"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/common/model"
"golang.org/x/sync/errgroup"
- "github.com/grafana/dskit/tenant"
-
"github.com/grafana/loki/pkg/loghttp"
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/logqlmodel/stats"
@@ -201,7 +200,7 @@ func (l *logResultCache) handleHit(ctx context.Context, cacheKey string, cachedR
// if the response is empty and the query is larger than what is cached, update the cache
if isEmpty(result) && (lokiReq.EndTs.UnixNano()-lokiReq.StartTs.UnixNano() > cachedRequest.EndTs.UnixNano()-cachedRequest.StartTs.UnixNano()) {
- cachedRequest = cachedRequest.WithStartEndTime(lokiReq.GetStartTs(), lokiReq.GetEndTs())
+ cachedRequest = cachedRequest.WithStartEnd(lokiReq.GetStartTs(), lokiReq.GetEndTs()).(*LokiRequest)
updateCache = true
}
} else {
@@ -216,7 +215,7 @@ func (l *logResultCache) handleHit(ctx context.Context, cacheKey string, cachedR
// if we're missing data at the start, start fetching from the start to the cached start.
if lokiReq.GetStartTs().Before(cachedRequest.GetStartTs()) {
g.Go(func() error {
- startRequest = lokiReq.WithStartEndTime(lokiReq.GetStartTs(), cachedRequest.GetStartTs())
+ startRequest = lokiReq.WithStartEnd(lokiReq.GetStartTs(), cachedRequest.GetStartTs()).(*LokiRequest)
resp, err := l.next.Do(ctx, startRequest)
if err != nil {
return err
@@ -233,7 +232,7 @@ func (l *logResultCache) handleHit(ctx context.Context, cacheKey string, cachedR
// if we're missing data at the end, start fetching from the cached end to the end.
if lokiReq.GetEndTs().After(cachedRequest.GetEndTs()) {
g.Go(func() error {
- endRequest = lokiReq.WithStartEndTime(cachedRequest.GetEndTs(), lokiReq.GetEndTs())
+ endRequest = lokiReq.WithStartEnd(cachedRequest.GetEndTs(), lokiReq.GetEndTs()).(*LokiRequest)
resp, err := l.next.Do(ctx, endRequest)
if err != nil {
return err
@@ -255,7 +254,7 @@ func (l *logResultCache) handleHit(ctx context.Context, cacheKey string, cachedR
// If it's not empty only merge the response.
if startResp != nil {
if isEmpty(startResp) {
- cachedRequest = cachedRequest.WithStartEndTime(startRequest.GetStartTs(), cachedRequest.GetEndTs())
+ cachedRequest = cachedRequest.WithStartEnd(startRequest.GetStartTs(), cachedRequest.GetEndTs()).(*LokiRequest)
updateCache = true
} else {
if startResp.Status != loghttp.QueryStatusSuccess {
@@ -269,7 +268,7 @@ func (l *logResultCache) handleHit(ctx context.Context, cacheKey string, cachedR
// If it's not empty only merge the response.
if endResp != nil {
if isEmpty(endResp) {
- cachedRequest = cachedRequest.WithStartEndTime(cachedRequest.GetStartTs(), endRequest.GetEndTs())
+ cachedRequest = cachedRequest.WithStartEnd(cachedRequest.GetStartTs(), endRequest.GetEndTs()).(*LokiRequest)
updateCache = true
} else {
if endResp.Status != loghttp.QueryStatusSuccess {
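
The cache code above switches from the Loki-specific `WithStartEndTime` helper to the shared `WithStartEnd`, which returns the generic request interface, hence the added `.(*LokiRequest)` assertions. A minimal sketch of the pattern, assuming `WithStartEnd` accepts `time.Time` bounds as in the calls above:

```go
package main

import (
	"fmt"
	"time"

	"github.com/grafana/loki/pkg/querier/queryrange"
)

func main() {
	req := &queryrange.LokiRequest{
		StartTs: time.Now().Add(-2 * time.Hour),
		EndTs:   time.Now(),
	}

	// WithStartEnd returns the shared request interface, so callers that need
	// the concrete *LokiRequest (as the cache code above does) assert it back.
	trimmed := req.WithStartEnd(req.StartTs.Add(time.Hour), req.EndTs).(*queryrange.LokiRequest)
	fmt.Println(trimmed.StartTs, trimmed.EndTs)
}
```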
diff --git a/pkg/querier/queryrange/marshal.go b/pkg/querier/queryrange/marshal.go
index 8b61facf39e4e..6f6e997865364 100644
--- a/pkg/querier/queryrange/marshal.go
+++ b/pkg/querier/queryrange/marshal.go
@@ -6,10 +6,12 @@ import (
"context"
"fmt"
"io"
+ "net/http"
"time"
"github.com/gogo/googleapis/google/rpc"
"github.com/gogo/status"
+ "github.com/grafana/dskit/httpgrpc"
"github.com/grafana/dskit/user"
"github.com/opentracing/opentracing-go"
"github.com/prometheus/prometheus/promql"
@@ -19,10 +21,13 @@ import (
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/logql"
"github.com/grafana/loki/pkg/logql/sketch"
+ "github.com/grafana/loki/pkg/logql/syntax"
"github.com/grafana/loki/pkg/logqlmodel"
+ "github.com/grafana/loki/pkg/querier/plan"
"github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"
"github.com/grafana/loki/pkg/util/httpreq"
"github.com/grafana/loki/pkg/util/querylimits"
+ "github.com/grafana/loki/pkg/util/server"
)
const (
@@ -115,7 +120,7 @@ func ResultToResponse(result logqlmodel.Result, params logql.Params) (queryrange
case sketch.TopKMatrix:
sk, err := data.ToProto()
return &TopKSketchesResponse{Response: sk}, err
- case sketch.QuantileSketchMatrix:
+ case logql.ProbabilisticQuantileMatrix:
return &QuantileSketchResponse{Response: data.ToProto()}, nil
}
@@ -168,7 +173,7 @@ func ResponseToResult(resp queryrangebase.Response) (logqlmodel.Result, error) {
Headers: resp.GetHeaders(),
}, nil
case *QuantileSketchResponse:
- matrix, err := sketch.QuantileSketchMatrixFromProto(r.Response)
+ matrix, err := logql.ProbabilisticQuantileMatrixFromProto(r.Response)
if err != nil {
return logqlmodel.Result{}, fmt.Errorf("cannot decode quantile sketch: %w", err)
}
@@ -230,6 +235,8 @@ func QueryResponseWrap(res queryrangebase.Response) (*QueryResponse, error) {
p.Response = &QueryResponse_Labels{response}
case *IndexStatsResponse:
p.Response = &QueryResponse_Stats{response}
+ case *VolumeResponse:
+ p.Response = &QueryResponse_Volume{response}
case *TopKSketchesResponse:
p.Response = &QueryResponse_TopkSketches{response}
case *QuantileSketchResponse:
@@ -241,6 +248,13 @@ func QueryResponseWrap(res queryrangebase.Response) (*QueryResponse, error) {
return p, nil
}
+// QueryResponseWrapError wraps an error in the QueryResponse protobuf.
+func QueryResponseWrapError(err error) *QueryResponse {
+ return &QueryResponse{
+ Status: server.WrapError(err),
+ }
+}
+
func (Codec) QueryRequestUnwrap(ctx context.Context, req *QueryRequest) (queryrangebase.Request, context.Context, error) {
if req == nil {
return nil, ctx, nil
@@ -277,12 +291,32 @@ func (Codec) QueryRequestUnwrap(ctx context.Context, req *QueryRequest) (queryra
case *QueryRequest_Series:
return concrete.Series, ctx, nil
case *QueryRequest_Instant:
+ if concrete.Instant.Plan == nil {
+ parsed, err := syntax.ParseExpr(concrete.Instant.GetQuery())
+ if err != nil {
+ return nil, ctx, httpgrpc.Errorf(http.StatusBadRequest, err.Error())
+ }
+ concrete.Instant.Plan = &plan.QueryPlan{
+ AST: parsed,
+ }
+ }
+
return concrete.Instant, ctx, nil
case *QueryRequest_Stats:
return concrete.Stats, ctx, nil
case *QueryRequest_Volume:
return concrete.Volume, ctx, nil
case *QueryRequest_Streams:
+ if concrete.Streams.Plan == nil {
+ parsed, err := syntax.ParseExpr(concrete.Streams.GetQuery())
+ if err != nil {
+ return nil, ctx, httpgrpc.Errorf(http.StatusBadRequest, err.Error())
+ }
+ concrete.Streams.Plan = &plan.QueryPlan{
+ AST: parsed,
+ }
+ }
+
return concrete.Streams, ctx, nil
case *QueryRequest_Labels:
return &LabelRequest{
diff --git a/pkg/querier/queryrange/marshal_test.go b/pkg/querier/queryrange/marshal_test.go
index 569a1af1a4b98..6fa9bbe23897c 100644
--- a/pkg/querier/queryrange/marshal_test.go
+++ b/pkg/querier/queryrange/marshal_test.go
@@ -7,7 +7,10 @@ import (
"github.com/stretchr/testify/require"
"github.com/grafana/loki/pkg/loghttp"
+ "github.com/grafana/loki/pkg/logproto"
+ "github.com/grafana/loki/pkg/logql"
"github.com/grafana/loki/pkg/logqlmodel"
+ "github.com/grafana/loki/pkg/logqlmodel/stats"
"github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"
)
@@ -32,6 +35,18 @@ func TestResultToResponse(t *testing.T) {
},
},
},
+ {
+ name: "empty probabilistic quantile matrix",
+ result: logqlmodel.Result{
+ Data: logql.ProbabilisticQuantileMatrix([]logql.ProbabilisticQuantileVector{}),
+ },
+ response: &QuantileSketchResponse{
+ Response: &logproto.QuantileSketchMatrix{
+ Values: []*logproto.QuantileSketchVector{},
+ },
+ Headers: []queryrangebase.PrometheusResponseHeader(nil),
+ },
+ },
}
for _, tt := range tests {
@@ -43,3 +58,57 @@ func TestResultToResponse(t *testing.T) {
})
}
}
+
+func TestResponseWrap(t *testing.T) {
+ for _, tt := range []struct {
+ name string
+ response queryrangebase.Response
+ expected isQueryResponse_Response
+ }{
+ {"volume", &VolumeResponse{}, &QueryResponse_Volume{}},
+ {"series", &LokiSeriesResponse{}, &QueryResponse_Series{}},
+ {"label", &LokiLabelNamesResponse{}, &QueryResponse_Labels{}},
+ {"stats", &IndexStatsResponse{}, &QueryResponse_Stats{}},
+ {"prom", &LokiPromResponse{}, &QueryResponse_Prom{}},
+ {"streams", &LokiResponse{}, &QueryResponse_Streams{}},
+ {"topk", &TopKSketchesResponse{}, &QueryResponse_TopkSketches{}},
+ {"quantile", &QuantileSketchResponse{}, &QueryResponse_QuantileSketches{}},
+ } {
+ t.Run(tt.name, func(t *testing.T) {
+ actual, err := QueryResponseWrap(tt.response)
+ require.NoError(t, err)
+ require.IsType(t, tt.expected, actual.Response)
+ })
+ }
+}
+
+// Benchmark_UnwrapSeries is the sibling of Benchmark_CodecDecodeSeries.
+func Benchmark_UnwrapSeries(b *testing.B) {
+ // Setup
+ original := &LokiSeriesResponse{
+ Status: "200",
+ Version: 1,
+ Statistics: stats.Result{},
+ Data: generateSeries(),
+ }
+
+ wrappedResponse, err := QueryResponseWrap(original)
+ require.NoError(b, err)
+
+ body, err := wrappedResponse.Marshal()
+ require.NoError(b, err)
+
+ // Actual run
+ b.ResetTimer()
+ b.ReportAllocs()
+ for n := 0; n < b.N; n++ {
+ resp := &QueryResponse{}
+ err := resp.Unmarshal(body)
+ require.NoError(b, err)
+
+ actual, err := QueryResponseUnwrap(resp)
+ require.NoError(b, err)
+ require.NotNil(b, actual)
+ }
+
+}
diff --git a/pkg/querier/queryrange/prometheus.go b/pkg/querier/queryrange/prometheus.go
index 81a131c1c4771..2a8ff78c164e5 100644
--- a/pkg/querier/queryrange/prometheus.go
+++ b/pkg/querier/queryrange/prometheus.go
@@ -14,6 +14,7 @@ import (
"github.com/grafana/loki/pkg/loghttp"
"github.com/grafana/loki/pkg/logqlmodel/stats"
"github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"
+ "github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
)
var (
@@ -25,7 +26,7 @@ var (
type PrometheusExtractor struct{}
// Extract wraps the original prometheus cache extractor
-func (PrometheusExtractor) Extract(start, end int64, res queryrangebase.Response, resStart, resEnd int64) queryrangebase.Response {
+func (PrometheusExtractor) Extract(start, end int64, res resultscache.Response, resStart, resEnd int64) resultscache.Response {
response := extractor.Extract(start, end, res.(*LokiPromResponse).Response, resStart, resEnd)
return &LokiPromResponse{
Response: response.(*queryrangebase.PrometheusResponse),
diff --git a/pkg/querier/queryrange/queryrange.pb.go b/pkg/querier/queryrange/queryrange.pb.go
index f3955da153bf9..cbc541a044044 100644
--- a/pkg/querier/queryrange/queryrange.pb.go
+++ b/pkg/querier/queryrange/queryrange.pb.go
@@ -4,6 +4,7 @@
package queryrange
import (
+ bytes "bytes"
fmt "fmt"
rpc "github.com/gogo/googleapis/google/rpc"
_ "github.com/gogo/protobuf/gogoproto"
@@ -16,6 +17,7 @@ import (
stats "github.com/grafana/loki/pkg/logqlmodel/stats"
_ "github.com/grafana/loki/pkg/push"
github_com_grafana_loki_pkg_push "github.com/grafana/loki/pkg/push"
+ github_com_grafana_loki_pkg_querier_plan "github.com/grafana/loki/pkg/querier/plan"
queryrangebase "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"
_ "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase/definitions"
github_com_grafana_loki_pkg_querier_queryrange_queryrangebase_definitions "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase/definitions"
@@ -40,15 +42,16 @@ var _ = time.Kitchen
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type LokiRequest struct {
- Query string `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"`
- Limit uint32 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"`
- Step int64 `protobuf:"varint,3,opt,name=step,proto3" json:"step,omitempty"`
- Interval int64 `protobuf:"varint,9,opt,name=interval,proto3" json:"interval,omitempty"`
- StartTs time.Time `protobuf:"bytes,4,opt,name=startTs,proto3,stdtime" json:"startTs"`
- EndTs time.Time `protobuf:"bytes,5,opt,name=endTs,proto3,stdtime" json:"endTs"`
- Direction logproto.Direction `protobuf:"varint,6,opt,name=direction,proto3,enum=logproto.Direction" json:"direction,omitempty"`
- Path string `protobuf:"bytes,7,opt,name=path,proto3" json:"path,omitempty"`
- Shards []string `protobuf:"bytes,8,rep,name=shards,proto3" json:"shards"`
+ Query string `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"`
+ Limit uint32 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"`
+ Step int64 `protobuf:"varint,3,opt,name=step,proto3" json:"step,omitempty"`
+ Interval int64 `protobuf:"varint,9,opt,name=interval,proto3" json:"interval,omitempty"`
+ StartTs time.Time `protobuf:"bytes,4,opt,name=startTs,proto3,stdtime" json:"startTs"`
+ EndTs time.Time `protobuf:"bytes,5,opt,name=endTs,proto3,stdtime" json:"endTs"`
+ Direction logproto.Direction `protobuf:"varint,6,opt,name=direction,proto3,enum=logproto.Direction" json:"direction,omitempty"`
+ Path string `protobuf:"bytes,7,opt,name=path,proto3" json:"path,omitempty"`
+ Shards []string `protobuf:"bytes,8,rep,name=shards,proto3" json:"shards"`
+ Plan *github_com_grafana_loki_pkg_querier_plan.QueryPlan `protobuf:"bytes,10,opt,name=plan,proto3,customtype=github.com/grafana/loki/pkg/querier/plan.QueryPlan" json:"plan,omitempty"`
}
func (m *LokiRequest) Reset() { *m = LokiRequest{} }
@@ -147,12 +150,13 @@ func (m *LokiRequest) GetShards() []string {
}
type LokiInstantRequest struct {
- Query string `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"`
- Limit uint32 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"`
- TimeTs time.Time `protobuf:"bytes,3,opt,name=timeTs,proto3,stdtime" json:"timeTs"`
- Direction logproto.Direction `protobuf:"varint,4,opt,name=direction,proto3,enum=logproto.Direction" json:"direction,omitempty"`
- Path string `protobuf:"bytes,5,opt,name=path,proto3" json:"path,omitempty"`
- Shards []string `protobuf:"bytes,6,rep,name=shards,proto3" json:"shards"`
+ Query string `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"`
+ Limit uint32 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"`
+ TimeTs time.Time `protobuf:"bytes,3,opt,name=timeTs,proto3,stdtime" json:"timeTs"`
+ Direction logproto.Direction `protobuf:"varint,4,opt,name=direction,proto3,enum=logproto.Direction" json:"direction,omitempty"`
+ Path string `protobuf:"bytes,5,opt,name=path,proto3" json:"path,omitempty"`
+ Shards []string `protobuf:"bytes,6,rep,name=shards,proto3" json:"shards"`
+ Plan *github_com_grafana_loki_pkg_querier_plan.QueryPlan `protobuf:"bytes,7,opt,name=plan,proto3,customtype=github.com/grafana/loki/pkg/querier/plan.QueryPlan" json:"plan,omitempty"`
}
func (m *LokiInstantRequest) Reset() { *m = LokiInstantRequest{} }
@@ -229,6 +233,49 @@ func (m *LokiInstantRequest) GetShards() []string {
return nil
}
+type Plan struct {
+ Raw []byte `protobuf:"bytes,1,opt,name=raw,proto3" json:"raw,omitempty"`
+}
+
+func (m *Plan) Reset() { *m = Plan{} }
+func (*Plan) ProtoMessage() {}
+func (*Plan) Descriptor() ([]byte, []int) {
+ return fileDescriptor_51b9d53b40d11902, []int{2}
+}
+func (m *Plan) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Plan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Plan.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Plan) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Plan.Merge(m, src)
+}
+func (m *Plan) XXX_Size() int {
+ return m.Size()
+}
+func (m *Plan) XXX_DiscardUnknown() {
+ xxx_messageInfo_Plan.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Plan proto.InternalMessageInfo
+
+func (m *Plan) GetRaw() []byte {
+ if m != nil {
+ return m.Raw
+ }
+ return nil
+}
+
type LokiResponse struct {
Status string `protobuf:"bytes,1,opt,name=Status,proto3" json:"status"`
Data LokiData `protobuf:"bytes,2,opt,name=Data,proto3" json:"data,omitempty"`
@@ -244,7 +291,7 @@ type LokiResponse struct {
func (m *LokiResponse) Reset() { *m = LokiResponse{} }
func (*LokiResponse) ProtoMessage() {}
func (*LokiResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_51b9d53b40d11902, []int{2}
+ return fileDescriptor_51b9d53b40d11902, []int{3}
}
func (m *LokiResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -340,7 +387,7 @@ type LokiSeriesRequest struct {
func (m *LokiSeriesRequest) Reset() { *m = LokiSeriesRequest{} }
func (*LokiSeriesRequest) ProtoMessage() {}
func (*LokiSeriesRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_51b9d53b40d11902, []int{3}
+ return fileDescriptor_51b9d53b40d11902, []int{4}
}
func (m *LokiSeriesRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -415,7 +462,7 @@ type LokiSeriesResponse struct {
func (m *LokiSeriesResponse) Reset() { *m = LokiSeriesResponse{} }
func (*LokiSeriesResponse) ProtoMessage() {}
func (*LokiSeriesResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_51b9d53b40d11902, []int{4}
+ return fileDescriptor_51b9d53b40d11902, []int{5}
}
func (m *LokiSeriesResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -483,7 +530,7 @@ type LokiLabelNamesResponse struct {
func (m *LokiLabelNamesResponse) Reset() { *m = LokiLabelNamesResponse{} }
func (*LokiLabelNamesResponse) ProtoMessage() {}
func (*LokiLabelNamesResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_51b9d53b40d11902, []int{5}
+ return fileDescriptor_51b9d53b40d11902, []int{6}
}
func (m *LokiLabelNamesResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -548,7 +595,7 @@ type LokiData struct {
func (m *LokiData) Reset() { *m = LokiData{} }
func (*LokiData) ProtoMessage() {}
func (*LokiData) Descriptor() ([]byte, []int) {
- return fileDescriptor_51b9d53b40d11902, []int{6}
+ return fileDescriptor_51b9d53b40d11902, []int{7}
}
func (m *LokiData) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -593,7 +640,7 @@ type LokiPromResponse struct {
func (m *LokiPromResponse) Reset() { *m = LokiPromResponse{} }
func (*LokiPromResponse) ProtoMessage() {}
func (*LokiPromResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_51b9d53b40d11902, []int{7}
+ return fileDescriptor_51b9d53b40d11902, []int{8}
}
func (m *LokiPromResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -644,7 +691,7 @@ type IndexStatsResponse struct {
func (m *IndexStatsResponse) Reset() { *m = IndexStatsResponse{} }
func (*IndexStatsResponse) ProtoMessage() {}
func (*IndexStatsResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_51b9d53b40d11902, []int{8}
+ return fileDescriptor_51b9d53b40d11902, []int{9}
}
func (m *IndexStatsResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -681,7 +728,7 @@ type VolumeResponse struct {
func (m *VolumeResponse) Reset() { *m = VolumeResponse{} }
func (*VolumeResponse) ProtoMessage() {}
func (*VolumeResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_51b9d53b40d11902, []int{9}
+ return fileDescriptor_51b9d53b40d11902, []int{10}
}
func (m *VolumeResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -718,7 +765,7 @@ type TopKSketchesResponse struct {
func (m *TopKSketchesResponse) Reset() { *m = TopKSketchesResponse{} }
func (*TopKSketchesResponse) ProtoMessage() {}
func (*TopKSketchesResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_51b9d53b40d11902, []int{10}
+ return fileDescriptor_51b9d53b40d11902, []int{11}
}
func (m *TopKSketchesResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -755,7 +802,7 @@ type QuantileSketchResponse struct {
func (m *QuantileSketchResponse) Reset() { *m = QuantileSketchResponse{} }
func (*QuantileSketchResponse) ProtoMessage() {}
func (*QuantileSketchResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_51b9d53b40d11902, []int{11}
+ return fileDescriptor_51b9d53b40d11902, []int{12}
}
func (m *QuantileSketchResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -801,7 +848,7 @@ type QueryResponse struct {
func (m *QueryResponse) Reset() { *m = QueryResponse{} }
func (*QueryResponse) ProtoMessage() {}
func (*QueryResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_51b9d53b40d11902, []int{12}
+ return fileDescriptor_51b9d53b40d11902, []int{13}
}
func (m *QueryResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -970,7 +1017,7 @@ type QueryRequest struct {
func (m *QueryRequest) Reset() { *m = QueryRequest{} }
func (*QueryRequest) ProtoMessage() {}
func (*QueryRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_51b9d53b40d11902, []int{13}
+ return fileDescriptor_51b9d53b40d11902, []int{14}
}
func (m *QueryRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1103,6 +1150,7 @@ func (*QueryRequest) XXX_OneofWrappers() []interface{} {
func init() {
proto.RegisterType((*LokiRequest)(nil), "queryrange.LokiRequest")
proto.RegisterType((*LokiInstantRequest)(nil), "queryrange.LokiInstantRequest")
+ proto.RegisterType((*Plan)(nil), "queryrange.Plan")
proto.RegisterType((*LokiResponse)(nil), "queryrange.LokiResponse")
proto.RegisterType((*LokiSeriesRequest)(nil), "queryrange.LokiSeriesRequest")
proto.RegisterType((*LokiSeriesResponse)(nil), "queryrange.LokiSeriesResponse")
@@ -1123,99 +1171,102 @@ func init() {
}
var fileDescriptor_51b9d53b40d11902 = []byte{
- // 1458 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0xcd, 0x6f, 0x1b, 0x45,
- 0x1b, 0xf7, 0xfa, 0x33, 0x9e, 0x34, 0x79, 0xfb, 0x4e, 0xa2, 0x74, 0xdf, 0xb4, 0xef, 0xae, 0x65,
- 0x89, 0xd6, 0x20, 0x58, 0x53, 0xa7, 0xf4, 0x13, 0x10, 0x5d, 0xda, 0xca, 0x15, 0x2d, 0x6a, 0x37,
- 0x11, 0x07, 0x6e, 0x13, 0x7b, 0x62, 0x2f, 0xf6, 0x7a, 0x37, 0x3b, 0xe3, 0xa8, 0xb9, 0xf1, 0x07,
- 0x80, 0xd4, 0xbf, 0x02, 0x21, 0x51, 0x21, 0x71, 0xe5, 0xc8, 0xa9, 0xc7, 0x1c, 0xab, 0x48, 0x18,
- 0xea, 0x72, 0x80, 0x9c, 0xfa, 0x27, 0xa0, 0xf9, 0xd8, 0xf5, 0xac, 0xed, 0xb4, 0x76, 0xb9, 0xb4,
- 0x12, 0x97, 0x64, 0x3e, 0x9e, 0xdf, 0xec, 0x3c, 0xbf, 0xe7, 0xf7, 0x3c, 0x33, 0x63, 0x70, 0x2e,
- 0xe8, 0xb4, 0xaa, 0xbb, 0x7d, 0x1c, 0xba, 0x38, 0xe4, 0xff, 0xf7, 0x43, 0xd4, 0x6b, 0x61, 0xa5,
- 0x69, 0x05, 0xa1, 0x4f, 0x7d, 0x08, 0x46, 0x23, 0xeb, 0xb5, 0x96, 0x4b, 0xdb, 0xfd, 0x6d, 0xab,
- 0xe1, 0x7b, 0xd5, 0x96, 0xdf, 0xf2, 0xab, 0x2d, 0xdf, 0x6f, 0x75, 0x31, 0x0a, 0x5c, 0x22, 0x9b,
- 0xd5, 0x30, 0x68, 0x54, 0x09, 0x45, 0xb4, 0x4f, 0x04, 0x7e, 0x7d, 0x95, 0x19, 0xf2, 0x26, 0x87,
- 0xc8, 0x51, 0x53, 0x9a, 0xf3, 0xde, 0x76, 0x7f, 0xa7, 0x4a, 0x5d, 0x0f, 0x13, 0x8a, 0xbc, 0x40,
- 0x1a, 0x9c, 0x66, 0xfb, 0xeb, 0xfa, 0x2d, 0x81, 0x8c, 0x1a, 0x72, 0xf2, 0x7f, 0x89, 0x49, 0xd2,
- 0xc1, 0xb4, 0xd1, 0x96, 0x53, 0x25, 0x39, 0xb5, 0xdb, 0xf5, 0xfc, 0x26, 0xee, 0xf2, 0xbd, 0x10,
- 0xf1, 0x57, 0x5a, 0xac, 0x30, 0x8b, 0xa0, 0x4f, 0xda, 0xfc, 0x8f, 0x1c, 0xfc, 0xf4, 0xa5, 0x74,
- 0x6c, 0x23, 0x82, 0xab, 0x4d, 0xbc, 0xe3, 0xf6, 0x5c, 0xea, 0xfa, 0x3d, 0xa2, 0xb6, 0xe5, 0x22,
- 0x17, 0x67, 0x5b, 0x64, 0x9c, 0xe2, 0xf2, 0x41, 0x1a, 0x2c, 0xde, 0xf1, 0x3b, 0xae, 0x83, 0x77,
- 0xfb, 0x98, 0x50, 0xb8, 0x0a, 0x72, 0xdc, 0x46, 0xd7, 0x4a, 0x5a, 0xa5, 0xe8, 0x88, 0x0e, 0x1b,
- 0xed, 0xba, 0x9e, 0x4b, 0xf5, 0x74, 0x49, 0xab, 0x2c, 0x39, 0xa2, 0x03, 0x21, 0xc8, 0x12, 0x8a,
- 0x03, 0x3d, 0x53, 0xd2, 0x2a, 0x19, 0x87, 0xb7, 0xe1, 0x3a, 0x58, 0x70, 0x7b, 0x14, 0x87, 0x7b,
- 0xa8, 0xab, 0x17, 0xf9, 0x78, 0xdc, 0x87, 0x1f, 0x83, 0x02, 0xa1, 0x28, 0xa4, 0x5b, 0x44, 0xcf,
- 0x96, 0xb4, 0xca, 0x62, 0x6d, 0xdd, 0x12, 0xa1, 0xb0, 0xa2, 0x50, 0x58, 0x5b, 0x51, 0x28, 0xec,
- 0x85, 0xc7, 0x03, 0x33, 0xf5, 0xf0, 0x37, 0x53, 0x73, 0x22, 0x10, 0xbc, 0x0a, 0x72, 0xb8, 0xd7,
- 0xdc, 0x22, 0x7a, 0x6e, 0x0e, 0xb4, 0x80, 0xc0, 0xf3, 0xa0, 0xd8, 0x74, 0x43, 0xdc, 0x60, 0x9c,
- 0xe9, 0xf9, 0x92, 0x56, 0x59, 0xae, 0xad, 0x58, 0x71, 0x68, 0x6f, 0x44, 0x53, 0xce, 0xc8, 0x8a,
- 0xb9, 0x17, 0x20, 0xda, 0xd6, 0x0b, 0x9c, 0x09, 0xde, 0x86, 0x65, 0x90, 0x27, 0x6d, 0x14, 0x36,
- 0x89, 0xbe, 0x50, 0xca, 0x54, 0x8a, 0x36, 0x38, 0x1a, 0x98, 0x72, 0xc4, 0x91, 0xff, 0xcb, 0x7f,
- 0x69, 0x00, 0x32, 0x4a, 0x6f, 0xf7, 0x08, 0x45, 0x3d, 0xfa, 0x2a, 0xcc, 0x7e, 0x08, 0xf2, 0x4c,
- 0x94, 0x5b, 0x84, 0x73, 0x3b, 0xab, 0xab, 0x12, 0x93, 0xf4, 0x35, 0x3b, 0x97, 0xaf, 0xb9, 0xa9,
- 0xbe, 0xe6, 0x8f, 0xf5, 0xf5, 0x87, 0x2c, 0x38, 0x21, 0xe4, 0x43, 0x02, 0xbf, 0x47, 0x30, 0x03,
- 0x6d, 0xf2, 0x14, 0x14, 0x6e, 0x4a, 0x10, 0x1f, 0x71, 0xe4, 0x0c, 0xfc, 0x04, 0x64, 0x6f, 0x20,
- 0x8a, 0xb8, 0xcb, 0x8b, 0xb5, 0x55, 0x4b, 0x11, 0x25, 0x5b, 0x8b, 0xcd, 0xd9, 0x6b, 0xcc, 0xab,
- 0xa3, 0x81, 0xb9, 0xdc, 0x44, 0x14, 0xbd, 0xeb, 0x7b, 0x2e, 0xc5, 0x5e, 0x40, 0xf7, 0x1d, 0x8e,
- 0x84, 0x1f, 0x80, 0xe2, 0xcd, 0x30, 0xf4, 0xc3, 0xad, 0xfd, 0x00, 0x73, 0x8a, 0x8a, 0xf6, 0xa9,
- 0xa3, 0x81, 0xb9, 0x82, 0xa3, 0x41, 0x05, 0x31, 0xb2, 0x84, 0x6f, 0x83, 0x1c, 0xef, 0x70, 0x52,
- 0x8a, 0xf6, 0xca, 0xd1, 0xc0, 0xfc, 0x0f, 0x87, 0x28, 0xe6, 0xc2, 0x22, 0xc9, 0x61, 0x6e, 0x26,
- 0x0e, 0xe3, 0x50, 0xe6, 0xd5, 0x50, 0xea, 0xa0, 0xb0, 0x87, 0x43, 0xc2, 0x96, 0x29, 0xf0, 0xf1,
- 0xa8, 0x0b, 0xaf, 0x03, 0xc0, 0x88, 0x71, 0x09, 0x75, 0x1b, 0x4c, 0x4f, 0x8c, 0x8c, 0x25, 0x4b,
- 0x94, 0x0b, 0x07, 0x93, 0x7e, 0x97, 0xda, 0x50, 0xb2, 0xa0, 0x18, 0x3a, 0x4a, 0x1b, 0x3e, 0xd2,
- 0x40, 0xa1, 0x8e, 0x51, 0x13, 0x87, 0x44, 0x2f, 0x96, 0x32, 0x95, 0xc5, 0xda, 0x5b, 0x96, 0x5a,
- 0x1b, 0xee, 0x85, 0xbe, 0x87, 0x69, 0x1b, 0xf7, 0x49, 0x14, 0x20, 0x61, 0x6d, 0x77, 0x0e, 0x07,
- 0xe6, 0xb6, 0x5a, 0x51, 0x43, 0xb4, 0x83, 0x7a, 0xa8, 0xda, 0xf5, 0x3b, 0x6e, 0x75, 0xee, 0x7a,
- 0x74, 0xec, 0x77, 0x8e, 0x06, 0xa6, 0xf6, 0x9e, 0x13, 0x6d, 0xb1, 0xfc, 0xab, 0x06, 0xfe, 0xcb,
- 0x22, 0xbc, 0xc9, 0xd6, 0x26, 0x4a, 0x62, 0x78, 0x88, 0x36, 0xda, 0xba, 0xc6, 0x64, 0xe6, 0x88,
- 0x8e, 0x5a, 0x2c, 0xd2, 0xff, 0xa8, 0x58, 0x64, 0xe6, 0x2f, 0x16, 0x51, 0x36, 0x64, 0xa7, 0x66,
- 0x43, 0xee, 0xd8, 0x6c, 0xf8, 0x26, 0x23, 0x32, 0x3f, 0xf2, 0x6f, 0x8e, 0x9c, 0xb8, 0x15, 0xe7,
- 0x44, 0x86, 0xef, 0x36, 0x96, 0x9a, 0x58, 0xeb, 0x76, 0x13, 0xf7, 0xa8, 0xbb, 0xe3, 0xe2, 0xf0,
- 0x25, 0x99, 0xa1, 0xc8, 0x2d, 0x93, 0x94, 0x9b, 0xaa, 0x95, 0xec, 0x6b, 0xaf, 0x95, 0xb1, 0xec,
- 0xc8, 0xbd, 0x42, 0x76, 0x94, 0x9f, 0xa7, 0xc1, 0x1a, 0x0b, 0xc7, 0x1d, 0xb4, 0x8d, 0xbb, 0x9f,
- 0x23, 0x6f, 0xce, 0x90, 0x9c, 0x55, 0x42, 0x52, 0xb4, 0xe1, 0xbf, 0x94, 0xcf, 0x40, 0xf9, 0x77,
- 0x1a, 0x58, 0x88, 0x6a, 0x38, 0xb4, 0x00, 0x10, 0x30, 0x5e, 0xa6, 0x05, 0xd1, 0xcb, 0x0c, 0x1c,
- 0xc6, 0xa3, 0x8e, 0x62, 0x01, 0xbf, 0x02, 0x79, 0xd1, 0x93, 0x59, 0x70, 0x4a, 0xc9, 0x02, 0x1a,
- 0x62, 0xe4, 0x5d, 0x6f, 0xa2, 0x80, 0xe2, 0xd0, 0xbe, 0xc2, 0x76, 0x71, 0x38, 0x30, 0xcf, 0xbd,
- 0x88, 0x22, 0x7e, 0xc3, 0x12, 0x38, 0x16, 0x5c, 0xf1, 0x4d, 0x47, 0x7e, 0xa1, 0xfc, 0xad, 0x06,
- 0x4e, 0xb2, 0x8d, 0x32, 0x6a, 0x62, 0x55, 0xdc, 0x00, 0x0b, 0xa1, 0x6c, 0xf3, 0xed, 0x2e, 0xd6,
- 0xca, 0x56, 0x92, 0xd6, 0x29, 0x54, 0xda, 0xd9, 0xc7, 0x03, 0x53, 0x73, 0x62, 0x24, 0xdc, 0x48,
- 0xd0, 0x98, 0x9e, 0x46, 0x23, 0x83, 0xa4, 0x12, 0xc4, 0xfd, 0x9c, 0x06, 0xf0, 0x76, 0xaf, 0x89,
- 0x1f, 0x30, 0xf1, 0x8d, 0x74, 0xda, 0x9f, 0xd8, 0xd1, 0x99, 0x11, 0x29, 0x93, 0xf6, 0xf6, 0xb5,
- 0xc3, 0x81, 0x79, 0xe9, 0x45, 0xac, 0xbc, 0x00, 0xac, 0xb8, 0xa0, 0x0a, 0x37, 0xfd, 0xfa, 0x9f,
- 0x2b, 0x3f, 0xa6, 0xc1, 0xf2, 0x17, 0x7e, 0xb7, 0xef, 0xe1, 0x98, 0x38, 0x6f, 0x82, 0x38, 0x7d,
- 0x44, 0x5c, 0xd2, 0xd6, 0xbe, 0x74, 0x38, 0x30, 0x37, 0x66, 0x22, 0x2d, 0x09, 0x7c, 0x73, 0x09,
- 0x7b, 0x94, 0x06, 0xab, 0x5b, 0x7e, 0xf0, 0xd9, 0x26, 0x7f, 0xbe, 0x28, 0x75, 0x11, 0x4f, 0xd0,
- 0xb6, 0x3a, 0xa2, 0x8d, 0x21, 0xee, 0x22, 0x1a, 0xba, 0x0f, 0xec, 0x8d, 0xc3, 0x81, 0x59, 0x9d,
- 0x89, 0xb2, 0x11, 0xe8, 0xcd, 0xa5, 0xeb, 0x97, 0x34, 0x58, 0xbb, 0xdf, 0x47, 0x3d, 0xea, 0x76,
- 0xb1, 0xa0, 0x2c, 0x26, 0x6c, 0x7f, 0x82, 0x30, 0x63, 0x44, 0x58, 0x12, 0x23, 0xa9, 0xfb, 0xe8,
- 0x70, 0x60, 0x5e, 0x99, 0x89, 0xba, 0x69, 0xf0, 0x37, 0x97, 0xc4, 0x9f, 0xb2, 0x60, 0xe9, 0x3e,
- 0x5b, 0x25, 0xe6, 0xee, 0x1d, 0x20, 0x8f, 0x5c, 0xc9, 0x1c, 0x8c, 0xee, 0x68, 0x61, 0xd0, 0xb0,
- 0x36, 0xe5, 0x61, 0x2c, 0x2c, 0xe0, 0x65, 0x90, 0x27, 0xfc, 0x26, 0x24, 0x0b, 0xaa, 0x31, 0xfe,
- 0x6a, 0x48, 0xde, 0xb9, 0xea, 0x29, 0x47, 0xda, 0xb3, 0xb7, 0x54, 0x97, 0x5d, 0x00, 0xa2, 0x9b,
- 0x60, 0x79, 0x1c, 0x39, 0x79, 0x3d, 0x60, 0x68, 0x81, 0x81, 0x17, 0x41, 0x8e, 0x57, 0x6e, 0xf9,
- 0x62, 0x4d, 0x7c, 0x76, 0xb2, 0x84, 0xd6, 0x53, 0x8e, 0x30, 0x87, 0x35, 0x90, 0x0d, 0x42, 0xdf,
- 0x93, 0xa7, 0xe8, 0x99, 0xf1, 0x6f, 0xaa, 0xc7, 0x4e, 0x3d, 0xe5, 0x70, 0x5b, 0x78, 0x81, 0x5d,
- 0x79, 0xd9, 0x79, 0x45, 0xf8, 0x13, 0x82, 0x95, 0xac, 0x31, 0x98, 0x02, 0x89, 0x4c, 0xe1, 0x05,
- 0x90, 0xdf, 0xe3, 0x65, 0x89, 0xbf, 0x2f, 0xd8, 0xdd, 0x51, 0x01, 0x25, 0x0b, 0x16, 0xf3, 0x4b,
- 0xd8, 0xc2, 0x5b, 0xe0, 0x04, 0xf5, 0x83, 0x4e, 0x54, 0x00, 0xe4, 0xf3, 0xa3, 0xa4, 0x62, 0xa7,
- 0x15, 0x88, 0x7a, 0xca, 0x49, 0xe0, 0xe0, 0x3d, 0x70, 0x72, 0x37, 0x21, 0x53, 0x4c, 0xf8, 0xbb,
- 0x7f, 0x8c, 0xe7, 0xe9, 0xd9, 0x53, 0x4f, 0x39, 0x13, 0x68, 0x1b, 0x8c, 0x32, 0xaa, 0xfc, 0x47,
- 0x06, 0x9c, 0x90, 0x9a, 0x11, 0x6f, 0x85, 0x4b, 0xb1, 0x0c, 0x84, 0x64, 0xfe, 0x7f, 0x9c, 0x0c,
- 0xb8, 0xb9, 0xa2, 0x82, 0xf7, 0x63, 0x15, 0x08, 0xfd, 0xac, 0x8d, 0xb2, 0x94, 0xc7, 0x5f, 0x41,
- 0xc8, 0xc8, 0x6f, 0x44, 0x91, 0x17, 0xb2, 0x39, 0x3d, 0xfd, 0xdc, 0x8d, 0x50, 0x32, 0xec, 0x57,
- 0x41, 0xc1, 0x15, 0xcf, 0xfe, 0x69, 0x82, 0x99, 0xfc, 0x55, 0x80, 0x05, 0x52, 0x02, 0xe0, 0xc6,
- 0x28, 0xfc, 0x42, 0x35, 0xa7, 0x26, 0xc3, 0x1f, 0x83, 0xa2, 0xe8, 0x9f, 0x8f, 0xa3, 0x9f, 0x97,
- 0x98, 0x89, 0xc3, 0x2a, 0x76, 0x4c, 0x86, 0xbe, 0x0e, 0x16, 0x3c, 0x4c, 0x11, 0xbb, 0xcb, 0xea,
- 0x05, 0x5e, 0x37, 0xce, 0x26, 0x43, 0x35, 0xe2, 0xdb, 0xba, 0x2b, 0x0d, 0x6f, 0xf6, 0x68, 0xb8,
- 0x2f, 0xaf, 0x2d, 0x31, 0x7a, 0xfd, 0x1a, 0x58, 0x4a, 0x18, 0xc0, 0x93, 0x20, 0xd3, 0xc1, 0xd1,
- 0x2f, 0x1c, 0xac, 0xc9, 0x1e, 0x77, 0x7b, 0xa8, 0xdb, 0xc7, 0x9c, 0xf6, 0xa2, 0x23, 0x3a, 0x57,
- 0xd3, 0x97, 0x35, 0xbb, 0x08, 0x0a, 0xa1, 0xf8, 0x8a, 0xdd, 0x3c, 0x78, 0x6a, 0xa4, 0x9e, 0x3c,
- 0x35, 0x52, 0xcf, 0x9f, 0x1a, 0xda, 0xd7, 0x43, 0x43, 0xfb, 0x7e, 0x68, 0x68, 0x8f, 0x87, 0x86,
- 0x76, 0x30, 0x34, 0xb4, 0xdf, 0x87, 0x86, 0xf6, 0xe7, 0xd0, 0x48, 0x3d, 0x1f, 0x1a, 0xda, 0xc3,
- 0x67, 0x46, 0xea, 0xe0, 0x99, 0x91, 0x7a, 0xf2, 0xcc, 0x48, 0x7d, 0x69, 0xcd, 0x57, 0xc2, 0xb6,
- 0xf3, 0x9c, 0x96, 0x8d, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x66, 0x27, 0xc9, 0x7f, 0x7f, 0x14,
- 0x00, 0x00,
+ // 1514 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0x5b, 0x6f, 0x1b, 0x45,
+ 0x1b, 0xf6, 0xfa, 0x18, 0x4f, 0x0e, 0x5f, 0xbe, 0x49, 0x94, 0xee, 0x97, 0xf6, 0xdb, 0xb5, 0x2c,
+ 0xd1, 0x06, 0x04, 0x6b, 0x9a, 0x94, 0x1e, 0x01, 0xd1, 0xa5, 0xad, 0x52, 0xd1, 0xa2, 0x76, 0x13,
+ 0x71, 0x81, 0xb8, 0x99, 0xd8, 0x13, 0x7b, 0xf1, 0x9e, 0xb2, 0x33, 0x0e, 0xcd, 0x1d, 0x3f, 0x00,
+ 0xa4, 0xfe, 0x0a, 0x84, 0x44, 0x55, 0x89, 0x5b, 0x2e, 0xb9, 0xa1, 0x97, 0xbd, 0xac, 0x2c, 0xb1,
+ 0x50, 0x97, 0x0b, 0x94, 0xab, 0xfe, 0x04, 0x34, 0x87, 0x5d, 0xef, 0xda, 0x6e, 0xeb, 0x14, 0x21,
+ 0xb5, 0x12, 0x37, 0xf6, 0x1c, 0xde, 0x67, 0xf6, 0xdd, 0xe7, 0x79, 0xdf, 0x77, 0x66, 0x16, 0x9c,
+ 0x0a, 0xba, 0xed, 0xc6, 0x5e, 0x0f, 0x87, 0x36, 0x0e, 0xf9, 0xff, 0x41, 0x88, 0xbc, 0x36, 0x4e,
+ 0x35, 0x8d, 0x20, 0xf4, 0xa9, 0x0f, 0xc1, 0x70, 0x64, 0x75, 0xbd, 0x6d, 0xd3, 0x4e, 0x6f, 0xc7,
+ 0x68, 0xfa, 0x6e, 0xa3, 0xed, 0xb7, 0xfd, 0x46, 0xdb, 0xf7, 0xdb, 0x0e, 0x46, 0x81, 0x4d, 0x64,
+ 0xb3, 0x11, 0x06, 0xcd, 0x06, 0xa1, 0x88, 0xf6, 0x88, 0xc0, 0xaf, 0x2e, 0x33, 0x43, 0xde, 0xe4,
+ 0x10, 0x39, 0xaa, 0x4b, 0x73, 0xde, 0xdb, 0xe9, 0xed, 0x36, 0xa8, 0xed, 0x62, 0x42, 0x91, 0x1b,
+ 0x48, 0x83, 0xe3, 0xcc, 0x3f, 0xc7, 0x6f, 0x0b, 0x64, 0xdc, 0x90, 0x93, 0xff, 0xcb, 0x4c, 0x92,
+ 0x2e, 0xa6, 0xcd, 0x8e, 0x9c, 0xaa, 0xc9, 0xa9, 0x3d, 0xc7, 0xf5, 0x5b, 0xd8, 0xe1, 0xbe, 0x10,
+ 0xf1, 0x2b, 0x2d, 0x96, 0x98, 0x45, 0xd0, 0x23, 0x1d, 0xfe, 0x23, 0x07, 0x3f, 0x7e, 0x21, 0x1d,
+ 0x3b, 0x88, 0xe0, 0x46, 0x0b, 0xef, 0xda, 0x9e, 0x4d, 0x6d, 0xdf, 0x23, 0xe9, 0xb6, 0x5c, 0xe4,
+ 0xec, 0x74, 0x8b, 0x8c, 0x52, 0x5c, 0xbf, 0x5f, 0x00, 0xb3, 0x37, 0xfc, 0xae, 0x6d, 0xe1, 0xbd,
+ 0x1e, 0x26, 0x14, 0x2e, 0x83, 0x12, 0xb7, 0x51, 0x95, 0x9a, 0xb2, 0x56, 0xb5, 0x44, 0x87, 0x8d,
+ 0x3a, 0xb6, 0x6b, 0x53, 0x35, 0x5f, 0x53, 0xd6, 0xe6, 0x2d, 0xd1, 0x81, 0x10, 0x14, 0x09, 0xc5,
+ 0x81, 0x5a, 0xa8, 0x29, 0x6b, 0x05, 0x8b, 0xb7, 0xe1, 0x2a, 0x98, 0xb1, 0x3d, 0x8a, 0xc3, 0x7d,
+ 0xe4, 0xa8, 0x55, 0x3e, 0x9e, 0xf4, 0xe1, 0x87, 0xa0, 0x42, 0x28, 0x0a, 0xe9, 0x36, 0x51, 0x8b,
+ 0x35, 0x65, 0x6d, 0x76, 0x7d, 0xd5, 0x10, 0x52, 0x18, 0xb1, 0x14, 0xc6, 0x76, 0x2c, 0x85, 0x39,
+ 0xf3, 0x20, 0xd2, 0x73, 0x77, 0x7f, 0xd3, 0x15, 0x2b, 0x06, 0xc1, 0x8b, 0xa0, 0x84, 0xbd, 0xd6,
+ 0x36, 0x51, 0x4b, 0x47, 0x40, 0x0b, 0x08, 0x3c, 0x0d, 0xaa, 0x2d, 0x3b, 0xc4, 0x4d, 0xc6, 0x99,
+ 0x5a, 0xae, 0x29, 0x6b, 0x0b, 0xeb, 0x4b, 0x46, 0x22, 0xed, 0x95, 0x78, 0xca, 0x1a, 0x5a, 0xb1,
+ 0xd7, 0x0b, 0x10, 0xed, 0xa8, 0x15, 0xce, 0x04, 0x6f, 0xc3, 0x3a, 0x28, 0x93, 0x0e, 0x0a, 0x5b,
+ 0x44, 0x9d, 0xa9, 0x15, 0xd6, 0xaa, 0x26, 0x38, 0x8c, 0x74, 0x39, 0x62, 0xc9, 0x7f, 0xf8, 0x05,
+ 0x28, 0x06, 0x0e, 0xf2, 0x54, 0xc0, 0xbd, 0x5c, 0x34, 0x52, 0x9c, 0xdf, 0x72, 0x90, 0x67, 0x9e,
+ 0xed, 0x47, 0x7a, 0x26, 0x9a, 0x43, 0xb4, 0x8b, 0x3c, 0xd4, 0x70, 0xfc, 0xae, 0xdd, 0x48, 0xcb,
+ 0xc8, 0x56, 0x31, 0x6e, 0x33, 0x34, 0xc3, 0x59, 0x7c, 0xd5, 0xfa, 0x2f, 0x79, 0x00, 0x99, 0x60,
+ 0xd7, 0x3d, 0x42, 0x91, 0x47, 0x5f, 0x46, 0xb7, 0xf7, 0x41, 0x99, 0x85, 0xfc, 0x36, 0xe1, 0xca,
+ 0x4d, 0x4b, 0xa4, 0xc4, 0x64, 0x99, 0x2c, 0x1e, 0x89, 0xc9, 0xd2, 0x44, 0x26, 0xcb, 0x2f, 0x64,
+ 0xb2, 0xf2, 0x8f, 0x30, 0xa9, 0x82, 0x22, 0xeb, 0xc1, 0x45, 0x50, 0x08, 0xd1, 0x57, 0x9c, 0xb8,
+ 0x39, 0x8b, 0x35, 0xeb, 0x3f, 0x14, 0xc1, 0x9c, 0x48, 0x0a, 0x12, 0xf8, 0x1e, 0xc1, 0xcc, 0xd9,
+ 0x2d, 0x5e, 0x58, 0x04, 0xbd, 0xd2, 0x59, 0x3e, 0x62, 0xc9, 0x19, 0xf8, 0x11, 0x28, 0x5e, 0x41,
+ 0x14, 0x71, 0xaa, 0x67, 0xd7, 0x97, 0xd3, 0xce, 0xb2, 0xb5, 0xd8, 0x9c, 0xb9, 0xc2, 0xd8, 0x3c,
+ 0x8c, 0xf4, 0x85, 0x16, 0xa2, 0xe8, 0x6d, 0xdf, 0xb5, 0x29, 0x76, 0x03, 0x7a, 0x60, 0x71, 0x24,
+ 0x7c, 0x0f, 0x54, 0xaf, 0x86, 0xa1, 0x1f, 0x6e, 0x1f, 0x04, 0x98, 0x4b, 0x53, 0x35, 0x8f, 0x1d,
+ 0x46, 0xfa, 0x12, 0x8e, 0x07, 0x53, 0x88, 0xa1, 0x25, 0x7c, 0x13, 0x94, 0x78, 0x87, 0x8b, 0x51,
+ 0x35, 0x97, 0x0e, 0x23, 0xfd, 0x3f, 0x1c, 0x92, 0x32, 0x17, 0x16, 0x59, 0xed, 0x4a, 0x53, 0x69,
+ 0x97, 0x84, 0x50, 0x39, 0x1d, 0x42, 0x2a, 0xa8, 0xec, 0xe3, 0x90, 0xb0, 0x65, 0x2a, 0x7c, 0x3c,
+ 0xee, 0xc2, 0xcb, 0x00, 0x30, 0x62, 0x6c, 0x42, 0xed, 0x26, 0xcb, 0x12, 0x46, 0xc6, 0xbc, 0x21,
+ 0x8a, 0xa0, 0x85, 0x49, 0xcf, 0xa1, 0x26, 0x94, 0x2c, 0xa4, 0x0c, 0xad, 0x54, 0x1b, 0xde, 0x53,
+ 0x40, 0x65, 0x13, 0xa3, 0x16, 0x0e, 0x89, 0x5a, 0xad, 0x15, 0xd6, 0x66, 0xd7, 0xdf, 0x30, 0xd2,
+ 0x15, 0xef, 0x56, 0xe8, 0xbb, 0x98, 0x76, 0x70, 0x8f, 0xc4, 0x02, 0x09, 0x6b, 0xb3, 0xdb, 0x8f,
+ 0xf4, 0x9d, 0x69, 0xe2, 0x61, 0xaa, 0x2a, 0xfb, 0xcc, 0xe7, 0x1c, 0x46, 0xba, 0xf2, 0x8e, 0x15,
+ 0xbb, 0x58, 0xff, 0x55, 0x01, 0xff, 0x65, 0x0a, 0x6f, 0xb1, 0xb5, 0x49, 0x2a, 0x21, 0x5d, 0x44,
+ 0x9b, 0x1d, 0x55, 0x61, 0xe1, 0x6d, 0x89, 0x4e, 0xba, 0x04, 0xe6, 0xff, 0x56, 0x09, 0x2c, 0x1c,
+ 0xbd, 0x04, 0xc6, 0x59, 0x58, 0x9c, 0x98, 0x85, 0xa5, 0x67, 0x65, 0x61, 0xfd, 0x9b, 0x82, 0xa8,
+ 0x38, 0xf1, 0xfb, 0x1d, 0x21, 0x27, 0xae, 0x25, 0x39, 0x51, 0xe0, 0xde, 0x26, 0xa1, 0x26, 0xd6,
+ 0xba, 0xde, 0xc2, 0x1e, 0xb5, 0x77, 0x6d, 0x1c, 0xbe, 0x20, 0x33, 0x52, 0xe1, 0x56, 0xc8, 0x86,
+ 0x5b, 0x3a, 0x56, 0x8a, 0xaf, 0x7c, 0xac, 0x8c, 0x64, 0x47, 0xe9, 0x25, 0xb2, 0xa3, 0xfe, 0x34,
+ 0x0f, 0x56, 0x98, 0x1c, 0x37, 0xd0, 0x0e, 0x76, 0x3e, 0x45, 0xee, 0x11, 0x25, 0x39, 0x99, 0x92,
+ 0xa4, 0x6a, 0xc2, 0x7f, 0x29, 0x9f, 0x82, 0xf2, 0xef, 0x14, 0x30, 0x13, 0xd7, 0x70, 0x68, 0x00,
+ 0x20, 0x60, 0xbc, 0x4c, 0x0b, 0xa2, 0x17, 0x18, 0x38, 0x4c, 0x46, 0xad, 0x94, 0x05, 0xfc, 0x12,
+ 0x94, 0x45, 0x4f, 0x66, 0xc1, 0xb1, 0x54, 0x16, 0xd0, 0x10, 0x23, 0xf7, 0x72, 0x0b, 0x05, 0x14,
+ 0x87, 0xe6, 0x05, 0xe6, 0x45, 0x3f, 0xd2, 0x4f, 0x3d, 0x8f, 0x22, 0x7e, 0x6e, 0x14, 0x38, 0x26,
+ 0xae, 0x78, 0xa6, 0x25, 0x9f, 0x50, 0xff, 0x56, 0x01, 0x8b, 0xcc, 0x51, 0x46, 0x4d, 0x12, 0x15,
+ 0x57, 0xc0, 0x4c, 0x28, 0xdb, 0xdc, 0xdd, 0xd9, 0xf5, 0xba, 0x91, 0xa5, 0x75, 0x02, 0x95, 0x66,
+ 0xf1, 0x41, 0xa4, 0x2b, 0x56, 0x82, 0x84, 0x1b, 0x19, 0x1a, 0xf3, 0x93, 0x68, 0x64, 0x90, 0x5c,
+ 0x86, 0xb8, 0x9f, 0xf2, 0x00, 0x5e, 0xf7, 0x5a, 0xf8, 0x0e, 0x0b, 0xbe, 0x61, 0x9c, 0xf6, 0xc6,
+ 0x3c, 0x3a, 0x31, 0x24, 0x65, 0xdc, 0xde, 0xbc, 0xd4, 0x8f, 0xf4, 0x73, 0xcf, 0x63, 0xe5, 0x39,
+ 0xe0, 0xd4, 0x2b, 0xa4, 0x03, 0x37, 0xff, 0xea, 0xef, 0x2b, 0xf7, 0xf3, 0x60, 0xe1, 0x33, 0xdf,
+ 0xe9, 0xb9, 0x38, 0x21, 0xce, 0x1d, 0x23, 0x4e, 0x1d, 0x12, 0x97, 0xb5, 0x35, 0xcf, 0xf5, 0x23,
+ 0x7d, 0x63, 0x2a, 0xd2, 0xb2, 0xc0, 0xd7, 0x97, 0xb0, 0x7b, 0x79, 0xb0, 0xbc, 0xed, 0x07, 0x9f,
+ 0x6c, 0xf1, 0x4b, 0x59, 0xaa, 0x2e, 0xe2, 0x31, 0xda, 0x96, 0x87, 0xb4, 0x31, 0xc4, 0x4d, 0x44,
+ 0x43, 0xfb, 0x8e, 0xb9, 0xd1, 0x8f, 0xf4, 0xc6, 0x54, 0x94, 0x0d, 0x41, 0xaf, 0x2f, 0x5d, 0x3f,
+ 0xe7, 0xc1, 0xca, 0xed, 0x1e, 0xf2, 0xa8, 0xed, 0x60, 0x41, 0x59, 0x42, 0xd8, 0xc1, 0x18, 0x61,
+ 0xda, 0x90, 0xb0, 0x2c, 0x46, 0x52, 0xf7, 0x41, 0x3f, 0xd2, 0x2f, 0x4c, 0x45, 0xdd, 0x24, 0xf8,
+ 0xeb, 0x4b, 0xe2, 0x8f, 0x45, 0x30, 0xcf, 0x2f, 0x16, 0x09, 0x77, 0x6f, 0x01, 0xb9, 0xe5, 0x4a,
+ 0xe6, 0x60, 0x7c, 0x46, 0x0b, 0x83, 0xa6, 0xb1, 0x25, 0x37, 0x63, 0x61, 0x01, 0xcf, 0x83, 0x32,
+ 0xe1, 0x27, 0x21, 0x59, 0x50, 0xb5, 0xd1, 0x5b, 0x43, 0xf6, 0xcc, 0xb5, 0x99, 0xb3, 0xa4, 0x3d,
+ 0xbb, 0xc3, 0x39, 0xec, 0x00, 0x10, 0x9f, 0x04, 0xeb, 0xa3, 0xc8, 0xf1, 0xe3, 0x01, 0x43, 0x0b,
+ 0x0c, 0x3c, 0x0b, 0x4a, 0xbc, 0x72, 0xcb, 0x7b, 0x78, 0xe6, 0xb1, 0xe3, 0x25, 0x74, 0x33, 0x67,
+ 0x09, 0x73, 0xb8, 0x0e, 0x8a, 0x41, 0xe8, 0xbb, 0x72, 0x17, 0x3d, 0x31, 0xfa, 0xcc, 0xf4, 0xb6,
+ 0xb3, 0x99, 0xb3, 0xb8, 0x2d, 0x3c, 0xc3, 0x8e, 0xbc, 0x6c, 0xbf, 0x22, 0xfc, 0x0a, 0xc1, 0x4a,
+ 0xd6, 0x08, 0x2c, 0x05, 0x89, 0x4d, 0xe1, 0x19, 0x50, 0xde, 0xe7, 0x65, 0x49, 0x5e, 0xfe, 0x56,
+ 0xd3, 0xa0, 0x6c, 0xc1, 0x62, 0xef, 0x25, 0x6c, 0xe1, 0x35, 0x30, 0x47, 0xfd, 0xa0, 0x1b, 0x17,
+ 0x00, 0x79, 0xfd, 0xa8, 0xa5, 0xb1, 0x93, 0x0a, 0xc4, 0x66, 0xce, 0xca, 0xe0, 0xe0, 0x2d, 0xb0,
+ 0xb8, 0x97, 0x09, 0x53, 0x4c, 0xf8, 0xd7, 0x8c, 0x11, 0x9e, 0x27, 0x67, 0xcf, 0x66, 0xce, 0x1a,
+ 0x43, 0x9b, 0x60, 0x98, 0x51, 0xf5, 0x3f, 0x0a, 0x60, 0x4e, 0xc6, 0x8c, 0xb8, 0x2b, 0x9c, 0x4b,
+ 0xc2, 0x40, 0x84, 0xcc, 0xff, 0x9f, 0x15, 0x06, 0xdc, 0x3c, 0x15, 0x05, 0xef, 0x26, 0x51, 0x20,
+ 0xe2, 0x67, 0x65, 0x98, 0xa5, 0x5c, 0xff, 0x14, 0x42, 0x2a, 0xbf, 0x11, 0x2b, 0x2f, 0xc2, 0xe6,
+ 0xf8, 0xe4, 0x7d, 0x37, 0x46, 0x49, 0xd9, 0x2f, 0x82, 0x8a, 0x2d, 0x3e, 0x37, 0x4c, 0x0a, 0x98,
+ 0xf1, 0xaf, 0x11, 0x4c, 0x48, 0x09, 0x80, 0x1b, 0x43, 0xf9, 0x45, 0xd4, 0x1c, 0x1b, 0x97, 0x3f,
+ 0x01, 0xc5, 0xea, 0x9f, 0x4e, 0xd4, 0x2f, 0x4b, 0xcc, 0xd8, 0x66, 0x95, 0xbc, 0x98, 0x94, 0x7e,
+ 0x13, 0xcc, 0xb8, 0x98, 0x22, 0x76, 0x96, 0x55, 0x2b, 0xbc, 0x6e, 0x9c, 0xcc, 0x4a, 0x35, 0xe4,
+ 0xdb, 0xb8, 0x29, 0x0d, 0xaf, 0x7a, 0x34, 0x3c, 0x90, 0xc7, 0x96, 0x04, 0xbd, 0x7a, 0x09, 0xcc,
+ 0x67, 0x0c, 0xe0, 0x22, 0x28, 0x74, 0x71, 0xfc, 0x65, 0x85, 0x35, 0xd9, 0xe5, 0x6e, 0x1f, 0x39,
+ 0x3d, 0xcc, 0x69, 0xaf, 0x5a, 0xa2, 0x73, 0x31, 0x7f, 0x5e, 0x31, 0xab, 0xa0, 0x12, 0x8a, 0xa7,
+ 0x98, 0xad, 0x87, 0x8f, 0xb5, 0xdc, 0xa3, 0xc7, 0x5a, 0xee, 0xe9, 0x63, 0x4d, 0xf9, 0x7a, 0xa0,
+ 0x29, 0xdf, 0x0f, 0x34, 0xe5, 0xc1, 0x40, 0x53, 0x1e, 0x0e, 0x34, 0xe5, 0xf7, 0x81, 0xa6, 0xfc,
+ 0x39, 0xd0, 0x72, 0x4f, 0x07, 0x9a, 0x72, 0xf7, 0x89, 0x96, 0x7b, 0xf8, 0x44, 0xcb, 0x3d, 0x7a,
+ 0xa2, 0xe5, 0x3e, 0x37, 0x8e, 0x56, 0xc2, 0x76, 0xca, 0x9c, 0x96, 0x8d, 0xbf, 0x02, 0x00, 0x00,
+ 0xff, 0xff, 0xe6, 0x4a, 0x9a, 0x06, 0x55, 0x15, 0x00, 0x00,
}
func (this *LokiRequest) Equal(that interface{}) bool {
@@ -1269,6 +1320,13 @@ func (this *LokiRequest) Equal(that interface{}) bool {
return false
}
}
+ if that1.Plan == nil {
+ if this.Plan != nil {
+ return false
+ }
+ } else if !this.Plan.Equal(*that1.Plan) {
+ return false
+ }
return true
}
func (this *LokiInstantRequest) Equal(that interface{}) bool {
@@ -1313,6 +1371,37 @@ func (this *LokiInstantRequest) Equal(that interface{}) bool {
return false
}
}
+ if that1.Plan == nil {
+ if this.Plan != nil {
+ return false
+ }
+ } else if !this.Plan.Equal(*that1.Plan) {
+ return false
+ }
+ return true
+}
+func (this *Plan) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*Plan)
+ if !ok {
+ that2, ok := that.(Plan)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ if !bytes.Equal(this.Raw, that1.Raw) {
+ return false
+ }
return true
}
func (this *LokiResponse) Equal(that interface{}) bool {
@@ -2120,7 +2209,7 @@ func (this *LokiRequest) GoString() string {
if this == nil {
return "nil"
}
- s := make([]string, 0, 13)
+ s := make([]string, 0, 14)
s = append(s, "&queryrange.LokiRequest{")
s = append(s, "Query: "+fmt.Sprintf("%#v", this.Query)+",\n")
s = append(s, "Limit: "+fmt.Sprintf("%#v", this.Limit)+",\n")
@@ -2131,6 +2220,7 @@ func (this *LokiRequest) GoString() string {
s = append(s, "Direction: "+fmt.Sprintf("%#v", this.Direction)+",\n")
s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n")
s = append(s, "Shards: "+fmt.Sprintf("%#v", this.Shards)+",\n")
+ s = append(s, "Plan: "+fmt.Sprintf("%#v", this.Plan)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
@@ -2138,7 +2228,7 @@ func (this *LokiInstantRequest) GoString() string {
if this == nil {
return "nil"
}
- s := make([]string, 0, 10)
+ s := make([]string, 0, 11)
s = append(s, "&queryrange.LokiInstantRequest{")
s = append(s, "Query: "+fmt.Sprintf("%#v", this.Query)+",\n")
s = append(s, "Limit: "+fmt.Sprintf("%#v", this.Limit)+",\n")
@@ -2146,6 +2236,17 @@ func (this *LokiInstantRequest) GoString() string {
s = append(s, "Direction: "+fmt.Sprintf("%#v", this.Direction)+",\n")
s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n")
s = append(s, "Shards: "+fmt.Sprintf("%#v", this.Shards)+",\n")
+ s = append(s, "Plan: "+fmt.Sprintf("%#v", this.Plan)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *Plan) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&queryrange.Plan{")
+ s = append(s, "Raw: "+fmt.Sprintf("%#v", this.Raw)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
@@ -2463,6 +2564,18 @@ func (m *LokiRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ if m.Plan != nil {
+ {
+ size := m.Plan.Size()
+ i -= size
+ if _, err := m.Plan.MarshalTo(dAtA[i:]); err != nil {
+ return 0, err
+ }
+ i = encodeVarintQueryrange(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x52
+ }
if m.Interval != 0 {
i = encodeVarintQueryrange(dAtA, i, uint64(m.Interval))
i--
@@ -2489,21 +2602,21 @@ func (m *LokiRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i--
dAtA[i] = 0x30
}
- n1, err1 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.EndTs, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.EndTs):])
- if err1 != nil {
- return 0, err1
- }
- i -= n1
- i = encodeVarintQueryrange(dAtA, i, uint64(n1))
- i--
- dAtA[i] = 0x2a
- n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.StartTs, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.StartTs):])
+ n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.EndTs, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.EndTs):])
if err2 != nil {
return 0, err2
}
i -= n2
i = encodeVarintQueryrange(dAtA, i, uint64(n2))
i--
+ dAtA[i] = 0x2a
+ n3, err3 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.StartTs, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.StartTs):])
+ if err3 != nil {
+ return 0, err3
+ }
+ i -= n3
+ i = encodeVarintQueryrange(dAtA, i, uint64(n3))
+ i--
dAtA[i] = 0x22
if m.Step != 0 {
i = encodeVarintQueryrange(dAtA, i, uint64(m.Step))
@@ -2545,6 +2658,18 @@ func (m *LokiInstantRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ if m.Plan != nil {
+ {
+ size := m.Plan.Size()
+ i -= size
+ if _, err := m.Plan.MarshalTo(dAtA[i:]); err != nil {
+ return 0, err
+ }
+ i = encodeVarintQueryrange(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ }
if len(m.Shards) > 0 {
for iNdEx := len(m.Shards) - 1; iNdEx >= 0; iNdEx-- {
i -= len(m.Shards[iNdEx])
@@ -2566,12 +2691,12 @@ func (m *LokiInstantRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i--
dAtA[i] = 0x20
}
- n3, err3 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.TimeTs, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.TimeTs):])
- if err3 != nil {
- return 0, err3
+ n5, err5 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.TimeTs, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.TimeTs):])
+ if err5 != nil {
+ return 0, err5
}
- i -= n3
- i = encodeVarintQueryrange(dAtA, i, uint64(n3))
+ i -= n5
+ i = encodeVarintQueryrange(dAtA, i, uint64(n5))
i--
dAtA[i] = 0x1a
if m.Limit != 0 {
@@ -2589,6 +2714,36 @@ func (m *LokiInstantRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
+func (m *Plan) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Plan) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Plan) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Raw) > 0 {
+ i -= len(m.Raw)
+ copy(dAtA[i:], m.Raw)
+ i = encodeVarintQueryrange(dAtA, i, uint64(len(m.Raw)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
func (m *LokiResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -2718,20 +2873,20 @@ func (m *LokiSeriesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i--
dAtA[i] = 0x22
}
- n6, err6 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.EndTs, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.EndTs):])
- if err6 != nil {
- return 0, err6
+ n8, err8 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.EndTs, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.EndTs):])
+ if err8 != nil {
+ return 0, err8
}
- i -= n6
- i = encodeVarintQueryrange(dAtA, i, uint64(n6))
+ i -= n8
+ i = encodeVarintQueryrange(dAtA, i, uint64(n8))
i--
dAtA[i] = 0x1a
- n7, err7 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.StartTs, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.StartTs):])
- if err7 != nil {
- return 0, err7
+ n9, err9 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.StartTs, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.StartTs):])
+ if err9 != nil {
+ return 0, err9
}
- i -= n7
- i = encodeVarintQueryrange(dAtA, i, uint64(n7))
+ i -= n9
+ i = encodeVarintQueryrange(dAtA, i, uint64(n9))
i--
dAtA[i] = 0x12
if len(m.Match) > 0 {
@@ -3594,6 +3749,10 @@ func (m *LokiRequest) Size() (n int) {
if m.Interval != 0 {
n += 1 + sovQueryrange(uint64(m.Interval))
}
+ if m.Plan != nil {
+ l = m.Plan.Size()
+ n += 1 + l + sovQueryrange(uint64(l))
+ }
return n
}
@@ -3625,6 +3784,23 @@ func (m *LokiInstantRequest) Size() (n int) {
n += 1 + l + sovQueryrange(uint64(l))
}
}
+ if m.Plan != nil {
+ l = m.Plan.Size()
+ n += 1 + l + sovQueryrange(uint64(l))
+ }
+ return n
+}
+
+func (m *Plan) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Raw)
+ if l > 0 {
+ n += 1 + l + sovQueryrange(uint64(l))
+ }
return n
}
@@ -4092,6 +4268,7 @@ func (this *LokiRequest) String() string {
`Path:` + fmt.Sprintf("%v", this.Path) + `,`,
`Shards:` + fmt.Sprintf("%v", this.Shards) + `,`,
`Interval:` + fmt.Sprintf("%v", this.Interval) + `,`,
+ `Plan:` + fmt.Sprintf("%v", this.Plan) + `,`,
`}`,
}, "")
return s
@@ -4107,6 +4284,17 @@ func (this *LokiInstantRequest) String() string {
`Direction:` + fmt.Sprintf("%v", this.Direction) + `,`,
`Path:` + fmt.Sprintf("%v", this.Path) + `,`,
`Shards:` + fmt.Sprintf("%v", this.Shards) + `,`,
+ `Plan:` + fmt.Sprintf("%v", this.Plan) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Plan) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Plan{`,
+ `Raw:` + fmt.Sprintf("%v", this.Raw) + `,`,
`}`,
}, "")
return s
@@ -4689,6 +4877,42 @@ func (m *LokiRequest) Unmarshal(dAtA []byte) error {
break
}
}
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Plan", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQueryrange
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQueryrange
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQueryrange
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Plan == nil {
+ m.Plan = &github_com_grafana_loki_pkg_querier_plan.QueryPlan{}
+ }
+ if err := m.Plan.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipQueryrange(dAtA[iNdEx:])
@@ -4909,6 +5133,129 @@ func (m *LokiInstantRequest) Unmarshal(dAtA []byte) error {
}
m.Shards = append(m.Shards, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Plan", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQueryrange
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQueryrange
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQueryrange
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Plan == nil {
+ m.Plan = &github_com_grafana_loki_pkg_querier_plan.QueryPlan{}
+ }
+ if err := m.Plan.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQueryrange(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthQueryrange
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthQueryrange
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Plan) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQueryrange
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Plan: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Plan: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Raw", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQueryrange
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthQueryrange
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQueryrange
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Raw = append(m.Raw[:0], dAtA[iNdEx:postIndex]...)
+ if m.Raw == nil {
+ m.Raw = []byte{}
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipQueryrange(dAtA[iNdEx:])
diff --git a/pkg/querier/queryrange/queryrange.proto b/pkg/querier/queryrange/queryrange.proto
index d5e89eeee47a7..f673464acfc0b 100644
--- a/pkg/querier/queryrange/queryrange.proto
+++ b/pkg/querier/queryrange/queryrange.proto
@@ -18,7 +18,7 @@ option (gogoproto.sizer_all) = true;
option (gogoproto.unmarshaler_all) = true;
message LokiRequest {
- string query = 1;
+ string query = 1; // mark as reserved once we've fully migrated to plan.
uint32 limit = 2;
int64 step = 3;
int64 interval = 9;
@@ -33,6 +33,7 @@ message LokiRequest {
logproto.Direction direction = 6;
string path = 7;
repeated string shards = 8 [(gogoproto.jsontag) = "shards"];
+ Plan plan = 10 [(gogoproto.customtype) = "github.com/grafana/loki/pkg/querier/plan.QueryPlan"];
}
message LokiInstantRequest {
@@ -45,6 +46,11 @@ message LokiInstantRequest {
logproto.Direction direction = 4;
string path = 5;
repeated string shards = 6 [(gogoproto.jsontag) = "shards"];
+ Plan plan = 7 [(gogoproto.customtype) = "github.com/grafana/loki/pkg/querier/plan.QueryPlan"];
+}
+
+message Plan {
+ bytes raw = 1;
}
message LokiResponse {
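The new Plan fields on LokiRequest (field 10) and LokiInstantRequest (field 7) are length-delimited on the wire, which is where the tag bytes 0x52 and 0x3a written by the generated marshallers above come from; Plan's own raw field (field 1) gets the tag 0xa. A minimal, self-contained sketch of that tag arithmetic, using only the protobuf wire rule tag = fieldNumber<<3 | wireType rather than any Loki code:

```go
package main

import "fmt"

const wireLengthDelimited = 2 // protobuf wire type for bytes, strings and embedded messages

// tagByte computes the key byte that precedes a field on the wire, assuming the
// field number is small enough to fit in a single byte (field numbers below 16).
func tagByte(fieldNumber int) byte {
	return byte(fieldNumber<<3 | wireLengthDelimited)
}

func main() {
	fmt.Printf("LokiRequest.plan        (field 10): 0x%x\n", tagByte(10)) // 0x52
	fmt.Printf("LokiInstantRequest.plan (field 7):  0x%x\n", tagByte(7))  // 0x3a
	fmt.Printf("Plan.raw                (field 1):  0x%x\n", tagByte(1))  // 0xa
}
```

Field numbers up to 15 keep the tag in a single byte; all three fields here stay in that range, so each tag is one byte on the wire.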
diff --git a/pkg/querier/queryrange/queryrangebase/alias.go b/pkg/querier/queryrange/queryrangebase/alias.go
index 01bf345cc48de..4b4e219202f0b 100644
--- a/pkg/querier/queryrange/queryrangebase/alias.go
+++ b/pkg/querier/queryrange/queryrangebase/alias.go
@@ -1,6 +1,9 @@
package queryrangebase
-import "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase/definitions"
+import (
+ "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase/definitions"
+ "github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
+)
// Helpful aliases for refactoring circular imports
@@ -9,5 +12,8 @@ type PrometheusResponseHeader = definitions.PrometheusResponseHeader
type PrometheusRequestHeader = definitions.PrometheusRequestHeader
type Codec = definitions.Codec
type Merger = definitions.Merger
+type CacheGenNumberLoader = resultscache.CacheGenNumberLoader
+
type Request = definitions.Request
type Response = definitions.Response
+type Extent = resultscache.Extent
diff --git a/pkg/querier/queryrange/queryrangebase/definitions/definitions.pb.go b/pkg/querier/queryrange/queryrangebase/definitions/definitions.pb.go
index f9ee4b21059bb..d4eb4fb83b25f 100644
--- a/pkg/querier/queryrange/queryrangebase/definitions/definitions.pb.go
+++ b/pkg/querier/queryrange/queryrangebase/definitions/definitions.pb.go
@@ -25,50 +25,6 @@ var _ = math.Inf
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-// Defined here to prevent circular imports between logproto & queryrangebase
-type CachingOptions struct {
- Disabled bool `protobuf:"varint,1,opt,name=disabled,proto3" json:"disabled,omitempty"`
-}
-
-func (m *CachingOptions) Reset() { *m = CachingOptions{} }
-func (*CachingOptions) ProtoMessage() {}
-func (*CachingOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1a37772b6ae2c5c, []int{0}
-}
-func (m *CachingOptions) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CachingOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_CachingOptions.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *CachingOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CachingOptions.Merge(m, src)
-}
-func (m *CachingOptions) XXX_Size() int {
- return m.Size()
-}
-func (m *CachingOptions) XXX_DiscardUnknown() {
- xxx_messageInfo_CachingOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CachingOptions proto.InternalMessageInfo
-
-func (m *CachingOptions) GetDisabled() bool {
- if m != nil {
- return m.Disabled
- }
- return false
-}
-
type PrometheusRequestHeader struct {
Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"-"`
Values []string `protobuf:"bytes,2,rep,name=Values,proto3" json:"-"`
@@ -77,7 +33,7 @@ type PrometheusRequestHeader struct {
func (m *PrometheusRequestHeader) Reset() { *m = PrometheusRequestHeader{} }
func (*PrometheusRequestHeader) ProtoMessage() {}
func (*PrometheusRequestHeader) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1a37772b6ae2c5c, []int{1}
+ return fileDescriptor_d1a37772b6ae2c5c, []int{0}
}
func (m *PrometheusRequestHeader) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -128,7 +84,7 @@ type PrometheusResponseHeader struct {
func (m *PrometheusResponseHeader) Reset() { *m = PrometheusResponseHeader{} }
func (*PrometheusResponseHeader) ProtoMessage() {}
func (*PrometheusResponseHeader) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1a37772b6ae2c5c, []int{2}
+ return fileDescriptor_d1a37772b6ae2c5c, []int{1}
}
func (m *PrometheusResponseHeader) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -172,7 +128,6 @@ func (m *PrometheusResponseHeader) GetValues() []string {
}
func init() {
- proto.RegisterType((*CachingOptions)(nil), "definitions.CachingOptions")
proto.RegisterType((*PrometheusRequestHeader)(nil), "definitions.PrometheusRequestHeader")
proto.RegisterType((*PrometheusResponseHeader)(nil), "definitions.PrometheusResponseHeader")
}
@@ -182,52 +137,26 @@ func init() {
}
var fileDescriptor_d1a37772b6ae2c5c = []byte{
- // 294 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x91, 0x31, 0x4e, 0x33, 0x31,
- 0x10, 0x85, 0xed, 0xff, 0x87, 0x28, 0x18, 0x89, 0x62, 0x85, 0x44, 0x88, 0xc4, 0x10, 0xa5, 0xa2,
- 0x80, 0xb8, 0xe0, 0x06, 0x49, 0x03, 0x0d, 0xa0, 0x80, 0x28, 0xe8, 0xbc, 0xd9, 0x89, 0x63, 0x25,
- 0xb1, 0x37, 0xf6, 0x6e, 0x41, 0x05, 0x47, 0xe0, 0x18, 0x1c, 0x85, 0x72, 0xcb, 0x54, 0x88, 0xf5,
- 0x36, 0x88, 0x2a, 0x47, 0x40, 0x18, 0x04, 0xdb, 0x22, 0xaa, 0x79, 0xf3, 0xcd, 0x9b, 0x57, 0xcc,
- 0xb0, 0x41, 0x3a, 0x95, 0x7c, 0x91, 0xa3, 0x55, 0x68, 0x43, 0xbd, 0xb5, 0x42, 0x4b, 0xac, 0xc9,
- 0x58, 0x38, 0xe4, 0x09, 0x8e, 0x95, 0x56, 0x99, 0x32, 0xda, 0xd5, 0x75, 0x2f, 0xb5, 0x26, 0x33,
- 0xd1, 0x66, 0x0d, 0xb5, 0xb7, 0xa5, 0x91, 0x26, 0x70, 0xfe, 0xa1, 0x3e, 0x2d, 0xdd, 0x43, 0xb6,
- 0x35, 0x10, 0xa3, 0x89, 0xd2, 0xf2, 0x3c, 0x0d, 0xbe, 0xa8, 0xcd, 0x9a, 0x89, 0x72, 0x22, 0x9e,
- 0x61, 0xd2, 0xa2, 0x1d, 0x7a, 0xd0, 0x1c, 0x7e, 0xf7, 0xdd, 0x4b, 0xb6, 0x73, 0x61, 0xcd, 0x1c,
- 0xb3, 0x09, 0xe6, 0x6e, 0x88, 0x8b, 0x1c, 0x5d, 0x76, 0x82, 0x22, 0x41, 0x1b, 0xed, 0xb2, 0xb5,
- 0x33, 0x31, 0xc7, 0xb0, 0xb2, 0xd1, 0x5f, 0x7f, 0x7b, 0xde, 0xa7, 0x47, 0xc3, 0x80, 0xa2, 0x3d,
- 0xd6, 0xb8, 0x16, 0xb3, 0x1c, 0x5d, 0xeb, 0x5f, 0xe7, 0xff, 0xcf, 0xf0, 0x0b, 0x76, 0xaf, 0x58,
- 0xab, 0x1e, 0xea, 0x52, 0xa3, 0x1d, 0xfe, 0x35, 0xb5, 0x7f, 0x57, 0x94, 0x40, 0x96, 0x25, 0x90,
- 0x55, 0x09, 0xf4, 0xde, 0x03, 0x7d, 0xf4, 0x40, 0x9f, 0x3c, 0xd0, 0xc2, 0x03, 0x7d, 0xf1, 0x40,
- 0x5f, 0x3d, 0x90, 0x95, 0x07, 0xfa, 0x50, 0x01, 0x29, 0x2a, 0x20, 0xcb, 0x0a, 0xc8, 0xcd, 0xa9,
- 0x54, 0xd9, 0x24, 0x8f, 0x7b, 0x23, 0x33, 0xe7, 0xd2, 0x8a, 0xb1, 0xd0, 0x82, 0xcf, 0xcc, 0x54,
- 0xf1, 0x5f, 0xbf, 0x23, 0x6e, 0x84, 0x03, 0x1f, 0xbf, 0x07, 0x00, 0x00, 0xff, 0xff, 0x09, 0x36,
- 0xa9, 0xa5, 0xca, 0x01, 0x00, 0x00,
+ // 262 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x2e, 0xc8, 0x4e, 0xd7,
+ 0x2f, 0x2c, 0x4d, 0x2d, 0xca, 0x4c, 0x2d, 0x02, 0xd3, 0x95, 0x45, 0x89, 0x79, 0xe9, 0xa9, 0x48,
+ 0xcc, 0xa4, 0xc4, 0xe2, 0x54, 0xfd, 0x94, 0xd4, 0xb4, 0xcc, 0xbc, 0xcc, 0x92, 0xcc, 0xfc, 0xbc,
+ 0x62, 0x64, 0xb6, 0x5e, 0x41, 0x51, 0x7e, 0x49, 0xbe, 0x10, 0x37, 0x92, 0x90, 0x94, 0x48, 0x7a,
+ 0x7e, 0x7a, 0x3e, 0x58, 0x5c, 0x1f, 0xc4, 0x82, 0x28, 0x51, 0x0a, 0xe6, 0x12, 0x0f, 0x28, 0xca,
+ 0xcf, 0x4d, 0x2d, 0xc9, 0x48, 0x2d, 0x2d, 0x0e, 0x4a, 0x2d, 0x2c, 0x4d, 0x2d, 0x2e, 0xf1, 0x48,
+ 0x4d, 0x4c, 0x49, 0x2d, 0x12, 0x92, 0xe4, 0x62, 0xf1, 0x4b, 0xcc, 0x4d, 0x95, 0x60, 0x54, 0x60,
+ 0xd4, 0xe0, 0x74, 0x62, 0x7d, 0x75, 0x4f, 0x9e, 0x51, 0x37, 0x08, 0x2c, 0x24, 0x24, 0xcb, 0xc5,
+ 0x16, 0x96, 0x98, 0x53, 0x9a, 0x5a, 0x2c, 0xc1, 0xa4, 0xc0, 0x8c, 0x90, 0x84, 0x0a, 0x2a, 0x85,
+ 0x70, 0x49, 0x20, 0x1b, 0x5a, 0x5c, 0x90, 0x9f, 0x57, 0x9c, 0x4a, 0xa9, 0xa9, 0x4e, 0xf5, 0x17,
+ 0x1e, 0xca, 0x31, 0xdc, 0x78, 0x28, 0xc7, 0xf0, 0xe1, 0xa1, 0x1c, 0x63, 0xc3, 0x23, 0x39, 0xc6,
+ 0x15, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39,
+ 0xc6, 0x17, 0x8f, 0xe4, 0x18, 0x3e, 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63,
+ 0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, 0xa2, 0x3c, 0xd3, 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92,
+ 0xf3, 0x73, 0xf5, 0xd3, 0x8b, 0x12, 0xd3, 0x12, 0xf3, 0x12, 0xf5, 0x73, 0xf2, 0xb3, 0x33, 0xf5,
+ 0x49, 0x0e, 0xe0, 0x24, 0x36, 0x70, 0x90, 0x19, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x51, 0x1b,
+ 0x61, 0xc9, 0x9c, 0x01, 0x00, 0x00,
}
-func (this *CachingOptions) Equal(that interface{}) bool {
- if that == nil {
- return this == nil
- }
-
- that1, ok := that.(*CachingOptions)
- if !ok {
- that2, ok := that.(CachingOptions)
- if ok {
- that1 = &that2
- } else {
- return false
- }
- }
- if that1 == nil {
- return this == nil
- } else if this == nil {
- return false
- }
- if this.Disabled != that1.Disabled {
- return false
- }
- return true
-}
func (this *PrometheusRequestHeader) Equal(that interface{}) bool {
if that == nil {
return this == nil
@@ -292,16 +221,6 @@ func (this *PrometheusResponseHeader) Equal(that interface{}) bool {
}
return true
}
-func (this *CachingOptions) GoString() string {
- if this == nil {
- return "nil"
- }
- s := make([]string, 0, 5)
- s = append(s, "&definitions.CachingOptions{")
- s = append(s, "Disabled: "+fmt.Sprintf("%#v", this.Disabled)+",\n")
- s = append(s, "}")
- return strings.Join(s, "")
-}
func (this *PrometheusRequestHeader) GoString() string {
if this == nil {
return "nil"
@@ -332,39 +251,6 @@ func valueToGoStringDefinitions(v interface{}, typ string) string {
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
}
-func (m *CachingOptions) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *CachingOptions) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *CachingOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.Disabled {
- i--
- if m.Disabled {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
func (m *PrometheusRequestHeader) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -454,18 +340,6 @@ func encodeVarintDefinitions(dAtA []byte, offset int, v uint64) int {
dAtA[offset] = uint8(v)
return base
}
-func (m *CachingOptions) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Disabled {
- n += 2
- }
- return n
-}
-
func (m *PrometheusRequestHeader) Size() (n int) {
if m == nil {
return 0
@@ -510,16 +384,6 @@ func sovDefinitions(x uint64) (n int) {
func sozDefinitions(x uint64) (n int) {
return sovDefinitions(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
-func (this *CachingOptions) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&CachingOptions{`,
- `Disabled:` + fmt.Sprintf("%v", this.Disabled) + `,`,
- `}`,
- }, "")
- return s
-}
func (this *PrometheusRequestHeader) String() string {
if this == nil {
return "nil"
@@ -550,79 +414,6 @@ func valueToStringDefinitions(v interface{}) string {
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("*%v", pv)
}
-func (m *CachingOptions) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowDefinitions
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: CachingOptions: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: CachingOptions: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowDefinitions
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Disabled = bool(v != 0)
- default:
- iNdEx = preIndex
- skippy, err := skipDefinitions(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthDefinitions
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthDefinitions
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
func (m *PrometheusRequestHeader) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
diff --git a/pkg/querier/queryrange/queryrangebase/definitions/definitions.proto b/pkg/querier/queryrange/queryrangebase/definitions/definitions.proto
index fdac8a5daa08a..9f5e7967c8524 100644
--- a/pkg/querier/queryrange/queryrangebase/definitions/definitions.proto
+++ b/pkg/querier/queryrange/queryrangebase/definitions/definitions.proto
@@ -8,11 +8,6 @@ option go_package = "github.com/grafana/loki/pkg/querier/queryrange/queryrangeba
option (gogoproto.marshaler_all) = true;
option (gogoproto.unmarshaler_all) = true;
-// Defined here to prevent circular imports between logproto & queryrangebase
-message CachingOptions {
- bool disabled = 1;
-}
-
message PrometheusRequestHeader {
string Name = 1 [(gogoproto.jsontag) = "-"];
repeated string Values = 2 [(gogoproto.jsontag) = "-"];
diff --git a/pkg/querier/queryrange/queryrangebase/definitions/interface.go b/pkg/querier/queryrange/queryrangebase/definitions/interface.go
index 0f5be9b10676e..f8c9a0f5531fb 100644
--- a/pkg/querier/queryrange/queryrangebase/definitions/interface.go
+++ b/pkg/querier/queryrange/queryrangebase/definitions/interface.go
@@ -7,6 +7,8 @@ import (
"github.com/gogo/protobuf/proto"
"github.com/opentracing/opentracing-go"
+
+ "github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
)
// Codec is used to encode/decode query range requests and responses so they can be passed down to middlewares.
@@ -32,6 +34,7 @@ type Merger interface {
// Request represents a query range request that can be process by middlewares.
type Request interface {
+ proto.Message
// GetStart returns the start timestamp of the request in milliseconds.
GetStart() time.Time
// GetEnd returns the end timestamp of the request in milliseconds.
@@ -46,11 +49,12 @@ type Request interface {
WithStartEnd(start time.Time, end time.Time) Request
// WithQuery clone the current request with a different query.
WithQuery(string) Request
- proto.Message
// LogToSpan writes information about this request to an OpenTracing span
LogToSpan(opentracing.Span)
}
+type CachingOptions = resultscache.CachingOptions
+
// Response represents a query range response.
type Response interface {
proto.Message
diff --git a/pkg/querier/queryrange/queryrangebase/middleware.go b/pkg/querier/queryrange/queryrangebase/middleware.go
index 8ed3368faf113..10e80ddf8a2ec 100644
--- a/pkg/querier/queryrange/queryrangebase/middleware.go
+++ b/pkg/querier/queryrange/queryrangebase/middleware.go
@@ -6,6 +6,8 @@ import (
"github.com/grafana/dskit/middleware"
"github.com/grafana/dskit/tenant"
+
+ "github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
)
const (
@@ -13,7 +15,7 @@ const (
ResultsCacheGenNumberHeaderName = "Results-Cache-Gen-Number"
)
-func CacheGenNumberHeaderSetterMiddleware(cacheGenNumbersLoader CacheGenNumberLoader) middleware.Interface {
+func CacheGenNumberHeaderSetterMiddleware(cacheGenNumbersLoader resultscache.CacheGenNumberLoader) middleware.Interface {
return middleware.Func(func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
userIDs, err := tenant.TenantIDs(r.Context())
@@ -30,7 +32,7 @@ func CacheGenNumberHeaderSetterMiddleware(cacheGenNumbersLoader CacheGenNumberLo
})
}
-func CacheGenNumberContextSetterMiddleware(cacheGenNumbersLoader CacheGenNumberLoader) Middleware {
+func CacheGenNumberContextSetterMiddleware(cacheGenNumbersLoader resultscache.CacheGenNumberLoader) Middleware {
return MiddlewareFunc(func(next Handler) Handler {
return HandlerFunc(func(ctx context.Context, req Request) (Response, error) {
userIDs, err := tenant.TenantIDs(ctx)
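Both middleware constructors now accept the loader type from resultscache directly instead of the local alias. The pattern itself is the usual handler-wrapping one: resolve the tenant's cache generation number and stamp it on the Results-Cache-Gen-Number header before calling the next handler. A hedged, stdlib-only sketch; genLoader, GenNumber and the X-Scope-OrgID lookup are simplified stand-ins, and the real loader interface in resultscache may differ:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// genLoader is a simplified stand-in for the cache-gen-number loader.
type genLoader interface {
	GenNumber(tenant string) string
}

type staticLoader struct{ gen string }

func (l staticLoader) GenNumber(string) string { return l.gen }

// withCacheGenHeader mirrors the header-setter middleware pattern: look up the
// generation number for the tenant and set it on the response before serving.
func withCacheGenHeader(loader genLoader, next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		tenant := r.Header.Get("X-Scope-OrgID")
		w.Header().Set("Results-Cache-Gen-Number", loader.GenNumber(tenant))
		next.ServeHTTP(w, r)
	})
}

func main() {
	h := withCacheGenHeader(staticLoader{gen: "42"}, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprintln(w, "ok")
	}))
	rec := httptest.NewRecorder()
	h.ServeHTTP(rec, httptest.NewRequest(http.MethodGet, "/", nil))
	fmt.Println(rec.Header().Get("Results-Cache-Gen-Number")) // 42
}
```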
diff --git a/pkg/querier/queryrange/queryrangebase/query_range.go b/pkg/querier/queryrange/queryrangebase/query_range.go
index 5c76a4a80ee9e..ed2bf48c6757f 100644
--- a/pkg/querier/queryrange/queryrangebase/query_range.go
+++ b/pkg/querier/queryrange/queryrangebase/query_range.go
@@ -20,6 +20,7 @@ import (
"github.com/prometheus/prometheus/model/timestamp"
"github.com/grafana/loki/pkg/logproto"
+ "github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
"github.com/grafana/loki/pkg/util/spanlogger"
)
@@ -54,6 +55,12 @@ func (q *PrometheusRequest) WithStartEnd(start, end time.Time) Request {
return &clone
}
+// WithStartEndForCache implements resultscache.Request.
+func (q *PrometheusRequest) WithStartEndForCache(s time.Time, e time.Time) resultscache.Request {
+ clone := q.WithStartEnd(s, e).(resultscache.Request)
+ return clone
+}
+
// WithQuery clones the current `PrometheusRequest` with a new query.
func (q *PrometheusRequest) WithQuery(query string) Request {
clone := *q
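WithStartEndForCache is a thin adapter: it reuses the existing WithStartEnd clone and re-types the result for the resultscache.Request interface via a type assertion. A toy sketch of that delegation pattern (rangeRequest, cacheRequest and req are illustrative names, not the actual interfaces):

```go
package main

import (
	"fmt"
	"time"
)

// Two toy interfaces standing in for queryrangebase.Request and resultscache.Request.
type rangeRequest interface {
	WithStartEnd(start, end time.Time) rangeRequest
}

type cacheRequest interface {
	WithStartEndForCache(start, end time.Time) cacheRequest
}

type req struct{ start, end time.Time }

func (r *req) WithStartEnd(start, end time.Time) rangeRequest {
	clone := *r
	clone.start, clone.end = start, end
	return &clone
}

// WithStartEndForCache reuses WithStartEnd and re-types the clone for the cache
// interface, mirroring the adapter added to PrometheusRequest above.
func (r *req) WithStartEndForCache(start, end time.Time) cacheRequest {
	return r.WithStartEnd(start, end).(cacheRequest)
}

func main() {
	var r cacheRequest = &req{}
	fmt.Printf("%+v\n", r.WithStartEndForCache(time.Unix(0, 0), time.Unix(60, 0)))
}
```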
diff --git a/pkg/querier/queryrange/queryrangebase/queryrange.pb.go b/pkg/querier/queryrange/queryrangebase/queryrange.pb.go
index 121b3ffb15351..f908b3621dcf6 100644
--- a/pkg/querier/queryrange/queryrangebase/queryrange.pb.go
+++ b/pkg/querier/queryrange/queryrangebase/queryrange.pb.go
@@ -7,12 +7,13 @@ import (
fmt "fmt"
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto"
+ _ "github.com/gogo/protobuf/types"
github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
- types "github.com/gogo/protobuf/types"
_ "github.com/golang/protobuf/ptypes/duration"
github_com_grafana_loki_pkg_logproto "github.com/grafana/loki/pkg/logproto"
logproto "github.com/grafana/loki/pkg/logproto"
definitions "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase/definitions"
+ resultscache "github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
io "io"
math "math"
math_bits "math/bits"
@@ -40,7 +41,7 @@ type PrometheusRequest struct {
Step int64 `protobuf:"varint,4,opt,name=step,proto3" json:"step,omitempty"`
Timeout time.Duration `protobuf:"bytes,5,opt,name=timeout,proto3,stdduration" json:"timeout"`
Query string `protobuf:"bytes,6,opt,name=query,proto3" json:"query,omitempty"`
- CachingOptions definitions.CachingOptions `protobuf:"bytes,7,opt,name=cachingOptions,proto3" json:"cachingOptions"`
+ CachingOptions resultscache.CachingOptions `protobuf:"bytes,7,opt,name=cachingOptions,proto3" json:"cachingOptions"`
Headers []*definitions.PrometheusRequestHeader `protobuf:"bytes,8,rep,name=Headers,proto3" json:"-"`
}
@@ -118,11 +119,11 @@ func (m *PrometheusRequest) GetQuery() string {
return ""
}
-func (m *PrometheusRequest) GetCachingOptions() definitions.CachingOptions {
+func (m *PrometheusRequest) GetCachingOptions() resultscache.CachingOptions {
if m != nil {
return m.CachingOptions
}
- return definitions.CachingOptions{}
+ return resultscache.CachingOptions{}
}
func (m *PrometheusRequest) GetHeaders() []*definitions.PrometheusRequestHeader {
@@ -302,132 +303,11 @@ func (m *SampleStream) GetSamples() []logproto.LegacySample {
return nil
}
-type CachedResponse struct {
- Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key"`
- // List of cached responses; non-overlapping and in order.
- Extents []Extent `protobuf:"bytes,2,rep,name=extents,proto3" json:"extents"`
-}
-
-func (m *CachedResponse) Reset() { *m = CachedResponse{} }
-func (*CachedResponse) ProtoMessage() {}
-func (*CachedResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_4cc6a0c1d6b614c4, []int{4}
-}
-func (m *CachedResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CachedResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_CachedResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *CachedResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CachedResponse.Merge(m, src)
-}
-func (m *CachedResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *CachedResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_CachedResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CachedResponse proto.InternalMessageInfo
-
-func (m *CachedResponse) GetKey() string {
- if m != nil {
- return m.Key
- }
- return ""
-}
-
-func (m *CachedResponse) GetExtents() []Extent {
- if m != nil {
- return m.Extents
- }
- return nil
-}
-
-type Extent struct {
- Start int64 `protobuf:"varint,1,opt,name=start,proto3" json:"start"`
- End int64 `protobuf:"varint,2,opt,name=end,proto3" json:"end"`
- TraceId string `protobuf:"bytes,4,opt,name=trace_id,json=traceId,proto3" json:"-"`
- Response *types.Any `protobuf:"bytes,5,opt,name=response,proto3" json:"response"`
-}
-
-func (m *Extent) Reset() { *m = Extent{} }
-func (*Extent) ProtoMessage() {}
-func (*Extent) Descriptor() ([]byte, []int) {
- return fileDescriptor_4cc6a0c1d6b614c4, []int{5}
-}
-func (m *Extent) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Extent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Extent.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Extent) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Extent.Merge(m, src)
-}
-func (m *Extent) XXX_Size() int {
- return m.Size()
-}
-func (m *Extent) XXX_DiscardUnknown() {
- xxx_messageInfo_Extent.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Extent proto.InternalMessageInfo
-
-func (m *Extent) GetStart() int64 {
- if m != nil {
- return m.Start
- }
- return 0
-}
-
-func (m *Extent) GetEnd() int64 {
- if m != nil {
- return m.End
- }
- return 0
-}
-
-func (m *Extent) GetTraceId() string {
- if m != nil {
- return m.TraceId
- }
- return ""
-}
-
-func (m *Extent) GetResponse() *types.Any {
- if m != nil {
- return m.Response
- }
- return nil
-}
-
func init() {
proto.RegisterType((*PrometheusRequest)(nil), "queryrangebase.PrometheusRequest")
proto.RegisterType((*PrometheusResponse)(nil), "queryrangebase.PrometheusResponse")
proto.RegisterType((*PrometheusData)(nil), "queryrangebase.PrometheusData")
proto.RegisterType((*SampleStream)(nil), "queryrangebase.SampleStream")
- proto.RegisterType((*CachedResponse)(nil), "queryrangebase.CachedResponse")
- proto.RegisterType((*Extent)(nil), "queryrangebase.Extent")
}
func init() {
@@ -435,60 +315,54 @@ func init() {
}
var fileDescriptor_4cc6a0c1d6b614c4 = []byte{
- // 846 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x4f, 0x6f, 0xdc, 0x44,
- 0x14, 0x5f, 0xc7, 0xfb, 0x77, 0x5a, 0x6d, 0x61, 0x1a, 0x15, 0x27, 0x45, 0xf6, 0x6a, 0x05, 0x52,
- 0x90, 0xc0, 0x2b, 0x8a, 0xe8, 0x01, 0xa9, 0x88, 0x38, 0x09, 0xa2, 0x55, 0x25, 0x2a, 0xa7, 0x27,
- 0x2e, 0x68, 0x76, 0xfd, 0xe2, 0x58, 0xf1, 0xbf, 0xce, 0x8c, 0x2b, 0xf6, 0xc6, 0x89, 0x73, 0x4f,
- 0x88, 0x8f, 0xc0, 0x01, 0xf1, 0x39, 0x72, 0xcc, 0xb1, 0xe2, 0x60, 0xc8, 0xe6, 0x82, 0x7c, 0xea,
- 0x47, 0x40, 0xf3, 0xc7, 0xbb, 0xde, 0x8d, 0x02, 0xf4, 0xb2, 0xfb, 0x66, 0xde, 0xef, 0xbd, 0xf7,
- 0x7b, 0xbf, 0x79, 0x7e, 0xe8, 0x61, 0x7e, 0x16, 0x4e, 0x5e, 0x14, 0x40, 0x23, 0xa0, 0xf2, 0x7f,
- 0x4e, 0x49, 0x1a, 0x42, 0xc3, 0x9c, 0x12, 0xd6, 0x3c, 0xba, 0x39, 0xcd, 0x78, 0x86, 0x87, 0xeb,
- 0x80, 0xdd, 0xed, 0x30, 0x0b, 0x33, 0xe9, 0x9a, 0x08, 0x4b, 0xa1, 0x76, 0x77, 0xc2, 0x2c, 0x0b,
- 0x63, 0x98, 0xc8, 0xd3, 0xb4, 0x38, 0x99, 0x90, 0x74, 0xae, 0x5d, 0xf6, 0xa6, 0x2b, 0x28, 0x28,
- 0xe1, 0x51, 0x96, 0x6a, 0xbf, 0xb3, 0xe9, 0xe7, 0x51, 0x02, 0x8c, 0x93, 0x24, 0xd7, 0x80, 0xfb,
- 0x82, 0x79, 0x9c, 0x85, 0xaa, 0x68, 0x6d, 0x68, 0xe7, 0xc1, 0xff, 0x6b, 0x2b, 0x80, 0x93, 0x28,
- 0x8d, 0x44, 0x55, 0xd6, 0xb4, 0x55, 0x92, 0xf1, 0xcf, 0x26, 0x7a, 0xf7, 0x19, 0xcd, 0x12, 0xe0,
- 0xa7, 0x50, 0x30, 0x1f, 0x5e, 0x14, 0xc0, 0x38, 0xc6, 0xa8, 0x9d, 0x13, 0x7e, 0x6a, 0x19, 0x23,
- 0x63, 0x6f, 0xe0, 0x4b, 0x1b, 0x7f, 0x81, 0x3a, 0x8c, 0x13, 0xca, 0xad, 0xad, 0x91, 0xb1, 0x77,
- 0xeb, 0xc1, 0xae, 0xab, 0xc8, 0xbb, 0x35, 0x79, 0xf7, 0x79, 0x4d, 0xde, 0xeb, 0x9f, 0x97, 0x4e,
- 0xeb, 0xd5, 0x9f, 0x8e, 0xe1, 0xab, 0x10, 0xfc, 0x10, 0x99, 0x90, 0x06, 0x96, 0xf9, 0x16, 0x91,
- 0x22, 0x40, 0xf0, 0x60, 0x1c, 0x72, 0xab, 0x3d, 0x32, 0xf6, 0x4c, 0x5f, 0xda, 0xf8, 0x11, 0xea,
- 0x09, 0x99, 0xb2, 0x82, 0x5b, 0x1d, 0x99, 0x6f, 0xe7, 0x5a, 0xbe, 0x43, 0x2d, 0xb3, 0x4a, 0xf7,
- 0x8b, 0x48, 0x57, 0xc7, 0xe0, 0x6d, 0xd4, 0x91, 0x02, 0x59, 0x5d, 0xd9, 0x9b, 0x3a, 0xe0, 0xc7,
- 0x68, 0x38, 0x23, 0xb3, 0xd3, 0x28, 0x0d, 0xbf, 0xcd, 0xa5, 0x3c, 0x56, 0x4f, 0xe6, 0xbe, 0xef,
- 0x36, 0x25, 0x3b, 0x58, 0x83, 0x78, 0x6d, 0x91, 0xdd, 0xdf, 0x08, 0xc4, 0x47, 0xa8, 0xf7, 0x0d,
- 0x90, 0x00, 0x28, 0xb3, 0xfa, 0x23, 0x73, 0xef, 0xd6, 0x83, 0x0f, 0xd6, 0x72, 0x5c, 0x13, 0x5b,
- 0x81, 0xbd, 0x4e, 0x55, 0x3a, 0xc6, 0x27, 0x7e, 0x1d, 0x3b, 0xfe, 0x7d, 0x0b, 0xe1, 0x26, 0x96,
- 0xe5, 0x59, 0xca, 0x00, 0x8f, 0x51, 0xf7, 0x98, 0x13, 0x5e, 0x30, 0xf5, 0x36, 0x1e, 0xaa, 0x4a,
- 0xa7, 0xcb, 0xe4, 0x8d, 0xaf, 0x3d, 0xf8, 0x09, 0x6a, 0x1f, 0x12, 0x4e, 0xf4, 0x43, 0xd9, 0xee,
- 0xfa, 0x40, 0x34, 0x18, 0x08, 0x94, 0x77, 0x4f, 0x74, 0x51, 0x95, 0xce, 0x30, 0x20, 0x9c, 0x7c,
- 0x9c, 0x25, 0x11, 0x87, 0x24, 0xe7, 0x73, 0x5f, 0xe6, 0xc0, 0x9f, 0xa3, 0xc1, 0x11, 0xa5, 0x19,
- 0x7d, 0x3e, 0xcf, 0x41, 0xbe, 0xdf, 0xc0, 0x7b, 0xaf, 0x2a, 0x9d, 0xbb, 0x50, 0x5f, 0x36, 0x22,
- 0x56, 0x48, 0xfc, 0x11, 0xea, 0xc8, 0x83, 0x7c, 0xb9, 0x81, 0x77, 0xb7, 0x2a, 0x9d, 0x3b, 0x32,
- 0xa4, 0x01, 0x57, 0x08, 0xfc, 0xf5, 0x4a, 0xaf, 0x8e, 0xd4, 0xeb, 0xc3, 0x1b, 0xf5, 0x52, 0x1a,
- 0xdc, 0x20, 0xd8, 0x4f, 0x06, 0x1a, 0xae, 0xb7, 0x86, 0x5d, 0x84, 0x7c, 0x60, 0x45, 0xcc, 0x25,
- 0x7b, 0x25, 0xd8, 0xb0, 0x2a, 0x1d, 0x44, 0x97, 0xb7, 0x7e, 0x03, 0x81, 0x0f, 0x51, 0x57, 0x9d,
- 0xac, 0x2d, 0xc9, 0xe4, 0xfd, 0x4d, 0xe9, 0x8e, 0x49, 0x92, 0xc7, 0x70, 0xcc, 0x29, 0x90, 0xc4,
- 0x1b, 0x6a, 0xe1, 0xba, 0x2a, 0x9b, 0xaf, 0x63, 0xc7, 0xe7, 0x06, 0xba, 0xdd, 0x04, 0xe2, 0x97,
- 0xa8, 0x1b, 0x93, 0x29, 0xc4, 0xe2, 0xcd, 0x4c, 0x39, 0xb0, 0xcb, 0x2f, 0xf9, 0x29, 0x84, 0x64,
- 0x36, 0x7f, 0x2a, 0xbc, 0xcf, 0x48, 0x44, 0xbd, 0x03, 0x91, 0xf3, 0x8f, 0xd2, 0xf9, 0x34, 0x8c,
- 0xf8, 0x69, 0x31, 0x75, 0x67, 0x59, 0x32, 0x09, 0x29, 0x39, 0x21, 0x29, 0x99, 0xc4, 0xd9, 0x59,
- 0x34, 0x69, 0x2e, 0x04, 0x57, 0xc6, 0xed, 0x07, 0x24, 0xe7, 0x40, 0x05, 0x91, 0x04, 0x38, 0x8d,
- 0x66, 0xbe, 0xae, 0x86, 0xbf, 0x42, 0x3d, 0x26, 0x79, 0x30, 0xdd, 0xcf, 0xbd, 0xcd, 0xc2, 0x8a,
- 0xe6, 0xaa, 0x93, 0x97, 0x24, 0x2e, 0x80, 0xf9, 0x75, 0xd8, 0x38, 0x45, 0x43, 0x31, 0xf3, 0x10,
- 0x2c, 0xe7, 0x6f, 0x07, 0x99, 0x67, 0x30, 0xd7, 0x5a, 0xf6, 0xaa, 0xd2, 0x11, 0x47, 0x5f, 0xfc,
- 0xe0, 0x7d, 0xd4, 0x83, 0x1f, 0x38, 0xa4, 0x7c, 0x55, 0x6e, 0x43, 0xbe, 0x23, 0xe9, 0xf6, 0xee,
- 0xe8, 0x72, 0x35, 0xdc, 0xaf, 0x8d, 0xf1, 0x6f, 0x06, 0xea, 0x2a, 0x10, 0x76, 0xea, 0x75, 0x23,
- 0x4a, 0x99, 0xde, 0xa0, 0x2a, 0x1d, 0x75, 0x51, 0xef, 0x94, 0x1d, 0xb5, 0x53, 0xb6, 0xa4, 0x5b,
- 0x32, 0x81, 0x34, 0x50, 0x6b, 0x63, 0x84, 0xfa, 0x9c, 0x92, 0x19, 0x7c, 0x1f, 0x05, 0x7a, 0x00,
- 0xeb, 0x61, 0x91, 0xd7, 0x8f, 0x03, 0xfc, 0x25, 0xea, 0x53, 0xdd, 0x92, 0xde, 0x22, 0xdb, 0xd7,
- 0xb6, 0xc8, 0x7e, 0x3a, 0xf7, 0x6e, 0x57, 0xa5, 0xb3, 0x44, 0xfa, 0x4b, 0xeb, 0x49, 0xbb, 0x6f,
- 0xbe, 0xd3, 0xf6, 0xd8, 0xc5, 0xa5, 0xdd, 0x7a, 0x7d, 0x69, 0xb7, 0xde, 0x5c, 0xda, 0xc6, 0x8f,
- 0x0b, 0xdb, 0xf8, 0x75, 0x61, 0x1b, 0xe7, 0x0b, 0xdb, 0xb8, 0x58, 0xd8, 0xc6, 0x5f, 0x0b, 0xdb,
- 0xf8, 0x7b, 0x61, 0xb7, 0xde, 0x2c, 0x6c, 0xe3, 0xd5, 0x95, 0xdd, 0xba, 0xb8, 0xb2, 0x5b, 0xaf,
- 0xaf, 0xec, 0xd6, 0x77, 0x8f, 0xfe, 0xed, 0x6d, 0xff, 0x73, 0x9f, 0x4f, 0xbb, 0x92, 0xe0, 0x67,
- 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x63, 0x5c, 0x0b, 0x88, 0xd6, 0x06, 0x00, 0x00,
+ // 739 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xcf, 0x4f, 0xdb, 0x48,
+ 0x18, 0x8d, 0xc9, 0x2f, 0x32, 0xac, 0xb2, 0xda, 0x01, 0xb1, 0x5e, 0x16, 0x8d, 0xa3, 0x68, 0x57,
+ 0xca, 0x4a, 0x5b, 0xbb, 0xa5, 0x2a, 0x87, 0x4a, 0x48, 0xad, 0x81, 0xaa, 0x42, 0x48, 0x45, 0x86,
+ 0x53, 0x6f, 0x93, 0x64, 0x70, 0x2c, 0x6c, 0x8f, 0x99, 0x19, 0x23, 0xe5, 0xd6, 0x53, 0xcf, 0xdc,
+ 0xda, 0x3f, 0xa1, 0xa7, 0xfe, 0x1d, 0x1c, 0x39, 0xa2, 0x1e, 0xdc, 0x12, 0x2e, 0x55, 0x4e, 0xfc,
+ 0x09, 0xd5, 0xcc, 0x38, 0xe0, 0x04, 0xd1, 0x1f, 0xa7, 0xcc, 0xe7, 0xef, 0xbd, 0x37, 0xef, 0x7b,
+ 0x9f, 0x63, 0xb0, 0x9e, 0x1c, 0xf9, 0xce, 0x71, 0x4a, 0x58, 0x40, 0x98, 0xfa, 0x1d, 0x32, 0x1c,
+ 0xfb, 0xa4, 0x70, 0xec, 0x62, 0x5e, 0x2c, 0xed, 0x84, 0x51, 0x41, 0x61, 0x73, 0x1a, 0xb0, 0xb2,
+ 0xe4, 0x53, 0x9f, 0xaa, 0x96, 0x23, 0x4f, 0x1a, 0xb5, 0x82, 0x7c, 0x4a, 0xfd, 0x90, 0x38, 0xaa,
+ 0xea, 0xa6, 0x87, 0x4e, 0x3f, 0x65, 0x58, 0x04, 0x34, 0xce, 0xfb, 0xd6, 0x6c, 0x5f, 0x04, 0x11,
+ 0xe1, 0x02, 0x47, 0x49, 0x0e, 0xf8, 0x5b, 0xda, 0x0b, 0xa9, 0xaf, 0x95, 0x27, 0x87, 0xbc, 0xb9,
+ 0xf9, 0x73, 0xde, 0xfb, 0xe4, 0x30, 0x88, 0x03, 0x79, 0x2b, 0x2f, 0x9e, 0x73, 0x91, 0x87, 0x52,
+ 0x84, 0x0b, 0xca, 0xb0, 0x4f, 0x9c, 0xde, 0x20, 0x8d, 0x8f, 0x9c, 0x1e, 0xee, 0x0d, 0x88, 0xc3,
+ 0x08, 0x4f, 0x43, 0xc1, 0x75, 0x21, 0x86, 0x09, 0xc9, 0x19, 0xed, 0x77, 0x65, 0xf0, 0xc7, 0x1e,
+ 0xa3, 0x11, 0x11, 0x03, 0x92, 0x72, 0x8f, 0x1c, 0xa7, 0x84, 0x0b, 0x08, 0x41, 0x25, 0xc1, 0x62,
+ 0x60, 0x1a, 0x2d, 0xa3, 0xd3, 0xf0, 0xd4, 0x19, 0x3e, 0x05, 0x55, 0x2e, 0x30, 0x13, 0xe6, 0x5c,
+ 0xcb, 0xe8, 0x2c, 0xac, 0xad, 0xd8, 0x7a, 0x5c, 0x7b, 0x32, 0xae, 0x7d, 0x30, 0x19, 0xd7, 0x9d,
+ 0x3f, 0xcb, 0xac, 0xd2, 0xe9, 0x67, 0xcb, 0xf0, 0x34, 0x05, 0xae, 0x83, 0x32, 0x89, 0xfb, 0x66,
+ 0xf9, 0x17, 0x98, 0x92, 0x20, 0x7d, 0x70, 0x41, 0x12, 0xb3, 0xd2, 0x32, 0x3a, 0x65, 0x4f, 0x9d,
+ 0xe1, 0x06, 0xa8, 0xcb, 0x60, 0x69, 0x2a, 0xcc, 0xaa, 0xd2, 0xfb, 0xeb, 0x8e, 0xde, 0x56, 0xbe,
+ 0x18, 0x2d, 0xf7, 0x5e, 0xca, 0x4d, 0x38, 0x70, 0x09, 0x54, 0x55, 0xa4, 0x66, 0x4d, 0xcd, 0xa6,
+ 0x0b, 0xb8, 0x03, 0x9a, 0x32, 0x9b, 0x20, 0xf6, 0x5f, 0x25, 0x2a, 0x50, 0xb3, 0xae, 0xb4, 0x57,
+ 0xed, 0x62, 0x72, 0xf6, 0xe6, 0x14, 0xc6, 0xad, 0x48, 0x79, 0x6f, 0x86, 0x09, 0xb7, 0x41, 0xfd,
+ 0x25, 0xc1, 0x7d, 0xc2, 0xb8, 0x39, 0xdf, 0x2a, 0x77, 0x16, 0xd6, 0xfe, 0xb1, 0x8b, 0x9b, 0xba,
+ 0x93, 0xb6, 0x06, 0xbb, 0xd5, 0x71, 0x66, 0x19, 0x0f, 0xbc, 0x09, 0xb7, 0xfd, 0x71, 0x0e, 0xc0,
+ 0x22, 0x96, 0x27, 0x34, 0xe6, 0x04, 0xb6, 0x41, 0x6d, 0x5f, 0x60, 0x91, 0x72, 0xbd, 0x1c, 0x17,
+ 0x8c, 0x33, 0xab, 0xc6, 0xd5, 0x13, 0x2f, 0xef, 0xc0, 0x1d, 0x50, 0xd9, 0xc2, 0x02, 0xe7, 0x9b,
+ 0x42, 0xf6, 0xf4, 0x3b, 0x54, 0x70, 0x20, 0x51, 0xee, 0xb2, 0x9c, 0x62, 0x9c, 0x59, 0xcd, 0x3e,
+ 0x16, 0xf8, 0x7f, 0x1a, 0x05, 0x82, 0x44, 0x89, 0x18, 0x7a, 0x4a, 0x03, 0x3e, 0x01, 0x8d, 0x6d,
+ 0xc6, 0x28, 0x3b, 0x18, 0x26, 0x44, 0x2d, 0xb0, 0xe1, 0xfe, 0x39, 0xce, 0xac, 0x45, 0x32, 0x79,
+ 0x58, 0x60, 0xdc, 0x22, 0xe1, 0x7f, 0xa0, 0xaa, 0x0a, 0xb5, 0xba, 0x86, 0xbb, 0x38, 0xce, 0xac,
+ 0xdf, 0x15, 0xa5, 0x00, 0xd7, 0x08, 0xf8, 0xe2, 0x36, 0xaf, 0xaa, 0xca, 0xeb, 0xdf, 0x7b, 0xf3,
+ 0xd2, 0x19, 0xdc, 0x13, 0xd8, 0x5b, 0x03, 0x34, 0xa7, 0x47, 0x83, 0x36, 0x00, 0x9e, 0xda, 0x9f,
+ 0x72, 0xaf, 0x03, 0x6b, 0x8e, 0x33, 0x0b, 0xb0, 0x9b, 0xa7, 0x5e, 0x01, 0x01, 0xb7, 0x40, 0x4d,
+ 0x57, 0xe6, 0x9c, 0x72, 0xb2, 0x3a, 0x1b, 0xdd, 0x3e, 0x8e, 0x92, 0x90, 0xec, 0x0b, 0x46, 0x70,
+ 0xe4, 0x36, 0xf3, 0xe0, 0x6a, 0x5a, 0xcd, 0xcb, 0xb9, 0xed, 0x33, 0x03, 0xfc, 0x56, 0x04, 0xc2,
+ 0x13, 0x50, 0x0b, 0x71, 0x97, 0x84, 0x72, 0x67, 0x65, 0xf5, 0xc6, 0xde, 0xfc, 0xf9, 0x77, 0x89,
+ 0x8f, 0x7b, 0xc3, 0x5d, 0xd9, 0xdd, 0xc3, 0x01, 0x73, 0x37, 0xa5, 0xe6, 0xa7, 0xcc, 0x7a, 0xe4,
+ 0x07, 0x62, 0x90, 0x76, 0xed, 0x1e, 0x8d, 0x1c, 0x9f, 0xe1, 0x43, 0x1c, 0x63, 0x27, 0xa4, 0x47,
+ 0x81, 0x53, 0xfc, 0x86, 0xd8, 0x8a, 0xf7, 0xbc, 0x8f, 0x13, 0x41, 0x98, 0x34, 0x12, 0x11, 0xc1,
+ 0x82, 0x9e, 0x97, 0xdf, 0x06, 0x9f, 0x81, 0x3a, 0x57, 0x3e, 0x78, 0x3e, 0xcf, 0xf2, 0xec, 0xc5,
+ 0xda, 0xe6, 0xed, 0x24, 0x27, 0x38, 0x4c, 0x09, 0xf7, 0x26, 0x34, 0x97, 0x9f, 0x5f, 0xa2, 0xd2,
+ 0xc5, 0x25, 0x2a, 0x5d, 0x5f, 0x22, 0xe3, 0xcd, 0x08, 0x19, 0x1f, 0x46, 0xc8, 0x38, 0x1b, 0x21,
+ 0xe3, 0x7c, 0x84, 0x8c, 0x2f, 0x23, 0x64, 0x7c, 0x1d, 0xa1, 0xd2, 0xf5, 0x08, 0x19, 0xa7, 0x57,
+ 0xa8, 0x74, 0x7e, 0x85, 0x4a, 0x17, 0x57, 0xa8, 0xf4, 0x7a, 0xe3, 0x7b, 0xe6, 0x7f, 0xf8, 0x8d,
+ 0xeb, 0xd6, 0x94, 0xc3, 0xc7, 0xdf, 0x02, 0x00, 0x00, 0xff, 0xff, 0xfe, 0xcd, 0xe4, 0x4f, 0xcf,
+ 0x05, 0x00, 0x00,
}
func (this *PrometheusRequest) Equal(that interface{}) bool {
@@ -651,71 +525,6 @@ func (this *SampleStream) Equal(that interface{}) bool {
}
return true
}
-func (this *CachedResponse) Equal(that interface{}) bool {
- if that == nil {
- return this == nil
- }
-
- that1, ok := that.(*CachedResponse)
- if !ok {
- that2, ok := that.(CachedResponse)
- if ok {
- that1 = &that2
- } else {
- return false
- }
- }
- if that1 == nil {
- return this == nil
- } else if this == nil {
- return false
- }
- if this.Key != that1.Key {
- return false
- }
- if len(this.Extents) != len(that1.Extents) {
- return false
- }
- for i := range this.Extents {
- if !this.Extents[i].Equal(&that1.Extents[i]) {
- return false
- }
- }
- return true
-}
-func (this *Extent) Equal(that interface{}) bool {
- if that == nil {
- return this == nil
- }
-
- that1, ok := that.(*Extent)
- if !ok {
- that2, ok := that.(Extent)
- if ok {
- that1 = &that2
- } else {
- return false
- }
- }
- if that1 == nil {
- return this == nil
- } else if this == nil {
- return false
- }
- if this.Start != that1.Start {
- return false
- }
- if this.End != that1.End {
- return false
- }
- if this.TraceId != that1.TraceId {
- return false
- }
- if !this.Response.Equal(that1.Response) {
- return false
- }
- return true
-}
func (this *PrometheusRequest) GoString() string {
if this == nil {
return "nil"
@@ -785,38 +594,6 @@ func (this *SampleStream) GoString() string {
s = append(s, "}")
return strings.Join(s, "")
}
-func (this *CachedResponse) GoString() string {
- if this == nil {
- return "nil"
- }
- s := make([]string, 0, 6)
- s = append(s, "&queryrangebase.CachedResponse{")
- s = append(s, "Key: "+fmt.Sprintf("%#v", this.Key)+",\n")
- if this.Extents != nil {
- vs := make([]*Extent, len(this.Extents))
- for i := range vs {
- vs[i] = &this.Extents[i]
- }
- s = append(s, "Extents: "+fmt.Sprintf("%#v", vs)+",\n")
- }
- s = append(s, "}")
- return strings.Join(s, "")
-}
-func (this *Extent) GoString() string {
- if this == nil {
- return "nil"
- }
- s := make([]string, 0, 8)
- s = append(s, "&queryrangebase.Extent{")
- s = append(s, "Start: "+fmt.Sprintf("%#v", this.Start)+",\n")
- s = append(s, "End: "+fmt.Sprintf("%#v", this.End)+",\n")
- s = append(s, "TraceId: "+fmt.Sprintf("%#v", this.TraceId)+",\n")
- if this.Response != nil {
- s = append(s, "Response: "+fmt.Sprintf("%#v", this.Response)+",\n")
- }
- s = append(s, "}")
- return strings.Join(s, "")
-}
func valueToGoStringQueryrange(v interface{}, typ string) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
@@ -1078,102 +855,6 @@ func (m *SampleStream) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
-func (m *CachedResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *CachedResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *CachedResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.Extents) > 0 {
- for iNdEx := len(m.Extents) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Extents[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintQueryrange(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- if len(m.Key) > 0 {
- i -= len(m.Key)
- copy(dAtA[i:], m.Key)
- i = encodeVarintQueryrange(dAtA, i, uint64(len(m.Key)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Extent) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Extent) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Extent) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.Response != nil {
- {
- size, err := m.Response.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintQueryrange(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x2a
- }
- if len(m.TraceId) > 0 {
- i -= len(m.TraceId)
- copy(dAtA[i:], m.TraceId)
- i = encodeVarintQueryrange(dAtA, i, uint64(len(m.TraceId)))
- i--
- dAtA[i] = 0x22
- }
- if m.End != 0 {
- i = encodeVarintQueryrange(dAtA, i, uint64(m.End))
- i--
- dAtA[i] = 0x10
- }
- if m.Start != 0 {
- i = encodeVarintQueryrange(dAtA, i, uint64(m.Start))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
func encodeVarintQueryrange(dAtA []byte, offset int, v uint64) int {
offset -= sovQueryrange(v)
base := offset
@@ -1288,48 +969,6 @@ func (m *SampleStream) Size() (n int) {
return n
}
-func (m *CachedResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Key)
- if l > 0 {
- n += 1 + l + sovQueryrange(uint64(l))
- }
- if len(m.Extents) > 0 {
- for _, e := range m.Extents {
- l = e.Size()
- n += 1 + l + sovQueryrange(uint64(l))
- }
- }
- return n
-}
-
-func (m *Extent) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Start != 0 {
- n += 1 + sovQueryrange(uint64(m.Start))
- }
- if m.End != 0 {
- n += 1 + sovQueryrange(uint64(m.End))
- }
- l = len(m.TraceId)
- if l > 0 {
- n += 1 + l + sovQueryrange(uint64(l))
- }
- if m.Response != nil {
- l = m.Response.Size()
- n += 1 + l + sovQueryrange(uint64(l))
- }
- return n
-}
-
func sovQueryrange(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
@@ -1352,7 +991,7 @@ func (this *PrometheusRequest) String() string {
`Step:` + fmt.Sprintf("%v", this.Step) + `,`,
`Timeout:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timeout), "Duration", "duration.Duration", 1), `&`, ``, 1) + `,`,
`Query:` + fmt.Sprintf("%v", this.Query) + `,`,
- `CachingOptions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CachingOptions), "CachingOptions", "definitions.CachingOptions", 1), `&`, ``, 1) + `,`,
+ `CachingOptions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CachingOptions), "CachingOptions", "resultscache.CachingOptions", 1), `&`, ``, 1) + `,`,
`Headers:` + repeatedStringForHeaders + `,`,
`}`,
}, "")
@@ -1409,35 +1048,6 @@ func (this *SampleStream) String() string {
}, "")
return s
}
-func (this *CachedResponse) String() string {
- if this == nil {
- return "nil"
- }
- repeatedStringForExtents := "[]Extent{"
- for _, f := range this.Extents {
- repeatedStringForExtents += strings.Replace(strings.Replace(f.String(), "Extent", "Extent", 1), `&`, ``, 1) + ","
- }
- repeatedStringForExtents += "}"
- s := strings.Join([]string{`&CachedResponse{`,
- `Key:` + fmt.Sprintf("%v", this.Key) + `,`,
- `Extents:` + repeatedStringForExtents + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *Extent) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&Extent{`,
- `Start:` + fmt.Sprintf("%v", this.Start) + `,`,
- `End:` + fmt.Sprintf("%v", this.End) + `,`,
- `TraceId:` + fmt.Sprintf("%v", this.TraceId) + `,`,
- `Response:` + strings.Replace(fmt.Sprintf("%v", this.Response), "Any", "types.Any", 1) + `,`,
- `}`,
- }, "")
- return s
-}
func valueToStringQueryrange(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
@@ -2204,284 +1814,6 @@ func (m *SampleStream) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *CachedResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowQueryrange
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: CachedResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: CachedResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowQueryrange
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthQueryrange
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthQueryrange
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Key = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Extents", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowQueryrange
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthQueryrange
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthQueryrange
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Extents = append(m.Extents, Extent{})
- if err := m.Extents[len(m.Extents)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipQueryrange(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthQueryrange
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthQueryrange
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Extent) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowQueryrange
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Extent: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Extent: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType)
- }
- m.Start = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowQueryrange
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Start |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field End", wireType)
- }
- m.End = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowQueryrange
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.End |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowQueryrange
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthQueryrange
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthQueryrange
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.TraceId = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 5:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Response", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowQueryrange
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthQueryrange
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthQueryrange
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Response == nil {
- m.Response = &types.Any{}
- }
- if err := m.Response.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipQueryrange(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthQueryrange
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthQueryrange
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
func skipQueryrange(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
diff --git a/pkg/querier/queryrange/queryrangebase/queryrange.proto b/pkg/querier/queryrange/queryrangebase/queryrange.proto
index ad66551d2bb11..98ddaa2b7d2db 100644
--- a/pkg/querier/queryrange/queryrangebase/queryrange.proto
+++ b/pkg/querier/queryrange/queryrangebase/queryrange.proto
@@ -3,11 +3,11 @@ syntax = "proto3";
package queryrangebase;
import "gogoproto/gogo.proto";
-import "google/protobuf/any.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/timestamp.proto";
import "pkg/logproto/logproto.proto";
import "pkg/querier/queryrange/queryrangebase/definitions/definitions.proto";
+import "pkg/storage/chunk/cache/resultscache/types.proto";
option go_package = "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase";
option (gogoproto.marshaler_all) = true;
@@ -29,7 +29,7 @@ message PrometheusRequest {
(gogoproto.nullable) = false
];
string query = 6;
- definitions.CachingOptions cachingOptions = 7 [(gogoproto.nullable) = false];
+ resultscache.CachingOptions cachingOptions = 7 [(gogoproto.nullable) = false];
repeated definitions.PrometheusRequestHeader Headers = 8 [(gogoproto.jsontag) = "-"];
}
@@ -63,22 +63,3 @@ message SampleStream {
(gogoproto.jsontag) = "values"
];
}
-
-message CachedResponse {
- string key = 1 [(gogoproto.jsontag) = "key"];
-
- // List of cached responses; non-overlapping and in order.
- repeated Extent extents = 2 [
- (gogoproto.nullable) = false,
- (gogoproto.jsontag) = "extents"
- ];
-}
-
-message Extent {
- int64 start = 1 [(gogoproto.jsontag) = "start"];
- int64 end = 2 [(gogoproto.jsontag) = "end"];
- // reserved the previous key to ensure cache transition
- reserved 3;
- string trace_id = 4 [(gogoproto.jsontag) = "-"];
- google.protobuf.Any response = 5 [(gogoproto.jsontag) = "response"];
-}
diff --git a/pkg/querier/queryrange/queryrangebase/results_cache.go b/pkg/querier/queryrange/queryrangebase/results_cache.go
index 1e54b55859402..097dc264d32a0 100644
--- a/pkg/querier/queryrange/queryrangebase/results_cache.go
+++ b/pkg/querier/queryrange/queryrangebase/results_cache.go
@@ -4,35 +4,21 @@ import (
"context"
"flag"
"fmt"
- "net/http"
- "sort"
"strings"
- "time"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
- "github.com/gogo/protobuf/proto"
- "github.com/gogo/protobuf/types"
- "github.com/grafana/dskit/httpgrpc"
"github.com/grafana/dskit/user"
- "github.com/opentracing/opentracing-go"
- otlog "github.com/opentracing/opentracing-go/log"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
- "github.com/prometheus/common/model"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/parser"
- "github.com/uber/jaeger-client-go"
-
- "github.com/grafana/dskit/tenant"
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/storage/chunk/cache"
+ "github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
"github.com/grafana/loki/pkg/util/constants"
- "github.com/grafana/loki/pkg/util/math"
- "github.com/grafana/loki/pkg/util/spanlogger"
- "github.com/grafana/loki/pkg/util/validation"
)
var (
@@ -65,20 +51,9 @@ func NewResultsCacheMetrics(registerer prometheus.Registerer) *ResultsCacheMetri
}
}
-type CacheGenNumberLoader interface {
- GetResultsCacheGenNumber(tenantIDs []string) string
- Stop()
-}
-
// ResultsCacheConfig is the config for the results cache.
type ResultsCacheConfig struct {
- CacheConfig cache.Config `yaml:"cache"`
- Compression string `yaml:"compression"`
-}
-
-func (cfg *ResultsCacheConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix string) {
- cfg.CacheConfig.RegisterFlagsWithPrefix(prefix, "", f)
- f.StringVar(&cfg.Compression, prefix+"compression", "", "Use compression in cache. The default is an empty value '', which disables compression. Supported values are: 'snappy' and ''.")
+ resultscache.Config `yaml:",inline"`
}
// RegisterFlags registers flags.
@@ -86,22 +61,9 @@ func (cfg *ResultsCacheConfig) RegisterFlags(f *flag.FlagSet) {
cfg.RegisterFlagsWithPrefix(f, "frontend.")
}
-func (cfg *ResultsCacheConfig) Validate() error {
- switch cfg.Compression {
- case "snappy", "":
- // valid
- default:
- return errors.Errorf("unsupported compression type: %s", cfg.Compression)
- }
-
- return nil
-}
-
// Extractor is used by the cache to extract a subset of a response from a cache entry.
type Extractor interface {
- // Extract extracts a subset of a response from the `start` and `end` timestamps in milliseconds
- // in the `res` response which spans from `resStart` to `resEnd`.
- Extract(start, end int64, res Response, resStart, resEnd int64) Response
+ resultscache.Extractor
ResponseWithoutHeaders(resp Response) Response
}
@@ -109,7 +71,7 @@ type Extractor interface {
type PrometheusResponseExtractor struct{}
// Extract extracts response for specific a range from a response.
-func (PrometheusResponseExtractor) Extract(start, end int64, res Response, _, _ int64) Response {
+func (PrometheusResponseExtractor) Extract(start, end int64, res resultscache.Response, _, _ int64) resultscache.Response {
promRes := res.(*PrometheusResponse)
return &PrometheusResponse{
Status: StatusSuccess,
@@ -134,39 +96,17 @@ func (PrometheusResponseExtractor) ResponseWithoutHeaders(resp Response) Respons
}
}
-// CacheSplitter generates cache keys. This is a useful interface for downstream
-// consumers who wish to implement their own strategies.
-type CacheSplitter interface {
- GenerateCacheKey(ctx context.Context, userID string, r Request) string
-}
-
-// constSplitter is a utility for using a constant split interval when determining cache keys
-type constSplitter time.Duration
-
-// GenerateCacheKey generates a cache key based on the userID, Request and interval.
-func (t constSplitter) GenerateCacheKey(_ context.Context, userID string, r Request) string {
- currentInterval := r.GetStart().UnixMilli() / int64(time.Duration(t)/time.Millisecond)
- return fmt.Sprintf("%s:%s:%d:%d", userID, r.GetQuery(), r.GetStep(), currentInterval)
-}
-
// ShouldCacheFn checks whether the current request should go to cache
// or not. If not, just send the request to next handler.
type ShouldCacheFn func(ctx context.Context, r Request) bool
+// ParallelismForReqFn returns the parallelism for a given request.
+type ParallelismForReqFn func(ctx context.Context, tenantIDs []string, r Request) int
+
type resultsCache struct {
- logger log.Logger
- next Handler
- cache cache.Cache
- limits Limits
- splitter CacheSplitter
-
- extractor Extractor
- minCacheExtent int64 // discard any cache extent smaller than this
- merger Merger
- cacheGenNumberLoader CacheGenNumberLoader
- shouldCache ShouldCacheFn
- parallelismForReq func(ctx context.Context, tenantIDs []string, r Request) int
- retentionEnabled bool
+ cache *resultscache.ResultsCache
+ logger log.Logger
+ cacheGenNumberLoader resultscache.CacheGenNumberLoader
metrics *ResultsCacheMetrics
}
@@ -179,13 +119,13 @@ type resultsCache struct {
func NewResultsCacheMiddleware(
logger log.Logger,
c cache.Cache,
- splitter CacheSplitter,
+ keygen resultscache.KeyGenerator,
limits Limits,
merger Merger,
extractor Extractor,
- cacheGenNumberLoader CacheGenNumberLoader,
+ cacheGenNumberLoader resultscache.CacheGenNumberLoader,
shouldCache ShouldCacheFn,
- parallelismForReq func(ctx context.Context, tenantIDs []string, r Request) int,
+ parallelismForReq ParallelismForReqFn,
retentionEnabled bool,
metrics *ResultsCacheMetrics,
) (Middleware, error) {
@@ -193,78 +133,63 @@ func NewResultsCacheMiddleware(
c = cache.NewCacheGenNumMiddleware(c)
}
+ out := &resultsCache{
+ logger: logger,
+ cacheGenNumberLoader: cacheGenNumberLoader,
+ metrics: metrics,
+ }
+
return MiddlewareFunc(func(next Handler) Handler {
- return &resultsCache{
- logger: logger,
- next: next,
- cache: c,
- limits: limits,
- merger: merger,
- extractor: extractor,
- minCacheExtent: (5 * time.Minute).Milliseconds(),
- splitter: splitter,
- cacheGenNumberLoader: cacheGenNumberLoader,
- shouldCache: shouldCache,
- parallelismForReq: parallelismForReq,
- retentionEnabled: retentionEnabled,
- metrics: metrics,
+ nextCacheWrapper := resultscache.HandlerFunc(func(ctx context.Context, req resultscache.Request) (resultscache.Response, error) {
+ return next.Do(ctx, req.(Request))
+ })
+
+ shouldCacheReqWrapper := func(ctx context.Context, req resultscache.Request) bool {
+ if shouldCache == nil {
+ return true
+ }
+ return shouldCache(ctx, req.(Request))
+ }
+
+ shouldCacheResWrapper := func(ctx context.Context, req resultscache.Request, res resultscache.Response, maxCacheTime int64) bool {
+ return out.shouldCacheResponse(ctx, req.(Request), res.(Response), maxCacheTime)
}
+
+ parallelismForReqWrapper := func(ctx context.Context, tenantIDs []string, req resultscache.Request) int {
+ return parallelismForReq(ctx, tenantIDs, req.(Request))
+ }
+
+ out.cache = resultscache.NewResultsCache(
+ logger,
+ c,
+ nextCacheWrapper,
+ keygen,
+ limits,
+ FromQueryResponseMergerToCacheResponseMerger(merger),
+ extractor,
+ shouldCacheReqWrapper,
+ shouldCacheResWrapper,
+ parallelismForReqWrapper,
+ cacheGenNumberLoader,
+ retentionEnabled,
+ )
+
+ return out
}), nil
}
func (s resultsCache) Do(ctx context.Context, r Request) (Response, error) {
- sp, ctx := opentracing.StartSpanFromContext(ctx, "resultsCache.Do")
- defer sp.Finish()
- tenantIDs, err := tenant.TenantIDs(ctx)
+ res, err := s.cache.Do(ctx, r.(resultscache.Request))
if err != nil {
- return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error())
- }
-
- if s.shouldCache != nil && !s.shouldCache(ctx, r) {
- return s.next.Do(ctx, r)
- }
-
- if s.cacheGenNumberLoader != nil && s.retentionEnabled {
- ctx = cache.InjectCacheGenNumber(ctx, s.cacheGenNumberLoader.GetResultsCacheGenNumber(tenantIDs))
- }
-
- var (
- key = s.splitter.GenerateCacheKey(ctx, tenant.JoinTenantIDs(tenantIDs), r)
- extents []Extent
- response Response
- )
-
- sp.LogKV(
- "query", r.GetQuery(),
- "step", time.UnixMilli(r.GetStep()),
- "start", r.GetStart(),
- "end", r.GetEnd(),
- "key", key,
- )
-
- cacheFreshnessCapture := func(id string) time.Duration { return s.limits.MaxCacheFreshness(ctx, id) }
- maxCacheFreshness := validation.MaxDurationPerTenant(tenantIDs, cacheFreshnessCapture)
- maxCacheTime := int64(model.Now().Add(-maxCacheFreshness))
- if r.GetStart().UnixMilli() > maxCacheTime {
- return s.next.Do(ctx, r)
- }
-
- cached, ok := s.get(ctx, key)
- if ok {
- response, extents, err = s.handleHit(ctx, r, cached, maxCacheTime)
- } else {
- response, extents, err = s.handleMiss(ctx, r, maxCacheTime)
+ return nil, err
}
- if err == nil && len(extents) > 0 {
- extents, err := s.filterRecentExtents(r, maxCacheFreshness, extents)
- if err != nil {
- return nil, err
- }
- s.put(ctx, key, extents)
+ queryRes, ok := res.(Response)
+ if !ok {
+ return nil, fmt.Errorf("could not cast cache response to query response")
}
- return response, err
+ return queryRes, nil
}
// shouldCacheResponse says whether the response should be cached or not.
@@ -379,303 +304,6 @@ func getHeaderValuesWithName(r Response, headerName string) (headerValues []stri
return
}
-func (s resultsCache) handleMiss(ctx context.Context, r Request, maxCacheTime int64) (Response, []Extent, error) {
- response, err := s.next.Do(ctx, r)
- if err != nil {
- return nil, nil, err
- }
-
- if !s.shouldCacheResponse(ctx, r, response, maxCacheTime) {
- return response, []Extent{}, nil
- }
-
- extent, err := toExtent(ctx, r, s.extractor.ResponseWithoutHeaders(response))
- if err != nil {
- return nil, nil, err
- }
-
- extents := []Extent{
- extent,
- }
- return response, extents, nil
-}
-
-func (s resultsCache) handleHit(ctx context.Context, r Request, extents []Extent, maxCacheTime int64) (Response, []Extent, error) {
- var (
- reqResps []RequestResponse
- err error
- )
- sp, ctx := opentracing.StartSpanFromContext(ctx, "handleHit")
- defer sp.Finish()
- log := spanlogger.FromContext(ctx)
- defer log.Finish()
-
- requests, responses, err := s.partition(r, extents)
- if err != nil {
- return nil, nil, err
- }
- if len(requests) == 0 {
- response, err := s.merger.MergeResponse(responses...)
- // No downstream requests so no need to write back to the cache.
- return response, nil, err
- }
-
- tenantIDs, err := tenant.TenantIDs(ctx)
- if err != nil {
- return nil, nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error())
- }
- reqResps, err = DoRequests(ctx, s.next, requests, s.parallelismForReq(ctx, tenantIDs, r))
-
- if err != nil {
- return nil, nil, err
- }
-
- for _, reqResp := range reqResps {
- responses = append(responses, reqResp.Response)
- if !s.shouldCacheResponse(ctx, r, reqResp.Response, maxCacheTime) {
- continue
- }
- extent, err := toExtent(ctx, reqResp.Request, s.extractor.ResponseWithoutHeaders(reqResp.Response))
- if err != nil {
- return nil, nil, err
- }
- extents = append(extents, extent)
- }
- sort.Slice(extents, func(i, j int) bool {
- if extents[i].Start == extents[j].Start {
- // as an optimization, when two extents start at the same time, we
- // put the bigger extent at the front of the slice, which helps
- // reduce the amount of merging we have to do later.
- return extents[i].End > extents[j].End
- }
-
- return extents[i].Start < extents[j].Start
- })
-
- // Merge any extents - potentially overlapping
- accumulator, err := newAccumulator(extents[0])
- if err != nil {
- return nil, nil, err
- }
- mergedExtents := make([]Extent, 0, len(extents))
-
- for i := 1; i < len(extents); i++ {
- if accumulator.End+r.GetStep() < extents[i].Start {
- mergedExtents, err = merge(mergedExtents, accumulator)
- if err != nil {
- return nil, nil, err
- }
- accumulator, err = newAccumulator(extents[i])
- if err != nil {
- return nil, nil, err
- }
- continue
- }
-
- if accumulator.End >= extents[i].End {
- continue
- }
-
- accumulator.TraceId = jaegerTraceID(ctx)
- accumulator.End = extents[i].End
- currentRes, err := extents[i].toResponse()
- if err != nil {
- return nil, nil, err
- }
- merged, err := s.merger.MergeResponse(accumulator.Response, currentRes)
- if err != nil {
- return nil, nil, err
- }
- accumulator.Response = merged
- }
-
- mergedExtents, err = merge(mergedExtents, accumulator)
- if err != nil {
- return nil, nil, err
- }
-
- response, err := s.merger.MergeResponse(responses...)
- return response, mergedExtents, err
-}
-
-type accumulator struct {
- Response
- Extent
-}
-
-func merge(extents []Extent, acc *accumulator) ([]Extent, error) {
- anyResp, err := types.MarshalAny(acc.Response)
- if err != nil {
- return nil, err
- }
- return append(extents, Extent{
- Start: acc.Extent.Start,
- End: acc.Extent.End,
- Response: anyResp,
- TraceId: acc.Extent.TraceId,
- }), nil
-}
-
-func newAccumulator(base Extent) (*accumulator, error) {
- res, err := base.toResponse()
- if err != nil {
- return nil, err
- }
- return &accumulator{
- Response: res,
- Extent: base,
- }, nil
-}
-
-func toExtent(ctx context.Context, req Request, res Response) (Extent, error) {
- anyResp, err := types.MarshalAny(res)
- if err != nil {
- return Extent{}, err
- }
- return Extent{
- Start: req.GetStart().UnixMilli(),
- End: req.GetEnd().UnixMilli(),
- Response: anyResp,
- TraceId: jaegerTraceID(ctx),
- }, nil
-}
-
-// partition calculates the required requests to satisfy req given the cached data.
-// extents must be in order by start time.
-func (s resultsCache) partition(req Request, extents []Extent) ([]Request, []Response, error) {
- var requests []Request
- var cachedResponses []Response
- start := req.GetStart().UnixMilli()
- end := req.GetEnd().UnixMilli()
-
- for _, extent := range extents {
- // If there is no overlap, ignore this extent.
- if extent.GetEnd() < start || extent.Start > end {
- continue
- }
-
- // If this extent is tiny and the request is not tiny, discard it: it is more efficient to do a few larger queries.
- // Hopefully a tiny request can turn a tiny extent into a not-so-tiny extent.
-
- // However, if the step is large enough, the split_query_by_interval middleware would generate a query with the same start and end.
- // For example, this happens if the step size is more than 12h and the interval is 24h.
- // This means the extent's start and end time would be the same, even if the time range covers several hours.
- if (req.GetStart() != req.GetEnd()) && ((end - start) > s.minCacheExtent) && (extent.End-extent.Start < s.minCacheExtent) {
- continue
- }
-
- // If there is a bit missing at the front, make a request for that.
- if start < extent.Start {
- r := req.WithStartEnd(time.UnixMilli(start), time.UnixMilli(extent.Start))
- requests = append(requests, r)
- }
- res, err := extent.toResponse()
- if err != nil {
- return nil, nil, err
- }
- // extract the overlap from the cached extent.
- cachedResponses = append(cachedResponses, s.extractor.Extract(start, end, res, extent.GetStart(), extent.GetEnd()))
- start = extent.End
- }
-
- // Lastly, make a request for any data missing at the end.
- if start < req.GetEnd().UnixMilli() {
- r := req.WithStartEnd(time.UnixMilli(start), time.UnixMilli(end))
- requests = append(requests, r)
- }
-
- // If start and end are the same (valid in promql), start == req.GetEnd() and we won't do the query.
- // But we should only do the request if we don't have a valid cached response for it.
- if req.GetStart() == req.GetEnd() && len(cachedResponses) == 0 {
- requests = append(requests, req)
- }
-
- return requests, cachedResponses, nil
-}
-
-func (s resultsCache) filterRecentExtents(req Request, maxCacheFreshness time.Duration, extents []Extent) ([]Extent, error) {
- step := math.Max64(1, req.GetStep())
- maxCacheTime := (int64(model.Now().Add(-maxCacheFreshness)) / step) * step
- for i := range extents {
- // Never cache data for the latest freshness period.
- if extents[i].End > maxCacheTime {
- extents[i].End = maxCacheTime
- res, err := extents[i].toResponse()
- if err != nil {
- return nil, err
- }
- extracted := s.extractor.Extract(extents[i].GetStart(), maxCacheTime, res, extents[i].GetStart(), extents[i].GetEnd())
- anyResp, err := types.MarshalAny(extracted)
- if err != nil {
- return nil, err
- }
- extents[i].Response = anyResp
- }
- }
- return extents, nil
-}
-
-func (s resultsCache) get(ctx context.Context, key string) ([]Extent, bool) {
- found, bufs, _, _ := s.cache.Fetch(ctx, []string{cache.HashKey(key)})
- if len(found) != 1 {
- return nil, false
- }
-
- var resp CachedResponse
- sp, ctx := opentracing.StartSpanFromContext(ctx, "unmarshal-extent") //nolint:ineffassign,staticcheck
- defer sp.Finish()
- log := spanlogger.FromContext(ctx)
- defer log.Finish()
-
- log.LogFields(otlog.Int("bytes", len(bufs[0])))
-
- if err := proto.Unmarshal(bufs[0], &resp); err != nil {
- level.Error(log).Log("msg", "error unmarshalling cached value", "err", err)
- log.Error(err)
- return nil, false
- }
-
- if resp.Key != key {
- return nil, false
- }
-
- // Refreshes the cache if it contains an old proto schema.
- for _, e := range resp.Extents {
- if e.Response == nil {
- return nil, false
- }
- }
-
- return resp.Extents, true
-}
-
-func (s resultsCache) put(ctx context.Context, key string, extents []Extent) {
- buf, err := proto.Marshal(&CachedResponse{
- Key: key,
- Extents: extents,
- })
- if err != nil {
- level.Error(s.logger).Log("msg", "error marshalling cached value", "err", err)
- return
- }
-
- _ = s.cache.Store(ctx, []string{cache.HashKey(key)}, [][]byte{buf})
-}
-
-func jaegerTraceID(ctx context.Context) string {
- span := opentracing.SpanFromContext(ctx)
- if span == nil {
- return ""
- }
-
- spanContext, ok := span.Context().(jaeger.SpanContext)
- if !ok {
- return ""
- }
-
- return spanContext.TraceID().String()
-}
-
func extractMatrix(start, end int64, matrix []SampleStream) []SampleStream {
result := make([]SampleStream, 0, len(matrix))
for _, stream := range matrix {
@@ -702,20 +330,3 @@ func extractSampleStream(start, end int64, stream SampleStream) (SampleStream, b
}
return result, true
}
-
-func (e *Extent) toResponse() (Response, error) {
- msg, err := types.EmptyAny(e.Response)
- if err != nil {
- return nil, err
- }
-
- if err := types.UnmarshalAny(e.Response, msg); err != nil {
- return nil, err
- }
-
- resp, ok := msg.(Response)
- if !ok {
- return nil, fmt.Errorf("bad cached type")
- }
- return resp, nil
-}
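For orientation, a minimal sketch of how a caller can wire the refactored middleware, mirroring the updated tests further down in this diff. The constructor signature, resultscache.ConstSplitter, and the cache.New call come from this change; the helper name, the nop logger, the fixed parallelism callback, and the disabled retention/metrics arguments are illustrative assumptions, and the snippet assumes the queryrangebase package context.

// Sketch only: wiring the results-cache middleware on top of the shared resultscache package.
func newCachedMiddleware(cfg ResultsCacheConfig, limits Limits) (Middleware, error) {
	c, err := cache.New(cfg.CacheConfig, nil, log.NewNopLogger(), stats.ResultCache, constants.Loki)
	if err != nil {
		return nil, err
	}
	return NewResultsCacheMiddleware(
		log.NewNopLogger(),
		c,
		resultscache.ConstSplitter(day), // cache-key generation now lives in the shared resultscache package
		limits,
		PrometheusCodec,               // Merger
		PrometheusResponseExtractor{}, // Extractor
		nil,                           // cacheGenNumberLoader
		nil,                           // shouldCache: nil means every request is considered cacheable
		func(_ context.Context, _ []string, _ Request) int { return 1 }, // ParallelismForReqFn (illustrative)
		false, // retentionEnabled
		nil,   // metrics
	)
}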
diff --git a/pkg/querier/queryrange/queryrangebase/results_cache_test.go b/pkg/querier/queryrange/queryrangebase/results_cache_test.go
index 8020764d1f4a3..ff5e5be09a48f 100644
--- a/pkg/querier/queryrange/queryrangebase/results_cache_test.go
+++ b/pkg/querier/queryrange/queryrangebase/results_cache_test.go
@@ -3,7 +3,6 @@ package queryrangebase
import (
"context"
"fmt"
- "strconv"
"testing"
"time"
@@ -18,6 +17,7 @@ import (
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/logqlmodel/stats"
"github.com/grafana/loki/pkg/storage/chunk/cache"
+ "github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
"github.com/grafana/loki/pkg/util/constants"
)
@@ -398,362 +398,13 @@ func TestShouldCache(t *testing.T) {
}
}
-func TestPartition(t *testing.T) {
- for _, tc := range []struct {
- name string
- input Request
- prevCachedResponse []Extent
- expectedRequests []Request
- expectedCachedResponse []Response
- }{
- {
- name: "Test a complete hit.",
- input: &PrometheusRequest{
- Start: time.UnixMilli(0),
- End: time.UnixMilli(100),
- },
- prevCachedResponse: []Extent{
- mkExtent(0, 100),
- },
- expectedCachedResponse: []Response{
- mkAPIResponse(0, 100, 10),
- },
- },
-
- {
- name: "Test with a complete miss.",
- input: &PrometheusRequest{
- Start: time.UnixMilli(0),
- End: time.UnixMilli(100),
- },
- prevCachedResponse: []Extent{
- mkExtent(110, 210),
- },
- expectedRequests: []Request{
- &PrometheusRequest{
- Start: time.UnixMilli(0),
- End: time.UnixMilli(100),
- },
- },
- },
- {
- name: "Test a partial hit.",
- input: &PrometheusRequest{
- Start: time.UnixMilli(0),
- End: time.UnixMilli(100),
- },
- prevCachedResponse: []Extent{
- mkExtent(50, 100),
- },
- expectedRequests: []Request{
- &PrometheusRequest{
- Start: time.UnixMilli(0),
- End: time.UnixMilli(50),
- },
- },
- expectedCachedResponse: []Response{
- mkAPIResponse(50, 100, 10),
- },
- },
- {
- name: "Test multiple partial hits.",
- input: &PrometheusRequest{
- Start: time.UnixMilli(100),
- End: time.UnixMilli(200),
- },
- prevCachedResponse: []Extent{
- mkExtent(50, 120),
- mkExtent(160, 250),
- },
- expectedRequests: []Request{
- &PrometheusRequest{
- Start: time.UnixMilli(120),
- End: time.UnixMilli(160),
- },
- },
- expectedCachedResponse: []Response{
- mkAPIResponse(100, 120, 10),
- mkAPIResponse(160, 200, 10),
- },
- },
- {
- name: "Partial hits with tiny gap.",
- input: &PrometheusRequest{
- Start: time.UnixMilli(100),
- End: time.UnixMilli(160),
- },
- prevCachedResponse: []Extent{
- mkExtent(50, 120),
- mkExtent(122, 130),
- },
- expectedRequests: []Request{
- &PrometheusRequest{
- Start: time.UnixMilli(120),
- End: time.UnixMilli(160),
- },
- },
- expectedCachedResponse: []Response{
- mkAPIResponse(100, 120, 10),
- },
- },
- {
- name: "Extent is outside the range and the request has a single step (same start and end).",
- input: &PrometheusRequest{
- Start: time.UnixMilli(100),
- End: time.UnixMilli(100),
- },
- prevCachedResponse: []Extent{
- mkExtent(50, 90),
- },
- expectedRequests: []Request{
- &PrometheusRequest{
- Start: time.UnixMilli(100),
- End: time.UnixMilli(100),
- },
- },
- },
- {
- name: "Test when hit has a large step and only a single sample extent.",
- // If there is only a single sample in the split interval, start and end will be the same.
- input: &PrometheusRequest{
- Start: time.UnixMilli(100),
- End: time.UnixMilli(100),
- },
- prevCachedResponse: []Extent{
- mkExtent(100, 100),
- },
- expectedCachedResponse: []Response{
- mkAPIResponse(100, 105, 10),
- },
- },
- } {
- t.Run(tc.name, func(t *testing.T) {
- s := resultsCache{
- extractor: PrometheusResponseExtractor{},
- minCacheExtent: 10,
- }
- reqs, resps, err := s.partition(tc.input, tc.prevCachedResponse)
- require.Nil(t, err)
- require.Equal(t, tc.expectedRequests, reqs)
- require.Equal(t, tc.expectedCachedResponse, resps)
- })
- }
-}
-
-func TestHandleHit(t *testing.T) {
- for _, tc := range []struct {
- name string
- input Request
- cachedEntry []Extent
- expectedUpdatedCachedEntry []Extent
- }{
- {
- name: "Should drop tiny extent that overlaps with non-tiny request only",
- input: &PrometheusRequest{
- Start: time.UnixMilli(100),
- End: time.UnixMilli(120),
- Step: 5,
- },
- cachedEntry: []Extent{
- mkExtentWithStep(0, 50, 5),
- mkExtentWithStep(60, 65, 5),
- mkExtentWithStep(100, 105, 5),
- mkExtentWithStep(110, 150, 5),
- mkExtentWithStep(160, 165, 5),
- },
- expectedUpdatedCachedEntry: []Extent{
- mkExtentWithStep(0, 50, 5),
- mkExtentWithStep(60, 65, 5),
- mkExtentWithStep(100, 150, 5),
- mkExtentWithStep(160, 165, 5),
- },
- },
- {
- name: "Should replace tiny extents that are cover by bigger request",
- input: &PrometheusRequest{
- Start: time.UnixMilli(100),
- End: time.UnixMilli(200),
- Step: 5,
- },
- cachedEntry: []Extent{
- mkExtentWithStep(0, 50, 5),
- mkExtentWithStep(60, 65, 5),
- mkExtentWithStep(100, 105, 5),
- mkExtentWithStep(110, 115, 5),
- mkExtentWithStep(120, 125, 5),
- mkExtentWithStep(220, 225, 5),
- mkExtentWithStep(240, 250, 5),
- },
- expectedUpdatedCachedEntry: []Extent{
- mkExtentWithStep(0, 50, 5),
- mkExtentWithStep(60, 65, 5),
- mkExtentWithStep(100, 200, 5),
- mkExtentWithStep(220, 225, 5),
- mkExtentWithStep(240, 250, 5),
- },
- },
- {
- name: "Should not drop tiny extent that completely overlaps with tiny request",
- input: &PrometheusRequest{
- Start: time.UnixMilli(100),
- End: time.UnixMilli(105),
- Step: 5,
- },
- cachedEntry: []Extent{
- mkExtentWithStep(0, 50, 5),
- mkExtentWithStep(60, 65, 5),
- mkExtentWithStep(100, 105, 5),
- mkExtentWithStep(160, 165, 5),
- },
- expectedUpdatedCachedEntry: nil, // no cache update needed, request fulfilled using cache
- },
- {
- name: "Should not drop tiny extent that partially center-overlaps with tiny request",
- input: &PrometheusRequest{
- Start: time.UnixMilli(106),
- End: time.UnixMilli(108),
- Step: 2,
- },
- cachedEntry: []Extent{
- mkExtentWithStep(60, 64, 2),
- mkExtentWithStep(104, 110, 2),
- mkExtentWithStep(160, 166, 2),
- },
- expectedUpdatedCachedEntry: nil, // no cache update needed, request fulfilled using cache
- },
- {
- name: "Should not drop tiny extent that partially left-overlaps with tiny request",
- input: &PrometheusRequest{
- Start: time.UnixMilli(100),
- End: time.UnixMilli(106),
- Step: 2,
- },
- cachedEntry: []Extent{
- mkExtentWithStep(60, 64, 2),
- mkExtentWithStep(104, 110, 2),
- mkExtentWithStep(160, 166, 2),
- },
- expectedUpdatedCachedEntry: []Extent{
- mkExtentWithStep(60, 64, 2),
- mkExtentWithStep(100, 110, 2),
- mkExtentWithStep(160, 166, 2),
- },
- },
- {
- name: "Should not drop tiny extent that partially right-overlaps with tiny request",
- input: &PrometheusRequest{
- Start: time.UnixMilli(100),
- End: time.UnixMilli(106),
- Step: 2,
- },
- cachedEntry: []Extent{
- mkExtentWithStep(60, 64, 2),
- mkExtentWithStep(98, 102, 2),
- mkExtentWithStep(160, 166, 2),
- },
- expectedUpdatedCachedEntry: []Extent{
- mkExtentWithStep(60, 64, 2),
- mkExtentWithStep(98, 106, 2),
- mkExtentWithStep(160, 166, 2),
- },
- },
- {
- name: "Should merge fragmented extents if request fills the hole",
- input: &PrometheusRequest{
- Start: time.UnixMilli(40),
- End: time.UnixMilli(80),
- Step: 20,
- },
- cachedEntry: []Extent{
- mkExtentWithStep(0, 20, 20),
- mkExtentWithStep(80, 100, 20),
- },
- expectedUpdatedCachedEntry: []Extent{
- mkExtentWithStep(0, 100, 20),
- },
- },
- {
- name: "Should left-extend extent if request starts earlier than extent in cache",
- input: &PrometheusRequest{
- Start: time.UnixMilli(40),
- End: time.UnixMilli(80),
- Step: 20,
- },
- cachedEntry: []Extent{
- mkExtentWithStep(60, 160, 20),
- },
- expectedUpdatedCachedEntry: []Extent{
- mkExtentWithStep(40, 160, 20),
- },
- },
- {
- name: "Should right-extend extent if request ends later than extent in cache",
- input: &PrometheusRequest{
- Start: time.UnixMilli(100),
- End: time.UnixMilli(180),
- Step: 20,
- },
- cachedEntry: []Extent{
- mkExtentWithStep(60, 160, 20),
- },
- expectedUpdatedCachedEntry: []Extent{
- mkExtentWithStep(60, 180, 20),
- },
- },
- {
- name: "Should not throw error if complete-overlapped smaller Extent is erroneous",
- input: &PrometheusRequest{
- // This request is carefully crafted such that cachedEntry is not used to fulfill
- // the request.
- Start: time.UnixMilli(160),
- End: time.UnixMilli(180),
- Step: 20,
- },
- cachedEntry: []Extent{
- {
- Start: 60,
- End: 80,
-
- // if the optimization of "sorting by End when Start of 2 Extents are equal" is not there, this nil
- // response would cause error during Extents merge phase. With the optimization
- // this bad Extent should be dropped. The good Extent below can be used instead.
- Response: nil,
- },
- mkExtentWithStep(60, 160, 20),
- },
- expectedUpdatedCachedEntry: []Extent{
- mkExtentWithStep(60, 180, 20),
- },
- },
- } {
- t.Run(tc.name, func(t *testing.T) {
- sut := resultsCache{
- extractor: PrometheusResponseExtractor{},
- minCacheExtent: 10,
- limits: mockLimits{},
- merger: PrometheusCodec,
- parallelismForReq: func(_ context.Context, tenantIDs []string, r Request) int { return 1 },
- next: HandlerFunc(func(_ context.Context, req Request) (Response, error) {
- return mkAPIResponse(req.GetStart().UnixMilli(), req.GetEnd().UnixMilli(), req.GetStep()), nil
- }),
- }
-
- ctx := user.InjectOrgID(context.Background(), "1")
- response, updatedExtents, err := sut.handleHit(ctx, tc.input, tc.cachedEntry, 0)
- require.NoError(t, err)
-
- expectedResponse := mkAPIResponse(tc.input.GetStart().UnixMilli(), tc.input.GetEnd().UnixMilli(), tc.input.GetStep())
- require.Equal(t, expectedResponse, response, "response does not match the expectation")
- require.Equal(t, tc.expectedUpdatedCachedEntry, updatedExtents, "updated cache entry does not match the expectation")
- })
- }
-}
-
func TestResultsCache(t *testing.T) {
calls := 0
cfg := ResultsCacheConfig{
- CacheConfig: cache.Config{
- Cache: cache.NewMockCache(),
+ Config: resultscache.Config{
+ CacheConfig: cache.Config{
+ Cache: cache.NewMockCache(),
+ },
},
}
c, err := cache.New(cfg.CacheConfig, nil, log.NewNopLogger(), stats.ResultCache, constants.Loki)
@@ -761,7 +412,7 @@ func TestResultsCache(t *testing.T) {
rcm, err := NewResultsCacheMiddleware(
log.NewNopLogger(),
c,
- constSplitter(day),
+ resultscache.ConstSplitter(day),
mockLimits{},
PrometheusCodec,
PrometheusResponseExtractor{},
@@ -807,7 +458,7 @@ func TestResultsCacheRecent(t *testing.T) {
rcm, err := NewResultsCacheMiddleware(
log.NewNopLogger(),
c,
- constSplitter(day),
+ resultscache.ConstSplitter(day),
mockLimits{maxCacheFreshness: 10 * time.Minute},
PrometheusCodec,
PrometheusResponseExtractor{},
@@ -844,122 +495,6 @@ func TestResultsCacheRecent(t *testing.T) {
require.Equal(t, parsedResponse, resp)
}
-func TestResultsCacheMaxFreshness(t *testing.T) {
- modelNow := model.Now()
- for i, tc := range []struct {
- fakeLimits Limits
- Handler HandlerFunc
- expectedResponse *PrometheusResponse
- }{
- {
- fakeLimits: mockLimits{maxCacheFreshness: 5 * time.Second},
- Handler: nil,
- expectedResponse: mkAPIResponse(int64(modelNow)-(50*1e3), int64(modelNow)-(10*1e3), 10),
- },
- {
- // should not lookup cache because per-tenant override will be applied
- fakeLimits: mockLimits{maxCacheFreshness: 10 * time.Minute},
- Handler: HandlerFunc(func(_ context.Context, _ Request) (Response, error) {
- return parsedResponse, nil
- }),
- expectedResponse: parsedResponse,
- },
- } {
- t.Run(strconv.Itoa(i), func(t *testing.T) {
- var cfg ResultsCacheConfig
- flagext.DefaultValues(&cfg)
- cfg.CacheConfig.Cache = cache.NewMockCache()
- c, err := cache.New(cfg.CacheConfig, nil, log.NewNopLogger(), stats.ResultCache, constants.Loki)
- require.NoError(t, err)
- fakeLimits := tc.fakeLimits
- rcm, err := NewResultsCacheMiddleware(
- log.NewNopLogger(),
- c,
- constSplitter(day),
- fakeLimits,
- PrometheusCodec,
- PrometheusResponseExtractor{},
- nil,
- nil,
- func(_ context.Context, tenantIDs []string, r Request) int {
- return tc.fakeLimits.MaxQueryParallelism(context.Background(), "fake")
- },
- false,
- nil,
- )
- require.NoError(t, err)
-
- // create cache with handler
- rc := rcm.Wrap(tc.Handler)
- ctx := user.InjectOrgID(context.Background(), "1")
-
- // create request with start end within the key extents
- req := parsedRequest.WithStartEnd(time.UnixMilli(int64(modelNow)-(50*1e3)), time.UnixMilli(int64(modelNow)-(10*1e3)))
-
- // fill cache
- key := constSplitter(day).GenerateCacheKey(context.Background(), "1", req)
- rc.(*resultsCache).put(ctx, key, []Extent{mkExtent(int64(modelNow)-(600*1e3), int64(modelNow))})
-
- resp, err := rc.Do(ctx, req)
- require.NoError(t, err)
- require.Equal(t, tc.expectedResponse, resp)
- })
- }
-}
-
-func Test_resultsCache_MissingData(t *testing.T) {
- cfg := ResultsCacheConfig{
- CacheConfig: cache.Config{
- Cache: cache.NewMockCache(),
- },
- }
- c, err := cache.New(cfg.CacheConfig, nil, log.NewNopLogger(), stats.ResultCache, constants.Loki)
- require.NoError(t, err)
- rm, err := NewResultsCacheMiddleware(
- log.NewNopLogger(),
- c,
- constSplitter(day),
- mockLimits{},
- PrometheusCodec,
- PrometheusResponseExtractor{},
- nil,
- nil,
- func(_ context.Context, tenantIDs []string, r Request) int {
- return mockLimits{}.MaxQueryParallelism(context.Background(), "fake")
- },
- false,
- nil,
- )
- require.NoError(t, err)
- rc := rm.Wrap(nil).(*resultsCache)
- ctx := context.Background()
-
- // fill up the cache
- rc.put(ctx, "empty", []Extent{{
- Start: 100,
- End: 200,
- Response: nil,
- }})
- rc.put(ctx, "notempty", []Extent{mkExtent(100, 120)})
- rc.put(ctx, "mixed", []Extent{mkExtent(100, 120), {
- Start: 120,
- End: 200,
- Response: nil,
- }})
-
- extents, hit := rc.get(ctx, "empty")
- require.Empty(t, extents)
- require.False(t, hit)
-
- extents, hit = rc.get(ctx, "notempty")
- require.Equal(t, len(extents), 1)
- require.True(t, hit)
-
- extents, hit = rc.get(ctx, "mixed")
- require.Equal(t, len(extents), 0)
- require.False(t, hit)
-}
-
func toMs(t time.Duration) int64 {
return t.Nanoseconds() / (int64(time.Millisecond) / int64(time.Nanosecond))
}
@@ -984,7 +519,7 @@ func TestConstSplitter_generateCacheKey(t *testing.T) {
}
for _, tt := range tests {
t.Run(fmt.Sprintf("%s - %s", tt.name, tt.interval), func(t *testing.T) {
- if got := constSplitter(tt.interval).GenerateCacheKey(context.Background(), "fake", tt.r); got != tt.want {
+ if got := resultscache.ConstSplitter(tt.interval).GenerateCacheKey(context.Background(), "fake", tt.r.(resultscache.Request)); got != tt.want {
t.Errorf("generateKey() = %v, want %v", got, tt.want)
}
})
@@ -1033,7 +568,7 @@ func TestResultsCacheShouldCacheFunc(t *testing.T) {
rcm, err := NewResultsCacheMiddleware(
log.NewNopLogger(),
c,
- constSplitter(day),
+ resultscache.ConstSplitter(day),
mockLimits{maxCacheFreshness: 10 * time.Minute},
PrometheusCodec,
PrometheusResponseExtractor{},
diff --git a/pkg/querier/queryrange/queryrangebase/retry.go b/pkg/querier/queryrange/queryrangebase/retry.go
index 5dbad8d82582a..d051363771bb9 100644
--- a/pkg/querier/queryrange/queryrangebase/retry.go
+++ b/pkg/querier/queryrange/queryrangebase/retry.go
@@ -11,6 +11,7 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
+ "github.com/grafana/loki/pkg/util"
util_log "github.com/grafana/loki/pkg/util/log"
)
@@ -73,6 +74,11 @@ func (r retry) Do(ctx context.Context, req Request) (Response, error) {
MaxRetries: 0,
}
bk := backoff.New(ctx, cfg)
+
+ start := req.GetStart()
+ end := req.GetEnd()
+ query := req.GetQuery()
+
for ; tries < r.maxRetries; tries++ {
if ctx.Err() != nil {
return nil, ctx.Err()
@@ -86,7 +92,19 @@ func (r retry) Do(ctx context.Context, req Request) (Response, error) {
httpResp, ok := httpgrpc.HTTPResponseFromError(err)
if !ok || httpResp.Code/100 == 5 {
lastErr = err
- level.Error(util_log.WithContext(ctx, r.log)).Log("msg", "error processing request", "try", tries, "query", req.GetQuery(), "retry_in", bk.NextDelay(), "err", err)
+ level.Error(util_log.WithContext(ctx, r.log)).Log(
+ "msg", "error processing request",
+ "try", tries,
+ "query", query,
+ "query_hash", util.HashedQuery(query),
+ "start", start.Format(time.RFC3339Nano),
+ "end", end.Format(time.RFC3339Nano),
+ "start_delta", time.Since(start),
+ "end_delta", time.Since(end),
+ "length", end.Sub(start),
+ "retry_in", bk.NextDelay(),
+ "err", err,
+ )
bk.Wait()
continue
}
diff --git a/pkg/querier/queryrange/queryrangebase/roundtrip.go b/pkg/querier/queryrange/queryrangebase/roundtrip.go
index 847d311323c1e..1e0fe625f24d2 100644
--- a/pkg/querier/queryrange/queryrangebase/roundtrip.go
+++ b/pkg/querier/queryrange/queryrangebase/roundtrip.go
@@ -22,6 +22,8 @@ import (
"time"
"github.com/pkg/errors"
+
+ "github.com/grafana/dskit/flagext"
)
const day = 24 * time.Hour
@@ -33,11 +35,12 @@ var PassthroughMiddleware = MiddlewareFunc(func(next Handler) Handler {
// Config for query_range middleware chain.
type Config struct {
- AlignQueriesWithStep bool `yaml:"align_queries_with_step"`
- ResultsCacheConfig ResultsCacheConfig `yaml:"results_cache"`
- CacheResults bool `yaml:"cache_results"`
- MaxRetries int `yaml:"max_retries"`
- ShardedQueries bool `yaml:"parallelise_shardable_queries"`
+ AlignQueriesWithStep bool `yaml:"align_queries_with_step"`
+ ResultsCacheConfig ResultsCacheConfig `yaml:"results_cache"`
+ CacheResults bool `yaml:"cache_results"`
+ MaxRetries int `yaml:"max_retries"`
+ ShardedQueries bool `yaml:"parallelise_shardable_queries"`
+ ShardAggregations flagext.StringSliceCSV `yaml:"shard_aggregations"`
}
// RegisterFlags adds the flags required to config this to the given FlagSet.
@@ -47,6 +50,10 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
f.BoolVar(&cfg.CacheResults, "querier.cache-results", false, "Cache query results.")
f.BoolVar(&cfg.ShardedQueries, "querier.parallelise-shardable-queries", true, "Perform query parallelisations based on storage sharding configuration and query ASTs. This feature is supported only by the chunks storage engine.")
+ cfg.ShardAggregations = []string{}
+ f.Var(&cfg.ShardAggregations, "querier.shard-aggregations",
+ "A comma-separated list of LogQL vector and range aggregations that should be sharded")
+
cfg.ResultsCacheConfig.RegisterFlags(f)
}
@@ -57,6 +64,11 @@ func (cfg *Config) Validate() error {
return errors.Wrap(err, "invalid results_cache config")
}
}
+
+ if len(cfg.ShardAggregations) > 0 && !cfg.ShardedQueries {
+ return errors.New("shard_aggregations requires parallelise_shardable_queries=true")
+ }
+
return nil
}
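The new check ties the two options together: listing aggregations to shard only makes sense when query sharding itself is on. A minimal sketch of the behaviour, with illustrative field values; the rule itself is the one added to Validate above.

// Sketch: shard_aggregations is rejected unless parallelise_shardable_queries is enabled.
cfg := Config{
	ShardedQueries:    false,
	ShardAggregations: []string{"quantile_over_time"}, // example aggregation name (assumption)
}
err := cfg.Validate() // non-nil: "shard_aggregations requires parallelise_shardable_queries=true"

With ShardedQueries set to true, the same configuration validates cleanly.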
diff --git a/pkg/querier/queryrange/queryrangebase/util.go b/pkg/querier/queryrange/queryrangebase/util.go
index ee3aad8c15694..5073b715bc269 100644
--- a/pkg/querier/queryrange/queryrangebase/util.go
+++ b/pkg/querier/queryrange/queryrangebase/util.go
@@ -2,6 +2,8 @@ package queryrangebase
import (
"context"
+
+ "github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
)
// RequestResponse contains a request response and the respective request that was used.
@@ -58,3 +60,23 @@ func DoRequests(ctx context.Context, downstream Handler, reqs []Request, paralle
return resps, firstErr
}
+
+type queryMergerAsCacheResponseMerger struct {
+ Merger
+}
+
+func (m *queryMergerAsCacheResponseMerger) MergeResponse(responses ...resultscache.Response) (resultscache.Response, error) {
+ cacheResponses := make([]Response, 0, len(responses))
+ for _, r := range responses {
+ cacheResponses = append(cacheResponses, r.(Response))
+ }
+ response, err := m.Merger.MergeResponse(cacheResponses...)
+ if err != nil {
+ return nil, err
+ }
+ return response.(resultscache.Response), nil
+}
+
+func FromQueryResponseMergerToCacheResponseMerger(m Merger) resultscache.ResponseMerger {
+ return &queryMergerAsCacheResponseMerger{m}
+}
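A one-line usage sketch of this adapter, assuming the PrometheusCodec merger as input, which is how the results-cache middleware in this diff uses it; the variable name is illustrative.

// Adapt a queryrangebase.Merger so the shared results cache can merge cached responses.
var merger resultscache.ResponseMerger = FromQueryResponseMergerToCacheResponseMerger(PrometheusCodec)
_ = merger // handed to resultscache.NewResultsCache inside NewResultsCacheMiddleware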
diff --git a/pkg/querier/queryrange/querysharding.go b/pkg/querier/queryrange/querysharding.go
index 26ec924ce5c4f..a6c32b1525862 100644
--- a/pkg/querier/queryrange/querysharding.go
+++ b/pkg/querier/queryrange/querysharding.go
@@ -41,6 +41,7 @@ func NewQueryShardMiddleware(
limits Limits,
maxShards int,
statsHandler queryrangebase.Handler,
+ shardAggregation []string,
) queryrangebase.Middleware {
noshards := !hasShards(confs)
@@ -54,7 +55,7 @@ func NewQueryShardMiddleware(
}
mapperware := queryrangebase.MiddlewareFunc(func(next queryrangebase.Handler) queryrangebase.Handler {
- return newASTMapperware(confs, engineOpts, next, statsHandler, logger, shardingMetrics, limits, maxShards)
+ return newASTMapperware(confs, engineOpts, next, statsHandler, logger, shardingMetrics, limits, maxShards, shardAggregation)
})
return queryrangebase.MiddlewareFunc(func(next queryrangebase.Handler) queryrangebase.Handler {
@@ -79,16 +80,18 @@ func newASTMapperware(
metrics *logql.MapperMetrics,
limits Limits,
maxShards int,
+ shardAggregation []string,
) *astMapperware {
ast := &astMapperware{
- confs: confs,
- logger: log.With(logger, "middleware", "QueryShard.astMapperware"),
- limits: limits,
- next: next,
- statsHandler: next,
- ng: logql.NewDownstreamEngine(engineOpts, DownstreamHandler{next: next, limits: limits}, limits, logger),
- metrics: metrics,
- maxShards: maxShards,
+ confs: confs,
+ logger: log.With(logger, "middleware", "QueryShard.astMapperware"),
+ limits: limits,
+ next: next,
+ statsHandler: next,
+ ng: logql.NewDownstreamEngine(engineOpts, DownstreamHandler{next: next, limits: limits}, limits, logger),
+ metrics: metrics,
+ maxShards: maxShards,
+ shardAggregation: shardAggregation,
}
if statsHandler != nil {
@@ -107,6 +110,10 @@ type astMapperware struct {
ng *logql.DownstreamEngine
metrics *logql.MapperMetrics
maxShards int
+
+ // Feature flag for sharding range and vector aggregations such as
+ // quantile_ver_time with probabilistic data structures.
+ shardAggregation []string
}
func (ast *astMapperware) checkQuerySizeLimit(ctx context.Context, bytesPerShard uint64, notShardable bool) error {
@@ -143,7 +150,12 @@ func (ast *astMapperware) Do(ctx context.Context, r queryrangebase.Request) (que
util_log.WithContext(ctx, ast.logger),
)
- maxRVDuration, maxOffset, err := maxRangeVectorAndOffsetDuration(r.GetQuery())
+ params, err := ParamsFromRequest(r)
+ if err != nil {
+ return nil, err
+ }
+
+ maxRVDuration, maxOffset, err := maxRangeVectorAndOffsetDuration(params.GetExpression())
if err != nil {
level.Warn(logger).Log("err", err.Error(), "msg", "failed to get range-vector and offset duration so skipped AST mapper for request")
return ast.next.Do(ctx, r)
@@ -183,9 +195,9 @@ func (ast *astMapperware) Do(ctx context.Context, r queryrangebase.Request) (que
return ast.next.Do(ctx, r)
}
- mapper := logql.NewShardMapper(resolver, ast.metrics)
+ mapper := logql.NewShardMapper(resolver, ast.metrics, ast.shardAggregation)
- noop, bytesPerShard, parsed, err := mapper.Parse(r.GetQuery())
+ noop, bytesPerShard, parsed, err := mapper.Parse(params.GetExpression())
if err != nil {
level.Warn(logger).Log("msg", "failed mapping AST", "err", err.Error(), "query", r.GetQuery())
return nil, err
@@ -203,11 +215,6 @@ func (ast *astMapperware) Do(ctx context.Context, r queryrangebase.Request) (que
return ast.next.Do(ctx, r)
}
- params, err := ParamsFromRequest(r)
- if err != nil {
- return nil, err
- }
-
var path string
switch r := r.(type) {
case *LokiRequest:
@@ -217,7 +224,7 @@ func (ast *astMapperware) Do(ctx context.Context, r queryrangebase.Request) (que
default:
return nil, fmt.Errorf("expected *LokiRequest or *LokiInstantRequest, got (%T)", r)
}
- query := ast.ng.Query(ctx, params, parsed)
+ query := ast.ng.Query(ctx, logql.ParamsWithExpressionOverride{Params: params, ExpressionOverride: parsed})
res, err := query.Exec(ctx)
if err != nil {
diff --git a/pkg/querier/queryrange/querysharding_test.go b/pkg/querier/queryrange/querysharding_test.go
index 8c77afe0410cf..b17dfc4d3678a 100644
--- a/pkg/querier/queryrange/querysharding_test.go
+++ b/pkg/querier/queryrange/querysharding_test.go
@@ -19,7 +19,9 @@ import (
"github.com/grafana/loki/pkg/loghttp"
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/logql"
+ "github.com/grafana/loki/pkg/logql/syntax"
"github.com/grafana/loki/pkg/logqlmodel/stats"
+ "github.com/grafana/loki/pkg/querier/plan"
"github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"
"github.com/grafana/loki/pkg/querier/queryrange/queryrangebase/definitions"
"github.com/grafana/loki/pkg/storage/config"
@@ -170,9 +172,15 @@ func Test_astMapper(t *testing.T) {
nilShardingMetrics,
fakeLimits{maxSeries: math.MaxInt32, maxQueryParallelism: 1, queryTimeout: time.Second},
0,
+ []string{},
)
- resp, err := mware.Do(user.InjectOrgID(context.Background(), "1"), defaultReq().WithQuery(`{food="bar"}`))
+ req := defaultReq()
+ req.Query = `{foo="bar"}`
+ req.Plan = &plan.QueryPlan{
+ AST: syntax.MustParseExpr(req.Query),
+ }
+ resp, err := mware.Do(user.InjectOrgID(context.Background(), "1"), req)
require.Nil(t, err)
require.Equal(t, []*definitions.PrometheusResponseHeader{
@@ -309,9 +317,15 @@ func Test_astMapper_QuerySizeLimits(t *testing.T) {
maxQuerierBytesRead: tc.maxQuerierBytesSize,
},
0,
+ []string{},
)
- _, err := mware.Do(user.InjectOrgID(context.Background(), "1"), defaultReq().WithQuery(tc.query))
+ req := defaultReq()
+ req.Query = tc.query
+ req.Plan = &plan.QueryPlan{
+ AST: syntax.MustParseExpr(tc.query),
+ }
+ _, err := mware.Do(user.InjectOrgID(context.Background(), "1"), req)
if err != nil {
require.ErrorContains(t, err, tc.err)
}
@@ -342,9 +356,16 @@ func Test_ShardingByPass(t *testing.T) {
nilShardingMetrics,
fakeLimits{maxSeries: math.MaxInt32, maxQueryParallelism: 1},
0,
+ []string{},
)
- _, err := mware.Do(user.InjectOrgID(context.Background(), "1"), defaultReq().WithQuery(`1+1`))
+ req := defaultReq()
+ req.Query = `1+1`
+ req.Plan = &plan.QueryPlan{
+ AST: syntax.MustParseExpr(req.Query),
+ }
+
+ _, err := mware.Do(user.InjectOrgID(context.Background(), "1"), req)
require.Nil(t, err)
require.Equal(t, called, 1)
}
@@ -416,7 +437,9 @@ func Test_InstantSharding(t *testing.T) {
queryTimeout: time.Second,
},
0,
- nil)
+ nil,
+ []string{},
+ )
response, err := sharding.Wrap(queryrangebase.HandlerFunc(func(c context.Context, r queryrangebase.Request) (queryrangebase.Response, error) {
lock.Lock()
defer lock.Unlock()
@@ -437,6 +460,9 @@ func Test_InstantSharding(t *testing.T) {
Query: `rate({app="foo"}[1m])`,
TimeTs: util.TimeFromMillis(10),
Path: "/v1/query",
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`rate({app="foo"}[1m])`),
+ },
})
require.NoError(t, err)
require.Equal(t, 3, called, "expected 3 calls but got {}", called)
@@ -487,13 +513,13 @@ func Test_SeriesShardingHandler(t *testing.T) {
Version: 1,
Data: []logproto.SeriesIdentifier{
{
- Labels: map[string]string{
- "foo": "bar",
+ Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "foo", Value: "bar"},
},
},
{
- Labels: map[string]string{
- "shard": req.Shards[0],
+ Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "shard", Value: req.Shards[0]},
},
},
},
@@ -511,36 +537,31 @@ func Test_SeriesShardingHandler(t *testing.T) {
Version: 1,
Data: []logproto.SeriesIdentifier{
{
- Labels: map[string]string{
- "foo": "bar",
+ Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "foo", Value: "bar"},
},
},
{
- Labels: map[string]string{
- "shard": "0_of_3",
+ Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "shard", Value: "0_of_3"},
},
},
{
- Labels: map[string]string{
- "shard": "1_of_3",
+ Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "shard", Value: "1_of_3"},
},
},
{
- Labels: map[string]string{
- "shard": "2_of_3",
+ Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "shard", Value: "2_of_3"},
},
},
},
}
- sort.Slice(expected.Data, func(i, j int) bool {
- return expected.Data[i].Labels["shard"] > expected.Data[j].Labels["shard"]
- })
actual := response.(*LokiSeriesResponse)
- sort.Slice(actual.Data, func(i, j int) bool {
- return actual.Data[i].Labels["shard"] > actual.Data[j].Labels["shard"]
- })
require.NoError(t, err)
- require.Equal(t, expected, actual)
+ require.Equal(t, expected.Status, actual.Status)
+ require.ElementsMatch(t, expected.Data, actual.Data)
}
func TestShardingAcrossConfigs_ASTMapper(t *testing.T) {
@@ -564,7 +585,7 @@ func TestShardingAcrossConfigs_ASTMapper(t *testing.T) {
}{
{
name: "logs query touching just the active schema config",
- req: defaultReq().WithStartEndTime(now.Add(-time.Hour).Time(), now.Time()).WithQuery(`{foo="bar"}`),
+ req: defaultReq().WithStartEnd(now.Add(-time.Hour).Time(), now.Time()).WithQuery(`{foo="bar"}`),
resp: &LokiResponse{
Status: loghttp.QueryStatusSuccess,
Headers: []definitions.PrometheusResponseHeader{
@@ -575,7 +596,7 @@ func TestShardingAcrossConfigs_ASTMapper(t *testing.T) {
},
{
name: "logs query touching just the prev schema config",
- req: defaultReq().WithStartEndTime(confs[0].From.Time.Time(), confs[0].From.Time.Add(time.Hour).Time()).WithQuery(`{foo="bar"}`),
+ req: defaultReq().WithStartEnd(confs[0].From.Time.Time(), confs[0].From.Time.Add(time.Hour).Time()).WithQuery(`{foo="bar"}`),
resp: &LokiResponse{
Status: loghttp.QueryStatusSuccess,
Headers: []definitions.PrometheusResponseHeader{
@@ -586,7 +607,7 @@ func TestShardingAcrossConfigs_ASTMapper(t *testing.T) {
},
{
name: "metric query touching just the active schema config",
- req: defaultReq().WithStartEndTime(confs[1].From.Time.Add(5*time.Minute).Time(), confs[1].From.Time.Add(time.Hour).Time()).WithQuery(`rate({foo="bar"}[1m])`),
+ req: defaultReq().WithStartEnd(confs[1].From.Time.Add(5*time.Minute).Time(), confs[1].From.Time.Add(time.Hour).Time()).WithQuery(`rate({foo="bar"}[1m])`),
resp: &LokiPromResponse{
Response: &queryrangebase.PrometheusResponse{
Status: loghttp.QueryStatusSuccess,
@@ -603,7 +624,7 @@ func TestShardingAcrossConfigs_ASTMapper(t *testing.T) {
},
{
name: "metric query touching just the prev schema config",
- req: defaultReq().WithStartEndTime(confs[0].From.Time.Add(time.Hour).Time(), confs[0].From.Time.Add(2*time.Hour).Time()).WithQuery(`rate({foo="bar"}[1m])`),
+ req: defaultReq().WithStartEnd(confs[0].From.Time.Add(time.Hour).Time(), confs[0].From.Time.Add(2*time.Hour).Time()).WithQuery(`rate({foo="bar"}[1m])`),
resp: &LokiPromResponse{
Response: &queryrangebase.PrometheusResponse{
Status: loghttp.QueryStatusSuccess,
@@ -620,7 +641,7 @@ func TestShardingAcrossConfigs_ASTMapper(t *testing.T) {
},
{
name: "logs query covering both schemas",
- req: defaultReq().WithStartEndTime(confs[0].From.Time.Time(), now.Time()).WithQuery(`{foo="bar"}`),
+ req: defaultReq().WithStartEnd(confs[0].From.Time.Time(), now.Time()).WithQuery(`{foo="bar"}`),
resp: &LokiResponse{
Status: loghttp.QueryStatusSuccess,
Headers: []definitions.PrometheusResponseHeader{
@@ -631,7 +652,7 @@ func TestShardingAcrossConfigs_ASTMapper(t *testing.T) {
},
{
name: "metric query covering both schemas",
- req: defaultReq().WithStartEndTime(confs[0].From.Time.Time(), now.Time()).WithQuery(`rate({foo="bar"}[1m])`),
+ req: defaultReq().WithStartEnd(confs[0].From.Time.Time(), now.Time()).WithQuery(`rate({foo="bar"}[1m])`),
resp: &LokiPromResponse{
Response: &queryrangebase.PrometheusResponse{
Status: loghttp.QueryStatusSuccess,
@@ -648,7 +669,7 @@ func TestShardingAcrossConfigs_ASTMapper(t *testing.T) {
},
{
name: "metric query with start/end within first schema but with large enough range to cover previous schema too",
- req: defaultReq().WithStartEndTime(confs[1].From.Time.Add(5*time.Minute).Time(), confs[1].From.Time.Add(time.Hour).Time()).WithQuery(`rate({foo="bar"}[24h])`),
+ req: defaultReq().WithStartEnd(confs[1].From.Time.Add(5*time.Minute).Time(), confs[1].From.Time.Add(time.Hour).Time()).WithQuery(`rate({foo="bar"}[24h])`),
resp: &LokiPromResponse{
Response: &queryrangebase.PrometheusResponse{
Status: loghttp.QueryStatusSuccess,
@@ -665,7 +686,7 @@ func TestShardingAcrossConfigs_ASTMapper(t *testing.T) {
},
{
name: "metric query with start/end within first schema but with large enough offset to shift it to previous schema",
- req: defaultReq().WithStartEndTime(confs[1].From.Time.Add(5*time.Minute).Time(), now.Time()).WithQuery(`rate({foo="bar"}[1m] offset 12h)`),
+ req: defaultReq().WithStartEnd(confs[1].From.Time.Add(5*time.Minute).Time(), now.Time()).WithQuery(`rate({foo="bar"}[1m] offset 12h)`),
resp: &LokiPromResponse{
Response: &queryrangebase.PrometheusResponse{
Status: loghttp.QueryStatusSuccess,
@@ -701,8 +722,16 @@ func TestShardingAcrossConfigs_ASTMapper(t *testing.T) {
nilShardingMetrics,
fakeLimits{maxSeries: math.MaxInt32, maxQueryParallelism: 1, queryTimeout: time.Second},
0,
+ []string{},
)
+ // Currently all the tests call `defaultReq()`, which creates an instance of the LokiRequest type.
+ // If that changes in the future, we need another way to access the Plan field of an arbitrary query type,
+ // or we should set the Plan in calls to `GetExpression` when it is nil, by calling `ParseExpr` or similar.
+ tc.req.(*LokiRequest).Plan = &plan.QueryPlan{
+ AST: syntax.MustParseExpr(tc.req.GetQuery()),
+ }
+
resp, err := mware.Do(user.InjectOrgID(context.Background(), "1"), tc.req)
require.Nil(t, err)
@@ -828,14 +857,19 @@ func Test_ASTMapper_MaxLookBackPeriod(t *testing.T) {
nilShardingMetrics,
fakeLimits{maxSeries: math.MaxInt32, tsdbMaxQueryParallelism: 1, queryTimeout: time.Second},
0,
+ []string{},
)
+ q := `{cluster="dev-us-central-0"}`
lokiReq := &LokiInstantRequest{
- Query: `{cluster="dev-us-central-0"}`,
+ Query: q,
Limit: 1000,
TimeTs: testTime,
Direction: logproto.FORWARD,
Path: "/loki/api/v1/query",
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(q),
+ },
}
ctx := user.InjectOrgID(context.Background(), "foo")
diff --git a/pkg/querier/queryrange/roundtrip.go b/pkg/querier/queryrange/roundtrip.go
index e2a2ed0021690..c03d459ba9b23 100644
--- a/pkg/querier/queryrange/roundtrip.go
+++ b/pkg/querier/queryrange/roundtrip.go
@@ -7,11 +7,10 @@ import (
"strings"
"time"
- "github.com/grafana/dskit/user"
-
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/grafana/dskit/httpgrpc"
+ "github.com/grafana/dskit/user"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
@@ -24,6 +23,7 @@ import (
base "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"
"github.com/grafana/loki/pkg/storage/chunk/cache"
"github.com/grafana/loki/pkg/storage/config"
+ "github.com/grafana/loki/pkg/util"
"github.com/grafana/loki/pkg/util/constants"
logutil "github.com/grafana/loki/pkg/util/log"
)
@@ -242,15 +242,25 @@ func (r roundTripper) Do(ctx context.Context, req base.Request) (base.Response,
switch op := req.(type) {
case *LokiRequest:
- expr, err := syntax.ParseExpr(op.Query)
- if err != nil {
- return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error())
- }
+ queryHash := util.HashedQuery(op.Query)
+ level.Info(logger).Log(
+ "msg", "executing query",
+ "type", "range",
+ "query", op.Query,
+ "start", op.StartTs.Format(time.RFC3339Nano),
+ "end", op.EndTs.Format(time.RFC3339Nano),
+ "start_delta", time.Since(op.StartTs),
+ "end_delta", time.Since(op.EndTs),
+ "length", op.EndTs.Sub(op.StartTs),
+ "step", op.Step,
+ "query_hash", queryHash,
+ )
- queryHash := logql.HashedQuery(op.Query)
- level.Info(logger).Log("msg", "executing query", "type", "range", "query", op.Query, "length", op.EndTs.Sub(op.StartTs), "step", op.Step, "query_hash", queryHash)
+ if op.Plan == nil {
+ return nil, errors.New("query plan is empty")
+ }
- switch e := expr.(type) {
+ switch e := op.Plan.AST.(type) {
case syntax.SampleExpr:
// The error will be handled later.
groups, err := e.MatcherGroups()
@@ -291,15 +301,10 @@ func (r roundTripper) Do(ctx context.Context, req base.Request) (base.Response,
return r.labels.Do(ctx, req)
case *LokiInstantRequest:
- expr, err := syntax.ParseExpr(op.Query)
- if err != nil {
- return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error())
- }
-
- queryHash := logql.HashedQuery(op.Query)
+ queryHash := util.HashedQuery(op.Query)
level.Info(logger).Log("msg", "executing query", "type", "instant", "query", op.Query, "query_hash", queryHash)
- switch expr.(type) {
+ switch op.Plan.AST.(type) {
case syntax.SampleExpr:
return r.instantMetric.Do(ctx, req)
default:
@@ -429,6 +434,7 @@ func NewLogFilterTripperware(
limits,
0, // 0 is unlimited shards
statsHandler,
+ cfg.ShardAggregations,
),
)
} else {
@@ -446,10 +452,7 @@ func NewLogFilterTripperware(
)
}
- if len(queryRangeMiddleware) > 0 {
- return NewLimitedRoundTripper(next, limits, schema.Configs, queryRangeMiddleware...)
- }
- return next
+ return NewLimitedRoundTripper(next, limits, schema.Configs, queryRangeMiddleware...)
}), nil
}
@@ -529,10 +532,7 @@ func NewSeriesTripperware(
}
return base.MiddlewareFunc(func(next base.Handler) base.Handler {
- if len(queryRangeMiddleware) > 0 {
- return NewLimitedRoundTripper(next, limits, schema.Configs, queryRangeMiddleware...)
- }
- return next
+ return NewLimitedRoundTripper(next, limits, schema.Configs, queryRangeMiddleware...)
}), nil
}
@@ -563,11 +563,8 @@ func NewLabelsTripperware(
}
return base.MiddlewareFunc(func(next base.Handler) base.Handler {
- if len(queryRangeMiddleware) > 0 {
- // Do not forward any request header.
- return base.MergeMiddlewares(queryRangeMiddleware...).Wrap(next)
- }
- return next
+ // Do not forward any request header.
+ return base.MergeMiddlewares(queryRangeMiddleware...).Wrap(next)
}), nil
}
@@ -662,6 +659,7 @@ func NewMetricTripperware(
limits,
0, // 0 is unlimited shards
statsHandler,
+ cfg.ShardAggregations,
),
)
} else {
@@ -726,6 +724,7 @@ func NewInstantMetricTripperware(
limits,
0, // 0 is unlimited shards
statsHandler,
+ cfg.ShardAggregations,
),
)
}
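For context on the roundtrip.go hunks above: the range and instant paths now expect the request to arrive with a pre-parsed `Plan` and fail with "query plan is empty" instead of calling `syntax.ParseExpr` on the raw string. A minimal, self-contained sketch of that pattern follows; the `Expr`, `QueryPlan`, and `Request` types and the `parse`/`dispatch` helpers are illustrative stand-ins, not the real Loki types.

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// Expr stands in for a parsed LogQL AST node.
type Expr interface{ isExpr() }

type SampleExpr struct{ Query string } // metric-style expression
type LogExpr struct{ Query string }    // log-selector expression

func (SampleExpr) isExpr() {}
func (LogExpr) isExpr()    {}

// QueryPlan mirrors the shape of the new Plan field: a pre-parsed AST carried with the request.
type QueryPlan struct {
	AST Expr
}

type Request struct {
	Query string
	Plan  *QueryPlan
}

// parse is a stand-in for the real parser: anything containing "rate(" counts as a sample expression.
func parse(q string) (Expr, error) {
	if q == "" {
		return nil, errors.New("empty query")
	}
	if strings.Contains(q, "rate(") {
		return SampleExpr{Query: q}, nil
	}
	return LogExpr{Query: q}, nil
}

// dispatch refuses requests without a plan, as the patched roundtripper does,
// and routes on the expression type instead of re-parsing the query string.
func dispatch(r *Request) (string, error) {
	if r.Plan == nil {
		return "", errors.New("query plan is empty")
	}
	switch r.Plan.AST.(type) {
	case SampleExpr:
		return "metric handler", nil
	default:
		return "log handler", nil
	}
}

func main() {
	q := `rate({app="foo"}[1m])`
	ast, err := parse(q)
	if err != nil {
		panic(err)
	}
	req := &Request{Query: q, Plan: &QueryPlan{AST: ast}}
	route, _ := dispatch(req)
	fmt.Println(route) // metric handler
}
```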
diff --git a/pkg/querier/queryrange/roundtrip_test.go b/pkg/querier/queryrange/roundtrip_test.go
index 4c19a1ffc1202..c0b05103ded36 100644
--- a/pkg/querier/queryrange/roundtrip_test.go
+++ b/pkg/querier/queryrange/roundtrip_test.go
@@ -23,10 +23,13 @@ import (
"github.com/grafana/loki/pkg/loghttp"
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/logql"
+ "github.com/grafana/loki/pkg/logql/syntax"
"github.com/grafana/loki/pkg/logqlmodel"
"github.com/grafana/loki/pkg/logqlmodel/stats"
+ "github.com/grafana/loki/pkg/querier/plan"
base "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"
"github.com/grafana/loki/pkg/storage/chunk/cache"
+ "github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
"github.com/grafana/loki/pkg/storage/config"
"github.com/grafana/loki/pkg/storage/stores/index/seriesvolume"
"github.com/grafana/loki/pkg/util"
@@ -44,11 +47,13 @@ var (
MaxRetries: 3,
CacheResults: true,
ResultsCacheConfig: base.ResultsCacheConfig{
- CacheConfig: cache.Config{
- EmbeddedCache: cache.EmbeddedCacheConfig{
- Enabled: true,
- MaxSizeMB: 1024,
- TTL: 24 * time.Hour,
+ Config: resultscache.Config{
+ CacheConfig: cache.Config{
+ EmbeddedCache: cache.EmbeddedCacheConfig{
+ Enabled: true,
+ MaxSizeMB: 1024,
+ TTL: 24 * time.Hour,
+ },
},
},
},
@@ -57,22 +62,26 @@ var (
CacheIndexStatsResults: true,
StatsCacheConfig: IndexStatsCacheConfig{
ResultsCacheConfig: base.ResultsCacheConfig{
- CacheConfig: cache.Config{
- EmbeddedCache: cache.EmbeddedCacheConfig{
- Enabled: true,
- MaxSizeMB: 1024,
- TTL: 24 * time.Hour,
+ Config: resultscache.Config{
+ CacheConfig: cache.Config{
+ EmbeddedCache: cache.EmbeddedCacheConfig{
+ Enabled: true,
+ MaxSizeMB: 1024,
+ TTL: 24 * time.Hour,
+ },
},
},
},
},
VolumeCacheConfig: VolumeCacheConfig{
ResultsCacheConfig: base.ResultsCacheConfig{
- CacheConfig: cache.Config{
- EmbeddedCache: cache.EmbeddedCacheConfig{
- Enabled: true,
- MaxSizeMB: 1024,
- TTL: 24 * time.Hour,
+ Config: resultscache.Config{
+ CacheConfig: cache.Config{
+ EmbeddedCache: cache.EmbeddedCacheConfig{
+ Enabled: true,
+ MaxSizeMB: 1024,
+ TTL: 24 * time.Hour,
+ },
},
},
},
@@ -131,10 +140,16 @@ var (
series = logproto.SeriesResponse{
Series: []logproto.SeriesIdentifier{
{
- Labels: map[string]string{"filename": "/var/hostlog/apport.log", "job": "varlogs"},
+ Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "filename", Value: "/var/hostlog/apport.log"},
+ {Key: "job", Value: "varlogs"},
+ },
},
{
- Labels: map[string]string{"filename": "/var/hostlog/test.log", "job": "varlogs"},
+ Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "filename", Value: "/var/hostlog/test.log"},
+ {Key: "job", Value: "varlogs"},
+ },
},
},
}
@@ -190,6 +205,9 @@ func TestMetricsTripperware(t *testing.T) {
EndTs: testTime,
Direction: logproto.FORWARD,
Path: "/query_range",
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`rate({app="foo"} |= "foo"[1m])`),
+ },
}
ctx := user.InjectOrgID(context.Background(), "1")
@@ -273,6 +291,9 @@ func TestLogFilterTripperware(t *testing.T) {
EndTs: testTime,
Direction: logproto.FORWARD,
Path: "/loki/api/v1/query_range",
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`{app="foo"} |= "foo"`),
+ },
}
ctx := user.InjectOrgID(context.Background(), "1")
@@ -332,12 +353,16 @@ func TestInstantQueryTripperware(t *testing.T) {
}
require.NoError(t, err)
+ q := `sum by (job) (bytes_rate({cluster="dev-us-central-0"}[15m]))`
lreq := &LokiInstantRequest{
- Query: `sum by (job) (bytes_rate({cluster="dev-us-central-0"}[15m]))`,
+ Query: q,
Limit: 1000,
TimeTs: testTime,
Direction: logproto.FORWARD,
Path: "/loki/api/v1/query",
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(q),
+ },
}
ctx := user.InjectOrgID(context.Background(), "1")
@@ -659,10 +684,12 @@ func TestNewTripperware_Caches(t *testing.T) {
Config: base.Config{
CacheResults: true,
ResultsCacheConfig: base.ResultsCacheConfig{
- CacheConfig: cache.Config{
- EmbeddedCache: cache.EmbeddedCacheConfig{
- MaxSizeMB: 1,
- Enabled: true,
+ Config: resultscache.Config{
+ CacheConfig: cache.Config{
+ EmbeddedCache: cache.EmbeddedCacheConfig{
+ MaxSizeMB: 1,
+ Enabled: true,
+ },
},
},
},
@@ -678,10 +705,12 @@ func TestNewTripperware_Caches(t *testing.T) {
Config: base.Config{
CacheResults: true,
ResultsCacheConfig: base.ResultsCacheConfig{
- CacheConfig: cache.Config{
- EmbeddedCache: cache.EmbeddedCacheConfig{
- MaxSizeMB: 1,
- Enabled: true,
+ Config: resultscache.Config{
+ CacheConfig: cache.Config{
+ EmbeddedCache: cache.EmbeddedCacheConfig{
+ MaxSizeMB: 1,
+ Enabled: true,
+ },
},
},
},
@@ -697,10 +726,12 @@ func TestNewTripperware_Caches(t *testing.T) {
Config: base.Config{
CacheResults: true,
ResultsCacheConfig: base.ResultsCacheConfig{
- CacheConfig: cache.Config{
- EmbeddedCache: cache.EmbeddedCacheConfig{
- Enabled: true,
- MaxSizeMB: 2000,
+ Config: resultscache.Config{
+ CacheConfig: cache.Config{
+ EmbeddedCache: cache.EmbeddedCacheConfig{
+ Enabled: true,
+ MaxSizeMB: 2000,
+ },
},
},
},
@@ -708,10 +739,12 @@ func TestNewTripperware_Caches(t *testing.T) {
CacheIndexStatsResults: true,
StatsCacheConfig: IndexStatsCacheConfig{
ResultsCacheConfig: base.ResultsCacheConfig{
- CacheConfig: cache.Config{
- EmbeddedCache: cache.EmbeddedCacheConfig{
- Enabled: true,
- MaxSizeMB: 1000,
+ Config: resultscache.Config{
+ CacheConfig: cache.Config{
+ EmbeddedCache: cache.EmbeddedCacheConfig{
+ Enabled: true,
+ MaxSizeMB: 1000,
+ },
},
},
},
@@ -785,6 +818,9 @@ func TestLogNoFilter(t *testing.T) {
EndTs: testTime,
Direction: logproto.FORWARD,
Path: "/loki/api/v1/query_range",
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`{app="foo"}`),
+ },
}
ctx := user.InjectOrgID(context.Background(), "1")
@@ -796,7 +832,12 @@ func TestLogNoFilter(t *testing.T) {
}
func TestPostQueries(t *testing.T) {
- lreq := &LokiRequest{Query: `{app="foo"} |~ "foo"`}
+ lreq := &LokiRequest{
+ Query: `{app="foo"} |~ "foo"`,
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`{app="foo"} |~ "foo"`),
+ },
+ }
ctx := user.InjectOrgID(context.Background(), "1")
handler := base.HandlerFunc(func(context.Context, base.Request) (base.Response, error) {
t.Error("unexpected default roundtripper called")
@@ -834,6 +875,9 @@ func TestTripperware_EntriesLimit(t *testing.T) {
EndTs: testTime,
Direction: logproto.FORWARD,
Path: "/loki/api/v1/query_range",
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`{app="foo"}`),
+ },
}
ctx := user.InjectOrgID(context.Background(), "1")
@@ -881,6 +925,9 @@ func TestTripperware_RequiredLabels(t *testing.T) {
EndTs: testTime,
Direction: logproto.FORWARD,
Path: "/loki/api/v1/query_range",
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(test.qs),
+ },
}
// See loghttp.step
step := time.Duration(int(math.Max(math.Floor(lreq.EndTs.Sub(lreq.StartTs).Seconds()/250), 1))) * time.Second
@@ -986,6 +1033,9 @@ func TestTripperware_RequiredNumberLabels(t *testing.T) {
EndTs: testTime,
Direction: logproto.FORWARD,
Path: "/loki/api/v1/query_range",
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(tc.query),
+ },
}
// See loghttp.step
step := time.Duration(int(math.Max(math.Floor(lreq.EndTs.Sub(lreq.StartTs).Seconds()/250), 1))) * time.Second
@@ -1101,6 +1151,9 @@ func TestMetricsTripperware_SplitShardStats(t *testing.T) {
TimeTs: testTime,
Direction: logproto.FORWARD,
Path: "/loki/api/v1/query",
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`sum by (app) (rate({app="foo"} |= "foo"[2h]))`),
+ },
},
expectedSplitStats: 2, // [2h] interval split by 1h configured split interval
expectedShardStats: 8, // 2 time splits * 4 row shards
@@ -1113,6 +1166,9 @@ func TestMetricsTripperware_SplitShardStats(t *testing.T) {
TimeTs: testTime,
Direction: logproto.FORWARD,
Path: "/loki/api/v1/query",
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`sum by (app) (rate({app="foo"} |= "foo"[1h]))`),
+ },
},
expectedSplitStats: 0, // [1h] interval not split
expectedShardStats: 4, // 4 row shards
@@ -1127,6 +1183,9 @@ func TestMetricsTripperware_SplitShardStats(t *testing.T) {
EndTs: testTime,
Direction: logproto.FORWARD,
Path: "/query_range",
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`sum by (app) (rate({app="foo"} |= "foo"[1h]))`),
+ },
},
expectedSplitStats: 3, // 2 hour range interval split based on the base hour + the remainder
expectedShardStats: 12, // 3 time splits * 4 row shards
@@ -1141,6 +1200,9 @@ func TestMetricsTripperware_SplitShardStats(t *testing.T) {
EndTs: testTime,
Direction: logproto.FORWARD,
Path: "/query_range",
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`sum by (app) (rate({app="foo"} |= "foo"[1h]))`),
+ },
},
expectedSplitStats: 0, // 1 minute range interval not split
expectedShardStats: 4, // 4 row shards
diff --git a/pkg/querier/queryrange/split_by_interval.go b/pkg/querier/queryrange/split_by_interval.go
index da8326a678ec5..d568fe65ddde8 100644
--- a/pkg/querier/queryrange/split_by_interval.go
+++ b/pkg/querier/queryrange/split_by_interval.go
@@ -259,6 +259,7 @@ func splitByTime(req queryrangebase.Request, interval time.Duration) ([]queryran
Path: r.Path,
StartTs: start,
EndTs: end,
+ Plan: r.Plan,
})
})
case *LokiSeriesRequest:
@@ -310,13 +311,17 @@ func splitByTime(req queryrangebase.Request, interval time.Duration) ([]queryran
return reqs, nil
}
-// maxRangeVectorAndOffsetDuration returns the maximum range vector and offset duration within a LogQL query.
-func maxRangeVectorAndOffsetDuration(q string) (time.Duration, time.Duration, error) {
- expr, err := syntax.ParseExpr(q)
+// maxRangeVectorAndOffsetDurationFromQueryString parses the query string and returns the maximum range vector and offset duration within it.
+func maxRangeVectorAndOffsetDurationFromQueryString(q string) (time.Duration, time.Duration, error) {
+ parsed, err := syntax.ParseExpr(q)
if err != nil {
return 0, 0, err
}
+ return maxRangeVectorAndOffsetDuration(parsed)
+}
+// maxRangeVectorAndOffsetDuration returns the maximum range vector and offset duration within a LogQL query.
+func maxRangeVectorAndOffsetDuration(expr syntax.Expr) (time.Duration, time.Duration, error) {
if _, ok := expr.(syntax.SampleExpr); !ok {
return 0, 0, nil
}
@@ -337,8 +342,8 @@ func maxRangeVectorAndOffsetDuration(q string) (time.Duration, time.Duration, er
// reduceSplitIntervalForRangeVector reduces the split interval for a range query based on the duration of the range vector.
// Large range vector durations will not be split into smaller intervals because it can cause the queries to be slow by over-processing data.
-func reduceSplitIntervalForRangeVector(r queryrangebase.Request, interval time.Duration) (time.Duration, error) {
- maxRange, _, err := maxRangeVectorAndOffsetDuration(r.GetQuery())
+func reduceSplitIntervalForRangeVector(r *LokiRequest, interval time.Duration) (time.Duration, error) {
+ maxRange, _, err := maxRangeVectorAndOffsetDuration(r.Plan.AST)
if err != nil {
return 0, err
}
@@ -351,13 +356,13 @@ func reduceSplitIntervalForRangeVector(r queryrangebase.Request, interval time.D
func splitMetricByTime(r queryrangebase.Request, interval time.Duration) ([]queryrangebase.Request, error) {
var reqs []queryrangebase.Request
- interval, err := reduceSplitIntervalForRangeVector(r, interval)
+ lokiReq := r.(*LokiRequest)
+
+ interval, err := reduceSplitIntervalForRangeVector(lokiReq, interval)
if err != nil {
return nil, err
}
- lokiReq := r.(*LokiRequest)
-
// step align start and end time of the query. Start time is rounded down and end time is rounded up.
stepNs := r.GetStep() * 1e6
startNs := lokiReq.StartTs.UnixNano()
@@ -383,6 +388,7 @@ func splitMetricByTime(r queryrangebase.Request, interval time.Duration) ([]quer
Path: lokiReq.Path,
StartTs: start,
EndTs: end,
+ Plan: lokiReq.Plan,
})
})
@@ -403,6 +409,7 @@ func splitMetricByTime(r queryrangebase.Request, interval time.Duration) ([]quer
Path: lokiReq.Path,
StartTs: start,
EndTs: end,
+ Plan: lokiReq.Plan,
})
}
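The split_by_interval.go hunks above keep the existing step alignment (note `stepNs := r.GetStep() * 1e6`, since the step is in milliseconds): the start is rounded down and the end is rounded up to a step boundary so every split covers whole steps. A self-contained sketch of that arithmetic with made-up timestamps:

```go
package main

import (
	"fmt"
	"time"
)

// alignToStep rounds start down and end up to multiples of step.
func alignToStep(start, end time.Time, step time.Duration) (time.Time, time.Time) {
	stepNs := step.Nanoseconds()
	startNs := start.UnixNano()
	endNs := end.UnixNano()

	alignedStart := startNs - startNs%stepNs
	alignedEnd := endNs
	if rem := endNs % stepNs; rem != 0 {
		alignedEnd = endNs - rem + stepNs
	}
	return time.Unix(0, alignedStart), time.Unix(0, alignedEnd)
}

func main() {
	start := time.Unix(0, 7*int64(time.Second)) // 7s after epoch
	end := time.Unix(0, 25*int64(time.Second))  // 25s after epoch
	s, e := alignToStep(start, end, 10*time.Second)
	fmt.Println(s.Unix(), e.Unix()) // 0 30: the query now covers whole 10s steps
}
```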
diff --git a/pkg/querier/queryrange/split_by_interval_test.go b/pkg/querier/queryrange/split_by_interval_test.go
index ce02105d091fe..58b78b820a51c 100644
--- a/pkg/querier/queryrange/split_by_interval_test.go
+++ b/pkg/querier/queryrange/split_by_interval_test.go
@@ -17,7 +17,9 @@ import (
"github.com/grafana/loki/pkg/loghttp"
"github.com/grafana/loki/pkg/logproto"
+ "github.com/grafana/loki/pkg/logql/syntax"
"github.com/grafana/loki/pkg/logqlmodel/stats"
+ "github.com/grafana/loki/pkg/querier/plan"
"github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"
"github.com/grafana/loki/pkg/storage/config"
)
@@ -57,25 +59,31 @@ var testSchemasTSDB = func() []config.PeriodConfig {
func Test_splitQuery(t *testing.T) {
buildLokiRequest := func(start, end time.Time) queryrangebase.Request {
return &LokiRequest{
- Query: "foo",
+ Query: `{app="foo"}`,
Limit: 1,
Step: 2,
StartTs: start,
EndTs: end,
Direction: logproto.BACKWARD,
Path: "/path",
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`{app="foo"}`),
+ },
}
}
buildLokiRequestWithInterval := func(start, end time.Time) queryrangebase.Request {
return &LokiRequest{
- Query: "foo",
+ Query: `{app="foo"}`,
Limit: 1,
Interval: 2,
StartTs: start,
EndTs: end,
Direction: logproto.BACKWARD,
Path: "/path",
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`{app="foo"}`),
+ },
}
}
@@ -220,7 +228,7 @@ func Test_splitMetricQuery(t *testing.T) {
const seconds = 1e3 // 1e3 milliseconds per second.
for i, tc := range []struct {
- input queryrangebase.Request
+ input *LokiRequest
expected []queryrangebase.Request
interval time.Duration
}{
@@ -592,6 +600,17 @@ func Test_splitMetricQuery(t *testing.T) {
interval: 15 * time.Minute,
},
} {
+ // Set query plans
+ tc.input.Plan = &plan.QueryPlan{
+ AST: syntax.MustParseExpr(tc.input.Query),
+ }
+
+ for _, e := range tc.expected {
+ e.(*LokiRequest).Plan = &plan.QueryPlan{
+ AST: syntax.MustParseExpr(e.GetQuery()),
+ }
+ }
+
t.Run(strconv.Itoa(i), func(t *testing.T) {
splits, err := splitMetricByTime(tc.input, tc.interval)
require.NoError(t, err)
@@ -788,13 +807,22 @@ func Test_series_splitByInterval_Do(t *testing.T) {
Version: uint32(loghttp.VersionV1),
Data: []logproto.SeriesIdentifier{
{
- Labels: map[string]string{"filename": "/var/hostlog/apport.log", "job": "varlogs"},
+ Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "filename", Value: "/var/hostlog/apport.log"},
+ {Key: "job", Value: "varlogs"},
+ },
},
{
- Labels: map[string]string{"filename": "/var/hostlog/test.log", "job": "varlogs"},
+ Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "filename", Value: "/var/hostlog/test.log"},
+ {Key: "job", Value: "varlogs"},
+ },
},
{
- Labels: map[string]string{"filename": "/var/hostlog/test.log", "job": "varlogs"},
+ Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "filename", Value: "/var/hostlog/test.log"},
+ {Key: "job", Value: "varlogs"},
+ },
},
},
}, nil
@@ -828,10 +856,16 @@ func Test_series_splitByInterval_Do(t *testing.T) {
Version: 1,
Data: []logproto.SeriesIdentifier{
{
- Labels: map[string]string{"filename": "/var/hostlog/apport.log", "job": "varlogs"},
+ Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "filename", Value: "/var/hostlog/apport.log"},
+ {Key: "job", Value: "varlogs"},
+ },
},
{
- Labels: map[string]string{"filename": "/var/hostlog/test.log", "job": "varlogs"},
+ Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "filename", Value: "/var/hostlog/test.log"},
+ {Key: "job", Value: "varlogs"},
+ },
},
},
},
diff --git a/pkg/querier/queryrange/split_by_range.go b/pkg/querier/queryrange/split_by_range.go
index e3640761d57ec..6845846d4deaa 100644
--- a/pkg/querier/queryrange/split_by_range.go
+++ b/pkg/querier/queryrange/split_by_range.go
@@ -47,6 +47,11 @@ func NewSplitByRangeMiddleware(logger log.Logger, engineOpts logql.EngineOpts, l
func (s *splitByRange) Do(ctx context.Context, request queryrangebase.Request) (queryrangebase.Response, error) {
logger := util_log.WithContext(ctx, s.logger)
+ params, err := ParamsFromRequest(request)
+ if err != nil {
+ return nil, err
+ }
+
tenants, err := tenant.TenantIDs(ctx)
if err != nil {
return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error())
@@ -64,7 +69,7 @@ func (s *splitByRange) Do(ctx context.Context, request queryrangebase.Request) (
return nil, err
}
- noop, parsed, err := mapper.Parse(request.GetQuery())
+ noop, parsed, err := mapper.Parse(params.GetExpression())
if err != nil {
level.Warn(logger).Log("msg", "failed mapping AST", "err", err.Error(), "query", request.GetQuery())
return nil, err
@@ -80,16 +85,11 @@ func (s *splitByRange) Do(ctx context.Context, request queryrangebase.Request) (
queryStatsCtx := stats.FromContext(ctx)
queryStatsCtx.AddSplitQueries(int64(mapperStats.GetSplitQueries()))
- params, err := ParamsFromRequest(request)
- if err != nil {
- return nil, err
- }
-
if _, ok := request.(*LokiInstantRequest); !ok {
- return nil, fmt.Errorf("expected *LokiInstantRequest")
+ return nil, fmt.Errorf("expected *LokiInstantRequest, got %T", request)
}
- query := s.ng.Query(ctx, params, parsed)
+ query := s.ng.Query(ctx, logql.ParamsWithExpressionOverride{Params: params, ExpressionOverride: parsed})
res, err := query.Exec(ctx)
if err != nil {
diff --git a/pkg/querier/queryrange/split_by_range_test.go b/pkg/querier/queryrange/split_by_range_test.go
index c3b4587a1dbb1..ef25e3f910fb3 100644
--- a/pkg/querier/queryrange/split_by_range_test.go
+++ b/pkg/querier/queryrange/split_by_range_test.go
@@ -6,13 +6,14 @@ import (
"testing"
"time"
- "github.com/grafana/loki/pkg/loghttp"
-
"github.com/go-kit/log"
"github.com/grafana/dskit/user"
"github.com/stretchr/testify/require"
+ "github.com/grafana/loki/pkg/loghttp"
"github.com/grafana/loki/pkg/logproto"
+ "github.com/grafana/loki/pkg/logql/syntax"
+ "github.com/grafana/loki/pkg/querier/plan"
"github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"
)
@@ -37,6 +38,9 @@ func Test_RangeVectorSplit(t *testing.T) {
Query: `sum(bytes_over_time({app="foo"}[3m]))`,
TimeTs: time.Unix(1, 0),
Path: "/loki/api/v1/query",
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`sum(bytes_over_time({app="foo"}[3m]))`),
+ },
},
subQueries: []queryrangebase.RequestResponse{
subQueryRequestResponse(`sum(bytes_over_time({app="foo"}[1m]))`, 1),
@@ -50,6 +54,9 @@ func Test_RangeVectorSplit(t *testing.T) {
Query: `sum by (bar) (bytes_over_time({app="foo"}[3m]))`,
TimeTs: time.Unix(1, 0),
Path: "/loki/api/v1/query",
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`sum by (bar) (bytes_over_time({app="foo"}[3m]))`),
+ },
},
subQueries: []queryrangebase.RequestResponse{
subQueryRequestResponse(`sum by (bar)(bytes_over_time({app="foo"}[1m]))`, 10),
@@ -63,6 +70,9 @@ func Test_RangeVectorSplit(t *testing.T) {
Query: `sum(count_over_time({app="foo"}[3m]))`,
TimeTs: time.Unix(1, 0),
Path: "/loki/api/v1/query",
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`sum(count_over_time({app="foo"}[3m]))`),
+ },
},
subQueries: []queryrangebase.RequestResponse{
subQueryRequestResponse(`sum(count_over_time({app="foo"}[1m]))`, 1),
@@ -76,6 +86,9 @@ func Test_RangeVectorSplit(t *testing.T) {
Query: `sum by (bar) (count_over_time({app="foo"}[3m]))`,
TimeTs: time.Unix(1, 0),
Path: "/loki/api/v1/query",
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`sum by (bar) (count_over_time({app="foo"}[3m]))`),
+ },
},
subQueries: []queryrangebase.RequestResponse{
subQueryRequestResponse(`sum by (bar)(count_over_time({app="foo"}[1m]))`, 0),
@@ -89,6 +102,9 @@ func Test_RangeVectorSplit(t *testing.T) {
Query: `sum(sum_over_time({app="foo"} | unwrap bar [3m]))`,
TimeTs: time.Unix(1, 0),
Path: "/loki/api/v1/query",
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`sum(sum_over_time({app="foo"} | unwrap bar [3m]))`),
+ },
},
subQueries: []queryrangebase.RequestResponse{
subQueryRequestResponse(`sum(sum_over_time({app="foo"} | unwrap bar[1m]))`, 1),
@@ -102,6 +118,9 @@ func Test_RangeVectorSplit(t *testing.T) {
Query: `sum by (bar) (sum_over_time({app="foo"} | unwrap bar [3m]))`,
TimeTs: time.Unix(1, 0),
Path: "/loki/api/v1/query",
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`sum by (bar) (sum_over_time({app="foo"} | unwrap bar [3m]))`),
+ },
},
subQueries: []queryrangebase.RequestResponse{
subQueryRequestResponse(`sum by (bar)(sum_over_time({app="foo"} | unwrap bar[1m]))`, 1),
@@ -140,6 +159,9 @@ func subQueryRequestResponse(expectedSubQuery string, sampleValue float64) query
Query: expectedSubQuery,
TimeTs: time.Unix(1, 0),
Path: "/loki/api/v1/query",
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(expectedSubQuery),
+ },
},
Response: &LokiPromResponse{
Response: &queryrangebase.PrometheusResponse{
diff --git a/pkg/querier/queryrange/stats.go b/pkg/querier/queryrange/stats.go
index 0233d886c98f2..71f93959c3b69 100644
--- a/pkg/querier/queryrange/stats.go
+++ b/pkg/querier/queryrange/stats.go
@@ -53,13 +53,13 @@ func recordQueryMetrics(data *queryData) {
case queryTypeLog, queryTypeMetric:
logql.RecordRangeAndInstantQueryMetrics(data.ctx, logger, data.params, data.status, *data.statistics, data.result)
case queryTypeLabel:
- logql.RecordLabelQueryMetrics(data.ctx, logger, data.params.Start(), data.params.End(), data.label, data.params.Query(), data.status, *data.statistics)
+ logql.RecordLabelQueryMetrics(data.ctx, logger, data.params.Start(), data.params.End(), data.label, data.params.QueryString(), data.status, *data.statistics)
case queryTypeSeries:
logql.RecordSeriesQueryMetrics(data.ctx, logger, data.params.Start(), data.params.End(), data.match, data.status, []string{}, *data.statistics)
case queryTypeStats:
- logql.RecordStatsQueryMetrics(data.ctx, logger, data.params.Start(), data.params.End(), data.params.Query(), data.status, *data.statistics)
+ logql.RecordStatsQueryMetrics(data.ctx, logger, data.params.Start(), data.params.End(), data.params.QueryString(), data.status, *data.statistics)
case queryTypeVolume:
- logql.RecordVolumeQueryMetrics(data.ctx, logger, data.params.Start(), data.params.End(), data.params.Query(), data.params.Limit(), data.params.Step(), data.status, *data.statistics)
+ logql.RecordVolumeQueryMetrics(data.ctx, logger, data.params.Start(), data.params.End(), data.params.QueryString(), data.params.Limit(), data.params.Step(), data.status, *data.statistics)
default:
level.Error(logger).Log("msg", "failed to record query metrics", "err", fmt.Errorf("expected one of the *LokiRequest, *LokiInstantRequest, *LokiSeriesRequest, *LokiLabelNamesRequest, got %s", data.queryType))
}
diff --git a/pkg/querier/queryrange/stats_test.go b/pkg/querier/queryrange/stats_test.go
index 54c9004d88cec..28f8d12de7f6d 100644
--- a/pkg/querier/queryrange/stats_test.go
+++ b/pkg/querier/queryrange/stats_test.go
@@ -30,7 +30,7 @@ func TestStatsCollectorMiddleware(t *testing.T) {
Query: "foo",
StartTs: now,
})
- require.Equal(t, "foo", data.params.Query())
+ require.Equal(t, "foo", data.params.QueryString())
require.Equal(t, true, data.recorded)
require.Equal(t, now, data.params.Start())
require.Nil(t, data.statistics)
@@ -60,7 +60,7 @@ func TestStatsCollectorMiddleware(t *testing.T) {
Query: "foo",
StartTs: now,
})
- require.Equal(t, "foo", data.params.Query())
+ require.Equal(t, "foo", data.params.QueryString())
require.Equal(t, true, data.recorded)
require.Equal(t, now, data.params.Start())
require.Equal(t, int32(10), data.statistics.Ingester.TotalReached)
@@ -108,7 +108,7 @@ func Test_StatsHTTP(t *testing.T) {
}),
func(t *testing.T, data *queryData) {
require.Equal(t, fmt.Sprintf("%d", http.StatusOK), data.status)
- require.Equal(t, "foo", data.params.Query())
+ require.Equal(t, "foo", data.params.QueryString())
require.Equal(t, logproto.BACKWARD, data.params.Direction())
require.Equal(t, uint32(100), data.params.Limit())
require.Equal(t, stats.Result{}, *data.statistics)
@@ -129,7 +129,7 @@ func Test_StatsHTTP(t *testing.T) {
}),
func(t *testing.T, data *queryData) {
require.Equal(t, fmt.Sprintf("%d", http.StatusTeapot), data.status)
- require.Equal(t, "foo", data.params.Query())
+ require.Equal(t, "foo", data.params.QueryString())
require.Equal(t, logproto.BACKWARD, data.params.Direction())
require.Equal(t, uint32(100), data.params.Limit())
require.Equal(t, statsResult, *data.statistics)
@@ -151,7 +151,7 @@ func Test_StatsHTTP(t *testing.T) {
}),
func(t *testing.T, data *queryData) {
require.Equal(t, fmt.Sprintf("%d", http.StatusTeapot), data.status)
- require.Equal(t, "foo", data.params.Query())
+ require.Equal(t, "foo", data.params.QueryString())
require.Equal(t, logproto.BACKWARD, data.params.Direction())
require.Equal(t, uint32(100), data.params.Limit())
require.Equal(t, statsResult, *data.statistics)
@@ -173,7 +173,7 @@ func Test_StatsHTTP(t *testing.T) {
}),
func(t *testing.T, data *queryData) {
require.Equal(t, fmt.Sprintf("%d", http.StatusTeapot), data.status)
- require.Equal(t, "foo", data.params.Query())
+ require.Equal(t, "foo", data.params.QueryString())
require.Equal(t, uint32(100), data.params.Limit())
require.Equal(t, statsResult, *data.statistics)
require.Equal(t, streams, data.result)
diff --git a/pkg/querier/queryrange/views.go b/pkg/querier/queryrange/views.go
index 9b310e57a7cda..be9eee016b4b5 100644
--- a/pkg/querier/queryrange/views.go
+++ b/pkg/querier/queryrange/views.go
@@ -302,9 +302,9 @@ func (v *MergedSeriesResponseView) ForEachUniqueSeries(fn func(*SeriesIdentifier
func (v *MergedSeriesResponseView) Materialize() (*LokiSeriesResponse, error) {
mat := &LokiSeriesResponse{}
err := v.ForEachUniqueSeries(func(series *SeriesIdentifierView) error {
- identifier := logproto.SeriesIdentifier{Labels: make(map[string]string)}
+ identifier := logproto.SeriesIdentifier{Labels: make([]logproto.SeriesIdentifier_LabelsEntry, 0)}
err := series.ForEachLabel(func(name, value string) error {
- identifier.Labels[name] = value
+ identifier.Labels = append(identifier.Labels, logproto.SeriesIdentifier_LabelsEntry{Key: name, Value: value})
return nil
})
if err != nil {
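Much of the test churn in this and the surrounding files comes from `logproto.SeriesIdentifier.Labels` changing from `map[string]string` to an ordered slice of key/value entries, which is what the `Materialize` hunk above now appends to. A self-contained sketch of the conversion idea; the `LabelsEntry` struct is only a stand-in for the generated proto type:

```go
package main

import (
	"fmt"
	"sort"
)

// LabelsEntry mirrors the shape of an ordered key/value pair replacing a map entry.
type LabelsEntry struct {
	Key   string
	Value string
}

// fromMap converts a label map into a deterministic, sorted entry slice.
// Sorting keeps hashing and comparisons stable, which Go map iteration does not guarantee.
func fromMap(m map[string]string) []LabelsEntry {
	out := make([]LabelsEntry, 0, len(m))
	for k, v := range m {
		out = append(out, LabelsEntry{Key: k, Value: v})
	}
	sort.Slice(out, func(i, j int) bool { return out[i].Key < out[j].Key })
	return out
}

func main() {
	entries := fromMap(map[string]string{"job": "varlogs", "filename": "/var/hostlog/test.log"})
	fmt.Println(entries) // [{filename /var/hostlog/test.log} {job varlogs}]
}
```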
diff --git a/pkg/querier/queryrange/views_test.go b/pkg/querier/queryrange/views_test.go
index adcd9d41ff156..c4c28fe462c0d 100644
--- a/pkg/querier/queryrange/views_test.go
+++ b/pkg/querier/queryrange/views_test.go
@@ -26,9 +26,9 @@ func TestGetLokiSeriesResponse(t *testing.T) {
Status: "success",
Data: []logproto.SeriesIdentifier{
{
- Labels: map[string]string{
- "foo": "bar",
- "baz": "woof",
+ Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "foo", Value: "bar"},
+ {Key: "baz", Value: "woof"},
},
},
},
@@ -52,9 +52,9 @@ func TestGetLokiSeriesResponse(t *testing.T) {
func TestSeriesIdentifierViewHash(t *testing.T) {
identifier := &logproto.SeriesIdentifier{
- Labels: map[string]string{
- "foo": "bar",
- "baz": "woof",
+ Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "foo", Value: "bar"},
+ {Key: "baz", Value: "woof"},
},
}
@@ -70,14 +70,14 @@ func TestSeriesIdentifierViewHash(t *testing.T) {
require.NoError(t, err)
require.ElementsMatch(t, keyLabelPairs, []string{"baz\xffwoof\xff", "foo\xffbar\xff"})
- expected, _ := identifier.Hash(b, keyLabelPairs)
+ expected := identifier.Hash(b)
require.Equal(t, expected, actual)
}
func TestSeriesIdentifierViewForEachLabel(t *testing.T) {
identifier := &logproto.SeriesIdentifier{
- Labels: map[string]string{
- "foo": "bar",
- "baz": "woof",
+ Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "foo", Value: "bar"},
+ {Key: "baz", Value: "woof"},
},
}
@@ -100,10 +100,16 @@ func TestSeriesResponseViewForEach(t *testing.T) {
response := &LokiSeriesResponse{
Data: []logproto.SeriesIdentifier{
{
- Labels: map[string]string{"i": "1", "baz": "woof"},
+ Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "i", Value: "1"},
+ {Key: "baz", Value: "woof"},
+ },
},
{
- Labels: map[string]string{"i": "2", "foo": "bar"},
+ Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "i", Value: "2"},
+ {Key: "foo", Value: "bar"},
+ },
},
},
}
@@ -128,8 +134,7 @@ func TestSeriesResponseViewForEach(t *testing.T) {
expectedHashes := make([]uint64, 0)
for _, id := range response.Data {
b := make([]byte, 0, 1024)
- keyLabelPairs := make([]string, 0)
- hash, _ := id.Hash(b, keyLabelPairs)
+ hash := id.Hash(b)
expectedHashes = append(expectedHashes, hash)
}
require.ElementsMatch(t, expectedHashes, actualHashes)
@@ -140,20 +145,32 @@ func TestMergedViewDeduplication(t *testing.T) {
{
Data: []logproto.SeriesIdentifier{
{
- Labels: map[string]string{"i": "1", "baz": "woof"},
+ Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "i", Value: "1"},
+ {Key: "baz", Value: "woof"},
+ },
},
{
- Labels: map[string]string{"i": "2", "foo": "bar"},
+ Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "i", Value: "2"},
+ {Key: "foo", Value: "bar"},
+ },
},
},
},
{
Data: []logproto.SeriesIdentifier{
{
- Labels: map[string]string{"i": "3", "baz": "woof"},
+ Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "i", Value: "3"},
+ {Key: "baz", Value: "woof"},
+ },
},
{
- Labels: map[string]string{"i": "2", "foo": "bar"},
+ Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "i", Value: "2"},
+ {Key: "foo", Value: "bar"},
+ },
},
},
},
@@ -181,20 +198,32 @@ func TestMergedViewMaterialize(t *testing.T) {
{
Data: []logproto.SeriesIdentifier{
{
- Labels: map[string]string{"i": "1", "baz": "woof"},
+ Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "i", Value: "1"},
+ {Key: "baz", Value: "woof"},
+ },
},
{
- Labels: map[string]string{"i": "2", "foo": "bar"},
+ Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "i", Value: "2"},
+ {Key: "foo", Value: "bar"},
+ },
},
},
},
{
Data: []logproto.SeriesIdentifier{
{
- Labels: map[string]string{"i": "3", "baz": "woof"},
+ Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "i", Value: "3"},
+ {Key: "baz", Value: "woof"},
+ },
},
{
- Labels: map[string]string{"i": "2", "foo": "bar"},
+ Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "i", Value: "2"},
+ {Key: "foo", Value: "bar"},
+ },
},
},
},
@@ -213,9 +242,13 @@ func TestMergedViewMaterialize(t *testing.T) {
require.Len(t, mat.Data, 3)
series := make([]string, 0)
for _, d := range mat.Data {
- series = append(series, labels.FromMap(d.Labels).String())
+ l := make([]labels.Label, 0, len(d.Labels))
+ for _, p := range d.Labels {
+ l = append(l, labels.Label{Name: p.Key, Value: p.Value})
+ }
+ series = append(series, labels.Labels(l).String())
}
- expected := []string{`{baz="woof", i="1"}`, `{baz="woof", i="3"}`, `{foo="bar", i="2"}`}
+ expected := []string{`{i="1", baz="woof"}`, `{i="3", baz="woof"}`, `{i="2", foo="bar"}`}
require.ElementsMatch(t, series, expected)
}
@@ -224,13 +257,22 @@ func TestMergedViewJSON(t *testing.T) {
response := &LokiSeriesResponse{
Data: []logproto.SeriesIdentifier{
{
- Labels: map[string]string{"i": "1", "baz": "woof"},
+ Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "i", Value: "1"},
+ {Key: "baz", Value: "woof"},
+ },
},
{
- Labels: map[string]string{"i": "2", "foo": "bar"},
+ Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "i", Value: "2"},
+ {Key: "foo", Value: "bar"},
+ },
},
{
- Labels: map[string]string{"i": "3", "baz": "woof"},
+ Labels: []logproto.SeriesIdentifier_LabelsEntry{
+ {Key: "i", Value: "3"},
+ {Key: "baz", Value: "woof"},
+ },
},
},
}
diff --git a/pkg/querier/queryrange/volume_cache.go b/pkg/querier/queryrange/volume_cache.go
index 0c54745654004..954c642ffef8b 100644
--- a/pkg/querier/queryrange/volume_cache.go
+++ b/pkg/querier/queryrange/volume_cache.go
@@ -15,6 +15,7 @@ import (
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"
"github.com/grafana/loki/pkg/storage/chunk/cache"
+ "github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
"github.com/grafana/loki/pkg/util"
"github.com/grafana/loki/pkg/util/validation"
)
@@ -24,7 +25,7 @@ type VolumeSplitter struct {
}
// GenerateCacheKey generates a cache key based on the userID, Request and interval.
-func (i VolumeSplitter) GenerateCacheKey(ctx context.Context, userID string, r queryrangebase.Request) string {
+func (i VolumeSplitter) GenerateCacheKey(ctx context.Context, userID string, r resultscache.Request) string {
cacheKey := i.cacheKeyLimits.GenerateCacheKey(ctx, userID, r)
volumeReq := r.(*logproto.VolumeRequest)
@@ -38,7 +39,7 @@ type VolumeExtractor struct{}
// Extract favors the ability to cache over exactness of results. It assumes a constant distribution
// of log volumes over a range and will extract subsets proportionally.
-func (p VolumeExtractor) Extract(start, end int64, res queryrangebase.Response, resStart, resEnd int64) queryrangebase.Response {
+func (p VolumeExtractor) Extract(start, end int64, res resultscache.Response, resStart, resEnd int64) resultscache.Response {
factor := util.GetFactorOfTime(start, end, resStart, resEnd)
volumeRes := res.(*VolumeResponse)
@@ -101,7 +102,7 @@ func NewVolumeCacheMiddleware(
c cache.Cache,
cacheGenNumberLoader queryrangebase.CacheGenNumberLoader,
shouldCache queryrangebase.ShouldCacheFn,
- parallelismForReq func(ctx context.Context, tenantIDs []string, r queryrangebase.Request) int,
+ parallelismForReq queryrangebase.ParallelismForReqFn,
retentionEnabled bool,
transformer UserIDTransformer,
metrics *queryrangebase.ResultsCacheMetrics,
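The `VolumeExtractor.Extract` change above is only a signature change; the approximation stays the same: when a cached volume response only partly overlaps the requested range, counts are scaled by the overlapping fraction of time, assuming volume is evenly spread across the range. A tiny self-contained sketch of such a factor (not the real `util.GetFactorOfTime`):

```go
package main

import "fmt"

// overlapFactor returns the fraction of the cached result's time range
// [resStart, resEnd) that overlaps the requested range [start, end).
// Timestamps are plain int64 milliseconds here for simplicity.
func overlapFactor(start, end, resStart, resEnd int64) float64 {
	lo, hi := max64(start, resStart), min64(end, resEnd)
	if hi <= lo || resEnd <= resStart {
		return 0
	}
	return float64(hi-lo) / float64(resEnd-resStart)
}

func max64(a, b int64) int64 {
	if a > b {
		return a
	}
	return b
}

func min64(a, b int64) int64 {
	if a < b {
		return a
	}
	return b
}

func main() {
	// A cached 60s volume of 600 bytes, of which the request overlaps the last 15s:
	factor := overlapFactor(45_000, 60_000, 0, 60_000)
	fmt.Println(factor, 600*factor) // 0.25 150
}
```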
diff --git a/pkg/querier/queryrange/volume_cache_test.go b/pkg/querier/queryrange/volume_cache_test.go
index ebe9ef8094b83..904e0fc7c3a99 100644
--- a/pkg/querier/queryrange/volume_cache_test.go
+++ b/pkg/querier/queryrange/volume_cache_test.go
@@ -10,6 +10,8 @@ import (
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
+ "github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
+
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/logqlmodel/stats"
"github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"
@@ -22,8 +24,10 @@ import (
func TestVolumeCache(t *testing.T) {
setup := func(volResp *VolumeResponse) (*int, queryrangebase.Handler) {
cfg := queryrangebase.ResultsCacheConfig{
- CacheConfig: cache.Config{
- Cache: cache.NewMockCache(),
+ Config: resultscache.Config{
+ CacheConfig: cache.Config{
+ Cache: cache.NewMockCache(),
+ },
},
}
c, err := cache.New(cfg.CacheConfig, nil, log.NewNopLogger(), stats.ResultCache, constants.Loki)
@@ -281,8 +285,10 @@ func TestVolumeCache_RecentData(t *testing.T) {
} {
t.Run(tc.name, func(t *testing.T) {
cfg := queryrangebase.ResultsCacheConfig{
- CacheConfig: cache.Config{
- Cache: cache.NewMockCache(),
+ Config: resultscache.Config{
+ CacheConfig: cache.Config{
+ Cache: cache.NewMockCache(),
+ },
},
}
c, err := cache.New(cfg.CacheConfig, nil, log.NewNopLogger(), stats.ResultCache, constants.Loki)
diff --git a/pkg/querier/worker/frontend_processor.go b/pkg/querier/worker/frontend_processor.go
index 3e77c3f0e91a8..45c61862d0598 100644
--- a/pkg/querier/worker/frontend_processor.go
+++ b/pkg/querier/worker/frontend_processor.go
@@ -58,7 +58,7 @@ func (fp *frontendProcessor) notifyShutdown(ctx context.Context, conn *grpc.Clie
}
// runOne loops, trying to establish a stream to the frontend to begin request processing.
-func (fp *frontendProcessor) processQueriesOnSingleStream(ctx context.Context, conn *grpc.ClientConn, address string) {
+func (fp *frontendProcessor) processQueriesOnSingleStream(ctx context.Context, conn *grpc.ClientConn, address, _ string) {
client := frontendv1pb.NewFrontendClient(conn)
backoff := backoff.New(ctx, processorBackoffConfig)
diff --git a/pkg/querier/worker/frontend_processor_test.go b/pkg/querier/worker/frontend_processor_test.go
index e446500dd804b..cecdb7bfe27d3 100644
--- a/pkg/querier/worker/frontend_processor_test.go
+++ b/pkg/querier/worker/frontend_processor_test.go
@@ -39,7 +39,7 @@ func TestRecvFailDoesntCancelProcess(t *testing.T) {
running.Store(true)
defer running.Store(false)
- mgr.processQueriesOnSingleStream(ctx, cc, "test:12345")
+ mgr.processQueriesOnSingleStream(ctx, cc, "test:12345", "")
}()
test.Poll(t, time.Second, true, func() interface{} {
diff --git a/pkg/querier/worker/processor_manager.go b/pkg/querier/worker/processor_manager.go
index 5d675c88a6576..3a2c8c338865d 100644
--- a/pkg/querier/worker/processor_manager.go
+++ b/pkg/querier/worker/processor_manager.go
@@ -2,6 +2,7 @@ package worker
import (
"context"
+ "strconv"
"sync"
"time"
@@ -64,7 +65,9 @@ func (pm *processorManager) concurrency(n int) {
n = 0
}
+ workerID := 0
for len(pm.cancels) < n {
+ workerID++
ctx, cancel := context.WithCancel(pm.ctx)
pm.cancels = append(pm.cancels, cancel)
@@ -75,7 +78,7 @@ func (pm *processorManager) concurrency(n int) {
pm.currentProcessors.Inc()
defer pm.currentProcessors.Dec()
- pm.p.processQueriesOnSingleStream(ctx, pm.conn, pm.address)
+ pm.p.processQueriesOnSingleStream(ctx, pm.conn, pm.address, strconv.Itoa(workerID))
}()
}
diff --git a/pkg/querier/worker/scheduler_processor.go b/pkg/querier/worker/scheduler_processor.go
index 15e3985b60fbd..16d0e59d1ed14 100644
--- a/pkg/querier/worker/scheduler_processor.go
+++ b/pkg/querier/worker/scheduler_processor.go
@@ -83,7 +83,7 @@ func (sp *schedulerProcessor) notifyShutdown(ctx context.Context, conn *grpc.Cli
}
}
-func (sp *schedulerProcessor) processQueriesOnSingleStream(workerCtx context.Context, conn *grpc.ClientConn, address string) {
+func (sp *schedulerProcessor) processQueriesOnSingleStream(workerCtx context.Context, conn *grpc.ClientConn, address, workerID string) {
schedulerClient := sp.schedulerClientFactory(conn)
// Run the querier loop (and so all the queries) in a dedicated context that we call the "execution context".
@@ -104,7 +104,7 @@ func (sp *schedulerProcessor) processQueriesOnSingleStream(workerCtx context.Con
continue
}
- if err := sp.querierLoop(c, address, inflightQuery); err != nil {
+ if err := sp.querierLoop(c, address, inflightQuery, workerID); err != nil {
// Do not log an error if the query-scheduler is shutting down.
if s, ok := status.FromError(err); !ok || !strings.Contains(s.Message(), schedulerpb.ErrSchedulerIsNotRunning.Error()) {
level.Error(sp.log).Log("msg", "error processing requests from scheduler", "err", err, "addr", address)
@@ -119,17 +119,20 @@ func (sp *schedulerProcessor) processQueriesOnSingleStream(workerCtx context.Con
}
// process loops processing requests on an established stream.
-func (sp *schedulerProcessor) querierLoop(c schedulerpb.SchedulerForQuerier_QuerierLoopClient, address string, inflightQuery *atomic.Bool) error {
+func (sp *schedulerProcessor) querierLoop(c schedulerpb.SchedulerForQuerier_QuerierLoopClient, address string, inflightQuery *atomic.Bool, workerID string) error {
// Build a child context so we can cancel a query when the stream is closed.
ctx, cancel := context.WithCancel(c.Context())
defer cancel()
for {
+ start := time.Now()
request, err := c.Recv()
if err != nil {
return err
}
+ level.Debug(sp.log).Log("msg", "received query", "worker", workerID, "wait_time_sec", time.Since(start).Seconds())
+
inflightQuery.Store(true)
// Handle the request on a "background" goroutine, so we go back to
diff --git a/pkg/querier/worker/scheduler_processor_test.go b/pkg/querier/worker/scheduler_processor_test.go
index b1971bdd76077..154ba1ae4fa73 100644
--- a/pkg/querier/worker/scheduler_processor_test.go
+++ b/pkg/querier/worker/scheduler_processor_test.go
@@ -41,7 +41,7 @@ func TestSchedulerProcessor_processQueriesOnSingleStream(t *testing.T) {
requestHandler.On("Do", mock.Anything, mock.Anything).Return(&queryrange.LokiResponse{}, nil)
- sp.processQueriesOnSingleStream(workerCtx, nil, "127.0.0.1")
+ sp.processQueriesOnSingleStream(workerCtx, nil, "127.0.0.1", "1")
// We expect at this point, the execution context has been canceled too.
require.Error(t, loopClient.Context().Err())
@@ -91,7 +91,7 @@ func TestSchedulerProcessor_processQueriesOnSingleStream(t *testing.T) {
}).Return(&queryrange.LokiResponse{}, nil)
startTime := time.Now()
- sp.processQueriesOnSingleStream(workerCtx, nil, "127.0.0.1")
+ sp.processQueriesOnSingleStream(workerCtx, nil, "127.0.0.1", "1")
assert.GreaterOrEqual(t, time.Since(startTime), time.Second)
// We expect at this point, the execution context has been canceled too.
@@ -122,7 +122,7 @@ func TestSchedulerProcessor_processQueriesOnSingleStream(t *testing.T) {
requestHandler.On("Do", mock.Anything, mock.Anything).Return(&queryrange.LokiResponse{}, nil)
- sp.processQueriesOnSingleStream(workerCtx, nil, "127.0.0.1")
+ sp.processQueriesOnSingleStream(workerCtx, nil, "127.0.0.1", "1")
// We expect no error in the log.
assert.NotContains(t, logs.String(), "error")
diff --git a/pkg/querier/worker/util.go b/pkg/querier/worker/util.go
index 7de49d179089a..812236809a097 100644
--- a/pkg/querier/worker/util.go
+++ b/pkg/querier/worker/util.go
@@ -139,10 +139,7 @@ func handleQueryRequest(ctx context.Context, request *queryrange.QueryRequest, h
// This block covers any errors that are not gRPC errors and will include all query errors.
// It's important to map non-retryable errors to a non 5xx status code so they will not be retried.
- code, err := server.ClientHTTPStatusAndError(err)
- return &queryrange.QueryResponse{
- Status: status.New(codes.Code(code), err.Error()).Proto(),
- }
+ return queryrange.QueryResponseWrapError(err)
}
response, err := queryrange.QueryResponseWrap(resp)
diff --git a/pkg/querier/worker/util_test.go b/pkg/querier/worker/util_test.go
index a0213e3bb708b..25dd8127a0da4 100644
--- a/pkg/querier/worker/util_test.go
+++ b/pkg/querier/worker/util_test.go
@@ -61,7 +61,7 @@ func TestHandleQueryRequest(t *testing.T) {
} {
t.Run(name, func(t *testing.T) {
ctx := user.InjectOrgID(context.Background(), "1")
- request, err := queryrange.DefaultCodec.QueryRequestWrap(ctx, &queryrange.LokiRequest{})
+ request, err := queryrange.DefaultCodec.QueryRequestWrap(ctx, &queryrange.LokiRequest{Query: `{app="foo"}`})
require.NoError(t, err)
mockHandler := HandlerFunc(func(context.Context, queryrangebase.Request) (queryrangebase.Response, error) {
diff --git a/pkg/querier/worker/worker.go b/pkg/querier/worker/worker.go
index 055b7b5c92717..b2e50b205d143 100644
--- a/pkg/querier/worker/worker.go
+++ b/pkg/querier/worker/worker.go
@@ -20,7 +20,6 @@ import (
"github.com/grafana/loki/pkg/querier/queryrange"
"github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"
"github.com/grafana/loki/pkg/util"
- lokiutil "github.com/grafana/loki/pkg/util"
)
type Config struct {
@@ -71,7 +70,7 @@ type processor interface {
// This method must react on context being finished, and stop when that happens.
//
// processorManager (not processor) is responsible for starting as many goroutines as needed for each connection.
- processQueriesOnSingleStream(ctx context.Context, conn *grpc.ClientConn, address string)
+ processQueriesOnSingleStream(ctx context.Context, conn *grpc.ClientConn, address, workerID string)
// notifyShutdown notifies the remote query-frontend or query-scheduler that the querier is
// shutting down.
@@ -151,7 +150,7 @@ func newQuerierWorkerWithProcessor(cfg Config, metrics *Metrics, logger log.Logg
}
if ring != nil {
- w, err := lokiutil.NewRingWatcher(log.With(logger, "component", "querier-scheduler-worker"), ring, cfg.DNSLookupPeriod, f)
+ w, err := util.NewRingWatcher(log.With(logger, "component", "querier-scheduler-worker"), ring, cfg.DNSLookupPeriod, f)
if err != nil {
return nil, err
}
diff --git a/pkg/querier/worker/worker_test.go b/pkg/querier/worker/worker_test.go
index 2f1ccb98d3097..68791b214f178 100644
--- a/pkg/querier/worker/worker_test.go
+++ b/pkg/querier/worker/worker_test.go
@@ -88,7 +88,7 @@ func getConcurrentProcessors(w *querierWorker) int {
type mockProcessor struct{}
-func (m mockProcessor) processQueriesOnSingleStream(ctx context.Context, _ *grpc.ClientConn, _ string) {
+func (m mockProcessor) processQueriesOnSingleStream(ctx context.Context, _ *grpc.ClientConn, _, _ string) {
<-ctx.Done()
}
diff --git a/pkg/querier/worker_service.go b/pkg/querier/worker_service.go
index d0837e4180652..f95da0eba16d4 100644
--- a/pkg/querier/worker_service.go
+++ b/pkg/querier/worker_service.go
@@ -1,8 +1,10 @@
package querier
import (
- "fmt"
+ "net"
+ "strconv"
+ "github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/grafana/dskit/ring"
"github.com/grafana/dskit/services"
@@ -52,6 +54,7 @@ func (cfg WorkerServiceConfig) QuerierRunningStandalone() bool {
// HTTP router for the Prometheus API routes. Then the external HTTP server will be passed
// as a http.Handler to the frontend worker.
func InitWorkerService(
+ logger log.Logger,
cfg WorkerServiceConfig,
reg prometheus.Registerer,
handler queryrangebase.Handler,
@@ -76,7 +79,7 @@ func InitWorkerService(
*(cfg.QuerierWorkerConfig),
cfg.SchedulerRing,
handler,
- util_log.Logger,
+ logger,
reg,
codec,
)
@@ -89,7 +92,7 @@ func InitWorkerService(
if cfg.GrpcListenAddress != "" {
listenAddress = cfg.GrpcListenAddress
}
- address := fmt.Sprintf("%s:%d", listenAddress, cfg.GrpcListenPort)
+ address := net.JoinHostPort(listenAddress, strconv.Itoa(cfg.GrpcListenPort))
level.Warn(util_log.Logger).Log(
"msg", "Worker address is empty, attempting automatic worker configuration. If queries are unresponsive consider configuring the worker explicitly.",
"address", address)
@@ -102,7 +105,7 @@ func InitWorkerService(
*(cfg.QuerierWorkerConfig),
cfg.SchedulerRing,
handler,
- util_log.Logger,
+ logger,
reg,
codec,
)
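The switch from `fmt.Sprintf("%s:%d", ...)` to `net.JoinHostPort` above matters for IPv6 listen addresses, which need brackets before the port. For illustration:

```go
package main

import (
	"fmt"
	"net"
	"strconv"
)

func main() {
	// net.JoinHostPort brackets IPv6 literals, which "%s:%d" never does.
	fmt.Println(fmt.Sprintf("%s:%d", "::1", 9095))           // ::1:9095 (ambiguous)
	fmt.Println(net.JoinHostPort("::1", strconv.Itoa(9095))) // [::1]:9095
	fmt.Println(net.JoinHostPort("127.0.0.1", "9095"))       // 127.0.0.1:9095
}
```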
diff --git a/pkg/queue/dequeue_qos_test.go b/pkg/queue/dequeue_qos_test.go
index 6b1de885943a3..c889cbe8f4c60 100644
--- a/pkg/queue/dequeue_qos_test.go
+++ b/pkg/queue/dequeue_qos_test.go
@@ -44,7 +44,7 @@ func enqueueRequestsForActor(t testing.TB, actor []string, useActor bool, queue
if !useActor {
actor = nil
}
- err := queue.Enqueue("tenant", actor, r, 0, nil)
+ err := queue.Enqueue("tenant", actor, r, nil)
if err != nil {
t.Fatal(err)
}
@@ -58,7 +58,7 @@ func BenchmarkQueryFairness(t *testing.B) {
for _, useActor := range []bool{false, true} {
t.Run(fmt.Sprintf("use hierarchical queues = %v", useActor), func(t *testing.B) {
- requestQueue := NewRequestQueue(1024, 0, NewMetrics(nil, constants.Loki, "query_scheduler"))
+ requestQueue := NewRequestQueue(1024, 0, noQueueLimits, NewMetrics(nil, constants.Loki, "query_scheduler"))
enqueueRequestsForActor(t, []string{}, useActor, requestQueue, numSubRequestsActorA, 50*time.Millisecond)
enqueueRequestsForActor(t, []string{"a"}, useActor, requestQueue, numSubRequestsActorA, 100*time.Millisecond)
enqueueRequestsForActor(t, []string{"b"}, useActor, requestQueue, numSubRequestsActorB, 50*time.Millisecond)
@@ -133,18 +133,18 @@ func TestQueryFairnessAcrossSameLevel(t *testing.T) {
456: [210]
**/
- requestQueue := NewRequestQueue(1024, 0, NewMetrics(nil, constants.Loki, "query_scheduler"))
- _ = requestQueue.Enqueue("tenant1", []string{}, r(0), 0, nil)
- _ = requestQueue.Enqueue("tenant1", []string{}, r(1), 0, nil)
- _ = requestQueue.Enqueue("tenant1", []string{}, r(2), 0, nil)
- _ = requestQueue.Enqueue("tenant1", []string{"abc"}, r(10), 0, nil)
- _ = requestQueue.Enqueue("tenant1", []string{"abc"}, r(11), 0, nil)
- _ = requestQueue.Enqueue("tenant1", []string{"abc"}, r(12), 0, nil)
- _ = requestQueue.Enqueue("tenant1", []string{"xyz"}, r(20), 0, nil)
- _ = requestQueue.Enqueue("tenant1", []string{"xyz"}, r(21), 0, nil)
- _ = requestQueue.Enqueue("tenant1", []string{"xyz"}, r(22), 0, nil)
- _ = requestQueue.Enqueue("tenant1", []string{"xyz", "123"}, r(200), 0, nil)
- _ = requestQueue.Enqueue("tenant1", []string{"xyz", "456"}, r(210), 0, nil)
+ requestQueue := NewRequestQueue(1024, 0, noQueueLimits, NewMetrics(nil, constants.Loki, "query_scheduler"))
+ _ = requestQueue.Enqueue("tenant1", []string{}, r(0), nil)
+ _ = requestQueue.Enqueue("tenant1", []string{}, r(1), nil)
+ _ = requestQueue.Enqueue("tenant1", []string{}, r(2), nil)
+ _ = requestQueue.Enqueue("tenant1", []string{"abc"}, r(10), nil)
+ _ = requestQueue.Enqueue("tenant1", []string{"abc"}, r(11), nil)
+ _ = requestQueue.Enqueue("tenant1", []string{"abc"}, r(12), nil)
+ _ = requestQueue.Enqueue("tenant1", []string{"xyz"}, r(20), nil)
+ _ = requestQueue.Enqueue("tenant1", []string{"xyz"}, r(21), nil)
+ _ = requestQueue.Enqueue("tenant1", []string{"xyz"}, r(22), nil)
+ _ = requestQueue.Enqueue("tenant1", []string{"xyz", "123"}, r(200), nil)
+ _ = requestQueue.Enqueue("tenant1", []string{"xyz", "456"}, r(210), nil)
requestQueue.queues.recomputeUserConsumers()
items := make([]int, 0)
diff --git a/pkg/queue/metrics.go b/pkg/queue/metrics.go
index 5d00edb1a3b16..769fb51c23708 100644
--- a/pkg/queue/metrics.go
+++ b/pkg/queue/metrics.go
@@ -6,10 +6,9 @@ import (
)
type Metrics struct {
- queueLength *prometheus.GaugeVec // Per tenant
- discardedRequests *prometheus.CounterVec // Per tenant
- enqueueCount *prometheus.CounterVec // Per tenant and level
- querierWaitTime *prometheus.HistogramVec // Per querier wait time
+ queueLength *prometheus.GaugeVec // Per tenant
+ discardedRequests *prometheus.CounterVec // Per tenant
+ enqueueCount *prometheus.CounterVec // Per tenant and level
}
func NewMetrics(registerer prometheus.Registerer, metricsNamespace, subsystem string) *Metrics {
@@ -32,13 +31,6 @@ func NewMetrics(registerer prometheus.Registerer, metricsNamespace, subsystem st
Name: "enqueue_count",
Help: "Total number of enqueued (sub-)queries.",
}, []string{"user", "level"}),
- querierWaitTime: promauto.With(registerer).NewHistogramVec(prometheus.HistogramOpts{
- Namespace: metricsNamespace,
- Subsystem: subsystem,
- Name: "querier_wait_seconds",
- Help: "Time spend waiting for new requests.",
- Buckets: []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10, 30, 60, 120, 240},
- }, []string{"querier"}),
}
}
diff --git a/pkg/queue/queue.go b/pkg/queue/queue.go
index fa1860e4e88d3..006106aa44a61 100644
--- a/pkg/queue/queue.go
+++ b/pkg/queue/queue.go
@@ -39,6 +39,11 @@ func (ui QueueIndex) ReuseLastIndex() QueueIndex {
return ui - 1
}
+type Limits interface {
+ // MaxConsumers returns the max consumers to use per tenant or 0 to allow all consumers to consume from the queue.
+ MaxConsumers(user string, allConsumers int) int
+}
+
// Request stored into the queue.
type Request any
@@ -59,13 +64,15 @@ type RequestQueue struct {
stopped bool
metrics *Metrics
+ pool *SlicePool[Request]
}
-func NewRequestQueue(maxOutstandingPerTenant int, forgetDelay time.Duration, metrics *Metrics) *RequestQueue {
+func NewRequestQueue(maxOutstandingPerTenant int, forgetDelay time.Duration, limits Limits, metrics *Metrics) *RequestQueue {
q := &RequestQueue{
- queues: newTenantQueues(maxOutstandingPerTenant, forgetDelay),
+ queues: newTenantQueues(maxOutstandingPerTenant, forgetDelay, limits),
connectedConsumers: atomic.NewInt32(0),
metrics: metrics,
+ pool: NewSlicePool[Request](1<<6, 1<<10, 2), // Buckets are [64, 128, 256, 512, 1024].
}
q.cond = contextCond{Cond: sync.NewCond(&q.mtx)}
@@ -74,12 +81,9 @@ func NewRequestQueue(maxOutstandingPerTenant int, forgetDelay time.Duration, met
return q
}
-// Enqueue puts the request into the queue. MaxQueries is tenant-specific value that specifies how many queriers can
-// this tenant use (zero or negative = all queriers). It is passed to each Enqueue, because it can change
-// between calls.
-//
+// Enqueue puts the request into the queue.
// If request is successfully enqueued, successFn is called with the lock held, before any querier can receive the request.
-func (q *RequestQueue) Enqueue(tenant string, path []string, req Request, maxQueriers int, successFn func()) error {
+func (q *RequestQueue) Enqueue(tenant string, path []string, req Request, successFn func()) error {
q.mtx.Lock()
defer q.mtx.Unlock()
@@ -87,10 +91,9 @@ func (q *RequestQueue) Enqueue(tenant string, path []string, req Request, maxQue
return ErrStopped
}
- queue := q.queues.getOrAddQueue(tenant, path, maxQueriers)
- if queue == nil {
- // This can only happen if tenant is "".
- return errors.New("no queue found")
+ queue, err := q.queues.getOrAddQueue(tenant, path)
+ if err != nil {
+ return fmt.Errorf("no queue found: %w", err)
}
// Optimistically increase queue counter for tenant instead of doing separate
@@ -125,6 +128,41 @@ func (q *RequestQueue) Enqueue(tenant string, path []string, req Request, maxQue
}
}
+// ReleaseRequests returns items back to the slice pool.
+// Must only be called in combination with DequeueMany().
+func (q *RequestQueue) ReleaseRequests(items []Request) {
+ q.pool.Put(items)
+}
+
+// DequeueMany consumes multiple items for a single tenant from the queue.
+// It dequeues up to maxItems requests, waiting at most maxWait for them to become available.
+// The caller is responsible for returning the dequeued requests back to the
+// pool by calling ReleaseRequests(items).
+func (q *RequestQueue) DequeueMany(ctx context.Context, last QueueIndex, consumerID string, maxItems int, maxWait time.Duration) ([]Request, QueueIndex, error) {
+	// Create a context for dequeuing with the maximum time we are willing to wait to fulfill maxItems.
+
+ dequeueCtx, cancel := context.WithTimeout(ctx, maxWait)
+ defer cancel()
+
+ var idx QueueIndex
+
+ items := q.pool.Get(maxItems)
+ for {
+ item, newIdx, err := q.Dequeue(dequeueCtx, last, consumerID)
+ if err != nil {
+ if err == context.DeadlineExceeded {
+ err = nil
+ }
+ return items, idx, err
+ }
+ items = append(items, item)
+ idx = newIdx
+ if len(items) == maxItems {
+ return items, idx, nil
+ }
+ }
+}
+
// Dequeue find next tenant queue and takes the next request off of it. Will block if there are no requests.
// By passing tenant index from previous call of this method, querier guarantees that it iterates over all tenants fairly.
// If consumer finds that request from the tenant is already expired, it can get a request for the same tenant by using UserIndex.ReuseLastUser.
@@ -138,9 +176,7 @@ FindQueue:
// We need to wait if there are no tenants, or no pending requests for given querier.
for (q.queues.hasNoTenantQueues() || querierWait) && ctx.Err() == nil && !q.stopped {
querierWait = false
- start := time.Now()
q.cond.Wait(ctx)
- q.metrics.querierWaitTime.WithLabelValues(consumerID).Observe(time.Since(start).Seconds())
}
if q.stopped {
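
For context, here is a minimal sketch of how a consumer is expected to pair the new `DequeueMany` and `ReleaseRequests` calls. The `consumeBatches` helper and the `handle` callback are hypothetical; only the `RequestQueue` API from the change above is assumed:

```go
package main

import (
	"context"
	"time"

	"github.com/grafana/loki/pkg/queue"
)

// consumeBatches is a hypothetical consumer loop. Every slice obtained from
// DequeueMany is handed back to the pool via ReleaseRequests once its
// requests have been processed, as the doc comments above require.
func consumeBatches(ctx context.Context, q *queue.RequestQueue, consumerID string, handle func(queue.Request)) error {
	last := queue.StartIndex
	for {
		// Dequeue up to 32 requests, waiting at most 100ms to fill the batch.
		items, idx, err := q.DequeueMany(ctx, last, consumerID, 32, 100*time.Millisecond)
		for _, req := range items {
			handle(req)
		}
		q.ReleaseRequests(items) // return the pooled slice, even when it is partial
		if err != nil {
			return err
		}
		last = idx
	}
}
```
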
diff --git a/pkg/queue/queue_test.go b/pkg/queue/queue_test.go
index a2cb42441c02e..623e240733886 100644
--- a/pkg/queue/queue_test.go
+++ b/pkg/queue/queue_test.go
@@ -47,7 +47,7 @@ func BenchmarkGetNextRequest(b *testing.B) {
queues := make([]*RequestQueue, 0, b.N)
for n := 0; n < b.N; n++ {
- queue := NewRequestQueue(maxOutstandingPerTenant, 0, NewMetrics(nil, constants.Loki, "query_scheduler"))
+ queue := NewRequestQueue(maxOutstandingPerTenant, 0, noQueueLimits, NewMetrics(nil, constants.Loki, "query_scheduler"))
queues = append(queues, queue)
for ix := 0; ix < queriers; ix++ {
@@ -57,7 +57,7 @@ func BenchmarkGetNextRequest(b *testing.B) {
for i := 0; i < maxOutstandingPerTenant; i++ {
for j := 0; j < numTenants; j++ {
userID := strconv.Itoa(j)
- err := queue.Enqueue(userID, benchCase.fn(j), "request", 0, nil)
+ err := queue.Enqueue(userID, benchCase.fn(j), "request", nil)
if err != nil {
b.Fatal(err)
}
@@ -105,7 +105,7 @@ func BenchmarkQueueRequest(b *testing.B) {
requests := make([]string, 0, numTenants)
for n := 0; n < b.N; n++ {
- q := NewRequestQueue(maxOutstandingPerTenant, 0, NewMetrics(nil, constants.Loki, "query_scheduler"))
+ q := NewRequestQueue(maxOutstandingPerTenant, 0, noQueueLimits, NewMetrics(nil, constants.Loki, "query_scheduler"))
for ix := 0; ix < queriers; ix++ {
q.RegisterConsumerConnection(fmt.Sprintf("querier-%d", ix))
@@ -123,7 +123,7 @@ func BenchmarkQueueRequest(b *testing.B) {
for n := 0; n < b.N; n++ {
for i := 0; i < maxOutstandingPerTenant; i++ {
for j := 0; j < numTenants; j++ {
- err := queues[n].Enqueue(users[j], nil, requests[j], 0, nil)
+ err := queues[n].Enqueue(users[j], nil, requests[j], nil)
if err != nil {
b.Fatal(err)
}
@@ -135,7 +135,7 @@ func BenchmarkQueueRequest(b *testing.B) {
func TestRequestQueue_GetNextRequestForQuerier_ShouldGetRequestAfterReshardingBecauseQuerierHasBeenForgotten(t *testing.T) {
const forgetDelay = 3 * time.Second
- queue := NewRequestQueue(1, forgetDelay, NewMetrics(nil, constants.Loki, "query_scheduler"))
+ queue := NewRequestQueue(1, forgetDelay, &mockQueueLimits{maxConsumers: 1}, NewMetrics(nil, constants.Loki, "query_scheduler"))
// Start the queue service.
ctx := context.Background()
@@ -162,7 +162,7 @@ func TestRequestQueue_GetNextRequestForQuerier_ShouldGetRequestAfterReshardingBe
// Enqueue a request from an user which would be assigned to querier-1.
// NOTE: "user-1" hash falls in the querier-1 shard.
- require.NoError(t, queue.Enqueue("user-1", nil, "request", 1, nil))
+ require.NoError(t, queue.Enqueue("user-1", nil, "request", nil))
startTime := time.Now()
querier2wg.Wait()
@@ -306,17 +306,17 @@ func TestContextCond(t *testing.T) {
func TestMaxQueueSize(t *testing.T) {
t.Run("queue size is tracked per tenant", func(t *testing.T) {
maxSize := 3
- queue := NewRequestQueue(maxSize, 0, NewMetrics(nil, constants.Loki, "query_scheduler"))
+ queue := NewRequestQueue(maxSize, 0, noQueueLimits, NewMetrics(nil, constants.Loki, "query_scheduler"))
queue.RegisterConsumerConnection("querier")
// enqueue maxSize items with different actors
// different actors have individual channels with maxSize length
- assert.NoError(t, queue.Enqueue("tenant", []string{"user-a"}, 1, 0, nil))
- assert.NoError(t, queue.Enqueue("tenant", []string{"user-b"}, 2, 0, nil))
- assert.NoError(t, queue.Enqueue("tenant", []string{"user-c"}, 3, 0, nil))
+ assert.NoError(t, queue.Enqueue("tenant", []string{"user-a"}, 1, nil))
+ assert.NoError(t, queue.Enqueue("tenant", []string{"user-b"}, 2, nil))
+ assert.NoError(t, queue.Enqueue("tenant", []string{"user-c"}, 3, nil))
// max queue length per tenant is tracked globally for all actors within a tenant
- err := queue.Enqueue("tenant", []string{"user-a"}, 4, 0, nil)
+ err := queue.Enqueue("tenant", []string{"user-a"}, 4, nil)
assert.Equal(t, err, ErrTooManyRequests)
// dequeue and enqueue some items
@@ -325,10 +325,10 @@ func TestMaxQueueSize(t *testing.T) {
_, _, err = queue.Dequeue(context.Background(), StartIndexWithLocalQueue, "querier")
assert.NoError(t, err)
- assert.NoError(t, queue.Enqueue("tenant", []string{"user-a"}, 4, 0, nil))
- assert.NoError(t, queue.Enqueue("tenant", []string{"user-b"}, 5, 0, nil))
+ assert.NoError(t, queue.Enqueue("tenant", []string{"user-a"}, 4, nil))
+ assert.NoError(t, queue.Enqueue("tenant", []string{"user-b"}, 5, nil))
- err = queue.Enqueue("tenant", []string{"user-c"}, 6, 0, nil)
+ err = queue.Enqueue("tenant", []string{"user-c"}, 6, nil)
assert.Equal(t, err, ErrTooManyRequests)
})
}
diff --git a/pkg/queue/tenant_queues.go b/pkg/queue/tenant_queues.go
index 46e8a999fb88e..69fac6ed60a01 100644
--- a/pkg/queue/tenant_queues.go
+++ b/pkg/queue/tenant_queues.go
@@ -6,11 +6,17 @@
package queue
import (
+ "fmt"
"math/rand"
"sort"
"time"
+ "github.com/go-kit/log/level"
+ "github.com/grafana/dskit/tenant"
+
"github.com/grafana/loki/pkg/util"
+ util_log "github.com/grafana/loki/pkg/util/log"
+ "github.com/grafana/loki/pkg/util/validation"
)
type intPointerMap map[string]*int
@@ -67,6 +73,8 @@ type tenantQueues struct {
// sortedConsumer list of consumer IDs, used when creating per-user shard.
sortedConsumers []string
+
+ limits Limits
}
type Queue interface {
@@ -87,16 +95,15 @@ type tenantQueue struct {
*TreeQueue
// If not nil, only these consumers can handle user requests. If nil, all consumers can.
- // We set this to nil if number of available consumers <= maxQueriers.
- consumers map[string]struct{}
- maxQueriers int
+	// We set this to nil if the number of available consumers is <= MaxConsumers.
+ consumers map[string]struct{}
// Seed for shuffle sharding of consumers. This seed is based on userID only and is therefore consistent
// between different frontends.
seed int64
}
-func newTenantQueues(maxUserQueueSize int, forgetDelay time.Duration) *tenantQueues {
+func newTenantQueues(maxUserQueueSize int, forgetDelay time.Duration, limits Limits) *tenantQueues {
mm := &Mapping[*tenantQueue]{}
mm.Init(64)
return &tenantQueues{
@@ -106,6 +113,7 @@ func newTenantQueues(maxUserQueueSize int, forgetDelay time.Duration) *tenantQue
forgetDelay: forgetDelay,
consumers: map[string]*consumer{},
sortedConsumers: nil,
+ limits: limits,
}
}
@@ -118,37 +126,42 @@ func (q *tenantQueues) deleteQueue(tenant string) {
}
// Returns existing or new queue for a tenant.
-// MaxQueriers is used to compute which consumers should handle requests for this tenant.
-// If maxQueriers is <= 0, all consumers can handle this tenant's requests.
-// If maxQueriers has changed since the last call, consumers for this are recomputed.
-func (q *tenantQueues) getOrAddQueue(tenant string, path []string, maxQueriers int) Queue {
+func (q *tenantQueues) getOrAddQueue(tenantID string, path []string) (Queue, error) {
// Empty tenant is not allowed, as that would break our tenants list ("" is used for free spot).
- if tenant == "" {
- return nil
+ if tenantID == "" {
+ return nil, fmt.Errorf("empty tenant is not allowed")
}
- if maxQueriers < 0 {
- maxQueriers = 0
+ // extract tenantIDs to compute limits for multi-tenant queries
+ tenantIDs, err := tenant.TenantIDsFromOrgID(tenantID)
+ if err != nil {
+ return nil, fmt.Errorf("extract tenant ids: %w", err)
}
- uq := q.mapping.GetByKey(tenant)
+ uq := q.mapping.GetByKey(tenantID)
if uq == nil {
uq = &tenantQueue{
- seed: util.ShuffleShardSeed(tenant, ""),
+ seed: util.ShuffleShardSeed(tenantID, ""),
}
- uq.TreeQueue = newTreeQueue(q.maxUserQueueSize, tenant)
- q.mapping.Put(tenant, uq)
+ uq.TreeQueue = newTreeQueue(q.maxUserQueueSize, tenantID)
+ q.mapping.Put(tenantID, uq)
}
- if uq.maxQueriers != maxQueriers {
- uq.maxQueriers = maxQueriers
- uq.consumers = shuffleConsumersForTenants(uq.seed, maxQueriers, q.sortedConsumers, nil)
+ consumersToSelect := validation.SmallestPositiveNonZeroIntPerTenant(
+ tenantIDs,
+ func(tenantID string) int {
+ return q.limits.MaxConsumers(tenantID, len(q.sortedConsumers))
+ },
+ )
+
+ if len(uq.consumers) != consumersToSelect {
+ uq.consumers = shuffleConsumersForTenants(uq.seed, consumersToSelect, q.sortedConsumers, nil)
}
if len(path) == 0 {
- return uq
+ return uq, nil
}
- return uq.add(path)
+ return uq.add(path), nil
}
// Finds next queue for the consumer. To support fair scheduling between users, client is expected
@@ -294,8 +307,23 @@ func (q *tenantQueues) forgetDisconnectedConsumers(now time.Time) int {
func (q *tenantQueues) recomputeUserConsumers() {
scratchpad := make([]string, 0, len(q.sortedConsumers))
- for _, uq := range q.mapping.Values() {
- uq.consumers = shuffleConsumersForTenants(uq.seed, uq.maxQueriers, q.sortedConsumers, scratchpad)
+ for _, tenantID := range q.mapping.Keys() {
+ if uq := q.mapping.GetByKey(tenantID); uq != nil {
+ tenantIDs, err := tenant.TenantIDsFromOrgID(tenantID)
+ if err != nil {
+ // this is unlikely to happen since we do tenantID validation when creating the queue.
+ level.Error(util_log.Logger).Log("msg", "failed to shuffle consumers because of errors in tenantID extraction", "tenant", tenantID, "error", err)
+ continue
+ }
+
+ consumersToSelect := validation.SmallestPositiveNonZeroIntPerTenant(
+ tenantIDs,
+ func(tenantID string) int {
+ return q.limits.MaxConsumers(tenantID, len(q.sortedConsumers))
+ },
+ )
+ uq.consumers = shuffleConsumersForTenants(uq.seed, consumersToSelect, q.sortedConsumers, scratchpad)
+ }
}
}
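
To illustrate the multi-tenant handling added here (assuming the usual `|`-separated multi-tenant org IDs): for a queue created under `tenant-a|tenant-b` where `MaxConsumers` resolves to 4 for `tenant-a` and 0 (no limit) for `tenant-b`, `SmallestPositiveNonZeroIntPerTenant` picks the most restrictive positive value, 4, and the queue is shuffle-sharded to 4 consumers. If every tenant resolves to 0, the queue's `consumers` set stays nil and all connected consumers may serve it.
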
diff --git a/pkg/queue/tenant_queues_test.go b/pkg/queue/tenant_queues_test.go
index 95f2a67963aa7..4f49b8233304d 100644
--- a/pkg/queue/tenant_queues_test.go
+++ b/pkg/queue/tenant_queues_test.go
@@ -15,53 +15,57 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+
+ "github.com/grafana/loki/pkg/scheduler/limits"
)
+var noQueueLimits = limits.NewQueueLimits(nil)
+
func TestQueues(t *testing.T) {
- uq := newTenantQueues(0, 0)
+ uq := newTenantQueues(0, 0, noQueueLimits)
assert.NotNil(t, uq)
assert.NoError(t, isConsistent(uq))
- uq.addConsumerToConnection("querier-1")
- uq.addConsumerToConnection("querier-2")
+ uq.addConsumerToConnection("consumer-1")
+ uq.addConsumerToConnection("consumer-2")
- q, u, lastUserIndex := uq.getNextQueueForConsumer(-1, "querier-1")
+ q, u, lastUserIndex := uq.getNextQueueForConsumer(-1, "consumer-1")
assert.Nil(t, q)
assert.Equal(t, "", u)
// Add queues: [one]
- qOne := getOrAdd(t, uq, "one", 0)
- lastUserIndex = confirmOrderForQuerier(t, uq, "querier-1", lastUserIndex, qOne, qOne)
+ qOne := getOrAdd(t, uq, "one")
+ lastUserIndex = confirmOrderForConsumer(t, uq, "consumer-1", lastUserIndex, qOne, qOne)
// [one two]
- qTwo := getOrAdd(t, uq, "two", 0)
+ qTwo := getOrAdd(t, uq, "two")
assert.NotEqual(t, qOne, qTwo)
- lastUserIndex = confirmOrderForQuerier(t, uq, "querier-1", lastUserIndex, qTwo, qOne, qTwo, qOne)
- confirmOrderForQuerier(t, uq, "querier-2", -1, qOne, qTwo, qOne)
+ lastUserIndex = confirmOrderForConsumer(t, uq, "consumer-1", lastUserIndex, qTwo, qOne, qTwo, qOne)
+ confirmOrderForConsumer(t, uq, "consumer-2", -1, qOne, qTwo, qOne)
// [one two three]
// confirm fifo by adding a third queue and iterating to it
- qThree := getOrAdd(t, uq, "three", 0)
+ qThree := getOrAdd(t, uq, "three")
- lastUserIndex = confirmOrderForQuerier(t, uq, "querier-1", lastUserIndex, qTwo, qThree, qOne)
+ lastUserIndex = confirmOrderForConsumer(t, uq, "consumer-1", lastUserIndex, qTwo, qThree, qOne)
// Remove one: ["" two three]
uq.deleteQueue("one")
assert.NoError(t, isConsistent(uq))
- lastUserIndex = confirmOrderForQuerier(t, uq, "querier-1", lastUserIndex, qTwo, qThree, qTwo)
+ lastUserIndex = confirmOrderForConsumer(t, uq, "consumer-1", lastUserIndex, qTwo, qThree, qTwo)
// "four" is added at the beginning of the list: [four two three]
- qFour := getOrAdd(t, uq, "four", 0)
+ qFour := getOrAdd(t, uq, "four")
- lastUserIndex = confirmOrderForQuerier(t, uq, "querier-1", lastUserIndex, qThree, qFour, qTwo, qThree)
+ lastUserIndex = confirmOrderForConsumer(t, uq, "consumer-1", lastUserIndex, qThree, qFour, qTwo, qThree)
// Remove two: [four "" three]
uq.deleteQueue("two")
assert.NoError(t, isConsistent(uq))
- lastUserIndex = confirmOrderForQuerier(t, uq, "querier-1", lastUserIndex, qFour, qThree, qFour)
+ lastUserIndex = confirmOrderForConsumer(t, uq, "consumer-1", lastUserIndex, qFour, qThree, qFour)
// Remove three: [four]
uq.deleteQueue("three")
@@ -71,55 +75,55 @@ func TestQueues(t *testing.T) {
uq.deleteQueue("four")
assert.NoError(t, isConsistent(uq))
- q, _, _ = uq.getNextQueueForConsumer(lastUserIndex, "querier-1")
+ q, _, _ = uq.getNextQueueForConsumer(lastUserIndex, "consumer-1")
assert.Nil(t, q)
}
-func TestQueuesOnTerminatingQuerier(t *testing.T) {
- uq := newTenantQueues(0, 0)
+func TestQueuesOnTerminatingConsumer(t *testing.T) {
+ uq := newTenantQueues(0, 0, noQueueLimits)
assert.NotNil(t, uq)
assert.NoError(t, isConsistent(uq))
- uq.addConsumerToConnection("querier-1")
- uq.addConsumerToConnection("querier-2")
+ uq.addConsumerToConnection("consumer-1")
+ uq.addConsumerToConnection("consumer-2")
// Add queues: [one, two]
- qOne := getOrAdd(t, uq, "one", 0)
- qTwo := getOrAdd(t, uq, "two", 0)
- confirmOrderForQuerier(t, uq, "querier-1", -1, qOne, qTwo, qOne, qTwo)
- confirmOrderForQuerier(t, uq, "querier-2", -1, qOne, qTwo, qOne, qTwo)
-
- // After notify shutdown for querier-2, it's expected to own no queue.
- uq.notifyQuerierShutdown("querier-2")
- q, u, _ := uq.getNextQueueForConsumer(-1, "querier-2")
+ qOne := getOrAdd(t, uq, "one")
+ qTwo := getOrAdd(t, uq, "two")
+ confirmOrderForConsumer(t, uq, "consumer-1", -1, qOne, qTwo, qOne, qTwo)
+ confirmOrderForConsumer(t, uq, "consumer-2", -1, qOne, qTwo, qOne, qTwo)
+
+	// After notifying shutdown for consumer-2, it is expected to own no queue.
+ uq.notifyQuerierShutdown("consumer-2")
+ q, u, _ := uq.getNextQueueForConsumer(-1, "consumer-2")
assert.Nil(t, q)
assert.Equal(t, "", u)
- // However, querier-1 still get queues because it's still running.
- confirmOrderForQuerier(t, uq, "querier-1", -1, qOne, qTwo, qOne, qTwo)
+	// However, consumer-1 still gets queues because it's still running.
+ confirmOrderForConsumer(t, uq, "consumer-1", -1, qOne, qTwo, qOne, qTwo)
- // After disconnecting querier-2, it's expected to own no queue.
- uq.removeConsumer("querier-2")
- q, u, _ = uq.getNextQueueForConsumer(-1, "querier-2")
+ // After disconnecting consumer-2, it's expected to own no queue.
+ uq.removeConsumer("consumer-2")
+ q, u, _ = uq.getNextQueueForConsumer(-1, "consumer-2")
assert.Nil(t, q)
assert.Equal(t, "", u)
}
-func TestQueuesWithQueriers(t *testing.T) {
- uq := newTenantQueues(0, 0)
+func TestQueuesWithConsumers(t *testing.T) {
+ maxConsumers := 5
+ uq := newTenantQueues(0, 0, &mockQueueLimits{maxConsumers: maxConsumers})
assert.NotNil(t, uq)
assert.NoError(t, isConsistent(uq))
- queriers := 30
+ consumers := 30
users := 1000
- maxQueriersPerUser := 5
- // Add some queriers.
- for ix := 0; ix < queriers; ix++ {
- qid := fmt.Sprintf("querier-%d", ix)
+ // Add some consumers.
+ for ix := 0; ix < consumers; ix++ {
+ qid := fmt.Sprintf("consumer-%d", ix)
uq.addConsumerToConnection(qid)
- // No querier has any queues yet.
+ // No consumer has any queues yet.
q, u, _ := uq.getNextQueueForConsumer(-1, qid)
assert.Nil(t, q)
assert.Equal(t, "", u)
@@ -130,19 +134,19 @@ func TestQueuesWithQueriers(t *testing.T) {
// Add user queues.
for u := 0; u < users; u++ {
uid := fmt.Sprintf("user-%d", u)
- getOrAdd(t, uq, uid, maxQueriersPerUser)
+ getOrAdd(t, uq, uid)
- // Verify it has maxQueriersPerUser queriers assigned now.
+ // Verify it has maxConsumers consumers assigned now.
qs := uq.mapping.GetByKey(uid).consumers
- assert.Equal(t, maxQueriersPerUser, len(qs))
+ assert.Equal(t, maxConsumers, len(qs))
}
- // After adding all users, verify results. For each querier, find out how many different users it handles,
+ // After adding all users, verify results. For each consumer, find out how many different users it handles,
// and compute mean and stdDev.
- queriersMap := make(map[string]int)
+ consumerMap := make(map[string]int)
- for q := 0; q < queriers; q++ {
- qid := fmt.Sprintf("querier-%d", q)
+ for q := 0; q < consumers; q++ {
+ qid := fmt.Sprintf("consumer-%d", q)
lastUserIndex := StartIndex
for {
@@ -151,25 +155,25 @@ func TestQueuesWithQueriers(t *testing.T) {
break
}
lastUserIndex = newIx
- queriersMap[qid]++
+ consumerMap[qid]++
}
}
mean := float64(0)
- for _, c := range queriersMap {
+ for _, c := range consumerMap {
mean += float64(c)
}
- mean = mean / float64(len(queriersMap))
+ mean = mean / float64(len(consumerMap))
stdDev := float64(0)
- for _, c := range queriersMap {
+ for _, c := range consumerMap {
d := float64(c) - mean
stdDev += (d * d)
}
- stdDev = math.Sqrt(stdDev / float64(len(queriersMap)))
+ stdDev = math.Sqrt(stdDev / float64(len(consumerMap)))
t.Log("mean:", mean, "stddev:", stdDev)
- assert.InDelta(t, users*maxQueriersPerUser/queriers, mean, 1)
+ assert.InDelta(t, users*maxConsumers/consumers, mean, 1)
assert.InDelta(t, stdDev, 0, mean*0.2)
}
@@ -183,7 +187,7 @@ func TestQueuesConsistency(t *testing.T) {
for testName, testData := range tests {
t.Run(testName, func(t *testing.T) {
- uq := newTenantQueues(0, testData.forgetDelay)
+ uq := newTenantQueues(0, testData.forgetDelay, &mockQueueLimits{maxConsumers: 3})
assert.NotNil(t, uq)
assert.NoError(t, isConsistent(uq))
@@ -196,25 +200,27 @@ func TestQueuesConsistency(t *testing.T) {
for i := 0; i < 10000; i++ {
switch r.Int() % 6 {
case 0:
- assert.NotNil(t, uq.getOrAddQueue(generateTenant(r), generateActor(r), 3))
+ q, err := uq.getOrAddQueue(generateTenant(r), generateActor(r))
+ assert.NoError(t, err)
+ assert.NotNil(t, q)
case 1:
- qid := generateQuerier(r)
+ qid := generateConsumer(r)
_, _, luid := uq.getNextQueueForConsumer(lastUserIndexes[qid], qid)
lastUserIndexes[qid] = luid
case 2:
uq.deleteQueue(generateTenant(r))
case 3:
- q := generateQuerier(r)
+ q := generateConsumer(r)
uq.addConsumerToConnection(q)
conns[q]++
case 4:
- q := generateQuerier(r)
+ q := generateConsumer(r)
if conns[q] > 0 {
uq.removeConsumerConnection(q, time.Now())
conns[q]--
}
case 5:
- q := generateQuerier(r)
+ q := generateConsumer(r)
uq.notifyQuerierShutdown(q)
}
@@ -226,166 +232,166 @@ func TestQueuesConsistency(t *testing.T) {
func TestQueues_ForgetDelay(t *testing.T) {
const (
- forgetDelay = time.Minute
- maxQueriersPerUser = 1
- numUsers = 100
+ forgetDelay = time.Minute
+ maxConsumers = 1
+ numUsers = 100
)
now := time.Now()
- uq := newTenantQueues(0, forgetDelay)
+ uq := newTenantQueues(0, forgetDelay, &mockQueueLimits{maxConsumers: maxConsumers})
assert.NotNil(t, uq)
assert.NoError(t, isConsistent(uq))
- // 3 queriers open 2 connections each.
+ // 3 consumers open 2 connections each.
for i := 1; i <= 3; i++ {
- uq.addConsumerToConnection(fmt.Sprintf("querier-%d", i))
- uq.addConsumerToConnection(fmt.Sprintf("querier-%d", i))
+ uq.addConsumerToConnection(fmt.Sprintf("consumer-%d", i))
+ uq.addConsumerToConnection(fmt.Sprintf("consumer-%d", i))
}
// Add user queues.
for i := 0; i < numUsers; i++ {
userID := fmt.Sprintf("user-%d", i)
- getOrAdd(t, uq, userID, maxQueriersPerUser)
+ getOrAdd(t, uq, userID)
}
- // We expect querier-1 to have some users.
- querier1Users := getUsersByQuerier(uq, "querier-1")
- require.NotEmpty(t, querier1Users)
+ // We expect consumer-1 to have some users.
+ consumer1Users := getUsersByConsumer(uq, "consumer-1")
+ require.NotEmpty(t, consumer1Users)
- // Gracefully shutdown querier-1.
- uq.removeConsumerConnection("querier-1", now.Add(20*time.Second))
- uq.removeConsumerConnection("querier-1", now.Add(21*time.Second))
- uq.notifyQuerierShutdown("querier-1")
+ // Gracefully shutdown consumer-1.
+ uq.removeConsumerConnection("consumer-1", now.Add(20*time.Second))
+ uq.removeConsumerConnection("consumer-1", now.Add(21*time.Second))
+ uq.notifyQuerierShutdown("consumer-1")
- // We expect querier-1 has been removed.
- assert.NotContains(t, uq.consumers, "querier-1")
+ // We expect consumer-1 has been removed.
+ assert.NotContains(t, uq.consumers, "consumer-1")
assert.NoError(t, isConsistent(uq))
- // We expect querier-1 users have been shuffled to other queriers.
- for _, userID := range querier1Users {
- assert.Contains(t, append(getUsersByQuerier(uq, "querier-2"), getUsersByQuerier(uq, "querier-3")...), userID)
+ // We expect consumer-1 users have been shuffled to other consumers.
+ for _, userID := range consumer1Users {
+ assert.Contains(t, append(getUsersByConsumer(uq, "consumer-2"), getUsersByConsumer(uq, "consumer-3")...), userID)
}
- // Querier-1 reconnects.
- uq.addConsumerToConnection("querier-1")
- uq.addConsumerToConnection("querier-1")
+ // Consumer-1 reconnects.
+ uq.addConsumerToConnection("consumer-1")
+ uq.addConsumerToConnection("consumer-1")
- // We expect the initial querier-1 users have got back to querier-1.
- for _, userID := range querier1Users {
- assert.Contains(t, getUsersByQuerier(uq, "querier-1"), userID)
- assert.NotContains(t, getUsersByQuerier(uq, "querier-2"), userID)
- assert.NotContains(t, getUsersByQuerier(uq, "querier-3"), userID)
+	// We expect the initial consumer-1 users to have moved back to consumer-1.
+ for _, userID := range consumer1Users {
+ assert.Contains(t, getUsersByConsumer(uq, "consumer-1"), userID)
+ assert.NotContains(t, getUsersByConsumer(uq, "consumer-2"), userID)
+ assert.NotContains(t, getUsersByConsumer(uq, "consumer-3"), userID)
}
- // Querier-1 abruptly terminates (no shutdown notification received).
- uq.removeConsumerConnection("querier-1", now.Add(40*time.Second))
- uq.removeConsumerConnection("querier-1", now.Add(41*time.Second))
+ // Consumer-1 abruptly terminates (no shutdown notification received).
+ uq.removeConsumerConnection("consumer-1", now.Add(40*time.Second))
+ uq.removeConsumerConnection("consumer-1", now.Add(41*time.Second))
- // We expect querier-1 has NOT been removed.
- assert.Contains(t, uq.consumers, "querier-1")
+ // We expect consumer-1 has NOT been removed.
+ assert.Contains(t, uq.consumers, "consumer-1")
assert.NoError(t, isConsistent(uq))
- // We expect the querier-1 users have not been shuffled to other queriers.
- for _, userID := range querier1Users {
- assert.Contains(t, getUsersByQuerier(uq, "querier-1"), userID)
- assert.NotContains(t, getUsersByQuerier(uq, "querier-2"), userID)
- assert.NotContains(t, getUsersByQuerier(uq, "querier-3"), userID)
+ // We expect the consumer-1 users have not been shuffled to other consumers.
+ for _, userID := range consumer1Users {
+ assert.Contains(t, getUsersByConsumer(uq, "consumer-1"), userID)
+ assert.NotContains(t, getUsersByConsumer(uq, "consumer-2"), userID)
+ assert.NotContains(t, getUsersByConsumer(uq, "consumer-3"), userID)
}
- // Try to forget disconnected queriers, but querier-1 forget delay hasn't passed yet.
+ // Try to forget disconnected consumers, but consumer-1 forget delay hasn't passed yet.
uq.forgetDisconnectedConsumers(now.Add(90 * time.Second))
- assert.Contains(t, uq.consumers, "querier-1")
+ assert.Contains(t, uq.consumers, "consumer-1")
assert.NoError(t, isConsistent(uq))
- for _, userID := range querier1Users {
- assert.Contains(t, getUsersByQuerier(uq, "querier-1"), userID)
- assert.NotContains(t, getUsersByQuerier(uq, "querier-2"), userID)
- assert.NotContains(t, getUsersByQuerier(uq, "querier-3"), userID)
+ for _, userID := range consumer1Users {
+ assert.Contains(t, getUsersByConsumer(uq, "consumer-1"), userID)
+ assert.NotContains(t, getUsersByConsumer(uq, "consumer-2"), userID)
+ assert.NotContains(t, getUsersByConsumer(uq, "consumer-3"), userID)
}
- // Try to forget disconnected queriers. This time querier-1 forget delay has passed.
+ // Try to forget disconnected consumers. This time consumer-1 forget delay has passed.
uq.forgetDisconnectedConsumers(now.Add(105 * time.Second))
- assert.NotContains(t, uq.consumers, "querier-1")
+ assert.NotContains(t, uq.consumers, "consumer-1")
assert.NoError(t, isConsistent(uq))
- // We expect querier-1 users have been shuffled to other queriers.
- for _, userID := range querier1Users {
- assert.Contains(t, append(getUsersByQuerier(uq, "querier-2"), getUsersByQuerier(uq, "querier-3")...), userID)
+ // We expect consumer-1 users have been shuffled to other consumers.
+ for _, userID := range consumer1Users {
+ assert.Contains(t, append(getUsersByConsumer(uq, "consumer-2"), getUsersByConsumer(uq, "consumer-3")...), userID)
}
}
-func TestQueues_ForgetDelay_ShouldCorrectlyHandleQuerierReconnectingBeforeForgetDelayIsPassed(t *testing.T) {
+func TestQueues_ForgetDelay_ShouldCorrectlyHandleConsumerReconnectingBeforeForgetDelayIsPassed(t *testing.T) {
const (
- forgetDelay = time.Minute
- maxQueriersPerUser = 1
- numUsers = 100
+ forgetDelay = time.Minute
+ maxConsumers = 1
+ numUsers = 100
)
now := time.Now()
- uq := newTenantQueues(0, forgetDelay)
+ uq := newTenantQueues(0, forgetDelay, &mockQueueLimits{maxConsumers: maxConsumers})
assert.NotNil(t, uq)
assert.NoError(t, isConsistent(uq))
- // 3 queriers open 2 connections each.
+ // 3 consumers open 2 connections each.
for i := 1; i <= 3; i++ {
- uq.addConsumerToConnection(fmt.Sprintf("querier-%d", i))
- uq.addConsumerToConnection(fmt.Sprintf("querier-%d", i))
+ uq.addConsumerToConnection(fmt.Sprintf("consumer-%d", i))
+ uq.addConsumerToConnection(fmt.Sprintf("consumer-%d", i))
}
// Add user queues.
for i := 0; i < numUsers; i++ {
userID := fmt.Sprintf("user-%d", i)
- getOrAdd(t, uq, userID, maxQueriersPerUser)
+ getOrAdd(t, uq, userID)
}
- // We expect querier-1 to have some users.
- querier1Users := getUsersByQuerier(uq, "querier-1")
- require.NotEmpty(t, querier1Users)
+ // We expect consumer-1 to have some users.
+ consumer1Users := getUsersByConsumer(uq, "consumer-1")
+ require.NotEmpty(t, consumer1Users)
- // Querier-1 abruptly terminates (no shutdown notification received).
- uq.removeConsumerConnection("querier-1", now.Add(40*time.Second))
- uq.removeConsumerConnection("querier-1", now.Add(41*time.Second))
+ // Consumer-1 abruptly terminates (no shutdown notification received).
+ uq.removeConsumerConnection("consumer-1", now.Add(40*time.Second))
+ uq.removeConsumerConnection("consumer-1", now.Add(41*time.Second))
- // We expect querier-1 has NOT been removed.
- assert.Contains(t, uq.consumers, "querier-1")
+ // We expect consumer-1 has NOT been removed.
+ assert.Contains(t, uq.consumers, "consumer-1")
assert.NoError(t, isConsistent(uq))
- // We expect the querier-1 users have not been shuffled to other queriers.
- for _, userID := range querier1Users {
- assert.Contains(t, getUsersByQuerier(uq, "querier-1"), userID)
- assert.NotContains(t, getUsersByQuerier(uq, "querier-2"), userID)
- assert.NotContains(t, getUsersByQuerier(uq, "querier-3"), userID)
+ // We expect the consumer-1 users have not been shuffled to other consumers.
+ for _, userID := range consumer1Users {
+ assert.Contains(t, getUsersByConsumer(uq, "consumer-1"), userID)
+ assert.NotContains(t, getUsersByConsumer(uq, "consumer-2"), userID)
+ assert.NotContains(t, getUsersByConsumer(uq, "consumer-3"), userID)
}
- // Try to forget disconnected queriers, but querier-1 forget delay hasn't passed yet.
+ // Try to forget disconnected consumers, but consumer-1 forget delay hasn't passed yet.
uq.forgetDisconnectedConsumers(now.Add(90 * time.Second))
- // Querier-1 reconnects.
- uq.addConsumerToConnection("querier-1")
- uq.addConsumerToConnection("querier-1")
+ // Consumer-1 reconnects.
+ uq.addConsumerToConnection("consumer-1")
+ uq.addConsumerToConnection("consumer-1")
- assert.Contains(t, uq.consumers, "querier-1")
+ assert.Contains(t, uq.consumers, "consumer-1")
assert.NoError(t, isConsistent(uq))
- // We expect the querier-1 users have not been shuffled to other queriers.
- for _, userID := range querier1Users {
- assert.Contains(t, getUsersByQuerier(uq, "querier-1"), userID)
- assert.NotContains(t, getUsersByQuerier(uq, "querier-2"), userID)
- assert.NotContains(t, getUsersByQuerier(uq, "querier-3"), userID)
+ // We expect the consumer-1 users have not been shuffled to other consumers.
+ for _, userID := range consumer1Users {
+ assert.Contains(t, getUsersByConsumer(uq, "consumer-1"), userID)
+ assert.NotContains(t, getUsersByConsumer(uq, "consumer-2"), userID)
+ assert.NotContains(t, getUsersByConsumer(uq, "consumer-3"), userID)
}
- // Try to forget disconnected queriers far in the future, but there's no disconnected querier.
+ // Try to forget disconnected consumers far in the future, but there's no disconnected consumer.
uq.forgetDisconnectedConsumers(now.Add(200 * time.Second))
- assert.Contains(t, uq.consumers, "querier-1")
+ assert.Contains(t, uq.consumers, "consumer-1")
assert.NoError(t, isConsistent(uq))
- for _, userID := range querier1Users {
- assert.Contains(t, getUsersByQuerier(uq, "querier-1"), userID)
- assert.NotContains(t, getUsersByQuerier(uq, "querier-2"), userID)
- assert.NotContains(t, getUsersByQuerier(uq, "querier-3"), userID)
+ for _, userID := range consumer1Users {
+ assert.Contains(t, getUsersByConsumer(uq, "consumer-1"), userID)
+ assert.NotContains(t, getUsersByConsumer(uq, "consumer-2"), userID)
+ assert.NotContains(t, getUsersByConsumer(uq, "consumer-3"), userID)
}
}
@@ -397,24 +403,27 @@ func generateTenant(r *rand.Rand) string {
return fmt.Sprint("tenant-", r.Int()%5)
}
-func generateQuerier(r *rand.Rand) string {
- return fmt.Sprint("querier-", r.Int()%5)
+func generateConsumer(r *rand.Rand) string {
+ return fmt.Sprint("consumer-", r.Int()%5)
}
-func getOrAdd(t *testing.T, uq *tenantQueues, tenant string, maxQueriers int) Queue {
+func getOrAdd(t *testing.T, uq *tenantQueues, tenant string) Queue {
actor := []string{}
- q := uq.getOrAddQueue(tenant, actor, maxQueriers)
+ q, err := uq.getOrAddQueue(tenant, actor)
+ assert.NoError(t, err)
assert.NotNil(t, q)
assert.NoError(t, isConsistent(uq))
- assert.Equal(t, q, uq.getOrAddQueue(tenant, actor, maxQueriers))
+ q2, err := uq.getOrAddQueue(tenant, actor)
+ assert.NoError(t, err)
+ assert.Equal(t, q, q2)
return q
}
-func confirmOrderForQuerier(t *testing.T, uq *tenantQueues, querier string, lastUserIndex QueueIndex, qs ...Queue) QueueIndex {
+func confirmOrderForConsumer(t *testing.T, uq *tenantQueues, consumer string, lastUserIndex QueueIndex, qs ...Queue) QueueIndex {
t.Helper()
var n Queue
for _, q := range qs {
- n, _, lastUserIndex = uq.getNextQueueForConsumer(lastUserIndex, querier)
+ n, _, lastUserIndex = uq.getNextQueueForConsumer(lastUserIndex, consumer)
assert.Equal(t, q, n)
assert.NoError(t, isConsistent(uq))
}
@@ -423,7 +432,7 @@ func confirmOrderForQuerier(t *testing.T, uq *tenantQueues, querier string, last
func isConsistent(uq *tenantQueues) error {
if len(uq.sortedConsumers) != len(uq.consumers) {
- return fmt.Errorf("inconsistent number of sorted queriers and querier connections")
+ return fmt.Errorf("inconsistent number of sorted consumers and consumer connections")
}
uc := 0
@@ -441,16 +450,17 @@ func isConsistent(uq *tenantQueues) error {
uc++
- if q.maxQueriers == 0 && q.consumers != nil {
- return fmt.Errorf("user %s has queriers, but maxQueriers=0", u)
+ maxConsumers := uq.limits.MaxConsumers(u, len(uq.consumers))
+ if maxConsumers == 0 && q.consumers != nil {
+ return fmt.Errorf("consumers for user %s should be nil when no limits are set (when MaxConsumers is 0)", u)
}
- if q.maxQueriers > 0 && len(uq.sortedConsumers) <= q.maxQueriers && q.consumers != nil {
- return fmt.Errorf("user %s has queriers set despite not enough queriers available", u)
+ if maxConsumers > 0 && len(uq.sortedConsumers) <= maxConsumers && q.consumers != nil {
+ return fmt.Errorf("consumers for user %s should be nil when MaxConsumers allowed is higher than the available consumers", u)
}
- if q.maxQueriers > 0 && len(uq.sortedConsumers) > q.maxQueriers && len(q.consumers) != q.maxQueriers {
- return fmt.Errorf("user %s has incorrect number of queriers, expected=%d, got=%d", u, len(q.consumers), q.maxQueriers)
+ if maxConsumers > 0 && len(uq.sortedConsumers) > maxConsumers && len(q.consumers) != maxConsumers {
+ return fmt.Errorf("user %s has incorrect number of consumers, expected=%d, got=%d", u, maxConsumers, len(q.consumers))
}
}
@@ -461,67 +471,75 @@ func isConsistent(uq *tenantQueues) error {
return nil
}
-// getUsersByQuerier returns the list of users handled by the provided querierID.
-func getUsersByQuerier(queues *tenantQueues, querierID string) []string {
+// getUsersByConsumer returns the list of users handled by the provided consumerID.
+func getUsersByConsumer(queues *tenantQueues, consumerID string) []string {
var userIDs []string
for _, userID := range queues.mapping.Keys() {
q := queues.mapping.GetByKey(userID)
if q.consumers == nil {
- // If it's nil then all queriers can handle this user.
+ // If it's nil then all consumers can handle this user.
userIDs = append(userIDs, userID)
continue
}
- if _, ok := q.consumers[querierID]; ok {
+ if _, ok := q.consumers[consumerID]; ok {
userIDs = append(userIDs, userID)
}
}
return userIDs
}
-func TestShuffleQueriers(t *testing.T) {
- allQueriers := []string{"a", "b", "c", "d", "e"}
+func TestShuffleConsumers(t *testing.T) {
+ allConsumers := []string{"a", "b", "c", "d", "e"}
- require.Nil(t, shuffleConsumersForTenants(12345, 10, allQueriers, nil))
- require.Nil(t, shuffleConsumersForTenants(12345, len(allQueriers), allQueriers, nil))
+ require.Nil(t, shuffleConsumersForTenants(12345, 10, allConsumers, nil))
+ require.Nil(t, shuffleConsumersForTenants(12345, len(allConsumers), allConsumers, nil))
- r1 := shuffleConsumersForTenants(12345, 3, allQueriers, nil)
+ r1 := shuffleConsumersForTenants(12345, 3, allConsumers, nil)
require.Equal(t, 3, len(r1))
// Same input produces same output.
- r2 := shuffleConsumersForTenants(12345, 3, allQueriers, nil)
+ r2 := shuffleConsumersForTenants(12345, 3, allConsumers, nil)
require.Equal(t, 3, len(r2))
require.Equal(t, r1, r2)
}
-func TestShuffleQueriersCorrectness(t *testing.T) {
- const queriersCount = 100
+func TestShuffleConsumersCorrectness(t *testing.T) {
+ const consumersCount = 100
- var allSortedQueriers []string
- for i := 0; i < queriersCount; i++ {
- allSortedQueriers = append(allSortedQueriers, fmt.Sprintf("%d", i))
+ var allSortedConsumers []string
+ for i := 0; i < consumersCount; i++ {
+ allSortedConsumers = append(allSortedConsumers, fmt.Sprintf("%d", i))
}
- sort.Strings(allSortedQueriers)
+ sort.Strings(allSortedConsumers)
r := rand.New(rand.NewSource(time.Now().UnixNano()))
const tests = 1000
for i := 0; i < tests; i++ {
- toSelect := r.Intn(queriersCount)
+ toSelect := r.Intn(consumersCount)
if toSelect == 0 {
toSelect = 3
}
- selected := shuffleConsumersForTenants(r.Int63(), toSelect, allSortedQueriers, nil)
+ selected := shuffleConsumersForTenants(r.Int63(), toSelect, allSortedConsumers, nil)
require.Equal(t, toSelect, len(selected))
- sort.Strings(allSortedQueriers)
- prevQuerier := ""
- for _, q := range allSortedQueriers {
- require.True(t, prevQuerier < q, "non-unique querier")
- prevQuerier = q
+ sort.Strings(allSortedConsumers)
+ prevConsumer := ""
+ for _, q := range allSortedConsumers {
+ require.True(t, prevConsumer < q, "non-unique consumer")
+ prevConsumer = q
- ix := sort.SearchStrings(allSortedQueriers, q)
- require.True(t, ix < len(allSortedQueriers) && allSortedQueriers[ix] == q, "selected querier is not between all queriers")
+ ix := sort.SearchStrings(allSortedConsumers, q)
+ require.True(t, ix < len(allSortedConsumers) && allSortedConsumers[ix] == q, "selected consumer is not between all consumers")
}
}
}
+
+type mockQueueLimits struct {
+ maxConsumers int
+}
+
+func (l *mockQueueLimits) MaxConsumers(_ string, _ int) int {
+ return l.maxConsumers
+}
diff --git a/pkg/queue/util.go b/pkg/queue/util.go
new file mode 100644
index 0000000000000..9b7fced6dfbf7
--- /dev/null
+++ b/pkg/queue/util.go
@@ -0,0 +1,25 @@
+package queue
+
+import "github.com/prometheus/prometheus/util/pool"
+
+// SlicePool uses a bucket pool and wraps the Get() and Put() functions for
+// simpler access.
+type SlicePool[T any] struct {
+ p *pool.Pool
+}
+
+func NewSlicePool[T any](minSize, maxSize int, factor float64) *SlicePool[T] {
+ return &SlicePool[T]{
+ p: pool.New(minSize, maxSize, factor, func(i int) interface{} {
+ return make([]T, 0, i)
+ }),
+ }
+}
+
+func (sp *SlicePool[T]) Get(n int) []T {
+ return sp.p.Get(n).([]T)
+}
+
+func (sp *SlicePool[T]) Put(buf []T) {
+ sp.p.Put(buf[0:0])
+}
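
As a quick illustration of the wrapper's behaviour, here is a small sketch assuming only the pool sizes used by `NewRequestQueue` above:

```go
package main

import (
	"fmt"

	"github.com/grafana/loki/pkg/queue"
)

func main() {
	// Same bucket layout as NewRequestQueue: capacities 64..1024, growth factor 2.
	pool := queue.NewSlicePool[int](1<<6, 1<<10, 2)

	buf := pool.Get(100) // a []int with length 0 and capacity taken from the next bucket up
	buf = append(buf, 1, 2, 3)
	fmt.Println(len(buf), cap(buf))

	// Put resets the slice to length 0 before handing it back, so callers must not
	// keep references to the dequeued elements after releasing the slice.
	pool.Put(buf)
}
```
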
diff --git a/pkg/ruler/compat.go b/pkg/ruler/compat.go
index db6316e9986d0..8f70d314da884 100644
--- a/pkg/ruler/compat.go
+++ b/pkg/ruler/compat.go
@@ -24,11 +24,11 @@ import (
"github.com/prometheus/prometheus/rules"
"github.com/prometheus/prometheus/template"
- "github.com/grafana/loki/pkg/logql"
"github.com/grafana/loki/pkg/logql/syntax"
ruler "github.com/grafana/loki/pkg/ruler/base"
"github.com/grafana/loki/pkg/ruler/rulespb"
- "github.com/grafana/loki/pkg/ruler/util"
+ rulerutil "github.com/grafana/loki/pkg/ruler/util"
+ "github.com/grafana/loki/pkg/util"
)
// RulesLimits is the one function we need from limits.Overrides, and
@@ -40,7 +40,7 @@ type RulesLimits interface {
RulerRemoteWriteURL(userID string) string
RulerRemoteWriteTimeout(userID string) time.Duration
RulerRemoteWriteHeaders(userID string) map[string]string
- RulerRemoteWriteRelabelConfigs(userID string) []*util.RelabelConfig
+ RulerRemoteWriteRelabelConfigs(userID string) []*rulerutil.RelabelConfig
RulerRemoteWriteConfig(userID string, id string) *config.RemoteWriteConfig
RulerRemoteWriteQueueCapacity(userID string) int
RulerRemoteWriteQueueMinShards(userID string) int
@@ -60,7 +60,7 @@ type RulesLimits interface {
// and passing an altered timestamp.
func queryFunc(evaluator Evaluator, checker readyChecker, userID string, logger log.Logger) rules.QueryFunc {
return func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) {
- hash := logql.HashedQuery(qs)
+ hash := util.HashedQuery(qs)
detail := rules.FromOriginContext(ctx)
detailLog := log.With(logger, "rule_name", detail.Name, "rule_type", detail.Kind, "query", qs, "query_hash", hash)
diff --git a/pkg/ruler/evaluator_jitter.go b/pkg/ruler/evaluator_jitter.go
index ef337c73396be..449ca0e18011c 100644
--- a/pkg/ruler/evaluator_jitter.go
+++ b/pkg/ruler/evaluator_jitter.go
@@ -10,8 +10,8 @@ import (
"github.com/go-kit/log"
"github.com/go-kit/log/level"
- "github.com/grafana/loki/pkg/logql"
"github.com/grafana/loki/pkg/logqlmodel"
+ "github.com/grafana/loki/pkg/util"
)
// EvaluatorWithJitter wraps a given Evaluator. It applies a consistent jitter based on a rule's query string by hashing
@@ -44,7 +44,7 @@ func NewEvaluatorWithJitter(inner Evaluator, maxJitter time.Duration, hasher has
}
func (e *EvaluatorWithJitter) Eval(ctx context.Context, qs string, now time.Time) (*logqlmodel.Result, error) {
- logger := log.With(e.logger, "query", qs, "query_hash", logql.HashedQuery(qs))
+ logger := log.With(e.logger, "query", qs, "query_hash", util.HashedQuery(qs))
jitter := e.calculateJitter(qs, logger)
if jitter > 0 {
diff --git a/pkg/ruler/evaluator_local.go b/pkg/ruler/evaluator_local.go
index fed0f2f02ef11..91efd5a14d995 100644
--- a/pkg/ruler/evaluator_local.go
+++ b/pkg/ruler/evaluator_local.go
@@ -28,7 +28,7 @@ func NewLocalEvaluator(engine *logql.Engine, logger log.Logger) (*LocalEvaluator
}
func (l *LocalEvaluator) Eval(ctx context.Context, qs string, now time.Time) (*logqlmodel.Result, error) {
- params := logql.NewLiteralParams(
+ params, err := logql.NewLiteralParams(
qs,
now,
now,
@@ -38,6 +38,9 @@ func (l *LocalEvaluator) Eval(ctx context.Context, qs string, now time.Time) (*l
0,
nil,
)
+ if err != nil {
+ return nil, err
+ }
q := l.engine.Query(params)
res, err := q.Exec(ctx)
diff --git a/pkg/ruler/evaluator_remote.go b/pkg/ruler/evaluator_remote.go
index 4f953876d6c0f..97a0c1ce7f9dd 100644
--- a/pkg/ruler/evaluator_remote.go
+++ b/pkg/ruler/evaluator_remote.go
@@ -36,8 +36,8 @@ import (
"google.golang.org/grpc/keepalive"
"github.com/grafana/loki/pkg/loghttp"
- "github.com/grafana/loki/pkg/logql"
"github.com/grafana/loki/pkg/logqlmodel"
+ "github.com/grafana/loki/pkg/util"
"github.com/grafana/loki/pkg/util/build"
"github.com/grafana/loki/pkg/util/constants"
"github.com/grafana/loki/pkg/util/httpreq"
@@ -220,7 +220,7 @@ func (r *RemoteEvaluator) query(ctx context.Context, orgID, query string, ts tim
args.Set("time", ts.Format(time.RFC3339Nano))
}
body := []byte(args.Encode())
- hash := logql.HashedQuery(query)
+ hash := util.HashedQuery(query)
req := httpgrpc.HTTPRequest{
Method: http.MethodPost,
diff --git a/pkg/scheduler/limits/definitions.go b/pkg/scheduler/limits/definitions.go
index 2a00db7d4a6db..e2c2e26cca6f2 100644
--- a/pkg/scheduler/limits/definitions.go
+++ b/pkg/scheduler/limits/definitions.go
@@ -1,7 +1,46 @@
package limits
+import (
+ "math"
+)
+
// Limits needed for the Query Scheduler - interface used for decoupling.
type Limits interface {
// MaxQueriersPerUser returns max queriers to use per tenant, or 0 if shuffle sharding is disabled.
- MaxQueriersPerUser(user string) int
+ MaxQueriersPerUser(user string) uint
+
+ // MaxQueryCapacity returns how much of the available query capacity can be used by this user.
+ MaxQueryCapacity(user string) float64
+}
+
+func NewQueueLimits(limits Limits) *QueueLimits {
+ return &QueueLimits{limits: limits}
+}
+
+type QueueLimits struct {
+ limits Limits
+}
+
+// MaxConsumers is used to compute how many of the available queriers are allowed to handle requests for a given tenant.
+// It returns the minimum of frontend.max-queriers-per-tenant and ceil(querier_replicas * frontend.max-query-capacity)
+// when both limits are configured, or whichever of the two is set when only one is configured.
+// 0 is returned when neither limit is applied.
+func (c *QueueLimits) MaxConsumers(tenantID string, allConsumers int) int {
+ if c == nil || c.limits == nil {
+ return 0
+ }
+
+ maxQueriers := int(c.limits.MaxQueriersPerUser(tenantID))
+ maxCapacity := c.limits.MaxQueryCapacity(tenantID)
+
+ if maxCapacity == 0 {
+ return maxQueriers
+ }
+
+ res := int(math.Ceil(float64(allConsumers) * maxCapacity))
+ if maxQueriers != 0 && maxQueriers < res {
+ return maxQueriers
+ }
+
+ return res
}
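
To make the interplay concrete: with 10 connected consumers, a tenant with `frontend.max-queriers-per-tenant=5` and `frontend.max-query-capacity=0.4` gets min(5, ceil(10 × 0.4)) = 4; with only `frontend.max-query-capacity=0.5` set it gets ceil(10 × 0.5) = 5; with neither limit set the function returns 0 and no shuffle sharding is applied. The test cases below exercise exactly these combinations.
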
diff --git a/pkg/scheduler/limits/definitions_test.go b/pkg/scheduler/limits/definitions_test.go
new file mode 100644
index 0000000000000..26139e2186900
--- /dev/null
+++ b/pkg/scheduler/limits/definitions_test.go
@@ -0,0 +1,78 @@
+package limits
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestQueueLimitsMaxConsumers(t *testing.T) {
+ for name, tt := range map[string]struct {
+ limits *QueueLimits
+ expected int
+ }{
+ "nil limits": {
+ limits: NewQueueLimits(nil),
+ expected: 0,
+ },
+ "no limits": {
+ limits: NewQueueLimits(mockLimits{
+ maxQueriers: 0,
+ maxQueryCapacity: 0,
+ }),
+ expected: 0,
+ },
+ "enforce max queriers": {
+ limits: NewQueueLimits(mockLimits{
+ maxQueriers: 5,
+ maxQueryCapacity: 0,
+ }),
+ expected: 5,
+ },
+ "prefer max queriers over query capacity": {
+ limits: NewQueueLimits(mockLimits{
+ maxQueriers: 5,
+ maxQueryCapacity: 1.0,
+ }),
+ expected: 5,
+ },
+ "enforce max query capacity": {
+ limits: NewQueueLimits(mockLimits{
+ maxQueriers: 0,
+ maxQueryCapacity: 0.5,
+ }),
+ expected: 5,
+ },
+ "prefer query capacity over max queriers": {
+ limits: NewQueueLimits(mockLimits{
+ maxQueriers: 5,
+ maxQueryCapacity: 0.4,
+ }),
+ expected: 4,
+ },
+ "query capacity of 1.0": {
+ limits: NewQueueLimits(mockLimits{
+ maxQueryCapacity: 1.0,
+ }),
+ expected: 10,
+ },
+ } {
+ t.Run(name, func(t *testing.T) {
+ res := tt.limits.MaxConsumers("", 10)
+ assert.Equal(t, tt.expected, res)
+ })
+ }
+}
+
+type mockLimits struct {
+ maxQueriers uint
+ maxQueryCapacity float64
+}
+
+func (l mockLimits) MaxQueriersPerUser(_ string) uint {
+ return l.maxQueriers
+}
+
+func (l mockLimits) MaxQueryCapacity(_ string) float64 {
+ return l.maxQueryCapacity
+}
diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go
index 305d47b17e571..5cd163ff0ffa1 100644
--- a/pkg/scheduler/scheduler.go
+++ b/pkg/scheduler/scheduler.go
@@ -19,7 +19,6 @@ import (
"github.com/grafana/dskit/middleware"
"github.com/grafana/dskit/ring"
"github.com/grafana/dskit/services"
- "github.com/grafana/dskit/tenant"
"github.com/grafana/dskit/user"
otgrpc "github.com/opentracing-contrib/go-grpc"
"github.com/opentracing/opentracing-go"
@@ -38,7 +37,6 @@ import (
lokigrpc "github.com/grafana/loki/pkg/util/httpgrpc"
lokihttpreq "github.com/grafana/loki/pkg/util/httpreq"
lokiring "github.com/grafana/loki/pkg/util/ring"
- "github.com/grafana/loki/pkg/util/validation"
)
var errSchedulerIsNotRunning = errors.New("scheduler is not running")
@@ -117,7 +115,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
}
// NewScheduler creates a new Scheduler.
-func NewScheduler(cfg Config, limits Limits, log log.Logger, ringManager *lokiring.RingManager, registerer prometheus.Registerer, metricsNamespace string) (*Scheduler, error) {
+func NewScheduler(cfg Config, schedulerLimits Limits, log log.Logger, ringManager *lokiring.RingManager, registerer prometheus.Registerer, metricsNamespace string) (*Scheduler, error) {
if cfg.UseSchedulerRing {
if ringManager == nil {
return nil, errors.New("ring manager can't be empty when use_scheduler_ring is true")
@@ -130,13 +128,13 @@ func NewScheduler(cfg Config, limits Limits, log log.Logger, ringManager *lokiri
s := &Scheduler{
cfg: cfg,
log: log,
- limits: limits,
+ limits: schedulerLimits,
pendingRequests: map[requestKey]*schedulerRequest{},
connectedFrontends: map[string]*connectedFrontend{},
queueMetrics: queueMetrics,
ringManager: ringManager,
- requestQueue: queue.NewRequestQueue(cfg.MaxOutstandingPerTenant, cfg.QuerierForgetDelay, queueMetrics),
+ requestQueue: queue.NewRequestQueue(cfg.MaxOutstandingPerTenant, cfg.QuerierForgetDelay, limits.NewQueueLimits(schedulerLimits), queueMetrics),
}
s.queueDuration = promauto.With(registerer).NewHistogram(prometheus.HistogramOpts{
@@ -353,13 +351,6 @@ func (s *Scheduler) enqueueRequest(frontendContext context.Context, frontendAddr
req.queueTime = now
req.ctxCancel = cancel
- // aggregate the max queriers limit in the case of a multi tenant query
- tenantIDs, err := tenant.TenantIDsFromOrgID(req.tenantID)
- if err != nil {
- return err
- }
- maxQueriers := validation.SmallestPositiveNonZeroIntPerTenant(tenantIDs, s.limits.MaxQueriersPerUser)
-
var queuePath []string
if s.cfg.MaxQueueHierarchyLevels > 0 {
queuePath = msg.QueuePath
@@ -378,7 +369,7 @@ func (s *Scheduler) enqueueRequest(frontendContext context.Context, frontendAddr
}
s.activeUsers.UpdateUserTimestamp(req.tenantID, now)
- return s.requestQueue.Enqueue(req.tenantID, queuePath, req, maxQueriers, func() {
+ return s.requestQueue.Enqueue(req.tenantID, queuePath, req, func() {
shouldCancel = false
s.pendingRequestsMu.Lock()
diff --git a/pkg/storage/bloom/v1/block_writer.go b/pkg/storage/bloom/v1/block_writer.go
index 317d1e598414a..99ab65ef9cd40 100644
--- a/pkg/storage/bloom/v1/block_writer.go
+++ b/pkg/storage/bloom/v1/block_writer.go
@@ -12,8 +12,8 @@ import (
)
const (
- bloomFileName = "bloom"
- seriesFileName = "series"
+ BloomFileName = "bloom"
+ SeriesFileName = "series"
)
type BlockWriter interface {
@@ -66,12 +66,12 @@ func (b *DirectoryBlockWriter) Init() error {
return errors.Wrap(err, "creating bloom block dir")
}
- b.index, err = os.Create(filepath.Join(b.dir, seriesFileName))
+ b.index, err = os.Create(filepath.Join(b.dir, SeriesFileName))
if err != nil {
return errors.Wrap(err, "creating series file")
}
- b.blooms, err = os.Create(filepath.Join(b.dir, bloomFileName))
+ b.blooms, err = os.Create(filepath.Join(b.dir, BloomFileName))
if err != nil {
return errors.Wrap(err, "creating bloom file")
}
diff --git a/pkg/storage/bloom/v1/bloom_tokenizer.go b/pkg/storage/bloom/v1/bloom_tokenizer.go
index 26ebd63006383..b154f18fba788 100644
--- a/pkg/storage/bloom/v1/bloom_tokenizer.go
+++ b/pkg/storage/bloom/v1/bloom_tokenizer.go
@@ -5,6 +5,10 @@ import (
"math"
"time"
+ "github.com/prometheus/client_golang/prometheus/promauto"
+
+ "github.com/grafana/loki/pkg/util/constants"
+
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
@@ -13,10 +17,17 @@ import (
"github.com/grafana/loki/pkg/logql/log"
"github.com/grafana/loki/pkg/storage/chunk"
+ "github.com/grafana/loki/pkg/util/encoding"
util_log "github.com/grafana/loki/pkg/util/log"
)
-type metrics struct{}
+type metrics struct {
+ sbfCreationTime prometheus.Counter // time spent creating sbfs
+ chunkSize prometheus.Histogram // uncompressed size of all chunks summed per series
+ bloomSize prometheus.Histogram // size of the bloom filter in bytes
+ hammingWeightRatio prometheus.Histogram // ratio of the hamming weight of the bloom filter to the number of bits in the bloom filter
+ estimatedCount prometheus.Histogram // estimated number of elements in the bloom filter
+}
/*
BloomTokenizer is a utility that converts either Loki chunks or individual lines into tokens.
@@ -27,57 +38,115 @@ Bloom filters are utilized for faster lookups of log lines.
type BloomTokenizer struct {
metrics *metrics
- lineTokenizer Tokenizer
- chunkIDTokenizer *WrappedTokenizer
- cache map[string]interface{}
+ lineTokenizer *NGramTokenizer
+ cache map[string]interface{}
}
-const CacheSize = 150000
-const DefaultNGramLength = 4
-const DefaultNGramSkip = 0
+const cacheSize = 150000
+const bloomTokenizerMetricsSubsystem = "bloom_tokenizer"
+const eightBits = 8
// NewBloomTokenizer returns a new instance of the Bloom Tokenizer.
// Warning: the tokens returned use the same byte slice to reduce allocations. This has two consequences:
// 1) The token slices generated must not be mutated externally
// 2) The token slice must not be used after the next call to `Tokens()` as it will repopulate the slice.
// 3) This is not thread safe.
-func NewBloomTokenizer(reg prometheus.Registerer) (*BloomTokenizer, error) {
+func NewBloomTokenizer(reg prometheus.Registerer, NGramLength, NGramSkip int) (*BloomTokenizer, error) {
t := &BloomTokenizer{
- metrics: newMetrics(reg),
+ metrics: newMetrics(reg, constants.Loki, bloomTokenizerMetricsSubsystem),
}
- t.cache = make(map[string]interface{}, CacheSize)
- t.lineTokenizer = NewNGramTokenizer(DefaultNGramLength, DefaultNGramLength+1, DefaultNGramSkip) // default to 4-grams, no skip
- t.chunkIDTokenizer = ChunkIDTokenizer(t.lineTokenizer)
+ t.cache = make(map[string]interface{}, cacheSize)
+ t.lineTokenizer = NewNGramTokenizer(NGramLength, NGramSkip)
level.Info(util_log.Logger).Log("bloom tokenizer created")
return t, nil
}
-func (bt *BloomTokenizer) SetLineTokenizer(t Tokenizer) {
+func (bt *BloomTokenizer) SetLineTokenizer(t *NGramTokenizer) {
bt.lineTokenizer = t
- bt.chunkIDTokenizer = ChunkIDTokenizer(bt.lineTokenizer)
}
-// TODO: Something real here with metrics
-func newMetrics(_ prometheus.Registerer) *metrics {
- return &metrics{}
+func (bt *BloomTokenizer) GetNGramLength() uint64 {
+ return uint64(bt.lineTokenizer.N)
}
-func clearCache(cache map[string]interface{}) {
- for k := range cache {
- delete(cache, k)
+func (bt *BloomTokenizer) GetNGramSkip() uint64 {
+ return uint64(bt.lineTokenizer.Skip)
+}
+
+func newMetrics(r prometheus.Registerer, namespace, subsystem string) *metrics {
+ return &metrics{
+ sbfCreationTime: promauto.With(r).NewCounter(prometheus.CounterOpts{
+ Name: "bloom_creation_time",
+ Help: "Time spent creating scalable bloom filters",
+ Namespace: namespace,
+ Subsystem: subsystem,
+ }),
+ chunkSize: promauto.With(r).NewHistogram(prometheus.HistogramOpts{
+ Name: "bloom_chunk_series_size",
+ Help: "Uncompressed size of chunks in a series",
+ Buckets: prometheus.ExponentialBucketsRange(1024, 1073741824, 10),
+ Namespace: namespace,
+ Subsystem: subsystem,
+ }),
+ bloomSize: promauto.With(r).NewHistogram(prometheus.HistogramOpts{
+ Name: "bloom_size",
+ Help: "Size of the bloom filter in bytes",
+ Buckets: prometheus.ExponentialBucketsRange(128, 16777216, 8),
+ Namespace: namespace,
+ Subsystem: subsystem,
+ }),
+ hammingWeightRatio: promauto.With(r).NewHistogram(prometheus.HistogramOpts{
+ Name: "bloom_hamming_weight_ratio",
+ Help: "Ratio of the hamming weight of the bloom filter to the number of bits in the bloom filter",
+ Buckets: prometheus.ExponentialBucketsRange(0.001, 1, 12),
+ Namespace: namespace,
+ Subsystem: subsystem,
+ }),
+ estimatedCount: promauto.With(r).NewHistogram(prometheus.HistogramOpts{
+ Name: "bloom_estimated_count",
+ Help: "Estimated number of elements in the bloom filter",
+ Buckets: prometheus.ExponentialBucketsRange(1, 33554432, 10),
+ Namespace: namespace,
+ Subsystem: subsystem,
+ }),
}
}
+func clearCache(cache map[string]interface{}) {
+ clear(cache)
+}
+
+// prefixedToken returns a byte slice with sufficient capacity for a chunk-ref prefixed token
+// of a specific ngram length, along with the length of the prefix.
+// It ensures enough capacity for the prefix and the token so additional tokens can be created
+// without allocations by appending them after the prefix.
+func prefixedToken(ngram int, chk logproto.ChunkRef) ([]byte, int) {
+ var enc encoding.Encbuf
+ enc.PutBE64(uint64(chk.From))
+ enc.PutBE64(uint64(chk.Through))
+ enc.PutBE32(chk.Checksum)
+ prefixLn := enc.Len() // record the length of the prefix
+
+ enc.PutBytes(make([]byte, ngram*MaxRuneLen)) // ensure enough capacity for the ngram
+
+ // return the underlying byte slice and the length of the prefix
+ return enc.Get(), prefixLn
+}
+
// PopulateSeriesWithBloom is intended to be called on the write path, and is used to populate the bloom filter for a given series.
-func (bt *BloomTokenizer) PopulateSeriesWithBloom(seriesWithBloom *SeriesWithBloom, chunks []chunk.Chunk) {
+func (bt *BloomTokenizer) PopulateSeriesWithBloom(seriesWithBloom *SeriesWithBloom, chunks []chunk.Chunk) error {
+ startTime := time.Now().UnixMilli()
+
clearCache(bt.cache)
+ chunkTotalUncompressedSize := 0
+
for idx := range chunks {
lc := chunks[idx].Data.(*chunkenc.Facade).LokiChunk()
- bt.chunkIDTokenizer.Reinit(chunks[idx].ChunkRef)
+ tokenBuf, prefixLn := prefixedToken(bt.lineTokenizer.N, chunks[idx].ChunkRef)
+ chunkTotalUncompressedSize += lc.UncompressedSize()
- // TODO: error handling
itr, err := lc.Iterator(
context.Background(),
time.Unix(0, 0), // TODO: Parameterize/better handle the timestamps?
@@ -86,30 +155,48 @@ func (bt *BloomTokenizer) PopulateSeriesWithBloom(seriesWithBloom *SeriesWithBlo
log.NewNoopPipeline().ForStream(chunks[idx].Metric),
)
if err != nil {
- level.Info(util_log.Logger).Log("chunk iterator cannot be created")
- return
+ level.Error(util_log.Logger).Log("msg", "chunk iterator cannot be created", "err", err)
+ return err
}
defer itr.Close()
for itr.Next() && itr.Error() == nil {
- toks := bt.chunkIDTokenizer.Tokens(itr.Entry().Line)
+ chunkTokenizer := NewPrefixedTokenIter(tokenBuf, prefixLn, bt.lineTokenizer.Tokens(itr.Entry().Line))
+ for chunkTokenizer.Next() {
+ tok := chunkTokenizer.At()
+ if tok != nil {
+ str := string(tok)
+ _, found := bt.cache[str] // A cache is used ahead of the SBF, as it cuts out the costly operations of scaling bloom filters
+ if !found {
+ bt.cache[str] = nil
+
+ seriesWithBloom.Bloom.ScalableBloomFilter.TestAndAdd(tok)
- for _, tok := range toks {
- if tok.Key != nil {
- str := string(tok.Key)
+ if len(bt.cache) >= cacheSize { // While crude, this has proven efficient in performance testing. This speaks to the similarity in log lines near each other
+ clearCache(bt.cache)
+ }
+ }
+ }
+ }
+ lineTokenizer := bt.lineTokenizer.Tokens(itr.Entry().Line)
+ for lineTokenizer.Next() {
+ tok := lineTokenizer.At()
+ if tok != nil {
+ str := string(tok)
_, found := bt.cache[str] // A cache is used ahead of the SBF, as it cuts out the costly operations of scaling bloom filters
if !found {
bt.cache[str] = nil
- seriesWithBloom.Bloom.ScalableBloomFilter.TestAndAdd(tok.Key)
+ seriesWithBloom.Bloom.ScalableBloomFilter.TestAndAdd(tok)
- if len(bt.cache) >= CacheSize { // While crude, this has proven efficient in performance testing. This speaks to the similarity in log lines near each other
+ if len(bt.cache) >= cacheSize { // While crude, this has proven efficient in performance testing. This speaks to the similarity in log lines near each other
clearCache(bt.cache)
}
}
}
}
+
}
seriesWithBloom.Series.Chunks = append(seriesWithBloom.Series.Chunks, ChunkRef{
Start: chunks[idx].From,
@@ -117,34 +204,21 @@ func (bt *BloomTokenizer) PopulateSeriesWithBloom(seriesWithBloom *SeriesWithBlo
Checksum: chunks[idx].Checksum,
})
} // for each chunk
-}
-// SearchesForTokenizerAndLine is for taking a given search string (ex: on the read/query path) and returning
-// all the possible tokens, given a tokenizer.
-// This is a multi-dimensional slice where the first slice is the offset into the line, and the
-// second slice is the tokens for that offset. If an offset into the line returns no tokens, this first dimension
-// will be less than 1 + the number of skips specified in the tokenizer
-// The offset is used if the Tokenizer has a skip value being utilized.
-func SearchesForTokenizerAndLine(t Tokenizer, line string) (res [][]Token) {
- res = make([][]Token, 0, 10)
- for i := range line { // iterate by runes
- if i >= t.GetSkip()+1 {
- break
- }
- tmpTokens := make([]Token, 0, 100)
- tokens := t.Tokens(line[i:])
- // As the way the tokenizer is coded, it will reuse its internal buffers,
- // but we need to save the data, hence the need for copying
- for _, token := range tokens {
- tmpToken := Token{}
- tmpToken.Key = make([]byte, len(token.Key))
- copy(tmpToken.Key, token.Key)
- tmpTokens = append(tmpTokens, tmpToken)
- }
- if len(tokens) > 0 {
- res = append(res, tmpTokens)
- }
- }
+ endTime := time.Now().UnixMilli()
+
+ fillRatio := seriesWithBloom.Bloom.ScalableBloomFilter.FillRatio()
+ bt.metrics.hammingWeightRatio.Observe(fillRatio)
+ bt.metrics.estimatedCount.Observe(
+ float64(estimatedCount(seriesWithBloom.Bloom.ScalableBloomFilter.Capacity(), fillRatio)),
+ )
+ bt.metrics.bloomSize.Observe(float64(seriesWithBloom.Bloom.ScalableBloomFilter.Capacity() / eightBits))
+ bt.metrics.sbfCreationTime.Add(float64(endTime - startTime))
+ bt.metrics.chunkSize.Observe(float64(chunkTotalUncompressedSize))
+ return nil
+}
- return res
+// n ≈ −m ln(1 − p).
+func estimatedCount(m uint, p float64) uint {
+ return uint(-float64(m) * math.Log(1-p))
}
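
The `estimatedCount` helper above is the standard fill-ratio inversion for a Bloom filter. Under the single-hash approximation (the `k = 1` form the comment uses), a filter of `m` bits holding `n` distinct items has an expected fill ratio

    p ≈ 1 − e^(−n/m)   ⇒   n ≈ −m · ln(1 − p)

with `m` taken from `ScalableBloomFilter.Capacity()` and `p` from `FillRatio()` (recorded as `hammingWeightRatio`). With `k` hash functions the general estimator is `n ≈ −(m/k)·ln(1 − p)`; treating the scalable filter's total capacity this way is an approximation of the item count, not an exact one.
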
diff --git a/pkg/storage/bloom/v1/bloom_tokenizer_test.go b/pkg/storage/bloom/v1/bloom_tokenizer_test.go
index 034301f88c1aa..4a3f62ccbefa8 100644
--- a/pkg/storage/bloom/v1/bloom_tokenizer_test.go
+++ b/pkg/storage/bloom/v1/bloom_tokenizer_test.go
@@ -2,16 +2,16 @@ package v1
import (
"fmt"
+ "testing"
"time"
"github.com/prometheus/prometheus/model/labels"
"github.com/grafana/loki/pkg/chunkenc"
+ "github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/push"
"github.com/grafana/loki/pkg/storage/chunk"
- "testing"
-
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
@@ -20,100 +20,74 @@ import (
"github.com/prometheus/client_golang/prometheus"
)
-func TestSetLineTokenizer(t *testing.T) {
- bt, _ := NewBloomTokenizer(prometheus.DefaultRegisterer)
-
- // Validate defaults
- require.Equal(t, bt.lineTokenizer.GetMin(), DefaultNGramLength)
- require.Equal(t, bt.lineTokenizer.GetMax(), DefaultNGramLength+1)
- require.Equal(t, bt.lineTokenizer.GetSkip(), DefaultNGramSkip)
+const (
+ DefaultNGramLength = 4
+ DefaultNGramSkip = 0
+)
- require.Equal(t, bt.chunkIDTokenizer.GetMin(), DefaultNGramLength)
- require.Equal(t, bt.chunkIDTokenizer.GetMax(), DefaultNGramLength+1)
- require.Equal(t, bt.chunkIDTokenizer.GetSkip(), DefaultNGramSkip)
+var (
+ four = NewNGramTokenizer(4, 0)
+)
- // Set new tokenizer, and validate against that
- bt.SetLineTokenizer(NewNGramTokenizer(6, 7, 2))
- require.Equal(t, bt.lineTokenizer.GetMin(), 6)
- require.Equal(t, bt.lineTokenizer.GetMax(), 7)
- require.Equal(t, bt.lineTokenizer.GetSkip(), 2)
-
- require.Equal(t, bt.chunkIDTokenizer.GetMin(), 6)
- require.Equal(t, bt.chunkIDTokenizer.GetMax(), 7)
- require.Equal(t, bt.chunkIDTokenizer.GetSkip(), 2)
-}
+func TestPrefixedKeyCreation(t *testing.T) {
+ var ones uint64 = 0xffffffffffffffff
-func TestSearchesForTokenizerAndLine(t *testing.T) {
+ ref := logproto.ChunkRef{
+ From: 0,
+ Through: model.Time(int64(ones)),
+ Checksum: 0xffffffff,
+ }
for _, tc := range []struct {
- desc string
- input string
- t Tokenizer
- exp [][]Token
+ desc string
+ ngram, expLen int
}{
{
- desc: "empty",
- input: "",
- t: four,
- exp: [][]Token{},
+ desc: "0-gram",
+ ngram: 0,
+ expLen: 20,
},
{
- desc: "single char",
- input: "a",
- t: four,
- exp: [][]Token{},
- },
- {
- desc: "four chars",
- input: "abcd",
- t: four,
- exp: [][]Token{
- {{Key: []byte("abcd")}}},
- },
- {
- desc: "uuid partial",
- input: "2b1a5e46-36a2-4",
- t: four,
- exp: [][]Token{{
- {Key: []byte("2b1a")},
- {Key: []byte("b1a5")},
- {Key: []byte("1a5e")},
- {Key: []byte("a5e4")},
- {Key: []byte("5e46")},
- {Key: []byte("e46-")},
- {Key: []byte("46-3")},
- {Key: []byte("6-36")},
- {Key: []byte("-36a")},
- {Key: []byte("36a2")},
- {Key: []byte("6a2-")},
- {Key: []byte("a2-4")}},
- },
- },
- {
- desc: "short special chars",
- t: four,
- input: "日本語",
- exp: [][]Token{},
- },
- {
- desc: "longer special chars",
- t: four,
- input: "日本語日本語",
- exp: [][]Token{{
- {Key: []byte("日本語日")},
- {Key: []byte("本語日本")},
- {Key: []byte("語日本語")}}},
+ desc: "4-gram",
+ ngram: 4,
+ expLen: 20 + 4*MaxRuneLen,
},
} {
t.Run(tc.desc, func(t *testing.T) {
- require.Equal(t, tc.exp, SearchesForTokenizerAndLine(tc.t, tc.input))
+ token, prefixLn := prefixedToken(tc.ngram, ref)
+ require.Equal(t, 20, prefixLn)
+ require.Equal(t, tc.expLen, len(token))
+ // first 8 bytes should be zeros from `from`
+ for i := 0; i < 8; i++ {
+ require.Equal(t, byte(0), token[i])
+ }
+ // next 8 bytes should be ones from `through`
+ for i := 8; i < 16; i++ {
+ require.Equal(t, byte(255), token[i])
+ }
+ // next 4 bytes should be ones from `checksum`
+ for i := 16; i < 20; i++ {
+ require.Equal(t, byte(255), token[i])
+ }
})
}
+}
+func TestSetLineTokenizer(t *testing.T) {
+ bt, _ := NewBloomTokenizer(prometheus.NewRegistry(), DefaultNGramLength, DefaultNGramSkip)
+
+ // Validate defaults
+ require.Equal(t, bt.lineTokenizer.N, DefaultNGramLength)
+ require.Equal(t, bt.lineTokenizer.Skip, DefaultNGramSkip)
+
+ // Set new tokenizer, and validate against that
+ bt.SetLineTokenizer(NewNGramTokenizer(6, 7))
+ require.Equal(t, bt.lineTokenizer.N, 6)
+ require.Equal(t, bt.lineTokenizer.Skip, 7)
}
func TestPopulateSeriesWithBloom(t *testing.T) {
var testLine = "this is a log line"
- bt, _ := NewBloomTokenizer(prometheus.DefaultRegisterer)
+ bt, _ := NewBloomTokenizer(prometheus.NewRegistry(), DefaultNGramLength, DefaultNGramSkip)
sbf := filter.NewScalableBloomFilter(1024, 0.01, 0.8)
var lbsList []labels.Labels
@@ -148,17 +122,63 @@ func TestPopulateSeriesWithBloom(t *testing.T) {
Series: &series,
}
- bt.PopulateSeriesWithBloom(&swb, chunks)
- tokens := SearchesForTokenizerAndLine(four, testLine)
- for _, token := range tokens[0] {
- require.True(t, swb.Bloom.Test(token.Key))
+ err := bt.PopulateSeriesWithBloom(&swb, chunks)
+ require.NoError(t, err)
+ tokenizer := NewNGramTokenizer(DefaultNGramLength, DefaultNGramSkip)
+ itr := tokenizer.Tokens(testLine)
+ for itr.Next() {
+ token := itr.At()
+ require.True(t, swb.Bloom.Test(token))
+ }
+}
+
+func BenchmarkPopulateSeriesWithBloom(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ var testLine = lorem + lorem + lorem
+ bt, _ := NewBloomTokenizer(prometheus.NewRegistry(), DefaultNGramLength, DefaultNGramSkip)
+
+ sbf := filter.NewScalableBloomFilter(1024, 0.01, 0.8)
+ var lbsList []labels.Labels
+ lbsList = append(lbsList, labels.FromStrings("foo", "bar"))
+
+ var fpList []model.Fingerprint
+ for i := range lbsList {
+ fpList = append(fpList, model.Fingerprint(lbsList[i].Hash()))
+ }
+
+ var memChunks = make([]*chunkenc.MemChunk, 0)
+ memChunk0 := chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, chunkenc.EncSnappy, chunkenc.ChunkHeadFormatFor(chunkenc.ChunkFormatV4), 256000, 1500000)
+ _ = memChunk0.Append(&push.Entry{
+ Timestamp: time.Unix(0, 1),
+ Line: testLine,
+ })
+ memChunks = append(memChunks, memChunk0)
+
+ var chunks = make([]chunk.Chunk, 0)
+ for i := range memChunks {
+ chunks = append(chunks, chunk.NewChunk("user", fpList[i], lbsList[i], chunkenc.NewFacade(memChunks[i], 256000, 1500000), model.TimeFromUnixNano(0), model.TimeFromUnixNano(1)))
+ }
+
+ bloom := Bloom{
+ ScalableBloomFilter: *sbf,
+ }
+ series := Series{
+ Fingerprint: model.Fingerprint(lbsList[0].Hash()),
+ }
+ swb := SeriesWithBloom{
+ Bloom: &bloom,
+ Series: &series,
+ }
+
+ err := bt.PopulateSeriesWithBloom(&swb, chunks)
+ require.NoError(b, err)
}
}
func BenchmarkMapClear(b *testing.B) {
- bt, _ := NewBloomTokenizer(prometheus.DefaultRegisterer)
+ bt, _ := NewBloomTokenizer(prometheus.NewRegistry(), DefaultNGramLength, DefaultNGramSkip)
for i := 0; i < b.N; i++ {
- for k := 0; k < CacheSize; k++ {
+ for k := 0; k < cacheSize; k++ {
bt.cache[fmt.Sprint(k)] = k
}
@@ -167,12 +187,12 @@ func BenchmarkMapClear(b *testing.B) {
}
func BenchmarkNewMap(b *testing.B) {
- bt, _ := NewBloomTokenizer(prometheus.DefaultRegisterer)
+ bt, _ := NewBloomTokenizer(prometheus.NewRegistry(), DefaultNGramLength, DefaultNGramSkip)
for i := 0; i < b.N; i++ {
- for k := 0; k < CacheSize; k++ {
+ for k := 0; k < cacheSize; k++ {
bt.cache[fmt.Sprint(k)] = k
}
- bt.cache = make(map[string]interface{}, CacheSize)
+ bt.cache = make(map[string]interface{}, cacheSize)
}
}
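
The expected prefix length of 20 asserted in `TestPrefixedKeyCreation` follows directly from `prefixedToken`: 8 bytes each for the big-endian `From` and `Through` timestamps plus 4 bytes for the checksum, with `ngram*MaxRuneLen` extra bytes reserved so an n-gram can later be appended in place without reallocating. A small illustrative sketch (hypothetical helper and values, not part of the change):

```go
package main

import "fmt"

// MaxRuneLen mirrors the tokenizer constant: at most 4 bytes per rune.
const MaxRuneLen = 4

// buildPrefixedToken mirrors PrefixedTokenIter.At: keep the 20-byte chunk
// prefix and append the n-gram into the capacity reserved after it.
func buildPrefixedToken(buf []byte, prefixLn int, ngram []byte) []byte {
	return append(buf[:prefixLn], ngram...)
}

func main() {
	n := 4
	buf := make([]byte, 20, 20+n*MaxRuneLen) // prefix + reserved n-gram space
	tok := buildPrefixedToken(buf, 20, []byte("abcd"))
	fmt.Println(len(tok)) // 24 = 20-byte prefix + 4-byte n-gram
}
```
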
diff --git a/pkg/storage/bloom/v1/builder.go b/pkg/storage/bloom/v1/builder.go
index 7608c85245b07..7b5d0dc3d73ff 100644
--- a/pkg/storage/bloom/v1/builder.go
+++ b/pkg/storage/bloom/v1/builder.go
@@ -30,10 +30,12 @@ type BlockBuilder struct {
blooms *BloomBlockBuilder
}
-func NewBlockOptions() BlockOptions {
+func NewBlockOptions(NGramLength, NGramSkip uint64) BlockOptions {
return BlockOptions{
schema: Schema{
- version: byte(1),
+ version: byte(1),
+ nGramLength: NGramLength,
+ nGramSkip: NGramSkip,
},
SeriesPageSize: 100,
BloomPageSize: 10 << 10, // 0.01MB
@@ -495,7 +497,7 @@ func NewMergeBuilder(blocks []PeekingIterator[*SeriesWithBloom], store Iterator[
// NB: this will build one block. Ideally we would build multiple blocks once a target size threshold is met
// but this gives us a good starting point.
-func (mb *MergeBuilder) Build(builder *BlockBuilder) error {
+func (mb *MergeBuilder) Build(builder *BlockBuilder) (uint32, error) {
var (
nextInBlocks *SeriesWithBloom
)
@@ -544,6 +546,7 @@ func (mb *MergeBuilder) Build(builder *BlockBuilder) error {
cur = &SeriesWithBloom{
Series: nextInStore,
Bloom: &Bloom{
+ // TODO parameterise SBF options. fp_rate
ScalableBloomFilter: *filter.NewScalableBloomFilter(1024, 0.01, 0.8),
},
}
@@ -560,21 +563,21 @@ func (mb *MergeBuilder) Build(builder *BlockBuilder) error {
},
cur.Bloom,
); err != nil {
- return errors.Wrapf(err, "populating bloom for series with fingerprint: %v", nextInStore.Fingerprint)
+ return 0, errors.Wrapf(err, "populating bloom for series with fingerprint: %v", nextInStore.Fingerprint)
}
}
if err := builder.AddSeries(*cur); err != nil {
- return errors.Wrap(err, "adding series to block")
+ return 0, errors.Wrap(err, "adding series to block")
}
}
- _, err := builder.blooms.Close()
+ checksum, err := builder.blooms.Close()
if err != nil {
- return errors.Wrap(err, "closing bloom file")
+ return 0, errors.Wrap(err, "closing bloom file")
}
if err := builder.index.Close(); err != nil {
- return errors.Wrap(err, "closing series file")
+ return 0, errors.Wrap(err, "closing series file")
}
- return nil
+ return checksum, nil
}
diff --git a/pkg/storage/bloom/v1/builder_test.go b/pkg/storage/bloom/v1/builder_test.go
index 2a547cd479bf0..622e076f97b03 100644
--- a/pkg/storage/bloom/v1/builder_test.go
+++ b/pkg/storage/bloom/v1/builder_test.go
@@ -3,51 +3,13 @@ package v1
import (
"bytes"
"errors"
- "fmt"
"testing"
- "github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
"github.com/grafana/loki/pkg/chunkenc"
- "github.com/grafana/loki/pkg/storage/bloom/v1/filter"
)
-func mkBasicSeriesWithBlooms(nSeries, keysPerSeries int, fromFp, throughFp model.Fingerprint, fromTs, throughTs model.Time) (seriesList []SeriesWithBloom, keysList [][][]byte) {
- seriesList = make([]SeriesWithBloom, 0, nSeries)
- keysList = make([][][]byte, 0, nSeries)
- for i := 0; i < nSeries; i++ {
- var series Series
- step := (throughFp - fromFp) / (model.Fingerprint(nSeries))
- series.Fingerprint = fromFp + model.Fingerprint(i)*step
- timeDelta := fromTs + (throughTs-fromTs)/model.Time(nSeries)*model.Time(i)
- series.Chunks = []ChunkRef{
- {
- Start: fromTs + timeDelta*model.Time(i),
- End: fromTs + timeDelta*model.Time(i),
- Checksum: uint32(i),
- },
- }
-
- var bloom Bloom
- bloom.ScalableBloomFilter = *filter.NewScalableBloomFilter(1024, 0.01, 0.8)
-
- keys := make([][]byte, 0, keysPerSeries)
- for j := 0; j < keysPerSeries; j++ {
- key := []byte(fmt.Sprint(j))
- bloom.Add(key)
- keys = append(keys, key)
- }
-
- seriesList = append(seriesList, SeriesWithBloom{
- Series: &series,
- Bloom: &bloom,
- })
- keysList = append(keysList, keys)
- }
- return
-}
-
func EqualIterators[T any](t *testing.T, test func(a, b T), expected, actual Iterator[T]) {
for expected.Next() {
require.True(t, actual.Next())
@@ -87,12 +49,16 @@ func TestBlockBuilderRoundTrip(t *testing.T) {
},
} {
t.Run(tc.desc, func(t *testing.T) {
+ schema := Schema{
+ version: DefaultSchemaVersion,
+ encoding: chunkenc.EncSnappy,
+ nGramLength: 10,
+ nGramSkip: 2,
+ }
+
builder, err := NewBlockBuilder(
BlockOptions{
- schema: Schema{
- version: DefaultSchemaVersion,
- encoding: chunkenc.EncSnappy,
- },
+ schema: schema,
SeriesPageSize: 100,
BloomPageSize: 10 << 10,
},
@@ -106,6 +72,10 @@ func TestBlockBuilderRoundTrip(t *testing.T) {
block := NewBlock(tc.reader)
querier := NewBlockQuerier(block)
+ err = block.LoadHeaders()
+ require.Nil(t, err)
+ require.Equal(t, block.blooms.schema, schema)
+
for i := 0; i < len(data); i++ {
require.Equal(t, true, querier.Next(), "on iteration %d with error %v", i, querier.Err())
got := querier.At()
@@ -208,7 +178,9 @@ func TestMergeBuilder(t *testing.T) {
)
require.Nil(t, err)
- require.Nil(t, mergeBuilder.Build(builder))
+ _, err = mergeBuilder.Build(builder)
+ require.Nil(t, err)
+
block := NewBlock(reader)
querier := NewBlockQuerier(block)
diff --git a/pkg/storage/bloom/v1/fuse.go b/pkg/storage/bloom/v1/fuse.go
index 150c656aca04d..c397a7a55fd57 100644
--- a/pkg/storage/bloom/v1/fuse.go
+++ b/pkg/storage/bloom/v1/fuse.go
@@ -5,49 +5,49 @@ import (
"github.com/prometheus/common/model"
)
-type request struct {
- fp model.Fingerprint
- chks ChunkRefs
- searches [][]byte
- response chan output
+type Request struct {
+ Fp model.Fingerprint
+ Chks ChunkRefs
+ Searches [][]byte
+ Response chan<- Output
}
-// output represents a chunk that failed to pass all searches
-// and must be downloaded
+// Output represents, for a given fingerprint, the chunks that failed at
+// least one search and therefore do not need to be downloaded
-type output struct {
- fp model.Fingerprint
- chks ChunkRefs
+type Output struct {
+ Fp model.Fingerprint
+ Removals ChunkRefs
}
// Fuse combines multiple requests into a single loop iteration
// over the data set and returns the corresponding outputs
// TODO(owen-d): better async control
-func (bq *BlockQuerier) Fuse(inputs []PeekingIterator[request]) *FusedQuerier {
+func (bq *BlockQuerier) Fuse(inputs []PeekingIterator[Request]) *FusedQuerier {
return NewFusedQuerier(bq, inputs)
}
type FusedQuerier struct {
bq *BlockQuerier
- inputs Iterator[[]request]
+ inputs Iterator[[]Request]
}
-func NewFusedQuerier(bq *BlockQuerier, inputs []PeekingIterator[request]) *FusedQuerier {
- heap := NewHeapIterator[request](
- func(a, b request) bool {
- return a.fp < b.fp
+func NewFusedQuerier(bq *BlockQuerier, inputs []PeekingIterator[Request]) *FusedQuerier {
+ heap := NewHeapIterator[Request](
+ func(a, b Request) bool {
+ return a.Fp < b.Fp
},
inputs...,
)
- merging := NewDedupingIter[request, []request](
- func(a request, b []request) bool {
- return a.fp == b[0].fp
+ merging := NewDedupingIter[Request, []Request](
+ func(a Request, b []Request) bool {
+ return a.Fp == b[0].Fp
},
- func(a request) []request { return []request{a} },
- func(a request, b []request) []request {
+ func(a Request) []Request { return []Request{a} },
+ func(a Request, b []Request) []Request {
return append(b, a)
},
- NewPeekingIter[request](heap),
+ NewPeekingIter[Request](heap),
)
return &FusedQuerier{
bq: bq,
@@ -60,7 +60,7 @@ func (fq *FusedQuerier) Run() error {
// find all queries for the next relevant fingerprint
nextBatch := fq.inputs.At()
- fp := nextBatch[0].fp
+ fp := nextBatch[0].Fp
// advance the series iterator to the next fingerprint
if err := fq.bq.Seek(fp); err != nil {
@@ -76,9 +76,9 @@ func (fq *FusedQuerier) Run() error {
if series.Fingerprint != fp {
// fingerprint not found, can't remove chunks
for _, input := range nextBatch {
- input.response <- output{
- fp: fp,
- chks: input.chks,
+ input.Response <- Output{
+ Fp: fp,
+ Removals: nil,
}
}
}
@@ -88,9 +88,9 @@ func (fq *FusedQuerier) Run() error {
if !fq.bq.blooms.Next() {
// fingerprint not found, can't remove chunks
for _, input := range nextBatch {
- input.response <- output{
- fp: fp,
- chks: input.chks,
+ input.Response <- Output{
+ Fp: fp,
+ Removals: nil,
}
}
continue
@@ -100,41 +100,42 @@ func (fq *FusedQuerier) Run() error {
// test every input against this chunk
inputLoop:
for _, input := range nextBatch {
- mustCheck, inBlooms := input.chks.Compare(series.Chunks, true)
+ _, inBlooms := input.Chks.Compare(series.Chunks, true)
// First, see if the search passes the series level bloom before checking for chunks individually
- for _, search := range input.searches {
+ for _, search := range input.Searches {
if !bloom.Test(search) {
- // the entire series bloom didn't pass one of the searches,
- // so we can skip checking chunks individually.
- // We still return all chunks that are not included in the bloom
- // as they may still have the data
- input.response <- output{
- fp: fp,
- chks: mustCheck,
+                               // The series-level bloom is missing this search, so every requested
+                               // chunk indexed in this block (inBlooms) is guaranteed not to match
+                               // and does not need to be downloaded
+ input.Response <- Output{
+ Fp: fp,
+ Removals: inBlooms,
}
continue inputLoop
}
}
+ // TODO(owen-d): pool
+ var removals ChunkRefs
+
chunkLoop:
for _, chk := range inBlooms {
- for _, search := range input.searches {
+ for _, search := range input.Searches {
// TODO(owen-d): meld chunk + search into a single byte slice from the block schema
var combined = search
if !bloom.ScalableBloomFilter.Test(combined) {
+ removals = append(removals, chk)
continue chunkLoop
}
}
- // chunk passed all searches, add to the list of chunks to download
- mustCheck = append(mustCheck, chk)
-
+ // Otherwise, the chunk passed all the searches
}
- input.response <- output{
- fp: fp,
- chks: mustCheck,
+ input.Response <- Output{
+ Fp: fp,
+ Removals: removals,
}
}
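
For orientation, a sketch (hypothetical helper, not part of the change) of how the now-exported `Request`/`Output` types might be driven, following the pattern in the tests below: one response channel per request chain, with `Removals` naming the chunks the block guarantees cannot match the searches and that therefore need not be downloaded. It assumes an already-built `*BlockQuerier` and a chunk list for the fingerprint.

```go
// collectRemovals is a hypothetical helper illustrating the exported API.
func collectRemovals(querier *BlockQuerier, fp model.Fingerprint, chks ChunkRefs, searches [][]byte) (ChunkRefs, error) {
	ch := make(chan Output)
	reqs := []Request{{Fp: fp, Chks: chks, Searches: searches, Response: ch}}

	fused := querier.Fuse([]PeekingIterator[Request]{
		NewPeekingIter[Request](NewSliceIter[Request](reqs)),
	})

	errCh := make(chan error, 1)
	go func() {
		errCh <- fused.Run() // sends one Output per request on ch
		close(ch)
	}()

	var removals ChunkRefs
	for out := range ch {
		removals = append(removals, out.Removals...) // chunks that can be skipped
	}
	return removals, <-errCh
}
```
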
diff --git a/pkg/storage/bloom/v1/fuse_test.go b/pkg/storage/bloom/v1/fuse_test.go
index e73f654c5295f..e784ac0168201 100644
--- a/pkg/storage/bloom/v1/fuse_test.go
+++ b/pkg/storage/bloom/v1/fuse_test.go
@@ -41,37 +41,39 @@ func TestFusedQuerier(t *testing.T) {
querier := NewBlockQuerier(block)
nReqs := 10
- var inputs [][]request
+ var inputs [][]Request
+ var resChans []chan Output
for i := 0; i < nReqs; i++ {
- ch := make(chan output)
- var reqs []request
+ ch := make(chan Output)
+ var reqs []Request
// find 2 series for each
for j := 0; j < 2; j++ {
idx := numSeries/nReqs*i + j
- reqs = append(reqs, request{
- fp: data[idx].Series.Fingerprint,
- chks: data[idx].Series.Chunks,
- response: ch,
+ reqs = append(reqs, Request{
+ Fp: data[idx].Series.Fingerprint,
+ Chks: data[idx].Series.Chunks,
+ Response: ch,
})
}
inputs = append(inputs, reqs)
+ resChans = append(resChans, ch)
}
- var itrs []PeekingIterator[request]
+ var itrs []PeekingIterator[Request]
for _, reqs := range inputs {
- itrs = append(itrs, NewPeekingIter[request](NewSliceIter[request](reqs)))
+ itrs = append(itrs, NewPeekingIter[Request](NewSliceIter[Request](reqs)))
}
- resps := make([][]output, nReqs)
+ resps := make([][]Output, nReqs)
var g sync.WaitGroup
g.Add(1)
go func() {
require.Nil(t, concurrency.ForEachJob(
context.Background(),
- len(resps),
- len(resps),
+ len(resChans),
+ len(resChans),
func(_ context.Context, i int) error {
- for v := range inputs[i][0].response {
+ for v := range resChans[i] {
resps[i] = append(resps[i], v)
}
return nil
@@ -84,7 +86,7 @@ func TestFusedQuerier(t *testing.T) {
require.Nil(t, fused.Run())
for _, input := range inputs {
- close(input[0].response)
+ close(input[0].Response)
}
g.Wait()
@@ -93,9 +95,9 @@ func TestFusedQuerier(t *testing.T) {
resp := resps[i][j]
require.Equal(
t,
- output{
- fp: req.fp,
- chks: req.chks,
+ Output{
+ Fp: req.Fp,
+ Removals: nil,
},
resp,
)
@@ -103,7 +105,7 @@ func TestFusedQuerier(t *testing.T) {
}
}
-func setupBlockForBenchmark(b *testing.B) (*BlockQuerier, [][]request) {
+func setupBlockForBenchmark(b *testing.B) (*BlockQuerier, [][]Request, []chan Output) {
indexBuf := bytes.NewBuffer(nil)
bloomsBuf := bytes.NewBuffer(nil)
writer := NewMemoryBlockWriter(indexBuf, bloomsBuf)
@@ -132,11 +134,12 @@ func setupBlockForBenchmark(b *testing.B) (*BlockQuerier, [][]request) {
numRequestChains := 100
seriesPerRequest := 100
- var requestChains [][]request
+ var requestChains [][]Request
+ var responseChans []chan Output
for i := 0; i < numRequestChains; i++ {
- var reqs []request
+ var reqs []Request
// ensure they use the same channel
- ch := make(chan output)
+ ch := make(chan Output)
// evenly spread out the series queried within a single request chain
// to mimic series distribution across keyspace
for j := 0; j < seriesPerRequest; j++ {
@@ -145,21 +148,22 @@ func setupBlockForBenchmark(b *testing.B) (*BlockQuerier, [][]request) {
if idx >= numSeries {
idx = numSeries - 1
}
- reqs = append(reqs, request{
- fp: data[idx].Series.Fingerprint,
- chks: data[idx].Series.Chunks,
- response: ch,
+ reqs = append(reqs, Request{
+ Fp: data[idx].Series.Fingerprint,
+ Chks: data[idx].Series.Chunks,
+ Response: ch,
})
}
requestChains = append(requestChains, reqs)
+ responseChans = append(responseChans, ch)
}
- return querier, requestChains
+ return querier, requestChains, responseChans
}
func BenchmarkBlockQuerying(b *testing.B) {
b.StopTimer()
- querier, requestChains := setupBlockForBenchmark(b)
+ querier, requestChains, responseChans := setupBlockForBenchmark(b)
// benchmark
b.StartTimer()
@@ -167,7 +171,7 @@ func BenchmarkBlockQuerying(b *testing.B) {
for i := 0; i < b.N; i++ {
for _, chain := range requestChains {
for _, req := range chain {
- _, _ = querier.CheckChunksForSeries(req.fp, req.chks, nil)
+ _, _ = querier.CheckChunksForSeries(req.Fp, req.Chks, nil)
}
}
}
@@ -178,22 +182,22 @@ func BenchmarkBlockQuerying(b *testing.B) {
go func() {
require.Nil(b, concurrency.ForEachJob(
context.Background(),
- len(requestChains), len(requestChains),
+ len(responseChans), len(responseChans),
func(_ context.Context, idx int) error {
// nolint:revive
- for range requestChains[idx][0].response {
+ for range responseChans[idx] {
}
return nil
},
))
}()
- var itrs []PeekingIterator[request]
+ var itrs []PeekingIterator[Request]
for i := 0; i < b.N; i++ {
itrs = itrs[:0]
for _, reqs := range requestChains {
- itrs = append(itrs, NewPeekingIter[request](NewSliceIter[request](reqs)))
+ itrs = append(itrs, NewPeekingIter[Request](NewSliceIter[Request](reqs)))
}
fused := querier.Fuse(itrs)
_ = fused.Run()
diff --git a/pkg/storage/bloom/v1/index.go b/pkg/storage/bloom/v1/index.go
index 98e170b183e7c..cc168bc06a6d0 100644
--- a/pkg/storage/bloom/v1/index.go
+++ b/pkg/storage/bloom/v1/index.go
@@ -12,14 +12,15 @@ import (
)
type Schema struct {
- version byte
- encoding chunkenc.Encoding
+ version byte
+ encoding chunkenc.Encoding
+ nGramLength, nGramSkip uint64
}
// byte length
func (s Schema) Len() int {
- // magic number + version + encoding
- return 4 + 1 + 1
+ // magic number + version + encoding + ngram length + ngram skip
+ return 4 + 1 + 1 + 8 + 8
}
func (s *Schema) DecompressorPool() chunkenc.ReaderPool {
@@ -35,6 +36,9 @@ func (s *Schema) Encode(enc *encoding.Encbuf) {
enc.PutBE32(magicNumber)
enc.PutByte(s.version)
enc.PutByte(byte(s.encoding))
+ enc.PutBE64(s.nGramLength)
+ enc.PutBE64(s.nGramSkip)
+
}
func (s *Schema) DecodeFrom(r io.ReadSeeker) error {
@@ -64,6 +68,9 @@ func (s *Schema) Decode(dec *encoding.Decbuf) error {
return errors.Wrap(err, "parsing encoding")
}
+ s.nGramLength = dec.Be64()
+ s.nGramSkip = dec.Be64()
+
return dec.Err()
}
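
With the two `PutBE64` calls added to `Encode` (and the matching `Be64` reads in `Decode`), the fixed-size schema header grows from 6 to 22 bytes, matching the updated `Len()`. The byte layout implied by the encode order above:

```go
// Schema header layout (22 bytes), as written by Encode:
//
//   [0:4]    magicNumber  (big-endian uint32)
//   [4]      version      (byte)
//   [5]      encoding     (byte)
//   [6:14]   nGramLength  (big-endian uint64)
//   [14:22]  nGramSkip    (big-endian uint64)
```
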
diff --git a/pkg/storage/bloom/v1/iter.go b/pkg/storage/bloom/v1/iter.go
new file mode 100644
index 0000000000000..b1b460fb64207
--- /dev/null
+++ b/pkg/storage/bloom/v1/iter.go
@@ -0,0 +1,70 @@
+package v1
+
+type IndexedValue[T any] struct {
+ idx int
+ val T
+}
+
+func (iv IndexedValue[T]) Value() T {
+ return iv.val
+}
+
+func (iv IndexedValue[T]) Index() int {
+ return iv.idx
+}
+
+type IterWithIndex[T any] struct {
+ Iterator[T]
+ zero T // zero value of T
+ cache IndexedValue[T]
+}
+
+func (it *IterWithIndex[T]) At() IndexedValue[T] {
+ it.cache.val = it.Iterator.At()
+ return it.cache
+}
+
+func NewIterWithIndex[T any](iter Iterator[T], idx int) Iterator[IndexedValue[T]] {
+ return &IterWithIndex[T]{
+ Iterator: iter,
+ cache: IndexedValue[T]{idx: idx},
+ }
+}
+
+type SliceIterWithIndex[T any] struct {
+ xs []T // source slice
+ pos int // position within the slice
+ zero T // zero value of T
+ cache IndexedValue[T]
+}
+
+func (it *SliceIterWithIndex[T]) Next() bool {
+ it.pos++
+ return it.pos < len(it.xs)
+}
+
+func (it *SliceIterWithIndex[T]) Err() error {
+ return nil
+}
+
+func (it *SliceIterWithIndex[T]) At() IndexedValue[T] {
+ it.cache.val = it.xs[it.pos]
+ return it.cache
+}
+
+func (it *SliceIterWithIndex[T]) Peek() (IndexedValue[T], bool) {
+ if it.pos+1 >= len(it.xs) {
+ it.cache.val = it.zero
+ return it.cache, false
+ }
+ it.cache.val = it.xs[it.pos+1]
+ return it.cache, true
+}
+
+func NewSliceIterWithIndex[T any](xs []T, idx int) PeekingIterator[IndexedValue[T]] {
+ return &SliceIterWithIndex[T]{
+ xs: xs,
+ pos: -1,
+ cache: IndexedValue[T]{idx: idx},
+ }
+}
diff --git a/pkg/storage/bloom/v1/iter_test.go b/pkg/storage/bloom/v1/iter_test.go
new file mode 100644
index 0000000000000..3ec8ead536e75
--- /dev/null
+++ b/pkg/storage/bloom/v1/iter_test.go
@@ -0,0 +1,35 @@
+package v1
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestSliceIterWithIndex(t *testing.T) {
+ t.Run("SliceIterWithIndex implements PeekingIterator interface", func(t *testing.T) {
+ xs := []string{"a", "b", "c"}
+ it := NewSliceIterWithIndex(xs, 123)
+
+ // peek at first item
+ p, ok := it.Peek()
+ require.True(t, ok)
+ require.Equal(t, "a", p.val)
+ require.Equal(t, 123, p.idx)
+
+ // proceed to first item
+ require.True(t, it.Next())
+ require.Equal(t, "a", it.At().val)
+ require.Equal(t, 123, it.At().idx)
+
+ // proceed to second and third item
+ require.True(t, it.Next())
+ require.True(t, it.Next())
+
+ // peek at non-existing fourth item
+ p, ok = it.Peek()
+ require.False(t, ok)
+ require.Equal(t, "", p.val) // "" is zero value for type string
+ require.Equal(t, 123, p.idx)
+ })
+}
diff --git a/pkg/storage/bloom/v1/reader.go b/pkg/storage/bloom/v1/reader.go
index e4de9609b9082..d5c70a2b64d83 100644
--- a/pkg/storage/bloom/v1/reader.go
+++ b/pkg/storage/bloom/v1/reader.go
@@ -49,12 +49,12 @@ func NewDirectoryBlockReader(dir string) *DirectoryBlockReader {
func (r *DirectoryBlockReader) Init() error {
if !r.initialized {
var err error
- r.index, err = os.Open(filepath.Join(r.dir, seriesFileName))
+ r.index, err = os.Open(filepath.Join(r.dir, SeriesFileName))
if err != nil {
return errors.Wrap(err, "opening series file")
}
- r.blooms, err = os.Open(filepath.Join(r.dir, bloomFileName))
+ r.blooms, err = os.Open(filepath.Join(r.dir, BloomFileName))
if err != nil {
return errors.Wrap(err, "opening bloom file")
}
diff --git a/pkg/storage/bloom/v1/test_util.go b/pkg/storage/bloom/v1/test_util.go
new file mode 100644
index 0000000000000..215ecaffe177e
--- /dev/null
+++ b/pkg/storage/bloom/v1/test_util.go
@@ -0,0 +1,81 @@
+package v1
+
+import (
+ "bytes"
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/prometheus/common/model"
+ "github.com/stretchr/testify/require"
+
+ "github.com/grafana/loki/pkg/chunkenc"
+ "github.com/grafana/loki/pkg/storage/bloom/v1/filter"
+)
+
+func MakeBlockQuerier(t testing.TB, fromFp, throughFp model.Fingerprint, fromTs, throughTs model.Time) (*BlockQuerier, []SeriesWithBloom) {
+ // references for linking in memory reader+writer
+ indexBuf := bytes.NewBuffer(nil)
+ bloomsBuf := bytes.NewBuffer(nil)
+ writer := NewMemoryBlockWriter(indexBuf, bloomsBuf)
+ reader := NewByteReader(indexBuf, bloomsBuf)
+ numSeries := int(throughFp - fromFp)
+ numKeysPerSeries := 1000
+ data, _ := mkBasicSeriesWithBlooms(numSeries, numKeysPerSeries, fromFp, throughFp, fromTs, throughTs)
+
+ builder, err := NewBlockBuilder(
+ BlockOptions{
+ schema: Schema{
+ version: DefaultSchemaVersion,
+ encoding: chunkenc.EncSnappy,
+ },
+ SeriesPageSize: 100,
+ BloomPageSize: 10 << 10,
+ },
+ writer,
+ )
+ require.Nil(t, err)
+ itr := NewSliceIter[SeriesWithBloom](data)
+ _, err = builder.BuildFrom(itr)
+ require.Nil(t, err)
+ block := NewBlock(reader)
+ return NewBlockQuerier(block), data
+}
+
+func mkBasicSeriesWithBlooms(nSeries, keysPerSeries int, fromFp, throughFp model.Fingerprint, fromTs, throughTs model.Time) (seriesList []SeriesWithBloom, keysList [][][]byte) {
+ seriesList = make([]SeriesWithBloom, 0, nSeries)
+ keysList = make([][][]byte, 0, nSeries)
+
+ step := (throughFp - fromFp) / model.Fingerprint(nSeries)
+ timeDelta := time.Duration(throughTs.Sub(fromTs).Nanoseconds() / int64(nSeries))
+
+ for i := 0; i < nSeries; i++ {
+ var series Series
+ series.Fingerprint = fromFp + model.Fingerprint(i)*step
+ from := fromTs.Add(timeDelta * time.Duration(i))
+ series.Chunks = []ChunkRef{
+ {
+ Start: from,
+ End: from.Add(timeDelta),
+ Checksum: uint32(i),
+ },
+ }
+
+ var bloom Bloom
+ bloom.ScalableBloomFilter = *filter.NewScalableBloomFilter(1024, 0.01, 0.8)
+
+ keys := make([][]byte, 0, keysPerSeries)
+ for j := 0; j < keysPerSeries; j++ {
+ key := []byte(fmt.Sprint(i*keysPerSeries + j))
+ bloom.Add(key)
+ keys = append(keys, key)
+ }
+
+ seriesList = append(seriesList, SeriesWithBloom{
+ Series: &series,
+ Bloom: &bloom,
+ })
+ keysList = append(keysList, keys)
+ }
+ return
+}
diff --git a/pkg/storage/bloom/v1/tokenizer.go b/pkg/storage/bloom/v1/tokenizer.go
index 96e51f2cd0488..e3e1e065bf084 100644
--- a/pkg/storage/bloom/v1/tokenizer.go
+++ b/pkg/storage/bloom/v1/tokenizer.go
@@ -1,170 +1,113 @@
package v1
import (
- "encoding/binary"
"unicode/utf8"
-
- "github.com/grafana/loki/pkg/logproto"
)
-type Token struct {
- Key []byte
-}
+const (
+ MaxRuneLen = 4
+)
-type Tokenizer interface {
- Tokens(line string) []Token
- GetSkip() int
- GetMin() int
- GetMax() int
+func reassemble(buf []rune, ln, pos int, result []byte) []byte {
+ result = result[:0] // Reset the result slice
+ for i := 0; i < ln; i++ {
+ cur := pos % len(buf)
+ pos++
+ result = utf8.AppendRune(result, buf[cur])
+ }
+ return result
}
-const TokenBufferSize = 4096
-const TokenKeySize = 132
-
-type NgramTokenizer struct {
- // [min,max) exclusivity
- min, max, skip int
- buffers [][]rune // circular buffers used for ngram generation
- runeBuffer []byte // buffer used for token generation
- internalTokenBuffer []Token // circular buffer for tokens
+// Iterable variants (more performant, less space)
+type NGramTokenizer struct {
+ N, Skip int
+ buffer []rune // circular buffer used for ngram generation
+ res []byte // buffer used for token generation
}
/*
N-Grams (https://en.wikipedia.org/wiki/N-gram) are a series of 'n' adjacent characters in a string.
These will be utilized for the bloom filters to allow for fuzzy searching.
*/
-func NewNGramTokenizer(min, max, skip int) *NgramTokenizer {
- capacity := max - min
- t := &NgramTokenizer{
- min: min,
- max: max,
- skip: skip,
- buffers: make([][]rune, capacity),
- runeBuffer: make([]byte, 0, max*4),
- internalTokenBuffer: make([]Token, 0, TokenBufferSize),
- }
-
- for i := range t.buffers {
- t.buffers[i] = make([]rune, t.min+i)
- }
-
- for i := 0; i < cap(t.internalTokenBuffer); i++ {
- t.internalTokenBuffer = append(t.internalTokenBuffer, Token{Key: make([]byte, 0, TokenKeySize)})
+func NewNGramTokenizer(n, skip int) *NGramTokenizer {
+ t := &NGramTokenizer{
+ N: n,
+ Skip: skip,
+ buffer: make([]rune, n+skip),
+ res: make([]byte, 0, n*MaxRuneLen), // maximum 4 bytes per rune
}
return t
}
-func (t *NgramTokenizer) GetSkip() int {
- return t.skip
-}
-
-func (t *NgramTokenizer) GetMin() int {
- return t.min
-}
+// The Token iterator uses shared buffers for performance. The []byte returned by At()
+// is not safe for use after subsequent calls to Next()
+func (t *NGramTokenizer) Tokens(line string) NGramTokenIter {
+ return NGramTokenIter{
+ n: t.N,
+ skip: t.Skip,
-func (t *NgramTokenizer) GetMax() int {
- return t.max
-}
+ line: line,
-func (t *NgramTokenizer) Tokens(line string) []Token {
- var i int // rune index (not position that is measured in the range loop)
- numToks := 0
- for _, r := range line {
-
- // j is the index of the buffer to use
- for j := 0; j < (t.max - t.min); j++ {
- // n is the length of the ngram
- n := j + t.min
- // pos is the position in the buffer to overwrite
- pos := i % n
- t.buffers[j][pos] = r
-
- if i >= n-1 && (i+1-n)%(t.skip+1) == 0 {
- t.runeBuffer = reassemble(t.buffers[j], (i+1)%n, t.runeBuffer)
- if numToks >= cap(t.internalTokenBuffer) || numToks == len(t.internalTokenBuffer) {
- t.internalTokenBuffer = append(t.internalTokenBuffer, Token{Key: make([]byte, 0, TokenKeySize)})
- }
- t.internalTokenBuffer[numToks].Key = t.internalTokenBuffer[numToks].Key[:0]
- t.internalTokenBuffer[numToks].Key = append(t.internalTokenBuffer[numToks].Key, t.runeBuffer...)
- numToks++
- }
- }
- i++
+ buffer: t.buffer,
+ res: t.res,
}
- return t.internalTokenBuffer[0:numToks]
}
-func reassemble(buf []rune, pos int, result []byte) []byte {
- result = result[:0] // Reset the result slice
- for i := 0; i < len(buf); i++ {
- cur := (pos + i) % len(buf)
- result = utf8.AppendRune(result, buf[cur])
- }
- return result
-}
+type NGramTokenIter struct {
+ n, skip int
-func chunkIDTransformer(tok Token, prefix []byte) Token {
- tok.Key = append(append(tok.Key, prefix...), tok.Key...)[len(tok.Key):]
- return tok
-}
+ runeIndex, offset int
+ line string // source
-type WrappedTokenizer struct {
- t Tokenizer
- tokenBuffer []Token
- prefix []byte
- i64buf []byte
- i32buf []byte
+ buffer []rune // circular buffers used for ngram generation
+ res []byte
}
-func (w *WrappedTokenizer) Tokens(line string) []Token {
- w.tokenBuffer = w.tokenBuffer[:0] // Reset the result slice
- toks := w.t.Tokens(line)
- for _, tok := range toks {
- w.tokenBuffer = append(w.tokenBuffer, chunkIDTransformer(tok, w.prefix), tok)
- }
+func (t *NGramTokenIter) Next() bool {
+ for i, r := range t.line[t.offset:] {
+ t.buffer[t.runeIndex%len(t.buffer)] = r
+ t.runeIndex++
- return w.tokenBuffer
-}
+ if t.runeIndex < t.n {
+ continue
+ }
+
+ // if the start of the ngram is at the interval of our skip factor, emit it.
+               // we add 1 to the skip because of the modulo logic:
+               // `n % 0` is a divide by zero and `n % 1` is always 0
+ if (t.runeIndex-t.n)%(t.skip+1) == 0 {
+ t.offset += (i + utf8.RuneLen(r))
+ return true
+ }
-func (w *WrappedTokenizer) GetSkip() int {
- return w.t.GetSkip()
+ }
+ return false
}
-func (w *WrappedTokenizer) GetMin() int {
- return w.t.GetMin()
+func (t *NGramTokenIter) At() []byte {
+ return reassemble(t.buffer, t.n, (t.runeIndex-t.n)%len(t.buffer), t.res[:0])
}
-func (w *WrappedTokenizer) GetMax() int {
- return w.t.GetMax()
+func (t *NGramTokenIter) Err() error {
+ return nil
}
-func ChunkIDTokenizer(t Tokenizer) *WrappedTokenizer {
- p := make([]byte, 0, 256)
- return &WrappedTokenizer{
- t: t,
- tokenBuffer: make([]Token, 0, TokenBufferSize),
- prefix: p,
- i64buf: make([]byte, binary.MaxVarintLen64),
- i32buf: make([]byte, 4),
- }
+type PrefixedTokenIter struct {
+ buf []byte
+ prefixLen int
+
+ NGramTokenIter
}
-func zeroBuffer(buf []byte) {
- for i := range buf {
- buf[i] = 0
- }
+func (t *PrefixedTokenIter) At() []byte {
+ return append(t.buf[:t.prefixLen], t.NGramTokenIter.At()...)
}
-func (w *WrappedTokenizer) Reinit(chk logproto.ChunkRef) {
- w.prefix = w.prefix[:0]
- zeroBuffer(w.i64buf)
- zeroBuffer(w.i32buf)
-
- binary.PutVarint(w.i64buf, int64(chk.From))
- w.prefix = append(w.prefix, w.i64buf...)
- binary.PutVarint(w.i64buf, int64(chk.Through))
- w.prefix = append(w.prefix, w.i64buf...)
- binary.LittleEndian.PutUint32(w.i32buf, chk.Checksum)
- w.prefix = append(w.prefix, w.i32buf...)
+func NewPrefixedTokenIter(buf []byte, prefixLn int, iter NGramTokenIter) *PrefixedTokenIter {
+ return &PrefixedTokenIter{
+ buf: buf,
+ prefixLen: prefixLn,
+ NGramTokenIter: iter,
+ }
}
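
A minimal usage sketch for the iterator-based tokenizer (the helper name is hypothetical). As the doc comment above notes, `At()` returns a slice over a shared buffer, so callers that retain tokens must copy them:

```go
// collectNGrams gathers all trigrams of a line, copying each token
// because NGramTokenIter.At reuses its internal buffer.
func collectNGrams(line string) [][]byte {
	t := NewNGramTokenizer(3, 0) // trigrams, no skip
	itr := t.Tokens(line)

	var out [][]byte
	for itr.Next() {
		tok := itr.At()
		cp := make([]byte, len(tok))
		copy(cp, tok)
		out = append(out, cp)
	}
	return out // for "abcdef": ["abc" "bcd" "cde" "def"]
}
```
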
diff --git a/pkg/storage/bloom/v1/tokenizer_test.go b/pkg/storage/bloom/v1/tokenizer_test.go
index 8a2c32d7930d8..471eaea74081b 100644
--- a/pkg/storage/bloom/v1/tokenizer_test.go
+++ b/pkg/storage/bloom/v1/tokenizer_test.go
@@ -1,574 +1,215 @@
package v1
import (
- "bufio"
- "encoding/binary"
- "os"
"testing"
- "github.com/grafana/loki/pkg/logproto"
-
"github.com/stretchr/testify/require"
+
+ "github.com/grafana/loki/pkg/logproto"
)
const BigFile = "../../../logql/sketch/testdata/war_peace.txt"
-var (
- twoSkipOne = NewNGramTokenizer(2, 3, 1)
- three = NewNGramTokenizer(3, 4, 0)
- threeSkip1 = NewNGramTokenizer(3, 4, 1)
- threeSkip2 = NewNGramTokenizer(3, 4, 2)
- four = NewNGramTokenizer(4, 5, 0)
- fourSkip1 = NewNGramTokenizer(4, 5, 1)
- fourSkip2 = NewNGramTokenizer(4, 5, 2)
- five = NewNGramTokenizer(5, 6, 0)
- six = NewNGramTokenizer(6, 7, 0)
-)
+func TestNGramIterator(t *testing.T) {
+ var (
+ three = NewNGramTokenizer(3, 0)
+ threeSkip1 = NewNGramTokenizer(3, 1)
+ threeSkip3 = NewNGramTokenizer(3, 3)
+ )
-func TestNGrams(t *testing.T) {
- tokenizer := NewNGramTokenizer(2, 4, 0)
for _, tc := range []struct {
desc string
+ t *NGramTokenizer
input string
- exp []Token
+ exp []string
}{
{
- desc: "empty",
+ t: three,
input: "",
- exp: []Token{},
- },
- {
- desc: "single char",
- input: "a",
- exp: []Token{},
+ exp: []string{},
},
{
- desc: "two chars",
+ t: three,
input: "ab",
- exp: []Token{{Key: []byte("ab")}},
- },
- {
- desc: "three chars",
- input: "abc",
- exp: []Token{{Key: []byte("ab")}, {Key: []byte("bc")}, {Key: []byte("abc")}},
- },
- {
- desc: "four chars",
- input: "abcd",
- exp: []Token{{Key: []byte("ab")}, {Key: []byte("bc")}, {Key: []byte("abc")}, {Key: []byte("cd")}, {Key: []byte("bcd")}},
- },
- {
- desc: "foo",
- input: "日本語",
- exp: []Token{{Key: []byte("日本")}, {Key: []byte("本語")}, {Key: []byte("日本語")}},
- },
- } {
- t.Run(tc.desc, func(t *testing.T) {
- require.Equal(t, tc.exp, tokenizer.Tokens(tc.input))
- })
- }
-}
-
-func TestNGramsSkip(t *testing.T) {
-
- for _, tc := range []struct {
- desc string
- tokenizer *NgramTokenizer
- input string
- exp []Token
- }{
- {
- desc: "four chars",
- tokenizer: twoSkipOne,
- input: "abcd",
- exp: []Token{{Key: []byte("ab")}, {Key: []byte("cd")}},
- },
- {
- desc: "special chars",
- tokenizer: twoSkipOne,
- input: "日本語",
- exp: []Token{{Key: []byte("日本")}},
- },
- {
- desc: "multi",
- tokenizer: NewNGramTokenizer(2, 4, 1),
- input: "abcdefghij",
- exp: []Token{
- {Key: []byte("ab")},
- {Key: []byte("abc")},
- {Key: []byte("cd")},
- {Key: []byte("cde")},
- {Key: []byte("ef")},
- {Key: []byte("efg")},
- {Key: []byte("gh")},
- {Key: []byte("ghi")},
- {Key: []byte("ij")},
- },
- },
- } {
- t.Run(tc.desc, func(t *testing.T) {
- require.Equal(t, tc.exp, tc.tokenizer.Tokens(tc.input))
- })
- }
-}
-
-func Test3GramSkip0Tokenizer(t *testing.T) {
- tokenizer := three
- for _, tc := range []struct {
- desc string
- input string
- exp []Token
- }{
- {
- desc: "empty",
- input: "",
- exp: []Token{},
- },
- {
- desc: "single char",
- input: "a",
- exp: []Token{},
- },
- {
- desc: "three char",
- input: "abc",
- exp: []Token{{Key: []byte("abc")}},
- },
- {
- desc: "four chars",
- input: "abcd",
- exp: []Token{{Key: []byte("abc")}, {Key: []byte("bcd")}},
- },
- } {
- t.Run(tc.desc, func(t *testing.T) {
- require.Equal(t, tc.exp, tokenizer.Tokens(tc.input))
- })
- }
-}
-
-func Test3GramSkip1Tokenizer(t *testing.T) {
- tokenizer := threeSkip1
- for _, tc := range []struct {
- desc string
- input string
- exp []Token
- }{
- {
- desc: "empty",
- input: "",
- exp: []Token{},
- },
- {
- desc: "single char",
- input: "a",
- exp: []Token{},
- },
- {
- desc: "three char",
- input: "abc",
- exp: []Token{{Key: []byte("abc")}},
- },
- {
- desc: "four chars",
- input: "abcd",
- exp: []Token{{Key: []byte("abc")}},
- },
- {
- desc: "five chars",
- input: "abcde",
- exp: []Token{{Key: []byte("abc")}, {Key: []byte("cde")}},
- },
- } {
- t.Run(tc.desc, func(t *testing.T) {
- require.Equal(t, tc.exp, tokenizer.Tokens(tc.input))
- })
- }
-}
-
-func Test3GramSkip2Tokenizer(t *testing.T) {
- tokenizer := threeSkip2
- for _, tc := range []struct {
- desc string
- input string
- exp []Token
- }{
- {
- desc: "empty",
- input: "",
- exp: []Token{},
- },
- {
- desc: "single char",
- input: "a",
- exp: []Token{},
- },
- {
- desc: "four chars",
- input: "abcd",
- exp: []Token{{Key: []byte("abc")}},
- },
- } {
- t.Run(tc.desc, func(t *testing.T) {
- require.Equal(t, tc.exp, tokenizer.Tokens(tc.input))
- })
- }
-}
-
-func Test4GramSkip0Tokenizer(t *testing.T) {
- tokenizer := four
- for _, tc := range []struct {
- desc string
- input string
- exp []Token
- }{
- {
- desc: "empty",
- input: "",
- exp: []Token{},
- },
- {
- desc: "single char",
- input: "a",
- exp: []Token{},
- },
- {
- desc: "three char",
- input: "abc",
- exp: []Token{},
- },
- {
- desc: "four chars",
- input: "abcd",
- exp: []Token{{Key: []byte("abcd")}},
- },
- {
- desc: "five chars",
- input: "abcde",
- exp: []Token{{Key: []byte("abcd")}, {Key: []byte("bcde")}},
- },
- } {
- t.Run(tc.desc, func(t *testing.T) {
- require.Equal(t, tc.exp, tokenizer.Tokens(tc.input))
- })
- }
-}
-
-func Test4GramSkip1Tokenizer(t *testing.T) {
- tokenizer := fourSkip1
- for _, tc := range []struct {
- desc string
- input string
- exp []Token
- }{
- {
- desc: "empty",
- input: "",
- exp: []Token{},
+ exp: []string{},
},
{
- desc: "single char",
- input: "a",
- exp: []Token{},
- },
- {
- desc: "three char",
- input: "abc",
- exp: []Token{},
- },
- {
- desc: "four chars",
- input: "abcd",
- exp: []Token{{Key: []byte("abcd")}},
- },
- {
- desc: "five chars",
- input: "abcde",
- exp: []Token{{Key: []byte("abcd")}},
- },
- {
- desc: "six chars",
- input: "abcdef",
- exp: []Token{{Key: []byte("abcd")}, {Key: []byte("cdef")}},
- },
- {
- desc: "seven chars",
+ t: three,
input: "abcdefg",
- exp: []Token{{Key: []byte("abcd")}, {Key: []byte("cdef")}},
- },
- {
- desc: "eight chars",
- input: "abcdefgh",
- exp: []Token{{Key: []byte("abcd")}, {Key: []byte("cdef")}, {Key: []byte("efgh")}},
+ exp: []string{"abc", "bcd", "cde", "def", "efg"},
},
- } {
- t.Run(tc.desc, func(t *testing.T) {
- require.Equal(t, tc.exp, tokenizer.Tokens(tc.input))
- })
- }
-}
-
-func Test4GramSkip2Tokenizer(t *testing.T) {
- tokenizer := fourSkip2
- for _, tc := range []struct {
- desc string
- input string
- exp []Token
- }{
{
- desc: "empty",
- input: "",
- exp: []Token{},
- },
- {
- desc: "single char",
- input: "a",
- exp: []Token{},
- },
- {
- desc: "three char",
- input: "abc",
- exp: []Token{},
- },
- {
- desc: "four chars",
- input: "abcd",
- exp: []Token{{Key: []byte("abcd")}},
- },
- {
- desc: "five chars",
- input: "abcde",
- exp: []Token{{Key: []byte("abcd")}},
- },
- {
- desc: "six chars",
- input: "abcdef",
- exp: []Token{{Key: []byte("abcd")}},
- },
- {
- desc: "seven chars",
+ t: threeSkip1,
input: "abcdefg",
- exp: []Token{{Key: []byte("abcd")}, {Key: []byte("defg")}},
+ exp: []string{"abc", "cde", "efg"},
},
{
- desc: "eight chars",
+ t: threeSkip3,
input: "abcdefgh",
- exp: []Token{{Key: []byte("abcd")}, {Key: []byte("defg")}},
+ exp: []string{"abc", "efg"},
},
{
- desc: "nine chars",
- input: "abcdefghi",
- exp: []Token{{Key: []byte("abcd")}, {Key: []byte("defg")}},
+ t: three,
+ input: "日本語",
+ exp: []string{"日本語"},
},
{
- desc: "ten chars",
- input: "abcdefghij",
- exp: []Token{{Key: []byte("abcd")}, {Key: []byte("defg")}, {Key: []byte("ghij")}},
+ t: four,
+ input: "日本語日本語",
+ exp: []string{
+ "日本語日",
+ "本語日本",
+ "語日本語"},
},
} {
t.Run(tc.desc, func(t *testing.T) {
- require.Equal(t, tc.exp, tokenizer.Tokens(tc.input))
+ itr := tc.t.Tokens(tc.input)
+ for _, exp := range tc.exp {
+ require.True(t, itr.Next())
+ require.Equal(t, exp, string(itr.At()))
+ }
+ require.False(t, itr.Next())
})
}
}
-func Test5GramSkip0Tokenizer(t *testing.T) {
- tokenizer := five
- for _, tc := range []struct {
- desc string
- input string
- exp []Token
- }{
- {
- desc: "empty",
- input: "",
- exp: []Token{},
- },
- {
- desc: "single char",
- input: "a",
- exp: []Token{},
- },
- {
- desc: "three char",
- input: "abc",
- exp: []Token{},
- },
- {
- desc: "four chars",
- input: "abcd",
- exp: []Token{},
- },
- {
- desc: "five chars",
- input: "abcde",
- exp: []Token{{Key: []byte("abcde")}},
- },
- {
- desc: "six chars",
- input: "abcdef",
- exp: []Token{{Key: []byte("abcde")}, {Key: []byte("bcdef")}},
- },
- } {
- t.Run(tc.desc, func(t *testing.T) {
- require.Equal(t, tc.exp, tokenizer.Tokens(tc.input))
- })
- }
-}
+func TestPrefixedIterator(t *testing.T) {
+ var (
+ three = NewNGramTokenizer(3, 0)
+ )
-func Test6GramSkip0Tokenizer(t *testing.T) {
- tokenizer := six
for _, tc := range []struct {
desc string
input string
- exp []Token
+ exp []string
}{
{
- desc: "empty",
input: "",
- exp: []Token{},
- },
- {
- desc: "single char",
- input: "a",
- exp: []Token{},
+ exp: []string{},
},
{
- desc: "three char",
- input: "abc",
- exp: []Token{},
- },
- {
- desc: "four chars",
- input: "abcd",
- exp: []Token{},
- },
- {
- desc: "five chars",
- input: "abcde",
- exp: []Token{},
+ input: "ab",
+ exp: []string{},
},
{
- desc: "six chars",
- input: "abcdef",
- exp: []Token{{Key: []byte("abcdef")}},
+ input: "abcdefg",
+ exp: []string{"0123abc", "0123bcd", "0123cde", "0123def", "0123efg"},
},
+
{
- desc: "seven chars",
- input: "abcdefg",
- exp: []Token{{Key: []byte("abcdef")}, {Key: []byte("bcdefg")}},
+ input: "日本語",
+ exp: []string{"0123日本語"},
},
} {
+ prefix := []byte("0123")
t.Run(tc.desc, func(t *testing.T) {
- require.Equal(t, tc.exp, tokenizer.Tokens(tc.input))
+ itr := NewPrefixedTokenIter(prefix, len(prefix), three.Tokens(tc.input))
+ for _, exp := range tc.exp {
+ require.True(t, itr.Next())
+ require.Equal(t, exp, string(itr.At()))
+ }
+ require.False(t, itr.Next())
})
}
}
-func makeBuf(from, through, checksum int) []byte {
- p := make([]byte, 0, 256)
- i64buf := make([]byte, binary.MaxVarintLen64)
- i32buf := make([]byte, 4)
+const lorem = `
+lorum ipsum dolor sit amet consectetur adipiscing elit sed do eiusmod tempor incididunt ut labore et dolore magna
+aliqua ut enim ad minim veniam quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat
+duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur excepteur
+sint occaecat cupidatat non proident sunt in culpa qui officia deserunt mollit anim id est
+laborum ipsum dolor sit amet consectetur adipiscing elit sed do eiusmod tempor incididunt ut labore et dolore magna
+aliqua ut enim ad minim veniam quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat
+duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur excepteur
+sint occaecat cupidatat non proident sunt in culpa qui officia deserunt mollit anim id est
+`
- binary.PutVarint(i64buf, int64(from))
- p = append(p, i64buf...)
- binary.PutVarint(i64buf, int64(through))
- p = append(p, i64buf...)
- binary.LittleEndian.PutUint32(i32buf, uint32(checksum))
- p = append(p, i32buf...)
- return p
-}
-
-func TestWrappedTokenizer(t *testing.T) {
- tokenizer := threeSkip2
- for _, tc := range []struct {
+func BenchmarkTokens(b *testing.B) {
+ var (
+ v2Three = NewNGramTokenizer(3, 0)
+ v2ThreeSkip1 = NewNGramTokenizer(3, 1)
+ )
+
+ type impl struct {
+ desc string
+ f func()
+ }
+ type tc struct {
desc string
- input string
- exp []Token
- }{
- {
- desc: "empty",
- input: "",
- exp: []Token{},
+ impls []impl
+ }
+ for _, tc := range []tc{
+ {
+ desc: "three",
+ impls: []impl{
+ {
+ desc: "v2",
+ f: func() {
+ itr := v2Three.Tokens(lorem)
+ for itr.Next() {
+ _ = itr.At()
+ }
+ },
+ },
+ },
},
{
- desc: "single char",
- input: "a",
- exp: []Token{},
+ desc: "threeSkip1",
+ impls: []impl{
+ {
+ desc: "v2",
+ f: func() {
+ itr := v2ThreeSkip1.Tokens(lorem)
+ for itr.Next() {
+ _ = itr.At()
+ }
+ },
+ },
+ },
},
{
- desc: "four chars",
- input: "abcd",
- exp: []Token{
- {Key: append(makeBuf(0, 999999, 1), []byte("abc")...)},
- {Key: []byte("abc")}},
+ desc: "threeChunk",
+ impls: []impl{
+ {
+ desc: "v2",
+ f: func() func() {
+ buf, prefixLn := prefixedToken(v2Three.N, logproto.ChunkRef{})
+ return func() {
+ itr := NewPrefixedTokenIter(buf, prefixLn, v2Three.Tokens(lorem))
+ for itr.Next() {
+ _ = itr.At()
+ }
+ }
+ }(),
+ },
+ },
},
{
- desc: "uuid",
- input: "2b1a5e46-36a2-4694-a4b1-f34cc7bdfc45",
- exp: []Token{
- {Key: append(makeBuf(0, 999999, 1), []byte("2b1")...)},
- {Key: []byte("2b1")},
- {Key: append(makeBuf(0, 999999, 1), []byte("a5e")...)},
- {Key: []byte("a5e")},
- {Key: append(makeBuf(0, 999999, 1), []byte("46-")...)},
- {Key: []byte("46-")},
- {Key: append(makeBuf(0, 999999, 1), []byte("36a")...)},
- {Key: []byte("36a")},
- {Key: append(makeBuf(0, 999999, 1), []byte("2-4")...)},
- {Key: []byte("2-4")},
- {Key: append(makeBuf(0, 999999, 1), []byte("694")...)},
- {Key: []byte("694")},
- {Key: append(makeBuf(0, 999999, 1), []byte("-a4")...)},
- {Key: []byte("-a4")},
- {Key: append(makeBuf(0, 999999, 1), []byte("b1-")...)},
- {Key: []byte("b1-")},
- {Key: append(makeBuf(0, 999999, 1), []byte("f34")...)},
- {Key: []byte("f34")},
- {Key: append(makeBuf(0, 999999, 1), []byte("cc7")...)},
- {Key: []byte("cc7")},
- {Key: append(makeBuf(0, 999999, 1), []byte("bdf")...)},
- {Key: []byte("bdf")},
- {Key: append(makeBuf(0, 999999, 1), []byte("c45")...)},
- {Key: []byte("c45")},
+ desc: "threeSkip1Chunk",
+ impls: []impl{
+ {
+ desc: "v2",
+ f: func() func() {
+ buf, prefixLn := prefixedToken(v2Three.N, logproto.ChunkRef{})
+ return func() {
+ itr := NewPrefixedTokenIter(buf, prefixLn, v2ThreeSkip1.Tokens(lorem))
+ for itr.Next() {
+ _ = itr.At()
+ }
+ }
+ }(),
+ },
},
},
} {
- t.Run(tc.desc, func(t *testing.T) {
- chunkTokenizer := ChunkIDTokenizer(tokenizer)
- chunkTokenizer.Reinit(logproto.ChunkRef{From: 0, Through: 999999, Checksum: 1})
- require.Equal(t, tc.exp, chunkTokenizer.Tokens(tc.input))
+ b.Run(tc.desc, func(b *testing.B) {
+ for _, impl := range tc.impls {
+ b.Run(impl.desc, func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ impl.f()
+ }
+ })
+ }
})
}
}
-
-func BenchmarkTokens(b *testing.B) {
- for i := 0; i < b.N; i++ {
- b.StopTimer()
- file, _ := os.Open(BigFile)
- defer file.Close()
- scanner := bufio.NewScanner(file)
-
- b.StartTimer()
- for scanner.Scan() {
- line := scanner.Text()
- _ = three.Tokens(line)
- }
- }
-}
-
-func BenchmarkWrappedTokens(b *testing.B) {
- chunkTokenizer := ChunkIDTokenizer(three)
- chunkTokenizer.Reinit(logproto.ChunkRef{From: 0, Through: 999999, Checksum: 1})
- for i := 0; i < b.N; i++ {
- b.StopTimer()
- file, _ := os.Open(BigFile)
- defer file.Close()
- scanner := bufio.NewScanner(file)
-
- b.StartTimer()
- for scanner.Scan() {
- line := scanner.Text()
- _ = chunkTokenizer.Tokens(line)
- }
- }
-}
diff --git a/pkg/storage/bloom/v1/util.go b/pkg/storage/bloom/v1/util.go
index e764cba5c6197..15de62e9f9590 100644
--- a/pkg/storage/bloom/v1/util.go
+++ b/pkg/storage/bloom/v1/util.go
@@ -7,6 +7,7 @@ import (
"io"
"sync"
+ "github.com/prometheus/common/model"
"github.com/prometheus/prometheus/util/pool"
)
@@ -241,3 +242,49 @@ func PointerSlice[T any](xs []T) []*T {
}
return out
}
+
+type BoundsCheck uint8
+
+const (
+ Before BoundsCheck = iota
+ Overlap
+ After
+)
+
+type FingerprintBounds struct {
+ Min, Max model.Fingerprint
+}
+
+// Cmp returns the fingerprint's position relative to the bounds
+func (b FingerprintBounds) Cmp(fp model.Fingerprint) BoundsCheck {
+ if fp < b.Min {
+ return Before
+ } else if fp > b.Max {
+ return After
+ }
+ return Overlap
+}
+
+// unused, but illustrative
+type BoundedIter[V any] struct {
+ Iterator[V]
+ cmp func(V) BoundsCheck
+}
+
+func (bi *BoundedIter[V]) Next() bool {
+ for bi.Iterator.Next() {
+ switch bi.cmp(bi.Iterator.At()) {
+ case Before:
+ continue
+ case After:
+ return false
+ default:
+ return true
+ }
+ }
+ return false
+}
+
+func NewBoundedIter[V any](itr Iterator[V], cmp func(V) BoundsCheck) *BoundedIter[V] {
+ return &BoundedIter[V]{Iterator: itr, cmp: cmp}
+}
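
`BoundedIter` is marked as unused but illustrative; a sketch of the intended pattern (hypothetical helper, assuming the input is sorted by fingerprint): `Cmp` skips series before the bounds and stops iteration at the first series past them.

```go
// boundedSeries keeps only series whose fingerprint falls within bounds.
func boundedSeries(series []*SeriesWithBloom, bounds FingerprintBounds) []*SeriesWithBloom {
	itr := NewBoundedIter[*SeriesWithBloom](
		NewSliceIter[*SeriesWithBloom](series),
		func(s *SeriesWithBloom) BoundsCheck { return bounds.Cmp(s.Series.Fingerprint) },
	)

	var out []*SeriesWithBloom
	for itr.Next() {
		out = append(out, itr.At()) // only Min <= fingerprint <= Max
	}
	return out
}
```
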
diff --git a/pkg/storage/chunk/cache/cache.go b/pkg/storage/chunk/cache/cache.go
index f651b252cdaab..870d7c19e5c7c 100644
--- a/pkg/storage/chunk/cache/cache.go
+++ b/pkg/storage/chunk/cache/cache.go
@@ -51,7 +51,7 @@ func (cfg *Config) RegisterFlagsWithPrefix(prefix string, description string, f
cfg.Memcache.RegisterFlagsWithPrefix(prefix, description, f)
cfg.MemcacheClient.RegisterFlagsWithPrefix(prefix, description, f)
cfg.Redis.RegisterFlagsWithPrefix(prefix, description, f)
- cfg.EmbeddedCache.RegisterFlagsWithPrefix(prefix, description, f)
+ cfg.EmbeddedCache.RegisterFlagsWithPrefix(prefix+"embedded-cache.", description, f)
f.IntVar(&cfg.AsyncCacheWriteBackConcurrency, prefix+"max-async-cache-write-back-concurrency", 16, "The maximum number of concurrent asynchronous writeback cache can occur.")
f.IntVar(&cfg.AsyncCacheWriteBackBufferSize, prefix+"max-async-cache-write-back-buffer-size", 500, "The maximum number of enqueued asynchronous writeback cache allowed.")
f.DurationVar(&cfg.DefaultValidity, prefix+"default-validity", time.Hour, description+"The default validity of entries for caches unless overridden.")
diff --git a/pkg/storage/chunk/cache/embeddedcache.go b/pkg/storage/chunk/cache/embeddedcache.go
index b27d6a903f23e..871c1ef4e1e78 100644
--- a/pkg/storage/chunk/cache/embeddedcache.go
+++ b/pkg/storage/chunk/cache/embeddedcache.go
@@ -58,10 +58,10 @@ type EmbeddedCache[K comparable, V any] struct {
memoryBytes prometheus.Gauge
}
-type cacheEntry[K comparable, V any] struct {
+type Entry[K comparable, V any] struct {
updated time.Time
- key K
- value V
+ Key K
+ Value V
}
// EmbeddedCacheConfig represents in-process embedded cache config.
@@ -77,17 +77,21 @@ type EmbeddedCacheConfig struct {
}
func (cfg *EmbeddedCacheConfig) RegisterFlagsWithPrefix(prefix, description string, f *flag.FlagSet) {
- f.BoolVar(&cfg.Enabled, prefix+"embedded-cache.enabled", false, description+"Whether embedded cache is enabled.")
- f.Int64Var(&cfg.MaxSizeMB, prefix+"embedded-cache.max-size-mb", 100, description+"Maximum memory size of the cache in MB.")
- f.IntVar(&cfg.MaxSizeItems, prefix+"embedded-cache.max-size-items", 0, description+"Maximum number of entries in the cache.")
- f.DurationVar(&cfg.TTL, prefix+"embedded-cache.ttl", time.Hour, description+"The time to live for items in the cache before they get purged.")
+ cfg.RegisterFlagsWithPrefixAndDefaults(prefix, description, f, time.Hour)
+}
+
+func (cfg *EmbeddedCacheConfig) RegisterFlagsWithPrefixAndDefaults(prefix, description string, f *flag.FlagSet, defaultTTL time.Duration) {
+ f.BoolVar(&cfg.Enabled, prefix+"enabled", false, description+"Whether embedded cache is enabled.")
+ f.Int64Var(&cfg.MaxSizeMB, prefix+"max-size-mb", 100, description+"Maximum memory size of the cache in MB.")
+ f.IntVar(&cfg.MaxSizeItems, prefix+"max-size-items", 0, description+"Maximum number of entries in the cache.")
+ f.DurationVar(&cfg.TTL, prefix+"ttl", defaultTTL, description+"The time to live for items in the cache before they get purged.")
}
func (cfg *EmbeddedCacheConfig) IsEnabled() bool {
return cfg.Enabled
}
-type cacheEntrySizeCalculator[K comparable, V any] func(entry *cacheEntry[K, V]) uint64
+type cacheEntrySizeCalculator[K comparable, V any] func(entry *Entry[K, V]) uint64
// NewEmbeddedCache returns a new initialised EmbeddedCache where the key is a string and the value is a slice of bytes.
func NewEmbeddedCache(name string, cfg EmbeddedCacheConfig, reg prometheus.Registerer, logger log.Logger, cacheType stats.CacheType) *EmbeddedCache[string, []byte] {
@@ -191,7 +195,7 @@ func (c *EmbeddedCache[K, V]) pruneExpiredItems(ttl time.Duration) {
defer c.lock.Unlock()
for k, v := range c.entries {
- entry := v.Value.(*cacheEntry[K, V])
+ entry := v.Value.(*Entry[K, V])
if time.Since(entry.updated) > ttl {
c.remove(k, v, expiredReason)
}
@@ -244,10 +248,10 @@ func (c *EmbeddedCache[K, V]) GetCacheType() stats.CacheType {
}
func (c *EmbeddedCache[K, V]) remove(key K, element *list.Element, reason string) {
- entry := c.lru.Remove(element).(*cacheEntry[K, V])
+ entry := c.lru.Remove(element).(*Entry[K, V])
delete(c.entries, key)
if c.onEntryRemoved != nil {
- c.onEntryRemoved(entry.key, entry.value)
+ c.onEntryRemoved(entry.Key, entry.Value)
}
c.currSizeBytes -= c.cacheEntrySizeCalculator(entry)
c.entriesCurrent.Dec()
@@ -262,10 +266,10 @@ func (c *EmbeddedCache[K, V]) put(key K, value V) {
c.remove(key, element, replacedReason)
}
- entry := &cacheEntry[K, V]{
+ entry := &Entry[K, V]{
updated: time.Now(),
- key: key,
- value: value,
+ Key: key,
+ Value: value,
}
entrySz := c.cacheEntrySizeCalculator(entry)
@@ -285,8 +289,8 @@ func (c *EmbeddedCache[K, V]) put(key K, value V) {
if lastElement == nil {
break
}
- entryToRemove := lastElement.Value.(*cacheEntry[K, V])
- c.remove(entryToRemove.key, lastElement, fullReason)
+ entryToRemove := lastElement.Value.(*Entry[K, V])
+ c.remove(entryToRemove.Key, lastElement, fullReason)
}
// Finally, we have space to add the item.
@@ -306,17 +310,17 @@ func (c *EmbeddedCache[K, V]) Get(_ context.Context, key K) (V, bool) {
element, ok := c.entries[key]
if ok {
- entry := element.Value.(*cacheEntry[K, V])
- return entry.value, true
+ entry := element.Value.(*Entry[K, V])
+ return entry.Value, true
}
var empty V
return empty, false
}
-func sizeOf(item *cacheEntry[string, []byte]) uint64 {
- return uint64(int(unsafe.Sizeof(*item)) + // size of cacheEntry
- len(item.key) + // size of key
- cap(item.value) + // size of value
+func sizeOf(item *Entry[string, []byte]) uint64 {
+ return uint64(int(unsafe.Sizeof(*item)) + // size of Entry
+ len(item.Key) + // size of Key
+ cap(item.Value) + // size of Value
elementSize + // size of the element in linked list
elementPrtSize) // size of the pointer to an element in the map
}
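
For concreteness, a minimal standalone sketch (not part of the patch; the "frontend.embedded-cache." prefix and the 24h default TTL are illustrative assumptions) of what the new RegisterFlagsWithPrefixAndDefaults shape enables, namely that the caller now controls both the full flag prefix and the TTL default instead of having "embedded-cache." and time.Hour hard-coded:

package main

import (
	"flag"
	"fmt"
	"time"
)

// embeddedCacheConfig mirrors the fields registered in the hunk above.
type embeddedCacheConfig struct {
	enabled      bool
	maxSizeMB    int64
	maxSizeItems int
	ttl          time.Duration
}

// registerFlagsWithPrefixAndDefaults follows the shape of the new entry point:
// the caller supplies the complete prefix and the default TTL.
func (cfg *embeddedCacheConfig) registerFlagsWithPrefixAndDefaults(prefix string, f *flag.FlagSet, defaultTTL time.Duration) {
	f.BoolVar(&cfg.enabled, prefix+"enabled", false, "Whether embedded cache is enabled.")
	f.Int64Var(&cfg.maxSizeMB, prefix+"max-size-mb", 100, "Maximum memory size of the cache in MB.")
	f.IntVar(&cfg.maxSizeItems, prefix+"max-size-items", 0, "Maximum number of entries in the cache.")
	f.DurationVar(&cfg.ttl, prefix+"ttl", defaultTTL, "The time to live for items in the cache before they get purged.")
}

func main() {
	var cfg embeddedCacheConfig
	fs := flag.NewFlagSet("example", flag.ContinueOnError)
	// Hypothetical prefix; a caller could pick a 24h default TTL instead of one hour.
	cfg.registerFlagsWithPrefixAndDefaults("frontend.embedded-cache.", fs, 24*time.Hour)
	_ = fs.Parse([]string{"-frontend.embedded-cache.enabled=true"})
	fmt.Println(cfg.enabled, cfg.ttl) // true 24h0m0s
}
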
diff --git a/pkg/storage/chunk/cache/embeddedcache_test.go b/pkg/storage/chunk/cache/embeddedcache_test.go
index b318e0f6b5a75..473c1b8e83a09 100644
--- a/pkg/storage/chunk/cache/embeddedcache_test.go
+++ b/pkg/storage/chunk/cache/embeddedcache_test.go
@@ -23,13 +23,13 @@ func TestEmbeddedCacheEviction(t *testing.T) {
// compute value size such that 10 entries account to exactly 1MB.
// adding one more entry to the cache would result in eviction when MaxSizeMB is configured to a value of 1.
// value cap = target size of each entry (0.1MB) - size of cache entry with empty value.
- valueCap := (1e6 / cnt) - sizeOf(&cacheEntry[string, []byte]{
- key: "00",
+ valueCap := (1e6 / cnt) - sizeOf(&Entry[string, []byte]{
+ Key: "00",
})
- itemTemplate := &cacheEntry[string, []byte]{
- key: "00",
- value: make([]byte, 0, valueCap),
+ itemTemplate := &Entry[string, []byte]{
+ Key: "00",
+ Value: make([]byte, 0, valueCap),
}
tests := []struct {
@@ -176,9 +176,9 @@ func TestEmbeddedCacheExpiry(t *testing.T) {
key1, key2, key3, key4 := "01", "02", "03", "04"
data1, data2, data3, data4 := genBytes(32), genBytes(64), genBytes(128), genBytes(32)
- memorySz := sizeOf(&cacheEntry[string, []byte]{key: key1, value: data1}) +
- sizeOf(&cacheEntry[string, []byte]{key: key2, value: data2}) +
- sizeOf(&cacheEntry[string, []byte]{key: key3, value: data3})
+ memorySz := sizeOf(&Entry[string, []byte]{Key: key1, Value: data1}) +
+ sizeOf(&Entry[string, []byte]{Key: key2, Value: data2}) +
+ sizeOf(&Entry[string, []byte]{Key: key3, Value: data3})
cfg := EmbeddedCacheConfig{
MaxSizeItems: 3,
diff --git a/pkg/storage/chunk/cache/memcached_client.go b/pkg/storage/chunk/cache/memcached_client.go
index b497b5c5917fd..f05763ba59d13 100644
--- a/pkg/storage/chunk/cache/memcached_client.go
+++ b/pkg/storage/chunk/cache/memcached_client.go
@@ -3,9 +3,9 @@ package cache
import (
"context"
"flag"
- "fmt"
"net"
"sort"
+ "strconv"
"strings"
"sync"
"time"
@@ -254,7 +254,7 @@ func (c *memcachedClient) updateMemcacheServers() error {
return err
}
for _, srv := range addrs {
- servers = append(servers, fmt.Sprintf("%s:%d", srv.Target, srv.Port))
+ servers = append(servers, net.JoinHostPort(srv.Target, strconv.Itoa(int(srv.Port))))
}
}
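
The hunk above replaces fmt.Sprintf("%s:%d", ...) with net.JoinHostPort when assembling memcached server addresses from resolved SRV records. A small standalone illustration (not taken from the patch) of why that matters once a resolved target is an IPv6 literal:

package main

import (
	"fmt"
	"net"
	"strconv"
)

func main() {
	host, port := "2001:db8::1", 11211

	// Plain formatting leaves the port indistinguishable from the last address group.
	plain := fmt.Sprintf("%s:%d", host, port)
	fmt.Println(plain) // 2001:db8::1:11211

	// JoinHostPort brackets IPv6 literals so the result round-trips through SplitHostPort.
	joined := net.JoinHostPort(host, strconv.Itoa(port))
	fmt.Println(joined) // [2001:db8::1]:11211

	h, p, err := net.SplitHostPort(joined)
	fmt.Println(h, p, err) // 2001:db8::1 11211 <nil>
}
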
diff --git a/pkg/storage/chunk/cache/resultscache/cache.go b/pkg/storage/chunk/cache/resultscache/cache.go
new file mode 100644
index 0000000000000..0999ca3271068
--- /dev/null
+++ b/pkg/storage/chunk/cache/resultscache/cache.go
@@ -0,0 +1,467 @@
+package resultscache
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "sort"
+ "time"
+
+ "github.com/go-kit/log"
+ "github.com/go-kit/log/level"
+ "github.com/gogo/protobuf/proto"
+ "github.com/gogo/protobuf/types"
+ "github.com/grafana/dskit/httpgrpc"
+ "github.com/opentracing/opentracing-go"
+ otlog "github.com/opentracing/opentracing-go/log"
+ "github.com/prometheus/common/model"
+ "github.com/uber/jaeger-client-go"
+
+ "github.com/grafana/dskit/tenant"
+
+ "github.com/grafana/loki/pkg/storage/chunk/cache"
+ "github.com/grafana/loki/pkg/util/math"
+ "github.com/grafana/loki/pkg/util/spanlogger"
+ "github.com/grafana/loki/pkg/util/validation"
+)
+
+// ConstSplitter is a utility for using a constant split interval when determining cache keys
+type ConstSplitter time.Duration
+
+// GenerateCacheKey generates a cache key based on the userID, Request and interval.
+func (t ConstSplitter) GenerateCacheKey(_ context.Context, userID string, r Request) string {
+ currentInterval := r.GetStart().UnixMilli() / int64(time.Duration(t)/time.Millisecond)
+ return fmt.Sprintf("%s:%s:%d:%d", userID, r.GetQuery(), r.GetStep(), currentInterval)
+}
+
+// ShouldCacheReqFn checks whether the current request should go to cache or not.
+// If not, the request is just sent to the next handler.
+type ShouldCacheReqFn func(ctx context.Context, r Request) bool
+
+// ShouldCacheResFn checks whether the current response should go to cache or not.
+type ShouldCacheResFn func(ctx context.Context, r Request, res Response, maxCacheTime int64) bool
+
+// ParallelismForReqFn returns the parallelism for a given request.
+type ParallelismForReqFn func(ctx context.Context, tenantIDs []string, r Request) int
+
+type ResultsCache struct {
+ logger log.Logger
+ next Handler
+ cache cache.Cache
+ limits Limits
+ splitter KeyGenerator
+ cacheGenNumberLoader CacheGenNumberLoader
+ retentionEnabled bool
+ extractor Extractor
+ minCacheExtent int64 // discard any cache extent smaller than this
+ merger ResponseMerger
+ shouldCacheReq ShouldCacheReqFn
+ shouldCacheRes ShouldCacheResFn
+ parallelismForReq func(ctx context.Context, tenantIDs []string, r Request) int
+}
+
+// NewResultsCache creates a results cache from the given config.
+// The middleware caches results using a unique cache key for a given request (step, query, user) and interval.
+// The cache assumes that each request length (end-start) is less than or equal to the interval.
+// Each request starting within the same interval will hit the same cache entry.
+// If the cache doesn't have the entire duration of the request cached, it will query the uncached parts and append them to the cache entries.
+// See the key generator's `GenerateCacheKey`.
+func NewResultsCache(
+ logger log.Logger,
+ c cache.Cache,
+ next Handler,
+ keyGen KeyGenerator,
+ limits Limits,
+ merger ResponseMerger,
+ extractor Extractor,
+ shouldCacheReq ShouldCacheReqFn,
+ shouldCacheRes ShouldCacheResFn,
+ parallelismForReq func(ctx context.Context, tenantIDs []string, r Request) int,
+ cacheGenNumberLoader CacheGenNumberLoader,
+ retentionEnabled bool,
+) *ResultsCache {
+ return &ResultsCache{
+ logger: logger,
+ next: next,
+ cache: c,
+ limits: limits,
+ splitter: keyGen,
+ cacheGenNumberLoader: cacheGenNumberLoader,
+ retentionEnabled: retentionEnabled,
+ extractor: extractor,
+ minCacheExtent: (5 * time.Minute).Milliseconds(),
+ merger: merger,
+ shouldCacheReq: shouldCacheReq,
+ shouldCacheRes: shouldCacheRes,
+ parallelismForReq: parallelismForReq,
+ }
+}
+
+func (s ResultsCache) Do(ctx context.Context, r Request) (Response, error) {
+ sp, ctx := opentracing.StartSpanFromContext(ctx, "resultsCache.Do")
+ defer sp.Finish()
+ tenantIDs, err := tenant.TenantIDs(ctx)
+ if err != nil {
+ return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error())
+ }
+
+ if s.shouldCacheReq != nil && !s.shouldCacheReq(ctx, r) {
+ return s.next.Do(ctx, r)
+ }
+
+ if s.cacheGenNumberLoader != nil && s.retentionEnabled {
+ ctx = cache.InjectCacheGenNumber(ctx, s.cacheGenNumberLoader.GetResultsCacheGenNumber(tenantIDs))
+ }
+
+ var (
+ key = s.splitter.GenerateCacheKey(ctx, tenant.JoinTenantIDs(tenantIDs), r)
+ extents []Extent
+ response Response
+ )
+
+ sp.LogKV(
+ "query", r.GetQuery(),
+ "step", time.UnixMilli(r.GetStep()),
+ "start", r.GetStart(),
+ "end", r.GetEnd(),
+ "key", key,
+ )
+
+ cacheFreshnessCapture := func(id string) time.Duration { return s.limits.MaxCacheFreshness(ctx, id) }
+ maxCacheFreshness := validation.MaxDurationPerTenant(tenantIDs, cacheFreshnessCapture)
+ maxCacheTime := int64(model.Now().Add(-maxCacheFreshness))
+ if r.GetStart().UnixMilli() > maxCacheTime {
+ return s.next.Do(ctx, r)
+ }
+
+ cached, ok := s.get(ctx, key)
+ if ok {
+ response, extents, err = s.handleHit(ctx, r, cached, maxCacheTime)
+ } else {
+ response, extents, err = s.handleMiss(ctx, r, maxCacheTime)
+ }
+
+ if err == nil && len(extents) > 0 {
+ extents, err := s.filterRecentExtents(r, maxCacheFreshness, extents)
+ if err != nil {
+ return nil, err
+ }
+ s.put(ctx, key, extents)
+ }
+
+ return response, err
+}
+
+func (s ResultsCache) handleMiss(ctx context.Context, r Request, maxCacheTime int64) (Response, []Extent, error) {
+ response, err := s.next.Do(ctx, r)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if s.shouldCacheRes != nil && !s.shouldCacheRes(ctx, r, response, maxCacheTime) {
+ return response, []Extent{}, nil
+ }
+
+ extent, err := toExtent(ctx, r, response)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ extents := []Extent{
+ extent,
+ }
+ return response, extents, nil
+}
+
+func (s ResultsCache) handleHit(ctx context.Context, r Request, extents []Extent, maxCacheTime int64) (Response, []Extent, error) {
+ var (
+ reqResps []RequestResponse
+ err error
+ )
+ sp, ctx := opentracing.StartSpanFromContext(ctx, "handleHit")
+ defer sp.Finish()
+ log := spanlogger.FromContext(ctx)
+ defer log.Finish()
+
+ requests, responses, err := s.partition(r, extents)
+ if err != nil {
+ return nil, nil, err
+ }
+ if len(requests) == 0 {
+ response, err := s.merger.MergeResponse(responses...)
+ // No downstream requests so no need to write back to the cache.
+ return response, nil, err
+ }
+
+ tenantIDs, err := tenant.TenantIDs(ctx)
+ if err != nil {
+ return nil, nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error())
+ }
+ reqResps, err = DoRequests(ctx, s.next, requests, s.parallelismForReq(ctx, tenantIDs, r))
+
+ if err != nil {
+ return nil, nil, err
+ }
+
+ for _, reqResp := range reqResps {
+ responses = append(responses, reqResp.Response)
+ if s.shouldCacheRes != nil && !s.shouldCacheRes(ctx, r, reqResp.Response, maxCacheTime) {
+ continue
+ }
+ extent, err := toExtent(ctx, reqResp.Request, reqResp.Response)
+ if err != nil {
+ return nil, nil, err
+ }
+ extents = append(extents, extent)
+ }
+ sort.Slice(extents, func(i, j int) bool {
+ if extents[i].Start == extents[j].Start {
+			// As an optimization, when two extents start at the same time, we
+			// put the bigger extent at the front of the slice, which helps
+			// reduce the amount of merging we have to do later.
+ return extents[i].End > extents[j].End
+ }
+
+ return extents[i].Start < extents[j].Start
+ })
+
+ // Merge any extents - potentially overlapping
+ accumulator, err := newAccumulator(extents[0])
+ if err != nil {
+ return nil, nil, err
+ }
+ mergedExtents := make([]Extent, 0, len(extents))
+
+ for i := 1; i < len(extents); i++ {
+ if accumulator.End+r.GetStep() < extents[i].Start {
+ mergedExtents, err = merge(mergedExtents, accumulator)
+ if err != nil {
+ return nil, nil, err
+ }
+ accumulator, err = newAccumulator(extents[i])
+ if err != nil {
+ return nil, nil, err
+ }
+ continue
+ }
+
+ if accumulator.End >= extents[i].End {
+ continue
+ }
+
+ accumulator.TraceId = jaegerTraceID(ctx)
+ accumulator.End = extents[i].End
+ currentRes, err := extents[i].toResponse()
+ if err != nil {
+ return nil, nil, err
+ }
+ merged, err := s.merger.MergeResponse(accumulator.Response, currentRes)
+ if err != nil {
+ return nil, nil, err
+ }
+ accumulator.Response = merged
+ }
+
+ mergedExtents, err = merge(mergedExtents, accumulator)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ response, err := s.merger.MergeResponse(responses...)
+ return response, mergedExtents, err
+}
+
+type accumulator struct {
+ Response
+ Extent
+}
+
+func merge(extents []Extent, acc *accumulator) ([]Extent, error) {
+ anyResp, err := types.MarshalAny(acc.Response)
+ if err != nil {
+ return nil, err
+ }
+ return append(extents, Extent{
+ Start: acc.Extent.Start,
+ End: acc.Extent.End,
+ Response: anyResp,
+ TraceId: acc.Extent.TraceId,
+ }), nil
+}
+
+func newAccumulator(base Extent) (*accumulator, error) {
+ res, err := base.toResponse()
+ if err != nil {
+ return nil, err
+ }
+ return &accumulator{
+ Response: res,
+ Extent: base,
+ }, nil
+}
+
+func toExtent(ctx context.Context, req Request, res Response) (Extent, error) {
+ anyResp, err := types.MarshalAny(res)
+ if err != nil {
+ return Extent{}, err
+ }
+ return Extent{
+ Start: req.GetStart().UnixMilli(),
+ End: req.GetEnd().UnixMilli(),
+ Response: anyResp,
+ TraceId: jaegerTraceID(ctx),
+ }, nil
+}
+
+// partition calculates the required requests to satisfy req given the cached data.
+// extents must be in order by start time.
+func (s ResultsCache) partition(req Request, extents []Extent) ([]Request, []Response, error) {
+ var requests []Request
+ var cachedResponses []Response
+ start := req.GetStart().UnixMilli()
+ end := req.GetEnd().UnixMilli()
+
+ for _, extent := range extents {
+ // If there is no overlap, ignore this extent.
+ if extent.GetEnd() < start || extent.GetStart() > end {
+ continue
+ }
+
+		// If this extent is tiny and the request is not tiny, discard it: it is more efficient to do a few larger queries.
+		// Hopefully a tiny request can turn a tiny extent into a not-so-tiny extent.
+
+		// However, if the step is large enough, the split_query_by_interval middleware would generate a query with the same start and end.
+		// For example, if the step size is more than 12h and the interval is 24h.
+		// This means the extent's start and end time would be the same, even if the time range covers several hours.
+ if (req.GetStart() != req.GetEnd()) && ((end - start) > s.minCacheExtent) && (extent.End-extent.Start < s.minCacheExtent) {
+ continue
+ }
+
+ // If there is a bit missing at the front, make a request for that.
+ if start < extent.Start {
+ r := req.WithStartEndForCache(time.UnixMilli(start), time.UnixMilli(extent.Start))
+ requests = append(requests, r)
+ }
+ res, err := extent.toResponse()
+ if err != nil {
+ return nil, nil, err
+ }
+ // extract the overlap from the cached extent.
+ cachedResponses = append(cachedResponses, s.extractor.Extract(start, end, res, extent.GetStart(), extent.GetEnd()))
+ start = extent.End
+ }
+
+ // Lastly, make a request for any data missing at the end.
+ if start < req.GetEnd().UnixMilli() {
+ r := req.WithStartEndForCache(time.UnixMilli(start), time.UnixMilli(end))
+ requests = append(requests, r)
+ }
+
+	// If start and end are the same (valid in PromQL), start == req.GetEnd() and the request above for trailing missing data won't be made.
+	// So we should only issue the request here if we don't have a valid cached response for it.
+ if req.GetStart() == req.GetEnd() && len(cachedResponses) == 0 {
+ requests = append(requests, req)
+ }
+
+ return requests, cachedResponses, nil
+}
+
+func (s ResultsCache) filterRecentExtents(req Request, maxCacheFreshness time.Duration, extents []Extent) ([]Extent, error) {
+ step := math.Max64(1, req.GetStep())
+ maxCacheTime := (int64(model.Now().Add(-maxCacheFreshness)) / step) * step
+ for i := range extents {
+ // Never cache data for the latest freshness period.
+ if extents[i].End > maxCacheTime {
+ extents[i].End = maxCacheTime
+ res, err := extents[i].toResponse()
+ if err != nil {
+ return nil, err
+ }
+ extracted := s.extractor.Extract(extents[i].GetStart(), maxCacheTime, res, extents[i].GetStart(), extents[i].GetEnd())
+ anyResp, err := types.MarshalAny(extracted)
+ if err != nil {
+ return nil, err
+ }
+ extents[i].Response = anyResp
+ }
+ }
+ return extents, nil
+}
+
+func (s ResultsCache) get(ctx context.Context, key string) ([]Extent, bool) {
+ found, bufs, _, _ := s.cache.Fetch(ctx, []string{cache.HashKey(key)})
+ if len(found) != 1 {
+ return nil, false
+ }
+
+ var resp CachedResponse
+ sp, ctx := opentracing.StartSpanFromContext(ctx, "unmarshal-extent") //nolint:ineffassign,staticcheck
+ defer sp.Finish()
+ log := spanlogger.FromContext(ctx)
+ defer log.Finish()
+
+ log.LogFields(otlog.Int("bytes", len(bufs[0])))
+
+ if err := proto.Unmarshal(bufs[0], &resp); err != nil {
+ level.Error(log).Log("msg", "error unmarshalling cached value", "err", err)
+ log.Error(err)
+ return nil, false
+ }
+
+ if resp.Key != key {
+ return nil, false
+ }
+
+	// Treat this as a cache miss if any extent uses an old proto schema (nil Response), so the entry gets refreshed.
+ for _, e := range resp.Extents {
+ if e.Response == nil {
+ return nil, false
+ }
+ }
+
+ return resp.Extents, true
+}
+
+func (s ResultsCache) put(ctx context.Context, key string, extents []Extent) {
+ buf, err := proto.Marshal(&CachedResponse{
+ Key: key,
+ Extents: extents,
+ })
+ if err != nil {
+ level.Error(s.logger).Log("msg", "error marshalling cached value", "err", err)
+ return
+ }
+
+ _ = s.cache.Store(ctx, []string{cache.HashKey(key)}, [][]byte{buf})
+}
+
+func jaegerTraceID(ctx context.Context) string {
+ span := opentracing.SpanFromContext(ctx)
+ if span == nil {
+ return ""
+ }
+
+ spanContext, ok := span.Context().(jaeger.SpanContext)
+ if !ok {
+ return ""
+ }
+
+ return spanContext.TraceID().String()
+}
+
+func (e *Extent) toResponse() (Response, error) {
+ msg, err := types.EmptyAny(e.Response)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := types.UnmarshalAny(e.Response, msg); err != nil {
+ return nil, err
+ }
+
+ resp, ok := msg.(Response)
+ if !ok {
+ return nil, fmt.Errorf("bad cached type")
+ }
+ return resp, nil
+}
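
To make the key scheme concrete, a self-contained sketch (it re-implements the arithmetic of ConstSplitter.GenerateCacheKey rather than importing the new package, and the tenant, query, and timestamps are made up) showing that two requests starting in the same split interval map to the same key and therefore share a cache entry:

package main

import (
	"fmt"
	"time"
)

// constKey mirrors ConstSplitter.GenerateCacheKey: userID, query, step and the
// index of the split interval the request starts in.
func constKey(interval time.Duration, userID, query string, step int64, start time.Time) string {
	currentInterval := start.UnixMilli() / int64(interval/time.Millisecond)
	return fmt.Sprintf("%s:%s:%d:%d", userID, query, step, currentInterval)
}

func main() {
	day := 24 * time.Hour
	query := `sum(rate({app="foo"}[5m]))`

	a := time.Date(2023, 9, 12, 1, 0, 0, 0, time.UTC)  // same UTC day as b
	b := time.Date(2023, 9, 12, 23, 0, 0, 0, time.UTC) // same key as a
	c := time.Date(2023, 9, 13, 1, 0, 0, 0, time.UTC)  // next interval, different key

	fmt.Println(constKey(day, "tenant-1", query, 60_000, a))
	fmt.Println(constKey(day, "tenant-1", query, 60_000, b))
	fmt.Println(constKey(day, "tenant-1", query, 60_000, c))
}
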
diff --git a/pkg/storage/chunk/cache/resultscache/cache_test.go b/pkg/storage/chunk/cache/resultscache/cache_test.go
new file mode 100644
index 0000000000000..db6e9d6c8a4a1
--- /dev/null
+++ b/pkg/storage/chunk/cache/resultscache/cache_test.go
@@ -0,0 +1,605 @@
+package resultscache
+
+import (
+ "context"
+ "strconv"
+ "testing"
+ "time"
+
+ "github.com/go-kit/log"
+ "github.com/gogo/protobuf/types"
+ "github.com/grafana/dskit/flagext"
+ "github.com/grafana/dskit/user"
+ "github.com/prometheus/common/model"
+ "github.com/stretchr/testify/require"
+ "golang.org/x/exp/slices"
+
+ "github.com/grafana/loki/pkg/logqlmodel/stats"
+ "github.com/grafana/loki/pkg/storage/chunk/cache"
+ "github.com/grafana/loki/pkg/util/constants"
+)
+
+const day = 24 * time.Hour
+
+var (
+ parsedRequest = &MockRequest{
+ Start: time.UnixMilli(1536673680 * 1e3),
+ End: time.UnixMilli(1536716898 * 1e3),
+ Step: 120 * 1e3,
+ Query: "sum(container_memory_rss) by (namespace)",
+ }
+
+ parsedResponse = &MockResponse{
+ Labels: []*MockLabelsPair{
+ {Name: "foo", Value: "bar"},
+ },
+ Samples: []*MockSample{
+ {Value: 137, TimestampMs: 1536673680000},
+ {Value: 137, TimestampMs: 1536673780000},
+ },
+ }
+)
+
+func TestPartition(t *testing.T) {
+ for _, tc := range []struct {
+ name string
+ input Request
+ prevCachedResponse []Extent
+ expectedRequests []Request
+ expectedCachedResponse []Response
+ }{
+ {
+ name: "Test a complete hit.",
+ input: &MockRequest{
+ Start: time.UnixMilli(0),
+ End: time.UnixMilli(100),
+ },
+ prevCachedResponse: []Extent{
+ mkExtent(0, 100),
+ },
+ expectedCachedResponse: []Response{
+ mkAPIResponse(0, 100, 10),
+ },
+ },
+
+ {
+ name: "Test with a complete miss.",
+ input: &MockRequest{
+ Start: time.UnixMilli(0),
+ End: time.UnixMilli(100),
+ },
+ prevCachedResponse: []Extent{
+ mkExtent(110, 210),
+ },
+ expectedRequests: []Request{
+ &MockRequest{
+ Start: time.UnixMilli(0),
+ End: time.UnixMilli(100),
+ },
+ },
+ },
+ {
+ name: "Test a partial hit.",
+ input: &MockRequest{
+ Start: time.UnixMilli(0),
+ End: time.UnixMilli(100),
+ },
+ prevCachedResponse: []Extent{
+ mkExtent(50, 100),
+ },
+ expectedRequests: []Request{
+ &MockRequest{
+ Start: time.UnixMilli(0),
+ End: time.UnixMilli(50),
+ },
+ },
+ expectedCachedResponse: []Response{
+ mkAPIResponse(50, 100, 10),
+ },
+ },
+ {
+ name: "Test multiple partial hits.",
+ input: &MockRequest{
+ Start: time.UnixMilli(100),
+ End: time.UnixMilli(200),
+ },
+ prevCachedResponse: []Extent{
+ mkExtent(50, 120),
+ mkExtent(160, 250),
+ },
+ expectedRequests: []Request{
+ &MockRequest{
+ Start: time.UnixMilli(120),
+ End: time.UnixMilli(160),
+ },
+ },
+ expectedCachedResponse: []Response{
+ mkAPIResponse(100, 120, 10),
+ mkAPIResponse(160, 200, 10),
+ },
+ },
+ {
+ name: "Partial hits with tiny gap.",
+ input: &MockRequest{
+ Start: time.UnixMilli(100),
+ End: time.UnixMilli(160),
+ },
+ prevCachedResponse: []Extent{
+ mkExtent(50, 120),
+ mkExtent(122, 130),
+ },
+ expectedRequests: []Request{
+ &MockRequest{
+ Start: time.UnixMilli(120),
+ End: time.UnixMilli(160),
+ },
+ },
+ expectedCachedResponse: []Response{
+ mkAPIResponse(100, 120, 10),
+ },
+ },
+ {
+ name: "Extent is outside the range and the request has a single step (same start and end).",
+ input: &MockRequest{
+ Start: time.UnixMilli(100),
+ End: time.UnixMilli(100),
+ },
+ prevCachedResponse: []Extent{
+ mkExtent(50, 90),
+ },
+ expectedRequests: []Request{
+ &MockRequest{
+ Start: time.UnixMilli(100),
+ End: time.UnixMilli(100),
+ },
+ },
+ },
+ {
+ name: "Test when hit has a large step and only a single sample extent.",
+			// If there is only a single sample in the split interval, start and end will be the same.
+ input: &MockRequest{
+ Start: time.UnixMilli(100),
+ End: time.UnixMilli(100),
+ },
+ prevCachedResponse: []Extent{
+ mkExtent(100, 100),
+ },
+ expectedCachedResponse: []Response{
+ mkAPIResponse(100, 105, 10),
+ },
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ s := ResultsCache{
+ extractor: MockExtractor{},
+ minCacheExtent: 10,
+ }
+ reqs, resps, err := s.partition(tc.input, tc.prevCachedResponse)
+ require.Nil(t, err)
+ require.Equal(t, tc.expectedRequests, reqs)
+ require.Equal(t, tc.expectedCachedResponse, resps)
+ })
+ }
+}
+
+func TestHandleHit(t *testing.T) {
+ for _, tc := range []struct {
+ name string
+ input Request
+ cachedEntry []Extent
+ expectedUpdatedCachedEntry []Extent
+ }{
+ {
+ name: "Should drop tiny extent that overlaps with non-tiny request only",
+ input: &MockRequest{
+ Start: time.UnixMilli(100),
+ End: time.UnixMilli(120),
+ Step: 5,
+ },
+ cachedEntry: []Extent{
+ mkExtentWithStep(0, 50, 5),
+ mkExtentWithStep(60, 65, 5),
+ mkExtentWithStep(100, 105, 5),
+ mkExtentWithStep(110, 150, 5),
+ mkExtentWithStep(160, 165, 5),
+ },
+ expectedUpdatedCachedEntry: []Extent{
+ mkExtentWithStep(0, 50, 5),
+ mkExtentWithStep(60, 65, 5),
+ mkExtentWithStep(100, 150, 5),
+ mkExtentWithStep(160, 165, 5),
+ },
+ },
+ {
+			name: "Should replace tiny extents that are covered by a bigger request",
+ input: &MockRequest{
+ Start: time.UnixMilli(100),
+ End: time.UnixMilli(200),
+ Step: 5,
+ },
+ cachedEntry: []Extent{
+ mkExtentWithStep(0, 50, 5),
+ mkExtentWithStep(60, 65, 5),
+ mkExtentWithStep(100, 105, 5),
+ mkExtentWithStep(110, 115, 5),
+ mkExtentWithStep(120, 125, 5),
+ mkExtentWithStep(220, 225, 5),
+ mkExtentWithStep(240, 250, 5),
+ },
+ expectedUpdatedCachedEntry: []Extent{
+ mkExtentWithStep(0, 50, 5),
+ mkExtentWithStep(60, 65, 5),
+ mkExtentWithStep(100, 200, 5),
+ mkExtentWithStep(220, 225, 5),
+ mkExtentWithStep(240, 250, 5),
+ },
+ },
+ {
+ name: "Should not drop tiny extent that completely overlaps with tiny request",
+ input: &MockRequest{
+ Start: time.UnixMilli(100),
+ End: time.UnixMilli(105),
+ Step: 5,
+ },
+ cachedEntry: []Extent{
+ mkExtentWithStep(0, 50, 5),
+ mkExtentWithStep(60, 65, 5),
+ mkExtentWithStep(100, 105, 5),
+ mkExtentWithStep(160, 165, 5),
+ },
+			expectedUpdatedCachedEntry: nil, // no cache update needed, request fulfilled using cache
+ },
+ {
+ name: "Should not drop tiny extent that partially center-overlaps with tiny request",
+ input: &MockRequest{
+ Start: time.UnixMilli(106),
+ End: time.UnixMilli(108),
+ Step: 2,
+ },
+ cachedEntry: []Extent{
+ mkExtentWithStep(60, 64, 2),
+ mkExtentWithStep(104, 110, 2),
+ mkExtentWithStep(160, 166, 2),
+ },
+			expectedUpdatedCachedEntry: nil, // no cache update needed, request fulfilled using cache
+ },
+ {
+ name: "Should not drop tiny extent that partially left-overlaps with tiny request",
+ input: &MockRequest{
+ Start: time.UnixMilli(100),
+ End: time.UnixMilli(106),
+ Step: 2,
+ },
+ cachedEntry: []Extent{
+ mkExtentWithStep(60, 64, 2),
+ mkExtentWithStep(104, 110, 2),
+ mkExtentWithStep(160, 166, 2),
+ },
+ expectedUpdatedCachedEntry: []Extent{
+ mkExtentWithStep(60, 64, 2),
+ mkExtentWithStep(100, 110, 2),
+ mkExtentWithStep(160, 166, 2),
+ },
+ },
+ {
+ name: "Should not drop tiny extent that partially right-overlaps with tiny request",
+ input: &MockRequest{
+ Start: time.UnixMilli(100),
+ End: time.UnixMilli(106),
+ Step: 2,
+ },
+ cachedEntry: []Extent{
+ mkExtentWithStep(60, 64, 2),
+ mkExtentWithStep(98, 102, 2),
+ mkExtentWithStep(160, 166, 2),
+ },
+ expectedUpdatedCachedEntry: []Extent{
+ mkExtentWithStep(60, 64, 2),
+ mkExtentWithStep(98, 106, 2),
+ mkExtentWithStep(160, 166, 2),
+ },
+ },
+ {
+ name: "Should merge fragmented extents if request fills the hole",
+ input: &MockRequest{
+ Start: time.UnixMilli(40),
+ End: time.UnixMilli(80),
+ Step: 20,
+ },
+ cachedEntry: []Extent{
+ mkExtentWithStep(0, 20, 20),
+ mkExtentWithStep(80, 100, 20),
+ },
+ expectedUpdatedCachedEntry: []Extent{
+ mkExtentWithStep(0, 100, 20),
+ },
+ },
+ {
+ name: "Should left-extend extent if request starts earlier than extent in cache",
+ input: &MockRequest{
+ Start: time.UnixMilli(40),
+ End: time.UnixMilli(80),
+ Step: 20,
+ },
+ cachedEntry: []Extent{
+ mkExtentWithStep(60, 160, 20),
+ },
+ expectedUpdatedCachedEntry: []Extent{
+ mkExtentWithStep(40, 160, 20),
+ },
+ },
+ {
+ name: "Should right-extend extent if request ends later than extent in cache",
+ input: &MockRequest{
+ Start: time.UnixMilli(100),
+ End: time.UnixMilli(180),
+ Step: 20,
+ },
+ cachedEntry: []Extent{
+ mkExtentWithStep(60, 160, 20),
+ },
+ expectedUpdatedCachedEntry: []Extent{
+ mkExtentWithStep(60, 180, 20),
+ },
+ },
+ {
+ name: "Should not throw error if complete-overlapped smaller Extent is erroneous",
+ input: &MockRequest{
+				// This request is carefully crafted such that cachedEntry is not used to fulfill
+ // the request.
+ Start: time.UnixMilli(160),
+ End: time.UnixMilli(180),
+ Step: 20,
+ },
+ cachedEntry: []Extent{
+ {
+ Start: 60,
+ End: 80,
+
+					// If the optimization of "sorting by End when the Start of 2 Extents is equal" were not there, this nil
+					// response would cause an error during the Extents merge phase. With the optimization,
+					// this bad Extent should be dropped and the good Extent below can be used instead.
+ Response: nil,
+ },
+ mkExtentWithStep(60, 160, 20),
+ },
+ expectedUpdatedCachedEntry: []Extent{
+ mkExtentWithStep(60, 180, 20),
+ },
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ sut := ResultsCache{
+ extractor: MockExtractor{},
+ minCacheExtent: 10,
+ limits: mockLimits{},
+ merger: MockMerger{},
+ parallelismForReq: func(_ context.Context, tenantIDs []string, r Request) int { return 1 },
+ next: HandlerFunc(func(_ context.Context, req Request) (Response, error) {
+ return mkAPIResponse(req.GetStart().UnixMilli(), req.GetEnd().UnixMilli(), req.GetStep()), nil
+ }),
+ }
+
+ ctx := user.InjectOrgID(context.Background(), "1")
+ response, updatedExtents, err := sut.handleHit(ctx, tc.input, tc.cachedEntry, 0)
+ require.NoError(t, err)
+
+ expectedResponse := mkAPIResponse(tc.input.GetStart().UnixMilli(), tc.input.GetEnd().UnixMilli(), tc.input.GetStep())
+ require.Equal(t, expectedResponse, response, "response does not match the expectation")
+ require.Equal(t, tc.expectedUpdatedCachedEntry, updatedExtents, "updated cache entry does not match the expectation")
+ })
+ }
+}
+
+func TestResultsCacheMaxFreshness(t *testing.T) {
+ modelNow := model.Now()
+ for i, tc := range []struct {
+ fakeLimits Limits
+ Handler HandlerFunc
+ expectedResponse *MockResponse
+ }{
+ {
+ fakeLimits: mockLimits{maxCacheFreshness: 5 * time.Second},
+ Handler: nil,
+ expectedResponse: mkAPIResponse(int64(modelNow)-(50*1e3), int64(modelNow)-(10*1e3), 10),
+ },
+ {
+			// should not look up the cache because the per-tenant override will be applied
+ fakeLimits: mockLimits{maxCacheFreshness: 10 * time.Minute},
+ Handler: HandlerFunc(func(_ context.Context, _ Request) (Response, error) {
+ return parsedResponse, nil
+ }),
+ expectedResponse: parsedResponse,
+ },
+ } {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+ var cfg Config
+ flagext.DefaultValues(&cfg)
+ cfg.CacheConfig.Cache = cache.NewMockCache()
+ c, err := cache.New(cfg.CacheConfig, nil, log.NewNopLogger(), stats.ResultCache, constants.Loki)
+ require.NoError(t, err)
+ fakeLimits := tc.fakeLimits
+ rc := NewResultsCache(
+ log.NewNopLogger(),
+ c,
+ tc.Handler,
+ ConstSplitter(day),
+ fakeLimits,
+ MockMerger{},
+ MockExtractor{},
+ nil,
+ nil,
+ func(_ context.Context, tenantIDs []string, r Request) int {
+ return 10
+ },
+ nil,
+ false,
+ )
+ require.NoError(t, err)
+
+ // create cache with handler
+ ctx := user.InjectOrgID(context.Background(), "1")
+
+ // create request with start end within the key extents
+ req := parsedRequest.WithStartEndForCache(time.UnixMilli(int64(modelNow)-(50*1e3)), time.UnixMilli(int64(modelNow)-(10*1e3)))
+
+ // fill cache
+ key := ConstSplitter(day).GenerateCacheKey(context.Background(), "1", req)
+ rc.put(ctx, key, []Extent{mkExtent(int64(modelNow)-(600*1e3), int64(modelNow))})
+
+ resp, err := rc.Do(ctx, req)
+ require.NoError(t, err)
+ require.Equal(t, tc.expectedResponse, resp)
+ })
+ }
+}
+
+func Test_resultsCache_MissingData(t *testing.T) {
+ cfg := Config{
+ CacheConfig: cache.Config{
+ Cache: cache.NewMockCache(),
+ },
+ }
+ c, err := cache.New(cfg.CacheConfig, nil, log.NewNopLogger(), stats.ResultCache, constants.Loki)
+ require.NoError(t, err)
+ rc := NewResultsCache(
+ log.NewNopLogger(),
+ c,
+ nil,
+ ConstSplitter(day),
+ mockLimits{},
+ MockMerger{},
+ MockExtractor{},
+ nil,
+ nil,
+ func(_ context.Context, tenantIDs []string, r Request) int {
+ return 10
+ },
+ nil,
+ false,
+ )
+ require.NoError(t, err)
+ ctx := context.Background()
+
+ // fill up the cache
+ rc.put(ctx, "empty", []Extent{{
+ Start: 100,
+ End: 200,
+ Response: nil,
+ }})
+ rc.put(ctx, "notempty", []Extent{mkExtent(100, 120)})
+ rc.put(ctx, "mixed", []Extent{mkExtent(100, 120), {
+ Start: 120,
+ End: 200,
+ Response: nil,
+ }})
+
+ extents, hit := rc.get(ctx, "empty")
+ require.Empty(t, extents)
+ require.False(t, hit)
+
+ extents, hit = rc.get(ctx, "notempty")
+ require.Equal(t, len(extents), 1)
+ require.True(t, hit)
+
+ extents, hit = rc.get(ctx, "mixed")
+ require.Equal(t, len(extents), 0)
+ require.False(t, hit)
+}
+
+func mkAPIResponse(start, end, step int64) *MockResponse {
+ var samples []*MockSample
+ for i := start; i <= end; i += step {
+ samples = append(samples, &MockSample{
+ TimestampMs: i,
+ Value: float64(i),
+ })
+ }
+
+ return &MockResponse{
+ Labels: []*MockLabelsPair{
+ {Name: "foo", Value: "bar"},
+ },
+ Samples: samples,
+ }
+}
+
+func mkExtent(start, end int64) Extent {
+ return mkExtentWithStep(start, end, 10)
+}
+
+func mkExtentWithStep(start, end, step int64) Extent {
+ res := mkAPIResponse(start, end, step)
+ anyRes, err := types.MarshalAny(res)
+ if err != nil {
+ panic(err)
+ }
+ return Extent{
+ Start: start,
+ End: end,
+ Response: anyRes,
+ }
+}
+
+func (r *MockRequest) WithStartEndForCache(start time.Time, end time.Time) Request {
+ m := *r
+ m.Start = start
+ m.End = end
+ return &m
+}
+
+type MockMerger struct{}
+
+func (m MockMerger) MergeResponse(responses ...Response) (Response, error) {
+ samples := make([]*MockSample, 0, len(responses)*2)
+ for _, response := range responses {
+ samples = append(samples, response.(*MockResponse).Samples...)
+ }
+
+ // Merge samples by:
+ // 1. Sorting them by time.
+ // 2. Removing duplicates.
+ slices.SortFunc(samples, func(a, b *MockSample) int {
+ if a.TimestampMs == b.TimestampMs {
+ return 0
+ }
+ if a.TimestampMs < b.TimestampMs {
+ return -1
+ }
+ return 1
+ })
+ samples = slices.CompactFunc(samples, func(a, b *MockSample) bool {
+ return a.TimestampMs == b.TimestampMs
+ })
+
+ return &MockResponse{
+ Labels: responses[0].(*MockResponse).Labels,
+ Samples: samples,
+ }, nil
+}
+
+type MockExtractor struct{}
+
+func (m MockExtractor) Extract(start, end int64, res Response, _, _ int64) Response {
+ mockRes := res.(*MockResponse)
+
+ result := MockResponse{
+ Labels: mockRes.Labels,
+ Samples: make([]*MockSample, 0, len(mockRes.Samples)),
+ }
+
+ for _, sample := range mockRes.Samples {
+ if start <= sample.TimestampMs && sample.TimestampMs <= end {
+ result.Samples = append(result.Samples, sample)
+ }
+ }
+ return &result
+}
+
+type mockLimits struct {
+ maxCacheFreshness time.Duration
+}
+
+func (m mockLimits) MaxCacheFreshness(context.Context, string) time.Duration {
+ return m.maxCacheFreshness
+}
diff --git a/pkg/storage/chunk/cache/resultscache/config.go b/pkg/storage/chunk/cache/resultscache/config.go
new file mode 100644
index 0000000000000..5a329168e8372
--- /dev/null
+++ b/pkg/storage/chunk/cache/resultscache/config.go
@@ -0,0 +1,45 @@
+package resultscache
+
+import (
+ "context"
+ "flag"
+ "time"
+
+ "github.com/pkg/errors"
+
+ "github.com/grafana/loki/pkg/storage/chunk/cache"
+)
+
+// Config is the config for the results cache.
+type Config struct {
+ CacheConfig cache.Config `yaml:"cache"`
+ Compression string `yaml:"compression"`
+}
+
+func (cfg *Config) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix string) {
+ cfg.CacheConfig.RegisterFlagsWithPrefix(prefix, "", f)
+ f.StringVar(&cfg.Compression, prefix+"compression", "", "Use compression in cache. The default is an empty value '', which disables compression. Supported values are: 'snappy' and ''.")
+}
+
+func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
+ cfg.RegisterFlagsWithPrefix(f, "")
+}
+
+func (cfg *Config) Validate() error {
+ switch cfg.Compression {
+ case "snappy", "":
+ // valid
+ default:
+ return errors.Errorf("unsupported compression type: %s", cfg.Compression)
+ }
+
+ if !cache.IsCacheConfigured(cfg.CacheConfig) {
+ return errors.New("no cache configured")
+ }
+
+ return nil
+}
+
+type Limits interface {
+ MaxCacheFreshness(ctx context.Context, tenantID string) time.Duration
+}
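
As a quick illustration of the validation rule above, only "snappy" or the empty string pass the compression check (the switch mirrors Config.Validate; the surrounding program is illustrative only):

package main

import "fmt"

// validateCompression reproduces the compression branch of Config.Validate.
func validateCompression(c string) error {
	switch c {
	case "snappy", "":
		return nil
	default:
		return fmt.Errorf("unsupported compression type: %s", c)
	}
}

func main() {
	fmt.Println(validateCompression(""))       // <nil>
	fmt.Println(validateCompression("snappy")) // <nil>
	fmt.Println(validateCompression("gzip"))   // unsupported compression type: gzip
}
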
diff --git a/pkg/storage/chunk/cache/resultscache/interface.go b/pkg/storage/chunk/cache/resultscache/interface.go
new file mode 100644
index 0000000000000..7d359c9628583
--- /dev/null
+++ b/pkg/storage/chunk/cache/resultscache/interface.go
@@ -0,0 +1,56 @@
+package resultscache
+
+import (
+ "context"
+ "time"
+
+ "github.com/gogo/protobuf/proto"
+)
+
+type Request interface {
+ proto.Message
+	// GetStart returns the start timestamp of the request.
+	GetStart() time.Time
+	// GetEnd returns the end timestamp of the request.
+	GetEnd() time.Time
+ // GetStep returns the step of the request in milliseconds.
+ GetStep() int64
+ // GetQuery returns the query of the request.
+ GetQuery() string
+ // GetCachingOptions returns the caching options.
+ GetCachingOptions() CachingOptions
+	// WithStartEndForCache clones the current request with different start and end timestamps.
+ WithStartEndForCache(start time.Time, end time.Time) Request
+}
+
+type Response interface {
+ proto.Message
+}
+
+// ResponseMerger is used by middlewares making multiple requests to merge back all responses into a single one.
+type ResponseMerger interface {
+ // MergeResponse merges responses from multiple requests into a single Response
+ MergeResponse(...Response) (Response, error)
+}
+
+type Handler interface {
+ Do(ctx context.Context, req Request) (Response, error)
+}
+
+// Extractor is used by the cache to extract a subset of a response from a cache entry.
+type Extractor interface {
+	// Extract extracts the subset between the `start` and `end` timestamps (in milliseconds)
+	// from the `res` response, which spans from `resStart` to `resEnd`.
+ Extract(start, end int64, res Response, resStart, resEnd int64) Response
+}
+
+// KeyGenerator generates cache keys. This is a useful interface for downstream
+// consumers who wish to implement their own strategies.
+type KeyGenerator interface {
+ GenerateCacheKey(ctx context.Context, userID string, r Request) string
+}
+
+type CacheGenNumberLoader interface {
+ GetResultsCacheGenNumber(tenantIDs []string) string
+ Stop()
+}
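
The KeyGenerator interface above is the extension point called out for downstream consumers. Below is a hedged, library-style sketch of a custom implementation; the type, the key layout, and the reduced request interface are hypothetical and not part of the patch:

package keygensketch

import (
	"context"
	"fmt"
	"time"
)

// request is a reduced view of the Request interface above; the real one also
// carries caching options and the WithStartEndForCache clone method.
type request interface {
	GetStart() time.Time
	GetQuery() string
	GetStep() int64
}

// namespacedKeyGen buckets requests by a fixed split interval, much like
// ConstSplitter, but prefixes every key with a static namespace.
type namespacedKeyGen struct {
	namespace string
	interval  time.Duration
}

func (g namespacedKeyGen) GenerateCacheKey(_ context.Context, userID string, r request) string {
	bucket := r.GetStart().UnixMilli() / g.interval.Milliseconds()
	return fmt.Sprintf("%s:%s:%s:%d:%d", g.namespace, userID, r.GetQuery(), r.GetStep(), bucket)
}
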
diff --git a/pkg/storage/chunk/cache/resultscache/test_types.pb.go b/pkg/storage/chunk/cache/resultscache/test_types.pb.go
new file mode 100644
index 0000000000000..7d3a54864e3df
--- /dev/null
+++ b/pkg/storage/chunk/cache/resultscache/test_types.pb.go
@@ -0,0 +1,1520 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: pkg/storage/chunk/cache/resultscache/test_types.proto
+
+package resultscache
+
+import (
+ encoding_binary "encoding/binary"
+ fmt "fmt"
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/gogo/protobuf/proto"
+ _ "github.com/gogo/protobuf/types"
+ github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+ reflect "reflect"
+ strings "strings"
+ time "time"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+var _ = time.Kitchen
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+type MockRequest struct {
+ Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
+ Start time.Time `protobuf:"bytes,2,opt,name=start,proto3,stdtime" json:"start"`
+ End time.Time `protobuf:"bytes,3,opt,name=end,proto3,stdtime" json:"end"`
+ Step int64 `protobuf:"varint,4,opt,name=step,proto3" json:"step,omitempty"`
+ Query string `protobuf:"bytes,6,opt,name=query,proto3" json:"query,omitempty"`
+ CachingOptions CachingOptions `protobuf:"bytes,7,opt,name=cachingOptions,proto3" json:"cachingOptions"`
+}
+
+func (m *MockRequest) Reset() { *m = MockRequest{} }
+func (*MockRequest) ProtoMessage() {}
+func (*MockRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5b2c489557407809, []int{0}
+}
+func (m *MockRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MockRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MockRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MockRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MockRequest.Merge(m, src)
+}
+func (m *MockRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *MockRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_MockRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MockRequest proto.InternalMessageInfo
+
+func (m *MockRequest) GetPath() string {
+ if m != nil {
+ return m.Path
+ }
+ return ""
+}
+
+func (m *MockRequest) GetStart() time.Time {
+ if m != nil {
+ return m.Start
+ }
+ return time.Time{}
+}
+
+func (m *MockRequest) GetEnd() time.Time {
+ if m != nil {
+ return m.End
+ }
+ return time.Time{}
+}
+
+func (m *MockRequest) GetStep() int64 {
+ if m != nil {
+ return m.Step
+ }
+ return 0
+}
+
+func (m *MockRequest) GetQuery() string {
+ if m != nil {
+ return m.Query
+ }
+ return ""
+}
+
+func (m *MockRequest) GetCachingOptions() CachingOptions {
+ if m != nil {
+ return m.CachingOptions
+ }
+ return CachingOptions{}
+}
+
+type MockResponse struct {
+ Labels []*MockLabelsPair `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels,omitempty"`
+ Samples []*MockSample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples,omitempty"`
+}
+
+func (m *MockResponse) Reset() { *m = MockResponse{} }
+func (*MockResponse) ProtoMessage() {}
+func (*MockResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5b2c489557407809, []int{1}
+}
+func (m *MockResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MockResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MockResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MockResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MockResponse.Merge(m, src)
+}
+func (m *MockResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *MockResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_MockResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MockResponse proto.InternalMessageInfo
+
+func (m *MockResponse) GetLabels() []*MockLabelsPair {
+ if m != nil {
+ return m.Labels
+ }
+ return nil
+}
+
+func (m *MockResponse) GetSamples() []*MockSample {
+ if m != nil {
+ return m.Samples
+ }
+ return nil
+}
+
+type MockLabelsPair struct {
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *MockLabelsPair) Reset() { *m = MockLabelsPair{} }
+func (*MockLabelsPair) ProtoMessage() {}
+func (*MockLabelsPair) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5b2c489557407809, []int{2}
+}
+func (m *MockLabelsPair) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MockLabelsPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MockLabelsPair.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MockLabelsPair) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MockLabelsPair.Merge(m, src)
+}
+func (m *MockLabelsPair) XXX_Size() int {
+ return m.Size()
+}
+func (m *MockLabelsPair) XXX_DiscardUnknown() {
+ xxx_messageInfo_MockLabelsPair.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MockLabelsPair proto.InternalMessageInfo
+
+func (m *MockLabelsPair) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *MockLabelsPair) GetValue() string {
+ if m != nil {
+ return m.Value
+ }
+ return ""
+}
+
+type MockSample struct {
+ Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
+ TimestampMs int64 `protobuf:"varint,2,opt,name=timestamp_ms,json=timestampMs,proto3" json:"timestamp_ms,omitempty"`
+}
+
+func (m *MockSample) Reset() { *m = MockSample{} }
+func (*MockSample) ProtoMessage() {}
+func (*MockSample) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5b2c489557407809, []int{3}
+}
+func (m *MockSample) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MockSample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MockSample.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MockSample) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MockSample.Merge(m, src)
+}
+func (m *MockSample) XXX_Size() int {
+ return m.Size()
+}
+func (m *MockSample) XXX_DiscardUnknown() {
+ xxx_messageInfo_MockSample.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MockSample proto.InternalMessageInfo
+
+func (m *MockSample) GetValue() float64 {
+ if m != nil {
+ return m.Value
+ }
+ return 0
+}
+
+func (m *MockSample) GetTimestampMs() int64 {
+ if m != nil {
+ return m.TimestampMs
+ }
+ return 0
+}
+
+func init() {
+ proto.RegisterType((*MockRequest)(nil), "resultscache.MockRequest")
+ proto.RegisterType((*MockResponse)(nil), "resultscache.MockResponse")
+ proto.RegisterType((*MockLabelsPair)(nil), "resultscache.MockLabelsPair")
+ proto.RegisterType((*MockSample)(nil), "resultscache.MockSample")
+}
+
+func init() {
+ proto.RegisterFile("pkg/storage/chunk/cache/resultscache/test_types.proto", fileDescriptor_5b2c489557407809)
+}
+
+var fileDescriptor_5b2c489557407809 = []byte{
+ // 462 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x52, 0x3f, 0x6f, 0x13, 0x31,
+ 0x14, 0x3f, 0xe7, 0xd2, 0x94, 0x3a, 0x51, 0x07, 0xab, 0xc3, 0x29, 0x42, 0x4e, 0xc8, 0x94, 0xe9,
+ 0x2c, 0x95, 0x3f, 0x43, 0xc5, 0x14, 0xc4, 0x82, 0xa8, 0x40, 0x86, 0x89, 0xa5, 0x72, 0x0e, 0xd7,
+ 0x39, 0xe5, 0xee, 0xec, 0xde, 0xf3, 0x21, 0xba, 0xb1, 0xb3, 0xf4, 0x63, 0xf0, 0x51, 0x3a, 0x66,
+ 0xec, 0x04, 0xe4, 0xb2, 0x30, 0xf6, 0x23, 0x20, 0xfb, 0x92, 0x36, 0xa5, 0x0b, 0xdd, 0xde, 0xf3,
+ 0xfb, 0xfd, 0xb1, 0x7e, 0xef, 0xe1, 0xe7, 0x66, 0xae, 0x18, 0x58, 0x5d, 0x0a, 0x25, 0x59, 0x32,
+ 0xab, 0x8a, 0x39, 0x4b, 0x44, 0x32, 0x93, 0xac, 0x94, 0x50, 0x65, 0x16, 0x9a, 0xc6, 0x4a, 0xb0,
+ 0x27, 0xf6, 0xdc, 0x48, 0x88, 0x4d, 0xa9, 0xad, 0x26, 0xbd, 0xed, 0x71, 0xff, 0x40, 0x69, 0xa5,
+ 0xfd, 0x80, 0xb9, 0xaa, 0xc1, 0xf4, 0x07, 0x4a, 0x6b, 0x95, 0x49, 0xe6, 0xbb, 0x69, 0x75, 0xca,
+ 0x6c, 0x9a, 0x4b, 0xb0, 0x22, 0x37, 0x6b, 0x40, 0x77, 0x4b, 0x71, 0xf4, 0xbd, 0x85, 0xbb, 0xc7,
+ 0x3a, 0x99, 0x73, 0x79, 0x56, 0x49, 0xb0, 0x84, 0xe0, 0xb6, 0x11, 0x76, 0x16, 0xa1, 0x21, 0x1a,
+ 0xef, 0x71, 0x5f, 0x93, 0x23, 0xbc, 0x03, 0x56, 0x94, 0x36, 0x6a, 0x0d, 0xd1, 0xb8, 0x7b, 0xd8,
+ 0x8f, 0x1b, 0x87, 0x78, 0xe3, 0x10, 0x7f, 0xdc, 0x38, 0x4c, 0x1e, 0x5d, 0xfe, 0x1c, 0x04, 0x17,
+ 0xbf, 0x06, 0x88, 0x37, 0x14, 0xf2, 0x02, 0x87, 0xb2, 0xf8, 0x1c, 0x85, 0x0f, 0x60, 0x3a, 0x82,
+ 0xfb, 0x07, 0x58, 0x69, 0xa2, 0xf6, 0x10, 0x8d, 0x43, 0xee, 0x6b, 0x72, 0x80, 0x77, 0xce, 0x2a,
+ 0x59, 0x9e, 0x47, 0x1d, 0xff, 0xb9, 0xa6, 0x21, 0x6f, 0xf0, 0xbe, 0x8b, 0x23, 0x2d, 0xd4, 0x3b,
+ 0x63, 0x53, 0x5d, 0x40, 0xb4, 0xeb, 0xcd, 0x1e, 0xc7, 0xdb, 0x61, 0xc5, 0xaf, 0xee, 0x60, 0x26,
+ 0x6d, 0x67, 0xc7, 0xff, 0x61, 0x8e, 0xbe, 0xe2, 0x5e, 0x13, 0x06, 0x18, 0x5d, 0x80, 0x24, 0xcf,
+ 0x70, 0x27, 0x13, 0x53, 0x99, 0x41, 0x84, 0x86, 0xe1, 0x7d, 0x4d, 0x87, 0x7d, 0xeb, 0xe7, 0xef,
+ 0x45, 0x5a, 0xf2, 0x35, 0x96, 0x1c, 0xe2, 0x5d, 0x10, 0xb9, 0xc9, 0x24, 0x44, 0x2d, 0x4f, 0x8b,
+ 0xee, 0xd3, 0x3e, 0x78, 0x00, 0xdf, 0x00, 0x47, 0x47, 0x78, 0xff, 0xae, 0x9a, 0x4b, 0xa0, 0x10,
+ 0xb9, 0xdc, 0x6c, 0xc2, 0xd5, 0x2e, 0x81, 0x2f, 0x22, 0xab, 0xa4, 0xdf, 0xc4, 0x1e, 0x6f, 0x9a,
+ 0xd1, 0x6b, 0x8c, 0x6f, 0x25, 0x6f, 0x31, 0x8e, 0x88, 0xd6, 0x18, 0xf2, 0x04, 0xf7, 0x6e, 0xee,
+ 0xe0, 0x24, 0x07, 0x2f, 0x10, 0xf2, 0xee, 0xcd, 0xdb, 0x31, 0x4c, 0xca, 0xc5, 0x92, 0x06, 0x57,
+ 0x4b, 0x1a, 0x5c, 0x2f, 0x29, 0xfa, 0x56, 0x53, 0xf4, 0xa3, 0xa6, 0xe8, 0xb2, 0xa6, 0x68, 0x51,
+ 0x53, 0xf4, 0xbb, 0xa6, 0xe8, 0x4f, 0x4d, 0x83, 0xeb, 0x9a, 0xa2, 0x8b, 0x15, 0x0d, 0x16, 0x2b,
+ 0x1a, 0x5c, 0xad, 0x68, 0xf0, 0xe9, 0xa5, 0x4a, 0xed, 0xac, 0x9a, 0xc6, 0x89, 0xce, 0x99, 0x2a,
+ 0xc5, 0xa9, 0x28, 0x04, 0xcb, 0xf4, 0x3c, 0x65, 0xff, 0x73, 0xe1, 0xd3, 0x8e, 0xbf, 0x84, 0xa7,
+ 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x20, 0x73, 0x6a, 0xfb, 0x10, 0x03, 0x00, 0x00,
+}
+
+func (this *MockRequest) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*MockRequest)
+ if !ok {
+ that2, ok := that.(MockRequest)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ if this.Path != that1.Path {
+ return false
+ }
+ if !this.Start.Equal(that1.Start) {
+ return false
+ }
+ if !this.End.Equal(that1.End) {
+ return false
+ }
+ if this.Step != that1.Step {
+ return false
+ }
+ if this.Query != that1.Query {
+ return false
+ }
+ if !this.CachingOptions.Equal(&that1.CachingOptions) {
+ return false
+ }
+ return true
+}
+func (this *MockResponse) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*MockResponse)
+ if !ok {
+ that2, ok := that.(MockResponse)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ if len(this.Labels) != len(that1.Labels) {
+ return false
+ }
+ for i := range this.Labels {
+ if !this.Labels[i].Equal(that1.Labels[i]) {
+ return false
+ }
+ }
+ if len(this.Samples) != len(that1.Samples) {
+ return false
+ }
+ for i := range this.Samples {
+ if !this.Samples[i].Equal(that1.Samples[i]) {
+ return false
+ }
+ }
+ return true
+}
+func (this *MockLabelsPair) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*MockLabelsPair)
+ if !ok {
+ that2, ok := that.(MockLabelsPair)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ if this.Name != that1.Name {
+ return false
+ }
+ if this.Value != that1.Value {
+ return false
+ }
+ return true
+}
+func (this *MockSample) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*MockSample)
+ if !ok {
+ that2, ok := that.(MockSample)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ if this.Value != that1.Value {
+ return false
+ }
+ if this.TimestampMs != that1.TimestampMs {
+ return false
+ }
+ return true
+}
+func (this *MockRequest) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 10)
+ s = append(s, "&resultscache.MockRequest{")
+ s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n")
+ s = append(s, "Start: "+fmt.Sprintf("%#v", this.Start)+",\n")
+ s = append(s, "End: "+fmt.Sprintf("%#v", this.End)+",\n")
+ s = append(s, "Step: "+fmt.Sprintf("%#v", this.Step)+",\n")
+ s = append(s, "Query: "+fmt.Sprintf("%#v", this.Query)+",\n")
+ s = append(s, "CachingOptions: "+strings.Replace(this.CachingOptions.GoString(), `&`, ``, 1)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *MockResponse) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&resultscache.MockResponse{")
+ if this.Labels != nil {
+ s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n")
+ }
+ if this.Samples != nil {
+ s = append(s, "Samples: "+fmt.Sprintf("%#v", this.Samples)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *MockLabelsPair) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&resultscache.MockLabelsPair{")
+ s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n")
+ s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *MockSample) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&resultscache.MockSample{")
+ s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n")
+ s = append(s, "TimestampMs: "+fmt.Sprintf("%#v", this.TimestampMs)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func valueToGoStringTestTypes(v interface{}, typ string) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
+}
+func (m *MockRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MockRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MockRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.CachingOptions.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTestTypes(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ if len(m.Query) > 0 {
+ i -= len(m.Query)
+ copy(dAtA[i:], m.Query)
+ i = encodeVarintTestTypes(dAtA, i, uint64(len(m.Query)))
+ i--
+ dAtA[i] = 0x32
+ }
+ if m.Step != 0 {
+ i = encodeVarintTestTypes(dAtA, i, uint64(m.Step))
+ i--
+ dAtA[i] = 0x20
+ }
+ n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.End):])
+ if err2 != nil {
+ return 0, err2
+ }
+ i -= n2
+ i = encodeVarintTestTypes(dAtA, i, uint64(n2))
+ i--
+ dAtA[i] = 0x1a
+ n3, err3 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Start):])
+ if err3 != nil {
+ return 0, err3
+ }
+ i -= n3
+ i = encodeVarintTestTypes(dAtA, i, uint64(n3))
+ i--
+ dAtA[i] = 0x12
+ if len(m.Path) > 0 {
+ i -= len(m.Path)
+ copy(dAtA[i:], m.Path)
+ i = encodeVarintTestTypes(dAtA, i, uint64(len(m.Path)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *MockResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MockResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MockResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Samples) > 0 {
+ for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTestTypes(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.Labels) > 0 {
+ for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTestTypes(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *MockLabelsPair) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MockLabelsPair) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MockLabelsPair) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Value) > 0 {
+ i -= len(m.Value)
+ copy(dAtA[i:], m.Value)
+ i = encodeVarintTestTypes(dAtA, i, uint64(len(m.Value)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintTestTypes(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *MockSample) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MockSample) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MockSample) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.TimestampMs != 0 {
+ i = encodeVarintTestTypes(dAtA, i, uint64(m.TimestampMs))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.Value != 0 {
+ i -= 8
+ encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value))))
+ i--
+ dAtA[i] = 0x9
+ }
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintTestTypes(dAtA []byte, offset int, v uint64) int {
+ offset -= sovTestTypes(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *MockRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Path)
+ if l > 0 {
+ n += 1 + l + sovTestTypes(uint64(l))
+ }
+ l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Start)
+ n += 1 + l + sovTestTypes(uint64(l))
+ l = github_com_gogo_protobuf_types.SizeOfStdTime(m.End)
+ n += 1 + l + sovTestTypes(uint64(l))
+ if m.Step != 0 {
+ n += 1 + sovTestTypes(uint64(m.Step))
+ }
+ l = len(m.Query)
+ if l > 0 {
+ n += 1 + l + sovTestTypes(uint64(l))
+ }
+ l = m.CachingOptions.Size()
+ n += 1 + l + sovTestTypes(uint64(l))
+ return n
+}
+
+func (m *MockResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Labels) > 0 {
+ for _, e := range m.Labels {
+ l = e.Size()
+ n += 1 + l + sovTestTypes(uint64(l))
+ }
+ }
+ if len(m.Samples) > 0 {
+ for _, e := range m.Samples {
+ l = e.Size()
+ n += 1 + l + sovTestTypes(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *MockLabelsPair) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovTestTypes(uint64(l))
+ }
+ l = len(m.Value)
+ if l > 0 {
+ n += 1 + l + sovTestTypes(uint64(l))
+ }
+ return n
+}
+
+func (m *MockSample) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Value != 0 {
+ n += 9
+ }
+ if m.TimestampMs != 0 {
+ n += 1 + sovTestTypes(uint64(m.TimestampMs))
+ }
+ return n
+}
+
+func sovTestTypes(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozTestTypes(x uint64) (n int) {
+ return sovTestTypes(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *MockRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&MockRequest{`,
+ `Path:` + fmt.Sprintf("%v", this.Path) + `,`,
+ `Start:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Start), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
+ `End:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.End), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
+ `Step:` + fmt.Sprintf("%v", this.Step) + `,`,
+ `Query:` + fmt.Sprintf("%v", this.Query) + `,`,
+ `CachingOptions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CachingOptions), "CachingOptions", "CachingOptions", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *MockResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForLabels := "[]*MockLabelsPair{"
+ for _, f := range this.Labels {
+ repeatedStringForLabels += strings.Replace(f.String(), "MockLabelsPair", "MockLabelsPair", 1) + ","
+ }
+ repeatedStringForLabels += "}"
+ repeatedStringForSamples := "[]*MockSample{"
+ for _, f := range this.Samples {
+ repeatedStringForSamples += strings.Replace(f.String(), "MockSample", "MockSample", 1) + ","
+ }
+ repeatedStringForSamples += "}"
+ s := strings.Join([]string{`&MockResponse{`,
+ `Labels:` + repeatedStringForLabels + `,`,
+ `Samples:` + repeatedStringForSamples + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *MockLabelsPair) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&MockLabelsPair{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Value:` + fmt.Sprintf("%v", this.Value) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *MockSample) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&MockSample{`,
+ `Value:` + fmt.Sprintf("%v", this.Value) + `,`,
+ `TimestampMs:` + fmt.Sprintf("%v", this.TimestampMs) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringTestTypes(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
+func (m *MockRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTestTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MockRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MockRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTestTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTestTypes
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTestTypes
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Path = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTestTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTestTypes
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTestTypes
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Start, dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field End", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTestTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTestTypes
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTestTypes
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.End, dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Step", wireType)
+ }
+ m.Step = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTestTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Step |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTestTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTestTypes
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTestTypes
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Query = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CachingOptions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTestTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTestTypes
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTestTypes
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.CachingOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTestTypes(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthTestTypes
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthTestTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MockResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTestTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MockResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MockResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTestTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTestTypes
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTestTypes
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Labels = append(m.Labels, &MockLabelsPair{})
+ if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTestTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTestTypes
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTestTypes
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Samples = append(m.Samples, &MockSample{})
+ if err := m.Samples[len(m.Samples)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTestTypes(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthTestTypes
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthTestTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MockLabelsPair) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTestTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MockLabelsPair: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MockLabelsPair: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTestTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTestTypes
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTestTypes
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTestTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTestTypes
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTestTypes
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Value = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTestTypes(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthTestTypes
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthTestTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MockSample) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTestTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MockSample: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MockSample: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 1 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var v uint64
+ if (iNdEx + 8) > l {
+ return io.ErrUnexpectedEOF
+ }
+ v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+ iNdEx += 8
+ m.Value = float64(math.Float64frombits(v))
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TimestampMs", wireType)
+ }
+ m.TimestampMs = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTestTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.TimestampMs |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTestTypes(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthTestTypes
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthTestTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipTestTypes(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowTestTypes
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowTestTypes
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowTestTypes
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthTestTypes
+ }
+ iNdEx += length
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthTestTypes
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowTestTypes
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipTestTypes(dAtA[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthTestTypes
+ }
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthTestTypes = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowTestTypes = fmt.Errorf("proto: integer overflow")
+)
diff --git a/pkg/storage/chunk/cache/resultscache/test_types.proto b/pkg/storage/chunk/cache/resultscache/test_types.proto
new file mode 100644
index 0000000000000..920db66314de4
--- /dev/null
+++ b/pkg/storage/chunk/cache/resultscache/test_types.proto
@@ -0,0 +1,41 @@
+syntax = "proto3";
+
+package resultscache;
+
+import "gogoproto/gogo.proto";
+import "google/protobuf/timestamp.proto";
+import "types.proto";
+
+option go_package = "github.com/grafana/loki/pkg/storage/chunk/cache/resultscache";
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+
+message MockRequest {
+ string path = 1;
+ google.protobuf.Timestamp start = 2 [
+ (gogoproto.stdtime) = true,
+ (gogoproto.nullable) = false
+ ];
+ google.protobuf.Timestamp end = 3 [
+ (gogoproto.stdtime) = true,
+ (gogoproto.nullable) = false
+ ];
+ int64 step = 4;
+ string query = 6;
+ CachingOptions cachingOptions = 7 [(gogoproto.nullable) = false];
+}
+
+message MockResponse {
+ repeated MockLabelsPair labels = 1;
+ repeated MockSample samples = 2;
+}
+
+message MockLabelsPair {
+ string name = 1;
+ string value = 2;
+}
+
+message MockSample {
+ double value = 1;
+ int64 timestamp_ms = 2;
+}
diff --git a/pkg/storage/chunk/cache/resultscache/types.pb.go b/pkg/storage/chunk/cache/resultscache/types.pb.go
new file mode 100644
index 0000000000000..7c63abdda4bf6
--- /dev/null
+++ b/pkg/storage/chunk/cache/resultscache/types.pb.go
@@ -0,0 +1,1078 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: pkg/storage/chunk/cache/resultscache/types.proto
+
+package resultscache
+
+import (
+ fmt "fmt"
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/gogo/protobuf/proto"
+ types "github.com/gogo/protobuf/types"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+ reflect "reflect"
+ strings "strings"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// Defined here to prevent circular imports between logproto & queryrangebase
+type CachingOptions struct {
+ Disabled bool `protobuf:"varint,1,opt,name=disabled,proto3" json:"disabled,omitempty"`
+}
+
+func (m *CachingOptions) Reset() { *m = CachingOptions{} }
+func (*CachingOptions) ProtoMessage() {}
+func (*CachingOptions) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6b13efd4ce8649ef, []int{0}
+}
+func (m *CachingOptions) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *CachingOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_CachingOptions.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *CachingOptions) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CachingOptions.Merge(m, src)
+}
+func (m *CachingOptions) XXX_Size() int {
+ return m.Size()
+}
+func (m *CachingOptions) XXX_DiscardUnknown() {
+ xxx_messageInfo_CachingOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CachingOptions proto.InternalMessageInfo
+
+func (m *CachingOptions) GetDisabled() bool {
+ if m != nil {
+ return m.Disabled
+ }
+ return false
+}
+
+type CachedResponse struct {
+ Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key"`
+ // List of cached responses; non-overlapping and in order.
+ Extents []Extent `protobuf:"bytes,2,rep,name=extents,proto3" json:"extents"`
+}
+
+func (m *CachedResponse) Reset() { *m = CachedResponse{} }
+func (*CachedResponse) ProtoMessage() {}
+func (*CachedResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6b13efd4ce8649ef, []int{1}
+}
+func (m *CachedResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *CachedResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_CachedResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *CachedResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CachedResponse.Merge(m, src)
+}
+func (m *CachedResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *CachedResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_CachedResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CachedResponse proto.InternalMessageInfo
+
+func (m *CachedResponse) GetKey() string {
+ if m != nil {
+ return m.Key
+ }
+ return ""
+}
+
+func (m *CachedResponse) GetExtents() []Extent {
+ if m != nil {
+ return m.Extents
+ }
+ return nil
+}
+
+type Extent struct {
+ Start int64 `protobuf:"varint,1,opt,name=start,proto3" json:"start"`
+ End int64 `protobuf:"varint,2,opt,name=end,proto3" json:"end"`
+ TraceId string `protobuf:"bytes,4,opt,name=trace_id,json=traceId,proto3" json:"-"`
+ Response *types.Any `protobuf:"bytes,5,opt,name=response,proto3" json:"response"`
+}
+
+func (m *Extent) Reset() { *m = Extent{} }
+func (*Extent) ProtoMessage() {}
+func (*Extent) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6b13efd4ce8649ef, []int{2}
+}
+func (m *Extent) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Extent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Extent.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Extent) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Extent.Merge(m, src)
+}
+func (m *Extent) XXX_Size() int {
+ return m.Size()
+}
+func (m *Extent) XXX_DiscardUnknown() {
+ xxx_messageInfo_Extent.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Extent proto.InternalMessageInfo
+
+func (m *Extent) GetStart() int64 {
+ if m != nil {
+ return m.Start
+ }
+ return 0
+}
+
+func (m *Extent) GetEnd() int64 {
+ if m != nil {
+ return m.End
+ }
+ return 0
+}
+
+func (m *Extent) GetTraceId() string {
+ if m != nil {
+ return m.TraceId
+ }
+ return ""
+}
+
+func (m *Extent) GetResponse() *types.Any {
+ if m != nil {
+ return m.Response
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*CachingOptions)(nil), "resultscache.CachingOptions")
+ proto.RegisterType((*CachedResponse)(nil), "resultscache.CachedResponse")
+ proto.RegisterType((*Extent)(nil), "resultscache.Extent")
+}
+
+func init() {
+ proto.RegisterFile("pkg/storage/chunk/cache/resultscache/types.proto", fileDescriptor_6b13efd4ce8649ef)
+}
+
+var fileDescriptor_6b13efd4ce8649ef = []byte{
+ // 404 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xb1, 0x6e, 0xd4, 0x30,
+ 0x18, 0xc7, 0xe3, 0xde, 0x5d, 0x2f, 0x35, 0x15, 0xa0, 0xa8, 0x43, 0x7a, 0x83, 0x73, 0xba, 0xa9,
+ 0x03, 0xc4, 0x08, 0x56, 0x04, 0x22, 0x88, 0x01, 0x16, 0x24, 0x8f, 0x2c, 0xc8, 0x49, 0x5c, 0x27,
+ 0x4a, 0xb0, 0x23, 0xdb, 0x91, 0xc8, 0xc6, 0x23, 0xf0, 0x18, 0x0c, 0x3c, 0x48, 0xc7, 0x1b, 0x3b,
+ 0x45, 0x5c, 0x6e, 0x41, 0x99, 0xfa, 0x08, 0x28, 0x36, 0x77, 0xea, 0xd8, 0xe5, 0xf3, 0xf7, 0xff,
+ 0xfe, 0x7f, 0xc9, 0x3f, 0x7f, 0x32, 0x7c, 0xd1, 0x54, 0x1c, 0x6b, 0x23, 0x15, 0xe5, 0x0c, 0x67,
+ 0x45, 0x2b, 0x2a, 0x9c, 0xd1, 0xac, 0x60, 0x58, 0x31, 0xdd, 0xd6, 0x46, 0x3b, 0x61, 0xba, 0x86,
+ 0xe9, 0xb8, 0x51, 0xd2, 0xc8, 0xe0, 0xfc, 0xbe, 0xb3, 0xba, 0xe0, 0x92, 0x4b, 0x6b, 0xe0, 0xa9,
+ 0x73, 0x99, 0xd5, 0x25, 0x97, 0x92, 0xd7, 0x0c, 0x5b, 0x95, 0xb6, 0xd7, 0x98, 0x8a, 0xce, 0x59,
+ 0x9b, 0x67, 0xf0, 0xf1, 0x7b, 0x9a, 0x15, 0xa5, 0xe0, 0x9f, 0x1b, 0x53, 0x4a, 0xa1, 0x83, 0x15,
+ 0xf4, 0xf3, 0x52, 0xd3, 0xb4, 0x66, 0x79, 0x08, 0xd6, 0xe0, 0xca, 0x27, 0x47, 0xbd, 0xa9, 0x5d,
+ 0x9a, 0xe5, 0x84, 0xe9, 0x46, 0x0a, 0xcd, 0x82, 0x4b, 0x38, 0xab, 0x58, 0x67, 0x83, 0x67, 0xc9,
+ 0x72, 0xec, 0xa3, 0x49, 0x92, 0xa9, 0x04, 0x6f, 0xe1, 0x92, 0x7d, 0x37, 0x4c, 0x18, 0x1d, 0x9e,
+ 0xac, 0x67, 0x57, 0x8f, 0x5e, 0x5e, 0xc4, 0xf7, 0x59, 0xe3, 0x0f, 0xd6, 0x4c, 0x9e, 0xdc, 0xf4,
+ 0x91, 0x37, 0xf6, 0xd1, 0x21, 0x4c, 0x0e, 0xcd, 0xe6, 0x37, 0x80, 0xa7, 0x2e, 0x14, 0x44, 0x70,
+ 0xa1, 0x0d, 0x55, 0xc6, 0x5e, 0x34, 0x4b, 0xce, 0xc6, 0x3e, 0x72, 0x03, 0xe2, 0x8e, 0x89, 0x83,
+ 0x89, 0x3c, 0x3c, 0xb1, 0xb6, 0xe5, 0x60, 0x22, 0x27, 0x53, 0x09, 0xd6, 0xd0, 0x37, 0x8a, 0x66,
+ 0xec, 0x6b, 0x99, 0x87, 0x73, 0xcb, 0xb9, 0x18, 0xfb, 0x08, 0x3c, 0x27, 0x4b, 0x3b, 0xfe, 0x98,
+ 0x07, 0x6f, 0xa0, 0xaf, 0xfe, 0x3f, 0x28, 0x5c, 0xac, 0x81, 0x45, 0x75, 0x2b, 0x8b, 0x0f, 0x2b,
+ 0x8b, 0xdf, 0x89, 0x2e, 0x39, 0x1f, 0xfb, 0xe8, 0x98, 0x24, 0xc7, 0xee, 0xd3, 0xdc, 0x9f, 0x3d,
+ 0x9d, 0x27, 0x6a, 0xbb, 0x43, 0xde, 0xed, 0x0e, 0x79, 0x77, 0x3b, 0x04, 0x7e, 0x0c, 0x08, 0xfc,
+ 0x1a, 0x10, 0xb8, 0x19, 0x10, 0xd8, 0x0e, 0x08, 0xfc, 0x19, 0x10, 0xf8, 0x3b, 0x20, 0xef, 0x6e,
+ 0x40, 0xe0, 0xe7, 0x1e, 0x79, 0xdb, 0x3d, 0xf2, 0x6e, 0xf7, 0xc8, 0xfb, 0xf2, 0x9a, 0x97, 0xa6,
+ 0x68, 0xd3, 0x38, 0x93, 0xdf, 0x30, 0x57, 0xf4, 0x9a, 0x0a, 0x8a, 0x6b, 0x59, 0x95, 0xf8, 0x21,
+ 0x3f, 0x21, 0x3d, 0xb5, 0x7c, 0xaf, 0xfe, 0x05, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xc6, 0x6f, 0x43,
+ 0x38, 0x02, 0x00, 0x00,
+}
+
+func (this *CachingOptions) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*CachingOptions)
+ if !ok {
+ that2, ok := that.(CachingOptions)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ if this.Disabled != that1.Disabled {
+ return false
+ }
+ return true
+}
+func (this *CachedResponse) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*CachedResponse)
+ if !ok {
+ that2, ok := that.(CachedResponse)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ if this.Key != that1.Key {
+ return false
+ }
+ if len(this.Extents) != len(that1.Extents) {
+ return false
+ }
+ for i := range this.Extents {
+ if !this.Extents[i].Equal(&that1.Extents[i]) {
+ return false
+ }
+ }
+ return true
+}
+func (this *Extent) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*Extent)
+ if !ok {
+ that2, ok := that.(Extent)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ if this.Start != that1.Start {
+ return false
+ }
+ if this.End != that1.End {
+ return false
+ }
+ if this.TraceId != that1.TraceId {
+ return false
+ }
+ if !this.Response.Equal(that1.Response) {
+ return false
+ }
+ return true
+}
+func (this *CachingOptions) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&resultscache.CachingOptions{")
+ s = append(s, "Disabled: "+fmt.Sprintf("%#v", this.Disabled)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *CachedResponse) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&resultscache.CachedResponse{")
+ s = append(s, "Key: "+fmt.Sprintf("%#v", this.Key)+",\n")
+ if this.Extents != nil {
+ vs := make([]*Extent, len(this.Extents))
+ for i := range vs {
+ vs[i] = &this.Extents[i]
+ }
+ s = append(s, "Extents: "+fmt.Sprintf("%#v", vs)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *Extent) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 8)
+ s = append(s, "&resultscache.Extent{")
+ s = append(s, "Start: "+fmt.Sprintf("%#v", this.Start)+",\n")
+ s = append(s, "End: "+fmt.Sprintf("%#v", this.End)+",\n")
+ s = append(s, "TraceId: "+fmt.Sprintf("%#v", this.TraceId)+",\n")
+ if this.Response != nil {
+ s = append(s, "Response: "+fmt.Sprintf("%#v", this.Response)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func valueToGoStringTypes(v interface{}, typ string) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
+}
+func (m *CachingOptions) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CachingOptions) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CachingOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Disabled {
+ i--
+ if m.Disabled {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *CachedResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CachedResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CachedResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Extents) > 0 {
+ for iNdEx := len(m.Extents) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Extents[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTypes(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.Key) > 0 {
+ i -= len(m.Key)
+ copy(dAtA[i:], m.Key)
+ i = encodeVarintTypes(dAtA, i, uint64(len(m.Key)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Extent) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Extent) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Extent) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Response != nil {
+ {
+ size, err := m.Response.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTypes(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ if len(m.TraceId) > 0 {
+ i -= len(m.TraceId)
+ copy(dAtA[i:], m.TraceId)
+ i = encodeVarintTypes(dAtA, i, uint64(len(m.TraceId)))
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.End != 0 {
+ i = encodeVarintTypes(dAtA, i, uint64(m.End))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.Start != 0 {
+ i = encodeVarintTypes(dAtA, i, uint64(m.Start))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintTypes(dAtA []byte, offset int, v uint64) int {
+ offset -= sovTypes(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *CachingOptions) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Disabled {
+ n += 2
+ }
+ return n
+}
+
+func (m *CachedResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Key)
+ if l > 0 {
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ if len(m.Extents) > 0 {
+ for _, e := range m.Extents {
+ l = e.Size()
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *Extent) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Start != 0 {
+ n += 1 + sovTypes(uint64(m.Start))
+ }
+ if m.End != 0 {
+ n += 1 + sovTypes(uint64(m.End))
+ }
+ l = len(m.TraceId)
+ if l > 0 {
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ if m.Response != nil {
+ l = m.Response.Size()
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ return n
+}
+
+func sovTypes(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozTypes(x uint64) (n int) {
+ return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *CachingOptions) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&CachingOptions{`,
+ `Disabled:` + fmt.Sprintf("%v", this.Disabled) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *CachedResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForExtents := "[]Extent{"
+ for _, f := range this.Extents {
+ repeatedStringForExtents += strings.Replace(strings.Replace(f.String(), "Extent", "Extent", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForExtents += "}"
+ s := strings.Join([]string{`&CachedResponse{`,
+ `Key:` + fmt.Sprintf("%v", this.Key) + `,`,
+ `Extents:` + repeatedStringForExtents + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Extent) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Extent{`,
+ `Start:` + fmt.Sprintf("%v", this.Start) + `,`,
+ `End:` + fmt.Sprintf("%v", this.End) + `,`,
+ `TraceId:` + fmt.Sprintf("%v", this.TraceId) + `,`,
+ `Response:` + strings.Replace(fmt.Sprintf("%v", this.Response), "Any", "types.Any", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringTypes(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
+func (m *CachingOptions) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CachingOptions: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CachingOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Disabled = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CachedResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CachedResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CachedResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Extents", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Extents = append(m.Extents, Extent{})
+ if err := m.Extents[len(m.Extents)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Extent) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Extent: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Extent: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType)
+ }
+ m.Start = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Start |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field End", wireType)
+ }
+ m.End = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.End |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.TraceId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Response", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Response == nil {
+ m.Response = &types.Any{}
+ }
+ if err := m.Response.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipTypes(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthTypes
+ }
+ iNdEx += length
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthTypes
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipTypes(dAtA[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthTypes
+ }
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow")
+)
diff --git a/pkg/storage/chunk/cache/resultscache/types.proto b/pkg/storage/chunk/cache/resultscache/types.proto
new file mode 100644
index 0000000000000..835950a0581e7
--- /dev/null
+++ b/pkg/storage/chunk/cache/resultscache/types.proto
@@ -0,0 +1,34 @@
+syntax = "proto3";
+
+package resultscache;
+
+import "gogoproto/gogo.proto";
+import "google/protobuf/any.proto";
+
+option go_package = "github.com/grafana/loki/pkg/storage/chunk/cache/resultscache";
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+
+// Defined here to prevent circular imports between logproto & queryrangebase
+message CachingOptions {
+ bool disabled = 1;
+}
+
+message CachedResponse {
+ string key = 1 [(gogoproto.jsontag) = "key"];
+
+ // List of cached responses; non-overlapping and in order.
+ repeated Extent extents = 2 [
+ (gogoproto.nullable) = false,
+ (gogoproto.jsontag) = "extents"
+ ];
+}
+
+message Extent {
+ int64 start = 1 [(gogoproto.jsontag) = "start"];
+ int64 end = 2 [(gogoproto.jsontag) = "end"];
+  // Field 3 held a key in a previous version; keep it reserved so existing cached entries still decode cleanly.
+ reserved 3;
+ string trace_id = 4 [(gogoproto.jsontag) = "-"];
+ google.protobuf.Any response = 5 [(gogoproto.jsontag) = "response"];
+}
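A minimal sketch (not part of the change set) of how these new messages might be used: an arbitrary protobuf payload is wrapped in a `google.protobuf.Any`, attached to an `Extent` covering a time range, and the whole `CachedResponse` is serialized with the generated `Marshal` shown above. The cache key and timestamps are illustrative only, and the import path assumes the `resultscache` package introduced by this diff.

```go
package main

import (
	"fmt"

	"github.com/gogo/protobuf/types"

	"github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
)

func main() {
	// Wrap an arbitrary protobuf message in an Any; the cache never needs to
	// know the concrete response type in order to store it.
	anyResp, err := types.MarshalAny(&types.StringValue{Value: "cached response body"})
	if err != nil {
		panic(err)
	}

	cached := resultscache.CachedResponse{
		Key: "example-tenant:example-query:19700", // hypothetical key layout
		Extents: []resultscache.Extent{
			{
				Start:    1700000000000, // extents stay non-overlapping and ordered
				End:      1700000060000,
				Response: anyResp,
			},
		},
	}

	buf, err := cached.Marshal()
	if err != nil {
		panic(err)
	}
	fmt.Printf("encoded %d bytes for key %q\n", len(buf), cached.Key)
}
```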
diff --git a/pkg/storage/chunk/cache/resultscache/util.go b/pkg/storage/chunk/cache/resultscache/util.go
new file mode 100644
index 0000000000000..eedc14a1f0b7f
--- /dev/null
+++ b/pkg/storage/chunk/cache/resultscache/util.go
@@ -0,0 +1,67 @@
+package resultscache
+
+import (
+ "context"
+)
+
+type HandlerFunc func(context.Context, Request) (Response, error)
+
+// Do implements Handler.
+func (q HandlerFunc) Do(ctx context.Context, req Request) (Response, error) {
+ return q(ctx, req)
+}
+
+// RequestResponse contains a request response and the respective request that was used.
+type RequestResponse struct {
+ Request Request
+ Response Response
+}
+
+// DoRequests executes a list of requests in parallel.
+func DoRequests(ctx context.Context, downstream Handler, reqs []Request, parallelism int) ([]RequestResponse, error) {
+ // If one of the requests fail, we want to be able to cancel the rest of them.
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+	// Feed all requests through an intermediate channel that a fixed number of workers consume, limiting parallelism.
+ intermediate := make(chan Request)
+ go func() {
+ for _, req := range reqs {
+ intermediate <- req
+ }
+ close(intermediate)
+ }()
+
+ respChan, errChan := make(chan RequestResponse), make(chan error)
+ if parallelism > len(reqs) {
+ parallelism = len(reqs)
+ }
+ for i := 0; i < parallelism; i++ {
+ go func() {
+ for req := range intermediate {
+ resp, err := downstream.Do(ctx, req)
+ if err != nil {
+ errChan <- err
+ } else {
+ respChan <- RequestResponse{req, resp}
+ }
+ }
+ }()
+ }
+
+ resps := make([]RequestResponse, 0, len(reqs))
+ var firstErr error
+ for range reqs {
+ select {
+ case resp := <-respChan:
+ resps = append(resps, resp)
+ case err := <-errChan:
+ if firstErr == nil {
+ cancel()
+ firstErr = err
+ }
+ }
+ }
+
+ return resps, firstErr
+}
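For reference, a self-contained sketch of the bounded fan-out pattern `DoRequests` implements, with plain ints standing in for the package's `Request` and `Response` interfaces; `doAll` and the other names here are hypothetical and used only for illustration.

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// doAll mirrors DoRequests: work is fed through an unbuffered channel, at most
// `parallelism` workers consume it, and the first error cancels the context
// while the loop still drains exactly one result (or error) per item.
func doAll(ctx context.Context, work []int, parallelism int, do func(context.Context, int) (int, error)) ([]int, error) {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	input := make(chan int)
	go func() {
		defer close(input)
		for _, w := range work {
			input <- w
		}
	}()

	results, errs := make(chan int), make(chan error)
	if parallelism > len(work) {
		parallelism = len(work)
	}
	for i := 0; i < parallelism; i++ {
		go func() {
			for w := range input {
				out, err := do(ctx, w)
				if err != nil {
					errs <- err
				} else {
					results <- out
				}
			}
		}()
	}

	out := make([]int, 0, len(work))
	var firstErr error
	for range work {
		select {
		case r := <-results:
			out = append(out, r)
		case err := <-errs:
			if firstErr == nil {
				cancel()
				firstErr = err
			}
		}
	}
	return out, firstErr
}

func main() {
	squares, err := doAll(context.Background(), []int{1, 2, 3, 4, 5}, 2, func(ctx context.Context, v int) (int, error) {
		if ctx.Err() != nil {
			return 0, errors.New("cancelled before processing")
		}
		return v * v, nil
	})
	fmt.Println(squares, err)
}
```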
diff --git a/pkg/storage/chunk/client/aws/s3_storage_client.go b/pkg/storage/chunk/client/aws/s3_storage_client.go
index d21513f1150b9..0c2136801f812 100644
--- a/pkg/storage/chunk/client/aws/s3_storage_client.go
+++ b/pkg/storage/chunk/client/aws/s3_storage_client.go
@@ -405,6 +405,7 @@ func (a *S3ObjectClient) PutObject(ctx context.Context, objectKey string, object
func (a *S3ObjectClient) List(ctx context.Context, prefix, delimiter string) ([]client.StorageObject, []client.StorageCommonPrefix, error) {
var storageObjects []client.StorageObject
var commonPrefixes []client.StorageCommonPrefix
+ var commonPrefixesSet = make(map[string]bool)
for i := range a.bucketNames {
err := loki_instrument.TimeRequest(ctx, "S3.List", s3RequestDuration, instrument.ErrorCode, func(ctx context.Context) error {
@@ -428,7 +429,10 @@ func (a *S3ObjectClient) List(ctx context.Context, prefix, delimiter string) ([]
}
for _, commonPrefix := range output.CommonPrefixes {
- commonPrefixes = append(commonPrefixes, client.StorageCommonPrefix(aws.StringValue(commonPrefix.Prefix)))
+ if !commonPrefixesSet[aws.StringValue(commonPrefix.Prefix)] {
+ commonPrefixes = append(commonPrefixes, client.StorageCommonPrefix(aws.StringValue(commonPrefix.Prefix)))
+ commonPrefixesSet[aws.StringValue(commonPrefix.Prefix)] = true
+ }
}
if output.IsTruncated == nil || !*output.IsTruncated {
diff --git a/pkg/storage/chunk/client/aws/s3_storage_client_test.go b/pkg/storage/chunk/client/aws/s3_storage_client_test.go
index 00ec9eba4072f..769f8cf00665c 100644
--- a/pkg/storage/chunk/client/aws/s3_storage_client_test.go
+++ b/pkg/storage/chunk/client/aws/s3_storage_client_test.go
@@ -21,6 +21,11 @@ import (
"go.uber.org/atomic"
"github.com/grafana/loki/pkg/storage/chunk/client/hedging"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/service/s3"
+ "github.com/aws/aws-sdk-go/service/s3/s3iface"
)
type RoundTripperFunc func(*http.Request) (*http.Response, error)
@@ -195,3 +200,23 @@ session_token: session token
require.Equal(t, underTest.SessionToken.String(), "session token")
}
+
+type testCommonPrefixesS3Client struct {
+ s3iface.S3API
+}
+
+func (m *testCommonPrefixesS3Client) ListObjectsV2WithContext(aws.Context, *s3.ListObjectsV2Input, ...request.Option) (*s3.ListObjectsV2Output, error) {
+ var commonPrefixes []*s3.CommonPrefix
+ commonPrefix := "common-prefix-repeated/"
+ for i := 0; i < 2; i++ {
+ commonPrefixes = append(commonPrefixes, &s3.CommonPrefix{Prefix: aws.String(commonPrefix)})
+ }
+ return &s3.ListObjectsV2Output{CommonPrefixes: commonPrefixes, IsTruncated: aws.Bool(false)}, nil
+}
+
+func TestCommonPrefixes(t *testing.T) {
+	s3 := S3ObjectClient{S3: &testCommonPrefixesS3Client{}, bucketNames: []string{"bucket"}}
+	_, commonPrefixes, err := s3.List(context.Background(), "", "/")
+	require.NoError(t, err)
+	require.Len(t, commonPrefixes, 1)
+}
diff --git a/pkg/storage/config/schema_config.go b/pkg/storage/config/schema_config.go
index f2eaa9f3733db..d4b5902516d20 100644
--- a/pkg/storage/config/schema_config.go
+++ b/pkg/storage/config/schema_config.go
@@ -491,6 +491,42 @@ func (cfg *IndexPeriodicTableConfig) Validate() error {
return ValidatePathPrefix(cfg.PathPrefix)
}
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (cfg *IndexPeriodicTableConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ g := struct {
+ PathPrefix string `yaml:"path_prefix"`
+ Prefix string `yaml:"prefix"`
+ Period model.Duration `yaml:"period"`
+ Tags Tags `yaml:"tags"`
+ }{}
+ if err := unmarshal(&g); err != nil {
+ return err
+ }
+
+ cfg.PathPrefix = g.PathPrefix
+ cfg.Prefix = g.Prefix
+ cfg.Period = time.Duration(g.Period)
+ cfg.Tags = g.Tags
+
+ return nil
+}
+
+// MarshalYAML implements the yaml.Marshaler interface.
+func (cfg IndexPeriodicTableConfig) MarshalYAML() (interface{}, error) {
+ g := &struct {
+ PathPrefix string `yaml:"path_prefix"`
+ Prefix string `yaml:"prefix"`
+ Period model.Duration `yaml:"period"`
+ Tags Tags `yaml:"tags"`
+ }{
+ PathPrefix: cfg.PathPrefix,
+ Prefix: cfg.Prefix,
+ Period: model.Duration(cfg.Period),
+ Tags: cfg.Tags,
+ }
+
+ return g, nil
+}
func ValidatePathPrefix(prefix string) error {
if prefix == "" {
return errors.New("prefix must be set")
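The custom `UnmarshalYAML`/`MarshalYAML` pair above exists because `Period` is a plain `time.Duration` in Go but is written in Prometheus duration syntax (for example `1w`) in configuration; round-tripping through `model.Duration` preserves that syntax, as the test in the next file also exercises. A small, standalone illustration:

```go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	// "1w" is valid Prometheus duration syntax but is rejected by time.ParseDuration.
	d, err := model.ParseDuration("1w")
	if err != nil {
		panic(err)
	}
	fmt.Println(time.Duration(d)) // 168h0m0s, what a plain time.Duration would render
	fmt.Println(d.String())       // 1w, what the YAML config should contain
}
```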
diff --git a/pkg/storage/config/schema_config_test.go b/pkg/storage/config/schema_config_test.go
index a547419987786..06fd191b7092a 100644
--- a/pkg/storage/config/schema_config_test.go
+++ b/pkg/storage/config/schema_config_test.go
@@ -503,6 +503,37 @@ func MustParseDayTime(s string) DayTime {
return DayTime{model.TimeFromUnix(t.Unix())}
}
+func TestIndexPeriodicTableConfigCustomUnmarshalling(t *testing.T) {
+ yamlFile := `path_prefix: loki_index/
+prefix: cortex_
+period: 1w
+tags:
+ foo: bar
+`
+
+ cfg := IndexPeriodicTableConfig{}
+ err := yaml.Unmarshal([]byte(yamlFile), &cfg)
+ require.NoError(t, err)
+
+ expectedCfg := IndexPeriodicTableConfig{
+ PathPrefix: "loki_index/",
+ PeriodicTableConfig: PeriodicTableConfig{
+ Prefix: "cortex_",
+ Period: 7 * 24 * time.Hour,
+ Tags: map[string]string{
+ "foo": "bar",
+ },
+ },
+ }
+
+ require.Equal(t, expectedCfg, cfg)
+
+ yamlGenerated, err := yaml.Marshal(&cfg)
+ require.NoError(t, err)
+
+ require.Equal(t, yamlFile, string(yamlGenerated))
+}
+
func TestPeriodicTableConfigCustomUnmarshalling(t *testing.T) {
yamlFile := `prefix: cortex_
period: 1w
diff --git a/pkg/storage/store.go b/pkg/storage/store.go
index 6781dbbff8a3b..440b9273c7803 100644
--- a/pkg/storage/store.go
+++ b/pkg/storage/store.go
@@ -6,6 +6,8 @@ import (
"math"
"time"
+ lokilog "github.com/grafana/loki/pkg/logql/log"
+
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/pkg/errors"
@@ -57,10 +59,17 @@ type SchemaConfigProvider interface {
GetSchemaConfigs() []config.PeriodConfig
}
+type Instrumentable interface {
+ SetExtractorWrapper(wrapper lokilog.SampleExtractorWrapper)
+
+ SetPipelineWrapper(wrapper lokilog.PipelineWrapper)
+}
+
type Store interface {
stores.Store
SelectStore
SchemaConfigProvider
+ Instrumentable
}
type LokiStore struct {
@@ -84,6 +93,8 @@ type LokiStore struct {
logger log.Logger
chunkFilterer chunk.RequestChunkFilterer
+ extractorWrapper lokilog.SampleExtractorWrapper
+ pipelineWrapper lokilog.PipelineWrapper
congestionControllerFactory func(cfg congestion.Config, logger log.Logger, metrics *congestion.Metrics) congestion.Controller
metricsNamespace string
@@ -381,6 +392,14 @@ func (s *LokiStore) SetChunkFilterer(chunkFilterer chunk.RequestChunkFilterer) {
s.Store.SetChunkFilterer(chunkFilterer)
}
+func (s *LokiStore) SetExtractorWrapper(wrapper lokilog.SampleExtractorWrapper) {
+ s.extractorWrapper = wrapper
+}
+
+func (s *LokiStore) SetPipelineWrapper(wrapper lokilog.PipelineWrapper) {
+ s.pipelineWrapper = wrapper
+}
+
// lazyChunks is an internal function used to resolve a set of lazy chunks from the store without actually loading them. It's used internally by `LazyQuery` and `GetSeries`
func (s *LokiStore) lazyChunks(ctx context.Context, matchers []*labels.Matcher, from, through model.Time) ([]*LazyChunk, error) {
userID, err := tenant.TenantID(ctx)
@@ -454,9 +473,7 @@ func (s *LokiStore) SelectSeries(ctx context.Context, req logql.SelectLogParams)
}
result := make([]logproto.SeriesIdentifier, len(series))
for i, s := range series {
- result[i] = logproto.SeriesIdentifier{
- Labels: s.Map(),
- }
+ result[i] = logproto.SeriesIdentifierFromLabels(s)
}
return result, nil
}
@@ -493,6 +510,15 @@ func (s *LokiStore) SelectLogs(ctx context.Context, req logql.SelectLogParams) (
return nil, err
}
+ if s.pipelineWrapper != nil {
+ userID, err := tenant.TenantID(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ pipeline = s.pipelineWrapper.Wrap(ctx, pipeline, expr.String(), userID)
+ }
+
var chunkFilterer chunk.Filterer
if s.chunkFilterer != nil {
chunkFilterer = s.chunkFilterer.ForRequest(ctx)
@@ -531,6 +557,15 @@ func (s *LokiStore) SelectSamples(ctx context.Context, req logql.SelectSamplePar
return nil, err
}
+ if s.extractorWrapper != nil {
+ userID, err := tenant.TenantID(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ extractor = s.extractorWrapper.Wrap(ctx, extractor, expr.String(), userID)
+ }
+
var chunkFilterer chunk.Filterer
if s.chunkFilterer != nil {
chunkFilterer = s.chunkFilterer.ForRequest(ctx)
diff --git a/pkg/storage/store_test.go b/pkg/storage/store_test.go
index 9311de2090bd5..31d84b21b4f18 100644
--- a/pkg/storage/store_test.go
+++ b/pkg/storage/store_test.go
@@ -14,6 +14,8 @@ import (
"testing"
"time"
+ lokilog "github.com/grafana/loki/pkg/logql/log"
+
"github.com/cespare/xxhash/v2"
"github.com/grafana/dskit/user"
"github.com/prometheus/common/model"
@@ -25,7 +27,9 @@ import (
"github.com/grafana/loki/pkg/iter"
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/logql"
+ "github.com/grafana/loki/pkg/logql/syntax"
"github.com/grafana/loki/pkg/querier/astmapper"
+ "github.com/grafana/loki/pkg/querier/plan"
"github.com/grafana/loki/pkg/storage/chunk"
"github.com/grafana/loki/pkg/storage/chunk/client/local"
"github.com/grafana/loki/pkg/storage/config"
@@ -494,6 +498,10 @@ func Test_store_SelectLogs(t *testing.T) {
chunkMetrics: NilMetrics,
}
+ tt.req.Plan = &plan.QueryPlan{
+ AST: syntax.MustParseExpr(tt.req.Selector),
+ }
+
ctx = user.InjectOrgID(context.Background(), "test-user")
it, err := s.SelectLogs(ctx, logql.SelectLogParams{QueryRequest: tt.req})
if err != nil {
@@ -818,6 +826,10 @@ func Test_store_SelectSample(t *testing.T) {
chunkMetrics: NilMetrics,
}
+ tt.req.Plan = &plan.QueryPlan{
+ AST: syntax.MustParseExpr(tt.req.Selector),
+ }
+
ctx = user.InjectOrgID(context.Background(), "test-user")
it, err := s.SelectSamples(ctx, logql.SelectSampleParams{SampleQueryRequest: tt.req})
if err != nil {
@@ -862,8 +874,9 @@ func Test_ChunkFilterer(t *testing.T) {
}
defer it.Close()
for it.Next() {
- v := mustParseLabels(it.Labels())["foo"]
- require.NotEqual(t, "bazz", v)
+ l, err := syntax.ParseLabels(it.Labels())
+ require.NoError(t, err)
+ require.NotEqual(t, "bazz", l.Get("foo"))
}
logit, err := s.SelectLogs(ctx, logql.SelectLogParams{QueryRequest: newQuery("{foo=~\"ba.*\"}", from, from.Add(1*time.Hour), nil, nil)})
@@ -873,15 +886,177 @@ func Test_ChunkFilterer(t *testing.T) {
}
defer logit.Close()
for logit.Next() {
- v := mustParseLabels(it.Labels())["foo"]
- require.NotEqual(t, "bazz", v)
+ l, err := syntax.ParseLabels(it.Labels())
+ require.NoError(t, err)
+ require.NotEqual(t, "bazz", l.Get("foo"))
}
ids, err := s.SelectSeries(ctx, logql.SelectLogParams{QueryRequest: newQuery("{foo=~\"ba.*\"}", from, from.Add(1*time.Hour), nil, nil)})
require.NoError(t, err)
for _, id := range ids {
- v := id.Labels["foo"]
- require.NotEqual(t, "bazz", v)
+ require.NotEqual(t, "bazz", id.Get("foo"))
+ }
+}
+
+func Test_PipelineWrapper(t *testing.T) {
+ s := &LokiStore{
+ Store: storeFixture,
+ cfg: Config{
+ MaxChunkBatchSize: 10,
+ },
+ chunkMetrics: NilMetrics,
+ }
+ wrapper := &testPipelineWrapper{
+ pipeline: newMockPipeline(),
+ }
+
+ s.SetPipelineWrapper(wrapper)
+ ctx = user.InjectOrgID(context.Background(), "test-user")
+ logit, err := s.SelectLogs(ctx, logql.SelectLogParams{QueryRequest: newQuery("{foo=~\"ba.*\"}", from, from.Add(1*time.Hour), nil, nil)})
+ if err != nil {
+ t.Errorf("store.SelectLogs() error = %v", err)
+ return
}
+ defer logit.Close()
+ for logit.Next() {
+ require.NoError(t, logit.Error()) // consume the iterator
+ }
+
+ require.Equal(t, "test-user", wrapper.tenant)
+ require.Equal(t, "{foo=~\"ba.*\"}", wrapper.query)
+ require.Equal(t, 28, wrapper.pipeline.sp.called) // we've passed every log line through the wrapper
+}
+
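+// testPipelineWrapper captures the tenant and query passed to Wrap and substitutes a counting mock pipeline.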
+type testPipelineWrapper struct {
+ query string
+ pipeline *mockPipeline
+ tenant string
+}
+
+func (t *testPipelineWrapper) Wrap(_ context.Context, pipeline lokilog.Pipeline, query, tenant string) lokilog.Pipeline {
+ t.tenant = tenant
+ t.query = query
+ t.pipeline.wrappedExtractor = pipeline
+ return t.pipeline
+}
+
+func newMockPipeline() *mockPipeline {
+ return &mockPipeline{
+ sp: &mockStreamPipeline{},
+ }
+}
+
+type mockPipeline struct {
+ wrappedExtractor lokilog.Pipeline
+ sp *mockStreamPipeline
+}
+
+func (p *mockPipeline) ForStream(l labels.Labels) lokilog.StreamPipeline {
+ sp := p.wrappedExtractor.ForStream(l)
+ p.sp.wrappedSP = sp
+ return p.sp
+}
+
+func (p *mockPipeline) Reset() {}
+
+// mockStreamPipeline counts how many log lines are processed and delegates to the wrapped stream pipeline.
+type mockStreamPipeline struct {
+ wrappedSP lokilog.StreamPipeline
+ called int
+}
+
+func (p *mockStreamPipeline) BaseLabels() lokilog.LabelsResult {
+ return p.wrappedSP.BaseLabels()
+}
+
+func (p *mockStreamPipeline) Process(ts int64, line []byte, lbs ...labels.Label) ([]byte, lokilog.LabelsResult, bool) {
+ p.called++
+ return p.wrappedSP.Process(ts, line, lbs...)
+}
+
+func (p *mockStreamPipeline) ProcessString(ts int64, line string, lbs ...labels.Label) (string, lokilog.LabelsResult, bool) {
+ p.called++
+ return p.wrappedSP.ProcessString(ts, line, lbs...)
+}
+
+func Test_SampleWrapper(t *testing.T) {
+ s := &LokiStore{
+ Store: storeFixture,
+ cfg: Config{
+ MaxChunkBatchSize: 10,
+ },
+ chunkMetrics: NilMetrics,
+ }
+ wrapper := &testExtractorWrapper{
+ extractor: newMockExtractor(),
+ }
+ s.SetExtractorWrapper(wrapper)
+
+ ctx = user.InjectOrgID(context.Background(), "test-user")
+ it, err := s.SelectSamples(ctx, logql.SelectSampleParams{SampleQueryRequest: newSampleQuery("count_over_time({foo=~\"ba.*\"}[1s])", from, from.Add(1*time.Hour), nil)})
+ if err != nil {
+ t.Errorf("store.SelectSamples() error = %v", err)
+ return
+ }
+ defer it.Close()
+ for it.Next() {
+ require.NoError(t, it.Error()) // consume the iterator
+ }
+
+ require.Equal(t, "test-user", wrapper.tenant)
+ require.Equal(t, "count_over_time({foo=~\"ba.*\"}[1s])", wrapper.query)
+ require.Equal(t, 28, wrapper.extractor.sp.called) // we've passed every log line through the wrapper
+}
+
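+// testExtractorWrapper captures the tenant and query passed to Wrap and substitutes a counting mock extractor.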
+type testExtractorWrapper struct {
+ query string
+ tenant string
+ extractor *mockExtractor
+}
+
+func (t *testExtractorWrapper) Wrap(_ context.Context, extractor lokilog.SampleExtractor, query, tenant string) lokilog.SampleExtractor {
+ t.tenant = tenant
+ t.query = query
+ t.extractor.wrappedExtractor = extractor
+ return t.extractor
+}
+
+func newMockExtractor() *mockExtractor {
+ return &mockExtractor{
+ sp: &mockStreamExtractor{},
+ }
+}
+
+type mockExtractor struct {
+ wrappedExtractor lokilog.SampleExtractor
+ sp *mockStreamExtractor
+}
+
+func (p *mockExtractor) ForStream(l labels.Labels) lokilog.StreamSampleExtractor {
+ sp := p.wrappedExtractor.ForStream(l)
+ p.sp.wrappedSP = sp
+ return p.sp
+}
+
+func (p *mockExtractor) Reset() {}
+
+// mockStreamExtractor counts how many samples are processed and delegates to the wrapped stream sample extractor.
+type mockStreamExtractor struct {
+ wrappedSP lokilog.StreamSampleExtractor
+ called int
+}
+
+func (p *mockStreamExtractor) BaseLabels() lokilog.LabelsResult {
+ return p.wrappedSP.BaseLabels()
+}
+
+func (p *mockStreamExtractor) Process(ts int64, line []byte, lbs ...labels.Label) (float64, lokilog.LabelsResult, bool) {
+ p.called++
+ return p.wrappedSP.Process(ts, line, lbs...)
+}
+
+func (p *mockStreamExtractor) ProcessString(ts int64, line string, lbs ...labels.Label) (float64, lokilog.LabelsResult, bool) {
+ p.called++
+ return p.wrappedSP.ProcessString(ts, line, lbs...)
}
func Test_store_GetSeries(t *testing.T) {
@@ -1307,13 +1482,17 @@ func TestStore_MultiPeriod(t *testing.T) {
}
-func mustParseLabels(s string) map[string]string {
+func mustParseLabels(s string) []logproto.SeriesIdentifier_LabelsEntry {
l, err := marshal.NewLabelSet(s)
if err != nil {
log.Fatalf("Failed to parse %s", s)
}
- return l
+ result := make([]logproto.SeriesIdentifier_LabelsEntry, 0, len(l))
+ for k, v := range l {
+ result = append(result, logproto.SeriesIdentifier_LabelsEntry{Key: k, Value: v})
+ }
+ return result
}
func parseDate(in string) time.Time {
@@ -1385,6 +1564,9 @@ func Test_OverlappingChunks(t *testing.T) {
Direction: logproto.BACKWARD,
Start: time.Unix(0, 0),
End: time.Unix(0, 10),
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`{foo="bar"}`),
+ },
}})
if err != nil {
t.Errorf("store.SelectLogs() error = %v", err)
@@ -1441,13 +1623,16 @@ func Test_GetSeries(t *testing.T) {
ctx = user.InjectOrgID(context.Background(), "test-user")
expectedSeries = []logproto.SeriesIdentifier{
{
- Labels: map[string]string{"bar": "foo"},
+ Labels: logproto.MustNewSeriesEntries("bar", "foo"),
},
{
- Labels: map[string]string{"foo": "bar", "buzz": "boo"},
+ Labels: logproto.MustNewSeriesEntries(
+ "buzz", "boo",
+ "foo", "bar",
+ ),
},
{
- Labels: map[string]string{"foo": "buzz"},
+ Labels: logproto.MustNewSeriesEntries("foo", "buzz"),
},
}
)
@@ -1479,7 +1664,10 @@ func Test_GetSeries(t *testing.T) {
},
[]logproto.SeriesIdentifier{
{
- Labels: map[string]string{"foo": "bar", "buzz": "boo"},
+ Labels: logproto.MustNewSeriesEntries(
+ "buzz", "boo",
+ "foo", "bar",
+ ),
},
},
},
@@ -1497,6 +1685,15 @@ func Test_GetSeries(t *testing.T) {
} {
tt := tt
t.Run(tt.name, func(t *testing.T) {
+ if tt.req.Selector != "" {
+ tt.req.Plan = &plan.QueryPlan{
+ AST: syntax.MustParseExpr(tt.req.Selector),
+ }
+ } else {
+ tt.req.Plan = &plan.QueryPlan{
+ AST: nil,
+ }
+ }
series, err := store.SelectSeries(ctx, tt.req)
require.NoError(t, err)
require.Equal(t, tt.expectedSeries, series)
diff --git a/pkg/storage/stores/shipper/bloomshipper/block_downloader.go b/pkg/storage/stores/shipper/bloomshipper/block_downloader.go
new file mode 100644
index 0000000000000..81355f78e84ec
--- /dev/null
+++ b/pkg/storage/stores/shipper/bloomshipper/block_downloader.go
@@ -0,0 +1,407 @@
+package bloomshipper
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "path"
+ "path/filepath"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/go-kit/log"
+ "github.com/go-kit/log/level"
+ "github.com/grafana/dskit/services"
+ "github.com/prometheus/client_golang/prometheus"
+ "go.uber.org/atomic"
+ "k8s.io/utils/keymutex"
+
+ "github.com/grafana/loki/pkg/logqlmodel/stats"
+ "github.com/grafana/loki/pkg/queue"
+ v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
+ "github.com/grafana/loki/pkg/storage/chunk/cache"
+ "github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper/config"
+ "github.com/grafana/loki/pkg/util"
+ "github.com/grafana/loki/pkg/util/constants"
+)
+
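+// blockDownloader downloads bloom blocks through a per-tenant request queue; worker goroutines
+// dequeue tasks and hand them to the configured downloadingStrategy.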
+type blockDownloader struct {
+ logger log.Logger
+
+ queueMetrics *queue.Metrics
+ queue *queue.RequestQueue
+
+ limits Limits
+ activeUsersService *util.ActiveUsersCleanupService
+
+ ctx context.Context
+ manager *services.Manager
+ wg sync.WaitGroup
+
+ strategy downloadingStrategy
+}
+
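+// queueLimits adapts the tenant limits to the request queue's per-tenant consumer limit.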
+type queueLimits struct {
+ limits Limits
+}
+
+func (l *queueLimits) MaxConsumers(tenantID string, _ int) int {
+ return l.limits.BloomGatewayBlocksDownloadingParallelism(tenantID)
+}
+
+func newBlockDownloader(config config.Config, blockClient BlockClient, limits Limits, logger log.Logger, reg prometheus.Registerer) (*blockDownloader, error) {
+ queueMetrics := queue.NewMetrics(reg, constants.Loki, "bloom_blocks_downloader")
+ //add cleanup service
+ downloadingQueue := queue.NewRequestQueue(config.BlocksDownloadingQueue.MaxTasksEnqueuedPerTenant, time.Minute, &queueLimits{limits: limits}, queueMetrics)
+ activeUsersService := util.NewActiveUsersCleanupWithDefaultValues(queueMetrics.Cleanup)
+
+ ctx := context.Background()
+ manager, err := services.NewManager(downloadingQueue, activeUsersService)
+ if err != nil {
+ return nil, fmt.Errorf("error creating service manager: %w", err)
+ }
+ err = services.StartManagerAndAwaitHealthy(ctx, manager)
+ if err != nil {
+ return nil, fmt.Errorf("error starting service manager: %w", err)
+ }
+
+ strategy := createDownloadingStrategy(config, blockClient, reg, logger)
+ b := &blockDownloader{
+ ctx: ctx,
+ logger: logger,
+ queueMetrics: queueMetrics,
+ queue: downloadingQueue,
+ strategy: strategy,
+ activeUsersService: activeUsersService,
+ limits: limits,
+ manager: manager,
+ wg: sync.WaitGroup{},
+ }
+
+ for i := 0; i < config.BlocksDownloadingQueue.WorkersCount; i++ {
+ b.wg.Add(1)
+ go b.serveDownloadingTasks(fmt.Sprintf("worker-%d", i))
+ }
+ return b, nil
+}
+
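+// BlockDownloadingTask is a single downloading request enqueued per BlockRef; the result or error
+// is reported back on the provided channels.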
+type BlockDownloadingTask struct {
+ ctx context.Context
+ block BlockRef
+ // ErrCh is a send-only channel to write an error to
+ ErrCh chan<- error
+ // ResultsCh is a send-only channel to return the block querier for the downloaded block
+ ResultsCh chan<- blockWithQuerier
+}
+
+func NewBlockDownloadingTask(ctx context.Context, block BlockRef, resCh chan<- blockWithQuerier, errCh chan<- error) *BlockDownloadingTask {
+ return &BlockDownloadingTask{
+ ctx: ctx,
+ block: block,
+ ErrCh: errCh,
+ ResultsCh: resCh,
+ }
+}
+
+func (d *blockDownloader) serveDownloadingTasks(workerID string) {
+ // defer first, so it is executed last among the deferred functions
+ defer d.wg.Done()
+
+ logger := log.With(d.logger, "worker", workerID)
+ level.Debug(logger).Log("msg", "starting worker")
+
+ d.queue.RegisterConsumerConnection(workerID)
+ defer d.queue.UnregisterConsumerConnection(workerID)
+
+ idx := queue.StartIndexWithLocalQueue
+
+ for {
+ item, newIdx, err := d.queue.Dequeue(d.ctx, idx, workerID)
+ if err != nil {
+ if !errors.Is(err, queue.ErrStopped) && !errors.Is(err, context.Canceled) {
+ level.Error(logger).Log("msg", "failed to dequeue task", "err", err)
+ continue
+ }
+ level.Info(logger).Log("msg", "stopping worker")
+ return
+ }
+ task, ok := item.(*BlockDownloadingTask)
+ if !ok {
+ level.Error(logger).Log("msg", "failed to cast to BlockDownloadingTask", "item", fmt.Sprintf("%+v", item), "type", fmt.Sprintf("%T", item))
+ continue
+ }
+
+ idx = newIdx
+ result, err := d.strategy.downloadBlock(task, logger)
+ if err != nil {
+ task.ErrCh <- err
+ continue
+ }
+ task.ResultsCh <- result
+ continue
+ }
+}
+
+func createDownloadingStrategy(cfg config.Config, blockClient BlockClient, reg prometheus.Registerer, logger log.Logger) downloadingStrategy {
+ if cfg.BlocksCache.EmbeddedCacheConfig.Enabled {
+ blocksCache := NewBlocksCache(cfg, reg, logger)
+ return &cacheDownloadingStrategy{
+ config: cfg,
+ workingDirectory: cfg.WorkingDirectory,
+ blockClient: blockClient,
+ blocksCache: blocksCache,
+ keyMutex: keymutex.NewHashed(cfg.BlocksDownloadingQueue.WorkersCount),
+ }
+ }
+ return &storageDownloadingStrategy{
+ workingDirectory: cfg.WorkingDirectory,
+ blockClient: blockClient,
+ }
+}
+
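+// downloadingStrategy abstracts how a requested block is materialized locally: either served from
+// the embedded blocks cache or downloaded and extracted from storage on every request.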
+type downloadingStrategy interface {
+ downloadBlock(task *BlockDownloadingTask, logger log.Logger) (blockWithQuerier, error)
+ close()
+}
+
+type cacheDownloadingStrategy struct {
+ config config.Config
+ workingDirectory string
+ blockClient BlockClient
+ blocksCache *cache.EmbeddedCache[string, *cachedBlock]
+ keyMutex keymutex.KeyMutex
+}
+
+func (s *cacheDownloadingStrategy) downloadBlock(task *BlockDownloadingTask, logger log.Logger) (blockWithQuerier, error) {
+ blockPath := task.block.BlockPath
+ s.keyMutex.LockKey(blockPath)
+ defer func() {
+ _ = s.keyMutex.UnlockKey(blockPath)
+ }()
+ blockFromCache, exists := s.blocksCache.Get(task.ctx, task.block.BlockPath)
+ if exists {
+ return blockWithQuerier{
+ BlockRef: task.block,
+ closableBlockQuerier: newBlockQuerierFromCache(blockFromCache),
+ }, nil
+ }
+
+ directory, err := downloadBlockToDirectory(logger, task, s.workingDirectory, s.blockClient)
+ if err != nil {
+ return blockWithQuerier{}, err
+ }
+ blockFromCache = newCachedBlock(directory, s.config.BlocksCache.RemoveDirectoryGracefulPeriod, logger)
+ err = s.blocksCache.Store(task.ctx, []string{task.block.BlockPath}, []*cachedBlock{blockFromCache})
+ if err != nil {
+ level.Error(logger).Log("msg", "error storing the block in the cache", "block", blockPath, "err", err)
+ return blockWithQuerier{}, fmt.Errorf("error storing the block %s in the cache : %w", blockPath, err)
+ }
+ return blockWithQuerier{
+ BlockRef: task.block,
+ closableBlockQuerier: newBlockQuerierFromCache(blockFromCache),
+ }, nil
+}
+
+func (s *cacheDownloadingStrategy) close() {
+ s.blocksCache.Stop()
+}
+
+type storageDownloadingStrategy struct {
+ workingDirectory string
+ blockClient BlockClient
+}
+
+func (s *storageDownloadingStrategy) downloadBlock(task *BlockDownloadingTask, logger log.Logger) (blockWithQuerier, error) {
+ directory, err := downloadBlockToDirectory(logger, task, s.workingDirectory, s.blockClient)
+ if err != nil {
+ return blockWithQuerier{}, err
+ }
+ return blockWithQuerier{
+ BlockRef: task.block,
+ closableBlockQuerier: newBlockQuerierFromFS(directory),
+ }, nil
+}
+
+func (s *storageDownloadingStrategy) close() {
+ // noop implementation
+}
+
+func downloadBlockToDirectory(logger log.Logger, task *BlockDownloadingTask, workingDirectory string, blockClient BlockClient) (string, error) {
+ blockPath := task.block.BlockPath
+ level.Debug(logger).Log("msg", "start downloading the block", "block", blockPath)
+ block, err := blockClient.GetBlock(task.ctx, task.block)
+ if err != nil {
+ level.Error(logger).Log("msg", "error downloading the block", "block", blockPath, "err", err)
+ return "", fmt.Errorf("error downloading the block %s : %w", blockPath, err)
+ }
+ directory, err := extractBlock(&block, time.Now(), workingDirectory, logger)
+ if err != nil {
+ level.Error(logger).Log("msg", "error extracting the block", "block", blockPath, "err", err)
+ return "", fmt.Errorf("error extracting the block %s : %w", blockPath, err)
+ }
+ level.Debug(logger).Log("msg", "block has been downloaded and extracted", "block", task.block.BlockPath, "directory", directory)
+ return directory, nil
+}
+
+func (d *blockDownloader) downloadBlocks(ctx context.Context, tenantID string, references []BlockRef) (chan blockWithQuerier, chan error) {
+ d.activeUsersService.UpdateUserTimestamp(tenantID, time.Now())
+ // errCh must be buffered to hold one error per reference: a queue worker may report an error
+ // to this channel before the caller starts reading from it, and an unbuffered send would
+ // deadlock because nothing is reading the channel at that moment.
+ errCh := make(chan error, len(references))
+ blocksCh := make(chan blockWithQuerier, len(references))
+
+ for _, reference := range references {
+ task := NewBlockDownloadingTask(ctx, reference, blocksCh, errCh)
+ level.Debug(d.logger).Log("msg", "enqueuing task to download block", "block", reference.BlockPath)
+ err := d.queue.Enqueue(tenantID, nil, task, nil)
+ if err != nil {
+ errCh <- fmt.Errorf("error enqueuing downloading task for block %s : %w", reference.BlockPath, err)
+ return blocksCh, errCh
+ }
+ }
+ return blocksCh, errCh
+}
+
+type blockWithQuerier struct {
+ BlockRef
+ *closableBlockQuerier
+}
+
+// extractBlock extracts the block archive into a directory under workingDirectory and returns the absolute path to that directory.
+func extractBlock(block *LazyBlock, ts time.Time, workingDirectory string, logger log.Logger) (string, error) {
+ workingDirectoryPath := filepath.Join(workingDirectory, block.BlockPath, strconv.FormatInt(ts.UnixNano(), 10))
+ err := os.MkdirAll(workingDirectoryPath, os.ModePerm)
+ if err != nil {
+ return "", fmt.Errorf("can not create directory to extract the block: %w", err)
+ }
+ archivePath, err := writeDataToTempFile(workingDirectoryPath, block)
+ if err != nil {
+ return "", fmt.Errorf("error writing data to temp file: %w", err)
+ }
+ defer func() {
+ err = os.Remove(archivePath)
+ if err != nil {
+ level.Error(logger).Log("msg", "error removing temp archive file", "err", err)
+ }
+ }()
+ err = extractArchive(archivePath, workingDirectoryPath)
+ if err != nil {
+ return "", fmt.Errorf("error extracting archive: %w", err)
+ }
+ return workingDirectoryPath, nil
+}
+
+func (d *blockDownloader) stop() {
+ _ = services.StopManagerAndAwaitStopped(d.ctx, d.manager)
+ d.wg.Wait()
+ d.strategy.close()
+}
+
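+// closableBlockQuerier couples a BlockQuerier with a Close function that either releases the cached
+// block or deletes the extracted block directory.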
+type closableBlockQuerier struct {
+ *v1.BlockQuerier
+ Close func() error
+}
+
+func newBlockQuerierFromCache(cached *cachedBlock) *closableBlockQuerier {
+ cached.activeQueriers.Inc()
+ return &closableBlockQuerier{
+ BlockQuerier: createBlockQuerier(cached.blockDirectory),
+ Close: func() error {
+ cached.activeQueriers.Dec()
+ return nil
+ },
+ }
+}
+
+func newBlockQuerierFromFS(blockDirectory string) *closableBlockQuerier {
+ return &closableBlockQuerier{
+ BlockQuerier: createBlockQuerier(blockDirectory),
+ Close: func() error {
+ return deleteFolder(blockDirectory)
+ },
+ }
+}
+
+func createBlockQuerier(directory string) *v1.BlockQuerier {
+ reader := v1.NewDirectoryBlockReader(directory)
+ block := v1.NewBlock(reader)
+ return v1.NewBlockQuerier(block)
+}
+
+func NewBlocksCache(config config.Config, reg prometheus.Registerer, logger log.Logger) *cache.EmbeddedCache[string, *cachedBlock] {
+ return cache.NewTypedEmbeddedCache[string, *cachedBlock](
+ "bloom-blocks-cache",
+ config.BlocksCache.EmbeddedCacheConfig,
+ reg,
+ logger,
+ stats.BloomBlocksCache,
+ calculateBlockDirectorySize,
+ func(key string, value *cachedBlock) {
+ value.removeDirectoryAsync()
+ })
+}
+
+func calculateBlockDirectorySize(entry *cache.Entry[string, *cachedBlock]) uint64 {
+ value := entry.Value
+ bloomFileStats, _ := os.Lstat(path.Join(value.blockDirectory, v1.BloomFileName))
+ seriesFileStats, _ := os.Lstat(path.Join(value.blockDirectory, v1.SeriesFileName))
+ return uint64(bloomFileStats.Size() + seriesFileStats.Size())
+}
+
+func newCachedBlock(blockDirectory string, removeDirectoryTimeout time.Duration, logger log.Logger) *cachedBlock {
+ return &cachedBlock{
+ blockDirectory: blockDirectory,
+ removeDirectoryTimeout: removeDirectoryTimeout,
+ logger: logger,
+ activeQueriersCheckInterval: defaultActiveQueriersCheckInterval,
+ }
+}
+
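+// cachedBlock tracks how many queriers still use an extracted block directory so that removal can be
+// deferred until the directory is no longer in use.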
+type cachedBlock struct {
+ blockDirectory string
+ removeDirectoryTimeout time.Duration
+ activeQueriers atomic.Int32
+ logger log.Logger
+ activeQueriersCheckInterval time.Duration
+}
+
+const defaultActiveQueriersCheckInterval = 100 * time.Millisecond
+
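+// removeDirectoryAsync deletes the block directory once no queriers are active, or force deletes it
+// after removeDirectoryTimeout.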
+func (b *cachedBlock) removeDirectoryAsync() {
+ go func() {
+ timeout := time.After(b.removeDirectoryTimeout)
+ ticker := time.NewTicker(b.activeQueriersCheckInterval)
+ defer ticker.Stop()
+ for {
+ select {
+ case <-ticker.C:
+ if b.activeQueriers.Load() == 0 {
+ err := deleteFolder(b.blockDirectory)
+ if err == nil {
+ return
+ }
+ level.Error(b.logger).Log("msg", "error deleting block directory", "err", err)
+ }
+ case <-timeout:
+ level.Warn(b.logger).Log("msg", "force deleting block folder after timeout", "timeout", b.removeDirectoryTimeout)
+ err := deleteFolder(b.blockDirectory)
+ if err == nil {
+ return
+ }
+ level.Error(b.logger).Log("msg", "error force deleting block directory", "err", err)
+ }
+ }
+ }()
+}
+
+func deleteFolder(folderPath string) error {
+ err := os.RemoveAll(folderPath)
+ if err != nil {
+ return fmt.Errorf("error deleting bloom block directory: %w", err)
+ }
+ return nil
+}
diff --git a/pkg/storage/stores/shipper/bloomshipper/block_downloader_test.go b/pkg/storage/stores/shipper/bloomshipper/block_downloader_test.go
new file mode 100644
index 0000000000000..a28c76c12f785
--- /dev/null
+++ b/pkg/storage/stores/shipper/bloomshipper/block_downloader_test.go
@@ -0,0 +1,426 @@
+package bloomshipper
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strconv"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/go-kit/log"
+ "github.com/google/uuid"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/atomic"
+
+ v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
+ "github.com/grafana/loki/pkg/storage/chunk/cache"
+ "github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper/config"
+ "github.com/grafana/loki/pkg/util"
+ "github.com/grafana/loki/pkg/validation"
+)
+
+func Test_blockDownloader_downloadBlocks(t *testing.T) {
+ overrides, err := validation.NewOverrides(validation.Limits{BloomGatewayBlocksDownloadingParallelism: 20}, nil)
+ require.NoError(t, err)
+ workingDirectory := t.TempDir()
+
+ blockReferences, blockClient := createFakeBlocks(t, 20)
+ blockClient.responseDelay = 100 * time.Millisecond
+ workersCount := 10
+ downloader, err := newBlockDownloader(config.Config{
+ WorkingDirectory: workingDirectory,
+ BlocksDownloadingQueue: config.DownloadingQueueConfig{
+ WorkersCount: workersCount,
+ MaxTasksEnqueuedPerTenant: 20,
+ },
+ }, blockClient, overrides, log.NewNopLogger(), prometheus.DefaultRegisterer)
+ require.NoError(t, err)
+ blocksCh, errorsCh := downloader.downloadBlocks(context.Background(), "fake", blockReferences)
+ downloadedBlocks := make(map[string]any, len(blockReferences))
+ done := make(chan bool)
+ go func() {
+ for i := 0; i < 20; i++ {
+ block := <-blocksCh
+ downloadedBlocks[block.BlockPath] = nil
+ }
+ done <- true
+ }()
+
+ select {
+ // 20 blocks, 10 workers, fixed delay of 100ms per block: the total download time should be ~200ms.
+ case <-time.After(2 * time.Second):
+ t.Fatalf("test must complete before the timeout")
+ case err := <-errorsCh:
+ require.NoError(t, err)
+ case <-done:
+ }
+ require.Len(t, downloadedBlocks, 20, "all 20 blocks must be downloaded")
+
+ // We want all workers to be connected to the queue
+ require.Equal(t, workersCount, int(downloader.queue.GetConnectedConsumersMetric()))
+
+ downloader.stop()
+
+ // We want all workers to be disconnected from the queue
+ require.Equal(t, 0, int(downloader.queue.GetConnectedConsumersMetric()))
+}
+
+func Test_blockDownloader_downloadBlock(t *testing.T) {
+ tests := map[string]struct {
+ cacheEnabled bool
+ expectedTotalGetBlocksCalls int
+ }{
+ "cache disabled": {
+ cacheEnabled: false,
+ expectedTotalGetBlocksCalls: 40,
+ },
+ "cache enabled": {
+ cacheEnabled: true,
+ expectedTotalGetBlocksCalls: 20,
+ },
+ }
+ for name, testData := range tests {
+ t.Run(name, func(t *testing.T) {
+ overrides, err := validation.NewOverrides(validation.Limits{BloomGatewayBlocksDownloadingParallelism: 20}, nil)
+ require.NoError(t, err)
+ workingDirectory := t.TempDir()
+
+ blockReferences, blockClient := createFakeBlocks(t, 20)
+ workersCount := 10
+ downloader, err := newBlockDownloader(config.Config{
+ WorkingDirectory: workingDirectory,
+ BlocksDownloadingQueue: config.DownloadingQueueConfig{
+ WorkersCount: workersCount,
+ MaxTasksEnqueuedPerTenant: 20,
+ },
+ BlocksCache: config.BlocksCacheConfig{
+ EmbeddedCacheConfig: cache.EmbeddedCacheConfig{
+ Enabled: testData.cacheEnabled,
+ MaxSizeItems: 20,
+ },
+ RemoveDirectoryGracefulPeriod: 1 * time.Second,
+ },
+ }, blockClient, overrides, log.NewNopLogger(), prometheus.NewRegistry())
+ t.Cleanup(downloader.stop)
+ require.NoError(t, err)
+
+ blocksCh, errorsCh := downloader.downloadBlocks(context.Background(), "fake", blockReferences)
+ downloadedBlocks := make(map[string]any, len(blockReferences))
+ done := make(chan bool)
+ go func() {
+ for i := 0; i < 20; i++ {
+ block := <-blocksCh
+ downloadedBlocks[block.BlockPath] = nil
+ }
+ done <- true
+ }()
+
+ select {
+ case <-time.After(2 * time.Second):
+ t.Fatalf("test must complete before the timeout")
+ case err := <-errorsCh:
+ require.NoError(t, err)
+ case <-done:
+ }
+ require.Len(t, downloadedBlocks, 20, "all 20 blocks must be downloaded")
+ require.Equal(t, 20, blockClient.getBlockCalls)
+
+ blocksCh, errorsCh = downloader.downloadBlocks(context.Background(), "fake", blockReferences)
+ downloadedBlocks = make(map[string]any, len(blockReferences))
+ done = make(chan bool)
+ go func() {
+ for i := 0; i < 20; i++ {
+ block := <-blocksCh
+ downloadedBlocks[block.BlockPath] = nil
+ }
+ done <- true
+ }()
+
+ select {
+ case <-time.After(2 * time.Second):
+ t.Fatalf("test must complete before the timeout")
+ case err := <-errorsCh:
+ require.NoError(t, err)
+ case <-done:
+ }
+ require.Len(t, downloadedBlocks, 20, "all 20 blocks must be downloaded")
+ require.Equal(t, testData.expectedTotalGetBlocksCalls, blockClient.getBlockCalls)
+ })
+ }
+}
+
+func Test_blockDownloader_downloadBlock_deduplication(t *testing.T) {
+ tests := map[string]struct {
+ cacheEnabled bool
+ expectedTotalGetBlocksCalls int
+ }{
+ "requests to blockClient must be deduplicated by blockPath if cache is enabled": {
+ cacheEnabled: true,
+ expectedTotalGetBlocksCalls: 1,
+ },
+ "requests to blockClient must NOT be deduplicated by blockPath if cache is disabled": {
+ cacheEnabled: false,
+ expectedTotalGetBlocksCalls: 10,
+ },
+ }
+ for name, testData := range tests {
+ t.Run(name, func(t *testing.T) {
+
+ overrides, err := validation.NewOverrides(validation.Limits{BloomGatewayBlocksDownloadingParallelism: 20}, nil)
+ require.NoError(t, err)
+ workingDirectory := t.TempDir()
+
+ blockReferences, blockClient := createFakeBlocks(t, 1)
+ workersCount := 10
+ downloader, err := newBlockDownloader(config.Config{
+ WorkingDirectory: workingDirectory,
+ BlocksDownloadingQueue: config.DownloadingQueueConfig{
+ WorkersCount: workersCount,
+ MaxTasksEnqueuedPerTenant: 20,
+ },
+ BlocksCache: config.BlocksCacheConfig{
+ EmbeddedCacheConfig: cache.EmbeddedCacheConfig{
+ Enabled: testData.cacheEnabled,
+ MaxSizeItems: 20,
+ },
+ RemoveDirectoryGracefulPeriod: 1 * time.Second,
+ },
+ }, blockClient, overrides, log.NewNopLogger(), prometheus.NewRegistry())
+ t.Cleanup(downloader.stop)
+ require.NoError(t, err)
+
+ blocksDownloadedCount := atomic.Uint32{}
+ mutex := sync.Mutex{}
+ multiError := util.MultiError{}
+ waitGroup := sync.WaitGroup{}
+ for i := 0; i < 10; i++ {
+ waitGroup.Add(1)
+ go func() {
+ defer waitGroup.Done()
+ blocksCh, errCh := downloader.downloadBlocks(context.Background(), "fake", blockReferences)
+ var err error
+ select {
+ case <-blocksCh:
+ blocksDownloadedCount.Inc()
+ case downloaderErr := <-errCh:
+ err = downloaderErr
+ case <-time.After(1 * time.Second):
+ err = fmt.Errorf("timeout in the test waiting for a single block to be downloaded")
+ }
+ if err == nil {
+ return
+ }
+ mutex.Lock()
+ defer mutex.Unlock()
+ multiError.Add(err)
+ }()
+ }
+ waitGroup.Wait()
+
+ require.NoError(t, multiError.Err())
+ require.Equal(t, uint32(10), blocksDownloadedCount.Load())
+ require.Equal(t, testData.expectedTotalGetBlocksCalls, blockClient.getBlockCalls)
+ })
+ }
+}
+
+func Test_cachedBlock(t *testing.T) {
+ tests := map[string]struct {
+ releaseQuerier bool
+ expectDirectoryToBeDeletedWithin time.Duration
+ }{
+ "expected block directory to be removed once all queriers are released": {
+ releaseQuerier: true,
+ // four times greater than activeQueriersCheckInterval
+ expectDirectoryToBeDeletedWithin: 200 * time.Millisecond,
+ },
+ "expected block directory to be force removed after timeout": {
+ releaseQuerier: false,
+ // four times greater than removeDirectoryTimeout
+ expectDirectoryToBeDeletedWithin: 2 * time.Second,
+ },
+ }
+ for name, testData := range tests {
+ t.Run(name, func(t *testing.T) {
+ extractedBlockDirectory := t.TempDir()
+ blockFilePath, _, _ := createBlockArchive(t)
+ err := extractArchive(blockFilePath, extractedBlockDirectory)
+ require.NoError(t, err)
+ require.DirExists(t, extractedBlockDirectory)
+
+ cached := &cachedBlock{
+ blockDirectory: extractedBlockDirectory,
+ removeDirectoryTimeout: 500 * time.Millisecond,
+ activeQueriersCheckInterval: 50 * time.Millisecond,
+ logger: log.NewLogfmtLogger(os.Stderr),
+ }
+ cached.activeQueriers.Inc()
+ cached.removeDirectoryAsync()
+ //ensure directory exists
+ require.Never(t, func() bool {
+ return directoryDoesNotExist(extractedBlockDirectory)
+ }, 200*time.Millisecond, 50*time.Millisecond)
+
+ if testData.releaseQuerier {
+ cached.activeQueriers.Dec()
+ }
+ //ensure directory does not exist
+ require.Eventually(t, func() bool {
+ return directoryDoesNotExist(extractedBlockDirectory)
+ }, testData.expectDirectoryToBeDeletedWithin, 50*time.Millisecond)
+ })
+ }
+}
+
+func Test_closableBlockQuerier(t *testing.T) {
+ t.Run("cached", func(t *testing.T) {
+ blockFilePath, _, _ := createBlockArchive(t)
+ extractedBlockDirectory := t.TempDir()
+ err := extractArchive(blockFilePath, extractedBlockDirectory)
+ require.NoError(t, err)
+
+ cached := &cachedBlock{blockDirectory: extractedBlockDirectory, removeDirectoryTimeout: 100 * time.Millisecond}
+ require.Equal(t, int32(0), cached.activeQueriers.Load())
+ querier := newBlockQuerierFromCache(cached)
+ require.Equal(t, int32(1), cached.activeQueriers.Load())
+ require.NoError(t, querier.Close())
+ require.Equal(t, int32(0), cached.activeQueriers.Load())
+ })
+
+ t.Run("file system", func(t *testing.T) {
+ blockFilePath, _, _ := createBlockArchive(t)
+ extractedBlockDirectory := t.TempDir()
+ err := extractArchive(blockFilePath, extractedBlockDirectory)
+ require.NoError(t, err)
+
+ querier := newBlockQuerierFromFS(extractedBlockDirectory)
+ require.DirExists(t, extractedBlockDirectory)
+
+ require.NoError(t, querier.Close())
+
+ //ensure directory does not exist
+ require.Eventually(t, func() bool {
+ return directoryDoesNotExist(extractedBlockDirectory)
+ }, 1*time.Second, 100*time.Millisecond)
+ })
+}
+
+// createFakeBlocks creates fake block archives and returns their BlockRefs along with a mockBlockClient that serves them
+func createFakeBlocks(t *testing.T, count int) ([]BlockRef, *mockBlockClient) {
+ mockData := make(map[string]blockSupplier, count)
+ refs := make([]BlockRef, 0, count)
+ for i := 0; i < count; i++ {
+ archivePath, _, _ := createBlockArchive(t)
+ _, err := os.OpenFile(archivePath, os.O_RDONLY, 0700)
+ //ensure file can be opened
+ require.NoError(t, err)
+ blockRef := BlockRef{
+ BlockPath: fmt.Sprintf("block-path-%d", i),
+ }
+ mockData[blockRef.BlockPath] = func() LazyBlock {
+ file, _ := os.OpenFile(archivePath, os.O_RDONLY, 0700)
+ return LazyBlock{
+ BlockRef: blockRef,
+ Data: file,
+ }
+ }
+ refs = append(refs, blockRef)
+ }
+ return refs, &mockBlockClient{mockData: mockData}
+}
+
+type blockSupplier func() LazyBlock
+
+type mockBlockClient struct {
+ responseDelay time.Duration
+ mockData map[string]blockSupplier
+ getBlockCalls int
+}
+
+func (m *mockBlockClient) GetBlock(_ context.Context, reference BlockRef) (LazyBlock, error) {
+ m.getBlockCalls++
+ time.Sleep(m.responseDelay)
+ supplier, exists := m.mockData[reference.BlockPath]
+ if exists {
+ return supplier(), nil
+ }
+
+ return LazyBlock{}, fmt.Errorf("block %s is not found in mockData", reference.BlockPath)
+}
+
+func (m *mockBlockClient) PutBlocks(_ context.Context, _ []Block) ([]Block, error) {
+ panic("implement me")
+}
+
+func (m *mockBlockClient) DeleteBlocks(_ context.Context, _ []BlockRef) error {
+ panic("implement me")
+}
+
+func Test_blockDownloader_extractBlock(t *testing.T) {
+ blockFilePath, bloomFileContent, seriesFileContent := createBlockArchive(t)
+ blockFile, err := os.OpenFile(blockFilePath, os.O_RDONLY, 0700)
+ require.NoError(t, err)
+
+ workingDir := t.TempDir()
+ ts := time.Now().UTC()
+ block := LazyBlock{
+ BlockRef: BlockRef{BlockPath: "first-period-19621/tenantA/metas/ff-fff-1695272400-1695276000-aaa"},
+ Data: blockFile,
+ }
+
+ actualPath, err := extractBlock(&block, ts, workingDir, nil)
+
+ require.NoError(t, err)
+ expectedPath := filepath.Join(workingDir, block.BlockPath, strconv.FormatInt(ts.UnixNano(), 10))
+ require.Equal(t, expectedPath, actualPath,
+ "expected archive to be extracted to working directory under the same path as blockPath and with timestamp suffix")
+ require.FileExists(t, filepath.Join(expectedPath, v1.BloomFileName))
+ require.FileExists(t, filepath.Join(expectedPath, v1.SeriesFileName))
+
+ actualBloomFileContent, err := os.ReadFile(filepath.Join(expectedPath, v1.BloomFileName))
+ require.NoError(t, err)
+ require.Equal(t, bloomFileContent, string(actualBloomFileContent))
+
+ actualSeriesFileContent, err := os.ReadFile(filepath.Join(expectedPath, v1.SeriesFileName))
+ require.NoError(t, err)
+ require.Equal(t, seriesFileContent, string(actualSeriesFileContent))
+}
+
+func directoryDoesNotExist(path string) bool {
+ _, err := os.Lstat(path)
+ return err != nil
+}
+
+const testArchiveFileName = "test-block-archive"
+
+func createBlockArchive(t *testing.T) (string, string, string) {
+ dir := t.TempDir()
+ mockBlockDir := filepath.Join(dir, "mock-block-dir")
+ err := os.MkdirAll(mockBlockDir, 0777)
+ require.NoError(t, err)
+ bloomFile, err := os.Create(filepath.Join(mockBlockDir, v1.BloomFileName))
+ require.NoError(t, err)
+ bloomFileContent := uuid.NewString()
+ _, err = io.Copy(bloomFile, bytes.NewReader([]byte(bloomFileContent)))
+ require.NoError(t, err)
+
+ seriesFile, err := os.Create(filepath.Join(mockBlockDir, v1.SeriesFileName))
+ require.NoError(t, err)
+ seriesFileContent := uuid.NewString()
+ _, err = io.Copy(seriesFile, bytes.NewReader([]byte(seriesFileContent)))
+ require.NoError(t, err)
+
+ blockFilePath := filepath.Join(dir, testArchiveFileName)
+ file, err := os.OpenFile(blockFilePath, os.O_CREATE|os.O_RDWR, 0700)
+ require.NoError(t, err)
+ err = v1.TarGz(file, v1.NewDirectoryBlockReader(mockBlockDir))
+ require.NoError(t, err)
+
+ return blockFilePath, bloomFileContent, seriesFileContent
+}
diff --git a/pkg/storage/stores/shipper/bloomshipper/client.go b/pkg/storage/stores/shipper/bloomshipper/client.go
index a68959e1d908e..5636d1916f183 100644
--- a/pkg/storage/stores/shipper/bloomshipper/client.go
+++ b/pkg/storage/stores/shipper/bloomshipper/client.go
@@ -11,11 +11,11 @@ import (
"strings"
"time"
- "github.com/prometheus/common/model"
-
"github.com/grafana/dskit/concurrency"
+ "github.com/prometheus/common/model"
"github.com/grafana/loki/pkg/storage"
+ v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
"github.com/grafana/loki/pkg/storage/chunk/client"
"github.com/grafana/loki/pkg/storage/config"
"github.com/grafana/loki/pkg/util/math"
@@ -37,6 +37,16 @@ type Ref struct {
Checksum uint32
}
+// Cmp returns the fingerprint's position relative to the bounds
+func (b Ref) Cmp(fp uint64) v1.BoundsCheck {
+ if fp < b.MinFingerprint {
+ return v1.Before
+ } else if fp > b.MaxFingerprint {
+ return v1.After
+ }
+ return v1.Overlap
+}
+
type BlockRef struct {
Ref
IndexPath string
@@ -72,14 +82,18 @@ type MetaClient interface {
DeleteMeta(ctx context.Context, meta Meta) error
}
-type Block struct {
+type LazyBlock struct {
BlockRef
-
Data io.ReadCloser
}
+type Block struct {
+ BlockRef
+ Data io.ReadSeekCloser
+}
+
type BlockClient interface {
- GetBlocks(ctx context.Context, references []BlockRef) (chan Block, chan error)
+ GetBlock(ctx context.Context, reference BlockRef) (LazyBlock, error)
PutBlocks(ctx context.Context, blocks []Block) ([]Block, error)
DeleteBlocks(ctx context.Context, blocks []BlockRef) error
}
@@ -181,51 +195,33 @@ func findPeriod(configs []config.PeriodConfig, timestamp int64) (config.DayTime,
}
return config.DayTime{}, fmt.Errorf("can not find period for timestamp %d", timestamp)
}
+
func (b *BloomClient) DeleteMeta(ctx context.Context, meta Meta) error {
periodFrom, err := findPeriod(b.periodicConfigs, meta.StartTimestamp)
if err != nil {
- return fmt.Errorf("error updloading meta file: %w", err)
+ return err
}
key := createMetaObjectKey(meta.MetaRef.Ref)
return b.periodicObjectClients[periodFrom].DeleteObject(ctx, key)
}
-// GetBlocks downloads all the blocks from objectStorage in parallel and sends the downloaded blocks
-// via the channel Block that is closed only if all the blocks are downloaded without errors.
-// If an error happens, the error will be sent via error channel.
-func (b *BloomClient) GetBlocks(ctx context.Context, references []BlockRef) (chan Block, chan error) {
- blocksChannel := make(chan Block, len(references))
- errChannel := make(chan error)
- go func() {
- //todo move concurrency to the config
- err := concurrency.ForEachJob(ctx, len(references), 100, func(ctx context.Context, idx int) error {
- reference := references[idx]
- period, err := findPeriod(b.periodicConfigs, reference.StartTimestamp)
- if err != nil {
- return fmt.Errorf("error while period lookup: %w", err)
- }
- objectClient := b.periodicObjectClients[period]
- readCloser, _, err := objectClient.GetObject(ctx, createBlockObjectKey(reference.Ref))
- if err != nil {
- return fmt.Errorf("error while fetching object from storage: %w", err)
- }
- blocksChannel <- Block{
- BlockRef: reference,
- Data: readCloser,
- }
- return nil
- })
- if err != nil {
- errChannel <- fmt.Errorf("error downloading block file: %w", err)
- return
- }
- //close blocks channel only if there is no error
- close(blocksChannel)
- }()
- return blocksChannel, errChannel
+// GetBlock downloads a single block from object storage and returns it as a LazyBlock
+func (b *BloomClient) GetBlock(ctx context.Context, reference BlockRef) (LazyBlock, error) {
+ period, err := findPeriod(b.periodicConfigs, reference.StartTimestamp)
+ if err != nil {
+ return LazyBlock{}, fmt.Errorf("error while period lookup: %w", err)
+ }
+ objectClient := b.periodicObjectClients[period]
+ readCloser, _, err := objectClient.GetObject(ctx, createBlockObjectKey(reference.Ref))
+ if err != nil {
+ return LazyBlock{}, fmt.Errorf("error while fetching object from storage: %w", err)
+ }
+ return LazyBlock{
+ BlockRef: reference,
+ Data: readCloser,
+ }, nil
}
-// TODO zip (archive) blocks before uploading to storage
func (b *BloomClient) PutBlocks(ctx context.Context, blocks []Block) ([]Block, error) {
results := make([]Block, len(blocks))
//todo move concurrency to the config
@@ -237,17 +233,13 @@ func (b *BloomClient) PutBlocks(ctx context.Context, blocks []Block) ([]Block, e
period, err := findPeriod(b.periodicConfigs, block.StartTimestamp)
if err != nil {
- return fmt.Errorf("error updloading block file: %w", err)
+ return fmt.Errorf("error uploading block file: %w", err)
}
key := createBlockObjectKey(block.Ref)
objectClient := b.periodicObjectClients[period]
- data, err := io.ReadAll(block.Data)
+ err = objectClient.PutObject(ctx, key, block.Data)
if err != nil {
- return fmt.Errorf("error while reading object data: %w", err)
- }
- err = objectClient.PutObject(ctx, key, bytes.NewReader(data))
- if err != nil {
- return fmt.Errorf("error updloading block file: %w", err)
+ return fmt.Errorf("error uploading block file: %w", err)
}
block.BlockPath = key
results[idx] = block
@@ -288,13 +280,9 @@ func (b *BloomClient) downloadMeta(ctx context.Context, metaRef MetaRef, client
if err != nil {
return Meta{}, fmt.Errorf("error downloading meta file %s : %w", metaRef.FilePath, err)
}
- defer func() { _ = reader.Close() }()
+ defer reader.Close()
- buf, err := io.ReadAll(reader)
- if err != nil {
- return Meta{}, fmt.Errorf("error reading meta file %s: %w", metaRef.FilePath, err)
- }
- err = json.Unmarshal(buf, &meta)
+ err = json.NewDecoder(reader).Decode(&meta)
if err != nil {
return Meta{}, fmt.Errorf("error unmarshalling content of meta file %s: %w", metaRef.FilePath, err)
}
diff --git a/pkg/storage/stores/shipper/bloomshipper/client_test.go b/pkg/storage/stores/shipper/bloomshipper/client_test.go
index 4c4b6f855a8ec..7267856a43155 100644
--- a/pkg/storage/stores/shipper/bloomshipper/client_test.go
+++ b/pkg/storage/stores/shipper/bloomshipper/client_test.go
@@ -32,7 +32,7 @@ var (
)
func Test_BloomClient_GetMetas(t *testing.T) {
- shipper := createShipper(t)
+ shipper := createClient(t)
var expected []Meta
folder1 := shipper.storageConfig.NamedStores.Filesystem["folder-1"].Directory
@@ -99,12 +99,12 @@ func Test_BloomClient_PutMeta(t *testing.T) {
}
for name, data := range tests {
t.Run(name, func(t *testing.T) {
- shipper := createShipper(t)
+ bloomClient := createClient(t)
- err := shipper.PutMeta(context.Background(), data.source)
+ err := bloomClient.PutMeta(context.Background(), data.source)
require.NoError(t, err)
- directory := shipper.storageConfig.NamedStores.Filesystem[data.expectedStorage].Directory
+ directory := bloomClient.storageConfig.NamedStores.Filesystem[data.expectedStorage].Directory
filePath := filepath.Join(directory, data.expectedFilePath)
require.FileExists(t, filePath)
content, err := os.ReadFile(filePath)
@@ -155,15 +155,15 @@ func Test_BloomClient_DeleteMeta(t *testing.T) {
}
for name, data := range tests {
t.Run(name, func(t *testing.T) {
- shipper := createShipper(t)
- directory := shipper.storageConfig.NamedStores.Filesystem[data.expectedStorage].Directory
+ bloomClient := createClient(t)
+ directory := bloomClient.storageConfig.NamedStores.Filesystem[data.expectedStorage].Directory
file := filepath.Join(directory, data.expectedFilePath)
err := os.MkdirAll(file[:strings.LastIndex(file, delimiter)], 0755)
require.NoError(t, err)
err = os.WriteFile(file, []byte("dummy content"), 0700)
require.NoError(t, err)
- err = shipper.DeleteMeta(context.Background(), data.source)
+ err = bloomClient.DeleteMeta(context.Background(), data.source)
require.NoError(t, err)
require.NoFileExists(t, file)
@@ -173,8 +173,8 @@ func Test_BloomClient_DeleteMeta(t *testing.T) {
}
func Test_BloomClient_GetBlocks(t *testing.T) {
- shipper := createShipper(t)
- fsNamedStores := shipper.storageConfig.NamedStores.Filesystem
+ bloomClient := createClient(t)
+ fsNamedStores := bloomClient.storageConfig.NamedStores.Filesystem
firstBlockPath := "bloom/first-period-19621/tenantA/blooms/eeee-ffff/1695272400-1695276000-1"
firstBlockFullPath := filepath.Join(fsNamedStores["folder-1"].Directory, firstBlockPath)
firstBlockData := createBlockFile(t, firstBlockFullPath)
@@ -209,44 +209,21 @@ func Test_BloomClient_GetBlocks(t *testing.T) {
BlockPath: secondBlockPath,
}
- blocksToDownload := []BlockRef{firstBlockRef, secondBlockRef}
-
- blocksCh, errorsCh := shipper.GetBlocks(context.Background(), blocksToDownload)
- blocks := make(map[string]string)
- func() {
- timout := time.After(5 * time.Second)
- for {
- select {
- case <-timout:
- t.Fatalf("the test had to be completed before the timeout")
- return
- case err := <-errorsCh:
- require.NoError(t, err)
- case block, ok := <-blocksCh:
- if !ok {
- return
- }
- blockData, err := io.ReadAll(block.Data)
- require.NoError(t, err)
- blocks[block.BlockRef.BlockPath] = string(blockData)
-
- }
- }
- }()
-
- firstBlockActualData, exists := blocks[firstBlockRef.BlockPath]
- require.Truef(t, exists, "data for the first block must be present in the results: %+v", blocks)
- require.Equal(t, firstBlockData, firstBlockActualData)
-
- secondBlockActualData, exists := blocks[secondBlockRef.BlockPath]
- require.True(t, exists, "data for the second block must be present in the results: %+v", blocks)
- require.Equal(t, secondBlockData, secondBlockActualData)
+ downloadedFirstBlock, err := bloomClient.GetBlock(context.Background(), firstBlockRef)
+ require.NoError(t, err)
+ firstBlockActualData, err := io.ReadAll(downloadedFirstBlock.Data)
+ require.NoError(t, err)
+ require.Equal(t, firstBlockData, string(firstBlockActualData))
- require.Len(t, blocks, 2)
+ downloadedSecondBlock, err := bloomClient.GetBlock(context.Background(), secondBlockRef)
+ require.NoError(t, err)
+ secondBlockActualData, err := io.ReadAll(downloadedSecondBlock.Data)
+ require.NoError(t, err)
+ require.Equal(t, secondBlockData, string(secondBlockActualData))
}
func Test_BloomClient_PutBlocks(t *testing.T) {
- shipper := createShipper(t)
+ bloomClient := createClient(t)
blockForFirstFolderData := "data1"
blockForFirstFolder := Block{
BlockRef: BlockRef{
@@ -281,7 +258,7 @@ func Test_BloomClient_PutBlocks(t *testing.T) {
Data: aws_io.ReadSeekNopCloser{ReadSeeker: bytes.NewReader([]byte(blockForSecondFolderData))},
}
- results, err := shipper.PutBlocks(context.Background(), []Block{blockForFirstFolder, blockForSecondFolder})
+ results, err := bloomClient.PutBlocks(context.Background(), []Block{blockForFirstFolder, blockForSecondFolder})
require.NoError(t, err)
require.Len(t, results, 2)
firstResultBlock := results[0]
@@ -295,7 +272,7 @@ func Test_BloomClient_PutBlocks(t *testing.T) {
require.Equal(t, blockForFirstFolder.EndTimestamp, firstResultBlock.EndTimestamp)
require.Equal(t, blockForFirstFolder.Checksum, firstResultBlock.Checksum)
require.Equal(t, blockForFirstFolder.IndexPath, firstResultBlock.IndexPath)
- folder1 := shipper.storageConfig.NamedStores.Filesystem["folder-1"].Directory
+ folder1 := bloomClient.storageConfig.NamedStores.Filesystem["folder-1"].Directory
savedFilePath := filepath.Join(folder1, path)
require.FileExists(t, savedFilePath)
savedData, err := os.ReadFile(savedFilePath)
@@ -313,7 +290,7 @@ func Test_BloomClient_PutBlocks(t *testing.T) {
require.Equal(t, blockForSecondFolder.EndTimestamp, secondResultBlock.EndTimestamp)
require.Equal(t, blockForSecondFolder.Checksum, secondResultBlock.Checksum)
require.Equal(t, blockForSecondFolder.IndexPath, secondResultBlock.IndexPath)
- folder2 := shipper.storageConfig.NamedStores.Filesystem["folder-2"].Directory
+ folder2 := bloomClient.storageConfig.NamedStores.Filesystem["folder-2"].Directory
savedFilePath = filepath.Join(folder2, path)
require.FileExists(t, savedFilePath)
@@ -323,8 +300,8 @@ func Test_BloomClient_PutBlocks(t *testing.T) {
}
func Test_BloomClient_DeleteBlocks(t *testing.T) {
- shipper := createShipper(t)
- fsNamedStores := shipper.storageConfig.NamedStores.Filesystem
+ bloomClient := createClient(t)
+ fsNamedStores := bloomClient.storageConfig.NamedStores.Filesystem
block1Path := filepath.Join(fsNamedStores["folder-1"].Directory, "bloom/first-period-19621/tenantA/blooms/eeee-ffff/1695272400-1695276000-1")
createBlockFile(t, block1Path)
block2Path := filepath.Join(fsNamedStores["folder-2"].Directory, "bloom/second-period-19624/tenantA/blooms/aaaa-bbbb/1695531600-1695535200-2")
@@ -358,7 +335,7 @@ func Test_BloomClient_DeleteBlocks(t *testing.T) {
IndexPath: uuid.New().String(),
},
}
- err := shipper.DeleteBlocks(context.Background(), blocksToDelete)
+ err := bloomClient.DeleteBlocks(context.Background(), blocksToDelete)
require.NoError(t, err)
require.NoFileExists(t, block1Path)
require.NoFileExists(t, block2Path)
@@ -500,7 +477,7 @@ func Test_createMetaRef(t *testing.T) {
}
}
-func createShipper(t *testing.T) *BloomClient {
+func createClient(t *testing.T) *BloomClient {
periodicConfigs := createPeriodConfigs()
namedStores := storage.NamedStores{
Filesystem: map[string]storage.NamedFSConfig{
@@ -513,9 +490,9 @@ func createShipper(t *testing.T) *BloomClient {
metrics := storage.NewClientMetrics()
t.Cleanup(metrics.Unregister)
- bshipper, err := NewBloomClient(periodicConfigs, storageConfig, metrics)
+ bloomClient, err := NewBloomClient(periodicConfigs, storageConfig, metrics)
require.NoError(t, err)
- return bshipper
+ return bloomClient
}
func createPeriodConfigs() []config.PeriodConfig {
diff --git a/pkg/storage/stores/shipper/bloomshipper/compress_utils.go b/pkg/storage/stores/shipper/bloomshipper/compress_utils.go
new file mode 100644
index 0000000000000..99cde47e91b4d
--- /dev/null
+++ b/pkg/storage/stores/shipper/bloomshipper/compress_utils.go
@@ -0,0 +1,79 @@
+package bloomshipper
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/go-kit/log"
+ "github.com/go-kit/log/level"
+
+ v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
+)
+
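+// CompressBloomBlock archives the block directory at localDst into a tar.gz file at archivePath and
+// returns it as a Block ready for upload.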
+func CompressBloomBlock(ref BlockRef, archivePath, localDst string, logger log.Logger) (Block, error) {
+ blockToUpload := Block{}
+ archiveFile, err := os.Create(archivePath)
+ if err != nil {
+ return blockToUpload, err
+ }
+
+ err = v1.TarGz(archiveFile, v1.NewDirectoryBlockReader(localDst))
+ if err != nil {
+ level.Error(logger).Log("msg", "creating bloom block archive file", "err", err)
+ return blockToUpload, err
+ }
+
+ blockToUpload.BlockRef = ref
+ blockToUpload.Data = archiveFile
+ return blockToUpload, nil
+}
+
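+// UncompressBloomBlock extracts the block archive into a directory under workingDirectory and returns
+// that directory's path.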
+func UncompressBloomBlock(block *LazyBlock, workingDirectory string, logger log.Logger) (string, error) {
+ workingDirectoryPath := filepath.Join(workingDirectory, block.BlockPath)
+ err := os.MkdirAll(workingDirectoryPath, os.ModePerm)
+ if err != nil {
+ return "", fmt.Errorf("can not create directory to extract the block: %w", err)
+ }
+ archivePath, err := writeDataToTempFile(workingDirectoryPath, block)
+ if err != nil {
+ return "", fmt.Errorf("error writing data to temp file: %w", err)
+ }
+ defer func() {
+ if err := os.Remove(archivePath); err != nil {
+ level.Error(logger).Log("msg", "error removing archive file", "err", err, "file", archivePath)
+ }
+ }()
+ err = extractArchive(archivePath, workingDirectoryPath)
+ if err != nil {
+ return "", fmt.Errorf("error extracting archive: %w", err)
+ }
+ return workingDirectoryPath, nil
+}
+
+func writeDataToTempFile(workingDirectoryPath string, block *LazyBlock) (string, error) {
+ defer block.Data.Close()
+ archivePath := filepath.Join(workingDirectoryPath, block.BlockPath[strings.LastIndex(block.BlockPath, "/")+1:])
+
+ archiveFile, err := os.Create(archivePath)
+ if err != nil {
+ return "", fmt.Errorf("error creating empty file to store the archiver: %w", err)
+ }
+ defer archiveFile.Close()
+ _, err = io.Copy(archiveFile, block.Data)
+ if err != nil {
+ return "", fmt.Errorf("error writing data to archive file: %w", err)
+ }
+ return archivePath, nil
+}
+
+func extractArchive(archivePath string, workingDirectoryPath string) error {
+ file, err := os.Open(archivePath)
+ if err != nil {
+ return fmt.Errorf("error opening archive file %s: %w", archivePath, err)
+ }
+ return v1.UnTarGz(workingDirectoryPath, file)
+}
diff --git a/pkg/storage/stores/shipper/bloomshipper/config/config.go b/pkg/storage/stores/shipper/bloomshipper/config/config.go
index 7e9ab787ff3ab..fbfe5f7803516 100644
--- a/pkg/storage/stores/shipper/bloomshipper/config/config.go
+++ b/pkg/storage/stores/shipper/bloomshipper/config/config.go
@@ -5,14 +5,42 @@ import (
"errors"
"flag"
"strings"
+ "time"
+
+ "github.com/grafana/loki/pkg/storage/chunk/cache"
)
type Config struct {
- WorkingDirectory string `yaml:"working_directory"`
+ WorkingDirectory string `yaml:"working_directory"`
+ BlocksDownloadingQueue DownloadingQueueConfig `yaml:"blocks_downloading_queue"`
+ BlocksCache BlocksCacheConfig `yaml:"blocks_cache"`
+}
+
+type BlocksCacheConfig struct {
+ EmbeddedCacheConfig cache.EmbeddedCacheConfig `yaml:",inline"`
+ RemoveDirectoryGracefulPeriod time.Duration `yaml:"remove_directory_graceful_period"`
+}
+
+func (c *BlocksCacheConfig) RegisterFlagsWithPrefixAndDefaults(prefix string, f *flag.FlagSet) {
+ c.EmbeddedCacheConfig.RegisterFlagsWithPrefixAndDefaults(prefix, "", f, 0)
+ f.DurationVar(&c.RemoveDirectoryGracefulPeriod, prefix+"remove-directory-graceful-period", 5*time.Minute,
+ "During this period the process waits until the directory is no longer in use, and only then deletes it. If the timeout is reached, the directory is force deleted.")
+}
+
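+// DownloadingQueueConfig configures the queue and worker pool used to download bloom blocks.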
+type DownloadingQueueConfig struct {
+ WorkersCount int `yaml:"workers_count"`
+ MaxTasksEnqueuedPerTenant int `yaml:"max_tasks_enqueued_per_tenant"`
+}
+
+func (cfg *DownloadingQueueConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
+ f.IntVar(&cfg.WorkersCount, prefix+"workers-count", 100, "The number of parallel workers that download Bloom Blocks.")
+ f.IntVar(&cfg.MaxTasksEnqueuedPerTenant, prefix+"max_tasks_enqueued_per_tenant", 10_000, "Maximum number of tasks in the queue per tenant per bloom-gateway. Enqueuing tasks above this limit will fail with an error.")
}
func (c *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
f.StringVar(&c.WorkingDirectory, prefix+"shipper.working-directory", "bloom-shipper", "Working directory to store downloaded Bloom Blocks.")
+ c.BlocksDownloadingQueue.RegisterFlagsWithPrefix(prefix+"shipper.blocks-downloading-queue.", f)
+ c.BlocksCache.RegisterFlagsWithPrefixAndDefaults("blocks-cache.", f)
}
func (c *Config) Validate() error {
diff --git a/pkg/storage/stores/shipper/bloomshipper/shipper.go b/pkg/storage/stores/shipper/bloomshipper/shipper.go
index 2df1f41cd4a25..ee0665c4f6c30 100644
--- a/pkg/storage/stores/shipper/bloomshipper/shipper.go
+++ b/pkg/storage/stores/shipper/bloomshipper/shipper.go
@@ -4,64 +4,67 @@ import (
"cmp"
"context"
"fmt"
- "io"
- "os"
- "path/filepath"
- "strconv"
- "strings"
"time"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
+ "github.com/prometheus/client_golang/prometheus"
"golang.org/x/exp/slices"
- v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
"github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper/config"
)
type Shipper struct {
- client Client
- config config.Config
- logger log.Logger
+ client Client
+ config config.Config
+ logger log.Logger
+ blockDownloader *blockDownloader
}
-func NewShipper(client Client, config config.Config, logger log.Logger) (*Shipper, error) {
+type Limits interface {
+ BloomGatewayBlocksDownloadingParallelism(tenantID string) int
+}
+
+func NewShipper(client Client, config config.Config, limits Limits, logger log.Logger, reg prometheus.Registerer) (*Shipper, error) {
+ logger = log.With(logger, "component", "bloom-shipper")
+ downloader, err := newBlockDownloader(config, client, limits, logger, reg)
+ if err != nil {
+ return nil, fmt.Errorf("error creating block downloader: %w", err)
+ }
return &Shipper{
- client: client,
- config: config,
- logger: log.With(logger, "component", "bloom-shipper"),
+ client: client,
+ config: config,
+ logger: logger,
+ blockDownloader: downloader,
}, nil
}
-func (s *Shipper) ForEachBlock(
- ctx context.Context,
- tenantID string,
- from, through time.Time,
- fingerprints []uint64,
- callback ForEachBlockCallback) error {
-
- level.Debug(s.logger).Log("msg", "ForEachBlock", "tenant", tenantID, "from", from, "through", through, "fingerprints", len(fingerprints))
+func (s *Shipper) GetBlockRefs(ctx context.Context, tenantID string, from, through time.Time) ([]BlockRef, error) {
+ level.Debug(s.logger).Log("msg", "GetBlockRefs", "tenant", tenantID, "from", from, "through", through)
- blockRefs, err := s.getActiveBlockRefs(ctx, tenantID, from.UnixNano(), through.UnixNano(), fingerprints)
+ blockRefs, err := s.getActiveBlockRefs(ctx, tenantID, from.UnixNano(), through.UnixNano(), nil)
if err != nil {
- return fmt.Errorf("error fetching active block references : %w", err)
+ return nil, fmt.Errorf("error fetching active block references : %w", err)
}
+ return blockRefs, nil
+}
+
+func (s *Shipper) Fetch(ctx context.Context, tenantID string, blocks []BlockRef, callback ForEachBlockCallback) error {
+ cancelContext, cancelFunc := context.WithCancel(ctx)
+ defer cancelFunc()
+ blocksChannel, errorsChannel := s.blockDownloader.downloadBlocks(cancelContext, tenantID, blocks)
- blocksChannel, errorsChannel := s.client.GetBlocks(ctx, blockRefs)
for {
select {
- case block, ok := <-blocksChannel:
+ case <-ctx.Done():
+ return fmt.Errorf("failed to fetch blocks: %w", ctx.Err())
+ case result, ok := <-blocksChannel:
if !ok {
return nil
}
- directory, err := s.extractBlock(&block, time.Now().UTC())
- if err != nil {
- return fmt.Errorf("error unarchiving block %s err: %w", block.BlockPath, err)
- }
- blockQuerier := s.createBlockQuerier(directory)
- err = callback(blockQuerier)
+ err := runCallback(callback, result)
if err != nil {
- return fmt.Errorf("error running callback function for block %s err: %w", block.BlockPath, err)
+ return err
}
case err := <-errorsChannel:
if err != nil {
@@ -71,26 +74,45 @@ func (s *Shipper) ForEachBlock(
}
}
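+// runCallback invokes the callback for a downloaded block and closes the block querier when done.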
+func runCallback(callback ForEachBlockCallback, block blockWithQuerier) error {
+ defer func(result blockWithQuerier) {
+ _ = result.Close()
+ }(block)
+ err := callback(block.closableBlockQuerier.BlockQuerier, block.MinFingerprint, block.MaxFingerprint)
+ if err != nil {
+ return fmt.Errorf("error running callback function for block %s err: %w", block.BlockPath, err)
+ }
+ return nil
+}
+
+func (s *Shipper) ForEachBlock(ctx context.Context, tenantID string, from, through time.Time, fingerprints []uint64, callback ForEachBlockCallback) error {
+ level.Debug(s.logger).Log("msg", "ForEachBlock", "tenant", tenantID, "from", from, "through", through, "fingerprints", len(fingerprints))
+
+ blockRefs, err := s.getActiveBlockRefs(ctx, tenantID, from.UnixNano(), through.UnixNano(), fingerprints)
+ if err != nil {
+ return fmt.Errorf("error fetching active block references : %w", err)
+ }
+
+ return s.Fetch(ctx, tenantID, blockRefs, callback)
+}
+
func (s *Shipper) Stop() {
s.client.Stop()
+ s.blockDownloader.stop()
}
-// getFromThrough returns the first and list item of a fingerprint slice
+// getFirstLast returns the first and last item of a slice
// It assumes an ascending sorted list of fingerprints.
-func getFromThrough(fingerprints []uint64) (uint64, uint64) {
- if len(fingerprints) == 0 {
- return 0, 0
+func getFirstLast[T any](s []T) (T, T) {
+ var zero T
+ if len(s) == 0 {
+ return zero, zero
}
- return fingerprints[0], fingerprints[len(fingerprints)-1]
+ return s[0], s[len(s)-1]
}
-func (s *Shipper) getActiveBlockRefs(
- ctx context.Context,
- tenantID string,
- from, through int64,
- fingerprints []uint64) ([]BlockRef, error) {
-
- minFingerprint, maxFingerprint := getFromThrough(fingerprints)
+func (s *Shipper) getActiveBlockRefs(ctx context.Context, tenantID string, from, through int64, fingerprints []uint64) ([]BlockRef, error) {
+ minFingerprint, maxFingerprint := getFirstLast(fingerprints)
metas, err := s.client.GetMetas(ctx, MetaSearchParams{
TenantID: tenantID,
MinFingerprint: minFingerprint,
@@ -160,7 +182,7 @@ func isOutsideRange(b *BlockRef, startTimestamp, endTimestamp int64, fingerprint
}
// Then, check if outside of min/max of fingerprint slice
- minFp, maxFp := getFromThrough(fingerprints)
+ minFp, maxFp := getFirstLast(fingerprints)
if b.MaxFingerprint < minFp || b.MinFingerprint > maxFp {
return true
}
@@ -177,55 +199,3 @@ func isOutsideRange(b *BlockRef, startTimestamp, endTimestamp int64, fingerprint
}
return b.MaxFingerprint < fingerprints[idx]
}
-
-// extract the files into directory and returns absolute path to this directory.
-func (s *Shipper) extractBlock(block *Block, ts time.Time) (string, error) {
- workingDirectoryPath := filepath.Join(s.config.WorkingDirectory, block.BlockPath, strconv.FormatInt(ts.UnixMilli(), 10))
- err := os.MkdirAll(workingDirectoryPath, os.ModePerm)
- if err != nil {
- return "", fmt.Errorf("can not create directory to extract the block: %w", err)
- }
- archivePath, err := writeDataToTempFile(workingDirectoryPath, block)
- if err != nil {
- return "", fmt.Errorf("error writing data to temp file: %w", err)
- }
- defer func() {
- os.Remove(archivePath)
- // todo log err
- }()
- err = extractArchive(archivePath, workingDirectoryPath)
- if err != nil {
- return "", fmt.Errorf("error extracting archive: %w", err)
- }
- return workingDirectoryPath, nil
-}
-
-func (s *Shipper) createBlockQuerier(directory string) *v1.BlockQuerier {
- reader := v1.NewDirectoryBlockReader(directory)
- block := v1.NewBlock(reader)
- return v1.NewBlockQuerier(block)
-}
-
-func writeDataToTempFile(workingDirectoryPath string, block *Block) (string, error) {
- defer block.Data.Close()
- archivePath := filepath.Join(workingDirectoryPath, block.BlockPath[strings.LastIndex(block.BlockPath, delimiter)+1:])
-
- archiveFile, err := os.Create(archivePath)
- if err != nil {
- return "", fmt.Errorf("error creating empty file to store the archiver: %w", err)
- }
- defer archiveFile.Close()
- _, err = io.Copy(archiveFile, block.Data)
- if err != nil {
- return "", fmt.Errorf("error writing data to archive file: %w", err)
- }
- return archivePath, nil
-}
-
-func extractArchive(archivePath string, workingDirectoryPath string) error {
- file, err := os.Open(archivePath)
- if err != nil {
- return fmt.Errorf("error opening archive file %s: %w", file.Name(), err)
- }
- return v1.UnTarGz(workingDirectoryPath, file)
-}
diff --git a/pkg/storage/stores/shipper/bloomshipper/shipper_test.go b/pkg/storage/stores/shipper/bloomshipper/shipper_test.go
index 45450c0e3838b..17f21793680ca 100644
--- a/pkg/storage/stores/shipper/bloomshipper/shipper_test.go
+++ b/pkg/storage/stores/shipper/bloomshipper/shipper_test.go
@@ -1,21 +1,11 @@
package bloomshipper
import (
- "bytes"
"fmt"
- "io"
"math"
- "os"
- "path/filepath"
- "strconv"
"testing"
- "time"
- "github.com/google/uuid"
"github.com/stretchr/testify/require"
-
- v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
- "github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper/config"
)
func Test_Shipper_findBlocks(t *testing.T) {
@@ -208,61 +198,3 @@ func createBlockRef(
BlockPath: blockPath,
}
}
-
-const (
- bloomFileName = "bloom"
- seriesFileName = "series"
-)
-
-func Test_Shipper_extractBlock(t *testing.T) {
- dir := t.TempDir()
-
- mockBlockDir := filepath.Join(dir, "mock-block-dir")
- err := os.MkdirAll(mockBlockDir, 0777)
- require.NoError(t, err)
- bloomFile, err := os.Create(filepath.Join(mockBlockDir, bloomFileName))
- require.NoError(t, err)
- bloomFileContent := uuid.NewString()
- _, err = io.Copy(bloomFile, bytes.NewReader([]byte(bloomFileContent)))
- require.NoError(t, err)
-
- seriesFile, err := os.Create(filepath.Join(mockBlockDir, seriesFileName))
- require.NoError(t, err)
- seriesFileContent := uuid.NewString()
- _, err = io.Copy(seriesFile, bytes.NewReader([]byte(seriesFileContent)))
- require.NoError(t, err)
-
- blockFilePath := filepath.Join(dir, "test-block-archive")
- file, err := os.OpenFile(blockFilePath, os.O_CREATE|os.O_RDWR, 0700)
- require.NoError(t, err)
- err = v1.TarGz(file, v1.NewDirectoryBlockReader(mockBlockDir))
- require.NoError(t, err)
-
- blockFile, err := os.OpenFile(blockFilePath, os.O_RDONLY, 0700)
- require.NoError(t, err)
-
- workingDir := t.TempDir()
- shipper := Shipper{config: config.Config{WorkingDirectory: workingDir}}
- ts := time.Now().UTC()
- block := Block{
- BlockRef: BlockRef{BlockPath: "first-period-19621/tenantA/metas/ff-fff-1695272400-1695276000-aaa"},
- Data: blockFile,
- }
-
- actualPath, err := shipper.extractBlock(&block, ts)
-
- require.NoError(t, err)
- expectedPath := filepath.Join(workingDir, block.BlockPath, strconv.FormatInt(ts.UnixMilli(), 10))
- require.Equal(t, expectedPath, actualPath,
- "expected archive to be extracted to working directory under the same path as blockPath and with timestamp suffix")
- require.FileExists(t, filepath.Join(expectedPath, bloomFileName))
- require.FileExists(t, filepath.Join(expectedPath, seriesFileName))
-
- actualBloomFileContent, err := os.ReadFile(filepath.Join(expectedPath, bloomFileName))
- require.NoError(t, err)
- require.Equal(t, bloomFileContent, string(actualBloomFileContent))
-
- actualSeriesFileContent, err := os.ReadFile(filepath.Join(expectedPath, seriesFileName))
- require.NoError(t, err)
- require.Equal(t, seriesFileContent, string(actualSeriesFileContent))
-}
diff --git a/pkg/storage/stores/shipper/bloomshipper/store.go b/pkg/storage/stores/shipper/bloomshipper/store.go
index 80f2c352d5326..e24d7e35c412a 100644
--- a/pkg/storage/stores/shipper/bloomshipper/store.go
+++ b/pkg/storage/stores/shipper/bloomshipper/store.go
@@ -2,18 +2,20 @@ package bloomshipper
import (
"context"
+ "sort"
"time"
"github.com/prometheus/common/model"
- "github.com/grafana/loki/pkg/logproto"
v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
)
-type ForEachBlockCallback func(bq *v1.BlockQuerier) error
+type ForEachBlockCallback func(bq *v1.BlockQuerier, minFp, maxFp uint64) error
type ReadShipper interface {
+ GetBlockRefs(ctx context.Context, tenant string, from, through time.Time) ([]BlockRef, error)
ForEachBlock(ctx context.Context, tenant string, from, through time.Time, fingerprints []uint64, callback ForEachBlockCallback) error
+ Fetch(ctx context.Context, tenant string, blocks []BlockRef, callback ForEachBlockCallback) error
}
type Interface interface {
@@ -21,8 +23,16 @@ type Interface interface {
Stop()
}
+type BlockQuerierWithFingerprintRange struct {
+ *v1.BlockQuerier
+ MinFp, MaxFp model.Fingerprint
+}
+
type Store interface {
- FilterChunkRefs(ctx context.Context, tenant string, from, through time.Time, chunkRefs []*logproto.GroupedChunkRefs, filters ...*logproto.LineFilterExpression) ([]*logproto.GroupedChunkRefs, error)
+ GetBlockRefs(ctx context.Context, tenant string, from, through time.Time) ([]BlockRef, error)
+ GetBlockQueriers(ctx context.Context, tenant string, from, through time.Time, fingerprints []uint64) ([]BlockQuerierWithFingerprintRange, error)
+ GetBlockQueriersForBlockRefs(ctx context.Context, tenant string, blocks []BlockRef) ([]BlockQuerierWithFingerprintRange, error)
+ ForEach(ctx context.Context, tenant string, blocks []BlockRef, callback ForEachBlockCallback) error
Stop()
}
@@ -40,84 +50,46 @@ func (bs *BloomStore) Stop() {
bs.shipper.Stop()
}
-func (bs *BloomStore) FilterChunkRefs(ctx context.Context, tenant string, from, through time.Time, chunkRefs []*logproto.GroupedChunkRefs, filters ...*logproto.LineFilterExpression) ([]*logproto.GroupedChunkRefs, error) {
- fingerprints := make([]uint64, 0, len(chunkRefs))
- for _, ref := range chunkRefs {
- fingerprints = append(fingerprints, ref.Fingerprint)
- }
-
- blooms, err := bs.queriers(ctx, tenant, from, through, fingerprints)
- if err != nil {
- return nil, err
- }
-
- searches := convertLineFilterExpressions(filters)
+// GetBlockRefs implements Store
+func (bs *BloomStore) GetBlockRefs(ctx context.Context, tenant string, from, through time.Time) ([]BlockRef, error) {
+ return bs.shipper.GetBlockRefs(ctx, tenant, from, through)
+}
- for _, ref := range chunkRefs {
- refs, err := blooms.Filter(ctx, model.Fingerprint(ref.Fingerprint), convertToChunkRefs(ref.Refs), searches)
- if err != nil {
- return nil, err
- }
- ref.Refs = convertToShortRefs(refs)
- }
- return chunkRefs, nil
+// ForEach implements Store
+func (bs *BloomStore) ForEach(ctx context.Context, tenant string, blocks []BlockRef, callback ForEachBlockCallback) error {
+ return bs.shipper.Fetch(ctx, tenant, blocks, callback)
}
-func (bs *BloomStore) queriers(ctx context.Context, tenant string, from, through time.Time, fingerprints []uint64) (*bloomQueriers, error) {
- bf := newBloomFilters(1024)
- err := bs.shipper.ForEachBlock(ctx, tenant, from, through, fingerprints, func(bq *v1.BlockQuerier) error {
- bf.queriers = append(bf.queriers, bq)
+// GetQueriersForBlocks implements Store
+func (bs *BloomStore) GetBlockQueriersForBlockRefs(ctx context.Context, tenant string, blocks []BlockRef) ([]BlockQuerierWithFingerprintRange, error) {
+ bqs := make([]BlockQuerierWithFingerprintRange, 0, 32)
+ err := bs.shipper.Fetch(ctx, tenant, blocks, func(bq *v1.BlockQuerier, minFp uint64, maxFp uint64) error {
+ bqs = append(bqs, BlockQuerierWithFingerprintRange{
+ BlockQuerier: bq,
+ MinFp: model.Fingerprint(minFp),
+ MaxFp: model.Fingerprint(maxFp),
+ })
return nil
})
- return bf, err
-}
-
-func convertLineFilterExpressions(filters []*logproto.LineFilterExpression) [][]byte {
- searches := make([][]byte, len(filters))
- for _, f := range filters {
- searches = append(searches, []byte(f.Match))
- }
- return searches
-}
-
-// convertToShortRefs converts a v1.ChunkRefs into []*logproto.ShortRef
-// TODO(chaudum): Avoid conversion by transferring v1.ChunkRefs in gRPC request.
-func convertToShortRefs(refs v1.ChunkRefs) []*logproto.ShortRef {
- result := make([]*logproto.ShortRef, len(refs))
- for _, ref := range refs {
- result = append(result, &logproto.ShortRef{From: ref.Start, Through: ref.End, Checksum: ref.Checksum})
- }
- return result
-}
-
-// convertToChunkRefs converts a []*logproto.ShortRef into v1.ChunkRefs
-// TODO(chaudum): Avoid conversion by transferring v1.ChunkRefs in gRPC request.
-func convertToChunkRefs(refs []*logproto.ShortRef) v1.ChunkRefs {
- result := make(v1.ChunkRefs, len(refs))
- for _, ref := range refs {
- result = append(result, v1.ChunkRef{Start: ref.From, End: ref.Through, Checksum: ref.Checksum})
- }
- return result
-}
-
-type bloomQueriers struct {
- queriers []*v1.BlockQuerier
-}
-
-func newBloomFilters(size int) *bloomQueriers {
- return &bloomQueriers{
- queriers: make([]*v1.BlockQuerier, size),
- }
+ sort.Slice(bqs, func(i, j int) bool {
+ return bqs[i].MinFp < bqs[j].MinFp
+ })
+ return bqs, err
}
-func (bf *bloomQueriers) Filter(_ context.Context, fp model.Fingerprint, chunkRefs v1.ChunkRefs, filters [][]byte) (v1.ChunkRefs, error) {
- result := make(v1.ChunkRefs, len(chunkRefs))
- for _, bq := range bf.queriers {
- refs, err := bq.CheckChunksForSeries(fp, chunkRefs, filters)
- if err != nil {
- return nil, err
- }
- result = append(result, refs...)
- }
- return result, nil
+// BlockQueriers implements Store
+func (bs *BloomStore) GetBlockQueriers(ctx context.Context, tenant string, from, through time.Time, fingerprints []uint64) ([]BlockQuerierWithFingerprintRange, error) {
+ bqs := make([]BlockQuerierWithFingerprintRange, 0, 32)
+ err := bs.shipper.ForEachBlock(ctx, tenant, from, through, fingerprints, func(bq *v1.BlockQuerier, minFp uint64, maxFp uint64) error {
+ bqs = append(bqs, BlockQuerierWithFingerprintRange{
+ BlockQuerier: bq,
+ MinFp: model.Fingerprint(minFp),
+ MaxFp: model.Fingerprint(maxFp),
+ })
+ return nil
+ })
+ sort.Slice(bqs, func(i, j int) bool {
+ return bqs[i].MinFp < bqs[j].MinFp
+ })
+ return bqs, err
}
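
For context, a minimal sketch (not part of this change set) of how a caller might drive the two-step Store API introduced above: resolving block references first, then fetching queriers for them. The function and package names below are illustrative assumptions.

```go
// Sketch only, assuming the bloomshipper package as defined in this diff.
package example

import (
	"context"
	"fmt"
	"time"

	"github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper"
)

func useBlockQueriers(ctx context.Context, store bloomshipper.Store, tenant string, from, through time.Time) error {
	// Step 1: resolve which block references cover the query time range.
	refs, err := store.GetBlockRefs(ctx, tenant, from, through)
	if err != nil {
		return fmt.Errorf("resolving block refs: %w", err)
	}

	// Step 2: download those blocks and obtain queriers, sorted by MinFp.
	queriers, err := store.GetBlockQueriersForBlockRefs(ctx, tenant, refs)
	if err != nil {
		return fmt.Errorf("fetching block queriers: %w", err)
	}

	// Each querier carries its fingerprint bounds, so a caller can route
	// only the series falling into [MinFp, MaxFp] to it.
	for _, bq := range queriers {
		_ = bq.MinFp
		_ = bq.MaxFp
	}
	return nil
}
```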
diff --git a/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/compacted_index.go b/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/compacted_index.go
index d1ea9fcca68ff..584116b240417 100644
--- a/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/compacted_index.go
+++ b/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/compacted_index.go
@@ -62,7 +62,7 @@ func (c *CompactedIndex) isEmpty() (bool, error) {
// bbolt.Compact fills the whole page by setting FillPercent to 1 which works well here since while copying the data, it receives the index entries in order.
// The storage space goes down from anywhere between 25% to 50% as per my(Sandeep) tests.
func (c *CompactedIndex) recreateCompactedDB() error {
- destDB, err := openBoltdbFileWithNoSync(filepath.Join(c.workingDir, fmt.Sprint(time.Now().Unix())))
+ destDB, err := openBoltdbFileWithNoSync(filepath.Join(c.workingDir, fmt.Sprint(time.Now().UnixNano())))
if err != nil {
return err
}
@@ -178,7 +178,7 @@ func (c *CompactedIndex) ToIndexFile() (shipperindex.Index, error) {
if c.compactedFileRecreated {
fileNameFormat = "%s" + recreatedCompactedDBSuffix
}
- fileName := fmt.Sprintf(fileNameFormat, shipperutil.BuildIndexFileName(c.tableName, uploaderName, fmt.Sprint(time.Now().Unix())))
+ fileName := fmt.Sprintf(fileNameFormat, shipperutil.BuildIndexFileName(c.tableName, uploaderName, fmt.Sprint(time.Now().UnixNano())))
idxFile := boltdb.BoltDBToIndexFile(c.compactedFile, fileName)
c.compactedFile = nil
diff --git a/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/table_compactor.go b/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/table_compactor.go
index 95a02137fcb75..d864d306a2ba7 100644
--- a/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/table_compactor.go
+++ b/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/table_compactor.go
@@ -248,7 +248,7 @@ func (t *tableCompactor) fetchOrCreateUserCompactedIndexSet(userID string) error
return err
}
- compactedFile, err := openBoltdbFileWithNoSync(filepath.Join(userIndexSet.GetWorkingDir(), fmt.Sprint(time.Now().Unix())))
+ compactedFile, err := openBoltdbFileWithNoSync(filepath.Join(userIndexSet.GetWorkingDir(), fmt.Sprint(time.Now().UnixNano())))
if err != nil {
return err
}
@@ -272,7 +272,7 @@ func (t *tableCompactor) fetchOrCreateUserCompactedIndexSet(userID string) error
func (t *tableCompactor) compactUserIndexes(idxSet compactor.IndexSet) (*CompactedIndex, error) {
indexes := idxSet.ListSourceFiles()
workingDir := idxSet.GetWorkingDir()
- compactedDBName := filepath.Join(workingDir, fmt.Sprint(time.Now().Unix()))
+ compactedDBName := filepath.Join(workingDir, fmt.Sprint(time.Now().UnixNano()))
compactedFile, err := openBoltdbFileWithNoSync(compactedDBName)
if err != nil {
@@ -318,7 +318,7 @@ func (t *tableCompactor) compactCommonIndexes(ctx context.Context) (*CompactedIn
indexes := idxSet.ListSourceFiles()
compactedFileIdx := compactedFileIdx(indexes)
workingDir := idxSet.GetWorkingDir()
- compactedDBName := filepath.Join(workingDir, fmt.Sprint(time.Now().Unix()))
+ compactedDBName := filepath.Join(workingDir, fmt.Sprint(time.Now().UnixNano()))
// if we find a previously compacted file, use it as a seed file to copy other index into it
if compactedFileIdx != -1 {
diff --git a/pkg/storage/stores/shipper/indexshipper/downloads/index_set.go b/pkg/storage/stores/shipper/indexshipper/downloads/index_set.go
index b9a24d52533c2..f9c12edd9c6f3 100644
--- a/pkg/storage/stores/shipper/indexshipper/downloads/index_set.go
+++ b/pkg/storage/stores/shipper/indexshipper/downloads/index_set.go
@@ -209,15 +209,28 @@ func (t *indexSet) ForEachConcurrent(ctx context.Context, callback index.ForEach
}
defer t.indexMtx.rUnlock()
+ logger := util_log.WithContext(ctx, t.logger)
+ level.Debug(logger).Log("index-files-count", len(t.index))
+
+ if len(t.index) == 0 {
+ return nil
+ }
+
+ // shortcut; if there's only one index, there's no need for bounded concurrency
+ if len(t.index) == 1 {
+ for i := range t.index {
+ idx := t.index[i]
+ return callback(t.userID == "", idx)
+ }
+ }
+
+ //nolint:ineffassign,staticcheck
g, ctx := errgroup.WithContext(ctx)
if t.maxConcurrent == 0 {
panic("maxConcurrent cannot be 0, indexSet is being initialized without setting maxConcurrent")
}
g.SetLimit(t.maxConcurrent)
- logger := util_log.WithContext(ctx, t.logger)
- level.Debug(logger).Log("index-files-count", len(t.index))
-
for i := range t.index {
idx := t.index[i]
g.Go(func() error {
diff --git a/pkg/storage/stores/shipper/indexshipper/tsdb/multi_file_index.go b/pkg/storage/stores/shipper/indexshipper/tsdb/multi_file_index.go
index 50a162533d719..01935a842d539 100644
--- a/pkg/storage/stores/shipper/indexshipper/tsdb/multi_file_index.go
+++ b/pkg/storage/stores/shipper/indexshipper/tsdb/multi_file_index.go
@@ -35,6 +35,15 @@ type IndexIter interface {
type IndexSlice []Index
func (xs IndexSlice) For(ctx context.Context, maxConcurrent int, fn func(context.Context, Index) error) error {
+ if len(xs) == 0 {
+ return nil
+ }
+
+ // shortcut; if there's only one slice, there's no need for bounded concurrency
+ if len(xs) == 1 {
+ return fn(ctx, xs[0])
+ }
+
g, ctx := errgroup.WithContext(ctx)
if maxConcurrent == 0 {
panic("maxConcurrent cannot be 0, IndexIter is being called with a maxConcurrent of 0")
diff --git a/pkg/storage/util_test.go b/pkg/storage/util_test.go
index 6dff5146af42b..b59b729b0d5bd 100644
--- a/pkg/storage/util_test.go
+++ b/pkg/storage/util_test.go
@@ -17,6 +17,7 @@ import (
"github.com/grafana/loki/pkg/logql/syntax"
"github.com/grafana/loki/pkg/logqlmodel/stats"
"github.com/grafana/loki/pkg/querier/astmapper"
+ "github.com/grafana/loki/pkg/querier/plan"
"github.com/grafana/loki/pkg/storage/chunk"
"github.com/grafana/loki/pkg/storage/chunk/cache"
chunkclient "github.com/grafana/loki/pkg/storage/chunk/client"
@@ -135,6 +136,9 @@ func newQuery(query string, start, end time.Time, shards []astmapper.ShardAnnota
End: end,
Direction: logproto.FORWARD,
Deletes: deletes,
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(query),
+ },
}
for _, shard := range shards {
req.Shards = append(req.Shards, shard.String())
@@ -148,6 +152,9 @@ func newSampleQuery(query string, start, end time.Time, deletes []*logproto.Dele
Start: start,
End: end,
Deletes: deletes,
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(query),
+ },
}
return req
}
diff --git a/pkg/util/hash_fp.go b/pkg/util/hash_fp.go
index 209b8b45c0646..e7c0253865b65 100644
--- a/pkg/util/hash_fp.go
+++ b/pkg/util/hash_fp.go
@@ -1,6 +1,10 @@
package util
-import "github.com/prometheus/common/model"
+import (
+ "hash/fnv"
+
+ "github.com/prometheus/common/model"
+)
// HashFP simply moves entropy from the most significant 48 bits of the
// fingerprint into the least significant 16 bits (by XORing) so that a simple
@@ -12,3 +16,10 @@ import "github.com/prometheus/common/model"
func HashFP(fp model.Fingerprint) uint32 {
return uint32(fp ^ (fp >> 32) ^ (fp >> 16))
}
+
+// HashedQuery returns a unique hash value for the given `query`.
+func HashedQuery(query string) uint32 {
+ h := fnv.New32()
+ _, _ = h.Write([]byte(query))
+ return h.Sum32()
+}
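
A small usage sketch (not part of this diff) for the new `HashedQuery` helper; the query string and cache-key format below are made up for illustration.

```go
// Sketch only: HashedQuery yields a stable 32-bit value for a query string,
// useful e.g. as part of a cache key. The query and key format are examples.
package example

import (
	"fmt"

	"github.com/grafana/loki/pkg/util"
)

func exampleCacheKey() string {
	q := `{app="foo"} |= "error"`
	return fmt.Sprintf("query:%d", util.HashedQuery(q))
}
```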
diff --git a/pkg/util/marshal/marshal.go b/pkg/util/marshal/marshal.go
index fd28907d0579a..bb961039cdd35 100644
--- a/pkg/util/marshal/marshal.go
+++ b/pkg/util/marshal/marshal.go
@@ -124,7 +124,11 @@ func WriteSeriesResponseJSON(series []logproto.SeriesIdentifier, w io.Writer) er
}
for _, series := range series {
- adapter.Data = append(adapter.Data, series.GetLabels())
+ m := make(map[string]string, 0)
+ for _, pair := range series.GetLabels() {
+ m[pair.Key] = pair.Value
+ }
+ adapter.Data = append(adapter.Data, m)
}
s := jsoniter.ConfigFastest.BorrowStream(w)
diff --git a/pkg/util/marshal/marshal_test.go b/pkg/util/marshal/marshal_test.go
index fa8cc5d8aa3e5..b35b5e2b05687 100644
--- a/pkg/util/marshal/marshal_test.go
+++ b/pkg/util/marshal/marshal_test.go
@@ -177,7 +177,7 @@ var queryTestWithEncodingFlags = []struct {
"test": "test"
},
"values":[
- [ "123456789012345", "super line"],
+ [ "123456789012345", "super line", {}],
[ "123456789012346", "super line with labels", {
"structuredMetadata": {
"foo": "a",
@@ -518,7 +518,7 @@ var tailTestWithEncodingFlags = []struct {
"test": "test"
},
"values":[
- [ "123456789012345", "super line"],
+ [ "123456789012345", "super line", {}],
[ "123456789012346", "super line with labels", {
"structuredMetadata": {
"foo": "a",
@@ -692,16 +692,10 @@ func Test_WriteSeriesResponseJSON(t *testing.T) {
logproto.SeriesResponse{
Series: []logproto.SeriesIdentifier{
{
- Labels: map[string]string{
- "a": "1",
- "b": "2",
- },
+ Labels: logproto.MustNewSeriesEntries("a", "1", "b", "2"),
},
{
- Labels: map[string]string{
- "c": "3",
- "d": "4",
- },
+ Labels: logproto.MustNewSeriesEntries("c", "3", "d", "4"),
},
},
},
@@ -812,7 +806,7 @@ func Test_WriteQueryResponseJSON_EncodeFlags(t *testing.T) {
"test": "test"
},
"values":[
- [ "123456789012346", "super line"]
+ [ "123456789012346", "super line", {}]
]
},
{
@@ -965,7 +959,7 @@ func Test_EncodeResult_And_ResultValue_Parity(t *testing.T) {
f := func(w wrappedValue) bool {
var buf bytes.Buffer
js := json.NewStream(json.ConfigFastest, &buf, 0)
- err := encodeResult(w.Value, js, httpreq.NewEncodingFlags(httpreq.FlagCategorizeLabels))
+ err := encodeResult(w.Value, js, nil)
require.NoError(t, err)
js.Flush()
actual := buf.String()
diff --git a/pkg/util/marshal/query.go b/pkg/util/marshal/query.go
index b048b0a952f87..8f41915c720a8 100644
--- a/pkg/util/marshal/query.go
+++ b/pkg/util/marshal/query.go
@@ -401,7 +401,7 @@ func encodeStream(stream logproto.Stream, s *jsoniter.Stream, encodeFlags httpre
s.WriteMore()
s.WriteStringWithHTMLEscaped(e.Line)
- if categorizeLabels && (len(e.StructuredMetadata) > 0 || len(e.Parsed) > 0) {
+ if categorizeLabels {
s.WriteMore()
s.WriteObjectStart()
diff --git a/pkg/util/ring/sharding.go b/pkg/util/ring/sharding.go
index cb549ec02bb90..45a53cf40cfe7 100644
--- a/pkg/util/ring/sharding.go
+++ b/pkg/util/ring/sharding.go
@@ -83,3 +83,22 @@ func (s *FingerprintShuffleSharding) OwnsFingerprint(fp uint64) (bool, error) {
return rs.Includes(s.ringLifeCycler.GetInstanceAddr()), nil
}
+
+// NoopStrategy is an implementation of the ShardingStrategy that does not
+// shard anything.
+type NoopStrategy struct{}
+
+// OwnsTenant implements TenantShuffleSharding.
+func (s *NoopStrategy) OwnsTenant(_ string) bool {
+ return false
+}
+
+// GetTenantSubRing implements TenantShuffleSharding.
+func (s *NoopStrategy) GetTenantSubRing(_ string) ring.ReadRing {
+ return nil
+}
+
+// OwnsFingerprint implements FingerprintSharding.
+func (s *NoopStrategy) OwnsFingerprint(_ uint64) (bool, error) {
+ return false, nil
+}
diff --git a/pkg/util/server/error.go b/pkg/util/server/error.go
index ef4dedec93094..fc04218d5a733 100644
--- a/pkg/util/server/error.go
+++ b/pkg/util/server/error.go
@@ -9,7 +9,9 @@ import (
"github.com/grafana/dskit/user"
"github.com/prometheus/prometheus/promql"
"google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
+
+ "github.com/gogo/googleapis/google/rpc"
+ "github.com/gogo/status"
"github.com/grafana/loki/pkg/logqlmodel"
storage_errors "github.com/grafana/loki/pkg/storage/errors"
@@ -46,17 +48,24 @@ func ClientHTTPStatusAndError(err error) (int, error) {
return http.StatusGatewayTimeout, errors.New(ErrDeadlineExceeded)
}
- s, isRPC := status.FromError(err)
+ if s, isRPC := status.FromError(err); isRPC {
+ if s.Code() == codes.DeadlineExceeded {
+ return http.StatusGatewayTimeout, errors.New(ErrDeadlineExceeded)
+ } else if int(s.Code())/100 == 4 || int(s.Code())/100 == 5 {
+ return int(s.Code()), errors.New(s.Message())
+ }
+ return http.StatusInternalServerError, err
+ }
+
switch {
case errors.Is(err, context.Canceled) ||
(errors.As(err, &promErr) && errors.Is(promErr.Err, context.Canceled)):
return StatusClientClosedRequest, errors.New(ErrClientCanceled)
- case errors.Is(err, context.DeadlineExceeded) ||
- (isRPC && s.Code() == codes.DeadlineExceeded):
+ case errors.Is(err, context.DeadlineExceeded):
return http.StatusGatewayTimeout, errors.New(ErrDeadlineExceeded)
case errors.As(err, &queryErr):
return http.StatusBadRequest, err
- case errors.Is(err, logqlmodel.ErrLimit) || errors.Is(err, logqlmodel.ErrParse) || errors.Is(err, logqlmodel.ErrPipeline) || errors.Is(err, logqlmodel.ErrBlocked):
+ case errors.Is(err, logqlmodel.ErrLimit) || errors.Is(err, logqlmodel.ErrParse) || errors.Is(err, logqlmodel.ErrPipeline) || errors.Is(err, logqlmodel.ErrBlocked) || errors.Is(err, logqlmodel.ErrParseMatchers):
return http.StatusBadRequest, err
case errors.Is(err, user.ErrNoOrgID):
return http.StatusBadRequest, err
@@ -67,3 +76,17 @@ func ClientHTTPStatusAndError(err error) (int, error) {
return http.StatusInternalServerError, err
}
}
+
+// WrapError wraps an error in a protobuf status.
+func WrapError(err error) *rpc.Status {
+ if s, ok := status.FromError(err); ok {
+ return s.Proto()
+ }
+
+ code, err := ClientHTTPStatusAndError(err)
+ return status.New(codes.Code(code), err.Error()).Proto()
+}
+
+func UnwrapError(s *rpc.Status) error {
+ return status.ErrorProto(s)
+}
diff --git a/pkg/util/server/error_test.go b/pkg/util/server/error_test.go
index 1b8132ff653a4..1fe15b0322e49 100644
--- a/pkg/util/server/error_test.go
+++ b/pkg/util/server/error_test.go
@@ -9,13 +9,12 @@ import (
"net/http/httptest"
"testing"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
-
+ "github.com/gogo/status"
"github.com/grafana/dskit/httpgrpc"
"github.com/grafana/dskit/user"
"github.com/prometheus/prometheus/promql"
"github.com/stretchr/testify/require"
+ "google.golang.org/grpc/codes"
"github.com/grafana/loki/pkg/logqlmodel"
storage_errors "github.com/grafana/loki/pkg/storage/errors"
@@ -32,9 +31,9 @@ func Test_writeError(t *testing.T) {
}{
{"cancelled", context.Canceled, ErrClientCanceled, StatusClientClosedRequest},
{"cancelled multi", util.MultiError{context.Canceled, context.Canceled}, ErrClientCanceled, StatusClientClosedRequest},
- {"rpc cancelled", status.New(codes.Canceled, context.Canceled.Error()).Err(), "rpc error: code = Canceled desc = context canceled", http.StatusInternalServerError},
- {"rpc cancelled multi", util.MultiError{status.New(codes.Canceled, context.Canceled.Error()).Err(), status.New(codes.Canceled, context.Canceled.Error()).Err()}, "2 errors: rpc error: code = Canceled desc = context canceled; rpc error: code = Canceled desc = context canceled", http.StatusInternalServerError},
- {"mixed context and rpc cancelled", util.MultiError{context.Canceled, status.New(codes.Canceled, context.Canceled.Error()).Err()}, "2 errors: context canceled; rpc error: code = Canceled desc = context canceled", http.StatusInternalServerError},
+ {"rpc cancelled", status.Error(codes.Canceled, context.Canceled.Error()), "rpc error: code = Canceled desc = context canceled", http.StatusInternalServerError},
+ {"rpc cancelled multi", util.MultiError{status.Error(codes.Canceled, context.Canceled.Error()), status.Error(codes.Canceled, context.Canceled.Error())}, "2 errors: rpc error: code = Canceled desc = context canceled; rpc error: code = Canceled desc = context canceled", http.StatusInternalServerError},
+ {"mixed context and rpc cancelled", util.MultiError{context.Canceled, status.Error(codes.Canceled, context.Canceled.Error())}, "2 errors: context canceled; rpc error: code = Canceled desc = context canceled", http.StatusInternalServerError},
{"mixed context, rpc cancelled and another", util.MultiError{errors.New("standard error"), context.Canceled, status.New(codes.Canceled, context.Canceled.Error()).Err()}, "3 errors: standard error; context canceled; rpc error: code = Canceled desc = context canceled", http.StatusInternalServerError},
{"cancelled storage", promql.ErrStorage{Err: context.Canceled}, ErrClientCanceled, StatusClientClosedRequest},
{"orgid", user.ErrNoOrgID, user.ErrNoOrgID.Error(), http.StatusBadRequest},
@@ -56,9 +55,19 @@ func Test_writeError(t *testing.T) {
WriteError(tt.err, rec)
require.Equal(t, tt.expectedStatus, rec.Result().StatusCode)
b, err := io.ReadAll(rec.Result().Body)
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
+ require.Equal(t, tt.msg, string(b[:len(b)-1]))
+ })
+
+ t.Run(tt.name+"-roundtrip", func(t *testing.T) {
+ status := WrapError(tt.err)
+ unwrappedErr := UnwrapError(status)
+
+ rec := httptest.NewRecorder()
+ WriteError(unwrappedErr, rec)
+ require.Equal(t, tt.expectedStatus, rec.Result().StatusCode)
+ b, err := io.ReadAll(rec.Result().Body)
+ require.NoError(t, err)
require.Equal(t, tt.msg, string(b[:len(b)-1]))
})
}
diff --git a/pkg/validation/exporter.go b/pkg/validation/exporter.go
index bbc26d1b544d5..ad9dde8574dd0 100644
--- a/pkg/validation/exporter.go
+++ b/pkg/validation/exporter.go
@@ -52,7 +52,7 @@ func (oe *OverridesExporter) Collect(ch chan<- prometheus.Metric) {
return float64(val.Field(i).Int()), true
case model.Duration:
return float64(val.Field(i).Interface().(model.Duration)), true
- case flagext.ByteSize:
+ case uint, flagext.ByteSize:
return float64(val.Field(i).Uint()), true
case float64:
return val.Field(i).Float(), true
diff --git a/pkg/validation/limits.go b/pkg/validation/limits.go
index 0a482b2c0401f..cc55662aa27ef 100644
--- a/pkg/validation/limits.go
+++ b/pkg/validation/limits.go
@@ -98,7 +98,8 @@ type Limits struct {
MaxEntriesLimitPerQuery int `yaml:"max_entries_limit_per_query" json:"max_entries_limit_per_query"`
MaxCacheFreshness model.Duration `yaml:"max_cache_freshness_per_query" json:"max_cache_freshness_per_query"`
MaxStatsCacheFreshness model.Duration `yaml:"max_stats_cache_freshness" json:"max_stats_cache_freshness"`
- MaxQueriersPerTenant int `yaml:"max_queriers_per_tenant" json:"max_queriers_per_tenant"`
+ MaxQueriersPerTenant uint `yaml:"max_queriers_per_tenant" json:"max_queriers_per_tenant"`
+ MaxQueryCapacity float64 `yaml:"max_query_capacity" json:"max_query_capacity"`
QueryReadyIndexNumDays int `yaml:"query_ready_index_num_days" json:"query_ready_index_num_days"`
QueryTimeout model.Duration `yaml:"query_timeout" json:"query_timeout"`
@@ -182,10 +183,15 @@ type Limits struct {
BloomGatewayShardSize int `yaml:"bloom_gateway_shard_size" json:"bloom_gateway_shard_size"`
BloomGatewayEnabled bool `yaml:"bloom_gateway_enable_filtering" json:"bloom_gateway_enable_filtering"`
- BloomCompactorShardSize int `yaml:"bloom_compactor_shard_size" json:"bloom_compactor_shard_size"`
- BloomCompactorMaxTableAge time.Duration `yaml:"bloom_compactor_max_table_age" json:"bloom_compactor_max_table_age"`
- BloomCompactorMinTableAge time.Duration `yaml:"bloom_compactor_min_table_age" json:"bloom_compactor_min_table_age"`
- BloomCompactorEnabled bool `yaml:"bloom_compactor_enable_compaction" json:"bloom_compactor_enable_compaction"`
+ BloomCompactorShardSize int `yaml:"bloom_compactor_shard_size" json:"bloom_compactor_shard_size"`
+ BloomCompactorMaxTableAge time.Duration `yaml:"bloom_compactor_max_table_age" json:"bloom_compactor_max_table_age"`
+ BloomCompactorMinTableAge time.Duration `yaml:"bloom_compactor_min_table_age" json:"bloom_compactor_min_table_age"`
+ BloomCompactorEnabled bool `yaml:"bloom_compactor_enable_compaction" json:"bloom_compactor_enable_compaction"`
+ BloomNGramLength int `yaml:"bloom_ngram_length" json:"bloom_ngram_length"`
+ BloomNGramSkip int `yaml:"bloom_ngram_skip" json:"bloom_ngram_skip"`
+ BloomFalsePositiveRate float64 `yaml:"bloom_false_positive_rate" json:"bloom_false_positive_rate"`
+ BloomGatewayBlocksDownloadingParallelism int `yaml:"bloom_gateway_blocks_downloading_parallelism" json:"bloom_gateway_blocks_downloading_parallelism"`
+ BloomGatewayCacheKeyInterval time.Duration `yaml:"bloom_gateway_cache_key_interval" json:"bloom_gateway_cache_key_interval"`
AllowStructuredMetadata bool `yaml:"allow_structured_metadata,omitempty" json:"allow_structured_metadata,omitempty" doc:"description=Allow user to send structured metadata in push payload."`
MaxStructuredMetadataSize flagext.ByteSize `yaml:"max_structured_metadata_size" json:"max_structured_metadata_size" doc:"description=Maximum size accepted for structured metadata per log line."`
@@ -272,7 +278,8 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) {
_ = l.MaxStatsCacheFreshness.Set("10m")
f.Var(&l.MaxStatsCacheFreshness, "frontend.max-stats-cache-freshness", "Do not cache requests with an end time that falls within Now minus this duration. 0 disables this feature (default).")
- f.IntVar(&l.MaxQueriersPerTenant, "frontend.max-queriers-per-tenant", 0, "Maximum number of queriers that can handle requests for a single tenant. If set to 0 or value higher than number of available queriers, *all* queriers will handle requests for the tenant. Each frontend (or query-scheduler, if used) will select the same set of queriers for the same tenant (given that all queriers are connected to all frontends / query-schedulers). This option only works with queriers connecting to the query-frontend / query-scheduler, not when using downstream URL.")
+ f.UintVar(&l.MaxQueriersPerTenant, "frontend.max-queriers-per-tenant", 0, "Maximum number of queriers that can handle requests for a single tenant. If set to 0 or value higher than number of available queriers, *all* queriers will handle requests for the tenant. Each frontend (or query-scheduler, if used) will select the same set of queriers for the same tenant (given that all queriers are connected to all frontends / query-schedulers). This option only works with queriers connecting to the query-frontend / query-scheduler, not when using downstream URL.")
+ f.Float64Var(&l.MaxQueryCapacity, "frontend.max-query-capacity", 0, "How much of the available query capacity (\"querier\" components in distributed mode, \"read\" components in SSD mode) can be used by a single tenant. Allowed values are 0.0 to 1.0. For example, setting this to 0.5 would allow a tenant to use half of the available queriers for processing the query workload. If set to 0, query capacity is determined by frontend.max-queriers-per-tenant. When both frontend.max-queriers-per-tenant and frontend.max-query-capacity are configured, smaller value of the resulting querier replica count is considered: min(frontend.max-queriers-per-tenant, ceil(querier_replicas * frontend.max-query-capacity)). *All* queriers will handle requests for the tenant if neither limits are applied. This option only works with queriers connecting to the query-frontend / query-scheduler, not when using downstream URL. Use this feature in a multi-tenant setup where you need to limit query capacity for certain tenants.")
f.IntVar(&l.QueryReadyIndexNumDays, "store.query-ready-index-num-days", 0, "Number of days of index to be kept always downloaded for queries. Applies only to per user index in boltdb-shipper index store. 0 to disable.")
f.IntVar(&l.RulerMaxRulesPerRuleGroup, "ruler.max-rules-per-rule-group", 0, "Maximum number of rules per rule group per-tenant. 0 to disable.")
@@ -303,6 +310,11 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) {
f.DurationVar(&l.BloomCompactorMaxTableAge, "bloom-compactor.max-table-age", 7*24*time.Hour, "The maximum age of a table before it is compacted. Do not compact tables older than the the configured time. Default to 7 days. 0s means no limit.")
f.DurationVar(&l.BloomCompactorMinTableAge, "bloom-compactor.min-table-age", 1*time.Hour, "The minimum age of a table before it is compacted. Do not compact tables newer than the the configured time. Default to 1 hour. 0s means no limit. This is useful to avoid compacting tables that will be updated with out-of-order writes.")
f.BoolVar(&l.BloomCompactorEnabled, "bloom-compactor.enable-compaction", false, "Whether to compact chunks into bloom filters.")
+ f.IntVar(&l.BloomNGramLength, "bloom-compactor.ngram-length", 4, "Length of the n-grams created when computing blooms from log lines.")
+ f.IntVar(&l.BloomNGramSkip, "bloom-compactor.ngram-skip", 0, "Skip factor for the n-grams created when computing blooms from log lines.")
+ f.Float64Var(&l.BloomFalsePositiveRate, "bloom-compactor.false-positive-rate", 0.01, "Scalable Bloom Filter desired false-positive rate.")
+ f.IntVar(&l.BloomGatewayBlocksDownloadingParallelism, "bloom-gateway.blocks-downloading-parallelism", 50, "Maximum number of blocks will be downloaded in parallel by the Bloom Gateway.")
+ f.DurationVar(&l.BloomGatewayCacheKeyInterval, "bloom-gateway.cache-key-interval", 15*time.Minute, "Interval for computing the cache key in the Bloom Gateway.")
l.ShardStreams = &shardstreams.Config{}
l.ShardStreams.RegisterFlagsWithPrefix("shard-streams", f)
@@ -360,6 +372,16 @@ func (l *Limits) Validate() error {
level.Warn(util_log.Logger).Log("msg", "The compactor.allow-deletes configuration option has been deprecated and will be ignored. Instead, use deletion_mode in the limits_configs to adjust deletion functionality")
}
+ if l.MaxQueryCapacity < 0 {
+ level.Warn(util_log.Logger).Log("msg", "setting frontend.max-query-capacity to 0 as it is configured to a value less than 0")
+ l.MaxQueryCapacity = 0
+ }
+
+ if l.MaxQueryCapacity > 1 {
+ level.Warn(util_log.Logger).Log("msg", "setting frontend.max-query-capacity to 1 as it is configured to a value greater than 1")
+ l.MaxQueryCapacity = 1
+ }
+
return nil
}
@@ -494,10 +516,15 @@ func (o *Overrides) MaxQueryRange(_ context.Context, userID string) time.Duratio
}
// MaxQueriersPerUser returns the maximum number of queriers that can handle requests for this user.
-func (o *Overrides) MaxQueriersPerUser(userID string) int {
+func (o *Overrides) MaxQueriersPerUser(userID string) uint {
return o.getOverridesForUser(userID).MaxQueriersPerTenant
}
+// MaxQueryCapacity returns how much of the available query capacity can be used by this user.
+func (o *Overrides) MaxQueryCapacity(userID string) float64 {
+ return o.getOverridesForUser(userID).MaxQueryCapacity
+}
+
// QueryReadyIndexNumDays returns the number of days for which we have to be query ready for a user.
func (o *Overrides) QueryReadyIndexNumDays(userID string) int {
return o.getOverridesForUser(userID).QueryReadyIndexNumDays
@@ -782,6 +809,14 @@ func (o *Overrides) BloomGatewayShardSize(userID string) int {
return o.getOverridesForUser(userID).BloomGatewayShardSize
}
+func (o *Overrides) BloomGatewayBlocksDownloadingParallelism(userID string) int {
+ return o.getOverridesForUser(userID).BloomGatewayBlocksDownloadingParallelism
+}
+
+func (o *Overrides) BloomGatewayCacheKeyInterval(userID string) time.Duration {
+ return o.getOverridesForUser(userID).BloomGatewayCacheKeyInterval
+}
+
func (o *Overrides) BloomGatewayEnabled(userID string) bool {
return o.getOverridesForUser(userID).BloomGatewayEnabled
}
@@ -802,6 +837,18 @@ func (o *Overrides) BloomCompactorEnabled(userID string) bool {
return o.getOverridesForUser(userID).BloomCompactorEnabled
}
+func (o *Overrides) BloomNGramLength(userID string) int {
+ return o.getOverridesForUser(userID).BloomNGramLength
+}
+
+func (o *Overrides) BloomNGramSkip(userID string) int {
+ return o.getOverridesForUser(userID).BloomNGramSkip
+}
+
+func (o *Overrides) BloomFalsePositiveRate(userID string) float64 {
+ return o.getOverridesForUser(userID).BloomFalsePositiveRate
+}
+
func (o *Overrides) AllowStructuredMetadata(userID string) bool {
return o.getOverridesForUser(userID).AllowStructuredMetadata
}
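
To make the interaction between `frontend.max-queriers-per-tenant` and the new `frontend.max-query-capacity` concrete, here is a rough sketch (not code from this diff) of how the effective querier count described in the flag help could be computed; the function and variable names are illustrative.

```go
// Sketch only, mirroring the formula from the flag description:
// min(frontend.max-queriers-per-tenant, ceil(querier_replicas * frontend.max-query-capacity)).
package example

import "math"

func effectiveQueriers(maxQueriersPerTenant uint, maxQueryCapacity float64, querierReplicas int) int {
	// A zero capacity (after Validate clamps it into [0, 1]) means "no capacity limit".
	byCapacity := querierReplicas
	if maxQueryCapacity > 0 {
		byCapacity = int(math.Ceil(float64(querierReplicas) * maxQueryCapacity))
	}

	// A zero count means "no per-tenant querier limit".
	byCount := querierReplicas
	if maxQueriersPerTenant > 0 {
		byCount = int(maxQueriersPerTenant)
	}

	if byCapacity < byCount {
		return byCapacity
	}
	return byCount
}
```

For example, with 10 querier replicas, `frontend.max-queriers-per-tenant=6` and `frontend.max-query-capacity=0.5`, the tenant would be served by min(6, ceil(10 * 0.5)) = 5 queriers.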
diff --git a/production/docker/config/loki.yaml b/production/docker/config/loki.yaml
index e6a2f5fe31d84..6e4541164a235 100644
--- a/production/docker/config/loki.yaml
+++ b/production/docker/config/loki.yaml
@@ -97,9 +97,6 @@ limits_config:
split_queries_by_interval: 15m
volume_enabled: true
-chunk_store_config:
- max_look_back_period: 336h
-
table_manager:
retention_deletes_enabled: true
retention_period: 336h
diff --git a/production/helm/loki/CHANGELOG.md b/production/helm/loki/CHANGELOG.md
index 7f45b3155661c..626523e1bae4d 100644
--- a/production/helm/loki/CHANGELOG.md
+++ b/production/helm/loki/CHANGELOG.md
@@ -13,6 +13,46 @@ Entries should include a reference to the pull request that introduced the chang
[//]: # ( : do not remove this line. This locator is used by the CI pipeline to automatically create a changelog entry for each new Loki release. Add other chart versions and respective changelog entries below this line.)
+## 5.41.4
+
+- [CHANGE] Use `/ingester/shutdown?terminate=false` for write `preStop` hook
+
+## 5.41.3
+
+- [FEATURE] Add support for defining an s3 backoff config.
+
+## 5.41.2
+
+- [FEATURE] Add ciliumnetworkpolicies.
+
+## 5.41.1
+
+- [FEATURE] Allow topology spread constraints for Loki read deployment component.
+
+## 5.41.0
+
+- [CHANGE] Changed version of Loki to 2.9.3
+
+## 5.40.1
+
+- [BUGFIX] Remove ruler enabled condition in networkpolicies.
+
+## 5.40.0
+
+- [CHANGE] Add extraContainers parameter for the write pod
+
+## 5.39.0
+
+- [FEATURE] Add support for adding OpenStack swift container credentials via helm chart
+
+## 5.38.0
+
+- [CHANGE] Changed MinIO Helm Chart version to 4.0.15
+
+## 5.37.0
+
+- [FEATURE] Add support for enabling tracing.
+
## 5.36.2
- [BUGFIX] Add support to run dnsmasq
diff --git a/production/helm/loki/Chart.lock b/production/helm/loki/Chart.lock
index c2bbe88846859..17f1dafad7ae9 100644
--- a/production/helm/loki/Chart.lock
+++ b/production/helm/loki/Chart.lock
@@ -1,9 +1,9 @@
dependencies:
- name: minio
repository: https://charts.min.io/
- version: 4.0.12
+ version: 4.0.15
- name: grafana-agent-operator
repository: https://grafana.github.io/helm-charts
version: 0.2.16
-digest: sha256:3605bf81141e70309ef7efab98523d59615f3f5cf4e7b2eb7fd2be04cd52c906
-generated: "2023-06-27T16:57:05.871386+02:00"
+digest: sha256:56eeb13a669bc816c1452cde5d6dddc61f6893f8aff3da1d2b56ce3bdcbcf84d
+generated: "2023-11-09T12:22:25.317696-03:00"
diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml
index 06768ba93d2d1..095e2745a364a 100644
--- a/production/helm/loki/Chart.yaml
+++ b/production/helm/loki/Chart.yaml
@@ -2,8 +2,8 @@ apiVersion: v2
name: loki
description: Helm chart for Grafana Loki in simple, scalable mode
type: application
-appVersion: 2.9.2
-version: 5.36.3
+appVersion: 2.9.3
+version: 5.41.4
home: https://grafana.github.io/helm-charts
sources:
- https://github.com/grafana/loki
@@ -13,7 +13,7 @@ icon: https://grafana.com/docs/loki/latest/logo_and_name.png
dependencies:
- name: minio
alias: minio
- version: 4.0.12
+ version: 4.0.15
repository: https://charts.min.io/
condition: minio.enabled
- name: grafana-agent-operator
diff --git a/production/helm/loki/README.md b/production/helm/loki/README.md
index b5cd5883819aa..2857f553e13f7 100644
--- a/production/helm/loki/README.md
+++ b/production/helm/loki/README.md
@@ -1,6 +1,6 @@
# loki
-![Version: 5.36.3](https://img.shields.io/badge/Version-5.36.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.9.2](https://img.shields.io/badge/AppVersion-2.9.2-informational?style=flat-square)
+![Version: 5.41.4](https://img.shields.io/badge/Version-5.41.4-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.9.3](https://img.shields.io/badge/AppVersion-2.9.3-informational?style=flat-square)
Helm chart for Grafana Loki in simple, scalable mode
@@ -14,7 +14,7 @@ Helm chart for Grafana Loki in simple, scalable mode
| Repository | Name | Version |
|------------|------|---------|
-| https://charts.min.io/ | minio(minio) | 4.0.12 |
+| https://charts.min.io/ | minio(minio) | 4.0.15 |
| https://grafana.github.io/helm-charts | grafana-agent-operator(grafana-agent-operator) | 0.2.16 |
Find more information in the Loki Helm Chart [documentation](https://grafana.com/docs/loki/next/installation/helm).
diff --git a/production/helm/loki/src/dashboards/loki-logs.json b/production/helm/loki/src/dashboards/loki-logs.json
index bde101d35738a..0f113cf9b5280 100644
--- a/production/helm/loki/src/dashboards/loki-logs.json
+++ b/production/helm/loki/src/dashboards/loki-logs.json
@@ -78,7 +78,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -165,7 +165,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -251,7 +251,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -337,7 +337,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -423,7 +423,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -509,7 +509,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -596,7 +596,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -683,7 +683,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -788,7 +788,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
diff --git a/production/helm/loki/src/helm-test/Dockerfile b/production/helm/loki/src/helm-test/Dockerfile
index 253a10fd44bf8..012e48b84a38f 100644
--- a/production/helm/loki/src/helm-test/Dockerfile
+++ b/production/helm/loki/src/helm-test/Dockerfile
@@ -7,7 +7,7 @@ COPY . /src/loki
WORKDIR /src/loki
RUN make clean && make BUILD_IN_CONTAINER=false helm-test
-FROM alpine:3.16.7
+FROM alpine:3.18.4
RUN apk add --update --no-cache ca-certificates=20230506-r0
COPY --from=build /src/loki/production/helm/loki/src/helm-test/helm-test /usr/bin/helm-test
ENTRYPOINT [ "/usr/bin/helm-test" ]
diff --git a/production/helm/loki/templates/_helpers.tpl b/production/helm/loki/templates/_helpers.tpl
index 964a5a6dcd97b..08e4dd063babe 100644
--- a/production/helm/loki/templates/_helpers.tpl
+++ b/production/helm/loki/templates/_helpers.tpl
@@ -245,7 +245,20 @@ s3:
ca_file: {{ . }}
{{- end}}
{{- end }}
+ {{- with .backoff_config}}
+ backoff_config:
+ {{- with .min_period }}
+ min_period: {{ . }}
+ {{- end}}
+ {{- with .max_period }}
+ max_period: {{ . }}
+ {{- end}}
+ {{- with .max_retries }}
+ max_retries: {{ . }}
+ {{- end}}
+ {{- end }}
{{- end -}}
+
{{- else if eq .Values.loki.storage.type "gcs" -}}
{{- with .Values.loki.storage.gcs }}
gcs:
@@ -277,6 +290,39 @@ azure:
endpoint_suffix: {{ . }}
{{- end }}
{{- end -}}
+{{- else if eq .Values.loki.storage.type "swift" -}}
+{{- with .Values.loki.storage.swift }}
+swift:
+ {{- with .auth_version }}
+ auth_version: {{ . }}
+ {{- end }}
+ auth_url: {{ .auth_url }}
+ {{- with .internal }}
+ internal: {{ . }}
+ {{- end }}
+ username: {{ .username }}
+ user_domain_name: {{ .user_domain_name }}
+ {{- with .user_domain_id }}
+ user_domain_id: {{ . }}
+ {{- end }}
+ {{- with .user_id }}
+ user_id: {{ . }}
+ {{- end }}
+ password: {{ .password }}
+ {{- with .domain_id }}
+ domain_id: {{ . }}
+ {{- end }}
+ domain_name: {{ .domain_name }}
+ project_id: {{ .project_id }}
+ project_name: {{ .project_name }}
+ project_domain_id: {{ .project_domain_id }}
+ project_domain_name: {{ .project_domain_name }}
+ region_name: {{ .region_name }}
+ container_name: {{ .container_name }}
+ max_retries: {{ .max_retries | default 3 }}
+ connect_timeout: {{ .connect_timeout | default "10s" }}
+ request_timeout: {{ .request_timeout | default "5s" }}
+{{- end -}}
{{- else -}}
{{- with .Values.loki.storage.filesystem }}
filesystem:
@@ -350,6 +396,39 @@ azure:
endpoint_suffix: {{ . }}
{{- end }}
{{- end -}}
+{{- else if eq .Values.loki.storage.type "swift" -}}
+{{- with .Values.loki.storage.swift }}
+swift:
+ {{- with .auth_version }}
+ auth_version: {{ . }}
+ {{- end }}
+ auth_url: {{ .auth_url }}
+ {{- with .internal }}
+ internal: {{ . }}
+ {{- end }}
+ username: {{ .username }}
+ user_domain_name: {{ .user_domain_name }}
+ {{- with .user_domain_id }}
+ user_domain_id: {{ . }}
+ {{- end }}
+ {{- with .user_id }}
+ user_id: {{ . }}
+ {{- end }}
+ password: {{ .password }}
+ {{- with .domain_id }}
+ domain_id: {{ . }}
+ {{- end }}
+ domain_name: {{ .domain_name }}
+ project_id: {{ .project_id }}
+ project_name: {{ .project_name }}
+ project_domain_id: {{ .project_domain_id }}
+ project_domain_name: {{ .project_domain_name }}
+ region_name: {{ .region_name }}
+ container_name: {{ .container_name }}
+ max_retries: {{ .max_retries | default 3 }}
+ connect_timeout: {{ .connect_timeout | default "10s" }}
+ request_timeout: {{ .request_timeout | default "5s" }}
+{{- end -}}
{{- else }}
type: "local"
{{- end -}}
diff --git a/production/helm/loki/templates/ciliumnetworkpolicy.yaml b/production/helm/loki/templates/ciliumnetworkpolicy.yaml
new file mode 100644
index 0000000000000..5633ae1945206
--- /dev/null
+++ b/production/helm/loki/templates/ciliumnetworkpolicy.yaml
@@ -0,0 +1,184 @@
+{{- if and (.Values.networkPolicy.enabled) (eq .Values.networkPolicy.flavor "cilium") }}
+---
+apiVersion: cilium.io/v2
+kind: CiliumNetworkPolicy
+metadata:
+ name: {{ include "loki.name" . }}-namespace-only
+ namespace: {{ $.Release.Namespace }}
+ labels:
+ {{- include "loki.labels" . | nindent 4 }}
+spec:
+ endpointSelector: {}
+ egress:
+ - toEndpoints:
+ - {}
+ ingress:
+ - fromEndpoints:
+ - {}
+
+---
+apiVersion: cilium.io/v2
+kind: CiliumNetworkPolicy
+metadata:
+ name: {{ include "loki.name" . }}-egress-dns
+ namespace: {{ $.Release.Namespace }}
+ labels:
+ {{- include "loki.labels" . | nindent 4 }}
+spec:
+ endpointSelector:
+ matchLabels:
+ {{- include "loki.selectorLabels" . | nindent 6 }}
+ egress:
+ - toPorts:
+ - ports:
+ - port: dns
+ protocol: UDP
+ toEndpoints:
+ - namespaceSelector: {}
+
+---
+apiVersion: cilium.io/v2
+kind: CiliumNetworkPolicy
+metadata:
+ name: {{ include "loki.name" . }}-ingress
+ namespace: {{ $.Release.Namespace }}
+ labels:
+ {{- include "loki.labels" . | nindent 4 }}
+spec:
+ endpointSelector:
+ matchExpressions:
+ - key: app.kubernetes.io/component
+ operator: In
+ values:
+ {{- if .Values.gateway.enabled }}
+ - gateway
+ {{- else }}
+ - read
+ - write
+ {{- end }}
+ matchLabels:
+ {{- include "loki.selectorLabels" . | nindent 6 }}
+ ingress:
+ - toPorts:
+ - port: http
+ protocol: TCP
+ {{- if .Values.networkPolicy.ingress.namespaceSelector }}
+ fromEndpoints:
+ - matchLabels:
+ {{- toYaml .Values.networkPolicy.ingress.namespaceSelector | nindent 8 }}
+ {{- if .Values.networkPolicy.ingress.podSelector }}
+ {{- toYaml .Values.networkPolicy.ingress.podSelector | nindent 8 }}
+ {{- end }}
+ {{- end }}
+
+---
+apiVersion: cilium.io/v2
+kind: CiliumNetworkPolicy
+metadata:
+ name: {{ include "loki.name" . }}-ingress-metrics
+ namespace: {{ $.Release.Namespace }}
+ labels:
+ {{- include "loki.labels" . | nindent 4 }}
+spec:
+ endpointSelector:
+ matchLabels:
+ {{- include "loki.selectorLabels" . | nindent 6 }}
+ ingress:
+ - toPorts:
+ - port: http-metrics
+ protocol: TCP
+ {{- if .Values.networkPolicy.metrics.cidrs }}
+ {{- range $cidr := .Values.networkPolicy.metrics.cidrs }}
+ toCIDR:
+ - {{ $cidr }}
+ {{- end }}
+ {{- if .Values.networkPolicy.metrics.namespaceSelector }}
+ fromEndpoints:
+ - matchLabels:
+ {{- toYaml .Values.networkPolicy.metrics.namespaceSelector | nindent 8 }}
+ {{- if .Values.networkPolicy.metrics.podSelector }}
+ {{- toYaml .Values.networkPolicy.metrics.podSelector | nindent 8 }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+
+---
+apiVersion: cilium.io/v2
+kind: CiliumNetworkPolicy
+metadata:
+ name: {{ include "loki.name" . }}-egress-alertmanager
+ namespace: {{ $.Release.Namespace }}
+ labels:
+ {{- include "loki.labels" . | nindent 4 }}
+spec:
+ endpointSelector:
+ matchLabels:
+ {{- include "loki.backendSelectorLabels" . | nindent 6 }}
+ egress:
+ - toPorts:
+ - port: {{ .Values.networkPolicy.alertmanager.port }}
+ protocol: TCP
+ {{- if .Values.networkPolicy.alertmanager.namespaceSelector }}
+ toEndpoints:
+ - matchLabels:
+ {{- toYaml .Values.networkPolicy.alertmanager.namespaceSelector | nindent 8 }}
+ {{- if .Values.networkPolicy.alertmanager.podSelector }}
+ {{- toYaml .Values.networkPolicy.alertmanager.podSelector | nindent 8 }}
+ {{- end }}
+ {{- end }}
+
+{{- if .Values.networkPolicy.externalStorage.ports }}
+---
+apiVersion: cilium.io/v2
+kind: CiliumNetworkPolicy
+metadata:
+ name: {{ include "loki.name" . }}-egress-external-storage
+ namespace: {{ $.Release.Namespace }}
+ labels:
+ {{- include "loki.labels" . | nindent 4 }}
+spec:
+ endpointSelector:
+ matchLabels:
+ {{- include "loki.selectorLabels" . | nindent 6 }}
+ egress:
+ - toPorts:
+ {{- range $port := .Values.networkPolicy.externalStorage.ports }}
+ - port: {{ $port }}
+ protocol: TCP
+ {{- end }}
+ {{- if .Values.networkPolicy.externalStorage.cidrs }}
+ {{- range $cidr := .Values.networkPolicy.externalStorage.cidrs }}
+ toCIDR:
+ - {{ $cidr }}
+ {{- end }}
+ {{- end }}
+{{- end }}
+
+{{- end }}
+
+{{- if .Values.networkPolicy.discovery.port }}
+---
+apiVersion: cilium.io/v2
+kind: CiliumNetworkPolicy
+metadata:
+ name: {{ include "loki.name" . }}-egress-discovery
+ namespace: {{ $.Release.Namespace }}
+ labels:
+ {{- include "loki.labels" . | nindent 4 }}
+spec:
+ endpointSelector:
+ matchLabels:
+ {{- include "loki.selectorLabels" . | nindent 6 }}
+ egress:
+ - toPorts:
+ - port: {{ .Values.networkPolicy.discovery.port }}
+ protocol: TCP
+ {{- if .Values.networkPolicy.discovery.namespaceSelector }}
+ toEndpoints:
+ - matchLabels:
+ {{- toYaml .Values.networkPolicy.discovery.namespaceSelector | nindent 8 }}
+ {{- if .Values.networkPolicy.discovery.podSelector }}
+ {{- toYaml .Values.networkPolicy.discovery.podSelector | nindent 8 }}
+ {{- end }}
+ {{- end }}
+{{- end }}
diff --git a/production/helm/loki/templates/networkpolicy.yaml b/production/helm/loki/templates/networkpolicy.yaml
index c6d5fa0264a41..27c85280eb08c 100644
--- a/production/helm/loki/templates/networkpolicy.yaml
+++ b/production/helm/loki/templates/networkpolicy.yaml
@@ -1,4 +1,4 @@
-{{- if .Values.networkPolicy.enabled }}
+{{- if and (.Values.networkPolicy.enabled) (eq .Values.networkPolicy.flavor "kubernetes") }}
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
@@ -112,7 +112,6 @@ spec:
{{- end }}
{{- end }}
-{{- if .Values.ruler.enabled }}
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
@@ -126,7 +125,7 @@ spec:
- Egress
podSelector:
matchLabels:
- {{- include "loki.rulerSelectorLabels" . | nindent 6 }}
+ {{- include "loki.backendSelectorLabels" . | nindent 6 }}
egress:
- ports:
- port: {{ .Values.networkPolicy.alertmanager.port }}
@@ -140,7 +139,6 @@ spec:
{{- toYaml .Values.networkPolicy.alertmanager.podSelector | nindent 12 }}
{{- end }}
{{- end }}
-{{- end }}
{{- if .Values.networkPolicy.externalStorage.ports }}
---
diff --git a/production/helm/loki/templates/read/deployment-read.yaml b/production/helm/loki/templates/read/deployment-read.yaml
index e468752d5723f..a5e7524f2a05f 100644
--- a/production/helm/loki/templates/read/deployment-read.yaml
+++ b/production/helm/loki/templates/read/deployment-read.yaml
@@ -127,6 +127,10 @@ spec:
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
+ {{- with .Values.read.topologySpreadConstraints }}
+ topologySpreadConstraints:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
{{- with .Values.read.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
diff --git a/production/helm/loki/templates/write/statefulset-write.yaml b/production/helm/loki/templates/write/statefulset-write.yaml
index fdbc2f04d20b3..ca67038a16192 100644
--- a/production/helm/loki/templates/write/statefulset-write.yaml
+++ b/production/helm/loki/templates/write/statefulset-write.yaml
@@ -119,7 +119,7 @@ spec:
lifecycle:
preStop:
httpGet:
- path: "/ingester/flush_shutdown"
+ path: "/ingester/shutdown?terminate=false"
port: http-metrics
{{- end }}
volumeMounts:
@@ -138,6 +138,9 @@ spec:
{{- end }}
resources:
{{- toYaml .Values.write.resources | nindent 12 }}
+ {{- with .Values.write.extraContainers }}
+ {{- toYaml . | nindent 8}}
+ {{- end }}
{{- with .Values.write.affinity }}
affinity:
{{- tpl . $ | nindent 8 }}
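Two notes on the write StatefulSet changes above: the preStop hook now calls `/ingester/shutdown?terminate=false` instead of the older `/ingester/flush_shutdown` path, and sidecars can be attached through the new `write.extraContainers` value. A minimal, hypothetical sidecar sketch:

```yaml
# Illustrative only: attach a sidecar to the write pods via the new value.
write:
  extraContainers:
    - name: example-sidecar                        # hypothetical container name
      image: registry.example.com/sidecar:latest   # placeholder image
      args:
        - --listen=:8080                           # placeholder argument
```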
diff --git a/production/helm/loki/values.yaml b/production/helm/loki/values.yaml
index de6048aecc712..b8c09ee76465b 100644
--- a/production/helm/loki/values.yaml
+++ b/production/helm/loki/values.yaml
@@ -240,6 +240,9 @@ loki:
distributor:
{{- tpl (. | toYaml) $ | nindent 4 }}
{{- end }}
+
+ tracing:
+ enabled: {{ .Values.loki.tracing.enabled }}
# Should authentication be enabled
auth_enabled: true
# -- memberlist configuration (overrides embedded default)
@@ -282,6 +285,8 @@ loki:
s3ForcePathStyle: false
insecure: false
http_config: {}
+ # -- Check https://grafana.com/docs/loki/latest/configure/#s3_storage_config for more info on how to provide a backoff_config
+ backoff_config: {}
gcs:
chunkBufferSize: 0
requestTimeout: "0s"
@@ -295,6 +300,26 @@ loki:
userAssignedId: null
requestTimeout: null
endpointSuffix: null
+ swift:
+ auth_version: null
+ auth_url: null
+ internal: null
+ username: null
+ user_domain_name: null
+ user_domain_id: null
+ user_id: null
+ password: null
+ domain_id: null
+ domain_name: null
+ project_id: null
+ project_name: null
+ project_domain_id: null
+ project_domain_name: null
+ region_name: null
+ container_name: null
+ max_retries: null
+ connect_timeout: null
+ request_timeout: null
filesystem:
chunks_directory: /var/loki/chunks
rules_directory: /var/loki/rules
@@ -344,6 +369,9 @@ loki:
scheduler_address: '{{ include "loki.querySchedulerAddress" . }}'
# -- Optional distributor configuration
distributor: {}
+ # -- Enable tracing
+ tracing:
+ enabled: false
enterprise:
# Enable enterprise features, license must be provided
enabled: false
@@ -774,6 +802,8 @@ write:
# https://github.com/grafana/loki/blob/main/docs/sources/operations/storage/wal.md#how-to-scale-updown
# -- Init containers to add to the write pods
initContainers: []
+ # -- Containers to add to the write pods
+ extraContainers: []
# -- Volume mounts to add to the write pods
extraVolumeMounts: []
# -- Volumes to add to the write pods
@@ -1437,6 +1467,9 @@ gateway:
networkPolicy:
# -- Specifies whether Network Policies should be created
enabled: false
+ # -- Specifies whether the policies created will be standard Network Policies (flavor: kubernetes)
+ # or Cilium Network Policies (flavor: cilium)
+ flavor: kubernetes
metrics:
# -- Specifies the Pods which are allowed to access the metrics port.
# As this is cross-namespace communication, you also need the namespaceSelector.
@@ -1474,8 +1507,6 @@ networkPolicy:
podSelector: {}
# -- Specifies the namespace the discovery Pods are running in
namespaceSelector: {}
-tracing:
- jaegerAgentHost: ""
# -------------------------------------
# Configuration for `minio` child chart
# -------------------------------------
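Taken together, the values.yaml changes above add a `loki.tracing.enabled` toggle (while the old top-level `tracing.jaegerAgentHost` block is removed), an S3 `backoff_config` passthrough, a full set of Swift storage keys, and the `networkPolicy.flavor` switch. A hedged example of overriding the new tracing and backoff fields; the retry figures below are placeholders, not recommendations:

```yaml
# Illustrative only: exercise the newly added values.
loki:
  tracing:
    enabled: true            # rendered into the Loki config as `tracing: enabled: true`
  storage:
    s3:
      backoff_config:        # passed through to Loki's s3_storage_config (see the doc link above)
        min_period: 100ms
        max_period: 3s
        max_retries: 5
```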
diff --git a/production/ksonnet/loki/bloom-compactor.libsonnet b/production/ksonnet/loki/bloom-compactor.libsonnet
new file mode 100644
index 0000000000000..d8c5e862fa106
--- /dev/null
+++ b/production/ksonnet/loki/bloom-compactor.libsonnet
@@ -0,0 +1,125 @@
+{
+ local k = import 'ksonnet-util/kausal.libsonnet',
+ local container = k.core.v1.container,
+ local containerPort = k.core.v1.containerPort,
+ local pvc = k.core.v1.persistentVolumeClaim,
+ local service = k.core.v1.service,
+ local statefulSet = k.apps.v1.statefulSet,
+ local volume = k.core.v1.volume,
+ local volumeMount = k.core.v1.volumeMount,
+
+ local name = 'bloom-compactor',
+
+ _config+:: {
+ bloom_compactor+: {
+ // number of replicas
+ replicas: if $._config.use_bloom_filters then 3 else 0,
+ // PVC config
+ pvc_size: if $._config.use_bloom_filters then error 'bloom_compactor.pvc_size needs to be defined' else '',
+ pvc_class: if $._config.use_bloom_filters then error 'bloom_compactor.pvc_class needs to be defined' else '',
+ },
+ loki+:
+ if $._config.use_bloom_filters
+ then
+ {
+ bloom_compactor: {
+ enabled: true,
+ working_directory: '/data/blooms',
+ compaction_interval: '15m',
+ max_compaction_parallelism: 1,
+ },
+ }
+ else {},
+ },
+
+ local cfg = self._config.bloom_compactor,
+
+ local volumeName = name + '-data',
+ local volumeMounts = [volumeMount.new(volumeName, '/data')],
+
+ bloom_compactor_args::
+ if $._config.use_bloom_filters
+ then
+ $._config.commonArgs {
+ target: 'bloom-compactor',
+ }
+ else {},
+
+ bloom_compactor_ports:: [
+ containerPort.new(name='http-metrics', port=$._config.http_listen_port),
+ containerPort.new(name='grpc', port=9095),
+ ],
+
+ bloom_compactor_data_pvc::
+ if $._config.use_bloom_filters
+ then
+ pvc.new(volumeName)
+ // set disk size
+ + pvc.mixin.spec.resources.withRequests({ storage: $._config.bloom_compactor.pvc_size })
+ // mount the volume as read-write by a single node
+ + pvc.mixin.spec.withAccessModes(['ReadWriteOnce'])
+ // set persistent volume storage class
+ + pvc.mixin.spec.withStorageClassName($._config.bloom_compactor.pvc_class)
+ else {},
+
+
+ bloom_compactor_container::
+ if $._config.use_bloom_filters
+ then
+ container.new(name, $._images.bloom_compactor)
+ // add default ports
+ + container.withPorts($.bloom_compactor_ports)
+ // add target specific CLI arguments
+ + container.withArgsMixin(k.util.mapToFlags($.bloom_compactor_args))
+ // mount the data pvc at given mountpoint
+ + container.withVolumeMountsMixin(volumeMounts)
+      // add global environment variables
+ + container.withEnvMixin($._config.commonEnvs)
+ // add HTTP readiness probe
+ + container.mixin.readinessProbe.httpGet.withPath('/ready')
+ + container.mixin.readinessProbe.httpGet.withPort($._config.http_listen_port)
+ + container.mixin.readinessProbe.withTimeoutSeconds(1)
+ // define container resource requests
+ + k.util.resourcesRequests('2', '4Gi')
+ // define container resource limits
+ + k.util.resourcesLimits(null, '8Gi')
+ else {},
+
+ bloom_compactor_statefulset:
+ if $._config.use_bloom_filters
+ then
+ statefulSet.new(name, cfg.replicas, [$.bloom_compactor_container], $.bloom_compactor_data_pvc)
+ // add clusterIP service
+ + statefulSet.mixin.spec.withServiceName(name)
+ // perform rolling update when statefulset configuration changes
+ + statefulSet.mixin.spec.updateStrategy.withType('RollingUpdate')
+ // launch or terminate pods in parallel, *does not* affect upgrades
+ + statefulSet.mixin.spec.withPodManagementPolicy('Parallel')
+ // 10001 is the user/group ID assigned to Loki in the Dockerfile
+ + statefulSet.mixin.spec.template.spec.securityContext.withRunAsUser(10001)
+ + statefulSet.mixin.spec.template.spec.securityContext.withRunAsGroup(10001)
+ + statefulSet.mixin.spec.template.spec.securityContext.withFsGroup(10001)
+ // ensure statefulset is updated when loki config changes
+ + $.config_hash_mixin
+      // ensure no two pods of this workload are scheduled on the same node
+ + k.util.antiAffinity
+ // mount the loki config.yaml
+ + k.util.configVolumeMount('loki', '/etc/loki/config')
+ // mount the runtime overrides.yaml
+ + k.util.configVolumeMount('overrides', '/etc/loki/overrides')
+ else {},
+
+ bloom_compactor_service:
+ if $._config.use_bloom_filters
+ then
+ k.util.serviceFor($.bloom_compactor_statefulset, $._config.service_ignored_labels)
+ else {},
+
+ bloom_compactor_headless_service:
+ if $._config.use_bloom_filters
+ then
+ k.util.serviceFor($.bloom_compactor_statefulset, $._config.service_ignored_labels)
+ + service.mixin.metadata.withName(name + '-headless')
+ + service.mixin.spec.withClusterIp('None')
+ else {},
+}
diff --git a/production/ksonnet/loki/bloom-gateway.libsonnet b/production/ksonnet/loki/bloom-gateway.libsonnet
new file mode 100644
index 0000000000000..387896ee40d15
--- /dev/null
+++ b/production/ksonnet/loki/bloom-gateway.libsonnet
@@ -0,0 +1,170 @@
+{
+ local k = import 'ksonnet-util/kausal.libsonnet',
+ local container = k.core.v1.container,
+ local containerPort = k.core.v1.containerPort,
+ local pvc = k.core.v1.persistentVolumeClaim,
+ local service = k.core.v1.service,
+ local statefulSet = k.apps.v1.statefulSet,
+ local volume = k.core.v1.volume,
+ local volumeMount = k.core.v1.volumeMount,
+
+ local name = 'bloom-gateway',
+
+ _config+:: {
+ bloom_gateway+: {
+ // number of replicas
+ replicas: if $._config.use_bloom_filters then 3 else 0,
+ // if true, the host needs to have local SSD disks mounted, otherwise PVCs are used
+ use_local_ssd: false,
+ // PVC config
+ pvc_size: if !self.use_local_ssd then error 'bloom_gateway.pvc_size needs to be defined when using PVC' else '',
+ pvc_class: if !self.use_local_ssd then error 'bloom_gateway.pvc_class needs to be defined when using PVC' else '',
+ // local SSD config
+ hostpath: if self.use_local_ssd then error 'bloom_gateway.hostpath needs to be defined when using local SSDs' else '',
+ node_selector: if self.use_local_ssd then error 'bloom_gateway.node_selector needs to be defined when using local SSDs' else {},
+ tolerations: if self.use_local_ssd then error 'bloom_gateway.tolerations needs to be defined when using local SSDs' else [],
+ },
+ loki+:
+ if $._config.use_bloom_filters
+ then
+ {
+ bloom_gateway+: {
+ enabled: true,
+ worker_concurrency: 8,
+ ring: {
+ replication_factor: 3,
+ },
+ client: {
+ cache_results: false,
+ },
+ },
+ storage_config+: {
+ bloom_shipper+: {
+ working_directory: '/data/blooms',
+ blocks_downloading_queue: {
+ workers_count: 10,
+ },
+ blocks_cache: {
+ enabled: true,
+ max_size_mb: error 'set bloom_shipper.blocks_cache.max_size_mb to ~80% of available disk size',
+ ttl: 3600 * 24, // 24h
+ },
+ },
+ },
+ }
+ else {},
+ },
+
+ local cfg = self._config.bloom_gateway,
+
+ local volumeName = name + '-data',
+
+ local volumes =
+ if cfg.use_local_ssd
+ then [volume.fromHostPath(volumeName, cfg.hostpath)]
+ else [],
+
+ local volumeMounts = [
+ volumeMount.new(volumeName, '/data'),
+ ],
+
+ bloom_gateway_args::
+ if $._config.use_bloom_filters
+ then
+ $._config.commonArgs {
+ target: 'bloom-gateway',
+ }
+ else {},
+
+ bloom_gateway_ports:: [
+ containerPort.new(name='http-metrics', port=$._config.http_listen_port),
+ containerPort.new(name='grpc', port=9095),
+ ],
+
+ bloom_gateway_data_pvc::
+ if $._config.use_bloom_filters && !cfg.use_local_ssd
+ then
+ pvc.new(volumeName)
+ // set disk size
+ + pvc.mixin.spec.resources.withRequests({ storage: $._config.bloom_gateway.pvc_size })
+ // mount the volume as read-write by a single node
+ + pvc.mixin.spec.withAccessModes(['ReadWriteOnce'])
+ // set persistent volume storage class
+      + pvc.mixin.spec.withStorageClassName($._config.bloom_gateway.pvc_class)
+ else
+ null,
+
+ bloom_gateway_container::
+ if $._config.use_bloom_filters
+ then
+ container.new(name, $._images.bloom_gateway)
+ // add default ports
+ + container.withPorts($.bloom_gateway_ports)
+ // add target specific CLI arguments
+ + container.withArgsMixin(k.util.mapToFlags($.bloom_gateway_args))
+ // mount local SSD or PVC
+ + container.withVolumeMountsMixin(volumeMounts)
+      // add global environment variables
+ + container.withEnvMixin($._config.commonEnvs)
+ // add HTTP readiness probe
+ + container.mixin.readinessProbe.httpGet.withPath('/ready')
+ + container.mixin.readinessProbe.httpGet.withPort($._config.http_listen_port)
+ + container.mixin.readinessProbe.withTimeoutSeconds(1)
+ // define container resource requests
+ + k.util.resourcesRequests('2', '4Gi')
+ // define container resource limits
+ + k.util.resourcesLimits(null, '8Gi')
+ else {},
+
+ bloom_gateway_statefulset:
+ if $._config.use_bloom_filters
+ then
+ statefulSet.new(name, cfg.replicas, [$.bloom_gateway_container])
+ // add clusterIP service
+ + statefulSet.mixin.spec.withServiceName(name)
+ // perform rolling update when statefulset configuration changes
+ + statefulSet.mixin.spec.updateStrategy.withType('RollingUpdate')
+ // launch or terminate pods in parallel, *does not* affect upgrades
+ + statefulSet.mixin.spec.withPodManagementPolicy('Parallel')
+ // 10001 is the user/group ID assigned to Loki in the Dockerfile
+ + statefulSet.mixin.spec.template.spec.securityContext.withRunAsUser(10001)
+ + statefulSet.mixin.spec.template.spec.securityContext.withRunAsGroup(10001)
+ + statefulSet.mixin.spec.template.spec.securityContext.withFsGroup(10001)
+ // ensure statefulset is updated when loki config changes
+ + $.config_hash_mixin
+      // ensure no two pods of this workload are scheduled on the same node
+ + k.util.antiAffinity
+ // mount the loki config.yaml
+ + k.util.configVolumeMount('loki', '/etc/loki/config')
+ // mount the runtime overrides.yaml
+ + k.util.configVolumeMount('overrides', '/etc/loki/overrides')
+ // configuration specific to SSD/PVC usage
+ + (
+ if cfg.use_local_ssd
+ then
+ // ensure the pod is scheduled on a node with local SSDs if needed
+ statefulSet.mixin.spec.template.spec.withNodeSelector(cfg.node_selector)
+ // tolerate the local-ssd taint
+ + statefulSet.mixin.spec.template.spec.withTolerationsMixin(cfg.tolerations)
+ // mount the local SSDs
+ + statefulSet.mixin.spec.template.spec.withVolumesMixin(volumes)
+ else
+ // create persistent volume claim
+ statefulSet.mixin.spec.withVolumeClaimTemplates([$.bloom_gateway_data_pvc])
+ )
+ else {},
+
+ bloom_gateway_service:
+ if $._config.use_bloom_filters
+ then
+ k.util.serviceFor($.bloom_gateway_statefulset, $._config.service_ignored_labels)
+ else {},
+
+ bloom_gateway_headless_service:
+ if $._config.use_bloom_filters
+ then
+ k.util.serviceFor($.bloom_gateway_statefulset, $._config.service_ignored_labels)
+ + service.mixin.metadata.withName(name + '-headless')
+ + service.mixin.spec.withClusterIp('None')
+ else {},
+}
diff --git a/production/ksonnet/loki/bloomfilters.libsonnet b/production/ksonnet/loki/bloomfilters.libsonnet
new file mode 100644
index 0000000000000..78231a808e1a0
--- /dev/null
+++ b/production/ksonnet/loki/bloomfilters.libsonnet
@@ -0,0 +1,8 @@
+{
+ _config+:: {
+ // globally enable/disable bloom gateway and bloom compactor
+ use_bloom_filters: false,
+ },
+}
++ (import 'bloom-compactor.libsonnet')
++ (import 'bloom-gateway.libsonnet')
diff --git a/production/ksonnet/loki/images.libsonnet b/production/ksonnet/loki/images.libsonnet
index 5cb79554ac1dc..0dc2bbe105ce9 100644
--- a/production/ksonnet/loki/images.libsonnet
+++ b/production/ksonnet/loki/images.libsonnet
@@ -16,5 +16,7 @@
compactor:: self.loki,
index_gateway:: self.loki,
overrides_exporter:: self.loki,
+ bloom_gateway:: self.loki,
+ bloom_compactor:: self.loki,
},
}
diff --git a/production/ksonnet/loki/loki.libsonnet b/production/ksonnet/loki/loki.libsonnet
index ad0489a69cd3f..871a68025e990 100644
--- a/production/ksonnet/loki/loki.libsonnet
+++ b/production/ksonnet/loki/loki.libsonnet
@@ -26,6 +26,9 @@
// BoltDB and TSDB Shipper support. Anything that modifies the compactor must be imported after this.
(import 'shipper.libsonnet') +
+// Accelerated search using bloom filters
+(import 'bloomfilters.libsonnet') +
+
(import 'table-manager.libsonnet') +
// Multi-zone ingester related config
diff --git a/production/ksonnet/loki/memberlist.libsonnet b/production/ksonnet/loki/memberlist.libsonnet
index 5bd95183c6bef..636fd90e1f0cb 100644
--- a/production/ksonnet/loki/memberlist.libsonnet
+++ b/production/ksonnet/loki/memberlist.libsonnet
@@ -159,4 +159,5 @@
// Disable the consul deployment if not migrating and using memberlist
consul_deployment: if $._config.memberlist_ring_enabled && !$._config.multikv_migration_enabled && !$._config.multikv_migration_teardown then {} else super.consul_deployment,
consul_service: if $._config.memberlist_ring_enabled && !$._config.multikv_migration_enabled && !$._config.multikv_migration_teardown then {} else super.consul_service,
+ consul_config_map: if $._config.memberlist_ring_enabled && !$._config.multikv_migration_enabled && !$._config.multikv_migration_teardown then {} else super.consul_config_map,
}
diff --git a/production/ksonnet/loki/shipper.libsonnet b/production/ksonnet/loki/shipper.libsonnet
index 374a797eba0c9..18f38f3ab89e8 100644
--- a/production/ksonnet/loki/shipper.libsonnet
+++ b/production/ksonnet/loki/shipper.libsonnet
@@ -19,17 +19,16 @@
compactor_pvc_class: 'fast',
index_period_hours: if self.using_shipper_store then 24 else super.index_period_hours,
loki+: if self.using_shipper_store then {
- storage_config+: if $._config.using_boltdb_shipper then {
- boltdb_shipper+: {
+ storage_config+: {
+ boltdb_shipper+: if $._config.using_boltdb_shipper then {
active_index_directory: '/data/index',
cache_location: '/data/boltdb-cache',
- },
- } else {} + if $._config.using_tsdb_shipper then {
- tsdb_shipper+: {
+ } else {},
+ tsdb_shipper+: if $._config.using_tsdb_shipper then {
active_index_directory: '/data/tsdb-index',
cache_location: '/data/tsdb-cache',
- },
- } else {},
+ } else {},
+ },
compactor+: {
working_directory: '/data/compactor',
},
diff --git a/production/loki-mixin-compiled-ssd/dashboards/loki-logs.json b/production/loki-mixin-compiled-ssd/dashboards/loki-logs.json
index e2e402eab3b51..d1a2ebaae5a70 100644
--- a/production/loki-mixin-compiled-ssd/dashboards/loki-logs.json
+++ b/production/loki-mixin-compiled-ssd/dashboards/loki-logs.json
@@ -77,7 +77,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -164,7 +164,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -250,7 +250,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -336,7 +336,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -422,7 +422,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -508,7 +508,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -595,7 +595,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -682,7 +682,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -787,7 +787,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
diff --git a/production/loki-mixin-compiled-ssd/dashboards/loki-operational.json b/production/loki-mixin-compiled-ssd/dashboards/loki-operational.json
index c40cdb516a28c..71e950931e0e8 100644
--- a/production/loki-mixin-compiled-ssd/dashboards/loki-operational.json
+++ b/production/loki-mixin-compiled-ssd/dashboards/loki-operational.json
@@ -102,7 +102,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -198,7 +198,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -293,7 +293,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -389,7 +389,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -485,7 +485,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -591,7 +591,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -697,7 +697,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -794,7 +794,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -903,7 +903,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -1000,7 +1000,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -1109,7 +1109,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -1215,7 +1215,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -1312,7 +1312,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -1420,7 +1420,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -1517,7 +1517,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -1618,7 +1618,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -1815,7 +1815,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -1907,7 +1907,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -1999,7 +1999,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -2116,7 +2116,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -2205,7 +2205,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -2294,7 +2294,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -2383,7 +2383,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -2490,7 +2490,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -2581,7 +2581,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -2697,7 +2697,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -2850,7 +2850,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -3022,7 +3022,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -3114,7 +3114,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -3206,7 +3206,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -3323,7 +3323,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -3443,7 +3443,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -3535,7 +3535,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -3655,7 +3655,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -3747,7 +3747,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -3861,7 +3861,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -3964,7 +3964,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -4067,7 +4067,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -4165,7 +4165,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -4255,7 +4255,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -4345,7 +4345,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -4435,7 +4435,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -4525,7 +4525,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -4645,7 +4645,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -4737,7 +4737,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -4839,7 +4839,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -4925,7 +4925,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -5011,7 +5011,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -5097,7 +5097,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -5194,7 +5194,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -5297,7 +5297,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -5388,7 +5388,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -5507,7 +5507,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -5598,7 +5598,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -5717,7 +5717,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -5808,7 +5808,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -5927,7 +5927,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -6018,7 +6018,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
diff --git a/production/loki-mixin-compiled-ssd/dashboards/loki-reads.json b/production/loki-mixin-compiled-ssd/dashboards/loki-reads.json
index 1c563628fdabc..fcf6c120fa606 100644
--- a/production/loki-mixin-compiled-ssd/dashboards/loki-reads.json
+++ b/production/loki-mixin-compiled-ssd/dashboards/loki-reads.json
@@ -217,9 +217,9 @@
"group": "A",
"mode": "normal"
}
- }
- },
- "unit": "s"
+ },
+ "unit": "s"
+ }
},
"fill": 1,
"id": 3,
@@ -493,9 +493,9 @@
"group": "A",
"mode": "normal"
}
- }
- },
- "unit": "s"
+ },
+ "unit": "s"
+ }
},
"fill": 1,
"id": 6,
diff --git a/production/loki-mixin-compiled-ssd/dashboards/loki-retention.json b/production/loki-mixin-compiled-ssd/dashboards/loki-retention.json
index 73791bf2b11e1..95bc7b6e0f83b 100644
--- a/production/loki-mixin-compiled-ssd/dashboards/loki-retention.json
+++ b/production/loki-mixin-compiled-ssd/dashboards/loki-retention.json
@@ -375,7 +375,7 @@
"renderer": "flot",
"seriesOverrides": [ ],
"spaceLength": 10,
- "span": 4,
+ "span": 6,
"stack": false,
"steppedLine": false,
"targets": [
@@ -389,7 +389,7 @@
"thresholds": [ ],
"timeFrom": null,
"timeShift": null,
- "title": "Last Compact and Mark Operation Success",
+ "title": "Last Compact Tables Operation Success",
"tooltip": {
"shared": true,
"sort": 2,
@@ -449,7 +449,7 @@
"renderer": "flot",
"seriesOverrides": [ ],
"spaceLength": 10,
- "span": 4,
+ "span": 6,
"stack": false,
"steppedLine": false,
"targets": [
@@ -465,7 +465,7 @@
"thresholds": [ ],
"timeFrom": null,
"timeShift": null,
- "title": "Compact and Mark Operations Duration",
+ "title": "Compact Tables Operations Duration",
"tooltip": {
"shared": true,
"sort": 2,
@@ -497,7 +497,19 @@
"show": false
}
]
- },
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Compaction",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
{
"aliasColors": { },
"bars": false,
@@ -525,7 +537,83 @@
"renderer": "flot",
"seriesOverrides": [ ],
"spaceLength": 10,
- "span": 4,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(increase(loki_compactor_skipped_compacting_locked_table_total{cluster=~\"$cluster\", namespace=~\"$namespace\"}[$__range]))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{table_name}}",
+ "legendLink": null,
+ "step": 10
+ }
+ ],
+ "thresholds": [ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Number of times Tables were skipped during Compaction",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ },
+ {
+ "aliasColors": { },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "id": 7,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [ ],
+ "spaceLength": 10,
+ "span": 6,
"stack": false,
"steppedLine": false,
"targets": [
@@ -541,7 +629,279 @@
"thresholds": [ ],
"timeFrom": null,
"timeShift": null,
- "title": "Compact and Mark Operations Per Status",
+ "title": "Compact Tables Operations Per Status",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": { },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "blue",
+ "mode": "fixed"
+ },
+ "custom": { },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ },
+ "unit": "dateTimeFromNow"
+ }
+ },
+ "fill": 1,
+ "id": 8,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [ ],
+ "nullPointMode": "null as zero",
+ "options": {
+ "colorMode": "value",
+ "graphMode": "area",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "text": { },
+ "textMode": "auto"
+ },
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [ ],
+ "spaceLength": 10,
+ "span": 4,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "loki_compactor_apply_retention_last_successful_run_timestamp_seconds{cluster=~\"$cluster\", namespace=~\"$namespace\"} * 1e3",
+ "format": "time_series",
+ "instant": true,
+ "refId": "A"
+ }
+ ],
+ "thresholds": [ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Last Mark Operation Success",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "stat",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ },
+ {
+ "aliasColors": { },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "id": 9,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [ ],
+ "spaceLength": 10,
+ "span": 4,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "loki_compactor_apply_retention_operation_duration_seconds{cluster=~\"$cluster\", namespace=~\"$namespace\"}",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "duration",
+ "legendLink": null,
+ "step": 10
+ }
+ ],
+ "thresholds": [ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Mark Operations Duration",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [ ]
+ },
+ "yaxes": [
+ {
+ "format": "s",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ },
+ {
+ "aliasColors": { },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "id": 10,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [ ],
+ "spaceLength": 10,
+ "span": 4,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum by (status)(rate(loki_compactor_apply_retention_operation_total{cluster=~\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval]))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{success}}",
+ "legendLink": null,
+ "step": 10
+ }
+ ],
+ "thresholds": [ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Mark Operations Per Status",
"tooltip": {
"shared": true,
"sort": 2,
@@ -579,7 +939,7 @@
"repeatIteration": null,
"repeatRowId": null,
"showTitle": true,
- "title": "Compact and Mark",
+ "title": "Retention",
"titleSize": "h6"
},
{
@@ -593,7 +953,7 @@
"dashes": false,
"datasource": "$datasource",
"fill": 10,
- "id": 7,
+ "id": 11,
"legend": {
"avg": false,
"current": false,
@@ -669,7 +1029,7 @@
"dashes": false,
"datasource": "$datasource",
"fill": 10,
- "id": 8,
+ "id": 12,
"legend": {
"avg": false,
"current": false,
@@ -745,7 +1105,7 @@
"dashes": false,
"datasource": "$datasource",
"fill": 10,
- "id": 9,
+ "id": 13,
"legend": {
"avg": false,
"current": false,
@@ -834,7 +1194,7 @@
"datasource": "$datasource",
"fill": 1,
"format": "short",
- "id": 10,
+ "id": 14,
"legend": {
"avg": false,
"current": false,
@@ -909,7 +1269,7 @@
"dashes": false,
"datasource": "$datasource",
"fill": 1,
- "id": 11,
+ "id": 15,
"legend": {
"avg": false,
"current": false,
@@ -1014,7 +1374,7 @@
"datasource": "$datasource",
"fill": 1,
"format": "short",
- "id": 12,
+ "id": 16,
"legend": {
"avg": false,
"current": false,
@@ -1089,7 +1449,7 @@
"dashes": false,
"datasource": "$datasource",
"fill": 1,
- "id": 13,
+ "id": 17,
"legend": {
"avg": false,
"current": false,
@@ -1193,7 +1553,7 @@
"dashes": false,
"datasource": "$datasource",
"fill": 1,
- "id": 14,
+ "id": 18,
"legend": {
"avg": false,
"current": false,
@@ -1269,7 +1629,7 @@
"dashes": false,
"datasource": "$datasource",
"fill": 1,
- "id": 15,
+ "id": 19,
"legend": {
"avg": false,
"current": false,
@@ -1345,7 +1705,7 @@
"dashes": false,
"datasource": "$datasource",
"fill": 1,
- "id": 16,
+ "id": 20,
"legend": {
"avg": false,
"current": false,
@@ -1428,7 +1788,7 @@
"panels": [
{
"datasource": "$loki_datasource",
- "id": 17,
+ "id": 21,
"span": 12,
"targets": [
{
diff --git a/production/loki-mixin-compiled-ssd/dashboards/loki-writes.json b/production/loki-mixin-compiled-ssd/dashboards/loki-writes.json
index 17d54cf81f788..bcd620e69e4a9 100644
--- a/production/loki-mixin-compiled-ssd/dashboards/loki-writes.json
+++ b/production/loki-mixin-compiled-ssd/dashboards/loki-writes.json
@@ -210,6 +210,170 @@
"title": "Write Path",
"titleSize": "h6"
},
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": { },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "id": 3,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [ ],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum (rate(loki_distributor_structured_metadata_bytes_received_total{cluster=~\"$cluster\",job=~\"($namespace)/(loki|enterprise-logs)-write\",}[$__rate_interval])) / sum(rate(loki_distributor_bytes_received_total{cluster=~\"$cluster\",job=~\"($namespace)/(loki|enterprise-logs)-write\",}[$__rate_interval]))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "bytes",
+ "legendLink": null,
+ "step": 10
+ }
+ ],
+ "thresholds": [ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Per Total Received Bytes",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ },
+ {
+ "aliasColors": { },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "id": 4,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [ ],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum by (tenant) (rate(loki_distributor_structured_metadata_bytes_received_total{cluster=~\"$cluster\",job=~\"($namespace)/(loki|enterprise-logs)-write\",}[$__rate_interval])) / ignoring(tenant) group_left sum(rate(loki_distributor_structured_metadata_bytes_received_total{cluster=~\"$cluster\",job=~\"($namespace)/(loki|enterprise-logs)-write\",}[$__rate_interval]))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{tenant}}",
+ "legendLink": null,
+ "step": 10
+ }
+ ],
+ "thresholds": [ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Per Tenant",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": 1,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": 1,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Write Path",
+ "titleSize": "h6"
+ },
{
"collapse": false,
"height": "250px",
@@ -229,7 +393,7 @@
"dashes": false,
"datasource": "$datasource",
"fill": 10,
- "id": 3,
+ "id": 5,
"legend": {
"avg": false,
"current": false,
@@ -305,7 +469,7 @@
"dashes": false,
"datasource": "$datasource",
"fill": 1,
- "id": 4,
+ "id": 6,
"legend": {
"avg": false,
"current": false,
diff --git a/production/loki-mixin-compiled/dashboards/loki-logs.json b/production/loki-mixin-compiled/dashboards/loki-logs.json
index e2e402eab3b51..d1a2ebaae5a70 100644
--- a/production/loki-mixin-compiled/dashboards/loki-logs.json
+++ b/production/loki-mixin-compiled/dashboards/loki-logs.json
@@ -77,7 +77,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -164,7 +164,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -250,7 +250,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -336,7 +336,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -422,7 +422,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -508,7 +508,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -595,7 +595,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -682,7 +682,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -787,7 +787,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
diff --git a/production/loki-mixin-compiled/dashboards/loki-operational.json b/production/loki-mixin-compiled/dashboards/loki-operational.json
index 5f04aadc665e8..de4735b4bae6d 100644
--- a/production/loki-mixin-compiled/dashboards/loki-operational.json
+++ b/production/loki-mixin-compiled/dashboards/loki-operational.json
@@ -102,7 +102,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -198,7 +198,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -294,7 +294,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -389,7 +389,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -485,7 +485,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -581,7 +581,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -687,7 +687,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -793,7 +793,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -890,7 +890,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -999,7 +999,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -1096,7 +1096,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -1205,7 +1205,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -1311,7 +1311,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -1408,7 +1408,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -1516,7 +1516,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -1613,7 +1613,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -1714,7 +1714,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -1911,7 +1911,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -2003,7 +2003,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -2095,7 +2095,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -2212,7 +2212,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -2301,7 +2301,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -2390,7 +2390,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -2479,7 +2479,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -2584,7 +2584,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -2676,7 +2676,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -2768,7 +2768,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -2885,7 +2885,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -2992,7 +2992,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -3083,7 +3083,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -3199,7 +3199,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -3352,7 +3352,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -3524,7 +3524,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -3616,7 +3616,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -3708,7 +3708,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -3825,7 +3825,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -3945,7 +3945,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -4037,7 +4037,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -4157,7 +4157,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -4249,7 +4249,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -4363,7 +4363,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -4466,7 +4466,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -4569,7 +4569,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -4667,7 +4667,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -4757,7 +4757,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -4847,7 +4847,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -4937,7 +4937,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -5027,7 +5027,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -5147,7 +5147,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -5239,7 +5239,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -5341,7 +5341,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -5427,7 +5427,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -5513,7 +5513,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -5599,7 +5599,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -5696,7 +5696,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -5799,7 +5799,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -5890,7 +5890,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -6009,7 +6009,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -6100,7 +6100,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -6219,7 +6219,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -6310,7 +6310,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -6429,7 +6429,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -6520,7 +6520,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
diff --git a/production/loki-mixin-compiled/dashboards/loki-reads.json b/production/loki-mixin-compiled/dashboards/loki-reads.json
index a35120412a3a1..085275b1e4fde 100644
--- a/production/loki-mixin-compiled/dashboards/loki-reads.json
+++ b/production/loki-mixin-compiled/dashboards/loki-reads.json
@@ -217,9 +217,9 @@
"group": "A",
"mode": "normal"
}
- }
- },
- "unit": "s"
+ },
+ "unit": "s"
+ }
},
"fill": 1,
"id": 3,
@@ -493,9 +493,9 @@
"group": "A",
"mode": "normal"
}
- }
- },
- "unit": "s"
+ },
+ "unit": "s"
+ }
},
"fill": 1,
"id": 6,
@@ -769,9 +769,9 @@
"group": "A",
"mode": "normal"
}
- }
- },
- "unit": "s"
+ },
+ "unit": "s"
+ }
},
"fill": 1,
"id": 9,
@@ -1045,9 +1045,9 @@
"group": "A",
"mode": "normal"
}
- }
- },
- "unit": "s"
+ },
+ "unit": "s"
+ }
},
"fill": 1,
"id": 12,
@@ -1321,9 +1321,9 @@
"group": "A",
"mode": "normal"
}
- }
- },
- "unit": "s"
+ },
+ "unit": "s"
+ }
},
"fill": 1,
"id": 15,
@@ -1597,9 +1597,9 @@
"group": "A",
"mode": "normal"
}
- }
- },
- "unit": "s"
+ },
+ "unit": "s"
+ }
},
"fill": 1,
"id": 18,
diff --git a/production/loki-mixin-compiled/dashboards/loki-retention.json b/production/loki-mixin-compiled/dashboards/loki-retention.json
index fc8f9e5619757..a266d15734208 100644
--- a/production/loki-mixin-compiled/dashboards/loki-retention.json
+++ b/production/loki-mixin-compiled/dashboards/loki-retention.json
@@ -375,7 +375,7 @@
"renderer": "flot",
"seriesOverrides": [ ],
"spaceLength": 10,
- "span": 4,
+ "span": 6,
"stack": false,
"steppedLine": false,
"targets": [
@@ -389,7 +389,7 @@
"thresholds": [ ],
"timeFrom": null,
"timeShift": null,
- "title": "Last Compact and Mark Operation Success",
+ "title": "Last Compact Tables Operation Success",
"tooltip": {
"shared": true,
"sort": 2,
@@ -449,7 +449,7 @@
"renderer": "flot",
"seriesOverrides": [ ],
"spaceLength": 10,
- "span": 4,
+ "span": 6,
"stack": false,
"steppedLine": false,
"targets": [
@@ -465,7 +465,7 @@
"thresholds": [ ],
"timeFrom": null,
"timeShift": null,
- "title": "Compact and Mark Operations Duration",
+ "title": "Compact Tables Operations Duration",
"tooltip": {
"shared": true,
"sort": 2,
@@ -497,7 +497,19 @@
"show": false
}
]
- },
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Compaction",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
{
"aliasColors": { },
"bars": false,
@@ -525,7 +537,83 @@
"renderer": "flot",
"seriesOverrides": [ ],
"spaceLength": 10,
- "span": 4,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(increase(loki_compactor_skipped_compacting_locked_table_total{cluster=~\"$cluster\", namespace=~\"$namespace\"}[$__range]))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{table_name}}",
+ "legendLink": null,
+ "step": 10
+ }
+ ],
+ "thresholds": [ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Number of times Tables were skipped during Compaction",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ },
+ {
+ "aliasColors": { },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "id": 7,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [ ],
+ "spaceLength": 10,
+ "span": 6,
"stack": false,
"steppedLine": false,
"targets": [
@@ -541,7 +629,279 @@
"thresholds": [ ],
"timeFrom": null,
"timeShift": null,
- "title": "Compact and Mark Operations Per Status",
+ "title": "Compact Tables Operations Per Status",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": { },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "blue",
+ "mode": "fixed"
+ },
+ "custom": { },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ },
+ "unit": "dateTimeFromNow"
+ }
+ },
+ "fill": 1,
+ "id": 8,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [ ],
+ "nullPointMode": "null as zero",
+ "options": {
+ "colorMode": "value",
+ "graphMode": "area",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "text": { },
+ "textMode": "auto"
+ },
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [ ],
+ "spaceLength": 10,
+ "span": 4,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "loki_compactor_apply_retention_last_successful_run_timestamp_seconds{cluster=~\"$cluster\", namespace=~\"$namespace\"} * 1e3",
+ "format": "time_series",
+ "instant": true,
+ "refId": "A"
+ }
+ ],
+ "thresholds": [ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Last Mark Operation Success",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "stat",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ },
+ {
+ "aliasColors": { },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "id": 9,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [ ],
+ "spaceLength": 10,
+ "span": 4,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "loki_compactor_apply_retention_operation_duration_seconds{cluster=~\"$cluster\", namespace=~\"$namespace\"}",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "duration",
+ "legendLink": null,
+ "step": 10
+ }
+ ],
+ "thresholds": [ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Mark Operations Duration",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [ ]
+ },
+ "yaxes": [
+ {
+ "format": "s",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ },
+ {
+ "aliasColors": { },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "id": 10,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [ ],
+ "spaceLength": 10,
+ "span": 4,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum by (status)(rate(loki_compactor_apply_retention_operation_total{cluster=~\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval]))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{success}}",
+ "legendLink": null,
+ "step": 10
+ }
+ ],
+ "thresholds": [ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Mark Operations Per Status",
"tooltip": {
"shared": true,
"sort": 2,
@@ -579,7 +939,7 @@
"repeatIteration": null,
"repeatRowId": null,
"showTitle": true,
- "title": "Compact and Mark",
+ "title": "Retention",
"titleSize": "h6"
},
{
@@ -593,7 +953,7 @@
"dashes": false,
"datasource": "$datasource",
"fill": 10,
- "id": 7,
+ "id": 11,
"legend": {
"avg": false,
"current": false,
@@ -669,7 +1029,7 @@
"dashes": false,
"datasource": "$datasource",
"fill": 10,
- "id": 8,
+ "id": 12,
"legend": {
"avg": false,
"current": false,
@@ -745,7 +1105,7 @@
"dashes": false,
"datasource": "$datasource",
"fill": 10,
- "id": 9,
+ "id": 13,
"legend": {
"avg": false,
"current": false,
@@ -834,7 +1194,7 @@
"datasource": "$datasource",
"fill": 1,
"format": "short",
- "id": 10,
+ "id": 14,
"legend": {
"avg": false,
"current": false,
@@ -909,7 +1269,7 @@
"dashes": false,
"datasource": "$datasource",
"fill": 1,
- "id": 11,
+ "id": 15,
"legend": {
"avg": false,
"current": false,
@@ -1014,7 +1374,7 @@
"datasource": "$datasource",
"fill": 1,
"format": "short",
- "id": 12,
+ "id": 16,
"legend": {
"avg": false,
"current": false,
@@ -1089,7 +1449,7 @@
"dashes": false,
"datasource": "$datasource",
"fill": 1,
- "id": 13,
+ "id": 17,
"legend": {
"avg": false,
"current": false,
@@ -1193,7 +1553,7 @@
"dashes": false,
"datasource": "$datasource",
"fill": 1,
- "id": 14,
+ "id": 18,
"legend": {
"avg": false,
"current": false,
@@ -1269,7 +1629,7 @@
"dashes": false,
"datasource": "$datasource",
"fill": 1,
- "id": 15,
+ "id": 19,
"legend": {
"avg": false,
"current": false,
@@ -1345,7 +1705,7 @@
"dashes": false,
"datasource": "$datasource",
"fill": 1,
- "id": 16,
+ "id": 20,
"legend": {
"avg": false,
"current": false,
@@ -1428,7 +1788,7 @@
"panels": [
{
"datasource": "$loki_datasource",
- "id": 17,
+ "id": 21,
"span": 12,
"targets": [
{
diff --git a/production/loki-mixin-compiled/dashboards/loki-writes.json b/production/loki-mixin-compiled/dashboards/loki-writes.json
index 3946e7897c496..fdb347f56055f 100644
--- a/production/loki-mixin-compiled/dashboards/loki-writes.json
+++ b/production/loki-mixin-compiled/dashboards/loki-writes.json
@@ -210,6 +210,170 @@
"title": "Distributor",
"titleSize": "h6"
},
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": { },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "id": 3,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [ ],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum (rate(loki_distributor_structured_metadata_bytes_received_total{cluster=~\"$cluster\",job=~\"($namespace)/distributor\",}[$__rate_interval])) / sum(rate(loki_distributor_bytes_received_total{cluster=~\"$cluster\",job=~\"($namespace)/distributor\",}[$__rate_interval]))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "bytes",
+ "legendLink": null,
+ "step": 10
+ }
+ ],
+ "thresholds": [ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Per Total Received Bytes",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ },
+ {
+ "aliasColors": { },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "id": 4,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [ ],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum by (tenant) (rate(loki_distributor_structured_metadata_bytes_received_total{cluster=~\"$cluster\",job=~\"($namespace)/distributor\",}[$__rate_interval])) / ignoring(tenant) group_left sum(rate(loki_distributor_structured_metadata_bytes_received_total{cluster=~\"$cluster\",job=~\"($namespace)/distributor\",}[$__rate_interval]))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{tenant}}",
+ "legendLink": null,
+ "step": 10
+ }
+ ],
+ "thresholds": [ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Per Tenant",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": 1,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": 1,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Distributor - Structured Metadata",
+ "titleSize": "h6"
+ },
{
"collapse": false,
"height": "250px",
@@ -229,7 +393,7 @@
"dashes": false,
"datasource": "$datasource",
"fill": 10,
- "id": 3,
+ "id": 5,
"legend": {
"avg": false,
"current": false,
@@ -305,7 +469,7 @@
"dashes": false,
"datasource": "$datasource",
"fill": 1,
- "id": 4,
+ "id": 6,
"legend": {
"avg": false,
"current": false,
@@ -417,7 +581,7 @@
"dashes": false,
"datasource": "$datasource",
"fill": 10,
- "id": 5,
+ "id": 7,
"legend": {
"avg": false,
"current": false,
@@ -493,7 +657,7 @@
"dashes": false,
"datasource": "$datasource",
"fill": 1,
- "id": 6,
+ "id": 8,
"legend": {
"avg": false,
"current": false,
@@ -605,7 +769,7 @@
"dashes": false,
"datasource": "$datasource",
"fill": 10,
- "id": 7,
+ "id": 9,
"legend": {
"avg": false,
"current": false,
@@ -681,7 +845,7 @@
"dashes": false,
"datasource": "$datasource",
"fill": 1,
- "id": 8,
+ "id": 10,
"legend": {
"avg": false,
"current": false,
@@ -793,7 +957,7 @@
"dashes": false,
"datasource": "$datasource",
"fill": 10,
- "id": 9,
+ "id": 11,
"legend": {
"avg": false,
"current": false,
@@ -869,7 +1033,7 @@
"dashes": false,
"datasource": "$datasource",
"fill": 1,
- "id": 10,
+ "id": 12,
"legend": {
"avg": false,
"current": false,
diff --git a/production/loki-mixin/config.libsonnet b/production/loki-mixin/config.libsonnet
index e0b09677ea45c..1fa22f566cc69 100644
--- a/production/loki-mixin/config.libsonnet
+++ b/production/loki-mixin/config.libsonnet
@@ -20,6 +20,9 @@
enabled: true,
},
+ // Enable TSDB specific dashboards
+ tsdb: true,
+
// SSD related configuration for dashboards.
ssd: {
// Support Loki SSD mode on dashboards.
diff --git a/production/loki-mixin/dashboards/dashboard-loki-logs.json b/production/loki-mixin/dashboards/dashboard-loki-logs.json
index 916a4acc12a80..bcb5737aab52c 100644
--- a/production/loki-mixin/dashboards/dashboard-loki-logs.json
+++ b/production/loki-mixin/dashboards/dashboard-loki-logs.json
@@ -79,7 +79,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -166,7 +166,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -252,7 +252,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -338,7 +338,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -424,7 +424,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -510,7 +510,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -597,7 +597,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -684,7 +684,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -789,7 +789,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
diff --git a/production/loki-mixin/dashboards/dashboard-loki-operational.json b/production/loki-mixin/dashboards/dashboard-loki-operational.json
index e1a9ddbf68b4c..2dd944c202984 100644
--- a/production/loki-mixin/dashboards/dashboard-loki-operational.json
+++ b/production/loki-mixin/dashboards/dashboard-loki-operational.json
@@ -105,7 +105,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -200,7 +200,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -295,7 +295,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -389,7 +389,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -484,7 +484,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -579,7 +579,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -684,7 +684,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -789,7 +789,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -885,7 +885,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -993,7 +993,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -1089,7 +1089,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -1197,7 +1197,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -1302,7 +1302,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -1398,7 +1398,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -1505,7 +1505,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -1601,7 +1601,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -1701,7 +1701,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -1895,7 +1895,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -1986,7 +1986,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -2077,7 +2077,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -2192,7 +2192,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -2280,7 +2280,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -2368,7 +2368,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -2456,7 +2456,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -2559,7 +2559,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -2650,7 +2650,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -2741,7 +2741,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -2856,7 +2856,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -2961,7 +2961,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -3051,7 +3051,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -3165,7 +3165,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -3316,7 +3316,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -3485,7 +3485,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -3576,7 +3576,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -3667,7 +3667,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -3782,7 +3782,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -3900,7 +3900,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -3991,7 +3991,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -4109,7 +4109,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -4200,7 +4200,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -4312,7 +4312,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -4414,7 +4414,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -4516,7 +4516,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -4613,7 +4613,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -4702,7 +4702,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -4791,7 +4791,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -4880,7 +4880,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -4969,7 +4969,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -5087,7 +5087,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -5178,7 +5178,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -5278,7 +5278,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -5363,7 +5363,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -5448,7 +5448,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -5533,7 +5533,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -5629,7 +5629,7 @@
"sort": 0,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -5731,7 +5731,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -5821,7 +5821,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -5938,7 +5938,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -6028,7 +6028,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -6145,7 +6145,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -6235,7 +6235,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -6352,7 +6352,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -6442,7 +6442,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -6559,7 +6559,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -6649,7 +6649,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "graph",
+ "type": "timeseries",
"xaxis": {
"buckets": null,
"mode": "time",
diff --git a/production/loki-mixin/dashboards/loki-reads.libsonnet b/production/loki-mixin/dashboards/loki-reads.libsonnet
index 2e9de3d88195f..538cade448fac 100644
--- a/production/loki-mixin/dashboards/loki-reads.libsonnet
+++ b/production/loki-mixin/dashboards/loki-reads.libsonnet
@@ -37,8 +37,8 @@ local utils = import 'mixin-utils/utils.libsonnet';
mode: 'normal',
},
},
+ unit: 's',
},
- unit: 's',
},
},
diff --git a/production/loki-mixin/dashboards/loki-retention.libsonnet b/production/loki-mixin/dashboards/loki-retention.libsonnet
index 8e28ccdb0e7a7..a5aa45a13d756 100644
--- a/production/loki-mixin/dashboards/loki-retention.libsonnet
+++ b/production/loki-mixin/dashboards/loki-retention.libsonnet
@@ -25,20 +25,42 @@ local utils = import 'mixin-utils/utils.libsonnet';
)
.addRow(
- $.row('Compact and Mark')
+ $.row('Compaction')
.addPanel(
- $.fromNowPanel('Last Compact and Mark Operation Success', 'loki_boltdb_shipper_compact_tables_operation_last_successful_run_timestamp_seconds')
+ $.fromNowPanel('Last Compact Tables Operation Success', 'loki_boltdb_shipper_compact_tables_operation_last_successful_run_timestamp_seconds')
)
.addPanel(
- $.panel('Compact and Mark Operations Duration') +
+ $.panel('Compact Tables Operations Duration') +
$.queryPanel(['loki_boltdb_shipper_compact_tables_operation_duration_seconds{%s}' % $.namespaceMatcher()], ['duration']) +
{ yaxes: $.yaxes('s') },
)
+ )
+ .addRow(
+ $.row('')
.addPanel(
- $.panel('Compact and Mark Operations Per Status') +
+ $.panel('Number of times Tables were skipped during Compaction') +
+ $.queryPanel(['sum(increase(loki_compactor_skipped_compacting_locked_table_total{%s}[$__range]))' % $.namespaceMatcher()], ['{{table_name}}']),
+ )
+ .addPanel(
+ $.panel('Compact Tables Operations Per Status') +
$.queryPanel(['sum by (status)(rate(loki_boltdb_shipper_compact_tables_operation_total{%s}[$__rate_interval]))' % $.namespaceMatcher()], ['{{success}}']),
)
)
+ .addRow(
+ $.row('Retention')
+ .addPanel(
+ $.fromNowPanel('Last Mark Operation Success', 'loki_compactor_apply_retention_last_successful_run_timestamp_seconds')
+ )
+ .addPanel(
+ $.panel('Mark Operations Duration') +
+ $.queryPanel(['loki_compactor_apply_retention_operation_duration_seconds{%s}' % $.namespaceMatcher()], ['duration']) +
+ { yaxes: $.yaxes('s') },
+ )
+ .addPanel(
+ $.panel('Mark Operations Per Status') +
+ $.queryPanel(['sum by (status)(rate(loki_compactor_apply_retention_operation_total{%s}[$__rate_interval]))' % $.namespaceMatcher()], ['{{success}}']),
+ )
+ )
.addRow(
$.row('Per Table Marker')
.addPanel(
diff --git a/production/loki-mixin/dashboards/loki-writes.libsonnet b/production/loki-mixin/dashboards/loki-writes.libsonnet
index df710bb0700e4..a12f4f7cea6e0 100644
--- a/production/loki-mixin/dashboards/loki-writes.libsonnet
+++ b/production/loki-mixin/dashboards/loki-writes.libsonnet
@@ -69,6 +69,24 @@ local utils = import 'mixin-utils/utils.libsonnet';
)
)
)
+ .addRowIf(
+ $._config.tsdb,
+ $.row(if $._config.ssd.enabled then 'Write Path' else 'Distributor - Structured Metadata')
+ .addPanel(
+ $.panel('Per Total Received Bytes') +
+ $.queryPanel('sum (rate(loki_distributor_structured_metadata_bytes_received_total{%s}[$__rate_interval])) / sum(rate(loki_distributor_bytes_received_total{%s}[$__rate_interval]))' % [dashboards['loki-writes.json'].distributorSelector, dashboards['loki-writes.json'].distributorSelector], 'bytes')
+ )
+ .addPanel(
+ $.panel('Per Tenant') +
+ $.queryPanel('sum by (tenant) (rate(loki_distributor_structured_metadata_bytes_received_total{%s}[$__rate_interval])) / ignoring(tenant) group_left sum(rate(loki_distributor_structured_metadata_bytes_received_total{%s}[$__rate_interval]))' % [dashboards['loki-writes.json'].distributorSelector, dashboards['loki-writes.json'].distributorSelector], '{{tenant}}') + {
+ stack: true,
+ yaxes: [
+ { format: 'short', label: null, logBase: 1, max: 1, min: 0, show: true },
+ { format: 'short', label: null, logBase: 1, max: 1, min: null, show: false },
+ ],
+ },
+ )
+ )
.addRowIf(
!$._config.ssd.enabled,
$.row('Ingester - Zone Aware')
diff --git a/tools/dev/loki-boltdb-storage-s3/compose-up.sh b/tools/dev/loki-boltdb-storage-s3/compose-up.sh
index 1841f312ca33f..2d26a83123c9e 100755
--- a/tools/dev/loki-boltdb-storage-s3/compose-up.sh
+++ b/tools/dev/loki-boltdb-storage-s3/compose-up.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
set -e
diff --git a/tools/dev/loki-boltdb-storage-s3/config/loki.yaml b/tools/dev/loki-boltdb-storage-s3/config/loki.yaml
index de0dbd713d92c..83149885fe85b 100644
--- a/tools/dev/loki-boltdb-storage-s3/config/loki.yaml
+++ b/tools/dev/loki-boltdb-storage-s3/config/loki.yaml
@@ -108,6 +108,7 @@ schema_config:
object_store: s3
schema: v11
store: boltdb-shipper
+ row_shards: 4
server:
graceful_shutdown_timeout: 5s
grpc_server_max_concurrent_streams: 1000
diff --git a/tools/dev/loki-boltdb-storage-s3/dev.dockerfile b/tools/dev/loki-boltdb-storage-s3/dev.dockerfile
index 3b8912b4120ab..4a2a420fd0938 100644
--- a/tools/dev/loki-boltdb-storage-s3/dev.dockerfile
+++ b/tools/dev/loki-boltdb-storage-s3/dev.dockerfile
@@ -2,7 +2,7 @@ FROM golang:1.20.4
ENV CGO_ENABLED=0
RUN go install github.com/go-delve/delve/cmd/dlv@v1.21.1
-FROM alpine:3.18.3
+FROM alpine:3.18.4
RUN mkdir /loki
WORKDIR /loki
diff --git a/tools/lambda-promtail/Dockerfile b/tools/lambda-promtail/Dockerfile
index 8e94327990996..bac1cdf258f2a 100644
--- a/tools/lambda-promtail/Dockerfile
+++ b/tools/lambda-promtail/Dockerfile
@@ -12,7 +12,7 @@ RUN go mod download
RUN go build -o ./main -tags lambda.norpc -ldflags="-s -w" lambda-promtail/*.go
-FROM alpine:3.18.3
+FROM alpine:3.18.4
WORKDIR /app
diff --git a/tools/lambda-promtail/lambda-promtail/cw.go b/tools/lambda-promtail/lambda-promtail/cw.go
index 895cd66c8f450..1ad6bf34878ed 100644
--- a/tools/lambda-promtail/lambda-promtail/cw.go
+++ b/tools/lambda-promtail/lambda-promtail/cw.go
@@ -18,6 +18,7 @@ func parseCWEvent(ctx context.Context, b *batch, ev *events.CloudwatchLogsEvent)
}
labels := model.LabelSet{
+ model.LabelName("__aws_log_type"): model.LabelValue("cloudwatch"),
model.LabelName("__aws_cloudwatch_log_group"): model.LabelValue(data.LogGroup),
model.LabelName("__aws_cloudwatch_owner"): model.LabelValue(data.Owner),
}
diff --git a/tools/lambda-promtail/lambda-promtail/cw_test.go b/tools/lambda-promtail/lambda-promtail/cw_test.go
new file mode 100644
index 0000000000000..9ad5a907c7711
--- /dev/null
+++ b/tools/lambda-promtail/lambda-promtail/cw_test.go
@@ -0,0 +1,60 @@
+package main
+
+import (
+ "context"
+ "testing"
+
+ "github.com/aws/aws-lambda-go/events"
+ "github.com/stretchr/testify/require"
+
+ "github.com/grafana/loki/pkg/logproto"
+)
+
+func Test_parseCWEvent(t *testing.T) {
+ tests := []struct {
+ name string
+ b *batch
+ expectedStream string
+ keepStream bool
+ }{
+ {
+ name: "cloudwatch",
+ b: &batch{
+ streams: map[string]*logproto.Stream{},
+ },
+ expectedStream: `{__aws_cloudwatch_log_group="testLogGroup", __aws_cloudwatch_owner="123456789123", __aws_log_type="cloudwatch"}`,
+ keepStream: false,
+ },
+ {
+ name: "cloudwatch_keepStream",
+ b: &batch{
+ streams: map[string]*logproto.Stream{},
+ },
+ expectedStream: `{__aws_cloudwatch_log_group="testLogGroup", __aws_cloudwatch_log_stream="testLogStream", __aws_cloudwatch_owner="123456789123", __aws_log_type="cloudwatch"}`,
+ keepStream: true,
+ },
+ }
+
+ for _, tt := range tests {
+ // Docs: https://docs.aws.amazon.com/lambda/latest/dg/services-cloudwatchlogs.html
+ // Example CloudWatchLogEvent copied from https://github.com/aws/aws-lambda-go/blob/main/events/cloudwatch_logs_test.go
+ cwevent := &events.CloudwatchLogsEvent{
+ AWSLogs: events.CloudwatchLogsRawData{
+ Data: "H4sIAAAAAAAAAHWPwQqCQBCGX0Xm7EFtK+smZBEUgXoLCdMhFtKV3akI8d0bLYmibvPPN3wz00CJxmQnTO41whwWQRIctmEcB6sQbFC3CjW3XW8kxpOpP+OC22d1Wml1qZkQGtoMsScxaczKN3plG8zlaHIta5KqWsozoTYw3/djzwhpLwivWFGHGpAFe7DL68JlBUk+l7KSN7tCOEJ4M3/qOI49vMHj+zCKdlFqLaU2ZHV2a4Ct/an0/ivdX8oYc1UVX860fQDQiMdxRQEAAA==",
+ },
+ }
+
+ t.Run(tt.name, func(t *testing.T) {
+ batchSize = 131072 // Set large enough we don't send to promtail
+ keepStream = tt.keepStream
+ err := parseCWEvent(context.Background(), tt.b, cwevent)
+ if err != nil {
+ t.Error(err)
+ }
+ require.Len(t, tt.b.streams, 1)
+ stream, ok := tt.b.streams[tt.expectedStream]
+ require.True(t, ok, "batch does not contain stream: %s", tt.expectedStream)
+ require.NotNil(t, stream)
+ })
+ }
+}
diff --git a/tools/tsdb/bloom-tester/Dockerfile b/tools/tsdb/bloom-tester/Dockerfile
index d471e5d907005..d5f45d54da355 100644
--- a/tools/tsdb/bloom-tester/Dockerfile
+++ b/tools/tsdb/bloom-tester/Dockerfile
@@ -6,7 +6,7 @@ WORKDIR /src/bloom-tester
RUN make bloom-tester
-FROM alpine:3.18.3
+FROM alpine:3.18.4
RUN apk add --update --no-cache ca-certificates
COPY --from=build /src/bloom-tester/tools/tsdb/bloom-tester/bloom-tester /usr/bin/bloom-tester
ENTRYPOINT [ "/usr/bin/bloom-tester", "--config.file=/etc/loki/config.yaml" ]
diff --git a/tools/tsdb/bloom-tester/lib.go b/tools/tsdb/bloom-tester/lib.go
index 7eefb56342c40..1bdd4042ade0e 100644
--- a/tools/tsdb/bloom-tester/lib.go
+++ b/tools/tsdb/bloom-tester/lib.go
@@ -36,6 +36,11 @@ import (
"github.com/grafana/loki/tools/tsdb/helpers"
)
+const (
+ DefaultNGramLength = 4
+ DefaultNGramSkip = 0
+)
+
func execute() {
conf, svc, bucket, err := helpers.Setup()
helpers.ExitErr("setting up", err)
@@ -89,18 +94,10 @@ func execute() {
}
var (
- three = bt.NewNGramTokenizer(3, 4, 0)
- threeSkip1 = bt.NewNGramTokenizer(3, 4, 1)
- threeSkip2 = bt.NewNGramTokenizer(3, 4, 2)
- threeSkip3 = bt.NewNGramTokenizer(3, 4, 3)
- four = bt.NewNGramTokenizer(4, 5, 0)
- fourSkip1 = bt.NewNGramTokenizer(4, 5, 1)
- fourSkip2 = bt.NewNGramTokenizer(4, 5, 2)
- five = bt.NewNGramTokenizer(5, 6, 0)
- six = bt.NewNGramTokenizer(6, 7, 0)
-
- onePctError = func() *filter.ScalableBloomFilter { return filter.NewScalableBloomFilter(1024, 0.01, 0.8) }
- fivePctError = func() *filter.ScalableBloomFilter { return filter.NewScalableBloomFilter(1024, 0.05, 0.8) }
+ three = bt.NewNGramTokenizer(3, 0)
+ four = bt.NewNGramTokenizer(4, 0)
+
+ onePctError = func() *filter.ScalableBloomFilter { return filter.NewScalableBloomFilter(1024, 0.01, 0.8) }
)
var experiments = []Experiment{
@@ -116,7 +113,7 @@ var experiments = []Experiment{
*/
NewExperiment(
"token=4skip0_error=1%_indexchunks=true",
- four,
+ *four,
true,
onePctError,
),
@@ -267,9 +264,9 @@ func analyze(metrics *Metrics, sampler Sampler, indexShipper indexshipper.IndexS
level.Info(util_log.Logger).Log("msg", "starting analyze()", "tester", testerNumber, "total", numTesters)
var n int // count iterated series
- //pool := newPool(runtime.NumCPU())
- //pool := newPool(1)
- bloomTokenizer, _ := bt.NewBloomTokenizer(prometheus.DefaultRegisterer)
+ // pool := newPool(runtime.NumCPU())
+ // pool := newPool(1)
+ bloomTokenizer, _ := bt.NewBloomTokenizer(prometheus.DefaultRegisterer, DefaultNGramLength, DefaultNGramSkip)
for _, tenant := range tenants {
level.Info(util_log.Logger).Log("Analyzing tenant", tenant, "table", tableName)
err := indexShipper.ForEach(
@@ -344,7 +341,7 @@ func analyze(metrics *Metrics, sampler Sampler, indexShipper indexshipper.IndexS
tenant,
ls.String(),
objectClient) {
- bloomTokenizer.SetLineTokenizer(experiment.tokenizer)
+ bloomTokenizer.SetLineTokenizer(&experiment.tokenizer)
level.Info(util_log.Logger).Log("Starting work on: ", ls.String(), "'", FNV32a(ls.String()), "'", experiment.name, tenant)
startTime := time.Now().UnixMilli()
@@ -360,8 +357,10 @@ func analyze(metrics *Metrics, sampler Sampler, indexShipper indexshipper.IndexS
Bloom: &bloom,
Series: &series,
}
- bloomTokenizer.PopulateSeriesWithBloom(&swb, got)
-
+ err := bloomTokenizer.PopulateSeriesWithBloom(&swb, got)
+ if err != nil {
+ level.Error(util_log.Logger).Log("msg", "failed populating SeriesWithBloom", "err", err)
+ }
endTime := time.Now().UnixMilli()
if len(got) > 0 {
metrics.bloomSize.WithLabelValues(experiment.name).Observe(float64(sbf.Capacity() / 8))
@@ -381,7 +380,6 @@ func analyze(metrics *Metrics, sampler Sampler, indexShipper indexshipper.IndexS
metrics.sbfCreationTime.WithLabelValues(experiment.name).Add(float64(endTime - startTime))
metrics.sbfsCreated.WithLabelValues(experiment.name).Inc()
- metrics.chunkSize.Observe(float64(chunkTotalUncompressedSize))
if err != nil {
helpers.ExitErr("writing sbf to file", err)
diff --git a/tools/tsdb/bloom-tester/lib_test.go b/tools/tsdb/bloom-tester/lib_test.go
index 419ff44f59007..3269592f4abcb 100644
--- a/tools/tsdb/bloom-tester/lib_test.go
+++ b/tools/tsdb/bloom-tester/lib_test.go
@@ -16,7 +16,7 @@ func BenchmarkSBFTestAndAdd(b *testing.B) {
scanner := bufio.NewScanner(file)
experiment := NewExperiment(
"token=3skip0_error=1%_indexchunks=true",
- three,
+ *three,
true,
onePctError,
)
@@ -25,8 +25,10 @@ func BenchmarkSBFTestAndAdd(b *testing.B) {
for scanner.Scan() {
line := scanner.Text()
tokens := experiment.tokenizer.Tokens(line)
- for _, token := range tokens {
- sbf.TestAndAdd(token.Key)
+
+ for tokens.Next() {
+ tok := tokens.At()
+ sbf.TestAndAdd(tok)
}
}
}
@@ -40,7 +42,7 @@ func BenchmarkSBFAdd(b *testing.B) {
scanner := bufio.NewScanner(file)
experiment := NewExperiment(
"token=3skip0_error=1%_indexchunks=true",
- three,
+ *three,
true,
onePctError,
)
@@ -49,8 +51,10 @@ func BenchmarkSBFAdd(b *testing.B) {
for scanner.Scan() {
line := scanner.Text()
tokens := experiment.tokenizer.Tokens(line)
- for _, token := range tokens {
- sbf.Add(token.Key)
+
+ for tokens.Next() {
+ tok := tokens.At()
+ sbf.TestAndAdd(tok)
}
}
}
@@ -64,7 +68,7 @@ func BenchmarkSBFSeparateTestAndAdd(b *testing.B) {
scanner := bufio.NewScanner(file)
experiment := NewExperiment(
"token=3skip0_error=1%_indexchunks=true",
- three,
+ *three,
true,
onePctError,
)
@@ -73,45 +77,16 @@ func BenchmarkSBFSeparateTestAndAdd(b *testing.B) {
for scanner.Scan() {
line := scanner.Text()
tokens := experiment.tokenizer.Tokens(line)
- for _, token := range tokens {
- found := sbf.Test(token.Key)
- if !found {
- sbf.Add(token.Key)
- }
- }
- }
- }
-}
-func BenchmarkSBFTestAndAddWithLRU(b *testing.B) {
- for i := 0; i < b.N; i++ {
- b.StopTimer()
- file, _ := os.Open(BigFile)
- defer file.Close()
- scanner := bufio.NewScanner(file)
- experiment := NewExperiment(
- "token=3skip0_error=1%_indexchunks=true",
- three,
- true,
- onePctError,
- )
- sbf := experiment.bloom()
- cache := NewLRUCache4(150000)
- b.StartTimer()
- for scanner.Scan() {
- line := scanner.Text()
- tokens := experiment.tokenizer.Tokens(line)
- for _, token := range tokens {
- if !cache.Get(token.Key) {
- cache.Put(token.Key)
- sbf.TestAndAdd(token.Key)
- }
+ for tokens.Next() {
+ tok := tokens.At()
+ sbf.TestAndAdd(tok)
}
}
}
}
-func BenchmarkSBFSeparateTestAndAddWithLRU(b *testing.B) {
+func BenchmarkSBFTestAndAddWithLRU(b *testing.B) {
for i := 0; i < b.N; i++ {
b.StopTimer()
file, _ := os.Open(BigFile)
@@ -119,7 +94,7 @@ func BenchmarkSBFSeparateTestAndAddWithLRU(b *testing.B) {
scanner := bufio.NewScanner(file)
experiment := NewExperiment(
"token=3skip0_error=1%_indexchunks=true",
- three,
+ *three,
true,
onePctError,
)
@@ -129,151 +104,20 @@ func BenchmarkSBFSeparateTestAndAddWithLRU(b *testing.B) {
for scanner.Scan() {
line := scanner.Text()
tokens := experiment.tokenizer.Tokens(line)
- for _, token := range tokens {
- if !cache.Get(token.Key) {
- cache.Put(token.Key)
-
- found := sbf.Test(token.Key)
- if !found {
- sbf.Add(token.Key)
- }
- //sbf.TestAndAdd(token.Key)
- }
- }
- }
- }
-}
-
-func BenchmarkSBFSeparateTestAndAddWithLRU5(b *testing.B) {
- for i := 0; i < b.N; i++ {
- b.StopTimer()
- file, _ := os.Open(BigFile)
- defer file.Close()
- scanner := bufio.NewScanner(file)
- experiment := NewExperiment(
- "token=3skip0_error=1%_indexchunks=true",
- three,
- true,
- onePctError,
- )
- sbf := experiment.bloom()
- cache := NewLRUCache5(150000)
-
- b.StartTimer()
- for scanner.Scan() {
- line := scanner.Text()
- tokens := experiment.tokenizer.Tokens(line)
- for _, token := range tokens {
- str := string(token.Key)
- if !cache.Get(str) {
- cache.Put(str)
-
- found := sbf.Test(token.Key)
- if !found {
- sbf.Add(token.Key)
- }
- }
- }
- }
- }
-}
-
-func BenchmarkSBFTestAndAddWithLRU5(b *testing.B) {
- for i := 0; i < b.N; i++ {
- b.StopTimer()
- file, _ := os.Open(BigFile)
- defer file.Close()
- scanner := bufio.NewScanner(file)
- experiment := NewExperiment(
- "token=3skip0_error=1%_indexchunks=true",
- three,
- true,
- onePctError,
- )
- sbf := experiment.bloom()
- cache := NewLRUCache5(150000)
-
- b.StartTimer()
- for scanner.Scan() {
- line := scanner.Text()
- tokens := experiment.tokenizer.Tokens(line)
- for _, token := range tokens {
- str := string(token.Key)
- if !cache.Get(str) {
- cache.Put(str)
-
- sbf.TestAndAdd(token.Key)
- }
- }
- }
- }
-}
-
-func BenchmarkSBFTestAndAddWithByteKeyLRU(b *testing.B) {
- for i := 0; i < b.N; i++ {
- b.StopTimer()
- file, _ := os.Open(BigFile)
- defer file.Close()
- scanner := bufio.NewScanner(file)
- experiment := NewExperiment(
- "token=4skip0_error=1%_indexchunks=false",
- four,
- false,
- onePctError,
- )
- sbf := experiment.bloom()
- cache := NewByteKeyLRUCache(150000)
- b.StartTimer()
- for scanner.Scan() {
- line := scanner.Text()
- tokens := experiment.tokenizer.Tokens(line)
- for _, token := range tokens {
-
- array := NewFourByteKeyFromSlice(token.Key)
- if !cache.Get(array) {
- cache.Put(array)
- sbf.TestAndAdd(token.Key)
- }
- }
- }
- }
-}
-
-func BenchmarkSBFTestAndAddWithFourByteKeyLRU(b *testing.B) {
- for i := 0; i < b.N; i++ {
- b.StopTimer()
- file, _ := os.Open(BigFile)
- defer file.Close()
- scanner := bufio.NewScanner(file)
- experiment := NewExperiment(
- "token=4skip0_error=1%_indexchunks=false",
- four,
- false,
- onePctError,
- )
- sbf := experiment.bloom()
- cache := NewFourByteKeyLRUCache(150000)
- b.StartTimer()
- for scanner.Scan() {
- line := scanner.Text()
- tokens := experiment.tokenizer.Tokens(line)
- for _, token := range tokens {
- if !cache.Get([4]byte(token.Key)) {
- cache.Put([4]byte(token.Key))
- found := sbf.Test(token.Key)
- if !found {
- sbf.Add(token.Key)
- }
- //sbf.TestAndAdd(token.Key)
+ for tokens.Next() {
+ tok := tokens.At()
+ if !cache.Get(tok) {
+ cache.Put(tok)
+ sbf.TestAndAdd(tok)
}
-
+ sbf.TestAndAdd(tok)
}
}
}
}
-func BenchmarkSBFAddWithLRU(b *testing.B) {
+func BenchmarkSBFSeparateTestAndAddWithLRU(b *testing.B) {
for i := 0; i < b.N; i++ {
b.StopTimer()
file, _ := os.Open(BigFile)
@@ -281,7 +125,7 @@ func BenchmarkSBFAddWithLRU(b *testing.B) {
scanner := bufio.NewScanner(file)
experiment := NewExperiment(
"token=3skip0_error=1%_indexchunks=true",
- three,
+ *three,
true,
onePctError,
)
@@ -291,44 +135,16 @@ func BenchmarkSBFAddWithLRU(b *testing.B) {
for scanner.Scan() {
line := scanner.Text()
tokens := experiment.tokenizer.Tokens(line)
- for _, token := range tokens {
- if !cache.Get(token.Key) {
- cache.Put(token.Key)
- sbf.Add(token.Key)
- }
- }
- }
- }
-}
-
-func BenchmarkSBFSeparateTestAndAddWithLRU1(b *testing.B) {
- for i := 0; i < b.N; i++ {
- b.StopTimer()
- file, _ := os.Open(BigFile)
- defer file.Close()
- scanner := bufio.NewScanner(file)
- experiment := NewExperiment(
- "token=3skip0_error=1%_indexchunks=true",
- three,
- true,
- onePctError,
- )
- sbf := experiment.bloom()
- cache := NewLRUCache(150000)
- b.StartTimer()
- for scanner.Scan() {
- line := scanner.Text()
- tokens := experiment.tokenizer.Tokens(line)
- for _, token := range tokens {
- str := string(token.Key)
- if !cache.Get(str) {
- cache.Put(str)
- found := sbf.Test(token.Key)
+ for tokens.Next() {
+ tok := tokens.At()
+ if !cache.Get(tok) {
+ cache.Put(tok)
+ found := sbf.Test(tok)
if !found {
- sbf.Add(token.Key)
+ sbf.Add(tok)
}
- //sbf.Add(token.Key)
}
+ sbf.TestAndAdd(tok)
}
}
}
@@ -342,7 +158,7 @@ func BenchmarkSBFSeparateTestAndAddWithMap(b *testing.B) {
scanner := bufio.NewScanner(file)
experiment := NewExperiment(
"token=3skip0_error=1%_indexchunks=true",
- three,
+ *three,
true,
onePctError,
)
@@ -352,15 +168,15 @@ func BenchmarkSBFSeparateTestAndAddWithMap(b *testing.B) {
for scanner.Scan() {
line := scanner.Text()
tokens := experiment.tokenizer.Tokens(line)
- for _, token := range tokens {
- str := string(token.Key)
-
- _, found := cache[str]
+ for tokens.Next() {
+ tok := tokens.At()
+ tokStr := string(tok)
+ _, found := cache[tokStr]
if !found {
- cache[str] = ""
- f := sbf.Test(token.Key)
+ cache[tokStr] = ""
+ f := sbf.Test(tok)
if !f {
- sbf.Add(token.Key)
+ sbf.Add(tok)
}
if len(cache) > 150000 {
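For orientation, a minimal sketch of the iterator-style tokenizer API these benchmarks were ported to; the constructor arguments, the Next()/At() iteration, and the filter calls follow the usage shown in this diff, while the exact import path behind the `filter` alias is an assumption.

```go
package main

import (
	"fmt"

	bt "github.com/grafana/loki/pkg/storage/bloom/v1"
	"github.com/grafana/loki/pkg/storage/bloom/v1/filter"
)

func main() {
	// NewNGramTokenizer(length, skip): 4-grams with no skip, matching the
	// DefaultNGramLength and DefaultNGramSkip constants introduced in lib.go.
	four := bt.NewNGramTokenizer(4, 0)
	sbf := filter.NewScalableBloomFilter(1024, 0.01, 0.8)

	// Tokens returns an iterator; test-then-add each n-gram into the filter.
	tokens := four.Tokens("some example log line")
	for tokens.Next() {
		tok := tokens.At()
		if !sbf.Test(tok) {
			sbf.Add(tok)
		}
	}
	fmt.Println("filter capacity in bits:", sbf.Capacity())
}
```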
diff --git a/tools/tsdb/bloom-tester/metrics.go b/tools/tsdb/bloom-tester/metrics.go
index 193f829063db8..2805901a3b9c3 100644
--- a/tools/tsdb/bloom-tester/metrics.go
+++ b/tools/tsdb/bloom-tester/metrics.go
@@ -10,12 +10,12 @@ import (
type Experiment struct {
name string
- tokenizer bt.Tokenizer
+ tokenizer bt.NGramTokenizer
bloom func() *filter.ScalableBloomFilter
encodeChunkID bool
}
-func NewExperiment(name string, tokenizer bt.Tokenizer, encodeChunkID bool, bloom func() *filter.ScalableBloomFilter) Experiment {
+func NewExperiment(name string, tokenizer bt.NGramTokenizer, encodeChunkID bool, bloom func() *filter.ScalableBloomFilter) Experiment {
return Experiment{
name: name,
tokenizer: tokenizer,
diff --git a/tools/tsdb/bloom-tester/readlib.go b/tools/tsdb/bloom-tester/readlib.go
index eaca7a38c15bd..77d9e3967ca86 100644
--- a/tools/tsdb/bloom-tester/readlib.go
+++ b/tools/tsdb/bloom-tester/readlib.go
@@ -119,14 +119,14 @@ func analyzeRead(metrics *Metrics, sampler Sampler, shipper indexshipper.IndexSh
}
level.Info(util_log.Logger).Log("msg", "starting analyze()", "tester", testerNumber, "total", numTesters)
- //var n int // count iterated series
- //reportEvery := 10 // report every n chunks
- //pool := newPool(runtime.NumCPU())
- //pool := newPool(16)
- //searchString := os.Getenv("SEARCH_STRING")
- //147854,148226,145541,145603,147159,147836,145551,145599,147393,147841,145265,145620,146181,147225,147167,146131,146189,146739,147510,145572,146710,148031,29,146205,147175,146984,147345
- //mytenants := []string{"29"}
- bloomTokenizer, _ := bt.NewBloomTokenizer(prometheus.DefaultRegisterer)
+ // var n int // count iterated series
+ // reportEvery := 10 // report every n chunks
+ // pool := newPool(runtime.NumCPU())
+ // pool := newPool(16)
+ // searchString := os.Getenv("SEARCH_STRING")
+ // 147854,148226,145541,145603,147159,147836,145551,145599,147393,147841,145265,145620,146181,147225,147167,146131,146189,146739,147510,145572,146710,148031,29,146205,147175,146984,147345
+ // mytenants := []string{"29"}
+ bloomTokenizer, _ := bt.NewBloomTokenizer(prometheus.DefaultRegisterer, DefaultNGramLength, DefaultNGramSkip)
for _, tenant := range tenants {
level.Info(util_log.Logger).Log("Analyzing tenant", tenant, "table", tableName)
err := shipper.ForEach(
@@ -200,10 +200,10 @@ func analyzeRead(metrics *Metrics, sampler Sampler, shipper indexshipper.IndexSh
tenant,
ls.String(),
objectClient)
- bloomTokenizer.SetLineTokenizer(experiment.tokenizer)
+ bloomTokenizer.SetLineTokenizer(&experiment.tokenizer)
for gotIdx := range got { // for every chunk
for _, queryExperiment := range queryExperiments { // for each search string
- if len(queryExperiment.searchString) >= experiment.tokenizer.GetMin()+experiment.tokenizer.GetSkip() {
+ if len(queryExperiment.searchString) >= experiment.tokenizer.N+experiment.tokenizer.Skip {
foundInChunk := false
foundInSbf := false
@@ -245,11 +245,6 @@ func analyzeRead(metrics *Metrics, sampler Sampler, shipper indexshipper.IndexSh
helpers.ExitErr("iterating chunks ", itr.Error())
}
- /*else // if search string is long enough
- {
- // fmt.Println("Skipping", queryExperiment.name, "because it's too short", experiment.name)
- }*/
-
} // for each search string
} // for every chunk
@@ -306,21 +301,21 @@ func readSBFFromObjectStorage(location, prefix, period, tenant, series string, o
return sbf
}
-func searchSbf(sbf *filter.ScalableBloomFilter, tokenizer bt.Tokenizer, searchString string) bool {
- tokens := bt.SearchesForTokenizerAndLine(tokenizer, searchString)
- for _, tokenSet := range tokens {
- numMatches := 0
- for _, token := range tokenSet {
- if sbf.Test(token.Key) {
- numMatches++
- }
+func searchSbf(sbf *filter.ScalableBloomFilter, tokenizer bt.NGramTokenizer, searchString string) bool {
+ itr := tokenizer.Tokens(searchString)
+ numMatches := 0
+ numTokens := 0
+ for itr.Next() {
+ token := itr.At()
+ numTokens++
+ if sbf.Test(token) {
+ numMatches++
}
- if numMatches > 0 {
- if numMatches == len(tokenSet) {
- return true
- }
+ }
+ if numMatches > 0 {
+ if numMatches == numTokens {
+ return true
}
-
}
return false
diff --git a/tools/tsdb/bloom-tester/readlib_test.go b/tools/tsdb/bloom-tester/readlib_test.go
index 5216918010bc1..edec2c37fe599 100644
--- a/tools/tsdb/bloom-tester/readlib_test.go
+++ b/tools/tsdb/bloom-tester/readlib_test.go
@@ -1,7 +1,6 @@
package main
import (
- bt "github.com/grafana/loki/pkg/storage/bloom/v1"
"testing"
"github.com/stretchr/testify/require"
@@ -10,7 +9,7 @@ import (
func TestSearchSbf(t *testing.T) {
experiment := NewExperiment(
"token=4skip0_error=1%_indexchunks=true",
- four,
+ *four,
true,
onePctError,
)
@@ -66,13 +65,13 @@ func TestSearchSbf(t *testing.T) {
} {
t.Run(tc.desc, func(t *testing.T) {
sbf := experiment.bloom()
- tokens := bt.SearchesForTokenizerAndLine(four, tc.inputLine)
- for _, tokenSet := range tokens {
- for _, token := range tokenSet {
- sbf.Add(token.Key)
- }
+ tokens := four.Tokens(tc.inputLine)
+ for tokens.Next() {
+ tok := tokens.At()
+ sbf.Add(tok)
}
- require.Equal(t, tc.exp, searchSbf(sbf, four, tc.inputSearch))
+
+ require.Equal(t, tc.exp, searchSbf(sbf, *four, tc.inputSearch))
})
}
}
diff --git a/vendor/github.com/cristalhq/hedgedhttp/README.md b/vendor/github.com/cristalhq/hedgedhttp/README.md
index aec2a1b3548d5..104213b350b12 100644
--- a/vendor/github.com/cristalhq/hedgedhttp/README.md
+++ b/vendor/github.com/cristalhq/hedgedhttp/README.md
@@ -10,7 +10,7 @@ Hedged HTTP client which helps to reduce tail latency at scale.
## Rationale
-See paper [Tail at Scale](https://cacm.acm.org/magazines/2013/2/160173-the-tail-at-scale/fulltext) by Jeffrey Dean, Luiz André Barroso. In short: the client first sends one request, but then sends an additional request after a timeout if the previous hasn't returned an answer in the expected time. The client cancels remaining requests once the first result is received.
+See paper [Tail at Scale](https://www.barroso.org/publications/TheTailAtScale.pdf) by Jeffrey Dean, Luiz André Barroso. In short: the client first sends one request, but then sends an additional request after a timeout if the previous hasn't returned an answer in the expected time. The client cancels remaining requests once the first result is received.
## Acknowledge
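As a rough illustration of the pattern described above, a standard client can be wrapped so that extra attempts are fired after a delay; NewClient is part of this package, though its exact signature is assumed here.

```go
package main

import (
	"net/http"
	"time"

	"github.com/cristalhq/hedgedhttp"
)

func main() {
	// Up to 3 attempts in total; a new attempt starts every 50ms until one
	// returns, and the remaining in-flight requests are canceled.
	client, err := hedgedhttp.NewClient(50*time.Millisecond, 3, &http.Client{Timeout: 5 * time.Second})
	if err != nil {
		panic(err)
	}

	resp, err := client.Get("https://example.com")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
}
```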
diff --git a/vendor/github.com/cristalhq/hedgedhttp/hedged.go b/vendor/github.com/cristalhq/hedgedhttp/hedged.go
index 56d65b0b1c44e..b7b33f50b89d3 100644
--- a/vendor/github.com/cristalhq/hedgedhttp/hedged.go
+++ b/vendor/github.com/cristalhq/hedgedhttp/hedged.go
@@ -12,6 +12,79 @@ import (
const infiniteTimeout = 30 * 24 * time.Hour // domain specific infinite
+// Client represents a hedged HTTP client.
+type Client struct {
+ rt http.RoundTripper
+ stats *Stats
+}
+
+// Config for the [Client].
+type Config struct {
+ // Transport of the [Client].
+ // Default is nil which results in [net/http.DefaultTransport].
+ Transport http.RoundTripper
+
+ // Upto says how many requests to make.
+ // Default is zero which means no hedged requests will be made.
+ Upto int
+
+ // Delay before 2 consecutive hedged requests.
+ Delay time.Duration
+
+ // Next returns the upto and delay for each HTTP request that will be hedged.
+ // Default is nil, which results in using (Upto, Delay).
+ Next NextFn
+}
+
+// NextFn represents a function that is called for each HTTP request for retrieving hedging options.
+type NextFn func() (upto int, delay time.Duration)
+
+// New returns a new Client for the given config.
+func New(cfg Config) (*Client, error) {
+ switch {
+ case cfg.Delay < 0:
+ return nil, errors.New("hedgedhttp: timeout cannot be negative")
+ case cfg.Upto < 0:
+ return nil, errors.New("hedgedhttp: upto cannot be negative")
+ }
+ if cfg.Transport == nil {
+ cfg.Transport = http.DefaultTransport
+ }
+
+ rt, stats, err := NewRoundTripperAndStats(cfg.Delay, cfg.Upto, cfg.Transport)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO(cristaloleg): this should be removed after internals cleanup.
+ rt2, ok := rt.(*hedgedTransport)
+ if !ok {
+ panic(fmt.Sprintf("want *hedgedTransport got %T", rt))
+ }
+ rt2.next = cfg.Next
+
+ c := &Client{
+ rt: rt2,
+ stats: stats,
+ }
+ return c, nil
+}
+
+// Stats returns statistics for the given client, see [Stats] methods.
+func (c *Client) Stats() *Stats {
+ return c.stats
+}
+
+// Do does the same as [RoundTrip], this method is presented to align with [net/http.Client].
+func (c *Client) Do(req *http.Request) (*http.Response, error) {
+ return c.rt.RoundTrip(req)
+}
+
+// RoundTrip implements [net/http.RoundTripper] interface.
+func (c *Client) RoundTrip(req *http.Request) (*http.Response, error) {
+ return c.rt.RoundTrip(req)
+}
+
// NewClient returns a new http.Client which implements hedged requests pattern.
// Given Client starts a new request after a timeout from previous request.
// Starts no more than upto requests.
@@ -63,8 +136,8 @@ func NewRoundTripperAndStats(timeout time.Duration, upto int, rt http.RoundTripp
switch {
case timeout < 0:
return nil, nil, errors.New("hedgedhttp: timeout cannot be negative")
- case upto < 1:
- return nil, nil, errors.New("hedgedhttp: upto must be greater than 0")
+ case upto < 0:
+ return nil, nil, errors.New("hedgedhttp: upto cannot be negative")
}
if rt == nil {
@@ -88,21 +161,35 @@ type hedgedTransport struct {
rt http.RoundTripper
timeout time.Duration
upto int
+ next NextFn
metrics *Stats
}
func (ht *hedgedTransport) RoundTrip(req *http.Request) (*http.Response, error) {
mainCtx := req.Context()
- timeout := ht.timeout
+ upto, timeout := ht.upto, ht.timeout
+ if ht.next != nil {
+ upto, timeout = ht.next()
+ }
+
+ // no hedged requests, just a regular one.
+ if upto <= 0 {
+ return ht.rt.RoundTrip(req)
+ }
+ // rollback to default timeout.
+ if timeout < 0 {
+ timeout = ht.timeout
+ }
+
errOverall := &MultiError{}
- resultCh := make(chan indexedResp, ht.upto)
- errorCh := make(chan error, ht.upto)
+ resultCh := make(chan indexedResp, upto)
+ errorCh := make(chan error, upto)
ht.metrics.requestedRoundTripsInc()
resultIdx := -1
- cancels := make([]func(), ht.upto)
+ cancels := make([]func(), upto)
defer runInPool(func() {
for i, cancel := range cancels {
@@ -113,8 +200,8 @@ func (ht *hedgedTransport) RoundTrip(req *http.Request) (*http.Response, error)
}
})
- for sent := 0; len(errOverall.Errors) < ht.upto; sent++ {
- if sent < ht.upto {
+ for sent := 0; len(errOverall.Errors) < upto; sent++ {
+ if sent < upto {
idx := sent
subReq, cancel := reqWithCtx(req, mainCtx, idx != 0)
cancels[idx] = cancel
@@ -132,7 +219,7 @@ func (ht *hedgedTransport) RoundTrip(req *http.Request) (*http.Response, error)
}
// all request sent - effectively disabling timeout between requests
- if sent == ht.upto {
+ if sent == upto {
timeout = infiniteTimeout
}
resp, err := waitResult(mainCtx, resultCh, errorCh, timeout)
@@ -140,6 +227,11 @@ func (ht *hedgedTransport) RoundTrip(req *http.Request) (*http.Response, error)
switch {
case resp.Resp != nil:
resultIdx = resp.Index
+ if resultIdx == 0 {
+ ht.metrics.originalRequestWinsInc()
+ } else {
+ ht.metrics.hedgedRequestWinsInc()
+ }
return resp.Resp, nil
case mainCtx.Err() != nil:
ht.metrics.canceledByUserRoundTripsInc()
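A possible usage sketch of the Config-based constructor added above; the field names and the Do/Stats methods come from this diff, and the request target is illustrative.

```go
package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/cristalhq/hedgedhttp"
)

func main() {
	client, err := hedgedhttp.New(hedgedhttp.Config{
		Transport: http.DefaultTransport,
		Upto:      2,                      // at most 2 attempts in total
		Delay:     100 * time.Millisecond, // wait before each extra attempt
	})
	if err != nil {
		panic(err)
	}

	req, err := http.NewRequest(http.MethodGet, "https://example.com", nil)
	if err != nil {
		panic(err)
	}
	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The new win counters record whether the original or a hedged attempt returned first.
	fmt.Println("original wins:", client.Stats().OriginalRequestWins())
	fmt.Println("hedged wins:", client.Stats().HedgedRequestWins())
}
```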
diff --git a/vendor/github.com/cristalhq/hedgedhttp/stats.go b/vendor/github.com/cristalhq/hedgedhttp/stats.go
index fceeb234a22e5..f29331890826a 100644
--- a/vendor/github.com/cristalhq/hedgedhttp/stats.go
+++ b/vendor/github.com/cristalhq/hedgedhttp/stats.go
@@ -16,6 +16,8 @@ type Stats struct {
requestedRoundTrips atomicCounter
actualRoundTrips atomicCounter
failedRoundTrips atomicCounter
+ originalRequestWins atomicCounter
+ hedgedRequestWins atomicCounter
canceledByUserRoundTrips atomicCounter
canceledSubRequests atomicCounter
_ cacheLine
@@ -24,6 +26,8 @@ type Stats struct {
func (s *Stats) requestedRoundTripsInc() { atomic.AddUint64(&s.requestedRoundTrips.count, 1) }
func (s *Stats) actualRoundTripsInc() { atomic.AddUint64(&s.actualRoundTrips.count, 1) }
func (s *Stats) failedRoundTripsInc() { atomic.AddUint64(&s.failedRoundTrips.count, 1) }
+func (s *Stats) originalRequestWinsInc() { atomic.AddUint64(&s.originalRequestWins.count, 1) }
+func (s *Stats) hedgedRequestWinsInc() { atomic.AddUint64(&s.hedgedRequestWins.count, 1) }
func (s *Stats) canceledByUserRoundTripsInc() { atomic.AddUint64(&s.canceledByUserRoundTrips.count, 1) }
func (s *Stats) canceledSubRequestsInc() { atomic.AddUint64(&s.canceledSubRequests.count, 1) }
@@ -42,6 +46,16 @@ func (s *Stats) FailedRoundTrips() uint64 {
return atomic.LoadUint64(&s.failedRoundTrips.count)
}
+// OriginalRequestWins returns count of original requests that were faster than the hedged requests.
+func (s *Stats) OriginalRequestWins() uint64 {
+ return atomic.LoadUint64(&s.originalRequestWins.count)
+}
+
+// HedgedRequestWins returns count of hedged requests that were faster than the original.
+func (s *Stats) HedgedRequestWins() uint64 {
+ return atomic.LoadUint64(&s.hedgedRequestWins.count)
+}
+
// CanceledByUserRoundTrips returns count of requests that were canceled by user, using request context.
func (s *Stats) CanceledByUserRoundTrips() uint64 {
return atomic.LoadUint64(&s.canceledByUserRoundTrips.count)
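
The new counters make it easy to see how often hedging actually pays off. A minimal sketch of wiring the stats-enabled round tripper and reading the win counters (the target URL is only illustrative):

```go
package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/cristalhq/hedgedhttp"
)

func main() {
	// Issue a hedged request 50ms after the original, with at most 3 requests in flight.
	rt, stats, err := hedgedhttp.NewRoundTripperAndStats(50*time.Millisecond, 3, http.DefaultTransport)
	if err != nil {
		panic(err)
	}
	client := &http.Client{Transport: rt}

	if resp, err := client.Get("https://example.com"); err == nil {
		resp.Body.Close()
	}

	// The counters added above report which request won the race.
	fmt.Println("original wins:", stats.OriginalRequestWins())
	fmt.Println("hedged wins:", stats.HedgedRequestWins())
}
```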
diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml
deleted file mode 100644
index d8156a60ba9b3..0000000000000
--- a/vendor/github.com/google/uuid/.travis.yml
+++ /dev/null
@@ -1,9 +0,0 @@
-language: go
-
-go:
- - 1.4.3
- - 1.5.3
- - tip
-
-script:
- - go test -v ./...
diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md
new file mode 100644
index 0000000000000..2bd78667afbb3
--- /dev/null
+++ b/vendor/github.com/google/uuid/CHANGELOG.md
@@ -0,0 +1,10 @@
+# Changelog
+
+## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18)
+
+
+### Bug Fixes
+
+* Use .EqualFold() to parse urn prefixed UUIDs ([#118](https://github.com/google/uuid/issues/118)) ([574e687](https://github.com/google/uuid/commit/574e6874943741fb99d41764c705173ada5293f0))
+
+## Changelog
diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md
index 04fdf09f136bb..5566888726d98 100644
--- a/vendor/github.com/google/uuid/CONTRIBUTING.md
+++ b/vendor/github.com/google/uuid/CONTRIBUTING.md
@@ -2,6 +2,22 @@
We definitely welcome patches and contribution to this project!
+### Tips
+
+Commits must be formatted according to the [Conventional Commits Specification](https://www.conventionalcommits.org).
+
+Always try to include a test case! If it is not possible or not necessary,
+please explain why in the pull request description.
+
+### Releasing
+
+Commits that would precipitate a SemVer change, as described in the Conventional
+Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action)
+to create a release candidate pull request. Once submitted, `release-please`
+will create a release.
+
+For tips on how to work with `release-please`, see its documentation.
+
### Legal requirements
In order to protect both you and ourselves, you will need to sign the
diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md
index f765a46f91501..3e9a61889de48 100644
--- a/vendor/github.com/google/uuid/README.md
+++ b/vendor/github.com/google/uuid/README.md
@@ -1,6 +1,6 @@
-# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master)
+# uuid
The uuid package generates and inspects UUIDs based on
-[RFC 4122](http://tools.ietf.org/html/rfc4122)
+[RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122)
and DCE 1.1: Authentication and Security Services.
This package is based on the github.com/pborman/uuid package (previously named
@@ -9,10 +9,12 @@ a UUID is a 16 byte array rather than a byte slice. One loss due to this
change is the ability to represent an invalid UUID (vs a NIL UUID).
###### Install
-`go get github.com/google/uuid`
+```sh
+go get github.com/google/uuid
+```
###### Documentation
-[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid)
+[![Go Reference](https://pkg.go.dev/badge/github.com/google/uuid.svg)](https://pkg.go.dev/github.com/google/uuid)
Full `go doc` style documentation for the package can be viewed online without
installing this package by using the GoDoc site here:
diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go
index 24b78edc90710..b2a0bc8711b3d 100644
--- a/vendor/github.com/google/uuid/node_js.go
+++ b/vendor/github.com/google/uuid/node_js.go
@@ -7,6 +7,6 @@
package uuid
// getHardwareInterface returns nil values for the JS version of the code.
-// This remvoves the "net" dependency, because it is not used in the browser.
+// This removes the "net" dependency, because it is not used in the browser.
// Using the "net" library inflates the size of the transpiled JS code by 673k bytes.
func getHardwareInterface(name string) (string, []byte) { return "", nil }
diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go
index a57207aeb6fd8..a56138cc4bd04 100644
--- a/vendor/github.com/google/uuid/uuid.go
+++ b/vendor/github.com/google/uuid/uuid.go
@@ -69,7 +69,7 @@ func Parse(s string) (UUID, error) {
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
case 36 + 9:
- if strings.ToLower(s[:9]) != "urn:uuid:" {
+ if !strings.EqualFold(s[:9], "urn:uuid:") {
return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9])
}
s = s[9:]
@@ -101,7 +101,8 @@ func Parse(s string) (UUID, error) {
9, 11,
14, 16,
19, 21,
- 24, 26, 28, 30, 32, 34} {
+ 24, 26, 28, 30, 32, 34,
+ } {
v, ok := xtob(s[x], s[x+1])
if !ok {
return uuid, errors.New("invalid UUID format")
@@ -117,7 +118,7 @@ func ParseBytes(b []byte) (UUID, error) {
switch len(b) {
case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
- if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) {
+ if !bytes.EqualFold(b[:9], []byte("urn:uuid:")) {
return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9])
}
b = b[9:]
@@ -145,7 +146,8 @@ func ParseBytes(b []byte) (UUID, error) {
9, 11,
14, 16,
19, 21,
- 24, 26, 28, 30, 32, 34} {
+ 24, 26, 28, 30, 32, 34,
+ } {
v, ok := xtob(b[x], b[x+1])
if !ok {
return uuid, errors.New("invalid UUID format")
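
Both Parse and ParseBytes accepted mixed-case `urn:uuid:` prefixes before; the switch to EqualFold keeps that behaviour while avoiding the temporary lowercased copy. A small sketch:

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// The prefix comparison is case-insensitive, so an uppercase URN prefix parses fine.
	id, err := uuid.Parse("URN:UUID:f47ac10b-58cc-0372-8567-0e02b2c3d479")
	if err != nil {
		panic(err)
	}
	fmt.Println(id) // f47ac10b-58cc-0372-8567-0e02b2c3d479
}
```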
diff --git a/vendor/github.com/grafana/dskit/concurrency/buffer.go b/vendor/github.com/grafana/dskit/concurrency/buffer.go
index 623b9a707612f..b8da4423f10e8 100644
--- a/vendor/github.com/grafana/dskit/concurrency/buffer.go
+++ b/vendor/github.com/grafana/dskit/concurrency/buffer.go
@@ -24,3 +24,10 @@ func (sb *SyncBuffer) String() string {
return sb.buf.String()
}
+
+func (sb *SyncBuffer) Reset() {
+ sb.mu.Lock()
+ defer sb.mu.Unlock()
+
+ sb.buf.Reset()
+}
diff --git a/vendor/github.com/grafana/dskit/concurrency/worker.go b/vendor/github.com/grafana/dskit/concurrency/worker.go
new file mode 100644
index 0000000000000..f40f0334800b7
--- /dev/null
+++ b/vendor/github.com/grafana/dskit/concurrency/worker.go
@@ -0,0 +1,38 @@
+package concurrency
+
+// NewReusableGoroutinesPool creates a new worker pool with the given size.
+// These workers will run the workloads passed through Go() calls.
+// If all workers are busy, Go() will spawn a new goroutine to run the workload.
+func NewReusableGoroutinesPool(size int) *ReusableGoroutinesPool {
+ p := &ReusableGoroutinesPool{
+ jobs: make(chan func()),
+ }
+ for i := 0; i < size; i++ {
+ go func() {
+ for f := range p.jobs {
+ f()
+ }
+ }()
+ }
+ return p
+}
+
+type ReusableGoroutinesPool struct {
+ jobs chan func()
+}
+
+// Go will run the given function in a worker of the pool.
+// If all workers are busy, Go() will spawn a new goroutine to run the workload.
+func (p *ReusableGoroutinesPool) Go(f func()) {
+ select {
+ case p.jobs <- f:
+ default:
+ go f()
+ }
+}
+
+// Close stops the workers of the pool.
+// No new Go() calls should be performed after calling Close().
+// Close does NOT wait for all jobs to finish; it is the caller's responsibility to ensure that in the provided workloads.
+// Close is intended to be used in tests to ensure that no goroutines are leaked.
+func (p *ReusableGoroutinesPool) Close() { close(p.jobs) }
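
A short sketch of how a caller might use the new pool; workloads beyond the pool size simply fall back to fresh goroutines:

```go
package main

import (
	"fmt"
	"sync"

	"github.com/grafana/dskit/concurrency"
)

func main() {
	pool := concurrency.NewReusableGoroutinesPool(10)
	defer pool.Close()

	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		i := i
		wg.Add(1)
		pool.Go(func() {
			defer wg.Done()
			fmt.Println("job", i)
		})
	}
	wg.Wait()
}
```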
diff --git a/vendor/github.com/grafana/dskit/grpcclient/instrumentation.go b/vendor/github.com/grafana/dskit/grpcclient/instrumentation.go
index 4a10ce48d27a8..280f02180c3e9 100644
--- a/vendor/github.com/grafana/dskit/grpcclient/instrumentation.go
+++ b/vendor/github.com/grafana/dskit/grpcclient/instrumentation.go
@@ -9,14 +9,14 @@ import (
"github.com/grafana/dskit/middleware"
)
-func Instrument(requestDuration *prometheus.HistogramVec) ([]grpc.UnaryClientInterceptor, []grpc.StreamClientInterceptor) {
+func Instrument(requestDuration *prometheus.HistogramVec, instrumentationLabelOptions ...middleware.InstrumentationOption) ([]grpc.UnaryClientInterceptor, []grpc.StreamClientInterceptor) {
return []grpc.UnaryClientInterceptor{
otgrpc.OpenTracingClientInterceptor(opentracing.GlobalTracer()),
middleware.ClientUserHeaderInterceptor,
- middleware.UnaryClientInstrumentInterceptor(requestDuration),
+ middleware.UnaryClientInstrumentInterceptor(requestDuration, instrumentationLabelOptions...),
}, []grpc.StreamClientInterceptor{
otgrpc.OpenTracingStreamClientInterceptor(opentracing.GlobalTracer()),
middleware.StreamClientUserHeaderInterceptor,
- middleware.StreamClientInstrumentInterceptor(requestDuration),
+ middleware.StreamClientInstrumentInterceptor(requestDuration, instrumentationLabelOptions...),
}
}
diff --git a/vendor/github.com/grafana/dskit/grpcutil/cancel.go b/vendor/github.com/grafana/dskit/grpcutil/cancel.go
deleted file mode 100644
index b1d369d2a3ea8..0000000000000
--- a/vendor/github.com/grafana/dskit/grpcutil/cancel.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Provenance-includes-location: https://github.com/weaveworks/common/blob/main/grpc/cancel.go
-// Provenance-includes-license: Apache-2.0
-// Provenance-includes-copyright: Weaveworks Ltd.
-
-package grpcutil
-
-import (
- "context"
- "errors"
-
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
-)
-
-// IsCanceled checks whether an error comes from an operation being canceled
-func IsCanceled(err error) bool {
- if errors.Is(err, context.Canceled) {
- return true
- }
- s, ok := status.FromError(err)
- if ok && s.Code() == codes.Canceled {
- return true
- }
- return false
-}
diff --git a/vendor/github.com/grafana/dskit/grpcutil/status.go b/vendor/github.com/grafana/dskit/grpcutil/status.go
new file mode 100644
index 0000000000000..a9e9aab249a34
--- /dev/null
+++ b/vendor/github.com/grafana/dskit/grpcutil/status.go
@@ -0,0 +1,70 @@
+package grpcutil
+
+import (
+ "context"
+ "errors"
+
+ "github.com/gogo/status"
+ "google.golang.org/grpc/codes"
+ grpcstatus "google.golang.org/grpc/status"
+)
+
+// ErrorToStatus returns a *github.com/gogo/status.Status representation of err.
+//
+// - If err implements the method `GRPCStatus() *google.golang.org/grpc/status.Status` and
+// `GRPCStatus()` does not return nil, or if err wraps a type satisfying this, Status from
+// `GRPCStatus()` is converted to gogo Status, and returned. In that case, ok is true.
+//
+// - If err is nil or GRPCStatus() returns nil, a nil Status is returned and ok is false.
+//
+// - Otherwise, err is an error not compatible with this function. In this
+// case, a nil Status is returned and ok is false.
+func ErrorToStatus(err error) (*status.Status, bool) {
+ if err == nil {
+ return nil, false
+ }
+ type grpcStatus interface{ GRPCStatus() *grpcstatus.Status }
+ var gs grpcStatus
+ if errors.As(err, &gs) {
+ st := gs.GRPCStatus()
+ if st == nil {
+ return nil, false
+ }
+ return status.FromGRPCStatus(st), true
+ }
+ return nil, false
+}
+
+// ErrorToStatusCode extracts gRPC status code from error and returns it.
+//
+// - If err is nil, codes.OK is returned.
+//
+// - If err implements (or wraps error that implements) the method
+// `GRPCStatus() *google.golang.org/grpc/status.Status`, and
+// `GRPCStatus()` returns a non-nil status, code from the status
+// is returned.
+//
+// - Otherwise codes.Unknown is returned.
+func ErrorToStatusCode(err error) codes.Code {
+ if err == nil {
+ return codes.OK
+ }
+ type grpcStatus interface{ GRPCStatus() *grpcstatus.Status }
+ var gs grpcStatus
+ if errors.As(err, &gs) {
+ st := gs.GRPCStatus()
+ if st != nil {
+ return st.Code()
+ }
+ }
+ return codes.Unknown
+}
+
+// IsCanceled checks whether an error comes from an operation being canceled.
+func IsCanceled(err error) bool {
+ if errors.Is(err, context.Canceled) {
+ return true
+ }
+ statusCode := ErrorToStatusCode(err)
+ return statusCode == codes.Canceled
+}
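
A brief sketch of the new helpers: ErrorToStatusCode unwraps errors (via errors.As) to find a gRPC status, while IsCanceled still recognises plain context cancellation:

```go
package main

import (
	"context"
	"fmt"

	"github.com/grafana/dskit/grpcutil"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// A wrapped gRPC status error is unwrapped and its code extracted.
	err := fmt.Errorf("calling ingester: %w", status.Error(codes.ResourceExhausted, "too many requests"))
	fmt.Println(grpcutil.ErrorToStatusCode(err)) // ResourceExhausted

	// Errors without a gRPC status map to Unknown.
	fmt.Println(grpcutil.ErrorToStatusCode(fmt.Errorf("boom"))) // Unknown

	// Context cancellation is still detected without a status.
	fmt.Println(grpcutil.IsCanceled(context.Canceled)) // true
}
```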
diff --git a/vendor/github.com/grafana/dskit/httpgrpc/httpgrpc.go b/vendor/github.com/grafana/dskit/httpgrpc/httpgrpc.go
index 3012edd422ba6..e1f044d8650bb 100644
--- a/vendor/github.com/grafana/dskit/httpgrpc/httpgrpc.go
+++ b/vendor/github.com/grafana/dskit/httpgrpc/httpgrpc.go
@@ -5,19 +5,105 @@
package httpgrpc
import (
+ "bytes"
"context"
"fmt"
+ "io"
+ "net/http"
"github.com/go-kit/log/level"
- "google.golang.org/grpc/metadata"
-
spb "github.com/gogo/googleapis/google/rpc"
"github.com/gogo/protobuf/types"
"github.com/gogo/status"
+ "google.golang.org/grpc/metadata"
+ "github.com/grafana/dskit/grpcutil"
"github.com/grafana/dskit/log"
)
+const (
+ MetadataMethod = "httpgrpc-method"
+ MetadataURL = "httpgrpc-url"
+)
+
+// AppendRequestMetadataToContext appends metadata of HTTPRequest into gRPC metadata.
+func AppendRequestMetadataToContext(ctx context.Context, req *HTTPRequest) context.Context {
+ return metadata.AppendToOutgoingContext(ctx,
+ MetadataMethod, req.Method,
+ MetadataURL, req.Url)
+}
+
+type nopCloser struct {
+ *bytes.Buffer
+}
+
+func (nopCloser) Close() error { return nil }
+
+// BytesBuffer returns the underlying `bytes.Buffer` used to build this io.ReadCloser.
+func (n nopCloser) BytesBuffer() *bytes.Buffer { return n.Buffer }
+
+// FromHTTPRequest converts an ordinary http.Request into an httpgrpc.HTTPRequest
+func FromHTTPRequest(r *http.Request) (*HTTPRequest, error) {
+ body, err := io.ReadAll(r.Body)
+ if err != nil {
+ return nil, err
+ }
+ return &HTTPRequest{
+ Method: r.Method,
+ Url: r.RequestURI,
+ Body: body,
+ Headers: FromHeader(r.Header),
+ }, nil
+}
+
+// ToHTTPRequest converts httpgrpc.HTTPRequest to http.Request.
+func ToHTTPRequest(ctx context.Context, r *HTTPRequest) (*http.Request, error) {
+ req, err := http.NewRequest(r.Method, r.Url, nopCloser{Buffer: bytes.NewBuffer(r.Body)})
+ if err != nil {
+ return nil, err
+ }
+ ToHeader(r.Headers, req.Header)
+ req = req.WithContext(ctx)
+ req.RequestURI = r.Url
+ req.ContentLength = int64(len(r.Body))
+ return req, nil
+}
+
+// WriteResponse converts an httpgrpc response to an HTTP one
+func WriteResponse(w http.ResponseWriter, resp *HTTPResponse) error {
+ ToHeader(resp.Headers, w.Header())
+ w.WriteHeader(int(resp.Code))
+ _, err := w.Write(resp.Body)
+ return err
+}
+
+// WriteError converts an httpgrpc error to an HTTP one
+func WriteError(w http.ResponseWriter, err error) {
+ resp, ok := HTTPResponseFromError(err)
+ if ok {
+ _ = WriteResponse(w, resp)
+ } else {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ }
+}
+
+func ToHeader(hs []*Header, header http.Header) {
+ for _, h := range hs {
+ header[h.Key] = h.Values
+ }
+}
+
+func FromHeader(hs http.Header) []*Header {
+ result := make([]*Header, 0, len(hs))
+ for k, vs := range hs {
+ result = append(result, &Header{
+ Key: k,
+ Values: vs,
+ })
+ }
+ return result
+}
+
// Errorf returns a HTTP gRPC error that is correctly forwarded over
// gRPC, and can eventually be converted back to a HTTP response with
// HTTPResponseFromError.
@@ -44,7 +130,7 @@ func ErrorFromHTTPResponse(resp *HTTPResponse) error {
// HTTPResponseFromError converts a grpc error into an HTTP response
func HTTPResponseFromError(err error) (*HTTPResponse, bool) {
- s, ok := status.FromError(err)
+ s, ok := grpcutil.ErrorToStatus(err)
if !ok {
return nil, false
}
@@ -62,15 +148,3 @@ func HTTPResponseFromError(err error) (*HTTPResponse, bool) {
return &resp, true
}
-
-const (
- MetadataMethod = "httpgrpc-method"
- MetadataURL = "httpgrpc-url"
-)
-
-// AppendRequestMetadataToContext appends metadata of HTTPRequest into gRPC metadata.
-func AppendRequestMetadataToContext(ctx context.Context, req *HTTPRequest) context.Context {
- return metadata.AppendToOutgoingContext(ctx,
- MetadataMethod, req.Method,
- MetadataURL, req.Url)
-}
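
With the conversion helpers now exported from the httpgrpc package itself, both sides of the bridge can be exercised directly. A sketch of the round trip (the URL path is only illustrative):

```go
package main

import (
	"context"
	"fmt"
	"net/http/httptest"
	"strings"

	"github.com/grafana/dskit/httpgrpc"
)

func main() {
	// Convert an ordinary HTTP request into its gRPC representation...
	httpReq := httptest.NewRequest("POST", "/loki/api/v1/push", strings.NewReader(`{"streams":[]}`))
	grpcReq, err := httpgrpc.FromHTTPRequest(httpReq)
	if err != nil {
		panic(err)
	}

	// ...and back into an *http.Request on the receiving side.
	restored, err := httpgrpc.ToHTTPRequest(context.Background(), grpcReq)
	if err != nil {
		panic(err)
	}
	fmt.Println(restored.Method, restored.RequestURI, restored.ContentLength)
}
```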
diff --git a/vendor/github.com/grafana/dskit/httpgrpc/server/server.go b/vendor/github.com/grafana/dskit/httpgrpc/server/server.go
index b0d808b7b75a1..c642f7fa13fda 100644
--- a/vendor/github.com/grafana/dskit/httpgrpc/server/server.go
+++ b/vendor/github.com/grafana/dskit/httpgrpc/server/server.go
@@ -5,10 +5,8 @@
package server
import (
- "bytes"
"context"
"fmt"
- "io"
"net"
"net/http"
"net/http/httptest"
@@ -27,6 +25,13 @@ import (
"github.com/grafana/dskit/middleware"
)
+var (
+ // DoNotLogErrorHeaderKey is a header key used for marking non-loggable errors. More precisely, if an HTTP response
+ // has a status code 5xx, and contains a header with key DoNotLogErrorHeaderKey and any values, the generated error
+ // will be marked as non-loggable.
+ DoNotLogErrorHeaderKey = http.CanonicalHeaderKey("X-DoNotLogError")
+)
+
// Server implements HTTPServer. HTTPServer is a generated interface that gRPC
// servers must implement.
type Server struct {
@@ -40,35 +45,34 @@ func NewServer(handler http.Handler) *Server {
}
}
-type nopCloser struct {
- *bytes.Buffer
-}
-
-func (nopCloser) Close() error { return nil }
-
-// BytesBuffer returns the underlaying `bytes.buffer` used to build this io.ReadCloser.
-func (n nopCloser) BytesBuffer() *bytes.Buffer { return n.Buffer }
-
// Handle implements HTTPServer.
func (s Server) Handle(ctx context.Context, r *httpgrpc.HTTPRequest) (*httpgrpc.HTTPResponse, error) {
- req, err := http.NewRequest(r.Method, r.Url, nopCloser{Buffer: bytes.NewBuffer(r.Body)})
+ req, err := httpgrpc.ToHTTPRequest(ctx, r)
if err != nil {
return nil, err
}
- toHeader(r.Headers, req.Header)
- req = req.WithContext(ctx)
- req.RequestURI = r.Url
- req.ContentLength = int64(len(r.Body))
recorder := httptest.NewRecorder()
s.handler.ServeHTTP(recorder, req)
+ header := recorder.Header()
+
+ doNotLogError := false
+ if _, ok := header[DoNotLogErrorHeaderKey]; ok {
+ doNotLogError = true
+ header.Del(DoNotLogErrorHeaderKey) // remove before converting to httpgrpc resp
+ }
+
resp := &httpgrpc.HTTPResponse{
Code: int32(recorder.Code),
- Headers: fromHeader(recorder.Header()),
+ Headers: httpgrpc.FromHeader(header),
Body: recorder.Body.Bytes(),
}
if recorder.Code/100 == 5 {
- return nil, httpgrpc.ErrorFromHTTPResponse(resp)
+ err := httpgrpc.ErrorFromHTTPResponse(resp)
+ if doNotLogError {
+ err = middleware.DoNotLogError{Err: err}
+ }
+ return nil, err
}
return resp, nil
}
@@ -153,38 +157,6 @@ func NewClient(address string) (*Client, error) {
}, nil
}
-// HTTPRequest wraps an ordinary HTTPRequest with a gRPC one
-func HTTPRequest(r *http.Request) (*httpgrpc.HTTPRequest, error) {
- body, err := io.ReadAll(r.Body)
- if err != nil {
- return nil, err
- }
- return &httpgrpc.HTTPRequest{
- Method: r.Method,
- Url: r.RequestURI,
- Body: body,
- Headers: fromHeader(r.Header),
- }, nil
-}
-
-// WriteResponse converts an httpgrpc response to an HTTP one
-func WriteResponse(w http.ResponseWriter, resp *httpgrpc.HTTPResponse) error {
- toHeader(resp.Headers, w.Header())
- w.WriteHeader(int(resp.Code))
- _, err := w.Write(resp.Body)
- return err
-}
-
-// WriteError converts an httpgrpc error to an HTTP one
-func WriteError(w http.ResponseWriter, err error) {
- resp, ok := httpgrpc.HTTPResponseFromError(err)
- if ok {
- _ = WriteResponse(w, resp)
- } else {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- }
-}
-
// ServeHTTP implements http.Handler
func (c *Client) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if tracer := opentracing.GlobalTracer(); tracer != nil {
@@ -195,7 +167,7 @@ func (c *Client) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
}
- req, err := HTTPRequest(r)
+ req, err := httpgrpc.FromHTTPRequest(r)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
@@ -212,25 +184,8 @@ func (c *Client) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
}
- if err := WriteResponse(w, resp); err != nil {
+ if err := httpgrpc.WriteResponse(w, resp); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
-
-func toHeader(hs []*httpgrpc.Header, header http.Header) {
- for _, h := range hs {
- header[h.Key] = h.Values
- }
-}
-
-func fromHeader(hs http.Header) []*httpgrpc.Header {
- result := make([]*httpgrpc.Header, 0, len(hs))
- for k, vs := range hs {
- result = append(result, &httpgrpc.Header{
- Key: k,
- Values: vs,
- })
- }
- return result
-}
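
Handlers served through this gRPC bridge can now opt a 5xx response out of error logging by setting the marker header before writing the status. A hypothetical handler as a sketch:

```go
package main

import (
	"net/http"

	"github.com/grafana/dskit/httpgrpc/server"
)

// overloadedHandler is a hypothetical handler that returns 503 for expected
// overload situations without having the resulting error logged.
func overloadedHandler(w http.ResponseWriter, _ *http.Request) {
	w.Header().Set(server.DoNotLogErrorHeaderKey, "true")
	http.Error(w, "try again later", http.StatusServiceUnavailable)
}

func main() {
	// The header is stripped before the response is converted to httpgrpc,
	// and the generated error is wrapped in middleware.DoNotLogError.
	_ = server.NewServer(http.HandlerFunc(overloadedHandler))
}
```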
diff --git a/vendor/github.com/grafana/dskit/kv/memberlist/memberlist_client.go b/vendor/github.com/grafana/dskit/kv/memberlist/memberlist_client.go
index 30a27531fd08d..693964b5ad067 100644
--- a/vendor/github.com/grafana/dskit/kv/memberlist/memberlist_client.go
+++ b/vendor/github.com/grafana/dskit/kv/memberlist/memberlist_client.go
@@ -222,7 +222,7 @@ func generateRandomSuffix(logger log.Logger) string {
// If joining of the cluster is configured, it is done in Running state, and if join fails and Abort flag is set, service
// fails.
type KV struct {
- services.Service
+ services.NamedService
cfg KVConfig
logger log.Logger
@@ -374,7 +374,8 @@ func NewKV(cfg KVConfig, logger log.Logger, dnsProvider DNSProvider, registerer
mlkv.codecs[c.CodecID()] = c
}
- mlkv.Service = services.NewBasicService(mlkv.starting, mlkv.running, mlkv.stopping)
+ mlkv.NamedService = services.NewBasicService(mlkv.starting, mlkv.running, mlkv.stopping).WithName("memberlist_kv")
+
return mlkv
}
@@ -485,17 +486,17 @@ func (m *KV) running(ctx context.Context) error {
tickerChan = t.C
}
+ logger := log.With(m.logger, "phase", "periodic_rejoin")
for {
select {
case <-tickerChan:
- members := m.discoverMembers(ctx, m.cfg.JoinMembers)
-
- reached, err := m.memberlist.Join(members)
+ const numAttempts = 1 // don't retry if resolution fails, we will try again next time
+ reached, err := m.joinMembersWithRetries(ctx, numAttempts, logger)
if err == nil {
- level.Info(m.logger).Log("msg", "re-joined memberlist cluster", "reached_nodes", reached)
+ level.Info(logger).Log("msg", "re-joined memberlist cluster", "reached_nodes", reached)
} else {
// Don't report error from rejoin, otherwise KV service would be stopped completely.
- level.Warn(m.logger).Log("msg", "re-joining memberlist cluster failed", "err", err)
+ level.Warn(logger).Log("msg", "re-joining memberlist cluster failed", "err", err, "next_try_in", m.cfg.RejoinInterval)
}
case <-ctx.Done():
@@ -540,7 +541,7 @@ func (m *KV) fastJoinMembersOnStartup(ctx context.Context) {
level.Info(m.logger).Log("msg", "memberlist fast-join starting", "nodes_found", len(nodes), "to_join", toJoin)
totalJoined := 0
- for toJoin > 0 && len(nodes) > 0 {
+ for toJoin > 0 && len(nodes) > 0 && ctx.Err() == nil {
reached, err := m.memberlist.Join(nodes[0:1]) // Try to join single node only.
if err != nil {
level.Debug(m.logger).Log("msg", "fast-joining node failed", "node", nodes[0], "err", err)
@@ -568,41 +569,122 @@ func (m *KV) joinMembersOnStartup(ctx context.Context) bool {
return true
}
+ logger := log.With(m.logger, "phase", "startup")
+ level.Info(logger).Log("msg", "joining memberlist cluster", "join_members", strings.Join(m.cfg.JoinMembers, ","))
startTime := time.Now()
+ reached, err := m.joinMembersWithRetries(ctx, m.cfg.MaxJoinRetries, logger)
+ if err != nil {
+ level.Error(logger).Log("msg", "joining memberlist cluster failed", "err", err, "elapsed_time", time.Since(startTime))
+ return false
+ }
+ level.Info(logger).Log("msg", "joining memberlist cluster succeeded", "reached_nodes", reached, "elapsed_time", time.Since(startTime))
+ return true
+}
- level.Info(m.logger).Log("msg", "joining memberlist cluster", "join_members", strings.Join(m.cfg.JoinMembers, ","))
-
- cfg := backoff.Config{
- MinBackoff: m.cfg.MinJoinBackoff,
- MaxBackoff: m.cfg.MaxJoinBackoff,
- MaxRetries: m.cfg.MaxJoinRetries,
+// joinMembersWithRetries joins m.cfg.JoinMembers 100 at a time. After each batch of 100 it rediscovers the members.
+// This helps when the list of members is big and by the time we reach the end the originally resolved addresses may be obsolete.
+// joinMembersWithRetries returns an error iff it couldn't successfully join any node OR the context was cancelled.
+func (m *KV) joinMembersWithRetries(ctx context.Context, numAttempts int, logger log.Logger) (int, error) {
+ var (
+ cfg = backoff.Config{
+ MinBackoff: m.cfg.MinJoinBackoff,
+ MaxBackoff: m.cfg.MaxJoinBackoff,
+ MaxRetries: numAttempts,
+ }
+ boff = backoff.New(ctx, cfg)
+ err error
+ successfullyJoined = 0
+ )
+
+ for ; boff.Ongoing(); boff.Wait() {
+ successfullyJoined, err = m.joinMembersInBatches(ctx)
+ if successfullyJoined > 0 {
+ // If there are _some_ successful joins, then we can consider the join done.
+ // Mimicking the Join semantics we return an error only when we couldn't join any node at all
+ err = nil
+ break
+ }
+ level.Warn(logger).Log("msg", "joining memberlist cluster", "attempts", boff.NumRetries()+1, "max_attempts", numAttempts, "err", err)
+ }
+ if err == nil && boff.Err() != nil {
+ err = fmt.Errorf("joining memberlist: %w", boff.Err())
}
- boff := backoff.New(ctx, cfg)
- var lastErr error
+ return successfullyJoined, err
+}
- for boff.Ongoing() {
- // We rejoin all nodes, including those that were joined during "fast-join".
- // This is harmless and simpler.
- nodes := m.discoverMembers(ctx, m.cfg.JoinMembers)
+// joinMembersInBatches joins m.cfg.JoinMembers and re-resolves the address of m.cfg.JoinMembers after joining 100 nodes.
+// joinMembersInBatches returns the number of nodes joined. joinMembersInBatches returns an error only when the
+// number of joined nodes is 0.
+func (m *KV) joinMembersInBatches(ctx context.Context) (int, error) {
+ const batchSize = 100
+ var (
+ attemptedNodes = make(map[string]bool)
+ successfullyJoined = 0
+ lastErr error
+ batch = make([]string, batchSize)
+ nodes []string
+ )
+ for moreAvailableNodes := true; ctx.Err() == nil && moreAvailableNodes; {
+ // Rediscover nodes and try to join a subset of them with each batch.
+ // When the list of nodes is large by the time we reach the end of the list some of the
+ // IPs can be unreachable.
+ newlyResolved := m.discoverMembers(ctx, m.cfg.JoinMembers)
+ if len(newlyResolved) > 0 {
+ // If the resolution fails we keep using the nodes list from the last resolution.
+ // If that failed too, then we fail the join attempt.
+ nodes = newlyResolved
+ }
- if len(nodes) > 0 {
- reached, err := m.memberlist.Join(nodes) // err is only returned if reached==0.
- if err == nil {
- level.Info(m.logger).Log("msg", "joining memberlist cluster succeeded", "reached_nodes", reached, "elapsed_time", time.Since(startTime))
- return true
+ // Prepare batch
+ batch = batch[:0]
+ moreAvailableNodes = false
+ for _, n := range nodes {
+ if attemptedNodes[n] {
+ continue
}
- level.Warn(m.logger).Log("msg", "joining memberlist cluster: failed to reach any nodes", "retries", boff.NumRetries(), "err", err)
- lastErr = err
- } else {
- level.Warn(m.logger).Log("msg", "joining memberlist cluster: found no nodes to join", "retries", boff.NumRetries())
+ if len(batch) >= batchSize {
+ moreAvailableNodes = true
+ break
+ }
+ batch = append(batch, n)
+ attemptedNodes[n] = true
}
- boff.Wait()
+ // Join batch
+ joinedInBatch, err := m.joinMembersBatch(ctx, batch)
+ if err != nil {
+ lastErr = err
+ }
+ successfullyJoined += joinedInBatch
+ }
+ if successfullyJoined > 0 {
+ return successfullyJoined, nil
+ }
+ if successfullyJoined == 0 && lastErr == nil {
+ return 0, errors.New("found no nodes to join")
}
+ return 0, lastErr
+}
- level.Error(m.logger).Log("msg", "joining memberlist cluster failed", "last_error", lastErr, "elapsed_time", time.Since(startTime))
- return false
+// joinMembersBatch returns an error only if it couldn't successfully join any nodes or if ctx is cancelled.
+func (m *KV) joinMembersBatch(ctx context.Context, nodes []string) (successfullyJoined int, lastErr error) {
+ for nodeIdx := range nodes {
+ if ctx.Err() != nil {
+ return successfullyJoined, fmt.Errorf("joining batch: %w", context.Cause(ctx))
+ }
+ // Attempt to join a single node.
+ // The cost of calling Join shouldn't be different between passing all nodes in one invocation versus passing a single node per invocation.
+ reached, err := m.memberlist.Join(nodes[nodeIdx : nodeIdx+1])
+ successfullyJoined += reached
+ if err != nil {
+ lastErr = err
+ }
+ }
+ if successfullyJoined > 0 {
+ lastErr = nil
+ }
+ return successfullyJoined, lastErr
}
// Provides a dns-based member discovery to join a memberlist cluster w/o knowing members' addresses upfront.
diff --git a/vendor/github.com/grafana/dskit/middleware/grpc_instrumentation.go b/vendor/github.com/grafana/dskit/middleware/grpc_instrumentation.go
index 70069fa36fadd..e4052b8ed05ff 100644
--- a/vendor/github.com/grafana/dskit/middleware/grpc_instrumentation.go
+++ b/vendor/github.com/grafana/dskit/middleware/grpc_instrumentation.go
@@ -6,6 +6,7 @@ package middleware
import (
"context"
+ "errors"
"io"
"strconv"
"time"
@@ -13,72 +14,69 @@ import (
"github.com/prometheus/client_golang/prometheus"
"go.uber.org/atomic"
"google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"github.com/grafana/dskit/grpcutil"
- "github.com/grafana/dskit/httpgrpc"
"github.com/grafana/dskit/instrument"
)
-func observe(ctx context.Context, hist *prometheus.HistogramVec, method string, err error, duration time.Duration) {
- respStatus := "success"
- if err != nil {
- if errResp, ok := httpgrpc.HTTPResponseFromError(err); ok {
- respStatus = strconv.Itoa(int(errResp.Code))
- } else if grpcutil.IsCanceled(err) {
- respStatus = "cancel"
- } else {
- respStatus = "error"
- }
- }
- instrument.ObserveWithExemplar(ctx, hist.WithLabelValues(gRPC, method, respStatus, "false"), duration.Seconds())
+func observe(ctx context.Context, hist *prometheus.HistogramVec, method string, err error, duration time.Duration, instrumentLabel instrumentationLabel) {
+ instrument.ObserveWithExemplar(ctx, hist.WithLabelValues(gRPC, method, instrumentLabel.getInstrumentationLabel(err), "false"), duration.Seconds())
}
// UnaryServerInstrumentInterceptor instruments gRPC requests for errors and latency.
-func UnaryServerInstrumentInterceptor(hist *prometheus.HistogramVec) grpc.UnaryServerInterceptor {
+func UnaryServerInstrumentInterceptor(hist *prometheus.HistogramVec, instrumentationOptions ...InstrumentationOption) grpc.UnaryServerInterceptor {
+ instrumentationLabel := applyInstrumentationOptions(false, instrumentationOptions...)
return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
begin := time.Now()
resp, err := handler(ctx, req)
- observe(ctx, hist, info.FullMethod, err, time.Since(begin))
+ observe(ctx, hist, info.FullMethod, err, time.Since(begin), instrumentationLabel)
return resp, err
}
}
// StreamServerInstrumentInterceptor instruments gRPC requests for errors and latency.
-func StreamServerInstrumentInterceptor(hist *prometheus.HistogramVec) grpc.StreamServerInterceptor {
+func StreamServerInstrumentInterceptor(hist *prometheus.HistogramVec, instrumentationOptions ...InstrumentationOption) grpc.StreamServerInterceptor {
+ instrumentationLabel := applyInstrumentationOptions(false, instrumentationOptions...)
return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
begin := time.Now()
err := handler(srv, ss)
- observe(ss.Context(), hist, info.FullMethod, err, time.Since(begin))
+ observe(ss.Context(), hist, info.FullMethod, err, time.Since(begin), instrumentationLabel)
return err
}
}
// UnaryClientInstrumentInterceptor records duration of gRPC requests client side.
-func UnaryClientInstrumentInterceptor(metric *prometheus.HistogramVec) grpc.UnaryClientInterceptor {
+func UnaryClientInstrumentInterceptor(metric *prometheus.HistogramVec, instrumentationOptions ...InstrumentationOption) grpc.UnaryClientInterceptor {
+ // we enforce masking of HTTP statuses.
+ instrumentationLabel := applyInstrumentationOptions(true, instrumentationOptions...)
return func(ctx context.Context, method string, req, resp interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
start := time.Now()
err := invoker(ctx, method, req, resp, cc, opts...)
- metric.WithLabelValues(method, errorCode(err)).Observe(time.Since(start).Seconds())
+ metric.WithLabelValues(method, instrumentationLabel.getInstrumentationLabel(err)).Observe(time.Since(start).Seconds())
return err
}
}
// StreamClientInstrumentInterceptor records duration of streaming gRPC requests client side.
-func StreamClientInstrumentInterceptor(metric *prometheus.HistogramVec) grpc.StreamClientInterceptor {
+func StreamClientInstrumentInterceptor(metric *prometheus.HistogramVec, instrumentationOptions ...InstrumentationOption) grpc.StreamClientInterceptor {
+ // we enforce masking of HTTP statuses.
+ instrumentationLabel := applyInstrumentationOptions(true, instrumentationOptions...)
return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string,
streamer grpc.Streamer, opts ...grpc.CallOption,
) (grpc.ClientStream, error) {
start := time.Now()
stream, err := streamer(ctx, desc, cc, method, opts...)
s := &instrumentedClientStream{
- metric: metric,
- start: start,
- method: method,
- serverStreams: desc.ServerStreams,
- finished: atomic.NewBool(false),
- finishedChan: make(chan struct{}),
- stream: stream,
+ metric: metric,
+ start: start,
+ method: method,
+ serverStreams: desc.ServerStreams,
+ finished: atomic.NewBool(false),
+ finishedChan: make(chan struct{}),
+ stream: stream,
+ instrumentationLabel: instrumentationLabel,
}
s.awaitCompletion(ctx)
return s, err
@@ -87,13 +85,14 @@ func StreamClientInstrumentInterceptor(metric *prometheus.HistogramVec) grpc.Str
// This implementation is heavily inspired by github.com/opentracing-contrib/go-grpc's openTracingClientStream.
type instrumentedClientStream struct {
- metric *prometheus.HistogramVec
- start time.Time
- method string
- serverStreams bool
- finished *atomic.Bool
- finishedChan chan struct{}
- stream grpc.ClientStream
+ metric *prometheus.HistogramVec
+ start time.Time
+ method string
+ serverStreams bool
+ finished *atomic.Bool
+ finishedChan chan struct{}
+ stream grpc.ClientStream
+ instrumentationLabel instrumentationLabel
}
func (s *instrumentedClientStream) Trailer() metadata.MD {
@@ -122,7 +121,7 @@ func (s *instrumentedClientStream) finish(err error) {
close(s.finishedChan)
- s.metric.WithLabelValues(s.method, errorCode(err)).Observe(time.Since(s.start).Seconds())
+ s.metric.WithLabelValues(s.method, s.instrumentationLabel.getInstrumentationLabel(err)).Observe(time.Since(s.start).Seconds())
}
func (s *instrumentedClientStream) SendMsg(m interface{}) error {
@@ -173,18 +172,75 @@ func (s *instrumentedClientStream) CloseSend() error {
return err
}
-// errorCode converts an error into an error code string.
-func errorCode(err error) string {
- if err == nil {
- return "2xx"
+type InstrumentationOption func(*instrumentationLabel)
+
+var (
+ // ReportGRPCStatusOption is an InstrumentationOption that is used for enabling gRPC status codes to be used
+ // in instrumentation labels.
+ ReportGRPCStatusOption InstrumentationOption = func(instrumentationLabel *instrumentationLabel) {
+ instrumentationLabel.reportGRPCStatus = true
+ }
+)
+
+func applyInstrumentationOptions(maskHTTPStatuses bool, options ...InstrumentationOption) instrumentationLabel {
+ instrumentationLabel := instrumentationLabel{maskHTTPStatus: maskHTTPStatuses}
+ for _, opt := range options {
+ opt(&instrumentationLabel)
+ }
+ return instrumentationLabel
+}
+
+type instrumentationLabel struct {
+ reportGRPCStatus bool
+ maskHTTPStatus bool
+}
+
+// getInstrumentationLabel converts an error into an error code string by applying the configurations
+// contained in this instrumentationLabel object.
+func (i *instrumentationLabel) getInstrumentationLabel(err error) string {
+ statusCode := errorToStatusCode(err)
+ return i.statusCodeToString(statusCode)
+}
+
+func (i *instrumentationLabel) statusCodeToString(statusCode codes.Code) string {
+ if isHTTPStatusCode(statusCode) {
+ statusFamily := int(statusCode / 100)
+ if i.maskHTTPStatus {
+ return strconv.Itoa(statusFamily) + "xx"
+ }
+ return strconv.Itoa(int(statusCode))
}
- if errResp, ok := httpgrpc.HTTPResponseFromError(err); ok {
- statusFamily := int(errResp.Code / 100)
- return strconv.Itoa(statusFamily) + "xx"
- } else if grpcutil.IsCanceled(err) {
+ if i.reportGRPCStatus {
+ return statusCode.String()
+ }
+
+ if statusCode == codes.OK {
+ if i.maskHTTPStatus {
+ return "2xx"
+ }
+ return "success"
+ }
+
+ if statusCode == codes.Canceled {
return "cancel"
- } else {
- return "error"
}
+
+ return "error"
+}
+
+func errorToStatusCode(err error) codes.Code {
+ if err == nil {
+ return codes.OK
+ }
+
+ if errors.Is(err, context.Canceled) {
+ return codes.Canceled
+ }
+
+ return grpcutil.ErrorToStatusCode(err)
+}
+
+func isHTTPStatusCode(statusCode codes.Code) bool {
+ return int(statusCode) >= 100 && int(statusCode) < 600
}
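
The interceptors now accept instrumentation options; ReportGRPCStatusOption switches the status label from the generic "error"/"cancel" buckets to gRPC code names. A sketch of wiring it into a server, assuming the conventional {method, route, status_code, ws} histogram labels:

```go
package main

import (
	"github.com/grafana/dskit/middleware"
	"github.com/prometheus/client_golang/prometheus"
	"google.golang.org/grpc"
)

func main() {
	requestDuration := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name: "request_duration_seconds",
		Help: "Time spent serving gRPC requests.",
	}, []string{"method", "route", "status_code", "ws"})

	// With the option, the status_code label carries values such as
	// "ResourceExhausted" instead of the plain "error" bucket.
	_ = grpc.NewServer(
		grpc.ChainUnaryInterceptor(
			middleware.UnaryServerInstrumentInterceptor(requestDuration, middleware.ReportGRPCStatusOption),
		),
		grpc.ChainStreamInterceptor(
			middleware.StreamServerInstrumentInterceptor(requestDuration, middleware.ReportGRPCStatusOption),
		),
	)
}
```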
diff --git a/vendor/github.com/grafana/dskit/middleware/grpc_logging.go b/vendor/github.com/grafana/dskit/middleware/grpc_logging.go
index 7f5db7725c945..feab364743225 100644
--- a/vendor/github.com/grafana/dskit/middleware/grpc_logging.go
+++ b/vendor/github.com/grafana/dskit/middleware/grpc_logging.go
@@ -29,6 +29,12 @@ type OptionalLogging interface {
ShouldLog(ctx context.Context, duration time.Duration) bool
}
+type DoNotLogError struct{ Err error }
+
+func (i DoNotLogError) Error() string { return i.Err.Error() }
+func (i DoNotLogError) Unwrap() error { return i.Err }
+func (i DoNotLogError) ShouldLog(_ context.Context, _ time.Duration) bool { return false }
+
// GRPCServerLog logs grpc requests, errors, and latency.
type GRPCServerLog struct {
Log log.Logger
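
The wrapper satisfies the OptionalLogging interface above while keeping the underlying error visible to errors.Is/As. A tiny sketch:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/grafana/dskit/middleware"
)

var errShedding = errors.New("load shedding")

func main() {
	// Wrap an error so the gRPC logging middleware skips it,
	// while callers can still inspect the underlying cause.
	err := middleware.DoNotLogError{Err: errShedding}

	fmt.Println(err.ShouldLog(context.Background(), time.Second)) // false
	fmt.Println(errors.Is(err, errShedding))                      // true
}
```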
diff --git a/vendor/github.com/grafana/dskit/middleware/zero_response.go b/vendor/github.com/grafana/dskit/middleware/zero_response.go
new file mode 100644
index 0000000000000..1bb4ecc8d1f6b
--- /dev/null
+++ b/vendor/github.com/grafana/dskit/middleware/zero_response.go
@@ -0,0 +1,132 @@
+package middleware
+
+import (
+ "errors"
+ "net"
+ "os"
+ "regexp"
+ "strconv"
+ "sync"
+
+ "github.com/go-kit/log"
+ "go.uber.org/atomic"
+)
+
+// NewZeroResponseListener returns a Listener that logs all connections that encountered io timeout on reads, and were closed before sending any response.
+func NewZeroResponseListener(list net.Listener, log log.Logger) net.Listener {
+ return &zeroResponseListener{
+ Listener: list,
+ log: log,
+ bufPool: sync.Pool{
+ New: func() interface{} { return &bufHolder{buf: make([]byte, 0, requestBufSize)} },
+ },
+ }
+}
+
+// Wrap a slice in a struct, so we can store a pointer in sync.Pool
+type bufHolder struct {
+ buf []byte
+}
+
+// Size of buffer for read data. We log this eventually.
+const requestBufSize = 512
+
+type zeroResponseListener struct {
+ net.Listener
+ log log.Logger
+ bufPool sync.Pool // pool of &bufHolder.
+}
+
+func (zl *zeroResponseListener) Accept() (net.Conn, error) {
+ conn, err := zl.Listener.Accept()
+ if err != nil {
+ return nil, err
+ }
+ bh := zl.bufPool.Get().(*bufHolder)
+ bh.buf = bh.buf[:0]
+ return &zeroResponseConn{Conn: conn, log: zl.log, bufHolder: bh, returnPool: &zl.bufPool}, nil
+}
+
+type zeroResponseConn struct {
+ net.Conn
+
+ log log.Logger
+ once sync.Once
+ returnPool *sync.Pool
+
+ bufHolderMux sync.Mutex
+ bufHolder *bufHolder // Buffer with first requestBufSize bytes from connection. Set to nil as soon as data is written to the connection.
+
+ lastReadErrIsDeadlineExceeded atomic.Bool
+}
+
+func (zc *zeroResponseConn) Read(b []byte) (n int, err error) {
+ n, err = zc.Conn.Read(b)
+ if err != nil && errors.Is(err, os.ErrDeadlineExceeded) {
+ zc.lastReadErrIsDeadlineExceeded.Store(true)
+ } else {
+ zc.lastReadErrIsDeadlineExceeded.Store(false)
+ }
+
+ // Store first requestBufSize read bytes on connection into the buffer for logging.
+ if n > 0 {
+ zc.bufHolderMux.Lock()
+ defer zc.bufHolderMux.Unlock()
+
+ if zc.bufHolder != nil {
+ rem := requestBufSize - len(zc.bufHolder.buf) // how much space is in our buffer.
+ if rem > n {
+ rem = n
+ }
+ if rem > 0 {
+ zc.bufHolder.buf = append(zc.bufHolder.buf, b[:rem]...)
+ }
+ }
+ }
+ return
+}
+
+func (zc *zeroResponseConn) Write(b []byte) (n int, err error) {
+ n, err = zc.Conn.Write(b)
+ if n > 0 {
+ zc.bufHolderMux.Lock()
+ if zc.bufHolder != nil {
+ zc.returnPool.Put(zc.bufHolder)
+ zc.bufHolder = nil
+ }
+ zc.bufHolderMux.Unlock()
+ }
+ return
+}
+
+var authRegexp = regexp.MustCompile(`((?i)\r\nauthorization:\s+)(\S+\s+)(\S+)`)
+
+func (zc *zeroResponseConn) Close() error {
+ err := zc.Conn.Close()
+
+ zc.once.Do(func() {
+ zc.bufHolderMux.Lock()
+ defer zc.bufHolderMux.Unlock()
+
+ // If buffer was already returned, it means there was some data written on the connection, nothing to do.
+ if zc.bufHolder == nil {
+ return
+ }
+
+	// If we didn't write anything to this connection, and we got a timeout while reading data, it looks like
+	// a slow client failing to send a request to us.
+ if !zc.lastReadErrIsDeadlineExceeded.Load() {
+ return
+ }
+
+ b := zc.bufHolder.buf
+ b = authRegexp.ReplaceAll(b, []byte("${1}${2}***")) // Replace value in Authorization header with ***.
+
+ _ = zc.log.Log("msg", "read timeout, connection closed with no response", "read", strconv.Quote(string(b)), "remote", zc.RemoteAddr().String())
+
+ zc.returnPool.Put(zc.bufHolder)
+ zc.bufHolder = nil
+ })
+
+ return err
+}
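
A sketch of putting the listener in front of an HTTP server; the read timeout is what eventually triggers the logged close (the address and timeout are illustrative):

```go
package main

import (
	"net"
	"net/http"
	"os"
	"time"

	"github.com/go-kit/log"
	"github.com/grafana/dskit/middleware"
)

func main() {
	logger := log.NewLogfmtLogger(os.Stderr)

	lis, err := net.Listen("tcp", ":8080")
	if err != nil {
		panic(err)
	}
	// Connections that hit the read deadline and are closed before any byte of
	// a response is written get logged with their first 512 request bytes
	// (Authorization header values are masked).
	lis = middleware.NewZeroResponseListener(lis, logger)

	srv := &http.Server{ReadTimeout: 5 * time.Second}
	_ = srv.Serve(lis)
}
```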
diff --git a/vendor/github.com/grafana/dskit/modules/module_service.go b/vendor/github.com/grafana/dskit/modules/module_service.go
index 8ca4e25714de4..a0fcdb876fcde 100644
--- a/vendor/github.com/grafana/dskit/modules/module_service.go
+++ b/vendor/github.com/grafana/dskit/modules/module_service.go
@@ -79,13 +79,19 @@ func (w *moduleService) start(serviceContext context.Context) error {
// we don't want to let this service to stop until all dependant services are stopped,
// so we use independent context here
- level.Info(w.logger).Log("msg", "initialising", "module", w.name)
+ level.Info(w.logger).Log("msg", "starting", "module", w.name)
err := w.service.StartAsync(context.Background())
if err != nil {
return errors.Wrapf(err, "error starting module: %s", w.name)
}
- return w.service.AwaitRunning(serviceContext)
+ err = w.service.AwaitRunning(serviceContext)
+ if err != nil {
+ // Make sure that underlying service is stopped before returning
+ // (e.g. in case of context cancellation, AwaitRunning returns early, but service may still be starting).
+ _ = services.StopAndAwaitTerminated(context.Background(), w.service)
+ }
+ return errors.Wrapf(err, "starting module %s", w.name)
}
func (w *moduleService) run(serviceContext context.Context) error {
diff --git a/vendor/github.com/grafana/dskit/ring/batch.go b/vendor/github.com/grafana/dskit/ring/batch.go
index fa627445ed2c6..5acd8fd008620 100644
--- a/vendor/github.com/grafana/dskit/ring/batch.go
+++ b/vendor/github.com/grafana/dskit/ring/batch.go
@@ -8,7 +8,8 @@ import (
"sync"
"go.uber.org/atomic"
- "google.golang.org/grpc/status"
+
+ grpcUtils "github.com/grafana/dskit/grpcutil"
)
type batchTracker struct {
@@ -25,40 +26,79 @@ type instance struct {
}
type itemTracker struct {
- minSuccess int
- maxFailures int
- succeeded atomic.Int32
- failed4xx atomic.Int32
- failed5xx atomic.Int32
- remaining atomic.Int32
- err atomic.Error
+ minSuccess int
+ maxFailures int
+ succeeded atomic.Int32
+ failedClient atomic.Int32
+ failedServer atomic.Int32
+ remaining atomic.Int32
+ err atomic.Error
}
-func (i *itemTracker) recordError(err error) int32 {
+func (i *itemTracker) recordError(err error, isClientError func(error) bool) int32 {
i.err.Store(err)
- if s, ok := status.FromError(err); ok && s.Code()/100 == 4 {
- return i.failed4xx.Inc()
+ if isClientError(err) {
+ return i.failedClient.Inc()
}
+ return i.failedServer.Inc()
+}
- return i.failed5xx.Inc()
+func isHTTPStatus4xx(err error) bool {
+ code := grpcUtils.ErrorToStatusCode(err)
+ return code/100 == 4
}
-// DoBatch request against a set of keys in the ring, handling replication and
-// failures. For example if we want to write N items where they may all
-// hit different instances, and we want them all replicated R ways with
-// quorum writes, we track the relationship between batch RPCs and the items
-// within them.
-//
-// Callback is passed the instance to target, and the indexes of the keys
-// to send to that instance.
+// DoBatch is a deprecated version of DoBatchWithOptions where grpc errors containing status codes 4xx are treated as client errors.
+// Deprecated: Use DoBatchWithOptions instead.
+func DoBatch(ctx context.Context, op Operation, r ReadRing, keys []uint32, callback func(InstanceDesc, []int) error, cleanup func()) error {
+ return DoBatchWithOptions(ctx, op, r, keys, callback, DoBatchOptions{
+ Cleanup: cleanup,
+ IsClientError: isHTTPStatus4xx,
+ })
+}
+
+// DoBatchOptions defines options for the DoBatchWithOptions call.
+// Zero value options are valid, as well as individual zero valued fields.
+type DoBatchOptions struct {
+ // Cleanup is always called, either on an error before starting the batches or after they are all finished.
+ // If nil, a noop will be called.
+ Cleanup func()
+
+ // IsClientError classifies errors returned by `callback()` into client or server errors.
+	// See `batchTracker.record()` function for details about how errors are combined into the final error returned by DoBatchWithOptions.
+ // If nil, a default implementation is used that classifies grpc errors containing status codes 4xx as client errors.
+ IsClientError func(error) bool
+
+ // Go will be used to spawn the callback goroutines, and can be used to use a worker pool like concurrency.ReusableGoroutinesPool.
+ Go func(func())
+}
+
+func (o *DoBatchOptions) replaceZeroValuesWithDefaults() {
+ if o.Cleanup == nil {
+ o.Cleanup = func() {}
+ }
+ if o.IsClientError == nil {
+ o.IsClientError = isHTTPStatus4xx
+ }
+ if o.Go == nil {
+ o.Go = func(f func()) { go f() }
+ }
+}
+
+// DoBatchWithOptions runs requests against a set of keys in the ring, handling replication and failures.
+// For example if we want to write N items where they may all hit different instances,
+// and we want them all replicated R ways with quorum writes,
+// we track the relationship between batch RPCs and the items within them.
//
-// cleanup() is always called, either on an error before starting the batches or after they all finish.
+// See comments on DoBatchOptions for available options for this call.
//
-// Not implemented as a method on Ring so we can test separately.
-func DoBatch(ctx context.Context, op Operation, r ReadRing, keys []uint32, callback func(InstanceDesc, []int) error, cleanup func()) error {
+// Not implemented as a method on Ring, so we can test separately.
+func DoBatchWithOptions(ctx context.Context, op Operation, r ReadRing, keys []uint32, callback func(InstanceDesc, []int) error, o DoBatchOptions) error {
+ o.replaceZeroValuesWithDefaults()
+
if r.InstancesCount() <= 0 {
- cleanup()
+ o.Cleanup()
return fmt.Errorf("DoBatch: InstancesCount <= 0")
}
expectedTrackers := len(keys) * (r.ReplicationFactor() + 1) / r.InstancesCount()
@@ -73,7 +113,7 @@ func DoBatch(ctx context.Context, op Operation, r ReadRing, keys []uint32, callb
for i, key := range keys {
replicationSet, err := r.Get(key, op, bufDescs[:0], bufHosts[:0], bufZones[:0])
if err != nil {
- cleanup()
+ o.Cleanup()
return err
}
itemTrackers[i].minSuccess = len(replicationSet.Instances) - replicationSet.MaxErrors
@@ -104,19 +144,19 @@ func DoBatch(ctx context.Context, op Operation, r ReadRing, keys []uint32, callb
wg.Add(len(instances))
for _, i := range instances {
- go func(i instance) {
+ i := i
+ o.Go(func() {
err := callback(i.desc, i.indexes)
- tracker.record(i.itemTrackers, err)
+ tracker.record(i.itemTrackers, err, o.IsClientError)
wg.Done()
- }(i)
+ })
}
// Perform cleanup at the end.
- go func() {
+ o.Go(func() {
wg.Wait()
-
- cleanup()
- }()
+ o.Cleanup()
+ })
select {
case err := <-tracker.err:
@@ -128,35 +168,36 @@ func DoBatch(ctx context.Context, op Operation, r ReadRing, keys []uint32, callb
}
}
-func (b *batchTracker) record(itemTrackers []*itemTracker, err error) {
+func (b *batchTracker) record(itemTrackers []*itemTracker, err error, isClientError func(error) bool) {
// If we reach the required number of successful puts on this item, then decrement the
// number of pending items by one.
//
// The use of atomic increments here is needed as:
// * rpcsPending and rpcsFailed guarantee only a single goroutine will write to either channel
- // * succeeded, failed4xx, failed5xx and remaining guarantee that the "return decision" is made atomically
+ // * succeeded, failedClient, failedServer and remaining guarantee that the "return decision" is made atomically
// avoiding race condition
- for i := range itemTrackers {
+ for _, it := range itemTrackers {
if err != nil {
// Track the number of errors by error family, and if it exceeds maxFailures
// shortcut the waiting rpc.
- errCount := itemTrackers[i].recordError(err)
+ errCount := it.recordError(err, isClientError)
// We should return an error if we reach the maxFailure (quorum) on a given error family OR
- // we don't have any remaining instances to try.
+ // we don't have any remaining instances to try. In the following we use ClientError and ServerError
+ // to denote errors, for which isClientError() returns true and false respectively.
//
- // Ex: 2xx, 4xx, 5xx -> return 5xx
- // Ex: 4xx, 4xx, _ -> return 4xx
- // Ex: 5xx, _, 5xx -> return 5xx
+ // Ex: Success, ClientError, ServerError -> return ServerError
+ // Ex: ClientError, ClientError, Success -> return ClientError
+ // Ex: ServerError, Success, ServerError -> return ServerError
//
- // The reason for searching for quorum in 4xx and 5xx errors separately is to give a more accurate
- // response to the initial request. So if a quorum of instances rejects the request with 4xx, then the request should be rejected
- // even if less-than-quorum instances indicated a failure to process the request (via 5xx).
+ // The reason for searching for quorum in ClientError and ServerError errors separately is to give a more accurate
+ // response to the initial request. So if a quorum of instances rejects the request with ClientError, then the request should be rejected
+ // even if less-than-quorum instances indicated a failure to process the request (via ServerError).
// The speculation is that had the unavailable instances been available,
- // they would have rejected the request with a 4xx as well.
- // Conversely, if a quorum of instances failed to process the request via 5xx and less-than-quorum
- // instances rejected it with 4xx, then we do not have quorum to reject the request as a 4xx. Instead,
- // we return the last 5xx error for debuggability.
- if errCount > int32(itemTrackers[i].maxFailures) || itemTrackers[i].remaining.Dec() == 0 {
+ // they would have rejected the request with a ClientError as well.
+ // Conversely, if a quorum of instances failed to process the request via ServerError and less-than-quorum
+ // instances rejected it with ClientError, then we do not have quorum to reject the request as a ClientError. Instead,
+ // we return the last ServerError error for debuggability.
+ if errCount > int32(it.maxFailures) || it.remaining.Dec() == 0 {
if b.rpcsFailed.Inc() == 1 {
b.err <- err
}
@@ -164,7 +205,8 @@ func (b *batchTracker) record(itemTrackers []*itemTracker, err error) {
} else {
// If we successfully process items in minSuccess instances,
// then wake up the waiting rpc, so it can return early.
- if itemTrackers[i].succeeded.Inc() >= int32(itemTrackers[i].minSuccess) {
+ succeeded := it.succeeded.Inc()
+ if succeeded == int32(it.minSuccess) {
if b.rpcsPending.Dec() == 0 {
b.done <- struct{}{}
}
@@ -172,11 +214,12 @@ func (b *batchTracker) record(itemTrackers []*itemTracker, err error) {
}
// If we successfully called this particular instance, but we don't have any remaining instances to try,
- // and we failed to call minSuccess instances, then we need to return the last error
- // Ex: 4xx, 5xx, 2xx
- if itemTrackers[i].remaining.Dec() == 0 {
- if b.rpcsFailed.Inc() == 1 {
- b.err <- itemTrackers[i].err.Load()
+ // and we failed to call minSuccess instances, then we need to return the last error.
+ if succeeded < int32(it.minSuccess) {
+ if it.remaining.Dec() == 0 {
+ if b.rpcsFailed.Inc() == 1 {
+ b.err <- it.err.Load()
+ }
}
}
}
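
A sketch of a caller adopting DoBatchWithOptions, combining the reusable goroutine pool added earlier with a custom client-error classifier; the ring, operation and callback are assumed to exist in the caller's code:

```go
package example

import (
	"context"

	"github.com/grafana/dskit/concurrency"
	"github.com/grafana/dskit/grpcutil"
	"github.com/grafana/dskit/ring"
	"google.golang.org/grpc/codes"
)

func pushToRing(ctx context.Context, r ring.ReadRing, op ring.Operation, keys []uint32,
	send func(ring.InstanceDesc, []int) error, pool *concurrency.ReusableGoroutinesPool) error {

	return ring.DoBatchWithOptions(ctx, op, r, keys, send, ring.DoBatchOptions{
		// Cleanup is always called once all batches finish (or on early error).
		Cleanup: func() { /* release per-request buffers here */ },
		// Count ResourceExhausted rejections as client errors so a quorum of
		// them fails the batch with that error family.
		IsClientError: func(err error) bool {
			return grpcutil.ErrorToStatusCode(err) == codes.ResourceExhausted
		},
		// Run callbacks on the shared worker pool instead of fresh goroutines.
		Go: pool.Go,
	})
}
```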
diff --git a/vendor/github.com/grafana/dskit/ring/replication_set.go b/vendor/github.com/grafana/dskit/ring/replication_set.go
index cc43331e44d95..f389f4766fc55 100644
--- a/vendor/github.com/grafana/dskit/ring/replication_set.go
+++ b/vendor/github.com/grafana/dskit/ring/replication_set.go
@@ -9,6 +9,7 @@ import (
kitlog "github.com/go-kit/log"
"github.com/go-kit/log/level"
+ "github.com/opentracing/opentracing-go/ext"
"github.com/grafana/dskit/spanlogger"
)
@@ -294,7 +295,7 @@ func DoUntilQuorumWithoutSuccessfulContextCancellation[T any](ctx context.Contex
terminate := func(err error) ([]T, error) {
if cfg.Logger != nil {
- _ = cfg.Logger.Error(err)
+ ext.Error.Set(cfg.Logger.Span, true)
}
contextTracker.cancelAllContexts()
@@ -325,7 +326,7 @@ func DoUntilQuorumWithoutSuccessfulContextCancellation[T any](ctx context.Contex
resultsRemaining--
if result.err != nil && cfg.IsTerminalError != nil && cfg.IsTerminalError(result.err) {
- level.Error(logger).Log("msg", "cancelling all outstanding requests because a terminal error occurred", "err", result.err)
+ level.Warn(logger).Log("msg", "cancelling all outstanding requests because a terminal error occurred", "err", result.err)
// We must return before calling resultTracker.done() below, otherwise done() might start further requests if request minimisation is enabled.
return terminate(result.err)
}
diff --git a/vendor/github.com/grafana/dskit/ring/util.go b/vendor/github.com/grafana/dskit/ring/util.go
index b5ee485ef25c6..a21c0f2fe2cad 100644
--- a/vendor/github.com/grafana/dskit/ring/util.go
+++ b/vendor/github.com/grafana/dskit/ring/util.go
@@ -7,6 +7,7 @@ import (
"time"
"github.com/go-kit/log"
+ "golang.org/x/exp/slices"
"github.com/grafana/dskit/backoff"
"github.com/grafana/dskit/netutil"
@@ -127,9 +128,11 @@ func getZones(tokens map[string][]uint32) []string {
// searchToken returns the offset of the tokens entry holding the range for the provided key.
func searchToken(tokens []uint32, key uint32) int {
- i := sort.Search(len(tokens), func(x int) bool {
- return tokens[x] > key
- })
+ i, found := slices.BinarySearch(tokens, key)
+ if found {
+ // we want the first token > key, not >= key
+ i = i + 1
+ }
if i >= len(tokens) {
i = 0
}
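
The `i+1` adjustment preserves the old semantics: sort.Search returned the first token strictly greater than the key, whereas slices.BinarySearch reports the position of the first token >= key plus a found flag. A tiny sketch with made-up token values:

```go
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	tokens := []uint32{10, 20, 30}

	for _, key := range []uint32{20, 25, 35} {
		i, found := slices.BinarySearch(tokens, key)
		if found {
			i++ // step past an exact match to keep the "first token > key" behaviour
		}
		if i >= len(tokens) {
			i = 0 // wrap around the ring
		}
		fmt.Printf("key=%d -> token index %d\n", key, i)
	}
	// key=20 -> 2, key=25 -> 2, key=35 -> 0 (same results as the sort.Search version)
}
```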
diff --git a/vendor/github.com/grafana/dskit/server/limits.go b/vendor/github.com/grafana/dskit/server/limits.go
index 6b18bb1cb0c2c..4a8651e323abc 100644
--- a/vendor/github.com/grafana/dskit/server/limits.go
+++ b/vendor/github.com/grafana/dskit/server/limits.go
@@ -4,6 +4,7 @@ import (
"context"
"strings"
+ "google.golang.org/grpc/metadata"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/tap"
)
@@ -11,19 +12,15 @@ import (
type GrpcInflightMethodLimiter interface {
// RPCCallStarting is called before request has been read into memory.
// All that's known about the request at this point is grpc method name.
+ //
+ // Returned context is used during the remainder of the gRPC call.
+ //
// Returned error should be convertible to gRPC Status via status.FromError,
// otherwise gRPC-server implementation-specific error will be returned to the client (codes.PermissionDenied in grpc@v1.55.0).
- RPCCallStarting(methodName string) error
- RPCCallFinished(methodName string)
-}
-
-// Custom type to hide it from other packages.
-type grpcLimitCheckContextKey int
+ RPCCallStarting(ctx context.Context, methodName string, md metadata.MD) (context.Context, error)
-// Presence of this key in the context indicates that inflight request counter was increased for this request, and needs to be decreased when request ends.
-const (
- requestFullMethod grpcLimitCheckContextKey = 1
-)
+ RPCCallFinished(ctx context.Context)
+}
func newGrpcInflightLimitCheck(methodLimiter GrpcInflightMethodLimiter) *grpcInflightLimitCheck {
return &grpcInflightLimitCheck{
@@ -38,8 +35,8 @@ type grpcInflightLimitCheck struct {
}
// TapHandle is called after receiving grpc request and headers, but before reading any request data yet.
-// If we reject request here, it won't be counted towards any metrics (eg. in middleware.grpcStatsHandler).
-// If we accept request (not return error), eventually HandleRPC with stats.End notification will be called.
+// If we reject request here (by returning non-nil error), it won't be counted towards any metrics (eg. in middleware.grpcStatsHandler).
+// If we accept request (no error), eventually HandleRPC with stats.End notification will be called.
func (g *grpcInflightLimitCheck) TapHandle(ctx context.Context, info *tap.Info) (context.Context, error) {
if !isMethodNameValid(info.FullMethodName) {
// If method name is not valid, we let the request continue, but not call method limiter.
@@ -47,12 +44,7 @@ func (g *grpcInflightLimitCheck) TapHandle(ctx context.Context, info *tap.Info)
return ctx, nil
}
- if err := g.methodLimiter.RPCCallStarting(info.FullMethodName); err != nil {
- return ctx, err
- }
-
- ctx = context.WithValue(ctx, requestFullMethod, info.FullMethodName)
- return ctx, nil
+ return g.methodLimiter.RPCCallStarting(ctx, info.FullMethodName, info.Header)
}
func (g *grpcInflightLimitCheck) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context {
@@ -65,9 +57,7 @@ func (g *grpcInflightLimitCheck) HandleRPC(ctx context.Context, rpcStats stats.R
return
}
- if name, ok := ctx.Value(requestFullMethod).(string); ok {
- g.methodLimiter.RPCCallFinished(name)
- }
+ g.methodLimiter.RPCCallFinished(ctx)
}
func (g *grpcInflightLimitCheck) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context {
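
The `GrpcInflightMethodLimiter` interface now threads the request context (and gRPC metadata) through `RPCCallStarting` and `RPCCallFinished`, so implementations carry their own per-request state instead of relying on the package-private context key that was removed. A hypothetical limiter satisfying the new shape (the type, key, and limit policy below are illustrative, not dskit's):

```go
package main

import (
	"context"
	"fmt"
	"sync/atomic"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/status"
)

// inflightKey marks contexts whose request incremented the counter.
type inflightKey struct{}

// maxInflightLimiter is a hypothetical implementation of the updated interface.
type maxInflightLimiter struct {
	inflight atomic.Int64
	max      int64
}

func (l *maxInflightLimiter) RPCCallStarting(ctx context.Context, methodName string, _ metadata.MD) (context.Context, error) {
	if l.inflight.Add(1) > l.max {
		l.inflight.Add(-1)
		// Convertible to a gRPC status, as the interface comment requires.
		return ctx, status.Error(codes.ResourceExhausted, "too many inflight requests")
	}
	// Mark the context so RPCCallFinished knows the counter was incremented.
	return context.WithValue(ctx, inflightKey{}, methodName), nil
}

func (l *maxInflightLimiter) RPCCallFinished(ctx context.Context) {
	if _, ok := ctx.Value(inflightKey{}).(string); ok {
		l.inflight.Add(-1)
	}
}

func main() {
	l := &maxInflightLimiter{max: 2}
	ctx1, _ := l.RPCCallStarting(context.Background(), "/pkg.Svc/A", nil)
	ctx2, _ := l.RPCCallStarting(context.Background(), "/pkg.Svc/B", nil)
	_, err := l.RPCCallStarting(context.Background(), "/pkg.Svc/C", nil)
	fmt.Println(err) // rejected once the limit is reached
	l.RPCCallFinished(ctx1)
	l.RPCCallFinished(ctx2)
}
```
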
diff --git a/vendor/github.com/grafana/dskit/server/server.go b/vendor/github.com/grafana/dskit/server/server.go
index 9e65b01053809..2b54283df7f21 100644
--- a/vendor/github.com/grafana/dskit/server/server.go
+++ b/vendor/github.com/grafana/dskit/server/server.go
@@ -92,15 +92,19 @@ type Config struct {
HTTPTLSConfig TLSConfig `yaml:"http_tls_config"`
GRPCTLSConfig TLSConfig `yaml:"grpc_tls_config"`
- RegisterInstrumentation bool `yaml:"register_instrumentation"`
- ExcludeRequestInLog bool `yaml:"-"`
- DisableRequestSuccessLog bool `yaml:"-"`
+ RegisterInstrumentation bool `yaml:"register_instrumentation"`
+ ReportGRPCCodesInInstrumentationLabel bool `yaml:"report_grpc_codes_in_instrumentation_label_enabled"`
+ ExcludeRequestInLog bool `yaml:"-"`
+ DisableRequestSuccessLog bool `yaml:"-"`
ServerGracefulShutdownTimeout time.Duration `yaml:"graceful_shutdown_timeout"`
HTTPServerReadTimeout time.Duration `yaml:"http_server_read_timeout"`
+ HTTPServerReadHeaderTimeout time.Duration `yaml:"http_server_read_header_timeout"`
HTTPServerWriteTimeout time.Duration `yaml:"http_server_write_timeout"`
HTTPServerIdleTimeout time.Duration `yaml:"http_server_idle_timeout"`
+ HTTPLogClosedConnectionsWithoutResponse bool `yaml:"http_log_closed_connections_without_response_enabled"`
+
GRPCOptions []grpc.ServerOption `yaml:"-"`
GRPCMiddleware []grpc.UnaryServerInterceptor `yaml:"-"`
GRPCStreamMiddleware []grpc.StreamServerInterceptor `yaml:"-"`
@@ -109,9 +113,9 @@ type Config struct {
DoNotAddDefaultHTTPMiddleware bool `yaml:"-"`
RouteHTTPToGRPC bool `yaml:"-"`
- GPRCServerMaxRecvMsgSize int `yaml:"grpc_server_max_recv_msg_size"`
+ GRPCServerMaxRecvMsgSize int `yaml:"grpc_server_max_recv_msg_size"`
GRPCServerMaxSendMsgSize int `yaml:"grpc_server_max_send_msg_size"`
- GPRCServerMaxConcurrentStreams uint `yaml:"grpc_server_max_concurrent_streams"`
+ GRPCServerMaxConcurrentStreams uint `yaml:"grpc_server_max_concurrent_streams"`
GRPCServerMaxConnectionIdle time.Duration `yaml:"grpc_server_max_connection_idle"`
GRPCServerMaxConnectionAge time.Duration `yaml:"grpc_server_max_connection_age"`
GRPCServerMaxConnectionAgeGrace time.Duration `yaml:"grpc_server_max_connection_age_grace"`
@@ -167,13 +171,16 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
f.IntVar(&cfg.GRPCListenPort, "server.grpc-listen-port", 9095, "gRPC server listen port.")
f.IntVar(&cfg.GRPCConnLimit, "server.grpc-conn-limit", 0, "Maximum number of simultaneous grpc connections, <=0 to disable")
f.BoolVar(&cfg.RegisterInstrumentation, "server.register-instrumentation", true, "Register the instrumentation handlers (/metrics etc).")
+ f.BoolVar(&cfg.ReportGRPCCodesInInstrumentationLabel, "server.report-grpc-codes-in-instrumentation-label-enabled", false, "If set to true, gRPC statuses will be reported in instrumentation labels with their string representations. Otherwise, they will be reported as \"error\".")
f.DurationVar(&cfg.ServerGracefulShutdownTimeout, "server.graceful-shutdown-timeout", 30*time.Second, "Timeout for graceful shutdowns")
- f.DurationVar(&cfg.HTTPServerReadTimeout, "server.http-read-timeout", 30*time.Second, "Read timeout for HTTP server")
+ f.DurationVar(&cfg.HTTPServerReadTimeout, "server.http-read-timeout", 30*time.Second, "Read timeout for entire HTTP request, including headers and body.")
+ f.DurationVar(&cfg.HTTPServerReadHeaderTimeout, "server.http-read-header-timeout", 0, "Read timeout for HTTP request headers. If set to 0, value of -server.http-read-timeout is used.")
f.DurationVar(&cfg.HTTPServerWriteTimeout, "server.http-write-timeout", 30*time.Second, "Write timeout for HTTP server")
f.DurationVar(&cfg.HTTPServerIdleTimeout, "server.http-idle-timeout", 120*time.Second, "Idle timeout for HTTP server")
- f.IntVar(&cfg.GPRCServerMaxRecvMsgSize, "server.grpc-max-recv-msg-size-bytes", 4*1024*1024, "Limit on the size of a gRPC message this server can receive (bytes).")
+ f.BoolVar(&cfg.HTTPLogClosedConnectionsWithoutResponse, "server.http-log-closed-connections-without-response-enabled", false, "Log closed connections that did not receive any response, most likely because client didn't send any request within timeout.")
+ f.IntVar(&cfg.GRPCServerMaxRecvMsgSize, "server.grpc-max-recv-msg-size-bytes", 4*1024*1024, "Limit on the size of a gRPC message this server can receive (bytes).")
f.IntVar(&cfg.GRPCServerMaxSendMsgSize, "server.grpc-max-send-msg-size-bytes", 4*1024*1024, "Limit on the size of a gRPC message this server can send (bytes).")
- f.UintVar(&cfg.GPRCServerMaxConcurrentStreams, "server.grpc-max-concurrent-streams", 100, "Limit on the number of concurrent streams for gRPC calls per client connection (0 = unlimited)")
+ f.UintVar(&cfg.GRPCServerMaxConcurrentStreams, "server.grpc-max-concurrent-streams", 100, "Limit on the number of concurrent streams for gRPC calls per client connection (0 = unlimited)")
f.DurationVar(&cfg.GRPCServerMaxConnectionIdle, "server.grpc.keepalive.max-connection-idle", infinty, "The duration after which an idle connection should be closed. Default: infinity")
f.DurationVar(&cfg.GRPCServerMaxConnectionAge, "server.grpc.keepalive.max-connection-age", infinty, "The duration for the maximum amount of time a connection may exist before it will be closed. Default: infinity")
f.DurationVar(&cfg.GRPCServerMaxConnectionAgeGrace, "server.grpc.keepalive.max-connection-age-grace", infinty, "An additive period after max-connection-age after which the connection will be forcibly closed. Default: infinity")
@@ -259,6 +266,9 @@ func newServer(cfg Config, metrics *Metrics) (*Server, error) {
return nil, err
}
httpListener = middleware.CountingListener(httpListener, metrics.TCPConnections.WithLabelValues("http"))
+ if cfg.HTTPLogClosedConnectionsWithoutResponse {
+ httpListener = middleware.NewZeroResponseListener(httpListener, level.Warn(logger))
+ }
metrics.TCPConnectionsLimit.WithLabelValues("http").Set(float64(cfg.HTTPConnLimit))
if cfg.HTTPConnLimit > 0 {
@@ -346,17 +356,21 @@ func newServer(cfg Config, metrics *Metrics) (*Server, error) {
WithRequest: !cfg.ExcludeRequestInLog,
DisableRequestSuccessLog: cfg.DisableRequestSuccessLog,
}
+ var reportGRPCStatusesOptions []middleware.InstrumentationOption
+ if cfg.ReportGRPCCodesInInstrumentationLabel {
+ reportGRPCStatusesOptions = []middleware.InstrumentationOption{middleware.ReportGRPCStatusOption}
+ }
grpcMiddleware := []grpc.UnaryServerInterceptor{
serverLog.UnaryServerInterceptor,
otgrpc.OpenTracingServerInterceptor(opentracing.GlobalTracer()),
- middleware.UnaryServerInstrumentInterceptor(metrics.RequestDuration),
+ middleware.UnaryServerInstrumentInterceptor(metrics.RequestDuration, reportGRPCStatusesOptions...),
}
grpcMiddleware = append(grpcMiddleware, cfg.GRPCMiddleware...)
grpcStreamMiddleware := []grpc.StreamServerInterceptor{
serverLog.StreamServerInterceptor,
otgrpc.OpenTracingStreamServerInterceptor(opentracing.GlobalTracer()),
- middleware.StreamServerInstrumentInterceptor(metrics.RequestDuration),
+ middleware.StreamServerInstrumentInterceptor(metrics.RequestDuration, reportGRPCStatusesOptions...),
}
grpcStreamMiddleware = append(grpcStreamMiddleware, cfg.GRPCStreamMiddleware...)
@@ -378,9 +392,9 @@ func newServer(cfg Config, metrics *Metrics) (*Server, error) {
grpc.ChainStreamInterceptor(grpcStreamMiddleware...),
grpc.KeepaliveParams(grpcKeepAliveOptions),
grpc.KeepaliveEnforcementPolicy(grpcKeepAliveEnforcementPolicy),
- grpc.MaxRecvMsgSize(cfg.GPRCServerMaxRecvMsgSize),
+ grpc.MaxRecvMsgSize(cfg.GRPCServerMaxRecvMsgSize),
grpc.MaxSendMsgSize(cfg.GRPCServerMaxSendMsgSize),
- grpc.MaxConcurrentStreams(uint32(cfg.GPRCServerMaxConcurrentStreams)),
+ grpc.MaxConcurrentStreams(uint32(cfg.GRPCServerMaxConcurrentStreams)),
grpc.NumStreamWorkers(uint32(cfg.GRPCServerNumWorkers)),
}
@@ -457,10 +471,11 @@ func newServer(cfg Config, metrics *Metrics) (*Server, error) {
}
httpServer := &http.Server{
- ReadTimeout: cfg.HTTPServerReadTimeout,
- WriteTimeout: cfg.HTTPServerWriteTimeout,
- IdleTimeout: cfg.HTTPServerIdleTimeout,
- Handler: middleware.Merge(httpMiddleware...).Wrap(router),
+ ReadTimeout: cfg.HTTPServerReadTimeout,
+ ReadHeaderTimeout: cfg.HTTPServerReadHeaderTimeout,
+ WriteTimeout: cfg.HTTPServerWriteTimeout,
+ IdleTimeout: cfg.HTTPServerIdleTimeout,
+ Handler: middleware.Merge(httpMiddleware...).Wrap(router),
}
if httpTLSConfig != nil {
httpServer.TLSConfig = httpTLSConfig
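
A hedged sketch of wiring the renamed and newly added `server.Config` fields from the hunks above (only fields visible in this diff are set; everything else is left at its default):

```go
package main

import (
	"time"

	"github.com/grafana/dskit/server"
)

func main() {
	cfg := server.Config{
		// Renamed from GPRCServerMaxRecvMsgSize / GPRCServerMaxConcurrentStreams.
		GRPCServerMaxRecvMsgSize:       4 * 1024 * 1024,
		GRPCServerMaxConcurrentStreams: 100,

		// New in this update.
		ReportGRPCCodesInInstrumentationLabel:   true,
		HTTPServerReadTimeout:                   30 * time.Second,
		HTTPServerReadHeaderTimeout:             5 * time.Second, // 0 falls back to the read timeout
		HTTPLogClosedConnectionsWithoutResponse: true,
	}
	_ = cfg
}
```
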
diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen1.go b/vendor/golang.org/x/oauth2/google/appengine_gen1.go
index 16c6c6b90ce50..e61587945b08f 100644
--- a/vendor/golang.org/x/oauth2/google/appengine_gen1.go
+++ b/vendor/golang.org/x/oauth2/google/appengine_gen1.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build appengine
-// +build appengine
// This file applies to App Engine first generation runtimes (<= Go 1.9).
diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go b/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go
index a7e27b3d2991c..9c79aa0a0cc5d 100644
--- a/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go
+++ b/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !appengine
-// +build !appengine
// This file applies to App Engine second generation runtimes (>= Go 1.11) and App Engine flexible.
diff --git a/vendor/golang.org/x/oauth2/internal/client_appengine.go b/vendor/golang.org/x/oauth2/internal/client_appengine.go
index e1755d1d9acf4..d28140f789ec9 100644
--- a/vendor/golang.org/x/oauth2/internal/client_appengine.go
+++ b/vendor/golang.org/x/oauth2/internal/client_appengine.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build appengine
-// +build appengine
package internal
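
The three oauth2 hunks above are mechanical: since Go 1.17 the `//go:build` line alone expresses the constraint, so the legacy `// +build` mirror is dropped. The surviving form looks like this (sketch only):

```go
//go:build appengine

// On Go 1.17+ the //go:build line above is the only constraint needed; the
// redundant "// +build appengine" mirror is what these hunks remove.

package internal
```
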
diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go
index dbe2e2d0c6579..6ce01ac9a69c7 100644
--- a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go
@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
-// protoc v3.21.9
+// protoc v3.21.12
// source: google/api/field_behavior.proto
package annotations
@@ -78,6 +78,19 @@ const (
// a non-empty value will be returned. The user will not be aware of what
// non-empty value to expect.
FieldBehavior_NON_EMPTY_DEFAULT FieldBehavior = 7
+ // Denotes that the field in a resource (a message annotated with
+ // google.api.resource) is used in the resource name to uniquely identify the
+ // resource. For AIP-compliant APIs, this should only be applied to the
+ // `name` field on the resource.
+ //
+ // This behavior should not be applied to references to other resources within
+ // the message.
+ //
+ // The identifier field of resources often has different field behavior
+ // depending on the request it is embedded in (e.g. for Create methods name
+ // is optional and unused, while for Update methods it is required). Instead
+ // of method-specific annotations, only `IDENTIFIER` is required.
+ FieldBehavior_IDENTIFIER FieldBehavior = 8
)
// Enum value maps for FieldBehavior.
@@ -91,6 +104,7 @@ var (
5: "IMMUTABLE",
6: "UNORDERED_LIST",
7: "NON_EMPTY_DEFAULT",
+ 8: "IDENTIFIER",
}
FieldBehavior_value = map[string]int32{
"FIELD_BEHAVIOR_UNSPECIFIED": 0,
@@ -101,6 +115,7 @@ var (
"IMMUTABLE": 5,
"UNORDERED_LIST": 6,
"NON_EMPTY_DEFAULT": 7,
+ "IDENTIFIER": 8,
}
)
@@ -169,7 +184,7 @@ var file_google_api_field_behavior_proto_rawDesc = []byte{
0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64,
0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2a,
- 0xa6, 0x01, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f,
+ 0xb6, 0x01, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f,
0x72, 0x12, 0x1e, 0x0a, 0x1a, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x42, 0x45, 0x48, 0x41, 0x56,
0x49, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10,
0x00, 0x12, 0x0c, 0x0a, 0x08, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12,
@@ -179,7 +194,8 @@ var file_google_api_field_behavior_proto_rawDesc = []byte{
0x0a, 0x09, 0x49, 0x4d, 0x4d, 0x55, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x05, 0x12, 0x12, 0x0a,
0x0e, 0x55, 0x4e, 0x4f, 0x52, 0x44, 0x45, 0x52, 0x45, 0x44, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x10,
0x06, 0x12, 0x15, 0x0a, 0x11, 0x4e, 0x4f, 0x4e, 0x5f, 0x45, 0x4d, 0x50, 0x54, 0x59, 0x5f, 0x44,
- 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x07, 0x3a, 0x60, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c,
+ 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x07, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4e,
+ 0x54, 0x49, 0x46, 0x49, 0x45, 0x52, 0x10, 0x08, 0x3a, 0x60, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c,
0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65,
0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9c, 0x08, 0x20, 0x03, 0x28, 0x0e,
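
Once this genproto update is vendored, the new enum value is addressable like any other `FieldBehavior` constant; a tiny sketch (illustrative only):

```go
package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/api/annotations"
)

func main() {
	fb := annotations.FieldBehavior_IDENTIFIER
	fmt.Println(fb, int32(fb.Number())) // prints: IDENTIFIER 8
}
```
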
diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md
index 1bc92248cb470..ab0fbb79b863d 100644
--- a/vendor/google.golang.org/grpc/README.md
+++ b/vendor/google.golang.org/grpc/README.md
@@ -1,8 +1,8 @@
# gRPC-Go
-[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go)
[![GoDoc](https://pkg.go.dev/badge/google.golang.org/grpc)][API]
[![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go)
+[![codecov](https://codecov.io/gh/grpc/grpc-go/graph/badge.svg)](https://codecov.io/gh/grpc/grpc-go)
The [Go][] implementation of [gRPC][]: A high performance, open source, general
RPC framework that puts mobile and HTTP/2 first. For more information see the
diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go
index 712fef4d0fb9d..52d530d7ad01c 100644
--- a/vendor/google.golang.org/grpc/attributes/attributes.go
+++ b/vendor/google.golang.org/grpc/attributes/attributes.go
@@ -121,9 +121,9 @@ func (a *Attributes) String() string {
return sb.String()
}
-func str(x any) string {
+func str(x any) (s string) {
if v, ok := x.(fmt.Stringer); ok {
- return v.String()
+ return fmt.Sprint(v)
} else if v, ok := x.(string); ok {
return v
}
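
Routing `fmt.Stringer` values through `fmt.Sprint` matters because `fmt` recovers from panics raised inside `String()` (for a nil pointer it prints `<nil>`), whereas the previous direct `v.String()` call would crash. A minimal illustration, independent of gRPC:

```go
package main

import "fmt"

type name struct{ s string }

func (n *name) String() string { return n.s } // dereferences a nil receiver

func main() {
	var n *name
	// n.String() would panic with a nil dereference; fmt.Sprint recovers from
	// panics raised inside String and substitutes a placeholder instead.
	fmt.Println(fmt.Sprint(n)) // prints: <nil>
}
```
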
diff --git a/vendor/google.golang.org/grpc/authz/audit/audit_logger.go b/vendor/google.golang.org/grpc/authz/audit/audit_logger.go
index b9b7219703876..7ea79410ad743 100644
--- a/vendor/google.golang.org/grpc/authz/audit/audit_logger.go
+++ b/vendor/google.golang.org/grpc/authz/audit/audit_logger.go
@@ -89,9 +89,9 @@ type LoggerConfig interface {
// decision meets the condition for audit, all the configured audit loggers'
// Log() method will be invoked to log that event.
//
-// TODO(lwge): Change the link to the merged gRFC once it's ready.
-// Please refer to https://github.com/grpc/proposal/pull/346 for more details
-// about audit logging.
+// Please refer to
+// https://github.com/grpc/proposal/blob/master/A59-audit-logging.md for more
+// details about audit logging.
type Logger interface {
// Log performs audit logging for the provided audit event.
//
@@ -107,9 +107,9 @@ type Logger interface {
// implement this interface, along with the Logger interface, and register
// it by calling RegisterLoggerBuilder() at init time.
//
-// TODO(lwge): Change the link to the merged gRFC once it's ready.
-// Please refer to https://github.com/grpc/proposal/pull/346 for more details
-// about audit logging.
+// Please refer to
+// https://github.com/grpc/proposal/blob/master/A59-audit-logging.md for more
+// details about audit logging.
type LoggerBuilder interface {
// ParseLoggerConfig parses the given JSON bytes into a structured
// logger config this builder can use to build an audit logger.
diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go
index b6377f445ad24..d79560a2e268f 100644
--- a/vendor/google.golang.org/grpc/balancer/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer/balancer.go
@@ -30,6 +30,7 @@ import (
"google.golang.org/grpc/channelz"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/resolver"
@@ -39,6 +40,8 @@ import (
var (
// m is a map from name to balancer builder.
m = make(map[string]Builder)
+
+ logger = grpclog.Component("balancer")
)
// Register registers the balancer builder to the balancer map. b.Name
@@ -51,6 +54,12 @@ var (
// an init() function), and is not thread-safe. If multiple Balancers are
// registered with the same name, the one registered last will take effect.
func Register(b Builder) {
+ if strings.ToLower(b.Name()) != b.Name() {
+ // TODO: Skip the use of strings.ToLower() to index the map after v1.59
+ // is released to switch to case sensitive balancer registry. Also,
+ // remove this warning and update the docstrings for Register and Get.
+ logger.Warningf("Balancer registered with name %q. grpc-go will be switching to case sensitive balancer registries soon", b.Name())
+ }
m[strings.ToLower(b.Name())] = b
}
@@ -70,6 +79,12 @@ func init() {
// Note that the compare is done in a case-insensitive fashion.
// If no builder is registered with the name, nil will be returned.
func Get(name string) Builder {
+ if strings.ToLower(name) != name {
+ // TODO: Skip the use of strings.ToLower() to index the map after v1.59
+ // is released to switch to case sensitive balancer registry. Also,
+ // remove this warning and update the docstrings for Register and Get.
+ logger.Warningf("Balancer retrieved for name %q. grpc-go will be switching to case sensitive balancer registries soon", name)
+ }
if b, ok := m[strings.ToLower(name)]; ok {
return b
}
diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go
index f2ddfc3788ed9..86ba65be4c004 100644
--- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go
+++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go
@@ -32,14 +32,18 @@ import (
"google.golang.org/grpc"
"google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/balancer/base"
grpclbstate "google.golang.org/grpc/balancer/grpclb/state"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal"
"google.golang.org/grpc/internal/backoff"
+ internalgrpclog "google.golang.org/grpc/internal/grpclog"
+ "google.golang.org/grpc/internal/pretty"
"google.golang.org/grpc/internal/resolver/dns"
"google.golang.org/grpc/resolver"
+ "google.golang.org/grpc/resolver/manual"
durationpb "github.com/golang/protobuf/ptypes/duration"
lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1"
@@ -132,7 +136,11 @@ func (b *lbBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) bal
// This generates a manual resolver builder with a fixed scheme. This
// scheme will be used to dial to remote LB, so we can send filtered
// address updates to remote LB ClientConn using this manual resolver.
- r := &lbManualResolver{scheme: "grpclb-internal", ccb: cc}
+ mr := manual.NewBuilderWithScheme("grpclb-internal")
+ // ResolveNow() on this manual resolver is forwarded to the parent
+ // ClientConn, so when grpclb client loses contact with the remote balancer,
+ // the parent ClientConn's resolver will re-resolve.
+ mr.ResolveNowCallback = cc.ResolveNow
lb := &lbBalancer{
cc: newLBCacheClientConn(cc),
@@ -142,23 +150,24 @@ func (b *lbBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) bal
fallbackTimeout: b.fallbackTimeout,
doneCh: make(chan struct{}),
- manualResolver: r,
+ manualResolver: mr,
subConns: make(map[resolver.Address]balancer.SubConn),
scStates: make(map[balancer.SubConn]connectivity.State),
- picker: &errPicker{err: balancer.ErrNoSubConnAvailable},
+ picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable),
clientStats: newRPCStats(),
backoff: backoff.DefaultExponential, // TODO: make backoff configurable.
}
+ lb.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[grpclb %p] ", lb))
var err error
if opt.CredsBundle != nil {
lb.grpclbClientConnCreds, err = opt.CredsBundle.NewWithMode(internal.CredsBundleModeBalancer)
if err != nil {
- logger.Warningf("lbBalancer: client connection creds NewWithMode failed: %v", err)
+ lb.logger.Warningf("Failed to create credentials used for connecting to grpclb: %v", err)
}
lb.grpclbBackendCreds, err = opt.CredsBundle.NewWithMode(internal.CredsBundleModeBackendFromBalancer)
if err != nil {
- logger.Warningf("lbBalancer: backend creds NewWithMode failed: %v", err)
+ lb.logger.Warningf("Failed to create credentials used for connecting to backends returned by grpclb: %v", err)
}
}
@@ -170,6 +179,7 @@ type lbBalancer struct {
dialTarget string // user's dial target
target string // same as dialTarget unless overridden in service config
opt balancer.BuildOptions
+ logger *internalgrpclog.PrefixLogger
usePickFirst bool
@@ -188,7 +198,7 @@ type lbBalancer struct {
// manualResolver is used in the remote LB ClientConn inside grpclb. When
// resolved address updates are received by grpclb, filtered updates will be
// send to remote LB ClientConn through this resolver.
- manualResolver *lbManualResolver
+ manualResolver *manual.Resolver
// The ClientConn to talk to the remote balancer.
ccRemoteLB *remoteBalancerCCWrapper
// backoff for calling remote balancer.
@@ -236,12 +246,12 @@ type lbBalancer struct {
// Caller must hold lb.mu.
func (lb *lbBalancer) regeneratePicker(resetDrop bool) {
if lb.state == connectivity.TransientFailure {
- lb.picker = &errPicker{err: fmt.Errorf("all SubConns are in TransientFailure, last connection error: %v", lb.connErr)}
+ lb.picker = base.NewErrPicker(fmt.Errorf("all SubConns are in TransientFailure, last connection error: %v", lb.connErr))
return
}
if lb.state == connectivity.Connecting {
- lb.picker = &errPicker{err: balancer.ErrNoSubConnAvailable}
+ lb.picker = base.NewErrPicker(balancer.ErrNoSubConnAvailable)
return
}
@@ -268,7 +278,7 @@ func (lb *lbBalancer) regeneratePicker(resetDrop bool) {
//
// This doesn't seem to be necessary after the connecting check above.
// Kept for safety.
- lb.picker = &errPicker{err: balancer.ErrNoSubConnAvailable}
+ lb.picker = base.NewErrPicker(balancer.ErrNoSubConnAvailable)
return
}
if lb.inFallback {
@@ -322,21 +332,21 @@ func (lb *lbBalancer) aggregateSubConnStates() connectivity.State {
// UpdateSubConnState is unused; NewSubConn's options always specifies
// updateSubConnState as the listener.
func (lb *lbBalancer) UpdateSubConnState(sc balancer.SubConn, scs balancer.SubConnState) {
- logger.Errorf("grpclb: UpdateSubConnState(%v, %+v) called unexpectedly", sc, scs)
+ lb.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, scs)
}
func (lb *lbBalancer) updateSubConnState(sc balancer.SubConn, scs balancer.SubConnState) {
s := scs.ConnectivityState
- if logger.V(2) {
- logger.Infof("lbBalancer: handle SubConn state change: %p, %v", sc, s)
+ if lb.logger.V(2) {
+ lb.logger.Infof("SubConn state change: %p, %v", sc, s)
}
lb.mu.Lock()
defer lb.mu.Unlock()
oldS, ok := lb.scStates[sc]
if !ok {
- if logger.V(2) {
- logger.Infof("lbBalancer: got state changes for an unknown SubConn: %p, %v", sc, s)
+ if lb.logger.V(2) {
+ lb.logger.Infof("Received state change for an unknown SubConn: %p, %v", sc, s)
}
return
}
@@ -441,8 +451,8 @@ func (lb *lbBalancer) handleServiceConfig(gc *grpclbServiceConfig) {
if lb.usePickFirst == newUsePickFirst {
return
}
- if logger.V(2) {
- logger.Infof("lbBalancer: switching mode, new usePickFirst: %+v", newUsePickFirst)
+ if lb.logger.V(2) {
+ lb.logger.Infof("Switching mode. Is pick_first used for backends? %v", newUsePickFirst)
}
lb.refreshSubConns(lb.backendAddrs, lb.inFallback, newUsePickFirst)
}
@@ -453,8 +463,8 @@ func (lb *lbBalancer) ResolverError(error) {
}
func (lb *lbBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error {
- if logger.V(2) {
- logger.Infof("lbBalancer: UpdateClientConnState: %+v", ccs)
+ if lb.logger.V(2) {
+ lb.logger.Infof("UpdateClientConnState: %s", pretty.ToJSON(ccs))
}
gc, _ := ccs.BalancerConfig.(*grpclbServiceConfig)
lb.handleServiceConfig(gc)
@@ -482,7 +492,9 @@ func (lb *lbBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error
} else if lb.ccRemoteLB == nil {
// First time receiving resolved addresses, create a cc to remote
// balancers.
- lb.newRemoteBalancerCCWrapper()
+ if err := lb.newRemoteBalancerCCWrapper(); err != nil {
+ return err
+ }
// Start the fallback goroutine.
go lb.fallbackToBackendsAfter(lb.fallbackTimeout)
}
diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go
index 39bc5cc71e819..20c5f2ec3967b 100644
--- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go
+++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go
@@ -98,15 +98,6 @@ func (s *rpcStats) knownReceived() {
atomic.AddInt64(&s.numCallsFinished, 1)
}
-type errPicker struct {
- // Pick always returns this err.
- err error
-}
-
-func (p *errPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
- return balancer.PickResult{}, p.err
-}
-
// rrPicker does roundrobin on subConns. It's typically used when there's no
// response from remote balancer, and grpclb falls back to the resolved
// backends.
diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go
index edb66a90a3b1b..c8fe1edd8e530 100644
--- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go
+++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go
@@ -27,11 +27,8 @@ import (
"time"
"github.com/golang/protobuf/proto"
- timestamppb "github.com/golang/protobuf/ptypes/timestamp"
- "github.com/google/go-cmp/cmp"
"google.golang.org/grpc"
"google.golang.org/grpc/balancer"
- lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/credentials/insecure"
"google.golang.org/grpc/internal/backoff"
@@ -39,13 +36,28 @@ import (
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/resolver"
+
+ timestamppb "github.com/golang/protobuf/ptypes/timestamp"
+ lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1"
)
+func serverListEqual(a, b []*lbpb.Server) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for i := 0; i < len(a); i++ {
+ if !proto.Equal(a[i], b[i]) {
+ return false
+ }
+ }
+ return true
+}
+
// processServerList updates balancer's internal state, create/remove SubConns
// and regenerates picker using the received serverList.
func (lb *lbBalancer) processServerList(l *lbpb.ServerList) {
- if logger.V(2) {
- logger.Infof("lbBalancer: processing server list: %+v", l)
+ if lb.logger.V(2) {
+ lb.logger.Infof("Processing server list: %#v", l)
}
lb.mu.Lock()
defer lb.mu.Unlock()
@@ -55,9 +67,9 @@ func (lb *lbBalancer) processServerList(l *lbpb.ServerList) {
lb.serverListReceived = true
// If the new server list == old server list, do nothing.
- if cmp.Equal(lb.fullServerList, l.Servers, cmp.Comparer(proto.Equal)) {
- if logger.V(2) {
- logger.Infof("lbBalancer: new serverlist same as the previous one, ignoring")
+ if serverListEqual(lb.fullServerList, l.Servers) {
+ if lb.logger.V(2) {
+ lb.logger.Infof("Ignoring new server list as it is the same as the previous one")
}
return
}
@@ -78,9 +90,8 @@ func (lb *lbBalancer) processServerList(l *lbpb.ServerList) {
ipStr = fmt.Sprintf("[%s]", ipStr)
}
addr := imetadata.Set(resolver.Address{Addr: fmt.Sprintf("%s:%d", ipStr, s.Port)}, md)
- if logger.V(2) {
- logger.Infof("lbBalancer: server list entry[%d]: ipStr:|%s|, port:|%d|, load balancer token:|%v|",
- i, ipStr, s.Port, s.LoadBalanceToken)
+ if lb.logger.V(2) {
+ lb.logger.Infof("Server list entry:|%d|, ipStr:|%s|, port:|%d|, load balancer token:|%v|", i, ipStr, s.Port, s.LoadBalanceToken)
}
backendAddrs = append(backendAddrs, addr)
}
@@ -149,7 +160,7 @@ func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address, fallback
// This bypasses the cc wrapper with SubConn cache.
sc, err := lb.cc.ClientConn.NewSubConn(backendAddrs, opts)
if err != nil {
- logger.Warningf("grpclb: failed to create new SubConn: %v", err)
+ lb.logger.Warningf("Failed to create new SubConn: %v", err)
return
}
sc.Connect()
@@ -174,7 +185,7 @@ func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address, fallback
opts.StateListener = func(scs balancer.SubConnState) { lb.updateSubConnState(sc, scs) }
sc, err := lb.cc.NewSubConn([]resolver.Address{addr}, opts)
if err != nil {
- logger.Warningf("grpclb: failed to create new SubConn: %v", err)
+ lb.logger.Warningf("Failed to create new SubConn: %v", err)
continue
}
lb.subConns[addrWithoutAttrs] = sc // Use the addr without MD as key for the map.
@@ -217,7 +228,7 @@ type remoteBalancerCCWrapper struct {
wg sync.WaitGroup
}
-func (lb *lbBalancer) newRemoteBalancerCCWrapper() {
+func (lb *lbBalancer) newRemoteBalancerCCWrapper() error {
var dopts []grpc.DialOption
if creds := lb.opt.DialCreds; creds != nil {
dopts = append(dopts, grpc.WithTransportCredentials(creds))
@@ -248,9 +259,10 @@ func (lb *lbBalancer) newRemoteBalancerCCWrapper() {
//
// The grpclb server addresses will set field ServerName, and creds will
// receive ServerName as authority.
- cc, err := grpc.DialContext(context.Background(), lb.manualResolver.Scheme()+":///grpclb.subClientConn", dopts...)
+ target := lb.manualResolver.Scheme() + ":///grpclb.subClientConn"
+ cc, err := grpc.Dial(target, dopts...)
if err != nil {
- logger.Fatalf("failed to dial: %v", err)
+ return fmt.Errorf("grpc.Dial(%s): %v", target, err)
}
ccw := &remoteBalancerCCWrapper{
cc: cc,
@@ -261,6 +273,7 @@ func (lb *lbBalancer) newRemoteBalancerCCWrapper() {
lb.ccRemoteLB = ccw
ccw.wg.Add(1)
go ccw.watchRemoteBalancer()
+ return nil
}
// close closed the ClientConn to remote balancer, and waits until all
@@ -408,9 +421,9 @@ func (ccw *remoteBalancerCCWrapper) watchRemoteBalancer() {
default:
if err != nil {
if err == errServerTerminatedConnection {
- logger.Info(err)
+ ccw.lb.logger.Infof("Call to remote balancer failed: %v", err)
} else {
- logger.Warning(err)
+ ccw.lb.logger.Warningf("Call to remote balancer failed: %v", err)
}
}
}
diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go
index 680779f1c82eb..c0f762c0c050e 100644
--- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go
+++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go
@@ -27,67 +27,6 @@ import (
"google.golang.org/grpc/resolver"
)
-// The parent ClientConn should re-resolve when grpclb loses connection to the
-// remote balancer. When the ClientConn inside grpclb gets a TransientFailure,
-// it calls lbManualResolver.ResolveNow(), which calls parent ClientConn's
-// ResolveNow, and eventually results in re-resolve happening in parent
-// ClientConn's resolver (DNS for example).
-//
-// parent
-// ClientConn
-// +-----------------------------------------------------------------+
-// | parent +---------------------------------+ |
-// | DNS ClientConn | grpclb | |
-// | resolver balancerWrapper | | |
-// | + + | grpclb grpclb | |
-// | | | | ManualResolver ClientConn | |
-// | | | | + + | |
-// | | | | | | Transient | |
-// | | | | | | Failure | |
-// | | | | | <--------- | | |
-// | | | <--------------- | ResolveNow | | |
-// | | <--------- | ResolveNow | | | | |
-// | | ResolveNow | | | | | |
-// | | | | | | | |
-// | + + | + + | |
-// | +---------------------------------+ |
-// +-----------------------------------------------------------------+
-
-// lbManualResolver is used by the ClientConn inside grpclb. It's a manual
-// resolver with a special ResolveNow() function.
-//
-// When ResolveNow() is called, it calls ResolveNow() on the parent ClientConn,
-// so when grpclb client lose contact with remote balancers, the parent
-// ClientConn's resolver will re-resolve.
-type lbManualResolver struct {
- scheme string
- ccr resolver.ClientConn
-
- ccb balancer.ClientConn
-}
-
-func (r *lbManualResolver) Build(_ resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
- r.ccr = cc
- return r, nil
-}
-
-func (r *lbManualResolver) Scheme() string {
- return r.scheme
-}
-
-// ResolveNow calls resolveNow on the parent ClientConn.
-func (r *lbManualResolver) ResolveNow(o resolver.ResolveNowOptions) {
- r.ccb.ResolveNow(o)
-}
-
-// Close is a noop for Resolver.
-func (*lbManualResolver) Close() {}
-
-// UpdateState calls cc.UpdateState.
-func (r *lbManualResolver) UpdateState(s resolver.State) {
- r.ccr.UpdateState(s)
-}
-
const subConnCacheTime = time.Second * 10
// lbCacheClientConn is a wrapper balancer.ClientConn with a SubConn cache.
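
The deleted `lbManualResolver` is replaced in grpclb.go (above) by the stock `resolver/manual` package, with `ResolveNowCallback` forwarding to the parent ClientConn. A small sketch of that pattern outside grpclb:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
	"google.golang.org/grpc/resolver/manual"
)

func main() {
	mr := manual.NewBuilderWithScheme("grpclb-internal")
	// In grpclb this callback is cc.ResolveNow, so losing the remote balancer
	// triggers re-resolution in the parent ClientConn's resolver.
	mr.ResolveNowCallback = func(resolver.ResolveNowOptions) {
		fmt.Println("forwarding ResolveNow to the parent ClientConn")
	}
	mr.ResolveNowCallback(resolver.ResolveNowOptions{})
}
```
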
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
index ff7fea102288c..429c389e4730d 100644
--- a/vendor/google.golang.org/grpc/clientconn.go
+++ b/vendor/google.golang.org/grpc/clientconn.go
@@ -337,8 +337,8 @@ func (cc *ClientConn) exitIdleMode() error {
return errConnClosing
}
if cc.idlenessState != ccIdlenessStateIdle {
- cc.mu.Unlock()
channelz.Infof(logger, cc.channelzID, "ClientConn asked to exit idle mode, current mode is %v", cc.idlenessState)
+ cc.mu.Unlock()
return nil
}
@@ -404,13 +404,13 @@ func (cc *ClientConn) exitIdleMode() error {
// name resolver, load balancer and any subchannels.
func (cc *ClientConn) enterIdleMode() error {
cc.mu.Lock()
+ defer cc.mu.Unlock()
+
if cc.conns == nil {
- cc.mu.Unlock()
return ErrClientConnClosing
}
if cc.idlenessState != ccIdlenessStateActive {
- channelz.Errorf(logger, cc.channelzID, "ClientConn asked to enter idle mode, current mode is %v", cc.idlenessState)
- cc.mu.Unlock()
+ channelz.Warningf(logger, cc.channelzID, "ClientConn asked to enter idle mode, current mode is %v", cc.idlenessState)
return nil
}
@@ -431,14 +431,14 @@ func (cc *ClientConn) enterIdleMode() error {
cc.balancerWrapper.enterIdleMode()
cc.csMgr.updateState(connectivity.Idle)
cc.idlenessState = ccIdlenessStateIdle
- cc.mu.Unlock()
+ cc.addTraceEvent("entering idle mode")
go func() {
- cc.addTraceEvent("entering idle mode")
for ac := range conns {
ac.tearDown(errConnIdling)
}
}()
+
return nil
}
@@ -804,6 +804,12 @@ func init() {
internal.SubscribeToConnectivityStateChanges = func(cc *ClientConn, s grpcsync.Subscriber) func() {
return cc.csMgr.pubSub.Subscribe(s)
}
+ internal.EnterIdleModeForTesting = func(cc *ClientConn) error {
+ return cc.enterIdleMode()
+ }
+ internal.ExitIdleModeForTesting = func(cc *ClientConn) error {
+ return cc.exitIdleMode()
+ }
}
func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) {
diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go
index 1fd0d5c127f4f..cfc9fd85e8dd9 100644
--- a/vendor/google.golang.org/grpc/dialoptions.go
+++ b/vendor/google.golang.org/grpc/dialoptions.go
@@ -644,6 +644,7 @@ func defaultDialOptions() dialOptions {
UseProxy: true,
},
recvBufferPool: nopBufferPool{},
+ idleTimeout: 30 * time.Minute,
}
}
@@ -680,8 +681,8 @@ func WithResolvers(rs ...resolver.Builder) DialOption {
// channel will exit idle mode when the Connect() method is called or when an
// RPC is initiated.
//
-// By default this feature is disabled, which can also be explicitly configured
-// by passing zero to this function.
+// A default timeout of 30 minutes will be used if this dial option is not set
+// at dial time and idleness can be disabled by passing a timeout of zero.
//
// # Experimental
//
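
Because the default idle timeout changes from disabled to 30 minutes, callers that relied on always-active channels may want to opt out explicitly. A hedged usage sketch (the target address is illustrative):

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.Dial("localhost:9095",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		// 0 disables idleness; omitting the option now means a 30 minute default.
		grpc.WithIdleTimeout(0),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```
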
diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go
index 69d5580b6adfd..5ebf88d7147f2 100644
--- a/vendor/google.golang.org/grpc/encoding/encoding.go
+++ b/vendor/google.golang.org/grpc/encoding/encoding.go
@@ -38,6 +38,10 @@ const Identity = "identity"
// Compressor is used for compressing and decompressing when sending or
// receiving messages.
+//
+// If a Compressor implements `DecompressedSize(compressedBytes []byte) int`,
+// gRPC will invoke it to determine the size of the buffer allocated for the
+// result of decompression. A return value of -1 indicates unknown size.
type Compressor interface {
// Compress writes the data written to wc to w after compressing it. If an
// error occurs while initializing the compressor, that error is returned
@@ -51,15 +55,6 @@ type Compressor interface {
// coding header. The result must be static; the result cannot change
// between calls.
Name() string
- // If a Compressor implements
- // DecompressedSize(compressedBytes []byte) int, gRPC will call it
- // to determine the size of the buffer allocated for the result of decompression.
- // Return -1 to indicate unknown size.
- //
- // Experimental
- //
- // Notice: This API is EXPERIMENTAL and may be changed or removed in a
- // later release.
}
var registeredCompressor = make(map[string]Compressor)
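
The optional `DecompressedSize` hook is now documented on the `Compressor` type itself. A hypothetical gzip-based compressor implementing it (the buffer pre-sizing comes from the gzip ISIZE trailer, which holds the uncompressed length modulo 2^32):

```go
package main

import (
	"compress/gzip"
	"encoding/binary"
	"io"

	"google.golang.org/grpc/encoding"
)

type gzipCompressor struct{}

func (gzipCompressor) Compress(w io.Writer) (io.WriteCloser, error) { return gzip.NewWriter(w), nil }

func (gzipCompressor) Decompress(r io.Reader) (io.Reader, error) { return gzip.NewReader(r) }

func (gzipCompressor) Name() string { return "gzip-example" }

// DecompressedSize reads the gzip ISIZE trailer so gRPC can pre-size the
// decompression buffer; -1 signals that the size is unknown.
func (gzipCompressor) DecompressedSize(compressed []byte) int {
	if len(compressed) < 4 {
		return -1
	}
	return int(binary.LittleEndian.Uint32(compressed[len(compressed)-4:]))
}

func main() {
	encoding.RegisterCompressor(gzipCompressor{})
}
```
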
diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
index a01a1b4d54bd5..4439cda0f3cb7 100644
--- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
+++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
@@ -44,8 +44,15 @@ const (
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type HealthClient interface {
- // If the requested service is unknown, the call will fail with status
- // NOT_FOUND.
+ // Check gets the health of the specified service. If the requested service
+ // is unknown, the call will fail with status NOT_FOUND. If the caller does
+ // not specify a service name, the server should respond with its overall
+ // health status.
+ //
+ // Clients should set a deadline when calling Check, and can declare the
+ // server unhealthy if they do not receive a timely response.
+ //
+ // Check implementations should be idempotent and side effect free.
Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error)
// Performs a watch for the serving status of the requested service.
// The server will immediately send back a message indicating the current
@@ -118,8 +125,15 @@ func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) {
// All implementations should embed UnimplementedHealthServer
// for forward compatibility
type HealthServer interface {
- // If the requested service is unknown, the call will fail with status
- // NOT_FOUND.
+ // Check gets the health of the specified service. If the requested service
+ // is unknown, the call will fail with status NOT_FOUND. If the caller does
+ // not specify a service name, the server should respond with its overall
+ // health status.
+ //
+ // Clients should set a deadline when calling Check, and can declare the
+ // server unhealthy if they do not receive a timely response.
+ //
+ // Check implementations should be idempotent and side effect free.
Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error)
// Performs a watch for the serving status of the requested service.
// The server will immediately send back a message indicating the current
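
The expanded Check documentation recommends a client-side deadline; a hedged client sketch following it (address and timeout are illustrative):

```go
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	conn, err := grpc.Dial("localhost:9095", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Set a deadline and treat a late or failed response as unhealthy; an
	// empty Service asks for the server's overall health.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	resp, err := healthpb.NewHealthClient(conn).Check(ctx, &healthpb.HealthCheckRequest{Service: ""})
	if err != nil {
		log.Printf("treating server as unhealthy: %v", err)
		return
	}
	log.Printf("health status: %s", resp.GetStatus())
}
```
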
diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go
index 5fc0ee3da53bc..fed1c011a3259 100644
--- a/vendor/google.golang.org/grpc/internal/backoff/backoff.go
+++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go
@@ -23,6 +23,8 @@
package backoff
import (
+ "context"
+ "errors"
"time"
grpcbackoff "google.golang.org/grpc/backoff"
@@ -71,3 +73,37 @@ func (bc Exponential) Backoff(retries int) time.Duration {
}
return time.Duration(backoff)
}
+
+// ErrResetBackoff is the error to be returned by the function executed by RunF,
+// to instruct the latter to reset its backoff state.
+var ErrResetBackoff = errors.New("reset backoff state")
+
+// RunF provides a convenient way to run a function f repeatedly until the
+// context expires or f returns a non-nil error that is not ErrResetBackoff.
+// When f returns ErrResetBackoff, RunF continues to run f, but resets its
+// backoff state before doing so. backoff accepts an integer representing the
+// number of retries, and returns the amount of time to backoff.
+func RunF(ctx context.Context, f func() error, backoff func(int) time.Duration) {
+ attempt := 0
+ timer := time.NewTimer(0)
+ for ctx.Err() == nil {
+ select {
+ case <-timer.C:
+ case <-ctx.Done():
+ timer.Stop()
+ return
+ }
+
+ err := f()
+ if errors.Is(err, ErrResetBackoff) {
+ timer.Reset(0)
+ attempt = 0
+ continue
+ }
+ if err != nil {
+ return
+ }
+ timer.Reset(backoff(attempt))
+ attempt++
+ }
+}
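
`RunF` is a new loop helper: it keeps calling `f`, sleeping `backoff(attempt)` between calls, until the context expires or `f` returns an error other than `ErrResetBackoff` (which instead resets the attempt counter). The package is internal to grpc-go, so the sketch below only compiles inside that module and is purely illustrative:

```go
package backoff_test

import (
	"context"
	"errors"
	"testing"
	"time"

	grpcbackoff "google.golang.org/grpc/backoff"
	"google.golang.org/grpc/internal/backoff"
)

func TestRunFSketch(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	bc := backoff.Exponential{Config: grpcbackoff.DefaultConfig}
	calls := 0
	backoff.RunF(ctx, func() error {
		calls++
		switch calls {
		case 2:
			return backoff.ErrResetBackoff // keep going, reset the backoff state
		case 3:
			return errors.New("permanent failure") // any other error stops RunF
		default:
			return nil // keep going with growing backoff
		}
	}, bc.Backoff)

	if calls != 3 {
		t.Fatalf("expected 3 calls, got %d", calls)
	}
}
```
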
diff --git a/vendor/google.golang.org/grpc/internal/balancergroup/balancergroup.go b/vendor/google.golang.org/grpc/internal/balancergroup/balancergroup.go
index 8177fb58da9aa..4cee66aeb6e69 100644
--- a/vendor/google.golang.org/grpc/internal/balancergroup/balancergroup.go
+++ b/vendor/google.golang.org/grpc/internal/balancergroup/balancergroup.go
@@ -328,6 +328,11 @@ func (bg *BalancerGroup) AddWithClientConn(id, balancerName string, cc balancer.
// caching is disabled.
if bg.outgoingStarted && bg.deletedBalancerCache != nil {
if old, ok := bg.deletedBalancerCache.Remove(id); ok {
+ if bg.logger.V(2) {
+ bg.logger.Infof("Removing and reusing child policy of type %q for locality %q from the balancer cache", balancerName, id)
+ bg.logger.Infof("Number of items remaining in the balancer cache: %d", bg.deletedBalancerCache.Len())
+ }
+
sbc, _ = old.(*subBalancerWrapper)
if sbc != nil && sbc.builder != builder {
// If the sub-balancer in cache was built with a different
@@ -403,7 +408,7 @@ func (bg *BalancerGroup) Remove(id string) {
sbToRemove, ok := bg.idToBalancerConfig[id]
if !ok {
- bg.logger.Infof("balancer group: trying to remove a non-existing locality from balancer group: %v", id)
+ bg.logger.Errorf("Child policy for locality %q does not exist in the balancer group", id)
bg.outgoingMu.Unlock()
return
}
@@ -418,7 +423,17 @@ func (bg *BalancerGroup) Remove(id string) {
}
if bg.deletedBalancerCache != nil {
+ if bg.logger.V(2) {
+ bg.logger.Infof("Adding child policy for locality %q to the balancer cache", id)
+ bg.logger.Infof("Number of items remaining in the balancer cache: %d", bg.deletedBalancerCache.Len())
+ }
+
bg.deletedBalancerCache.Add(id, sbToRemove, func() {
+ if bg.logger.V(2) {
+ bg.logger.Infof("Removing child policy for locality %q from the balancer cache after timeout", id)
+ bg.logger.Infof("Number of items remaining in the balancer cache: %d", bg.deletedBalancerCache.Len())
+ }
+
// A sub-balancer evicted from the timeout cache needs to be closed
// and its subConns need to be removed, unconditionally. There is a
// possibility that a sub-balancer might be removed (thereby
diff --git a/vendor/google.golang.org/grpc/internal/cache/timeoutCache.go b/vendor/google.golang.org/grpc/internal/cache/timeoutCache.go
index 3f2d47302c4e1..2fa48701023df 100644
--- a/vendor/google.golang.org/grpc/internal/cache/timeoutCache.go
+++ b/vendor/google.golang.org/grpc/internal/cache/timeoutCache.go
@@ -142,3 +142,10 @@ func (c *TimeoutCache) Clear(runCallback bool) {
entry.callback()
}
}
+
+// Len returns the number of entries in the cache.
+func (c *TimeoutCache) Len() int {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ return len(c.cache)
+}
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
index c8a8c76d628ca..0d94c63e06e2f 100644
--- a/vendor/google.golang.org/grpc/internal/internal.go
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -175,6 +175,12 @@ var (
// GRPCResolverSchemeExtraMetadata determines when gRPC will add extra
// metadata to RPCs.
GRPCResolverSchemeExtraMetadata string = "xds"
+
+ // EnterIdleModeForTesting gets the ClientConn to enter IDLE mode.
+ EnterIdleModeForTesting any // func(*grpc.ClientConn) error
+
+ // ExitIdleModeForTesting gets the ClientConn to exit IDLE mode.
+ ExitIdleModeForTesting any // func(*grpc.ClientConn) error
)
// HealthChecker defines the signature of the client-side LB channel health checking function.
diff --git a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go
index 2f0417bd8db66..00f524a4809eb 100644
--- a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go
+++ b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go
@@ -23,6 +23,7 @@ package grpc_lookup_v1
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
reflect "reflect"
sync "sync"
)
@@ -98,6 +99,8 @@ type RouteLookupRequest struct {
StaleHeaderData string `protobuf:"bytes,6,opt,name=stale_header_data,json=staleHeaderData,proto3" json:"stale_header_data,omitempty"`
// Map of key values extracted via key builders for the gRPC or HTTP request.
KeyMap map[string]string `protobuf:"bytes,4,rep,name=key_map,json=keyMap,proto3" json:"key_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // Application-specific optional extensions.
+ Extensions []*anypb.Any `protobuf:"bytes,7,rep,name=extensions,proto3" json:"extensions,omitempty"`
}
func (x *RouteLookupRequest) Reset() {
@@ -160,6 +163,13 @@ func (x *RouteLookupRequest) GetKeyMap() map[string]string {
return nil
}
+func (x *RouteLookupRequest) GetExtensions() []*anypb.Any {
+ if x != nil {
+ return x.Extensions
+ }
+ return nil
+}
+
type RouteLookupResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -173,6 +183,8 @@ type RouteLookupResponse struct {
// Cached with "target" and sent with all requests that match the request key.
// Allows the RLS to pass its work product to the eventual target.
HeaderData string `protobuf:"bytes,2,opt,name=header_data,json=headerData,proto3" json:"header_data,omitempty"`
+ // Application-specific optional extensions.
+ Extensions []*anypb.Any `protobuf:"bytes,4,rep,name=extensions,proto3" json:"extensions,omitempty"`
}
func (x *RouteLookupResponse) Reset() {
@@ -221,55 +233,70 @@ func (x *RouteLookupResponse) GetHeaderData() string {
return ""
}
+func (x *RouteLookupResponse) GetExtensions() []*anypb.Any {
+ if x != nil {
+ return x.Extensions
+ }
+ return nil
+}
+
var File_grpc_lookup_v1_rls_proto protoreflect.FileDescriptor
var file_grpc_lookup_v1_rls_proto_rawDesc = []byte{
0x0a, 0x18, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2f, 0x76, 0x31,
0x2f, 0x72, 0x6c, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x67, 0x72, 0x70, 0x63,
- 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x22, 0x83, 0x03, 0x0a, 0x12, 0x52,
+ 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb9, 0x03, 0x0a, 0x12, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c,
+ 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b,
+ 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a,
+ 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e,
+ 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52,
+ 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x2e, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e,
+ 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x74, 0x61, 0x6c, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72,
+ 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x74, 0x61,
+ 0x6c, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, 0x12, 0x47, 0x0a, 0x07,
+ 0x6b, 0x65, 0x79, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e,
+ 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52,
+ 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x2e, 0x4b, 0x65, 0x79, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6b,
+ 0x65, 0x79, 0x4d, 0x61, 0x70, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69,
+ 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52,
+ 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4b,
+ 0x65, 0x79, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
+ 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3f, 0x0a, 0x06, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e,
+ 0x12, 0x12, 0x0a, 0x0e, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f,
+ 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x4d,
+ 0x49, 0x53, 0x53, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f,
+ 0x53, 0x54, 0x41, 0x4c, 0x45, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08,
+ 0x02, 0x10, 0x03, 0x52, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x04, 0x70, 0x61, 0x74,
+ 0x68, 0x22, 0x94, 0x01, 0x0a, 0x13, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75,
+ 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x61, 0x72,
+ 0x67, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67,
+ 0x65, 0x74, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x64, 0x61,
+ 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72,
+ 0x44, 0x61, 0x74, 0x61, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f,
+ 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0a,
+ 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02,
+ 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x32, 0x6e, 0x0a, 0x12, 0x52, 0x6f, 0x75, 0x74,
+ 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x58,
+ 0x0a, 0x0b, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x12, 0x22, 0x2e,
+ 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52,
0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79,
- 0x70, 0x65, 0x12, 0x41, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01,
- 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70,
- 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x52, 0x06, 0x72,
- 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x74, 0x61, 0x6c, 0x65, 0x5f, 0x68,
- 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x0f, 0x73, 0x74, 0x61, 0x6c, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x44, 0x61, 0x74,
- 0x61, 0x12, 0x47, 0x0a, 0x07, 0x6b, 0x65, 0x79, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x04, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70,
- 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4b, 0x65, 0x79, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74,
- 0x72, 0x79, 0x52, 0x06, 0x6b, 0x65, 0x79, 0x4d, 0x61, 0x70, 0x1a, 0x39, 0x0a, 0x0b, 0x4b, 0x65,
- 0x79, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76,
- 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
- 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3f, 0x0a, 0x06, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12,
- 0x12, 0x0a, 0x0e, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57,
- 0x4e, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x4d, 0x49,
- 0x53, 0x53, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x53,
- 0x54, 0x41, 0x4c, 0x45, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02,
- 0x10, 0x03, 0x52, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68,
- 0x22, 0x5e, 0x0a, 0x13, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65,
- 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74,
- 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x64, 0x61, 0x74, 0x61,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x44, 0x61,
- 0x74, 0x61, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74,
- 0x32, 0x6e, 0x0a, 0x12, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x53,
- 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x58, 0x0a, 0x0b, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c,
- 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f,
- 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b,
- 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63,
- 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65,
- 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
- 0x42, 0x4d, 0x0a, 0x11, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b,
- 0x75, 0x70, 0x2e, 0x76, 0x31, 0x42, 0x08, 0x52, 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
- 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
- 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70,
- 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x76, 0x31, 0x62,
- 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e,
+ 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x4d, 0x0a, 0x11, 0x69, 0x6f, 0x2e, 0x67,
+ 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x42, 0x08, 0x52,
+ 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70,
+ 0x63, 0x2f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x6c, 0x6f,
+ 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -291,17 +318,20 @@ var file_grpc_lookup_v1_rls_proto_goTypes = []interface{}{
(*RouteLookupRequest)(nil), // 1: grpc.lookup.v1.RouteLookupRequest
(*RouteLookupResponse)(nil), // 2: grpc.lookup.v1.RouteLookupResponse
nil, // 3: grpc.lookup.v1.RouteLookupRequest.KeyMapEntry
+ (*anypb.Any)(nil), // 4: google.protobuf.Any
}
var file_grpc_lookup_v1_rls_proto_depIdxs = []int32{
0, // 0: grpc.lookup.v1.RouteLookupRequest.reason:type_name -> grpc.lookup.v1.RouteLookupRequest.Reason
3, // 1: grpc.lookup.v1.RouteLookupRequest.key_map:type_name -> grpc.lookup.v1.RouteLookupRequest.KeyMapEntry
- 1, // 2: grpc.lookup.v1.RouteLookupService.RouteLookup:input_type -> grpc.lookup.v1.RouteLookupRequest
- 2, // 3: grpc.lookup.v1.RouteLookupService.RouteLookup:output_type -> grpc.lookup.v1.RouteLookupResponse
- 3, // [3:4] is the sub-list for method output_type
- 2, // [2:3] is the sub-list for method input_type
- 2, // [2:2] is the sub-list for extension type_name
- 2, // [2:2] is the sub-list for extension extendee
- 0, // [0:2] is the sub-list for field type_name
+ 4, // 2: grpc.lookup.v1.RouteLookupRequest.extensions:type_name -> google.protobuf.Any
+ 4, // 3: grpc.lookup.v1.RouteLookupResponse.extensions:type_name -> google.protobuf.Any
+ 1, // 4: grpc.lookup.v1.RouteLookupService.RouteLookup:input_type -> grpc.lookup.v1.RouteLookupRequest
+ 2, // 5: grpc.lookup.v1.RouteLookupService.RouteLookup:output_type -> grpc.lookup.v1.RouteLookupResponse
+ 5, // [5:6] is the sub-list for method output_type
+ 4, // [4:5] is the sub-list for method input_type
+ 4, // [4:4] is the sub-list for extension type_name
+ 4, // [4:4] is the sub-list for extension extendee
+ 0, // [0:4] is the sub-list for field type_name
}
func init() { file_grpc_lookup_v1_rls_proto_init() }
diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go
index 4cf85cad9f810..03ef2fedd5cb5 100644
--- a/vendor/google.golang.org/grpc/internal/status/status.go
+++ b/vendor/google.golang.org/grpc/internal/status/status.go
@@ -43,6 +43,34 @@ type Status struct {
s *spb.Status
}
+// NewWithProto returns a new status including details from statusProto. This
+// is meant to be used by the gRPC library only.
+func NewWithProto(code codes.Code, message string, statusProto []string) *Status {
+ if len(statusProto) != 1 {
+ // No grpc-status-details bin header, or multiple; just ignore.
+ return &Status{s: &spb.Status{Code: int32(code), Message: message}}
+ }
+ st := &spb.Status{}
+ if err := proto.Unmarshal([]byte(statusProto[0]), st); err != nil {
+ // Probably not a google.rpc.Status proto; do not provide details.
+ return &Status{s: &spb.Status{Code: int32(code), Message: message}}
+ }
+ if st.Code == int32(code) {
+ // The codes match between the grpc-status header and the
+ // grpc-status-details-bin header; use the full details proto.
+ return &Status{s: st}
+ }
+ return &Status{
+ s: &spb.Status{
+ Code: int32(codes.Internal),
+ Message: fmt.Sprintf(
+ "grpc-status-details-bin mismatch: grpc-status=%v, grpc-message=%q, grpc-status-details-bin=%+v",
+ code, message, st,
+ ),
+ },
+ }
+}
+
// New returns a Status representing c and msg.
func New(c codes.Code, msg string) *Status {
return &Status{s: &spb.Status{Code: int32(c), Message: msg}}
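For orientation, the details that NewWithProto reconstructs are the ones applications attach through the public status API. Below is a minimal, hypothetical sketch of that round trip (the quota-failure message and values are illustrative, not part of this patch), assuming the standard errdetails types:

```go
package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// Server side: build a status and attach a typed detail message.
	st := status.New(codes.FailedPrecondition, "quota exceeded")
	st, err := st.WithDetails(&errdetails.QuotaFailure{
		Violations: []*errdetails.QuotaFailure_Violation{
			{Subject: "project:123", Description: "daily limit reached"},
		},
	})
	if err != nil {
		panic(err)
	}
	rpcErr := st.Err() // returned from a handler; gRPC serializes the details

	// Client side: recover the status and inspect the details.
	got := status.Convert(rpcErr)
	for _, d := range got.Details() {
		if qf, ok := d.(*errdetails.QuotaFailure); ok {
			fmt.Println("quota violation:", qf.Violations[0].Description)
		}
	}
}
```

On the wire the marshalled google.rpc.Status travels in the grpc-status-details-bin trailer, which is what the transport changes later in this patch now read and write directly.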
diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
index 98f80e3fa00aa..17f7a21b5a9f0 100644
--- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
@@ -220,18 +220,20 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
h.Set("Grpc-Message", encodeGrpcMessage(m))
}
+ s.hdrMu.Lock()
if p := st.Proto(); p != nil && len(p.Details) > 0 {
+ delete(s.trailer, grpcStatusDetailsBinHeader)
stBytes, err := proto.Marshal(p)
if err != nil {
// TODO: return error instead, when callers are able to handle it.
panic(err)
}
- h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes))
+ h.Set(grpcStatusDetailsBinHeader, encodeBinHeader(stBytes))
}
- if md := s.Trailer(); len(md) > 0 {
- for k, vv := range md {
+ if len(s.trailer) > 0 {
+ for k, vv := range s.trailer {
// Clients don't tolerate reading restricted headers after some non restricted ones were sent.
if isReservedHeader(k) {
continue
@@ -243,6 +245,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
}
}
}
+ s.hdrMu.Unlock()
})
if err == nil { // transport has not been closed
@@ -287,7 +290,7 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) {
}
// writeCustomHeaders sets custom headers set on the stream via SetHeader
-// on the first write call (Write, WriteHeader, or WriteStatus).
+// on the first write call (Write, WriteHeader, or WriteStatus)
func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) {
h := ht.rw.Header()
@@ -344,7 +347,7 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
return err
}
-func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) {
+func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream)) {
// With this transport type there will be exactly 1 stream: this HTTP request.
ctx := ht.req.Context()
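The handler-based transport changed above is the one used when gRPC is served through grpc.Server's ServeHTTP rather than Serve. For context only, a hypothetical sketch of that serving mode (the mux and h2c wiring are illustrative assumptions, not part of this patch):

```go
package main

import (
	"net/http"
	"strings"

	"golang.org/x/net/http2"
	"golang.org/x/net/http2/h2c"
	"google.golang.org/grpc"
)

func main() {
	grpcServer := grpc.NewServer()
	// ... register services on grpcServer here.

	httpMux := http.NewServeMux()
	httpMux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	})

	// Route gRPC requests to the handler-based transport and everything else
	// to the plain HTTP mux. h2c allows HTTP/2 without TLS for local testing.
	mixed := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.ProtoMajor == 2 && strings.HasPrefix(r.Header.Get("Content-Type"), "application/grpc") {
			grpcServer.ServeHTTP(w, r)
			return
		}
		httpMux.ServeHTTP(w, r)
	})

	_ = http.ListenAndServe(":8080", h2c.NewHandler(mixed, &http2.Server{}))
}
```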
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
index badab8acf3b11..d6f5c49358b58 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
@@ -1399,7 +1399,6 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
mdata = make(map[string][]string)
contentTypeErr = "malformed header: missing HTTP content-type"
grpcMessage string
- statusGen *status.Status
recvCompress string
httpStatusCode *int
httpStatusErr string
@@ -1434,12 +1433,6 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
rawStatusCode = codes.Code(uint32(code))
case "grpc-message":
grpcMessage = decodeGrpcMessage(hf.Value)
- case "grpc-status-details-bin":
- var err error
- statusGen, err = decodeGRPCStatusDetails(hf.Value)
- if err != nil {
- headerError = fmt.Sprintf("transport: malformed grpc-status-details-bin: %v", err)
- }
case ":status":
if hf.Value == "200" {
httpStatusErr = ""
@@ -1548,14 +1541,12 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
return
}
- if statusGen == nil {
- statusGen = status.New(rawStatusCode, grpcMessage)
- }
+ status := istatus.NewWithProto(rawStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader])
// If client received END_STREAM from server while stream was still active,
// send RST_STREAM.
rstStream := s.getState() == streamActive
- t.closeStream(s, io.EOF, rstStream, http2.ErrCodeNo, statusGen, mdata, true)
+ t.closeStream(s, io.EOF, rstStream, http2.ErrCodeNo, status, mdata, true)
}
// readServerPreface reads and handles the initial settings frame from the
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
index c06db679d89cc..6fa1eb41992a0 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
@@ -342,7 +342,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
// operateHeaders takes action on the decoded headers. Returns an error if fatal
// error encountered and transport needs to close, otherwise returns nil.
-func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) error {
+func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream)) error {
// Acquire max stream ID lock for entire duration
t.maxStreamMu.Lock()
defer t.maxStreamMu.Unlock()
@@ -561,7 +561,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
}
if t.inTapHandle != nil {
var err error
- if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil {
+ if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method, Header: mdata}); err != nil {
t.mu.Unlock()
if t.logger.V(logLevel) {
t.logger.Infof("Aborting the stream early due to InTapHandle failure: %v", err)
@@ -592,7 +592,6 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
s.requestRead = func(n int) {
t.adjustWindow(s, uint32(n))
}
- s.ctx = traceCtx(s.ctx, s.method)
for _, sh := range t.stats {
s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
inHeader := &stats.InHeader{
@@ -630,7 +629,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
// HandleStreams receives incoming streams using the given handler. This is
// typically run in a separate goroutine.
// traceCtx attaches trace to ctx and returns the new context.
-func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) {
+func (t *http2Server) HandleStreams(handle func(*Stream)) {
defer close(t.readerDone)
for {
t.controlBuf.throttle()
@@ -665,7 +664,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
}
switch frame := frame.(type) {
case *http2.MetaHeadersFrame:
- if err := t.operateHeaders(frame, handle, traceCtx); err != nil {
+ if err := t.operateHeaders(frame, handle); err != nil {
t.Close(err)
break
}
@@ -1053,12 +1052,15 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())})
if p := st.Proto(); p != nil && len(p.Details) > 0 {
+ // Do not use the user's grpc-status-details-bin (if present) if we are
+ // even attempting to set our own.
+ delete(s.trailer, grpcStatusDetailsBinHeader)
stBytes, err := proto.Marshal(p)
if err != nil {
// TODO: return error instead, when callers are able to handle it.
t.logger.Errorf("Failed to marshal rpc status: %s, error: %v", pretty.ToJSON(p), err)
} else {
- headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)})
+ headerFields = append(headerFields, hpack.HeaderField{Name: grpcStatusDetailsBinHeader, Value: encodeBinHeader(stBytes)})
}
}
diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go
index 1958140082b35..dc29d590e91fb 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http_util.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go
@@ -34,12 +34,9 @@ import (
"time"
"unicode/utf8"
- "github.com/golang/protobuf/proto"
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
- spb "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
)
const (
@@ -88,6 +85,8 @@ var (
}
)
+var grpcStatusDetailsBinHeader = "grpc-status-details-bin"
+
// isReservedHeader checks whether hdr belongs to HTTP2 headers
// reserved by gRPC protocol. Any other headers are classified as the
// user-specified metadata.
@@ -103,7 +102,6 @@ func isReservedHeader(hdr string) bool {
"grpc-message",
"grpc-status",
"grpc-timeout",
- "grpc-status-details-bin",
// Intentionally exclude grpc-previous-rpc-attempts and
// grpc-retry-pushback-ms, which are "reserved", but their API
// intentionally works via metadata.
@@ -154,18 +152,6 @@ func decodeMetadataHeader(k, v string) (string, error) {
return v, nil
}
-func decodeGRPCStatusDetails(rawDetails string) (*status.Status, error) {
- v, err := decodeBinHeader(rawDetails)
- if err != nil {
- return nil, err
- }
- st := &spb.Status{}
- if err = proto.Unmarshal(v, st); err != nil {
- return nil, err
- }
- return status.FromProto(st), nil
-}
-
type timeoutUnit uint8
const (
diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go
index 74a811fc0590b..aac056e723bb5 100644
--- a/vendor/google.golang.org/grpc/internal/transport/transport.go
+++ b/vendor/google.golang.org/grpc/internal/transport/transport.go
@@ -698,7 +698,7 @@ type ClientTransport interface {
// Write methods for a given Stream will be called serially.
type ServerTransport interface {
// HandleStreams receives incoming streams using the given handler.
- HandleStreams(func(*Stream), func(context.Context, string) context.Context)
+ HandleStreams(func(*Stream))
// WriteHeader sends the header metadata for the given stream.
// WriteHeader may not be called on all streams.
diff --git a/vendor/google.golang.org/grpc/orca/producer.go b/vendor/google.golang.org/grpc/orca/producer.go
index 2d58725547fc0..04edae6de66f1 100644
--- a/vendor/google.golang.org/grpc/orca/producer.go
+++ b/vendor/google.golang.org/grpc/orca/producer.go
@@ -24,6 +24,7 @@ import (
"google.golang.org/grpc"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/codes"
+ "google.golang.org/grpc/internal/backoff"
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/orca/internal"
"google.golang.org/grpc/status"
@@ -169,48 +170,29 @@ func (p *producer) updateRunLocked() {
func (p *producer) run(ctx context.Context, done chan struct{}, interval time.Duration) {
defer close(done)
- backoffAttempt := 0
- backoffTimer := time.NewTimer(0)
- for ctx.Err() == nil {
- select {
- case <-backoffTimer.C:
- case <-ctx.Done():
- return
- }
-
+ runStream := func() error {
resetBackoff, err := p.runStream(ctx, interval)
-
- if resetBackoff {
- backoffTimer.Reset(0)
- backoffAttempt = 0
- } else {
- backoffTimer.Reset(p.backoff(backoffAttempt))
- backoffAttempt++
- }
-
- switch {
- case err == nil:
- // No error was encountered; restart the stream.
- case ctx.Err() != nil:
- // Producer was stopped; exit immediately and without logging an
- // error.
- return
- case status.Code(err) == codes.Unimplemented:
+ if status.Code(err) == codes.Unimplemented {
// Unimplemented; do not retry.
logger.Error("Server doesn't support ORCA OOB load reporting protocol; not listening for load reports.")
- return
- case status.Code(err) == codes.Unavailable, status.Code(err) == codes.Canceled:
- // TODO: these codes should ideally log an error, too, but for now
- // we receive them when shutting down the ClientConn (Unavailable
- // if the stream hasn't started yet, and Canceled if it happens
- // mid-stream). Once we can determine the state or ensure the
- // producer is stopped before the stream ends, we can log an error
- // when it's not a natural shutdown.
- default:
- // Log all other errors.
+ return err
+ }
+ // Retry for all other errors.
+ if code := status.Code(err); code != codes.Unavailable && code != codes.Canceled {
+ // TODO: Unavailable and Canceled should also ideally log an error,
+ // but for now we receive them when shutting down the ClientConn
+ // (Unavailable if the stream hasn't started yet, and Canceled if it
+ // happens mid-stream). Once we can determine the state or ensure
+ // the producer is stopped before the stream ends, we can log an
+ // error when it's not a natural shutdown.
logger.Error("Received unexpected stream error:", err)
}
+ if resetBackoff {
+ return backoff.ErrResetBackoff
+ }
+ return nil
}
+ backoff.RunF(ctx, runStream, p.backoff)
}
// runStream runs a single stream on the subchannel and returns the resulting
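backoff.RunF is an internal helper, so no public API is shown here; the retry-with-resettable-backoff pattern that replaces the hand-rolled loop looks roughly like this standalone sketch (my own illustration of the pattern, not the internal implementation):

```go
package main

import (
	"context"
	"errors"
	"time"
)

// errResetBackoff tells runWithBackoff to retry immediately and restart the
// backoff schedule, mirroring backoff.ErrResetBackoff in spirit.
var errResetBackoff = errors.New("reset backoff")

func runWithBackoff(ctx context.Context, f func() error, backoff func(int) time.Duration) {
	attempt := 0
	timer := time.NewTimer(0)
	defer timer.Stop()
	for ctx.Err() == nil {
		select {
		case <-timer.C:
		case <-ctx.Done():
			return
		}
		if err := f(); errors.Is(err, errResetBackoff) {
			// The run succeeded at least partially; retry without delay.
			attempt = 0
			timer.Reset(0)
			continue
		}
		timer.Reset(backoff(attempt))
		attempt++
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	runWithBackoff(ctx, func() error { return nil }, func(n int) time.Duration {
		return time.Duration(n+1) * 100 * time.Millisecond
	})
}
```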
diff --git a/vendor/google.golang.org/grpc/resolver/manual/manual.go b/vendor/google.golang.org/grpc/resolver/manual/manual.go
index e6b0f14cd941f..0a4262342f358 100644
--- a/vendor/google.golang.org/grpc/resolver/manual/manual.go
+++ b/vendor/google.golang.org/grpc/resolver/manual/manual.go
@@ -26,7 +26,9 @@ import (
"google.golang.org/grpc/resolver"
)
-// NewBuilderWithScheme creates a new test resolver builder with the given scheme.
+// NewBuilderWithScheme creates a new manual resolver builder with the given
+// scheme. Every instance of the manual resolver may only ever be used with a
+// single grpc.ClientConn. Otherwise, bad things will happen.
func NewBuilderWithScheme(scheme string) *Resolver {
return &Resolver{
BuildCallback: func(resolver.Target, resolver.ClientConn, resolver.BuildOptions) {},
@@ -58,30 +60,34 @@ type Resolver struct {
scheme string
// Fields actually belong to the resolver.
- mu sync.Mutex // Guards access to CC.
- CC resolver.ClientConn
- bootstrapState *resolver.State
+ // Guards access to below fields.
+ mu sync.Mutex
+ CC resolver.ClientConn
+ // Storing the most recent state update makes this resolver resilient to
+ // restarts, which is possible with channel idleness.
+ lastSeenState *resolver.State
}
// InitialState adds initial state to the resolver so that UpdateState doesn't
// need to be explicitly called after Dial.
func (r *Resolver) InitialState(s resolver.State) {
- r.bootstrapState = &s
+ r.lastSeenState = &s
}
// Build returns itself for Resolver, because it's both a builder and a resolver.
func (r *Resolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
+ r.BuildCallback(target, cc, opts)
r.mu.Lock()
r.CC = cc
- r.mu.Unlock()
- r.BuildCallback(target, cc, opts)
- if r.bootstrapState != nil {
- r.UpdateState(*r.bootstrapState)
+ if r.lastSeenState != nil {
+ err := r.CC.UpdateState(*r.lastSeenState)
+ go r.UpdateStateCallback(err)
}
+ r.mu.Unlock()
return r, nil
}
-// Scheme returns the test scheme.
+// Scheme returns the manual resolver's scheme.
func (r *Resolver) Scheme() string {
return r.scheme
}
@@ -100,6 +106,7 @@ func (r *Resolver) Close() {
func (r *Resolver) UpdateState(s resolver.State) {
r.mu.Lock()
err := r.CC.UpdateState(s)
+ r.lastSeenState = &s
r.mu.Unlock()
r.UpdateStateCallback(err)
}
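For context, a typical (hypothetical) use of the manual resolver in a test; storing lastSeenState is what lets a rebuilt resolver replay the most recent addresses after the channel goes idle. The scheme, target, and addresses below are illustrative:

```go
package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/resolver"
	"google.golang.org/grpc/resolver/manual"
)

func main() {
	// One manual resolver per ClientConn, as the updated doc comment requires.
	r := manual.NewBuilderWithScheme("example")
	r.InitialState(resolver.State{
		Addresses: []resolver.Address{{Addr: "127.0.0.1:50051"}},
	})

	cc, err := grpc.Dial(
		r.Scheme()+":///test.server",
		grpc.WithResolvers(r),
		grpc.WithTransportCredentials(insecure.NewCredentials()),
	)
	if err != nil {
		panic(err)
	}
	defer cc.Close()

	// Later, push a new address list; the resolver now also remembers it as
	// lastSeenState so a rebuilt resolver can replay it.
	r.UpdateState(resolver.State{
		Addresses: []resolver.Address{{Addr: "127.0.0.1:50052"}},
	})
}
```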
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
index eeae92fbe0204..8f60d421437d9 100644
--- a/vendor/google.golang.org/grpc/server.go
+++ b/vendor/google.golang.org/grpc/server.go
@@ -983,7 +983,7 @@ func (s *Server) serveStreams(st transport.ServerTransport) {
f := func() {
defer streamQuota.release()
defer wg.Done()
- s.handleStream(st, stream, s.traceInfo(st, stream))
+ s.handleStream(st, stream)
}
if s.opts.numServerWorkers > 0 {
@@ -995,12 +995,6 @@ func (s *Server) serveStreams(st transport.ServerTransport) {
}
}
go f()
- }, func(ctx context.Context, method string) context.Context {
- if !EnableTracing {
- return ctx
- }
- tr := trace.New("grpc.Recv."+methodFamily(method), method)
- return trace.NewContext(ctx, tr)
})
wg.Wait()
}
@@ -1049,30 +1043,6 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
s.serveStreams(st)
}
-// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled.
-// If tracing is not enabled, it returns nil.
-func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) {
- if !EnableTracing {
- return nil
- }
- tr, ok := trace.FromContext(stream.Context())
- if !ok {
- return nil
- }
-
- trInfo = &traceInfo{
- tr: tr,
- firstLine: firstLine{
- client: false,
- remoteAddr: st.RemoteAddr(),
- },
- }
- if dl, ok := stream.Context().Deadline(); ok {
- trInfo.firstLine.deadline = time.Until(dl)
- }
- return trInfo
-}
-
func (s *Server) addConn(addr string, st transport.ServerTransport) bool {
s.mu.Lock()
defer s.mu.Unlock()
@@ -1133,7 +1103,7 @@ func (s *Server) incrCallsFailed() {
atomic.AddInt64(&s.czData.callsFailed, 1)
}
-func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error {
+func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error {
data, err := encode(s.getCodec(stream.ContentSubtype()), msg)
if err != nil {
channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err)
@@ -1152,7 +1122,7 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str
err = t.Write(stream, hdr, payload, opts)
if err == nil {
for _, sh := range s.opts.statsHandlers {
- sh.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now()))
+ sh.HandleRPC(ctx, outPayload(false, msg, data, payload, time.Now()))
}
}
return err
@@ -1194,7 +1164,7 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info
}
}
-func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
+func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
shs := s.opts.statsHandlers
if len(shs) != 0 || trInfo != nil || channelz.IsOn() {
if channelz.IsOn() {
@@ -1208,7 +1178,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
IsClientStream: false,
IsServerStream: false,
}
- sh.HandleRPC(stream.Context(), statsBegin)
+ sh.HandleRPC(ctx, statsBegin)
}
if trInfo != nil {
trInfo.tr.LazyLog(&trInfo.firstLine, false)
@@ -1240,7 +1210,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
if err != nil && err != io.EOF {
end.Error = toRPCErr(err)
}
- sh.HandleRPC(stream.Context(), end)
+ sh.HandleRPC(ctx, end)
}
if channelz.IsOn() {
@@ -1262,7 +1232,6 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
}
}
if len(binlogs) != 0 {
- ctx := stream.Context()
md, _ := metadata.FromIncomingContext(ctx)
logEntry := &binarylog.ClientHeader{
Header: md,
@@ -1348,7 +1317,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
}
for _, sh := range shs {
- sh.HandleRPC(stream.Context(), &stats.InPayload{
+ sh.HandleRPC(ctx, &stats.InPayload{
RecvTime: time.Now(),
Payload: v,
Length: len(d),
@@ -1362,7 +1331,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
Message: d,
}
for _, binlog := range binlogs {
- binlog.Log(stream.Context(), cm)
+ binlog.Log(ctx, cm)
}
}
if trInfo != nil {
@@ -1370,7 +1339,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
}
return nil
}
- ctx := NewContextWithServerTransportStream(stream.Context(), stream)
+ ctx = NewContextWithServerTransportStream(ctx, stream)
reply, appErr := md.Handler(info.serviceImpl, ctx, df, s.opts.unaryInt)
if appErr != nil {
appStatus, ok := status.FromError(appErr)
@@ -1395,7 +1364,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
Header: h,
}
for _, binlog := range binlogs {
- binlog.Log(stream.Context(), sh)
+ binlog.Log(ctx, sh)
}
}
st := &binarylog.ServerTrailer{
@@ -1403,7 +1372,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
Err: appErr,
}
for _, binlog := range binlogs {
- binlog.Log(stream.Context(), st)
+ binlog.Log(ctx, st)
}
}
return appErr
@@ -1418,7 +1387,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
if stream.SendCompress() != sendCompressorName {
comp = encoding.GetCompressor(stream.SendCompress())
}
- if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil {
+ if err := s.sendResponse(ctx, t, stream, reply, cp, opts, comp); err != nil {
if err == io.EOF {
// The entire stream is done (for unary RPC only).
return err
@@ -1445,8 +1414,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
Err: appErr,
}
for _, binlog := range binlogs {
- binlog.Log(stream.Context(), sh)
- binlog.Log(stream.Context(), st)
+ binlog.Log(ctx, sh)
+ binlog.Log(ctx, st)
}
}
return err
@@ -1460,8 +1429,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
Message: reply,
}
for _, binlog := range binlogs {
- binlog.Log(stream.Context(), sh)
- binlog.Log(stream.Context(), sm)
+ binlog.Log(ctx, sh)
+ binlog.Log(ctx, sm)
}
}
if channelz.IsOn() {
@@ -1479,7 +1448,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
Err: appErr,
}
for _, binlog := range binlogs {
- binlog.Log(stream.Context(), st)
+ binlog.Log(ctx, st)
}
}
return t.WriteStatus(stream, statusOK)
@@ -1521,7 +1490,7 @@ func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, inf
}
}
-func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) {
+func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) {
if channelz.IsOn() {
s.incrCallsStarted()
}
@@ -1535,10 +1504,10 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
IsServerStream: sd.ServerStreams,
}
for _, sh := range shs {
- sh.HandleRPC(stream.Context(), statsBegin)
+ sh.HandleRPC(ctx, statsBegin)
}
}
- ctx := NewContextWithServerTransportStream(stream.Context(), stream)
+ ctx = NewContextWithServerTransportStream(ctx, stream)
ss := &serverStream{
ctx: ctx,
t: t,
@@ -1574,7 +1543,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
end.Error = toRPCErr(err)
}
for _, sh := range shs {
- sh.HandleRPC(stream.Context(), end)
+ sh.HandleRPC(ctx, end)
}
}
@@ -1616,7 +1585,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
logEntry.PeerAddr = peer.Addr
}
for _, binlog := range ss.binlogs {
- binlog.Log(stream.Context(), logEntry)
+ binlog.Log(ctx, logEntry)
}
}
@@ -1694,7 +1663,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
Err: appErr,
}
for _, binlog := range ss.binlogs {
- binlog.Log(stream.Context(), st)
+ binlog.Log(ctx, st)
}
}
t.WriteStatus(ss.s, appStatus)
@@ -1712,33 +1681,50 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
Err: appErr,
}
for _, binlog := range ss.binlogs {
- binlog.Log(stream.Context(), st)
+ binlog.Log(ctx, st)
}
}
return t.WriteStatus(ss.s, statusOK)
}
-func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) {
+func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) {
+ ctx := stream.Context()
+ var ti *traceInfo
+ if EnableTracing {
+ tr := trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method())
+ ctx = trace.NewContext(ctx, tr)
+ ti = &traceInfo{
+ tr: tr,
+ firstLine: firstLine{
+ client: false,
+ remoteAddr: t.RemoteAddr(),
+ },
+ }
+ if dl, ok := ctx.Deadline(); ok {
+ ti.firstLine.deadline = time.Until(dl)
+ }
+ }
+
sm := stream.Method()
if sm != "" && sm[0] == '/' {
sm = sm[1:]
}
pos := strings.LastIndex(sm, "/")
if pos == -1 {
- if trInfo != nil {
- trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []any{sm}}, true)
- trInfo.tr.SetError()
+ if ti != nil {
+ ti.tr.LazyLog(&fmtStringer{"Malformed method name %q", []any{sm}}, true)
+ ti.tr.SetError()
}
errDesc := fmt.Sprintf("malformed method name: %q", stream.Method())
if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
- if trInfo != nil {
- trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
- trInfo.tr.SetError()
+ if ti != nil {
+ ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
+ ti.tr.SetError()
}
channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err)
}
- if trInfo != nil {
- trInfo.tr.Finish()
+ if ti != nil {
+ ti.tr.Finish()
}
return
}
@@ -1748,17 +1734,17 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
srv, knownService := s.services[service]
if knownService {
if md, ok := srv.methods[method]; ok {
- s.processUnaryRPC(t, stream, srv, md, trInfo)
+ s.processUnaryRPC(ctx, t, stream, srv, md, ti)
return
}
if sd, ok := srv.streams[method]; ok {
- s.processStreamingRPC(t, stream, srv, sd, trInfo)
+ s.processStreamingRPC(ctx, t, stream, srv, sd, ti)
return
}
}
// Unknown service, or known server unknown method.
if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil {
- s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo)
+ s.processStreamingRPC(ctx, t, stream, nil, unknownDesc, ti)
return
}
var errDesc string
@@ -1767,19 +1753,19 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
} else {
errDesc = fmt.Sprintf("unknown method %v for service %v", method, service)
}
- if trInfo != nil {
- trInfo.tr.LazyPrintf("%s", errDesc)
- trInfo.tr.SetError()
+ if ti != nil {
+ ti.tr.LazyPrintf("%s", errDesc)
+ ti.tr.SetError()
}
if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
- if trInfo != nil {
- trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
- trInfo.tr.SetError()
+ if ti != nil {
+ ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
+ ti.tr.SetError()
}
channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err)
}
- if trInfo != nil {
- trInfo.tr.Finish()
+ if ti != nil {
+ ti.tr.Finish()
}
}
diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go
index bfa5dfa40e4d1..07f0125768808 100644
--- a/vendor/google.golang.org/grpc/tap/tap.go
+++ b/vendor/google.golang.org/grpc/tap/tap.go
@@ -27,6 +27,8 @@ package tap
import (
"context"
+
+ "google.golang.org/grpc/metadata"
)
// Info defines the relevant information needed by the handles.
@@ -34,6 +36,10 @@ type Info struct {
// FullMethodName is the string of grpc method (in the format of
// /package.service/method).
FullMethodName string
+
+ // Header contains the header metadata received.
+ Header metadata.MD
+
// TODO: More to be added.
}
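The new Header field lets a tap handle inspect incoming metadata before any server resources are allocated for the RPC. A minimal, hypothetical sketch (requireTenantHeader and the header name are illustrative, not part of this patch):

```go
package main

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"google.golang.org/grpc/tap"
)

// requireTenantHeader drops RPCs that do not carry an "x-tenant-id" header.
// Returning an error from a tap handle aborts the stream before the
// application handler runs.
func requireTenantHeader(ctx context.Context, info *tap.Info) (context.Context, error) {
	if len(info.Header.Get("x-tenant-id")) == 0 {
		return nil, status.Errorf(codes.Unauthenticated, "missing x-tenant-id for %s", info.FullMethodName)
	}
	return ctx, nil
}

func main() {
	_ = grpc.NewServer(grpc.InTapHandle(requireTenantHeader))
	// ... register services and call Serve() as usual.
}
```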
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
index 724ad21021300..6d2cadd79a9b9 100644
--- a/vendor/google.golang.org/grpc/version.go
+++ b/vendor/google.golang.org/grpc/version.go
@@ -19,4 +19,4 @@
package grpc
// Version is the current grpc version.
-const Version = "1.58.3"
+const Version = "1.59.0"
diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh
index bbc9e2e3c8e36..bb480f1f9cca1 100644
--- a/vendor/google.golang.org/grpc/vet.sh
+++ b/vendor/google.golang.org/grpc/vet.sh
@@ -93,6 +93,9 @@ git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpc
# - Ensure all ptypes proto packages are renamed when importing.
not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go"
+# - Ensure all usages of grpc_testing package are renamed when importing.
+not git grep "\(import \|^\s*\)\"google.golang.org/grpc/interop/grpc_testing" -- "*.go"
+
# - Ensure all xds proto imports are renamed to *pb or *grpc.
git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "'
diff --git a/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go b/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go
index f8f749835c24f..074154a751be3 100644
--- a/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go
+++ b/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go
@@ -47,9 +47,8 @@ import (
)
const (
- c2pScheme = "google-c2p"
- c2pExperimentalScheme = "google-c2p-experimental"
- c2pAuthority = "traffic-director-c2p.xds.googleapis.com"
+ c2pScheme = "google-c2p"
+ c2pAuthority = "traffic-director-c2p.xds.googleapis.com"
tdURL = "dns:///directpath-pa.googleapis.com"
httpReqTimeout = 10 * time.Second
@@ -77,18 +76,10 @@ var (
)
func init() {
- resolver.Register(c2pResolverBuilder{
- scheme: c2pScheme,
- })
- // TODO(apolcyn): remove this experimental scheme before the 1.52 release
- resolver.Register(c2pResolverBuilder{
- scheme: c2pExperimentalScheme,
- })
+ resolver.Register(c2pResolverBuilder{})
}
-type c2pResolverBuilder struct {
- scheme string
-}
+type c2pResolverBuilder struct{}
func (c2pResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
if t.URL.Host != "" {
@@ -165,7 +156,7 @@ func (c2pResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts
}
func (b c2pResolverBuilder) Scheme() string {
- return b.scheme
+ return c2pScheme
}
type c2pResolver struct {
diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go
index 85a081d09df55..34c3592180750 100644
--- a/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go
+++ b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go
@@ -18,8 +18,8 @@
package cdsbalancer
import (
+ "context"
"encoding/json"
- "errors"
"fmt"
"google.golang.org/grpc/balancer"
@@ -28,7 +28,6 @@ import (
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/tls/certprovider"
"google.golang.org/grpc/internal/balancer/nop"
- "google.golang.org/grpc/internal/buffer"
xdsinternal "google.golang.org/grpc/internal/credentials/xds"
"google.golang.org/grpc/internal/envconfig"
"google.golang.org/grpc/internal/grpclog"
@@ -42,11 +41,13 @@ import (
)
const (
- cdsName = "cds_experimental"
+ cdsName = "cds_experimental"
+ aggregateClusterMaxDepth = 16
)
var (
- errBalancerClosed = errors.New("cds_experimental LB policy is closed")
+ errBalancerClosed = fmt.Errorf("cds_experimental LB policy is closed")
+ errExceedsMaxDepth = fmt.Errorf("aggregate cluster graph exceeds max depth (%d)", aggregateClusterMaxDepth)
// newChildBalancer is a helper function to build a new cluster_resolver
// balancer and will be overridden in unittests.
@@ -81,22 +82,29 @@ func (bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Bal
logger.Errorf("%q LB policy is needed but not registered", clusterresolver.Name)
return nop.NewBalancer(cc, fmt.Errorf("%q LB policy is needed but not registered", clusterresolver.Name))
}
- crParser, ok := builder.(balancer.ConfigParser)
+ parser, ok := builder.(balancer.ConfigParser)
if !ok {
// Shouldn't happen, imported Cluster Resolver builder has this method.
logger.Errorf("%q LB policy does not implement a config parser", clusterresolver.Name)
return nop.NewBalancer(cc, fmt.Errorf("%q LB policy does not implement a config parser", clusterresolver.Name))
}
+
+ ctx, cancel := context.WithCancel(context.Background())
b := &cdsBalancer{
- bOpts: opts,
- updateCh: buffer.NewUnbounded(),
- closed: grpcsync.NewEvent(),
- done: grpcsync.NewEvent(),
- crParser: crParser,
- xdsHI: xdsinternal.NewHandshakeInfo(nil, nil),
+ bOpts: opts,
+ childConfigParser: parser,
+ serializer: grpcsync.NewCallbackSerializer(ctx),
+ serializerCancel: cancel,
+ xdsHI: xdsinternal.NewHandshakeInfo(nil, nil),
+ watchers: make(map[string]*watcherState),
+ }
+ b.ccw = &ccWrapper{
+ ClientConn: cc,
+ xdsHI: b.xdsHI,
}
b.logger = prefixLogger((b))
b.logger.Infof("Created")
+
var creds credentials.TransportCredentials
switch {
case opts.DialCreds != nil:
@@ -108,12 +116,6 @@ func (bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Bal
b.xdsCredsInUse = true
}
b.logger.Infof("xDS credentials in use: %v", b.xdsCredsInUse)
- b.clusterHandler = newClusterHandler(b)
- b.ccw = &ccWrapper{
- ClientConn: cc,
- xdsHI: b.xdsHI,
- }
- go b.run()
return b
}
@@ -139,61 +141,45 @@ func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, err
return &cfg, nil
}
-// ccUpdate wraps a clientConn update received from gRPC (pushed from the
-// xdsResolver). A valid clusterName causes the cdsBalancer to register a CDS
-// watcher with the xdsClient, while a non-nil error causes it to cancel the
-// existing watch and propagate the error to the underlying cluster_resolver
-// balancer.
-type ccUpdate struct {
- clusterName string
- err error
-}
-
-type exitIdle struct{}
-
// cdsBalancer implements a CDS based LB policy. It instantiates a
// cluster_resolver balancer to further resolve the serviceName received from
// CDS, into localities and endpoints. Implements the balancer.Balancer
// interface which is exposed to gRPC and implements the balancer.ClientConn
// interface which is exposed to the cluster_resolver balancer.
type cdsBalancer struct {
- ccw *ccWrapper // ClientConn interface passed to child LB.
- bOpts balancer.BuildOptions // BuildOptions passed to child LB.
- updateCh *buffer.Unbounded // Channel for gRPC and xdsClient updates.
- xdsClient xdsclient.XDSClient // xDS client to watch Cluster resource.
- clusterHandler *clusterHandler // To watch the clusters.
- childLB balancer.Balancer
- logger *grpclog.PrefixLogger
- closed *grpcsync.Event
- done *grpcsync.Event
- crParser balancer.ConfigParser
+ // The following fields are initialized at build time and are either
+ // read-only after that or provide their own synchronization, and therefore
+ // do not need to be guarded by a mutex.
+ ccw *ccWrapper // ClientConn interface passed to child LB.
+ bOpts balancer.BuildOptions // BuildOptions passed to child LB.
+ childConfigParser balancer.ConfigParser // Config parser for cluster_resolver LB policy.
+ xdsHI *xdsinternal.HandshakeInfo // Handshake info from security configuration.
+ logger *grpclog.PrefixLogger // Prefix logger for all logging.
+
+ // The serializer and its cancel func are initialized at build time, and the
+ // rest of the fields here are only accessed from serializer callbacks (or
+ // from balancer.Balancer methods, which themselves are guaranteed to be
+ // mutually exclusive) and hence do not need to be guarded by a mutex.
+ serializer *grpcsync.CallbackSerializer // Serializes updates from gRPC and xDS client.
+ serializerCancel context.CancelFunc // Stops the above serializer.
+ childLB balancer.Balancer // Child policy, built upon resolution of the cluster graph.
+ xdsClient xdsclient.XDSClient // xDS client to watch Cluster resources.
+ watchers map[string]*watcherState // Set of watchers and associated state, keyed by cluster name.
+ lbCfg *lbConfig // Current load balancing configuration.
// The certificate providers are cached here to that they can be closed when
// a new provider is to be created.
cachedRoot certprovider.Provider
cachedIdentity certprovider.Provider
- xdsHI *xdsinternal.HandshakeInfo
xdsCredsInUse bool
}
-// handleClientConnUpdate handles a ClientConnUpdate received from gRPC. Good
-// updates lead to registration of a CDS watch. Updates with error lead to
-// cancellation of existing watch and propagation of the same error to the
-// cluster_resolver balancer.
-func (b *cdsBalancer) handleClientConnUpdate(update *ccUpdate) {
- // We first handle errors, if any, and then proceed with handling the
- // update, only if the status quo has changed.
- if err := update.err; err != nil {
- b.handleErrorFromUpdate(err, true)
- return
- }
- b.clusterHandler.updateRootCluster(update.clusterName)
-}
-
// handleSecurityConfig processes the security configuration received from the
// management server, creates appropriate certificate provider plugins, and
 // updates the HandshakeInfo which is added as an address attribute in
// NewSubConn() calls.
+//
+// Only executed in the context of a serializer callback.
func (b *cdsBalancer) handleSecurityConfig(config *xdsresource.SecurityConfig) error {
// If xdsCredentials are not in use, i.e, the user did not want to get
// security configuration from an xDS server, we should not be acting on the
@@ -220,7 +206,7 @@ func (b *cdsBalancer) handleSecurityConfig(config *xdsresource.SecurityConfig) e
// Bootstrap did not find any certificate provider configs, but the user
// has specified xdsCredentials and the management server has sent down
// security configuration.
- return errors.New("xds: certificate_providers config missing in bootstrap file")
+ return fmt.Errorf("xds: certificate_providers config missing in bootstrap file")
}
cpc := bc.CertProviderConfigs
@@ -278,220 +264,29 @@ func buildProviderFunc(configs map[string]*certprovider.BuildableConfig, instanc
return provider, nil
}
-// handleWatchUpdate handles a watch update from the xDS Client. Good updates
-// lead to clientConn updates being invoked on the underlying cluster_resolver balancer.
-func (b *cdsBalancer) handleWatchUpdate(update clusterHandlerUpdate) {
- if err := update.err; err != nil {
- b.logger.Warningf("Watch error from xds-client %p: %v", b.xdsClient, err)
- b.handleErrorFromUpdate(err, false)
- return
- }
-
- b.logger.Infof("Received Cluster resource contains content: %s, security config: %s", pretty.ToJSON(update.updates), pretty.ToJSON(update.securityCfg))
-
- // Process the security config from the received update before building the
- // child policy or forwarding the update to it. We do this because the child
- // policy may try to create a new subConn inline. Processing the security
- // configuration here and setting up the handshakeInfo will make sure that
- // such attempts are handled properly.
- if err := b.handleSecurityConfig(update.securityCfg); err != nil {
- // If the security config is invalid, for example, if the provider
- // instance is not found in the bootstrap config, we need to put the
- // channel in transient failure.
- b.logger.Warningf("Received Cluster resource contains invalid security config: %v", err)
- b.handleErrorFromUpdate(err, false)
- return
- }
-
- // The first good update from the watch API leads to the instantiation of an
- // cluster_resolver balancer. Further updates/errors are propagated to the existing
- // cluster_resolver balancer.
- if b.childLB == nil {
- childLB, err := newChildBalancer(b.ccw, b.bOpts)
- if err != nil {
- b.logger.Errorf("Failed to create child policy of type %s: %v", clusterresolver.Name, err)
- return
- }
- b.childLB = childLB
- b.logger.Infof("Created child policy %p of type %s", b.childLB, clusterresolver.Name)
- }
-
- dms := make([]clusterresolver.DiscoveryMechanism, len(update.updates))
- for i, cu := range update.updates {
- switch cu.ClusterType {
- case xdsresource.ClusterTypeEDS:
- dms[i] = clusterresolver.DiscoveryMechanism{
- Type: clusterresolver.DiscoveryMechanismTypeEDS,
- Cluster: cu.ClusterName,
- EDSServiceName: cu.EDSServiceName,
- MaxConcurrentRequests: cu.MaxRequests,
- }
- if cu.LRSServerConfig == xdsresource.ClusterLRSServerSelf {
- bootstrapConfig := b.xdsClient.BootstrapConfig()
- parsedName := xdsresource.ParseName(cu.ClusterName)
- if parsedName.Scheme == xdsresource.FederationScheme {
- // Is a federation resource name, find the corresponding
- // authority server config.
- if cfg, ok := bootstrapConfig.Authorities[parsedName.Authority]; ok {
- dms[i].LoadReportingServer = cfg.XDSServer
- }
- } else {
- // Not a federation resource name, use the default
- // authority.
- dms[i].LoadReportingServer = bootstrapConfig.XDSServer
- }
- }
- case xdsresource.ClusterTypeLogicalDNS:
- dms[i] = clusterresolver.DiscoveryMechanism{
- Type: clusterresolver.DiscoveryMechanismTypeLogicalDNS,
- Cluster: cu.ClusterName,
- DNSHostname: cu.DNSHostName,
- }
- default:
- b.logger.Infof("Unexpected cluster type %v when handling update from cluster handler", cu.ClusterType)
- }
- if envconfig.XDSOutlierDetection {
- odJSON := cu.OutlierDetection
- // "In the cds LB policy, if the outlier_detection field is not set in
- // the Cluster resource, a "no-op" outlier_detection config will be
- // generated in the corresponding DiscoveryMechanism config, with all
- // fields unset." - A50
- if odJSON == nil {
- // This will pick up top level defaults in Cluster Resolver
- // ParseConfig, but sre and fpe will be nil still so still a
- // "no-op" config.
- odJSON = json.RawMessage(`{}`)
- }
- dms[i].OutlierDetection = odJSON
- }
- }
-
- // Prepare Cluster Resolver config, marshal into JSON, and then Parse it to
- // get configuration to send downward to Cluster Resolver.
- lbCfg := &clusterresolver.LBConfig{
- DiscoveryMechanisms: dms,
- XDSLBPolicy: update.lbPolicy,
- }
- crLBCfgJSON, err := json.Marshal(lbCfg)
- if err != nil {
- // Shouldn't happen, since we just prepared struct.
- b.logger.Errorf("cds_balancer: error marshalling prepared config: %v", lbCfg)
- return
- }
-
- var sc serviceconfig.LoadBalancingConfig
- if sc, err = b.crParser.ParseConfig(crLBCfgJSON); err != nil {
- b.logger.Errorf("cds_balancer: cluster_resolver config generated %v is invalid: %v", string(crLBCfgJSON), err)
- return
- }
-
- ccState := balancer.ClientConnState{
- ResolverState: xdsclient.SetClient(resolver.State{}, b.xdsClient),
- BalancerConfig: sc,
+// A convenience method to create a watcher for cluster `name`. It also
+// registers the watch with the xDS client, and adds the newly created watcher
+// to the list of watchers maintained by the LB policy.
+func (b *cdsBalancer) createAndAddWatcherForCluster(name string) {
+ w := &clusterWatcher{
+ name: name,
+ parent: b,
}
- if err := b.childLB.UpdateClientConnState(ccState); err != nil {
- b.logger.Errorf("Encountered error when sending config {%+v} to child policy: %v", ccState, err)
- }
-}
-
-// run is a long-running goroutine which handles all updates from gRPC. All
-// methods which are invoked directly by gRPC or xdsClient simply push an
-// update onto a channel which is read and acted upon right here.
-func (b *cdsBalancer) run() {
- for {
- select {
- case u, ok := <-b.updateCh.Get():
- if !ok {
- return
- }
- b.updateCh.Load()
- switch update := u.(type) {
- case *ccUpdate:
- b.handleClientConnUpdate(update)
- case exitIdle:
- if b.childLB == nil {
- b.logger.Errorf("Received ExitIdle with no child policy")
- break
- }
- // This implementation assumes the child balancer supports
- // ExitIdle (but still checks for the interface's existence to
- // avoid a panic if not). If the child does not, no subconns
- // will be connected.
- if ei, ok := b.childLB.(balancer.ExitIdler); ok {
- ei.ExitIdle()
- }
- }
- case u := <-b.clusterHandler.updateChannel:
- b.handleWatchUpdate(u)
- case <-b.closed.Done():
- b.clusterHandler.close()
- if b.childLB != nil {
- b.childLB.Close()
- b.childLB = nil
- }
- if b.cachedRoot != nil {
- b.cachedRoot.Close()
- }
- if b.cachedIdentity != nil {
- b.cachedIdentity.Close()
- }
- b.updateCh.Close()
- b.logger.Infof("Shutdown")
- b.done.Fire()
- return
- }
- }
-}
-
-// handleErrorFromUpdate handles both the error from parent ClientConn (from
-// resolver) and the error from xds client (from the watcher). fromParent is
-// true if error is from parent ClientConn.
-//
-// If the error is connection error, it's passed down to the child policy.
-// Nothing needs to be done in CDS (e.g. it doesn't go into fallback).
-//
-// If the error is resource-not-found:
-// - If it's from resolver, it means LDS resources were removed. The CDS watch
-// should be canceled.
-// - If it's from xds client, it means CDS resource were removed. The CDS
-// watcher should keep watching.
-//
-// In both cases, the error will be forwarded to the child balancer. And if
-// error is resource-not-found, the child balancer will stop watching EDS.
-func (b *cdsBalancer) handleErrorFromUpdate(err error, fromParent bool) {
- // This is not necessary today, because xds client never sends connection
- // errors.
- if fromParent && xdsresource.ErrType(err) == xdsresource.ErrorTypeResourceNotFound {
- b.clusterHandler.close()
- }
- if b.childLB != nil {
- if xdsresource.ErrType(err) != xdsresource.ErrorTypeConnection {
- // Connection errors will be sent to the child balancers directly.
- // There's no need to forward them.
- b.childLB.ResolverError(err)
- }
- } else {
- // If child balancer was never created, fail the RPCs with
- // errors.
- b.ccw.UpdateState(balancer.State{
- ConnectivityState: connectivity.TransientFailure,
- Picker: base.NewErrPicker(err),
- })
+ ws := &watcherState{
+ watcher: w,
+ cancelWatch: xdsresource.WatchCluster(b.xdsClient, name, w),
}
+ b.watchers[name] = ws
}
// UpdateClientConnState receives the serviceConfig (which contains the
// clusterName to watch for in CDS) and the xdsClient object from the
// xdsResolver.
func (b *cdsBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
- if b.closed.HasFired() {
- b.logger.Errorf("Received balancer config after close")
- return errBalancerClosed
- }
-
if b.xdsClient == nil {
c := xdsclient.FromResolverState(state.ResolverState)
if c == nil {
+ b.logger.Warningf("Received balancer config with no xDS client")
return balancer.ErrBadResolverState
}
b.xdsClient = c
@@ -510,17 +305,49 @@ func (b *cdsBalancer) UpdateClientConnState(state balancer.ClientConnState) erro
b.logger.Warningf("Received balancer config with no cluster name")
return balancer.ErrBadResolverState
}
- b.updateCh.Put(&ccUpdate{clusterName: lbCfg.ClusterName})
+
+ // Do nothing and return early if configuration has not changed.
+ if b.lbCfg != nil && b.lbCfg.ClusterName == lbCfg.ClusterName {
+ return nil
+ }
+ b.lbCfg = lbCfg
+
+ // Handle the update in a blocking fashion.
+ done := make(chan struct{})
+ ok = b.serializer.Schedule(func(context.Context) {
+ // A config update with a changed top-level cluster name means that none
+ // of our old watchers make any sense any more.
+ b.closeAllWatchers()
+
+ // Create a new watcher for the top-level cluster. Upon resolution, it
+ // could end up creating more watchers if turns out to be an aggregate
+ // cluster.
+ b.createAndAddWatcherForCluster(lbCfg.ClusterName)
+ close(done)
+ })
+ if !ok {
+ // The call to Schedule returns false *only* if the serializer has been
+ // closed, which happens only when we receive an update after close.
+ return errBalancerClosed
+ }
+ <-done
return nil
}
// ResolverError handles errors reported by the xdsResolver.
func (b *cdsBalancer) ResolverError(err error) {
- if b.closed.HasFired() {
- b.logger.Warningf("Received resolver error after close: %v", err)
- return
- }
- b.updateCh.Put(&ccUpdate{err: err})
+ b.serializer.Schedule(func(context.Context) {
+ // Resource not found error is reported by the resolver when the
+ // top-level cluster resource is removed by the management server.
+ if xdsresource.ErrType(err) == xdsresource.ErrorTypeResourceNotFound {
+ b.closeAllWatchers()
+ }
+ var root string
+ if b.lbCfg != nil {
+ root = b.lbCfg.ClusterName
+ }
+ b.onClusterError(root, err)
+ })
}
// UpdateSubConnState handles subConn updates from gRPC.
@@ -528,15 +355,303 @@ func (b *cdsBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Sub
b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, state)
}
+// Closes all registered cluster wathers and removes them from the internal map.
+//
+// Only executed in the context of a serializer callback.
+func (b *cdsBalancer) closeAllWatchers() {
+ for name, state := range b.watchers {
+ state.cancelWatch()
+ delete(b.watchers, name)
+ }
+}
+
// Close cancels the CDS watch, closes the child policy and closes the
// cdsBalancer.
func (b *cdsBalancer) Close() {
- b.closed.Fire()
- <-b.done.Done()
+ b.serializer.Schedule(func(ctx context.Context) {
+ b.closeAllWatchers()
+
+ if b.childLB != nil {
+ b.childLB.Close()
+ b.childLB = nil
+ }
+ if b.cachedRoot != nil {
+ b.cachedRoot.Close()
+ }
+ if b.cachedIdentity != nil {
+ b.cachedIdentity.Close()
+ }
+ b.logger.Infof("Shutdown")
+ })
+ b.serializerCancel()
+ <-b.serializer.Done()
}
func (b *cdsBalancer) ExitIdle() {
- b.updateCh.Put(exitIdle{})
+ b.serializer.Schedule(func(context.Context) {
+ if b.childLB == nil {
+ b.logger.Warningf("Received ExitIdle with no child policy")
+ return
+ }
+ // This implementation assumes the child balancer supports
+ // ExitIdle (but still checks for the interface's existence to
+ // avoid a panic if not). If the child does not, no subconns
+ // will be connected.
+ if ei, ok := b.childLB.(balancer.ExitIdler); ok {
+ ei.ExitIdle()
+ }
+ })
+}
+
+// Handles a good Cluster update from the xDS client. Kicks off the discovery
+// mechanism generation process from the top-level cluster and if the cluster
+// graph is resolved, generates child policy config and pushes it down.
+//
+// Only executed in the context of a serializer callback.
+func (b *cdsBalancer) onClusterUpdate(name string, update xdsresource.ClusterUpdate) {
+ state := b.watchers[name]
+ if state == nil {
+ // We are currently not watching this cluster anymore. Return early.
+ return
+ }
+
+ b.logger.Infof("Received Cluster resource: %s", pretty.ToJSON(update))
+
+ // Update the watchers map with the update for the cluster.
+ state.lastUpdate = &update
+
+ // For an aggregate cluster, always use the security configuration on the
+ // root cluster.
+ if name == b.lbCfg.ClusterName {
+ // Process the security config from the received update before building the
+ // child policy or forwarding the update to it. We do this because the child
+ // policy may try to create a new subConn inline. Processing the security
+ // configuration here and setting up the handshakeInfo will make sure that
+ // such attempts are handled properly.
+ if err := b.handleSecurityConfig(update.SecurityCfg); err != nil {
+ // If the security config is invalid, for example, if the provider
+ // instance is not found in the bootstrap config, we need to put the
+ // channel in transient failure.
+ b.onClusterError(name, fmt.Errorf("received Cluster resource contains invalid security config: %v", err))
+ return
+ }
+ }
+
+ clustersSeen := make(map[string]bool)
+ dms, ok, err := b.generateDMsForCluster(b.lbCfg.ClusterName, 0, nil, clustersSeen)
+ if err != nil {
+ b.onClusterError(b.lbCfg.ClusterName, fmt.Errorf("failed to generate discovery mechanisms: %v", err))
+ return
+ }
+ if ok {
+ if len(dms) == 0 {
+ b.onClusterError(b.lbCfg.ClusterName, fmt.Errorf("aggregate cluster graph has no leaf clusters"))
+ return
+ }
+ // Child policy is built the first time we resolve the cluster graph.
+ if b.childLB == nil {
+ childLB, err := newChildBalancer(b.ccw, b.bOpts)
+ if err != nil {
+ b.logger.Errorf("Failed to create child policy of type %s: %v", clusterresolver.Name, err)
+ return
+ }
+ b.childLB = childLB
+ b.logger.Infof("Created child policy %p of type %s", b.childLB, clusterresolver.Name)
+ }
+
+ // Prepare the child policy configuration, convert it to JSON, have it
+ // parsed by the child policy to convert it into service config and push
+ // an update to it.
+ childCfg := &clusterresolver.LBConfig{
+ DiscoveryMechanisms: dms,
+ // The LB policy is configured by the root cluster.
+ XDSLBPolicy: b.watchers[b.lbCfg.ClusterName].lastUpdate.LBPolicy,
+ }
+ cfgJSON, err := json.Marshal(childCfg)
+ if err != nil {
+ // Shouldn't happen, since we just prepared struct.
+ b.logger.Errorf("cds_balancer: error marshalling prepared config: %v", childCfg)
+ return
+ }
+
+ var sc serviceconfig.LoadBalancingConfig
+ if sc, err = b.childConfigParser.ParseConfig(cfgJSON); err != nil {
+ b.logger.Errorf("cds_balancer: cluster_resolver config generated %v is invalid: %v", string(cfgJSON), err)
+ return
+ }
+
+ ccState := balancer.ClientConnState{
+ ResolverState: xdsclient.SetClient(resolver.State{}, b.xdsClient),
+ BalancerConfig: sc,
+ }
+ if err := b.childLB.UpdateClientConnState(ccState); err != nil {
+ b.logger.Errorf("Encountered error when sending config {%+v} to child policy: %v", ccState, err)
+ }
+ }
+ // We no longer need the clusters that we did not see in this iteration of
+ // generateDMsForCluster().
+	for cluster, state := range b.watchers {
+		if clustersSeen[cluster] {
+			continue
+		}
+		state.cancelWatch()
+		delete(b.watchers, cluster)
+	}
+}
+
+// Handles an error Cluster update from the xDS client. Propagates the error
+// down to the child policy if one exists, or puts the channel in
+// TRANSIENT_FAILURE.
+//
+// Only executed in the context of a serializer callback.
+func (b *cdsBalancer) onClusterError(name string, err error) {
+ b.logger.Warningf("Cluster resource %q received error update: %v", name, err)
+
+ if b.childLB != nil {
+ if xdsresource.ErrType(err) != xdsresource.ErrorTypeConnection {
+ // Connection errors will be sent to the child balancers directly.
+ // There's no need to forward them.
+ b.childLB.ResolverError(err)
+ }
+ } else {
+ // If child balancer was never created, fail the RPCs with
+ // errors.
+ b.ccw.UpdateState(balancer.State{
+ ConnectivityState: connectivity.TransientFailure,
+ Picker: base.NewErrPicker(fmt.Errorf("%q: %v", name, err)),
+ })
+ }
+}
+
+// Handles a resource-not-found error from the xDS client. Propagates the error
+// down to the child policy if one exists, or puts the channel in
+// TRANSIENT_FAILURE.
+//
+// Only executed in the context of a serializer callback.
+func (b *cdsBalancer) onClusterResourceNotFound(name string) {
+ err := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "resource name %q of type Cluster not found in received response", name)
+ if b.childLB != nil {
+ b.childLB.ResolverError(err)
+ } else {
+ // If child balancer was never created, fail the RPCs with errors.
+ b.ccw.UpdateState(balancer.State{
+ ConnectivityState: connectivity.TransientFailure,
+ Picker: base.NewErrPicker(err),
+ })
+ }
+}
+
+// Generates discovery mechanisms for the cluster graph rooted at `name`. This
+// method is called recursively if `name` corresponds to an aggregate cluster,
+// with the base case for recursion being a leaf cluster. If a new cluster is
+// encountered when traversing the graph, a watcher is created for it.
+//
+// Inputs:
+// - name: name of the cluster to start from
+// - depth: recursion depth of the current cluster, starting from root
+// - dms: prioritized list of current discovery mechanisms
+// - clustersSeen: cluster names seen so far in the graph traversal
+//
+// Outputs:
+// - new prioritized list of discovery mechanisms
+// - boolean indicating if traversal of the aggregate cluster graph is
+// complete. If false, the above list of discovery mechanisms is ignored.
+// - error indicating if any error was encountered as part of the graph
+// traversal. If error is non-nil, the other return values are ignored.
+//
+// Only executed in the context of a serializer callback.
+func (b *cdsBalancer) generateDMsForCluster(name string, depth int, dms []clusterresolver.DiscoveryMechanism, clustersSeen map[string]bool) ([]clusterresolver.DiscoveryMechanism, bool, error) {
+ if depth >= aggregateClusterMaxDepth {
+ return dms, false, errExceedsMaxDepth
+ }
+
+ if clustersSeen[name] {
+ // Discovery mechanism already seen through a different branch.
+ return dms, true, nil
+ }
+ clustersSeen[name] = true
+
+ state, ok := b.watchers[name]
+ if !ok {
+ // If we have not seen this cluster so far, create a watcher for it, add
+ // it to the map, start the watch and return.
+ b.createAndAddWatcherForCluster(name)
+
+ // And since we just created the watcher, we know that we haven't
+ // resolved the cluster graph yet.
+ return dms, false, nil
+ }
+
+ // A watcher exists, but no update has been received yet.
+ if state.lastUpdate == nil {
+ return dms, false, nil
+ }
+
+ var dm clusterresolver.DiscoveryMechanism
+ cluster := state.lastUpdate
+ switch cluster.ClusterType {
+ case xdsresource.ClusterTypeAggregate:
+ // This boolean is used to track if any of the clusters in the graph is
+ // not yet completely resolved or returns errors, thereby allowing us to
+ // traverse as much of the graph as possible (and start the associated
+ // watches where required) to ensure that clustersSeen contains all
+ // clusters in the graph that we can traverse to.
+ missingCluster := false
+ var err error
+ for _, child := range cluster.PrioritizedClusterNames {
+ var ok bool
+ dms, ok, err = b.generateDMsForCluster(child, depth+1, dms, clustersSeen)
+ if err != nil || !ok {
+ missingCluster = true
+ }
+ }
+ return dms, !missingCluster, err
+ case xdsresource.ClusterTypeEDS:
+ dm = clusterresolver.DiscoveryMechanism{
+ Type: clusterresolver.DiscoveryMechanismTypeEDS,
+ Cluster: cluster.ClusterName,
+ EDSServiceName: cluster.EDSServiceName,
+ MaxConcurrentRequests: cluster.MaxRequests,
+ }
+ if cluster.LRSServerConfig == xdsresource.ClusterLRSServerSelf {
+ bootstrapConfig := b.xdsClient.BootstrapConfig()
+ parsedName := xdsresource.ParseName(cluster.ClusterName)
+ if parsedName.Scheme == xdsresource.FederationScheme {
+ // Is a federation resource name, find the corresponding
+ // authority server config.
+ if cfg, ok := bootstrapConfig.Authorities[parsedName.Authority]; ok {
+ dm.LoadReportingServer = cfg.XDSServer
+ }
+ } else {
+ // Not a federation resource name, use the default
+ // authority.
+ dm.LoadReportingServer = bootstrapConfig.XDSServer
+ }
+ }
+ case xdsresource.ClusterTypeLogicalDNS:
+ dm = clusterresolver.DiscoveryMechanism{
+ Type: clusterresolver.DiscoveryMechanismTypeLogicalDNS,
+ Cluster: cluster.ClusterName,
+ DNSHostname: cluster.DNSHostName,
+ }
+ }
+ if envconfig.XDSOutlierDetection {
+ odJSON := cluster.OutlierDetection
+ // "In the cds LB policy, if the outlier_detection field is not set in
+ // the Cluster resource, a "no-op" outlier_detection config will be
+ // generated in the corresponding DiscoveryMechanism config, with all
+ // fields unset." - A50
+ if odJSON == nil {
+			// This will pick up the top-level defaults in the cluster_resolver
+			// ParseConfig, but the success-rate and failure-percentage ejection
+			// sections will still be nil, so this is still a "no-op" config.
+ odJSON = json.RawMessage(`{}`)
+ }
+ dm.OutlierDetection = odJSON
+ }
+
+ return append(dms, dm), true, nil
}
// ccWrapper wraps the balancer.ClientConn passed to the CDS balancer at
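
To make the recursion above concrete, here is a purely illustrative example of the output it is expected to produce. All cluster names, the EDS service name, and the DNS hostname below are made up, and the snippet only compiles from within gRPC's own xds packages because clusterresolver is an internal package; it simply spells out the prioritized DiscoveryMechanism list a traversal like generateDMsForCluster should yield once every cluster in a two-level aggregate graph has reported an update.

```go
package cdsbalancer

import "google.golang.org/grpc/xds/internal/balancer/clusterresolver"

// Hypothetical aggregate cluster graph (all names are made up):
//
//	cluster:root  (AGGREGATE) -> [cluster:eds-a, cluster:agg-b]
//	cluster:agg-b (AGGREGATE) -> [cluster:dns-c]
//
// A priority-ordered, depth-first traversal produces one mechanism per leaf
// cluster: the EDS leaf first, because cluster:eds-a is listed first in the
// root cluster, then the LOGICAL_DNS leaf reached through cluster:agg-b.
var exampleDMs = []clusterresolver.DiscoveryMechanism{
	{
		Type:           clusterresolver.DiscoveryMechanismTypeEDS,
		Cluster:        "cluster:eds-a",
		EDSServiceName: "eds-service-a",
	},
	{
		Type:        clusterresolver.DiscoveryMechanismTypeLogicalDNS,
		Cluster:     "cluster:dns-c",
		DNSHostname: "backend.example.com:443",
	},
}
```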
diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_handler.go b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_handler.go
deleted file mode 100644
index aa2d9674a7904..0000000000000
--- a/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_handler.go
+++ /dev/null
@@ -1,368 +0,0 @@
-/*
- * Copyright 2021 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package cdsbalancer
-
-import (
- "encoding/json"
- "errors"
- "sync"
-
- "google.golang.org/grpc/xds/internal/xdsclient"
- "google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
-)
-
-const maxDepth = 16
-
-var (
- errNotReceivedUpdate = errors.New("tried to construct a cluster update on a cluster that has not received an update")
- errExceedsMaxDepth = errors.New("aggregate cluster graph exceeds max depth")
-)
-
-// clusterHandlerUpdate wraps the information received from the registered CDS
-// watcher. A non-nil error is propagated to the underlying cluster_resolver
-// balancer. A valid update results in creating a new cluster_resolver balancer
-// (if one doesn't already exist) and pushing the update to it.
-type clusterHandlerUpdate struct {
- // securityCfg is the Security Config from the top (root) cluster.
- securityCfg *xdsresource.SecurityConfig
-
- // lbPolicy is the the child of the cluster_impl policy, for all priorities.
- lbPolicy json.RawMessage
-
- // updates is a list of ClusterUpdates from all the leaf clusters.
- updates []xdsresource.ClusterUpdate
- err error
-}
-
-// clusterHandler will be given a name representing a cluster. It will then
-// update the CDS policy constantly with a list of Clusters to pass down to
-// XdsClusterResolverLoadBalancingPolicyConfig in a stream like fashion.
-type clusterHandler struct {
- parent *cdsBalancer
-
- // A mutex to protect entire tree of clusters.
- clusterMutex sync.Mutex
- rootClusterName string
-
- createdClusters map[string]*clusterNode
-
- // A way to ping CDS Balancer about any updates or errors to a Node in the
- // tree. This will either get called from this handler constructing an
- // update or from a child with an error. Capacity of one as the only update
- // CDS Balancer cares about is the most recent update.
- updateChannel chan clusterHandlerUpdate
-}
-
-func newClusterHandler(parent *cdsBalancer) *clusterHandler {
- return &clusterHandler{
- parent: parent,
- updateChannel: make(chan clusterHandlerUpdate, 1),
- createdClusters: make(map[string]*clusterNode),
- }
-}
-
-func (ch *clusterHandler) updateRootCluster(rootClusterName string) {
- ch.clusterMutex.Lock()
- defer ch.clusterMutex.Unlock()
- if ch.createdClusters[ch.rootClusterName] == nil {
- // Construct a root node on first update.
- createClusterNode(rootClusterName, ch.parent.xdsClient, ch, 0)
- ch.rootClusterName = rootClusterName
- return
- }
- // Check if root cluster was changed. If it was, delete old one and start
- // new one, if not do nothing.
- if rootClusterName != ch.rootClusterName {
- ch.createdClusters[ch.rootClusterName].delete()
- createClusterNode(rootClusterName, ch.parent.xdsClient, ch, 0)
- ch.rootClusterName = rootClusterName
- }
-}
-
-// This function tries to construct a cluster update to send to CDS.
-func (ch *clusterHandler) constructClusterUpdate() {
- if ch.createdClusters[ch.rootClusterName] == nil {
- // If root is nil, this handler is closed, ignore the update.
- return
- }
- clusterUpdate, err := ch.createdClusters[ch.rootClusterName].constructClusterUpdate(make(map[string]bool))
- if err != nil {
- // If there was an error received no op, as this can mean one of the
- // children hasn't received an update yet, or the graph continued to
- // stay in an error state. If the graph continues to stay in an error
- // state, no new error needs to be written to the update buffer as that
- // would be redundant information.
- return
- }
- if clusterUpdate == nil {
- // This means that there was an aggregated cluster with no EDS or DNS as
- // leaf nodes. No update to be written.
- return
- }
- // For a ClusterUpdate, the only update CDS cares about is the most
- // recent one, so opportunistically drain the update channel before
- // sending the new update.
- select {
- case <-ch.updateChannel:
- default:
- }
-
- ch.updateChannel <- clusterHandlerUpdate{
- securityCfg: ch.createdClusters[ch.rootClusterName].clusterUpdate.SecurityCfg,
- lbPolicy: ch.createdClusters[ch.rootClusterName].clusterUpdate.LBPolicy,
- updates: clusterUpdate,
- }
-}
-
-// close() is meant to be called by CDS when the CDS balancer is closed, and it
-// cancels the watches for every cluster in the cluster tree.
-func (ch *clusterHandler) close() {
- ch.clusterMutex.Lock()
- defer ch.clusterMutex.Unlock()
- if ch.createdClusters[ch.rootClusterName] == nil {
- return
- }
- ch.createdClusters[ch.rootClusterName].delete()
- ch.rootClusterName = ""
-}
-
-// This logically represents a cluster. This handles all the logic for starting
-// and stopping a cluster watch, handling any updates, and constructing a list
-// recursively for the ClusterHandler.
-type clusterNode struct {
- // A way to cancel the watch for the cluster.
- cancelFunc func()
-
- // A list of children, as the Node can be an aggregate Cluster.
- children []string
-
- // A ClusterUpdate in order to build a list of cluster updates for CDS to
- // send down to child XdsClusterResolverLoadBalancingPolicy.
- clusterUpdate xdsresource.ClusterUpdate
-
- // This boolean determines whether this Node has received an update or not.
- // This isn't the best practice, but this will protect a list of Cluster
- // Updates from being constructed if a cluster in the tree has not received
- // an update yet.
- receivedUpdate bool
-
- clusterHandler *clusterHandler
-
- depth int32
- refCount int32
-
- // maxDepthErr is set if this cluster node is an aggregate cluster and has a
- // child that causes the graph to exceed the maximum depth allowed. This is
- // used to show a cluster graph as being in an error state when it constructs
- // a cluster update.
- maxDepthErr error
-}
-
-// CreateClusterNode creates a cluster node from a given clusterName. This will
-// also start the watch for that cluster.
-func createClusterNode(clusterName string, xdsClient xdsclient.XDSClient, topLevelHandler *clusterHandler, depth int32) {
- // If the cluster has already been created, simply return, which ignores
- // duplicates.
- if topLevelHandler.createdClusters[clusterName] != nil {
- topLevelHandler.createdClusters[clusterName].refCount++
- return
- }
- c := &clusterNode{
- clusterHandler: topLevelHandler,
- depth: depth,
- refCount: 1,
- }
- // Communicate with the xds client here.
- topLevelHandler.parent.logger.Infof("CDS watch started on %v", clusterName)
- cancel := xdsClient.WatchCluster(clusterName, c.handleResp)
- c.cancelFunc = func() {
- topLevelHandler.parent.logger.Infof("CDS watch canceled on %v", clusterName)
- cancel()
- }
- topLevelHandler.createdClusters[clusterName] = c
-}
-
-// This function cancels the cluster watch on the cluster and all of it's
-// children.
-func (c *clusterNode) delete() {
- c.refCount--
- if c.refCount == 0 {
- c.cancelFunc()
- delete(c.clusterHandler.createdClusters, c.clusterUpdate.ClusterName)
- for _, child := range c.children {
- if c.clusterHandler.createdClusters[child] != nil {
- c.clusterHandler.createdClusters[child].delete()
- }
- }
- }
-}
-
-// Construct cluster update (potentially a list of ClusterUpdates) for a node.
-func (c *clusterNode) constructClusterUpdate(clustersSeen map[string]bool) ([]xdsresource.ClusterUpdate, error) {
- // If the cluster has not yet received an update, the cluster update is not
- // yet ready.
- if !c.receivedUpdate {
- return nil, errNotReceivedUpdate
- }
- if c.maxDepthErr != nil {
- return nil, c.maxDepthErr
- }
- // Ignore duplicates. It's ok to ignore duplicates because the second
- // occurrence of a cluster will never be used. I.e. in [C, D, C], the second
- // C will never be used (the only way to fall back to lower priority D is if
- // C is down, which means second C will never be chosen). Thus, [C, D, C] is
- // logically equivalent to [C, D].
- if clustersSeen[c.clusterUpdate.ClusterName] {
- return []xdsresource.ClusterUpdate{}, nil
- }
- clustersSeen[c.clusterUpdate.ClusterName] = true
-
- // Base case - LogicalDNS or EDS. Both of these cluster types will be tied
- // to a single ClusterUpdate.
- if c.clusterUpdate.ClusterType != xdsresource.ClusterTypeAggregate {
- return []xdsresource.ClusterUpdate{c.clusterUpdate}, nil
- }
-
- // If an aggregate construct a list by recursively calling down to all of
- // it's children.
- var childrenUpdates []xdsresource.ClusterUpdate
- for _, child := range c.children {
- childUpdateList, err := c.clusterHandler.createdClusters[child].constructClusterUpdate(clustersSeen)
- if err != nil {
- return nil, err
- }
- childrenUpdates = append(childrenUpdates, childUpdateList...)
- }
- return childrenUpdates, nil
-}
-
-// handleResp handles a xds response for a particular cluster. This function
-// also handles any logic with regards to any child state that may have changed.
-// At the end of the handleResp(), the clusterUpdate will be pinged in certain
-// situations to try and construct an update to send back to CDS.
-func (c *clusterNode) handleResp(clusterUpdate xdsresource.ClusterUpdate, err error) {
- c.clusterHandler.clusterMutex.Lock()
- defer c.clusterHandler.clusterMutex.Unlock()
- if err != nil { // Write this error for run() to pick up in CDS LB policy.
- // For a ClusterUpdate, the only update CDS cares about is the most
- // recent one, so opportunistically drain the update channel before
- // sending the new update.
- select {
- case <-c.clusterHandler.updateChannel:
- default:
- }
- c.clusterHandler.updateChannel <- clusterHandlerUpdate{err: err}
- c.receivedUpdate = false
- c.maxDepthErr = nil
- return
- }
-
- c.receivedUpdate = true
- c.clusterUpdate = clusterUpdate
-
- // If the cluster was a leaf node, if the cluster update received had change
- // in the cluster update then the overall cluster update would change and
- // there is a possibility for the overall update to build so ping cluster
- // handler to return. Also, if there was any children from previously,
- // delete the children, as the cluster type is no longer an aggregate
- // cluster.
- if clusterUpdate.ClusterType != xdsresource.ClusterTypeAggregate {
- for _, child := range c.children {
- c.clusterHandler.createdClusters[child].delete()
- }
- c.children = nil
- c.maxDepthErr = nil
- // This is an update in the one leaf node, should try to send an update
- // to the parent CDS balancer.
- //
- // Note that this update might be a duplicate from the previous one.
- // Because the update contains not only the cluster name to watch, but
- // also the extra fields (e.g. security config). There's no good way to
- // compare all the fields.
- c.clusterHandler.constructClusterUpdate()
- return
- }
-
- // Aggregate cluster handling.
- if len(clusterUpdate.PrioritizedClusterNames) >= 1 {
- if c.depth == maxDepth-1 {
- // For a ClusterUpdate, the only update CDS cares about is the most
- // recent one, so opportunistically drain the update channel before
- // sending the new update.
- select {
- case <-c.clusterHandler.updateChannel:
- default:
- }
- c.clusterHandler.updateChannel <- clusterHandlerUpdate{err: errExceedsMaxDepth}
- c.children = []string{}
- c.maxDepthErr = errExceedsMaxDepth
- return
- }
- }
-
- newChildren := make(map[string]bool)
- for _, childName := range clusterUpdate.PrioritizedClusterNames {
- newChildren[childName] = true
- }
-
- // These booleans help determine whether this callback will ping the overall
- // clusterHandler to try and construct an update to send back to CDS. This
- // will be determined by whether there would be a change in the overall
- // clusterUpdate for the whole tree (ex. change in clusterUpdate for current
- // cluster or a deleted child) and also if there's even a possibility for
- // the update to build (ex. if a child is created and a watch is started,
- // that child hasn't received an update yet due to the mutex lock on this
- // callback).
- var createdChild bool
-
- // This map will represent the current children of the cluster. It will be
- // first added to in order to represent the new children. It will then have
- // any children deleted that are no longer present.
- mapCurrentChildren := make(map[string]bool)
- for _, child := range c.children {
- mapCurrentChildren[child] = true
- }
-
- // Add and construct any new child nodes.
- for child := range newChildren {
- if _, inChildrenAlready := mapCurrentChildren[child]; !inChildrenAlready {
- createClusterNode(child, c.clusterHandler.parent.xdsClient, c.clusterHandler, c.depth+1)
- }
- }
-
- // Delete any child nodes no longer in the aggregate cluster's children.
- for child := range mapCurrentChildren {
- if _, stillAChild := newChildren[child]; !stillAChild {
- c.clusterHandler.createdClusters[child].delete()
- delete(mapCurrentChildren, child)
- }
- }
-
- c.children = clusterUpdate.PrioritizedClusterNames
-
- c.maxDepthErr = nil
- // If the cluster is an aggregate cluster, if this callback created any new
- // child cluster nodes, then there's no possibility for a full cluster
- // update to successfully build, as those created children will not have
- // received an update yet. Even if this update did not delete a child, there
- // is still a possibility for the cluster update to build, as the aggregate
- // cluster can ignore duplicated children and thus the update can fill out
- // the full cluster update tree.
- if !createdChild {
- c.clusterHandler.constructClusterUpdate()
- }
-}
diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_watcher.go b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_watcher.go
new file mode 100644
index 0000000000000..0b0d168376d74
--- /dev/null
+++ b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_watcher.go
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package cdsbalancer
+
+import (
+ "context"
+
+ "google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
+)
+
+// clusterWatcher implements the xdsresource.ClusterWatcher interface, and is
+// passed to the xDS client as part of the WatchResource() API.
+//
+// It watches a single cluster and handles callbacks from the xDS client by
+// scheduling them on the parent LB policy's serializer.
+type clusterWatcher struct {
+ name string
+ parent *cdsBalancer
+}
+
+func (cw *clusterWatcher) OnUpdate(u *xdsresource.ClusterResourceData) {
+ cw.parent.serializer.Schedule(func(context.Context) {
+ cw.parent.onClusterUpdate(cw.name, u.Resource)
+ })
+}
+
+func (cw *clusterWatcher) OnError(err error) {
+ cw.parent.serializer.Schedule(func(context.Context) {
+ cw.parent.onClusterError(cw.name, err)
+ })
+}
+
+func (cw *clusterWatcher) OnResourceDoesNotExist() {
+ cw.parent.serializer.Schedule(func(context.Context) {
+ cw.parent.onClusterResourceNotFound(cw.name)
+ })
+}
+
+// watcherState groups the state associated with a clusterWatcher.
+type watcherState struct {
+ watcher *clusterWatcher // The underlying watcher.
+ cancelWatch func() // Cancel func to cancel the watch.
+ lastUpdate *xdsresource.ClusterUpdate // Most recent update received for this cluster.
+}
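
The createAndAddWatcherForCluster helper referenced by generateDMsForCluster is not part of this hunk. A plausible sketch of it, assuming only the types above and the xdsresource.WatchCluster helper that appears elsewhere in this diff, would create one clusterWatcher per cluster, register it with the xDS client, and record the resulting cancel func in the balancer's watchers map:

```go
// Sketch only: the real implementation lives in cdsbalancer.go and may differ
// in detail. It is shown here to connect clusterWatcher and watcherState to
// the watchers map consulted by generateDMsForCluster.
func (b *cdsBalancer) createAndAddWatcherForCluster(name string) {
	w := &clusterWatcher{name: name, parent: b}
	b.watchers[name] = &watcherState{
		watcher:     w,
		cancelWatch: xdsresource.WatchCluster(b.xdsClient, name, w),
	}
}
```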
diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go
index b9a81e9ba8293..151c54dae6d09 100644
--- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go
+++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go
@@ -200,7 +200,7 @@ func (rr *resourceResolver) updateMechanisms(mechanisms []DiscoveryMechanism) {
for dm, r := range rr.childrenMap {
if !newDMs[dm] {
delete(rr.childrenMap, dm)
- r.r.stop()
+ go r.r.stop()
}
}
// Regenerate even if there's no change in discovery mechanism, in case
diff --git a/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go b/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go
index 02470ddca5e45..06f6a47519c41 100644
--- a/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go
+++ b/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go
@@ -31,6 +31,7 @@ import (
"google.golang.org/grpc/codes"
"google.golang.org/grpc/internal/envconfig"
"google.golang.org/grpc/internal/grpcrand"
+ "google.golang.org/grpc/internal/grpcutil"
iresolver "google.golang.org/grpc/internal/resolver"
"google.golang.org/grpc/internal/serviceconfig"
"google.golang.org/grpc/internal/wrr"
@@ -229,19 +230,30 @@ func retryConfigToPolicy(config *xdsresource.RetryConfig) *serviceconfig.RetryPo
func (cs *configSelector) generateHash(rpcInfo iresolver.RPCInfo, hashPolicies []*xdsresource.HashPolicy) uint64 {
var hash uint64
var generatedHash bool
+ var md, emd metadata.MD
+ var mdRead bool
for _, policy := range hashPolicies {
var policyHash uint64
var generatedPolicyHash bool
switch policy.HashPolicyType {
case xdsresource.HashPolicyTypeHeader:
- md, ok := metadata.FromOutgoingContext(rpcInfo.Context)
- if !ok {
+ if strings.HasSuffix(policy.HeaderName, "-bin") {
continue
}
- values := md.Get(policy.HeaderName)
- // If the header isn't present, no-op.
+ if !mdRead {
+ md, _ = metadata.FromOutgoingContext(rpcInfo.Context)
+ emd, _ = grpcutil.ExtraMetadata(rpcInfo.Context)
+ mdRead = true
+ }
+ values := emd.Get(policy.HeaderName)
if len(values) == 0 {
- continue
+ // Extra metadata (e.g. the "content-type" header) takes
+ // precedence over the user's metadata.
+ values = md.Get(policy.HeaderName)
+ if len(values) == 0 {
+ // If the header isn't present at all, this policy is a no-op.
+ continue
+ }
}
joinedValues := strings.Join(values, ",")
if policy.Regex != nil {
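
The reworked header branch above skips binary ("-bin") headers and lets gRPC's internally set "extra" metadata win over the caller's outgoing metadata. The standalone sketch below restates just that value-selection rule outside the resolver; the function name and the example header keys are invented for illustration.

```go
package main

import (
	"fmt"
	"strings"

	"google.golang.org/grpc/metadata"
)

// headerHashInput mirrors the value-selection rules used by the header hash
// policy: binary ("-bin") headers are ignored, values from the "extra"
// metadata (set internally, e.g. "content-type") take precedence over values
// from the user's outgoing metadata, and multiple values are joined with
// commas. This is an illustrative restatement, not the gRPC API itself.
func headerHashInput(header string, extra, user metadata.MD) (string, bool) {
	if strings.HasSuffix(header, "-bin") {
		return "", false
	}
	values := extra.Get(header)
	if len(values) == 0 {
		values = user.Get(header)
		if len(values) == 0 {
			return "", false
		}
	}
	return strings.Join(values, ","), true
}

func main() {
	user := metadata.Pairs("x-session-id", "abc123")
	extra := metadata.Pairs("content-type", "application/grpc")
	fmt.Println(headerHashInput("x-session-id", extra, user)) // abc123 true
	fmt.Println(headerHashInput("x-token-bin", extra, user))  // "" false
}
```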
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go
index 44f6d3bc0a1cf..542c5e025fd1b 100644
--- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go
@@ -32,7 +32,6 @@ import (
type XDSClient interface {
WatchListener(string, func(xdsresource.ListenerUpdate, error)) func()
WatchRouteConfig(string, func(xdsresource.RouteConfigUpdate, error)) func()
- WatchCluster(string, func(xdsresource.ClusterUpdate, error)) func()
// WatchResource uses xDS to discover the resource associated with the
// provided resource name. The resource type implementation determines how
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go
index e503349dbc29a..5866221e2696d 100644
--- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go
@@ -81,37 +81,6 @@ func (c *clientImpl) WatchRouteConfig(resourceName string, cb func(xdsresource.R
return xdsresource.WatchRouteConfig(c, resourceName, watcher)
}
-// This is only required temporarily, while we modify the
-// clientImpl.WatchCluster API to be implemented via the wrapper WatchCluster()
-// API which calls the WatchResource() API.
-type clusterWatcher struct {
- resourceName string
- cb func(xdsresource.ClusterUpdate, error)
-}
-
-func (c *clusterWatcher) OnUpdate(update *xdsresource.ClusterResourceData) {
- c.cb(update.Resource, nil)
-}
-
-func (c *clusterWatcher) OnError(err error) {
- c.cb(xdsresource.ClusterUpdate{}, err)
-}
-
-func (c *clusterWatcher) OnResourceDoesNotExist() {
- err := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "resource name %q of type Cluster not found in received response", c.resourceName)
- c.cb(xdsresource.ClusterUpdate{}, err)
-}
-
-// WatchCluster uses CDS to discover information about the Cluster resource
-// identified by resourceName.
-//
-// WatchCluster can be called multiple times, with same or different
-// clusterNames. Each call will start an independent watcher for the resource.
-func (c *clientImpl) WatchCluster(resourceName string, cb func(xdsresource.ClusterUpdate, error)) (cancel func()) {
- watcher := &clusterWatcher{resourceName: resourceName, cb: cb}
- return xdsresource.WatchCluster(c, resourceName, watcher)
-}
-
// WatchResource uses xDS to discover the resource associated with the provided
// resource name. The resource type implementation determines how xDS requests
// are sent out and how responses are deserialized and validated. Upon receipt
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/loadreport.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/loadreport.go
index 89ffc4fcec661..4b8ca29ce93f3 100644
--- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/loadreport.go
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/loadreport.go
@@ -25,6 +25,7 @@ import (
"time"
"github.com/golang/protobuf/ptypes"
+ "google.golang.org/grpc/internal/backoff"
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/internal/pretty"
"google.golang.org/grpc/xds/internal"
@@ -100,54 +101,36 @@ func (t *Transport) lrsRunner(ctx context.Context) {
node := proto.Clone(t.nodeProto).(*v3corepb.Node)
node.ClientFeatures = append(node.ClientFeatures, "envoy.lrs.supports_send_all_clusters")
- backoffAttempt := 0
- backoffTimer := time.NewTimer(0)
- for ctx.Err() == nil {
- select {
- case <-backoffTimer.C:
- case <-ctx.Done():
- backoffTimer.Stop()
- return
+ runLoadReportStream := func() error {
+ // streamCtx is created and canceled in case we terminate the stream
+ // early for any reason, to avoid gRPC-Go leaking the RPC's monitoring
+ // goroutine.
+ streamCtx, cancel := context.WithCancel(ctx)
+ defer cancel()
+ stream, err := v3lrsgrpc.NewLoadReportingServiceClient(t.cc).StreamLoadStats(streamCtx)
+ if err != nil {
+ t.logger.Warningf("Creating LRS stream to server %q failed: %v", t.serverURI, err)
+ return nil
}
+ t.logger.Infof("Created LRS stream to server %q", t.serverURI)
- // We reset backoff state when we successfully receive at least one
- // message from the server.
- resetBackoff := func() bool {
- // streamCtx is created and canceled in case we terminate the stream
- // early for any reason, to avoid gRPC-Go leaking the RPC's monitoring
- // goroutine.
- streamCtx, cancel := context.WithCancel(ctx)
- defer cancel()
- stream, err := v3lrsgrpc.NewLoadReportingServiceClient(t.cc).StreamLoadStats(streamCtx)
- if err != nil {
- t.logger.Warningf("Creating LRS stream to server %q failed: %v", t.serverURI, err)
- return false
- }
- t.logger.Infof("Created LRS stream to server %q", t.serverURI)
-
- if err := t.sendFirstLoadStatsRequest(stream, node); err != nil {
- t.logger.Warningf("Sending first LRS request failed: %v", err)
- return false
- }
-
- clusters, interval, err := t.recvFirstLoadStatsResponse(stream)
- if err != nil {
- t.logger.Warningf("Reading from LRS stream failed: %v", err)
- return false
- }
-
- t.sendLoads(streamCtx, stream, clusters, interval)
- return true
- }()
+ if err := t.sendFirstLoadStatsRequest(stream, node); err != nil {
+ t.logger.Warningf("Sending first LRS request failed: %v", err)
+ return nil
+ }
- if resetBackoff {
- backoffTimer.Reset(0)
- backoffAttempt = 0
- } else {
- backoffTimer.Reset(t.backoff(backoffAttempt))
- backoffAttempt++
+ clusters, interval, err := t.recvFirstLoadStatsResponse(stream)
+ if err != nil {
+ t.logger.Warningf("Reading from LRS stream failed: %v", err)
+ return nil
}
+
+ // We reset backoff state when we successfully receive at least one
+ // message from the server.
+ t.sendLoads(streamCtx, stream, clusters, interval)
+ return backoff.ErrResetBackoff
}
+ backoff.RunF(ctx, runLoadReportStream, t.backoff)
}
func (t *Transport) sendLoads(ctx context.Context, stream lrsStream, clusterNames []string, interval time.Duration) {
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/transport.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/transport.go
index 86803588a7cc2..001552d7b4798 100644
--- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/transport.go
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/transport.go
@@ -325,43 +325,29 @@ func (t *Transport) adsRunner(ctx context.Context) {
go t.send(ctx)
- backoffAttempt := 0
- backoffTimer := time.NewTimer(0)
- for ctx.Err() == nil {
- select {
- case <-backoffTimer.C:
- case <-ctx.Done():
- backoffTimer.Stop()
- return
+ // We reset backoff state when we successfully receive at least one
+ // message from the server.
+ runStreamWithBackoff := func() error {
+ stream, err := t.newAggregatedDiscoveryServiceStream(ctx, t.cc)
+ if err != nil {
+ t.onErrorHandler(err)
+ t.logger.Warningf("Creating new ADS stream failed: %v", err)
+ return nil
}
+ t.logger.Infof("ADS stream created")
- // We reset backoff state when we successfully receive at least one
- // message from the server.
- resetBackoff := func() bool {
- stream, err := t.newAggregatedDiscoveryServiceStream(ctx, t.cc)
- if err != nil {
- t.onErrorHandler(err)
- t.logger.Warningf("Creating new ADS stream failed: %v", err)
- return false
- }
- t.logger.Infof("ADS stream created")
-
- select {
- case <-t.adsStreamCh:
- default:
- }
- t.adsStreamCh <- stream
- return t.recv(stream)
- }()
-
- if resetBackoff {
- backoffTimer.Reset(0)
- backoffAttempt = 0
- } else {
- backoffTimer.Reset(t.backoff(backoffAttempt))
- backoffAttempt++
+ select {
+ case <-t.adsStreamCh:
+ default:
+ }
+ t.adsStreamCh <- stream
+ msgReceived := t.recv(stream)
+ if msgReceived {
+ return backoff.ErrResetBackoff
}
+ return nil
}
+ backoff.RunF(ctx, runStreamWithBackoff, t.backoff)
}
// send is a separate goroutine for sending resource requests on the ADS stream.
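
Both lrsRunner and adsRunner now delegate their retry loops to backoff.RunF from gRPC's internal backoff package. Judging only from the call sites in this diff, the helper keeps invoking the supplied function until the context is canceled and resets its backoff attempt counter when the function returns backoff.ErrResetBackoff. The standalone loop below approximates that behavior under those assumptions; it is not the internal implementation itself.

```go
package main

import (
	"context"
	"errors"
	"time"
)

// errResetBackoff stands in for backoff.ErrResetBackoff at the call sites above.
var errResetBackoff = errors.New("reset backoff state")

// runWithBackoff approximates what backoff.RunF appears to do in this diff:
// run f repeatedly until ctx is done, waiting bo(attempt) between runs, and
// resetting the attempt counter whenever f signals errResetBackoff.
func runWithBackoff(ctx context.Context, f func() error, bo func(int) time.Duration) {
	attempt := 0
	timer := time.NewTimer(0)
	defer timer.Stop()
	for ctx.Err() == nil {
		select {
		case <-timer.C:
		case <-ctx.Done():
			return
		}
		if err := f(); errors.Is(err, errResetBackoff) {
			attempt = 0
			timer.Reset(0)
			continue
		}
		timer.Reset(bo(attempt))
		attempt++
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	// f always asks for a backoff-and-retry here, so the loop simply backs
	// off with growing delays until the context times out.
	runWithBackoff(ctx, func() error { return nil }, func(n int) time.Duration {
		return time.Duration(n) * 10 * time.Millisecond
	})
}
```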
diff --git a/vendor/k8s.io/utils/keymutex/hashed.go b/vendor/k8s.io/utils/keymutex/hashed.go
new file mode 100644
index 0000000000000..4ddb00867ff7d
--- /dev/null
+++ b/vendor/k8s.io/utils/keymutex/hashed.go
@@ -0,0 +1,58 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package keymutex
+
+import (
+ "hash/fnv"
+ "runtime"
+ "sync"
+)
+
+// NewHashed returns a new instance of KeyMutex which hashes arbitrary keys to
+// a fixed set of locks. `n` specifies number of locks, if n <= 0, we use
+// number of cpus.
+// Note that because it uses fixed set of locks, different keys may share same
+// lock, so it's possible to wait on same lock.
+func NewHashed(n int) KeyMutex {
+ if n <= 0 {
+ n = runtime.NumCPU()
+ }
+ return &hashedKeyMutex{
+ mutexes: make([]sync.Mutex, n),
+ }
+}
+
+type hashedKeyMutex struct {
+ mutexes []sync.Mutex
+}
+
+// Acquires a lock associated with the specified ID.
+func (km *hashedKeyMutex) LockKey(id string) {
+ km.mutexes[km.hash(id)%uint32(len(km.mutexes))].Lock()
+}
+
+// Releases the lock associated with the specified ID.
+func (km *hashedKeyMutex) UnlockKey(id string) error {
+ km.mutexes[km.hash(id)%uint32(len(km.mutexes))].Unlock()
+ return nil
+}
+
+func (km *hashedKeyMutex) hash(id string) uint32 {
+ h := fnv.New32a()
+ h.Write([]byte(id))
+ return h.Sum32()
+}
diff --git a/vendor/k8s.io/utils/keymutex/keymutex.go b/vendor/k8s.io/utils/keymutex/keymutex.go
new file mode 100644
index 0000000000000..89dc022397c72
--- /dev/null
+++ b/vendor/k8s.io/utils/keymutex/keymutex.go
@@ -0,0 +1,27 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package keymutex
+
+// KeyMutex is a thread-safe interface for acquiring locks on arbitrary strings.
+type KeyMutex interface {
+ // Acquires a lock associated with the specified ID, creates the lock if one doesn't already exist.
+ LockKey(id string)
+
+ // Releases the lock associated with the specified ID.
+ // Returns an error if the specified ID doesn't exist.
+ UnlockKey(id string) error
+}
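
k8s.io/utils/keymutex is newly added to the vendor tree here. A minimal usage sketch (the volume IDs are made up): hash each string key onto a fixed pool of mutexes so that work for the same key is serialized, accepting that unrelated keys may occasionally share a lock.

```go
package main

import (
	"fmt"
	"sync"

	"k8s.io/utils/keymutex"
)

func main() {
	// Eight locks shared across all keys; distinct keys can hash to the same
	// lock, so unrelated keys may occasionally serialize against each other.
	locks := keymutex.NewHashed(8)

	var wg sync.WaitGroup
	for _, vol := range []string{"vol-a", "vol-b", "vol-a"} {
		wg.Add(1)
		go func(id string) {
			defer wg.Done()
			locks.LockKey(id)
			defer locks.UnlockKey(id)
			fmt.Println("working on", id) // work for the same id never overlaps
		}(vol)
	}
	wg.Wait()
}
```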
diff --git a/vendor/modules.txt b/vendor/modules.txt
index ae84c85f97224..23e4a1802fb27 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -464,7 +464,7 @@ github.com/coreos/go-systemd/sdjournal
## explicit; go 1.12
github.com/coreos/go-systemd/v22/activation
github.com/coreos/go-systemd/v22/journal
-# github.com/cristalhq/hedgedhttp v0.7.2
+# github.com/cristalhq/hedgedhttp v0.9.1
## explicit; go 1.16
github.com/cristalhq/hedgedhttp
# github.com/d4l3k/messagediff v1.2.1
@@ -816,7 +816,7 @@ github.com/google/s2a-go/internal/v2/certverifier
github.com/google/s2a-go/internal/v2/remotesigner
github.com/google/s2a-go/internal/v2/tlsconfigstore
github.com/google/s2a-go/stream
-# github.com/google/uuid v1.3.0
+# github.com/google/uuid v1.3.1
## explicit
github.com/google/uuid
# github.com/googleapis/enterprise-certificate-proxy v0.2.5
@@ -853,8 +853,8 @@ github.com/gorilla/websocket
# github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2
## explicit; go 1.17
github.com/grafana/cloudflare-go
-# github.com/grafana/dskit v0.0.0-20231017083947-7b512eb54d47
-## explicit; go 1.19
+# github.com/grafana/dskit v0.0.0-20231120170505-765e343eda4f
+## explicit; go 1.20
github.com/grafana/dskit/aws
github.com/grafana/dskit/backoff
github.com/grafana/dskit/concurrency
@@ -903,7 +903,7 @@ github.com/grafana/go-gelf/v2/gelf
# github.com/grafana/gomemcache v0.0.0-20231023152154-6947259a0586
## explicit; go 1.18
github.com/grafana/gomemcache/memcache
-# github.com/grafana/loki/pkg/push v0.0.0-20231023154132-0a7737e7c7eb => ./pkg/push
+# github.com/grafana/loki/pkg/push v0.0.0-20231124142027-e52380921608 => ./pkg/push
## explicit; go 1.19
github.com/grafana/loki/pkg/push
# github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd => github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd
@@ -1615,8 +1615,8 @@ golang.org/x/net/netutil
golang.org/x/net/proxy
golang.org/x/net/publicsuffix
golang.org/x/net/trace
-# golang.org/x/oauth2 v0.10.0
-## explicit; go 1.17
+# golang.org/x/oauth2 v0.11.0
+## explicit; go 1.18
golang.org/x/oauth2
golang.org/x/oauth2/authhandler
golang.org/x/oauth2/clientcredentials
@@ -1736,7 +1736,7 @@ google.golang.org/genproto/googleapis/type/date
google.golang.org/genproto/googleapis/type/expr
google.golang.org/genproto/internal
google.golang.org/genproto/protobuf/field_mask
-# google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5
+# google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d
## explicit; go 1.19
google.golang.org/genproto/googleapis/api
google.golang.org/genproto/googleapis/api/annotations
@@ -1746,7 +1746,7 @@ google.golang.org/genproto/googleapis/api/expr/v1alpha1
google.golang.org/genproto/googleapis/rpc/code
google.golang.org/genproto/googleapis/rpc/errdetails
google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.58.3
+# google.golang.org/grpc v1.59.0
## explicit; go 1.19
google.golang.org/grpc
google.golang.org/grpc/attributes
@@ -2188,6 +2188,7 @@ k8s.io/utils/clock
k8s.io/utils/clock/testing
k8s.io/utils/integer
k8s.io/utils/internal/third_party/forked/golang/net
+k8s.io/utils/keymutex
k8s.io/utils/net
k8s.io/utils/pointer
k8s.io/utils/strings/slices