Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Swap to using integers for extended bounds in the date histogram aggregation #61

Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 4 additions & 5 deletions .github/CONTRIBUTING.md
Original file line number Diff line number Diff line change
Expand Up @@ -119,11 +119,10 @@ Astra datasource URL inside your Grafana instance.
mage -v
```

3. List all available Mage targets for additional commands:

```bash
mage -l
```
3. Move the binaries into the target directory:
```bash
mv dist/gpx_slack_astra_app_datasource_backend_* dist/datasource
```

### Releases

Expand Down
3 changes: 3 additions & 0 deletions docker-compose.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -21,3 +21,6 @@ services:
GF_PATHS_PLUGINS: "/var/lib/grafana/plugins"
GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS: "slack-astra-app,slack-astra-app-backend-datasource"
GF_SERVER_ENABLE_GZIP: "true"
GF_UNIFIED_ALERTING_ENABLED: "false"
GF_ALERTING_ENABLED: "true"
GF_DATABASE_WAL: "true"
8 changes: 7 additions & 1 deletion go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,12 @@ require (
golang.org/x/net v0.0.0-20210614182718-04defd469f4e
)

require github.com/grafana/opensearch-datasource v1.2.0
require (
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
github.com/grafana/opensearch-datasource v1.2.0
github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5 // indirect
github.com/sirupsen/logrus v1.6.0 // indirect
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 // indirect
)

replace golang.org/x/sys => golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c
2 changes: 2 additions & 0 deletions pkg/astra/astra.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,8 @@ type AstraExecutor struct{}

var (
intervalCalculator tsdb.IntervalCalculator
_ backend.QueryDataHandler = (*AstraDatasource)(nil)
_ backend.CheckHealthHandler = (*AstraDatasource)(nil)
)

type TsdbQueryEndpoint interface {
Expand Down
3 changes: 1 addition & 2 deletions pkg/astra/client/client.go
Original file line number Diff line number Diff line change
Expand Up @@ -208,7 +208,6 @@ func (c *baseClientImpl) encodeBatchRequests(requests []*multiRequest) ([]byte,
body := string(reqBody)
body = strings.ReplaceAll(body, "$__interval_ms", strconv.FormatInt(r.interval.Milliseconds(), 10))
body = strings.ReplaceAll(body, "$__interval", r.interval.Text)

payload.WriteString(body + "\n")
}

Expand Down Expand Up @@ -464,7 +463,7 @@ func (c *baseClientImpl) executePPLQueryRequest(method, uriPath string, body []b
return nil, err
}

clientLog.Debug("Executing request", "url", req.URL.String(), "method", method)
clientLog.Debug("Executing PPL request", "url", req.URL.String(), "method", method)

var reqInfo *PPLRequestInfo
if c.debugEnabled {
Expand Down
6 changes: 3 additions & 3 deletions pkg/astra/client/models.go
Original file line number Diff line number Diff line change
Expand Up @@ -80,7 +80,7 @@ type SearchResponse struct {
Error map[string]interface{} `json:"error"`
Aggregations map[string]interface{} `json:"aggregations"`
Hits *SearchResponseHits `json:"hits"`
Shards map[string]interface{} `json:"_shards"`
Shards map[string]interface{} `json:"_shards"`
}

// MultiSearchRequest represents a multi search request
Expand Down Expand Up @@ -271,8 +271,8 @@ type TermsAggregation struct {

// ExtendedBounds represents extended bounds
type ExtendedBounds struct {
Min string `json:"min"`
Max string `json:"max"`
Min int64 `json:"min"`
Max int64 `json:"max"`
}

// GeoHashGridAggregation represents a geo hash grid aggregation
Expand Down
4 changes: 2 additions & 2 deletions pkg/astra/lucene_handler.go
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,7 @@ func (h *luceneHandler) processQuery(q *Query) error {
for _, bucketAgg := range q.BucketAggs {
switch bucketAgg.Type {
case dateHistType:
aggBuilder = addDateHistogramAgg(aggBuilder, bucketAgg, from, to)
aggBuilder = addDateHistogramAgg(aggBuilder, bucketAgg, int64(fromMs), int64(toMs))
case histogramType:
aggBuilder = addHistogramAgg(aggBuilder, bucketAgg)
case filtersType:
Expand Down Expand Up @@ -168,7 +168,7 @@ func (h *luceneHandler) executeQueries() (*backend.QueryDataResponse, error) {
return rp.getTimeSeries()
}

func addDateHistogramAgg(aggBuilder es.AggBuilder, bucketAgg *BucketAgg, timeFrom, timeTo string) es.AggBuilder {
func addDateHistogramAgg(aggBuilder es.AggBuilder, bucketAgg *BucketAgg, timeFrom, timeTo int64) es.AggBuilder {
aggBuilder.DateHistogram(bucketAgg.ID, bucketAgg.Field, func(a *es.DateHistogramAgg, b es.AggBuilder) {
a.Interval = bucketAgg.Settings.Get("interval").MustString("auto")
a.MinDocCount = bucketAgg.Settings.Get("min_doc_count").MustInt(0)
Expand Down
14 changes: 8 additions & 6 deletions pkg/astra/time_series_query_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,8 +15,10 @@ import (
func TestExecuteTimeSeriesQuery(t *testing.T) {
from := time.Date(2018, 5, 15, 17, 50, 0, 0, time.UTC)
to := time.Date(2018, 5, 15, 17, 55, 0, 0, time.UTC)
fromStr := fmt.Sprintf("%d", from.UnixNano()/int64(time.Millisecond))
toStr := fmt.Sprintf("%d", to.UnixNano()/int64(time.Millisecond))
fromMs := from.UnixNano() / int64(time.Millisecond)
toMs := to.UnixNano() / int64(time.Millisecond)
fromStr := fmt.Sprintf("%d", fromMs)
toStr := fmt.Sprintf("%d", toMs)

Convey("Test execute time series query", t, func() {
Convey("With defaults on Elasticsearch 2.0.0", func() {
Expand All @@ -36,8 +38,8 @@ func TestExecuteTimeSeriesQuery(t *testing.T) {
So(sr.Aggs[0].Key, ShouldEqual, "2")
dateHistogramAgg := sr.Aggs[0].Aggregation.Aggregation.(*es.DateHistogramAgg)
So(dateHistogramAgg.Field, ShouldEqual, "@timestamp")
So(dateHistogramAgg.ExtendedBounds.Min, ShouldEqual, fromStr)
So(dateHistogramAgg.ExtendedBounds.Max, ShouldEqual, toStr)
So(dateHistogramAgg.ExtendedBounds.Min, ShouldEqual, fromMs)
So(dateHistogramAgg.ExtendedBounds.Max, ShouldEqual, toMs)
})

Convey("With defaults on Elasticsearch 5.0.0", func() {
Expand All @@ -51,8 +53,8 @@ func TestExecuteTimeSeriesQuery(t *testing.T) {
sr := c.multisearchRequests[0].Requests[0]
So(sr.Query.Bool.Filters[0].(*es.RangeFilter).Key, ShouldEqual, c.timeField)
So(sr.Aggs[0].Key, ShouldEqual, "2")
So(sr.Aggs[0].Aggregation.Aggregation.(*es.DateHistogramAgg).ExtendedBounds.Min, ShouldEqual, fromStr)
So(sr.Aggs[0].Aggregation.Aggregation.(*es.DateHistogramAgg).ExtendedBounds.Max, ShouldEqual, toStr)
So(sr.Aggs[0].Aggregation.Aggregation.(*es.DateHistogramAgg).ExtendedBounds.Min, ShouldEqual, fromMs)
So(sr.Aggs[0].Aggregation.Aggregation.(*es.DateHistogramAgg).ExtendedBounds.Max, ShouldEqual, toMs)
})

Convey("With multiple bucket aggs", func() {
Expand Down
Loading