From 3bd603466cf770e2d12b81ba8f36f34690901ecd Mon Sep 17 00:00:00 2001 From: Tapajit Chandra Paul Date: Wed, 17 Jul 2024 19:03:20 +0600 Subject: [PATCH] Add druid client Signed-off-by: Tapajit Chandra Paul --- druid/client.go | 451 +++++++++ druid/kubedb_client_builder.go | 109 +++ go.mod | 5 + go.sum | 18 + .../github.com/google/go-querystring/LICENSE | 27 + .../google/go-querystring/query/encode.go | 357 +++++++ .../github.com/grafadruid/go-druid/.gitignore | 21 + vendor/github.com/grafadruid/go-druid/LICENSE | 201 ++++ .../grafadruid/go-druid/Magefile.go | 232 +++++ .../github.com/grafadruid/go-druid/README.md | 30 + .../builder/aggregation/aggregation.go | 115 +++ .../builder/aggregation/cardinality.go | 34 + .../go-druid/builder/aggregation/count.go | 16 + .../builder/aggregation/double_any.go | 22 + .../builder/aggregation/double_first.go | 22 + .../builder/aggregation/double_last.go | 22 + .../builder/aggregation/double_max.go | 28 + .../builder/aggregation/double_mean.go | 22 + .../builder/aggregation/double_min.go | 28 + .../builder/aggregation/double_sum.go | 28 + .../go-druid/builder/aggregation/filtered.go | 57 ++ .../go-druid/builder/aggregation/float_any.go | 22 + .../builder/aggregation/float_first.go | 22 + .../builder/aggregation/float_last.go | 22 + .../go-druid/builder/aggregation/float_max.go | 28 + .../go-druid/builder/aggregation/float_min.go | 28 + .../go-druid/builder/aggregation/float_sum.go | 28 + .../go-druid/builder/aggregation/histogram.go | 28 + .../builder/aggregation/hll_sketch.go | 55 ++ .../builder/aggregation/hyper_unique.go | 34 + .../builder/aggregation/javascript.go | 40 + .../go-druid/builder/aggregation/long_any.go | 22 + .../builder/aggregation/long_first.go | 22 + .../go-druid/builder/aggregation/long_last.go | 22 + .../go-druid/builder/aggregation/long_max.go | 28 + .../go-druid/builder/aggregation/long_min.go | 28 + .../go-druid/builder/aggregation/long_sum.go | 28 + .../aggregation/quantiles_doubles_sketch.go | 34 + .../builder/aggregation/string_any.go | 28 + .../builder/aggregation/string_first.go | 28 + .../aggregation/string_first_folding.go | 28 + .../builder/aggregation/string_last.go | 28 + .../aggregation/string_last_folding.go | 28 + .../builder/aggregation/tdigestsketch.go | 34 + .../builder/aggregation/thetasketch.go | 41 + .../go-druid/builder/bound/bound.go | 45 + .../go-druid/builder/bound/polygon.go | 23 + .../go-druid/builder/bound/radius.go | 23 + .../go-druid/builder/bound/rectangular.go | 29 + .../grafadruid/go-druid/builder/builder.go | 71 ++ .../builder/datasource/data_source.go | 53 + .../builder/datasource/global_table.go | 17 + .../go-druid/builder/datasource/inline.go | 29 + .../go-druid/builder/datasource/join.go | 77 ++ .../go-druid/builder/datasource/lookup.go | 17 + .../go-druid/builder/datasource/query.go | 39 + .../go-druid/builder/datasource/table.go | 17 + .../go-druid/builder/datasource/union.go | 17 + .../go-druid/builder/dimension/default.go | 28 + .../go-druid/builder/dimension/dimension.go | 70 ++ .../go-druid/builder/dimension/extraction.go | 56 ++ .../builder/dimension/list_filtered.go | 72 ++ .../go-druid/builder/dimension/lookup.go | 83 ++ .../builder/dimension/prefix_filtered.go | 64 ++ .../builder/dimension/regex_filtered.go | 64 ++ .../go-druid/builder/extractionfn/bucket.go | 23 + .../go-druid/builder/extractionfn/cascade.go | 45 + .../builder/extractionfn/extraction_fn.go | 71 ++ .../go-druid/builder/extractionfn/identity.go | 11 + .../builder/extractionfn/javascript.go | 23 + 
.../go-druid/builder/extractionfn/lookup.go | 72 ++ .../go-druid/builder/extractionfn/lower.go | 17 + .../go-druid/builder/extractionfn/partial.go | 17 + .../go-druid/builder/extractionfn/regex.go | 35 + .../builder/extractionfn/registered_lookup.go | 43 + .../builder/extractionfn/search_query.go | 41 + .../builder/extractionfn/string_format.go | 27 + .../go-druid/builder/extractionfn/strlen.go | 11 + .../builder/extractionfn/substring.go | 23 + .../go-druid/builder/extractionfn/time.go | 29 + .../builder/extractionfn/time_format.go | 73 ++ .../go-druid/builder/extractionfn/upper.go | 17 + .../grafadruid/go-druid/builder/filter/and.go | 45 + .../go-druid/builder/filter/bound.go | 94 ++ .../builder/filter/column_comparison.go | 46 + .../go-druid/builder/filter/expression.go | 23 + .../go-druid/builder/filter/extraction.go | 61 ++ .../go-druid/builder/filter/false.go | 11 + .../go-druid/builder/filter/filter.go | 75 ++ .../go-druid/builder/filter/filter_tuning.go | 29 + .../grafadruid/go-druid/builder/filter/in.go | 69 ++ .../go-druid/builder/filter/interval.go | 79 ++ .../go-druid/builder/filter/javascript.go | 69 ++ .../go-druid/builder/filter/like.go | 77 ++ .../grafadruid/go-druid/builder/filter/not.go | 44 + .../grafadruid/go-druid/builder/filter/or.go | 44 + .../go-druid/builder/filter/regex.go | 69 ++ .../go-druid/builder/filter/search.go | 69 ++ .../go-druid/builder/filter/selector.go | 69 ++ .../go-druid/builder/filter/spatial.go | 57 ++ .../go-druid/builder/filter/true.go | 11 + .../go-druid/builder/granularity/duration.go | 33 + .../builder/granularity/granularity.go | 52 + .../go-druid/builder/granularity/period.go | 42 + .../go-druid/builder/granularity/simple.go | 39 + .../go-druid/builder/havingspec/always.go | 11 + .../go-druid/builder/havingspec/and.go | 45 + .../builder/havingspec/dim_selector.go | 57 ++ .../go-druid/builder/havingspec/equal_to.go | 23 + .../builder/havingspec/greather_than.go | 23 + .../builder/havingspec/having_spec.go | 57 ++ .../go-druid/builder/havingspec/less_than.go | 23 + .../go-druid/builder/havingspec/never.go | 11 + .../go-druid/builder/havingspec/not.go | 40 + .../go-druid/builder/havingspec/or.go | 45 + .../go-druid/builder/intervals/default.go | 17 + .../go-druid/builder/intervals/interval.go | 28 + .../go-druid/builder/intervals/intervals.go | 41 + .../go-druid/builder/limitspec/default.go | 46 + .../go-druid/builder/limitspec/limit_spec.go | 41 + .../go-druid/builder/lookup/lookup.go | 41 + .../grafadruid/go-druid/builder/lookup/map.go | 23 + .../builder/postaggregation/arithmetic.go | 66 ++ .../builder/postaggregation/constant.go | 22 + .../postaggregation/double_greatest.go | 50 + .../builder/postaggregation/double_least.go | 50 + .../builder/postaggregation/expression.go | 28 + .../builder/postaggregation/field_access.go | 22 + .../finalizing_field_access.go | 22 + .../hyper_unique_finalizing.go | 22 + .../builder/postaggregation/javascript.go | 28 + .../builder/postaggregation/long_greatest.go | 50 + .../builder/postaggregation/long_least.go | 50 + .../postaggregation/post_aggregator.go | 83 ++ .../quantile_from_tdigestsketch.go | 59 ++ .../quantiles_doubles_sketch.go | 31 + .../quantiles_doubles_sketch_to_cdf.go | 34 + .../quantiles_doubles_sketch_to_histogram.go | 41 + .../quantiles_doubles_sketch_to_quantile.go | 41 + .../quantiles_doubles_sketch_to_quantiles.go | 34 + .../quantiles_doubles_sketch_to_rank.go | 34 + .../quantiles_doubles_sketch_to_string.go | 27 + .../quantiles_from_tdigestsketch.go | 58 ++ 
.../builder/query/datasource_metadata.go | 30 + .../go-druid/builder/query/group_by.go | 182 ++++ .../go-druid/builder/query/query.go | 118 +++ .../grafadruid/go-druid/builder/query/scan.go | 168 ++++ .../go-druid/builder/query/search.go | 124 +++ .../builder/query/segment_metadata.go | 104 ++ .../grafadruid/go-druid/builder/query/sql.go | 68 ++ .../go-druid/builder/query/time_boundary.go | 66 ++ .../go-druid/builder/query/timeseries.go | 141 +++ .../go-druid/builder/query/top_n.go | 156 +++ .../go-druid/builder/searchqueryspec/all.go | 11 + .../builder/searchqueryspec/contains.go | 23 + .../builder/searchqueryspec/fragment.go | 23 + .../searchqueryspec/insensitive_contains.go | 17 + .../go-druid/builder/searchqueryspec/regex.go | 17 + .../searchqueryspec/search_query_spec.go | 49 + .../go-druid/builder/toinclude/all.go | 7 + .../go-druid/builder/toinclude/list.go | 17 + .../go-druid/builder/toinclude/none.go | 7 + .../go-druid/builder/toinclude/to_include.go | 45 + .../builder/topnmetric/alpha_numeric.go | 17 + .../go-druid/builder/topnmetric/dimension.go | 25 + .../go-druid/builder/topnmetric/inverted.go | 40 + .../builder/topnmetric/lexicographic.go | 17 + .../go-druid/builder/topnmetric/numeric.go | 17 + .../builder/topnmetric/top_n_metric.go | 49 + .../go-druid/builder/types/date_time_zone.go | 7 + .../go-druid/builder/types/join_types.go | 10 + .../go-druid/builder/types/null_handling.go | 9 + .../go-druid/builder/types/output_types.go | 11 + .../builder/types/string_comparators.go | 11 + .../builder/virtualcolumn/expression.go | 31 + .../builder/virtualcolumn/virtual_column.go | 41 + .../github.com/grafadruid/go-druid/common.go | 69 ++ .../github.com/grafadruid/go-druid/druid.go | 392 ++++++++ .../github.com/grafadruid/go-druid/query.go | 42 + .../github.com/hashicorp/go-cleanhttp/LICENSE | 363 +++++++ .../hashicorp/go-cleanhttp/README.md | 30 + .../hashicorp/go-cleanhttp/cleanhttp.go | 58 ++ .../github.com/hashicorp/go-cleanhttp/doc.go | 20 + .../hashicorp/go-cleanhttp/handlers.go | 48 + .../hashicorp/go-retryablehttp/.gitignore | 4 + .../hashicorp/go-retryablehttp/.go-version | 1 + .../hashicorp/go-retryablehttp/CHANGELOG.md | 33 + .../hashicorp/go-retryablehttp/CODEOWNERS | 1 + .../hashicorp/go-retryablehttp/LICENSE | 365 +++++++ .../hashicorp/go-retryablehttp/Makefile | 11 + .../hashicorp/go-retryablehttp/README.md | 62 ++ .../go-retryablehttp/cert_error_go119.go | 14 + .../go-retryablehttp/cert_error_go120.go | 14 + .../hashicorp/go-retryablehttp/client.go | 919 ++++++++++++++++++ .../go-retryablehttp/roundtripper.go | 55 ++ vendor/github.com/magefile/mage/LICENSE | 201 ++++ vendor/github.com/magefile/mage/mg/color.go | 80 ++ .../magefile/mage/mg/color_string.go | 38 + vendor/github.com/magefile/mage/mg/deps.go | 204 ++++ vendor/github.com/magefile/mage/mg/errors.go | 51 + vendor/github.com/magefile/mage/mg/fn.go | 181 ++++ vendor/github.com/magefile/mage/mg/runtime.go | 136 +++ vendor/github.com/magefile/mage/sh/cmd.go | 177 ++++ vendor/github.com/magefile/mage/sh/helpers.go | 40 + .../client-go/tools/healthchecker/const.go | 45 + .../tools/healthchecker/health_card.go | 78 ++ .../tools/healthchecker/health_checker.go | 128 +++ vendor/modules.txt | 36 + 208 files changed, 12208 insertions(+) create mode 100644 druid/client.go create mode 100644 druid/kubedb_client_builder.go create mode 100644 vendor/github.com/google/go-querystring/LICENSE create mode 100644 vendor/github.com/google/go-querystring/query/encode.go create mode 100644 
vendor/github.com/grafadruid/go-druid/.gitignore create mode 100644 vendor/github.com/grafadruid/go-druid/LICENSE create mode 100644 vendor/github.com/grafadruid/go-druid/Magefile.go create mode 100644 vendor/github.com/grafadruid/go-druid/README.md create mode 100644 vendor/github.com/grafadruid/go-druid/builder/aggregation/aggregation.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/aggregation/cardinality.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/aggregation/count.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/aggregation/double_any.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/aggregation/double_first.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/aggregation/double_last.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/aggregation/double_max.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/aggregation/double_mean.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/aggregation/double_min.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/aggregation/double_sum.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/aggregation/filtered.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/aggregation/float_any.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/aggregation/float_first.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/aggregation/float_last.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/aggregation/float_max.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/aggregation/float_min.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/aggregation/float_sum.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/aggregation/histogram.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/aggregation/hll_sketch.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/aggregation/hyper_unique.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/aggregation/javascript.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/aggregation/long_any.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/aggregation/long_first.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/aggregation/long_last.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/aggregation/long_max.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/aggregation/long_min.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/aggregation/long_sum.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/aggregation/quantiles_doubles_sketch.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/aggregation/string_any.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/aggregation/string_first.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/aggregation/string_first_folding.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/aggregation/string_last.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/aggregation/string_last_folding.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/aggregation/tdigestsketch.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/aggregation/thetasketch.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/bound/bound.go create mode 
100644 vendor/github.com/grafadruid/go-druid/builder/bound/polygon.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/bound/radius.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/bound/rectangular.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/builder.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/datasource/data_source.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/datasource/global_table.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/datasource/inline.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/datasource/join.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/datasource/lookup.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/datasource/query.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/datasource/table.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/datasource/union.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/dimension/default.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/dimension/dimension.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/dimension/extraction.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/dimension/list_filtered.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/dimension/lookup.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/dimension/prefix_filtered.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/dimension/regex_filtered.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/extractionfn/bucket.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/extractionfn/cascade.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/extractionfn/extraction_fn.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/extractionfn/identity.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/extractionfn/javascript.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/extractionfn/lookup.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/extractionfn/lower.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/extractionfn/partial.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/extractionfn/regex.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/extractionfn/registered_lookup.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/extractionfn/search_query.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/extractionfn/string_format.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/extractionfn/strlen.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/extractionfn/substring.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/extractionfn/time.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/extractionfn/time_format.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/extractionfn/upper.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/filter/and.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/filter/bound.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/filter/column_comparison.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/filter/expression.go create mode 100644 
vendor/github.com/grafadruid/go-druid/builder/filter/extraction.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/filter/false.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/filter/filter.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/filter/filter_tuning.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/filter/in.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/filter/interval.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/filter/javascript.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/filter/like.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/filter/not.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/filter/or.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/filter/regex.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/filter/search.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/filter/selector.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/filter/spatial.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/filter/true.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/granularity/duration.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/granularity/granularity.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/granularity/period.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/granularity/simple.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/havingspec/always.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/havingspec/and.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/havingspec/dim_selector.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/havingspec/equal_to.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/havingspec/greather_than.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/havingspec/having_spec.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/havingspec/less_than.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/havingspec/never.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/havingspec/not.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/havingspec/or.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/intervals/default.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/intervals/interval.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/intervals/intervals.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/limitspec/default.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/limitspec/limit_spec.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/lookup/lookup.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/lookup/map.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/postaggregation/arithmetic.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/postaggregation/constant.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/postaggregation/double_greatest.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/postaggregation/double_least.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/postaggregation/expression.go create mode 100644 
vendor/github.com/grafadruid/go-druid/builder/postaggregation/field_access.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/postaggregation/finalizing_field_access.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/postaggregation/hyper_unique_finalizing.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/postaggregation/javascript.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/postaggregation/long_greatest.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/postaggregation/long_least.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/postaggregation/post_aggregator.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/postaggregation/quantile_from_tdigestsketch.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/postaggregation/quantiles_doubles_sketch.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/postaggregation/quantiles_doubles_sketch_to_cdf.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/postaggregation/quantiles_doubles_sketch_to_histogram.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/postaggregation/quantiles_doubles_sketch_to_quantile.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/postaggregation/quantiles_doubles_sketch_to_quantiles.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/postaggregation/quantiles_doubles_sketch_to_rank.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/postaggregation/quantiles_doubles_sketch_to_string.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/postaggregation/quantiles_from_tdigestsketch.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/query/datasource_metadata.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/query/group_by.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/query/query.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/query/scan.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/query/search.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/query/segment_metadata.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/query/sql.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/query/time_boundary.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/query/timeseries.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/query/top_n.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/searchqueryspec/all.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/searchqueryspec/contains.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/searchqueryspec/fragment.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/searchqueryspec/insensitive_contains.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/searchqueryspec/regex.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/searchqueryspec/search_query_spec.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/toinclude/all.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/toinclude/list.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/toinclude/none.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/toinclude/to_include.go create mode 100644 
vendor/github.com/grafadruid/go-druid/builder/topnmetric/alpha_numeric.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/topnmetric/dimension.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/topnmetric/inverted.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/topnmetric/lexicographic.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/topnmetric/numeric.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/topnmetric/top_n_metric.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/types/date_time_zone.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/types/join_types.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/types/null_handling.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/types/output_types.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/types/string_comparators.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/virtualcolumn/expression.go create mode 100644 vendor/github.com/grafadruid/go-druid/builder/virtualcolumn/virtual_column.go create mode 100644 vendor/github.com/grafadruid/go-druid/common.go create mode 100644 vendor/github.com/grafadruid/go-druid/druid.go create mode 100644 vendor/github.com/grafadruid/go-druid/query.go create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/LICENSE create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/README.md create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/doc.go create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/handlers.go create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/.gitignore create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/.go-version create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/CHANGELOG.md create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/CODEOWNERS create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/LICENSE create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/Makefile create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/README.md create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/cert_error_go119.go create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/cert_error_go120.go create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/client.go create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/roundtripper.go create mode 100644 vendor/github.com/magefile/mage/LICENSE create mode 100644 vendor/github.com/magefile/mage/mg/color.go create mode 100644 vendor/github.com/magefile/mage/mg/color_string.go create mode 100644 vendor/github.com/magefile/mage/mg/deps.go create mode 100644 vendor/github.com/magefile/mage/mg/errors.go create mode 100644 vendor/github.com/magefile/mage/mg/fn.go create mode 100644 vendor/github.com/magefile/mage/mg/runtime.go create mode 100644 vendor/github.com/magefile/mage/sh/cmd.go create mode 100644 vendor/github.com/magefile/mage/sh/helpers.go create mode 100644 vendor/kmodules.xyz/client-go/tools/healthchecker/const.go create mode 100644 vendor/kmodules.xyz/client-go/tools/healthchecker/health_card.go create mode 100644 vendor/kmodules.xyz/client-go/tools/healthchecker/health_checker.go diff --git a/druid/client.go b/druid/client.go new file mode 100644 index 000000000..9c1af5340 --- /dev/null +++ b/druid/client.go @@ -0,0 +1,451 @@ +/* +Copyright AppsCode Inc. 
and Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package druid
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	druidgo "github.com/grafadruid/go-druid"
+	"github.com/hashicorp/go-retryablehttp"
+	"github.com/pkg/errors"
+	"k8s.io/klog/v2"
+	health "kmodules.xyz/client-go/tools/healthchecker"
+	"kubedb.dev/apimachinery/apis/kubedb"
+	"log"
+	"time"
+)
+
+type Client struct {
+	druidgo.Client
+}
+
+type DruidTaskType int32
+
+const (
+	DruidIngestionTask DruidTaskType = 0
+	DruidKillTask      DruidTaskType = 1
+)
+
+func (c *Client) CloseDruidClient(hcs *health.HealthCard) {
+	err := c.Close()
+	if err != nil {
+		klog.Error(err, "Failed to close druid client")
+		return
+	}
+	hcs.ClientClosed()
+}
+
+func IsDBConnected(druidClients []Client) (bool, error) {
+	// First, check the health of the nodes
+	for _, druidClient := range druidClients {
+		healthStatus, err := druidClient.CheckNodeHealth()
+		if err != nil {
+			klog.Error(err, "Failed to check node health")
+			return false, err
+		}
+		// If any node is unhealthy, there is no point in checking the health of the remaining nodes
+		if !healthStatus {
+			return false, nil
+		}
+	}
+
+	// Check the self-discovery status, i.e. whether each node has received a confirmation
+	// from the central node-discovery mechanism (currently ZooKeeper) of the Druid cluster
+	for _, druidClient := range druidClients {
+		discoveryStatus, err := druidClient.CheckNodeDiscoveryStatus()
+		if err != nil {
+			klog.Error(err, "Failed to check node discovery status")
+			return false, err
+		}
+		// If any node has not been discovered yet, there is no point in checking the remaining nodes
+		if !discoveryStatus {
+			return false, nil
+		}
+	}
+	return true, nil
+}
+
+func (c *Client) CheckNodeHealth() (bool, error) {
+	healthStatus, _, err := c.Common().Health()
+	if err != nil {
+		klog.Error(err, "Failed to check node health")
+		return false, err
+	}
+	return bool(*healthStatus), err
+}
+
+func (c *Client) CheckNodeDiscoveryStatus() (bool, error) {
+	discoveryStatus, _, err := c.Common().SelfDiscovered()
+	if err != nil {
+		klog.Error(err, "Failed to check node discovery status")
+		return false, err
+	}
+	return discoveryStatus.SelfDiscovered, err
+}
+
+func (c *Client) CheckDataSourceExistence() (bool, error) {
+	method := "POST"
+	path := "druid/v2/sql"
+
+	data := map[string]interface{}{
+		"query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'druid' AND TABLE_NAME = 'kubedb-datasource'",
+	}
+
+	jsonData, err := json.Marshal(data)
+	if err != nil {
+		return false, errors.Wrap(err, "failed to marshal query json data")
+	}
+	rawMessage := json.RawMessage(jsonData)
+	response, err := c.SubmitRequest(method, path, rawMessage)
+	if err != nil {
+		return false, err
+	}
+
+	exists, err := parseDatasourceExistenceQueryResponse(response)
+	if err != nil {
+		return false, errors.Wrap(err, "failed to parse response of datasource existence request")
+	}
+
+	if err := closeResponse(response); err != nil {
+		return exists, err
+	}
+	return exists, nil
+}
+
+func (c *Client) SubmitRequest(method, path string, opts interface{}) (*druidgo.Response, error) {
+	res, err := c.NewRequest(method, path, opts)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to create API request")
+	}
+	http := retryablehttp.NewClient()
+
+	var b []byte
+	buf := bytes.NewBuffer(b)
+	http.Logger = log.New(buf, "", 0)
+
+	resp, err := http.Do(res)
+	if err != nil {
+		return nil, err
+	}
+	response := &druidgo.Response{Response: resp}
+	return response, nil
+}
+
+func parseDatasourceExistenceQueryResponse(res *druidgo.Response) (bool, error) {
+	var responseBody []map[string]interface{}
+	if err := json.NewDecoder(res.Body).Decode(&responseBody); err != nil {
+		return false, errors.Wrap(err, "failed to deserialize the response")
+	}
+	return len(responseBody) != 0, nil
+}
+
+func closeResponse(response *druidgo.Response) error {
+	err := response.Body.Close()
+	if err != nil {
+		return errors.Wrap(err, "failed to close the response body")
+	}
+	return nil
+}
+
+// CheckDBReadWriteAccess checks read and write access to the DB.
+// If an error is returned,
+// flag == false corresponds to a write-check failure and
+// flag == true corresponds to a read-check failure.
+func CheckDBReadWriteAccess(druidCoordinatorsClient Client, druidBrokersClient Client, druidOverlordsClient Client) (error, bool) {
+	exist, err := druidBrokersClient.CheckDataSourceExistence()
+	if err != nil {
+		klog.Error(err, "Failed to check the existence of kubedb-datasource")
+		return err, false
+	}
+
+	var oldData, newData string
+	if exist {
+		oldData, err = druidBrokersClient.getData()
+		if err != nil {
+			klog.Error(err, "Failed to read datasource")
+			return err, false
+		}
+		if oldData == kubedb.DruidHealthDataZero {
+			newData = kubedb.DruidHealthDataOne
+		} else {
+			newData = kubedb.DruidHealthDataZero
+		}
+	} else {
+		// In the first iteration of the health check, update the coordinators config
+		// so that unused segments are deleted 500 milliseconds after becoming leader
+		err := druidCoordinatorsClient.updateCoordinatorsWaitBeforeDeletingConfig(500)
+		if err != nil {
+			return err, false
+		}
+		klog.Info("Successfully updated coordinators config to wait before deleting segments")
+		oldData = kubedb.DruidHealthDataZero
+		newData = kubedb.DruidHealthDataOne
+	}
+
+	// Submit an ingestion task and check its status
+	if err := druidOverlordsClient.submitTaskRecurrently(DruidIngestionTask, newData); err != nil {
+		klog.Error(err, "Ingestion task failed")
+		return err, true
+	}
+
+	if !exist {
+		time.Sleep(5 * time.Second)
+	}
+
+	// Check if the new data can be read
+	if err := druidBrokersClient.checkDBReadAccess(oldData); err != nil {
+		return err, true
+	}
+
+	// Drop the unused segments of previous health checks
+	if err := druidOverlordsClient.submitTaskRecurrently(DruidKillTask, ""); err != nil {
+		klog.Error(err, "Kill task for dropping unused segments failed")
+		return err, true
+	}
+	return nil, false
+}
+
+func (c *Client) getData() (string, error) {
+	id, err := c.runSelectQuery()
+	if err != nil {
+		klog.Error(err, "Failed to query the datasource")
+		return "", err
+	}
+	return id, nil
+}
+
+func (c *Client) runSelectQuery() (string, error) {
+	method := "POST"
+	path := "druid/v2/sql"
+
+	data := map[string]interface{}{
+		"query": "SELECT * FROM \"kubedb-datasource\"",
+	}
+	jsonData, err := json.Marshal(data)
+	if err != nil {
+		return "", errors.Wrap(err, "failed to marshal query json data")
+	}
+	rawMessage := json.RawMessage(jsonData)
+	response, err := c.SubmitRequest(method, path, rawMessage)
+	if err != nil {
+		return "", err
+	}
+	if response == nil {
+		return "", errors.New("received nil response from druid")
+	}
+
+	id, err := parseSelectQueryResponse(response, "id")
+	if err != nil {
+		return "", errors.Wrap(err, "failed to parse the response body")
+	}
+
+	if err := closeResponse(response); err != nil {
+		return "", err
+	}
+	return id.(string), nil
+}
+
+func parseSelectQueryResponse(res *druidgo.Response, key string) (interface{}, error) {
+	var responseBody []map[string]interface{}
+	if err := json.NewDecoder(res.Body).Decode(&responseBody); err != nil {
+		return "", errors.Wrap(err, "failed to deserialize the response")
+	}
+	if len(responseBody) == 0 {
+		return nil, errors.New("no rows found in the query response")
+	}
+	value := responseBody[0][key]
+	return value, nil
+}
+
+func (c *Client) updateCoordinatorsWaitBeforeDeletingConfig(value int32) error {
+	data := map[string]interface{}{
+		"millisToWaitBeforeDeleting": value,
+	}
+	if err := c.updateCoordinatorDynamicConfig(data); err != nil {
+		klog.Error(err, "Failed to update coordinator dynamic config")
+		return err
+	}
+	return nil
+}
+
+func (c *Client) updateCoordinatorDynamicConfig(data map[string]interface{}) error {
+	method := "POST"
+	path := "druid/coordinator/v1/config"
+
+	jsonData, err := json.Marshal(data)
+	if err != nil {
+		return err
+	}
+	rawMessage := json.RawMessage(jsonData)
+
+	response, err := c.SubmitRequest(method, path, rawMessage)
+	if err != nil {
+		return err
+	}
+	if err := closeResponse(response); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (c *Client) submitTaskRecurrently(taskType DruidTaskType, data string) error {
+	taskID, err := c.submitTask(taskType, data)
+	if err != nil {
+		klog.Error(err, "Failed to submit task")
+		return err
+	}
+
+	var taskStatus bool
+	for i := 0; i < 10; i++ {
+		taskStatus, err = c.CheckTaskStatus(taskID)
+		if err != nil {
+			klog.Error(err, "Failed to check task status")
+			return err
+		}
+		if taskStatus {
+			klog.Info("Task successful")
+			return nil
+		}
+		time.Sleep(6 * time.Second)
+	}
+	return errors.New("task did not succeed within the retry limit")
+}
+
+func (c *Client) submitTask(taskType DruidTaskType, data string) (string, error) {
+	var task string
+	if taskType == DruidIngestionTask {
+		task = GetIngestionTaskDefinition(data)
+	} else {
+		task = GetKillTaskDefinition()
+	}
+
+	rawMessage := json.RawMessage(task)
+	method := "POST"
+	path := "druid/indexer/v1/task"
+
+	response, err := c.SubmitRequest(method, path, rawMessage)
+	if err != nil {
+		return "", err
+	}
+
+	taskID, err := GetValueFromClusterResponse(response, "task")
+	if err != nil {
+		return "", errors.Wrap(err, "failed to parse response of task api request")
+	}
+	if err = closeResponse(response); err != nil {
+		return "", err
+	}
+	return fmt.Sprintf("%v", taskID), nil
+}
+
+func GetValueFromClusterResponse(res *druidgo.Response, key string) (interface{}, error) {
+	responseBody := make(map[string]interface{})
+	if err := json.NewDecoder(res.Body).Decode(&responseBody); err != nil {
+		return "", errors.Wrap(err, "failed to deserialize the response")
+	}
+	value := responseBody[key]
+	return value, nil
+}
+
+func GetIngestionTaskDefinition(data string) string {
+	task := `{
+		"type": "index_parallel",
+		"spec": {
+			"ioConfig": {
+				"type": "index_parallel",
+				"inputSource": {
+					"type": "inline",
+					"data": "{\"id\": \"%s\", \"name\": \"kubedb-druid\", \"time\": \"2015-09-12T00:46:58.771Z\"}"
+				},
+				"inputFormat": {
+					"type": "json"
+				}
+			},
+			"tuningConfig": {
+				"type": "index_parallel",
+				"partitionsSpec": {
+					"type": "dynamic"
+				}
+			},
+			"dataSchema": {
+				"dataSource": "kubedb-datasource",
+				"timestampSpec": {
+					"column": "time",
+					"format": "iso"
+				},
+				"dimensionsSpec": {
+					"dimensions": ["id", "name", "time"]
+				},
+				"granularitySpec": {
+					"queryGranularity": "none",
+					"rollup": false,
+					"segmentGranularity": "day",
+					"intervals": ["2015-09-12/2015-09-13"]
+				}
+			}
+		}
+	}`
+	task = fmt.Sprintf(task, data)
+	return task
+}
+
+func GetKillTaskDefinition() string {
+	task := `{
+		"type": "kill",
+		"dataSource": "kubedb-datasource",
+		"interval": "2015-09-12/2015-09-13"
+	}`
+	return task
+}
+
+func (c *Client) CheckTaskStatus(taskID string) (bool, error) {
+	method := "GET"
+	path := fmt.Sprintf("druid/indexer/v1/task/%s/status", taskID)
+	response, err := c.SubmitRequest(method, path, nil)
+	if err != nil {
+		return false, errors.Wrap(err, "failed to check task status")
+	}
+
+	statusRes, err := GetValueFromClusterResponse(response, "status")
+	if err != nil {
+		return false, errors.Wrap(err, "failed to parse response of task status request")
+	}
+	statusMap := statusRes.(map[string]interface{})
+	status := statusMap["status"].(string)
+
+	if err = closeResponse(response); err != nil {
+		return false, err
+	}
+	return status == "SUCCESS", nil
+}
+
+func (c *Client) checkDBReadAccess(oldData string) error {
+	klog.Info("waiting for the segments to be available for query...")
+
+	for i := 0; i < 5; i++ {
+		time.Sleep(6 * time.Second)
+
+		data, err := c.getData()
+		if err != nil {
+			klog.Error(err, "failed to read ingested data")
+			return err
+		}
+		if data != oldData {
+			klog.Info("successfully read ingested data")
+			return nil
+		}
+	}
+	return errors.New("failed to read ingested data")
+}
diff --git a/druid/kubedb_client_builder.go b/druid/kubedb_client_builder.go
new file mode 100644
index 000000000..c9148ea3c
--- /dev/null
+++ b/druid/kubedb_client_builder.go
@@ -0,0 +1,109 @@
+/*
+Copyright AppsCode Inc. and Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package druid
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	druidgo "github.com/grafadruid/go-druid"
+	kerr "k8s.io/apimachinery/pkg/api/errors"
+
+	olddbapi "kubedb.dev/apimachinery/apis/kubedb/v1alpha2"
+
+	core "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/klog/v2"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+type KubeDBClientBuilder struct {
+	kc      client.Client
+	db      *olddbapi.Druid
+	url     string
+	podName string
+	ctx     context.Context
+}
+
+func NewKubeDBClientBuilder(kc client.Client, druid *olddbapi.Druid) *KubeDBClientBuilder {
+	return &KubeDBClientBuilder{
+		kc: kc,
+		db: druid,
+	}
+}
+
+func (o *KubeDBClientBuilder) WithURL(url string) *KubeDBClientBuilder {
+	o.url = url
+	return o
+}
+
+func (o *KubeDBClientBuilder) WithPod(podName string) *KubeDBClientBuilder {
+	o.podName = podName
+	return o
+}
+
+func (o *KubeDBClientBuilder) WithContext(ctx context.Context) *KubeDBClientBuilder {
+	o.ctx = ctx
+	return o
+}
+
+func (o *KubeDBClientBuilder) GetDruidClient(nodeType olddbapi.DruidNodeRoleType) (*druidgo.Client, error) {
+	var druidOpts []druidgo.ClientOption
+	if !*o.db.Spec.DisableSecurity {
+		if o.db.Spec.AuthSecret == nil {
+			klog.Error("AuthSecret not set")
+			return nil, errors.New("auth-secret is not set")
+		}
+
+		authSecret := &core.Secret{}
+		err := o.kc.Get(o.ctx, types.NamespacedName{
+			Namespace: o.db.Namespace,
+			Name:      o.db.Spec.AuthSecret.Name,
+		}, authSecret)
+		if err != nil {
+			if kerr.IsNotFound(err) {
+				klog.Error(err, "AuthSecret not found")
+				return nil, errors.New("auth-secret not found")
+			}
+			return nil, err
+		}
+		userName := string(authSecret.Data[core.BasicAuthUsernameKey])
+		password := string(authSecret.Data[core.BasicAuthPasswordKey])
+
+		druidOpts = append(druidOpts, druidgo.WithBasicAuth(userName, password))
+	}
+
+	baseUrl := o.GetNodesAddress(nodeType)
+	druidClient, err := druidgo.NewClient(baseUrl, druidOpts...)
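+	// The client is built from the node's service DNS address (see GetNodesAddress)
+	// plus any basic-auth option collected above; helpers such as CheckNodeHealth and
+	// SubmitRequest in druid/client.go use the returned client to issue the actual requests.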
+ if err != nil { + return nil, err + } + + return druidClient, nil +} + +// GetNodesAddress returns DNS for the nodes based on type of the node +func (o *KubeDBClientBuilder) GetNodesAddress(nodeType olddbapi.DruidNodeRoleType) string { + baseUrl := fmt.Sprintf("http://%s-0.%s.%s.svc.cluster.local:%d", o.db.PetSetName(nodeType), o.db.GoverningServiceName(), o.db.Namespace, o.db.DruidNodeContainerPort(nodeType)) + return baseUrl +} diff --git a/go.mod b/go.mod index 69ea7295e..79b9fbacf 100644 --- a/go.mod +++ b/go.mod @@ -14,6 +14,7 @@ require ( github.com/elastic/go-elasticsearch/v8 v8.4.0 github.com/go-resty/resty/v2 v2.11.0 github.com/go-sql-driver/mysql v1.8.1 + github.com/grafadruid/go-druid v0.0.6 github.com/lib/pq v1.10.7 github.com/michaelklishin/rabbit-hole/v2 v2.16.0 github.com/microsoft/go-mssqldb v1.6.0 @@ -67,10 +68,13 @@ require ( github.com/golang/snappy v0.0.4 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.6.0 // indirect + github.com/google/go-querystring v1.1.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/uuid v1.6.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/go-retryablehttp v0.7.7 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/imdario/mergo v0.3.16 // indirect github.com/jcmturner/aescts/v2 v2.0.0 // indirect @@ -82,6 +86,7 @@ require ( github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.17.7 // indirect github.com/klauspost/cpuid/v2 v2.0.9 // indirect + github.com/magefile/mage v1.11.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect diff --git a/go.sum b/go.sum index 8e5f9bbb2..3d62dfdbf 100644 --- a/go.sum +++ b/go.sum @@ -100,6 +100,8 @@ github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lSh github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= @@ -181,6 +183,9 @@ github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 
h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -194,11 +199,22 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= +github.com/grafadruid/go-druid v0.0.6 h1:Nt9jQrhrtHi1BJICN9aDJgYDmBmc10pJYpQiuwAsxa4= +github.com/grafadruid/go-druid v0.0.6/go.mod h1:KY3a6MrVMKkXgMTwBS9Nrhm1E8OWyR4gd0WzUi8d/zM= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-retryablehttp v0.6.7/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= +github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= @@ -294,6 +310,8 @@ github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw= github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/magefile/mage v1.11.0 h1:C/55Ywp9BpgVVclD3lRnSYCwXTYxmSppIgLeDYlNuls= +github.com/magefile/mage v1.11.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= diff --git a/vendor/github.com/google/go-querystring/LICENSE b/vendor/github.com/google/go-querystring/LICENSE new file mode 100644 index 000000000..ae121a1e4 --- /dev/null +++ b/vendor/github.com/google/go-querystring/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013 Google. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/google/go-querystring/query/encode.go b/vendor/github.com/google/go-querystring/query/encode.go new file mode 100644 index 000000000..91198f819 --- /dev/null +++ b/vendor/github.com/google/go-querystring/query/encode.go @@ -0,0 +1,357 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package query implements encoding of structs into URL query parameters. +// +// As a simple example: +// +// type Options struct { +// Query string `url:"q"` +// ShowAll bool `url:"all"` +// Page int `url:"page"` +// } +// +// opt := Options{ "foo", true, 2 } +// v, _ := query.Values(opt) +// fmt.Print(v.Encode()) // will output: "q=foo&all=true&page=2" +// +// The exact mapping between Go values and url.Values is described in the +// documentation for the Values() function. +package query + +import ( + "bytes" + "fmt" + "net/url" + "reflect" + "strconv" + "strings" + "time" +) + +var timeType = reflect.TypeOf(time.Time{}) + +var encoderType = reflect.TypeOf(new(Encoder)).Elem() + +// Encoder is an interface implemented by any type that wishes to encode +// itself into URL values in a non-standard way. +type Encoder interface { + EncodeValues(key string, v *url.Values) error +} + +// Values returns the url.Values encoding of v. +// +// Values expects to be passed a struct, and traverses it recursively using the +// following encoding rules. +// +// Each exported struct field is encoded as a URL parameter unless +// +// - the field's tag is "-", or +// - the field is empty and its tag specifies the "omitempty" option +// +// The empty values are false, 0, any nil pointer or interface value, any array +// slice, map, or string of length zero, and any type (such as time.Time) that +// returns true for IsZero(). +// +// The URL parameter name defaults to the struct field name but can be +// specified in the struct field's tag value. The "url" key in the struct +// field's tag value is the key name, followed by an optional comma and +// options. 
For example: +// +// // Field is ignored by this package. +// Field int `url:"-"` +// +// // Field appears as URL parameter "myName". +// Field int `url:"myName"` +// +// // Field appears as URL parameter "myName" and the field is omitted if +// // its value is empty +// Field int `url:"myName,omitempty"` +// +// // Field appears as URL parameter "Field" (the default), but the field +// // is skipped if empty. Note the leading comma. +// Field int `url:",omitempty"` +// +// For encoding individual field values, the following type-dependent rules +// apply: +// +// Boolean values default to encoding as the strings "true" or "false". +// Including the "int" option signals that the field should be encoded as the +// strings "1" or "0". +// +// time.Time values default to encoding as RFC3339 timestamps. Including the +// "unix" option signals that the field should be encoded as a Unix time (see +// time.Unix()). The "unixmilli" and "unixnano" options will encode the number +// of milliseconds and nanoseconds, respectively, since January 1, 1970 (see +// time.UnixNano()). Including the "layout" struct tag (separate from the +// "url" tag) will use the value of the "layout" tag as a layout passed to +// time.Format. For example: +// +// // Encode a time.Time as YYYY-MM-DD +// Field time.Time `layout:"2006-01-02"` +// +// Slice and Array values default to encoding as multiple URL values of the +// same name. Including the "comma" option signals that the field should be +// encoded as a single comma-delimited value. Including the "space" option +// similarly encodes the value as a single space-delimited string. Including +// the "semicolon" option will encode the value as a semicolon-delimited string. +// Including the "brackets" option signals that the multiple URL values should +// have "[]" appended to the value name. "numbered" will append a number to +// the end of each incidence of the value name, example: +// name0=value0&name1=value1, etc. Including the "del" struct tag (separate +// from the "url" tag) will use the value of the "del" tag as the delimiter. +// For example: +// +// // Encode a slice of bools as ints ("1" for true, "0" for false), +// // separated by exclamation points "!". +// Field []bool `url:",int" del:"!"` +// +// Anonymous struct fields are usually encoded as if their inner exported +// fields were fields in the outer struct, subject to the standard Go +// visibility rules. An anonymous struct field with a name given in its URL +// tag is treated as having that name, rather than being anonymous. +// +// Non-nil pointer values are encoded as the value pointed to. +// +// Nested structs are encoded including parent fields in value names for +// scoping. e.g: +// +// "user[name]=acme&user[addr][postcode]=1234&user[addr][city]=SFO" +// +// All other values are encoded using their default string representation. +// +// Multiple fields that encode to the same URL parameter name will be included +// as multiple URL values of the same name. +func Values(v interface{}) (url.Values, error) { + values := make(url.Values) + val := reflect.ValueOf(v) + for val.Kind() == reflect.Ptr { + if val.IsNil() { + return values, nil + } + val = val.Elem() + } + + if v == nil { + return values, nil + } + + if val.Kind() != reflect.Struct { + return nil, fmt.Errorf("query: Values() expects struct input. Got %v", val.Kind()) + } + + err := reflectValue(values, val, "") + return values, err +} + +// reflectValue populates the values parameter from the struct fields in val. 
+// Embedded structs are followed recursively (using the rules defined in the +// Values function documentation) breadth-first. +func reflectValue(values url.Values, val reflect.Value, scope string) error { + var embedded []reflect.Value + + typ := val.Type() + for i := 0; i < typ.NumField(); i++ { + sf := typ.Field(i) + if sf.PkgPath != "" && !sf.Anonymous { // unexported + continue + } + + sv := val.Field(i) + tag := sf.Tag.Get("url") + if tag == "-" { + continue + } + name, opts := parseTag(tag) + + if name == "" { + if sf.Anonymous { + v := reflect.Indirect(sv) + if v.IsValid() && v.Kind() == reflect.Struct { + // save embedded struct for later processing + embedded = append(embedded, v) + continue + } + } + + name = sf.Name + } + + if scope != "" { + name = scope + "[" + name + "]" + } + + if opts.Contains("omitempty") && isEmptyValue(sv) { + continue + } + + if sv.Type().Implements(encoderType) { + // if sv is a nil pointer and the custom encoder is defined on a non-pointer + // method receiver, set sv to the zero value of the underlying type + if !reflect.Indirect(sv).IsValid() && sv.Type().Elem().Implements(encoderType) { + sv = reflect.New(sv.Type().Elem()) + } + + m := sv.Interface().(Encoder) + if err := m.EncodeValues(name, &values); err != nil { + return err + } + continue + } + + // recursively dereference pointers. break on nil pointers + for sv.Kind() == reflect.Ptr { + if sv.IsNil() { + break + } + sv = sv.Elem() + } + + if sv.Kind() == reflect.Slice || sv.Kind() == reflect.Array { + var del string + if opts.Contains("comma") { + del = "," + } else if opts.Contains("space") { + del = " " + } else if opts.Contains("semicolon") { + del = ";" + } else if opts.Contains("brackets") { + name = name + "[]" + } else { + del = sf.Tag.Get("del") + } + + if del != "" { + s := new(bytes.Buffer) + first := true + for i := 0; i < sv.Len(); i++ { + if first { + first = false + } else { + s.WriteString(del) + } + s.WriteString(valueString(sv.Index(i), opts, sf)) + } + values.Add(name, s.String()) + } else { + for i := 0; i < sv.Len(); i++ { + k := name + if opts.Contains("numbered") { + k = fmt.Sprintf("%s%d", name, i) + } + values.Add(k, valueString(sv.Index(i), opts, sf)) + } + } + continue + } + + if sv.Type() == timeType { + values.Add(name, valueString(sv, opts, sf)) + continue + } + + if sv.Kind() == reflect.Struct { + if err := reflectValue(values, sv, name); err != nil { + return err + } + continue + } + + values.Add(name, valueString(sv, opts, sf)) + } + + for _, f := range embedded { + if err := reflectValue(values, f, scope); err != nil { + return err + } + } + + return nil +} + +// valueString returns the string representation of a value. 
+func valueString(v reflect.Value, opts tagOptions, sf reflect.StructField) string { + for v.Kind() == reflect.Ptr { + if v.IsNil() { + return "" + } + v = v.Elem() + } + + if v.Kind() == reflect.Bool && opts.Contains("int") { + if v.Bool() { + return "1" + } + return "0" + } + + if v.Type() == timeType { + t := v.Interface().(time.Time) + if opts.Contains("unix") { + return strconv.FormatInt(t.Unix(), 10) + } + if opts.Contains("unixmilli") { + return strconv.FormatInt((t.UnixNano() / 1e6), 10) + } + if opts.Contains("unixnano") { + return strconv.FormatInt(t.UnixNano(), 10) + } + if layout := sf.Tag.Get("layout"); layout != "" { + return t.Format(layout) + } + return t.Format(time.RFC3339) + } + + return fmt.Sprint(v.Interface()) +} + +// isEmptyValue checks if a value should be considered empty for the purposes +// of omitting fields with the "omitempty" option. +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + + type zeroable interface { + IsZero() bool + } + + if z, ok := v.Interface().(zeroable); ok { + return z.IsZero() + } + + return false +} + +// tagOptions is the string following a comma in a struct field's "url" tag, or +// the empty string. It does not include the leading comma. +type tagOptions []string + +// parseTag splits a struct field's url tag into its name and comma-separated +// options. +func parseTag(tag string) (string, tagOptions) { + s := strings.Split(tag, ",") + return s[0], s[1:] +} + +// Contains checks whether the tagOptions contains the specified option. +func (o tagOptions) Contains(option string) bool { + for _, s := range o { + if s == option { + return true + } + } + return false +} diff --git a/vendor/github.com/grafadruid/go-druid/.gitignore b/vendor/github.com/grafadruid/go-druid/.gitignore new file mode 100644 index 000000000..aaea12328 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/.gitignore @@ -0,0 +1,21 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib +*.html +.vscode/* +.idea/ + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +# MAC OS +.DS_Store diff --git a/vendor/github.com/grafadruid/go-druid/LICENSE b/vendor/github.com/grafadruid/go-druid/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
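
For reference, here is a minimal sketch of how the struct-tag encoding described in the package query documentation above behaves. The `ListOptions` struct and the `main` wrapper are illustrative only (they are not part of the vendored code); the tag handling they exercise — custom parameter names and `omitempty` — is the behaviour documented in `query/encode.go`.

```
package main

import (
	"fmt"

	"github.com/google/go-querystring/query"
)

// ListOptions is a hypothetical example struct; the url tags follow the
// rules described in the package query documentation above.
type ListOptions struct {
	Query   string `url:"q"`
	ShowAll bool   `url:"all"`
	Page    int    `url:"page,omitempty"`
}

func main() {
	v, err := query.Values(ListOptions{Query: "foo", ShowAll: true})
	if err != nil {
		panic(err)
	}
	// url.Values.Encode sorts keys; Page is omitted because it is zero.
	fmt.Println(v.Encode()) // all=true&q=foo
}
```

Note that `url.Values.Encode` emits parameters in sorted key order, and fields tagged `omitempty` are dropped when they hold their zero value, which is why `page` does not appear in the output above.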
diff --git a/vendor/github.com/grafadruid/go-druid/Magefile.go b/vendor/github.com/grafadruid/go-druid/Magefile.go new file mode 100644 index 000000000..6f638720b --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/Magefile.go @@ -0,0 +1,232 @@ +//go:build mage +// +build mage + +package main + +import ( + "bytes" + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + "sync" + + "github.com/magefile/mage/mg" + "github.com/magefile/mage/sh" +) + +var ldflags = "" + +// allow user to override go executable by running as GOEXE=xxx make ... on unix-like systems +var goexe = "go" + +// Build is the default that fmt, vet, runs test and builds +var Default = Build + +func init() { + if exe := os.Getenv("GOEXE"); exe != "" { + goexe = exe + } + + // We want to use Go 1.11 modules even if the source lives inside GOPATH. + // The default is "auto". + os.Setenv("GO111MODULE", "on") +} + +// Fmt run gofmt linter +func Fmt() error { + if !isGoLatest() { + return nil + } + pkgs, err := packages() + if err != nil { + return err + } + failed := false + first := true + for _, pkg := range pkgs { + files, err := filepath.Glob(filepath.Join(pkg, "*.go")) + if err != nil { + return nil + } + for _, f := range files { + // gofmt doesn't exit with non-zero when it finds unformatted code + // so we have to explicitly look for output, and if we find any, we + // should fail this target. + s, err := sh.Output("gofmt", "-l", f) + if err != nil { + fmt.Printf("ERROR: running gofmt on %q: %v\n", f, err) + failed = true + } + if s != "" { + if first { + fmt.Println("The following files are not gofmt'ed:") + first = false + } + failed = true + fmt.Println(s) + } + } + } + if failed { + return errors.New("improperly formatted go files") + } + return nil +} + +// Vet run go vet linter +func Vet() error { + if err := sh.Run(goexe, "vet", "./..."); err != nil { + return fmt.Errorf("error running go vet: %v", err) + } + return nil +} + +// Run tests +func Test() error { + env := map[string]string{"GOFLAGS": testGoFlags()} + return runCmd(env, goexe, "test", "./...", buildFlags(), "-tags", buildTags()) +} + +// TestRace run tests with race detector +func TestRace() error { + env := map[string]string{"GOFLAGS": testGoFlags()} + return runCmd(env, goexe, "test", "-race", "./...", buildFlags(), "-tags", buildTags()) +} + +// TestCoverHTML generates test coverage report +func TestCoverHTML() error { + const ( + coverAll = "coverage-all.out" + cover = "coverage.out" + ) + f, err := os.Create(coverAll) + if err != nil { + return err + } + defer f.Close() + if _, err := f.Write([]byte("mode: count")); err != nil { + return err + } + pkgs, err := packages() + if err != nil { + return err + } + for _, pkg := range pkgs { + if err := sh.Run(goexe, "test", "-coverprofile="+cover, pkg); err != nil { + return err + } + b, err := ioutil.ReadFile(cover) + if err != nil { + if os.IsNotExist(err) { + continue + } + return err + } + idx := bytes.Index(b, []byte{'\n'}) + b = b[idx+1:] + if _, err := f.Write(b); err != nil { + return err + } + } + if err := f.Close(); err != nil { + return err + } + return sh.Run(goexe, "tool", "cover", "-html="+coverAll) +} + +// Build run linters, vet and tests +func Build() error { + if strings.Contains(runtime.Version(), "1.8") { + // Go 1.8 doesn't play along with go test ./... and /vendor. + // We could fix that, but that would take time. 
+ fmt.Printf("Skip Build on %s\n", runtime.Version()) + return nil + } + + // TODO: Add lint after fixing errors + mg.Deps(Fmt, Vet, TestRace) + return sh.RunV(goexe, "build", "-ldflags="+ldflags, "./...") +} + +var ( + pkgPrefixLen = len("github.com/grafadruid/go-druid") + pkgs []string + pkgsInit sync.Once +) + +// testGoFlags returns test flags that need to be set +func testGoFlags() string { + return "-v" +} + +func packages() ([]string, error) { + var err error + pkgsInit.Do(func() { + var s string + s, err = sh.Output(goexe, "list", "./...") + if err != nil { + return + } + pkgs = strings.Split(s, "\n") + for i := range pkgs { + pkgs[i] = "." + pkgs[i][pkgPrefixLen:] + } + }) + return pkgs, err +} + +func buildFlags() []string { + if runtime.GOOS == "windows" { + return []string{"-buildmode", "exe"} + } + return nil +} + +func buildTags() string { + return "none" +} + +func isGoLatest() bool { + return strings.Contains(runtime.Version(), "1.14") +} + +func runCmd(env map[string]string, cmd string, args ...interface{}) error { + if mg.Verbose() { + return runWith(env, cmd, args...) + } + output, err := sh.OutputWith(env, cmd, argsToStrings(args...)...) + if err != nil { + fmt.Fprint(os.Stderr, output) + } + + return err +} + +func runWith(env map[string]string, cmd string, inArgs ...interface{}) error { + s := argsToStrings(inArgs...) + return sh.RunWith(env, cmd, s...) +} + +func argsToStrings(v ...interface{}) []string { + var args []string + for _, arg := range v { + switch v := arg.(type) { + case string: + if v != "" { + args = append(args, v) + } + case []string: + if v != nil { + args = append(args, v...) + } + default: + panic("invalid type") + } + } + + return args +} diff --git a/vendor/github.com/grafadruid/go-druid/README.md b/vendor/github.com/grafadruid/go-druid/README.md new file mode 100644 index 000000000..af0ad0e81 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/README.md @@ -0,0 +1,30 @@ +[![Join us](https://img.shields.io/static/v1?label=slack&message=Join%20Us&color=blue)](https://grafadruid.slack.com) + +# go-druid +A Golang client for Druid. +Now supports Query API and Common API. + +### Development + +#### Testing +`go-druid` uses mage to run tests locally. + Install Mage: +``` + git clone https://github.com/magefile/mage + cd mage + go run bootstrap.go +``` +`mage -l` provides a list of targets that can be run. Default is `Check` + +``` +Targets: + build runs go mod download and then installs the binary. 
+ check* run linters and tests + fmt run gofmt linter + lint run golint linter https://github.com/golang/lint + testCoverHTML generates test coverage report + testRace run tests with race detector + vet run go vet linter + +* default target +``` \ No newline at end of file diff --git a/vendor/github.com/grafadruid/go-druid/builder/aggregation/aggregation.go b/vendor/github.com/grafadruid/go-druid/builder/aggregation/aggregation.go new file mode 100644 index 000000000..a9ec22617 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/aggregation/aggregation.go @@ -0,0 +1,115 @@ +package aggregation + +import ( + "encoding/json" + "errors" + + "github.com/grafadruid/go-druid/builder" +) + +type Base struct { + Typ builder.ComponentType `json:"type,omitempty"` + Name string `json:"name,omitempty"` +} + +func (b *Base) SetType(typ builder.ComponentType) *Base { + b.Typ = typ + return b +} + +func (b *Base) SetName(name string) *Base { + b.Name = name + return b +} + +func (b *Base) Type() builder.ComponentType { + return b.Typ +} + +func Load(data []byte) (builder.Aggregator, error) { + var a builder.Aggregator + if string(data) == "null" { + return a, nil + } + var t struct { + Typ builder.ComponentType `json:"type,omitempty"` + } + if err := json.Unmarshal(data, &t); err != nil { + return nil, err + } + switch t.Typ { + case "cardinality": + a = NewCardinality() + case "count": + a = NewCount() + case "doubleAny": + a = NewDoubleAny() + case "doubleFirst": + a = NewDoubleFirst() + case "doubleLast": + a = NewDoubleLast() + case "doubleMax": + a = NewDoubleMax() + case "doubleMean": + a = NewDoubleMean() + case "doubleMin": + a = NewDoubleMin() + case "doubleSum": + a = NewDoubleSum() + case "filtered": + a = NewFiltered() + case "floatAny": + a = NewFloatAny() + case "floatFirst": + a = NewFloatFirst() + case "floatLast": + a = NewFloatLast() + case "floatMax": + a = NewFloatMax() + case "floatMin": + a = NewFloatMin() + case "floatSum": + a = NewFloatSum() + case "histogram": + a = NewHistogram() + case "HLLSketchBuild": + a = NewHLLSketchBuild() + case "HLLSketchMerge": + a = NewHLLSketchMerge() + case "hyperUnique": + a = NewHyperUnique() + case "javascript": + a = NewJavascript() + case "longAny": + a = NewLongAny() + case "longFirst": + a = NewLongFirst() + case "longLast": + a = NewLongLast() + case "longMax": + a = NewLongMax() + case "longMin": + a = NewLongMin() + case "longSum": + a = NewLongSum() + case "stringAny": + a = NewStringAny() + case "stringFirstFolding": + a = NewStringFirstFolding() + case "stringFirst": + a = NewStringFirst() + case "stringLastFolding": + a = NewStringLastFolding() + case "stringLast": + a = NewStringLast() + case "tDigestSketch": + a = NewTDigestSketch() + case "quantilesDoublesSketch": + a = NewQuantilesDoublesSketch() + case "thetaSketch": + a = NewThetaSketch() + default: + return nil, errors.New("unsupported aggregation type") + } + return a, json.Unmarshal(data, &a) +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/aggregation/cardinality.go b/vendor/github.com/grafadruid/go-druid/builder/aggregation/cardinality.go new file mode 100644 index 000000000..a237bf5f3 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/aggregation/cardinality.go @@ -0,0 +1,34 @@ +package aggregation + +type Cardinality struct { + Base + Fields []string `json:"fields,omitempty"` + ByRow *bool `json:"byRow,omitempty"` + Round *bool `json:"round,omitempty"` +} + +func NewCardinality() *Cardinality { + c := &Cardinality{} + 
c.SetType("cardinality") + return c +} + +func (c *Cardinality) SetName(name string) *Cardinality { + c.Base.SetName(name) + return c +} + +func (c *Cardinality) SetFields(fields []string) *Cardinality { + c.Fields = fields + return c +} + +func (c *Cardinality) SetByRow(byRow bool) *Cardinality { + c.ByRow = &byRow + return c +} + +func (c *Cardinality) SetRound(round bool) *Cardinality { + c.Round = &round + return c +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/aggregation/count.go b/vendor/github.com/grafadruid/go-druid/builder/aggregation/count.go new file mode 100644 index 000000000..87e78d859 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/aggregation/count.go @@ -0,0 +1,16 @@ +package aggregation + +type Count struct { + Base +} + +func NewCount() *Count { + c := &Count{} + c.SetType("count") + return c +} + +func (c *Count) SetName(name string) *Count { + c.Base.SetName(name) + return c +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/aggregation/double_any.go b/vendor/github.com/grafadruid/go-druid/builder/aggregation/double_any.go new file mode 100644 index 000000000..5e187d084 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/aggregation/double_any.go @@ -0,0 +1,22 @@ +package aggregation + +type DoubleAny struct { + Base + FieldName string `json:"fieldName,omitempty"` +} + +func NewDoubleAny() *DoubleAny { + d := &DoubleAny{} + d.SetType("doubleAny") + return d +} + +func (d *DoubleAny) SetName(name string) *DoubleAny { + d.Base.SetName(name) + return d +} + +func (d *DoubleAny) SetFieldName(fieldName string) *DoubleAny { + d.FieldName = fieldName + return d +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/aggregation/double_first.go b/vendor/github.com/grafadruid/go-druid/builder/aggregation/double_first.go new file mode 100644 index 000000000..e2ef8047e --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/aggregation/double_first.go @@ -0,0 +1,22 @@ +package aggregation + +type DoubleFirst struct { + Base + FieldName string `json:"fieldName,omitempty"` +} + +func NewDoubleFirst() *DoubleFirst { + d := &DoubleFirst{} + d.SetType("doubleFirst") + return d +} + +func (d *DoubleFirst) SetName(name string) *DoubleFirst { + d.Base.SetName(name) + return d +} + +func (d *DoubleFirst) SetFieldName(fieldName string) *DoubleFirst { + d.FieldName = fieldName + return d +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/aggregation/double_last.go b/vendor/github.com/grafadruid/go-druid/builder/aggregation/double_last.go new file mode 100644 index 000000000..1aae5c211 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/aggregation/double_last.go @@ -0,0 +1,22 @@ +package aggregation + +type DoubleLast struct { + Base + FieldName string `json:"fieldName,omitempty"` +} + +func NewDoubleLast() *DoubleLast { + d := &DoubleLast{} + d.SetType("doubleLast") + return d +} + +func (d *DoubleLast) SetName(name string) *DoubleLast { + d.Base.SetName(name) + return d +} + +func (d *DoubleLast) SetFieldName(fieldName string) *DoubleLast { + d.FieldName = fieldName + return d +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/aggregation/double_max.go b/vendor/github.com/grafadruid/go-druid/builder/aggregation/double_max.go new file mode 100644 index 000000000..77c414384 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/aggregation/double_max.go @@ -0,0 +1,28 @@ +package aggregation + +type DoubleMax struct { + Base + FieldName string `json:"fieldName,omitempty"` 
+ Expression string `json:"expression,omitempty"` +} + +func NewDoubleMax() *DoubleMax { + d := &DoubleMax{} + d.SetType("doubleMax") + return d +} + +func (d *DoubleMax) SetName(name string) *DoubleMax { + d.Base.SetName(name) + return d +} + +func (d *DoubleMax) SetFieldName(fieldName string) *DoubleMax { + d.FieldName = fieldName + return d +} + +func (d *DoubleMax) SetExpression(expression string) *DoubleMax { + d.Expression = expression + return d +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/aggregation/double_mean.go b/vendor/github.com/grafadruid/go-druid/builder/aggregation/double_mean.go new file mode 100644 index 000000000..d210d7966 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/aggregation/double_mean.go @@ -0,0 +1,22 @@ +package aggregation + +type DoubleMean struct { + Base + FieldName string `json:"fieldName,omitempty"` +} + +func NewDoubleMean() *DoubleMean { + d := &DoubleMean{} + d.SetType("doubleMean") + return d +} + +func (d *DoubleMean) SetName(name string) *DoubleMean { + d.Base.SetName(name) + return d +} + +func (d *DoubleMean) SetFieldName(fieldName string) *DoubleMean { + d.FieldName = fieldName + return d +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/aggregation/double_min.go b/vendor/github.com/grafadruid/go-druid/builder/aggregation/double_min.go new file mode 100644 index 000000000..fd5cd7051 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/aggregation/double_min.go @@ -0,0 +1,28 @@ +package aggregation + +type DoubleMin struct { + Base + FieldName string `json:"fieldName,omitempty"` + Expression string `json:"expression,omitempty"` +} + +func NewDoubleMin() *DoubleMin { + d := &DoubleMin{} + d.SetType("doubleMin") + return d +} + +func (d *DoubleMin) SetName(name string) *DoubleMin { + d.Base.SetName(name) + return d +} + +func (d *DoubleMin) SetFieldName(fieldName string) *DoubleMin { + d.FieldName = fieldName + return d +} + +func (d *DoubleMin) SetExpression(expression string) *DoubleMin { + d.Expression = expression + return d +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/aggregation/double_sum.go b/vendor/github.com/grafadruid/go-druid/builder/aggregation/double_sum.go new file mode 100644 index 000000000..4687b7208 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/aggregation/double_sum.go @@ -0,0 +1,28 @@ +package aggregation + +type DoubleSum struct { + Base + FieldName string `json:"fieldName,omitempty"` + Expression string `json:"expression,omitempty"` +} + +func NewDoubleSum() *DoubleSum { + d := &DoubleSum{} + d.SetType("doubleSum") + return d +} + +func (d *DoubleSum) SetName(name string) *DoubleSum { + d.Base.SetName(name) + return d +} + +func (d *DoubleSum) SetFieldName(fieldName string) *DoubleSum { + d.FieldName = fieldName + return d +} + +func (d *DoubleSum) SetExpression(expression string) *DoubleSum { + d.Expression = expression + return d +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/aggregation/filtered.go b/vendor/github.com/grafadruid/go-druid/builder/aggregation/filtered.go new file mode 100644 index 000000000..763f41d54 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/aggregation/filtered.go @@ -0,0 +1,57 @@ +package aggregation + +import ( + "encoding/json" + "github.com/grafadruid/go-druid/builder" + "github.com/grafadruid/go-druid/builder/filter" +) + +type Filtered struct { + Base + Aggregator builder.Aggregator `json:"aggregator,omitempty"` + Filter builder.Filter `json:"filter,omitempty"` +} + 
+func NewFiltered() *Filtered { + f := &Filtered{} + f.SetType("filtered") + return f +} + +func (f *Filtered) SetName(name string) *Filtered { + f.Base.SetName(name) + return f +} + +func (f *Filtered) SetAggregator(aggregator builder.Aggregator) *Filtered { + f.Aggregator = aggregator + return f +} + +func (f *Filtered) SetFilter(filter builder.Filter) *Filtered { + f.Filter = filter + return f +} + +func (f *Filtered) UnmarshalJSON(data []byte) error { + var tmp struct { + Base + Aggregator json.RawMessage `json:"aggregator,omitempty"` + Filter json.RawMessage `json:"filter,omitempty"` + } + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } + a, err := Load(tmp.Aggregator) + if err != nil { + return err + } + filter, err := filter.Load(tmp.Filter) + if err != nil { + return err + } + f.Base = tmp.Base + f.Aggregator = a + f.Filter = filter + return nil +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/aggregation/float_any.go b/vendor/github.com/grafadruid/go-druid/builder/aggregation/float_any.go new file mode 100644 index 000000000..1ea3b61d9 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/aggregation/float_any.go @@ -0,0 +1,22 @@ +package aggregation + +type FloatAny struct { + Base + FieldName string `json:"fieldName,omitempty"` +} + +func NewFloatAny() *FloatAny { + f := &FloatAny{} + f.SetType("floatAny") + return f +} + +func (f *FloatAny) SetName(name string) *FloatAny { + f.Base.SetName(name) + return f +} + +func (f *FloatAny) SetFieldName(fieldName string) *FloatAny { + f.FieldName = fieldName + return f +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/aggregation/float_first.go b/vendor/github.com/grafadruid/go-druid/builder/aggregation/float_first.go new file mode 100644 index 000000000..b1f817f13 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/aggregation/float_first.go @@ -0,0 +1,22 @@ +package aggregation + +type FloatFirst struct { + Base + FieldName string `json:"fieldName,omitempty"` +} + +func NewFloatFirst() *FloatFirst { + f := &FloatFirst{} + f.SetType("floatFirst") + return f +} + +func (f *FloatFirst) SetName(name string) *FloatFirst { + f.Base.SetName(name) + return f +} + +func (f *FloatFirst) SetFieldName(fieldName string) *FloatFirst { + f.FieldName = fieldName + return f +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/aggregation/float_last.go b/vendor/github.com/grafadruid/go-druid/builder/aggregation/float_last.go new file mode 100644 index 000000000..2caec511f --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/aggregation/float_last.go @@ -0,0 +1,22 @@ +package aggregation + +type FloatLast struct { + Base + FieldName string `json:"fieldName,omitempty"` +} + +func NewFloatLast() *FloatLast { + f := &FloatLast{} + f.SetType("floatLast") + return f +} + +func (f *FloatLast) SetName(name string) *FloatLast { + f.Base.SetName(name) + return f +} + +func (f *FloatLast) SetFieldName(fieldName string) *FloatLast { + f.FieldName = fieldName + return f +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/aggregation/float_max.go b/vendor/github.com/grafadruid/go-druid/builder/aggregation/float_max.go new file mode 100644 index 000000000..95a9e2bae --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/aggregation/float_max.go @@ -0,0 +1,28 @@ +package aggregation + +type FloatMax struct { + Base + FieldName string `json:"fieldName,omitempty"` + Expression string `json:"expression,omitempty"` +} + +func NewFloatMax() *FloatMax { + f := 
&FloatMax{} + f.SetType("floatMax") + return f +} + +func (f *FloatMax) SetName(name string) *FloatMax { + f.Base.SetName(name) + return f +} + +func (f *FloatMax) SetFieldName(fieldName string) *FloatMax { + f.FieldName = fieldName + return f +} + +func (f *FloatMax) SetExpression(expression string) *FloatMax { + f.Expression = expression + return f +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/aggregation/float_min.go b/vendor/github.com/grafadruid/go-druid/builder/aggregation/float_min.go new file mode 100644 index 000000000..09c7e79ad --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/aggregation/float_min.go @@ -0,0 +1,28 @@ +package aggregation + +type FloatMin struct { + Base + FieldName string `json:"fieldName,omitempty"` + Expression string `json:"expression,omitempty"` +} + +func NewFloatMin() *FloatMin { + f := &FloatMin{} + f.SetType("floatMin") + return f +} + +func (f *FloatMin) SetName(name string) *FloatMin { + f.Base.SetName(name) + return f +} + +func (f *FloatMin) SetFieldName(fieldName string) *FloatMin { + f.FieldName = fieldName + return f +} + +func (f *FloatMin) SetExpression(expression string) *FloatMin { + f.Expression = expression + return f +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/aggregation/float_sum.go b/vendor/github.com/grafadruid/go-druid/builder/aggregation/float_sum.go new file mode 100644 index 000000000..0f37c42a3 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/aggregation/float_sum.go @@ -0,0 +1,28 @@ +package aggregation + +type FloatSum struct { + Base + FieldName string `json:"fieldName,omitempty"` + Expression string `json:"expression,omitempty"` +} + +func NewFloatSum() *FloatSum { + f := &FloatSum{} + f.SetType("floatSum") + return f +} + +func (f *FloatSum) SetName(name string) *FloatSum { + f.Base.SetName(name) + return f +} + +func (f *FloatSum) SetFieldName(fieldName string) *FloatSum { + f.FieldName = fieldName + return f +} + +func (f *FloatSum) SetExpression(expression string) *FloatSum { + f.Expression = expression + return f +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/aggregation/histogram.go b/vendor/github.com/grafadruid/go-druid/builder/aggregation/histogram.go new file mode 100644 index 000000000..9d8c5ab6d --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/aggregation/histogram.go @@ -0,0 +1,28 @@ +package aggregation + +type Histogram struct { + Base + FieldName string `json:"fieldName,omitempty"` + Breaks []float64 `json:"breaks,omitempty"` +} + +func NewHistogram() *Histogram { + h := &Histogram{} + h.SetType("histogram") + return h +} + +func (h *Histogram) SetName(name string) *Histogram { + h.Base.SetName(name) + return h +} + +func (h *Histogram) SetFieldName(fieldName string) *Histogram { + h.FieldName = fieldName + return h +} + +func (h *Histogram) SetBreaks(breaks []float64) *Histogram { + h.Breaks = breaks + return h +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/aggregation/hll_sketch.go b/vendor/github.com/grafadruid/go-druid/builder/aggregation/hll_sketch.go new file mode 100644 index 000000000..c5885fe93 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/aggregation/hll_sketch.go @@ -0,0 +1,55 @@ +package aggregation + +// HLLSketch holds the HLL sketch struct based on +// Aggregator section in https://druid.apache.org/docs/latest/development/extensions-core/datasketches-hll.html +type HLLSketch struct { + Base + FieldName string `json:"fieldName,omitempty"` + LgK int64 
`json:"lgK,omitempty"` + TgtHLLType string `json:"tgtHllType,omitempty"` + Round *bool `json:"round,omitempty"` +} + +// NewHLLSketchBuild create a new instance of HLLSketch with type HLLSketchBuild +func NewHLLSketchBuild() *HLLSketch { + t := &HLLSketch{} + t.Base.SetType("HLLSketchBuild") + return t +} + +// NewHLLSketchMerge create a new instance of HLLSketch with type HLLSketchMerge +func NewHLLSketchMerge() *HLLSketch { + t := &HLLSketch{} + t.Base.SetType("HLLSketchMerge") + return t +} + +// SetName set name +func (t *HLLSketch) SetName(name string) *HLLSketch { + t.Base.SetName(name) + return t +} + +// SetFieldName set fieldName +func (t *HLLSketch) SetFieldName(fieldName string) *HLLSketch { + t.FieldName = fieldName + return t +} + +// SetLgK set lgK. The value needs to be in the [4, 21] range +func (t *HLLSketch) SetLgK(lgk int64) *HLLSketch { + t.LgK = lgk + return t +} + +// SetTgtHllType set tgtHllType +func (t *HLLSketch) SetTgtHLLType(tgtHLLType string) *HLLSketch { + t.TgtHLLType = tgtHLLType + return t +} + +// SetRound set round. +func (t *HLLSketch) SetRound(round bool) *HLLSketch { + t.Round = &round + return t +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/aggregation/hyper_unique.go b/vendor/github.com/grafadruid/go-druid/builder/aggregation/hyper_unique.go new file mode 100644 index 000000000..ae3f586ab --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/aggregation/hyper_unique.go @@ -0,0 +1,34 @@ +package aggregation + +type HyperUnique struct { + Base + FieldName string `json:"fieldName,omitempty"` + IsInputHyperUnique *bool `json:"isInputHyperUnique,omitempty"` + Round *bool `json:"round,omitempty"` +} + +func NewHyperUnique() *HyperUnique { + h := &HyperUnique{} + h.SetType("hyperUnique") + return h +} + +func (h *HyperUnique) SetName(name string) *HyperUnique { + h.Base.SetName(name) + return h +} + +func (h *HyperUnique) SetFieldName(fieldName string) *HyperUnique { + h.FieldName = fieldName + return h +} + +func (h *HyperUnique) SetIsInputHyperUnique(isInputHyperUnique bool) *HyperUnique { + h.IsInputHyperUnique = &isInputHyperUnique + return h +} + +func (h *HyperUnique) SetRound(round bool) *HyperUnique { + h.Round = &round + return h +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/aggregation/javascript.go b/vendor/github.com/grafadruid/go-druid/builder/aggregation/javascript.go new file mode 100644 index 000000000..137d71026 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/aggregation/javascript.go @@ -0,0 +1,40 @@ +package aggregation + +type Javascript struct { + Base + FieldNames []string `json:"fieldNames,omitempty"` + FnAggregate string `json:"fnAggregate,omitempty"` + FnReset string `json:"fnReset,omitempty"` + FnCombine string `json:"fnCombine,omitempty"` +} + +func NewJavascript() *Javascript { + j := &Javascript{} + j.SetType("javascript") + return j +} + +func (j *Javascript) SetName(name string) *Javascript { + j.Base.SetName(name) + return j +} + +func (j *Javascript) SetFieldNames(fieldNames []string) *Javascript { + j.FieldNames = fieldNames + return j +} + +func (j *Javascript) SetFnAggregate(fnAggregate string) *Javascript { + j.FnAggregate = fnAggregate + return j +} + +func (j *Javascript) SetFnReset(fnReset string) *Javascript { + j.FnReset = fnReset + return j +} + +func (j *Javascript) SetFnCombine(fnCombine string) *Javascript { + j.FnCombine = fnCombine + return j +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/aggregation/long_any.go 
b/vendor/github.com/grafadruid/go-druid/builder/aggregation/long_any.go new file mode 100644 index 000000000..d8953ee67 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/aggregation/long_any.go @@ -0,0 +1,22 @@ +package aggregation + +type LongAny struct { + Base + FieldName string `json:"fieldName,omitempty"` +} + +func NewLongAny() *LongAny { + l := &LongAny{} + l.SetType("longAny") + return l +} + +func (l *LongAny) SetName(name string) *LongAny { + l.Base.SetName(name) + return l +} + +func (l *LongAny) SetFieldName(fieldName string) *LongAny { + l.FieldName = fieldName + return l +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/aggregation/long_first.go b/vendor/github.com/grafadruid/go-druid/builder/aggregation/long_first.go new file mode 100644 index 000000000..fd0e7f912 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/aggregation/long_first.go @@ -0,0 +1,22 @@ +package aggregation + +type LongFirst struct { + Base + FieldName string `json:"fieldName,omitempty"` +} + +func NewLongFirst() *LongFirst { + l := &LongFirst{} + l.SetType("longFirst") + return l +} + +func (l *LongFirst) SetName(name string) *LongFirst { + l.Base.SetName(name) + return l +} + +func (l *LongFirst) SetFieldName(fieldName string) *LongFirst { + l.FieldName = fieldName + return l +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/aggregation/long_last.go b/vendor/github.com/grafadruid/go-druid/builder/aggregation/long_last.go new file mode 100644 index 000000000..605d7fc5c --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/aggregation/long_last.go @@ -0,0 +1,22 @@ +package aggregation + +type LongLast struct { + Base + FieldName string `json:"fieldName,omitempty"` +} + +func NewLongLast() *LongLast { + l := &LongLast{} + l.SetType("longLast") + return l +} + +func (l *LongLast) SetName(name string) *LongLast { + l.Base.SetName(name) + return l +} + +func (l *LongLast) SetFieldName(fieldName string) *LongLast { + l.FieldName = fieldName + return l +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/aggregation/long_max.go b/vendor/github.com/grafadruid/go-druid/builder/aggregation/long_max.go new file mode 100644 index 000000000..f386433e4 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/aggregation/long_max.go @@ -0,0 +1,28 @@ +package aggregation + +type LongMax struct { + Base + FieldName string `json:"fieldName,omitempty"` + Expression string `json:"expression,omitempty"` +} + +func NewLongMax() *LongMax { + l := &LongMax{} + l.SetType("longMax") + return l +} + +func (l *LongMax) SetName(name string) *LongMax { + l.Base.SetName(name) + return l +} + +func (l *LongMax) SetFieldName(fieldName string) *LongMax { + l.FieldName = fieldName + return l +} + +func (l *LongMax) SetExpression(expression string) *LongMax { + l.Expression = expression + return l +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/aggregation/long_min.go b/vendor/github.com/grafadruid/go-druid/builder/aggregation/long_min.go new file mode 100644 index 000000000..05563876b --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/aggregation/long_min.go @@ -0,0 +1,28 @@ +package aggregation + +type LongMin struct { + Base + FieldName string `json:"fieldName,omitempty"` + Expression string `json:"expression,omitempty"` +} + +func NewLongMin() *LongMin { + l := &LongMin{} + l.SetType("longMin") + return l +} + +func (l *LongMin) SetName(name string) *LongMin { + l.Base.SetName(name) + return l +} + +func (l *LongMin) 
SetFieldName(fieldName string) *LongMin { + l.FieldName = fieldName + return l +} + +func (l *LongMin) SetExpression(expression string) *LongMin { + l.Expression = expression + return l +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/aggregation/long_sum.go b/vendor/github.com/grafadruid/go-druid/builder/aggregation/long_sum.go new file mode 100644 index 000000000..661e3a053 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/aggregation/long_sum.go @@ -0,0 +1,28 @@ +package aggregation + +type LongSum struct { + Base + FieldName string `json:"fieldName,omitempty"` + Expression string `json:"expression,omitempty"` +} + +func NewLongSum() *LongSum { + l := &LongSum{} + l.SetType("longSum") + return l +} + +func (l *LongSum) SetName(name string) *LongSum { + l.Base.SetName(name) + return l +} + +func (l *LongSum) SetFieldName(fieldName string) *LongSum { + l.FieldName = fieldName + return l +} + +func (l *LongSum) SetExpression(expression string) *LongSum { + l.Expression = expression + return l +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/aggregation/quantiles_doubles_sketch.go b/vendor/github.com/grafadruid/go-druid/builder/aggregation/quantiles_doubles_sketch.go new file mode 100644 index 000000000..02ea984f9 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/aggregation/quantiles_doubles_sketch.go @@ -0,0 +1,34 @@ +package aggregation + +// QuantilesDoublesSketch holds the Quantiles Doubles sketch based on +// https://druid.apache.org/docs/latest/development/extensions-core/datasketches-quantiles.html +type QuantilesDoublesSketch struct { + Base + FieldName string `json:"fieldName,omitempty"` + K int64 `json:"k,omitempty"` +} + +// NewQuantilesDoublesSketch create a new instance of QuantilesDoublesSketch with type QuantilesDoublesSketch +func NewQuantilesDoublesSketch() *QuantilesDoublesSketch { + q := &QuantilesDoublesSketch{} + q.Base.SetType("quantilesDoublesSketch") + return q +} + +// SetName set name +func (q *QuantilesDoublesSketch) SetName(name string) *QuantilesDoublesSketch { + q.Base.SetName(name) + return q +} + +// SetFieldName set fieldName +func (q *QuantilesDoublesSketch) SetFieldName(fieldName string) *QuantilesDoublesSketch { + q.FieldName = fieldName + return q +} + +// SetK set K. 
The value needs to must be a power of 2 from 2 to 32768 +func (q *QuantilesDoublesSketch) SetK(k int64) *QuantilesDoublesSketch { + q.K = k + return q +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/aggregation/string_any.go b/vendor/github.com/grafadruid/go-druid/builder/aggregation/string_any.go new file mode 100644 index 000000000..cfd9fd764 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/aggregation/string_any.go @@ -0,0 +1,28 @@ +package aggregation + +type StringAny struct { + Base + FieldName string `json:"fieldName,omitempty"` + MaxStringBytes int64 `json:"maxStringBytes,omitempty"` +} + +func NewStringAny() *StringAny { + s := &StringAny{} + s.SetType("stringAny") + return s +} + +func (s *StringAny) SetName(name string) *StringAny { + s.Base.SetName(name) + return s +} + +func (s *StringAny) SetFieldName(fieldName string) *StringAny { + s.FieldName = fieldName + return s +} + +func (s *StringAny) SetMaxStringBytes(maxStringBytes int64) *StringAny { + s.MaxStringBytes = maxStringBytes + return s +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/aggregation/string_first.go b/vendor/github.com/grafadruid/go-druid/builder/aggregation/string_first.go new file mode 100644 index 000000000..c9e88fe4b --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/aggregation/string_first.go @@ -0,0 +1,28 @@ +package aggregation + +type StringFirst struct { + Base + FieldName string `json:"fieldName,omitempty"` + MaxStringBytes int64 `json:"maxStringBytes,omitempty"` +} + +func NewStringFirst() *StringFirst { + s := &StringFirst{} + s.SetType("stringFirst") + return s +} + +func (s *StringFirst) SetName(name string) *StringFirst { + s.Base.SetName(name) + return s +} + +func (s *StringFirst) SetFieldName(fieldName string) *StringFirst { + s.FieldName = fieldName + return s +} + +func (s *StringFirst) SetMaxStringBytes(maxStringBytes int64) *StringFirst { + s.MaxStringBytes = maxStringBytes + return s +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/aggregation/string_first_folding.go b/vendor/github.com/grafadruid/go-druid/builder/aggregation/string_first_folding.go new file mode 100644 index 000000000..ac36f317e --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/aggregation/string_first_folding.go @@ -0,0 +1,28 @@ +package aggregation + +type StringFirstFolding struct { + Base + FieldName string `json:"fieldName,omitempty"` + MaxStringBytes int64 `json:"maxStringBytes,omitempty"` +} + +func NewStringFirstFolding() *StringFirstFolding { + s := &StringFirstFolding{} + s.SetType("stringFirstFolding") + return s +} + +func (s *StringFirstFolding) SetName(name string) *StringFirstFolding { + s.Base.SetName(name) + return s +} + +func (s *StringFirstFolding) SetFieldName(fieldName string) *StringFirstFolding { + s.FieldName = fieldName + return s +} + +func (s *StringFirstFolding) SetMaxStringBytes(maxStringBytes int64) *StringFirstFolding { + s.MaxStringBytes = maxStringBytes + return s +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/aggregation/string_last.go b/vendor/github.com/grafadruid/go-druid/builder/aggregation/string_last.go new file mode 100644 index 000000000..13e995783 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/aggregation/string_last.go @@ -0,0 +1,28 @@ +package aggregation + +type StringLast struct { + Base + FieldName string `json:"fieldName,omitempty"` + MaxStringBytes int64 `json:"maxStringBytes,omitempty"` +} + +func NewStringLast() *StringLast { + s := 
&StringLast{} + s.SetType("stringLast") + return s +} + +func (s *StringLast) SetName(name string) *StringLast { + s.Base.SetName(name) + return s +} + +func (s *StringLast) SetFieldName(fieldName string) *StringLast { + s.FieldName = fieldName + return s +} + +func (s *StringLast) SetMaxStringBytes(maxStringBytes int64) *StringLast { + s.MaxStringBytes = maxStringBytes + return s +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/aggregation/string_last_folding.go b/vendor/github.com/grafadruid/go-druid/builder/aggregation/string_last_folding.go new file mode 100644 index 000000000..5af0af0cc --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/aggregation/string_last_folding.go @@ -0,0 +1,28 @@ +package aggregation + +type StringLastFolding struct { + Base + FieldName string `json:"fieldName,omitempty"` + MaxStringBytes int64 `json:"maxStringBytes,omitempty"` +} + +func NewStringLastFolding() *StringLastFolding { + s := &StringLastFolding{} + s.SetType("stringLastFolding") + return s +} + +func (s *StringLastFolding) SetName(name string) *StringLastFolding { + s.Base.SetName(name) + return s +} + +func (s *StringLastFolding) SetFieldName(fieldName string) *StringLastFolding { + s.FieldName = fieldName + return s +} + +func (s *StringLastFolding) SetMaxStringBytes(maxStringBytes int64) *StringLastFolding { + s.MaxStringBytes = maxStringBytes + return s +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/aggregation/tdigestsketch.go b/vendor/github.com/grafadruid/go-druid/builder/aggregation/tdigestsketch.go new file mode 100644 index 000000000..ff6957466 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/aggregation/tdigestsketch.go @@ -0,0 +1,34 @@ +package aggregation + +// TDigestSketch holds the tdigest sketch struct based on +// Aggregator section in https://druid.apache.org/docs/latest/development/extensions-contrib/tdigestsketch-quantiles.html +type TDigestSketch struct { + Base + FieldName string `json:"fieldName,omitempty"` + Compression int64 `json:"compression,omitempty"` +} + +// NewTDigestSketch create a new instance of TDigestSketch +func NewTDigestSketch() *TDigestSketch { + t := &TDigestSketch{} + t.Base.SetType("tDigestSketch") + return t +} + +// SetName set name +func (t *TDigestSketch) SetName(name string) *TDigestSketch { + t.Base.SetName(name) + return t +} + +// SetFieldName set fieldName +func (t *TDigestSketch) SetFieldName(fieldName string) *TDigestSketch { + t.FieldName = fieldName + return t +} + +// SetCompression set tdigest compression +func (t *TDigestSketch) SetCompression(compression int64) *TDigestSketch { + t.Compression = compression + return t +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/aggregation/thetasketch.go b/vendor/github.com/grafadruid/go-druid/builder/aggregation/thetasketch.go new file mode 100644 index 000000000..915ccea12 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/aggregation/thetasketch.go @@ -0,0 +1,41 @@ +package aggregation + +// ThetaSketch holds the theta sketch struct based on +// Aggregator section in http://druid.apache.org/docs/latest/development/extensions-core/datasketches-theta.html +type ThetaSketch struct { + Base + FieldName string `json:"fieldName,omitempty"` + IsInputThetaSketch *bool `json:"isInputThetaSketch,omitempty"` + Size int64 `json:"size,omitempty"` +} + +// NewThetaSketch create a new instance of ThetaSketch +func NewThetaSketch() *ThetaSketch { + t := &ThetaSketch{} + t.Base.SetType("thetaSketch") + return t +} + +// 
SetName set name +func (t *ThetaSketch) SetName(name string) *ThetaSketch { + t.Base.SetName(name) + return t +} + +// SetFieldName set fieldName +func (t *ThetaSketch) SetFieldName(fieldName string) *ThetaSketch { + t.FieldName = fieldName + return t +} + +// SetIsInputThetaSketch set theta isInputThetaSketch +func (t *ThetaSketch) SetIsInputThetaSketch(isInputThetaSketch bool) *ThetaSketch { + t.IsInputThetaSketch = &isInputThetaSketch + return t +} + +// SetSize set theta size +func (t *ThetaSketch) SetSize(size int64) *ThetaSketch { + t.Size = size + return t +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/bound/bound.go b/vendor/github.com/grafadruid/go-druid/builder/bound/bound.go new file mode 100644 index 000000000..856f73677 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/bound/bound.go @@ -0,0 +1,45 @@ +package bound + +import ( + "encoding/json" + "errors" + + "github.com/grafadruid/go-druid/builder" +) + +type Base struct { + Typ builder.ComponentType `json:"type,omitempty"` +} + +func (b *Base) SetType(typ builder.ComponentType) *Base { + b.Typ = typ + return b +} + +func (b *Base) Type() builder.ComponentType { + return b.Typ +} + +func Load(data []byte) (builder.Bound, error) { + var b builder.Bound + if string(data) == "null" { + return b, nil + } + var t struct { + Typ builder.ComponentType `json:"type,omitempty"` + } + if err := json.Unmarshal(data, &t); err != nil { + return nil, err + } + switch t.Typ { + case "polygon": + b = NewPolygon() + case "radius": + b = NewRadius() + case "rectangular": + b = NewRectangular() + default: + return nil, errors.New("unsupported bound type") + } + return b, json.Unmarshal(data, &b) +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/bound/polygon.go b/vendor/github.com/grafadruid/go-druid/builder/bound/polygon.go new file mode 100644 index 000000000..530437212 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/bound/polygon.go @@ -0,0 +1,23 @@ +package bound + +type Polygon struct { + Base + Abscissa []float64 `json:"abscissa,omitempty"` + Ordinate []float64 `json:"ordinate,omitempty"` +} + +func NewPolygon() *Polygon { + p := &Polygon{} + p.SetType("polygon") + return p +} + +func (p *Polygon) SetAbscissa(abscissa []float64) *Polygon { + p.Abscissa = abscissa + return p +} + +func (p *Polygon) SetOrdinate(ordinate []float64) *Polygon { + p.Ordinate = ordinate + return p +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/bound/radius.go b/vendor/github.com/grafadruid/go-druid/builder/bound/radius.go new file mode 100644 index 000000000..cf044a528 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/bound/radius.go @@ -0,0 +1,23 @@ +package bound + +type Radius struct { + Base + Coords []float64 `json:"coords,omitempty"` + Radius *float64 `json:"radius,omitempty"` +} + +func NewRadius() *Radius { + r := &Radius{} + r.SetType("radius") + return r +} + +func (r *Radius) SetCoords(coords []float64) *Radius { + r.Coords = coords + return r +} + +func (r *Radius) SetRadius(radius float64) *Radius { + r.Radius = &radius + return r +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/bound/rectangular.go b/vendor/github.com/grafadruid/go-druid/builder/bound/rectangular.go new file mode 100644 index 000000000..ed8be009f --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/bound/rectangular.go @@ -0,0 +1,29 @@ +package bound + +type Rectangular struct { + Base + MinCoords []float64 `json:"minCoords,omitempty"` + MaxCoords []float64 
`json:"maxCoords,omitempty"` + Limit int64 `json:"limit,omitempty"` +} + +func NewRectangular() *Rectangular { + r := &Rectangular{} + r.SetType("rectangular") + return r +} + +func (r *Rectangular) SetMinCoords(minCoords []float64) *Rectangular { + r.MinCoords = minCoords + return r +} + +func (r *Rectangular) SetMaxCoords(maxCoords []float64) *Rectangular { + r.MaxCoords = maxCoords + return r +} + +func (r *Rectangular) SetLimit(limit int64) *Rectangular { + r.Limit = limit + return r +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/builder.go b/vendor/github.com/grafadruid/go-druid/builder/builder.go new file mode 100644 index 000000000..d2de68085 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/builder.go @@ -0,0 +1,71 @@ +package builder + +type ComponentType = string + +type Query interface { + Type() ComponentType +} + +type Aggregator interface { + Type() ComponentType +} + +type Bound interface { + Type() ComponentType +} + +type DataSource interface { + Type() ComponentType +} + +type Dimension interface { + Type() ComponentType +} + +type ExtractionFn interface { + Type() ComponentType +} + +type Filter interface { + Type() ComponentType +} + +type Granularity interface { + Type() ComponentType +} + +type HavingSpec interface { + Type() ComponentType +} + +type LimitSpec interface { + Type() ComponentType +} + +type LookupExtractor interface { + Type() ComponentType +} + +type PostAggregator interface { + Type() ComponentType +} + +type SearchQuerySpec interface { + Type() ComponentType +} + +type TopNMetric interface { + Type() ComponentType +} + +type ToInclude interface { + Type() ComponentType +} + +type VirtualColumn interface { + Type() ComponentType +} + +type Intervals interface { + Type() ComponentType +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/datasource/data_source.go b/vendor/github.com/grafadruid/go-druid/builder/datasource/data_source.go new file mode 100644 index 000000000..667697fb6 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/datasource/data_source.go @@ -0,0 +1,53 @@ +package datasource + +import ( + "encoding/json" + "errors" + + "github.com/grafadruid/go-druid/builder" +) + +type Base struct { + Typ builder.ComponentType `json:"type,omitempty"` +} + +func (b *Base) SetType(typ builder.ComponentType) *Base { + b.Typ = typ + return b +} + +func (b *Base) Type() builder.ComponentType { + return b.Typ +} + +func Load(data []byte) (builder.DataSource, error) { + var d builder.DataSource + if string(data) == "null" { + return d, nil + } + var t struct { + Typ builder.ComponentType `json:"type,omitempty"` + } + if err := json.Unmarshal(data, &t); err != nil { + return nil, err + } + switch t.Typ { + case "globalTable": + d = NewGlobalTable() + case "inline": + d = NewInline() + case "join": + d = NewJoin() + case "lookup": + d = NewLookup() + case "query": + d = NewQuery() + case "table": + d = NewTable() + case "union": + d = NewUnion() + default: + return nil, errors.New("unsupported datasource type") + } + return d, json.Unmarshal(data, &d) +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/datasource/global_table.go b/vendor/github.com/grafadruid/go-druid/builder/datasource/global_table.go new file mode 100644 index 000000000..c5b5bb21a --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/datasource/global_table.go @@ -0,0 +1,17 @@ +package datasource + +type GlobalTable struct { + Base + Name string `json:"name,omitempty"` +} + +func NewGlobalTable() *GlobalTable { + 
g := &GlobalTable{} + g.SetType("globalTable") + return g +} + +func (g *GlobalTable) SetName(name string) *GlobalTable { + g.Name = name + return g +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/datasource/inline.go b/vendor/github.com/grafadruid/go-druid/builder/datasource/inline.go new file mode 100644 index 000000000..15a0d682b --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/datasource/inline.go @@ -0,0 +1,29 @@ +package datasource + +type Inline struct { + Base + ColumnNames []string `json:"columnNames,omitempty"` + ColumnTypes []string `json:"columnTypes,omitempty"` + Rows [][]string `json:"rows,omitempty"` +} + +func NewInline() *Inline { + i := &Inline{} + i.SetType("inline") + return i +} + +func (i *Inline) SetColumnNames(columnNames []string) *Inline { + i.ColumnNames = columnNames + return i +} + +func (i *Inline) SetColumnTypes(columnTypes []string) *Inline { + i.ColumnTypes = columnTypes + return i +} + +func (i *Inline) SetRows(rows [][]string) *Inline { + i.Rows = rows + return i +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/datasource/join.go b/vendor/github.com/grafadruid/go-druid/builder/datasource/join.go new file mode 100644 index 000000000..03c287101 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/datasource/join.go @@ -0,0 +1,77 @@ +package datasource + +import ( + "encoding/json" + + "github.com/grafadruid/go-druid/builder" + "github.com/grafadruid/go-druid/builder/types" +) + +type Join struct { + Base + Left builder.DataSource `json:"left,omitempty"` + Right builder.DataSource `json:"right,omitempty"` + RightPrefix string `json:"rightPrefix,omitempty"` + Condition string `json:"condition,omitempty"` + JoinType types.JoinType `json:"joinType,omitempty"` +} + +func NewJoin() *Join { + j := &Join{} + j.SetType("join") + return j +} + +func (j *Join) SetLeft(left builder.DataSource) *Join { + j.Left = left + return j +} + +func (j *Join) SetRight(right builder.DataSource) *Join { + j.Right = right + return j +} + +func (j *Join) SetRightPrefix(rightPrefix string) *Join { + j.RightPrefix = rightPrefix + return j +} + +func (j *Join) SetCondition(condition string) *Join { + j.Condition = condition + return j +} + +func (j *Join) SetJoinType(joinType types.JoinType) *Join { + j.JoinType = joinType + return j +} + +func (j *Join) UnmarshalJSON(data []byte) error { + var tmp struct { + Base + Left json.RawMessage `json:"left,omitempty"` + Right json.RawMessage `json:"right,omitempty"` + RightPrefix string `json:"rightPrefix,omitempty"` + Condition string `json:"condition,omitempty"` + JoinType types.JoinType `json:"joinType,omitempty"` + } + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } + l, err := Load(tmp.Left) + if err != nil { + return err + } + r, err := Load(tmp.Right) + if err != nil { + return err + } + j.Base = tmp.Base + j.Left = l + j.Right = r + j.RightPrefix = tmp.RightPrefix + j.Condition = tmp.Condition + j.JoinType = tmp.JoinType + return nil +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/datasource/lookup.go b/vendor/github.com/grafadruid/go-druid/builder/datasource/lookup.go new file mode 100644 index 000000000..39bdbea00 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/datasource/lookup.go @@ -0,0 +1,17 @@ +package datasource + +type Lookup struct { + Base + Lookup string `json:"lookup,omitempty"` +} + +func NewLookup() *Lookup { + l := &Lookup{} + l.SetType("lookup") + return l +} + +func (l *Lookup) SetLookup(lookup string) *Lookup { 
+ l.Lookup = lookup + return l +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/datasource/query.go b/vendor/github.com/grafadruid/go-druid/builder/datasource/query.go new file mode 100644 index 000000000..c4a3b82de --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/datasource/query.go @@ -0,0 +1,39 @@ +package datasource + +import ( + "encoding/json" + + "github.com/grafadruid/go-druid/builder" +) + +type Query struct { + Base + Query builder.Query `json:"-,omitempty"` +} + +func NewQuery() *Query { + q := &Query{} + q.SetType("query") + return q +} + +func (q *Query) SetQuery(qry builder.Query) { + q.Query = qry +} + +func (q *Query) UnmarshalJSONWithQueryLoader(data []byte, loader func(data []byte) (builder.Query, error)) error { + var tmp struct { + Base + Query json.RawMessage `json:"query,omitempty"` + } + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } + qry, err := loader(tmp.Query) + if err != nil { + return err + } + q.Base = tmp.Base + q.Query = qry + return nil +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/datasource/table.go b/vendor/github.com/grafadruid/go-druid/builder/datasource/table.go new file mode 100644 index 000000000..b3ffd846e --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/datasource/table.go @@ -0,0 +1,17 @@ +package datasource + +type Table struct { + Base + Name string `json:"name,omitempty"` +} + +func NewTable() *Table { + t := &Table{} + t.SetType("table") + return t +} + +func (t *Table) SetName(name string) *Table { + t.Name = name + return t +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/datasource/union.go b/vendor/github.com/grafadruid/go-druid/builder/datasource/union.go new file mode 100644 index 000000000..c69d44d78 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/datasource/union.go @@ -0,0 +1,17 @@ +package datasource + +type Union struct { + Base + DataSources []string `json:"dataSources,omitempty"` +} + +func NewUnion() *Union { + u := &Union{} + u.SetType("union") + return u +} + +func (u *Union) SetDataSources(dataSources []string) *Union { + u.DataSources = dataSources + return u +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/dimension/default.go b/vendor/github.com/grafadruid/go-druid/builder/dimension/default.go new file mode 100644 index 000000000..6c0d98be8 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/dimension/default.go @@ -0,0 +1,28 @@ +package dimension + +import "github.com/grafadruid/go-druid/builder/types" + +type Default struct { + Base +} + +func NewDefault() *Default { + d := &Default{} + d.SetType("default") + return d +} + +func (d *Default) SetDimension(dimension string) *Default { + d.Base.SetDimension(dimension) + return d +} + +func (d *Default) SetOutputName(outputName string) *Default { + d.Base.SetOutputName(outputName) + return d +} + +func (d *Default) SetOutputType(outputType types.OutputType) *Default { + d.Base.SetOutputType(outputType) + return d +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/dimension/dimension.go b/vendor/github.com/grafadruid/go-druid/builder/dimension/dimension.go new file mode 100644 index 000000000..d303864de --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/dimension/dimension.go @@ -0,0 +1,70 @@ +package dimension + +import ( + "encoding/json" + "errors" + + "github.com/grafadruid/go-druid/builder" + "github.com/grafadruid/go-druid/builder/types" +) + +type Base struct { + Typ builder.ComponentType 
`json:"type,omitempty"` + Dimension string `json:"dimension,omitempty"` + OutputName string `json:"outputName,omitempty"` + OutputType types.OutputType `json:"outputType,omitempty"` +} + +func (b *Base) SetType(typ builder.ComponentType) *Base { + b.Typ = typ + return b +} + +func (b *Base) SetDimension(dimension string) *Base { + b.Dimension = dimension + return b +} + +func (b *Base) SetOutputName(outputName string) *Base { + b.OutputName = outputName + return b +} + +func (b *Base) SetOutputType(outputType types.OutputType) *Base { + b.OutputType = outputType + return b +} + +func (b *Base) Type() builder.ComponentType { + return b.Typ +} + +func Load(data []byte) (builder.Dimension, error) { + var d builder.Dimension + if string(data) == "null" { + return d, nil + } + var t struct { + Typ builder.ComponentType `json:"type,omitempty"` + } + if err := json.Unmarshal(data, &t); err != nil { + return nil, err + } + switch t.Typ { + case "default": + d = NewDefault() + case "extraction": + d = NewExtraction() + case "listFiltered": + d = NewListFiltered() + case "lookup": + d = NewLookup() + case "prefixFiltered": + d = NewPrefixFiltered() + case "regexFiltered": + d = NewRegexFiltered() + default: + return nil, errors.New("unsupported dimension type") + } + return d, json.Unmarshal(data, &d) +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/dimension/extraction.go b/vendor/github.com/grafadruid/go-druid/builder/dimension/extraction.go new file mode 100644 index 000000000..126e84b6e --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/dimension/extraction.go @@ -0,0 +1,56 @@ +package dimension + +import ( + "encoding/json" + + "github.com/grafadruid/go-druid/builder" + "github.com/grafadruid/go-druid/builder/types" +) + +type Extraction struct { + Base + ExtractionFn builder.ExtractionFn `json:"extractionFn,omitempty"` +} + +func NewExtraction() *Extraction { + e := &Extraction{} + e.SetType("extraction") + return e +} + +func (e *Extraction) SetDimension(dimension string) *Extraction { + e.Base.SetDimension(dimension) + return e +} + +func (e *Extraction) SetOutputName(outputName string) *Extraction { + e.Base.SetOutputName(outputName) + return e +} + +func (e *Extraction) SetOutputType(outputType types.OutputType) *Extraction { + e.Base.SetOutputType(outputType) + return e +} + +func (e *Extraction) SetExtractionFn(extractionFn builder.ExtractionFn) *Extraction { + e.ExtractionFn = extractionFn + return e +} + +func (e *Extraction) UnmarshalJSON(data []byte) error { + var tmp struct { + Base + ExtractionFn json.RawMessage `json:"extractionFn,omitempty"` + } + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } + ef, err := Load(tmp.ExtractionFn) + if err != nil { + return err + } + e.Base = tmp.Base + e.ExtractionFn = ef + return nil +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/dimension/list_filtered.go b/vendor/github.com/grafadruid/go-druid/builder/dimension/list_filtered.go new file mode 100644 index 000000000..614d1aa39 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/dimension/list_filtered.go @@ -0,0 +1,72 @@ +package dimension + +import ( + "encoding/json" + + "github.com/grafadruid/go-druid/builder" + "github.com/grafadruid/go-druid/builder/types" +) + +type ListFiltered struct { + Base + Delegate builder.Dimension `json:"delegate,omitempty"` + Values []string `json:"values,omitempty"` + IsWhiteList *bool `json:"isWhiteList,omitempty"` +} + +func NewListFiltered() *ListFiltered { + l := &ListFiltered{} + 
l.SetType("listFiltered") + return l +} + +func (l *ListFiltered) SetDimension(dimension string) *ListFiltered { + l.Base.SetDimension(dimension) + return l +} + +func (l *ListFiltered) SetOutputName(outputName string) *ListFiltered { + l.Base.SetOutputName(outputName) + return l +} + +func (l *ListFiltered) SetOutputType(outputType types.OutputType) *ListFiltered { + l.Base.SetOutputType(outputType) + return l +} + +func (l *ListFiltered) SetDelegate(delegate builder.Dimension) *ListFiltered { + l.Delegate = delegate + return l +} + +func (l *ListFiltered) SetValues(values []string) *ListFiltered { + l.Values = values + return l +} + +func (l *ListFiltered) SetIsWhiteList(isWhiteList bool) *ListFiltered { + l.IsWhiteList = &isWhiteList + return l +} + +func (l *ListFiltered) UnmarshalJSON(data []byte) error { + var tmp struct { + Base + Delegate json.RawMessage `json:"delegate,omitempty"` + Values []string `json:"values,omitempty"` + IsWhiteList *bool `json:"isWhiteList,omitempty"` + } + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } + d, err := Load(tmp.Delegate) + if err != nil { + return err + } + l.Base = tmp.Base + l.Delegate = d + l.Values = tmp.Values + l.IsWhiteList = tmp.IsWhiteList + return nil +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/dimension/lookup.go b/vendor/github.com/grafadruid/go-druid/builder/dimension/lookup.go new file mode 100644 index 000000000..5557bda22 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/dimension/lookup.go @@ -0,0 +1,83 @@ +package dimension + +import ( + "encoding/json" + + "github.com/grafadruid/go-druid/builder" + "github.com/grafadruid/go-druid/builder/lookup" +) + +type Lookup struct { + Base + Name string `json:"name,omitempty"` + ReplaceMissingValueWith string `json:"replaceMissingValueWith,omitempty"` + RetainMissingValue *bool `json:"retainMissingValue,omitempty"` + Lookup builder.LookupExtractor `json:"lookup,omitempty"` + Optimize *bool `json:"optimize,omitempty"` +} + +type RegisteredLookup struct { + Base + Name string `json:"name,omitempty"` +} + +func NewLookup() *Lookup { + l := &Lookup{} + l.SetType("lookup") + return l +} + +func (l *Lookup) SetName(name string) *Lookup { + l.Name = name + return l +} + +func (l *Lookup) SetOutputName(outputName string) *Lookup { + l.Base.SetOutputName(outputName) + return l +} + +func (l *Lookup) SetReplaceMissingValueWith(replaceMissingValueWith string) *Lookup { + l.ReplaceMissingValueWith = replaceMissingValueWith + return l +} + +func (l *Lookup) SetRetainMissingValue(retainMissingValue bool) *Lookup { + l.RetainMissingValue = &retainMissingValue + return l +} + +func (l *Lookup) SetLookup(lookup builder.LookupExtractor) *Lookup { + l.Lookup = lookup + return l +} + +func (l *Lookup) SetOptimize(optimize bool) *Lookup { + l.Optimize = &optimize + return l +} + +func (l *Lookup) UnmarshalJSON(data []byte) error { + var tmp struct { + Base + Name string `json:"name,omitempty"` + ReplaceMissingValueWith string `json:"replaceMissingValueWith,omitempty"` + RetainMissingValue *bool `json:"retainMissingValue,omitempty"` + Lookup json.RawMessage `json:"lookup,omitempty"` + Optimize *bool `json:"optimize,omitempty"` + } + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } + lu, err := lookup.Load(tmp.Lookup) + if err != nil { + return err + } + l.Base = tmp.Base + l.Name = tmp.Name + l.ReplaceMissingValueWith = tmp.ReplaceMissingValueWith + l.RetainMissingValue = tmp.RetainMissingValue + l.Lookup = lu + l.Optimize = 
tmp.Optimize + return nil +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/dimension/prefix_filtered.go b/vendor/github.com/grafadruid/go-druid/builder/dimension/prefix_filtered.go new file mode 100644 index 000000000..d1d39c312 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/dimension/prefix_filtered.go @@ -0,0 +1,64 @@ +package dimension + +import ( + "encoding/json" + + "github.com/grafadruid/go-druid/builder" + "github.com/grafadruid/go-druid/builder/types" +) + +type PrefixFiltered struct { + Base + Delegate builder.Dimension `json:"delegate,omitempty"` + Prefix string `json:"prefix,omitempty"` +} + +func NewPrefixFiltered() *PrefixFiltered { + p := &PrefixFiltered{} + p.SetType("prefixFiltered") + return p +} + +func (p *PrefixFiltered) SetDimension(dimension string) *PrefixFiltered { + p.Base.SetDimension(dimension) + return p +} + +func (p *PrefixFiltered) SetOutputName(outputName string) *PrefixFiltered { + p.Base.SetOutputName(outputName) + return p +} + +func (p *PrefixFiltered) SetOutputType(outputType types.OutputType) *PrefixFiltered { + p.Base.SetOutputType(outputType) + return p +} + +func (p *PrefixFiltered) SetDelegate(delegate builder.Dimension) *PrefixFiltered { + p.Delegate = delegate + return p +} + +func (p *PrefixFiltered) SetPrefix(prefix string) *PrefixFiltered { + p.Prefix = prefix + return p +} + +func (p *PrefixFiltered) UnmarshalJSON(data []byte) error { + var tmp struct { + Base + Delegate json.RawMessage `json:"delegate,omitempty"` + Prefix string `json:"prefix,omitempty"` + } + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } + d, err := Load(tmp.Delegate) + if err != nil { + return err + } + p.Base = tmp.Base + p.Delegate = d + p.Prefix = tmp.Prefix + return nil +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/dimension/regex_filtered.go b/vendor/github.com/grafadruid/go-druid/builder/dimension/regex_filtered.go new file mode 100644 index 000000000..62e2b4a04 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/dimension/regex_filtered.go @@ -0,0 +1,64 @@ +package dimension + +import ( + "encoding/json" + + "github.com/grafadruid/go-druid/builder" + "github.com/grafadruid/go-druid/builder/types" +) + +type RegexFiltered struct { + Base + Delegate builder.Dimension `json:"delegate,omitempty"` + Pattern string `json:"pattern,omitempty"` +} + +func NewRegexFiltered() *RegexFiltered { + r := &RegexFiltered{} + r.SetType("regexFiltered") + return r +} + +func (r *RegexFiltered) SetDimension(dimension string) *RegexFiltered { + r.Base.SetDimension(dimension) + return r +} + +func (r *RegexFiltered) SetOutputName(outputName string) *RegexFiltered { + r.Base.SetOutputName(outputName) + return r +} + +func (r *RegexFiltered) SetOutputType(outputType types.OutputType) *RegexFiltered { + r.Base.SetOutputType(outputType) + return r +} + +func (r *RegexFiltered) SetDelegate(delegate builder.Dimension) *RegexFiltered { + r.Delegate = delegate + return r +} + +func (r *RegexFiltered) SetPattern(pattern string) *RegexFiltered { + r.Pattern = pattern + return r +} + +func (r *RegexFiltered) UnmarshalJSON(data []byte) error { + var tmp struct { + Base + Delegate json.RawMessage `json:"delegate,omitempty"` + Pattern string `json:"pattern,omitempty"` + } + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } + d, err := Load(tmp.Delegate) + if err != nil { + return err + } + r.Base = tmp.Base + r.Delegate = d + r.Pattern = tmp.Pattern + return nil +} diff --git 
a/vendor/github.com/grafadruid/go-druid/builder/extractionfn/bucket.go b/vendor/github.com/grafadruid/go-druid/builder/extractionfn/bucket.go new file mode 100644 index 000000000..c85e5eaa2 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/extractionfn/bucket.go @@ -0,0 +1,23 @@ +package extractionfn + +type Bucket struct { + Base + Size *float64 `json:"size,omitempty"` + Offset *float64 `json:"offset,omitempty"` +} + +func NewBucket() *Bucket { + b := &Bucket{} + b.SetType("bucket") + return b +} + +func (b *Bucket) SetSize(size float64) *Bucket { + b.Size = &size + return b +} + +func (b *Bucket) SetOffset(offset float64) *Bucket { + b.Offset = &offset + return b +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/extractionfn/cascade.go b/vendor/github.com/grafadruid/go-druid/builder/extractionfn/cascade.go new file mode 100644 index 000000000..90e9b757f --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/extractionfn/cascade.go @@ -0,0 +1,45 @@ +package extractionfn + +import ( + "encoding/json" + + "github.com/grafadruid/go-druid/builder" +) + +type Cascade struct { + Base + ExtractionFns []builder.ExtractionFn `json:"extractionFns,omitempty"` +} + +func NewCascade() *Cascade { + c := &Cascade{} + c.SetType("cascade") + return c +} + +func (c *Cascade) SetExtractionFns(extractionFns []builder.ExtractionFn) *Cascade { + c.ExtractionFns = extractionFns + return c +} + +func (c *Cascade) UnmarshalJSON(data []byte) error { + var tmp struct { + Base + ExtractionFns []json.RawMessage `json:"extractionFns,omitempty"` + } + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } + var err error + var e builder.ExtractionFn + ee := make([]builder.ExtractionFn, len(tmp.ExtractionFns)) + for i := range tmp.ExtractionFns { + if e, err = Load(tmp.ExtractionFns[i]); err != nil { + return err + } + ee[i] = e + } + c.Base = tmp.Base + c.ExtractionFns = ee + return nil +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/extractionfn/extraction_fn.go b/vendor/github.com/grafadruid/go-druid/builder/extractionfn/extraction_fn.go new file mode 100644 index 000000000..26ed0678a --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/extractionfn/extraction_fn.go @@ -0,0 +1,71 @@ +package extractionfn + +import ( + "encoding/json" + "errors" + + "github.com/grafadruid/go-druid/builder" +) + +type Base struct { + Typ builder.ComponentType `json:"type,omitempty"` +} + +func (b *Base) SetType(typ builder.ComponentType) *Base { + b.Typ = typ + return b +} + +func (b *Base) Type() builder.ComponentType { + return b.Typ +} + +func Load(data []byte) (builder.ExtractionFn, error) { + var e builder.ExtractionFn + if string(data) == "null" { + return e, nil + } + var t struct { + Typ builder.ComponentType `json:"type,omitempty"` + } + if err := json.Unmarshal(data, &t); err != nil { + return nil, err + } + switch t.Typ { + case "bucket": + e = NewBucket() + case "cascade": + e = NewCascade() + case "identity": + e = NewIdentity() + case "javascript": + e = NewJavascript() + case "lookup": + e = NewLookup() + case "lower": + e = NewLower() + case "partial": + e = NewPartial() + case "regex": + e = NewRegex() + case "registeredLookup": + e = NewRegisteredLookup() + case "searchQuery": + e = NewSearchQuery() + case "stringFormat": + e = NewStringFormat() + case "strlen": + e = NewStrlen() + case "substring": + e = NewSubstring() + case "time": + e = NewTime() + case "timeFormat": + e = NewTimeFormat() + case "upper": + e = NewUpper() + default: + 
return nil, errors.New("unsupported extractionfn type") + } + return e, json.Unmarshal(data, &e) +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/extractionfn/identity.go b/vendor/github.com/grafadruid/go-druid/builder/extractionfn/identity.go new file mode 100644 index 000000000..441173839 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/extractionfn/identity.go @@ -0,0 +1,11 @@ +package extractionfn + +type Identity struct { + Base +} + +func NewIdentity() *Identity { + i := &Identity{} + i.SetType("identity") + return i +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/extractionfn/javascript.go b/vendor/github.com/grafadruid/go-druid/builder/extractionfn/javascript.go new file mode 100644 index 000000000..6039d800e --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/extractionfn/javascript.go @@ -0,0 +1,23 @@ +package extractionfn + +type Javascript struct { + Base + Function string `json:"function,omitempty"` + Injective *bool `json:"injective,omitempty"` +} + +func NewJavascript() *Javascript { + j := &Javascript{} + j.SetType("javascript") + return j +} + +func (j *Javascript) SetFunction(function string) *Javascript { + j.Function = function + return j +} + +func (j *Javascript) SetInjective(injective bool) *Javascript { + j.Injective = &injective + return j +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/extractionfn/lookup.go b/vendor/github.com/grafadruid/go-druid/builder/extractionfn/lookup.go new file mode 100644 index 000000000..6e0650247 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/extractionfn/lookup.go @@ -0,0 +1,72 @@ +package extractionfn + +import ( + "encoding/json" + + "github.com/grafadruid/go-druid/builder" + "github.com/grafadruid/go-druid/builder/lookup" +) + +type Lookup struct { + Base + Lookup builder.LookupExtractor `json:"lookup,omitempty"` + RetainMissingValue *bool `json:"retainMissingValue,omitempty"` + ReplaceMissingValueWith string `json:"replaceMissingValueWith,omitempty"` + Injective *bool `json:"injective,omitempty"` + Optimize *bool `json:"optimize,omitempty"` +} + +func NewLookup() *Lookup { + l := &Lookup{} + l.SetType("lookup") + return l +} + +func (l *Lookup) SetLookup(lookup builder.LookupExtractor) *Lookup { + l.Lookup = lookup + return l +} + +func (l *Lookup) SetRetainMissingValue(retainMissingValue bool) *Lookup { + l.RetainMissingValue = &retainMissingValue + return l +} + +func (l *Lookup) SetReplaceMissingValueWith(replaceMissingValueWith string) *Lookup { + l.ReplaceMissingValueWith = replaceMissingValueWith + return l +} + +func (l *Lookup) SetInjective(injective bool) *Lookup { + l.Injective = &injective + return l +} + +func (l *Lookup) SetOptimize(optimize bool) *Lookup { + l.Optimize = &optimize + return l +} +func (l *Lookup) UnmarshalJSON(data []byte) error { + var tmp struct { + Base + Lookup json.RawMessage `json:"lookup,omitempty"` + RetainMissingValue *bool `json:"retainMissingValue,omitempty"` + ReplaceMissingValueWith string `json:"replaceMissingValueWith,omitempty"` + Injective *bool `json:"injective,omitempty"` + Optimize *bool `json:"optimize,omitempty"` + } + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } + lu, err := lookup.Load(tmp.Lookup) + if err != nil { + return err + } + l.Base = tmp.Base + l.Lookup = lu + l.RetainMissingValue = tmp.RetainMissingValue + l.ReplaceMissingValueWith = tmp.ReplaceMissingValueWith + l.Injective = tmp.Injective + l.Optimize = tmp.Optimize + return nil +} diff --git 
a/vendor/github.com/grafadruid/go-druid/builder/extractionfn/lower.go b/vendor/github.com/grafadruid/go-druid/builder/extractionfn/lower.go new file mode 100644 index 000000000..f3ffba4fe --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/extractionfn/lower.go @@ -0,0 +1,17 @@ +package extractionfn + +type Lower struct { + Base + Locale string `json:"locale,omitempty"` +} + +func NewLower() *Lower { + l := &Lower{} + l.SetType("lower") + return l +} + +func (l *Lower) SetLocale(locale string) *Lower { + l.Locale = locale + return l +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/extractionfn/partial.go b/vendor/github.com/grafadruid/go-druid/builder/extractionfn/partial.go new file mode 100644 index 000000000..cea09ef3c --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/extractionfn/partial.go @@ -0,0 +1,17 @@ +package extractionfn + +type Partial struct { + Base + Expr string `json:"expr,omitempty"` +} + +func NewPartial() *Partial { + p := &Partial{} + p.SetType("partial") + return p +} + +func (p *Partial) SetExpr(expr string) *Partial { + p.Expr = expr + return p +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/extractionfn/regex.go b/vendor/github.com/grafadruid/go-druid/builder/extractionfn/regex.go new file mode 100644 index 000000000..64e38787b --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/extractionfn/regex.go @@ -0,0 +1,35 @@ +package extractionfn + +type Regex struct { + Base + Expr string `json:"expr,omitempty"` + Index int64 `json:"index,omitempty"` + ReplaceMissingValue *bool `json:"replaceMissingValue,omitempty"` + ReplaceMissingValueWith string `json:"replaceMissingValueWith,omitempty"` +} + +func NewRegex() *Regex { + r := &Regex{} + r.SetType("regex") + return r +} + +func (r *Regex) SetExpr(expr string) *Regex { + r.Expr = expr + return r +} + +func (r *Regex) SetIndex(index int64) *Regex { + r.Index = index + return r +} + +func (r *Regex) SetReplaceMissingValue(replaceMissingValue bool) *Regex { + r.ReplaceMissingValue = &replaceMissingValue + return r +} + +func (r *Regex) SetReplaceMissingValueWith(replaceMissingValueWith string) *Regex { + r.ReplaceMissingValueWith = replaceMissingValueWith + return r +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/extractionfn/registered_lookup.go b/vendor/github.com/grafadruid/go-druid/builder/extractionfn/registered_lookup.go new file mode 100644 index 000000000..a1c718f0b --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/extractionfn/registered_lookup.go @@ -0,0 +1,43 @@ +package extractionfn + +// RegisteredLookup holds the registered lookup extraction function struct based on +// https://druid.apache.org/docs/latest/querying/dimensionspecs.html#registered-lookup-extraction-function +type RegisteredLookup struct { + Base + Lookup string `json:"lookup,omitempty"` + RetainMissingValue *bool `json:"retainMissingValue,omitempty"` + ReplaceMissingValueWith string `json:"replaceMissingValueWith,omitempty"` + Injective *bool `json:"injective,omitempty"` + Optimize *bool `json:"optimize,omitempty"` +} + +func NewRegisteredLookup() *RegisteredLookup { + l := &RegisteredLookup{} + l.SetType("registeredLookup") + return l +} + +func (l *RegisteredLookup) SetLookup(lookup string) *RegisteredLookup { + l.Lookup = lookup + return l +} + +func (l *RegisteredLookup) SetRetainMissingValue(retainMissingValue bool) *RegisteredLookup { + l.RetainMissingValue = &retainMissingValue + return l +} + +func (l *RegisteredLookup) 
SetReplaceMissingValueWith(replaceMissingValueWith string) *RegisteredLookup { + l.ReplaceMissingValueWith = replaceMissingValueWith + return l +} + +func (l *RegisteredLookup) SetInjective(injective bool) *RegisteredLookup { + l.Injective = &injective + return l +} + +func (l *RegisteredLookup) SetOptimize(optimize bool) *RegisteredLookup { + l.Optimize = &optimize + return l +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/extractionfn/search_query.go b/vendor/github.com/grafadruid/go-druid/builder/extractionfn/search_query.go new file mode 100644 index 000000000..76ddecb6a --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/extractionfn/search_query.go @@ -0,0 +1,41 @@ +package extractionfn + +import ( + "encoding/json" + + "github.com/grafadruid/go-druid/builder" + "github.com/grafadruid/go-druid/builder/searchqueryspec" +) + +type SearchQuery struct { + Base + Query builder.SearchQuerySpec `json:"query,omitempty"` +} + +func NewSearchQuery() *SearchQuery { + s := &SearchQuery{} + s.SetType("searchQuery") + return s +} + +func (s *SearchQuery) SetQuery(q builder.SearchQuerySpec) *SearchQuery { + s.Query = q + return s +} + +func (s *SearchQuery) UnmarshalJSON(data []byte) error { + var tmp struct { + Base + Query json.RawMessage `json:"query,omitempty"` + } + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } + q, err := searchqueryspec.Load(tmp.Query) + if err != nil { + return err + } + s.Base = tmp.Base + s.Query = q + return nil +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/extractionfn/string_format.go b/vendor/github.com/grafadruid/go-druid/builder/extractionfn/string_format.go new file mode 100644 index 000000000..2861753dd --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/extractionfn/string_format.go @@ -0,0 +1,27 @@ +package extractionfn + +import ( + "github.com/grafadruid/go-druid/builder/types" +) + +type StringFormat struct { + Base + Format string `json:"format,omitempty"` + NullHandling types.NullHandling `json:"nullHandling,omitempty"` +} + +func NewStringFormat() *StringFormat { + s := &StringFormat{} + s.SetType("stringFormat") + return s +} + +func (s *StringFormat) SetFormat(format string) *StringFormat { + s.Format = format + return s +} + +func (s *StringFormat) SetNullHandling(nullHandling types.NullHandling) *StringFormat { + s.NullHandling = nullHandling + return s +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/extractionfn/strlen.go b/vendor/github.com/grafadruid/go-druid/builder/extractionfn/strlen.go new file mode 100644 index 000000000..9b77013aa --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/extractionfn/strlen.go @@ -0,0 +1,11 @@ +package extractionfn + +type Strlen struct { + Base +} + +func NewStrlen() *Strlen { + s := &Strlen{} + s.SetType("strlen") + return s +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/extractionfn/substring.go b/vendor/github.com/grafadruid/go-druid/builder/extractionfn/substring.go new file mode 100644 index 000000000..b6198f083 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/extractionfn/substring.go @@ -0,0 +1,23 @@ +package extractionfn + +type Substring struct { + Base + Index int64 `json:"index,omitempty"` + Length int64 `json:"length,omitempty"` +} + +func NewSubstring() *Substring { + s := &Substring{} + s.SetType("substring") + return s +} + +func (s *Substring) SetIndex(index int64) *Substring { + s.Index = index + return s +} + +func (s *Substring) SetLength(length int64) 
*Substring { + s.Length = length + return s +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/extractionfn/time.go b/vendor/github.com/grafadruid/go-druid/builder/extractionfn/time.go new file mode 100644 index 000000000..d27a8c549 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/extractionfn/time.go @@ -0,0 +1,29 @@ +package extractionfn + +type Time struct { + Base + TimeFormat string `json:"timeFormat,omitempty"` + ResultFormat string `json:"resultFormat,omitempty"` + Joda *bool `json:"joda,omitempty"` +} + +func NewTime() *Time { + t := &Time{} + t.SetType("time") + return t +} + +func (t *Time) SetTimeFormat(timeFormat string) *Time { + t.TimeFormat = timeFormat + return t +} + +func (t *Time) SetResultFormat(resultFormat string) *Time { + t.ResultFormat = resultFormat + return t +} + +func (t *Time) SetJoda(joda bool) *Time { + t.Joda = &joda + return t +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/extractionfn/time_format.go b/vendor/github.com/grafadruid/go-druid/builder/extractionfn/time_format.go new file mode 100644 index 000000000..8ff897907 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/extractionfn/time_format.go @@ -0,0 +1,73 @@ +package extractionfn + +import ( + "encoding/json" + + "github.com/grafadruid/go-druid/builder" + "github.com/grafadruid/go-druid/builder/types" +) + +type TimeFormat struct { + Base + Format string `json:"format,omitempty"` + TimeZone types.DateTimeZone `json:"timeZone,omitempty"` + Locale string `json:"locale,omitempty"` + Granularity builder.Granularity `json:"granularity,omitempty"` + AsMillis *bool `json:"asMillis,omitempty"` +} + +func NewTimeFormat() *TimeFormat { + t := &TimeFormat{} + t.SetType("timeFormat") + return t +} + +func (t *TimeFormat) SetFormat(format string) *TimeFormat { + t.Format = format + return t +} + +func (t *TimeFormat) SetTimeZone(timeZone types.DateTimeZone) *TimeFormat { + t.TimeZone = timeZone + return t +} + +func (t *TimeFormat) SetLocale(locale string) *TimeFormat { + t.Locale = locale + return t +} + +func (t *TimeFormat) SetGranularity(granularity builder.Granularity) *TimeFormat { + t.Granularity = granularity + return t +} + +func (t *TimeFormat) SetAsMillis(asMillis bool) *TimeFormat { + t.AsMillis = &asMillis + return t +} + +func (t *TimeFormat) UnmarshalJSON(data []byte) error { + var tmp struct { + Base + Format string `json:"format,omitempty"` + TimeZone types.DateTimeZone `json:"timeZone,omitempty"` + Locale string `json:"locale,omitempty"` + Granularity json.RawMessage `json:"granularity,omitempty"` + AsMillis *bool `json:"asMillis,omitempty"` + } + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } + g, err := Load(tmp.Granularity) + if err != nil { + return err + } + t.Base = tmp.Base + t.Format = tmp.Format + t.TimeZone = tmp.TimeZone + t.Locale = tmp.Locale + t.Granularity = g + t.AsMillis = tmp.AsMillis + return nil +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/extractionfn/upper.go b/vendor/github.com/grafadruid/go-druid/builder/extractionfn/upper.go new file mode 100644 index 000000000..adab4d9bd --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/extractionfn/upper.go @@ -0,0 +1,17 @@ +package extractionfn + +type Upper struct { + Base + Locale string `json:"locale,omitempty"` +} + +func NewUpper() *Upper { + u := &Upper{} + u.SetType("upper") + return u +} + +func (u *Upper) SetLocale(locale string) *Upper { + u.Locale = locale + return u +} diff --git 
a/vendor/github.com/grafadruid/go-druid/builder/filter/and.go b/vendor/github.com/grafadruid/go-druid/builder/filter/and.go new file mode 100644 index 000000000..87e246a74 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/filter/and.go @@ -0,0 +1,45 @@ +package filter + +import ( + "encoding/json" + + "github.com/grafadruid/go-druid/builder" +) + +type And struct { + Base + Fields []builder.Filter `json:"fields,omitempty"` +} + +func NewAnd() *And { + a := &And{} + a.SetType("and") + return a +} + +func (a *And) SetFields(fields []builder.Filter) *And { + a.Fields = fields + return a +} + +func (a *And) UnmarshalJSON(data []byte) error { + var err error + var tmp struct { + Base + Fields []json.RawMessage `json:"fields,omitempty"` + } + if err = json.Unmarshal(data, &tmp); err != nil { + return err + } + var f builder.Filter + ff := make([]builder.Filter, len(tmp.Fields)) + for i := range tmp.Fields { + if f, err = Load(tmp.Fields[i]); err != nil { + return err + } + ff[i] = f + } + a.Base = tmp.Base + a.Fields = ff + return nil +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/filter/bound.go b/vendor/github.com/grafadruid/go-druid/builder/filter/bound.go new file mode 100644 index 000000000..9f1b915e4 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/filter/bound.go @@ -0,0 +1,94 @@ +package filter + +import ( + "encoding/json" + + "github.com/grafadruid/go-druid/builder" + "github.com/grafadruid/go-druid/builder/extractionfn" + "github.com/grafadruid/go-druid/builder/types" +) + +type Bound struct { + Base + Dimension string `json:"dimension,omitempty"` + Lower string `json:"lower,omitempty"` + Upper string `json:"upper,omitempty"` + LowerStrict *bool `json:"lowerStrict,omitempty"` + UpperStrict *bool `json:"upperStrict,omitempty"` + ExtractionFn builder.ExtractionFn `json:"extractionFn,omitempty"` + Ordering types.StringComparator `json:"ordering,omitempty"` +} + +func NewBound() *Bound { + b := &Bound{} + b.SetType("bound") + return b +} + +func (b *Bound) SetDimension(dimension string) *Bound { + b.Dimension = dimension + return b +} + +func (b *Bound) SetLower(lower string) *Bound { + b.Lower = lower + return b +} + +func (b *Bound) SetUpper(upper string) *Bound { + b.Upper = upper + return b +} + +func (b *Bound) SetLowerStrict(lowerStrict bool) *Bound { + b.LowerStrict = &lowerStrict + return b +} + +func (b *Bound) SetUpperStrict(upperStrict bool) *Bound { + b.UpperStrict = &upperStrict + return b +} + +func (b *Bound) SetExtractionFn(extractionFn builder.ExtractionFn) *Bound { + b.ExtractionFn = extractionFn + return b +} + +func (b *Bound) SetOrdering(ordering types.StringComparator) *Bound { + b.Ordering = ordering + return b +} + +func (b *Bound) UnmarshalJSON(data []byte) error { + var err error + var tmp struct { + Base + Dimension string `json:"dimension,omitempty"` + Lower string `json:"lower,omitempty"` + Upper string `json:"upper,omitempty"` + LowerStrict *bool `json:"lowerStrict,omitempty"` + UpperStrict *bool `json:"upperStrict,omitempty"` + ExtractionFn json.RawMessage `json:"extractionFn,omitempty"` + Ordering types.StringComparator `json:"ordering,omitempty"` + } + if err = json.Unmarshal(data, &tmp); err != nil { + return err + } + var e builder.ExtractionFn + if tmp.ExtractionFn != nil { + e, err = extractionfn.Load(tmp.ExtractionFn) + if err != nil { + return err + } + } + b.Base = tmp.Base + b.Dimension = tmp.Dimension + b.Lower = tmp.Lower + b.Upper = tmp.Upper + b.LowerStrict = tmp.LowerStrict + b.UpperStrict = 
tmp.UpperStrict + b.ExtractionFn = e + b.Ordering = tmp.Ordering + return nil +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/filter/column_comparison.go b/vendor/github.com/grafadruid/go-druid/builder/filter/column_comparison.go new file mode 100644 index 000000000..d2cc9e6b1 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/filter/column_comparison.go @@ -0,0 +1,46 @@ +package filter + +import ( + "encoding/json" + + "github.com/grafadruid/go-druid/builder" + "github.com/grafadruid/go-druid/builder/dimension" +) + +type ColumnComparison struct { + Base + Dimensions []builder.Dimension `json:"dimensions,omitempty"` +} + +func NewColumnComparison() *ColumnComparison { + c := &ColumnComparison{} + c.SetType("columnComparison") + return c +} + +func (c *ColumnComparison) SetDimensions(dimensions []builder.Dimension) *ColumnComparison { + c.Dimensions = dimensions + return c +} + +func (c *ColumnComparison) UnmarshalJSON(data []byte) error { + var err error + var tmp struct { + Base + Dimensions []json.RawMessage `json:"dimensions,omitempty"` + } + if err = json.Unmarshal(data, &tmp); err != nil { + return err + } + var d builder.Dimension + dd := make([]builder.Dimension, len(tmp.Dimensions)) + for i := range tmp.Dimensions { + if d, err = dimension.Load(tmp.Dimensions[i]); err != nil { + return err + } + dd[i] = d + } + c.Base = tmp.Base + c.Dimensions = dd + return nil +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/filter/expression.go b/vendor/github.com/grafadruid/go-druid/builder/filter/expression.go new file mode 100644 index 000000000..378b858be --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/filter/expression.go @@ -0,0 +1,23 @@ +package filter + +type Expression struct { + Base + Expression string `json:"expression,omitempty"` + FilterTuning *FilterTuning `json:"filterTuning,omitempty"` +} + +func NewExpression() *Expression { + e := &Expression{} + e.SetType("expression") + return e +} + +func (e *Expression) SetExpression(expression string) *Expression { + e.Expression = expression + return e +} + +func (e *Expression) SetFilterTuning(filterTuning *FilterTuning) *Expression { + e.FilterTuning = filterTuning + return e +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/filter/extraction.go b/vendor/github.com/grafadruid/go-druid/builder/filter/extraction.go new file mode 100644 index 000000000..3ed7695c2 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/filter/extraction.go @@ -0,0 +1,61 @@ +package filter + +import ( + "encoding/json" + + "github.com/grafadruid/go-druid/builder" + "github.com/grafadruid/go-druid/builder/extractionfn" +) + +type Extraction struct { + Base + Dimension string `json:"dimension,omitempty"` + Value string `json:"value,omitempty"` + ExtractionFn builder.ExtractionFn `json:"extractionFn,omitempty"` +} + +func NewExtraction() *Extraction { + e := &Extraction{} + e.SetType("extraction") + return e +} + +func (e *Extraction) SetDimension(dimension string) *Extraction { + e.Dimension = dimension + return e +} + +func (e *Extraction) SetValue(value string) *Extraction { + e.Value = value + return e +} + +func (e *Extraction) SetExtractionFn(extractionFn builder.ExtractionFn) *Extraction { + e.ExtractionFn = extractionFn + return e +} + +func (e *Extraction) UnmarshalJSON(data []byte) error { + var err error + var tmp struct { + Base + Dimension string `json:"dimension,omitempty"` + Value string `json:"value,omitempty"` + ExtractionFn json.RawMessage 
`json:"extractionFn,omitempty"` + } + if err = json.Unmarshal(data, &tmp); err != nil { + return err + } + var ex builder.ExtractionFn + if tmp.ExtractionFn != nil { + ex, err = extractionfn.Load(tmp.ExtractionFn) + if err != nil { + return err + } + } + e.Base = tmp.Base + e.Dimension = tmp.Dimension + e.Value = tmp.Value + e.ExtractionFn = ex + return nil +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/filter/false.go b/vendor/github.com/grafadruid/go-druid/builder/filter/false.go new file mode 100644 index 000000000..1fd3f6b01 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/filter/false.go @@ -0,0 +1,11 @@ +package filter + +type False struct { + Base +} + +func NewFalse() *False { + f := &False{} + f.SetType("false") + return f +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/filter/filter.go b/vendor/github.com/grafadruid/go-druid/builder/filter/filter.go new file mode 100644 index 000000000..8924ff8b7 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/filter/filter.go @@ -0,0 +1,75 @@ +package filter + +import ( + "encoding/json" + "errors" + + "github.com/grafadruid/go-druid/builder" +) + +type Base struct { + Typ builder.ComponentType `json:"type,omitempty"` +} + +func (b *Base) SetType(typ builder.ComponentType) *Base { + b.Typ = typ + return b +} + +func (b *Base) Type() builder.ComponentType { + return b.Typ +} + +func Load(data []byte) (builder.Filter, error) { + var f builder.Filter + if string(data) == "null" { + return f, nil + } + var t struct { + Typ builder.ComponentType `json:"type,omitempty"` + } + if err := json.Unmarshal(data, &t); err != nil { + return nil, err + } + switch t.Typ { + case "and": + f = NewAnd() + case "bound": + f = NewBound() + case "columnComparison": + f = NewColumnComparison() + case "expression": + f = NewExpression() + case "extraction": + f = NewExtraction() + case "false": + f = NewFalse() + case "filterTuning": + f = NewFilterTuning() + case "in": + f = NewIn() + case "interval": + f = NewInterval() + case "javascript": + f = NewJavascript() + case "like": + f = NewLike() + case "not": + f = NewNot() + case "or": + f = NewOr() + case "regex": + f = NewRegex() + case "search": + f = NewSearch() + case "selector": + f = NewSelector() + case "spatial": + f = NewSpatial() + case "true": + f = NewTrue() + default: + return nil, errors.New("unsupported filter type") + } + return f, json.Unmarshal(data, &f) +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/filter/filter_tuning.go b/vendor/github.com/grafadruid/go-druid/builder/filter/filter_tuning.go new file mode 100644 index 000000000..328ce5ada --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/filter/filter_tuning.go @@ -0,0 +1,29 @@ +package filter + +type FilterTuning struct { + Base + UseBitmapIndex *bool `json:"useBitmapIndex,omitempty"` + MinCardinalityToUseBitmapIndex int64 `json:"minCardinalityToUseBitmapIndex,omitempty"` + MaxCardinalityToUseBitmapIndex int64 `json:"maxCardinalityToUseBitmapIndex,omitempty"` +} + +func NewFilterTuning() *FilterTuning { + f := &FilterTuning{} + f.SetType("filterTuning") + return f +} + +func (f *FilterTuning) SetUseBitmapIndex(useBitmapIndex bool) *FilterTuning { + f.UseBitmapIndex = &useBitmapIndex + return f +} + +func (f *FilterTuning) SetMinCardinalityToUseBitmapIndex(minCardinalityToUseBitmapIndex int64) *FilterTuning { + f.MinCardinalityToUseBitmapIndex = minCardinalityToUseBitmapIndex + return f +} + +func (f *FilterTuning) 
SetMaxCardinalityToUseBitmapIndex(maxCardinalityToUseBitmapIndex int64) *FilterTuning { + f.MaxCardinalityToUseBitmapIndex = maxCardinalityToUseBitmapIndex + return f +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/filter/in.go b/vendor/github.com/grafadruid/go-druid/builder/filter/in.go new file mode 100644 index 000000000..5c68d61af --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/filter/in.go @@ -0,0 +1,69 @@ +package filter + +import ( + "encoding/json" + + "github.com/grafadruid/go-druid/builder" + "github.com/grafadruid/go-druid/builder/extractionfn" +) + +type In struct { + Base + Dimension string `json:"dimension,omitempty"` + Values []string `json:"values,omitempty"` + ExtractionFn builder.ExtractionFn `json:"extractionFn,omitempty"` + FilterTuning *FilterTuning `json:"filterTuning,omitempty"` +} + +func NewIn() *In { + i := &In{} + i.SetType("in") + return i +} + +func (i *In) SetDimension(dimension string) *In { + i.Dimension = dimension + return i +} + +func (i *In) SetValues(values []string) *In { + i.Values = values + return i +} + +func (i *In) SetExtractionFn(extractionFn builder.ExtractionFn) *In { + i.ExtractionFn = extractionFn + return i +} + +func (i *In) SetFilterTuning(filterTuning *FilterTuning) *In { + i.FilterTuning = filterTuning + return i +} + +func (i *In) UnmarshalJSON(data []byte) error { + var err error + var tmp struct { + Base + Dimension string `json:"dimension,omitempty"` + Values []string `json:"values,omitempty"` + ExtractionFn json.RawMessage `json:"extractionFn,omitempty"` + FilterTuning *FilterTuning `json:"filterTuning,omitempty"` + } + if err = json.Unmarshal(data, &tmp); err != nil { + return err + } + var e builder.ExtractionFn + if tmp.ExtractionFn != nil { + e, err = extractionfn.Load(tmp.ExtractionFn) + if err != nil { + return err + } + } + i.Base = tmp.Base + i.Dimension = tmp.Dimension + i.Values = tmp.Values + i.ExtractionFn = e + i.FilterTuning = tmp.FilterTuning + return nil +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/filter/interval.go b/vendor/github.com/grafadruid/go-druid/builder/filter/interval.go new file mode 100644 index 000000000..1b2bcd322 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/filter/interval.go @@ -0,0 +1,79 @@ +package filter + +import ( + "encoding/json" + + "github.com/grafadruid/go-druid/builder" + "github.com/grafadruid/go-druid/builder/extractionfn" + "github.com/grafadruid/go-druid/builder/intervals" +) + +type Interval struct { + Base + Dimension string `json:"dimension,omitempty"` + Intervals []*intervals.Interval `json:"intervals,omitempty"` + ExtractionFn builder.ExtractionFn `json:"extractionFn,omitempty"` + FilterTuning *FilterTuning `json:"filterTuning,omitempty"` +} + +func NewInterval() *Interval { + i := &Interval{} + i.SetType("interval") + return i +} + +func (i *Interval) SetDimension(dimension string) *Interval { + i.Dimension = dimension + return i +} + +func (i *Interval) SetIntervals(intervals []*intervals.Interval) *Interval { + i.Intervals = intervals + return i +} + +func (i *Interval) SetExtractionFn(extractionFn builder.ExtractionFn) *Interval { + i.ExtractionFn = extractionFn + return i +} + +func (i *Interval) SetFilterTuning(filterTuning *FilterTuning) *Interval { + i.FilterTuning = filterTuning + return i +} + +func (i *Interval) UnmarshalJSON(data []byte) error { + var err error + var tmp struct { + Base + Dimension string `json:"dimension,omitempty"` + Intervals json.RawMessage `json:"intervals,omitempty"` + 
ExtractionFn json.RawMessage `json:"extractionFn,omitempty"` + FilterTuning *FilterTuning `json:"filterTuning,omitempty"` + } + if err = json.Unmarshal(data, + &tmp); err != nil { + return err + } + var e builder.ExtractionFn + if tmp.ExtractionFn != nil { + e, err = extractionfn.Load(tmp.ExtractionFn) + if err != nil { + return err + } + } + var ii []*intervals.Interval + if tmp.Intervals != nil { + err = json.Unmarshal(tmp.Intervals, + &ii) + if err != nil { + return err + } + } + i.Base = tmp.Base + i.Dimension = tmp.Dimension + i.Intervals = ii + i.ExtractionFn = e + i.FilterTuning = tmp.FilterTuning + return nil +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/filter/javascript.go b/vendor/github.com/grafadruid/go-druid/builder/filter/javascript.go new file mode 100644 index 000000000..c76db8267 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/filter/javascript.go @@ -0,0 +1,69 @@ +package filter + +import ( + "encoding/json" + + "github.com/grafadruid/go-druid/builder" + "github.com/grafadruid/go-druid/builder/extractionfn" +) + +type Javascript struct { + Base + Dimension string `json:"dimension,omitempty"` + Function string `json:"function,omitempty"` + ExtractionFn builder.ExtractionFn `json:"extractionFn,omitempty"` + FilterTuning *FilterTuning `json:"filterTuning,omitempty"` +} + +func NewJavascript() *Javascript { + j := &Javascript{} + j.SetType("javascript") + return j +} + +func (j *Javascript) SetDimension(dimension string) *Javascript { + j.Dimension = dimension + return j +} + +func (j *Javascript) SetFunction(function string) *Javascript { + j.Function = function + return j +} + +func (j *Javascript) SetExtractionFn(extractionFn builder.ExtractionFn) *Javascript { + j.ExtractionFn = extractionFn + return j +} + +func (j *Javascript) SetFilterTuning(filterTuning *FilterTuning) *Javascript { + j.FilterTuning = filterTuning + return j +} + +func (j *Javascript) UnmarshalJSON(data []byte) error { + var err error + var tmp struct { + Base + Dimension string `json:"dimension,omitempty"` + Function string `json:"function,omitempty"` + ExtractionFn json.RawMessage `json:"extractionFn,omitempty"` + FilterTuning *FilterTuning `json:"filterTuning,omitempty"` + } + if err = json.Unmarshal(data, &tmp); err != nil { + return err + } + var e builder.ExtractionFn + if tmp.ExtractionFn != nil { + e, err = extractionfn.Load(tmp.ExtractionFn) + if err != nil { + return err + } + } + j.Base = tmp.Base + j.Dimension = tmp.Dimension + j.Function = tmp.Function + j.ExtractionFn = e + j.FilterTuning = tmp.FilterTuning + return nil +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/filter/like.go b/vendor/github.com/grafadruid/go-druid/builder/filter/like.go new file mode 100644 index 000000000..fa43e7b02 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/filter/like.go @@ -0,0 +1,77 @@ +package filter + +import ( + "encoding/json" + + "github.com/grafadruid/go-druid/builder" + "github.com/grafadruid/go-druid/builder/extractionfn" +) + +type Like struct { + Base + Dimension string `json:"dimension,omitempty"` + Pattern string `json:"pattern,omitempty"` + Escape string `json:"escapte,omitempty"` + ExtractionFn builder.ExtractionFn `json:"extractionFn,omitempty"` + FilterTuning *FilterTuning `json:"filterTuning,omitempty"` +} + +func NewLike() *Like { + l := &Like{} + l.SetType("like") + return l +} + +func (l *Like) SetDimension(dimension string) *Like { + l.Dimension = dimension + return l +} + +func (l *Like) SetPattern(pattern string) 
*Like { + l.Pattern = pattern + return l +} + +func (l *Like) SetEscape(escape string) *Like { + l.Escape = escape + return l +} + +func (l *Like) SetExtractionFn(extractionFn builder.ExtractionFn) *Like { + l.ExtractionFn = extractionFn + return l +} + +func (l *Like) SetFilterTuning(filterTuning *FilterTuning) *Like { + l.FilterTuning = filterTuning + return l +} + +func (l *Like) UnmarshalJSON(data []byte) error { + var err error + var tmp struct { + Base + Dimension string `json:"dimension,omitempty"` + Pattern string `json:"pattern,omitempty"` + Escape string `json:"escape,omitempty"` + ExtractionFn json.RawMessage `json:"extractionFn,omitempty"` + FilterTuning *FilterTuning `json:"filterTuning,omitempty"` + } + if err = json.Unmarshal(data, &tmp); err != nil { + return err + } + var e builder.ExtractionFn + if tmp.ExtractionFn != nil { + e, err = extractionfn.Load(tmp.ExtractionFn) + if err != nil { + return err + } + } + l.Base = tmp.Base + l.Dimension = tmp.Dimension + l.Pattern = tmp.Pattern + l.Escape = tmp.Escape + l.ExtractionFn = e + l.FilterTuning = tmp.FilterTuning + return nil +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/filter/not.go b/vendor/github.com/grafadruid/go-druid/builder/filter/not.go new file mode 100644 index 000000000..19defea21 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/filter/not.go @@ -0,0 +1,44 @@ +package filter + +import ( + "encoding/json" + + "github.com/grafadruid/go-druid/builder" +) + +type Not struct { + Base + Field builder.Filter `json:"field,omitempty"` +} + +func NewNot() *Not { + n := &Not{} + n.SetType("not") + return n +} + +func (n *Not) SetField(field builder.Filter) *Not { + n.Field = field + return n +} + +func (n *Not) UnmarshalJSON(data []byte) error { + var err error + var tmp struct { + Base + Field json.RawMessage `json:"field,omitempty"` + } + if err = json.Unmarshal(data, &tmp); err != nil { + return err + } + var f builder.Filter + if tmp.Field != nil { + f, err = Load(tmp.Field) + if err != nil { + return err + } + } + n.Base = tmp.Base + n.Field = f + return nil +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/filter/or.go b/vendor/github.com/grafadruid/go-druid/builder/filter/or.go new file mode 100644 index 000000000..3037ead56 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/filter/or.go @@ -0,0 +1,44 @@ +package filter + +import ( + "encoding/json" + "github.com/grafadruid/go-druid/builder" +) + +type Or struct { + Base + Fields []builder.Filter `json:"fields,omitempty"` +} + +func NewOr() *Or { + o := &Or{} + o.SetType("or") + return o +} + +func (o *Or) SetFields(fields []builder.Filter) *Or { + o.Fields = fields + return o +} + +func (o *Or) UnmarshalJSON(data []byte) error { + var err error + var tmp struct { + Base + Fields []json.RawMessage `json:"fields,omitempty"` + } + if err = json.Unmarshal(data, &tmp); err != nil { + return err + } + var f builder.Filter + ff := make([]builder.Filter, len(tmp.Fields)) + for i := range tmp.Fields { + if f, err = Load(tmp.Fields[i]); err != nil { + return err + } + ff[i] = f + } + o.Base = tmp.Base + o.Fields = ff + return nil +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/filter/regex.go b/vendor/github.com/grafadruid/go-druid/builder/filter/regex.go new file mode 100644 index 000000000..73054109f --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/filter/regex.go @@ -0,0 +1,69 @@ +package filter + +import ( + "encoding/json" + + "github.com/grafadruid/go-druid/builder" + 
"github.com/grafadruid/go-druid/builder/extractionfn" +) + +type Regex struct { + Base + Dimension string `json:"dimension,omitempty"` + Pattern string `json:"pattern,omitempty"` + ExtractionFn builder.ExtractionFn `json:"extractionFn,omitempty"` + FilterTuning *FilterTuning `json:"filterTuning,omitempty"` +} + +func NewRegex() *Regex { + r := &Regex{} + r.SetType("regex") + return r +} + +func (r *Regex) SetDimension(dimension string) *Regex { + r.Dimension = dimension + return r +} + +func (r *Regex) SetPattern(pattern string) *Regex { + r.Pattern = pattern + return r +} + +func (r *Regex) SetExtractionFn(extractionFn builder.ExtractionFn) *Regex { + r.ExtractionFn = extractionFn + return r +} + +func (r *Regex) SetFilterTuning(filterTuning *FilterTuning) *Regex { + r.FilterTuning = filterTuning + return r +} + +func (r *Regex) UnmarshalJSON(data []byte) error { + var err error + var tmp struct { + Base + Dimension string `json:"dimension,omitempty"` + Pattern string `json:"pattern,omitempty"` + ExtractionFn json.RawMessage `json:"extractionFn,omitempty"` + FilterTuning *FilterTuning `json:"filterTuning,omitempty"` + } + if err = json.Unmarshal(data, &tmp); err != nil { + return err + } + var e builder.ExtractionFn + if tmp.ExtractionFn != nil { + e, err = extractionfn.Load(tmp.ExtractionFn) + if err != nil { + return err + } + } + r.Base = tmp.Base + r.Dimension = tmp.Dimension + r.Pattern = tmp.Pattern + r.ExtractionFn = e + r.FilterTuning = tmp.FilterTuning + return nil +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/filter/search.go b/vendor/github.com/grafadruid/go-druid/builder/filter/search.go new file mode 100644 index 000000000..3d73a8e67 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/filter/search.go @@ -0,0 +1,69 @@ +package filter + +import ( + "encoding/json" + + "github.com/grafadruid/go-druid/builder" + "github.com/grafadruid/go-druid/builder/extractionfn" +) + +type Search struct { + Base + Dimension string `json:"dimension,omitempty"` + Query string `json:"builder,omitempty"` + ExtractionFn builder.ExtractionFn `json:"extractionFn,omitempty"` + FilterTuning *FilterTuning `json:"filterTuning,omitempty"` +} + +func NewSearch() *Search { + s := &Search{} + s.SetType("search") + return s +} + +func (s *Search) SetDimension(dimension string) *Search { + s.Dimension = dimension + return s +} + +func (s *Search) SetQuery(q string) *Search { + s.Query = q + return s +} + +func (s *Search) SetExtractionFn(extractionFn builder.ExtractionFn) *Search { + s.ExtractionFn = extractionFn + return s +} + +func (s *Search) SetFilterTuning(filterTuning *FilterTuning) *Search { + s.FilterTuning = filterTuning + return s +} + +func (s *Search) UnmarshalJSON(data []byte) error { + var err error + var tmp struct { + Base + Dimension string `json:"dimension,omitempty"` + Query string `json:"builder,omitempty"` + ExtractionFn json.RawMessage `json:"extractionFn,omitempty"` + FilterTuning *FilterTuning `json:"filterTuning,omitempty"` + } + if err = json.Unmarshal(data, &tmp); err != nil { + return err + } + var e builder.ExtractionFn + if tmp.ExtractionFn != nil { + e, err = extractionfn.Load(tmp.ExtractionFn) + if err != nil { + return err + } + } + s.Base = tmp.Base + s.Dimension = tmp.Dimension + s.Query = tmp.Query + s.ExtractionFn = e + s.FilterTuning = tmp.FilterTuning + return nil +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/filter/selector.go b/vendor/github.com/grafadruid/go-druid/builder/filter/selector.go new file mode 100644 index 
000000000..97ba89af0 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/filter/selector.go @@ -0,0 +1,69 @@ +package filter + +import ( + "encoding/json" + + "github.com/grafadruid/go-druid/builder" + "github.com/grafadruid/go-druid/builder/extractionfn" +) + +type Selector struct { + Base + Dimension string `json:"dimension,omitempty"` + Value string `json:"value,omitempty"` + ExtractionFn builder.ExtractionFn `json:"extractionFn,omitempty"` + FilterTuning *FilterTuning `json:"filterTuning,omitempty"` +} + +func NewSelector() *Selector { + s := &Selector{} + s.SetType("selector") + return s +} + +func (s *Selector) SetDimension(dimension string) *Selector { + s.Dimension = dimension + return s +} + +func (s *Selector) SetValue(value string) *Selector { + s.Value = value + return s +} + +func (s *Selector) SetExtractionFn(extractionFn builder.ExtractionFn) *Selector { + s.ExtractionFn = extractionFn + return s +} + +func (s *Selector) SetFilterTuning(filterTuning *FilterTuning) *Selector { + s.FilterTuning = filterTuning + return s +} + +func (s *Selector) UnmarshalJSON(data []byte) error { + var err error + var tmp struct { + Base + Dimension string `json:"dimension,omitempty"` + Value string `json:"value,omitempty"` + ExtractionFn json.RawMessage `json:"extractionFn,omitempty"` + FilterTuning *FilterTuning `json:"filterTuning,omitempty"` + } + if err = json.Unmarshal(data, &tmp); err != nil { + return err + } + var e builder.ExtractionFn + if tmp.ExtractionFn != nil { + e, err = extractionfn.Load(tmp.ExtractionFn) + if err != nil { + return err + } + } + s.Base = tmp.Base + s.Dimension = tmp.Dimension + s.Value = tmp.Value + s.ExtractionFn = e + s.FilterTuning = tmp.FilterTuning + return nil +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/filter/spatial.go b/vendor/github.com/grafadruid/go-druid/builder/filter/spatial.go new file mode 100644 index 000000000..464efac99 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/filter/spatial.go @@ -0,0 +1,57 @@ +package filter + +import ( + "encoding/json" + + "github.com/grafadruid/go-druid/builder" + "github.com/grafadruid/go-druid/builder/bound" +) + +type Spatial struct { + Base + Dimension string `json:"dimension,omitempty"` + Bound builder.Bound `json:"bound,omitempty"` + FilterTuning *FilterTuning `json:"filterTuning,omitempty"` +} + +func NewSpatial() *Spatial { + s := &Spatial{} + s.SetType("spatial") + return s +} + +func (s *Spatial) SetDimension(dimension string) *Spatial { + s.Dimension = dimension + return s +} + +func (s *Spatial) SetBound(bound builder.Bound) *Spatial { + s.Bound = bound + return s +} + +func (s *Spatial) SetFilterTuning(filterTuning *FilterTuning) *Spatial { + s.FilterTuning = filterTuning + return s +} + +func (s *Spatial) UnmarshalJSON(data []byte) error { + var tmp struct { + Base + Dimension string `json:"dimension,omitempty"` + Bound json.RawMessage `json:"bound,omitempty"` + FilterTuning *FilterTuning `json:"filterTuning,omitempty"` + } + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } + b, err := bound.Load(tmp.Bound) + if err != nil { + return err + } + s.Base = tmp.Base + s.Dimension = tmp.Dimension + s.Bound = b + s.FilterTuning = tmp.FilterTuning + return nil +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/filter/true.go b/vendor/github.com/grafadruid/go-druid/builder/filter/true.go new file mode 100644 index 000000000..4c31f9dd8 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/filter/true.go @@ -0,0 +1,11 @@ 
+package filter + +type True struct { + Base +} + +func NewTrue() *True { + t := &True{} + t.SetType("true") + return t +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/granularity/duration.go b/vendor/github.com/grafadruid/go-druid/builder/granularity/duration.go new file mode 100644 index 000000000..57cec5139 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/granularity/duration.go @@ -0,0 +1,33 @@ +package granularity + +import ( + "time" +) + +// Duration granularity is specified as an exact duration in milliseconds and timestamps are returned as UTC. +// Duration granularity values are in millis. +// https://druid.apache.org/docs/latest/querying/granularities.html#duration-granularities +type Duration struct { + Base + Duration time.Duration `json:"duration,omitempty"` + Origin time.Time `json:"origin,omitempty"` +} + +// NewDuration creates new Duration. +func NewDuration() *Duration { + d := &Duration{} + d.SetType("duration") + return d +} + +// SetDuration sets duration. +func (d *Duration) SetDuration(duration time.Duration) *Duration { + d.Duration = duration + return d +} + +// SetOrigin sets the origin +func (d *Duration) SetOrigin(origin time.Time) *Duration { + d.Origin = origin + return d +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/granularity/granularity.go b/vendor/github.com/grafadruid/go-druid/builder/granularity/granularity.go new file mode 100644 index 000000000..753361978 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/granularity/granularity.go @@ -0,0 +1,52 @@ +package granularity + +import ( + "encoding/json" + "errors" + "strconv" + + "github.com/grafadruid/go-druid/builder" +) + +// Base is the base for granularity. +type Base struct { + Typ string `json:"type,omitempty"` +} + +// SetType sets type. +func (b *Base) SetType(typ string) *Base { + b.Typ = typ + return b +} + +// Type returns the type. +func (b *Base) Type() builder.ComponentType { + return b.Typ +} + +// Load converts the druid native query to builder.Granularity +func Load(data []byte) (builder.Granularity, error) { + var g builder.Granularity + if string(data) == "null" { + return g, nil + } + var t struct { + Typ string `json:"type,omitempty"` + } + if err := json.Unmarshal(data, &t); err != nil { + g = NewSimple() + return g, json.Unmarshal(data, &g) + } + switch t.Typ { + case "duration": + g = NewDuration() + case "period": + g = NewPeriod() + case "all", "none": + g = NewSimple() + data = []byte(strconv.Quote(t.Typ)) + default: + return nil, errors.New("unsupported granularity type") + } + return g, json.Unmarshal(data, &g) +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/granularity/period.go b/vendor/github.com/grafadruid/go-druid/builder/granularity/period.go new file mode 100644 index 000000000..b251cd042 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/granularity/period.go @@ -0,0 +1,42 @@ +package granularity + +import ( + "time" + + "github.com/grafadruid/go-druid/builder/types" +) + +// Period granularity is specified as arbitrary period combinations of years, months, weeks, hours, minutes and seconds +// (e.g. P2W, P3M, PT1H30M, PT0.750S) in ISO8601 format. +// https://druid.apache.org/docs/latest/querying/granularities.html#period-granularities +type Period struct { + Base + Period time.Duration `json:"period,omitempty"` + Origin time.Time `json:"origin,omitempty"` + TimeZone types.DateTimeZone `json:"timeZone,omitempty"` +} + +// NewPeriod creates a Period type. 
+func NewPeriod() *Period { + p := &Period{} + p.SetType("period") + return p +} + +// SetPeriod sets period. +func (p *Period) SetPeriod(period time.Duration) *Period { + p.Period = period + return p +} + +// SetOrigin sets origin. +func (p *Period) SetOrigin(origin time.Time) *Period { + p.Origin = origin + return p +} + +// SetTimeZone sets timezone. +func (p *Period) SetTimeZone(timeZone types.DateTimeZone) *Period { + p.TimeZone = timeZone + return p +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/granularity/simple.go b/vendor/github.com/grafadruid/go-druid/builder/granularity/simple.go new file mode 100644 index 000000000..70de4a35e --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/granularity/simple.go @@ -0,0 +1,39 @@ +package granularity + +import "github.com/grafadruid/go-druid/builder" + +// Simple granularities are specified as a string and bucket timestamps by their UTC time. +// https://druid.apache.org/docs/latest/querying/granularities.html#simple-granularities +type Simple string + +const ( + All Simple = "all" + None = "none" + Second = "second" + Minute = "minute" + FifteenMinute = "fifteen_minute" + ThirtyMinute = "thirty_minute" + Hour = "hour" + Day = "day" + Week = "week" + Month = "month" + Quarter = "quarter" + Year = "year" +) + +// Type sets the type to Simple +func (s *Simple) Type() builder.ComponentType { + return "simple" +} + +// SetGranularity sets granularity. +func (s *Simple) SetGranularity(g Simple) *Simple { + *s = g + return s +} + +// NewSimple creates a Simple type. +func NewSimple() *Simple { + var s Simple + return &s +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/havingspec/always.go b/vendor/github.com/grafadruid/go-druid/builder/havingspec/always.go new file mode 100644 index 000000000..a87b3589d --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/havingspec/always.go @@ -0,0 +1,11 @@ +package havingspec + +type Always struct { + Base +} + +func NewAlways() *Always { + a := &Always{} + a.SetType("always") + return a +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/havingspec/and.go b/vendor/github.com/grafadruid/go-druid/builder/havingspec/and.go new file mode 100644 index 000000000..a30deb645 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/havingspec/and.go @@ -0,0 +1,45 @@ +package havingspec + +import ( + "encoding/json" + + "github.com/grafadruid/go-druid/builder" +) + +type And struct { + Base + HavingSpecs []builder.HavingSpec `json:"havingSpecs,omitempty"` +} + +func NewAnd() *And { + a := &And{} + a.SetType("and") + return a +} + +func (a *And) SetHavingSpecs(havingSpecs []builder.HavingSpec) *And { + a.HavingSpecs = havingSpecs + return a +} + +func (a *And) UnmarshalJSON(data []byte) error { + var tmp struct { + Base + HavingSpecs []json.RawMessage `json:"havingSpecs,omitempty"` + } + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } + var err error + var h builder.HavingSpec + hh := make([]builder.HavingSpec, len(tmp.HavingSpecs)) + for i := range tmp.HavingSpecs { + if h, err = Load(tmp.HavingSpecs[i]); err != nil { + return err + } + hh[i] = h + } + a.Base = tmp.Base + a.HavingSpecs = hh + return nil +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/havingspec/dim_selector.go b/vendor/github.com/grafadruid/go-druid/builder/havingspec/dim_selector.go new file mode 100644 index 000000000..d8a3464c3 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/havingspec/dim_selector.go @@ -0,0 +1,57 @@ +package 
havingspec + +import ( + "encoding/json" + + "github.com/grafadruid/go-druid/builder" + "github.com/grafadruid/go-druid/builder/extractionfn" +) + +type DimSelector struct { + Base + Dimension string `json:"dimension,omitempty"` + Value string `json:"value,omitempty"` + ExtractionFn builder.ExtractionFn `json:"extractionFn,omitempty"` +} + +func NewDimSelector() *DimSelector { + d := &DimSelector{} + d.SetType("dimSelector") + return d +} + +func (d *DimSelector) SetDimension(dimension string) *DimSelector { + d.Dimension = dimension + return d +} + +func (d *DimSelector) SetValue(value string) *DimSelector { + d.Value = value + return d +} + +func (d *DimSelector) SetExtractionFn(extractionFn builder.ExtractionFn) *DimSelector { + d.ExtractionFn = extractionFn + return d +} + +func (d *DimSelector) UnmarshalJSON(data []byte) error { + var tmp struct { + Base + Dimension string `json:"dimension,omitempty"` + Value string `json:"value,omitempty"` + ExtractionFn json.RawMessage `json:"extractionFn,omitempty"` + } + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } + e, err := extractionfn.Load(tmp.ExtractionFn) + if err != nil { + return err + } + d.Base = tmp.Base + d.Dimension = tmp.Dimension + d.Value = tmp.Value + d.ExtractionFn = e + return nil +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/havingspec/equal_to.go b/vendor/github.com/grafadruid/go-druid/builder/havingspec/equal_to.go new file mode 100644 index 000000000..bd8dfe4cb --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/havingspec/equal_to.go @@ -0,0 +1,23 @@ +package havingspec + +type EqualTo struct { + Base + Aggregation string `json:"aggregation,omitempty"` + Value *float64 `json:"value,omitempty"` +} + +func NewEqualTo() *EqualTo { + e := &EqualTo{} + e.SetType("equalTo") + return e +} + +func (e *EqualTo) SetAggregation(aggregation string) *EqualTo { + e.Aggregation = aggregation + return e +} + +func (e *EqualTo) SetValue(value float64) *EqualTo { + e.Value = &value + return e +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/havingspec/greather_than.go b/vendor/github.com/grafadruid/go-druid/builder/havingspec/greather_than.go new file mode 100644 index 000000000..e0ce38956 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/havingspec/greather_than.go @@ -0,0 +1,23 @@ +package havingspec + +type GreaterThan struct { + Base + Aggregation string `json:"aggregation,omitempty"` + Value *float64 `json:"value,omitempty"` +} + +func NewGreaterThan() *GreaterThan { + g := &GreaterThan{} + g.SetType("greaterThan") + return g +} + +func (g *GreaterThan) SetAggregation(aggregation string) *GreaterThan { + g.Aggregation = aggregation + return g +} + +func (g *GreaterThan) SetValue(value float64) *GreaterThan { + g.Value = &value + return g +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/havingspec/having_spec.go b/vendor/github.com/grafadruid/go-druid/builder/havingspec/having_spec.go new file mode 100644 index 000000000..07755ecba --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/havingspec/having_spec.go @@ -0,0 +1,57 @@ +package havingspec + +import ( + "encoding/json" + "errors" + + "github.com/grafadruid/go-druid/builder" +) + +type Base struct { + Typ builder.ComponentType `json:"type,omitempty"` +} + +func (b *Base) SetType(typ builder.ComponentType) *Base { + b.Typ = typ + return b +} + +func (b *Base) Type() builder.ComponentType { + return b.Typ +} + +func Load(data []byte) (builder.HavingSpec, error) { + var h 
builder.HavingSpec + if string(data) == "null" { + return h, nil + } + var t struct { + Typ builder.ComponentType `json:"type,omitempty"` + } + if err := json.Unmarshal(data, &t); err != nil { + return nil, err + } + switch t.Typ { + case "always": + h = NewAlways() + case "and": + h = NewAnd() + case "dimSelector": + h = NewDimSelector() + case "equalTo": + h = NewEqualTo() + case "greaterThan": + h = NewGreaterThan() + case "lessThan": + h = NewLessThan() + case "never": + h = NewNever() + case "not": + h = NewNot() + case "or": + h = NewOr() + default: + return nil, errors.New("unsupported havingspec type") + } + return h, json.Unmarshal(data, &h) +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/havingspec/less_than.go b/vendor/github.com/grafadruid/go-druid/builder/havingspec/less_than.go new file mode 100644 index 000000000..6ec9ba45c --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/havingspec/less_than.go @@ -0,0 +1,23 @@ +package havingspec + +type LessThan struct { + Base + Aggregation string `json:"aggregation,omitempty"` + Value *float64 `json:"value,omitempty"` +} + +func NewLessThan() *LessThan { + l := &LessThan{} + l.SetType("lessThan") + return l +} + +func (l *LessThan) SetAggregation(aggregation string) *LessThan { + l.Aggregation = aggregation + return l +} + +func (l *LessThan) SetValue(value float64) *LessThan { + l.Value = &value + return l +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/havingspec/never.go b/vendor/github.com/grafadruid/go-druid/builder/havingspec/never.go new file mode 100644 index 000000000..8068c8a89 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/havingspec/never.go @@ -0,0 +1,11 @@ +package havingspec + +type Never struct { + Base +} + +func NewNever() *Never { + n := &Never{} + n.SetType("never") + return n +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/havingspec/not.go b/vendor/github.com/grafadruid/go-druid/builder/havingspec/not.go new file mode 100644 index 000000000..3a12a383f --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/havingspec/not.go @@ -0,0 +1,40 @@ +package havingspec + +import ( + "encoding/json" + + "github.com/grafadruid/go-druid/builder" +) + +type Not struct { + Base + HavingSpec builder.HavingSpec `json:"havingSpec,omitempty"` +} + +func NewNot() *Not { + n := &Not{} + n.SetType("not") + return n +} + +func (n *Not) SetHavingSpecs(havingSpec builder.HavingSpec) *Not { + n.HavingSpec = havingSpec + return n +} + +func (n *Not) UnmarshalJSON(data []byte) error { + var tmp struct { + Base + HavingSpec json.RawMessage `json:"havingSpec,omitempty"` + } + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } + h, err := Load(tmp.HavingSpec) + if err != nil { + return err + } + n.Base = tmp.Base + n.HavingSpec = h + return nil +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/havingspec/or.go b/vendor/github.com/grafadruid/go-druid/builder/havingspec/or.go new file mode 100644 index 000000000..7ecda81a0 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/havingspec/or.go @@ -0,0 +1,45 @@ +package havingspec + +import ( + "encoding/json" + + "github.com/grafadruid/go-druid/builder" +) + +type Or struct { + Base + HavingSpecs []builder.HavingSpec `json:"havingSpecs,omitempty"` +} + +func NewOr() *Or { + o := &Or{} + o.SetType("or") + return o +} + +func (o *Or) SetHavingSpecs(havingSpecs []builder.HavingSpec) *Or { + o.HavingSpecs = havingSpecs + return o +} + +func (o *Or) UnmarshalJSON(data []byte) 
error { + var tmp struct { + Base + HavingSpecs []json.RawMessage `json:"havingSpecs,omitempty"` + } + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } + var err error + var h builder.HavingSpec + hh := make([]builder.HavingSpec, len(tmp.HavingSpecs)) + for i := range tmp.HavingSpecs { + if h, err = Load(tmp.HavingSpecs[i]); err != nil { + return err + } + hh[i] = h + } + o.Base = tmp.Base + o.HavingSpecs = hh + return nil +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/intervals/default.go b/vendor/github.com/grafadruid/go-druid/builder/intervals/default.go new file mode 100644 index 000000000..d0cb1a28d --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/intervals/default.go @@ -0,0 +1,17 @@ +package intervals + +type Intervals struct { + Base + Intervals []*Interval `json:"intervals,omitempty"` +} + +func NewIntervals() *Intervals { + i := &Intervals{} + i.SetType("intervals") + return i +} + +func (i *Intervals) SetIntervals(intervals []*Interval) *Intervals { + i.Intervals = intervals + return i +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/intervals/interval.go b/vendor/github.com/grafadruid/go-druid/builder/intervals/interval.go new file mode 100644 index 000000000..fca39b857 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/intervals/interval.go @@ -0,0 +1,28 @@ +package intervals + +import ( + "time" +) + +const ( + IntervalTimeFormat = time.RFC3339Nano +) + +// Interval represents a druid interval. +type Interval string + +// NewInterval instantiate a new interval. +func NewInterval() *Interval { + var i Interval + return &i +} + +func (i *Interval) SetInterval(start, end time.Time) *Interval { + *i = Interval(start.Format(IntervalTimeFormat) + "/" + end.Format(IntervalTimeFormat)) + return i +} + +func (i *Interval) SetIntervalWithString(start, end string) *Interval { + *i = Interval(start + "/" + end) + return i +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/intervals/intervals.go b/vendor/github.com/grafadruid/go-druid/builder/intervals/intervals.go new file mode 100644 index 000000000..f6601d199 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/intervals/intervals.go @@ -0,0 +1,41 @@ +package intervals + +import ( + "encoding/json" + "errors" + + "github.com/grafadruid/go-druid/builder" +) + +type Base struct { + Typ builder.ComponentType `json:"type,omitempty"` +} + +func (b *Base) SetType(typ builder.ComponentType) *Base { + b.Typ = typ + return b +} + +func (b *Base) Type() builder.ComponentType { + return b.Typ +} + +func Load(data []byte) (builder.Intervals, error) { + var i builder.Intervals + if string(data) == "null" { + return i, nil + } + var t struct { + Typ builder.ComponentType `json:"type,omitempty"` + } + if err := json.Unmarshal(data, &t); err != nil { + return nil, err + } + switch t.Typ { + case "intervals": + i = NewIntervals() + default: + return nil, errors.New("unsupported intervals type") + } + return i, json.Unmarshal(data, &i) +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/limitspec/default.go b/vendor/github.com/grafadruid/go-druid/builder/limitspec/default.go new file mode 100644 index 000000000..689daf2d3 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/limitspec/default.go @@ -0,0 +1,46 @@ +package limitspec + +import ( + "github.com/grafadruid/go-druid/builder/types" +) + +type Direction string + +const ( + Ascending Direction = "ASCENDING" + Descending = "DESCENDING" +) + +type OrderByColumnSpec struct { + 
Dimension string `json:"dimension,omitempty"` + Direction Direction `json:"direction,omitempty"` + DimensionOrder types.StringComparator `json:"dimensionOrder,omitempty"` +} + +type Default struct { + Base + Columns []OrderByColumnSpec `json:"columns,omitempty"` + Offset int `json:"offset,omitempty"` + Limit int `json:"limit,omitempty"` +} + +func NewDefault() *Default { + d := &Default{} + d.SetType("default") + return d +} + +func (d *Default) SetColumns(columns []OrderByColumnSpec) *Default { + d.Columns = columns + return d +} + +func (d *Default) SetOffset(offset int) *Default { + d.Offset = offset + return d +} + +func (d *Default) SetLimit(limit int) *Default { + d.Limit = limit + return d +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/limitspec/limit_spec.go b/vendor/github.com/grafadruid/go-druid/builder/limitspec/limit_spec.go new file mode 100644 index 000000000..f841a145f --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/limitspec/limit_spec.go @@ -0,0 +1,41 @@ +package limitspec + +import ( + "encoding/json" + "errors" + + "github.com/grafadruid/go-druid/builder" +) + +type Base struct { + Typ builder.ComponentType `json:"type,omitempty"` +} + +func (b *Base) SetType(typ builder.ComponentType) *Base { + b.Typ = typ + return b +} + +func (b *Base) Type() builder.ComponentType { + return b.Typ +} + +func Load(data []byte) (builder.LimitSpec, error) { + var l builder.LimitSpec + if string(data) == "null" { + return l, nil + } + var t struct { + Typ builder.ComponentType `json:"type,omitempty"` + } + if err := json.Unmarshal(data, &t); err != nil { + return nil, err + } + switch t.Typ { + case "default": + l = NewDefault() + default: + return nil, errors.New("unsupported limitspec type") + } + return l, json.Unmarshal(data, &l) +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/lookup/lookup.go b/vendor/github.com/grafadruid/go-druid/builder/lookup/lookup.go new file mode 100644 index 000000000..edf73d661 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/lookup/lookup.go @@ -0,0 +1,41 @@ +package lookup + +import ( + "encoding/json" + "errors" + + "github.com/grafadruid/go-druid/builder" +) + +type Base struct { + Typ string `json:"type,omitempty"` +} + +func (b *Base) SetType(typ string) *Base { + b.Typ = typ + return b +} + +func (b *Base) Type() builder.ComponentType { + return b.Typ +} + +func Load(data []byte) (builder.LookupExtractor, error) { + var l builder.LookupExtractor + if string(data) == "null" { + return l, nil + } + var t struct { + Typ string `json:"type,omitempty"` + } + if err := json.Unmarshal(data, &t); err != nil { + return nil, err + } + switch t.Typ { + case "map": + l = NewMap() + default: + return nil, errors.New("unsupported lookup type") + } + return l, json.Unmarshal(data, &l) +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/lookup/map.go b/vendor/github.com/grafadruid/go-druid/builder/lookup/map.go new file mode 100644 index 000000000..d60b3f7a1 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/lookup/map.go @@ -0,0 +1,23 @@ +package lookup + +type Map struct { + Base + Map map[string]string `json:"map,omitempty"` + IsOneToOne *bool `json:"isOneToOne,omitempty"` +} + +func NewMap() *Map { + m := &Map{} + m.SetType("map") + return m +} + +func (m *Map) SetMap(mp map[string]string) *Map { + m.Map = mp + return m +} + +func (m *Map) SetIsOneToOne(i bool) *Map { + m.IsOneToOne = &i + return m +} diff --git 
a/vendor/github.com/grafadruid/go-druid/builder/postaggregation/arithmetic.go b/vendor/github.com/grafadruid/go-druid/builder/postaggregation/arithmetic.go new file mode 100644 index 000000000..797649adb --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/postaggregation/arithmetic.go @@ -0,0 +1,66 @@ +package postaggregation + +import ( + "encoding/json" + + "github.com/grafadruid/go-druid/builder" +) + +type Arithmetic struct { + Base + Fn string `json:"fn,omitempty"` + Fields []builder.PostAggregator `json:"fields,omitempty"` + Ordering string `json:"ordering,omitempty"` +} + +func NewArithmetic() *Arithmetic { + a := &Arithmetic{} + a.SetType("arithmetic") + return a +} + +func (a *Arithmetic) SetName(name string) *Arithmetic { + a.Base.SetName(name) + return a +} + +func (a *Arithmetic) SetFn(fn string) *Arithmetic { + a.Fn = fn + return a +} + +func (a *Arithmetic) SetFields(fields []builder.PostAggregator) *Arithmetic { + a.Fields = fields + return a +} + +func (a *Arithmetic) SetOrdering(ordering string) *Arithmetic { + a.Ordering = ordering + return a +} + +func (a *Arithmetic) UnmarshalJSON(data []byte) error { + var tmp struct { + Base + Fn string `json:"fn,omitempty"` + Fields []json.RawMessage `json:"fields,omitempty"` + Ordering string `json:"ordering,omitempty"` + } + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } + var err error + var p builder.PostAggregator + pp := make([]builder.PostAggregator, len(tmp.Fields)) + for i := range tmp.Fields { + if p, err = Load(tmp.Fields[i]); err != nil { + return err + } + pp[i] = p + } + a.Base = tmp.Base + a.Fn = tmp.Fn + a.Fields = pp + a.Ordering = tmp.Ordering + return nil +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/postaggregation/constant.go b/vendor/github.com/grafadruid/go-druid/builder/postaggregation/constant.go new file mode 100644 index 000000000..afe2f6128 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/postaggregation/constant.go @@ -0,0 +1,22 @@ +package postaggregation + +type Constant struct { + Base + Value *float64 `json:"value,omitempty"` +} + +func NewConstant() *Constant { + c := &Constant{} + c.SetType("constant") + return c +} + +func (c *Constant) SetName(name string) *Constant { + c.Base.SetName(name) + return c +} + +func (c *Constant) SetValue(value float64) *Constant { + c.Value = &value + return c +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/postaggregation/double_greatest.go b/vendor/github.com/grafadruid/go-druid/builder/postaggregation/double_greatest.go new file mode 100644 index 000000000..7ce0ace59 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/postaggregation/double_greatest.go @@ -0,0 +1,50 @@ +package postaggregation + +import ( + "encoding/json" + + "github.com/grafadruid/go-druid/builder" +) + +type DoubleGreatest struct { + Base + Fields []builder.PostAggregator `json:"fields,omitempty"` +} + +func NewDoubleGreatest() *DoubleGreatest { + d := &DoubleGreatest{} + d.SetType("doubleGreatest") + return d +} + +func (d *DoubleGreatest) SetName(name string) *DoubleGreatest { + d.Base.SetName(name) + return d +} + +func (d *DoubleGreatest) SetFields(fields []builder.PostAggregator) *DoubleGreatest { + d.Fields = fields + return d +} + +func (d *DoubleGreatest) UnmarshalJSON(data []byte) error { + var tmp struct { + Base + Fields []json.RawMessage `json:"fields,omitempty"` + } + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } + var err error + var p builder.PostAggregator + pp 
:= make([]builder.PostAggregator, len(tmp.Fields)) + for i := range tmp.Fields { + if p, err = Load(tmp.Fields[i]); err != nil { + return err + } + pp[i] = p + } + d.Base = tmp.Base + d.Fields = pp + return nil +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/postaggregation/double_least.go b/vendor/github.com/grafadruid/go-druid/builder/postaggregation/double_least.go new file mode 100644 index 000000000..bf21ba2f2 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/postaggregation/double_least.go @@ -0,0 +1,50 @@ +package postaggregation + +import ( + "encoding/json" + + "github.com/grafadruid/go-druid/builder" +) + +type DoubleLeast struct { + Base + Fields []builder.PostAggregator `json:"fields,omitempty"` +} + +func NewDoubleLeast() *DoubleLeast { + d := &DoubleLeast{} + d.SetType("doubleLeast") + return d +} + +func (d *DoubleLeast) SetName(name string) *DoubleLeast { + d.Base.SetName(name) + return d +} + +func (d *DoubleLeast) SetFields(fields []builder.PostAggregator) *DoubleLeast { + d.Fields = fields + return d +} + +func (d *DoubleLeast) UnmarshalJSON(data []byte) error { + var tmp struct { + Base + Fields []json.RawMessage `json:"fields,omitempty"` + } + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } + var err error + var p builder.PostAggregator + pp := make([]builder.PostAggregator, len(tmp.Fields)) + for i := range tmp.Fields { + if p, err = Load(tmp.Fields[i]); err != nil { + return err + } + pp[i] = p + } + d.Base = tmp.Base + d.Fields = pp + return nil +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/postaggregation/expression.go b/vendor/github.com/grafadruid/go-druid/builder/postaggregation/expression.go new file mode 100644 index 000000000..e22c133ff --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/postaggregation/expression.go @@ -0,0 +1,28 @@ +package postaggregation + +type Expression struct { + Base + Expression string `json:"expression,omitempty"` + Ordering string `json:"ordering,omitempty"` +} + +func NewExpression() *Expression { + e := &Expression{} + e.SetType("expression") + return e +} + +func (e *Expression) SetName(name string) *Expression { + e.Base.SetName(name) + return e +} + +func (e *Expression) SetExpression(expression string) *Expression { + e.Expression = expression + return e +} + +func (e *Expression) SetOrdering(ordering string) *Expression { + e.Ordering = ordering + return e +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/postaggregation/field_access.go b/vendor/github.com/grafadruid/go-druid/builder/postaggregation/field_access.go new file mode 100644 index 000000000..1f46642af --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/postaggregation/field_access.go @@ -0,0 +1,22 @@ +package postaggregation + +type FieldAccess struct { + Base + FieldName string `json:"fieldName,omitempty"` +} + +func NewFieldAccess() *FieldAccess { + f := &FieldAccess{} + f.SetType("fieldAccess") + return f +} + +func (f *FieldAccess) SetName(name string) *FieldAccess { + f.Base.SetName(name) + return f +} + +func (f *FieldAccess) SetFieldName(fieldName string) *FieldAccess { + f.FieldName = fieldName + return f +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/postaggregation/finalizing_field_access.go b/vendor/github.com/grafadruid/go-druid/builder/postaggregation/finalizing_field_access.go new file mode 100644 index 000000000..2e3a774ed --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/postaggregation/finalizing_field_access.go 
@@ -0,0 +1,22 @@ +package postaggregation + +type FinalizingFieldAccess struct { + Base + FieldName string `json:"fieldName,omitempty"` +} + +func NewFinalizingFieldAccess() *FinalizingFieldAccess { + f := &FinalizingFieldAccess{} + f.SetType("finalizingFieldAccess") + return f +} + +func (f *FinalizingFieldAccess) SetName(name string) *FinalizingFieldAccess { + f.Base.SetName(name) + return f +} + +func (f *FinalizingFieldAccess) SetFieldName(fieldName string) *FinalizingFieldAccess { + f.FieldName = fieldName + return f +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/postaggregation/hyper_unique_finalizing.go b/vendor/github.com/grafadruid/go-druid/builder/postaggregation/hyper_unique_finalizing.go new file mode 100644 index 000000000..3de201096 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/postaggregation/hyper_unique_finalizing.go @@ -0,0 +1,22 @@ +package postaggregation + +type HyperUniqueFinalizing struct { + Base + FieldName string `json:"fieldName,omitempty"` +} + +func NewHyperUniqueFinalizing() *HyperUniqueFinalizing { + h := &HyperUniqueFinalizing{} + h.SetType("hyperUniqueFinalizing") + return h +} + +func (h *HyperUniqueFinalizing) SetName(name string) *HyperUniqueFinalizing { + h.Base.SetName(name) + return h +} + +func (h *HyperUniqueFinalizing) SetFieldName(fieldName string) *HyperUniqueFinalizing { + h.FieldName = fieldName + return h +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/postaggregation/javascript.go b/vendor/github.com/grafadruid/go-druid/builder/postaggregation/javascript.go new file mode 100644 index 000000000..bbcce70a2 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/postaggregation/javascript.go @@ -0,0 +1,28 @@ +package postaggregation + +type Javascript struct { + Base + FieldNames []string `json:"fieldNames,omitempty"` + Function string `json:"function,omitempty"` +} + +func NewJavascript() *Javascript { + j := &Javascript{} + j.SetType("javascript") + return j +} + +func (j *Javascript) SetName(name string) *Javascript { + j.Base.SetName(name) + return j +} + +func (j *Javascript) SetFieldNames(fieldNames []string) *Javascript { + j.FieldNames = fieldNames + return j +} + +func (j *Javascript) SetFunction(function string) *Javascript { + j.Function = function + return j +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/postaggregation/long_greatest.go b/vendor/github.com/grafadruid/go-druid/builder/postaggregation/long_greatest.go new file mode 100644 index 000000000..232f85ee4 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/postaggregation/long_greatest.go @@ -0,0 +1,50 @@ +package postaggregation + +import ( + "encoding/json" + + "github.com/grafadruid/go-druid/builder" +) + +type LongGreatest struct { + Base + Fields []builder.PostAggregator `json:"fields,omitempty"` +} + +func NewLongGreatest() *LongGreatest { + l := &LongGreatest{} + l.SetType("longGreatest") + return l +} + +func (l *LongGreatest) SetName(name string) *LongGreatest { + l.Base.SetName(name) + return l +} + +func (l *LongGreatest) SetFields(fields []builder.PostAggregator) *LongGreatest { + l.Fields = fields + return l +} + +func (l *LongGreatest) UnmarshalJSON(data []byte) error { + var tmp struct { + Base + Fields []json.RawMessage `json:"fields,omitempty"` + } + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } + var err error + var p builder.PostAggregator + pp := make([]builder.PostAggregator, len(tmp.Fields)) + for i := range tmp.Fields { + if p, err = 
Load(tmp.Fields[i]); err != nil { + return err + } + pp[i] = p + } + l.Base = tmp.Base + l.Fields = pp + return nil +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/postaggregation/long_least.go b/vendor/github.com/grafadruid/go-druid/builder/postaggregation/long_least.go new file mode 100644 index 000000000..61cdc8af0 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/postaggregation/long_least.go @@ -0,0 +1,50 @@ +package postaggregation + +import ( + "encoding/json" + + "github.com/grafadruid/go-druid/builder" +) + +type LongLeast struct { + Base + Fields []builder.PostAggregator `json:"fields,omitempty"` +} + +func NewLongLeast() *LongLeast { + l := &LongLeast{} + l.SetType("longLeast") + return l +} + +func (l *LongLeast) SetName(name string) *LongLeast { + l.Base.SetName(name) + return l +} + +func (l *LongLeast) SetFields(fields []builder.PostAggregator) *LongLeast { + l.Fields = fields + return l +} + +func (l *LongLeast) UnmarshalJSON(data []byte) error { + var tmp struct { + Base + Fields []json.RawMessage `json:"fields,omitempty"` + } + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } + var err error + var p builder.PostAggregator + pp := make([]builder.PostAggregator, len(tmp.Fields)) + for i := range tmp.Fields { + if p, err = Load(tmp.Fields[i]); err != nil { + return err + } + pp[i] = p + } + l.Base = tmp.Base + l.Fields = pp + return nil +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/postaggregation/post_aggregator.go b/vendor/github.com/grafadruid/go-druid/builder/postaggregation/post_aggregator.go new file mode 100644 index 000000000..3f83cb4f6 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/postaggregation/post_aggregator.go @@ -0,0 +1,83 @@ +package postaggregation + +import ( + "encoding/json" + "errors" + + "github.com/grafadruid/go-druid/builder" +) + +type Base struct { + Typ builder.ComponentType `json:"type,omitempty"` + Name string `json:"name,omitempty"` +} + +func (b *Base) SetType(typ builder.ComponentType) *Base { + b.Typ = typ + return b +} + +func (b *Base) SetName(name string) *Base { + b.Name = name + return b +} + +func (b *Base) Type() builder.ComponentType { + return b.Typ +} + +func Load(data []byte) (builder.PostAggregator, error) { + var p builder.PostAggregator + if string(data) == "null" { + return p, nil + } + var t struct { + Typ builder.ComponentType `json:"type,omitempty"` + } + if err := json.Unmarshal(data, &t); err != nil { + return nil, err + } + switch t.Typ { + case "arithmetic": + p = NewArithmetic() + case "constant": + p = NewConstant() + case "doubleGreatest": + p = NewDoubleGreatest() + case "doubleLeast": + p = NewDoubleLeast() + case "expression": + p = NewExpression() + case "fieldAccess": + p = NewFieldAccess() + case "finalizingFieldAccess": + p = NewFinalizingFieldAccess() + case "hyperUniqueFinalizing": + p = NewHyperUniqueFinalizing() + case "javascript": + p = NewJavascript() + case "longGreatest": + p = NewLongGreatest() + case "longLeast": + p = NewLongLeast() + case "quantileFromTDigestSketch": + p = NewQuantileFromTDigestSketch() + case "quantilesFromTDigestSketch": + p = NewQuantilesFromTDigestSketch() + case "quantilesDoublesSketchToQuantile": + p = NewQuantilesDoublesSketchToQuantile() + case "quantilesDoublesSketchToQuantiles": + p = NewQuantilesDoublesSketchToQuantiles() + case "quantilesDoublesSketchToHistogram": + p = NewQuantilesDoublesSketchToHistogram() + case "quantilesDoublesSketchToRank": + p = NewQuantilesDoublesSketchToRank() 
+ case "quantilesDoublesSketchToCDF": + p = NewQuantilesDoublesSketchToCDF() + case "quantilesDoublesSketchToString": + p = NewQuantilesDoublesSketchToString() + default: + return nil, errors.New("unsupported postaggregation type") + } + return p, json.Unmarshal(data, &p) +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/postaggregation/quantile_from_tdigestsketch.go b/vendor/github.com/grafadruid/go-druid/builder/postaggregation/quantile_from_tdigestsketch.go new file mode 100644 index 000000000..d4bd671cd --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/postaggregation/quantile_from_tdigestsketch.go @@ -0,0 +1,59 @@ +package postaggregation + +// QuantileFromTDigestSketch struct based on +// PostAggregator section in https://druid.apache.org/docs/latest/development/extensions-contrib/tdigestsketch-quantiles.html +// See the "Similar to quantilesFromTDigestSketch except it takes in a single fraction for computing quantile" section +type QuantileFromTDigestSketch struct { + Base + Fraction *float64 `json:"fraction,omitempty"` + Field *QuantileFromTDigestSketchField `json:"field,omitempty"` +} + +// QuantileFromTDigestSketchField struct for Field in QuantileFromTDigestSketch +type QuantileFromTDigestSketchField struct { + Type string `json:"type,omitempty"` + FieldName string `json:"fieldName,omitempty"` +} + +// NewQuantileFromTDigestSketch new instance of QuantileFromTDigestSketch +func NewQuantileFromTDigestSketch() *QuantileFromTDigestSketch { + q := &QuantileFromTDigestSketch{} + q.SetType("quantileFromTDigestSketch") + return q +} + +// SetName set name +func (q *QuantileFromTDigestSketch) SetName(name string) *QuantileFromTDigestSketch { + q.Base.SetName(name) + return q +} + +// SetFraction set fraction +func (q *QuantileFromTDigestSketch) SetFraction(fraction float64) *QuantileFromTDigestSketch { + q.Fraction = &fraction + return q +} + +// SetField set QuantileFromTDigestSketchField +func (q *QuantileFromTDigestSketch) SetField(field *QuantileFromTDigestSketchField) *QuantileFromTDigestSketch { + q.Field = field + return q +} + +// NewQuantileFromTDigestSketchField new instance of QuantileFromTDigestSketchField +func NewQuantileFromTDigestSketchField() *QuantileFromTDigestSketchField { + qf := &QuantileFromTDigestSketchField{} + return qf +} + +// SetType set type +func (qf *QuantileFromTDigestSketchField) SetType(typ string) *QuantileFromTDigestSketchField { + qf.Type = typ + return qf +} + +// SetFieldName set fieldName +func (qf *QuantileFromTDigestSketchField) SetFieldName(fieldName string) *QuantileFromTDigestSketchField { + qf.FieldName = fieldName + return qf +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/postaggregation/quantiles_doubles_sketch.go b/vendor/github.com/grafadruid/go-druid/builder/postaggregation/quantiles_doubles_sketch.go new file mode 100644 index 000000000..e9cfaf0e4 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/postaggregation/quantiles_doubles_sketch.go @@ -0,0 +1,31 @@ +package postaggregation + +// QuantilesDoublesSketchField struct for Field in QuantilesDoublesSketch Post Aggregators +type QuantilesDoublesSketchField struct { + Type string `json:"type,omitempty"` + Name string `json:"name,omitempty"` + FieldName string `json:"fieldName,omitempty"` +} + +// NewQuantilesDoublesSketchField new instance of QuantilesDoublesSketchField +func NewQuantilesDoublesSketchField() *QuantilesDoublesSketchField { + return &QuantilesDoublesSketchField{} +} + +// SetName set name +func (qf 
*QuantilesDoublesSketchField) SetName(name string) *QuantilesDoublesSketchField { + qf.Name = name + return qf +} + +// SetType set type +func (qf *QuantilesDoublesSketchField) SetType(typ string) *QuantilesDoublesSketchField { + qf.Type = typ + return qf +} + +// SetFieldName set fieldName +func (qf *QuantilesDoublesSketchField) SetFieldName(fieldName string) *QuantilesDoublesSketchField { + qf.FieldName = fieldName + return qf +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/postaggregation/quantiles_doubles_sketch_to_cdf.go b/vendor/github.com/grafadruid/go-druid/builder/postaggregation/quantiles_doubles_sketch_to_cdf.go new file mode 100644 index 000000000..03bc482ee --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/postaggregation/quantiles_doubles_sketch_to_cdf.go @@ -0,0 +1,34 @@ +package postaggregation + +// QuantilesDoublesSketchToCDF struct based on +// PostAggregator section in https://druid.apache.org/docs/latest/development/extensions-core/datasketches-quantiles.html#cdf +type QuantilesDoublesSketchToCDF struct { + Base + Field *QuantilesDoublesSketchField `json:"field,omitempty"` + SplitPoints []float64 `json:"splitPoints,omitempty"` +} + +// NewQuantilesDoublesSketchToCDF new instance of QuantilesDoublesSketchToCDF +func NewQuantilesDoublesSketchToCDF() *QuantilesDoublesSketchToCDF { + q := &QuantilesDoublesSketchToCDF{} + q.SetType("quantilesDoublesSketchToCDF") + return q +} + +// SetName set name +func (q *QuantilesDoublesSketchToCDF) SetName(name string) *QuantilesDoublesSketchToCDF { + q.Base.SetName(name) + return q +} + +// SetSplitPoints set splitPoints +func (q *QuantilesDoublesSketchToCDF) SetSplitPoints(splitPoints []float64) *QuantilesDoublesSketchToCDF { + q.SplitPoints = splitPoints + return q +} + +// SetField set QuantilesDoublesSketchField +func (q *QuantilesDoublesSketchToCDF) SetField(field *QuantilesDoublesSketchField) *QuantilesDoublesSketchToCDF { + q.Field = field + return q +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/postaggregation/quantiles_doubles_sketch_to_histogram.go b/vendor/github.com/grafadruid/go-druid/builder/postaggregation/quantiles_doubles_sketch_to_histogram.go new file mode 100644 index 000000000..4621738d4 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/postaggregation/quantiles_doubles_sketch_to_histogram.go @@ -0,0 +1,41 @@ +package postaggregation + +// QuantilesDoublesSketchToHistogram struct based on +// PostAggregator section in https://druid.apache.org/docs/latest/development/extensions-core/datasketches-quantiles.html#histogram +type QuantilesDoublesSketchToHistogram struct { + Base + Field *QuantilesDoublesSketchField `json:"field,omitempty"` + SplitPoints []float64 `json:"splitPoints,omitempty"` + NumBins int64 `json:"numBins,omitempty"` +} + +// NewQuantilesDoublesSketchToHistogram new instance of QuantilesDoublesSketchToHistogram +func NewQuantilesDoublesSketchToHistogram() *QuantilesDoublesSketchToHistogram { + q := &QuantilesDoublesSketchToHistogram{} + q.SetType("quantilesDoublesSketchToHistogram") + return q +} + +// SetName set name +func (q *QuantilesDoublesSketchToHistogram) SetName(name string) *QuantilesDoublesSketchToHistogram { + q.Base.SetName(name) + return q +} + +// SetSplitPoints set splitPoints +func (q *QuantilesDoublesSketchToHistogram) SetSplitPoints(splitPoints []float64) *QuantilesDoublesSketchToHistogram { + q.SplitPoints = splitPoints + return q +} + +// SetNumBins set namBins +func (q *QuantilesDoublesSketchToHistogram) 
SetNumBins(numBins int64) *QuantilesDoublesSketchToHistogram { + q.NumBins = numBins + return q +} + +// SetField set QuantilesDoublesSketchField +func (q *QuantilesDoublesSketchToHistogram) SetField(field *QuantilesDoublesSketchField) *QuantilesDoublesSketchToHistogram { + q.Field = field + return q +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/postaggregation/quantiles_doubles_sketch_to_quantile.go b/vendor/github.com/grafadruid/go-druid/builder/postaggregation/quantiles_doubles_sketch_to_quantile.go new file mode 100644 index 000000000..da4609ef7 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/postaggregation/quantiles_doubles_sketch_to_quantile.go @@ -0,0 +1,41 @@ +package postaggregation + +// QuantilesDoublesSketchToQuantile struct based on +// PostAggregator section in https://druid.apache.org/docs/latest/development/extensions-core/datasketches-quantiles.html#quantile +type QuantilesDoublesSketchToQuantile struct { + Base + Field *QuantilesDoublesSketchField `json:"field,omitempty"` + Fraction *float64 `json:"fraction,omitempty"` +} + +// NewQuantilesDoublesSketchToQuantile new instance of QuantilesDoublesSketchToQuantile +func NewQuantilesDoublesSketchToQuantile() *QuantilesDoublesSketchToQuantile { + q := &QuantilesDoublesSketchToQuantile{} + q.SetType("quantilesDoublesSketchToQuantile") + return q +} + +// SetName set name +func (q *QuantilesDoublesSketchToQuantile) SetName(name string) *QuantilesDoublesSketchToQuantile { + q.Base.SetName(name) + return q +} + +// SetFraction set fraction +func (q *QuantilesDoublesSketchToQuantile) SetFraction(fraction float64) *QuantilesDoublesSketchToQuantile { + q.Fraction = &fraction + return q +} + +// SetField set QuantilesDoublesSketchField +func (q *QuantilesDoublesSketchToQuantile) SetField(field *QuantilesDoublesSketchField) *QuantilesDoublesSketchToQuantile { + q.Field = field + return q +} + +// NewQuantilesDoublesSketchToQuantileField new instance of QuantilesDoublesSketchField +// Deprecated: Use NewQuantilesDoublesSketchField instead. 
+// TODO: This function is a duplicate of "func NewQuantilesDoublesSketchField()" to keep backward compatible +func NewQuantilesDoublesSketchToQuantileField() *QuantilesDoublesSketchField { + return &QuantilesDoublesSketchField{} +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/postaggregation/quantiles_doubles_sketch_to_quantiles.go b/vendor/github.com/grafadruid/go-druid/builder/postaggregation/quantiles_doubles_sketch_to_quantiles.go new file mode 100644 index 000000000..28d613291 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/postaggregation/quantiles_doubles_sketch_to_quantiles.go @@ -0,0 +1,34 @@ +package postaggregation + +// QuantilesDoublesSketchToQuantiles struct based on +// PostAggregator section in https://druid.apache.org/docs/latest/development/extensions-core/datasketches-quantiles.html#quantiles +type QuantilesDoublesSketchToQuantiles struct { + Base + Field *QuantilesDoublesSketchField `json:"field,omitempty"` + Fractions []float64 `json:"fractions,omitempty"` +} + +// NewQuantilesDoublesSketchToQuantiles new instance of QuantilesDoublesSketchToHistogram +func NewQuantilesDoublesSketchToQuantiles() *QuantilesDoublesSketchToQuantiles { + q := &QuantilesDoublesSketchToQuantiles{} + q.SetType("quantilesDoublesSketchToQuantiles") + return q +} + +// SetName set name +func (q *QuantilesDoublesSketchToQuantiles) SetName(name string) *QuantilesDoublesSketchToQuantiles { + q.Base.SetName(name) + return q +} + +// SetFractions set fractions +func (q *QuantilesDoublesSketchToQuantiles) SetFractions(fractions []float64) *QuantilesDoublesSketchToQuantiles { + q.Fractions = fractions + return q +} + +// SetField set QuantilesDoublesSketchField +func (q *QuantilesDoublesSketchToQuantiles) SetField(field *QuantilesDoublesSketchField) *QuantilesDoublesSketchToQuantiles { + q.Field = field + return q +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/postaggregation/quantiles_doubles_sketch_to_rank.go b/vendor/github.com/grafadruid/go-druid/builder/postaggregation/quantiles_doubles_sketch_to_rank.go new file mode 100644 index 000000000..601763282 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/postaggregation/quantiles_doubles_sketch_to_rank.go @@ -0,0 +1,34 @@ +package postaggregation + +// QuantilesDoublesSketchToRank struct based on +// PostAggregator section in https://druid.apache.org/docs/latest/development/extensions-core/datasketches-quantiles.html#rank +type QuantilesDoublesSketchToRank struct { + Base + Field *QuantilesDoublesSketchField `json:"field,omitempty"` + Value *float64 `json:"value,omitempty"` +} + +// NewQuantilesDoublesSketchToRank new instance of NewQuantilesDoublesSketchToRank +func NewQuantilesDoublesSketchToRank() *QuantilesDoublesSketchToRank { + q := &QuantilesDoublesSketchToRank{} + q.SetType("quantilesDoublesSketchToRank") + return q +} + +// SetName set name +func (q *QuantilesDoublesSketchToRank) SetName(name string) *QuantilesDoublesSketchToRank { + q.Base.SetName(name) + return q +} + +// SetValue set value +func (q *QuantilesDoublesSketchToRank) SetValue(value float64) *QuantilesDoublesSketchToRank { + q.Value = &value + return q +} + +// SetField set QuantilesDoublesSketchField +func (q *QuantilesDoublesSketchToRank) SetField(field *QuantilesDoublesSketchField) *QuantilesDoublesSketchToRank { + q.Field = field + return q +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/postaggregation/quantiles_doubles_sketch_to_string.go 
b/vendor/github.com/grafadruid/go-druid/builder/postaggregation/quantiles_doubles_sketch_to_string.go new file mode 100644 index 000000000..258340e1f --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/postaggregation/quantiles_doubles_sketch_to_string.go @@ -0,0 +1,27 @@ +package postaggregation + +// QuantilesDoublesSketchToString struct based on +// PostAggregator section in https://druid.apache.org/docs/latest/development/extensions-core/datasketches-quantiles.html#sketch-summary +type QuantilesDoublesSketchToString struct { + Base + Field *QuantilesDoublesSketchField `json:"field,omitempty"` +} + +// NewQuantilesDoublesSketchToString new instance of QuantilesDoublesSketchToString +func NewQuantilesDoublesSketchToString() *QuantilesDoublesSketchToString { + q := &QuantilesDoublesSketchToString{} + q.SetType("quantilesDoublesSketchToString") + return q +} + +// SetName set name +func (q *QuantilesDoublesSketchToString) SetName(name string) *QuantilesDoublesSketchToString { + q.Base.SetName(name) + return q +} + +// SetField set QuantilesDoublesSketchField +func (q *QuantilesDoublesSketchToString) SetField(field *QuantilesDoublesSketchField) *QuantilesDoublesSketchToString { + q.Field = field + return q +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/postaggregation/quantiles_from_tdigestsketch.go b/vendor/github.com/grafadruid/go-druid/builder/postaggregation/quantiles_from_tdigestsketch.go new file mode 100644 index 000000000..70c11923a --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/postaggregation/quantiles_from_tdigestsketch.go @@ -0,0 +1,58 @@ +package postaggregation + +// QuantilesFromTDigestSketch struct based on +// PostAggregator section in https://druid.apache.org/docs/latest/development/extensions-contrib/tdigestsketch-quantiles.html +type QuantilesFromTDigestSketch struct { + Base + Fractions []float64 `json:"fractions,omitempty"` + Field *QuantilesFromTDigestSketchField `json:"field,omitempty"` +} + +// QuantilesFromTDigestSketchField struct for Field in QuantilesFromTDigestSketch +type QuantilesFromTDigestSketchField struct { + Type string `json:"type,omitempty"` + FieldName string `json:"fieldName,omitempty"` +} + +// NewQuantilesFromTDigestSketch new instance of QuantilesFromTDigestSketch +func NewQuantilesFromTDigestSketch() *QuantilesFromTDigestSketch { + q := &QuantilesFromTDigestSketch{} + q.SetType("quantilesFromTDigestSketch") + return q +} + +// SetName set name +func (q *QuantilesFromTDigestSketch) SetName(name string) *QuantilesFromTDigestSketch { + q.Base.SetName(name) + return q +} + +// SetFractions set fractions +func (q *QuantilesFromTDigestSketch) SetFractions(fractions []float64) *QuantilesFromTDigestSketch { + q.Fractions = fractions + return q +} + +// SetField set QuantilesFromTDigestSketchField +func (q *QuantilesFromTDigestSketch) SetField(field *QuantilesFromTDigestSketchField) *QuantilesFromTDigestSketch { + q.Field = field + return q +} + +// NewQuantilesFromTDigestSketchField new instance of QuantilesFromTDigestSketchField +func NewQuantilesFromTDigestSketchField() *QuantilesFromTDigestSketchField { + qf := &QuantilesFromTDigestSketchField{} + return qf +} + +// SetType set type +func (qf *QuantilesFromTDigestSketchField) SetType(typ string) *QuantilesFromTDigestSketchField { + qf.Type = typ + return qf +} + +// SetFieldName set fieldName +func (qf *QuantilesFromTDigestSketchField) SetFieldName(fieldName string) *QuantilesFromTDigestSketchField { + qf.FieldName = fieldName + return qf +} diff --git 
a/vendor/github.com/grafadruid/go-druid/builder/query/datasource_metadata.go b/vendor/github.com/grafadruid/go-druid/builder/query/datasource_metadata.go new file mode 100644 index 000000000..44cd3ad47 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/query/datasource_metadata.go @@ -0,0 +1,30 @@ +package query + +import ( + "github.com/grafadruid/go-druid/builder" +) + +type DataSourceMetadata struct { + Base +} + +func NewDataSourceMetadata() *DataSourceMetadata { + d := &DataSourceMetadata{} + d.SetQueryType("dataSourceMetadata") + return d +} + +func (d *DataSourceMetadata) SetDataSource(dataSource builder.DataSource) *DataSourceMetadata { + d.Base.SetDataSource(dataSource) + return d +} + +func (d *DataSourceMetadata) SetIntervals(intervals builder.Intervals) *DataSourceMetadata { + d.Base.SetIntervals(intervals) + return d +} + +func (d *DataSourceMetadata) SetContext(context map[string]interface{}) *DataSourceMetadata { + d.Base.SetContext(context) + return d +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/query/group_by.go b/vendor/github.com/grafadruid/go-druid/builder/query/group_by.go new file mode 100644 index 000000000..3d94e4e7a --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/query/group_by.go @@ -0,0 +1,182 @@ +package query + +import ( + "encoding/json" + "github.com/grafadruid/go-druid/builder" + "github.com/grafadruid/go-druid/builder/aggregation" + "github.com/grafadruid/go-druid/builder/dimension" + "github.com/grafadruid/go-druid/builder/filter" + "github.com/grafadruid/go-druid/builder/granularity" + "github.com/grafadruid/go-druid/builder/havingspec" + "github.com/grafadruid/go-druid/builder/limitspec" + "github.com/grafadruid/go-druid/builder/postaggregation" + "github.com/grafadruid/go-druid/builder/virtualcolumn" +) + +type GroupBy struct { + Base + Dimensions []builder.Dimension `json:"dimensions,omitempty"` + VirtualColumns []builder.VirtualColumn `json:"virtualColumns,omitempty"` + Filter builder.Filter `json:"filter,omitempty"` + Granularity builder.Granularity `json:"granularity,omitempty"` + Aggregations []builder.Aggregator `json:"aggregations,omitempty"` + PostAggregations []builder.PostAggregator `json:"postAggregations,omitempty"` + Having builder.HavingSpec `json:"having,omitempty"` + LimitSpec builder.LimitSpec `json:"limitSpec,omitempty"` + SubtotalsSpec [][]string `json:"subtotalsSpec,omitempty"` +} + +func NewGroupBy() *GroupBy { + g := &GroupBy{} + g.SetQueryType("groupBy") + return g +} + +func (g *GroupBy) SetDataSource(dataSource builder.DataSource) *GroupBy { + g.Base.SetDataSource(dataSource) + return g +} + +func (g *GroupBy) SetIntervals(intervals builder.Intervals) *GroupBy { + g.Base.SetIntervals(intervals) + return g +} + +func (g *GroupBy) SetContext(context map[string]interface{}) *GroupBy { + g.Base.SetContext(context) + return g +} + +func (g *GroupBy) SetDimensions(dimensions []builder.Dimension) *GroupBy { + g.Dimensions = dimensions + return g +} + +func (g *GroupBy) SetVirtualColumns(virtualColumns []builder.VirtualColumn) *GroupBy { + g.VirtualColumns = virtualColumns + return g +} + +func (g *GroupBy) SetFilter(filter builder.Filter) *GroupBy { + g.Filter = filter + return g +} + +func (g *GroupBy) SetGranularity(granularity builder.Granularity) *GroupBy { + g.Granularity = granularity + return g +} + +func (g *GroupBy) SetAggregations(aggregations []builder.Aggregator) *GroupBy { + g.Aggregations = aggregations + return g +} + +func (g *GroupBy) 
SetPostAggregations(postAggregations []builder.PostAggregator) *GroupBy { + g.PostAggregations = postAggregations + return g +} + +func (g *GroupBy) SetHaving(having builder.HavingSpec) *GroupBy { + g.Having = having + return g +} + +func (g *GroupBy) SetLimitSpec(limitSpec builder.LimitSpec) *GroupBy { + g.LimitSpec = limitSpec + return g +} + +func (g *GroupBy) SetSubtotalsSpec(subtotalsSpec [][]string) *GroupBy { + g.SubtotalsSpec = subtotalsSpec + return g +} + +func (g *GroupBy) UnmarshalJSON(data []byte) error { + var err error + var tmp struct { + Dimensions []json.RawMessage `json:"dimensions,omitempty"` + VirtualColumns []json.RawMessage `json:"virtualColumns,omitempty"` + Filter json.RawMessage `json:"filter,omitempty"` + Granularity json.RawMessage `json:"granularity,omitempty"` + Aggregations []json.RawMessage `json:"aggregations,omitempty"` + PostAggregations []json.RawMessage `json:"postAggregations,omitempty"` + Having json.RawMessage `json:"having,omitempty"` + LimitSpec json.RawMessage `json:"limitSpec,omitempty"` + SubtotalsSpec [][]string `json:"subtotalsSpec,omitempty"` + } + if err = json.Unmarshal(data, &tmp); err != nil { + return err + } + var d builder.Dimension + dd := make([]builder.Dimension, len(tmp.Dimensions)) + for i := range tmp.Dimensions { + if d, err = dimension.Load(tmp.Dimensions[i]); err != nil { + return err + } + dd[i] = d + } + var v builder.VirtualColumn + vv := make([]builder.VirtualColumn, len(tmp.VirtualColumns)) + for i := range tmp.VirtualColumns { + if v, err = virtualcolumn.Load(tmp.VirtualColumns[i]); err != nil { + return err + } + vv[i] = v + } + var f builder.Filter + if tmp.Filter != nil { + f, err = filter.Load(tmp.Filter) + if err != nil { + return err + } + } + gr, err := granularity.Load(tmp.Granularity) + if err != nil { + return err + } + var a builder.Aggregator + aa := make([]builder.Aggregator, len(tmp.Aggregations)) + for i := range tmp.Aggregations { + if a, err = aggregation.Load(tmp.Aggregations[i]); err != nil { + return err + } + aa[i] = a + } + var p builder.PostAggregator + pp := make([]builder.PostAggregator, len(tmp.PostAggregations)) + for i := range tmp.PostAggregations { + if p, err = postaggregation.Load(tmp.PostAggregations[i]); err != nil { + return err + } + pp[i] = p + } + var h builder.HavingSpec + if tmp.Having != nil { + h, err = havingspec.Load(tmp.Having) + if err != nil { + return err + } + } + var l builder.LimitSpec + if tmp.LimitSpec != nil { + l, err = limitspec.Load(tmp.LimitSpec) + if err != nil { + return err + } + } + if len(tmp.SubtotalsSpec) == 0 { + tmp.SubtotalsSpec = nil + } + err = g.Base.UnmarshalJSON(data) + g.Dimensions = dd + g.VirtualColumns = vv + g.Filter = f + g.Granularity = gr + g.Aggregations = aa + g.PostAggregations = pp + g.Having = h + g.LimitSpec = l + g.SubtotalsSpec = tmp.SubtotalsSpec + return err +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/query/query.go b/vendor/github.com/grafadruid/go-druid/builder/query/query.go new file mode 100644 index 000000000..5a70ae522 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/query/query.go @@ -0,0 +1,118 @@ +package query + +import ( + "encoding/json" + "errors" + + "github.com/grafadruid/go-druid/builder" + "github.com/grafadruid/go-druid/builder/datasource" + "github.com/grafadruid/go-druid/builder/intervals" +) + +type Base struct { + ID string `json:"ID,omitempty"` + QueryType builder.ComponentType `json:"queryType,omitempty"` + DataSource builder.DataSource `json:"dataSource,omitempty"` + 
Intervals builder.Intervals `json:"intervals,omitempty"` + Context map[string]interface{} `json:"context,omitempty"` +} + +func (b *Base) SetID(ID string) *Base { + b.ID = ID + return b +} + +func (b *Base) SetQueryType(queryType builder.ComponentType) *Base { + b.QueryType = queryType + return b +} + +func (b *Base) SetDataSource(dataSource builder.DataSource) *Base { + b.DataSource = dataSource + return b +} + +func (b *Base) SetIntervals(intervals builder.Intervals) *Base { + b.Intervals = intervals + return b +} + +func (b *Base) SetContext(context map[string]interface{}) *Base { + b.Context = context + return b +} + +func (b *Base) Type() builder.ComponentType { + return b.QueryType +} + +func (b *Base) UnmarshalJSON(data []byte) error { + var tmp struct { + ID string `json:"ID,omitempty"` + QueryType builder.ComponentType `json:"queryType,omitempty"` + DataSource json.RawMessage `json:"dataSource,omitempty"` + Intervals json.RawMessage `json:"intervals,omitempty"` + Context map[string]interface{} `json:"context,omitempty"` + } + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } + if b.Type() != "sql" { + d, err := datasource.Load(tmp.DataSource) + if err != nil { + return err + } + if d.Type() == "query" { + d.(*datasource.Query).UnmarshalJSONWithQueryLoader(tmp.DataSource, Load) + } + b.DataSource = d + var i builder.Intervals + if tmp.Intervals != nil { + i, err = intervals.Load(tmp.Intervals) + if err != nil { + return err + } + } + b.Intervals = i + } + b.ID = tmp.ID + b.QueryType = tmp.QueryType + b.Context = tmp.Context + return nil +} + +func Load(data []byte) (builder.Query, error) { + var q builder.Query + if string(data) == "null" { + return q, nil + } + var t struct { + Typ builder.ComponentType `json:"queryType,omitempty"` + } + if err := json.Unmarshal(data, &t); err != nil { + return nil, err + } + switch t.Typ { + case "dataSourceMetadata": + q = NewDataSourceMetadata() + case "groupBy": + q = NewGroupBy() + case "scan": + q = NewScan() + case "search": + q = NewSearch() + case "segmentMetadata": + q = NewSegmentMetadata() + case "sql": + q = NewSQL() + case "timeBoundary": + q = NewTimeBoundary() + case "timeseries": + q = NewTimeseries() + case "topN": + q = NewTopN() + default: + return nil, errors.New("unsupported query type") + } + return q, json.Unmarshal(data, &q) +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/query/scan.go b/vendor/github.com/grafadruid/go-druid/builder/query/scan.go new file mode 100644 index 000000000..2c4b452b2 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/query/scan.go @@ -0,0 +1,168 @@ +package query + +import ( + "encoding/json" + + "github.com/grafadruid/go-druid/builder" + "github.com/grafadruid/go-druid/builder/filter" + "github.com/grafadruid/go-druid/builder/virtualcolumn" +) + +type Order string + +const ( + Ascending Order = "ASCENDING" + Descending = "DESCENDING" + None = "NONE" +) + +// Scan query returns raw Apache Druid rows in streaming mode. 
+// https://druid.apache.org/docs/latest/querying/scan-query.html +type Scan struct { + Base + VirtualColumns []builder.VirtualColumn `json:"virtualColumns,omitempty"` + ResultFormat string `json:"resultFormat,omitempty"` + BatchSize int64 `json:"batchSize,omitempty"` + Limit int64 `json:"limit,omitempty"` + Offset int64 `json:"offset,omitempty"` + Order Order `json:"order,omitempty"` + Filter builder.Filter `json:"filter,omitempty"` + Columns []string `json:"columns,omitempty"` + Legacy *bool `json:"legacy,omitempty"` +} + +// NewScan returns *builder.Scan which can be used to build a scan query. +// Eg, +// table := datasource.NewTable().SetName("table-name") +// +// now := time.Now() +// i := intervals.NewInterval().SetInterval(now.Add(-60*time.Minute), now) +// is := intervals.NewIntervals().SetIntervals([]*intervals.Interval{i}) +// +// filter1 := filter.NewSelector().SetDimension("key1").SetValue("val1") +// filter2 := filter.NewSelector().SetDimension("key2").SetValue("val2") +// filters := filter.NewAnd().SetFields([]builder.Filter{filter1, filter2}) +// +// ts := query.NewScan().SetDataSource(table).SetIntervals(is).SetFilter(filters).SetResultFormat("compactedList").SetLimit(10) +func NewScan() *Scan { + s := &Scan{} + s.Base.SetQueryType("scan") + return s +} + +// SetDataSource sets data source. +func (s *Scan) SetDataSource(dataSource builder.DataSource) *Scan { + s.Base.SetDataSource(dataSource) + return s +} + +// SetIntervals set the intervals. +func (s *Scan) SetIntervals(intervals builder.Intervals) *Scan { + s.Base.SetIntervals(intervals) + return s +} + +// SetContext sets the context. +func (s *Scan) SetContext(context map[string]interface{}) *Scan { + s.Base.SetContext(context) + return s +} + +// SetVirtualColumns sets virtual columns. +func (s *Scan) SetVirtualColumns(virtualColumns []builder.VirtualColumn) *Scan { + s.VirtualColumns = virtualColumns + return s +} + +// SetResultFormat sets the result format. +func (s *Scan) SetResultFormat(resultFormat string) *Scan { + s.ResultFormat = resultFormat + return s +} + +// SetBatchSize sets the batch size. +func (s *Scan) SetBatchSize(batchSize int64) *Scan { + s.BatchSize = batchSize + return s +} + +// SetLimit sets the limit. +func (s *Scan) SetLimit(limit int64) *Scan { + s.Limit = limit + return s +} + +// SetOffset sets the offset. +func (s *Scan) SetOffset(offset int64) *Scan { + s.Offset = offset + return s +} + +// SetOrder sets the order. +func (s *Scan) SetOrder(order Order) *Scan { + s.Order = order + return s +} + +// SetFilter sets the filter. +func (s *Scan) SetFilter(filter builder.Filter) *Scan { + s.Filter = filter + return s +} + +// SetColumns set columns. +func (s *Scan) SetColumns(columns []string) *Scan { + s.Columns = columns + return s +} + +// SetLegacy sets the `druid.query.scan.legacy` field. +func (s *Scan) SetLegacy(legacy bool) *Scan { + s.Legacy = &legacy + return s +} + +// UnmarshalJSON unmarshalls a druid scan native query json string into builder type. 
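+// For example, a native scan query JSON document can be decoded straight into a Scan value,
+// which routes through this method (an illustrative sketch; the "wikipedia" table name below
+// is an assumption, not something defined in this package):
+//
+//	raw := []byte(`{"queryType":"scan","dataSource":{"type":"table","name":"wikipedia"},"limit":10}`)
+//	var s Scan
+//	if err := json.Unmarshal(raw, &s); err != nil {
+//		// handle invalid or unsupported query JSON
+//	}
+//
+// Loading via query.Load(raw) yields the same result when the concrete query type is not known up front.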
+func (s *Scan) UnmarshalJSON(data []byte) error { + var err error + var tmp struct { + VirtualColumns []json.RawMessage `json:"virtualColumns,omitempty"` + ResultFormat string `json:"resultFormat,omitempty"` + BatchSize int64 `json:"batchSize,omitempty"` + Limit int64 `json:"limit,omitempty"` + Offset int64 `json:"offset,omitempty"` + Order Order `json:"order,omitempty"` + Filter json.RawMessage `json:"filter,omitempty"` + Columns []string `json:"columns,omitempty"` + Legacy *bool `json:"legacy,omitempty"` + } + if err = json.Unmarshal(data, &tmp); err != nil { + return err + } + var v builder.VirtualColumn + vv := make([]builder.VirtualColumn, len(tmp.VirtualColumns)) + for i := range tmp.VirtualColumns { + if v, err = virtualcolumn.Load(tmp.VirtualColumns[i]); err != nil { + return err + } + vv[i] = v + } + var f builder.Filter + if tmp.Filter != nil { + f, err = filter.Load(tmp.Filter) + if err != nil { + return err + } + } + err = s.Base.UnmarshalJSON(data) + s.VirtualColumns = vv + s.ResultFormat = tmp.ResultFormat + s.BatchSize = tmp.BatchSize + s.Limit = tmp.Limit + s.Offset = tmp.Offset + s.Order = tmp.Order + s.Filter = f + s.Columns = tmp.Columns + s.Legacy = tmp.Legacy + return err +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/query/search.go b/vendor/github.com/grafadruid/go-druid/builder/query/search.go new file mode 100644 index 000000000..405ff3741 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/query/search.go @@ -0,0 +1,124 @@ +package query + +import ( + "encoding/json" + "github.com/grafadruid/go-druid/builder" + "github.com/grafadruid/go-druid/builder/dimension" + "github.com/grafadruid/go-druid/builder/filter" + "github.com/grafadruid/go-druid/builder/granularity" + "github.com/grafadruid/go-druid/builder/searchqueryspec" + "github.com/grafadruid/go-druid/builder/types" +) + +type SearchSortSpec struct { + Type types.StringComparator `json:"type,omitempty"` +} + +type Search struct { + Base + Filter builder.Filter `json:"filter,omitempty"` + Granularity builder.Granularity `json:"granularity,omitempty"` + Limit int64 `json:"limit,omitempty"` + SearchDimensions []builder.Dimension `json:"searchDimensions,omitempty"` + Query builder.SearchQuerySpec `json:"query,omitempty"` + Sort *SearchSortSpec `json:"sort,omitempty"` +} + +func NewSearch() *Search { + s := &Search{} + s.SetQueryType("search") + return s +} + +func (s *Search) SetDataSource(dataSource builder.DataSource) *Search { + s.Base.SetDataSource(dataSource) + return s +} + +func (s *Search) SetIntervals(intervals builder.Intervals) *Search { + s.Base.SetIntervals(intervals) + return s +} + +func (s *Search) SetContext(context map[string]interface{}) *Search { + s.Base.SetContext(context) + return s +} + +func (s *Search) SetFilter(filter builder.Filter) *Search { + s.Filter = filter + return s +} + +func (s *Search) SetGranularity(granularity builder.Granularity) *Search { + s.Granularity = granularity + return s +} + +func (s *Search) SetLimit(limit int64) *Search { + s.Limit = limit + return s +} + +func (s *Search) SetSearchDimensions(searchDimensions []builder.Dimension) *Search { + s.SearchDimensions = searchDimensions + return s +} + +func (s *Search) SetQuery(q builder.SearchQuerySpec) *Search { + s.Query = q + return s +} + +func (s *Search) SetSort(sort *SearchSortSpec) *Search { + s.Sort = sort + return s +} + +func (s *Search) UnmarshalJSON(data []byte) error { + var err error + var tmp struct { + Filter json.RawMessage `json:"filter,omitempty"` + Granularity 
json.RawMessage `json:"granularity,omitempty"` + Limit int64 `json:"limit,omitempty"` + SearchDimensions []json.RawMessage `json:"searchDimensions,omitempty"` + Query json.RawMessage `json:"query,omitempty"` + Sort *SearchSortSpec `json:"sort,omitempty"` + } + if err = json.Unmarshal(data, &tmp); err != nil { + return err + } + var f builder.Filter + if tmp.Filter != nil { + f, err = filter.Load(tmp.Filter) + if err != nil { + return err + } + } + var gr builder.Granularity + if tmp.Granularity != nil { + gr, err = granularity.Load(tmp.Granularity) + if err != nil { + return err + } + } + var se builder.Dimension + ss := make([]builder.Dimension, len(tmp.SearchDimensions)) + for i := range tmp.SearchDimensions { + if se, err = dimension.Load(tmp.SearchDimensions[i]); err != nil { + return err + } + ss[i] = se + } + q, err := searchqueryspec.Load(tmp.Query) + if err != nil { + return err + } + err = s.Base.UnmarshalJSON(data) + s.Filter = f + s.Granularity = gr + s.SearchDimensions = ss + s.Query = q + s.Sort = tmp.Sort + return err +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/query/segment_metadata.go b/vendor/github.com/grafadruid/go-druid/builder/query/segment_metadata.go new file mode 100644 index 000000000..2cef71803 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/query/segment_metadata.go @@ -0,0 +1,104 @@ +package query + +import ( + "encoding/json" + + "github.com/grafadruid/go-druid/builder" + "github.com/grafadruid/go-druid/builder/toinclude" +) + +type AnalysisType string + +const ( + Cardinality AnalysisType = "CARDINALITY" + Size = "SIZE" + Interval = "INTERVAL" + Aggregators = "AGGREGATORS" + MinMax = "MINMAX" + TimestampSpec = "TIMESTAMPSPEC" + QueryGranularity = "QUERYGRANULARITY" + Rollup = "ROLLUP" +) + +type SegmentMetadata struct { + Base + ToInclude builder.ToInclude `json:"toInclude,omitempty"` + Merge *bool `json:"merge,omitempty"` + AnalysisTypes []AnalysisType `json:"analysisTypes,omitempty"` + UsingDefaultInterval *bool `json:"usingDefaultInterval,omitempty"` + LenientAggregatorMerge *bool `json:"lenientAggregatorMerge,omitempty"` +} + +func NewSegmentMetadata() *SegmentMetadata { + s := &SegmentMetadata{} + s.SetQueryType("segmentMetadata") + return s +} + +func (s *SegmentMetadata) SetDataSource(dataSource builder.DataSource) *SegmentMetadata { + s.Base.SetDataSource(dataSource) + return s +} + +func (s *SegmentMetadata) SetIntervals(intervals builder.Intervals) *SegmentMetadata { + s.Base.SetIntervals(intervals) + return s +} + +func (s *SegmentMetadata) SetContext(context map[string]interface{}) *SegmentMetadata { + s.Base.SetContext(context) + return s +} + +func (s *SegmentMetadata) SetToInclude(toInclude builder.ToInclude) *SegmentMetadata { + s.ToInclude = toInclude + return s +} + +func (s *SegmentMetadata) SetMerge(merge bool) *SegmentMetadata { + s.Merge = &merge + return s +} + +func (s *SegmentMetadata) SetAnalysisTypes(analysisTypes []AnalysisType) *SegmentMetadata { + s.AnalysisTypes = analysisTypes + return s +} + +func (s *SegmentMetadata) SetUsingDefaultInterval(usingDefaultInterval bool) *SegmentMetadata { + s.UsingDefaultInterval = &usingDefaultInterval + return s +} + +func (s *SegmentMetadata) SetLenientAggregatorMerge(lenientAggregatorMerge bool) *SegmentMetadata { + s.LenientAggregatorMerge = &lenientAggregatorMerge + return s +} + +func (s *SegmentMetadata) UnmarshalJSON(data []byte) error { + var err error + var tmp struct { + ToInclude json.RawMessage `json:"toInclude,omitempty"` + Merge *bool 
`json:"merge,omitempty"` + AnalysisTypes []AnalysisType `json:"analysisTypes,omitempty"` + UsingDefaultInterval *bool `json:"usingDefaultInterval,omitempty"` + LenientAggregatorMerge *bool `json:"lenientAggregatorMerge,omitempty"` + } + if err = json.Unmarshal(data, &tmp); err != nil { + return err + } + var t builder.ToInclude + if tmp.ToInclude != nil { + t, err = toinclude.Load(tmp.ToInclude) + if err != nil { + return err + } + } + err = s.Base.UnmarshalJSON(data) + s.ToInclude = t + s.Merge = tmp.Merge + s.AnalysisTypes = tmp.AnalysisTypes + s.UsingDefaultInterval = tmp.UsingDefaultInterval + s.LenientAggregatorMerge = tmp.LenientAggregatorMerge + return err +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/query/sql.go b/vendor/github.com/grafadruid/go-druid/builder/query/sql.go new file mode 100644 index 000000000..7c652934c --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/query/sql.go @@ -0,0 +1,68 @@ +package query + +import ( + "encoding/json" +) + +type SQL struct { + Base + Query string `json:"query,omitempty"` + ResultFormat string `json:"resultFormat,omitempty"` + Header *bool `json:"header,omitempty"` + Parameters []SQLParameter `json:"parameters,omitempty"` +} + +type SQLParameter struct { + Type string `json:"type,omitempty"` + Value string `json:"value,omitempty"` +} + +func NewSQL() *SQL { + s := &SQL{} + s.Base.SetQueryType("sql") + return s +} + +func NewSQLParameter() *SQLParameter { + p := &SQLParameter{} + return p +} + +func (s *SQL) SetQuery(query string) *SQL { + s.Query = query + return s +} + +func (s *SQL) SetResultFormat(resultFormat string) *SQL { + s.ResultFormat = resultFormat + return s +} + +func (s *SQL) SetHeader(header bool) *SQL { + s.Header = &header + return s +} + +func (s *SQL) SetParameters(parameters []SQLParameter) *SQL { + s.Parameters = parameters + return s +} + +func (s *SQL) UnmarshalJSON(data []byte) error { + var err error + var tmp struct { + Query string `json:"query,omitempty"` + ResultFormat string `json:"resultFormat,omitempty"` + Header *bool `json:"header,omitempty"` + Parameters []SQLParameter `json:"parameters,omitempty"` + } + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } + err = s.Base.UnmarshalJSON(data) + s.Query = tmp.Query + s.ResultFormat = tmp.ResultFormat + s.Header = tmp.Header + s.Parameters = tmp.Parameters + return err +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/query/time_boundary.go b/vendor/github.com/grafadruid/go-druid/builder/query/time_boundary.go new file mode 100644 index 000000000..2ac2604b4 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/query/time_boundary.go @@ -0,0 +1,66 @@ +package query + +import ( + "encoding/json" + "github.com/grafadruid/go-druid/builder" + "github.com/grafadruid/go-druid/builder/filter" +) + +type TimeBoundary struct { + Base + Bound string `json:"bound,omitempty"` + Filter builder.Filter `json:"filter,omitempty"` +} + +func NewTimeBoundary() *TimeBoundary { + t := &TimeBoundary{} + t.SetQueryType("timeBoundary") + return t +} + +func (t *TimeBoundary) SetDataSource(dataSource builder.DataSource) *TimeBoundary { + t.Base.SetDataSource(dataSource) + return t +} + +func (t *TimeBoundary) SetIntervals(intervals builder.Intervals) *TimeBoundary { + t.Base.SetIntervals(intervals) + return t +} + +func (t *TimeBoundary) SetContext(context map[string]interface{}) *TimeBoundary { + t.Base.SetContext(context) + return t +} + +func (t *TimeBoundary) SetBound(bound string) *TimeBoundary { + t.Bound = 
bound + return t +} + +func (t *TimeBoundary) SetFilter(filter builder.Filter) *TimeBoundary { + t.Filter = filter + return t +} + +func (t *TimeBoundary) UnmarshalJSON(data []byte) error { + var err error + var tmp struct { + Bound string `json:"bound,omitempty"` + Filter json.RawMessage `json:"filter,omitempty"` + } + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } + var f builder.Filter + if tmp.Filter != nil { + f, err = filter.Load(tmp.Filter) + if err != nil { + return err + } + } + err = t.Base.UnmarshalJSON(data) + t.Bound = tmp.Bound + t.Filter = f + return err +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/query/timeseries.go b/vendor/github.com/grafadruid/go-druid/builder/query/timeseries.go new file mode 100644 index 000000000..49d72965c --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/query/timeseries.go @@ -0,0 +1,141 @@ +package query + +import ( + "encoding/json" + "errors" + + "github.com/grafadruid/go-druid/builder" + "github.com/grafadruid/go-druid/builder/aggregation" + "github.com/grafadruid/go-druid/builder/filter" + "github.com/grafadruid/go-druid/builder/granularity" + "github.com/grafadruid/go-druid/builder/postaggregation" + "github.com/grafadruid/go-druid/builder/virtualcolumn" +) + +type Timeseries struct { + Base + Descending *bool `json:"descending,omitempty"` + VirtualColumns []builder.VirtualColumn `json:"virtualColumns,omitempty"` + Filter builder.Filter `json:"filter,omitempty"` + Granularity builder.Granularity `json:"granularity,omitempty"` + Aggregations []builder.Aggregator `json:"aggregations,omitempty"` + PostAggregations []builder.PostAggregator `json:"postAggregations,omitempty"` + Limit int64 `json:"limit,omitempty"` +} + +func NewTimeseries() *Timeseries { + t := &Timeseries{} + t.SetQueryType("timeseries") + return t +} + +func (t *Timeseries) SetDataSource(dataSource builder.DataSource) *Timeseries { + t.Base.SetDataSource(dataSource) + return t +} + +func (t *Timeseries) SetIntervals(intervals builder.Intervals) *Timeseries { + t.Base.SetIntervals(intervals) + return t +} + +func (t *Timeseries) SetContext(context map[string]interface{}) *Timeseries { + t.Base.SetContext(context) + return t +} + +func (t *Timeseries) SetDescending(descending bool) *Timeseries { + t.Descending = &descending + return t +} + +func (t *Timeseries) SetVirtualColumns(virtualColumns []builder.VirtualColumn) *Timeseries { + t.VirtualColumns = virtualColumns + return t +} + +func (t *Timeseries) SetFilter(filter builder.Filter) *Timeseries { + t.Filter = filter + return t +} + +func (t *Timeseries) SetGranularity(granularity builder.Granularity) *Timeseries { + t.Granularity = granularity + return t +} + +func (t *Timeseries) SetAggregations(aggregations []builder.Aggregator) *Timeseries { + t.Aggregations = aggregations + return t +} + +func (t *Timeseries) SetPostAggregations(postAggregations []builder.PostAggregator) *Timeseries { + t.PostAggregations = postAggregations + return t +} + +func (t *Timeseries) SetLimit(limit int64) *Timeseries { + t.Limit = limit + return t +} + +func (t *Timeseries) UnmarshalJSON(data []byte) error { + var err error + var tmp struct { + Descending *bool `json:"descending,omitempty"` + VirtualColumns []json.RawMessage `json:"virtualColumns,omitempty"` + Filter json.RawMessage `json:"filter,omitempty"` + Granularity json.RawMessage `json:"granularity,omitempty"` + Aggregations []json.RawMessage `json:"aggregations,omitempty"` + PostAggregations []json.RawMessage 
`json:"postAggregations,omitempty"` + Limit int64 `json:"limit,omitempty"` + } + if err = json.Unmarshal(data, &tmp); err != nil { + return err + } + var v builder.VirtualColumn + vv := make([]builder.VirtualColumn, len(tmp.VirtualColumns)) + for i := range tmp.VirtualColumns { + if v, err = virtualcolumn.Load(tmp.VirtualColumns[i]); err != nil { + err = errors.New("2") + return err + } + vv[i] = v + } + var f builder.Filter + if tmp.Filter != nil { + f, err = filter.Load(tmp.Filter) + if err != nil { + return err + } + } + gr, err := granularity.Load(tmp.Granularity) + if err != nil { + return err + } + var a builder.Aggregator + aa := make([]builder.Aggregator, len(tmp.Aggregations)) + for i := range tmp.Aggregations { + if a, err = aggregation.Load(tmp.Aggregations[i]); err != nil { + return err + } + aa[i] = a + } + var p builder.PostAggregator + pp := make([]builder.PostAggregator, len(tmp.PostAggregations)) + for i := range tmp.PostAggregations { + if p, err = postaggregation.Load(tmp.PostAggregations[i]); err != nil { + return err + } + pp[i] = p + } + err = t.Base.UnmarshalJSON(data) + t.Descending = tmp.Descending + t.VirtualColumns = vv + t.Filter = f + t.Granularity = gr + t.Aggregations = aa + t.PostAggregations = pp + t.Limit = tmp.Limit + return err +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/query/top_n.go b/vendor/github.com/grafadruid/go-druid/builder/query/top_n.go new file mode 100644 index 000000000..01e1fc89b --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/query/top_n.go @@ -0,0 +1,156 @@ +package query + +import ( + "encoding/json" + "github.com/grafadruid/go-druid/builder" + "github.com/grafadruid/go-druid/builder/aggregation" + "github.com/grafadruid/go-druid/builder/dimension" + "github.com/grafadruid/go-druid/builder/filter" + "github.com/grafadruid/go-druid/builder/granularity" + "github.com/grafadruid/go-druid/builder/postaggregation" + "github.com/grafadruid/go-druid/builder/topnmetric" + "github.com/grafadruid/go-druid/builder/virtualcolumn" +) + +type TopN struct { + Base + VirtualColumns []builder.VirtualColumn `json:"virtualColumns,omitempty"` + Dimension builder.Dimension `json:"dimension,omitempty"` + Metric builder.TopNMetric `json:"metric,omitempty"` + Threshold int64 `json:"threshold,omitempty"` + Filter builder.Filter `json:"filter,omitempty"` + Granularity builder.Granularity `json:"granularity,omitempty"` + Aggregations []builder.Aggregator `json:"aggregations,omitempty"` + PostAggregations []builder.PostAggregator `json:"postAggregations,omitempty"` +} + +func NewTopN() *TopN { + t := &TopN{} + t.SetQueryType("topN") + return t +} + +func (t *TopN) SetDataSource(dataSource builder.DataSource) *TopN { + t.Base.SetDataSource(dataSource) + return t +} + +func (t *TopN) SetIntervals(intervals builder.Intervals) *TopN { + t.Base.SetIntervals(intervals) + return t +} + +func (t *TopN) SetContext(context map[string]interface{}) *TopN { + t.Base.SetContext(context) + return t +} + +func (t *TopN) SetVirtualColumns(virtualColumns []builder.VirtualColumn) *TopN { + t.VirtualColumns = virtualColumns + return t +} + +func (t *TopN) SetDimension(dimension builder.Dimension) *TopN { + t.Dimension = dimension + return t +} + +func (t *TopN) SetMetric(metric builder.TopNMetric) *TopN { + t.Metric = metric + return t +} + +func (t *TopN) SetThreshold(threshold int64) *TopN { + t.Threshold = threshold + return t +} + +func (t *TopN) SetFilter(filter builder.Filter) *TopN { + t.Filter = filter + return t +} + +func (t *TopN) 
SetGranularity(granularity builder.Granularity) *TopN { + t.Granularity = granularity + return t +} + +func (t *TopN) SetAggregations(aggregations []builder.Aggregator) *TopN { + t.Aggregations = aggregations + return t +} + +func (t *TopN) SetPostAggregations(postAggregations []builder.PostAggregator) *TopN { + t.PostAggregations = postAggregations + return t +} + +func (t *TopN) UnmarshalJSON(data []byte) error { + var err error + var tmp struct { + VirtualColumns []json.RawMessage `json:"virtualColumns,omitempty"` + Dimension json.RawMessage `json:"dimension,omitempty"` + Metric json.RawMessage `json:"metric,omitempty"` + Threshold int64 `json:"threshold,omitempty"` + Filter json.RawMessage `json:"filter,omitempty"` + Granularity json.RawMessage `json:"granularity,omitempty"` + Aggregations []json.RawMessage `json:"aggregations,omitempty"` + PostAggregations []json.RawMessage `json:"postAggregations,omitempty"` + } + if err = json.Unmarshal(data, &tmp); err != nil { + return err + } + var v builder.VirtualColumn + vv := make([]builder.VirtualColumn, len(tmp.VirtualColumns)) + for i := range tmp.VirtualColumns { + if v, err = virtualcolumn.Load(tmp.VirtualColumns[i]); err != nil { + return err + } + vv[i] = v + } + d, err := dimension.Load(tmp.Dimension) + if err != nil { + return err + } + m, err := topnmetric.Load(tmp.Metric) + if err != nil { + return err + } + var f builder.Filter + if tmp.Filter != nil { + f, err = filter.Load(tmp.Filter) + if err != nil { + return err + } + } + gr, err := granularity.Load(tmp.Granularity) + if err != nil { + return err + } + var a builder.Aggregator + aa := make([]builder.Aggregator, len(tmp.Aggregations)) + for i := range tmp.Aggregations { + if a, err = aggregation.Load(tmp.Aggregations[i]); err != nil { + return err + } + aa[i] = a + } + var p builder.PostAggregator + pp := make([]builder.PostAggregator, len(tmp.PostAggregations)) + for i := range tmp.PostAggregations { + if p, err = postaggregation.Load(tmp.PostAggregations[i]); err != nil { + return err + } + pp[i] = p + } + err = t.Base.UnmarshalJSON(data) + t.VirtualColumns = vv + t.Dimension = d + t.Metric = m + t.Threshold = tmp.Threshold + t.Filter = f + t.Granularity = gr + t.Aggregations = aa + t.PostAggregations = pp + return err +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/searchqueryspec/all.go b/vendor/github.com/grafadruid/go-druid/builder/searchqueryspec/all.go new file mode 100644 index 000000000..7f1feea34 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/searchqueryspec/all.go @@ -0,0 +1,11 @@ +package searchqueryspec + +type All struct { + Base +} + +func NewAll() *All { + a := &All{} + a.SetType("all") + return a +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/searchqueryspec/contains.go b/vendor/github.com/grafadruid/go-druid/builder/searchqueryspec/contains.go new file mode 100644 index 000000000..b322f59eb --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/searchqueryspec/contains.go @@ -0,0 +1,23 @@ +package searchqueryspec + +type Contains struct { + Base + Value string `json:"value,omitempty"` + CaseSensitive *bool `json:"caseSensitive,omitempty"` +} + +func NewContains() *Contains { + c := &Contains{} + c.SetType("contains") + return c +} + +func (c *Contains) SetValue(value string) *Contains { + c.Value = value + return c +} + +func (c *Contains) SetCaseSensitive(caseSensitive bool) *Contains { + c.CaseSensitive = &caseSensitive + return c +} diff --git 
a/vendor/github.com/grafadruid/go-druid/builder/searchqueryspec/fragment.go b/vendor/github.com/grafadruid/go-druid/builder/searchqueryspec/fragment.go new file mode 100644 index 000000000..2b5e9f2d7 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/searchqueryspec/fragment.go @@ -0,0 +1,23 @@ +package searchqueryspec + +type Fragment struct { + Base + Value string `json:"value,omitempty"` + CaseSensitive *bool `json:"caseSensitive,omitempty"` +} + +func NewFragment() *Fragment { + f := &Fragment{} + f.SetType("fragment") + return f +} + +func (f *Fragment) SetValue(value string) *Fragment { + f.Value = value + return f +} + +func (f *Fragment) SetCaseSensitive(caseSensitive bool) *Fragment { + f.CaseSensitive = &caseSensitive + return f +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/searchqueryspec/insensitive_contains.go b/vendor/github.com/grafadruid/go-druid/builder/searchqueryspec/insensitive_contains.go new file mode 100644 index 000000000..8cef59f00 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/searchqueryspec/insensitive_contains.go @@ -0,0 +1,17 @@ +package searchqueryspec + +type InsensitiveContains struct { + Base + Value string `json:"value,omitempty"` +} + +func NewInsensitiveContains() *InsensitiveContains { + i := &InsensitiveContains{} + i.SetType("insensitiveContains") + return i +} + +func (i *InsensitiveContains) SetValue(value string) *InsensitiveContains { + i.Value = value + return i +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/searchqueryspec/regex.go b/vendor/github.com/grafadruid/go-druid/builder/searchqueryspec/regex.go new file mode 100644 index 000000000..936d2cb87 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/searchqueryspec/regex.go @@ -0,0 +1,17 @@ +package searchqueryspec + +type Regex struct { + Base + Pattern string `json:"pattern,omitempty"` +} + +func NewRegex() *Regex { + r := &Regex{} + r.SetType("regex") + return r +} + +func (r *Regex) SetPattern(pattern string) *Regex { + r.Pattern = pattern + return r +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/searchqueryspec/search_query_spec.go b/vendor/github.com/grafadruid/go-druid/builder/searchqueryspec/search_query_spec.go new file mode 100644 index 000000000..55c25d0fe --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/searchqueryspec/search_query_spec.go @@ -0,0 +1,49 @@ +package searchqueryspec + +import ( + "encoding/json" + "errors" + + "github.com/grafadruid/go-druid/builder" +) + +type Base struct { + Typ builder.ComponentType `json:"type,omitempty"` +} + +func (b *Base) SetType(typ builder.ComponentType) *Base { + b.Typ = typ + return b +} + +func (b *Base) Type() builder.ComponentType { + return b.Typ +} + +func Load(data []byte) (builder.SearchQuerySpec, error) { + var s builder.SearchQuerySpec + if string(data) == "null" { + return s, nil + } + var t struct { + Typ builder.ComponentType `json:"type,omitempty"` + } + if err := json.Unmarshal(data, &t); err != nil { + return nil, err + } + switch t.Typ { + case "all": + s = NewAll() + case "contains": + s = NewContains() + case "fragment": + s = NewFragment() + case "insensitiveContains": + s = NewInsensitiveContains() + case "regex": + s = NewRegex() + default: + return nil, errors.New("unsupported searchqueryspec type") + } + return s, json.Unmarshal(data, &s) +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/toinclude/all.go b/vendor/github.com/grafadruid/go-druid/builder/toinclude/all.go new file mode 100644 index 
000000000..93d0a79e5 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/toinclude/all.go @@ -0,0 +1,7 @@ +package toinclude + +func NewAll() *Base { + a := &Base{} + a.SetType("all") + return a +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/toinclude/list.go b/vendor/github.com/grafadruid/go-druid/builder/toinclude/list.go new file mode 100644 index 000000000..1bedf5c00 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/toinclude/list.go @@ -0,0 +1,17 @@ +package toinclude + +type List struct { + Base + Columns []string `json:"columns,omitempty"` +} + +func NewList() *List { + l := &List{} + l.SetType("list") + return l +} + +func (l *List) SetColumns(columns []string) *List { + l.Columns = columns + return l +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/toinclude/none.go b/vendor/github.com/grafadruid/go-druid/builder/toinclude/none.go new file mode 100644 index 000000000..82d69cf98 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/toinclude/none.go @@ -0,0 +1,7 @@ +package toinclude + +func NewNone() *Base { + n := &Base{} + n.SetType("none") + return n +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/toinclude/to_include.go b/vendor/github.com/grafadruid/go-druid/builder/toinclude/to_include.go new file mode 100644 index 000000000..89401fc46 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/toinclude/to_include.go @@ -0,0 +1,45 @@ +package toinclude + +import ( + "encoding/json" + "errors" + + "github.com/grafadruid/go-druid/builder" +) + +type Base struct { + Typ builder.ComponentType `json:"type,omitempty"` +} + +func (b *Base) SetType(typ builder.ComponentType) *Base { + b.Typ = typ + return b +} + +func (b *Base) Type() builder.ComponentType { + return b.Typ +} + +func Load(data []byte) (builder.ToInclude, error) { + var ti builder.ToInclude + if string(data) == "null" { + return ti, nil + } + var t struct { + Typ builder.ComponentType `json:"type,omitempty"` + } + if err := json.Unmarshal(data, &t); err != nil { + return nil, err + } + switch t.Typ { + case "all": + ti = NewAll() + case "list": + ti = NewList() + case "none": + ti = NewNone() + default: + return nil, errors.New("unsupported toinclude type") + } + return ti, json.Unmarshal(data, &ti) +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/topnmetric/alpha_numeric.go b/vendor/github.com/grafadruid/go-druid/builder/topnmetric/alpha_numeric.go new file mode 100644 index 000000000..3e61bf30c --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/topnmetric/alpha_numeric.go @@ -0,0 +1,17 @@ +package topnmetric + +type AlphaNumeric struct { + Base + PreviousStop string `json:"previousStop,omitempty"` +} + +func NewAlphaNumeric() *AlphaNumeric { + a := &AlphaNumeric{} + a.SetType("alphaNumeric") + return a +} + +func (a *AlphaNumeric) SetPreviousStop(previousStop string) *AlphaNumeric { + a.PreviousStop = previousStop + return a +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/topnmetric/dimension.go b/vendor/github.com/grafadruid/go-druid/builder/topnmetric/dimension.go new file mode 100644 index 000000000..2dfc2c3ba --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/topnmetric/dimension.go @@ -0,0 +1,25 @@ +package topnmetric + +import "github.com/grafadruid/go-druid/builder/types" + +type Dimension struct { + Base + PreviousStop string `json:"previousStop,omitempty"` + Ordering types.StringComparator `json:"ordering,omitempty"` +} + +func NewDimension() *Dimension { + d 
:= &Dimension{} + d.SetType("dimension") + return d +} + +func (d *Dimension) SetPreviousStop(previousStop string) *Dimension { + d.PreviousStop = previousStop + return d +} + +func (d *Dimension) SetOrdering(ordering types.StringComparator) *Dimension { + d.Ordering = ordering + return d +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/topnmetric/inverted.go b/vendor/github.com/grafadruid/go-druid/builder/topnmetric/inverted.go new file mode 100644 index 000000000..27acf9d72 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/topnmetric/inverted.go @@ -0,0 +1,40 @@ +package topnmetric + +import ( + "encoding/json" + + "github.com/grafadruid/go-druid/builder" +) + +type Inverted struct { + Base + Metric builder.TopNMetric `json:"metric,omitempty"` +} + +func NewInverted() *Inverted { + i := &Inverted{} + i.SetType("inverted") + return i +} + +func (i *Inverted) SetMetric(metric builder.TopNMetric) *Inverted { + i.Metric = metric + return i +} + +func (i *Inverted) UnmarshalJSON(data []byte) error { + var tmp struct { + Base + Metric json.RawMessage `json:"metric,omitempty"` + } + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } + m, err := Load(tmp.Metric) + if err != nil { + return err + } + i.Base = tmp.Base + i.Metric = m + return nil +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/topnmetric/lexicographic.go b/vendor/github.com/grafadruid/go-druid/builder/topnmetric/lexicographic.go new file mode 100644 index 000000000..fc17efdcf --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/topnmetric/lexicographic.go @@ -0,0 +1,17 @@ +package topnmetric + +type Lexicographic struct { + Base + PreviousStop string `json:"previousStop,omitempty"` +} + +func NewLexicographic() *Lexicographic { + l := &Lexicographic{} + l.SetType("lexicographic") + return l +} + +func (l *Lexicographic) SetPreviousStop(previousStop string) *Lexicographic { + l.PreviousStop = previousStop + return l +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/topnmetric/numeric.go b/vendor/github.com/grafadruid/go-druid/builder/topnmetric/numeric.go new file mode 100644 index 000000000..1e2764377 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/topnmetric/numeric.go @@ -0,0 +1,17 @@ +package topnmetric + +type Numeric struct { + Base + Metric string `json:"metric,omitempty"` +} + +func NewNumeric() *Numeric { + n := &Numeric{} + n.SetType("numeric") + return n +} + +func (n *Numeric) SetMetric(metric string) *Numeric { + n.Metric = metric + return n +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/topnmetric/top_n_metric.go b/vendor/github.com/grafadruid/go-druid/builder/topnmetric/top_n_metric.go new file mode 100644 index 000000000..4390fa398 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/topnmetric/top_n_metric.go @@ -0,0 +1,49 @@ +package topnmetric + +import ( + "encoding/json" + "errors" + + "github.com/grafadruid/go-druid/builder" +) + +type Base struct { + Typ builder.ComponentType `json:"type,omitempty"` +} + +func (b *Base) SetType(typ builder.ComponentType) *Base { + b.Typ = typ + return b +} + +func (b *Base) Type() builder.ComponentType { + return b.Typ +} + +func Load(data []byte) (builder.TopNMetric, error) { + var tnm builder.TopNMetric + if string(data) == "null" { + return tnm, nil + } + var t struct { + Typ builder.ComponentType `json:"type,omitempty"` + } + if err := json.Unmarshal(data, &t); err != nil { + return nil, err + } + switch t.Typ { + case "alphaNumeric": + tnm = 
NewAlphaNumeric() + case "dimension": + tnm = NewDimension() + case "inverted": + tnm = NewInverted() + case "lexicographic": + tnm = NewLexicographic() + case "numeric": + tnm = NewNumeric() + default: + return nil, errors.New("unsupported topnmetric type") + } + return tnm, json.Unmarshal(data, &tnm) +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/types/date_time_zone.go b/vendor/github.com/grafadruid/go-druid/builder/types/date_time_zone.go new file mode 100644 index 000000000..ac2522286 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/types/date_time_zone.go @@ -0,0 +1,7 @@ +package types + +type DateTimeZone string + +const ( + UTC DateTimeZone = "UTC" +) diff --git a/vendor/github.com/grafadruid/go-druid/builder/types/join_types.go b/vendor/github.com/grafadruid/go-druid/builder/types/join_types.go new file mode 100644 index 000000000..da5223d7f --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/types/join_types.go @@ -0,0 +1,10 @@ +package types + +type JoinType string + +const ( + Inner JoinType = "INNER" + Left = "LEFT" + Right = "RIGHT" + Full = "FULL" +) diff --git a/vendor/github.com/grafadruid/go-druid/builder/types/null_handling.go b/vendor/github.com/grafadruid/go-druid/builder/types/null_handling.go new file mode 100644 index 000000000..3e35a01fa --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/types/null_handling.go @@ -0,0 +1,9 @@ +package types + +type NullHandling string + +const ( + NullString NullHandling = "NULLSTRING" + EmptyString = "EMPTYSTRING" + ReturnNull = "RETURNNULL" +) diff --git a/vendor/github.com/grafadruid/go-druid/builder/types/output_types.go b/vendor/github.com/grafadruid/go-druid/builder/types/output_types.go new file mode 100644 index 000000000..b29fce96b --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/types/output_types.go @@ -0,0 +1,11 @@ +package types + +type OutputType string + +const ( + Float OutputType = "FLOAT" + Double = "DOUBLE" + Long = "LONG" + String = "STRING" + Complex = "COMPLEX" +) diff --git a/vendor/github.com/grafadruid/go-druid/builder/types/string_comparators.go b/vendor/github.com/grafadruid/go-druid/builder/types/string_comparators.go new file mode 100644 index 000000000..926b8cec7 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/types/string_comparators.go @@ -0,0 +1,11 @@ +package types + +type StringComparator string + +const ( + Lexicographic StringComparator = "lexicographic" + Alphanumeric = "alphanumeric" + Numeric = "numeric" + Strlen = "strlen" + Version = "version" +) diff --git a/vendor/github.com/grafadruid/go-druid/builder/virtualcolumn/expression.go b/vendor/github.com/grafadruid/go-druid/builder/virtualcolumn/expression.go new file mode 100644 index 000000000..5b4e21e71 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/virtualcolumn/expression.go @@ -0,0 +1,31 @@ +package virtualcolumn + +import "github.com/grafadruid/go-druid/builder/types" + +type Expression struct { + Base + Name string `json:"name,omitempty"` + Expression string `json:"expression,omitempty"` + OutputType types.OutputType `json:"outputType,omitempty"` +} + +func NewExpression() *Expression { + e := &Expression{} + e.SetType("expression") + return e +} + +func (e *Expression) SetName(name string) *Expression { + e.Name = name + return e +} + +func (e *Expression) SetExpression(expression string) *Expression { + e.Expression = expression + return e +} + +func (e *Expression) SetOutputType(outputType types.OutputType) *Expression 
{ + e.OutputType = outputType + return e +} diff --git a/vendor/github.com/grafadruid/go-druid/builder/virtualcolumn/virtual_column.go b/vendor/github.com/grafadruid/go-druid/builder/virtualcolumn/virtual_column.go new file mode 100644 index 000000000..49dc4880d --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/builder/virtualcolumn/virtual_column.go @@ -0,0 +1,41 @@ +package virtualcolumn + +import ( + "encoding/json" + "errors" + + "github.com/grafadruid/go-druid/builder" +) + +type Base struct { + Typ builder.ComponentType `json:"type,omitempty"` +} + +func (b *Base) SetType(typ builder.ComponentType) *Base { + b.Typ = typ + return b +} + +func (b *Base) Type() builder.ComponentType { + return b.Typ +} + +func Load(data []byte) (builder.VirtualColumn, error) { + var v builder.VirtualColumn + if string(data) == "null" { + return v, nil + } + var t struct { + Typ builder.ComponentType `json:"type,omitempty"` + } + if err := json.Unmarshal(data, &t); err != nil { + return nil, err + } + switch t.Typ { + case "expression": + v = NewExpression() + default: + return nil, errors.New("unsupported virtualcolumn type") + } + return v, json.Unmarshal(data, &v) +} diff --git a/vendor/github.com/grafadruid/go-druid/common.go b/vendor/github.com/grafadruid/go-druid/common.go new file mode 100644 index 000000000..b5517b056 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/common.go @@ -0,0 +1,69 @@ +package druid + +const ( + StatusEndpoint = "status" + HealthEndpoint = "status/health" + PropertiesEndpoint = "status/properties" + SelfDiscoveredEndpoint = "status/selfDiscovered/status" +) + +type Status struct { + Version string `json:"version"` + Modules []struct { + Name string `json:"name"` + Artifact string `json:"artifact"` + Version string `json:"version"` + } `json:"modules"` + Memory struct { + MaxMemory int `json:"maxMemory"` + TotalMemory int `json:"totalMemory"` + FreeMemory int `json:"freeMemory"` + UsedMemory int `json:"usedMemory"` + DirectMemory int `json:"directMemory"` + } `json:"memory"` +} +type Health bool +type Properties map[string]string +type SelfDiscovered struct { + SelfDiscovered bool `json:"selfDiscovered"` +} + +type CommonService struct { + client *Client +} + +func (c *CommonService) Status() (*Status, *Response, error) { + var s *Status + response, err := c.client.ExecuteRequest("GET", StatusEndpoint, nil, &s) + if err != nil { + return nil, response, err + } + return s, response, nil +} + +func (c *CommonService) Health() (*Health, *Response, error) { + var h *Health + response, err := c.client.ExecuteRequest("GET", HealthEndpoint, nil, &h) + if err != nil { + return nil, response, err + } + return h, response, nil +} + +func (c *CommonService) Properties() (*Properties, *Response, error) { + var p *Properties + response, err := c.client.ExecuteRequest("GET", PropertiesEndpoint, nil, &p) + if err != nil { + return nil, response, err + } + return p, response, nil +} + +func (c *CommonService) SelfDiscovered() (*SelfDiscovered, *Response, error) { + var s *SelfDiscovered + response, err := c.client.ExecuteRequest("GET", SelfDiscoveredEndpoint, nil, &s) + if err != nil { + return nil, response, err + } + return s, response, nil +} diff --git a/vendor/github.com/grafadruid/go-druid/druid.go b/vendor/github.com/grafadruid/go-druid/druid.go new file mode 100644 index 000000000..4a76e71cc --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/druid.go @@ -0,0 +1,392 @@ +package druid + +import ( + "context" + "crypto/tls" + "crypto/x509" + "encoding/json" + 
"fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "regexp" + "strings" + "time" + + querystring "github.com/google/go-querystring/query" + "github.com/hashicorp/go-retryablehttp" +) + +const ( + processInformationPathPrefix = "status/" + coordinatorPathPrefix = "druid/coordinator/v1/" + overlordPathPrefix = "druid/indexer/v1/" + middleManagerPathPrefix = "druid/worker/v1/" + peonPathPrefix = "druid/worker/v1/chat/" + historicalPathPrefix = "druid/historical/v1/" + defaultRetryWaitMin = 100 * time.Millisecond + defaultRetryWaitMax = 3 * time.Second + defaultRetryMax = 5 +) + +var ( + defaultBackoff = retryablehttp.DefaultBackoff + // A regular expression to match the error returned by net/http when the + // configured number of redirects is exhausted. This error isn't typed + // specifically so we resort to matching on the error string. + redirectsErrorRe = regexp.MustCompile(`stopped after \d+ redirects\z`) + + // A regular expression to match the error returned by net/http when the + // scheme specified in the URL is invalid. This error isn't typed + // specifically so we resort to matching on the error string. + schemeErrorRe = regexp.MustCompile(`unsupported protocol scheme`) + + // We need to consume response bodies to maintain http connections, but + // limit the size we consume to respReadLimit. + respReadLimit = int64(4096) +) + +type Client struct { + http *retryablehttp.Client + baseURL *url.URL + username string + password string + basicAuth bool +} + +type clientOptions struct { + httpClient *http.Client + username string + password string + backoff retryablehttp.Backoff + errorHandler retryablehttp.ErrorHandler + retry retryablehttp.CheckRetry + retryWaitMin time.Duration + retryWaitMax time.Duration + retryMax int +} + +type ClientOption func(*clientOptions) + +type druidErrorReponse struct { + Error string + ErrorMessage string + ErrorClass string + Host string +} + +func NewClient(baseURL string, options ...ClientOption) (*Client, error) { + opts := &clientOptions{ + httpClient: defaultHTTPClient(), + backoff: defaultBackoff, + errorHandler: defaultErrorHandler, + retry: defaultRetry, + retryWaitMin: defaultRetryWaitMin, + retryWaitMax: defaultRetryWaitMax, + retryMax: defaultRetryMax, + } + for _, opt := range options { + opt(opts) + } + c := &Client{ + http: &retryablehttp.Client{ + Backoff: opts.backoff, + CheckRetry: opts.retry, + HTTPClient: opts.httpClient, + RetryWaitMin: opts.retryWaitMin, + RetryWaitMax: opts.retryWaitMax, + RetryMax: opts.retryMax, + }, + username: opts.username, + password: opts.password, + basicAuth: opts.username != "" && opts.password != "", + } + if err := c.setBaseURL(baseURL); err != nil { + return nil, err + } + + return c, nil +} + +func (c *Client) Close() error { + return nil +} + +func (c *Client) NewRequest(method, path string, opt interface{}) (*retryablehttp.Request, error) { + u := *c.baseURL + unescaped, err := url.PathUnescape(path) + if err != nil { + return nil, err + } + + u.RawPath = c.baseURL.Path + path + u.Path = c.baseURL.Path + unescaped + + reqHeaders := make(http.Header) + reqHeaders.Set("Accept", "application/json") + + var body interface{} + if opt != nil { + switch { + case method == "POST" || method == "PUT": + reqHeaders.Set("Content-Type", "application/json") + if opt != nil { + body, err = json.Marshal(opt) + if err != nil { + return nil, err + } + } + default: + q, err := querystring.Values(opt) + if err != nil { + return nil, err + } + u.RawQuery = q.Encode() + } + } + + r, err := 
retryablehttp.NewRequest(method, u.String(), body) + if err != nil { + return nil, err + } + r.Header = reqHeaders + if c.basicAuth { + r.SetBasicAuth(c.username, c.password) + } + + return r, nil +} + +func (c *Client) Do(r *retryablehttp.Request, result interface{}) (*Response, error) { + resp, err := c.http.Do(r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + response := &Response{resp} + if err = response.ExtractError(); err != nil { + return nil, err + } + if result != nil { + if err = json.NewDecoder(resp.Body).Decode(result); err != nil { + return nil, err + } + } + return response, nil +} + +func (c *Client) ExecuteRequest(method, path string, opt, result interface{}) (*Response, error) { + req, err := c.NewRequest(method, path, opt) + if err != nil { + return nil, err + } + return c.Do(req, result) +} + +func defaultRetry(ctx context.Context, resp *http.Response, err error) (bool, error) { + if ctx.Err() != nil { + return false, ctx.Err() + } + + // As explained here https://golang.org/pkg/net/http/#Client.Do, + // An error is returned if caused by client policy (such as CheckRedirect), or failure to speak HTTP (such as a network connectivity problem). A non-2xx status code doesn't cause an error. + if err != nil { + if v, ok := err.(*url.Error); ok { + // Don't retry if the error was due to too many redirects. + if redirectsErrorRe.MatchString(v.Error()) { + return false, v + } + + // Don't retry if the error was due to an invalid protocol scheme. + if schemeErrorRe.MatchString(v.Error()) { + return false, v + } + + // Don't retry if the error was due to TLS cert verification failure. + if _, ok := v.Err.(x509.UnknownAuthorityError); ok { + return false, v + } + } + + return true, nil + } + + if resp.StatusCode == http.StatusOK { + return false, nil + } + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return true, fmt.Errorf("failed to read the response from Druid: %w", err) + } + var errResp druidErrorReponse + err = json.Unmarshal(body, &errResp) + if err != nil { + return true, fmt.Errorf("failed to read the response from Druid: %w", err) + } + + // https://druid.apache.org/docs/latest/querying/querying.html#query-execution-failures + switch errResp.Error { + case "SQL parse failed": + goto ABORT + case "Plan validation failed": + goto ABORT + case "Unsupported operation": + goto ABORT + case "Query cancelled": + goto ABORT + case "Unknown exception": + goto ABORT + default: + return true, fmt.Errorf("error response from Druid: %+v", errResp) + } + +ABORT: + // When aborting the retry, the response body should be closed: + // https://pkg.go.dev/github.com/hashicorp/go-retryablehttp#CheckRetry + resp.Body.Close() + return false, fmt.Errorf("failed to query Druid: %+v", errResp) +} + +func defaultErrorHandler(resp *http.Response, err error, numTries int) (*http.Response, error) { + // Drain and close the response body so the connection can be reused: + // https://pkg.go.dev/github.com/hashicorp/go-retryablehttp#ErrorHandler + defer resp.Body.Close() + io.Copy(ioutil.Discard, io.LimitReader(resp.Body, respReadLimit)) + + return resp, fmt.Errorf("Failed after %d attempt(s). 
Last error: %w", numTries, err) +} + +func (c *Client) setBaseURL(urlStr string) error { + if !strings.HasSuffix(urlStr, "/") { + urlStr += "/" + } + baseURL, err := url.ParseRequestURI(urlStr) + if err != nil { + return err + } + c.baseURL = baseURL + return nil +} + +func (c *Client) Common() *CommonService { + return &CommonService{client: c} +} + +func (c *Client) Query() *QueryService { + return &QueryService{client: c} +} + +func WithBasicAuth(username, password string) ClientOption { + return func(opts *clientOptions) { + opts.username = username + opts.password = password + } +} + +func WithSkipTLSVerify() ClientOption { + return func(opts *clientOptions) { + if nil == opts.httpClient.Transport { + opts.httpClient.Transport = &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + } + } + opts.httpClient.Transport.(*http.Transport).TLSClientConfig.InsecureSkipVerify = true + } +} + +func WithCustomBackoff(backoff retryablehttp.Backoff) ClientOption { + return func(opts *clientOptions) { + opts.backoff = backoff + } +} + +func WithCustomRetry(retry retryablehttp.CheckRetry) ClientOption { + return func(opts *clientOptions) { + opts.retry = retry + } +} + +func WithCustomErrorHandler(h retryablehttp.ErrorHandler) ClientOption { + return func(opts *clientOptions) { + opts.errorHandler = h + } +} + +func WithHTTPClient(httpClient *http.Client) ClientOption { + return func(opts *clientOptions) { + opts.httpClient = httpClient + } +} + +func WithRetryWaitMin(retryWaitMin time.Duration) ClientOption { + return func(opts *clientOptions) { + opts.retryWaitMin = retryWaitMin + } +} + +func WithRetryWaitMax(retryWaitMax time.Duration) ClientOption { + return func(opts *clientOptions) { + opts.retryWaitMax = retryWaitMax + } +} + +func WithRetryMax(retryMax int) ClientOption { + return func(opts *clientOptions) { + opts.retryMax = retryMax + } +} + +type Response struct { + *http.Response +} + +func (r *Response) ExtractError() error { + switch r.StatusCode { + case 200, 201, 202, 204, 304: + return nil + } + errorResponse := &errResponse{Response: r.Response} + data, err := ioutil.ReadAll(r.Body) + if err == nil && data != nil { + errorResponse.Body = data + var raw interface{} + if err := json.Unmarshal(data, &raw); err != nil { + errorResponse.Message = r.Status + } else { + errorResponse.Message = parseError(raw) + } + } + return errorResponse +} + +type errResponse struct { + Body []byte + Response *http.Response + Message string +} + +func (e *errResponse) Error() string { + path, _ := url.QueryUnescape(e.Response.Request.URL.Path) + return fmt.Sprintf( + "error with code %d %s %s message: %s", + e.Response.StatusCode, + e.Response.Request.Method, + fmt.Sprintf("%s://%s%s", e.Response.Request.URL.Scheme, e.Response.Request.URL.Host, path), + e.Message, + ) +} + +func parseError(raw interface{}) string { + if raw, isMapSI := raw.(map[string]interface{}); isMapSI { + if errStr, hasErrorStr := raw["error"]; hasErrorStr { + return errStr.(string) + } + } + return fmt.Sprintf("failed to parse unexpected error type: %T", raw) +} + +func defaultHTTPClient() *http.Client { + return &http.Client{} +} diff --git a/vendor/github.com/grafadruid/go-druid/query.go b/vendor/github.com/grafadruid/go-druid/query.go new file mode 100644 index 000000000..599719015 --- /dev/null +++ b/vendor/github.com/grafadruid/go-druid/query.go @@ -0,0 +1,42 @@ +package druid + +import ( + "github.com/grafadruid/go-druid/builder" + "github.com/grafadruid/go-druid/builder/query" +) + +const ( + 
NativeQueryEndpoint = "druid/v2" + SQLQueryEndpoint = "druid/v2/sql" +) + +type QueryService struct { + client *Client +} + +func (q *QueryService) Execute(qry builder.Query, result interface{}) (*Response, error) { + var path string + switch qry.Type() { + case "sql": + path = SQLQueryEndpoint + default: + path = NativeQueryEndpoint + } + r, err := q.client.NewRequest("POST", path, qry) + if err != nil { + return nil, err + } + resp, err := q.client.Do(r, result) + if err != nil { + return nil, err + } + return resp, nil +} + +//func (q *QueryService) Cancel(query builder.Query) () {} + +//func (q *QueryService) Candidates(query builder.Query, result interface{}) (*Response, error) {} + +func (q *QueryService) Load(data []byte) (builder.Query, error) { + return query.Load(data) +} diff --git a/vendor/github.com/hashicorp/go-cleanhttp/LICENSE b/vendor/github.com/hashicorp/go-cleanhttp/LICENSE new file mode 100644 index 000000000..e87a115e4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. 
"Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. 
You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. 
The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. 
Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-cleanhttp/README.md b/vendor/github.com/hashicorp/go-cleanhttp/README.md new file mode 100644 index 000000000..036e5313f --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/README.md @@ -0,0 +1,30 @@ +# cleanhttp + +Functions for accessing "clean" Go http.Client values + +------------- + +The Go standard library contains a default `http.Client` called +`http.DefaultClient`. 
It is a common idiom in Go code to start with +`http.DefaultClient` and tweak it as necessary, and in fact, this is +encouraged; from the `http` package documentation: + +> The Client's Transport typically has internal state (cached TCP connections), +so Clients should be reused instead of created as needed. Clients are safe for +concurrent use by multiple goroutines. + +Unfortunately, this is a shared value, and it is not uncommon for libraries to +assume that they are free to modify it at will. With enough dependencies, it +can be very easy to encounter strange problems and race conditions due to +manipulation of this shared value across libraries and goroutines (clients are +safe for concurrent use, but writing values to the client struct itself is not +protected). + +Making things worse is the fact that a bare `http.Client` will use a default +`http.Transport` called `http.DefaultTransport`, which is another global value +that behaves the same way. So it is not simply enough to replace +`http.DefaultClient` with `&http.Client{}`. + +This repository provides some simple functions to get a "clean" `http.Client` +-- one that uses the same default values as the Go standard library, but +returns a client that does not share any state with other clients. diff --git a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go new file mode 100644 index 000000000..fe28d15b6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go @@ -0,0 +1,58 @@ +package cleanhttp + +import ( + "net" + "net/http" + "runtime" + "time" +) + +// DefaultTransport returns a new http.Transport with similar default values to +// http.DefaultTransport, but with idle connections and keepalives disabled. +func DefaultTransport() *http.Transport { + transport := DefaultPooledTransport() + transport.DisableKeepAlives = true + transport.MaxIdleConnsPerHost = -1 + return transport +} + +// DefaultPooledTransport returns a new http.Transport with similar default +// values to http.DefaultTransport. Do not use this for transient transports as +// it can leak file descriptors over time. Only use this for transports that +// will be re-used for the same host(s). +func DefaultPooledTransport() *http.Transport { + transport := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + ForceAttemptHTTP2: true, + MaxIdleConnsPerHost: runtime.GOMAXPROCS(0) + 1, + } + return transport +} + +// DefaultClient returns a new http.Client with similar default values to +// http.Client, but with a non-shared Transport, idle connections disabled, and +// keepalives disabled. +func DefaultClient() *http.Client { + return &http.Client{ + Transport: DefaultTransport(), + } +} + +// DefaultPooledClient returns a new http.Client with similar default values to +// http.Client, but with a shared Transport. Do not use this function for +// transient clients as it can leak file descriptors over time. Only use this +// for clients that will be re-used for the same host(s). 
+func DefaultPooledClient() *http.Client { + return &http.Client{ + Transport: DefaultPooledTransport(), + } +} diff --git a/vendor/github.com/hashicorp/go-cleanhttp/doc.go b/vendor/github.com/hashicorp/go-cleanhttp/doc.go new file mode 100644 index 000000000..05841092a --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/doc.go @@ -0,0 +1,20 @@ +// Package cleanhttp offers convenience utilities for acquiring "clean" +// http.Transport and http.Client structs. +// +// Values set on http.DefaultClient and http.DefaultTransport affect all +// callers. This can have detrimental effects, especially in TLS contexts, +// where client or root certificates set to talk to multiple endpoints can end +// up displacing each other, leading to hard-to-debug issues. This package +// provides non-shared http.Client and http.Transport structs to ensure that +// the configuration will not be overwritten by other parts of the application +// or dependencies. +// +// The DefaultClient and DefaultTransport functions disable idle connections +// and keepalives. Without ensuring that idle connections are closed before +// garbage collection, short-term clients/transports can leak file descriptors, +// eventually leading to "too many open files" errors. If you will be +// connecting to the same hosts repeatedly from the same client, you can use +// DefaultPooledClient to receive a client that has connection pooling +// semantics similar to http.DefaultClient. +// +package cleanhttp diff --git a/vendor/github.com/hashicorp/go-cleanhttp/handlers.go b/vendor/github.com/hashicorp/go-cleanhttp/handlers.go new file mode 100644 index 000000000..3c845dc0d --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/handlers.go @@ -0,0 +1,48 @@ +package cleanhttp + +import ( + "net/http" + "strings" + "unicode" +) + +// HandlerInput provides input options to cleanhttp's handlers +type HandlerInput struct { + ErrStatus int +} + +// PrintablePathCheckHandler is a middleware that ensures the request path +// contains only printable runes. 
+func PrintablePathCheckHandler(next http.Handler, input *HandlerInput) http.Handler { + // Nil-check on input to make it optional + if input == nil { + input = &HandlerInput{ + ErrStatus: http.StatusBadRequest, + } + } + + // Default to http.StatusBadRequest on error + if input.ErrStatus == 0 { + input.ErrStatus = http.StatusBadRequest + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r != nil { + // Check URL path for non-printable characters + idx := strings.IndexFunc(r.URL.Path, func(c rune) bool { + return !unicode.IsPrint(c) + }) + + if idx != -1 { + w.WriteHeader(input.ErrStatus) + return + } + + if next != nil { + next.ServeHTTP(w, r) + } + } + + return + }) +} diff --git a/vendor/github.com/hashicorp/go-retryablehttp/.gitignore b/vendor/github.com/hashicorp/go-retryablehttp/.gitignore new file mode 100644 index 000000000..4e309e0b3 --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/.gitignore @@ -0,0 +1,4 @@ +.idea/ +*.iml +*.test +.vscode/ \ No newline at end of file diff --git a/vendor/github.com/hashicorp/go-retryablehttp/.go-version b/vendor/github.com/hashicorp/go-retryablehttp/.go-version new file mode 100644 index 000000000..6fee2fedb --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/.go-version @@ -0,0 +1 @@ +1.22.2 diff --git a/vendor/github.com/hashicorp/go-retryablehttp/CHANGELOG.md b/vendor/github.com/hashicorp/go-retryablehttp/CHANGELOG.md new file mode 100644 index 000000000..68a627c6d --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/CHANGELOG.md @@ -0,0 +1,33 @@ +## 0.7.7 (May 30, 2024) + +BUG FIXES: + +- client: avoid potentially leaking URL-embedded basic authentication credentials in logs (#158) + +## 0.7.6 (May 9, 2024) + +ENHANCEMENTS: + +- client: support a `RetryPrepare` function for modifying the request before retrying (#216) +- client: support HTTP-date values for `Retry-After` header value (#138) +- client: avoid reading entire body when the body is a `*bytes.Reader` (#197) + +BUG FIXES: + +- client: fix a broken check for invalid server certificate in go 1.20+ (#210) + +## 0.7.5 (Nov 8, 2023) + +BUG FIXES: + +- client: fixes an issue where the request body is not preserved on temporary redirects or re-established HTTP/2 connections (#207) + +## 0.7.4 (Jun 6, 2023) + +BUG FIXES: + +- client: fixing an issue where the Content-Type header wouldn't be sent with an empty payload when using HTTP/2 (#194) + +## 0.7.3 (May 15, 2023) + +Initial release diff --git a/vendor/github.com/hashicorp/go-retryablehttp/CODEOWNERS b/vendor/github.com/hashicorp/go-retryablehttp/CODEOWNERS new file mode 100644 index 000000000..d6dd78a2d --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/CODEOWNERS @@ -0,0 +1 @@ +* @hashicorp/go-retryablehttp-maintainers diff --git a/vendor/github.com/hashicorp/go-retryablehttp/LICENSE b/vendor/github.com/hashicorp/go-retryablehttp/LICENSE new file mode 100644 index 000000000..f4f97ee58 --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/LICENSE @@ -0,0 +1,365 @@ +Copyright (c) 2015 HashiCorp, Inc. + +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. 
"Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. 
Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. 
If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-retryablehttp/Makefile b/vendor/github.com/hashicorp/go-retryablehttp/Makefile new file mode 100644 index 000000000..525524196 --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/Makefile @@ -0,0 +1,11 @@ +default: test + +test: + go vet ./... + go test -v -race ./... + +updatedeps: + go get -f -t -u ./... + go get -f -u ./... + +.PHONY: default test updatedeps diff --git a/vendor/github.com/hashicorp/go-retryablehttp/README.md b/vendor/github.com/hashicorp/go-retryablehttp/README.md new file mode 100644 index 000000000..145a62f21 --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/README.md @@ -0,0 +1,62 @@ +go-retryablehttp +================ + +[![Build Status](http://img.shields.io/travis/hashicorp/go-retryablehttp.svg?style=flat-square)][travis] +[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs] + +[travis]: http://travis-ci.org/hashicorp/go-retryablehttp +[godocs]: http://godoc.org/github.com/hashicorp/go-retryablehttp + +The `retryablehttp` package provides a familiar HTTP client interface with +automatic retries and exponential backoff. It is a thin wrapper over the +standard `net/http` client library and exposes nearly the same public API. This +makes `retryablehttp` very easy to drop into existing programs. + +`retryablehttp` performs automatic retries under certain conditions. Mainly, if +an error is returned by the client (connection errors, etc.), or if a 500-range +response code is received (except 501), then a retry is invoked after a wait +period. Otherwise, the response is returned and left to the caller to +interpret. + +The main difference from `net/http` is that requests which take a request body +(POST/PUT et. al) can have the body provided in a number of ways (some more or +less efficient) that allow "rewinding" the request body if the initial request +fails so that the full request can be attempted again. See the +[godoc](http://godoc.org/github.com/hashicorp/go-retryablehttp) for more +details. + +Version 0.6.0 and before are compatible with Go prior to 1.12. 
From 0.6.1 onward, Go 1.12+ is required. +From 0.6.7 onward, Go 1.13+ is required. + +Example Use +=========== + +Using this library should look almost identical to what you would do with +`net/http`. The most simple example of a GET request is shown below: + +```go +resp, err := retryablehttp.Get("/foo") +if err != nil { + panic(err) +} +``` + +The returned response object is an `*http.Response`, the same thing you would +usually get from `net/http`. Had the request failed one or more times, the above +call would block and retry with exponential backoff. + +## Getting a stdlib `*http.Client` with retries + +It's possible to convert a `*retryablehttp.Client` directly to a `*http.Client`. +This makes use of retryablehttp broadly applicable with minimal effort. Simply +configure a `*retryablehttp.Client` as you wish, and then call `StandardClient()`: + +```go +retryClient := retryablehttp.NewClient() +retryClient.RetryMax = 10 + +standardClient := retryClient.StandardClient() // *http.Client +``` + +For more usage and examples see the +[pkg.go.dev](https://pkg.go.dev/github.com/hashicorp/go-retryablehttp). diff --git a/vendor/github.com/hashicorp/go-retryablehttp/cert_error_go119.go b/vendor/github.com/hashicorp/go-retryablehttp/cert_error_go119.go new file mode 100644 index 000000000..b2b27e872 --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/cert_error_go119.go @@ -0,0 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build !go1.20 +// +build !go1.20 + +package retryablehttp + +import "crypto/x509" + +func isCertError(err error) bool { + _, ok := err.(x509.UnknownAuthorityError) + return ok +} diff --git a/vendor/github.com/hashicorp/go-retryablehttp/cert_error_go120.go b/vendor/github.com/hashicorp/go-retryablehttp/cert_error_go120.go new file mode 100644 index 000000000..a3cd315a2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/cert_error_go120.go @@ -0,0 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build go1.20 +// +build go1.20 + +package retryablehttp + +import "crypto/tls" + +func isCertError(err error) bool { + _, ok := err.(*tls.CertificateVerificationError) + return ok +} diff --git a/vendor/github.com/hashicorp/go-retryablehttp/client.go b/vendor/github.com/hashicorp/go-retryablehttp/client.go new file mode 100644 index 000000000..efee53c40 --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/client.go @@ -0,0 +1,919 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package retryablehttp provides a familiar HTTP client interface with +// automatic retries and exponential backoff. It is a thin wrapper over the +// standard net/http client library and exposes nearly the same public API. +// This makes retryablehttp very easy to drop into existing programs. +// +// retryablehttp performs automatic retries under certain conditions. Mainly, if +// an error is returned by the client (connection errors etc), or if a 500-range +// response is received, then a retry is invoked. Otherwise, the response is +// returned and left to the caller to interpret. +// +// Requests which take a request body should provide a non-nil function +// parameter. The best choice is to provide either a function satisfying +// ReaderFunc which provides multiple io.Readers in an efficient manner, a +// *bytes.Buffer (the underlying raw byte slice will be used) or a raw byte +// slice. 
As it is a reference type, and we will wrap it as needed by readers, +// we can efficiently re-use the request body without needing to copy it. If an +// io.Reader (such as a *bytes.Reader) is provided, the full body will be read +// prior to the first request, and will be efficiently re-used for any retries. +// ReadSeeker can be used, but some users have observed occasional data races +// between the net/http library and the Seek functionality of some +// implementations of ReadSeeker, so should be avoided if possible. +package retryablehttp + +import ( + "bytes" + "context" + "fmt" + "io" + "log" + "math" + "math/rand" + "net/http" + "net/url" + "os" + "regexp" + "strconv" + "strings" + "sync" + "time" + + cleanhttp "github.com/hashicorp/go-cleanhttp" +) + +var ( + // Default retry configuration + defaultRetryWaitMin = 1 * time.Second + defaultRetryWaitMax = 30 * time.Second + defaultRetryMax = 4 + + // defaultLogger is the logger provided with defaultClient + defaultLogger = log.New(os.Stderr, "", log.LstdFlags) + + // defaultClient is used for performing requests without explicitly making + // a new client. It is purposely private to avoid modifications. + defaultClient = NewClient() + + // We need to consume response bodies to maintain http connections, but + // limit the size we consume to respReadLimit. + respReadLimit = int64(4096) + + // timeNow sets the function that returns the current time. + // This defaults to time.Now. Changes to this should only be done in tests. + timeNow = time.Now + + // A regular expression to match the error returned by net/http when the + // configured number of redirects is exhausted. This error isn't typed + // specifically so we resort to matching on the error string. + redirectsErrorRe = regexp.MustCompile(`stopped after \d+ redirects\z`) + + // A regular expression to match the error returned by net/http when the + // scheme specified in the URL is invalid. This error isn't typed + // specifically so we resort to matching on the error string. + schemeErrorRe = regexp.MustCompile(`unsupported protocol scheme`) + + // A regular expression to match the error returned by net/http when a + // request header or value is invalid. This error isn't typed + // specifically so we resort to matching on the error string. + invalidHeaderErrorRe = regexp.MustCompile(`invalid header`) + + // A regular expression to match the error returned by net/http when the + // TLS certificate is not trusted. This error isn't typed + // specifically so we resort to matching on the error string. + notTrustedErrorRe = regexp.MustCompile(`certificate is not trusted`) +) + +// ReaderFunc is the type of function that can be given natively to NewRequest +type ReaderFunc func() (io.Reader, error) + +// ResponseHandlerFunc is a type of function that takes in a Response, and does something with it. +// The ResponseHandlerFunc is called when the HTTP client successfully receives a response and the +// CheckRetry function indicates that a retry of the base request is not necessary. +// If an error is returned from this function, the CheckRetry policy will be used to determine +// whether to retry the whole request (including this handler). +// +// Make sure to check status codes! Even if the request was completed it may have a non-2xx status code. +// +// The response body is not automatically closed. It must be closed either by the ResponseHandlerFunc or +// by the caller out-of-band. Failure to do so will result in a memory leak. 
+type ResponseHandlerFunc func(*http.Response) error + +// LenReader is an interface implemented by many in-memory io.Reader's. Used +// for automatically sending the right Content-Length header when possible. +type LenReader interface { + Len() int +} + +// Request wraps the metadata needed to create HTTP requests. +type Request struct { + // body is a seekable reader over the request body payload. This is + // used to rewind the request data in between retries. + body ReaderFunc + + responseHandler ResponseHandlerFunc + + // Embed an HTTP request directly. This makes a *Request act exactly + // like an *http.Request so that all meta methods are supported. + *http.Request +} + +// WithContext returns wrapped Request with a shallow copy of underlying *http.Request +// with its context changed to ctx. The provided ctx must be non-nil. +func (r *Request) WithContext(ctx context.Context) *Request { + return &Request{ + body: r.body, + responseHandler: r.responseHandler, + Request: r.Request.WithContext(ctx), + } +} + +// SetResponseHandler allows setting the response handler. +func (r *Request) SetResponseHandler(fn ResponseHandlerFunc) { + r.responseHandler = fn +} + +// BodyBytes allows accessing the request body. It is an analogue to +// http.Request's Body variable, but it returns a copy of the underlying data +// rather than consuming it. +// +// This function is not thread-safe; do not call it at the same time as another +// call, or at the same time this request is being used with Client.Do. +func (r *Request) BodyBytes() ([]byte, error) { + if r.body == nil { + return nil, nil + } + body, err := r.body() + if err != nil { + return nil, err + } + buf := new(bytes.Buffer) + _, err = buf.ReadFrom(body) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// SetBody allows setting the request body. +// +// It is useful if a new body needs to be set without constructing a new Request. +func (r *Request) SetBody(rawBody interface{}) error { + bodyReader, contentLength, err := getBodyReaderAndContentLength(rawBody) + if err != nil { + return err + } + r.body = bodyReader + r.ContentLength = contentLength + if bodyReader != nil { + r.GetBody = func() (io.ReadCloser, error) { + body, err := bodyReader() + if err != nil { + return nil, err + } + if rc, ok := body.(io.ReadCloser); ok { + return rc, nil + } + return io.NopCloser(body), nil + } + } else { + r.GetBody = func() (io.ReadCloser, error) { return http.NoBody, nil } + } + return nil +} + +// WriteTo allows copying the request body into a writer. +// +// It writes data to w until there's no more data to write or +// when an error occurs. The return int64 value is the number of bytes +// written. Any error encountered during the write is also returned. +// The signature matches io.WriterTo interface. +func (r *Request) WriteTo(w io.Writer) (int64, error) { + body, err := r.body() + if err != nil { + return 0, err + } + if c, ok := body.(io.Closer); ok { + defer c.Close() + } + return io.Copy(w, body) +} + +func getBodyReaderAndContentLength(rawBody interface{}) (ReaderFunc, int64, error) { + var bodyReader ReaderFunc + var contentLength int64 + + switch body := rawBody.(type) { + // If they gave us a function already, great! Use it. 
+ case ReaderFunc: + bodyReader = body + tmp, err := body() + if err != nil { + return nil, 0, err + } + if lr, ok := tmp.(LenReader); ok { + contentLength = int64(lr.Len()) + } + if c, ok := tmp.(io.Closer); ok { + c.Close() + } + + case func() (io.Reader, error): + bodyReader = body + tmp, err := body() + if err != nil { + return nil, 0, err + } + if lr, ok := tmp.(LenReader); ok { + contentLength = int64(lr.Len()) + } + if c, ok := tmp.(io.Closer); ok { + c.Close() + } + + // If a regular byte slice, we can read it over and over via new + // readers + case []byte: + buf := body + bodyReader = func() (io.Reader, error) { + return bytes.NewReader(buf), nil + } + contentLength = int64(len(buf)) + + // If a bytes.Buffer we can read the underlying byte slice over and + // over + case *bytes.Buffer: + buf := body + bodyReader = func() (io.Reader, error) { + return bytes.NewReader(buf.Bytes()), nil + } + contentLength = int64(buf.Len()) + + // We prioritize *bytes.Reader here because we don't really want to + // deal with it seeking so want it to match here instead of the + // io.ReadSeeker case. + case *bytes.Reader: + snapshot := *body + bodyReader = func() (io.Reader, error) { + r := snapshot + return &r, nil + } + contentLength = int64(body.Len()) + + // Compat case + case io.ReadSeeker: + raw := body + bodyReader = func() (io.Reader, error) { + _, err := raw.Seek(0, 0) + return io.NopCloser(raw), err + } + if lr, ok := raw.(LenReader); ok { + contentLength = int64(lr.Len()) + } + + // Read all in so we can reset + case io.Reader: + buf, err := io.ReadAll(body) + if err != nil { + return nil, 0, err + } + if len(buf) == 0 { + bodyReader = func() (io.Reader, error) { + return http.NoBody, nil + } + contentLength = 0 + } else { + bodyReader = func() (io.Reader, error) { + return bytes.NewReader(buf), nil + } + contentLength = int64(len(buf)) + } + + // No body provided, nothing to do + case nil: + + // Unrecognized type + default: + return nil, 0, fmt.Errorf("cannot handle type %T", rawBody) + } + return bodyReader, contentLength, nil +} + +// FromRequest wraps an http.Request in a retryablehttp.Request +func FromRequest(r *http.Request) (*Request, error) { + bodyReader, _, err := getBodyReaderAndContentLength(r.Body) + if err != nil { + return nil, err + } + // Could assert contentLength == r.ContentLength + return &Request{body: bodyReader, Request: r}, nil +} + +// NewRequest creates a new wrapped request. +func NewRequest(method, url string, rawBody interface{}) (*Request, error) { + return NewRequestWithContext(context.Background(), method, url, rawBody) +} + +// NewRequestWithContext creates a new wrapped request with the provided context. +// +// The context controls the entire lifetime of a request and its response: +// obtaining a connection, sending the request, and reading the response headers and body. +func NewRequestWithContext(ctx context.Context, method, url string, rawBody interface{}) (*Request, error) { + httpReq, err := http.NewRequestWithContext(ctx, method, url, nil) + if err != nil { + return nil, err + } + + req := &Request{ + Request: httpReq, + } + if err := req.SetBody(rawBody); err != nil { + return nil, err + } + + return req, nil +} + +// Logger interface allows to use other loggers than +// standard log.Logger. +type Logger interface { + Printf(string, ...interface{}) +} + +// LeveledLogger is an interface that can be implemented by any logger or a +// logger wrapper to provide leveled logging. 
The methods accept a message +// string and a variadic number of key-value pairs. For log.Printf style +// formatting where message string contains a format specifier, use Logger +// interface. +type LeveledLogger interface { + Error(msg string, keysAndValues ...interface{}) + Info(msg string, keysAndValues ...interface{}) + Debug(msg string, keysAndValues ...interface{}) + Warn(msg string, keysAndValues ...interface{}) +} + +// hookLogger adapts an LeveledLogger to Logger for use by the existing hook functions +// without changing the API. +type hookLogger struct { + LeveledLogger +} + +func (h hookLogger) Printf(s string, args ...interface{}) { + h.Info(fmt.Sprintf(s, args...)) +} + +// RequestLogHook allows a function to run before each retry. The HTTP +// request which will be made, and the retry number (0 for the initial +// request) are available to users. The internal logger is exposed to +// consumers. +type RequestLogHook func(Logger, *http.Request, int) + +// ResponseLogHook is like RequestLogHook, but allows running a function +// on each HTTP response. This function will be invoked at the end of +// every HTTP request executed, regardless of whether a subsequent retry +// needs to be performed or not. If the response body is read or closed +// from this method, this will affect the response returned from Do(). +type ResponseLogHook func(Logger, *http.Response) + +// CheckRetry specifies a policy for handling retries. It is called +// following each request with the response and error values returned by +// the http.Client. If CheckRetry returns false, the Client stops retrying +// and returns the response to the caller. If CheckRetry returns an error, +// that error value is returned in lieu of the error from the request. The +// Client will close any response body when retrying, but if the retry is +// aborted it is up to the CheckRetry callback to properly close any +// response body before returning. +type CheckRetry func(ctx context.Context, resp *http.Response, err error) (bool, error) + +// Backoff specifies a policy for how long to wait between retries. +// It is called after a failing request to determine the amount of time +// that should pass before trying again. +type Backoff func(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration + +// ErrorHandler is called if retries are expired, containing the last status +// from the http library. If not specified, default behavior for the library is +// to close the body and return an error indicating how many tries were +// attempted. If overriding this, be sure to close the body if needed. +type ErrorHandler func(resp *http.Response, err error, numTries int) (*http.Response, error) + +// PrepareRetry is called before retry operation. It can be used for example to re-sign the request +type PrepareRetry func(req *http.Request) error + +// Client is used to make HTTP requests. It adds additional functionality +// like automatic retries to tolerate minor outages. +type Client struct { + HTTPClient *http.Client // Internal HTTP client. + Logger interface{} // Customer logger instance. Can be either Logger or LeveledLogger + + RetryWaitMin time.Duration // Minimum time to wait + RetryWaitMax time.Duration // Maximum time to wait + RetryMax int // Maximum number of retries + + // RequestLogHook allows a user-supplied function to be called + // before each retry. 
+ RequestLogHook RequestLogHook + + // ResponseLogHook allows a user-supplied function to be called + // with the response from each HTTP request executed. + ResponseLogHook ResponseLogHook + + // CheckRetry specifies the policy for handling retries, and is called + // after each request. The default policy is DefaultRetryPolicy. + CheckRetry CheckRetry + + // Backoff specifies the policy for how long to wait between retries + Backoff Backoff + + // ErrorHandler specifies the custom error handler to use, if any + ErrorHandler ErrorHandler + + // PrepareRetry can prepare the request for retry operation, for example re-sign it + PrepareRetry PrepareRetry + + loggerInit sync.Once + clientInit sync.Once +} + +// NewClient creates a new Client with default settings. +func NewClient() *Client { + return &Client{ + HTTPClient: cleanhttp.DefaultPooledClient(), + Logger: defaultLogger, + RetryWaitMin: defaultRetryWaitMin, + RetryWaitMax: defaultRetryWaitMax, + RetryMax: defaultRetryMax, + CheckRetry: DefaultRetryPolicy, + Backoff: DefaultBackoff, + } +} + +func (c *Client) logger() interface{} { + c.loggerInit.Do(func() { + if c.Logger == nil { + return + } + + switch c.Logger.(type) { + case Logger, LeveledLogger: + // ok + default: + // This should happen in dev when they are setting Logger and work on code, not in prod. + panic(fmt.Sprintf("invalid logger type passed, must be Logger or LeveledLogger, was %T", c.Logger)) + } + }) + + return c.Logger +} + +// DefaultRetryPolicy provides a default callback for Client.CheckRetry, which +// will retry on connection errors and server errors. +func DefaultRetryPolicy(ctx context.Context, resp *http.Response, err error) (bool, error) { + // do not retry on context.Canceled or context.DeadlineExceeded + if ctx.Err() != nil { + return false, ctx.Err() + } + + // don't propagate other errors + shouldRetry, _ := baseRetryPolicy(resp, err) + return shouldRetry, nil +} + +// ErrorPropagatedRetryPolicy is the same as DefaultRetryPolicy, except it +// propagates errors back instead of returning nil. This allows you to inspect +// why it decided to retry or not. +func ErrorPropagatedRetryPolicy(ctx context.Context, resp *http.Response, err error) (bool, error) { + // do not retry on context.Canceled or context.DeadlineExceeded + if ctx.Err() != nil { + return false, ctx.Err() + } + + return baseRetryPolicy(resp, err) +} + +func baseRetryPolicy(resp *http.Response, err error) (bool, error) { + if err != nil { + if v, ok := err.(*url.Error); ok { + // Don't retry if the error was due to too many redirects. + if redirectsErrorRe.MatchString(v.Error()) { + return false, v + } + + // Don't retry if the error was due to an invalid protocol scheme. + if schemeErrorRe.MatchString(v.Error()) { + return false, v + } + + // Don't retry if the error was due to an invalid header. + if invalidHeaderErrorRe.MatchString(v.Error()) { + return false, v + } + + // Don't retry if the error was due to TLS cert verification failure. + if notTrustedErrorRe.MatchString(v.Error()) { + return false, v + } + if isCertError(v.Err) { + return false, v + } + } + + // The error is likely recoverable so retry. + return true, nil + } + + // 429 Too Many Requests is recoverable. Sometimes the server puts + // a Retry-After response header to indicate when the server is + // available to start processing request from client. + if resp.StatusCode == http.StatusTooManyRequests { + return true, nil + } + + // Check the response code. 
We retry on 500-range responses to allow + // the server time to recover, as 500's are typically not permanent + // errors and may relate to outages on the server side. This will catch + // invalid response codes as well, like 0 and 999. + if resp.StatusCode == 0 || (resp.StatusCode >= 500 && resp.StatusCode != http.StatusNotImplemented) { + return true, fmt.Errorf("unexpected HTTP status %s", resp.Status) + } + + return false, nil +} + +// DefaultBackoff provides a default callback for Client.Backoff which +// will perform exponential backoff based on the attempt number and limited +// by the provided minimum and maximum durations. +// +// It also tries to parse Retry-After response header when a http.StatusTooManyRequests +// (HTTP Code 429) is found in the resp parameter. Hence it will return the number of +// seconds the server states it may be ready to process more requests from this client. +func DefaultBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration { + if resp != nil { + if resp.StatusCode == http.StatusTooManyRequests || resp.StatusCode == http.StatusServiceUnavailable { + if sleep, ok := parseRetryAfterHeader(resp.Header["Retry-After"]); ok { + return sleep + } + } + } + + mult := math.Pow(2, float64(attemptNum)) * float64(min) + sleep := time.Duration(mult) + if float64(sleep) != mult || sleep > max { + sleep = max + } + return sleep +} + +// parseRetryAfterHeader parses the Retry-After header and returns the +// delay duration according to the spec: https://httpwg.org/specs/rfc7231.html#header.retry-after +// The bool returned will be true if the header was successfully parsed. +// Otherwise, the header was either not present, or was not parseable according to the spec. +// +// Retry-After headers come in two flavors: Seconds or HTTP-Date +// +// Examples: +// * Retry-After: Fri, 31 Dec 1999 23:59:59 GMT +// * Retry-After: 120 +func parseRetryAfterHeader(headers []string) (time.Duration, bool) { + if len(headers) == 0 || headers[0] == "" { + return 0, false + } + header := headers[0] + // Retry-After: 120 + if sleep, err := strconv.ParseInt(header, 10, 64); err == nil { + if sleep < 0 { // a negative sleep doesn't make sense + return 0, false + } + return time.Second * time.Duration(sleep), true + } + + // Retry-After: Fri, 31 Dec 1999 23:59:59 GMT + retryTime, err := time.Parse(time.RFC1123, header) + if err != nil { + return 0, false + } + if until := retryTime.Sub(timeNow()); until > 0 { + return until, true + } + // date is in the past + return 0, true +} + +// LinearJitterBackoff provides a callback for Client.Backoff which will +// perform linear backoff based on the attempt number and with jitter to +// prevent a thundering herd. +// +// min and max here are *not* absolute values. The number to be multiplied by +// the attempt number will be chosen at random from between them, thus they are +// bounding the jitter. +// +// For instance: +// * To get strictly linear backoff of one second increasing each retry, set +// both to one second (1s, 2s, 3s, 4s, ...) +// * To get a small amount of jitter centered around one second increasing each +// retry, set to around one second, such as a min of 800ms and max of 1200ms +// (892ms, 2102ms, 2945ms, 4312ms, ...) +// * To get extreme jitter, set to a very wide spread, such as a min of 100ms +// and a max of 20s (15382ms, 292ms, 51321ms, 35234ms, ...) 
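+
+// exampleJitteredClient is an illustrative sketch (not part of the vendored
+// library) showing how a caller might wire LinearJitterBackoff into a Client
+// in place of DefaultBackoff; the retry limits, wait bounds, and URL below
+// are assumptions.
+func exampleJitteredClient() (*http.Response, error) {
+	c := NewClient()
+	c.RetryMax = 5
+	c.RetryWaitMin = 500 * time.Millisecond
+	c.RetryWaitMax = 5 * time.Second
+	c.Backoff = LinearJitterBackoff // jittered linear waits instead of exponential backoff
+	return c.Get("https://druid.example.com/status/health")
+}
+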
+func LinearJitterBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration { + // attemptNum always starts at zero but we want to start at 1 for multiplication + attemptNum++ + + if max <= min { + // Unclear what to do here, or they are the same, so return min * + // attemptNum + return min * time.Duration(attemptNum) + } + + // Seed rand; doing this every time is fine + source := rand.New(rand.NewSource(int64(time.Now().Nanosecond()))) + + // Pick a random number that lies somewhere between the min and max and + // multiply by the attemptNum. attemptNum starts at zero so we always + // increment here. We first get a random percentage, then apply that to the + // difference between min and max, and add to min. + jitter := source.Float64() * float64(max-min) + jitterMin := int64(jitter) + int64(min) + return time.Duration(jitterMin * int64(attemptNum)) +} + +// PassthroughErrorHandler is an ErrorHandler that directly passes through the +// values from the net/http library for the final request. The body is not +// closed. +func PassthroughErrorHandler(resp *http.Response, err error, _ int) (*http.Response, error) { + return resp, err +} + +// Do wraps calling an HTTP method with retries. +func (c *Client) Do(req *Request) (*http.Response, error) { + c.clientInit.Do(func() { + if c.HTTPClient == nil { + c.HTTPClient = cleanhttp.DefaultPooledClient() + } + }) + + logger := c.logger() + + if logger != nil { + switch v := logger.(type) { + case LeveledLogger: + v.Debug("performing request", "method", req.Method, "url", redactURL(req.URL)) + case Logger: + v.Printf("[DEBUG] %s %s", req.Method, redactURL(req.URL)) + } + } + + var resp *http.Response + var attempt int + var shouldRetry bool + var doErr, respErr, checkErr, prepareErr error + + for i := 0; ; i++ { + doErr, respErr, prepareErr = nil, nil, nil + attempt++ + + // Always rewind the request body when non-nil. + if req.body != nil { + body, err := req.body() + if err != nil { + c.HTTPClient.CloseIdleConnections() + return resp, err + } + if c, ok := body.(io.ReadCloser); ok { + req.Body = c + } else { + req.Body = io.NopCloser(body) + } + } + + if c.RequestLogHook != nil { + switch v := logger.(type) { + case LeveledLogger: + c.RequestLogHook(hookLogger{v}, req.Request, i) + case Logger: + c.RequestLogHook(v, req.Request, i) + default: + c.RequestLogHook(nil, req.Request, i) + } + } + + // Attempt the request + resp, doErr = c.HTTPClient.Do(req.Request) + + // Check if we should continue with retries. + shouldRetry, checkErr = c.CheckRetry(req.Context(), resp, doErr) + if !shouldRetry && doErr == nil && req.responseHandler != nil { + respErr = req.responseHandler(resp) + shouldRetry, checkErr = c.CheckRetry(req.Context(), resp, respErr) + } + + err := doErr + if respErr != nil { + err = respErr + } + if err != nil { + switch v := logger.(type) { + case LeveledLogger: + v.Error("request failed", "error", err, "method", req.Method, "url", redactURL(req.URL)) + case Logger: + v.Printf("[ERR] %s %s request failed: %v", req.Method, redactURL(req.URL), err) + } + } else { + // Call this here to maintain the behavior of logging all requests, + // even if CheckRetry signals to stop. + if c.ResponseLogHook != nil { + // Call the response logger function if provided. 
+ switch v := logger.(type) { + case LeveledLogger: + c.ResponseLogHook(hookLogger{v}, resp) + case Logger: + c.ResponseLogHook(v, resp) + default: + c.ResponseLogHook(nil, resp) + } + } + } + + if !shouldRetry { + break + } + + // We do this before drainBody because there's no need for the I/O if + // we're breaking out + remain := c.RetryMax - i + if remain <= 0 { + break + } + + // We're going to retry, consume any response to reuse the connection. + if doErr == nil { + c.drainBody(resp.Body) + } + + wait := c.Backoff(c.RetryWaitMin, c.RetryWaitMax, i, resp) + if logger != nil { + desc := fmt.Sprintf("%s %s", req.Method, redactURL(req.URL)) + if resp != nil { + desc = fmt.Sprintf("%s (status: %d)", desc, resp.StatusCode) + } + switch v := logger.(type) { + case LeveledLogger: + v.Debug("retrying request", "request", desc, "timeout", wait, "remaining", remain) + case Logger: + v.Printf("[DEBUG] %s: retrying in %s (%d left)", desc, wait, remain) + } + } + timer := time.NewTimer(wait) + select { + case <-req.Context().Done(): + timer.Stop() + c.HTTPClient.CloseIdleConnections() + return nil, req.Context().Err() + case <-timer.C: + } + + // Make shallow copy of http Request so that we can modify its body + // without racing against the closeBody call in persistConn.writeLoop. + httpreq := *req.Request + req.Request = &httpreq + + if c.PrepareRetry != nil { + if err := c.PrepareRetry(req.Request); err != nil { + prepareErr = err + break + } + } + } + + // this is the closest we have to success criteria + if doErr == nil && respErr == nil && checkErr == nil && prepareErr == nil && !shouldRetry { + return resp, nil + } + + defer c.HTTPClient.CloseIdleConnections() + + var err error + if prepareErr != nil { + err = prepareErr + } else if checkErr != nil { + err = checkErr + } else if respErr != nil { + err = respErr + } else { + err = doErr + } + + if c.ErrorHandler != nil { + return c.ErrorHandler(resp, err, attempt) + } + + // By default, we close the response body and return an error without + // returning the response + if resp != nil { + c.drainBody(resp.Body) + } + + // this means CheckRetry thought the request was a failure, but didn't + // communicate why + if err == nil { + return nil, fmt.Errorf("%s %s giving up after %d attempt(s)", + req.Method, redactURL(req.URL), attempt) + } + + return nil, fmt.Errorf("%s %s giving up after %d attempt(s): %w", + req.Method, redactURL(req.URL), attempt, err) +} + +// Try to read the response body so we can reuse this connection. +func (c *Client) drainBody(body io.ReadCloser) { + defer body.Close() + _, err := io.Copy(io.Discard, io.LimitReader(body, respReadLimit)) + if err != nil { + if c.logger() != nil { + switch v := c.logger().(type) { + case LeveledLogger: + v.Error("error reading response body", "error", err) + case Logger: + v.Printf("[ERR] error reading response body: %v", err) + } + } + } +} + +// Get is a shortcut for doing a GET request without making a new client. +func Get(url string) (*http.Response, error) { + return defaultClient.Get(url) +} + +// Get is a convenience helper for doing simple GET requests. +func (c *Client) Get(url string) (*http.Response, error) { + req, err := NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + return c.Do(req) +} + +// Head is a shortcut for doing a HEAD request without making a new client. +func Head(url string) (*http.Response, error) { + return defaultClient.Head(url) +} + +// Head is a convenience method for doing simple HEAD requests. 
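+
+// examplePostJSON is an illustrative sketch (not part of the vendored
+// library): it wraps a []byte payload with NewRequest so the body can be
+// replayed on every retry, then sends it through Do. The URL and payload
+// are assumptions.
+func examplePostJSON(c *Client) (*http.Response, error) {
+	req, err := NewRequest("POST", "https://druid.example.com/druid/v2/sql", []byte(`{"query":"SELECT 1"}`))
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("Content-Type", "application/json")
+	return c.Do(req)
+}
+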
+func (c *Client) Head(url string) (*http.Response, error) { + req, err := NewRequest("HEAD", url, nil) + if err != nil { + return nil, err + } + return c.Do(req) +} + +// Post is a shortcut for doing a POST request without making a new client. +func Post(url, bodyType string, body interface{}) (*http.Response, error) { + return defaultClient.Post(url, bodyType, body) +} + +// Post is a convenience method for doing simple POST requests. +func (c *Client) Post(url, bodyType string, body interface{}) (*http.Response, error) { + req, err := NewRequest("POST", url, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", bodyType) + return c.Do(req) +} + +// PostForm is a shortcut to perform a POST with form data without creating +// a new client. +func PostForm(url string, data url.Values) (*http.Response, error) { + return defaultClient.PostForm(url, data) +} + +// PostForm is a convenience method for doing simple POST operations using +// pre-filled url.Values form data. +func (c *Client) PostForm(url string, data url.Values) (*http.Response, error) { + return c.Post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} + +// StandardClient returns a stdlib *http.Client with a custom Transport, which +// shims in a *retryablehttp.Client for added retries. +func (c *Client) StandardClient() *http.Client { + return &http.Client{ + Transport: &RoundTripper{Client: c}, + } +} + +// Taken from url.URL#Redacted() which was introduced in go 1.15. +// We can switch to using it directly if we'll bump the minimum required go version. +func redactURL(u *url.URL) string { + if u == nil { + return "" + } + + ru := *u + if _, has := ru.User.Password(); has { + ru.User = url.UserPassword(ru.User.Username(), "xxxxx") + } + return ru.String() +} diff --git a/vendor/github.com/hashicorp/go-retryablehttp/roundtripper.go b/vendor/github.com/hashicorp/go-retryablehttp/roundtripper.go new file mode 100644 index 000000000..8c407adb3 --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/roundtripper.go @@ -0,0 +1,55 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package retryablehttp + +import ( + "errors" + "net/http" + "net/url" + "sync" +) + +// RoundTripper implements the http.RoundTripper interface, using a retrying +// HTTP client to execute requests. +// +// It is important to note that retryablehttp doesn't always act exactly as a +// RoundTripper should. This is highly dependent on the retryable client's +// configuration. +type RoundTripper struct { + // The client to use during requests. If nil, the default retryablehttp + // client and settings will be used. + Client *Client + + // once ensures that the logic to initialize the default client runs at + // most once, in a single thread. + once sync.Once +} + +// init initializes the underlying retryable client. +func (rt *RoundTripper) init() { + if rt.Client == nil { + rt.Client = NewClient() + } +} + +// RoundTrip satisfies the http.RoundTripper interface. +func (rt *RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + rt.once.Do(rt.init) + + // Convert the request to be retryable. + retryableReq, err := FromRequest(req) + if err != nil { + return nil, err + } + + // Execute the request. + resp, err := rt.Client.Do(retryableReq) + // If we got an error returned by standard library's `Do` method, unwrap it + // otherwise we will wind up erroneously re-nesting the error. 
+ if _, ok := err.(*url.Error); ok { + return resp, errors.Unwrap(err) + } + + return resp, err +} diff --git a/vendor/github.com/magefile/mage/LICENSE b/vendor/github.com/magefile/mage/LICENSE new file mode 100644 index 000000000..d0632bc14 --- /dev/null +++ b/vendor/github.com/magefile/mage/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2017 the Mage authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/magefile/mage/mg/color.go b/vendor/github.com/magefile/mage/mg/color.go new file mode 100644 index 000000000..3e2710332 --- /dev/null +++ b/vendor/github.com/magefile/mage/mg/color.go @@ -0,0 +1,80 @@ +package mg + +// Color is ANSI color type +type Color int + +// If you add/change/remove any items in this constant, +// you will need to run "stringer -type=Color" in this directory again. +// NOTE: Please keep the list in an alphabetical order. +const ( + Black Color = iota + Red + Green + Yellow + Blue + Magenta + Cyan + White + BrightBlack + BrightRed + BrightGreen + BrightYellow + BrightBlue + BrightMagenta + BrightCyan + BrightWhite +) + +// AnsiColor are ANSI color codes for supported terminal colors. +var ansiColor = map[Color]string{ + Black: "\u001b[30m", + Red: "\u001b[31m", + Green: "\u001b[32m", + Yellow: "\u001b[33m", + Blue: "\u001b[34m", + Magenta: "\u001b[35m", + Cyan: "\u001b[36m", + White: "\u001b[37m", + BrightBlack: "\u001b[30;1m", + BrightRed: "\u001b[31;1m", + BrightGreen: "\u001b[32;1m", + BrightYellow: "\u001b[33;1m", + BrightBlue: "\u001b[34;1m", + BrightMagenta: "\u001b[35;1m", + BrightCyan: "\u001b[36;1m", + BrightWhite: "\u001b[37;1m", +} + +// AnsiColorReset is an ANSI color code to reset the terminal color. +const AnsiColorReset = "\033[0m" + +// DefaultTargetAnsiColor is a default ANSI color for colorizing targets. +// It is set to Cyan as an arbitrary color, because it has a neutral meaning +var DefaultTargetAnsiColor = ansiColor[Cyan] + +func toLowerCase(s string) string { + // this is a naive implementation + // borrowed from https://golang.org/src/strings/strings.go + // and only considers alphabetical characters [a-zA-Z] + // so that we don't depend on the "strings" package + buf := make([]byte, len(s)) + for i := 0; i < len(s); i++ { + c := s[i] + if 'A' <= c && c <= 'Z' { + c += 'a' - 'A' + } + buf[i] = c + } + return string(buf) +} + +func getAnsiColor(color string) (string, bool) { + colorLower := toLowerCase(color) + for k, v := range ansiColor { + colorConstLower := toLowerCase(k.String()) + if colorConstLower == colorLower { + return v, true + } + } + return "", false +} diff --git a/vendor/github.com/magefile/mage/mg/color_string.go b/vendor/github.com/magefile/mage/mg/color_string.go new file mode 100644 index 000000000..06debca54 --- /dev/null +++ b/vendor/github.com/magefile/mage/mg/color_string.go @@ -0,0 +1,38 @@ +// Code generated by "stringer -type=Color"; DO NOT EDIT. + +package mg + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[Black-0] + _ = x[Red-1] + _ = x[Green-2] + _ = x[Yellow-3] + _ = x[Blue-4] + _ = x[Magenta-5] + _ = x[Cyan-6] + _ = x[White-7] + _ = x[BrightBlack-8] + _ = x[BrightRed-9] + _ = x[BrightGreen-10] + _ = x[BrightYellow-11] + _ = x[BrightBlue-12] + _ = x[BrightMagenta-13] + _ = x[BrightCyan-14] + _ = x[BrightWhite-15] +} + +const _Color_name = "BlackRedGreenYellowBlueMagentaCyanWhiteBrightBlackBrightRedBrightGreenBrightYellowBrightBlueBrightMagentaBrightCyanBrightWhite" + +var _Color_index = [...]uint8{0, 5, 8, 13, 19, 23, 30, 34, 39, 50, 59, 70, 82, 92, 105, 115, 126} + +func (i Color) String() string { + if i < 0 || i >= Color(len(_Color_index)-1) { + return "Color(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Color_name[_Color_index[i]:_Color_index[i+1]] +} diff --git a/vendor/github.com/magefile/mage/mg/deps.go b/vendor/github.com/magefile/mage/mg/deps.go new file mode 100644 index 000000000..cae591d34 --- /dev/null +++ b/vendor/github.com/magefile/mage/mg/deps.go @@ -0,0 +1,204 @@ +package mg + +import ( + "context" + "fmt" + "log" + "os" + "reflect" + "runtime" + "strings" + "sync" +) + +var logger = log.New(os.Stderr, "", 0) + +type onceMap struct { + mu *sync.Mutex + m map[onceKey]*onceFun +} + +type onceKey struct { + Name string + ID string +} + +func (o *onceMap) LoadOrStore(f Fn) *onceFun { + defer o.mu.Unlock() + o.mu.Lock() + + key := onceKey{ + Name: f.Name(), + ID: f.ID(), + } + existing, ok := o.m[key] + if ok { + return existing + } + one := &onceFun{ + once: &sync.Once{}, + fn: f, + displayName: displayName(f.Name()), + } + o.m[key] = one + return one +} + +var onces = &onceMap{ + mu: &sync.Mutex{}, + m: map[onceKey]*onceFun{}, +} + +// SerialDeps is like Deps except it runs each dependency serially, instead of +// in parallel. This can be useful for resource intensive dependencies that +// shouldn't be run at the same time. +func SerialDeps(fns ...interface{}) { + funcs := checkFns(fns) + ctx := context.Background() + for i := range fns { + runDeps(ctx, funcs[i:i+1]) + } +} + +// SerialCtxDeps is like CtxDeps except it runs each dependency serially, +// instead of in parallel. This can be useful for resource intensive +// dependencies that shouldn't be run at the same time. +func SerialCtxDeps(ctx context.Context, fns ...interface{}) { + funcs := checkFns(fns) + for i := range fns { + runDeps(ctx, funcs[i:i+1]) + } +} + +// CtxDeps runs the given functions as dependencies of the calling function. +// Dependencies must only be of type: +// func() +// func() error +// func(context.Context) +// func(context.Context) error +// Or a similar method on a mg.Namespace type. +// Or an mg.Fn interface. +// +// The function calling Deps is guaranteed that all dependent functions will be +// run exactly once when Deps returns. Dependent functions may in turn declare +// their own dependencies using Deps. Each dependency is run in their own +// goroutines. Each function is given the context provided if the function +// prototype allows for it. +func CtxDeps(ctx context.Context, fns ...interface{}) { + funcs := checkFns(fns) + runDeps(ctx, funcs) +} + +// runDeps assumes you've already called checkFns. 
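+
+// exampleDepsUsage is an illustrative sketch (not part of vendored mage)
+// showing how a target-style function declares prerequisites; the closures
+// are assumed stand-ins for real mage targets.
+func exampleDepsUsage() {
+	clean := func() error { return nil }                       // assumed stand-in for a real target
+	generate := func(ctx context.Context) error { return nil } // assumed stand-in for a real target
+	Deps(clean, generate)       // runs each dependency once, in parallel goroutines
+	SerialDeps(clean, generate) // already satisfied: every Fn runs at most once per process
+}
+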
+func runDeps(ctx context.Context, fns []Fn) { + mu := &sync.Mutex{} + var errs []string + var exit int + wg := &sync.WaitGroup{} + for _, f := range fns { + fn := onces.LoadOrStore(f) + wg.Add(1) + go func() { + defer func() { + if v := recover(); v != nil { + mu.Lock() + if err, ok := v.(error); ok { + exit = changeExit(exit, ExitStatus(err)) + } else { + exit = changeExit(exit, 1) + } + errs = append(errs, fmt.Sprint(v)) + mu.Unlock() + } + wg.Done() + }() + if err := fn.run(ctx); err != nil { + mu.Lock() + errs = append(errs, fmt.Sprint(err)) + exit = changeExit(exit, ExitStatus(err)) + mu.Unlock() + } + }() + } + + wg.Wait() + if len(errs) > 0 { + panic(Fatal(exit, strings.Join(errs, "\n"))) + } +} + +func checkFns(fns []interface{}) []Fn { + funcs := make([]Fn, len(fns)) + for i, f := range fns { + if fn, ok := f.(Fn); ok { + funcs[i] = fn + continue + } + funcs[i] = F(f) + } + return funcs +} + +// Deps runs the given functions in parallel, exactly once. Dependencies must +// only be of type: +// func() +// func() error +// func(context.Context) +// func(context.Context) error +// Or a similar method on a mg.Namespace type. +// Or an mg.Fn interface. +// +// This is a way to build up a tree of dependencies with each dependency +// defining its own dependencies. Functions must have the same signature as a +// Mage target, i.e. optional context argument, optional error return. +func Deps(fns ...interface{}) { + CtxDeps(context.Background(), fns...) +} + +func changeExit(old, new int) int { + if new == 0 { + return old + } + if old == 0 { + return new + } + if old == new { + return old + } + // both different and both non-zero, just set + // exit to 1. Nothing more we can do. + return 1 +} + +// funcName returns the unique name for the function +func funcName(i interface{}) string { + return runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name() +} + +func displayName(name string) string { + splitByPackage := strings.Split(name, ".") + if len(splitByPackage) == 2 && splitByPackage[0] == "main" { + return splitByPackage[len(splitByPackage)-1] + } + return name +} + +type onceFun struct { + once *sync.Once + fn Fn + err error + + displayName string +} + +// run will run the function exactly once and capture the error output. Further runs simply return +// the same error output. +func (o *onceFun) run(ctx context.Context) error { + o.once.Do(func() { + if Verbose() { + logger.Println("Running dependency:", displayName(o.fn.Name())) + } + o.err = o.fn.Run(ctx) + }) + return o.err +} diff --git a/vendor/github.com/magefile/mage/mg/errors.go b/vendor/github.com/magefile/mage/mg/errors.go new file mode 100644 index 000000000..2dd780fe3 --- /dev/null +++ b/vendor/github.com/magefile/mage/mg/errors.go @@ -0,0 +1,51 @@ +package mg + +import ( + "errors" + "fmt" +) + +type fatalErr struct { + code int + error +} + +func (f fatalErr) ExitStatus() int { + return f.code +} + +type exitStatus interface { + ExitStatus() int +} + +// Fatal returns an error that will cause mage to print out the +// given args and exit with the given exit code. +func Fatal(code int, args ...interface{}) error { + return fatalErr{ + code: code, + error: errors.New(fmt.Sprint(args...)), + } +} + +// Fatalf returns an error that will cause mage to print out the +// given message and exit with the given exit code. +func Fatalf(code int, format string, args ...interface{}) error { + return fatalErr{ + code: code, + error: fmt.Errorf(format, args...), + } +} + +// ExitStatus queries the error for an exit status. 
If the error is nil, it +// returns 0. If the error does not implement ExitStatus() int, it returns 1. +// Otherwise it retiurns the value from ExitStatus(). +func ExitStatus(err error) int { + if err == nil { + return 0 + } + exit, ok := err.(exitStatus) + if !ok { + return 1 + } + return exit.ExitStatus() +} diff --git a/vendor/github.com/magefile/mage/mg/fn.go b/vendor/github.com/magefile/mage/mg/fn.go new file mode 100644 index 000000000..573760811 --- /dev/null +++ b/vendor/github.com/magefile/mage/mg/fn.go @@ -0,0 +1,181 @@ +package mg + +import ( + "context" + "encoding/json" + "fmt" + "reflect" + "time" +) + +// Fn represents a function that can be run with mg.Deps. Package, Name, and ID must combine to +// uniquely identify a function, while ensuring the "same" function has identical values. These are +// used as a map key to find and run (or not run) the function. +type Fn interface { + // Name should return the fully qualified name of the function. Usually + // it's best to use runtime.FuncForPC(reflect.ValueOf(f).Pointer()).Name(). + Name() string + + // ID should be an additional uniqueness qualifier in case the name is insufficiently unique. + // This can be the case for functions that take arguments (mg.F json-encodes an array of the + // args). + ID() string + + // Run should run the function. + Run(ctx context.Context) error +} + +// F takes a function that is compatible as a mage target, and any args that need to be passed to +// it, and wraps it in an mg.Fn that mg.Deps can run. Args must be passed in the same order as they +// are declared by the function. Note that you do not need to and should not pass a context.Context +// to F, even if the target takes a context. Compatible args are int, bool, string, and +// time.Duration. +func F(target interface{}, args ...interface{}) Fn { + hasContext, isNamespace, err := checkF(target, args) + if err != nil { + panic(err) + } + id, err := json.Marshal(args) + if err != nil { + panic(fmt.Errorf("can't convert args into a mage-compatible id for mg.Deps: %s", err)) + } + return fn{ + name: funcName(target), + id: string(id), + f: func(ctx context.Context) error { + v := reflect.ValueOf(target) + count := len(args) + if hasContext { + count++ + } + if isNamespace { + count++ + } + vargs := make([]reflect.Value, count) + x := 0 + if isNamespace { + vargs[0] = reflect.ValueOf(struct{}{}) + x++ + } + if hasContext { + vargs[x] = reflect.ValueOf(ctx) + x++ + } + for y := range args { + vargs[x+y] = reflect.ValueOf(args[y]) + } + ret := v.Call(vargs) + if len(ret) > 0 { + // we only allow functions with a single error return, so this should be safe. + if ret[0].IsNil() { + return nil + } + return ret[0].Interface().(error) + } + return nil + }, + } +} + +type fn struct { + name string + id string + f func(ctx context.Context) error +} + +// Name returns the fully qualified name of the function. +func (f fn) Name() string { + return f.name +} + +// ID returns a hash of the argument values passed in +func (f fn) ID() string { + return f.id +} + +// Run runs the function. 
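+
+// exampleParameterizedDep is an illustrative sketch (not part of vendored
+// mage) showing how F binds arguments to a target so it can be handed to
+// Deps; the target closure and its arguments are assumptions.
+func exampleParameterizedDep() {
+	buildFor := func(ctx context.Context, goos string, verbose bool) error {
+		// a real target would cross-compile for goos here
+		return nil
+	}
+	// F json-encodes ("linux", true) into the Fn's ID, so this exact
+	// target/argument combination is still deduplicated by Deps.
+	Deps(F(buildFor, "linux", true))
+}
+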
+func (f fn) Run(ctx context.Context) error { + return f.f(ctx) +} + +func checkF(target interface{}, args []interface{}) (hasContext, isNamespace bool, _ error) { + t := reflect.TypeOf(target) + if t.Kind() != reflect.Func { + return false, false, fmt.Errorf("non-function passed to mg.F: %T", target) + } + + if t.NumOut() > 1 { + return false, false, fmt.Errorf("target has too many return values, must be zero or just an error: %T", target) + } + if t.NumOut() == 1 && t.Out(0) != errType { + return false, false, fmt.Errorf("target's return value is not an error") + } + + // more inputs than slots is always an error + if len(args) > t.NumIn() { + return false, false, fmt.Errorf("too many arguments for target, got %d for %T", len(args), target) + } + + if t.NumIn() == 0 { + return false, false, nil + } + + x := 0 + inputs := t.NumIn() + + if t.In(0).AssignableTo(emptyType) { + // nameSpace func + isNamespace = true + x++ + // callers must leave off the namespace value + inputs-- + } + if t.NumIn() > x && t.In(x) == ctxType { + // callers must leave off the context + inputs-- + + // let the upper function know it should pass us a context. + hasContext = true + + // skip checking the first argument in the below loop if it's a context, since first arg is + // special. + x++ + } + + if len(args) != inputs { + return false, false, fmt.Errorf("wrong number of arguments for target, got %d for %T", len(args), target) + } + + for _, arg := range args { + argT := t.In(x) + if !argTypes[argT] { + return false, false, fmt.Errorf("argument %d (%s), is not a supported argument type", x, argT) + } + passedT := reflect.TypeOf(arg) + if argT != passedT { + return false, false, fmt.Errorf("argument %d expected to be %s, but is %s", x, argT, passedT) + } + x++ + } + return hasContext, isNamespace, nil +} + +// Here we define the types that are supported as arguments/returns +var ( + ctxType = reflect.TypeOf(func(context.Context) {}).In(0) + errType = reflect.TypeOf(func() error { return nil }).Out(0) + emptyType = reflect.TypeOf(struct{}{}) + + intType = reflect.TypeOf(int(0)) + stringType = reflect.TypeOf(string("")) + boolType = reflect.TypeOf(bool(false)) + durType = reflect.TypeOf(time.Second) + + // don't put ctx in here, this is for non-context types + argTypes = map[reflect.Type]bool{ + intType: true, + boolType: true, + stringType: true, + durType: true, + } +) diff --git a/vendor/github.com/magefile/mage/mg/runtime.go b/vendor/github.com/magefile/mage/mg/runtime.go new file mode 100644 index 000000000..9a8de12ce --- /dev/null +++ b/vendor/github.com/magefile/mage/mg/runtime.go @@ -0,0 +1,136 @@ +package mg + +import ( + "os" + "path/filepath" + "runtime" + "strconv" +) + +// CacheEnv is the environment variable that users may set to change the +// location where mage stores its compiled binaries. +const CacheEnv = "MAGEFILE_CACHE" + +// VerboseEnv is the environment variable that indicates the user requested +// verbose mode when running a magefile. +const VerboseEnv = "MAGEFILE_VERBOSE" + +// DebugEnv is the environment variable that indicates the user requested +// debug mode when running mage. +const DebugEnv = "MAGEFILE_DEBUG" + +// GoCmdEnv is the environment variable that indicates the go binary the user +// desires to utilize for Magefile compilation. +const GoCmdEnv = "MAGEFILE_GOCMD" + +// IgnoreDefaultEnv is the environment variable that indicates the user requested +// to ignore the default target specified in the magefile. 
+const IgnoreDefaultEnv = "MAGEFILE_IGNOREDEFAULT" + +// HashFastEnv is the environment variable that indicates the user requested to +// use a quick hash of magefiles to determine whether or not the magefile binary +// needs to be rebuilt. This results in faster runtimes, but means that mage +// will fail to rebuild if a dependency has changed. To force a rebuild, run +// mage with the -f flag. +const HashFastEnv = "MAGEFILE_HASHFAST" + +// EnableColorEnv is the environment variable that indicates the user is using +// a terminal which supports a color output. The default is false for backwards +// compatibility. When the value is true and the detected terminal does support colors +// then the list of mage targets will be displayed in ANSI color. When the value +// is true but the detected terminal does not support colors, then the list of +// mage targets will be displayed in the default colors (e.g. black and white). +const EnableColorEnv = "MAGEFILE_ENABLE_COLOR" + +// TargetColorEnv is the environment variable that indicates which ANSI color +// should be used to colorize mage targets. This is only applicable when +// the MAGEFILE_ENABLE_COLOR environment variable is true. +// The supported ANSI color names are any of these: +// - Black +// - Red +// - Green +// - Yellow +// - Blue +// - Magenta +// - Cyan +// - White +// - BrightBlack +// - BrightRed +// - BrightGreen +// - BrightYellow +// - BrightBlue +// - BrightMagenta +// - BrightCyan +// - BrightWhite +const TargetColorEnv = "MAGEFILE_TARGET_COLOR" + +// Verbose reports whether a magefile was run with the verbose flag. +func Verbose() bool { + b, _ := strconv.ParseBool(os.Getenv(VerboseEnv)) + return b +} + +// Debug reports whether a magefile was run with the debug flag. +func Debug() bool { + b, _ := strconv.ParseBool(os.Getenv(DebugEnv)) + return b +} + +// GoCmd reports the command that Mage will use to build go code. By default mage runs +// the "go" binary in the PATH. +func GoCmd() string { + if cmd := os.Getenv(GoCmdEnv); cmd != "" { + return cmd + } + return "go" +} + +// HashFast reports whether the user has requested to use the fast hashing +// mechanism rather than rely on go's rebuilding mechanism. +func HashFast() bool { + b, _ := strconv.ParseBool(os.Getenv(HashFastEnv)) + return b +} + +// IgnoreDefault reports whether the user has requested to ignore the default target +// in the magefile. +func IgnoreDefault() bool { + b, _ := strconv.ParseBool(os.Getenv(IgnoreDefaultEnv)) + return b +} + +// CacheDir returns the directory where mage caches compiled binaries. It +// defaults to $HOME/.magefile, but may be overridden by the MAGEFILE_CACHE +// environment variable. +func CacheDir() string { + d := os.Getenv(CacheEnv) + if d != "" { + return d + } + switch runtime.GOOS { + case "windows": + return filepath.Join(os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH"), "magefile") + default: + return filepath.Join(os.Getenv("HOME"), ".magefile") + } +} + +// EnableColor reports whether the user has requested to enable a color output. +func EnableColor() bool { + b, _ := strconv.ParseBool(os.Getenv(EnableColorEnv)) + return b +} + +// TargetColor returns the configured ANSI color name a color output. 
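+
+// exampleRuntimeFlags is an illustrative sketch (not part of vendored mage)
+// showing how the MAGEFILE_* variables surface through these helpers; the
+// message below is an assumption.
+func exampleRuntimeFlags() string {
+	goCmd := GoCmd() // "go" unless MAGEFILE_GOCMD overrides it
+	if Verbose() {   // MAGEFILE_VERBOSE=1 or `mage -v`
+		println("mage: building with", goCmd, "cache dir:", CacheDir())
+	}
+	return goCmd
+}
+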
+func TargetColor() string { + s, exists := os.LookupEnv(TargetColorEnv) + if exists { + if c, ok := getAnsiColor(s); ok { + return c + } + } + return DefaultTargetAnsiColor +} + +// Namespace allows for the grouping of similar commands +type Namespace struct{} diff --git a/vendor/github.com/magefile/mage/sh/cmd.go b/vendor/github.com/magefile/mage/sh/cmd.go new file mode 100644 index 000000000..06af62de2 --- /dev/null +++ b/vendor/github.com/magefile/mage/sh/cmd.go @@ -0,0 +1,177 @@ +package sh + +import ( + "bytes" + "fmt" + "io" + "log" + "os" + "os/exec" + "strings" + + "github.com/magefile/mage/mg" +) + +// RunCmd returns a function that will call Run with the given command. This is +// useful for creating command aliases to make your scripts easier to read, like +// this: +// +// // in a helper file somewhere +// var g0 = sh.RunCmd("go") // go is a keyword :( +// +// // somewhere in your main code +// if err := g0("install", "github.com/gohugo/hugo"); err != nil { +// return err +// } +// +// Args passed to command get baked in as args to the command when you run it. +// Any args passed in when you run the returned function will be appended to the +// original args. For example, this is equivalent to the above: +// +// var goInstall = sh.RunCmd("go", "install") goInstall("github.com/gohugo/hugo") +// +// RunCmd uses Exec underneath, so see those docs for more details. +func RunCmd(cmd string, args ...string) func(args ...string) error { + return func(args2 ...string) error { + return Run(cmd, append(args, args2...)...) + } +} + +// OutCmd is like RunCmd except the command returns the output of the +// command. +func OutCmd(cmd string, args ...string) func(args ...string) (string, error) { + return func(args2 ...string) (string, error) { + return Output(cmd, append(args, args2...)...) + } +} + +// Run is like RunWith, but doesn't specify any environment variables. +func Run(cmd string, args ...string) error { + return RunWith(nil, cmd, args...) +} + +// RunV is like Run, but always sends the command's stdout to os.Stdout. +func RunV(cmd string, args ...string) error { + _, err := Exec(nil, os.Stdout, os.Stderr, cmd, args...) + return err +} + +// RunWith runs the given command, directing stderr to this program's stderr and +// printing stdout to stdout if mage was run with -v. It adds adds env to the +// environment variables for the command being run. Environment variables should +// be in the format name=value. +func RunWith(env map[string]string, cmd string, args ...string) error { + var output io.Writer + if mg.Verbose() { + output = os.Stdout + } + _, err := Exec(env, output, os.Stderr, cmd, args...) + return err +} + +// RunWithV is like RunWith, but always sends the command's stdout to os.Stdout. +func RunWithV(env map[string]string, cmd string, args ...string) error { + _, err := Exec(env, os.Stdout, os.Stderr, cmd, args...) + return err +} + +// Output runs the command and returns the text from stdout. +func Output(cmd string, args ...string) (string, error) { + buf := &bytes.Buffer{} + _, err := Exec(nil, buf, os.Stderr, cmd, args...) + return strings.TrimSuffix(buf.String(), "\n"), err +} + +// OutputWith is like RunWith, but returns what is written to stdout. +func OutputWith(env map[string]string, cmd string, args ...string) (string, error) { + buf := &bytes.Buffer{} + _, err := Exec(env, buf, os.Stderr, cmd, args...) 
+ return strings.TrimSuffix(buf.String(), "\n"), err +} + +// Exec executes the command, piping its stderr to mage's stderr and +// piping its stdout to the given writer. If the command fails, it will return +// an error that, if returned from a target or mg.Deps call, will cause mage to +// exit with the same code as the command failed with. Env is a list of +// environment variables to set when running the command, these override the +// current environment variables set (which are also passed to the command). cmd +// and args may include references to environment variables in $FOO format, in +// which case these will be expanded before the command is run. +// +// Ran reports if the command ran (rather than was not found or not executable). +// Code reports the exit code the command returned if it ran. If err == nil, ran +// is always true and code is always 0. +func Exec(env map[string]string, stdout, stderr io.Writer, cmd string, args ...string) (ran bool, err error) { + expand := func(s string) string { + s2, ok := env[s] + if ok { + return s2 + } + return os.Getenv(s) + } + cmd = os.Expand(cmd, expand) + for i := range args { + args[i] = os.Expand(args[i], expand) + } + ran, code, err := run(env, stdout, stderr, cmd, args...) + if err == nil { + return true, nil + } + if ran { + return ran, mg.Fatalf(code, `running "%s %s" failed with exit code %d`, cmd, strings.Join(args, " "), code) + } + return ran, fmt.Errorf(`failed to run "%s %s: %v"`, cmd, strings.Join(args, " "), err) +} + +func run(env map[string]string, stdout, stderr io.Writer, cmd string, args ...string) (ran bool, code int, err error) { + c := exec.Command(cmd, args...) + c.Env = os.Environ() + for k, v := range env { + c.Env = append(c.Env, k+"="+v) + } + c.Stderr = stderr + c.Stdout = stdout + c.Stdin = os.Stdin + log.Println("exec:", cmd, strings.Join(args, " ")) + err = c.Run() + return CmdRan(err), ExitStatus(err), err +} + +// CmdRan examines the error to determine if it was generated as a result of a +// command running via os/exec.Command. If the error is nil, or the command ran +// (even if it exited with a non-zero exit code), CmdRan reports true. If the +// error is an unrecognized type, or it is an error from exec.Command that says +// the command failed to run (usually due to the command not existing or not +// being executable), it reports false. +func CmdRan(err error) bool { + if err == nil { + return true + } + ee, ok := err.(*exec.ExitError) + if ok { + return ee.Exited() + } + return false +} + +type exitStatus interface { + ExitStatus() int +} + +// ExitStatus returns the exit status of the error if it is an exec.ExitError +// or if it implements ExitStatus() int. +// 0 if it is nil or 1 if it is a different error. +func ExitStatus(err error) int { + if err == nil { + return 0 + } + if e, ok := err.(exitStatus); ok { + return e.ExitStatus() + } + if e, ok := err.(*exec.ExitError); ok { + if ex, ok := e.Sys().(exitStatus); ok { + return ex.ExitStatus() + } + } + return 1 +} diff --git a/vendor/github.com/magefile/mage/sh/helpers.go b/vendor/github.com/magefile/mage/sh/helpers.go new file mode 100644 index 000000000..f5d20a271 --- /dev/null +++ b/vendor/github.com/magefile/mage/sh/helpers.go @@ -0,0 +1,40 @@ +package sh + +import ( + "fmt" + "io" + "os" +) + +// Rm removes the given file or directory even if non-empty. It will not return +// an error if the target doesn't exist, only if the target cannot be removed. 
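+
+// exampleAliasedCommands is an illustrative sketch (not part of vendored
+// mage) showing the usual pattern of aliasing a binary with RunCmd and
+// capturing its output with Output; the git invocations are assumptions.
+func exampleAliasedCommands() error {
+	git := RunCmd("git") // git("status") is equivalent to Run("git", "status")
+	version, err := Output("git", "describe", "--tags", "--always")
+	if err != nil {
+		return err
+	}
+	return git("tag", "-f", "dev-"+version) // extra args are appended to the baked-in ones
+}
+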
+func Rm(path string) error { + err := os.RemoveAll(path) + if err == nil || os.IsNotExist(err) { + return nil + } + return fmt.Errorf(`failed to remove %s: %v`, path, err) +} + +// Copy robustly copies the source file to the destination, overwriting the destination if necessary. +func Copy(dst string, src string) error { + from, err := os.Open(src) + if err != nil { + return fmt.Errorf(`can't copy %s: %v`, src, err) + } + defer from.Close() + finfo, err := from.Stat() + if err != nil { + return fmt.Errorf(`can't stat %s: %v`, src, err) + } + to, err := os.OpenFile(dst, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, finfo.Mode()) + if err != nil { + return fmt.Errorf(`can't copy to %s: %v`, dst, err) + } + defer to.Close() + _, err = io.Copy(to, from) + if err != nil { + return fmt.Errorf(`error copying %s to %s: %v`, src, dst, err) + } + return nil +} diff --git a/vendor/kmodules.xyz/client-go/tools/healthchecker/const.go b/vendor/kmodules.xyz/client-go/tools/healthchecker/const.go new file mode 100644 index 000000000..940f53d69 --- /dev/null +++ b/vendor/kmodules.xyz/client-go/tools/healthchecker/const.go @@ -0,0 +1,45 @@ +/* +Copyright AppsCode Inc. and Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package healthchecker + +type HealthCheckFailureLabel string + +const ( + HealthCheckClientFailure HealthCheckFailureLabel = "ClientFailure" + HealthCheckPingFailure HealthCheckFailureLabel = "PingFailure" + HealthCheckWriteFailure HealthCheckFailureLabel = "WriteFailure" + HealthCheckReadFailure HealthCheckFailureLabel = "ReadFailure" + HealthCheckPrimaryFailure HealthCheckFailureLabel = "PrimaryFailure" + HealthCheckSecondaryFailure HealthCheckFailureLabel = "SecondaryFailure" + HealthCheckSecondaryUnusualLocked HealthCheckFailureLabel = "SecondaryUnusualLocked" + HealthCheckSecondaryLockCheckingFailure HealthCheckFailureLabel = "SecondaryLockCheckingFailure" + HealthCheckKubernetesClientFailure HealthCheckFailureLabel = "KubernetesClientFailure" + + // replica + HealthCheckReplicaFailure HealthCheckFailureLabel = "ReplicaFailure" + + // MariaDB Constants + HealthCheckClusterFailure HealthCheckFailureLabel = "ClusterFailure" + + // Redis Constants + HealthCheckClusterSlotFailure HealthCheckFailureLabel = "ClusterSlotFailure" + HealthCheckNodesNotReadyFailure HealthCheckFailureLabel = "NodesNotReadyFailure" + + // Write Check Constants + KubeDBSystemDatabase = "kubedb_system" + KubeDBWriteCheckTable = "kubedb_write_check" +) diff --git a/vendor/kmodules.xyz/client-go/tools/healthchecker/health_card.go b/vendor/kmodules.xyz/client-go/tools/healthchecker/health_card.go new file mode 100644 index 000000000..4224f6864 --- /dev/null +++ b/vendor/kmodules.xyz/client-go/tools/healthchecker/health_card.go @@ -0,0 +1,78 @@ +/* +Copyright AppsCode Inc. and Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package healthchecker + +import "k8s.io/klog/v2" + +type HealthCard struct { + lastFailure HealthCheckFailureLabel + totalFailure int32 + threshold int32 + clientCount int32 + key string +} + +func newHealthCard(key string, threshold int32) *HealthCard { + return &HealthCard{ + threshold: threshold, + key: key, + } +} + +// SetThreshold sets the current failure threshold. +// Call this function on the start of each health check. +func (hcf *HealthCard) SetThreshold(threshold int32) { + hcf.threshold = threshold +} + +// HasFailed returns true or false based on the threshold. +// Update the health check condition if this function returns true. +func (hcf *HealthCard) HasFailed(label HealthCheckFailureLabel, err error) bool { + if hcf.lastFailure == label { + hcf.totalFailure++ + } else { + hcf.totalFailure = 1 + } + hcf.lastFailure = label + klog.V(5).InfoS("Health check failed for database", "Key", hcf.key, "FailureType", hcf.lastFailure, "Error", err.Error(), "TotalFailure", hcf.totalFailure) + return hcf.totalFailure >= hcf.threshold +} + +// Clear is used to reset the error counter. +// Call this method after each successful health check. +func (hcf *HealthCard) Clear() { + hcf.totalFailure = 0 + hcf.lastFailure = "" +} + +// ClientCreated is used to track the client which are created on the health check. +// Call this method after a client is successfully created in the health check. +func (hcf *HealthCard) ClientCreated() { + hcf.clientCount++ +} + +// ClientClosed is used to track the client which are closed on the health check. +// Call this method after a client is successfully closed in the health check. +func (hcf *HealthCard) ClientClosed() { + hcf.clientCount-- +} + +// GetClientCount is used to get the current open client count. +// This should always be 0. +func (hcf *HealthCard) GetClientCount() int32 { + return hcf.clientCount +} diff --git a/vendor/kmodules.xyz/client-go/tools/healthchecker/health_checker.go b/vendor/kmodules.xyz/client-go/tools/healthchecker/health_checker.go new file mode 100644 index 000000000..9e27a5934 --- /dev/null +++ b/vendor/kmodules.xyz/client-go/tools/healthchecker/health_checker.go @@ -0,0 +1,128 @@ +/* +Copyright AppsCode Inc. and Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package healthchecker + +import ( + "context" + "sync" + "time" + + kmapi "kmodules.xyz/client-go/api/v1" + + "k8s.io/klog/v2" +) + +type HealthChecker struct { + healthCheckerMap map[string]healthCheckerData + mux sync.Mutex +} + +type healthCheckerData struct { + cancel context.CancelFunc + ticker *time.Ticker + lastPeriodSeconds int32 +} + +func NewHealthChecker() *HealthChecker { + return &HealthChecker{ + healthCheckerMap: make(map[string]healthCheckerData), + mux: sync.Mutex{}, + } +} + +// Start creates a health check go routine. +// Call this method after successful creation of all the replicas of a database. +func (hc *HealthChecker) Start(key string, healthCheckSpec kmapi.HealthCheckSpec, fn func(string, *HealthCard)) { + if healthCheckSpec.PeriodSeconds == nil || healthCheckSpec.TimeoutSeconds == nil || healthCheckSpec.FailureThreshold == nil { + klog.Errorf("spec.healthCheck values are nil, can't start or modify health check.") + return + } + + if *healthCheckSpec.PeriodSeconds <= 0 { + klog.Errorf("spec.healthCheck.PeriodSeconds can't be less than 1, can't start or modify health check.") + return + } + + if !hc.keyExists(key) { + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + ticker := time.NewTicker(time.Duration(*healthCheckSpec.PeriodSeconds) * time.Second) + healthCheckStore := newHealthCard(key, *healthCheckSpec.FailureThreshold) + hc.set(key, healthCheckerData{ + cancel: cancel, + ticker: ticker, + lastPeriodSeconds: *healthCheckSpec.PeriodSeconds, + }) + go func() { + for { + select { + case <-ctx.Done(): + hc.delete(key) + cancel() + ticker.Stop() + klog.Infoln("Health check stopped for key " + key) + return + case <-ticker.C: + klog.V(5).Infoln("Health check running for key " + key) + fn(key, healthCheckStore) + klog.V(5).Infof("Debug client count = %d\n", healthCheckStore.GetClientCount()) + } + } + }() + } else { + data := hc.get(key) + if data.lastPeriodSeconds != *healthCheckSpec.PeriodSeconds { + data.ticker.Reset(time.Duration(*healthCheckSpec.PeriodSeconds) * time.Second) + data.lastPeriodSeconds = *healthCheckSpec.PeriodSeconds + hc.set(key, data) + } + } +} + +// Stop stops a health check go routine. +// Call this method when the database is deleted or halted. 
+func (hc *HealthChecker) Stop(key string) { + if hc.keyExists(key) { + hc.get(key).cancel() + hc.delete(key) + } +} + +func (hc *HealthChecker) keyExists(key string) bool { + hc.mux.Lock() + defer hc.mux.Unlock() + _, ok := hc.healthCheckerMap[key] + return ok +} + +func (hc *HealthChecker) get(key string) healthCheckerData { + hc.mux.Lock() + defer hc.mux.Unlock() + return hc.healthCheckerMap[key] +} + +func (hc *HealthChecker) set(key string, data healthCheckerData) { + hc.mux.Lock() + defer hc.mux.Unlock() + hc.healthCheckerMap[key] = data +} + +func (hc *HealthChecker) delete(key string) { + hc.mux.Lock() + defer hc.mux.Unlock() + delete(hc.healthCheckerMap, key) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index ace83c2f1..407692816 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -763,6 +763,9 @@ github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags github.com/google/go-cmp/cmp/internal/function github.com/google/go-cmp/cmp/internal/value +# github.com/google/go-querystring v1.1.0 +## explicit; go 1.10 +github.com/google/go-querystring/query # github.com/google/gofuzz v1.2.0 ## explicit; go 1.12 github.com/google/gofuzz @@ -770,12 +773,40 @@ github.com/google/gofuzz/bytesource # github.com/google/uuid v1.6.0 ## explicit github.com/google/uuid +# github.com/grafadruid/go-druid v0.0.6 +## explicit; go 1.14 +github.com/grafadruid/go-druid +github.com/grafadruid/go-druid/builder +github.com/grafadruid/go-druid/builder/aggregation +github.com/grafadruid/go-druid/builder/bound +github.com/grafadruid/go-druid/builder/datasource +github.com/grafadruid/go-druid/builder/dimension +github.com/grafadruid/go-druid/builder/extractionfn +github.com/grafadruid/go-druid/builder/filter +github.com/grafadruid/go-druid/builder/granularity +github.com/grafadruid/go-druid/builder/havingspec +github.com/grafadruid/go-druid/builder/intervals +github.com/grafadruid/go-druid/builder/limitspec +github.com/grafadruid/go-druid/builder/lookup +github.com/grafadruid/go-druid/builder/postaggregation +github.com/grafadruid/go-druid/builder/query +github.com/grafadruid/go-druid/builder/searchqueryspec +github.com/grafadruid/go-druid/builder/toinclude +github.com/grafadruid/go-druid/builder/topnmetric +github.com/grafadruid/go-druid/builder/types +github.com/grafadruid/go-druid/builder/virtualcolumn # github.com/hashicorp/errwrap v1.1.0 ## explicit github.com/hashicorp/errwrap +# github.com/hashicorp/go-cleanhttp v0.5.2 +## explicit; go 1.13 +github.com/hashicorp/go-cleanhttp # github.com/hashicorp/go-multierror v1.1.1 ## explicit; go 1.13 github.com/hashicorp/go-multierror +# github.com/hashicorp/go-retryablehttp v0.7.7 +## explicit; go 1.19 +github.com/hashicorp/go-retryablehttp # github.com/hashicorp/go-uuid v1.0.3 ## explicit github.com/hashicorp/go-uuid @@ -853,6 +884,10 @@ github.com/klauspost/cpuid/v2 github.com/lib/pq github.com/lib/pq/oid github.com/lib/pq/scram +# github.com/magefile/mage v1.11.0 +## explicit; go 1.12 +github.com/magefile/mage/mg +github.com/magefile/mage/sh # github.com/mailru/easyjson v0.7.7 ## explicit; go 1.12 github.com/mailru/easyjson/buffer @@ -1499,6 +1534,7 @@ kmodules.xyz/client-go/discovery kmodules.xyz/client-go/meta kmodules.xyz/client-go/policy/secomp kmodules.xyz/client-go/tools/certholder +kmodules.xyz/client-go/tools/healthchecker # kmodules.xyz/custom-resources v0.30.0 ## explicit; go 1.22.0 kmodules.xyz/custom-resources/apis/appcatalog
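
The magefile/mage packages vendored above come in only because go-druid ships a Magefile; the Druid client itself does not call them. For readers unfamiliar with the sh helpers documented in cmd.go and helpers.go, the following is a small, hypothetical Magefile sketch (target names and the golangci-lint invocation are illustrative assumptions, not part of this patch) showing command aliases via RunCmd, trimmed stdout via Output, and exit-code recovery via Exec and ExitStatus.

//go:build mage

package main

import (
	"fmt"
	"os"

	"github.com/magefile/mage/sh"
)

// goCmd bakes "go" in as the command; arguments supplied at call time are
// appended to the baked-in ones.
var goCmd = sh.RunCmd("go")

// Build vets and builds the module; command stdout is only shown when mage
// is run with -v.
func Build() error {
	if err := goCmd("vet", "./..."); err != nil {
		return err
	}
	return goCmd("build", "./...")
}

// Version prints the latest tag; sh.Output returns stdout with the trailing
// newline trimmed.
func Version() error {
	tag, err := sh.Output("git", "describe", "--tags", "--always")
	if err != nil {
		return err
	}
	fmt.Println(tag)
	return nil
}

// Lint shows how Exec reports whether the command actually ran and how the
// exit code can be recovered from the returned error.
func Lint() error {
	ran, err := sh.Exec(nil, os.Stdout, os.Stderr, "golangci-lint", "run", "./...")
	if err != nil && ran {
		return fmt.Errorf("lint failed with exit code %d", sh.ExitStatus(err))
	}
	return err
}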
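
Taken together, the healthchecker files vendored above provide the periodic health-check loop that the new Druid client is expected to plug into. Below is a minimal, self-contained sketch of that wiring, not the operator's actual code: pingDruid, the key format, and the hard-coded spec values are assumptions made purely for illustration.

package main

import (
	"errors"
	"fmt"
	"time"

	kmapi "kmodules.xyz/client-go/api/v1"
	"kmodules.xyz/client-go/tools/healthchecker"
)

// pingDruid is a hypothetical stand-in for a real connectivity probe built on
// the druid client added by this patch.
func pingDruid(key string) error {
	return errors.New("druid: not reachable")
}

func main() {
	hc := healthchecker.NewHealthChecker()

	// All three pointers must be non-nil and PeriodSeconds must be positive,
	// otherwise Start refuses to schedule the check (see health_checker.go).
	period, timeout, threshold := int32(10), int32(10), int32(3)
	spec := kmapi.HealthCheckSpec{}
	spec.PeriodSeconds = &period
	spec.TimeoutSeconds = &timeout
	spec.FailureThreshold = &threshold

	// A namespace/name style key; the convention is up to the caller.
	key := "demo/druid-quickstart"

	hc.Start(key, spec, func(key string, card *healthchecker.HealthCard) {
		if err := pingDruid(key); err != nil {
			// HasFailed reports true only after FailureThreshold consecutive
			// failures with the same label; that is where the operator would
			// flip the database's health condition.
			if card.HasFailed(healthchecker.HealthCheckPingFailure, err) {
				fmt.Println("druid is not healthy:", err)
			}
			return
		}
		// A successful round resets the consecutive-failure counter.
		card.Clear()
	})

	time.Sleep(35 * time.Second) // let a few ticks fire for this demo
	hc.Stop(key)                 // call Stop when the database is deleted or halted
}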