diff --git a/.config/nextest.toml b/.config/nextest.toml
index e6342f6bdb4ba..b21c53f67f28d 100644
--- a/.config/nextest.toml
+++ b/.config/nextest.toml
@@ -13,6 +13,9 @@ failure-output = "immediate-final"
 # don't cancel the test run on the first failure
 fail-fast = false
 
+# timeout tests after 2 minutes
+slow-timeout = { period = "30s", terminate-after = 4 }
+
 [profile.default.junit]
 # output test results at target/nextest/default/junit.xml
 path = "junit.xml"
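One clarification on the nextest settings: `period` is how long a test runs before it is flagged as slow, and `terminate-after` is how many periods may elapse before the runner kills it, so 30s × 4 gives the 2-minute cap the comment mentions. A minimal sketch of what that looks like in practice (the test name is hypothetical):

```bash
# Assumes cargo-nextest is installed; exercises the profile above.
cargo nextest run --profile default
# A hung test would first be flagged, then terminated, roughly:
#   SLOW [> 30.000s] my-crate tests::hangs_forever      (after one period)
#   TIMEOUT [> 120.000s] my-crate tests::hangs_forever  (after four periods)
```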
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index c42b902aa2c0a..31043e805b35e 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -1,100 +1,100 @@
 docs/ @vectordotdev/ux-team
-lib/dnsmsg-parser/ @vectordotdev/integrations-team
-lib/file-source/ @vectordotdev/integrations-team
-lib/k8s-e2e-tests/ @vectordotdev/integrations-team
-lib/k8s-test-framework/ @vectordotdev/integrations-team
-lib/opentelemetry-proto/ @vectordotdev/integrations-team
-lib/vector-common/ @vectordotdev/core-team
-lib/vector-config/ @vectordotdev/core-team
-lib/vector-config-common/ @vectordotdev/core-team
-lib/vector-config-macros/ @vectordotdev/core-team
-lib/vector-core/ @vectordotdev/core-team
-lib/vector-vrl-functions/ @vectordotdev/processing-team
-lib/vrl/ @vectordotdev/processing-team
-src/config/ @vectordotdev/core-team
-src/internal_telemetry/ @vectordotdev/core-team
-src/sinks/ @vectordotdev/integrations-team
-src/sinks/amqp/ @StephenWakely @vectordotdev/integrations-team
-src/sinks/appsignal/ @neuronull @vectordotdev/integrations-team
-src/sinks/aws_cloudwatch_logs/ @vectordotdev/integrations-team
-src/sinks/aws_cloudwatch_metrics/ @vectordotdev/integrations-team
-src/sinks/aws_kinesis/ @vectordotdev/integrations-team # sink_aws_kinesis_firehose,sink_aws_kinesis_stream
-src/sinks/aws_s3/ @vectordotdev/integrations-team
-src/sinks/aws_sqs/ @vectordotdev/integrations-team
-src/sinks/axiom.rs @vectordotdev/integrations-team
-src/sinks/azure_blob/ @dsmith3197 @vectordotdev/integrations-team
-src/sinks/azure_monitor_logs.rs @dsmith3197 @vectordotdev/integrations-team
-src/sinks/blackhole/ @dsmith3197 @vectordotdev/integrations-team
-src/sinks/clickhouse/ @dsmith3197 @vectordotdev/integrations-team
-src/sinks/console/ @dsmith3197 @vectordotdev/integrations-team
-src/sinks/databend/ @vectordotdev/integrations-team
-src/sinks/datadog_events/ @neuronull @vectordotdev/integrations-team
-src/sinks/datadog_logs/ @neuronull @vectordotdev/integrations-team
-src/sinks/datadog_metrics/ @neuronull @vectordotdev/integrations-team
-src/sinks/datadog_traces/ @neuronull @vectordotdev/integrations-team
-src/sinks/elasticsearch/ @vectordotdev/integrations-team
-src/sinks/file/ @vectordotdev/integrations-team
-src/sinks/gcp/ @StephenWakely @vectordotdev/integrations-team # sink_gcp_chronicle_unstructured,sink_gcp_cloud_storage,sink_gcp_pubsub,sink_gcp_stackdriver_logs,sink_gcp_stackdriver_metrics
-src/sinks/honeycomb.rs @vectordotdev/integrations-team
-src/sinks/http.rs @neuronull @vectordotdev/integrations-team
-src/sinks/humio/ @StephenWakely @vectordotdev/integrations-team # sink_humio_logs,sink_humio_metrics
-src/sinks/influxdb/ @dsmith3197 @vectordotdev/integrations-team # sink_influxdb_logs,sink_influxdb_metrics
-src/sinks/kafka/ @dsmith3197 @vectordotdev/integrations-team
-src/sinks/logdna.rs @neuronull @vectordotdev/integrations-team
-src/sinks/loki/ @vectordotdev/integrations-team
-src/sinks/nats.rs @StephenWakely @vectordotdev/integrations-team
-src/sinks/new_relic/ @dsmith3197 @vectordotdev/integrations-team # sink_newrelix,sink_newrelic_logs
-src/sinks/papertrail.rs @StephenWakely @vectordotdev/integrations-team
-src/sinks/prometheus/ @StephenWakely @vectordotdev/integrations-team # sink_prometheus_exporter,sink_prometheus_remote_write
-src/sinks/pulsar.rs @dsmith3197 @vectordotdev/integrations-team
-src/sinks/redis.rs @StephenWakely @vectordotdev/integrations-team
-src/sinks/sematext/ @vectordotdev/integrations-team # sink_sematext_logs,sink_sematext_metrics
-src/sinks/socket.rs @neuronull @vectordotdev/integrations-team
-src/sinks/splunk_hec/ @StephenWakely @vectordotdev/integrations-team # sink_splunk_hec_logs,sink_splunk_hec_metrics
-src/sinks/statsd.rs @neuronull @vectordotdev/integrations-team
-src/sinks/vector/ @neuronull @vectordotdev/integrations-team
-src/sinks/websocket/ @neuronull @vectordotdev/integrations-team
-src/source_sender/ @vectordotdev/core-team
-src/sources/ @vectordotdev/integrations-team
-src/sources/amqp.rs @StephenWakely @vectordotdev/integrations-team
-src/sources/apache_metrics/ @dsmith3197 @vectordotdev/integrations-team
-src/sources/aws_ecs_metrics/ @vectordotdev/integrations-team
-src/sources/aws_kinesis_firehose/ @vectordotdev/integrations-team
-src/sources/aws_s3/ @vectordotdev/integrations-team
-src/sources/aws_sqs/ @vectordotdev/integrations-team
-src/sources/datadog_agent/ @neuronull @vectordotdev/integrations-team
-src/sources/demo_logs.rs @StephenWakely @vectordotdev/integrations-team
-src/sources/dnstap/ @StephenWakely @vectordotdev/integrations-team
-src/sources/docker_logs/ @vectordotdev/integrations-team
-src/sources/eventstoredb_metrics/ @dsmith3197 @vectordotdev/integrations-team
-src/sources/exec/ @dsmith3197 @vectordotdev/integrations-team
-src/sources/file.rs @vectordotdev/integrations-team
-src/sources/file_descriptors/ @dsmith3197 @vectordotdev/integrations-team # source_file_descriptor,source_stdin
-src/sources/fluent/ @neuronull @vectordotdev/integrations-team
-src/sources/gcp_pubsub.rs @StephenWakely @vectordotdev/integrations-team
-src/sources/heroku_logs.rs @vectordotdev/integrations-team
-src/sources/host_metrics/ @dsmith3197 @vectordotdev/integrations-team
-src/sources/http_client/ @neuronull @vectordotdev/integrations-team
-src/sources/http_server.rs @neuronull @vectordotdev/integrations-team
-src/sources/internal_logs.rs @neuronull @vectordotdev/integrations-team
-src/sources/internal_metrics.rs @neuronull @vectordotdev/integrations-team
-src/sources/journald.rs @vectordotdev/integrations-team
-src/sources/kafka.rs @dsmith3197 @vectordotdev/integrations-team
-src/sources/kubernetes_logs/ @vectordotdev/integrations-team
-src/sources/logstash.rs @neuronull @vectordotdev/integrations-team
-src/sources/mongodb_metrics/ @dsmith3197 @vectordotdev/integrations-team
-src/sources/nats.rs @StephenWakely @vectordotdev/integrations-team
-src/sources/nginx_metrics/ @dsmith3197 @vectordotdev/integrations-team
-src/sources/opentelemetry/ @vectordotdev/integrations-team
-src/sources/postgresql_metrics.rs @dsmith3197 @vectordotdev/integrations-team
-src/sources/prometheus/ @StephenWakely @vectordotdev/integrations-team # source_prometheus_remote_write,source_prometheus_scrape
-src/sources/redis/ @StephenWakely @vectordotdev/integrations-team
-src/sources/socket/ @neuronull @vectordotdev/integrations-team
-src/sources/splunk_hec/ @StephenWakely @vectordotdev/integrations-team
-src/sources/statsd/ @neuronull @vectordotdev/integrations-team
-src/sources/syslog.rs @StephenWakely @vectordotdev/integrations-team
-src/sources/vector/ @neuronull @vectordotdev/integrations-team
-src/test_util/ @vectordotdev/core-team
-src/topology/ @vectordotdev/core-team
-src/transforms/ @vectordotdev/processing-team
+lib/dnsmsg-parser/ @vectordotdev/vector
+lib/file-source/ @vectordotdev/vector
+lib/k8s-e2e-tests/ @vectordotdev/vector
+lib/k8s-test-framework/ @vectordotdev/vector
+lib/opentelemetry-proto/ @vectordotdev/vector
+lib/vector-common/ @vectordotdev/vector
+lib/vector-config/ @vectordotdev/vector
+lib/vector-config-common/ @vectordotdev/vector
+lib/vector-config-macros/ @vectordotdev/vector
+lib/vector-core/ @vectordotdev/vector
+lib/vector-vrl-functions/ @vectordotdev/vector
+lib/vrl/ @vectordotdev/vector
+src/config/ @vectordotdev/vector
+src/internal_telemetry/ @vectordotdev/vector
+src/sinks/ @vectordotdev/vector
+src/sinks/amqp/ @vectordotdev/vector
+src/sinks/appsignal/ @vectordotdev/vector
+src/sinks/aws_cloudwatch_logs/ @vectordotdev/vector
+src/sinks/aws_cloudwatch_metrics/ @vectordotdev/vector
+src/sinks/aws_kinesis/ @vectordotdev/vector # sink_aws_kinesis_firehose,sink_aws_kinesis_stream
+src/sinks/aws_s3/ @vectordotdev/vector
+src/sinks/aws_sqs/ @vectordotdev/vector
+src/sinks/axiom.rs @vectordotdev/vector
+src/sinks/azure_blob/ @vectordotdev/vector
+src/sinks/azure_monitor_logs.rs @vectordotdev/vector
+src/sinks/blackhole/ @vectordotdev/vector
+src/sinks/clickhouse/ @vectordotdev/vector
+src/sinks/console/ @vectordotdev/vector
+src/sinks/databend/ @vectordotdev/vector
+src/sinks/datadog_events/ @vectordotdev/vector
+src/sinks/datadog_logs/ @vectordotdev/vector
+src/sinks/datadog_metrics/ @vectordotdev/vector
+src/sinks/datadog_traces/ @vectordotdev/vector
+src/sinks/elasticsearch/ @vectordotdev/vector
+src/sinks/file/ @vectordotdev/vector
+src/sinks/gcp/ @vectordotdev/vector # sink_gcp_chronicle_unstructured,sink_gcp_cloud_storage,sink_gcp_pubsub,sink_gcp_stackdriver_logs,sink_gcp_stackdriver_metrics
+src/sinks/honeycomb.rs @vectordotdev/vector
+src/sinks/http.rs @vectordotdev/vector
+src/sinks/humio/ @vectordotdev/vector # sink_humio_logs,sink_humio_metrics
+src/sinks/influxdb/ @vectordotdev/vector # sink_influxdb_logs,sink_influxdb_metrics
+src/sinks/kafka/ @vectordotdev/vector
+src/sinks/logdna.rs @vectordotdev/vector
+src/sinks/loki/ @vectordotdev/vector
+src/sinks/nats.rs @vectordotdev/vector
+src/sinks/new_relic/ @vectordotdev/vector # sink_newrelix,sink_newrelic_logs
+src/sinks/papertrail.rs @vectordotdev/vector
+src/sinks/prometheus/ @vectordotdev/vector # sink_prometheus_exporter,sink_prometheus_remote_write
+src/sinks/pulsar.rs @vectordotdev/vector
+src/sinks/redis.rs @vectordotdev/vector
+src/sinks/sematext/ @vectordotdev/vector # sink_sematext_logs,sink_sematext_metrics
+src/sinks/socket.rs @vectordotdev/vector
+src/sinks/splunk_hec/ @vectordotdev/vector # sink_splunk_hec_logs,sink_splunk_hec_metrics
+src/sinks/statsd.rs @vectordotdev/vector
+src/sinks/vector/ @vectordotdev/vector
+src/sinks/websocket/ @vectordotdev/vector
+src/source_sender/ @vectordotdev/vector
+src/sources/ @vectordotdev/vector
+src/sources/amqp.rs @vectordotdev/vector
+src/sources/apache_metrics/ @vectordotdev/vector
+src/sources/aws_ecs_metrics/ @vectordotdev/vector
+src/sources/aws_kinesis_firehose/ @vectordotdev/vector
+src/sources/aws_s3/ @vectordotdev/vector
+src/sources/aws_sqs/ @vectordotdev/vector
+src/sources/datadog_agent/ @vectordotdev/vector
+src/sources/demo_logs.rs @vectordotdev/vector
+src/sources/dnstap/ @vectordotdev/vector
+src/sources/docker_logs/ @vectordotdev/vector
+src/sources/eventstoredb_metrics/ @vectordotdev/vector
+src/sources/exec/ @vectordotdev/vector
+src/sources/file.rs @vectordotdev/vector
+src/sources/file_descriptors/ @vectordotdev/vector # source_file_descriptor,source_stdin
+src/sources/fluent/ @vectordotdev/vector
+src/sources/gcp_pubsub.rs @vectordotdev/vector
+src/sources/heroku_logs.rs @vectordotdev/vector
+src/sources/host_metrics/ @vectordotdev/vector
+src/sources/http_client/ @vectordotdev/vector
+src/sources/http_server.rs @vectordotdev/vector
+src/sources/internal_logs.rs @vectordotdev/vector
+src/sources/internal_metrics.rs @vectordotdev/vector
+src/sources/journald.rs @vectordotdev/vector
+src/sources/kafka.rs @vectordotdev/vector
+src/sources/kubernetes_logs/ @vectordotdev/vector
+src/sources/logstash.rs @vectordotdev/vector
+src/sources/mongodb_metrics/ @vectordotdev/vector
+src/sources/nats.rs @vectordotdev/vector
+src/sources/nginx_metrics/ @vectordotdev/vector
+src/sources/opentelemetry/ @vectordotdev/vector
+src/sources/postgresql_metrics.rs @vectordotdev/vector
+src/sources/prometheus/ @vectordotdev/vector # source_prometheus_remote_write,source_prometheus_scrape
+src/sources/redis/ @vectordotdev/vector
+src/sources/socket/ @vectordotdev/vector
+src/sources/splunk_hec/ @vectordotdev/vector
+src/sources/statsd/ @vectordotdev/vector
+src/sources/syslog.rs @vectordotdev/vector
+src/sources/vector/ @vectordotdev/vector
+src/test_util/ @vectordotdev/vector
+src/topology/ @vectordotdev/vector
+src/transforms/ @vectordotdev/vector
 website/ @vectordotdev/ux-team @vectordotdev/documentation
diff --git a/.github/ISSUE_TEMPLATE/minor-release.md b/.github/ISSUE_TEMPLATE/minor-release.md
index a9d49abfafdfb..8c82e25067c71 100644
--- a/.github/ISSUE_TEMPLATE/minor-release.md
+++ b/.github/ISSUE_TEMPLATE/minor-release.md
@@ -49,6 +49,6 @@ On the day of release:
 - [ ] Add docker images to [https://github.com/DataDog/images](https://github.com/DataDog/images/tree/master/vector) to have them available internally.
 - [ ] Cherry-pick any release commits from the release branch that are not on `master`, to `master`
 - [ ] Bump the release number in the `Cargo.toml` on master to the next major release
-- [ ] Drop a note in the #websites Slack channel to request an update of the branch deployed
-  at https://vector.dev to the new release branch.
+- [ ] Reset the `website` branch to the `HEAD` of the release branch to update https://vector.dev
+  - [ ] `git checkout website && git reset --hard origin/v0. && git push`
 - [ ] Kick-off post-mortems for any regressions resolved by the release
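Spelled out, the new `website` reset flow looks like the sketch below (the release branch number is a placeholder; the template leaves it elided). One caveat worth flagging: after `reset --hard` onto a diverged branch, a plain `git push` is rejected, so a force push is typically required:

```bash
# Sketch only; v0.38 is a hypothetical release branch, not from this diff.
git fetch origin
git checkout website
git reset --hard origin/v0.38
git push --force-with-lease  # plain `git push` fails if `website` has diverged
```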
diff --git a/.github/ISSUE_TEMPLATE/patch-release.md b/.github/ISSUE_TEMPLATE/patch-release.md
index 903c7d5befbad..2beb8531dbd9a 100644
--- a/.github/ISSUE_TEMPLATE/patch-release.md
+++ b/.github/ISSUE_TEMPLATE/patch-release.md
@@ -13,9 +13,6 @@ Before the release:
   - If any merge conflicts occur, attempt to solve them and if needed enlist the aid of those familiar with the conflicting commits.
 - [ ] Run `cargo vdev build release-cue` to generate a new cue file for the release
 - [ ] Add `changelog` key to generated cue file
-  - [ ] `git log --no-merges --cherry-pick --right-only ...`
-  - [ ] Should be hand-written list of changes
-    ([example](https://github.com/vectordotdev/vector/blob/9fecdc8b5c45c613de2d01d4d2aee22be3a2e570/website/cue/reference/releases/0.19.0.cue#L44))
 - [ ] Add description key to the generated cue file with a description of the
   release (see previous releases for examples).
 - [ ] Update version number in `distribution/install.sh`
@@ -51,4 +48,6 @@ On the day of release:
 - [ ] Add docker images to [https://github.com/DataDog/images](https://github.com/DataDog/images/tree/master/vector) to have them available internally.
   - Follow the [instructions at the top of the mirror.yaml file](https://github.com/DataDog/images/blob/fbf12868e90d52e513ebca0389610dea8a3c7e1a/mirror.yaml#L33-L49).
 - [ ] Cherry-pick any release commits from the release branch that are not on `master`, to `master`
+- [ ] Reset the `website` branch to the `HEAD` of the release branch to update https://vector.dev
+  - [ ] `git checkout website && git reset --hard origin/v0.. && git push`
 - [ ] Kick-off post-mortems for any regressions resolved by the release
diff --git a/.github/actions/spelling/allow.txt b/.github/actions/spelling/allow.txt
index f4cc7aa7344b3..cc66dca7762ee 100644
--- a/.github/actions/spelling/allow.txt
+++ b/.github/actions/spelling/allow.txt
@@ -23,6 +23,8 @@ Arival
 Arnova
 Asus
 Atlassian
+atleastonce
+atmostonce
 Attab
 Audiosonic
 avsc
@@ -52,8 +54,12 @@ dkr
 Dockerfiles
 DOOV
 Douban
+E2ETest
+emqx
+eventloop
 Enot
 Evercoss
+exactlyonce
 Explay
 FAQs
 FDO
@@ -125,6 +131,7 @@ Moto
 Mpman
 Multilaser
 Mumbai
+musleabi
 Mytab
 NLB
 Nabi
@@ -158,6 +165,7 @@ pront
 Proscan
 Qmobilevn
 RPZ
+RRSIGs
 Rackspace
 Rathi
 Regza
@@ -167,6 +175,7 @@ Rockchip
 Roku
 Roundcube
 Rowling
+rumqttc
 SBT
 SKtelesys
 Salesforce
@@ -246,6 +255,7 @@ buildname
 buildroot
 bytestream
 callsites
+cdnskey
 cncf
 codepath
 codepaths
@@ -254,6 +264,7 @@ compiletime
 coredns
 corejs
 coreutils
+csync
 curta
 daemonset
 dalek
@@ -275,8 +286,11 @@ dnsutils
 dockercmd
 downsides
 downwardapi
+ede
 emoji
 esbuild
+etld
+fakeintake
 fargate
 fibonacci
 fileapi
@@ -294,6 +308,7 @@ gcr
 gcs
 gdpr
 github
+gnueabi
 gnueabihf
 gnupg
 gnuplot
@@ -327,11 +342,13 @@ htmltest
 https
 humungus
 icecream
+idn
 ifeq
 ifneq
 imobile
 influxd
 ionik
+ipallowlist
 ipod
 ircd
 jemalloc
@@ -419,13 +436,17 @@ security-tab
 servlet
 Sinjo
 sublocation
+sundar
+svcb
 snyk
 socketaddr
 solarwinds
 splunk
 ssh
 staticuser
+statsd
 symbian
+tanushri
 timeframe
 timeseries
 timespan
diff --git a/.github/actions/spelling/excludes.txt b/.github/actions/spelling/excludes.txt
index 422d90220da82..581f4c62f0cb4 100644
--- a/.github/actions/spelling/excludes.txt
+++ b/.github/actions/spelling/excludes.txt
@@ -5,10 +5,10 @@
 (?:^|/)amplify\.yml$
 (?:^|/)build_preview_sites\.yml$
 (?:^|/)create_preview_sites\.yml$
-(?:^|/)preview_site_trigger\.yml$
 (?:^|/)go\.sum$
 (?:^|/)package(?:-lock|)\.json$
 (?:^|/)Pipfile$
+(?:^|/)preview_site_trigger\.yml$
 (?:^|/)pyproject.toml
 (?:^|/)requirements(?:-dev|-doc|-test|)\.txt$
 (?:^|/)vendor/
@@ -82,6 +82,9 @@
 ^\Qbenches/transform/route.rs\E$
 ^\Qlib/codecs/tests/data/decoding/protobuf/test_protobuf.desc\E$
 ^\Qlib/codecs/tests/data/decoding/protobuf/test_protobuf3.desc\E$
+^\Qlib/codecs/tests/data/protobuf/protos/test.desc\E$
+^\Qlib/codecs/tests/data/protobuf/protos/test_protobuf.desc\E$
+^\Qlib/codecs/tests/data/protobuf/protos/test_protobuf3.desc\E$
 ^\Qlib/codecs/tests/data/protobuf/test.desc\E$
 ^\Qlib/codecs/tests/data/protobuf/test_protobuf.desc\E$
 ^\Qlib/codecs/tests/data/protobuf/test_protobuf3.desc\E$
diff --git a/.github/actions/spelling/expect.txt b/.github/actions/spelling/expect.txt
index 0fbaf09256624..743eb16c52620 100644
--- a/.github/actions/spelling/expect.txt
+++ b/.github/actions/spelling/expect.txt
@@ -8,7 +8,6 @@ Acq
 AEAD
 agentpayload
 aimd
-akx
 allowerased
 amka
 amping
@@ -21,7 +20,6 @@ andy
 ansicpg
 anumber
 anycondition
-anymap
 anypb
 apievent
 apipodspec
@@ -33,7 +31,6 @@ aqf
 architecting
 archivable
 ARNOTAREALIDD
-arshiyasolei
 asdf
 asdfasdf
 assertverify
@@ -168,7 +165,6 @@ clibtype
 clickhouse
 clonable
 cloudresourcemanager
-cloudsmith
 cloudwatchlogs
 cmark
 CMK
@@ -289,7 +285,6 @@ Doop
 downcasted
 droptest
 dsl
-dsmith
 dstat
 dstport
 dtype
@@ -320,6 +315,7 @@ endler
 enduml
 eni
 enp
+Ensar
 enumdecl
 enumflags
 ENVARS
@@ -328,7 +324,6 @@ EOIG
 EOL'ed
 Err'ing
 errorf
-Errorsfor
 esb
 esque
 etheus
@@ -417,7 +412,6 @@ fsevent
 fslock
 FSTRM
 fsyncdata
-fuchsnj
 fullhuman
 futs
 fuzzcheck
@@ -449,6 +443,7 @@ gnux
 gny
 Godbolt
 gogoproto
+gohai
 goldberg
 goldmark
 GPB
@@ -565,6 +560,7 @@ jmxrmi
 jmxtrans
 Jolokia
 jsn
+JSONAs
 jsonify
 jsonlines
 jstype
@@ -656,7 +652,6 @@ maybeanothertest
 mbean
 mcache
 mcr
-meh
 meln
 memfd
 memmap
@@ -818,7 +813,6 @@ overaligned
 overalignment
 owo
 oyaml
-pablosichert
 pageheap
 paq
 parallelizable
@@ -951,6 +945,7 @@ samerole
 sameuser
 sandboxed
 sandboxing
+Saraj
 sccache
 schemaless
 schemars
@@ -1237,7 +1232,6 @@ wktpointer
 wmem
 woooooow
 woothee
-wor
 wordlist
 workdir
 workstreams
@@ -1250,7 +1244,6 @@ wtime
 wtimeouts
 wtr
 wurstmeister
-wwang
 xact
 xcatsy
 Xcg
diff --git a/.github/actions/spelling/patterns.txt b/.github/actions/spelling/patterns.txt
index 2abd94fd26bc7..28109b552e719 100644
--- a/.github/actions/spelling/patterns.txt
+++ b/.github/actions/spelling/patterns.txt
@@ -224,3 +224,12 @@ user:P@ssw0rd
 
 # Ignore base64 encoded values in Prometheus Pushgateway URL paths
 /.+@base64/.+
+
+# Ignore base64 encoded values in VRL examples (requires padding to avoid false positives)
+"[A-Za-z0-9]*=="
+
+# Ignore punycode
+\bxn--[-0-9a-z]+
+
+# changelog.d fragment authors line
+^authors: .*$
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index a5511ea5af67f..5270e50781e38 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -60,7 +60,8 @@ updates:
       interval: "daily"
       time: "04:00" # UTC
     labels:
-      - "domain: deps"
+      - "domain: releasing"
+      - "no-changelog"
    commit-message:
      prefix: "chore(deps)"
    open-pull-requests-limit: 100
@@ -70,6 +71,7 @@ updates:
    schedule:
      interval: "daily"
    labels:
      - "domain: ci"
+      - "no-changelog"
    commit-message:
      prefix: "chore(ci)"
    groups:
diff --git a/.github/semantic.yml b/.github/semantic.yml
index 84340954ea7ba..f25e763d2ec6e 100644
--- a/.github/semantic.yml
+++ b/.github/semantic.yml
@@ -158,6 +158,7 @@ scopes:
  - postgresql_metrics source # Anything `postgresql_metrics` source related
  - prometheus_remote_write source # Anything `prometheus_remote_write` source related
  - prometheus_scrape source # Anything `prometheus_scrape` source related
+  - pulsar source # Anything `pulsar` source related
  - redis source # Anything `redis` source related
  - socket source # Anything `socket` source related
  - splunk_hec source # Anything `splunk_hec` source related
diff --git a/.github/workflows/changelog.yaml b/.github/workflows/changelog.yaml
index 17084fcd5b27d..8ad6077d265c3 100644
--- a/.github/workflows/changelog.yaml
+++ b/.github/workflows/changelog.yaml
@@ -23,7 +23,12 @@ jobs:
     env:
       PR_HAS_LABEL: ${{ contains( github.event.pull_request.labels.*.name, 'no-changelog') }}
     steps:
+      # checkout full depth because in the check_changelog_fragments script, we need to specify a
+      # merge base. If we only shallow clone the repo, git may not have enough history to determine
+      # the base.
       - uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
 
       - name: Generate authentication token
         # don't run this step if the PR is from a fork or dependabot since the secrets won't exist
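The new comment is worth grounding: on a shallow clone, `git merge-base` (which the changelog-fragment check relies on) can fail outright because the common ancestor commit was never fetched. A quick illustration of the failure mode and its remedies (commands standard git, scenario illustrative):

```bash
# In a shallow CI checkout of a PR branch, the common ancestor with the
# target branch may simply not exist in local history:
git merge-base origin/master HEAD || echo "no merge base: history too shallow"
# Remedies: clone with full history (fetch-depth: 0, as in the workflow above)
# or deepen an existing shallow clone on demand:
git fetch --unshallow
```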
diff --git a/.github/workflows/changes.yml b/.github/workflows/changes.yml
index 42710935abc24..a497ad2dd16fd 100644
--- a/.github/workflows/changes.yml
+++ b/.github/workflows/changes.yml
@@ -19,6 +19,10 @@ on:
         required: false
         type: boolean
         default: false
+      e2e_tests:
+        required: false
+        type: boolean
+        default: false
       source:
         required: false
         type: boolean
@@ -41,7 +45,7 @@ on:
       k8s:
         value: ${{ jobs.source.outputs.k8s }}
       all-int:
-        value: ${{ jobs.int_tests.outputs.all-int }}
+        value: ${{ jobs.int_tests.outputs.all-tests }}
       amqp:
         value: ${{ jobs.int_tests.outputs.amqp }}
       appsignal:
@@ -92,6 +96,8 @@ on:
         value: ${{ jobs.int_tests.outputs.loki }}
       mongodb:
         value: ${{ jobs.int_tests.outputs.mongodb }}
+      mqtt:
+        value: ${{ jobs.int_tests.outputs.mqtt }}
       nats:
         value: ${{ jobs.int_tests.outputs.nats }}
       nginx:
@@ -110,11 +116,19 @@ on:
      splunk:
        value: ${{ jobs.int_tests.outputs.splunk }}
      webhdfs:
        value: ${{ jobs.int_tests.outputs.webhdfs }}
+      # e2e tests
+      all-e2e:
+        value: ${{ jobs.e2e_tests.outputs.all-tests }}
+      e2e-datadog-logs:
+        value: ${{ jobs.e2e_tests.outputs.datadog-logs }}
+      e2e-datadog-metrics:
+        value: ${{ jobs.e2e_tests.outputs.datadog-metrics }}
 
jobs:
  # Detects changes that are not specific to integration tests
  source:
    runs-on: ubuntu-20.04
+    timeout-minutes: 5
    if: ${{ inputs.source }}
    outputs:
      source: ${{ steps.filter.outputs.source }}
@@ -182,9 +196,10 @@
  # Detects changes that are specific to integration tests
  int_tests:
    runs-on: ubuntu-latest
+    timeout-minutes: 5
    if: ${{ inputs.int_tests }}
    outputs:
-      all-int: ${{ steps.filter.outputs.all-int}}
+      all-tests: ${{ steps.filter.outputs.all-tests}}
      amqp: ${{ steps.filter.outputs.amqp }}
      appsignal: ${{ steps.filter.outputs.appsignal}}
      aws: ${{ steps.filter.outputs.aws }}
@@ -210,6 +225,7 @@
      logstash: ${{ steps.filter.outputs.logstash }}
      loki: ${{ steps.filter.outputs.loki }}
      mongodb: ${{ steps.filter.outputs.mongodb }}
+      mqtt: ${{ steps.filter.outputs.mqtt }}
      nats: ${{ steps.filter.outputs.nats }}
      nginx: ${{ steps.filter.outputs.nginx }}
      opentelemetry: ${{ steps.filter.outputs.opentelemetry }}
@@ -234,3 +250,28 @@
          base: ${{ inputs.base_ref }}
          ref: ${{ inputs.head_ref }}
          filters: int_test_filters.yaml
+
+  # Detects changes that are specific to e2e tests
+  e2e_tests:
+    runs-on: ubuntu-latest
+    timeout-minutes: 5
+    if: ${{ inputs.e2e_tests }}
+    outputs:
+      all-tests: ${{ steps.filter.outputs.all-tests}}
+      datadog-logs: ${{ steps.filter.outputs.datadog-logs }}
+      datadog-metrics: ${{ steps.filter.outputs.datadog-metrics }}
+    steps:
+      - uses: actions/checkout@v3
+
+      # creates a yaml file that contains the filters for each test,
+      # extracted from the output of the `vdev e2e ci-paths` command, which
+      # sources the paths from the scripts/e2e/.../test.yaml files
+      - name: Create filter rules for e2e tests
+        run: cargo vdev e2e ci-paths > int_test_filters.yaml
+
+      - uses: dorny/paths-filter@v3
+        id: filter
+        with:
+          base: ${{ inputs.base_ref }}
+          ref: ${{ inputs.head_ref }}
+          filters: int_test_filters.yaml
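For context on the new `e2e_tests` job: `cargo vdev e2e ci-paths` emits a dorny/paths-filter rules file, one glob list per suite, and the filter step turns each list into a `'true'`/`'false'` output consumed by downstream `if:` conditions. A sketch of the shape of that generated file (the globs below are illustrative, not the actual vdev output):

```bash
cargo vdev e2e ci-paths | tee int_test_filters.yaml
# Hypothetical shape of the rules consumed by dorny/paths-filter:
#   datadog-logs:
#   - "src/sinks/datadog/logs/**"
#   - "scripts/e2e/datadog-logs/**"
#   datadog-metrics:
#   - "src/sinks/datadog/metrics/**"
#   - "scripts/e2e/datadog-metrics/**"
# A changed file matching a glob flips steps.filter.outputs.<name> to 'true'.
```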
diff --git a/.github/workflows/cli.yml b/.github/workflows/cli.yml
index 460bfdaa01cf9..0ac6fb3070e02 100644
--- a/.github/workflows/cli.yml
+++ b/.github/workflows/cli.yml
@@ -6,6 +6,7 @@ on:
 jobs:
   test-cli:
     runs-on: ubuntu-latest
+    timeout-minutes: 15
     env:
       CARGO_INCREMENTAL: 0
     steps:
@@ -16,7 +17,7 @@ jobs:
 
       - name: (PR comment) Set latest commit status as pending
         if: ${{ github.event_name == 'issue_comment' }}
-        uses: myrotvorets/set-commit-status-action@v2.0.0
+        uses: myrotvorets/set-commit-status-action@v2.0.1
         with:
           sha: ${{ steps.comment-branch.outputs.head_sha }}
           token: ${{ secrets.GITHUB_TOKEN }}
@@ -54,7 +55,7 @@ jobs:
         if: always()
 
       - name: (PR comment) Set latest commit status as ${{ job.status }}
-        uses: myrotvorets/set-commit-status-action@v2.0.0
+        uses: myrotvorets/set-commit-status-action@v2.0.1
         if: always() && github.event_name == 'issue_comment'
         with:
           sha: ${{ steps.comment-branch.outputs.head_sha }}
diff --git a/.github/workflows/comment-trigger.yml b/.github/workflows/comment-trigger.yml
index 1a6dba864219b..cde8982661374 100644
--- a/.github/workflows/comment-trigger.yml
+++ b/.github/workflows/comment-trigger.yml
@@ -48,6 +48,7 @@ jobs:
   validate:
     name: Validate comment
     runs-on: ubuntu-latest
+    timeout-minutes: 5
     if: |
       github.event.issue.pull_request && (
       contains(github.event.comment.body, '/ci-run-all')
       || contains(github.event.comment.body, '/ci-run-cli')
diff --git a/.github/workflows/compilation-timings.yml b/.github/workflows/compilation-timings.yml
index e96bea65ea946..5b26604109093 100644
--- a/.github/workflows/compilation-timings.yml
+++ b/.github/workflows/compilation-timings.yml
@@ -13,7 +13,7 @@ env:
 jobs:
   release-build-optimized:
     name: "Release Build (optimized)"
-    runs-on: [linux, ubuntu-20.04-8core]
+    runs-on: ubuntu-20.04-8core
     steps:
       - uses: colpal/actions-clean@v1
       - uses: actions/checkout@v3
@@ -24,7 +24,7 @@ jobs:
 
   release-build-normal:
     name: "Release Build (normal)"
-    runs-on: [linux, ubuntu-20.04-8core]
+    runs-on: ubuntu-20.04-8core
     env:
       # We're not actually doing a debug build, we're just turning off the logic
       # in release-flags.sh so that we don't override the Cargo "release" profile
@@ -40,7 +40,7 @@ jobs:
 
   debug-build:
     name: "Debug Build"
-    runs-on: [linux, ubuntu-20.04-8core]
+    runs-on: ubuntu-20.04-8core
     steps:
       - uses: colpal/actions-clean@v1
       - uses: actions/checkout@v3
@@ -51,7 +51,7 @@ jobs:
 
   debug-rebuild:
     name: "Debug Rebuild"
-    runs-on: [linux, ubuntu-20.04-8core]
+    runs-on: ubuntu-20.04-8core
     steps:
       - uses: colpal/actions-clean@v1
       - uses: actions/checkout@v3
@@ -64,7 +64,7 @@ jobs:
 
   check:
     name: "Cargo Check"
-    runs-on: [linux, ubuntu-20.04-8core]
+    runs-on: ubuntu-20.04-8core
     steps:
       - uses: colpal/actions-clean@v1
       - uses: actions/checkout@v3
diff --git a/.github/workflows/component_features.yml b/.github/workflows/component_features.yml
index c441d4bf374d6..41ff4cff96025 100644
--- a/.github/workflows/component_features.yml
+++ b/.github/workflows/component_features.yml
@@ -29,7 +29,7 @@ jobs:
 
       - name: (PR comment) Set latest commit status as pending
         if: github.event_name == 'issue_comment'
-        uses: myrotvorets/set-commit-status-action@v2.0.0
+        uses: myrotvorets/set-commit-status-action@v2.0.1
         with:
           sha: ${{ steps.comment-branch.outputs.head_sha }}
           token: ${{ secrets.GITHUB_TOKEN }}
@@ -53,7 +53,7 @@ jobs:
 
       - name: (PR comment) Set latest commit status as ${{ job.status }}
         if: always() && github.event_name == 'issue_comment'
-        uses: myrotvorets/set-commit-status-action@v2.0.0
+        uses: myrotvorets/set-commit-status-action@v2.0.1
         with:
           sha: ${{ steps.comment-branch.outputs.head_sha }}
           token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/create_preview_sites.yml b/.github/workflows/create_preview_sites.yml
index bb6b7aa8e0982..ecb2023c425a1 100644
--- a/.github/workflows/create_preview_sites.yml
+++ b/.github/workflows/create_preview_sites.yml
@@ -25,6 +25,7 @@ on:
 jobs:
   create_preview_site:
     runs-on: ubuntu-latest
+    timeout-minutes: 5
     steps:
 
       # Get the artifacts with the PR number and branch name
diff --git a/.github/workflows/cross.yml b/.github/workflows/cross.yml
index cc139a6efad55..ca1d49a02f8fa 100644
--- a/.github/workflows/cross.yml
+++ b/.github/workflows/cross.yml
@@ -7,6 +7,7 @@ jobs:
   cross-linux:
     name: Cross - ${{ matrix.target }}
     runs-on: ubuntu-latest
+    timeout-minutes: 45
     env:
       CARGO_INCREMENTAL: 0
     strategy:
@@ -18,6 +19,8 @@ jobs:
           - aarch64-unknown-linux-musl
           - armv7-unknown-linux-gnueabihf
           - armv7-unknown-linux-musleabihf
+          - arm-unknown-linux-gnueabi
+          - arm-unknown-linux-musleabi
 
     steps:
       - name: (PR comment) Get PR branch
@@ -27,7 +30,7 @@ jobs:
 
       - name: (PR comment) Set latest commit status as pending
         if: ${{ github.event_name == 'issue_comment' }}
-        uses: myrotvorets/set-commit-status-action@v2.0.0
+        uses: myrotvorets/set-commit-status-action@v2.0.1
         with:
           sha: ${{ steps.comment-branch.outputs.head_sha }}
           token: ${{ secrets.GITHUB_TOKEN }}
@@ -68,7 +71,7 @@ jobs:
           path: "./target/${{ matrix.target }}/debug/vector"
 
       - name: (PR comment) Set latest commit status as failed
-        uses: myrotvorets/set-commit-status-action@v2.0.0
+        uses: myrotvorets/set-commit-status-action@v2.0.1
         if: failure() && github.event_name == 'issue_comment'
         with:
           sha: ${{ steps.comment-branch.outputs.head_sha }}
@@ -79,6 +82,7 @@ jobs:
   update-pr-status:
     name: (PR comment) Signal result to PR
     runs-on: ubuntu-20.04
+    timeout-minutes: 5
     needs: cross-linux
     if: needs.cross-linux.result == 'success' && github.event_name == 'issue_comment'
     steps:
@@ -87,7 +91,7 @@ jobs:
         id: comment-branch
 
       - name: (PR comment) Submit PR result as success
-        uses: myrotvorets/set-commit-status-action@v2.0.0
+        uses: myrotvorets/set-commit-status-action@v2.0.1
         with:
           sha: ${{ steps.comment-branch.outputs.head_sha }}
           token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/deny.yml b/.github/workflows/deny.yml
index e3e7036bb8668..d3cbc15304e20 100644
--- a/.github/workflows/deny.yml
+++ b/.github/workflows/deny.yml
@@ -19,6 +19,7 @@ on:
 jobs:
   test-deny:
     runs-on: ubuntu-latest
+    timeout-minutes: 15
     env:
       CARGO_INCREMENTAL: 0
     steps:
@@ -29,7 +30,7 @@ jobs:
 
       - name: (PR comment) Set latest commit status as pending
         if: ${{ github.event_name == 'issue_comment' }}
-        uses: myrotvorets/set-commit-status-action@v2.0.0
+        uses: myrotvorets/set-commit-status-action@v2.0.1
         with:
           sha: ${{ steps.comment-branch.outputs.head_sha }}
           token: ${{ secrets.GITHUB_TOKEN }}
@@ -65,7 +66,7 @@ jobs:
         run: make check-deny
 
       - name: (PR comment) Set latest commit status as ${{ job.status }}
-        uses: myrotvorets/set-commit-status-action@v2.0.0
+        uses: myrotvorets/set-commit-status-action@v2.0.1
         if: always() && github.event_name == 'issue_comment'
         with:
           sha: ${{ steps.comment-branch.outputs.head_sha }}
diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml
new file mode 100644
index 0000000000000..3a6524f156c1b
--- /dev/null
+++ b/.github/workflows/e2e.yml
@@ -0,0 +1,116 @@
+# End to End Suite
+#
+# Runs on:
+#  - PRs if there are code changes to the source files that are noted in `vdev e2e ci-paths`
+#  - MQ (always pass)
+#  - Scheduled: at midnight UTC Tues-Sat
+
+name: E2E Test Suite
+
+on:
+  pull_request:
+  # Needs to pass by default in MQ
+  merge_group:
+    types: [checks_requested]
+  schedule:
+    # At midnight UTC Tue-Sat
+    - cron: '0 0 * * 2-6'
+
+concurrency:
+  # `github.event.number` exists for pull requests, otherwise fall back to SHA for merge queue
+  group: ${{ github.workflow }}-${{ github.event.number || github.event.merge_group.head_sha }}
+  cancel-in-progress: true
+
+env:
+  CONTAINER_TOOL: "docker"
+  DD_ENV: "ci"
+  DD_API_KEY: ${{ secrets.DD_API_KEY }}
+  TEST_DATADOG_API_KEY: ${{ secrets.CI_TEST_DATADOG_API_KEY }}
+  RUST_BACKTRACE: full
+  TEST_LOG: vector=debug
+  VERBOSE: true
+  CI: true
+  PROFILE: debug
+  # observing issues fetching boringssl via HTTPS in the OSX build, seeing if this helps
+  # can be removed when we switch back to the upstream openssl-sys crate
+  CARGO_NET_GIT_FETCH_WITH_CLI: true
+
+jobs:
+
+  changes:
+    if: github.event_name == 'pull_request'
+    uses: ./.github/workflows/changes.yml
+    with:
+      base_ref: ${{ github.event.pull_request.base.ref }}
+      head_ref: ${{ github.event.pull_request.head.ref }}
+      source: false
+      e2e_tests: true
+    secrets: inherit
+
+  e2e-tests:
+    name: E2E Tests
+    runs-on: ubuntu-20.04-8core
+    timeout-minutes: 45
+    needs: changes
+    if: always() && (
+      github.event_name == 'schedule' || (
+        needs.changes.outputs.all-e2e == 'true'
+        || needs.changes.outputs.e2e-datadog-logs == 'true'
+        || needs.changes.outputs.e2e-datadog-metrics == 'true'
+      )
+      )
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          submodules: "recursive"
+
+      - run: sudo npm -g install @datadog/datadog-ci
+
+      - run: docker image prune -af ; docker container prune -f
+
+      - name: Determine if secrets are defined (PR author is team member).
+        if: github.event_name == 'pull_request'
+        env:
+          GH_APP_DATADOG_VECTOR_CI_APP_ID: ${{ secrets.GH_APP_DATADOG_VECTOR_CI_APP_ID }}
+        run: |
+          if [[ "$GH_APP_DATADOG_VECTOR_CI_APP_ID" != "" ]] ; then
+            echo "PR_HAS_ACCESS_TO_SECRETS=true" >> "$GITHUB_ENV"
+          else
+            echo "PR_HAS_ACCESS_TO_SECRETS=false" >> "$GITHUB_ENV"
+          fi
+
+      - if: (github.event_name == 'schedule' || needs.changes.outputs.all-e2e == 'true' || needs.changes.outputs.e2e-datadog-logs == 'true') &&
+          (github.event_name != 'pull_request' || env.PR_HAS_ACCESS_TO_SECRETS == 'true')
+        name: e2e-datadog-logs
+        uses: nick-fields/retry@v3
+        with:
+          timeout_minutes: 35
+          max_attempts: 3
+          command: bash scripts/ci-int-e2e-test.sh e2e datadog-logs
+
+      - if: (github.event_name == 'schedule' || needs.changes.outputs.all-e2e == 'true' || needs.changes.outputs.e2e-datadog-metrics == 'true') &&
+          (github.event_name != 'pull_request' || env.PR_HAS_ACCESS_TO_SECRETS == 'true')
+        name: e2e-datadog-metrics
+        uses: nick-fields/retry@v3
+        with:
+          timeout_minutes: 35
+          max_attempts: 3
+          command: bash scripts/ci-int-e2e-test.sh e2e datadog-metrics
+
+
+  e2e-test-suite:
+    name: E2E Test Suite
+    runs-on: ubuntu-latest
+    timeout-minutes: 5
+    if: always()
+    needs: e2e-tests
+    env:
+      FAILED: ${{ contains(needs.*.result, 'failure') }}
+    steps:
+      - run: |
+          echo "failed=${{ env.FAILED }}"
+          if [[ "$FAILED" == "true" ]] ; then
+            exit 1
+          else
+            exit 0
+          fi
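The concurrency comment above is the key detail of the new workflow: pull requests carry `github.event.number`, merge-queue runs do not, so the head SHA is the fallback that keeps each run in its own cancelable group. In shell terms (variable names are mine, purely illustrative):

```bash
# Hypothetical illustration of the group-key fallback.
PR_NUMBER=""                     # unset/empty outside pull_request events
MERGE_GROUP_HEAD_SHA="deadbeef"  # placeholder SHA
GROUP="E2E Test Suite-${PR_NUMBER:-$MERGE_GROUP_HEAD_SHA}"
echo "$GROUP"  # -> "E2E Test Suite-deadbeef"; a new run with the same key
               # cancels the in-progress one (cancel-in-progress: true)
```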
diff --git a/.github/workflows/environment.yml b/.github/workflows/environment.yml
index c44057db7ca1e..0832855fba3e5 100644
--- a/.github/workflows/environment.yml
+++ b/.github/workflows/environment.yml
@@ -14,6 +14,7 @@ env:
 jobs:
   publish-new-environment:
     runs-on: ubuntu-20.04
+    timeout-minutes: 15
     steps:
       - name: (PR comment) Get PR branch
         if: ${{ github.event_name == 'issue_comment' }}
@@ -22,7 +23,7 @@ jobs:
 
       - name: (PR comment) Set latest commit status as pending
         if: ${{ github.event_name == 'issue_comment' }}
-        uses: myrotvorets/set-commit-status-action@v2.0.0
+        uses: myrotvorets/set-commit-status-action@v2.0.1
         with:
           sha: ${{ steps.comment-branch.outputs.head_sha }}
           token: ${{ secrets.GITHUB_TOKEN }}
@@ -42,7 +43,7 @@ jobs:
       - name: Set up QEMU
         uses: docker/setup-qemu-action@v3.0.0
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3.0.0
+        uses: docker/setup-buildx-action@v3.3.0
       - name: Login to DockerHub
         uses: docker/login-action@v3
         if: github.ref == 'refs/heads/master'
@@ -51,7 +52,7 @@ jobs:
           password: ${{ secrets.CI_DOCKER_PASSWORD }}
       - name: Extract metadata (tags, labels) for Docker
         id: meta
-        uses: docker/metadata-action@dbef88086f6cef02e264edb7dbf63250c17cef6c
+        uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81
         with:
           images: timberio/vector-dev
           flavor: |
@@ -63,7 +64,7 @@ jobs:
             org.opencontainers.image.title=Vector development environment
             org.opencontainers.image.url=https://github.com/vectordotdev/vector
       - name: Build and push
-        uses: docker/build-push-action@v5.1.0
+        uses: docker/build-push-action@v5.3.0
         with:
           context: .
           file: ./scripts/environment/Dockerfile
@@ -72,7 +73,7 @@ jobs:
           labels: ${{ steps.meta.outputs.labels }}
 
       - name: (PR comment) Set latest commit status as ${{ job.status }}
-        uses: myrotvorets/set-commit-status-action@v2.0.0
+        uses: myrotvorets/set-commit-status-action@v2.0.1
         if: always() && github.event_name == 'issue_comment'
         with:
           sha: ${{ steps.comment-branch.outputs.head_sha }}
diff --git a/.github/workflows/gardener_issue_comment.yml b/.github/workflows/gardener_issue_comment.yml
index 73625863650c7..91afeff7b3c0c 100644
--- a/.github/workflows/gardener_issue_comment.yml
+++ b/.github/workflows/gardener_issue_comment.yml
@@ -14,6 +14,7 @@ jobs:
   move-to-backlog:
     name: Move issues back to Gardener project board Triage
     runs-on: ubuntu-latest
+    timeout-minutes: 5
     if: ${{ !github.event.issue.pull_request }}
     steps:
       - name: Generate authentication token
diff --git a/.github/workflows/gardener_open_issue.yml b/.github/workflows/gardener_open_issue.yml
index 58e21be64f30d..3c10b64281d85 100644
--- a/.github/workflows/gardener_open_issue.yml
+++ b/.github/workflows/gardener_open_issue.yml
@@ -10,8 +10,9 @@ jobs:
   add-to-project:
     name: Add issue to Gardener project board
     runs-on: ubuntu-latest
+    timeout-minutes: 5
     steps:
-      - uses: actions/add-to-project@v0.5.0
+      - uses: actions/add-to-project@v1.0.1
         with:
           project-url: https://github.com/orgs/vectordotdev/projects/49
           github-token: ${{ secrets.GH_PROJECT_PAT }}
diff --git a/.github/workflows/gardener_open_pr.yml b/.github/workflows/gardener_open_pr.yml
index c047b196330d8..9791d32450b41 100644
--- a/.github/workflows/gardener_open_pr.yml
+++ b/.github/workflows/gardener_open_pr.yml
@@ -11,6 +11,7 @@ jobs:
   add-contributor-to-project:
     name: Add contributor PR to Gardener project board
     runs-on: ubuntu-latest
+    timeout-minutes: 5
     if: ${{ github.actor != 'dependabot[bot]' }}
     steps:
       - name: Generate authentication token
@@ -25,7 +26,7 @@ jobs:
           username: ${{ github.actor }}
           team: vector
           GITHUB_TOKEN: ${{ steps.generate_token.outputs.token }}
-      - uses: actions/add-to-project@v0.5.0
+      - uses: actions/add-to-project@v1.0.1
         if: ${{ steps.checkVectorMember.outputs.isTeamMember == 'false' }}
         with:
           project-url: https://github.com/orgs/vectordotdev/projects/49
@@ -33,9 +34,10 @@ jobs:
   add-dependabot-to-project:
     name: Add dependabot PR to Gardener project board
     runs-on: ubuntu-latest
+    timeout-minutes: 5
     if: ${{ github.actor == 'dependabot[bot]' }}
     steps:
-      - uses: actions/add-to-project@v0.5.0
+      - uses: actions/add-to-project@v1.0.1
         with:
           project-url: https://github.com/orgs/vectordotdev/projects/49
           github-token: ${{ secrets.GH_PROJECT_PAT }}
diff --git a/.github/workflows/gardener_remove_waiting_author.yml b/.github/workflows/gardener_remove_waiting_author.yml
index 9fe063e50b40d..0317d27454358 100644
--- a/.github/workflows/gardener_remove_waiting_author.yml
+++ b/.github/workflows/gardener_remove_waiting_author.yml
@@ -7,6 +7,7 @@ on:
 jobs:
   remove_label:
     runs-on: ubuntu-latest
+    timeout-minutes: 5
     steps:
       - uses: actions/checkout@v3
       - uses: actions-ecosystem/action-remove-labels@v1
diff --git a/.github/workflows/install-sh.yml b/.github/workflows/install-sh.yml
index effc2c46bf95d..edc14cd22012b 100644
--- a/.github/workflows/install-sh.yml
+++ b/.github/workflows/install-sh.yml
@@ -8,6 +8,7 @@ jobs:
 
   sync-install:
     runs-on: ubuntu-20.04
+    timeout-minutes: 10
     steps:
       - name: (PR comment) Get PR branch
         if: ${{ github.event_name == 'issue_comment' }}
@@ -16,7 +17,7 @@ jobs:
 
       - name: (PR comment) Set latest commit status as pending
         if: ${{ github.event_name == 'issue_comment' }}
-        uses: myrotvorets/set-commit-status-action@v2.0.0
+        uses: myrotvorets/set-commit-status-action@v2.0.1
         with:
           sha: ${{ steps.comment-branch.outputs.head_sha }}
           token: ${{ secrets.GITHUB_TOKEN }}
@@ -40,7 +41,7 @@ jobs:
         run: make sync-install
 
       - name: (PR comment) Set latest commit status as failed
-        uses: myrotvorets/set-commit-status-action@v2.0.0
+        uses: myrotvorets/set-commit-status-action@v2.0.1
         if: failure() && github.event_name == 'issue_comment'
         with:
           sha: ${{ steps.comment-branch.outputs.head_sha }}
@@ -51,6 +52,7 @@ jobs:
   test-install:
     needs: sync-install
     runs-on: ubuntu-20.04
+    timeout-minutes: 5
     steps:
       - run: sudo apt-get install --yes curl bc
       - run: curl --proto '=https' --tlsv1.2 -sSfL https://sh.vector.dev | bash -s -- -y
@@ -63,7 +65,7 @@ jobs:
 
       - name: (PR comment) Set latest commit status as ${{ job.status }}
         if: github.event_name == 'issue_comment'
-        uses: myrotvorets/set-commit-status-action@v2.0.0
+        uses: myrotvorets/set-commit-status-action@v2.0.1
         with:
           sha: ${{ steps.comment-branch.outputs.head_sha }}
           token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/integration-comment.yml b/.github/workflows/integration-comment.yml
index 2298ede1ae7c5..1c4281d77f01f 100644
--- a/.github/workflows/integration-comment.yml
+++ b/.github/workflows/integration-comment.yml
@@ -48,6 +48,7 @@ jobs:
   prep-pr:
     name: (PR comment) Signal pending to PR
     runs-on: ubuntu-latest
+    timeout-minutes: 5
     if: contains(github.event.comment.body, '/ci-run-integration') || contains(github.event.comment.body, '/ci-run-all')
     steps:
       - name: Generate authentication token
@@ -73,7 +74,7 @@ jobs:
         id: comment-branch
 
       - name: (PR comment) Set latest commit status as pending
-        uses: myrotvorets/set-commit-status-action@v2.0.0
+        uses: myrotvorets/set-commit-status-action@v2.0.1
         with:
           sha: ${{ steps.comment-branch.outputs.head_sha }}
           token: ${{ secrets.GITHUB_TOKEN }}
@@ -81,7 +82,8 @@ jobs:
 
   integration-tests:
     needs: prep-pr
-    runs-on: [linux, ubuntu-20.04-4core]
+    runs-on: ubuntu-20.04-4core
+    timeout-minutes: 90
     steps:
       - uses: actions/checkout@v3
         with:
           submodules: "recursive"
@@ -95,122 +97,122 @@
 
       - name: amqp
        if: ${{ contains(github.event.comment.body, '/ci-run-integration-amqp')
          || contains(github.event.comment.body, '/ci-run-integration-all')
          || contains(github.event.comment.body, '/ci-run-all') }}
-        uses: nick-fields/retry@v2
+        uses: nick-fields/retry@v3
        with:
          # First one requires more time, as we need to build the image from scratch
          timeout_minutes: 30
          max_attempts: 3
-          command: bash scripts/ci-integration-test.sh amqp
+          command: bash scripts/ci-int-e2e-test.sh int amqp
 
      - name: appsignal
        if: ${{ contains(github.event.comment.body, '/ci-run-integration-appsignal')
          || contains(github.event.comment.body, '/ci-run-integration-all')
          || contains(github.event.comment.body, '/ci-run-all') }}
-        uses: nick-fields/retry@v2
+        uses: nick-fields/retry@v3
        with:
          timeout_minutes: 30
          max_attempts: 3
-          command: bash scripts/ci-integration-test.sh appsignal
+          command: bash scripts/ci-int-e2e-test.sh int appsignal
 
      - name: aws
        if: ${{ contains(github.event.comment.body, '/ci-run-integration-aws')
          || contains(github.event.comment.body, '/ci-run-integration-all')
          || contains(github.event.comment.body, '/ci-run-all') }}
-        uses: nick-fields/retry@v2
+        uses: nick-fields/retry@v3
        with:
          timeout_minutes: 30
          max_attempts: 3
-          command: bash scripts/ci-integration-test.sh aws
+          command: bash scripts/ci-int-e2e-test.sh int aws
 
      - name: axiom
        if: ${{ contains(github.event.comment.body, '/ci-run-integration-axiom')
          || contains(github.event.comment.body, '/ci-run-integration-all')
          || contains(github.event.comment.body, '/ci-run-all') }}
-        uses: nick-fields/retry@v2
+        uses: nick-fields/retry@v3
        with:
          timeout_minutes: 30
          max_attempts: 3
-          command: bash scripts/ci-integration-test.sh axiom
+          command: bash scripts/ci-int-e2e-test.sh int axiom
 
      - name: azure
        if: ${{ contains(github.event.comment.body, '/ci-run-integration-azure')
          || contains(github.event.comment.body, '/ci-run-integration-all')
          || contains(github.event.comment.body, '/ci-run-all') }}
-        uses: nick-fields/retry@v2
+        uses: nick-fields/retry@v3
        with:
          timeout_minutes: 30
          max_attempts: 3
-          command: bash scripts/ci-integration-test.sh azure
+          command: bash scripts/ci-int-e2e-test.sh int azure
 
      - name: clickhouse
        if: ${{ contains(github.event.comment.body, '/ci-run-integration-clickhouse')
          || contains(github.event.comment.body, '/ci-run-integration-all')
          || contains(github.event.comment.body, '/ci-run-all') }}
-        uses: nick-fields/retry@v2
+        uses: nick-fields/retry@v3
        with:
          timeout_minutes: 30
          max_attempts: 3
-          command: bash scripts/ci-integration-test.sh clickhouse
+          command: bash scripts/ci-int-e2e-test.sh int clickhouse
 
      - name: databend
        if: ${{ contains(github.event.comment.body, '/ci-run-integration-databend')
          || contains(github.event.comment.body, '/ci-run-integration-all')
          || contains(github.event.comment.body, '/ci-run-all') }}
-        uses: nick-fields/retry@v2
+        uses: nick-fields/retry@v3
        with:
          timeout_minutes: 30
          max_attempts: 3
-          command: bash scripts/ci-integration-test.sh databend
+          command: bash scripts/ci-int-e2e-test.sh int databend
 
      - name: datadog-agent
        if: ${{ contains(github.event.comment.body, '/ci-run-integration-datadog-agent')
          || contains(github.event.comment.body, '/ci-run-integration-all')
          || contains(github.event.comment.body, '/ci-run-all') }}
-        uses: nick-fields/retry@v2
+        uses: nick-fields/retry@v3
        with:
          timeout_minutes: 30
          max_attempts: 3
-          command: bash scripts/ci-integration-test.sh datadog-agent
+          command: bash scripts/ci-int-e2e-test.sh int datadog-agent
 
      - name: datadog-logs
        if: ${{ contains(github.event.comment.body, '/ci-run-integration-datadog-logs')
          || contains(github.event.comment.body, '/ci-run-integration-all')
          || contains(github.event.comment.body, '/ci-run-all') }}
-        uses: nick-fields/retry@v2
+        uses: nick-fields/retry@v3
        with:
          timeout_minutes: 30
          max_attempts: 3
-          command: bash scripts/ci-integration-test.sh datadog-logs
+          command: bash scripts/ci-int-e2e-test.sh int datadog-logs
 
      - name: datadog-metrics
        if: ${{ contains(github.event.comment.body, '/ci-run-integration-datadog-metrics')
          || contains(github.event.comment.body, '/ci-run-integration-all')
          || contains(github.event.comment.body, '/ci-run-all') }}
-        uses: nick-fields/retry@v2
+        uses: nick-fields/retry@v3
        with:
          timeout_minutes: 30
          max_attempts: 3
-          command: bash scripts/ci-integration-test.sh datadog-metrics
+          command: bash scripts/ci-int-e2e-test.sh int datadog-metrics
 
      - name: datadog-traces
        if: ${{ contains(github.event.comment.body, '/ci-run-integration-datadog-traces')
          || contains(github.event.comment.body, '/ci-run-integration-all')
          || contains(github.event.comment.body, '/ci-run-all') }}
-        uses: nick-fields/retry@v2
+        uses: nick-fields/retry@v3
        with:
          timeout_minutes: 30
          max_attempts: 3
-          command: bash scripts/ci-integration-test.sh datadog-traces
+          command: bash scripts/ci-int-e2e-test.sh int datadog-traces
 
      - name: dnstap
        if: ${{ contains(github.event.comment.body, '/ci-run-integration-dnstap')
          || contains(github.event.comment.body, '/ci-run-integration-all')
          || contains(github.event.comment.body, '/ci-run-all') }}
-        uses: nick-fields/retry@v2
+        uses: nick-fields/retry@v3
        with:
          timeout_minutes: 30
          max_attempts: 3
-          command: bash scripts/ci-integration-test.sh dnstap
+          command: bash scripts/ci-int-e2e-test.sh int dnstap
 
      - run: docker image prune -af --filter=label!=vector-test-runner=true ; docker container prune -f
 
@@ -218,131 +220,141 @@
      - name: docker-logs
        if: ${{ contains(github.event.comment.body, '/ci-run-integration-docker-logs')
          || contains(github.event.comment.body, '/ci-run-integration-all')
          || contains(github.event.comment.body, '/ci-run-all') }}
-        uses: nick-fields/retry@v2
+        uses: nick-fields/retry@v3
        with:
          timeout_minutes: 30
          max_attempts: 3
-          command: bash scripts/ci-integration-test.sh docker-logs
+          command: bash scripts/ci-int-e2e-test.sh int docker-logs
 
      - name: elasticsearch
        if: ${{ contains(github.event.comment.body, '/ci-run-integration-elasticsearch')
          || contains(github.event.comment.body, '/ci-run-integration-all')
          || contains(github.event.comment.body, '/ci-run-all') }}
-        uses: nick-fields/retry@v2
+        uses: nick-fields/retry@v3
        with:
          timeout_minutes: 30
          max_attempts: 3
-          command: bash scripts/ci-integration-test.sh elasticsearch
+          command: bash scripts/ci-int-e2e-test.sh int elasticsearch
 
      - name: eventstoredb
        if: ${{ contains(github.event.comment.body, '/ci-run-integration-eventstoredb')
          || contains(github.event.comment.body, '/ci-run-integration-all')
          || contains(github.event.comment.body, '/ci-run-all') }}
-        uses: nick-fields/retry@v2
+        uses: nick-fields/retry@v3
        with:
          timeout_minutes: 30
          max_attempts: 3
-          command: bash scripts/ci-integration-test.sh eventstoredb
+          command: bash scripts/ci-int-e2e-test.sh int eventstoredb
 
      - name: fluent
        if: ${{ contains(github.event.comment.body, '/ci-run-integration-fluent')
          || contains(github.event.comment.body, '/ci-run-integration-all')
          || contains(github.event.comment.body, '/ci-run-all') }}
-        uses: nick-fields/retry@v2
+        uses: nick-fields/retry@v3
        with:
          timeout_minutes: 30
          max_attempts: 3
-          command: bash scripts/ci-integration-test.sh fluent
+          command: bash scripts/ci-int-e2e-test.sh int fluent
 
      - name: gcp
        if: ${{ contains(github.event.comment.body, '/ci-run-integration-gcp')
          || contains(github.event.comment.body, '/ci-run-integration-all')
          || contains(github.event.comment.body, '/ci-run-all') }}
-        uses: nick-fields/retry@v2
+        uses: nick-fields/retry@v3
        with:
          timeout_minutes: 30
          max_attempts: 3
-          command: bash scripts/ci-integration-test.sh gcp
+          command: bash scripts/ci-int-e2e-test.sh int gcp
 
      - name: greptimedb
        if: ${{ contains(github.event.comment.body, '/ci-run-integration-greptimedb')
          || contains(github.event.comment.body, '/ci-run-integration-all')
          || contains(github.event.comment.body, '/ci-run-all') }}
-        uses: nick-fields/retry@v2
+        uses: nick-fields/retry@v3
        with:
          timeout_minutes: 30
          max_attempts: 3
-          command: bash scripts/ci-integration-test.sh greptimedb
+          command: bash scripts/ci-int-e2e-test.sh int greptimedb
 
      - name: humio
        if: ${{ contains(github.event.comment.body, '/ci-run-integration-humio')
          || contains(github.event.comment.body, '/ci-run-integration-all')
          || contains(github.event.comment.body, '/ci-run-all') }}
-        uses: nick-fields/retry@v2
+        uses: nick-fields/retry@v3
        with:
          timeout_minutes: 30
          max_attempts: 3
-          command: bash scripts/ci-integration-test.sh humio
+          command: bash scripts/ci-int-e2e-test.sh int humio
 
      - name: http-client
        if: ${{ contains(github.event.comment.body, '/ci-run-integration-http-client')
          || contains(github.event.comment.body, '/ci-run-integration-all')
          || contains(github.event.comment.body, '/ci-run-all') }}
-        uses: nick-fields/retry@v2
+        uses: nick-fields/retry@v3
        with:
          timeout_minutes: 30
          max_attempts: 3
-          command: bash scripts/ci-integration-test.sh http-client
+          command: bash scripts/ci-int-e2e-test.sh int http-client
 
      - name: influxdb
        if: ${{ contains(github.event.comment.body, '/ci-run-integration-influxdb')
          || contains(github.event.comment.body, '/ci-run-integration-all')
          || contains(github.event.comment.body, '/ci-run-all') }}
-        uses: nick-fields/retry@v2
+        uses: nick-fields/retry@v3
        with:
          timeout_minutes: 30
          max_attempts: 3
-          command: bash scripts/ci-integration-test.sh influxdb
+          command: bash scripts/ci-int-e2e-test.sh int influxdb
 
      - name: kafka
        if: ${{ contains(github.event.comment.body, '/ci-run-integration-kafka')
          || contains(github.event.comment.body, '/ci-run-integration-all')
          || contains(github.event.comment.body, '/ci-run-all') }}
-        uses: nick-fields/retry@v2
+        uses: nick-fields/retry@v3
        with:
          timeout_minutes: 30
          max_attempts: 3
-          command: bash scripts/ci-integration-test.sh kafka
+          command: bash scripts/ci-int-e2e-test.sh int kafka
 
      - name: logstash
        if: ${{ contains(github.event.comment.body, '/ci-run-integration-logstash')
          || contains(github.event.comment.body, '/ci-run-integration-all')
          || contains(github.event.comment.body, '/ci-run-all') }}
-        uses: nick-fields/retry@v2
+        uses: nick-fields/retry@v3
        with:
          timeout_minutes: 30
          max_attempts: 3
-          command: bash scripts/ci-integration-test.sh logstash
+          command: bash scripts/ci-int-e2e-test.sh int logstash
 
      - name: loki
        if: ${{ contains(github.event.comment.body, '/ci-run-integration-loki')
          || contains(github.event.comment.body, '/ci-run-integration-all')
          || contains(github.event.comment.body, '/ci-run-all') }}
-        uses: nick-fields/retry@v2
+        uses: nick-fields/retry@v3
        with:
          timeout_minutes: 30
          max_attempts: 3
-          command: bash scripts/ci-integration-test.sh loki
+          command: bash scripts/ci-int-e2e-test.sh int loki
 
      - name: mongodb
        if: ${{ contains(github.event.comment.body, '/ci-run-integration-mongodb')
          || contains(github.event.comment.body, '/ci-run-integration-all')
          || contains(github.event.comment.body, '/ci-run-all') }}
-        uses: nick-fields/retry@v2
+        uses: nick-fields/retry@v3
        with:
          timeout_minutes: 30
          max_attempts: 3
-          command: bash scripts/ci-integration-test.sh mongodb
+          command: bash scripts/ci-int-e2e-test.sh int mongodb
+
+      - name: mqtt
+        if: ${{ contains(github.event.comment.body, '/ci-run-integration-mqtt')
+          || contains(github.event.comment.body, '/ci-run-integration-all')
+          || contains(github.event.comment.body, '/ci-run-all') }}
+        uses: nick-fields/retry@v3
+        with:
+          timeout_minutes: 30
+          max_attempts: 3
+          command: bash scripts/ci-int-e2e-test.sh int mqtt
 
      - run: docker image prune -af --filter=label!=vector-test-runner=true ; docker container prune -f
 
@@ -350,107 +362,144 @@
      - name: nats
        if: ${{ contains(github.event.comment.body, '/ci-run-integration-nats')
          || contains(github.event.comment.body, '/ci-run-integration-all')
          || contains(github.event.comment.body, '/ci-run-all') }}
-        uses: nick-fields/retry@v2
+        uses: nick-fields/retry@v3
        with:
          timeout_minutes: 30
          max_attempts: 3
-          command: bash scripts/ci-integration-test.sh nats
+          command: bash scripts/ci-int-e2e-test.sh int nats
 
      - name: nginx
        if: ${{ contains(github.event.comment.body, '/ci-run-integration-nginx')
          || contains(github.event.comment.body, '/ci-run-integration-all')
          || contains(github.event.comment.body, '/ci-run-all') }}
-        uses: nick-fields/retry@v2
+        uses: nick-fields/retry@v3
        with:
          timeout_minutes: 30
          max_attempts: 3
-          command: bash scripts/ci-integration-test.sh nginx
+          command: bash scripts/ci-int-e2e-test.sh int nginx
 
      - name: opentelemetry
        if: ${{ contains(github.event.comment.body, '/ci-run-integration-opentelemetry')
          || contains(github.event.comment.body, '/ci-run-integration-all')
          || contains(github.event.comment.body, '/ci-run-all') }}
-        uses: nick-fields/retry@v2
+        uses: nick-fields/retry@v3
        with:
          timeout_minutes: 30
          max_attempts: 3
-          command: bash scripts/ci-integration-test.sh opentelemetry
+          command: bash scripts/ci-int-e2e-test.sh int opentelemetry
 
      - name: postgres
        if: ${{ contains(github.event.comment.body, '/ci-run-integration-postgres')
          || contains(github.event.comment.body, '/ci-run-integration-all')
          || contains(github.event.comment.body, '/ci-run-all') }}
-        uses: nick-fields/retry@v2
+        uses: nick-fields/retry@v3
        with:
          timeout_minutes: 30
          max_attempts: 3
-          command: bash scripts/ci-integration-test.sh postgres
+          command: bash scripts/ci-int-e2e-test.sh int postgres
 
      - name: prometheus
        if: ${{ contains(github.event.comment.body, '/ci-run-integration-prometheus')
          || contains(github.event.comment.body, '/ci-run-integration-all')
          || contains(github.event.comment.body, '/ci-run-all') }}
-        uses: nick-fields/retry@v2
+        uses: nick-fields/retry@v3
        with:
          timeout_minutes: 30
          max_attempts: 3
-          command: bash scripts/ci-integration-test.sh prometheus
+          command: bash scripts/ci-int-e2e-test.sh int prometheus
 
      - name: pulsar
        if: ${{ contains(github.event.comment.body, '/ci-run-integration-pulsar')
          || contains(github.event.comment.body, '/ci-run-integration-all')
          || contains(github.event.comment.body, '/ci-run-all') }}
-        uses: nick-fields/retry@v2
+        uses: nick-fields/retry@v3
        with:
          timeout_minutes: 30
          max_attempts: 3
-          command: bash scripts/ci-integration-test.sh pulsar
+          command: bash scripts/ci-int-e2e-test.sh int pulsar
 
      - name: redis
        if: ${{ contains(github.event.comment.body, '/ci-run-integration-redis')
          || contains(github.event.comment.body, '/ci-run-integration-all')
          || contains(github.event.comment.body, '/ci-run-all') }}
-        uses: nick-fields/retry@v2
+        uses: nick-fields/retry@v3
        with:
          timeout_minutes: 30
          max_attempts: 3
-          command: bash scripts/ci-integration-test.sh redis
+          command: bash scripts/ci-int-e2e-test.sh int redis
 
      - name: shutdown
        if: ${{ contains(github.event.comment.body, '/ci-run-integration-shutdown')
          || contains(github.event.comment.body, '/ci-run-integration-all')
          || contains(github.event.comment.body, '/ci-run-all') }}
-        uses: nick-fields/retry@v2
+        uses: nick-fields/retry@v3
        with:
          timeout_minutes: 30
          max_attempts: 3
-          command: bash scripts/ci-integration-test.sh shutdown
+          command: bash scripts/ci-int-e2e-test.sh int shutdown
 
      - name: splunk
        if: ${{ contains(github.event.comment.body, '/ci-run-integration-splunk')
          || contains(github.event.comment.body, '/ci-run-integration-all')
          || contains(github.event.comment.body, '/ci-run-all') }}
-        uses: nick-fields/retry@v2
+        uses: nick-fields/retry@v3
        with:
          timeout_minutes: 30
          max_attempts: 3
-          command: bash scripts/ci-integration-test.sh splunk
+          command: bash scripts/ci-int-e2e-test.sh int splunk
 
      - name: webhdfs
        if: ${{ contains(github.event.comment.body, '/ci-run-integration-webhdfs')
          || contains(github.event.comment.body, '/ci-run-integration-all')
          || contains(github.event.comment.body, '/ci-run-all') }}
-        uses: nick-fields/retry@v2
+        uses: nick-fields/retry@v3
        with:
          timeout_minutes: 30
          max_attempts: 3
-          command: bash scripts/ci-integration-test.sh webhdfs
+          command: bash scripts/ci-int-e2e-test.sh int webhdfs
+
+  e2e-tests:
+    needs: prep-pr
+    runs-on: ubuntu-20.04-8core
+    timeout-minutes: 30
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          submodules: "recursive"
+
+      - run: sudo npm -g install @datadog/datadog-ci
+
+      - run: docker image prune -af ; docker container prune -f
+
+      - name: e2e-datadog-logs
+        if: ${{ contains(github.event.comment.body, '/ci-run-e2e-datadog-logs')
+          || contains(github.event.comment.body, '/ci-run-integration-all')
+          || contains(github.event.comment.body, '/ci-run-all') }}
+        uses: nick-fields/retry@v3
+        with:
+          timeout_minutes: 35
+          max_attempts: 3
+          command: bash scripts/ci-int-e2e-test.sh e2e datadog-logs
+
+      - name: e2e-datadog-metrics
+        if: ${{ contains(github.event.comment.body, '/ci-run-e2e-datadog-metrics')
+          || contains(github.event.comment.body, '/ci-run-integration-all')
+          || contains(github.event.comment.body, '/ci-run-all') }}
+        uses: nick-fields/retry@v3
+        with:
+          timeout_minutes: 35
+          max_attempts: 3
+          command: bash scripts/ci-int-e2e-test.sh e2e datadog-metrics
 
  update-pr-status:
    name: Signal result to PR
    runs-on: ubuntu-latest
-    needs: integration-tests
+    timeout-minutes: 5
+    needs:
+      - integration-tests
+      - e2e-tests
    if: always() && (contains(github.event.comment.body, '/ci-run-integration') || contains(github.event.comment.body, '/ci-run-all'))
+    env:
+      FAILED: ${{ contains(needs.*.result, 'failure') }}
    steps:
      - name: Generate authentication token
        id: generate_token
@@ -458,6 +507,7 @@
        with:
          app_id: ${{ secrets.GH_APP_DATADOG_VECTOR_CI_APP_ID }}
          private_key: ${{ secrets.GH_APP_DATADOG_VECTOR_CI_APP_PRIVATE_KEY }}
+
      - name: Validate issue comment
        if: github.event_name == 'issue_comment'
        uses: tspascoal/get-user-teams-membership@v3
@@ -467,12 +517,22 @@
          GITHUB_TOKEN: ${{ steps.generate_token.outputs.token }}
 
      - name: (PR comment) Get PR branch
+        if: github.event_name == 'issue_comment' && env.FAILED != 'true'
        uses: xt0rted/pull-request-comment-branch@v2
        id: comment-branch
 
-      - name: (PR comment) Submit PR result as ${{ needs.integration-tests.result }}
-        uses: myrotvorets/set-commit-status-action@v2.0.0
+      - name: (PR comment) Submit PR result as success
+        if: github.event_name == 'issue_comment' && env.FAILED != 'true'
+        uses: myrotvorets/set-commit-status-action@v2.0.1
        with:
          sha: ${{ steps.comment-branch.outputs.head_sha }}
          token: ${{ secrets.GITHUB_TOKEN }}
-          status: ${{ needs.integration-tests.result }}
+          status: 'success'
+
+      - run: |
+          echo "failed=${{ env.FAILED }}"
+          if [[ "$FAILED" == "true" ]] ; then
+            exit 1
+          else
+            exit 0
+          fi
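All of the steps in this workflow are comment-gated: a maintainer re-runs a single suite by posting the matching trigger phrase on the PR. For example, assuming the GitHub CLI is available (the PR number below is hypothetical):

```bash
# Re-run just the Datadog logs e2e suite on a PR:
gh pr comment 12345 --body "/ci-run-e2e-datadog-logs"
# Or the entire integration matrix:
gh pr comment 12345 --body "/ci-run-integration-all"
```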
+++ b/.github/workflows/integration-test.yml @@ -30,7 +30,7 @@ env: jobs: test-integration: - runs-on: [linux, ubuntu-20.04-4core] + runs-on: ubuntu-20.04-4core timeout-minutes: 40 if: inputs.if || github.event_name == 'workflow_dispatch' steps: diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml index 709f2091fe0b1..f499c4cc649b6 100644 --- a/.github/workflows/integration.yml +++ b/.github/workflows/integration.yml @@ -48,7 +48,7 @@ jobs: integration-tests: name: Integration Tests - runs-on: [linux, ubuntu-20.04-4core] + runs-on: ubuntu-20.04-4core needs: changes if: always() && ( github.event_name == 'merge_group' || ( @@ -112,298 +112,309 @@ jobs: - if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.all-int == 'true' || needs.changes.outputs.amqp == 'true' }} name: amqp - uses: nick-fields/retry@v2 + uses: nick-fields/retry@v3 with: timeout_minutes: 30 max_attempts: 3 - command: bash scripts/ci-integration-test.sh amqp + command: bash scripts/ci-int-e2e-test.sh int amqp - if: (github.event_name == 'merge_group' || needs.changes.outputs.all-int == 'true' || needs.changes.outputs.appsignal == 'true') && (github.event_name != 'pull_request' || env.PR_HAS_ACCESS_TO_SECRETS == 'true') name: appsignal - uses: nick-fields/retry@v2 + uses: nick-fields/retry@v3 with: timeout_minutes: 30 max_attempts: 3 - command: bash scripts/ci-integration-test.sh appsignal + command: bash scripts/ci-int-e2e-test.sh int appsignal - if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.all-int == 'true' || needs.changes.outputs.aws == 'true' }} name: aws - uses: nick-fields/retry@v2 + uses: nick-fields/retry@v3 with: timeout_minutes: 30 max_attempts: 3 - command: bash scripts/ci-integration-test.sh aws + command: bash scripts/ci-int-e2e-test.sh int aws - if: (github.event_name == 'merge_group' || needs.changes.outputs.all-int == 'true' || needs.changes.outputs.axiom == 'true') && (github.event_name != 'pull_request' || env.PR_HAS_ACCESS_TO_SECRETS == 'true') name: axiom - uses: nick-fields/retry@v2 + uses: nick-fields/retry@v3 with: timeout_minutes: 30 max_attempts: 3 - command: bash scripts/ci-integration-test.sh axiom + command: bash scripts/ci-int-e2e-test.sh int axiom - if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.all-int == 'true' || needs.changes.outputs.azure == 'true' }} name: azure - uses: nick-fields/retry@v2 + uses: nick-fields/retry@v3 with: timeout_minutes: 30 max_attempts: 3 - command: bash scripts/ci-integration-test.sh azure + command: bash scripts/ci-int-e2e-test.sh int azure - if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.all-int == 'true' || needs.changes.outputs.clickhouse == 'true' }} name: clickhouse - uses: nick-fields/retry@v2 + uses: nick-fields/retry@v3 with: timeout_minutes: 30 max_attempts: 3 - command: bash scripts/ci-integration-test.sh clickhouse + command: bash scripts/ci-int-e2e-test.sh int clickhouse - if: (github.event_name == 'merge_group' || needs.changes.outputs.all-int == 'true' || needs.changes.outputs.databend == 'true') && (github.event_name != 'pull_request' || env.PR_HAS_ACCESS_TO_SECRETS == 'true') name: databend - uses: nick-fields/retry@v2 + uses: nick-fields/retry@v3 with: timeout_minutes: 30 max_attempts: 3 - command: bash scripts/ci-integration-test.sh databend + command: bash scripts/ci-int-e2e-test.sh int databend - if: (github.event_name == 'merge_group' || needs.changes.outputs.all-int == 'true' || needs.changes.outputs.datadog-agent == 'true') && 
(github.event_name != 'pull_request' || env.PR_HAS_ACCESS_TO_SECRETS == 'true') name: datadog-agent - uses: nick-fields/retry@v2 + uses: nick-fields/retry@v3 with: timeout_minutes: 30 max_attempts: 3 - command: bash scripts/ci-integration-test.sh datadog-agent + command: bash scripts/ci-int-e2e-test.sh int datadog-agent - if: (github.event_name == 'merge_group' || needs.changes.outputs.all-int == 'true' || needs.changes.outputs.datadog-logs == 'true') && (github.event_name != 'pull_request' || env.PR_HAS_ACCESS_TO_SECRETS == 'true') name: datadog-logs - uses: nick-fields/retry@v2 + uses: nick-fields/retry@v3 with: timeout_minutes: 30 max_attempts: 3 - command: bash scripts/ci-integration-test.sh datadog-logs + command: bash scripts/ci-int-e2e-test.sh int datadog-logs + + - run: docker image prune -af --filter=label!=vector-test-runner=true ; docker container prune -f - if: (github.event_name == 'merge_group' || needs.changes.outputs.all-int == 'true' || needs.changes.outputs.datadog-metrics == 'true') && (github.event_name != 'pull_request' || env.PR_HAS_ACCESS_TO_SECRETS == 'true') name: datadog-metrics - uses: nick-fields/retry@v2 + uses: nick-fields/retry@v3 with: timeout_minutes: 30 max_attempts: 3 - command: bash scripts/ci-integration-test.sh datadog-metrics + command: bash scripts/ci-int-e2e-test.sh int datadog-metrics - if: (github.event_name == 'merge_group' || needs.changes.outputs.all-int == 'true' || needs.changes.outputs.datadog-traces == 'true') && (github.event_name != 'pull_request' || env.PR_HAS_ACCESS_TO_SECRETS == 'true') name: datadog-traces - uses: nick-fields/retry@v2 + uses: nick-fields/retry@v3 with: timeout_minutes: 30 max_attempts: 3 - command: bash scripts/ci-integration-test.sh datadog-traces + command: bash scripts/ci-int-e2e-test.sh int datadog-traces - if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.all-int == 'true' || needs.changes.outputs.dnstap == 'true' }} name: dnstap - uses: nick-fields/retry@v2 + uses: nick-fields/retry@v3 with: timeout_minutes: 30 max_attempts: 3 - command: bash scripts/ci-integration-test.sh dnstap - - - run: docker image prune -af --filter=label!=vector-test-runner=true ; docker container prune -f + command: bash scripts/ci-int-e2e-test.sh int dnstap - if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.all-int == 'true' || needs.changes.outputs.docker-logs == 'true' }} name: docker-logs - uses: nick-fields/retry@v2 + uses: nick-fields/retry@v3 with: timeout_minutes: 30 max_attempts: 3 - command: bash scripts/ci-integration-test.sh docker-logs + command: bash scripts/ci-int-e2e-test.sh int docker-logs - if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.all-int == 'true' || needs.changes.outputs.elasticsearch == 'true' }} name: elasticsearch - uses: nick-fields/retry@v2 + uses: nick-fields/retry@v3 with: timeout_minutes: 30 max_attempts: 3 - command: bash scripts/ci-integration-test.sh elasticsearch + command: bash scripts/ci-int-e2e-test.sh int elasticsearch - if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.all-int == 'true' || needs.changes.outputs.eventstoredb == 'true' }} name: eventstoredb - uses: nick-fields/retry@v2 + uses: nick-fields/retry@v3 with: timeout_minutes: 30 max_attempts: 3 - command: bash scripts/ci-integration-test.sh eventstoredb + command: bash scripts/ci-int-e2e-test.sh int eventstoredb - if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.all-int == 'true' || needs.changes.outputs.fluent == 'true' }} name: fluent - 
uses: nick-fields/retry@v2 + uses: nick-fields/retry@v3 with: timeout_minutes: 30 max_attempts: 3 - command: bash scripts/ci-integration-test.sh fluent + command: bash scripts/ci-int-e2e-test.sh int fluent - if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.all-int == 'true' || needs.changes.outputs.gcp == 'true' }} name: gcp - uses: nick-fields/retry@v2 + uses: nick-fields/retry@v3 with: timeout_minutes: 30 max_attempts: 3 - command: bash scripts/ci-integration-test.sh gcp + command: bash scripts/ci-int-e2e-test.sh int gcp - if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.all-int == 'true' || needs.changes.outputs.greptimedb == 'true' }} name: greptimedb - uses: nick-fields/retry@v2 + uses: nick-fields/retry@v3 with: timeout_minutes: 30 max_attempts: 3 - command: bash scripts/ci-integration-test.sh greptimedb + command: bash scripts/ci-int-e2e-test.sh int greptimedb + + - run: docker image prune -af --filter=label!=vector-test-runner=true ; docker container prune -f - if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.all-int == 'true' || needs.changes.outputs.humio == 'true' }} name: humio - uses: nick-fields/retry@v2 + uses: nick-fields/retry@v3 with: timeout_minutes: 30 max_attempts: 3 - command: bash scripts/ci-integration-test.sh humio + command: bash scripts/ci-int-e2e-test.sh int humio - if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.all-int == 'true' || needs.changes.outputs.http-client == 'true' }} name: http-client - uses: nick-fields/retry@v2 + uses: nick-fields/retry@v3 with: timeout_minutes: 30 max_attempts: 3 - command: bash scripts/ci-integration-test.sh http-client + command: bash scripts/ci-int-e2e-test.sh int http-client - if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.all-int == 'true' || needs.changes.outputs.influxdb == 'true' }} name: influxdb - uses: nick-fields/retry@v2 + uses: nick-fields/retry@v3 with: timeout_minutes: 30 max_attempts: 3 - command: bash scripts/ci-integration-test.sh influxdb + command: bash scripts/ci-int-e2e-test.sh int influxdb - if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.all-int == 'true' || needs.changes.outputs.kafka == 'true' }} name: kafka - uses: nick-fields/retry@v2 + uses: nick-fields/retry@v3 with: timeout_minutes: 30 max_attempts: 3 - command: bash scripts/ci-integration-test.sh kafka + command: bash scripts/ci-int-e2e-test.sh int kafka - if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.all-int == 'true' || needs.changes.outputs.logstash == 'true' }} name: logstash - uses: nick-fields/retry@v2 + uses: nick-fields/retry@v3 with: timeout_minutes: 30 max_attempts: 3 - command: bash scripts/ci-integration-test.sh logstash + command: bash scripts/ci-int-e2e-test.sh int logstash - if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.all-int == 'true' || needs.changes.outputs.loki == 'true' }} name: loki - uses: nick-fields/retry@v2 + uses: nick-fields/retry@v3 with: timeout_minutes: 30 max_attempts: 3 - command: bash scripts/ci-integration-test.sh loki + command: bash scripts/ci-int-e2e-test.sh int loki - if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.all-int == 'true' || needs.changes.outputs.mongodb == 'true' }} name: mongodb - uses: nick-fields/retry@v2 + uses: nick-fields/retry@v3 with: timeout_minutes: 30 max_attempts: 3 - command: bash scripts/ci-integration-test.sh mongodb + command: bash scripts/ci-int-e2e-test.sh int mongodb - - run: docker image prune -af 
--filter=label!=vector-test-runner=true ; docker container prune -f + - if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.all-int == 'true' || needs.changes.outputs.mqtt == 'true' }} + name: mqtt + uses: nick-fields/retry@v3 + with: + timeout_minutes: 30 + max_attempts: 3 + command: bash scripts/ci-int-e2e-test.sh int mqtt - if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.all-int == 'true' || needs.changes.outputs.nats == 'true' }} name: nats - uses: nick-fields/retry@v2 + uses: nick-fields/retry@v3 with: timeout_minutes: 30 max_attempts: 3 - command: bash scripts/ci-integration-test.sh nats + command: bash scripts/ci-int-e2e-test.sh int nats - if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.all-int == 'true' || needs.changes.outputs.nginx == 'true' }} name: nginx - uses: nick-fields/retry@v2 + uses: nick-fields/retry@v3 with: timeout_minutes: 30 max_attempts: 3 - command: bash scripts/ci-integration-test.sh nginx + command: bash scripts/ci-int-e2e-test.sh int nginx + + - run: docker image prune -af --filter=label!=vector-test-runner=true ; docker container prune -f - if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.all-int == 'true' || needs.changes.outputs.opentelemetry == 'true' }} name: opentelemetry - uses: nick-fields/retry@v2 + uses: nick-fields/retry@v3 with: timeout_minutes: 30 max_attempts: 3 - command: bash scripts/ci-integration-test.sh opentelemetry + command: bash scripts/ci-int-e2e-test.sh int opentelemetry - if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.all-int == 'true' || needs.changes.outputs.postgres == 'true' }} name: postgres - uses: nick-fields/retry@v2 + uses: nick-fields/retry@v3 with: timeout_minutes: 30 max_attempts: 3 - command: bash scripts/ci-integration-test.sh postgres + command: bash scripts/ci-int-e2e-test.sh int postgres - if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.all-int == 'true' || needs.changes.outputs.prometheus == 'true' }} name: prometheus - uses: nick-fields/retry@v2 + uses: nick-fields/retry@v3 with: timeout_minutes: 30 max_attempts: 3 - command: bash scripts/ci-integration-test.sh prometheus + command: bash scripts/ci-int-e2e-test.sh int prometheus - if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.all-int == 'true' || needs.changes.outputs.pulsar == 'true' }} name: pulsar - uses: nick-fields/retry@v2 + uses: nick-fields/retry@v3 with: timeout_minutes: 30 max_attempts: 3 - command: bash scripts/ci-integration-test.sh pulsar + command: bash scripts/ci-int-e2e-test.sh int pulsar - if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.all-int == 'true' || needs.changes.outputs.redis == 'true' }} name: redis - uses: nick-fields/retry@v2 + uses: nick-fields/retry@v3 with: timeout_minutes: 30 max_attempts: 3 - command: bash scripts/ci-integration-test.sh redis + command: bash scripts/ci-int-e2e-test.sh int redis - if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.all-int == 'true' }} name: shutdown - uses: nick-fields/retry@v2 + uses: nick-fields/retry@v3 with: timeout_minutes: 30 max_attempts: 3 - command: bash scripts/ci-integration-test.sh shutdown + command: bash scripts/ci-int-e2e-test.sh int shutdown - if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.all-int == 'true' || needs.changes.outputs.splunk == 'true' }} name: splunk - uses: nick-fields/retry@v2 + uses: nick-fields/retry@v3 with: timeout_minutes: 30 max_attempts: 3 - command: bash 
scripts/ci-integration-test.sh splunk + command: bash scripts/ci-int-e2e-test.sh int splunk - if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.all-int == 'true' || needs.changes.outputs.webhdfs == 'true' }} name: webhdfs - uses: nick-fields/retry@v2 + uses: nick-fields/retry@v3 with: timeout_minutes: 30 max_attempts: 3 - command: bash scripts/ci-integration-test.sh webhdfs + command: bash scripts/ci-int-e2e-test.sh int webhdfs integration-test-suite: name: Integration Test Suite runs-on: ubuntu-latest + timeout-minutes: 5 if: always() needs: - changes diff --git a/.github/workflows/k8s_e2e.yml b/.github/workflows/k8s_e2e.yml index d8d2d3ea55e06..f2a01f3d63bee 100644 --- a/.github/workflows/k8s_e2e.yml +++ b/.github/workflows/k8s_e2e.yml @@ -59,7 +59,8 @@ jobs: build-x86_64-unknown-linux-gnu: name: Build - x86_64-unknown-linux-gnu - runs-on: [linux, ubuntu-20.04-4core] + runs-on: ubuntu-20.04-4core + timeout-minutes: 45 needs: changes # Run this job even if `changes` job is skipped (non- pull request trigger) if: ${{ !failure() && !cancelled() && (github.event_name != 'pull_request' || needs.changes.outputs.k8s == 'true') }} @@ -76,7 +77,7 @@ jobs: - name: (PR comment) Set latest commit status as pending if: ${{ github.event_name == 'issue_comment' }} - uses: myrotvorets/set-commit-status-action@v2.0.0 + uses: myrotvorets/set-commit-status-action@v2.0.1 with: sha: ${{ steps.comment-branch.outputs.head_sha }} token: ${{ secrets.GITHUB_TOKEN }} @@ -110,7 +111,7 @@ jobs: path: target/artifacts/* - name: (PR comment) Set latest commit status as 'failure' - uses: myrotvorets/set-commit-status-action@v2.0.0 + uses: myrotvorets/set-commit-status-action@v2.0.1 if: failure() && github.event_name == 'issue_comment' with: sha: ${{ steps.comment-branch.outputs.head_sha }} @@ -126,6 +127,7 @@ jobs: compute-k8s-test-plan: name: Compute K8s test plan runs-on: ubuntu-latest + timeout-minutes: 5 needs: changes # Run this job even if `changes` job is skipped if: ${{ !failure() && !cancelled() && (github.event_name != 'pull_request' || needs.changes.outputs.k8s == 'true') }} @@ -179,7 +181,8 @@ jobs: test-e2e-kubernetes: name: K8s ${{ matrix.kubernetes_version.version }} / ${{ matrix.container_runtime }} (${{ matrix.kubernetes_version.role }}) - runs-on: [linux, ubuntu-20.04-4core] + runs-on: ubuntu-20.04-4core + timeout-minutes: 45 needs: - build-x86_64-unknown-linux-gnu - compute-k8s-test-plan @@ -223,7 +226,7 @@ jobs: CARGO_INCREMENTAL: 0 - name: (PR comment) Set latest commit status as failure - uses: myrotvorets/set-commit-status-action@v2.0.0 + uses: myrotvorets/set-commit-status-action@v2.0.1 if: failure() && github.event_name == 'issue_comment' with: sha: ${{ steps.comment-branch.outputs.head_sha }} @@ -233,6 +236,7 @@ jobs: final-result: name: K8s E2E Suite runs-on: ubuntu-latest + timeout-minutes: 5 needs: - changes - build-x86_64-unknown-linux-gnu @@ -249,7 +253,7 @@ jobs: - name: (PR comment) Submit PR result as success if: github.event_name == 'issue_comment' && env.FAILED != 'true' - uses: myrotvorets/set-commit-status-action@v2.0.0 + uses: myrotvorets/set-commit-status-action@v2.0.1 with: sha: ${{ steps.comment-branch.outputs.head_sha }} token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml index 013496b77b275..68baa1a388104 100644 --- a/.github/workflows/labeler.yml +++ b/.github/workflows/labeler.yml @@ -5,6 +5,7 @@ on: jobs: label: runs-on: ubuntu-20.04 + timeout-minutes: 5 permissions: contents: read pull-requests: 
write diff --git a/.github/workflows/master_merge_queue.yml b/.github/workflows/master_merge_queue.yml index 49dd0706b24b5..c255596c9aef9 100644 --- a/.github/workflows/master_merge_queue.yml +++ b/.github/workflows/master_merge_queue.yml @@ -105,6 +105,7 @@ jobs: # Always run this so that pull_request triggers are marked as success. if: always() runs-on: ubuntu-20.04 + timeout-minutes: 5 needs: - changes - test-cli diff --git a/.github/workflows/misc.yml b/.github/workflows/misc.yml index 014e8afd3ee09..e4ce62979fd0d 100644 --- a/.github/workflows/misc.yml +++ b/.github/workflows/misc.yml @@ -5,7 +5,8 @@ on: jobs: test-misc: - runs-on: [linux, ubuntu-20.04-4core] + runs-on: ubuntu-20.04-4core + timeout-minutes: 45 env: CARGO_INCREMENTAL: 0 steps: @@ -16,7 +17,7 @@ jobs: - name: (PR comment) Set latest commit status as pending if: ${{ github.event_name == 'issue_comment' }} - uses: myrotvorets/set-commit-status-action@v2.0.0 + uses: myrotvorets/set-commit-status-action@v2.0.1 with: sha: ${{ steps.comment-branch.outputs.head_sha }} token: ${{ secrets.GITHUB_TOKEN }} @@ -53,7 +54,7 @@ jobs: - run: make test-docs - name: (PR comment) Set latest commit status as ${{ job.status }} - uses: myrotvorets/set-commit-status-action@v2.0.0 + uses: myrotvorets/set-commit-status-action@v2.0.1 if: always() && github.event_name == 'issue_comment' with: sha: ${{ steps.comment-branch.outputs.head_sha }} diff --git a/.github/workflows/msrv.yml b/.github/workflows/msrv.yml index 9a82d1ddecbd0..b485698dd9ca4 100644 --- a/.github/workflows/msrv.yml +++ b/.github/workflows/msrv.yml @@ -14,6 +14,7 @@ env: jobs: check-msrv: runs-on: ubuntu-latest + timeout-minutes: 20 steps: - uses: actions/checkout@v3 - run: sudo -E bash scripts/environment/bootstrap-ubuntu-20.04.sh diff --git a/.github/workflows/preview_site_trigger.yml b/.github/workflows/preview_site_trigger.yml index 8002913a37902..f3835b4790407 100644 --- a/.github/workflows/preview_site_trigger.yml +++ b/.github/workflows/preview_site_trigger.yml @@ -7,6 +7,7 @@ on: jobs: approval_check: runs-on: ubuntu-latest + timeout-minutes: 5 if: ${{ ! contains(github.head_ref, 'dependabot*') && ! 
contains(github.head_ref, 'gh-readonly-queue*') }} steps: - name: Echo approval diff --git a/.github/workflows/protobuf.yml b/.github/workflows/protobuf.yml index 01add48f69ff6..6d768479aeada 100644 --- a/.github/workflows/protobuf.yml +++ b/.github/workflows/protobuf.yml @@ -16,12 +16,13 @@ concurrency: jobs: validate-protos: runs-on: ubuntu-latest + timeout-minutes: 5 steps: # Run `git checkout` - uses: actions/checkout@v3 # Install the `buf` CLI - - uses: bufbuild/buf-setup-action@v1.29.0 + - uses: bufbuild/buf-setup-action@v1.30.1 # Perform breaking change detection against the `master` branch - - uses: bufbuild/buf-breaking-action@v1.1.3 + - uses: bufbuild/buf-breaking-action@v1.1.4 with: against: "https://github.com/vectordotdev/vector.git#branch=master" diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 0c21848bb90b9..d07b7a223ba2c 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -29,6 +29,7 @@ jobs: generate-publish-metadata: name: Generate Publish-related Metadata runs-on: ubuntu-20.04 + timeout-minutes: 5 outputs: vector_version: ${{ steps.generate-publish-metadata.outputs.vector_version }} vector_build_desc: ${{ steps.generate-publish-metadata.outputs.vector_build_desc }} @@ -44,7 +45,8 @@ jobs: build-x86_64-unknown-linux-musl-packages: name: Build Vector for x86_64-unknown-linux-musl (.tar.gz) - runs-on: [linux, release-builder] + runs-on: release-builder-linux + timeout-minutes: 60 needs: generate-publish-metadata env: VECTOR_VERSION: ${{ needs.generate-publish-metadata.outputs.vector_version }} @@ -69,8 +71,9 @@ jobs: build-x86_64-unknown-linux-gnu-packages: name: Build Vector for x86_64-unknown-linux-gnu (.tar.gz, DEB, RPM) - runs-on: [linux, release-builder] + runs-on: release-builder-linux needs: generate-publish-metadata + timeout-minutes: 60 env: VECTOR_VERSION: ${{ needs.generate-publish-metadata.outputs.vector_version }} VECTOR_BUILD_DESC: ${{ needs.generate-publish-metadata.outputs.vector_build_desc }} @@ -94,7 +97,8 @@ jobs: build-aarch64-unknown-linux-musl-packages: name: Build Vector for aarch64-unknown-linux-musl (.tar.gz) - runs-on: [linux, release-builder] + runs-on: release-builder-linux + timeout-minutes: 60 needs: generate-publish-metadata env: VECTOR_VERSION: ${{ needs.generate-publish-metadata.outputs.vector_version }} @@ -121,7 +125,8 @@ jobs: build-aarch64-unknown-linux-gnu-packages: name: Build Vector for aarch64-unknown-linux-gnu (.tar.gz) - runs-on: [linux, release-builder] + runs-on: release-builder-linux + timeout-minutes: 60 needs: generate-publish-metadata env: VECTOR_VERSION: ${{ needs.generate-publish-metadata.outputs.vector_version }} @@ -148,7 +153,8 @@ jobs: build-armv7-unknown-linux-gnueabihf-packages: name: Build Vector for armv7-unknown-linux-gnueabihf (.tar.gz) - runs-on: [linux, release-builder] + runs-on: release-builder-linux + timeout-minutes: 60 needs: generate-publish-metadata env: VECTOR_VERSION: ${{ needs.generate-publish-metadata.outputs.vector_version }} @@ -175,7 +181,8 @@ jobs: build-armv7-unknown-linux-musleabihf-packages: name: Build Vector for armv7-unknown-linux-musleabihf (.tar.gz) - runs-on: [linux, release-builder] + runs-on: release-builder-linux + timeout-minutes: 60 needs: generate-publish-metadata env: VECTOR_VERSION: ${{ needs.generate-publish-metadata.outputs.vector_version }} @@ -200,9 +207,65 @@ jobs: name: vector-${{ env.VECTOR_VERSION }}-armv7-unknown-linux-musleabihf path: target/artifacts/vector* + build-arm-unknown-linux-gnueabi-packages: + name: 
Build Vector for arm-unknown-linux-gnueabi (.tar.gz) + runs-on: release-builder-linux + timeout-minutes: 60 + needs: generate-publish-metadata + env: + VECTOR_VERSION: ${{ needs.generate-publish-metadata.outputs.vector_version }} + VECTOR_BUILD_DESC: ${{ needs.generate-publish-metadata.outputs.vector_build_desc }} + CHANNEL: ${{ needs.generate-publish-metadata.outputs.vector_release_channel }} + steps: + - name: Checkout Vector + uses: actions/checkout@v3 + with: + ref: ${{ inputs.git_ref }} + - name: Bootstrap runner environment (Ubuntu-specific) + run: sudo -E bash scripts/environment/bootstrap-ubuntu-20.04.sh + - name: Bootstrap runner environment (generic) + run: bash scripts/environment/prepare.sh + - name: Build Vector + env: + DOCKER_PRIVILEGED: "true" + run: make package-arm-unknown-linux-gnueabi-all + - name: Stage package artifacts for publish + uses: actions/upload-artifact@v3 + with: + name: vector-${{ env.VECTOR_VERSION }}-arm-unknown-linux-gnueabi + path: target/artifacts/vector* + + build-arm-unknown-linux-musleabi-packages: + name: Build Vector for arm-unknown-linux-musleabi (.tar.gz) + runs-on: release-builder-linux + needs: generate-publish-metadata + env: + VECTOR_VERSION: ${{ needs.generate-publish-metadata.outputs.vector_version }} + VECTOR_BUILD_DESC: ${{ needs.generate-publish-metadata.outputs.vector_build_desc }} + CHANNEL: ${{ needs.generate-publish-metadata.outputs.vector_release_channel }} + steps: + - name: Checkout Vector + uses: actions/checkout@v3 + with: + ref: ${{ inputs.git_ref }} + - name: Bootstrap runner environment (Ubuntu-specific) + run: sudo -E bash scripts/environment/bootstrap-ubuntu-20.04.sh + - name: Bootstrap runner environment (generic) + run: bash scripts/environment/prepare.sh + - name: Build Vector + env: + DOCKER_PRIVILEGED: "true" + run: make package-arm-unknown-linux-musleabi + - name: Stage package artifacts for publish + uses: actions/upload-artifact@v3 + with: + name: vector-${{ env.VECTOR_VERSION }}-arm-unknown-linux-musleabi + path: target/artifacts/vector* + build-x86_64-apple-darwin-packages: name: Build Vector for x86_64-apple-darwin (.tar.gz) runs-on: macos-latest-xl + timeout-minutes: 90 needs: generate-publish-metadata env: VECTOR_VERSION: ${{ needs.generate-publish-metadata.outputs.vector_version }} @@ -230,7 +293,8 @@ jobs: build-x86_64-pc-windows-msvc-packages: name: Build Vector for x86_64-pc-windows-msvc (.zip) - runs-on: [windows, release-builder] + runs-on: release-builder-windows + timeout-minutes: 90 needs: generate-publish-metadata env: VECTOR_VERSION: ${{ needs.generate-publish-metadata.outputs.vector_version }} @@ -277,6 +341,7 @@ jobs: deb-verify: name: Verify DEB Packages runs-on: ubuntu-20.04 + timeout-minutes: 5 needs: - generate-publish-metadata - build-x86_64-unknown-linux-gnu-packages @@ -322,6 +387,7 @@ jobs: rpm-verify: name: Verify RPM Packages runs-on: ubuntu-20.04 + timeout-minutes: 5 needs: - generate-publish-metadata - build-x86_64-unknown-linux-gnu-packages @@ -371,6 +437,7 @@ jobs: macos-verify: name: Verify macOS Package runs-on: macos-12 + timeout-minutes: 5 needs: - generate-publish-metadata - build-x86_64-apple-darwin-packages @@ -393,6 +460,7 @@ jobs: publish-docker: name: Publish to Docker runs-on: ubuntu-20.04 + timeout-minutes: 15 needs: - generate-publish-metadata - build-aarch64-unknown-linux-gnu-packages @@ -401,6 +469,8 @@ jobs: - build-x86_64-unknown-linux-musl-packages - build-armv7-unknown-linux-musleabihf-packages - build-armv7-unknown-linux-gnueabihf-packages + - 
build-arm-unknown-linux-gnueabi-packages + - build-arm-unknown-linux-musleabi-packages - deb-verify env: VECTOR_VERSION: ${{ needs.generate-publish-metadata.outputs.vector_version }} @@ -422,7 +492,7 @@ jobs: platforms: all - name: Set up Docker Buildx id: buildx - uses: docker/setup-buildx-action@v3.0.0 + uses: docker/setup-buildx-action@v3.3.0 with: version: latest install: true @@ -456,15 +526,26 @@ jobs: with: name: vector-${{ env.VECTOR_VERSION }}-armv7-unknown-linux-musleabihf path: target/artifacts + - name: Download staged package artifacts (arm-unknown-linux-gnueabi) + uses: actions/download-artifact@v3 + with: + name: vector-${{ env.VECTOR_VERSION }}-arm-unknown-linux-gnueabi + path: target/artifacts + - name: Download staged package artifacts (arm-unknown-linux-musleabi) + uses: actions/download-artifact@v3 + with: + name: vector-${{ env.VECTOR_VERSION }}-arm-unknown-linux-musleabi + path: target/artifacts - name: Build and publish Docker images env: - PLATFORM: "linux/amd64,linux/arm64,linux/arm/v7" + PLATFORM: "linux/amd64,linux/arm64,linux/arm/v7,linux/arm/v6" run: | make release-docker publish-s3: name: Publish to S3 runs-on: ubuntu-20.04 + timeout-minutes: 10 needs: - generate-publish-metadata - build-x86_64-unknown-linux-gnu-packages @@ -475,6 +556,8 @@ jobs: - build-x86_64-pc-windows-msvc-packages - build-armv7-unknown-linux-musleabihf-packages - build-armv7-unknown-linux-gnueabihf-packages + - build-arm-unknown-linux-gnueabi-packages + - build-arm-unknown-linux-musleabi-packages - deb-verify - rpm-verify - macos-verify @@ -526,6 +609,16 @@ jobs: with: name: vector-${{ env.VECTOR_VERSION }}-armv7-unknown-linux-musleabihf path: target/artifacts + - name: Download staged package artifacts (arm-unknown-linux-gnueabi) + uses: actions/download-artifact@v3 + with: + name: vector-${{ env.VECTOR_VERSION }}-arm-unknown-linux-gnueabi + path: target/artifacts + - name: Download staged package artifacts (arm-unknown-linux-musleabi) + uses: actions/download-artifact@v3 + with: + name: vector-${{ env.VECTOR_VERSION }}-arm-unknown-linux-musleabi + path: target/artifacts - name: Publish artifacts to S3 env: AWS_ACCESS_KEY_ID: ${{ secrets.CI_AWS_ACCESS_KEY_ID }} @@ -537,6 +630,7 @@ jobs: # We only publish to GitHub for versioned releases, not nightlies. if: inputs.channel == 'release' runs-on: ubuntu-20.04 + timeout-minutes: 10 needs: - generate-publish-metadata - build-x86_64-unknown-linux-gnu-packages @@ -547,6 +641,8 @@ jobs: - build-x86_64-pc-windows-msvc-packages - build-armv7-unknown-linux-gnueabihf-packages - build-armv7-unknown-linux-musleabihf-packages + - build-arm-unknown-linux-gnueabi-packages + - build-arm-unknown-linux-musleabi-packages - deb-verify - rpm-verify - macos-verify @@ -603,6 +699,16 @@ jobs: with: name: vector-${{ env.VECTOR_VERSION }}-SHA256SUMS path: target/artifacts + - name: Download staged package artifacts (arm-unknown-linux-gnueabi) + uses: actions/download-artifact@v3 + with: + name: vector-${{ env.VECTOR_VERSION }}-arm-unknown-linux-gnueabi + path: target/artifacts + - name: Download staged package artifacts (arm-unknown-linux-musleabi) + uses: actions/download-artifact@v3 + with: + name: vector-${{ env.VECTOR_VERSION }}-arm-unknown-linux-musleabi + path: target/artifacts - name: Publish release to GitHub env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} @@ -613,6 +719,7 @@ jobs: # We only publish to Homebrew for versioned releases, not nightlies. 
if: inputs.channel == 'release' runs-on: ubuntu-20.04 + timeout-minutes: 10 needs: - generate-publish-metadata - publish-s3 @@ -631,6 +738,7 @@ jobs: generate-sha256sum: name: Generate SHA256 checksums runs-on: ubuntu-20.04 + timeout-minutes: 5 needs: - generate-publish-metadata - build-x86_64-unknown-linux-gnu-packages @@ -641,6 +749,8 @@ jobs: - build-x86_64-pc-windows-msvc-packages - build-armv7-unknown-linux-gnueabihf-packages - build-armv7-unknown-linux-musleabihf-packages + - build-arm-unknown-linux-gnueabi-packages + - build-arm-unknown-linux-musleabi-packages env: VECTOR_VERSION: ${{ needs.generate-publish-metadata.outputs.vector_version }} steps: @@ -688,6 +798,16 @@ jobs: with: name: vector-${{ env.VECTOR_VERSION }}-armv7-unknown-linux-musleabihf path: target/artifacts + - name: Download staged package artifacts (arm-unknown-linux-gnueabi) + uses: actions/download-artifact@v3 + with: + name: vector-${{ env.VECTOR_VERSION }}-arm-unknown-linux-gnueabi + path: target/artifacts + - name: Download staged package artifacts (arm-unknown-linux-musleabi) + uses: actions/download-artifact@v3 + with: + name: vector-${{ env.VECTOR_VERSION }}-arm-unknown-linux-musleabi + path: target/artifacts - name: Generate SHA256 checksums for artifacts run: make sha256sum - name: Stage checksum for publish diff --git a/.github/workflows/regression.yml b/.github/workflows/regression.yml index f663a6962990a..1cd29df371486 100644 --- a/.github/workflows/regression.yml +++ b/.github/workflows/regression.yml @@ -47,6 +47,7 @@ jobs: # Only run this workflow if files changed in areas that could possibly introduce a regression should-run: runs-on: ubuntu-latest + timeout-minutes: 5 if: github.event_name != 'pull_request' outputs: source_changed: ${{ steps.filter.outputs.SOURCE_CHANGED }} @@ -110,6 +111,7 @@ jobs: compute-metadata: name: Compute metadata runs-on: ubuntu-22.04 + timeout-minutes: 5 needs: should-run if: github.event_name != 'merge_group' || needs.should-run.outputs.source_changed == 'true' outputs: @@ -273,7 +275,7 @@ jobs: - name: (PR comment) Set latest commit status as pending if: ${{ github.event_name == 'issue_comment' }} - uses: myrotvorets/set-commit-status-action@v2.0.0 + uses: myrotvorets/set-commit-status-action@v2.0.1 with: sha: ${{ steps.pr-metadata-comment.outputs.COMPARISON_SHA }} token: ${{ secrets.GITHUB_TOKEN }} @@ -285,7 +287,8 @@ jobs: build-baseline: name: Build baseline Vector container - runs-on: [linux, ubuntu-20.04-4core] + runs-on: ubuntu-20.04-4core + timeout-minutes: 30 needs: - compute-metadata steps: @@ -300,10 +303,10 @@ jobs: - name: Set up Docker Buildx id: buildx - uses: docker/setup-buildx-action@v3.0.0 + uses: docker/setup-buildx-action@v3.3.0 - name: Build 'vector' target image - uses: docker/build-push-action@v5.1.0 + uses: docker/build-push-action@v5.3.0 with: context: baseline-vector/ cache-from: type=gha @@ -322,7 +325,8 @@ jobs: build-comparison: name: Build comparison Vector container - runs-on: [linux, ubuntu-20.04-4core] + runs-on: ubuntu-20.04-4core + timeout-minutes: 30 needs: - compute-metadata steps: @@ -337,10 +341,10 @@ jobs: - name: Set up Docker Buildx id: buildx - uses: docker/setup-buildx-action@v3.0.0 + uses: docker/setup-buildx-action@v3.3.0 - name: Build 'vector' target image - uses: docker/build-push-action@v5.1.0 + uses: docker/build-push-action@v5.3.0 with: context: comparison-vector/ cache-from: type=gha @@ -360,11 +364,12 @@ jobs: confirm-valid-credentials: name: Confirm AWS credentials are minimally valid runs-on: ubuntu-22.04 + 
timeout-minutes: 5 needs: - compute-metadata steps: - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v4.0.1 + uses: aws-actions/configure-aws-credentials@v4.0.2 with: aws-access-key-id: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_SECRET_ACCESS_KEY }} @@ -381,6 +386,7 @@ jobs: upload-baseline-image-to-ecr: name: Upload baseline images to ECR runs-on: ubuntu-22.04 + timeout-minutes: 5 needs: - compute-metadata - confirm-valid-credentials @@ -396,7 +402,7 @@ jobs: docker load --input baseline-image.tar - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v4.0.1 + uses: aws-actions/configure-aws-credentials@v4.0.2 with: aws-access-key-id: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_SECRET_ACCESS_KEY }} @@ -419,6 +425,7 @@ jobs: upload-comparison-image-to-ecr: name: Upload comparison images to ECR runs-on: ubuntu-22.04 + timeout-minutes: 5 needs: - compute-metadata - confirm-valid-credentials @@ -434,7 +441,7 @@ jobs: docker load --input comparison-image.tar - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v4.0.1 + uses: aws-actions/configure-aws-credentials@v4.0.2 with: aws-access-key-id: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_SECRET_ACCESS_KEY }} @@ -457,6 +464,7 @@ jobs: submit-job: name: Submit regression job runs-on: ubuntu-22.04 + timeout-minutes: 45 needs: - compute-metadata - upload-baseline-image-to-ecr @@ -480,7 +488,7 @@ jobs: ref: ${{ needs.compute-metadata.outputs.comparison-sha }} - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v4.0.1 + uses: aws-actions/configure-aws-credentials@v4.0.2 with: aws-access-key-id: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_SECRET_ACCESS_KEY }} @@ -593,6 +601,7 @@ jobs: detect-regression: name: Determine regression status runs-on: ubuntu-22.04 + timeout-minutes: 5 needs: - submit-job - compute-metadata @@ -600,7 +609,7 @@ jobs: - uses: actions/checkout@v3 - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v4.0.1 + uses: aws-actions/configure-aws-credentials@v4.0.2 with: aws-access-key-id: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_SECRET_ACCESS_KEY }} @@ -669,6 +678,7 @@ jobs: analyze-experiment: name: Download regression analysis & upload report runs-on: ubuntu-22.04 + timeout-minutes: 5 needs: - submit-job - compute-metadata @@ -691,7 +701,7 @@ jobs: ref: ${{ needs.compute-metadata.outputs.comparison-sha }} - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v4.0.1 + uses: aws-actions/configure-aws-credentials@v4.0.2 with: aws-access-key-id: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_SECRET_ACCESS_KEY }} @@ -782,6 +792,7 @@ jobs: regression-detection-suite: name: Regression Detection Suite runs-on: ubuntu-latest + timeout-minutes: 5 if: always() needs: - compute-metadata @@ -803,7 +814,7 @@ jobs: - name: (PR comment) Submit PR result as failed if: github.event_name == 'issue_comment' && env.FAILED == 'true' - uses: myrotvorets/set-commit-status-action@v2.0.0 + uses: 
myrotvorets/set-commit-status-action@v2.0.1 with: sha: ${{ steps.comment-branch.outputs.head_sha }} token: ${{ secrets.GITHUB_TOKEN }} @@ -812,7 +823,7 @@ jobs: - name: (PR comment) Submit PR result as success if: github.event_name == 'issue_comment' && env.FAILED != 'true' - uses: myrotvorets/set-commit-status-action@v2.0.0 + uses: myrotvorets/set-commit-status-action@v2.0.1 with: sha: ${{ steps.comment-branch.outputs.head_sha }} token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/spelling.yml b/.github/workflows/spelling.yml index 6bc9e822ba966..73de422266c44 100644 --- a/.github/workflows/spelling.yml +++ b/.github/workflows/spelling.yml @@ -78,6 +78,7 @@ jobs: outputs: followup: ${{ steps.spelling.outputs.followup }} runs-on: ubuntu-latest + timeout-minutes: 5 if: "contains(github.event_name, 'pull_request') || github.event_name == 'push'" concurrency: group: spelling-${{ github.event.pull_request.number || github.ref }} diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 6270fdbde79c9..5a53114a6b9a9 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -35,7 +35,8 @@ jobs: checks: name: Checks - runs-on: [linux, ubuntu-20.04-8core] + runs-on: ubuntu-20.04-8core + timeout-minutes: 60 needs: changes env: CARGO_INCREMENTAL: 0 @@ -80,6 +81,10 @@ jobs: env: CARGO_BUILD_JOBS: 5 + # Validates components for adherence to the Component Specification + - name: Check Component Spec + run: make test-component-validation + - name: Upload test results run: scripts/upload-test-results.sh if: always() @@ -118,10 +123,17 @@ jobs: if: needs.changes.outputs.source == 'true' || needs.changes.outputs.cue == 'true' run: cargo vdev test-vrl + - name: Build VRL Playground + if: needs.changes.outputs.source == 'true' || needs.changes.outputs.dependencies == 'true' + run: | + cd lib/vector-vrl/web-playground/ + wasm-pack build --target web --out-dir public/pkg + # This is a required status check, so it always needs to run if prior jobs failed, in order to mark the status correctly. 
all-checks: name: Test Suite runs-on: ubuntu-20.04 + timeout-minutes: 5 if: always() needs: [changes, checks] env: diff --git a/.github/workflows/unit_mac.yml b/.github/workflows/unit_mac.yml index 9d32e78441dac..0b4281b22cc7a 100644 --- a/.github/workflows/unit_mac.yml +++ b/.github/workflows/unit_mac.yml @@ -6,6 +6,7 @@ on: jobs: unit-mac: runs-on: macos-13 + timeout-minutes: 90 env: CARGO_INCREMENTAL: 0 steps: @@ -17,7 +18,7 @@ jobs: - name: (PR comment) Set latest commit status as pending if: ${{ github.event_name == 'issue_comment' }} - uses: myrotvorets/set-commit-status-action@v2.0.0 + uses: myrotvorets/set-commit-status-action@v2.0.1 with: sha: ${{ steps.comment-branch.outputs.head_sha }} token: ${{ secrets.GITHUB_TOKEN }} @@ -53,7 +54,7 @@ jobs: - run: make test-behavior - name: (PR comment) Set latest commit status as ${{ job.status }} - uses: myrotvorets/set-commit-status-action@v2.0.0 + uses: myrotvorets/set-commit-status-action@v2.0.1 if: always() && github.event_name == 'issue_comment' with: sha: ${{ steps.comment-branch.outputs.head_sha }} diff --git a/.github/workflows/unit_windows.yml b/.github/workflows/unit_windows.yml index 34cf634b467fc..46642beb1e273 100644 --- a/.github/workflows/unit_windows.yml +++ b/.github/workflows/unit_windows.yml @@ -6,7 +6,8 @@ on: jobs: test-windows: - runs-on: [windows, windows-2019-8core] + runs-on: windows-2019-8core + timeout-minutes: 60 steps: - name: (PR comment) Get PR branch if: ${{ github.event_name == 'issue_comment' }} @@ -15,7 +16,7 @@ jobs: - name: (PR comment) Set latest commit status as pending if: ${{ github.event_name == 'issue_comment' }} - uses: myrotvorets/set-commit-status-action@v2.0.0 + uses: myrotvorets/set-commit-status-action@v2.0.1 with: sha: ${{ steps.comment-branch.outputs.head_sha }} token: ${{ secrets.GITHUB_TOKEN }} @@ -36,7 +37,7 @@ jobs: - run: make test - name: (PR comment) Set latest commit status as ${{ job.status }} - uses: myrotvorets/set-commit-status-action@v2.0.0 + uses: myrotvorets/set-commit-status-action@v2.0.1 if: always() && github.event_name == 'issue_comment' with: sha: ${{ steps.comment-branch.outputs.head_sha }} diff --git a/.github/workflows/workload_checks.yml b/.github/workflows/workload_checks.yml index 1e66600a521ed..b64f810ce652a 100644 --- a/.github/workflows/workload_checks.yml +++ b/.github/workflows/workload_checks.yml @@ -30,6 +30,7 @@ jobs: compute-metadata: name: Compute metadata runs-on: ubuntu-latest + timeout-minutes: 5 outputs: replicas: ${{ steps.experimental-meta.outputs.REPLICAS }} warmup-seconds: ${{ steps.experimental-meta.outputs.WARMUP_SECONDS }} @@ -81,6 +82,7 @@ jobs: submit-job: name: Submit workload checks job runs-on: ubuntu-latest + timeout-minutes: 90 needs: - compute-metadata steps: @@ -100,7 +102,7 @@ jobs: - uses: actions/checkout@v3 - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v4.0.1 + uses: aws-actions/configure-aws-credentials@v4.0.2 with: aws-access-key-id: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_SECRET_ACCESS_KEY }} diff --git a/.gitignore b/.gitignore index 25a85d5f6b361..24b0593d51d6c 100644 --- a/.gitignore +++ b/.gitignore @@ -19,3 +19,6 @@ massif.* # tilt tilt_modules/ + +# JetBrains +.idea/ diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 1815eef06c44b..5e6ad4ef00406 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -102,7 +102,11 @@ outputs to reference the filter, and finally update the outputs of `workflow_cal ### Git
Branches _All_ changes must be made in a branch and submitted as [pull requests](#github-pull-requests). -Vector does not adopt any type of branch naming style, but please use something + +If you want your branch to have a website preview build created, include the word `website` in the +branch name. + +Otherwise, Vector does not adopt any type of branch naming style, but please use something descriptive of your changes. ### Git Commits diff --git a/Cargo.lock b/Cargo.lock index 454dfe2b3baa0..366be407afc28 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -66,7 +66,7 @@ version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a824f2aa7e75a0c98c5a504fceb80649e9c35265d44525b5f94de4771a395cd" dependencies = [ - "getrandom 0.2.12", + "getrandom 0.2.14", "once_cell", "version_check", ] @@ -78,7 +78,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91429305e9f0a25f6205c5b8e0d2db09e0708a7a6df0f42212bb56c32c8ac97a" dependencies = [ "cfg-if", - "getrandom 0.2.12", + "getrandom 0.2.14", "once_cell", "version_check", "zerocopy", ] @@ -227,9 +227,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.79" +version = "1.0.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "080e9890a082662b09c1ad45f567faeeb47f22b5fb23895fbe1e651e718e25ca" +checksum = "f538837af36e6f6a9be0faa67f9a314f8119e4e4b5867c6ab40ed60360142519" [[package]] name = "apache-avro" @@ -247,8 +247,8 @@ dependencies = [ "regex-lite", "serde", "serde_json", - "strum", - "strum_macros", + "strum 0.25.0", + "strum_macros 0.25.3", "thiserror", "typed-builder 0.16.2", "uuid", @@ -274,9 +274,9 @@ dependencies = [ [[package]] name = "arc-swap" -version = "1.6.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" +checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" [[package]] name = "arr_macro" @@ -296,7 +296,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c6368f9ae5c6ec403ca910327ae0c9437b0a85255b6950c90d497e6177f6e5e" dependencies = [ "proc-macro-hack", - "quote 1.0.35", + "quote 1.0.36", "syn 1.0.109", ] @@ -333,12 +333,12 @@ dependencies = [ [[package]] name = "assert_cmd" -version = "2.0.13" +version = "2.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00ad3f3a942eee60335ab4342358c161ee296829e0d16ff42fc1d6cb07815467" +checksum = "ed72493ac66d5804837f480ab3766c72bdfab91a65e565fc54fa9e42db0073a8" dependencies = [ "anstyle", - "bstr 1.9.0", + "bstr 1.9.1", "doc-comment", "predicates", "predicates-core", @@ -359,9 +359,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.6" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a116f46a969224200a0a97f29cfd4c50e7534e4b4826bd23ea2c3c533039c82c" +checksum = "07dbbf24db18d609b1462965249abdf49129ccad073ec257da372adc83259c60" dependencies = [ "flate2", "futures-core", @@ -426,22 +426,22 @@ dependencies = [ [[package]] name = "async-graphql" -version = "7.0.1" +version = "7.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b16926f97f683ff3b47b035cc79622f3d6a374730b07a5d9051e81e88b5f1904" +checksum = "261fa27d5bff5afdf7beff291b3bc73f99d1529804c70e51b0fbc51e70b1c6a9" dependencies = [ "async-graphql-derive", "async-graphql-parser", "async-graphql-value", "async-stream", "async-trait", - "base64 0.13.1", -
"bytes 1.5.0", + "base64 0.21.7", + "bytes 1.6.0", "chrono", "fnv", "futures-util", "http 1.0.0", - "indexmap 2.1.0", + "indexmap 2.2.6", "mime", "multer", "num-traits", @@ -457,26 +457,26 @@ dependencies = [ [[package]] name = "async-graphql-derive" -version = "7.0.1" +version = "7.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6a7349168b79030e3172a620f4f0e0062268a954604e41475eff082380fe505" +checksum = "3188809947798ea6db736715a60cf645ba3b87ea031c710130e1476b48e45967" dependencies = [ "Inflector", "async-graphql-parser", - "darling 0.20.3", + "darling 0.20.8", "proc-macro-crate 1.3.1", - "proc-macro2 1.0.78", - "quote 1.0.35", - "strum", - "syn 2.0.48", + "proc-macro2 1.0.81", + "quote 1.0.36", + "strum 0.26.1", + "syn 2.0.60", "thiserror", ] [[package]] name = "async-graphql-parser" -version = "7.0.1" +version = "7.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58fdc0adf9f53c2b65bb0ff5170cba1912299f248d0e48266f444b6f005deb1d" +checksum = "d4e65a0b83027f35b2a5d9728a098bc66ac394caa8191d2c65ed9eb2985cf3d8" dependencies = [ "async-graphql-value", "pest", @@ -486,21 +486,21 @@ dependencies = [ [[package]] name = "async-graphql-value" -version = "7.0.1" +version = "7.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cf4d4e86208f4f9b81a503943c07e6e7f29ad3505e6c9ce6431fe64dc241681" +checksum = "68e40849c29a39012d38bff87bfed431f1ed6c53fbec493294c1045d61a7ae75" dependencies = [ - "bytes 1.5.0", - "indexmap 2.1.0", + "bytes 1.6.0", + "indexmap 2.2.6", "serde", "serde_json", ] [[package]] name = "async-graphql-warp" -version = "7.0.1" +version = "7.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d906b817c1499c0a814ea62b2a9cc03726e50d694d7e8cad3fcc1b24e8b62883" +checksum = "e901ea60bac5613a1c824da04c8e72906cf79efde5c56f657e3a4ac89624b0a5" dependencies = [ "async-graphql", "futures-util", @@ -542,7 +542,7 @@ dependencies = [ "futures-lite", "parking", "polling 3.3.0", - "rustix 0.38.28", + "rustix 0.38.31", "slab", "tracing 0.1.40", "waker-fn", @@ -576,7 +576,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dbc1f1a75fd07f0f517322d103211f12d757658e91676def9a2e688774656c60" dependencies = [ "base64 0.21.7", - "bytes 1.5.0", + "bytes 1.6.0", "futures 0.3.30", "http 0.2.9", "memchr", @@ -586,10 +586,10 @@ dependencies = [ "rand 0.8.5", "regex", "ring", - "rustls", - "rustls-native-certs", - "rustls-pemfile", - "rustls-webpki", + "rustls 0.21.11", + "rustls-native-certs 0.6.3", + "rustls-pemfile 1.0.3", + "rustls-webpki 0.101.7", "serde", "serde_json", "serde_nanos", @@ -598,7 +598,7 @@ dependencies = [ "time", "tokio", "tokio-retry", - "tokio-rustls", + "tokio-rustls 0.24.1", "tracing 0.1.40", "url", ] @@ -627,7 +627,7 @@ dependencies = [ "cfg-if", "event-listener 3.0.1", "futures-lite", - "rustix 0.38.28", + "rustix 0.38.31", "windows-sys 0.48.0", ] @@ -645,13 +645,13 @@ dependencies = [ [[package]] name = "async-recursion" -version = "1.0.5" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fd55a5ba1179988837d24ab4c7cc8ed6efdeff578ede0416b4225a5fca35bd0" +checksum = "30c5ef0ede93efbf733c1a727f3b6b5a1060bbedd5600183e66f6e4be4af0ec5" dependencies = [ - "proc-macro2 1.0.78", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] @@ -666,7 +666,7 @@ dependencies = [ "cfg-if", "futures-core", "futures-io", - "rustix 0.38.28", + "rustix 
0.38.31", "signal-hook-registry", "slab", "windows-sys 0.48.0", @@ -689,9 +689,9 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ - "proc-macro2 1.0.78", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] @@ -702,15 +702,21 @@ checksum = "b4eb2cdb97421e01129ccb49169d8279ed21e829929144f4a22a6e54ac549ca1" [[package]] name = "async-trait" -version = "0.1.77" +version = "0.1.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" +checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" dependencies = [ - "proc-macro2 1.0.78", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] +[[package]] +name = "atomic" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c59bdb34bc650a32731b31bd8f0829cc15d24a708ee31559e0bb34f2bc320cba" + [[package]] name = "atomic-waker" version = "1.1.2" @@ -751,10 +757,10 @@ dependencies = [ "aws-smithy-runtime-api", "aws-smithy-types", "aws-types", - "bytes 1.5.0", + "bytes 1.6.0", "fastrand 2.0.1", "http 0.2.9", - "hyper", + "hyper 0.14.28", "time", "tokio", "tracing 0.1.40", @@ -762,9 +768,9 @@ dependencies = [ [[package]] name = "aws-credential-types" -version = "1.1.4" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33cc49dcdd31c8b6e79850a179af4c367669150c7ac0135f176c61bec81a70f7" +checksum = "e16838e6c9e12125face1c1eff1343c75e3ff540de98ff7ebd61874a89bcfeb9" dependencies = [ "aws-smithy-async", "aws-smithy-runtime-api", @@ -781,9 +787,9 @@ dependencies = [ "aws-smithy-runtime-api", "aws-smithy-types", "aws-types", - "bytes 1.5.0", + "bytes 1.6.0", "http 0.2.9", - "http-body", + "http-body 0.4.5", "pin-project-lite", "tracing 0.1.40", ] @@ -849,7 +855,7 @@ dependencies = [ "aws-smithy-runtime-api", "aws-smithy-types", "aws-types", - "bytes 1.5.0", + "bytes 1.6.0", "fastrand 2.0.1", "http 0.2.9", "regex", @@ -872,7 +878,7 @@ dependencies = [ "aws-smithy-runtime-api", "aws-smithy-types", "aws-types", - "bytes 1.5.0", + "bytes 1.6.0", "http 0.2.9", "regex", "tracing 0.1.40", @@ -894,7 +900,7 @@ dependencies = [ "aws-smithy-runtime-api", "aws-smithy-types", "aws-types", - "bytes 1.5.0", + "bytes 1.6.0", "http 0.2.9", "regex", "tracing 0.1.40", @@ -916,7 +922,7 @@ dependencies = [ "aws-smithy-runtime-api", "aws-smithy-types", "aws-types", - "bytes 1.5.0", + "bytes 1.6.0", "http 0.2.9", "regex", "tracing 0.1.40", @@ -924,9 +930,9 @@ dependencies = [ [[package]] name = "aws-sdk-s3" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "859a207781360445504b89e790aebf682d80883280aa0d9b6e2e67740a733147" +checksum = "9dcafc2fe52cc30b2d56685e2fa6a879ba50d79704594852112337a472ddbd24" dependencies = [ "aws-credential-types", "aws-http", @@ -942,9 +948,9 @@ dependencies = [ "aws-smithy-types", "aws-smithy-xml", "aws-types", - "bytes 1.5.0", + "bytes 1.6.0", "http 0.2.9", - "http-body", + "http-body 0.4.5", "once_cell", "percent-encoding", "regex", @@ -991,7 +997,7 @@ dependencies = [ "aws-smithy-runtime-api", "aws-smithy-types", "aws-types", - "bytes 1.5.0", + "bytes 1.6.0", "http 0.2.9", "regex", "tracing 0.1.40", @@ -999,9 +1005,9 @@ dependencies = [ [[package]] name = "aws-sdk-sts" -version 
= "1.3.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5700da387716ccfc30b27f44b008f457e1baca5b0f05b6b95455778005e3432a" +checksum = "798c8d82203af9e15a8b406574e0b36da91dd6db533028b74676489a1bc8bc7d" dependencies = [ "aws-credential-types", "aws-http", @@ -1022,16 +1028,16 @@ dependencies = [ [[package]] name = "aws-sigv4" -version = "1.1.3" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "511879249616f30e30fd2fa81edb4833784f65dd5d56053b7de2e2bcb583dda7" +checksum = "58b56f1cbe6fd4d0c2573df72868f20ab1c125ca9c9dbce17927a463433a2e57" dependencies = [ "aws-credential-types", "aws-smithy-eventstream", "aws-smithy-http", "aws-smithy-runtime-api", "aws-smithy-types", - "bytes 1.5.0", + "bytes 1.6.0", "form_urlencoded", "hex", "hmac", @@ -1046,9 +1052,9 @@ dependencies = [ [[package]] name = "aws-smithy-async" -version = "1.1.4" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72ee2d09cce0ef3ae526679b522835d63e75fb427aca5413cd371e490d52dcc6" +checksum = "62220bc6e97f946ddd51b5f1361f78996e704677afc518a4ff66b7a72ea1378c" dependencies = [ "futures-util", "pin-project-lite", @@ -1063,12 +1069,12 @@ checksum = "c5a373ec01aede3dd066ec018c1bc4e8f5dd11b2c11c59c8eef1a5c68101f397" dependencies = [ "aws-smithy-http", "aws-smithy-types", - "bytes 1.5.0", + "bytes 1.6.0", "crc32c", "crc32fast", "hex", "http 0.2.9", - "http-body", + "http-body 0.4.5", "md-5", "pin-project-lite", "sha1", @@ -1083,24 +1089,24 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6363078f927f612b970edf9d1903ef5cef9a64d1e8423525ebb1f0a1633c858" dependencies = [ "aws-smithy-types", - "bytes 1.5.0", + "bytes 1.6.0", "crc32fast", ] [[package]] name = "aws-smithy-http" -version = "0.60.4" +version = "0.60.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dab56aea3cd9e1101a0a999447fb346afb680ab1406cebc44b32346e25b4117d" +checksum = "4a7de001a1b9a25601016d8057ea16e31a45fdca3751304c8edf4ad72e706c08" dependencies = [ "aws-smithy-eventstream", "aws-smithy-runtime-api", "aws-smithy-types", - "bytes 1.5.0", + "bytes 1.6.0", "bytes-utils", "futures-core", "http 0.2.9", - "http-body", + "http-body 0.4.5", "once_cell", "percent-encoding", "pin-project-lite", @@ -1129,56 +1135,61 @@ dependencies = [ [[package]] name = "aws-smithy-runtime" -version = "1.1.4" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fafdab38f40ad7816e7da5dec279400dd505160780083759f01441af1bbb10ea" +checksum = "44e7945379821074549168917e89e60630647e186a69243248f08c6d168b975a" dependencies = [ "aws-smithy-async", "aws-smithy-http", "aws-smithy-runtime-api", "aws-smithy-types", - "bytes 1.5.0", + "bytes 1.6.0", "fastrand 2.0.1", - "h2 0.3.24", + "h2 0.3.26", "http 0.2.9", - "http-body", - "hyper", - "hyper-rustls", + "http-body 0.4.5", + "http-body 1.0.0", + "hyper 0.14.28", + "hyper-rustls 0.24.2", "once_cell", "pin-project-lite", "pin-utils", - "rustls", + "rustls 0.21.11", "tokio", "tracing 0.1.40", ] [[package]] name = "aws-smithy-runtime-api" -version = "1.1.4" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c18276dd28852f34b3bf501f4f3719781f4999a51c7bff1a5c6dc8c4529adc29" +checksum = "4cc56a5c96ec741de6c5e6bf1ce6948be969d6506dfa9c39cffc284e31e4979b" dependencies = [ "aws-smithy-async", "aws-smithy-types", - "bytes 1.5.0", + "bytes 1.6.0", "http 0.2.9", + "http 1.0.0", 
"pin-project-lite", "tokio", "tracing 0.1.40", + "zeroize", ] [[package]] name = "aws-smithy-types" -version = "1.1.4" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb3e134004170d3303718baa2a4eb4ca64ee0a1c0a7041dca31b38be0fb414f3" +checksum = "abe14dceea1e70101d38fbf2a99e6a34159477c0fb95e68e05c66bd7ae4c3729" dependencies = [ "base64-simd", - "bytes 1.5.0", + "bytes 1.6.0", "bytes-utils", - "futures-core", "http 0.2.9", - "http-body", + "http 1.0.0", + "http-body 0.4.5", + "http-body 1.0.0", + "http-body-util", "itoa", "num-integer", "pin-project-lite", @@ -1199,9 +1210,9 @@ dependencies = [ [[package]] name = "aws-types" -version = "1.1.3" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee2739d97d47f47cdf0d27982019a405dcc736df25925d1a75049f1faa79df88" +checksum = "5a43b56df2c529fe44cb4d92bd64d0479883fb9608ff62daede4df5405381814" dependencies = [ "aws-credential-types", "aws-smithy-async", @@ -1221,11 +1232,11 @@ dependencies = [ "async-trait", "axum-core", "bitflags 1.3.2", - "bytes 1.5.0", + "bytes 1.6.0", "futures-util", "http 0.2.9", - "http-body", - "hyper", + "http-body 0.4.5", + "hyper 0.14.28", "itoa", "matchit", "memchr", @@ -1248,10 +1259,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" dependencies = [ "async-trait", - "bytes 1.5.0", + "bytes 1.6.0", "futures-util", "http 0.2.9", - "http-body", + "http-body 0.4.5", "mime", "rustversion", "tower-layer", @@ -1266,15 +1277,15 @@ checksum = "4ccd63c07d1fbfb3d4543d7ea800941bf5a30db1911b9b9e4db3b2c4210a434f" dependencies = [ "async-trait", "base64 0.21.7", - "bytes 1.5.0", + "bytes 1.6.0", "dyn-clone", "futures 0.3.30", - "getrandom 0.2.12", + "getrandom 0.2.14", "http-types", "log", "paste", "pin-project", - "quick-xml 0.31.0", + "quick-xml", "rand 0.8.5", "reqwest", "rustc_version 0.4.0", @@ -1315,7 +1326,7 @@ dependencies = [ "RustyXML", "async-trait", "azure_core", - "bytes 1.5.0", + "bytes 1.6.0", "futures 0.3.30", "hmac", "log", @@ -1337,7 +1348,7 @@ dependencies = [ "RustyXML", "azure_core", "azure_storage", - "bytes 1.5.0", + "bytes 1.6.0", "futures 0.3.30", "log", "serde", @@ -1354,7 +1365,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b62ddb9cb1ec0a098ad4bbf9344d0713fa193ae1a80af55febcff2627b6a00c1" dependencies = [ - "getrandom 0.2.12", + "getrandom 0.2.14", "instant", "rand 0.8.5", ] @@ -1416,6 +1427,12 @@ version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" +[[package]] +name = "base64" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9475866fec1451be56a3c2400fd081ff546538961565ccb5b7142cbd22bc7a51" + [[package]] name = "base64-simd" version = "0.8.0" @@ -1465,8 +1482,8 @@ version = "2.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9990737a6d5740ff51cdbbc0f0503015cb30c390f6623968281eb214a520cfc0" dependencies = [ - "quote 1.0.35", - "syn 2.0.48", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] @@ -1526,28 +1543,31 @@ dependencies = [ [[package]] name = "bollard" -version = "0.15.0" +version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f03db470b3c0213c47e978da93200259a1eb4dae2e5512cba9955e2b540a6fc6" +checksum = 
"0aed08d3adb6ebe0eff737115056652670ae290f177759aac19c30456135f94c" dependencies = [ - "base64 0.21.7", + "base64 0.22.0", "bollard-stubs", - "bytes 1.5.0", + "bytes 1.6.0", "chrono", "futures-core", "futures-util", "hex", "home", - "http 0.2.9", - "hyper", - "hyper-rustls", - "hyperlocal", + "http 1.0.0", + "http-body-util", + "hyper 1.2.0", + "hyper-named-pipe", + "hyper-rustls 0.26.0", + "hyper-util", + "hyperlocal-next", "log", "pin-project-lite", - "rustls", - "rustls-native-certs", - "rustls-pemfile", - "rustls-webpki", + "rustls 0.22.4", + "rustls-native-certs 0.7.0", + "rustls-pemfile 2.1.0", + "rustls-pki-types", "serde", "serde_derive", "serde_json", @@ -1556,21 +1576,21 @@ dependencies = [ "thiserror", "tokio", "tokio-util", + "tower-service", "url", - "webpki-roots", "winapi", ] [[package]] name = "bollard-stubs" -version = "1.43.0-rc.2" +version = "1.44.0-rc.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b58071e8fd9ec1e930efd28e3a90c1251015872a2ce49f81f36421b86466932e" +checksum = "709d9aa1c37abb89d40f19f5d0ad6f0d88cb1581264e571c9350fc5bb89cf1c5" dependencies = [ "chrono", "serde", "serde_repr", - "serde_with 3.5.0", + "serde_with 3.7.0", ] [[package]] @@ -1591,9 +1611,9 @@ checksum = "f404657a7ea7b5249e36808dff544bc88a28f26e0ac40009f674b7a009d14be3" dependencies = [ "once_cell", "proc-macro-crate 2.0.0", - "proc-macro2 1.0.78", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", "syn_derive", ] @@ -1631,9 +1651,9 @@ dependencies = [ [[package]] name = "bstr" -version = "1.9.0" +version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c48f0051a4b4c5e0b6d365cd04af53aeaa209e3cc15ec2cdb69e73cc87fbd0dc" +checksum = "05efc5cfd9110c8416e471df0e96702d58690178e206e61b7173706673c93706" dependencies = [ "memchr", "regex-automata 0.4.4", @@ -1663,17 +1683,11 @@ version = "0.6.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7ec4c6f261935ad534c0c22dbef2201b45918860eb1c574b972bd213a76af61" dependencies = [ - "proc-macro2 1.0.78", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", "syn 1.0.109", ] -[[package]] -name = "bytemuck" -version = "1.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "374d28ec25809ee0e23827c2ab573d729e293f281dfe393500e7ad618baa61c6" - [[package]] name = "byteorder" version = "1.5.0" @@ -1692,9 +1706,9 @@ dependencies = [ [[package]] name = "bytes" -version = "1.5.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" +checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" dependencies = [ "serde", ] @@ -1705,7 +1719,7 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e47d3a8076e283f3acd27400535992edb3ba4b5bb72f8891ad8fbe7932a7d4b9" dependencies = [ - "bytes 1.5.0", + "bytes 1.6.0", "either", ] @@ -1717,9 +1731,9 @@ checksum = "a3e368af43e418a04d52505cf3dbc23dda4e3407ae2fa99fd0e4f308ce546acc" [[package]] name = "cached" -version = "0.48.1" +version = "0.49.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "355face540df58778b96814c48abb3c2ed67c4878a8087ab1819c1fedeec505f" +checksum = "8e8e463fceca5674287f32d252fb1d94083758b8709c160efae66d263e5f4eba" dependencies = [ "ahash 0.8.6", "cached_proc_macro", @@ -1732,13 +1746,13 @@ dependencies = [ [[package]] name = 
"cached_proc_macro" -version = "0.19.1" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d52f526f7cbc875b296856ca8c964a9f6290556922c303a8a3883e3c676e6a1" +checksum = "ad9f16c0d84de31a2ab7fdf5f7783c14631f7075cf464eb3bb43119f61c9cb2a" dependencies = [ "darling 0.14.4", - "proc-macro2 1.0.78", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", "syn 1.0.109", ] @@ -1750,9 +1764,9 @@ checksum = "ade8366b8bd5ba243f0a58f036cc0ca8a2f069cff1a2351ef1cac6b083e16fc0" [[package]] name = "cargo_toml" -version = "0.19.0" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "922d6ea3081d68b9e3e09557204bff47f9b5406a4a304dc917e187f8cafd582b" +checksum = "c8cb1d556b8b8f36e5ca74938008be3ac102f5dcb5b68a0477e4249ae2291cd3" dependencies = [ "serde", "toml", @@ -1770,6 +1784,15 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" +[[package]] +name = "castaway" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a17ed5635fc8536268e5d4de1e22e81ac34419e5f052d4d51f4e01dcc263fcc" +dependencies = [ + "rustversion", +] + [[package]] name = "cbc" version = "0.1.2" @@ -1852,9 +1875,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.33" +version = "0.4.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f13690e35a5e4ace198e7beea2895d29f3a9cc55015fcebe6336bd2010af9eb" +checksum = "8a0d04d43504c61aa6c7531f1871dd0d418d91130162063b789da00fd7057a5e" dependencies = [ "android-tzdata", "iana-time-zone", @@ -1867,9 +1890,9 @@ dependencies = [ [[package]] name = "chrono-tz" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91d7b79e99bfaa0d47da0687c43aa3b7381938a62ad3a6498599039321f660b7" +checksum = "d59ae0466b83e838b81a54256c39d5d7c20b9d7daa10510a242d9b75abd5936e" dependencies = [ "chrono", "chrono-tz-build", @@ -1973,9 +1996,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.4.18" +version = "4.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e578d6ec4194633722ccf9544794b71b1385c3c027efe0c55db226fc880865c" +checksum = "90bc066a67923782aa8515dbaea16946c5bcc5addbd668bb80af688e53e548a0" dependencies = [ "clap_builder", "clap_derive", @@ -1983,53 +2006,53 @@ dependencies = [ [[package]] name = "clap-verbosity-flag" -version = "2.1.2" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b57f73ca21b17a0352944b9bb61803b6007bd911b6cccfef7153f7f0600ac495" +checksum = "bb9b20c0dd58e4c2e991c8d203bbeb76c11304d1011659686b5b644bc29aa478" dependencies = [ - "clap 4.4.18", + "clap 4.5.4", "log", ] [[package]] name = "clap_builder" -version = "4.4.18" +version = "4.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4df4df40ec50c46000231c914968278b1eb05098cf8f1b3a518a95030e71d1c7" +checksum = "ae129e2e766ae0ec03484e609954119f123cc1fe650337e155d03b022f24f7b4" dependencies = [ "anstream", "anstyle", "clap_lex", - "strsim 0.10.0", + "strsim 0.11.0", "terminal_size", ] [[package]] name = "clap_complete" -version = "4.4.9" +version = "4.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df631ae429f6613fcd3a7c1adbdb65f637271e561b03680adaa6573015dfb106" +checksum = "dd79504325bf38b10165b02e89b4347300f855f273c4cb30c4a3209e6583275e" 
dependencies = [ - "clap 4.4.18", + "clap 4.5.4", ] [[package]] name = "clap_derive" -version = "4.4.7" +version = "4.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf9804afaaf59a91e75b022a30fb7229a7901f60c755489cc61c9b423b836442" +checksum = "528131438037fd55894f62d6e9f068b8f45ac57ffa77517819645d10aed04f64" dependencies = [ - "heck 0.4.1", - "proc-macro2 1.0.78", - "quote 1.0.35", - "syn 2.0.48", + "heck 0.5.0", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] name = "clap_lex" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "702fc72eb24e5a1e48ce58027a675bc24edd52096d5397d4aea7c6dd9eca0bd1" +checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" [[package]] name = "clipboard-win" @@ -2054,7 +2077,7 @@ name = "codecs" version = "0.1.0" dependencies = [ "apache-avro", - "bytes 1.5.0", + "bytes 1.6.0", "chrono", "csv-core", "derivative", @@ -2064,7 +2087,7 @@ dependencies = [ "memchr", "once_cell", "ordered-float 4.2.0", - "prost 0.12.3", + "prost 0.12.4", "prost-reflect", "regex", "rstest", @@ -2072,7 +2095,7 @@ dependencies = [ "serde_json", "similar-asserts", "smallvec", - "snafu", + "snafu 0.7.5", "syslog_loose", "tokio", "tokio-util", @@ -2132,7 +2155,7 @@ version = "4.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "35ed6e9d84f0b51a7f52daf1c7d71dd136fd7a3f41a8462b8cdb8c78d920fad4" dependencies = [ - "bytes 1.5.0", + "bytes 1.6.0", "futures-core", "memchr", "pin-project-lite", @@ -2154,6 +2177,19 @@ dependencies = [ "sha1", ] +[[package]] +name = "compact_str" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f86b9c4c00838774a6d902ef931eff7470720c51d90c2e32cfe15dc304737b3f" +dependencies = [ + "castaway", + "cfg-if", + "itoa", + "ryu", + "static_assertions", +] + [[package]] name = "concurrent-queue" version = "2.3.0" @@ -2165,9 +2201,9 @@ dependencies = [ [[package]] name = "confy" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15d296c475c6ed4093824c28e222420831d27577aaaf0a1163a3b7fc35b248a5" +checksum = "45b1f4c00870f07dc34adcac82bb6a72cc5aabca8536ba1797e01df51d2ce9a0" dependencies = [ "directories", "serde", @@ -2195,8 +2231,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fd326812b3fd01da5bb1af7d340d0d555fd3d4b641e7f1dfcf5962a902952787" dependencies = [ "futures-core", - "prost 0.12.3", - "prost-types 0.12.3", + "prost 0.12.4", + "prost-types 0.12.4", "tonic", "tracing-core 0.1.32", ] @@ -2213,7 +2249,7 @@ dependencies = [ "futures-task", "hdrhistogram", "humantime", - "prost-types 0.12.3", + "prost-types 0.12.4", "serde", "serde_json", "thread_local", @@ -2294,18 +2330,18 @@ dependencies = [ [[package]] name = "crc" -version = "3.0.1" +version = "3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86ec7a15cbe22e59248fc7eadb1907dab5ba09372595da4d73dd805ed4417dfe" +checksum = "69e6e4d7b33a94f0991c26729976b10ebde1d34c3ee82408fb536164fa10d636" dependencies = [ "crc-catalog", ] [[package]] name = "crc-catalog" -version = "2.3.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4939f9ed1444bd8c896d37f3090012fa6e7834fe84ef8c9daa166109515732f9" +checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" [[package]] name = "crc32c" @@ -2318,9 +2354,9 @@ dependencies 
= [ [[package]] name = "crc32fast" -version = "1.3.2" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" +checksum = "b3855a8a784b474f333699ef2bbca9db2c4a1f6d9088a90a2d25b1eb53111eaa" dependencies = [ "cfg-if", ] @@ -2334,7 +2370,7 @@ dependencies = [ "anes", "cast", "ciborium", - "clap 4.4.18", + "clap 4.5.4", "criterion-plot", "futures 0.3.30", "is-terminal", @@ -2535,9 +2571,9 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ - "proc-macro2 1.0.78", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] @@ -2562,12 +2598,12 @@ dependencies = [ [[package]] name = "darling" -version = "0.20.3" +version = "0.20.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0209d94da627ab5605dcccf08bb18afa5009cfbef48d8a8b7d7bdbc79be25c5e" +checksum = "54e36fcd13ed84ffdfda6f5be89b31287cbb80c439841fe69e04841435464391" dependencies = [ - "darling_core 0.20.3", - "darling_macro 0.20.3", + "darling_core 0.20.8", + "darling_macro 0.20.8", ] [[package]] @@ -2578,8 +2614,8 @@ checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.78", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", "strsim 0.10.0", "syn 1.0.109", ] @@ -2592,24 +2628,24 @@ checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.78", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", "strsim 0.10.0", "syn 1.0.109", ] [[package]] name = "darling_core" -version = "0.20.3" +version = "0.20.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "177e3443818124b357d8e76f53be906d60937f0d3a90773a664fa63fa253e621" +checksum = "9c2cf1c23a687a1feeb728783b993c4e1ad83d99f351801977dd809b48d0a70f" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.78", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", "strsim 0.10.0", - "syn 2.0.48", + "syn 2.0.60", ] [[package]] @@ -2619,7 +2655,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" dependencies = [ "darling_core 0.13.4", - "quote 1.0.35", + "quote 1.0.36", "syn 1.0.109", ] @@ -2630,19 +2666,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" dependencies = [ "darling_core 0.14.4", - "quote 1.0.35", + "quote 1.0.36", "syn 1.0.109", ] [[package]] name = "darling_macro" -version = "0.20.3" +version = "0.20.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "836a9bbc7ad63342d6d6e7b815ccab164bc77a2d95d84bc3117a8c0d5c98e2d5" +checksum = "a668eda54683121533a393014d8692171709ff57a7d61f187b6e782719f8933f" dependencies = [ - "darling_core 0.20.3", - "quote 1.0.35", - "syn 2.0.48", + "darling_core 0.20.8", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] @@ -2676,6 +2712,27 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c297a1c74b71ae29df00c3e22dd9534821d60eb9af5a0192823fa2acea70c2a" +[[package]] +name = "databend-client" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"32d42f932a3c2c5c1d5540579fe8c98bde9f95c8dc14e1d617bc87b583df2dce" +dependencies = [ + "async-trait", + "log", + "once_cell", + "percent-encoding", + "reqwest", + "serde", + "serde_json", + "tokio", + "tokio-retry", + "tokio-stream", + "tokio-util", + "url", + "uuid", +] + [[package]] name = "db-key" version = "0.0.5" @@ -2734,8 +2791,8 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ - "proc-macro2 1.0.78", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", "syn 1.0.109", ] @@ -2745,9 +2802,9 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ - "proc-macro2 1.0.78", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] @@ -2757,8 +2814,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ "convert_case 0.4.0", - "proc-macro2 1.0.78", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", "rustc_version 0.4.0", "syn 1.0.109", ] @@ -2837,7 +2894,7 @@ checksum = "e5766087c2235fec47fafa4cfecc81e494ee679d0fd4a59887ea0919bfb0e4fc" dependencies = [ "cfg-if", "libc", - "socket2 0.5.5", + "socket2 0.5.6", "windows-sys 0.48.0", ] @@ -2864,7 +2921,7 @@ dependencies = [ "anyhow", "serde", "serde_json", - "snafu", + "snafu 0.7.5", "tracing 0.1.40", "tracing-subscriber", "vector-config", @@ -2891,9 +2948,9 @@ checksum = "56ce8c6da7551ec6c462cbaf3bfbc75131ebbfa1c944aeaa9dab51ca1c5f0c3b" [[package]] name = "dyn-clone" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "545b22097d44f8a9581187cdf93de7a71e4722bf51200cfaba810865b49a495d" +checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125" [[package]] name = "ecdsa" @@ -3014,8 +3071,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21cdad81446a7f7dc43f6a77409efeb9733d2fa65553efef6018ef257c959b73" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.78", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", "syn 1.0.109", ] @@ -3026,41 +3083,41 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ffccbb6966c05b32ef8fbac435df276c4ae4d3dc55a8cd0eb9745e6c12f546a" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.78", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] name = "enum_dispatch" -version = "0.3.12" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f33313078bb8d4d05a2733a94ac4c2d8a0df9a2b84424ebf4f33bfc224a890e" +checksum = "aa18ce2bc66555b3218614519ac839ddb759a7d6720732f979ef8d13be147ecd" dependencies = [ "once_cell", - "proc-macro2 1.0.78", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] name = "enumflags2" -version = "0.7.8" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5998b4f30320c9d93aed72f63af821bfdac50465b75428fce77b48ec482c3939" +checksum = "3278c9d5fb675e0a51dabcf4c0d355f692b064171535ba72361be1528a9d8e8d" dependencies = [ "enumflags2_derive", ] [[package]] name = "enumflags2_derive" -version = "0.7.8" +version = "0.7.9" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f95e2801cd355d4a1a3e3953ce6ee5ae9603a5c833455343a8bfe3f44d418246" +checksum = "5c785274071b1b420972453b306eeca06acf4633829db4223b58a2a8c5953bc4" dependencies = [ - "proc-macro2 1.0.78", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] @@ -3185,9 +3242,9 @@ dependencies = [ [[package]] name = "fakedata_generator" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "302a45f60d105c247c9d3131107392da80df844f9215260b7f8ccfa301a6a6f4" +checksum = "57b82fba4b485b819fde74012109688a9d2bd4ce7b22583ac12c9fa239f74a02" dependencies = [ "passt", "rand 0.8.5", @@ -3236,8 +3293,8 @@ checksum = "a481586acf778f1b1455424c343f71124b048ffa5f4fc3f8f6ae9dc432dcb3c7" name = "file-source" version = "0.1.0" dependencies = [ - "bstr 1.9.0", - "bytes 1.5.0", + "bstr 1.9.1", + "bytes 1.6.0", "chrono", "crc", "criterion", @@ -3245,7 +3302,7 @@ dependencies = [ "flate2", "futures 0.3.30", "glob", - "indexmap 2.1.0", + "indexmap 2.2.6", "libc", "quickcheck", "scan_fmt", @@ -3319,6 +3376,17 @@ dependencies = [ "spin 0.9.8", ] +[[package]] +name = "flume" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55ac459de2512911e4b674ce33cf20befaba382d05b62b008afc1c8b57cbf181" +dependencies = [ + "futures-core", + "futures-sink", + "spin 0.9.8", +] + [[package]] name = "fnv" version = "1.0.7" @@ -3449,9 +3517,9 @@ version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ - "proc-macro2 1.0.78", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] @@ -3516,9 +3584,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.12" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5" +checksum = "94b22e06ecb0110981051723910cbf0b5f5e09a2062dd7663334ee79a9d1286c" dependencies = [ "cfg-if", "js-sys", @@ -3609,9 +3677,9 @@ dependencies = [ [[package]] name = "graphql_client" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09cdf7b487d864c2939b23902291a5041bc4a84418268f25fda1c8d4e15ad8fa" +checksum = "a50cfdc7f34b7f01909d55c2dcb71d4c13cbcbb4a1605d6c8bd760d654c1144b" dependencies = [ "graphql_query_derive", "serde", @@ -3620,16 +3688,16 @@ dependencies = [ [[package]] name = "graphql_client_codegen" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a40f793251171991c4eb75bd84bc640afa8b68ff6907bc89d3b712a22f700506" +checksum = "5e27ed0c2cf0c0cc52c6bcf3b45c907f433015e580879d14005386251842fb0a" dependencies = [ "graphql-introspection-query", "graphql-parser", "heck 0.4.1", "lazy_static", - "proc-macro2 1.0.78", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", "serde", "serde_json", "syn 1.0.109", @@ -3637,12 +3705,12 @@ dependencies = [ [[package]] name = "graphql_query_derive" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00bda454f3d313f909298f626115092d348bc231025699f557b27e248475f48c" +checksum = "83febfa838f898cfa73dfaa7a8eb69ff3409021ac06ee94cfb3d622f6eeb1a97" dependencies = [ "graphql_client_codegen", - 
"proc-macro2 1.0.78", + "proc-macro2 1.0.81", "syn 1.0.109", ] @@ -3651,11 +3719,11 @@ name = "greptime-proto" version = "0.1.0" source = "git+https://github.com/GreptimeTeam/greptime-proto.git?tag=v0.4.1#4306ab645ee55b3f7f2ad3fb7acc5820f967c1aa" dependencies = [ - "prost 0.12.3", + "prost 0.12.4", "serde", "serde_json", - "strum", - "strum_macros", + "strum 0.25.0", + "strum_macros 0.25.3", "tonic", "tonic-build 0.10.2", ] @@ -3663,7 +3731,7 @@ dependencies = [ [[package]] name = "greptimedb-client" version = "0.1.0" -source = "git+https://github.com/GreptimeTeam/greptimedb-ingester-rust.git?rev=4cb19ec47eeaf634c451d9ae438dac445a8a3dce#4cb19ec47eeaf634c451d9ae438dac445a8a3dce" +source = "git+https://github.com/GreptimeTeam/greptimedb-ingester-rust.git?rev=d21dbcff680139ed2065b62100bac3123da7c789#d21dbcff680139ed2065b62100bac3123da7c789" dependencies = [ "dashmap", "enum_dispatch", @@ -3671,9 +3739,9 @@ dependencies = [ "futures-util", "greptime-proto", "parking_lot", - "prost 0.12.3", + "prost 0.12.4", "rand 0.8.5", - "snafu", + "snafu 0.7.5", "tokio", "tokio-stream", "tonic", @@ -3704,17 +3772,17 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.24" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb2c4422095b67ee78da96fbb51a4cc413b3b25883c7717ff7ca1ab31022c9c9" +checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" dependencies = [ - "bytes 1.5.0", + "bytes 1.6.0", "fnv", "futures-core", "futures-sink", "futures-util", "http 0.2.9", - "indexmap 2.1.0", + "indexmap 2.2.6", "slab", "tokio", "tokio-util", @@ -3723,17 +3791,17 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.2" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31d030e59af851932b72ceebadf4a2b5986dba4c3b99dd2493f8273a0f151943" +checksum = "816ec7294445779408f36fe57bc5b7fc1cf59664059096c65f905c1c61f58069" dependencies = [ - "bytes 1.5.0", + "bytes 1.6.0", "fnv", "futures-core", "futures-sink", "futures-util", "http 1.0.0", - "indexmap 2.1.0", + "indexmap 2.2.6", "slab", "tokio", "tokio-util", @@ -3801,7 +3869,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06683b93020a07e3dbcf5f8c0f6d40080d725bea7936fc01ad345c01b97dc270" dependencies = [ "base64 0.21.7", - "bytes 1.5.0", + "bytes 1.6.0", "headers-core", "http 0.2.9", "httpdate", @@ -3833,10 +3901,16 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + [[package]] name = "heim" version = "0.1.0-rc.1" -source = "git+https://github.com/vectordotdev/heim.git?branch=update-nix#76fa765c7ed7fbe43d1465bf52da6b8d19f2d2a9" +source = "git+https://github.com/vectordotdev/heim.git?branch=update-nix#a66c44074fb214e2b9355d7c407315f720664b18" dependencies = [ "heim-common", "heim-cpu", @@ -3850,7 +3924,7 @@ dependencies = [ [[package]] name = "heim-common" version = "0.1.0-rc.1" -source = "git+https://github.com/vectordotdev/heim.git?branch=update-nix#76fa765c7ed7fbe43d1465bf52da6b8d19f2d2a9" +source = "git+https://github.com/vectordotdev/heim.git?branch=update-nix#a66c44074fb214e2b9355d7c407315f720664b18" dependencies = [ "cfg-if", "core-foundation", @@ -3868,7 +3942,7 @@ dependencies = [ [[package]] name = "heim-cpu" 
version = "0.1.0-rc.1" -source = "git+https://github.com/vectordotdev/heim.git?branch=update-nix#76fa765c7ed7fbe43d1465bf52da6b8d19f2d2a9" +source = "git+https://github.com/vectordotdev/heim.git?branch=update-nix#a66c44074fb214e2b9355d7c407315f720664b18" dependencies = [ "cfg-if", "futures 0.3.30", @@ -3886,7 +3960,7 @@ dependencies = [ [[package]] name = "heim-disk" version = "0.1.0-rc.1" -source = "git+https://github.com/vectordotdev/heim.git?branch=update-nix#76fa765c7ed7fbe43d1465bf52da6b8d19f2d2a9" +source = "git+https://github.com/vectordotdev/heim.git?branch=update-nix#a66c44074fb214e2b9355d7c407315f720664b18" dependencies = [ "bitflags 1.3.2", "cfg-if", @@ -3902,7 +3976,7 @@ dependencies = [ [[package]] name = "heim-host" version = "0.1.0-rc.1" -source = "git+https://github.com/vectordotdev/heim.git?branch=update-nix#76fa765c7ed7fbe43d1465bf52da6b8d19f2d2a9" +source = "git+https://github.com/vectordotdev/heim.git?branch=update-nix#a66c44074fb214e2b9355d7c407315f720664b18" dependencies = [ "cfg-if", "heim-common", @@ -3919,7 +3993,7 @@ dependencies = [ [[package]] name = "heim-memory" version = "0.1.0-rc.1" -source = "git+https://github.com/vectordotdev/heim.git?branch=update-nix#76fa765c7ed7fbe43d1465bf52da6b8d19f2d2a9" +source = "git+https://github.com/vectordotdev/heim.git?branch=update-nix#a66c44074fb214e2b9355d7c407315f720664b18" dependencies = [ "cfg-if", "heim-common", @@ -3933,7 +4007,7 @@ dependencies = [ [[package]] name = "heim-net" version = "0.1.0-rc.1" -source = "git+https://github.com/vectordotdev/heim.git?branch=update-nix#76fa765c7ed7fbe43d1465bf52da6b8d19f2d2a9" +source = "git+https://github.com/vectordotdev/heim.git?branch=update-nix#a66c44074fb214e2b9355d7c407315f720664b18" dependencies = [ "bitflags 1.3.2", "cfg-if", @@ -3949,7 +4023,7 @@ dependencies = [ [[package]] name = "heim-runtime" version = "0.1.0-rc.1" -source = "git+https://github.com/vectordotdev/heim.git?branch=update-nix#76fa765c7ed7fbe43d1465bf52da6b8d19f2d2a9" +source = "git+https://github.com/vectordotdev/heim.git?branch=update-nix#a66c44074fb214e2b9355d7c407315f720664b18" dependencies = [ "futures 0.3.30", "futures-timer", @@ -3980,9 +4054,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "hickory-proto" -version = "0.24.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "091a6fbccf4860009355e3efc52ff4acf37a63489aad7435372d44ceeb6fbbcf" +checksum = "07698b8420e2f0d6447a436ba999ec85d8fbf2a398bbd737b82cac4a2e96e512" dependencies = [ "async-trait", "cfg-if", @@ -4040,13 +4114,24 @@ dependencies = [ "winapi", ] +[[package]] +name = "hostname" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9c7c7c8ac16c798734b8a24560c1362120597c40d5e1459f09498f8f6c8f2ba" +dependencies = [ + "cfg-if", + "libc", + "windows", +] + [[package]] name = "http" version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" dependencies = [ - "bytes 1.5.0", + "bytes 1.6.0", "fnv", "itoa", ] @@ -4057,7 +4142,7 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b32afd38673a8016f7c9ae69e5af41a58f81b1d31689040f2f1959594ce194ea" dependencies = [ - "bytes 1.5.0", + "bytes 1.6.0", "fnv", "itoa", ] @@ -4068,11 +4153,34 @@ version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" dependencies = [ - "bytes 1.5.0", + "bytes 1.6.0", "http 0.2.9", "pin-project-lite", ] +[[package]] +name = "http-body" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" +dependencies = [ + "bytes 1.6.0", + "http 1.0.0", +] + +[[package]] +name = "http-body-util" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41cb79eb393015dadd30fc252023adb0b2400a0caee0fa2a077e6e21a551e840" +dependencies = [ + "bytes 1.6.0", + "futures-util", + "http 1.0.0", + "http-body 1.0.0", + "pin-project-lite", +] + [[package]] name = "http-range-header" version = "0.3.1" @@ -4134,24 +4242,58 @@ version = "0.14.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80" dependencies = [ - "bytes 1.5.0", + "bytes 1.6.0", "futures-channel", "futures-core", "futures-util", - "h2 0.3.24", + "h2 0.3.26", "http 0.2.9", - "http-body", + "http-body 0.4.5", "httparse", "httpdate", "itoa", "pin-project-lite", - "socket2 0.5.5", + "socket2 0.5.6", "tokio", "tower-service", "tracing 0.1.40", "want", ] +[[package]] +name = "hyper" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "186548d73ac615b32a73aafe38fb4f56c0d340e110e5a200bcadbaf2e199263a" +dependencies = [ + "bytes 1.6.0", + "futures-channel", + "futures-util", + "http 1.0.0", + "http-body 1.0.0", + "httparse", + "itoa", + "pin-project-lite", + "smallvec", + "tokio", + "want", +] + +[[package]] +name = "hyper-named-pipe" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73b7d8abf35697b81a825e386fc151e0d503e8cb5fcb93cc8669c376dfd6f278" +dependencies = [ + "hex", + "hyper 1.2.0", + "hyper-util", + "pin-project-lite", + "tokio", + "tower-service", + "winapi", +] + [[package]] name = "hyper-openssl" version = "0.9.2" @@ -4159,7 +4301,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6ee5d7a8f718585d1c3c61dfde28ef5b0bb14734b4db13f5ada856cdc6c612b" dependencies = [ "http 0.2.9", - "hyper", + "hyper 0.14.28", "linked_hash_set", "once_cell", "openssl", @@ -4176,11 +4318,11 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca815a891b24fdfb243fa3239c86154392b0953ee584aa1a2a1f66d20cbe75cc" dependencies = [ - "bytes 1.5.0", + "bytes 1.6.0", "futures 0.3.30", "headers", "http 0.2.9", - "hyper", + "hyper 0.14.28", "openssl", "tokio", "tokio-openssl", @@ -4195,12 +4337,31 @@ checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", "http 0.2.9", - "hyper", + "hyper 0.14.28", + "log", + "rustls 0.21.11", + "rustls-native-certs 0.6.3", + "tokio", + "tokio-rustls 0.24.1", +] + +[[package]] +name = "hyper-rustls" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0bea761b46ae2b24eb4aef630d8d1c398157b6fc29e6350ecf090a0b70c952c" +dependencies = [ + "futures-util", + "http 1.0.0", + "hyper 1.2.0", + "hyper-util", "log", - "rustls", - "rustls-native-certs", + "rustls 0.22.4", + "rustls-native-certs 0.7.0", + "rustls-pki-types", "tokio", - "tokio-rustls", + "tokio-rustls 0.25.0", + "tower-service", ] [[package]] @@ -4209,7 +4370,7 @@ version = "0.4.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" dependencies = [ - "hyper", + "hyper 0.14.28", "pin-project-lite", "tokio", "tokio-io-timeout", @@ -4221,31 +4382,53 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ - "bytes 1.5.0", - "hyper", + "bytes 1.6.0", + "hyper 0.14.28", "native-tls", "tokio", "tokio-native-tls", ] [[package]] -name = "hyperlocal" -version = "0.8.0" +name = "hyper-util" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fafdf7b2b2de7c9784f76e02c0935e65a8117ec3b768644379983ab333ac98c" +checksum = "ca38ef113da30126bbff9cd1705f9273e15d45498615d138b0c20279ac7a76aa" dependencies = [ + "bytes 1.6.0", + "futures-channel", "futures-util", + "http 1.0.0", + "http-body 1.0.0", + "hyper 1.2.0", + "pin-project-lite", + "socket2 0.5.6", + "tokio", + "tower", + "tower-service", + "tracing 0.1.40", +] + +[[package]] +name = "hyperlocal-next" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acf569d43fa9848e510358c07b80f4adf34084ddc28c6a4a651ee8474c070dcc" +dependencies = [ "hex", - "hyper", - "pin-project", + "http-body-util", + "hyper 1.2.0", + "hyper-util", + "pin-project-lite", "tokio", + "tower-service", ] [[package]] name = "iana-time-zone" -version = "0.1.58" +version = "0.1.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8326b86b6cff230b97d0d312a6c40a60726df3332e721f72a1b035f451663b20" +checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -4314,9 +4497,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.1.0" +version = "2.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f" +checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" dependencies = [ "equivalent", "hashbrown 0.14.3", @@ -4325,9 +4508,9 @@ dependencies = [ [[package]] name = "indicatif" -version = "0.17.7" +version = "0.17.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb28741c9db9a713d93deb3bb9515c20788cef5815265bee4980e87bde7e0f25" +checksum = "763a5a8f45087d6bcea4222e7b72c291a054edf80e4ef6efd2a4979878c7bea3" dependencies = [ "console", "instant", @@ -4339,9 +4522,9 @@ dependencies = [ [[package]] name = "indoc" -version = "2.0.4" +version = "2.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e186cfbae8084e513daff4240b4797e342f988cecda4fb6c939150f96315fd8" +checksum = "b248f5224d1d606005e02c97f5aa4e88eeb230488bcc03bc9ca4d7991399f2b5" [[package]] name = "infer" @@ -4426,7 +4609,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.5.5", + "socket2 0.5.6", "widestring 1.0.2", "windows-sys 0.48.0", "winreg", @@ -4437,6 +4620,9 @@ name = "ipnet" version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" +dependencies = [ + "serde", +] [[package]] name = "ipnetwork" @@ -4454,7 +4640,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ "hermit-abi 0.3.3", - "rustix 0.38.28", + "rustix 0.38.31", "windows-sys 0.48.0", ] @@ -4484,9 +4670,9 @@ dependencies = [ [[package]] name = "itertools" -version = "0.12.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25db6b064527c5d482d0423354fcd07a89a2dfe07b67892e62411946db7f07b0" +checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" dependencies = [ "either", ] @@ -4584,7 +4770,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d9455388f4977de4d0934efa9f7d36296295537d774574113a20f6082de03da" dependencies = [ "base64 0.13.1", - "bytes 1.5.0", + "bytes 1.6.0", "chrono", "serde", "serde-value", @@ -4598,7 +4784,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cd990069640f9db34b3b0f7a1afc62a05ffaa3be9b66aa3c313f58346df7f788" dependencies = [ "base64 0.21.7", - "bytes 1.5.0", + "bytes 1.6.0", "chrono", "http 0.2.9", "percent-encoding", @@ -4676,14 +4862,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "544339f1665488243f79080441cacb09c997746fd763342303e66eebb9d3ba13" dependencies = [ "base64 0.20.0", - "bytes 1.5.0", + "bytes 1.6.0", "chrono", "dirs-next", "either", "futures 0.3.30", "http 0.2.9", - "http-body", - "hyper", + "http-body 0.4.5", + "hyper 0.14.28", "hyper-openssl", "hyper-timeout", "jsonpath_lib", @@ -4695,7 +4881,7 @@ dependencies = [ "secrecy", "serde", "serde_json", - "serde_yaml 0.9.30", + "serde_yaml 0.9.34+deprecated", "thiserror", "tokio", "tokio-util", @@ -4785,7 +4971,7 @@ dependencies = [ "async-reactor-trait", "async-trait", "executor-trait", - "flume", + "flume 0.10.14", "futures-core", "futures-io", "parking_lot", @@ -4807,9 +4993,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.152" +version = "0.2.153" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13e3bf6590cbc649f4d1a3eefc9d5d6eb746f5200ffb04e5e142700b8faa56e7" +checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" [[package]] name = "libflate" @@ -4903,15 +5089,15 @@ dependencies = [ [[package]] name = "lockfree-object-pool" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee33defb27b106378a6efcfcde4dda6226dfdac8ba7a2904f5bc93363cb88557" +checksum = "3a69c0481fc2424cb55795de7da41add33372ea75a94f9b6588ab6a2826dfebc" [[package]] name = "log" -version = "0.4.20" +version = "0.4.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" +checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" [[package]] name = "logfmt" @@ -4923,19 +5109,19 @@ checksum = "879777f0cc6f3646a044de60e4ab98c75617e3f9580f7a2032e6ad7ea0cd3054" name = "loki-logproto" version = "0.1.0" dependencies = [ - "bytes 1.5.0", + "bytes 1.6.0", "chrono", - "prost 0.12.3", - "prost-build 0.12.3", - "prost-types 0.12.3", + "prost 0.12.4", + "prost-build 0.12.4", + "prost-types 0.12.4", "snap", ] [[package]] name = "lru" -version = "0.12.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db2c024b41519440580066ba82aab04092b333e09066a5eb86c7c4890df31f22" +checksum = "d3262e75e648fce39813cb56ac41f3c3e3f65217ebf3844d818d1f9398cfb0dc" 
dependencies = [ "hashbrown 0.14.3", ] @@ -5082,9 +5268,9 @@ dependencies = [ [[package]] name = "memchr" -version = "2.7.1" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149" +checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d" [[package]] name = "memmap2" @@ -5139,9 +5325,9 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ddece26afd34c31585c74a4db0630c376df271c285d682d1e55012197830b6df" dependencies = [ - "proc-macro2 1.0.78", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] @@ -5212,9 +5398,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.9" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dce281c5e46beae905d4de1870d8b1509a9142b62eedf18b443b011ca8343d0" +checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" dependencies = [ "libc", "log", @@ -5224,11 +5410,11 @@ dependencies = [ [[package]] name = "mlua" -version = "0.9.5" +version = "0.9.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d3561f79659ff3afad7b25e2bf2ec21507fe601ebecb7f81088669ec4bfd51e" +checksum = "6d9bed6bce296397a9d6a86f995dd10a547a4e6949825d45225906bdcbfe7367" dependencies = [ - "bstr 1.9.0", + "bstr 1.9.1", "mlua-sys", "mlua_derive", "num-traits", @@ -5255,26 +5441,26 @@ version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aaade5f94e5829db58791664ba98f35fea6a3ffebc783becb51dc97c7a21abee" dependencies = [ - "itertools 0.12.0", + "itertools 0.12.1", "once_cell", "proc-macro-error", - "proc-macro2 1.0.78", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", "regex", - "syn 2.0.48", + "syn 2.0.60", ] [[package]] name = "mock_instant" -version = "0.3.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c1a54de846c4006b88b1516731cc1f6026eb5dc4bcb186aa071ef66d40524ec" +checksum = "6c356644192565524790740e4075307c2cfc26d04d2543fb8e3ab9ef43a115ec" [[package]] name = "mongodb" -version = "2.8.0" +version = "2.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46c30763a5c6c52079602be44fa360ca3bfacee55fca73f4734aecd23706a7f2" +checksum = "ef206acb1b72389b49bc9985efe7eb1f8a9bb18e5680d262fac26c07f44025f1" dependencies = [ "async-trait", "base64 0.13.1", @@ -5295,8 +5481,8 @@ dependencies = [ "percent-encoding", "rand 0.8.5", "rustc_version_runtime", - "rustls", - "rustls-pemfile", + "rustls 0.21.11", + "rustls-pemfile 1.0.3", "serde", "serde_bytes", "serde_with 1.14.0", @@ -5308,7 +5494,7 @@ dependencies = [ "take_mut", "thiserror", "tokio", - "tokio-rustls", + "tokio-rustls 0.24.1", "tokio-util", "trust-dns-proto", "trust-dns-resolver", @@ -5323,7 +5509,7 @@ version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a15d522be0a9c3e46fd2632e272d178f56387bdb5c9fbb3a36c649062e9b5219" dependencies = [ - "bytes 1.5.0", + "bytes 1.6.0", "encoding_rs", "futures-util", "http 1.0.0", @@ -5435,12 +5621,13 @@ dependencies = [ [[package]] name = "nix" -version = "0.27.1" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053" +checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4" dependencies 
= [ "bitflags 2.4.1", "cfg-if", + "cfg_aliases", "libc", ] @@ -5454,7 +5641,7 @@ dependencies = [ "data-encoding", "ed25519", "ed25519-dalek", - "getrandom 0.2.12", + "getrandom 0.2.14", "log", "rand 0.8.5", "signatory", @@ -5462,15 +5649,14 @@ dependencies = [ [[package]] name = "nkeys" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6eafe79aeb8066a6f1f84dc44c03ae97403013e946bf0b13626468e0d5e26c6f" +checksum = "bc522a19199a0795776406619aa6aa78e1e55690fbeb3181b8db5265fd0e89ce" dependencies = [ - "byteorder", "data-encoding", "ed25519", "ed25519-dalek", - "getrandom 0.2.12", + "getrandom 0.2.14", "log", "rand 0.8.5", "signatory", @@ -5644,9 +5830,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.17" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" +checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" dependencies = [ "autocfg", "libm", @@ -5696,8 +5882,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcbff9bc912032c62bf65ef1d5aea88983b420f4f839db1e9b0c281a25c9c799" dependencies = [ "proc-macro-crate 1.3.1", - "proc-macro2 1.0.78", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", "syn 1.0.109", ] @@ -5708,9 +5894,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96667db765a921f7b295ffee8b60472b686a51d4f21c2ee4ffdb94c7013b65a6" dependencies = [ "proc-macro-crate 1.3.1", - "proc-macro2 1.0.78", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] @@ -5720,9 +5906,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "681030a937600a36906c185595136d26abfebb4aa9c65701cefcaf8578bb982b" dependencies = [ "proc-macro-crate 2.0.0", - "proc-macro2 1.0.78", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] @@ -5748,7 +5934,7 @@ checksum = "c38841cdd844847e3e7c8d29cef9dcfed8877f8f56f9071f77843ecf3baf937f" dependencies = [ "base64 0.13.1", "chrono", - "getrandom 0.2.12", + "getrandom 0.2.14", "http 0.2.9", "rand 0.8.5", "reqwest", @@ -5829,25 +6015,25 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "opendal" -version = "0.44.2" +version = "0.45.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4af824652d4d2ffabf606d337a071677ae621b05622adf35df9562f69d9b4498" +checksum = "52c17c077f23fa2d2c25d9d22af98baa43b8bbe2ef0de80cf66339aa70401467" dependencies = [ "anyhow", "async-trait", "backon", "base64 0.21.7", - "bytes 1.5.0", + "bytes 1.6.0", "chrono", "flagset", "futures 0.3.30", - "getrandom 0.2.12", + "getrandom 0.2.14", "http 0.2.9", "log", "md-5", "once_cell", "percent-encoding", - "quick-xml 0.30.0", + "quick-xml", "reqwest", "serde", "serde_json", @@ -5880,7 +6066,7 @@ dependencies = [ "serde_json", "serde_path_to_error", "serde_plain", - "serde_with 3.5.0", + "serde_with 3.7.0", "sha2", "subtle", "thiserror", @@ -5889,9 +6075,9 @@ dependencies = [ [[package]] name = "openssl" -version = "0.10.63" +version = "0.10.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15c9d69dd87a29568d4d017cfe8ec518706046a05184e5aea92d0af890b803c8" +checksum = "95a0481286a310808298130d22dd1fef0fa571e05a8f44ec801801e84b216b1f" dependencies = [ "bitflags 2.4.1", "cfg-if", @@ -5908,9 
+6094,9 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ - "proc-macro2 1.0.78", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] @@ -5921,18 +6107,18 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" -version = "300.2.1+3.2.0" +version = "300.2.3+3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fe476c29791a5ca0d1273c697e96085bbabbbea2ef7afd5617e78a4b40332d3" +checksum = "5cff92b6f71555b61bb9315f7c64da3ca43d87531622120fea0195fc761b4843" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.99" +version = "0.9.101" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22e1bf214306098e4832460f797824c05d25aacdf896f64a985fb0fd992454ae" +checksum = "dda2b0f344e78efc2facf7d195d098df0dd72151b26ab98da807afc26c198dff" dependencies = [ "cc", "libc", @@ -5945,12 +6131,12 @@ dependencies = [ name = "opentelemetry-proto" version = "0.1.0" dependencies = [ - "bytes 1.5.0", + "bytes 1.6.0", "chrono", "hex", "ordered-float 4.2.0", - "prost 0.12.3", - "prost-build 0.12.3", + "prost 0.12.4", + "prost-build 0.12.4", "tonic", "tonic-build 0.10.2", "vector-core", @@ -5993,12 +6179,12 @@ dependencies = [ [[package]] name = "os_info" -version = "3.7.0" +version = "3.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "006e42d5b888366f1880eda20371fedde764ed2213dc8496f49622fa0c99cd5e" +checksum = "ae99c7fa6dd38c7cafe1ec085e804f8f555a2f8659b0dbe03f1f9963a9b51092" dependencies = [ "log", - "winapi", + "windows-sys 0.52.0", ] [[package]] @@ -6166,9 +6352,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.5" +version = "2.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae9cee2a55a544be8b89dc6848072af97a20f2422603c10865be2a42b580fff5" +checksum = "219c0dcc30b6a27553f9cc242972b67f75b60eb0db71f0b5462f38b058c41546" dependencies = [ "memchr", "thiserror", @@ -6177,9 +6363,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.7.5" +version = "2.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81d78524685f5ef2a3b3bd1cafbc9fcabb036253d9b1463e726a91cd16e2dfc2" +checksum = "22e1288dbd7786462961e69bfd4df7848c1e37e8b74303dbdab82c3a9cdd2809" dependencies = [ "pest", "pest_generator", @@ -6187,22 +6373,22 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.7.5" +version = "2.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68bd1206e71118b5356dae5ddc61c8b11e28b09ef6a31acbd15ea48a28e0c227" +checksum = "1381c29a877c6d34b8c176e734f35d7f7f5b3adaefe940cb4d1bb7af94678e2e" dependencies = [ "pest", "pest_meta", - "proc-macro2 1.0.78", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] name = "pest_meta" -version = "2.7.5" +version = "2.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c747191d4ad9e4a4ab9c8798f1e82a39affe7ef9648390b7e5548d18e099de6" +checksum = "d0934d6907f148c22a3acbda520c7eed243ad7487a30f51f6ce52b58b7077a8a" dependencies = [ "once_cell", "pest", @@ -6216,7 +6402,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" dependencies = [ "fixedbitset", - "indexmap 2.1.0", + "indexmap 2.2.6", ] [[package]] @@ -6268,22 +6454,22 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.4" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0302c4a0442c456bd56f841aee5c3bfd17967563f6fadc9ceb9f9c23cf3807e0" +checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.4" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "266c042b60c9c76b8d53061e52b2e0d1116abc57cefc8c5cd671619a56ac3690" +checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ - "proc-macro2 1.0.78", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] @@ -6305,7 +6491,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d894b67aa7a4bf295db5e85349078c604edaa6fa5c8721e8eca3c7729a27f2ac" dependencies = [ "doc-comment", - "flume", + "flume 0.10.14", "parking_lot", "tracing 0.1.40", ] @@ -6413,7 +6599,7 @@ dependencies = [ "cfg-if", "concurrent-queue", "pin-project-lite", - "rustix 0.38.28", + "rustix 0.38.31", "tracing 0.1.40", "windows-sys 0.48.0", ] @@ -6463,7 +6649,7 @@ checksum = "49b6c5ef183cd3ab4ba005f1ca64c21e8bd97ce4699cfea9e8d9a2c4958ca520" dependencies = [ "base64 0.21.7", "byteorder", - "bytes 1.5.0", + "bytes 1.6.0", "fallible-iterator", "hmac", "md-5", @@ -6479,7 +6665,7 @@ version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d2234cdee9408b523530a9b6d2d6b373d1db34f6a8e51dc03ded1828d7fb67c" dependencies = [ - "bytes 1.5.0", + "bytes 1.6.0", "chrono", "fallible-iterator", "postgres-protocol", @@ -6549,7 +6735,7 @@ version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c8646e95016a7a6c4adea95bafa8a16baab64b583356217f2c85db4a39d9a86" dependencies = [ - "proc-macro2 1.0.78", + "proc-macro2 1.0.81", "syn 1.0.109", ] @@ -6559,8 +6745,8 @@ version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae005bd773ab59b4725093fd7df83fd7892f7d8eafb48dbd7de6e024e4215f9d" dependencies = [ - "proc-macro2 1.0.78", - "syn 2.0.48", + "proc-macro2 1.0.81", + "syn 2.0.60", ] [[package]] @@ -6612,8 +6798,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.78", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", "syn 1.0.109", "version_check", ] @@ -6624,8 +6810,8 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.78", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", "version_check", ] @@ -6652,9 +6838,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.78" +version = "1.0.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2422ad645d89c99f8f3e6b88a9fdeca7fabeac836b1002371c4367c8f984aae" +checksum = "3d1597b0c024618f09a9c3b8655b7e430397a36d23fdafec26d6965e9eec3eba" dependencies = [ "unicode-ident", ] @@ -6663,13 +6849,13 @@ dependencies = [ name = "prometheus-parser" version 
= "0.1.0" dependencies = [ - "indexmap 2.1.0", + "indexmap 2.2.6", "nom", "num_enum 0.7.2", - "prost 0.12.3", - "prost-build 0.12.3", - "prost-types 0.12.3", - "snafu", + "prost 0.12.4", + "prost-build 0.12.4", + "prost-types 0.12.4", + "snafu 0.7.5", "vector-common", ] @@ -6693,24 +6879,35 @@ dependencies = [ "unarray", ] +[[package]] +name = "proptest-derive" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cf16337405ca084e9c78985114633b6827711d22b9e6ef6c6c0d665eb3f0b6e" +dependencies = [ + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 1.0.109", +] + [[package]] name = "prost" version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b82eaa1d779e9a4bc1c3217db8ffbeabaae1dca241bf70183242128d48681cd" dependencies = [ - "bytes 1.5.0", + "bytes 1.6.0", "prost-derive 0.11.9", ] [[package]] name = "prost" -version = "0.12.3" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "146c289cda302b98a28d40c8b3b90498d6e526dd24ac2ecea73e4e491685b94a" +checksum = "d0f5d036824e4761737860779c906171497f6d55681139d8312388f8fe398922" dependencies = [ - "bytes 1.5.0", - "prost-derive 0.12.3", + "bytes 1.6.0", + "prost-derive 0.12.4", ] [[package]] @@ -6719,7 +6916,7 @@ version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270" dependencies = [ - "bytes 1.5.0", + "bytes 1.6.0", "heck 0.4.1", "itertools 0.10.5", "lazy_static", @@ -6737,24 +6934,23 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.12.3" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c55e02e35260070b6f716a2423c2ff1c3bb1642ddca6f99e1f26d06268a0e2d2" +checksum = "80b776a1b2dc779f5ee0641f8ade0125bc1298dd41a9a0c16d8bd57b42d222b1" dependencies = [ - "bytes 1.5.0", - "heck 0.4.1", - "itertools 0.11.0", + "bytes 1.6.0", + "heck 0.5.0", + "itertools 0.12.1", "log", "multimap", "once_cell", "petgraph", "prettyplease 0.2.15", - "prost 0.12.3", - "prost-types 0.12.3", + "prost 0.12.4", + "prost-types 0.12.4", "regex", - "syn 2.0.48", + "syn 2.0.60", "tempfile", - "which 4.4.2", ] [[package]] @@ -6765,34 +6961,34 @@ checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", "itertools 0.10.5", - "proc-macro2 1.0.78", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", "syn 1.0.109", ] [[package]] name = "prost-derive" -version = "0.12.3" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efb6c9a1dd1def8e2124d17e83a20af56f1570d6c2d2bd9e266ccb768df3840e" +checksum = "19de2de2a00075bf566bee3bd4db014b11587e84184d3f7a791bc17f1a8e9e48" dependencies = [ "anyhow", - "itertools 0.11.0", - "proc-macro2 1.0.78", - "quote 1.0.35", - "syn 2.0.48", + "itertools 0.12.1", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] name = "prost-reflect" -version = "0.12.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "057237efdb71cf4b3f9396302a3d6599a92fa94063ba537b66130980ea9909f3" +checksum = "6f5eec97d5d34bdd17ad2db2219aabf46b054c6c41bd5529767c9ce55be5898f" dependencies = [ - "base64 0.21.7", + "base64 0.22.0", "once_cell", - "prost 0.12.3", - "prost-types 0.12.3", + "prost 0.12.4", + "prost-types 0.12.4", "serde", "serde-value", ] @@ -6808,13 +7004,28 @@ dependencies = [ [[package]] name = "prost-types" 
-version = "0.12.3" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3235c33eb02c1f1e212abdbe34c78b264b038fb58ca612664343271e36e55ffe" +dependencies = [ + "prost 0.12.4", +] + +[[package]] +name = "psl" +version = "2.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "193898f59edcf43c26227dcd4c8427f00d99d61e95dcde58dabd49fa291d470e" +checksum = "dc74a6e6a56708be1cf5c4c4d1a0dc21d33b2dcaa24e731b7fa9c287ce4f916f" dependencies = [ - "prost 0.12.3", + "psl-types", ] +[[package]] +name = "psl-types" +version = "2.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33cb294fe86a74cbcf50d4445b37da762029549ebeea341421c7c70370f86cac" + [[package]] name = "ptr_meta" version = "0.1.4" @@ -6830,8 +7041,8 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac" dependencies = [ - "proc-macro2 1.0.78", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", "syn 1.0.109", ] @@ -6843,7 +7054,7 @@ checksum = "5d21c6a837986cf25d22ac5b951c267d95808f3c830ff009c2879fff259a0268" dependencies = [ "async-trait", "bit-vec", - "bytes 1.5.0", + "bytes 1.6.0", "chrono", "crc", "data-url", @@ -6898,9 +7109,9 @@ dependencies = [ [[package]] name = "quanta" -version = "0.12.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ca0b7bac0b97248c40bb77288fc52029cf1459c0461ea1b05ee32ccf011de2c" +checksum = "8e5167a477619228a0b284fac2674e3c388cba90631d7b7de620e6f1fcd08da5" dependencies = [ "crossbeam-utils", "libc", @@ -6917,16 +7128,6 @@ version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" -[[package]] -name = "quick-xml" -version = "0.30.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eff6510e86862b57b210fd8cbe8ed3f0d7d600b9c2863cd4549a2e033c66e956" -dependencies = [ - "memchr", - "serde", -] - [[package]] name = "quick-xml" version = "0.31.0" @@ -6954,8 +7155,8 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b22a693222d716a9587786f37ac3f6b4faedb5b80c23914e7303ff5a1d8016e9" dependencies = [ - "proc-macro2 1.0.78", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", "syn 1.0.109", ] @@ -6970,11 +7171,11 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.35" +version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" +checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" dependencies = [ - "proc-macro2 1.0.78", + "proc-macro2 1.0.81", ] [[package]] @@ -7058,7 +7259,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.12", + "getrandom 0.2.14", ] [[package]] @@ -7091,19 +7292,20 @@ dependencies = [ [[package]] name = "ratatui" -version = "0.25.0" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5659e52e4ba6e07b2dad9f1158f578ef84a73762625ddb51536019f34d180eb" +checksum = "a564a852040e82671dc50a37d88f3aa83bbc690dfc6844cfe7a2591620206a80" dependencies = [ "bitflags 2.4.1", "cassowary", + "compact_str", "crossterm", "indoc", - 
"itertools 0.12.0", + "itertools 0.12.1", "lru", "paste", "stability", - "strum", + "strum 0.26.1", "unicode-segmentation", "unicode-width", ] @@ -7211,7 +7413,7 @@ checksum = "c580d9cbbe1d1b479e8d67cf9daf6a62c957e6846048408b80b43ac3f6af84cd" dependencies = [ "arc-swap", "async-trait", - "bytes 1.5.0", + "bytes 1.6.0", "combine 4.6.6", "futures 0.3.30", "futures-util", @@ -7260,16 +7462,16 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom 0.2.12", + "getrandom 0.2.14", "redox_syscall 0.2.16", "thiserror", ] [[package]] name = "regex" -version = "1.10.3" +version = "1.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15" +checksum = "c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c" dependencies = [ "aho-corasick", "memchr", @@ -7338,38 +7540,40 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.23" +version = "0.11.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37b1ae8d9ac08420c66222fb9096fc5de435c3c48542bc5336c51892cffafb41" +checksum = "78bf93c4af7a8bb7d879d51cebe797356ff10ae8516ace542b5182d9dcac10b2" dependencies = [ "base64 0.21.7", - "bytes 1.5.0", + "bytes 1.6.0", "encoding_rs", "futures-core", "futures-util", - "h2 0.3.24", + "h2 0.3.26", "http 0.2.9", - "http-body", - "hyper", - "hyper-rustls", + "http-body 0.4.5", + "hyper 0.14.28", + "hyper-rustls 0.24.2", "hyper-tls", "ipnet", "js-sys", "log", "mime", + "mime_guess", "native-tls", "once_cell", "percent-encoding", "pin-project-lite", - "rustls", - "rustls-pemfile", + "rustls 0.21.11", + "rustls-pemfile 1.0.3", "serde", "serde_json", "serde_urlencoded", + "sync_wrapper", "system-configuration", "tokio", "tokio-native-tls", - "tokio-rustls", + "tokio-rustls 0.24.1", "tokio-util", "tower-service", "url", @@ -7387,7 +7591,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "52e44394d2086d010551b14b53b1f24e31647570cd1deb0379e2c21b329aba00" dependencies = [ - "hostname", + "hostname 0.3.1", "quick-error", ] @@ -7414,7 +7618,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb0205304757e5d899b9c2e448b867ffd03ae7f988002e47cd24954391394d0b" dependencies = [ "cc", - "getrandom 0.2.12", + "getrandom 0.2.14", "libc", "spin 0.9.8", "untrusted", @@ -7423,13 +7627,13 @@ dependencies = [ [[package]] name = "rkyv" -version = "0.7.43" +version = "0.7.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "527a97cdfef66f65998b5f3b637c26f5a5ec09cc52a3f9932313ac645f4190f5" +checksum = "5cba464629b3394fc4dbc6f940ff8f5b4ff5c7aef40f29166fd4ad12acbc99c0" dependencies = [ "bitvec", "bytecheck", - "bytes 1.5.0", + "bytes 1.6.0", "hashbrown 0.12.3", "ptr_meta", "rend", @@ -7441,12 +7645,12 @@ dependencies = [ [[package]] name = "rkyv_derive" -version = "0.7.43" +version = "0.7.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5c462a1328c8e67e4d6dbad1eb0355dd43e8ab432c6e227a43657f16ade5033" +checksum = "a7dddfff8de25e6f62b9d64e6e432bf1c6736c57d20323e15ee10435fbda7c65" dependencies = [ - "proc-macro2 1.0.78", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", "syn 1.0.109", ] @@ -7458,9 +7662,9 @@ checksum = "3582f63211428f83597b51b2ddb88e2a91a9d52d12831f9d08f5e624e8977422" [[package]] name = "rmp" -version = 
"0.8.12" +version = "0.8.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f9860a6cc38ed1da53456442089b4dfa35e7cedaa326df63017af88385e6b20" +checksum = "bddb316f4b9cae1a3e89c02f1926d557d1142d0d2e684b038c11c1b77705229a" dependencies = [ "byteorder", "num-traits", @@ -7469,9 +7673,9 @@ dependencies = [ [[package]] name = "rmp-serde" -version = "1.1.2" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bffea85eea980d8a74453e5d02a8d93028f3c34725de143085a844ebe953258a" +checksum = "938a142ab806f18b88a97b0dea523d39e0fd730a064b035726adcfc58a8a5188" dependencies = [ "byteorder", "rmp", @@ -7480,9 +7684,9 @@ dependencies = [ [[package]] name = "rmpv" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e0e0214a4a2b444ecce41a4025792fc31f77c7bb89c46d253953ea8c65701ec" +checksum = "e540282f11751956c82bc5529a7fb71b871b998fbf9cf06c2419b22e1b4350df" dependencies = [ "num-traits", "rmp", @@ -7492,14 +7696,9 @@ dependencies = [ [[package]] name = "roaring" -version = "0.10.2" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6106b5cf8587f5834158895e9715a3c6c9716c8aefab57f1f7680917191c7873" -dependencies = [ - "bytemuck", - "byteorder", - "retain_mut", -] +checksum = "a1c77081a55300e016cb86f2864415b7518741879db925b8d488a0ee0d2da6bf" [[package]] name = "roxmltree" @@ -7529,9 +7728,9 @@ dependencies = [ [[package]] name = "rstest" -version = "0.18.2" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97eeab2f3c0a199bc4be135c36c924b6590b88c377d416494288c14f2db30199" +checksum = "9d5316d2a1479eeef1ea21e7f9ddc67c191d497abc8fc3ba2467857abbb68330" dependencies = [ "futures 0.3.30", "futures-timer", @@ -7541,21 +7740,39 @@ dependencies = [ [[package]] name = "rstest_macros" -version = "0.18.2" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d428f8247852f894ee1be110b375111b586d4fa431f6c46e64ba5a0dcccbe605" +checksum = "04a9df72cc1f67020b0d63ad9bfe4a323e459ea7eb68e03bd9824db49f9a4c25" dependencies = [ "cfg-if", "glob", - "proc-macro2 1.0.78", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", "regex", "relative-path", "rustc_version 0.4.0", - "syn 2.0.48", + "syn 2.0.60", "unicode-ident", ] +[[package]] +name = "rumqttc" +version = "0.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1568e15fab2d546f940ed3a21f48bbbd1c494c90c99c4481339364a497f94a9" +dependencies = [ + "bytes 1.6.0", + "flume 0.11.0", + "futures-util", + "log", + "rustls-native-certs 0.7.0", + "rustls-pemfile 2.1.0", + "rustls-webpki 0.102.2", + "thiserror", + "tokio", + "tokio-rustls 0.25.0", +] + [[package]] name = "rust_decimal" version = "1.33.1" @@ -7564,7 +7781,7 @@ checksum = "06676aec5ccb8fc1da723cc8c0f9a46549f21ebb8753d3915c6c41db1e7f1dc4" dependencies = [ "arrayvec", "borsh", - "bytes 1.5.0", + "bytes 1.6.0", "num-traits", "rand 0.8.5", "rkyv", @@ -7599,7 +7816,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.21", + "semver 1.0.22", ] [[package]] @@ -7628,9 +7845,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.28" +version = "0.38.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"72e572a5e8ca657d7366229cdde4bd14c4eb5499a9573d4d366fe1b599daa316" +checksum = "6ea3e1a662af26cd7a3ba09c0297a31af215563ecf42817c98df621387f4e949" dependencies = [ "bitflags 2.4.1", "errno", @@ -7641,16 +7858,30 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.8" +version = "0.21.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "446e14c5cda4f3f30fe71863c34ec70f5ac79d6087097ad0bb433e1be5edf04c" +checksum = "7fecbfb7b1444f477b345853b1fce097a2c6fb637b2bfb87e6bc5db0f043fae4" dependencies = [ "log", "ring", - "rustls-webpki", + "rustls-webpki 0.101.7", "sct", ] +[[package]] +name = "rustls" +version = "0.22.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf4ef73721ac7bcd79b2b315da7779d8fc09718c6b3d2d1b2d94850eb8c18432" +dependencies = [ + "log", + "ring", + "rustls-pki-types", + "rustls-webpki 0.102.2", + "subtle", + "zeroize", +] + [[package]] name = "rustls-native-certs" version = "0.6.3" @@ -7658,7 +7889,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" dependencies = [ "openssl-probe", - "rustls-pemfile", + "rustls-pemfile 1.0.3", + "schannel", + "security-framework", +] + +[[package]] +name = "rustls-native-certs" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f1fb85efa936c42c6d5fc28d2629bb51e4b2f4b8a5211e297d599cc5a093792" +dependencies = [ + "openssl-probe", + "rustls-pemfile 2.1.0", + "rustls-pki-types", "schannel", "security-framework", ] @@ -7672,6 +7916,22 @@ dependencies = [ "base64 0.21.7", ] +[[package]] +name = "rustls-pemfile" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c333bb734fcdedcea57de1602543590f545f127dc8b533324318fd492c5c70b" +dependencies = [ + "base64 0.21.7", + "rustls-pki-types", +] + +[[package]] +name = "rustls-pki-types" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ede67b28608b4c60685c7d54122d4400d90f62b40caee7700e700380a390fa8" + [[package]] name = "rustls-webpki" version = "0.101.7" @@ -7682,6 +7942,17 @@ dependencies = [ "untrusted", ] +[[package]] +name = "rustls-webpki" +version = "0.102.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "faaa0a62740bedb9b2ef5afa303da42764c012f743917351dc9a237ea1663610" +dependencies = [ + "ring", + "rustls-pki-types", + "untrusted", +] + [[package]] name = "rustversion" version = "1.0.14" @@ -7702,9 +7973,9 @@ dependencies = [ [[package]] name = "rustyline" -version = "13.0.0" +version = "14.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02a2d683a4ac90aeef5b1013933f6d977bd37d51ff3f4dad829d4931a7e6be86" +checksum = "7803e8936da37efd9b6d4478277f4b2b9bb5cdb37a113e8d63222e58da647e63" dependencies = [ "bitflags 2.4.1", "cfg-if", @@ -7712,18 +7983,18 @@ dependencies = [ "libc", "log", "memchr", - "nix 0.27.1", + "nix 0.28.0", "unicode-segmentation", "unicode-width", "utf8parse", - "winapi", + "windows-sys 0.52.0", ] [[package]] name = "ryu" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c" +checksum = "e86697c916019a8588c99b5fac3cead74ec0b4b819707a682fd4d23fa0ce1ba1" [[package]] name = "salsa20" @@ -7828,9 +8099,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.9.2" +version 
= "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" +checksum = "770452e37cad93e0a50d5abc3990d2bc351c36d0328f86cefec2f2fb206eaef6" dependencies = [ "bitflags 1.3.2", "core-foundation", @@ -7841,9 +8112,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.9.1" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" +checksum = "41f3cc463c0ef97e11c3461a9d3787412d30e8e7eb907c79180c4a57bf7c04ef" dependencies = [ "core-foundation-sys", "libc", @@ -7860,9 +8131,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.21" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b97ed7a9823b74f99c7742f5336af7be5ecd3eeafcb1507d1fa93347b1d589b0" +checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca" dependencies = [ "serde", ] @@ -7875,18 +8146,18 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.195" +version = "1.0.198" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63261df402c67811e9ac6def069e4786148c4563f4b50fd4bf30aa370d626b02" +checksum = "9846a40c979031340571da2545a4e5b7c4163bdae79b301d5f86d03979451fcc" dependencies = [ "serde_derive", ] [[package]] name = "serde-toml-merge" -version = "0.3.3" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af5ae5f42c16d60b098ae5d4afd75c1d3b6512e6ca5d0b9b916e2ced30df264c" +checksum = "88075e75b01384301454b1c188243552c674263c0c0c3c7ed5dd82291b20798f" dependencies = [ "toml", ] @@ -7903,9 +8174,9 @@ dependencies = [ [[package]] name = "serde-wasm-bindgen" -version = "0.6.3" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9b713f70513ae1f8d92665bbbbda5c295c2cf1da5542881ae5eefe20c9af132" +checksum = "8302e169f0eddcc139c70f139d19d6467353af16f9fce27e8c30158036a1e16b" dependencies = [ "js-sys", "serde", @@ -7923,13 +8194,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.195" +version = "1.0.198" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46fe8f8603d81ba86327b23a2e9cdf49e1255fb94a4c5f297f6ee0547178ea2c" +checksum = "e88edab869b01783ba905e7d0153f9fc1a6505a96e4ad3018011eedb838566d9" dependencies = [ - "proc-macro2 1.0.78", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] @@ -7938,18 +8209,18 @@ version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "330f01ce65a3a5fe59a60c82f3c9a024b573b8a6e875bd233fe5f934e71d54e3" dependencies = [ - "proc-macro2 1.0.78", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] name = "serde_json" -version = "1.0.112" +version = "1.0.116" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d1bd37ce2324cf3bf85e5a25f96eb4baf0d5aa6eba43e7ae8958870c4ec48ed" +checksum = "3e17db7126d17feb94eb3fad46bf1a96b034e8aacbc2e775fe81505f8b0b2813" dependencies = [ - "indexmap 2.1.0", + "indexmap 2.2.6", "itoa", "ryu", "serde", @@ -8000,16 +8271,16 @@ version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3081f5ffbb02284dda55132aa26daecedd7372a42417bbbab6f14ab7d6bb9145" dependencies 
= [ - "proc-macro2 1.0.78", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] name = "serde_spanned" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12022b835073e5b11e90a14f86838ceb1c8fb0325b72416845c487ac0fa95e80" +checksum = "eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1" dependencies = [ "serde", ] @@ -8038,18 +8309,19 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.5.0" +version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f58c3a1b3e418f61c25b2aeb43fc6c95eaa252b8cecdda67f401943e9e08d33f" +checksum = "ee80b0e361bbf88fd2f6e242ccd19cfda072cb0faa6ae694ecee08199938569a" dependencies = [ "base64 0.21.7", "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.1.0", + "indexmap 2.2.6", "serde", + "serde_derive", "serde_json", - "serde_with_macros 3.5.0", + "serde_with_macros 3.7.0", "time", ] @@ -8060,21 +8332,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" dependencies = [ "darling 0.13.4", - "proc-macro2 1.0.78", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", "syn 1.0.109", ] [[package]] name = "serde_with_macros" -version = "3.5.0" +version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2068b437a31fc68f25dd7edc296b078f04b45145c199d8eed9866e45f1ff274" +checksum = "6561dc161a9224638a31d876ccdfefbc1df91d3f3a8342eddb35f055d48c7655" dependencies = [ - "darling 0.20.3", - "proc-macro2 1.0.78", - "quote 1.0.35", - "syn 2.0.48", + "darling 0.20.8", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] @@ -8091,11 +8363,11 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.9.30" +version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1bf28c79a99f70ee1f1d83d10c875d2e70618417fda01ad1785e027579d9d38" +checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.1.0", + "indexmap 2.2.6", "itoa", "ryu", "serde", @@ -8271,9 +8543,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.13.1" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7" +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" dependencies = [ "serde", ] @@ -8320,7 +8592,16 @@ dependencies = [ "doc-comment", "futures-core", "pin-project", - "snafu-derive", + "snafu-derive 0.7.5", +] + +[[package]] +name = "snafu" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d342c51730e54029130d7dc9fd735d28c4cd360f1368c01981d4f03ff207f096" +dependencies = [ + "snafu-derive 0.8.0", ] [[package]] @@ -8330,11 +8611,23 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "990079665f075b699031e9c08fd3ab99be5029b96f3b78dc0709e8f77e4efebf" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.78", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", "syn 1.0.109", ] +[[package]] +name = "snafu-derive" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "080c44971436b1af15d6f61ddd8b543995cf63ab8e677d46b00cc06f4ef267a0" +dependencies = [ + "heck 0.4.1", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", +] + 
[[package]] name = "snap" version = "1.1.1" @@ -8353,12 +8646,12 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.5" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" +checksum = "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871" dependencies = [ "libc", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -8388,12 +8681,12 @@ dependencies = [ [[package]] name = "stability" -version = "0.1.1" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebd1b177894da2a2d9120208c3386066af06a488255caabc5de8ddca22dbc3ce" +checksum = "2ff9eaf853dec4c8802325d8b6d3dffa86cc707fd7a1a4cdbf416e13b061787a" dependencies = [ - "quote 1.0.35", - "syn 1.0.109", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] @@ -8464,6 +8757,12 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" +[[package]] +name = "strsim" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ee073c9e4cd00e28217186dbe12796d692868f432bf2e97ee73bed0c56dfa01" + [[package]] name = "structopt" version = "0.3.26" @@ -8483,8 +8782,8 @@ checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" dependencies = [ "heck 0.3.3", "proc-macro-error", - "proc-macro2 1.0.78", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", "syn 1.0.109", ] @@ -8493,8 +8792,14 @@ name = "strum" version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125" + +[[package]] +name = "strum" +version = "0.26.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "723b93e8addf9aa965ebe2d11da6d7540fa2283fcea14b3371ff055f7ba13f5f" dependencies = [ - "strum_macros", + "strum_macros 0.26.1", ] [[package]] @@ -8504,10 +8809,23 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.78", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", "rustversion", - "syn 2.0.48", + "syn 2.0.60", +] + +[[package]] +name = "strum_macros" +version = "0.26.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a3417fc93d76740d974a01654a09777cb500428cc874ca9f45edfe0c4d4cd18" +dependencies = [ + "heck 0.4.1", + "proc-macro2 1.0.81", + "quote 1.0.36", + "rustversion", + "syn 2.0.60", ] [[package]] @@ -8543,19 +8861,19 @@ version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.78", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", "unicode-ident", ] [[package]] name = "syn" -version = "2.0.48" +version = "2.0.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f" +checksum = "909518bc7b1c9b779f1bbf07f2929d35af9f0f37e47c6e9ef7f9dddc1e1821f3" dependencies = [ - "proc-macro2 1.0.78", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", "unicode-ident", ] @@ -8566,9 +8884,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"1329189c02ff984e9736652b1631330da25eaa6bc639089ed4915d25446cbe7b" dependencies = [ "proc-macro-error", - "proc-macro2 1.0.78", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] @@ -8579,12 +8897,12 @@ checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" [[package]] name = "syslog" -version = "6.1.0" +version = "6.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7434e95bcccce1215d30f4bf84fe8c00e8de1b9be4fb736d747ca53d36e7f96f" +checksum = "dfc7e95b5b795122fafe6519e27629b5ab4232c73ebb2428f568e82b1a457ad3" dependencies = [ "error-chain", - "hostname", + "hostname 0.3.1", "libc", "log", "time", @@ -8641,25 +8959,24 @@ checksum = "4da30af7998f51ee1aa48ab24276fe303a697b004e31ff542b192c088d5630a5" dependencies = [ "cfg-if", "native-tls", - "rustls-pemfile", + "rustls-pemfile 1.0.3", ] [[package]] name = "temp-dir" -version = "0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd16aa9ffe15fe021c6ee3766772132c6e98dfa395a167e16864f61a9cfb71d6" +checksum = "1f227968ec00f0e5322f9b8173c7a0cbcff6181a0a5b28e9892491c286277231" [[package]] name = "tempfile" -version = "3.9.0" +version = "3.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01ce4141aa927a6d1bd34a041795abd0db1cccba5d5f24b009f694bdf3a1f3fa" +checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" dependencies = [ "cfg-if", "fastrand 2.0.1", - "redox_syscall 0.4.1", - "rustix 0.38.28", + "rustix 0.38.31", "windows-sys 0.52.0", ] @@ -8689,7 +9006,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21bebf2b7c9e0a515f6e0f8c51dc0f8e4696391e6f1ff30379559f8365fb0df7" dependencies = [ - "rustix 0.38.28", + "rustix 0.38.31", "windows-sys 0.48.0", ] @@ -8722,22 +9039,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.56" +version = "1.0.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54378c645627613241d077a3a79db965db602882668f9136ac42af9ecb730ad" +checksum = "f0126ad08bff79f29fc3ae6a55cc72352056dfff61e3ff8bb7129476d44b23aa" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.56" +version = "1.0.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa0faa943b50f3db30a20aa7e265dbc66076993efed8463e8de414e5d06d3471" +checksum = "d1cd413b5d558b4c5bf3680e324a6fa5014e7b7c067a51e69dbdf47eb7148b66" dependencies = [ - "proc-macro2 1.0.78", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] @@ -8838,19 +9155,19 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.35.1" +version = "1.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c89b4efa943be685f629b149f53829423f8f5531ea21249408e8e2f8671ec104" +checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787" dependencies = [ "backtrace", - "bytes 1.5.0", + "bytes 1.6.0", "libc", "mio", "num_cpus", "parking_lot", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.5", + "socket2 0.5.6", "tokio-macros", "tracing 0.1.40", "windows-sys 0.48.0", @@ -8883,9 +9200,9 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ - 
"proc-macro2 1.0.78", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] @@ -8918,7 +9235,7 @@ checksum = "d340244b32d920260ae7448cb72b6e238bddc3d4f7603394e7dd46ed8e48f5b8" dependencies = [ "async-trait", "byteorder", - "bytes 1.5.0", + "bytes 1.6.0", "fallible-iterator", "futures-channel", "futures-util", @@ -8930,7 +9247,7 @@ dependencies = [ "postgres-protocol", "postgres-types", "rand 0.8.5", - "socket2 0.5.5", + "socket2 0.5.6", "tokio", "tokio-util", "whoami", @@ -8953,15 +9270,26 @@ version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ - "rustls", + "rustls 0.21.11", + "tokio", +] + +[[package]] +name = "tokio-rustls" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" +dependencies = [ + "rustls 0.22.4", + "rustls-pki-types", "tokio", ] [[package]] name = "tokio-stream" -version = "0.1.14" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" +checksum = "267ac89e0bec6e691e5813911606935d77c476ff49024f98abcea3e7b15e37af" dependencies = [ "futures-core", "pin-project-lite", @@ -8971,12 +9299,12 @@ dependencies = [ [[package]] name = "tokio-test" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e89b3cbabd3ae862100094ae433e1def582cf86451b4e9bf83aa7ac1d8a7d719" +checksum = "2468baabc3311435b55dd935f702f42cd1b8abb7e754fb7dfb16bd36aa88f9f7" dependencies = [ "async-stream", - "bytes 1.5.0", + "bytes 1.6.0", "futures-core", "tokio", "tokio-stream", @@ -8990,9 +9318,21 @@ checksum = "212d5dcb2a1ce06d81107c3d0ffa3121fe974b73f068c8282cb1c32328113b6c" dependencies = [ "futures-util", "log", - "rustls", + "rustls 0.21.11", + "tokio", + "tungstenite 0.20.1", +] + +[[package]] +name = "tokio-tungstenite" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c83b561d025642014097b66e6c1bb422783339e0909e4429cde4749d1990bc38" +dependencies = [ + "futures-util", + "log", "tokio", - "tungstenite", + "tungstenite 0.21.0", ] [[package]] @@ -9000,7 +9340,7 @@ name = "tokio-util" version = "0.7.8" source = "git+https://github.com/vectordotdev/tokio?branch=tokio-util-0.7.8-framed-read-continue-on-error#3747655f8f0443e13fe20da3f613ea65c23347c2" dependencies = [ - "bytes 1.5.0", + "bytes 1.6.0", "futures-core", "futures-io", "futures-sink", @@ -9012,14 +9352,14 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.8" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1a195ec8c9da26928f773888e0742ca3ca1040c6cd859c919c9f59c1954ab35" +checksum = "e9dd1545e8208b4a5af1aa9bbd0b4cf7e9ea08fabc5d0a5c67fcaafa17433aa3" dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.21.0", + "toml_edit 0.22.8", ] [[package]] @@ -9037,9 +9377,9 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.1.0", + "indexmap 2.2.6", "toml_datetime", - "winnow", + "winnow 0.5.18", ] [[package]] @@ -9048,22 +9388,22 @@ version = "0.20.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"70f427fce4d84c72b5b732388bf4a9f4531b53f74e2887e3ecb2481f68f66d81" dependencies = [ - "indexmap 2.1.0", + "indexmap 2.2.6", "toml_datetime", - "winnow", + "winnow 0.5.18", ] [[package]] name = "toml_edit" -version = "0.21.0" +version = "0.22.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d34d383cd00a163b4a5b85053df514d45bc330f6de7737edfe0a93311d1eaa03" +checksum = "c12219811e0c1ba077867254e5ad62ee2c9c190b0d957110750ac0cda1ae96cd" dependencies = [ - "indexmap 2.1.0", + "indexmap 2.2.6", "serde", "serde_spanned", "toml_datetime", - "winnow", + "winnow 0.6.5", ] [[package]] @@ -9076,21 +9416,21 @@ dependencies = [ "async-trait", "axum", "base64 0.21.7", - "bytes 1.5.0", + "bytes 1.6.0", "flate2", - "h2 0.3.24", + "h2 0.3.26", "http 0.2.9", - "http-body", - "hyper", + "http-body 0.4.5", + "hyper 0.14.28", "hyper-timeout", "percent-encoding", "pin-project", - "prost 0.12.3", - "rustls", - "rustls-native-certs", - "rustls-pemfile", + "prost 0.12.4", + "rustls 0.21.11", + "rustls-native-certs 0.6.3", + "rustls-pemfile 1.0.3", "tokio", - "tokio-rustls", + "tokio-rustls 0.24.1", "tokio-stream", "tower", "tower-layer", @@ -9105,9 +9445,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a6fdaae4c2c638bb70fe42803a26fbd6fc6ac8c72f5c59f67ecc2a2dcabf4b07" dependencies = [ "prettyplease 0.1.25", - "proc-macro2 1.0.78", + "proc-macro2 1.0.81", "prost-build 0.11.9", - "quote 1.0.35", + "quote 1.0.36", "syn 1.0.109", ] @@ -9118,10 +9458,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d021fc044c18582b9a2408cd0dd05b1596e3ecdb5c4df822bb0183545683889" dependencies = [ "prettyplease 0.2.15", - "proc-macro2 1.0.78", - "prost-build 0.12.3", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2 1.0.81", + "prost-build 0.12.4", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] @@ -9153,11 +9493,11 @@ dependencies = [ "async-compression", "base64 0.21.7", "bitflags 2.4.1", - "bytes 1.5.0", + "bytes 1.6.0", "futures-core", "futures-util", "http 0.2.9", - "http-body", + "http-body 0.4.5", "http-range-header", "mime", "pin-project-lite", @@ -9222,9 +9562,9 @@ version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ - "proc-macro2 1.0.78", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] @@ -9408,7 +9748,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e3dac10fd62eaf6617d3a904ae222845979aec67c615d1c842b4002c7666fb9" dependencies = [ "byteorder", - "bytes 1.5.0", + "bytes 1.6.0", "data-encoding", "http 0.2.9", "httparse", @@ -9420,6 +9760,25 @@ dependencies = [ "utf-8", ] +[[package]] +name = "tungstenite" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ef1a641ea34f399a848dea702823bbecfb4c486f911735368f1f137cb8257e1" +dependencies = [ + "byteorder", + "bytes 1.6.0", + "data-encoding", + "http 1.0.0", + "httparse", + "log", + "rand 0.8.5", + "sha1", + "thiserror", + "url", + "utf-8", +] + [[package]] name = "twox-hash" version = "1.6.3" @@ -9436,8 +9795,8 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89851716b67b937e393b3daa8423e67ddfc4bbbf1654bcf05488e95e0828db0c" dependencies = [ - "proc-macro2 1.0.78", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", "syn 1.0.109", ] @@ -9456,9 +9815,9 @@ version = 
"0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f03ca4cb38206e2bef0700092660bb74d696f808514dae47fa1467cbfe26e96e" dependencies = [ - "proc-macro2 1.0.78", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] @@ -9469,9 +9828,9 @@ checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] name = "typetag" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c43148481c7b66502c48f35b8eef38b6ccdc7a9f04bd4cc294226d901ccc9bc7" +checksum = "661d18414ec032a49ece2d56eee03636e43c4e8d577047ab334c0ba892e29aaf" dependencies = [ "erased-serde", "inventory", @@ -9482,13 +9841,13 @@ dependencies = [ [[package]] name = "typetag-impl" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "291db8a81af4840c10d636e047cac67664e343be44e24dfdbd1492df9a5d3390" +checksum = "ac73887f47b9312552aa90ef477927ff014d63d1920ca8037c6c1951eab64bb1" dependencies = [ - "proc-macro2 1.0.78", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] @@ -9601,9 +9960,9 @@ dependencies = [ [[package]] name = "unsafe-libyaml" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab4c90930b95a82d00dc9e9ac071b4991924390d46cbd0dfe566148667605e4b" +checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" [[package]] name = "untrusted" @@ -9660,11 +10019,12 @@ checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" [[package]] name = "uuid" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f00cc9702ca12d3c81455259621e676d0f7251cec66a21e98fe2e9a37db93b2a" +checksum = "a183cf7feeba97b4dd1c0d46788634f6221d87fa961b305bed08c851829efcc0" dependencies = [ - "getrandom 0.2.12", + "atomic", + "getrandom 0.2.14", "rand 0.8.5", "serde", "wasm-bindgen", @@ -9689,7 +10049,7 @@ dependencies = [ "anyhow", "cached", "chrono", - "clap 4.4.18", + "clap 4.5.4", "clap-verbosity-flag", "clap_complete", "confy", @@ -9697,9 +10057,9 @@ dependencies = [ "dunce", "glob", "hex", - "indexmap 2.1.0", + "indexmap 2.2.6", "indicatif", - "itertools 0.12.0", + "itertools 0.12.1", "log", "once_cell", "os_info", @@ -9709,7 +10069,7 @@ dependencies = [ "reqwest", "serde", "serde_json", - "serde_yaml 0.9.30", + "serde_yaml 0.9.34+deprecated", "sha2", "tempfile", "toml", @@ -9723,7 +10083,7 @@ checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" [[package]] name = "vector" -version = "0.36.0" +version = "0.38.0" dependencies = [ "apache-avro", "approx", @@ -9746,6 +10106,7 @@ dependencies = [ "aws-sdk-s3", "aws-sdk-sns", "aws-sdk-sqs", + "aws-sdk-sts", "aws-sigv4", "aws-smithy-async", "aws-smithy-http", @@ -9758,20 +10119,21 @@ dependencies = [ "azure_identity", "azure_storage", "azure_storage_blobs", - "base64 0.21.7", + "base64 0.22.0", "bloomy", "bollard", - "bytes 1.5.0", + "bytes 1.6.0", "bytesize", "chrono", "chrono-tz", "cidr-utils 0.6.1", - "clap 4.4.18", + "clap 4.5.4", "colored", "console-subscriber", "criterion", "crossterm", "csv", + "databend-client", "derivative", "dirs-next", "dnsmsg-parser", @@ -9788,25 +10150,26 @@ dependencies = [ "governor", "greptimedb-client", "grok", - "h2 0.4.2", + "h2 0.4.4", "hash_hasher", "hashbrown 0.14.3", "headers", "heim", "hex", 
"hickory-proto", - "hostname", + "hostname 0.4.0", "http 0.2.9", - "http-body", + "http-body 0.4.5", "http-serde", - "hyper", + "hyper 0.14.28", "hyper-openssl", "hyper-proxy", - "indexmap 2.1.0", + "indexmap 2.2.6", "indoc", "infer 0.15.0", "inventory", - "itertools 0.12.0", + "ipnet", + "itertools 0.12.1", "k8s-openapi 0.18.0", "kube", "lapin", @@ -9822,7 +10185,7 @@ dependencies = [ "mlua", "mongodb", "nix 0.26.2", - "nkeys 0.4.0", + "nkeys 0.4.1", "nom", "notify", "num-format", @@ -9839,10 +10202,10 @@ dependencies = [ "portpicker", "postgres-openssl", "proptest", - "prost 0.12.3", - "prost-build 0.12.3", + "prost 0.12.4", + "prost-build 0.12.4", "prost-reflect", - "prost-types 0.12.3", + "prost-types 0.12.4", "pulsar", "quickcheck", "rand 0.8.5", @@ -9856,21 +10219,22 @@ dependencies = [ "rmpv", "roaring", "rstest", + "rumqttc", "seahash", - "semver 1.0.21", + "semver 1.0.22", "serde", "serde-toml-merge", "serde_bytes", "serde_json", - "serde_with 3.5.0", - "serde_yaml 0.9.30", + "serde_with 3.7.0", + "serde_yaml 0.9.34+deprecated", "sha2", "similar-asserts", "smallvec", "smpl_jwt", - "snafu", + "snafu 0.7.5", "snap", - "socket2 0.5.5", + "socket2 0.5.6", "stream-cancel", "strip-ansi-escapes", "syslog", @@ -9882,7 +10246,7 @@ dependencies = [ "tokio-postgres", "tokio-stream", "tokio-test", - "tokio-tungstenite", + "tokio-tungstenite 0.20.1", "tokio-util", "toml", "tonic", @@ -9913,9 +10277,8 @@ name = "vector-api-client" version = "0.1.2" dependencies = [ "anyhow", - "async-trait", "chrono", - "clap 4.4.18", + "clap 4.5.4", "futures 0.3.30", "graphql_client", "indoc", @@ -9924,7 +10287,7 @@ dependencies = [ "serde_json", "tokio", "tokio-stream", - "tokio-tungstenite", + "tokio-tungstenite 0.20.1", "url", "uuid", ] @@ -9937,8 +10300,8 @@ dependencies = [ "async-stream", "async-trait", "bytecheck", - "bytes 1.5.0", - "clap 4.4.18", + "bytes 1.6.0", + "clap 4.5.4", "crc32fast", "criterion", "crossbeam-queue", @@ -9960,8 +10323,8 @@ dependencies = [ "rand 0.8.5", "rkyv", "serde", - "serde_yaml 0.9.30", - "snafu", + "serde_yaml 0.9.34+deprecated", + "snafu 0.7.5", "temp-dir", "tokio", "tokio-test", @@ -9980,13 +10343,13 @@ name = "vector-common" version = "0.1.0" dependencies = [ "async-stream", - "bytes 1.5.0", + "bytes 1.6.0", "chrono", "chrono-tz", "crossbeam-utils", "derivative", "futures 0.3.30", - "indexmap 2.1.0", + "indexmap 2.2.6", "metrics", "nom", "ordered-float 4.2.0", @@ -9998,7 +10361,7 @@ dependencies = [ "serde", "serde_json", "smallvec", - "snafu", + "snafu 0.7.5", "stream-cancel", "tokio", "tracing 0.1.40", @@ -10017,14 +10380,14 @@ dependencies = [ "chrono-tz", "encoding_rs", "http 0.2.9", - "indexmap 2.1.0", + "indexmap 2.2.6", "inventory", "no-proxy", "num-traits", "serde", "serde_json", - "serde_with 3.5.0", - "snafu", + "serde_with 3.7.0", + "snafu 0.7.5", "toml", "tracing 0.1.40", "url", @@ -10039,13 +10402,13 @@ name = "vector-config-common" version = "0.1.0" dependencies = [ "convert_case 0.6.0", - "darling 0.20.3", + "darling 0.20.8", "once_cell", - "proc-macro2 1.0.78", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", "serde", "serde_json", - "syn 2.0.48", + "syn 2.0.60", "tracing 0.1.40", ] @@ -10053,12 +10416,12 @@ dependencies = [ name = "vector-config-macros" version = "0.1.0" dependencies = [ - "darling 0.20.3", - "proc-macro2 1.0.78", - "quote 1.0.35", + "darling 0.20.8", + "proc-macro2 1.0.81", + "quote 1.0.36", "serde", "serde_derive_internals", - "syn 2.0.48", + "syn 2.0.60", "vector-config", "vector-config-common", ] @@ -10069,9 +10432,9 @@ 
version = "0.1.0" dependencies = [ "async-graphql", "async-trait", - "base64 0.21.7", + "base64 0.22.0", "bitmask-enum", - "bytes 1.5.0", + "bytes 1.6.0", "chrono", "chrono-tz", "criterion", @@ -10087,7 +10450,8 @@ dependencies = [ "headers", "http 0.2.9", "hyper-proxy", - "indexmap 2.1.0", + "indexmap 2.2.6", + "ipnet", "metrics", "metrics-tracing-context", "metrics-util", @@ -10102,10 +10466,10 @@ dependencies = [ "parking_lot", "pin-project", "proptest", - "prost 0.12.3", - "prost-build 0.12.3", - "prost-types 0.12.3", - "quanta 0.12.2", + "prost 0.12.4", + "prost-build 0.12.4", + "prost-types 0.12.4", + "quanta 0.12.3", "quickcheck", "quickcheck_macros", "rand 0.8.5", @@ -10116,12 +10480,12 @@ dependencies = [ "security-framework", "serde", "serde_json", - "serde_with 3.5.0", - "serde_yaml 0.9.30", + "serde_with 3.7.0", + "serde_yaml 0.9.34+deprecated", "similar-asserts", "smallvec", - "snafu", - "socket2 0.5.5", + "snafu 0.7.5", + "socket2 0.5.6", "tokio", "tokio-openssl", "tokio-stream", @@ -10165,6 +10529,8 @@ dependencies = [ name = "vector-lookup" version = "0.1.0" dependencies = [ + "proptest", + "proptest-derive", "serde", "vector-config", "vector-config-macros", @@ -10195,7 +10561,7 @@ dependencies = [ name = "vector-vrl-cli" version = "0.1.0" dependencies = [ - "clap 4.4.18", + "clap 4.5.4", "vector-vrl-functions", "vrl", ] @@ -10214,7 +10580,7 @@ dependencies = [ "ansi_term", "chrono", "chrono-tz", - "clap 4.4.18", + "clap 4.5.4", "enrichment", "glob", "prettydiff", @@ -10233,7 +10599,7 @@ version = "0.1.0" dependencies = [ "cargo_toml", "enrichment", - "getrandom 0.2.12", + "getrandom 0.2.14", "gloo-utils", "serde", "serde-wasm-bindgen", @@ -10256,16 +10622,16 @@ checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" [[package]] name = "vrl" -version = "0.9.1" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c13adaad36ee7b6f8cb7e7baa17ac05d84439d787b0b058df7be1cd9b04f485" +checksum = "81f1e48235e8db47d5010723fc32c38b09820a1a2a57eaea77b089493a375f52" dependencies = [ "aes", "ansi_term", "arbitrary", "base16", - "base64 0.21.7", - "bytes 1.5.0", + "base64 0.22.0", + "bytes 1.6.0", "cbc", "cfb-mode", "cfg-if", @@ -10274,13 +10640,14 @@ dependencies = [ "chrono", "chrono-tz", "cidr-utils 0.6.1", - "clap 4.4.18", + "clap 4.5.4", "codespan-reporting", "community-id", "crypto_secretbox", "csv", "ctr", "data-encoding", + "digest", "dns-lookup", "dyn-clone", "exitcode", @@ -10288,10 +10655,12 @@ dependencies = [ "grok", "hex", "hmac", - "hostname", - "indexmap 2.1.0", + "hostname 0.3.1", + "iana-time-zone", + "idna 0.5.0", + "indexmap 2.2.6", "indoc", - "itertools 0.12.0", + "itertools 0.12.1", "lalrpop", "lalrpop-util", "md-5", @@ -10308,6 +10677,9 @@ dependencies = [ "pest_derive", "prettydiff", "prettytable-rs", + "prost 0.12.4", + "prost-reflect", + "psl", "quickcheck", "quoted_printable", "rand 0.8.5", @@ -10321,7 +10693,7 @@ dependencies = [ "sha-1", "sha2", "sha3", - "snafu", + "snafu 0.8.0", "snap", "strip-ansi-escapes", "syslog_loose", @@ -10359,8 +10731,8 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d257817081c7dffcdbab24b9e62d2def62e2ff7d00b1c20062551e6cccc145ff" dependencies = [ - "proc-macro2 1.0.78", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", ] [[package]] @@ -10399,29 +10771,27 @@ dependencies = [ [[package]] name = "warp" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "c1e92e22e03ff1230c03a1a8ee37d2f89cd489e2e541b7550d6afad96faed169" +checksum = "4378d202ff965b011c64817db11d5829506d3404edeadb61f190d111da3f231c" dependencies = [ - "bytes 1.5.0", + "bytes 1.6.0", "futures-channel", "futures-util", "headers", "http 0.2.9", - "hyper", + "hyper 0.14.28", "log", "mime", "mime_guess", "percent-encoding", "pin-project", - "rustls-pemfile", "scoped-tls", "serde", "serde_json", "serde_urlencoded", "tokio", - "tokio-stream", - "tokio-tungstenite", + "tokio-tungstenite 0.21.0", "tokio-util", "tower-service", "tracing 0.1.40", @@ -10439,11 +10809,17 @@ version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +[[package]] +name = "wasite" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" + [[package]] name = "wasm-bindgen" -version = "0.2.90" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1223296a201415c7fad14792dbefaace9bd52b62d33453ade1c5b5f07555406" +checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -10451,16 +10827,16 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.90" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcdc935b63408d58a32f8cc9738a0bffd8f05cc7c002086c6ef20b7312ad9dcd" +checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" dependencies = [ "bumpalo", "log", "once_cell", - "proc-macro2 1.0.78", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", "wasm-bindgen-shared", ] @@ -10478,38 +10854,38 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.90" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e4c238561b2d428924c49815533a8b9121c664599558a5d9ec51f8a1740a999" +checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" dependencies = [ - "quote 1.0.35", + "quote 1.0.36", "wasm-bindgen-macro-support", ] [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.90" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bae1abb6806dc1ad9e560ed242107c0f6c84335f1749dd4e8ddb012ebd5e25a7" +checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ - "proc-macro2 1.0.78", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.90" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d91413b1c31d7539ba5ef2451af3f0b833a005eb27a631cec32bc0635a8602b" +checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" [[package]] name = "wasm-streams" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4609d447824375f43e1ffbc051b50ad8f4b3ae8219680c94452ea05eb240ac7" +checksum = "b65dc4c90b63b118468cf747d8bf3566c1913ef60be765b5730ead9e0a3ba129" dependencies = [ "futures-util", "js-sys", @@ -10560,7 +10936,7 @@ dependencies = [ "either", "home", "once_cell", - "rustix 0.38.28", + "rustix 0.38.31", ] [[package]] @@ 
-10572,17 +10948,18 @@ dependencies = [ "either", "home", "once_cell", - "rustix 0.38.28", + "rustix 0.38.31", "windows-sys 0.48.0", ] [[package]] name = "whoami" -version = "1.4.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22fc3756b8a9133049b26c7f61ab35416c130e8c09b660f5b3958b446f52cc50" +checksum = "0fec781d48b41f8163426ed18e8fc2864c12937df9ce54c88ede7bd47270893e" dependencies = [ - "wasm-bindgen", + "redox_syscall 0.4.1", + "wasite", "web-sys", ] @@ -10629,24 +11006,34 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "windows" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be" +dependencies = [ + "windows-core", + "windows-targets 0.52.0", +] + [[package]] name = "windows-core" -version = "0.51.1" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1f8cf84f35d2db49a46868f947758c7a1138116f7fac3bc844f43ade1292e64" +checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.48.5", + "windows-targets 0.52.0", ] [[package]] name = "windows-service" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd9db37ecb5b13762d95468a2fc6009d4b2c62801243223aabd44fca13ad13c8" +checksum = "d24d6bcc7f734a4091ecf8d7a64c5f7d7066f45585c1861eba06449909609c8a" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.4.1", "widestring 1.0.2", - "windows-sys 0.45.0", + "windows-sys 0.52.0", ] [[package]] @@ -10856,6 +11243,15 @@ dependencies = [ "memchr", ] +[[package]] +name = "winnow" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dffa400e67ed5a4dd237983829e66475f0a4a26938c4b04c21baede6262215b8" +dependencies = [ + "memchr", +] + [[package]] name = "winreg" version = "0.50.0" @@ -10879,7 +11275,7 @@ dependencies = [ "futures 0.3.30", "futures-timer", "http-types", - "hyper", + "hyper 0.14.28", "log", "once_cell", "regex", @@ -10937,9 +11333,9 @@ version = "0.7.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b3c129550b3e6de3fd0ba67ba5c81818f9805e58b8d7fee80a3a59d2c9fc601a" dependencies = [ - "proc-macro2 1.0.78", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index d37473a7b1bcd..8a29485972357 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "vector" -version = "0.36.0" +version = "0.38.0" authors = ["Vector Contributors <vector@datadoghq.com>"] edition = "2021" description = "A lightweight and ultra-fast tool for building observability pipelines" @@ -12,17 +12,13 @@ default-run = "vector" autobenches = false # our benchmarks are not runnable on their own either way # Minimum supported rust version # See docs/DEVELOPING.md for policy -rust-version = "1.71.1" +rust-version = "1.77" [[bin]] name = "vector" test = false bench = false -[[test]] -name = "integration" -path = "tests/integration/lib.rs" - [[bin]] name = "graphql-schema" path = "src/api/schema/gen.rs" @@ -37,6 +33,14 @@ test = false bench = false required-features = ["secret-backend-example"] +[[test]] +name = "integration" +path = "tests/integration/lib.rs" + +[[test]] +name = "e2e" +path = "tests/e2e/mod.rs" + # 
CI-based builds use full release optimization. See scripts/environment/release-flags.sh. # This results in roughly a 5% reduction in performance when compiling locally vs when # compiled via the CI pipeline. @@ -73,6 +77,9 @@ start = false # libc requirements are defined by `cross` # https://github.com/rust-embedded/cross#supported-targets # Though, it seems like aarch64 libc is actually 2.18 and not 2.19 +[package.metadata.deb.variants.arm-unknown-linux-gnueabi] +depends = "libc6 (>= 2.15)" + [package.metadata.deb.variants.armv7-unknown-linux-gnueabihf] depends = "libc6 (>= 2.15)" @@ -122,11 +129,16 @@ members = [ ] [workspace.dependencies] -chrono = { version = "0.4.33", default-features = false, features = ["clock", "serde"] } -clap = { version = "4.4.18", default-features = false, features = ["derive", "error-context", "env", "help", "std", "string", "usage", "wrap_help"] } -pin-project = { version = "1.1.4", default-features = false } -serde_json = { version = "1.0.112", default-features = false, features = ["raw_value", "std"] } -vrl = { version = "0.9.1", features = ["arbitrary", "cli", "test", "test_framework"] } +chrono = { version = "0.4.37", default-features = false, features = ["clock", "serde"] } +clap = { version = "4.5.4", default-features = false, features = ["derive", "error-context", "env", "help", "std", "string", "usage", "wrap_help"] } +indexmap = { version = "2.2.6", default-features = false, features = ["serde", "std"] } +pin-project = { version = "1.1.5", default-features = false } +proptest = "1.4" +proptest-derive = "0.4.0" +serde_json = { version = "1.0.116", default-features = false, features = ["raw_value", "std"] } +serde = { version = "1.0.198", default-features = false, features = ["alloc", "derive", "rc"] } +toml = { version = "0.8.12", default-features = false, features = ["display", "parse"] } +vrl = { version = "0.13.0", features = ["arbitrary", "cli", "test", "test_framework"] } [dependencies] pin-project.workspace = true @@ -144,11 +156,11 @@ loki-logproto = { path = "lib/loki-logproto", optional = true } # Tokio / Futures async-stream = { version = "0.3.5", default-features = false } -async-trait = { version = "0.1.77", default-features = false } +async-trait = { version = "0.1.80", default-features = false } futures = { version = "0.3.30", default-features = false, features = ["compat", "io-compat"], package = "futures" } -tokio = { version = "1.35.1", default-features = false, features = ["full"] } +tokio = { version = "1.37.0", default-features = false, features = ["full"] } tokio-openssl = { version = "0.6.4", default-features = false } -tokio-stream = { version = "0.1.14", default-features = false, features = ["net", "sync", "time"] } +tokio-stream = { version = "0.1.15", default-features = false, features = ["net", "sync", "time"] } tokio-util = { version = "0.7", default-features = false, features = ["io", "time"] } console-subscriber = { version = "0.2.0", default-features = false, optional = true } @@ -164,7 +176,7 @@ metrics = "0.21.1" metrics-tracing-context = { version = "0.14.0", default-features = false } # AWS - Official SDK -aws-sdk-s3 = { version = "1.3.0", default-features = false, features = ["behavior-version-latest"], optional = true } +aws-sdk-s3 = { version = "1.4.0", default-features = false, features = ["behavior-version-latest"], optional = true } aws-sdk-sqs = { version = "1.3.0", default-features = false, features = ["behavior-version-latest"], optional = true } aws-sdk-sns = { version = "1.3.0", default-features = false, 
features = ["behavior-version-latest"], optional = true } aws-sdk-cloudwatch = { version = "1.3.0", default-features = false, features = ["behavior-version-latest"], optional = true } @@ -172,15 +184,19 @@ aws-sdk-cloudwatchlogs = { version = "1.3.0", default-features = false, features aws-sdk-elasticsearch = { version = "1.3.0", default-features = false, features = ["behavior-version-latest"], optional = true } aws-sdk-firehose = { version = "1.3.0", default-features = false, features = ["behavior-version-latest"], optional = true } aws-sdk-kinesis = { version = "1.3.0", default-features = false, features = ["behavior-version-latest"], optional = true } -aws-types = { version = "1.1.3", default-features = false, optional = true } -aws-sigv4 = { version = "1.1.3", default-features = false, features = ["sign-http"], optional = true } -aws-config = { version = "1.0.1", default-features = false, features = ["behavior-version-latest"], optional = true } -aws-credential-types = { version = "1.1.4", default-features = false, features = ["hardcoded-credentials"], optional = true } +# The sts crate is needed despite not being referred to anywhere in the code because we need to set the +# `behavior-version-latest` feature. Without this we get a runtime panic when `auth.assume_role` authentication +# is configured. +aws-sdk-sts = { version = "1.3.1", default-features = false, features = ["behavior-version-latest"], optional = true } +aws-types = { version = "1.2.0", default-features = false, optional = true } +aws-sigv4 = { version = "1.2.1", default-features = false, features = ["sign-http"], optional = true } +aws-config = { version = "1.0.1", default-features = false, features = ["behavior-version-latest", "credentials-process"], optional = true } +aws-credential-types = { version = "1.2.0", default-features = false, features = ["hardcoded-credentials"], optional = true } aws-smithy-http = { version = "0.60", default-features = false, features = ["event-stream"], optional = true } -aws-smithy-types = { version = "1.0.2", default-features = false, optional = true } -aws-smithy-runtime-api = { version = "1.1.3", default-features = false, optional = true } -aws-smithy-runtime = { version = "1.1.4", default-features = false, features = ["client", "connector-hyper-0-14-x", "rt-tokio"], optional = true } -aws-smithy-async = { version = "1.0.2", default-features = false, features = ["rt-tokio"], optional = true } +aws-smithy-types = { version = "1.1.8", default-features = false, optional = true } +aws-smithy-runtime-api = { version = "1.2.0", default-features = false, optional = true } +aws-smithy-runtime = { version = "1.3.1", default-features = false, features = ["client", "connector-hyper-0-14-x", "rt-tokio"], optional = true } +aws-smithy-async = { version = "1.2.1", default-features = false, features = ["rt-tokio"], optional = true } # Azure azure_core = { version = "0.17", default-features = false, features = ["enable_reqwest"], optional = true } @@ -189,26 +205,26 @@ azure_storage = { version = "0.17", default-features = false, optional = true } azure_storage_blobs = { version = "0.17", default-features = false, optional = true } # OpenDAL -opendal = {version = "0.44", default-features = false, features = ["native-tls", "services-webhdfs"], optional = true} +opendal = {version = "0.45", default-features = false, features = ["native-tls", "services-webhdfs"], optional = true} # Tower tower = { version = "0.4.13", default-features = false, features = ["buffer", "limit", "retry", "timeout", "util", 
"balance", "discover"] } tower-http = { version = "0.4.4", default-features = false, features = ["decompression-gzip", "trace"]} # Serde -serde = { version = "1.0.195", default-features = false, features = ["derive"] } -serde-toml-merge = { version = "0.3.3", default-features = false } +serde.workspace = true +serde-toml-merge = { version = "0.3.6", default-features = false } serde_bytes = { version = "0.11.14", default-features = false, features = ["std"], optional = true } serde_json.workspace = true -serde_with = { version = "3.5.0", default-features = false, features = ["macros", "std"] } -serde_yaml = { version = "0.9.30", default-features = false } +serde_with = { version = "3.7.0", default-features = false, features = ["macros", "std"] } +serde_yaml = { version = "0.9.34", default-features = false } # Messagepack -rmp-serde = { version = "1.1.2", default-features = false, optional = true } -rmpv = { version = "1.0.1", default-features = false, features = ["with-serde"], optional = true } +rmp-serde = { version = "1.2.0", default-features = false, optional = true } +rmpv = { version = "1.0.2", default-features = false, features = ["with-serde"], optional = true } # Prost / Protocol Buffers prost = { version = "0.12", default-features = false, features = ["std"] } -prost-reflect = { version = "0.12", default-features = false, optional = true } +prost-reflect = { version = "0.13", default-features = false, optional = true } prost-types = { version = "0.12", default-features = false, optional = true } # GCP @@ -219,14 +235,14 @@ smpl_jwt = { version = "0.8.0", default-features = false, optional = true } lapin = { version = "2.3.1", default-features = false, features = ["native-tls"], optional = true } # API -async-graphql = { version = "7.0.1", default-features = false, optional = true, features = ["chrono", "playground"] } -async-graphql-warp = { version = "7.0.1", default-features = false, optional = true } +async-graphql = { version = "7.0.3", default-features = false, optional = true, features = ["chrono", "playground"] } +async-graphql-warp = { version = "7.0.3", default-features = false, optional = true } # API client crossterm = { version = "0.27.0", default-features = false, features = ["event-stream", "windows"], optional = true } num-format = { version = "0.4.4", default-features = false, features = ["with-num-bigint"], optional = true } number_prefix = { version = "0.4.0", default-features = false, features = ["std"], optional = true } -ratatui = { version = "0.25.0", optional = true, default-features = false, features = ["crossterm"] } +ratatui = { version = "0.26.2", optional = true, default-features = false, features = ["crossterm"] } # Datadog Pipelines @@ -234,64 +250,66 @@ hex = { version = "0.4.3", default-features = false, optional = true } sha2 = { version = "0.10.8", default-features = false, optional = true } # GreptimeDB -greptimedb-client = { git = "https://github.com/GreptimeTeam/greptimedb-ingester-rust.git", rev = "4cb19ec47eeaf634c451d9ae438dac445a8a3dce", optional = true } +greptimedb-client = { git = "https://github.com/GreptimeTeam/greptimedb-ingester-rust.git", rev = "d21dbcff680139ed2065b62100bac3123da7c789", optional = true } # External libs -arc-swap = { version = "1.6", default-features = false, optional = true } -async-compression = { version = "0.4.6", default-features = false, features = ["tokio", "gzip", "zstd"], optional = true } +arc-swap = { version = "1.7", default-features = false, optional = true } +async-compression = { version = "0.4.8", 
default-features = false, features = ["tokio", "gzip", "zstd"], optional = true } apache-avro = { version = "0.16.0", default-features = false, optional = true } axum = { version = "0.6.20", default-features = false } -base64 = { version = "0.21.7", default-features = false, optional = true } +base64 = { version = "0.22.0", default-features = false, optional = true } bloomy = { version = "1.2.0", default-features = false, optional = true } -bollard = { version = "0.15.0", default-features = false, features = ["ssl", "chrono"], optional = true } -bytes = { version = "1.5.0", default-features = false, features = ["serde"] } +bollard = { version = "0.16.1", default-features = false, features = ["ssl", "chrono"], optional = true } +bytes = { version = "1.6.0", default-features = false, features = ["serde"] } bytesize = { version = "1.3.0", default-features = false } chrono.workspace = true -chrono-tz = { version = "0.8.5", default-features = false } +chrono-tz = { version = "0.8.6", default-features = false } cidr-utils = { version = "0.6.1", default-features = false } colored = { version = "2.1.0", default-features = false } csv = { version = "1.3", default-features = false } +databend-client ={ version = "0.17.0", default-features = false, features = ["rustls"], optional = true } derivative = { version = "2.2.0", default-features = false } dirs-next = { version = "2.0.0", default-features = false, optional = true } -dyn-clone = { version = "1.0.16", default-features = false } +dyn-clone = { version = "1.0.17", default-features = false } encoding_rs = { version = "0.8.33", default-features = false, features = ["serde"] } -enum_dispatch = { version = "0.3.12", default-features = false } +enum_dispatch = { version = "0.3.13", default-features = false } exitcode = { version = "1.1.2", default-features = false } flate2 = { version = "1.0.28", default-features = false, features = ["default"] } futures-util = { version = "0.3.29", default-features = false } glob = { version = "0.3.1", default-features = false } governor = { version = "0.6.0", default-features = false, features = ["dashmap", "jitter", "std"], optional = true } grok = { version = "2.0.0", default-features = false, optional = true } -h2 = { version = "0.4.1", default-features = false, optional = true } +h2 = { version = "0.4.3", default-features = false, optional = true } hash_hasher = { version = "2.0.0", default-features = false } hashbrown = { version = "0.14.3", default-features = false, optional = true, features = ["ahash"] } headers = { version = "0.3.9", default-features = false } -hostname = { version = "0.3.1", default-features = false } +hostname = { version = "0.4.0", default-features = false } http = { version = "0.2.9", default-features = false } http-serde = "1.1.3" http-body = { version = "0.4.5", default-features = false } hyper = { version = "0.14.28", default-features = false, features = ["client", "runtime", "http1", "http2", "server", "stream"] } hyper-openssl = { version = "0.9.2", default-features = false } hyper-proxy = { version = "0.9.1", default-features = false, features = ["openssl-tls"] } -indexmap = { version = "2.1.0", default-features = false, features = ["serde", "std"] } +indexmap.workspace = true infer = { version = "0.15.0", default-features = false, optional = true} -indoc = { version = "2.0.4", default-features = false } +indoc = { version = "2.0.5", default-features = false } inventory = { version = "0.3.15", default-features = false } -itertools = { version = "0.12.0", default-features = false, 
optional = false, features = ["use_alloc"] } +ipnet = { version = "2", default-features = false, optional = true, features = ["serde", "std"] } +itertools = { version = "0.12.1", default-features = false, optional = false, features = ["use_alloc"] } k8s-openapi = { version = "0.18.0", default-features = false, features = ["api", "v1_26"], optional = true } kube = { version = "0.82.0", default-features = false, features = ["client", "openssl-tls", "runtime"], optional = true } listenfd = { version = "1.0.1", default-features = false, optional = true } logfmt = { version = "0.0.2", default-features = false, optional = true } -lru = { version = "0.12.2", default-features = false, optional = true } +lru = { version = "0.12.3", default-features = false, optional = true } maxminddb = { version = "0.24.0", default-features = false, optional = true } md-5 = { version = "0.10", default-features = false, optional = true } -mongodb = { version = "2.8.0", default-features = false, features = ["tokio-runtime"], optional = true } +mongodb = { version = "2.8.2", default-features = false, features = ["tokio-runtime"], optional = true } async-nats = { version = "0.33.0", default-features = false, optional = true } -nkeys = { version = "0.4.0", default-features = false, optional = true } +nkeys = { version = "0.4.1", default-features = false, optional = true } nom = { version = "7.1.3", default-features = false, optional = true } notify = { version = "6.1.1", default-features = false, features = ["macos_fsevent"] } once_cell = { version = "1.19", default-features = false } -openssl = { version = "0.10.63", default-features = false, features = ["vendored"] } +openssl = { version = "0.10.64", default-features = false, features = ["vendored"] } openssl-probe = { version = "0.1.5", default-features = false } ordered-float = { version = "4.2.0", default-features = false } paste = "1.0.14" @@ -302,27 +320,28 @@ rand = { version = "0.8.5", default-features = false, features = ["small_rng"] } rand_distr = { version = "0.4.3", default-features = false } rdkafka = { version = "0.35.0", default-features = false, features = ["tokio", "libz", "ssl", "zstd"], optional = true } redis = { version = "0.24.0", default-features = false, features = ["connection-manager", "tokio-comp", "tokio-native-tls-comp"], optional = true } -regex = { version = "1.10.3", default-features = false, features = ["std", "perf"] } -roaring = { version = "0.10.2", default-features = false, optional = true } +regex = { version = "1.10.4", default-features = false, features = ["std", "perf"] } +roaring = { version = "0.10.3", default-features = false, optional = true } +rumqttc = { version = "0.24.0", default-features = false, features = ["use-rustls"], optional = true } seahash = { version = "4.1.0", default-features = false } -semver = { version = "1.0.21", default-features = false, features = ["serde", "std"], optional = true } +semver = { version = "1.0.22", default-features = false, features = ["serde", "std"], optional = true } smallvec = { version = "1", default-features = false, features = ["union", "serde"] } snafu = { version = "0.7.5", default-features = false, features = ["futures"] } snap = { version = "1.1.1", default-features = false } -socket2 = { version = "0.5.5", default-features = false } +socket2 = { version = "0.5.6", default-features = false } stream-cancel = { version = "0.8.2", default-features = false } strip-ansi-escapes = { version = "0.2.0", default-features = false } -syslog = { version = "6.1.0", default-features = 
false, optional = true } +syslog = { version = "6.1.1", default-features = false, optional = true } tikv-jemallocator = { version = "0.5.4", default-features = false, features = ["unprefixed_malloc_on_supported_platforms"], optional = true } tokio-postgres = { version = "0.7.10", default-features = false, features = ["runtime", "with-chrono-0_4"], optional = true } tokio-tungstenite = {version = "0.20.1", default-features = false, features = ["connect"], optional = true} -toml = { version = "0.8.8", default-features = false, features = ["parse", "display"] } +toml.workspace = true tonic = { version = "0.10", optional = true, default-features = false, features = ["transport", "codegen", "prost", "tls", "tls-roots", "gzip"] } -hickory-proto = { version = "0.24.0", default-features = false, features = ["dnssec"], optional = true } -typetag = { version = "0.2.15", default-features = false } +hickory-proto = { version = "0.24.1", default-features = false, features = ["dnssec"], optional = true } +typetag = { version = "0.2.16", default-features = false } url = { version = "2.5.0", default-features = false, features = ["serde"] } uuid = { version = "1", default-features = false, features = ["serde", "v4"] } -warp = { version = "0.3.6", default-features = false } +warp = { version = "0.3.7", default-features = false } zstd = { version = "0.13.0", default-features = false } arr_macro = { version = "0.2.1" } @@ -331,10 +350,10 @@ arr_macro = { version = "0.2.1" } heim = { git = "https://github.com/vectordotdev/heim.git", branch = "update-nix", default-features = false, features = ["disk"] } # make sure to update the external docs when the Lua version changes -mlua = { version = "0.9.5", default-features = false, features = ["lua54", "send", "vendored", "macros"], optional = true } +mlua = { version = "0.9.7", default-features = false, features = ["lua54", "send", "vendored", "macros"], optional = true } [target.'cfg(windows)'.dependencies] -windows-service = "0.6.0" +windows-service = "0.7.0" [target.'cfg(unix)'.dependencies] nix = { version = "0.26.2", default-features = false, features = ["socket", "signal"] } @@ -347,28 +366,28 @@ openssl-src = { version = "300", default-features = false, features = ["force-en [dev-dependencies] approx = "0.5.1" -assert_cmd = { version = "2.0.13", default-features = false } -aws-smithy-runtime = { version = "1.1.4", default-features = false, features = ["tls-rustls"] } +assert_cmd = { version = "2.0.14", default-features = false } +aws-smithy-runtime = { version = "1.3.1", default-features = false, features = ["tls-rustls"] } azure_core = { version = "0.17", default-features = false, features = ["enable_reqwest", "azurite_workaround"] } azure_identity = { version = "0.17", default-features = false, features = ["enable_reqwest"] } azure_storage_blobs = { version = "0.17", default-features = false, features = ["azurite_workaround"] } azure_storage = { version = "0.17", default-features = false } -base64 = "0.21.7" +base64 = "0.22.0" criterion = { version = "0.5.1", features = ["html_reports", "async_tokio"] } -itertools = { version = "0.12.0", default-features = false, features = ["use_alloc"] } -libc = "0.2.152" +itertools = { version = "0.12.1", default-features = false, features = ["use_alloc"] } +libc = "0.2.153" similar-asserts = "1.5.0" -proptest = "1.4" +proptest.workspace = true quickcheck = "1.0.3" reqwest = { version = "0.11", features = ["json"] } -rstest = {version = "0.18.2"} -tempfile = "3.9.0" +rstest = {version = "0.19.0"} +tempfile = "3.10.1" 
test-generator = "0.3.1" -tokio = { version = "1.35.1", features = ["test-util"] } -tokio-test = "0.4.3" +tokio = { version = "1.37.0", features = ["test-util"] } +tokio-test = "0.4.4" tower-test = "0.4.0" vector-lib = { path = "lib/vector-lib", default-features = false, features = ["vrl", "test"] } -vrl = { version = "0.9.0", features = ["cli", "test", "test_framework", "arbitrary"] } +vrl.workspace = true wiremock = "0.5.22" zstd = { version = "0.13.0", default-features = false } @@ -409,6 +428,8 @@ target-aarch64-unknown-linux-gnu = ["api", "api-client", "enrichment-tables", "r target-aarch64-unknown-linux-musl = ["api", "api-client", "enrichment-tables", "rdkafka?/cmake_build", "sinks", "sources", "sources-dnstap", "transforms", "unix", "enterprise"] target-armv7-unknown-linux-gnueabihf = ["api", "api-client", "enrichment-tables", "rdkafka?/cmake_build", "sinks", "sources", "sources-dnstap", "transforms", "unix", "enterprise"] target-armv7-unknown-linux-musleabihf = ["api", "api-client", "rdkafka?/cmake_build", "enrichment-tables", "sinks", "sources", "sources-dnstap", "transforms", "enterprise"] +target-arm-unknown-linux-gnueabi = ["api", "api-client", "enrichment-tables", "rdkafka?/cmake_build", "sinks", "sources", "sources-dnstap", "transforms", "unix", "enterprise"] +target-arm-unknown-linux-musleabi = ["api", "api-client", "rdkafka?/cmake_build", "enrichment-tables", "sinks", "sources", "sources-dnstap", "transforms", "enterprise"] target-x86_64-unknown-linux-gnu = ["api", "api-client", "rdkafka?/cmake_build", "enrichment-tables", "sinks", "sources", "sources-dnstap", "transforms", "unix", "rdkafka?/gssapi-vendored", "enterprise"] target-x86_64-unknown-linux-musl = ["api", "api-client", "rdkafka?/cmake_build", "enrichment-tables", "sinks", "sources", "sources-dnstap", "transforms", "unix", "enterprise"] # Does not currently build @@ -454,6 +475,7 @@ aws-core = [ "dep:aws-smithy-types", "dep:aws-smithy-runtime", "dep:aws-smithy-runtime-api", + "dep:aws-sdk-sts", ] # Anything that requires Protocol Buffers. 
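The `dep:aws-sdk-sts` line added to `aws-core` above pairs with the comment in the AWS dependency block: without the crate's `behavior-version-latest` feature, configuring `auth.assume_role` panics at runtime. As a minimal sketch of the configuration path it protects — assuming a hypothetical `aws_s3` sink and placeholder bucket, region, and role ARN — the affected setting looks like:

```toml
# Hypothetical sink config exercising the STS assume-role path;
# bucket, region, and the role ARN are placeholders.
[sinks.out]
type = "aws_s3"
inputs = ["in"]
bucket = "my-bucket"
region = "us-east-1"
encoding.codec = "json"
auth.assume_role = "arn:aws:iam::123456789012:role/VectorIngestRole"
```

Before this change, loading a config like this would hit the runtime panic described in the dependency comment.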
@@ -462,8 +484,9 @@ protobuf-build = ["dep:tonic-build", "dep:prost-build"] gcp = ["dep:base64", "dep:goauth", "dep:smpl_jwt"] # Enrichment Tables -enrichment-tables = ["enrichment-tables-geoip"] +enrichment-tables = ["enrichment-tables-geoip", "enrichment-tables-mmdb"] enrichment-tables-geoip = ["dep:maxminddb"] +enrichment-tables-mmdb = ["dep:maxminddb"] # Codecs codecs-syslog = ["vector-lib/syslog"] @@ -492,6 +515,7 @@ sources-logs = [ "sources-logstash", "sources-nats", "sources-opentelemetry", + "sources-pulsar", "sources-file-descriptor", "sources-redis", "sources-socket", @@ -522,7 +546,7 @@ sources-aws_s3 = ["aws-core", "dep:aws-sdk-sqs", "dep:aws-sdk-s3", "dep:semver", sources-aws_sqs = ["aws-core", "dep:aws-sdk-sqs"] sources-datadog_agent = ["sources-utils-http-error", "protobuf-build"] sources-demo_logs = ["dep:fakedata"] -sources-dnstap = ["dep:base64", "dep:hickory-proto", "dep:dnsmsg-parser", "protobuf-build"] +sources-dnstap = ["sources-utils-net-tcp", "dep:base64", "dep:hickory-proto", "dep:dnsmsg-parser", "protobuf-build"] sources-docker_logs = ["docker"] sources-eventstoredb_metrics = [] sources-exec = [] @@ -549,6 +573,7 @@ sources-prometheus = ["sources-prometheus-scrape", "sources-prometheus-remote-wr sources-prometheus-scrape = ["sinks-prometheus", "sources-utils-http-client", "vector-lib/prometheus"] sources-prometheus-remote-write = ["sinks-prometheus", "sources-utils-http", "vector-lib/prometheus"] sources-prometheus-pushgateway = ["sinks-prometheus", "sources-utils-http", "vector-lib/prometheus"] +sources-pulsar = ["dep:apache-avro", "dep:pulsar"] sources-redis= ["dep:redis"] sources-socket = ["sources-utils-net", "tokio-util/net"] sources-splunk_hec = ["dep:roaring"] @@ -563,7 +588,7 @@ sources-utils-http-prelude = ["sources-utils-http", "sources-utils-http-auth", " sources-utils-http-query = [] sources-utils-http-client = ["sources-utils-http", "sources-http_server"] sources-utils-net = ["sources-utils-net-tcp", "sources-utils-net-udp", "sources-utils-net-unix"] -sources-utils-net-tcp = ["listenfd"] +sources-utils-net-tcp = ["listenfd", "dep:ipnet"] sources-utils-net-udp = ["listenfd"] sources-utils-net-unix = [] @@ -599,7 +624,7 @@ transforms-metrics = [ transforms-aggregate = [] transforms-aws_ec2_metadata = ["dep:arc-swap"] -transforms-dedupe = ["dep:lru"] +transforms-dedupe = ["transforms-impl-dedupe"] transforms-filter = [] transforms-log_to_metric = [] transforms-lua = ["dep:mlua", "vector-lib/lua"] @@ -608,10 +633,14 @@ transforms-pipelines = ["transforms-filter", "transforms-route"] transforms-reduce = [] transforms-remap = [] transforms-route = [] -transforms-sample = [] +transforms-sample = ["transforms-impl-sample"] transforms-tag_cardinality_limit = ["dep:bloomy", "dep:hashbrown"] transforms-throttle = ["dep:governor"] +# Implementations of transforms +transforms-impl-sample = [] +transforms-impl-dedupe = ["dep:lru"] + # Sinks sinks = ["sinks-logs", "sinks-metrics"] sinks-logs = [ @@ -644,6 +673,7 @@ sinks-logs = [ "sinks-kafka", "sinks-mezmo", "sinks-loki", + "sinks-mqtt", "sinks-nats", "sinks-new_relic_logs", "sinks-new_relic", @@ -690,7 +720,7 @@ sinks-blackhole = [] sinks-chronicle = [] sinks-clickhouse = [] sinks-console = [] -sinks-databend = [] +sinks-databend = ["dep:databend-client"] sinks-datadog_events = [] sinks-datadog_logs = [] sinks-datadog_metrics = ["protobuf-build", "dep:prost-reflect"] @@ -706,6 +736,7 @@ sinks-influxdb = [] sinks-kafka = ["dep:rdkafka"] sinks-mezmo = [] sinks-loki = ["loki-logproto"] +sinks-mqtt = ["dep:rumqttc"] 
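For the new `sinks-mqtt` feature gated on `rumqttc`, a rough sketch of what a corresponding sink config might look like; the field names (`host`, `port`, `topic`) are assumptions based on common MQTT client options, not taken from this diff:

```toml
# Hypothetical MQTT sink config; host, port, and topic are
# assumptions for illustration only.
[sinks.mqtt_out]
type = "mqtt"
inputs = ["in"]
host = "localhost"
port = 1883
topic = "vector"
encoding.codec = "json"
```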
sinks-nats = ["dep:async-nats", "dep:nkeys"] sinks-new_relic_logs = ["sinks-http"] sinks-new_relic = [] @@ -738,7 +769,7 @@ enterprise = [ # Identifies that the build is a nightly build nightly = [] -# Testing-related features +# Integration testing-related features all-integration-tests = [ "amqp-integration-tests", "appsignal-integration-tests", @@ -831,12 +862,13 @@ kafka-integration-tests = ["sinks-kafka", "sources-kafka"] logstash-integration-tests = ["docker", "sources-logstash"] loki-integration-tests = ["sinks-loki"] mongodb_metrics-integration-tests = ["sources-mongodb_metrics"] +mqtt-integration-tests = ["sinks-mqtt"] nats-integration-tests = ["sinks-nats", "sources-nats"] nginx-integration-tests = ["sources-nginx_metrics"] opentelemetry-integration-tests = ["sources-opentelemetry"] postgresql_metrics-integration-tests = ["sources-postgresql_metrics"] prometheus-integration-tests = ["sinks-prometheus", "sources-prometheus", "sinks-influxdb"] -pulsar-integration-tests = ["sinks-pulsar"] +pulsar-integration-tests = ["sinks-pulsar", "sources-pulsar"] redis-integration-tests = ["sinks-redis", "sources-redis"] splunk-integration-tests = ["sinks-splunk_hec"] dnstap-integration-tests = ["sources-dnstap", "dep:bollard"] @@ -844,6 +876,19 @@ webhdfs-integration-tests = ["sinks-webhdfs"] disable-resolv-conf = [] shutdown-tests = ["api", "sinks-blackhole", "sinks-console", "sinks-prometheus", "sources", "transforms-lua", "transforms-remap", "unix"] cli-tests = ["sinks-blackhole", "sinks-socket", "sources-demo_logs", "sources-file"] +test-utils = [] + +# End-to-End testing-related features +all-e2e-tests = [ + "e2e-tests-datadog" +] + +e2e-tests-datadog = [ + "sources-datadog_agent", + "sinks-datadog_logs", + "sinks-datadog_metrics" +] + vector-api-tests = [ "sources-demo_logs", "transforms-log_to_metric", @@ -867,7 +912,18 @@ enterprise-tests = [ ] component-validation-runner = ["dep:tonic", "sources-internal_logs", "sources-internal_metrics", "sources-vector", "sinks-vector"] -component-validation-tests = ["component-validation-runner", "sources", "transforms", "sinks"] +# For now, only include components that implement ValidatableComponent. +# In the future, this can change to simply reference the targets `sources`, `transforms`, `sinks` +component-validation-tests = [ + "component-validation-runner", + "sources-http_client", + "sources-http_server", + "sinks-http", + "sinks-splunk_hec", + "sources-splunk_hec", + "sinks-datadog_logs", + "sources-datadog_agent", +] # Grouping together features for benchmarks. We exclude the API client due to it causing the build process to run out # of memory when those additional dependencies are built in CI. 
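The `all-e2e-tests`, `e2e-tests-datadog`, and narrowed `component-validation-tests` groups above are plain Cargo fan-out features: named features that only enable other features. A downstream build that wants a single group can select it in the usual way; the dependency path here is a placeholder:

```toml
# Hypothetical downstream Cargo.toml pulling in one feature group.
[dependencies]
vector = { path = "../vector", default-features = false, features = ["e2e-tests-datadog"] }
```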
@@ -889,7 +945,7 @@ remap-benches = ["transforms-remap"] transform-benches = ["transforms-filter", "transforms-dedupe", "transforms-reduce", "transforms-route"] codecs-benches = [] loki-benches = ["sinks-loki"] -enrichment-tables-benches = ["enrichment-tables-geoip"] +enrichment-tables-benches = ["enrichment-tables-geoip", "enrichment-tables-mmdb"] [[bench]] name = "default" diff --git a/Cross.toml b/Cross.toml index 1b138c3638f6d..96e97d7ff64dc 100644 --- a/Cross.toml +++ b/Cross.toml @@ -28,3 +28,9 @@ image = "vector-cross-env:armv7-unknown-linux-gnueabihf" [target.armv7-unknown-linux-musleabihf] image = "vector-cross-env:armv7-unknown-linux-musleabihf" + +[target.arm-unknown-linux-gnueabi] +image = "vector-cross-env:arm-unknown-linux-gnueabi" + +[target.arm-unknown-linux-musleabi] +image = "vector-cross-env:arm-unknown-linux-musleabi" diff --git a/LICENSE-3rdparty.csv b/LICENSE-3rdparty.csv index edcd68c9c2a22..3475fe9504ebd 100644 --- a/LICENSE-3rdparty.csv +++ b/LICENSE-3rdparty.csv @@ -42,6 +42,7 @@ async-signal,https://github.com/smol-rs/async-signal,Apache-2.0 OR MIT,John Nunl async-stream,https://github.com/tokio-rs/async-stream,MIT,Carl Lerche async-task,https://github.com/smol-rs/async-task,Apache-2.0 OR MIT,Stjepan Glavina async-trait,https://github.com/dtolnay/async-trait,MIT OR Apache-2.0,David Tolnay +atomic,https://github.com/Amanieu/atomic-rs,Apache-2.0 OR MIT,Amanieu d'Antras atomic-waker,https://github.com/smol-rs/atomic-waker,Apache-2.0 OR MIT,"Stjepan Glavina , Contributors to futures-rs" atty,https://github.com/softprops/atty,MIT,softprops aws-config,https://github.com/smithy-lang/smithy-rs,Apache-2.0,"AWS Rust SDK Team , Russell Cohen " @@ -98,13 +99,13 @@ bson,https://github.com/mongodb/bson-rust,MIT,"Y. T. Chung , bstr,https://github.com/BurntSushi/bstr,MIT OR Apache-2.0,Andrew Gallant bumpalo,https://github.com/fitzgen/bumpalo,MIT OR Apache-2.0,Nick Fitzgerald bytecheck,https://github.com/djkoloski/bytecheck,MIT,David Koloski -bytemuck,https://github.com/Lokathor/bytemuck,Zlib OR Apache-2.0 OR MIT,Lokathor byteorder,https://github.com/BurntSushi/byteorder,Unlicense OR MIT,Andrew Gallant bytes,https://github.com/carllerche/bytes,MIT,Carl Lerche bytes,https://github.com/tokio-rs/bytes,MIT,"Carl Lerche , Sean McArthur " bytes-utils,https://github.com/vorner/bytes-utils,Apache-2.0 OR MIT,Michal 'vorner' Vaner bytesize,https://github.com/hyunsik/bytesize,Apache-2.0,Hyunsik Choi cassowary,https://github.com/dylanede/cassowary-rs,MIT OR Apache-2.0,Dylan Ede +castaway,https://github.com/sagebind/castaway,MIT,Stephen M. 
Coakley cbc,https://github.com/RustCrypto/block-modes,MIT OR Apache-2.0,RustCrypto Developers cesu8,https://github.com/emk/cesu8-rs,Apache-2.0 OR MIT,Eric Kidd cfb-mode,https://github.com/RustCrypto/block-modes,MIT OR Apache-2.0,RustCrypto Developers @@ -128,6 +129,7 @@ colorchoice,https://github.com/rust-cli/anstyle,MIT OR Apache-2.0,The colorchoic colored,https://github.com/mackwic/colored,MPL-2.0,Thomas Wickham combine,https://github.com/Marwes/combine,MIT,Markus Westerlind community-id,https://github.com/traceflight/rs-community-id,MIT OR Apache-2.0,Julian Wang +compact_str,https://github.com/ParkMyCar/compact_str,MIT,Parker Timmerman concurrent-queue,https://github.com/smol-rs/concurrent-queue,Apache-2.0 OR MIT,"Stjepan Glavina , Taiki Endo , John Nunley " const-oid,https://github.com/RustCrypto/formats/tree/master/const-oid,Apache-2.0 OR MIT,RustCrypto Developers const_fn,https://github.com/taiki-e/const_fn,Apache-2.0 OR MIT,The const_fn Authors @@ -158,6 +160,7 @@ dary_heap,https://github.com/hanmertens/dary_heap,MIT OR Apache-2.0,Han Mertens dashmap,https://github.com/xacrimon/dashmap,MIT,Acrimon data-encoding,https://github.com/ia0/data-encoding,MIT,Julien Cretin data-url,https://github.com/servo/rust-url,MIT OR Apache-2.0,Simon Sapin +databend-client,https://github.com/datafuselabs/bendsql,Apache-2.0,Databend Authors debug-helper,https://github.com/magiclen/debug-helper,MIT,Magic Len der,https://github.com/RustCrypto/formats/tree/master/der,Apache-2.0 OR MIT,RustCrypto Developers deranged,https://github.com/jhpratt/deranged,MIT OR Apache-2.0,Jacob Pratt @@ -238,6 +241,7 @@ h2,https://github.com/hyperium/h2,MIT,"Carl Lerche , Sean McA hash_hasher,https://github.com/Fraser999/Hash-Hasher,Apache-2.0 OR MIT,Fraser Hutchison hashbrown,https://github.com/rust-lang/hashbrown,MIT OR Apache-2.0,Amanieu d'Antras headers,https://github.com/hyperium/headers,MIT,Sean McArthur +heck,https://github.com/withoutboats/heck,MIT OR Apache-2.0,The heck Authors heck,https://github.com/withoutboats/heck,MIT OR Apache-2.0,Without Boats heim,https://github.com/heim-rs/heim,Apache-2.0 OR MIT,svartalf hermit-abi,https://github.com/hermitcore/hermit-rs,MIT OR Apache-2.0,Stefan Lankes @@ -256,16 +260,19 @@ http-types,https://github.com/http-rs/http-types,MIT OR Apache-2.0,Yoshua Wuyts httparse,https://github.com/seanmonstar/httparse,MIT OR Apache-2.0,Sean McArthur httpdate,https://github.com/pyfisch/httpdate,MIT OR Apache-2.0,Pyfisch hyper,https://github.com/hyperium/hyper,MIT,Sean McArthur +hyper-named-pipe,https://github.com/fussybeaver/hyper-named-pipe,Apache-2.0,The hyper-named-pipe Authors hyper-openssl,https://github.com/sfackler/hyper-openssl,MIT OR Apache-2.0,Steven Fackler hyper-proxy,https://github.com/tafia/hyper-proxy,MIT,Johann Tuffe hyper-rustls,https://github.com/rustls/hyper-rustls,Apache-2.0 OR ISC OR MIT,The hyper-rustls Authors hyper-timeout,https://github.com/hjr3/hyper-timeout,MIT OR Apache-2.0,Herman J. 
Radtke III hyper-tls,https://github.com/hyperium/hyper-tls,MIT OR Apache-2.0,Sean McArthur -hyperlocal,https://github.com/softprops/hyperlocal,MIT,softprops +hyper-util,https://github.com/hyperium/hyper-util,MIT,Sean McArthur +hyperlocal-next,https://github.com/softprops/hyperlocal,MIT,softprops iana-time-zone,https://github.com/strawlab/iana-time-zone,MIT OR Apache-2.0,"Andrew Straw , René Kijewski , Ryan Lopopolo " iana-time-zone-haiku,https://github.com/strawlab/iana-time-zone,MIT OR Apache-2.0,René Kijewski ident_case,https://github.com/TedDriggs/ident_case,MIT OR Apache-2.0,Ted Driggs indexmap,https://github.com/bluss/indexmap,Apache-2.0 OR MIT,The indexmap Authors +indexmap,https://github.com/indexmap-rs/indexmap,Apache-2.0 OR MIT,The indexmap Authors indoc,https://github.com/dtolnay/indoc,MIT OR Apache-2.0,David Tolnay infer,https://github.com/bojand/infer,MIT,Bojan inotify,https://github.com/hannobraun/inotify,ISC,"Hanno Braun , Félix Saparelli , Cristian Kubis , Frank Denis " @@ -417,6 +424,8 @@ proptest,https://github.com/proptest-rs/proptest,MIT OR Apache-2.0,Jason Lingle prost,https://github.com/tokio-rs/prost,Apache-2.0,"Dan Burkert , Lucio Franco " prost-derive,https://github.com/tokio-rs/prost,Apache-2.0,"Dan Burkert , Lucio Franco , Tokio Contributors " prost-reflect,https://github.com/andrewhickman/prost-reflect,MIT OR Apache-2.0,Andrew Hickman +psl,https://github.com/addr-rs/psl,MIT OR Apache-2.0,rushmorem +psl-types,https://github.com/addr-rs/psl-types,MIT OR Apache-2.0,rushmorem ptr_meta,https://github.com/djkoloski/ptr_meta,MIT,David Koloski pulsar,https://github.com/streamnative/pulsar-rs,MIT OR Apache-2.0,"Colin Stearns , Kevin Stenerson , Geoffroy Couprie " quad-rand,https://github.com/not-fl3/quad-rand,MIT,not-fl3 @@ -449,7 +458,6 @@ regex-syntax,https://github.com/rust-lang/regex/tree/master/regex-syntax,MIT OR rend,https://github.com/djkoloski/rend,MIT,David Koloski reqwest,https://github.com/seanmonstar/reqwest,MIT OR Apache-2.0,Sean McArthur resolv-conf,http://github.com/tailhook/resolv-conf,MIT OR Apache-2.0,paul@colomiets.name -retain_mut,https://github.com/upsuper/retain_mut,MIT,Xidorn Quan rfc6979,https://github.com/RustCrypto/signatures/tree/master/rfc6979,Apache-2.0 OR MIT,RustCrypto Developers ring,https://github.com/briansmith/ring,ISC AND Custom,Brian Smith rkyv,https://github.com/rkyv/rkyv,MIT,David Koloski @@ -460,6 +468,7 @@ rmpv,https://github.com/3Hren/msgpack-rust,MIT,Evgeny Safronov , Kerollmops " roxmltree,https://github.com/RazrFalcon/roxmltree,MIT OR Apache-2.0,Yevhenii Reizner rsa,https://github.com/RustCrypto/RSA,MIT OR Apache-2.0,"RustCrypto Developers, dignifiedquire " +rumqttc,https://github.com/bytebeamio/rumqtt,Apache-2.0,tekjar rust_decimal,https://github.com/paupino/rust-decimal,MIT,Paul Mason rustc-demangle,https://github.com/alexcrichton/rustc-demangle,MIT OR Apache-2.0,Alex Crichton rustc-hash,https://github.com/rust-lang-nursery/rustc-hash,Apache-2.0 OR MIT,The Rust Project Developers @@ -468,7 +477,9 @@ rustc_version_runtime,https://github.com/seppo0010/rustc-version-runtime-rs,MIT, rustix,https://github.com/bytecodealliance/rustix,Apache-2.0 WITH LLVM-exception OR Apache-2.0 OR MIT,"Dan Gohman , Jakub Konka " rustls,https://github.com/rustls/rustls,Apache-2.0 OR ISC OR MIT,The rustls Authors rustls-native-certs,https://github.com/ctz/rustls-native-certs,Apache-2.0 OR ISC OR MIT,The rustls-native-certs Authors +rustls-native-certs,https://github.com/rustls/rustls-native-certs,Apache-2.0 OR ISC OR MIT,The rustls-native-certs 
Authors rustls-pemfile,https://github.com/rustls/pemfile,Apache-2.0 OR ISC OR MIT,The rustls-pemfile Authors +rustls-pki-types,https://github.com/rustls/pki-types,MIT OR Apache-2.0,The rustls-pki-types Authors rustls-webpki,https://github.com/rustls/webpki,ISC,The rustls-webpki Authors rustversion,https://github.com/dtolnay/rustversion,MIT OR Apache-2.0,David Tolnay rusty-fork,https://github.com/altsysrq/rusty-fork,MIT OR Apache-2.0,Jason Lingle @@ -535,6 +546,7 @@ stringprep,https://github.com/sfackler/rust-stringprep,MIT OR Apache-2.0,Steven strip-ansi-escapes,https://github.com/luser/strip-ansi-escapes,Apache-2.0 OR MIT,Ted Mielczarek strsim,https://github.com/dguo/strsim-rs,MIT,Danny Guo strsim,https://github.com/dguo/strsim-rs,MIT,Danny Guo +strsim,https://github.com/rapidfuzz/strsim-rs,MIT,"Danny Guo , maxbachmann " structopt,https://github.com/TeXitoi/structopt,Apache-2.0 OR MIT,"Guillaume Pinot , others" structopt-derive,https://github.com/TeXitoi/structopt,Apache-2.0 OR MIT,Guillaume Pinot strum,https://github.com/Peternator7/strum,MIT,Peter Glotfelty @@ -623,6 +635,7 @@ walkdir,https://github.com/BurntSushi/walkdir,Unlicense OR MIT,Andrew Gallant warp,https://github.com/seanmonstar/warp,MIT,Sean McArthur wasi,https://github.com/bytecodealliance/wasi,Apache-2.0 WITH LLVM-exception OR Apache-2.0 OR MIT,The Cranelift Project Developers +wasite,https://github.com/ardaku/wasite,Apache-2.0 OR BSL-1.0 OR MIT,The wasite Authors wasm-bindgen,https://github.com/rustwasm/wasm-bindgen,MIT OR Apache-2.0,The wasm-bindgen Developers wasm-bindgen-backend,https://github.com/rustwasm/wasm-bindgen/tree/master/crates/backend,MIT OR Apache-2.0,The wasm-bindgen Developers wasm-bindgen-futures,https://github.com/rustwasm/wasm-bindgen/tree/master/crates/futures,MIT OR Apache-2.0,The wasm-bindgen Developers @@ -638,17 +651,8 @@ widestring,https://github.com/starkat99/widestring-rs,MIT OR Apache-2.0,Kathryn widestring,https://github.com/starkat99/widestring-rs,MIT OR Apache-2.0,The widestring Authors winapi,https://github.com/retep998/winapi-rs,MIT OR Apache-2.0,Peter Atashian winapi-util,https://github.com/BurntSushi/winapi-util,Unlicense OR MIT,Andrew Gallant -windows-core,https://github.com/microsoft/windows-rs,MIT OR Apache-2.0,Microsoft +windows,https://github.com/microsoft/windows-rs,MIT OR Apache-2.0,Microsoft windows-service,https://github.com/mullvad/windows-service-rs,MIT OR Apache-2.0,Mullvad VPN -windows-sys,https://github.com/microsoft/windows-rs,MIT OR Apache-2.0,Microsoft -windows-targets,https://github.com/microsoft/windows-rs,MIT OR Apache-2.0,Microsoft -windows_aarch64_gnullvm,https://github.com/microsoft/windows-rs,MIT OR Apache-2.0,Microsoft -windows_aarch64_msvc,https://github.com/microsoft/windows-rs,MIT OR Apache-2.0,Microsoft -windows_i686_gnu,https://github.com/microsoft/windows-rs,MIT OR Apache-2.0,Microsoft -windows_i686_msvc,https://github.com/microsoft/windows-rs,MIT OR Apache-2.0,Microsoft -windows_x86_64_gnu,https://github.com/microsoft/windows-rs,MIT OR Apache-2.0,Microsoft -windows_x86_64_gnullvm,https://github.com/microsoft/windows-rs,MIT OR Apache-2.0,Microsoft -windows_x86_64_msvc,https://github.com/microsoft/windows-rs,MIT OR Apache-2.0,Microsoft winnow,https://github.com/winnow-rs/winnow,MIT,The winnow Authors winreg,https://github.com/gentoo90/winreg-rs,MIT,Igor Shaula woothee,https://github.com/woothee/woothee-rust,Apache-2.0,hhatto diff --git a/Makefile b/Makefile index b0c2f8f923fe6..96e83bc169e13 100644 --- a/Makefile +++ b/Makefile @@ -223,6 +223,14 @@ 
build-armv7-unknown-linux-gnueabihf: target/armv7-unknown-linux-gnueabihf/releas build-armv7-unknown-linux-musleabihf: target/armv7-unknown-linux-musleabihf/release/vector ## Build a release binary for the armv7-unknown-linux-musleabihf triple. @echo "Output to ${<}" +.PHONY: build-arm-unknown-linux-gnueabi +build-arm-unknown-linux-gnueabi: target/arm-unknown-linux-gnueabi/release/vector ## Build a release binary for the arm-unknown-linux-gnueabi triple. + @echo "Output to ${<}" + +.PHONY: build-arm-unknown-linux-musleabi +build-arm-unknown-linux-musleabi: target/arm-unknown-linux-musleabi/release/vector ## Build a release binary for the arm-unknown-linux-musleabi triple. + @echo "Output to ${<}" + .PHONY: build-graphql-schema build-graphql-schema: ## Generate the `schema.json` for Vector's GraphQL API ${MAYBE_ENVIRONMENT_EXEC} cargo run --bin graphql-schema --no-default-features --features=default-no-api-client @@ -361,7 +369,7 @@ test-integration: test-integration-databend test-integration-docker-logs test-in test-integration: test-integration-eventstoredb test-integration-fluent test-integration-gcp test-integration-greptimedb test-integration-humio test-integration-http-client test-integration-influxdb test-integration: test-integration-kafka test-integration-logstash test-integration-loki test-integration-mongodb test-integration-nats test-integration: test-integration-nginx test-integration-opentelemetry test-integration-postgres test-integration-prometheus test-integration-pulsar -test-integration: test-integration-redis test-integration-splunk test-integration-dnstap test-integration-datadog-agent test-integration-datadog-logs +test-integration: test-integration-redis test-integration-splunk test-integration-dnstap test-integration-datadog-agent test-integration-datadog-logs test-integration-e2e-datadog-logs test-integration: test-integration-datadog-traces test-integration-shutdown test-integration-%-cleanup: @@ -529,6 +537,9 @@ package-aarch64-unknown-linux-gnu-all: package-aarch64-unknown-linux-gnu package .PHONY: package-armv7-unknown-linux-gnueabihf-all package-armv7-unknown-linux-gnueabihf-all: package-armv7-unknown-linux-gnueabihf package-deb-armv7-gnu package-rpm-armv7hl-gnu # Build all armv7-unknown-linux-gnueabihf MUSL packages +.PHONY: package-arm-unknown-linux-gnueabi-all +package-arm-unknown-linux-gnueabi-all: package-arm-unknown-linux-gnueabi package-deb-arm-gnu # Build all arm-unknown-linux-gnueabi GNU packages + .PHONY: package-x86_64-unknown-linux-gnu package-x86_64-unknown-linux-gnu: target/artifacts/vector-${VERSION}-x86_64-unknown-linux-gnu.tar.gz ## Build an archive suitable for the `x86_64-unknown-linux-gnu` triple. @echo "Output to ${<}." @@ -553,6 +564,14 @@ package-armv7-unknown-linux-gnueabihf: target/artifacts/vector-${VERSION}-armv7- package-armv7-unknown-linux-musleabihf: target/artifacts/vector-${VERSION}-armv7-unknown-linux-musleabihf.tar.gz ## Build an archive suitable for the `armv7-unknown-linux-musleabihf triple. @echo "Output to ${<}." +.PHONY: package-arm-unknown-linux-gnueabi +package-arm-unknown-linux-gnueabi: target/artifacts/vector-${VERSION}-arm-unknown-linux-gnueabi.tar.gz ## Build an archive suitable for the `arm-unknown-linux-gnueabi` triple. + @echo "Output to ${<}." + +.PHONY: package-arm-unknown-linux-musleabi +package-arm-unknown-linux-musleabi: target/artifacts/vector-${VERSION}-arm-unknown-linux-musleabi.tar.gz ## Build an archive suitable for the `arm-unknown-linux-musleabi` triple. + @echo "Output to ${<}."
+ # debs .PHONY: package-deb-x86_64-unknown-linux-gnu @@ -571,6 +590,10 @@ package-deb-aarch64: package-aarch64-unknown-linux-gnu ## Build the aarch64 deb package-deb-armv7-gnu: package-armv7-unknown-linux-gnueabihf ## Build the armv7-unknown-linux-gnueabihf deb package $(CONTAINER_TOOL) run -v $(PWD):/git/vectordotdev/vector/ -e TARGET=armv7-unknown-linux-gnueabihf -e VECTOR_VERSION $(ENVIRONMENT_UPSTREAM) cargo vdev package deb +.PHONY: package-deb-arm-gnu +package-deb-arm-gnu: package-arm-unknown-linux-gnueabi ## Build the arm-unknown-linux-gnueabi deb package + $(CONTAINER_TOOL) run -v $(PWD):/git/vectordotdev/vector/ -e TARGET=arm-unknown-linux-gnueabi -e VECTOR_VERSION $(ENVIRONMENT_UPSTREAM) cargo vdev package deb + # rpms .PHONY: package-rpm-x86_64-unknown-linux-gnu diff --git a/benches/enrichment_tables.rs b/benches/enrichment_tables.rs index 762383095794e..5c9a11f157a62 100644 --- a/benches/enrichment_tables.rs +++ b/benches/enrichment_tables.rs @@ -5,6 +5,7 @@ use criterion::{criterion_group, criterion_main, BatchSize, Criterion}; use vector::enrichment_tables::{ file::File, geoip::{Geoip, GeoipConfig}, + mmdb::{Mmdb, MmdbConfig}, Condition, Table, }; use vector_lib::enrichment::Case; @@ -13,7 +14,7 @@ use vrl::value::{ObjectMap, Value}; criterion_group!( name = benches; config = Criterion::default().noise_threshold(0.02).sample_size(10); - targets = benchmark_enrichment_tables_file, benchmark_enrichment_tables_geoip + targets = benchmark_enrichment_tables_file, benchmark_enrichment_tables_geoip, benchmark_enrichment_tables_mmdb ); criterion_main!(benches); @@ -323,3 +324,85 @@ fn benchmark_enrichment_tables_geoip(c: &mut Criterion) { ); }); } + +fn benchmark_enrichment_tables_mmdb(c: &mut Criterion) { + let mut group = c.benchmark_group("enrichment_tables_mmdb"); + let build = |path: &str| { + Mmdb::new(MmdbConfig { + path: path.to_string(), + }) + .unwrap() + }; + + group.bench_function("enrichment_tables/mmdb_isp", |b| { + let table = build("tests/data/GeoIP2-ISP-Test.mmdb"); + let ip = "208.192.1.2"; + let mut expected = ObjectMap::new(); + expected.insert("autonomous_system_number".into(), 701i64.into()); + expected.insert( + "autonomous_system_organization".into(), + "MCI Communications Services, Inc. 
d/b/a Verizon Business".into(), + ); + expected.insert("isp".into(), "Verizon Business".into()); + expected.insert("organization".into(), "Verizon Business".into()); + + b.iter_batched( + || (&table, ip, &expected), + |(table, ip, expected)| { + assert_eq!( + Ok(expected), + table + .find_table_row( + Case::Insensitive, + &[Condition::Equals { + field: "ip", + value: ip.into(), + }], + None, + None, + ) + .as_ref() + ) + }, + BatchSize::SmallInput, + ); + }); + + group.bench_function("enrichment_tables/mmdb_city", |b| { + let table = build("tests/data/GeoIP2-City-Test.mmdb"); + let ip = "67.43.156.9"; + let mut expected = ObjectMap::new(); + expected.insert( + "location".into(), + ObjectMap::from([ + ("latitude".into(), Value::from(27.5)), + ("longitude".into(), Value::from(90.5)), + ]) + .into(), + ); + + b.iter_batched( + || (&table, ip, &expected), + |(table, ip, expected)| { + assert_eq!( + Ok(expected), + table + .find_table_row( + Case::Insensitive, + &[Condition::Equals { + field: "ip", + value: ip.into(), + }], + Some(&[ + "location.latitude".to_string(), + "location.longitude".to_string(), + ]), + None, + ) + .as_ref() + ) + }, + BatchSize::SmallInput, + ); + }); +} diff --git a/changelog.d/10304_add_prometheus_pushgateway_source.feature.md b/changelog.d/10304_add_prometheus_pushgateway_source.feature.md deleted file mode 100644 index 5052a6671597b..0000000000000 --- a/changelog.d/10304_add_prometheus_pushgateway_source.feature.md +++ /dev/null @@ -1,5 +0,0 @@ -Vector can now emulate a [Prometheus Pushgateway](https://github.com/prometheus/pushgateway) through the new `prometheus_pushgateway` source. Counters and histograms can optionally be aggregated across pushes to support use-cases like cron jobs. - -There are some caveats, which are listed [here](https://github.com/Sinjo/vector/blob/0d4fc20091ddae7f3562bfdf07c9095c0c7223e0/src/sources/prometheus/pushgateway.rs#L8-L12). - -authors: @Sinjo diff --git a/changelog.d/17013_s3_filename_extension.fix.md b/changelog.d/17013_s3_filename_extension.fix.md deleted file mode 100644 index 12770687e4d35..0000000000000 --- a/changelog.d/17013_s3_filename_extension.fix.md +++ /dev/null @@ -1 +0,0 @@ -Fixed an issue where the `aws_s3` sink adds a trailing period to the s3 key when the `filename_extension` is empty. diff --git a/changelog.d/19183_chronicle_namespace_support.enhancement.md b/changelog.d/19183_chronicle_namespace_support.enhancement.md new file mode 100644 index 0000000000000..813f5adb9ad72 --- /dev/null +++ b/changelog.d/19183_chronicle_namespace_support.enhancement.md @@ -0,0 +1,3 @@ +Google Chronicle Unstructured Log sink now supports adding a namespace to the log events for indexing within Chronicle. + +authors: ChocPanda diff --git a/changelog.d/19370_remove_warning_for_unused_outputs.fix.md b/changelog.d/19370_remove_warning_for_unused_outputs.fix.md deleted file mode 100644 index 2bd3377b7b0b4..0000000000000 --- a/changelog.d/19370_remove_warning_for_unused_outputs.fix.md +++ /dev/null @@ -1 +0,0 @@ -Removed warnings for unused outputs in `datadog_agent` source when the corresponding output is disabled in the source config. 
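The `mmdb` benchmark added above builds its table straight from an `MmdbConfig` holding only a `path`. Translated into Vector configuration, a minimal sketch — assuming the table type is registered as `mmdb`, mirroring the existing `geoip` table — would be:

```toml
# Hypothetical enrichment table config; the `mmdb` type name mirrors
# MmdbConfig from the benchmark and is an assumption here.
[enrichment_tables.geo]
type = "mmdb"
path = "tests/data/GeoIP2-City-Test.mmdb"
```

Lookups then behave like the benchmark's `find_table_row` calls: match an `ip` condition and optionally select fields such as `location.latitude`.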
diff --git a/changelog.d/19686_element_size_histogram.enhancement.md b/changelog.d/19686_element_size_histogram.enhancement.md new file mode 100644 index 0000000000000..fa1ed69ffb90a --- /dev/null +++ b/changelog.d/19686_element_size_histogram.enhancement.md @@ -0,0 +1,7 @@ +Added a new histogram metric, `component_received_bytes`, that measures the byte-size of individual events received by the following sources: + +- `socket` +- `statsd` +- `syslog` + +authors: pabloem diff --git a/changelog.d/19712_propagate_tracing_span_context_stream_sink_request_building.fix.md b/changelog.d/19712_propagate_tracing_span_context_stream_sink_request_building.fix.md deleted file mode 100644 index 8bc7a2229da0b..0000000000000 --- a/changelog.d/19712_propagate_tracing_span_context_stream_sink_request_building.fix.md +++ /dev/null @@ -1,36 +0,0 @@ -The following metrics now correctly have the `component_kind`, `component_type`, and `component_id` tags: - - `component_errors_total` - - `component_discarded_events_total` - -For the following sinks: - - `splunk_hec` - - `clickhouse` - - `loki` - - `redis` - - `azure_blob` - - `azure_monitor_logs` - - `webhdfs` - - `appsignal` - - `amqp` - - `aws_kinesis` - - `statsd` - - `honeycomb` - - `gcp_stackdriver_metrics` - - `gcs_chronicle_unstructured` - - `gcp_stackdriver_logs` - - `gcp_pubsub` - - `gcp_cloud_storage` - - `nats` - - `http` - - `kafka` - - `new_relic` - - `datadog_metrics` - - `datadog_traces` - - `datadog_events` - - `databend` - - `prometheus_remote_write` - - `pulsar` - - `aws_s3` - - `aws_sqs` - - `aws_sns` - - `elasticsearch` diff --git a/changelog.d/20074_protobuf_decoder.breaking.md b/changelog.d/20074_protobuf_decoder.breaking.md new file mode 100644 index 0000000000000..22e812c3fd7b4 --- /dev/null +++ b/changelog.d/20074_protobuf_decoder.breaking.md @@ -0,0 +1 @@ +The `protobuf` decoder will no longer set fields on the decoded event that are not set in the incoming byte stream. Previously it would set the default value for the field even if it wasn't in the event. This change ensures that the encoder will return the exact same bytes for the same given event. diff --git a/changelog.d/20154_length_delimited_framing_options.feature.md b/changelog.d/20154_length_delimited_framing_options.feature.md new file mode 100644 index 0000000000000..7774075bf3aba --- /dev/null +++ b/changelog.d/20154_length_delimited_framing_options.feature.md @@ -0,0 +1,3 @@ +Added support for additional config options for `length_delimited` framing. 
+ +authors: esensar diff --git a/changelog.d/20172_max_number_of_messages.enhancement.md b/changelog.d/20172_max_number_of_messages.enhancement.md new file mode 100644 index 0000000000000..75f12b4e2d26c --- /dev/null +++ b/changelog.d/20172_max_number_of_messages.enhancement.md @@ -0,0 +1,3 @@ +Adds a `max_number_of_messages` option to the SQS configuration of the `aws_s3` source. + +authors: fdamstra diff --git a/changelog.d/20214_amqp_expiration.enhancement.md b/changelog.d/20214_amqp_expiration.enhancement.md new file mode 100644 index 0000000000000..7f9f285d2491c --- /dev/null +++ b/changelog.d/20214_amqp_expiration.enhancement.md @@ -0,0 +1,3 @@ +Added support for `expiration_ms` on the `amqp` sink to set an expiration on sent messages. + +authors: sonnens diff --git a/changelog.d/20224_kafka_source_event_instrumentation.fix.md b/changelog.d/20224_kafka_source_event_instrumentation.fix.md new file mode 100644 index 0000000000000..33229a6c6bba9 --- /dev/null +++ b/changelog.d/20224_kafka_source_event_instrumentation.fix.md @@ -0,0 +1,3 @@ +The `kafka` source now emits received bytes and event counts correctly. + +authors: jches diff --git a/changelog.d/20228_fix-log-to-metric-set-supported.fix.md b/changelog.d/20228_fix-log-to-metric-set-supported.fix.md new file mode 100644 index 0000000000000..ace9344d7b401 --- /dev/null +++ b/changelog.d/20228_fix-log-to-metric-set-supported.fix.md @@ -0,0 +1,3 @@ +Fixed an issue where the `log_to_metric` transform with the `all_metrics = true` config failed to convert properly formatted 'set'-type events into metrics. + +authors: pabloem diff --git a/changelog.d/20244_change_inner_databend_client.enhancement.md b/changelog.d/20244_change_inner_databend_client.enhancement.md new file mode 100644 index 0000000000000..6a284c8595630 --- /dev/null +++ b/changelog.d/20244_change_inner_databend_client.enhancement.md @@ -0,0 +1,3 @@ +Replaced the inner Databend client with the client provided by the Databend Rust driver at https://github.com/datafuselabs/bendsql/. With the new client, the `endpoint` config supports both an HTTP URI like `http://localhost:8000` and a DSN like `databend://root:@localhost:8000/mydatabase?sslmode=disable&arg=value`, which allows more customization of the inner client. + +authors: everpcpc diff --git a/changelog.d/20265-distroless-debian12.enhancement.md b/changelog.d/20265-distroless-debian12.enhancement.md new file mode 100644 index 0000000000000..bf3c1baefeeda --- /dev/null +++ b/changelog.d/20265-distroless-debian12.enhancement.md @@ -0,0 +1 @@ +The distroless images have changed their base from Debian 11 to Debian 12. diff --git a/changelog.d/20282_aws_access_key_id_and_assume_role_auth.fix.md b/changelog.d/20282_aws_access_key_id_and_assume_role_auth.fix.md new file mode 100644 index 0000000000000..9e70c5747f631 --- /dev/null +++ b/changelog.d/20282_aws_access_key_id_and_assume_role_auth.fix.md @@ -0,0 +1,5 @@ +Vector would panic when attempting to use a combination of `access_key_id` and +`assume_role` authentication with the AWS components. This error has now been +fixed. + +authors: StephenWakely diff --git a/changelog.d/20292_splunk_hec_add_message_semantic_meaning.fix.md b/changelog.d/20292_splunk_hec_add_message_semantic_meaning.fix.md new file mode 100644 index 0000000000000..2f421158d4884 --- /dev/null +++ b/changelog.d/20292_splunk_hec_add_message_semantic_meaning.fix.md @@ -0,0 +1 @@ +Added a message semantic meaning to the Splunk HEC source. This only applies to the `Vector` log namespace.
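As a minimal sketch of the new `sqs.max_number_of_messages` option from the `aws_s3` source entry above — the region and queue URL are placeholders:

```toml
# Hypothetical aws_s3 source config; region and queue URL are placeholders.
[sources.s3_logs]
type = "aws_s3"
region = "us-east-1"
sqs.queue_url = "https://sqs.us-east-1.amazonaws.com/123456789012/vector-notifications"
sqs.max_number_of_messages = 10
```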
diff --git a/changelog.d/20331_databend_config_ndjson.enhancement.md b/changelog.d/20331_databend_config_ndjson.enhancement.md new file mode 100644 index 0000000000000..6cd97543b2431 --- /dev/null +++ b/changelog.d/20331_databend_config_ndjson.enhancement.md @@ -0,0 +1,3 @@ +Added a new config field `missing_field_as` to the `databend` sink to specify the behavior when fields are missing. Previously the behavior was the same as setting this new configuration option to `ERROR`. The new default value is `NULL`. + +authors: everpcpc diff --git a/changelog.d/20335_added_support_for_insert_distributed_one_random_shard.feature.md b/changelog.d/20335_added_support_for_insert_distributed_one_random_shard.feature.md new file mode 100644 index 0000000000000..9cedc0658f620 --- /dev/null +++ b/changelog.d/20335_added_support_for_insert_distributed_one_random_shard.feature.md @@ -0,0 +1,3 @@ +The `clickhouse` sink now has a new configuration option, `insert_random_shard`, to tell Clickhouse to insert into a random shard (by setting `insert_distributed_one_random_shard`). See the Clickhouse [Distributed Table Engine docs](https://clickhouse.com/docs/en/engines/table-engines/special/distributed) for details. + +authors: rguleryuz diff --git a/changelog.d/README.md b/changelog.d/README.md index fd48a3439142f..f7e6f9b27e8e9 100644 --- a/changelog.d/README.md +++ b/changelog.d/README.md @@ -74,7 +74,9 @@ the authors specified. The process for adding this is simply to have the last line of the file be in this format: - authors: , , <...> + authors: <...> + +Do not include a leading `@` when specifying your username. ## Example diff --git a/changelog.d/datadog_logs_batching.fix.md b/changelog.d/datadog_logs_batching.fix.md deleted file mode 100644 index 8816210c00c41..0000000000000 --- a/changelog.d/datadog_logs_batching.fix.md +++ /dev/null @@ -1,2 +0,0 @@ -Fixed an issue where the `datadog_logs` sink could produce a request larger than the allowed API -limit. diff --git a/changelog.d/dd_agent_parse_ddtags_format.breaking.md b/changelog.d/dd_agent_parse_ddtags_format.breaking.md new file mode 100644 index 0000000000000..ee2a3e6e6c051 --- /dev/null +++ b/changelog.d/dd_agent_parse_ddtags_format.breaking.md @@ -0,0 +1 @@ +Previously, the `datadog_agent` setting `parse_ddtags` parsed the tag string into an Object. It is now parsed into an Array of `key:value` strings, which matches the behavior of the Datadog logs backend intake. diff --git a/changelog.d/dd_logs_reconstruct_ddtags.fix.md b/changelog.d/dd_logs_reconstruct_ddtags.fix.md new file mode 100644 index 0000000000000..32dff3c444f8e --- /dev/null +++ b/changelog.d/dd_logs_reconstruct_ddtags.fix.md @@ -0,0 +1 @@ +The `datadog_logs` sink was not reconstructing ddtags that may have been parsed upstream by the `datadog_agent` source's `parse_ddtags` setting. The sink log encoding was fixed to reassemble the tags into a unified string that the Datadog logs intake expects. diff --git a/changelog.d/elasticsearch_sink_document_versioning.feature.md b/changelog.d/elasticsearch_sink_document_versioning.feature.md new file mode 100644 index 0000000000000..804e21df52d43 --- /dev/null +++ b/changelog.d/elasticsearch_sink_document_versioning.feature.md @@ -0,0 +1,3 @@ +Allows the Elasticsearch sink to use [external versioning for documents](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#index-versioning).
To use it, set `bulk.version_type` to `external` and then set `bulk.version` to either a static value like `123` or a template that references a field from the document, such as `{{ my_document_field }}`. + +authors: radimsuckr diff --git a/changelog.d/gelf_at_character.enhancement.md b/changelog.d/gelf_at_character.enhancement.md deleted file mode 100644 index 081add0d20466..0000000000000 --- a/changelog.d/gelf_at_character.enhancement.md +++ /dev/null @@ -1,2 +0,0 @@ -Gracefully accept `@` characters in labels when decoding GELF. -authors: MartinEmrich diff --git a/changelog.d/graphql_endpoint_toggle.enhancement.md b/changelog.d/graphql_endpoint_toggle.enhancement.md deleted file mode 100644 index 0336ae4f61f48..0000000000000 --- a/changelog.d/graphql_endpoint_toggle.enhancement.md +++ /dev/null @@ -1,3 +0,0 @@ -Added a boolean `graphql` field to the api configuration to allow disabling the graphql endpoint. - -Note that the `playground` endpoint will now only be enabled if the `graphql` endpoint is also enabled. diff --git a/changelog.d/prometheus_mixed_gauges.fix.md b/changelog.d/prometheus_mixed_gauges.fix.md new file mode 100644 index 0000000000000..8f3490a136fdd --- /dev/null +++ b/changelog.d/prometheus_mixed_gauges.fix.md @@ -0,0 +1,4 @@ +The `prometheus_exporter` sink is now able to correctly handle a mix of both incremental and +absolute valued gauges arriving for the same metric series. + +authors: RussellRollins diff --git a/changelog.d/splunk_hec_logs_auto_extract_ts.fix.md b/changelog.d/splunk_hec_logs_auto_extract_ts.fix.md new file mode 100644 index 0000000000000..6ec0df5afa6a5 --- /dev/null +++ b/changelog.d/splunk_hec_logs_auto_extract_ts.fix.md @@ -0,0 +1,3 @@ +Previously, when the `auto_extract_timestamp` setting in the `splunk_hec_logs` sink was enabled, the sink attempted to remove the existing event timestamp. This would throw a warning that the timestamp type was invalid. + +This has been fixed: the sink no longer attempts to remove the timestamp from the event when `auto_extract_timestamp` is enabled, since this setting indicates that Vector should let Splunk extract it.
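A minimal sketch of the Elasticsearch document-versioning entry above; the endpoint and index are placeholders, while `bulk.version_type` and `bulk.version` come straight from the entry:

```toml
# Hypothetical elasticsearch sink config; endpoint and index are placeholders.
[sinks.es]
type = "elasticsearch"
inputs = ["in"]
endpoints = ["http://localhost:9200"]
bulk.index = "vector-logs"
bulk.version_type = "external"
bulk.version = "{{ my_document_field }}"
```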
diff --git a/distribution/docker/alpine/Dockerfile b/distribution/docker/alpine/Dockerfile index cca2d5cb75272..ccdfdae4fff64 100644 --- a/distribution/docker/alpine/Dockerfile +++ b/distribution/docker/alpine/Dockerfile @@ -1,9 +1,14 @@ -FROM docker.io/alpine:3.18 AS builder +FROM docker.io/alpine:3.19 AS builder WORKDIR /vector +ARG TARGETPLATFORM + COPY vector-*-unknown-linux-musl*.tar.gz ./ -RUN tar -xvf vector-0*-"$(cat /etc/apk/arch)"-unknown-linux-musl*.tar.gz --strip-components=2 + +# special case for arm v6 builds, /etc/apk/arch reports armhf which conflicts with the armv7 package +RUN ARCH=$(if [ "$TARGETPLATFORM" = "linux/arm/v6" ]; then echo "arm"; else cat /etc/apk/arch; fi) \ + && tar -xvf vector-0*-"$ARCH"-unknown-linux-musl*.tar.gz --strip-components=2 RUN mkdir -p /var/lib/vector diff --git a/distribution/docker/distroless-libc/Dockerfile b/distribution/docker/distroless-libc/Dockerfile index 773aeadcbdc93..2a0f7084c9a52 100644 --- a/distribution/docker/distroless-libc/Dockerfile +++ b/distribution/docker/distroless-libc/Dockerfile @@ -9,7 +9,7 @@ RUN mkdir -p /var/lib/vector # distroless doesn't use static tags # hadolint ignore=DL3007 -FROM gcr.io/distroless/cc-debian11:latest +FROM gcr.io/distroless/cc-debian12:latest COPY --from=builder /usr/bin/vector /usr/bin/vector COPY --from=builder /usr/share/doc/vector /usr/share/doc/vector diff --git a/distribution/install.sh b/distribution/install.sh index 39e3310a11d2a..3590ba9d063f6 100755 --- a/distribution/install.sh +++ b/distribution/install.sh @@ -13,7 +13,7 @@ set -u # If PACKAGE_ROOT is unset or empty, default it. PACKAGE_ROOT="${PACKAGE_ROOT:-"https://packages.timber.io/vector"}" # If VECTOR_VERSION is unset or empty, default it. -VECTOR_VERSION="${VECTOR_VERSION:-"0.35.0"}" +VECTOR_VERSION="${VECTOR_VERSION:-"0.37.1"}" _divider="--------------------------------------------------------------------------------" _prompt=">>>" _indent=" " diff --git a/distribution/kubernetes/vector-agent/README.md b/distribution/kubernetes/vector-agent/README.md index 1d0d44aa044b0..55e8722848b94 100644 --- a/distribution/kubernetes/vector-agent/README.md +++ b/distribution/kubernetes/vector-agent/README.md @@ -1,6 +1,6 @@ The kubernetes manifests found in this directory have been automatically generated from the [helm chart `vector/vector`](https://github.com/vectordotdev/helm-charts/tree/master/charts/vector) -version 0.30.0 with the following `values.yaml`: +version 0.32.0 with the following `values.yaml`: ```yaml role: Agent diff --git a/distribution/kubernetes/vector-agent/configmap.yaml b/distribution/kubernetes/vector-agent/configmap.yaml index 6150a7c47969a..bfe345207f77d 100644 --- a/distribution/kubernetes/vector-agent/configmap.yaml +++ b/distribution/kubernetes/vector-agent/configmap.yaml @@ -8,7 +8,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Agent - app.kubernetes.io/version: "0.35.0-distroless-libc" + app.kubernetes.io/version: "0.37.0-distroless-libc" data: agent.yaml: | data_dir: /vector-data-dir @@ -25,7 +25,7 @@ data: excludes: [binfmt_misc] filesystems: excludes: [binfmt_misc] - mountPoints: + mountpoints: excludes: ["*/proc/sys/fs/binfmt_misc"] type: host_metrics internal_metrics: diff --git a/distribution/kubernetes/vector-agent/daemonset.yaml b/distribution/kubernetes/vector-agent/daemonset.yaml index 8c4a3aa71728a..16604d4408ce7 100644 --- a/distribution/kubernetes/vector-agent/daemonset.yaml +++ b/distribution/kubernetes/vector-agent/daemonset.yaml 
@@ -8,7 +8,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Agent - app.kubernetes.io/version: "0.35.0-distroless-libc" + app.kubernetes.io/version: "0.37.0-distroless-libc" annotations: {} spec: selector: @@ -30,12 +30,14 @@ spec: dnsPolicy: ClusterFirst containers: - name: vector - image: "timberio/vector:0.35.0-distroless-libc" + image: "timberio/vector:0.37.0-distroless-libc" imagePullPolicy: IfNotPresent args: - --config-dir - /etc/vector/ env: + - name: VECTOR_LOG + value: "info" - name: VECTOR_SELF_NODE_NAME valueFrom: fieldRef: @@ -52,8 +54,6 @@ spec: value: "/host/proc" - name: SYSFS_ROOT value: "/host/sys" - - name: VECTOR_LOG - value: "info" ports: - name: prom-exporter containerPort: 9090 diff --git a/distribution/kubernetes/vector-agent/rbac.yaml b/distribution/kubernetes/vector-agent/rbac.yaml index 21eaabb6ce505..6161e45777303 100644 --- a/distribution/kubernetes/vector-agent/rbac.yaml +++ b/distribution/kubernetes/vector-agent/rbac.yaml @@ -10,7 +10,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Agent - app.kubernetes.io/version: "0.35.0-distroless-libc" + app.kubernetes.io/version: "0.37.0-distroless-libc" rules: - apiGroups: - "" @@ -31,7 +31,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Agent - app.kubernetes.io/version: "0.35.0-distroless-libc" + app.kubernetes.io/version: "0.37.0-distroless-libc" roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole diff --git a/distribution/kubernetes/vector-agent/service-headless.yaml b/distribution/kubernetes/vector-agent/service-headless.yaml index e716b46b5fbf8..49465ede3cd1b 100644 --- a/distribution/kubernetes/vector-agent/service-headless.yaml +++ b/distribution/kubernetes/vector-agent/service-headless.yaml @@ -8,7 +8,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Agent - app.kubernetes.io/version: "0.35.0-distroless-libc" + app.kubernetes.io/version: "0.37.0-distroless-libc" annotations: spec: clusterIP: None diff --git a/distribution/kubernetes/vector-agent/serviceaccount.yaml b/distribution/kubernetes/vector-agent/serviceaccount.yaml index 01b0cd040a753..726beb2e9de24 100644 --- a/distribution/kubernetes/vector-agent/serviceaccount.yaml +++ b/distribution/kubernetes/vector-agent/serviceaccount.yaml @@ -8,5 +8,5 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Agent - app.kubernetes.io/version: "0.35.0-distroless-libc" + app.kubernetes.io/version: "0.37.0-distroless-libc" automountServiceAccountToken: true diff --git a/distribution/kubernetes/vector-aggregator/README.md b/distribution/kubernetes/vector-aggregator/README.md index c770d0c0c52d5..9993816bcb101 100644 --- a/distribution/kubernetes/vector-aggregator/README.md +++ b/distribution/kubernetes/vector-aggregator/README.md @@ -1,6 +1,6 @@ The kubernetes manifests found in this directory have been automatically generated from the [helm chart `vector/vector`](https://github.com/vectordotdev/helm-charts/tree/master/charts/vector) -version 0.30.0 with the following `values.yaml`: +version 0.32.0 with the following `values.yaml`: ```yaml diff --git a/distribution/kubernetes/vector-aggregator/configmap.yaml b/distribution/kubernetes/vector-aggregator/configmap.yaml index 08d8098f39f59..9318a1b97f097 100644 --- 
a/distribution/kubernetes/vector-aggregator/configmap.yaml +++ b/distribution/kubernetes/vector-aggregator/configmap.yaml @@ -8,7 +8,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Aggregator - app.kubernetes.io/version: "0.35.0-distroless-libc" + app.kubernetes.io/version: "0.37.0-distroless-libc" data: aggregator.yaml: | data_dir: /vector-data-dir diff --git a/distribution/kubernetes/vector-aggregator/service-headless.yaml b/distribution/kubernetes/vector-aggregator/service-headless.yaml index a0bf61c348020..d21a91e690485 100644 --- a/distribution/kubernetes/vector-aggregator/service-headless.yaml +++ b/distribution/kubernetes/vector-aggregator/service-headless.yaml @@ -8,7 +8,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Aggregator - app.kubernetes.io/version: "0.35.0-distroless-libc" + app.kubernetes.io/version: "0.37.0-distroless-libc" annotations: spec: clusterIP: None diff --git a/distribution/kubernetes/vector-aggregator/service.yaml b/distribution/kubernetes/vector-aggregator/service.yaml index c3cd56a8cd882..e419f3cc6f096 100644 --- a/distribution/kubernetes/vector-aggregator/service.yaml +++ b/distribution/kubernetes/vector-aggregator/service.yaml @@ -8,7 +8,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Aggregator - app.kubernetes.io/version: "0.35.0-distroless-libc" + app.kubernetes.io/version: "0.37.0-distroless-libc" annotations: spec: ports: diff --git a/distribution/kubernetes/vector-aggregator/serviceaccount.yaml b/distribution/kubernetes/vector-aggregator/serviceaccount.yaml index 0c52d00aadc09..d5294b38517a6 100644 --- a/distribution/kubernetes/vector-aggregator/serviceaccount.yaml +++ b/distribution/kubernetes/vector-aggregator/serviceaccount.yaml @@ -8,5 +8,5 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Aggregator - app.kubernetes.io/version: "0.35.0-distroless-libc" + app.kubernetes.io/version: "0.37.0-distroless-libc" automountServiceAccountToken: true diff --git a/distribution/kubernetes/vector-aggregator/statefulset.yaml b/distribution/kubernetes/vector-aggregator/statefulset.yaml index 0620b2c8150ff..5bc2f9cfbf6c1 100644 --- a/distribution/kubernetes/vector-aggregator/statefulset.yaml +++ b/distribution/kubernetes/vector-aggregator/statefulset.yaml @@ -8,7 +8,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Aggregator - app.kubernetes.io/version: "0.35.0-distroless-libc" + app.kubernetes.io/version: "0.37.0-distroless-libc" annotations: {} spec: replicas: 1 @@ -18,6 +18,7 @@ spec: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Aggregator + minReadySeconds: 0 serviceName: vector-headless template: metadata: @@ -32,12 +33,14 @@ spec: dnsPolicy: ClusterFirst containers: - name: vector - image: "timberio/vector:0.35.0-distroless-libc" + image: "timberio/vector:0.37.0-distroless-libc" imagePullPolicy: IfNotPresent args: - --config-dir - /etc/vector/ env: + - name: VECTOR_LOG + value: "info" ports: - name: datadog-agent containerPort: 8282 diff --git a/distribution/kubernetes/vector-stateless-aggregator/README.md b/distribution/kubernetes/vector-stateless-aggregator/README.md index 63c9fcb6eebd3..6507dee0ef8c5 100644 --- a/distribution/kubernetes/vector-stateless-aggregator/README.md +++ 
b/distribution/kubernetes/vector-stateless-aggregator/README.md @@ -1,6 +1,6 @@ The kubernetes manifests found in this directory have been automatically generated from the [helm chart `vector/vector`](https://github.com/vectordotdev/helm-charts/tree/master/charts/vector) -version 0.30.0 with the following `values.yaml`: +version 0.32.0 with the following `values.yaml`: ```yaml role: Stateless-Aggregator diff --git a/distribution/kubernetes/vector-stateless-aggregator/configmap.yaml b/distribution/kubernetes/vector-stateless-aggregator/configmap.yaml index 5fd55085636cd..180fe30cf8cdd 100644 --- a/distribution/kubernetes/vector-stateless-aggregator/configmap.yaml +++ b/distribution/kubernetes/vector-stateless-aggregator/configmap.yaml @@ -8,7 +8,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Stateless-Aggregator - app.kubernetes.io/version: "0.35.0-distroless-libc" + app.kubernetes.io/version: "0.37.0-distroless-libc" data: aggregator.yaml: | data_dir: /vector-data-dir diff --git a/distribution/kubernetes/vector-stateless-aggregator/deployment.yaml b/distribution/kubernetes/vector-stateless-aggregator/deployment.yaml index 587e5771c7c58..2f639b4d4a87a 100644 --- a/distribution/kubernetes/vector-stateless-aggregator/deployment.yaml +++ b/distribution/kubernetes/vector-stateless-aggregator/deployment.yaml @@ -8,7 +8,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Stateless-Aggregator - app.kubernetes.io/version: "0.35.0-distroless-libc" + app.kubernetes.io/version: "0.37.0-distroless-libc" annotations: {} spec: replicas: 1 @@ -17,6 +17,7 @@ spec: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Stateless-Aggregator + minReadySeconds: 0 template: metadata: annotations: {} @@ -30,12 +31,14 @@ spec: dnsPolicy: ClusterFirst containers: - name: vector - image: "timberio/vector:0.35.0-distroless-libc" + image: "timberio/vector:0.37.0-distroless-libc" imagePullPolicy: IfNotPresent args: - --config-dir - /etc/vector/ env: + - name: VECTOR_LOG + value: "info" ports: - name: datadog-agent containerPort: 8282 diff --git a/distribution/kubernetes/vector-stateless-aggregator/service-headless.yaml b/distribution/kubernetes/vector-stateless-aggregator/service-headless.yaml index 4c6cd4476db5a..6c828782d4989 100644 --- a/distribution/kubernetes/vector-stateless-aggregator/service-headless.yaml +++ b/distribution/kubernetes/vector-stateless-aggregator/service-headless.yaml @@ -8,7 +8,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Stateless-Aggregator - app.kubernetes.io/version: "0.35.0-distroless-libc" + app.kubernetes.io/version: "0.37.0-distroless-libc" annotations: spec: clusterIP: None diff --git a/distribution/kubernetes/vector-stateless-aggregator/service.yaml b/distribution/kubernetes/vector-stateless-aggregator/service.yaml index 39b7bb4aad0b0..513a265b7209b 100644 --- a/distribution/kubernetes/vector-stateless-aggregator/service.yaml +++ b/distribution/kubernetes/vector-stateless-aggregator/service.yaml @@ -8,7 +8,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Stateless-Aggregator - app.kubernetes.io/version: "0.35.0-distroless-libc" + app.kubernetes.io/version: "0.37.0-distroless-libc" annotations: spec: ports: diff --git a/distribution/kubernetes/vector-stateless-aggregator/serviceaccount.yaml 
b/distribution/kubernetes/vector-stateless-aggregator/serviceaccount.yaml index 118ff15855b4f..27d4c6c81590c 100644 --- a/distribution/kubernetes/vector-stateless-aggregator/serviceaccount.yaml +++ b/distribution/kubernetes/vector-stateless-aggregator/serviceaccount.yaml @@ -8,5 +8,5 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Stateless-Aggregator - app.kubernetes.io/version: "0.35.0-distroless-libc" + app.kubernetes.io/version: "0.37.0-distroless-libc" automountServiceAccountToken: true diff --git a/docs/DEPRECATIONS.md b/docs/DEPRECATIONS.md index 019a262d706bd..a18ed62e132a8 100644 --- a/docs/DEPRECATIONS.md +++ b/docs/DEPRECATIONS.md @@ -16,7 +16,7 @@ For example: ## To be migrated -- v0.37.0 strict_env_vars Change the default for missing environment variable interpolation from - warning to erroring. - ## To be removed + +- v0.38.0 strict_env_vars Remove option for configuring missing environment variable interpolation + to be a warning rather than an error diff --git a/docs/DOCUMENTING.md b/docs/DOCUMENTING.md index 106106592a1ff..c84b6fa373d6a 100644 --- a/docs/DOCUMENTING.md +++ b/docs/DOCUMENTING.md @@ -61,7 +61,7 @@ Much of Vector's reference documentation is automatically compiled from source c To regenerate this content, run: ```bash -cargo vdev build component-docs +make generate-component-docs ``` ### Formatting diff --git a/docs/README.md b/docs/README.md index 68b4476de2ea1..cb3ed925a1158 100644 --- a/docs/README.md +++ b/docs/README.md @@ -10,7 +10,7 @@ Whether you're a Vector team member, or an outside contributor, this is the best place to start. This folder contains internal documentation to help with the development of Vector and ensuring your change gets approved in a timely manner. -1. **[CONTRIBUTING.md](CONTRIBUTING.md)** - Start here, contributor basics and workflow +1. **[CONTRIBUTING.md](../CONTRIBUTING.md)** - Start here, contributor basics and workflow 2. **[DEVELOPING.md](DEVELOPING.md)** - Everything necessary to develop 3. 
**[DOCUMENTING.md](DOCUMENTING.md)** - Preparing your change for Vector users diff --git a/lib/codecs/Cargo.toml b/lib/codecs/Cargo.toml index 66a89c8f36368..786c308ba5a77 100644 --- a/lib/codecs/Cargo.toml +++ b/lib/codecs/Cargo.toml @@ -20,10 +20,10 @@ lookup = { package = "vector-lookup", path = "../vector-lookup", default-feature memchr = { version = "2", default-features = false } once_cell = { version = "1.19", default-features = false } ordered-float = { version = "4.2.0", default-features = false } -prost = { version = "0.12.3", default-features = false, features = ["std"] } -prost-reflect = { version = "0.12", default-features = false, features = ["serde"] } -regex = { version = "1.10.3", default-features = false, features = ["std", "perf"] } -serde = { version = "1", default-features = false, features = ["derive"] } +prost = { version = "0.12.4", default-features = false, features = ["std"] } +prost-reflect = { version = "0.13", default-features = false, features = ["serde"] } +regex = { version = "1.10.4", default-features = false, features = ["std", "perf"] } +serde.workspace = true serde_json.workspace = true smallvec = { version = "1", default-features = false, features = ["union"] } snafu = { version = "0.7.5", default-features = false, features = ["futures"] } @@ -35,16 +35,16 @@ vector-common = { path = "../vector-common", default-features = false } vector-config = { path = "../vector-config", default-features = false } vector-config-common = { path = "../vector-config-common", default-features = false } vector-config-macros = { path = "../vector-config-macros", default-features = false } -vector-core = { path = "../vector-core", default-features = false } +vector-core = { path = "../vector-core", default-features = false, features = ["vrl"] } [dev-dependencies] futures = { version = "0.3", default-features = false } indoc = { version = "2", default-features = false } tokio = { version = "1", features = ["test-util"] } similar-asserts = "1.5.0" -vector-core = { path = "../vector-core", default-features = false, features = ["test"] } +vector-core = { path = "../vector-core", default-features = false, features = ["vrl", "test"] } uuid = { version = "1", default-features = false, features = ["serde", "v4"] } -rstest = "0.18.2" +rstest = "0.19.0" vrl.workspace = true [features] diff --git a/lib/codecs/src/common/length_delimited.rs b/lib/codecs/src/common/length_delimited.rs new file mode 100644 index 0000000000000..ae211beb88d43 --- /dev/null +++ b/lib/codecs/src/common/length_delimited.rs @@ -0,0 +1,66 @@ +use tokio_util::codec::LengthDelimitedCodec; +use vector_config::configurable_component; + +/// Options for building a `LengthDelimitedDecoder` or `LengthDelimitedEncoder`. 
+#[configurable_component] +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct LengthDelimitedCoderOptions { + /// Maximum frame length + #[serde(default = "default_max_frame_length")] + pub max_frame_length: usize, + + /// Number of bytes representing the field length + #[serde(default = "default_length_field_length")] + pub length_field_length: usize, + + /// Number of bytes in the header before the length field + #[serde(default = "default_length_field_offset")] + pub length_field_offset: usize, + + /// Length field byte order (little or big endian) + #[serde(default = "default_length_field_is_big_endian")] + pub length_field_is_big_endian: bool, +} + +const fn default_max_frame_length() -> usize { + 8 * 1_024 * 1_024 +} + +const fn default_length_field_length() -> usize { + 4 +} + +const fn default_length_field_offset() -> usize { + 0 +} + +const fn default_length_field_is_big_endian() -> bool { + true +} + +impl Default for LengthDelimitedCoderOptions { + fn default() -> Self { + Self { + max_frame_length: default_max_frame_length(), + length_field_length: default_length_field_length(), + length_field_offset: default_length_field_offset(), + length_field_is_big_endian: default_length_field_is_big_endian(), + } + } +} + +impl LengthDelimitedCoderOptions { + pub fn build_codec(&self) -> LengthDelimitedCodec { + let mut builder = tokio_util::codec::LengthDelimitedCodec::builder() + .length_field_length(self.length_field_length) + .length_field_offset(self.length_field_offset) + .max_frame_length(self.max_frame_length) + .to_owned(); + if self.length_field_is_big_endian { + builder.big_endian(); + } else { + builder.little_endian(); + }; + builder.new_codec() + } +} diff --git a/lib/codecs/src/common/mod.rs b/lib/codecs/src/common/mod.rs index 230f3b31d2f97..e2d14804f7d3b 100644 --- a/lib/codecs/src/common/mod.rs +++ b/lib/codecs/src/common/mod.rs @@ -1,3 +1,3 @@ //! A collection of common utility features used by both encoding and decoding logic. -pub mod protobuf; +pub mod length_delimited; diff --git a/lib/codecs/src/common/protobuf.rs b/lib/codecs/src/common/protobuf.rs deleted file mode 100644 index 5b557deb3e0a5..0000000000000 --- a/lib/codecs/src/common/protobuf.rs +++ /dev/null @@ -1,36 +0,0 @@ -use prost_reflect::{DescriptorPool, MessageDescriptor}; -use std::path::Path; - -/// Load a `MessageDescriptor` from a specific message type from the given descriptor set file. 
-/// -/// The path should point to the output of `protoc -o ...` -pub fn get_message_descriptor( - descriptor_set_path: &Path, - message_type: &str, -) -> vector_common::Result<MessageDescriptor> { - let b = std::fs::read(descriptor_set_path).map_err(|e| { - format!("Failed to open protobuf desc file '{descriptor_set_path:?}': {e}",) - })?; - let pool = DescriptorPool::decode(b.as_slice()).map_err(|e| { - format!("Failed to parse protobuf desc file '{descriptor_set_path:?}': {e}") - })?; - pool.get_message_by_name(message_type).ok_or_else(|| { - format!("The message type '{message_type}' could not be found in '{descriptor_set_path:?}'") - .into() - }) -} - -#[cfg(test)] -mod tests { - use super::*; - use std::path::PathBuf; - - #[test] - fn test_get_message_descriptor() { - let path = PathBuf::from(std::env::var_os("CARGO_MANIFEST_DIR").unwrap()) - .join("tests/data/protobuf/protos/test.desc"); - let message_descriptor = get_message_descriptor(&path, "test.Integers").unwrap(); - assert_eq!("Integers", message_descriptor.name()); - assert_eq!(4, message_descriptor.fields().count()); - } -} diff --git a/lib/codecs/src/decoding/format/gelf.rs b/lib/codecs/src/decoding/format/gelf.rs index 578d01f0ffc99..66613cee9c679 100644 --- a/lib/codecs/src/decoding/format/gelf.rs +++ b/lib/codecs/src/decoding/format/gelf.rs @@ -1,5 +1,5 @@ use bytes::Bytes; -use chrono::{NaiveDateTime, Utc}; +use chrono::{DateTime, Utc}; use derivative::Derivative; use lookup::{event_path, owned_value_path}; use serde::{Deserialize, Serialize}; @@ -133,12 +133,12 @@ impl GelfDeserializer { if let Some(timestamp_key) = log_schema().timestamp_key_target_path() { if let Some(timestamp) = parsed.timestamp { - let naive = NaiveDateTime::from_timestamp_opt( + let dt = DateTime::from_timestamp( f64::trunc(timestamp) as i64, f64::fract(timestamp) as u32, ) .expect("invalid timestamp"); - log.insert(timestamp_key, naive.and_utc()); + log.insert(timestamp_key, dt); // per GELF spec- add timestamp if not provided } else { log.insert(timestamp_key, Utc::now()); @@ -239,7 +239,6 @@ impl Deserializer for GelfDeserializer { mod tests { use super::*; use bytes::Bytes; - use chrono::NaiveDateTime; use lookup::event_path; use serde_json::json; use similar_asserts::assert_eq; @@ -303,8 +302,8 @@ mod tests { ))) ); // Vector does not use the nanos - let naive = NaiveDateTime::from_timestamp_opt(1385053862, 0).expect("invalid timestamp"); - assert_eq!(log.get(TIMESTAMP), Some(&Value::Timestamp(naive.and_utc()))); + let dt = DateTime::from_timestamp(1385053862, 0).expect("invalid timestamp"); + assert_eq!(log.get(TIMESTAMP), Some(&Value::Timestamp(dt))); assert_eq!(log.get(LEVEL), Some(&Value::Integer(1))); assert_eq!( log.get(FACILITY), diff --git a/lib/codecs/src/decoding/format/mod.rs b/lib/codecs/src/decoding/format/mod.rs index 1ba45a1b899de..c46b87b3ca3f4 100644 --- a/lib/codecs/src/decoding/format/mod.rs +++ b/lib/codecs/src/decoding/format/mod.rs @@ -12,6 +12,7 @@ mod native_json; mod protobuf; #[cfg(feature = "syslog")] mod syslog; +mod vrl; use ::bytes::Bytes; pub use avro::{AvroDeserializer, AvroDeserializerConfig, AvroDeserializerOptions}; @@ -31,6 +32,8 @@ use vector_core::event::Event; pub use self::bytes::{BytesDeserializer, BytesDeserializerConfig}; +pub use self::vrl::{VrlDeserializer, VrlDeserializerConfig, VrlDeserializerOptions}; + /// Parse structured events from bytes. pub trait Deserializer: DynClone + Send + Sync { /// Parses structured events from bytes.
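The `decoding/format/mod.rs` hunk above registers the new `vrl` deserializer; its implementation and the `DeserializerConfig::Vrl` wiring appear further down in this patch. As a minimal sketch of how the codec could be enabled once this lands, assuming the standard `decoding.codec`/`decoding.vrl` layout implied by `VrlDeserializerConfig` (the `demo_logs` source and the VRL program here are illustrative only, not part of this diff):

```yaml
sources:
  in:
    type: demo_logs
    format: json
    decoding:
      # Hypothetical usage of the VRL decoder introduced in this patch.
      codec: vrl
      vrl:
        # The final contents of `.` become the decoded event.
        source: |
          . = parse_json!(string!(.))
        # Optional; falls back to `local` when unset.
        timezone: UTC
```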
diff --git a/lib/codecs/src/decoding/format/protobuf.rs b/lib/codecs/src/decoding/format/protobuf.rs index 4f83432ebe7f4..42f8665891dab 100644 --- a/lib/codecs/src/decoding/format/protobuf.rs +++ b/lib/codecs/src/decoding/format/protobuf.rs @@ -3,8 +3,7 @@ use std::path::PathBuf; use bytes::Bytes; use chrono::Utc; use derivative::Derivative; -use ordered_float::NotNan; -use prost_reflect::{DynamicMessage, MessageDescriptor, ReflectMessage}; +use prost_reflect::{DynamicMessage, MessageDescriptor}; use smallvec::{smallvec, SmallVec}; use vector_config::configurable_component; use vector_core::event::LogEvent; @@ -13,9 +12,7 @@ use vector_core::{ event::Event, schema, }; -use vrl::value::{Kind, ObjectMap}; - -use crate::common::protobuf::get_message_descriptor; +use vrl::value::Kind; use super::Deserializer; @@ -98,7 +95,8 @@ impl Deserializer for ProtobufDeserializer { let dynamic_message = DynamicMessage::decode(self.message_descriptor.clone(), bytes) .map_err(|error| format!("Error parsing protobuf: {:?}", error))?; - let proto_vrl = to_vrl(&prost_reflect::Value::Message(dynamic_message), None)?; + let proto_vrl = + vrl::protobuf::proto_to_value(&prost_reflect::Value::Message(dynamic_message), None)?; let mut event = Event::Log(LogEvent::from(proto_vrl)); let event = match log_namespace { LogNamespace::Vector => event, @@ -121,101 +119,14 @@ impl TryFrom<&ProtobufDeserializerConfig> for ProtobufDeserializer { type Error = vector_common::Error; fn try_from(config: &ProtobufDeserializerConfig) -> vector_common::Result<Self> { - let message_descriptor = - get_message_descriptor(&config.protobuf.desc_file, &config.protobuf.message_type)?; + let message_descriptor = vrl::protobuf::get_message_descriptor( + &config.protobuf.desc_file, + &config.protobuf.message_type, + )?; Ok(Self::new(message_descriptor)) } } -fn to_vrl( - prost_reflect_value: &prost_reflect::Value, - field_descriptor: Option<&prost_reflect::FieldDescriptor>, -) -> vector_common::Result<vrl::value::Value> { - let vrl_value = match prost_reflect_value { - prost_reflect::Value::Bool(v) => vrl::value::Value::from(*v), - prost_reflect::Value::I32(v) => vrl::value::Value::from(*v), - prost_reflect::Value::I64(v) => vrl::value::Value::from(*v), - prost_reflect::Value::U32(v) => vrl::value::Value::from(*v), - prost_reflect::Value::U64(v) => vrl::value::Value::from(*v), - prost_reflect::Value::F32(v) => vrl::value::Value::Float( - NotNan::new(f64::from(*v)).map_err(|_e| "Float number cannot be Nan")?, - ), - prost_reflect::Value::F64(v) => { - vrl::value::Value::Float(NotNan::new(*v).map_err(|_e| "F64 number cannot be Nan")?) - } - prost_reflect::Value::String(v) => vrl::value::Value::from(v.as_str()), - prost_reflect::Value::Bytes(v) => vrl::value::Value::from(v.clone()), - prost_reflect::Value::EnumNumber(v) => { - if let Some(field_descriptor) = field_descriptor { - let kind = field_descriptor.kind(); - let enum_desc = kind.as_enum().ok_or_else(|| { - format!( - "Internal error while parsing protobuf enum. Field descriptor: {:?}", - field_descriptor - ) - })?; - vrl::value::Value::from( - enum_desc - .get_value(*v) - .ok_or_else(|| { - format!("The number {} cannot be in '{}'", v, enum_desc.name()) - })? - .name(), - ) - } else { - Err("Expected valid field descriptor")? 
- } - } - prost_reflect::Value::Message(v) => { - let mut obj_map = ObjectMap::new(); - for field_desc in v.descriptor().fields() { - let field_value = v.get_field(&field_desc); - let out = to_vrl(field_value.as_ref(), Some(&field_desc))?; - obj_map.insert(field_desc.name().into(), out); - } - vrl::value::Value::from(obj_map) - } - prost_reflect::Value::List(v) => { - let vec = v - .iter() - .map(|o| to_vrl(o, field_descriptor)) - .collect::<Result<Vec<vrl::value::Value>, vector_common::Error>>()?; - vrl::value::Value::from(vec) - } - prost_reflect::Value::Map(v) => { - if let Some(field_descriptor) = field_descriptor { - let kind = field_descriptor.kind(); - let message_desc = kind.as_message().ok_or_else(|| { - format!( - "Internal error while parsing protobuf field descriptor: {:?}", - field_descriptor - ) - })?; - vrl::value::Value::from( - v.iter() - .map(|kv| { - Ok(( - kv.0.as_str() - .ok_or_else(|| { - format!( - "Internal error while parsing protobuf map. Field descriptor: {:?}", - field_descriptor - ) - })? - .into(), - to_vrl(kv.1, Some(&message_desc.map_entry_value_field()))?, - )) - }) - .collect::<Result<ObjectMap, vector_common::Error>>()?, - ) - } else { - Err("Expected valid field descriptor")? - } - } - }; - Ok(vrl_value) -} - #[cfg(test)] mod tests { // TODO: add test for bad file path & invalid message_type @@ -237,7 +148,8 @@ mod tests { validate_log: fn(&LogEvent), ) { let input = Bytes::from(protobuf_bin_message); - let message_descriptor = get_message_descriptor(&protobuf_desc_path, message_type).unwrap(); + let message_descriptor = + vrl::protobuf::get_message_descriptor(&protobuf_desc_path, message_type).unwrap(); let deserializer = ProtobufDeserializer::new(message_descriptor); for namespace in [LogNamespace::Legacy, LogNamespace::Vector] { @@ -315,7 +227,11 @@ mod tests { let protobuf_desc_path = test_data_dir().join("protos/test_protobuf.desc"); let message_type = "test_protobuf.Person"; let validate_log = |log: &LogEvent| { - assert_eq!(log["name"], "".into()); + // No field will be set. + assert!(!log.contains("name")); + assert!(!log.contains("id")); + assert!(!log.contains("email")); + assert!(!log.contains("phones")); }; parse_and_validate( @@ -329,7 +245,7 @@ #[test] fn deserialize_error_invalid_protobuf() { let input = Bytes::from("{ foo"); - let message_descriptor = get_message_descriptor( + let message_descriptor = vrl::protobuf::get_message_descriptor( &test_data_dir().join("protos/test_protobuf.desc"), "test_protobuf.Person", ) diff --git a/lib/codecs/src/decoding/format/vrl.rs b/lib/codecs/src/decoding/format/vrl.rs new file mode 100644 index 0000000000000..63a127955ee87 --- /dev/null +++ b/lib/codecs/src/decoding/format/vrl.rs @@ -0,0 +1,320 @@ +use crate::decoding::format::Deserializer; +use crate::BytesDeserializerConfig; +use bytes::Bytes; +use derivative::Derivative; +use smallvec::{smallvec, SmallVec}; +use vector_config_macros::configurable_component; +use vector_core::config::{DataType, LogNamespace}; +use vector_core::event::{Event, TargetEvents, VrlTarget}; +use vector_core::{compile_vrl, schema}; +use vrl::compiler::state::ExternalEnv; +use vrl::compiler::{runtime::Runtime, CompileConfig, Program, TimeZone, TypeState}; +use vrl::diagnostic::Formatter; +use vrl::value::Kind; + +/// Config used to build a `VrlDeserializer`. +#[configurable_component] +#[derive(Debug, Clone, Default)] +pub struct VrlDeserializerConfig { + /// VRL-specific decoding options. + pub vrl: VrlDeserializerOptions, +} + +/// VRL-specific decoding options. 
+#[configurable_component] +#[derive(Debug, Clone, PartialEq, Eq, Derivative)] +#[derivative(Default)] +pub struct VrlDeserializerOptions { + /// The [Vector Remap Language][vrl] (VRL) program to execute for each event. + /// Note that the final contents of the `.` target will be used as the decoding result. + /// A compilation error, or use of `abort` in the program, will result in a decoding error. + /// + /// [vrl]: https://vector.dev/docs/reference/vrl + pub source: String, + + /// The name of the timezone to apply to timestamp conversions that do not contain an explicit + /// time zone. The time zone name may be any name in the [TZ database][tz_database], or `local` + /// to indicate system local time. + /// + /// If not set, `local` will be used. + /// + /// [tz_database]: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones + #[serde(default)] + #[configurable(metadata(docs::advanced))] + pub timezone: Option<TimeZone>, +} + +impl VrlDeserializerConfig { + /// Build the `VrlDeserializer` from this configuration. + pub fn build(&self) -> vector_common::Result<VrlDeserializer> { + let state = TypeState { + local: Default::default(), + external: ExternalEnv::default(), + }; + + match compile_vrl( + &self.vrl.source, + &vrl::stdlib::all(), + &state, + CompileConfig::default(), + ) { + Ok(result) => Ok(VrlDeserializer { + program: result.program, + timezone: self.vrl.timezone.unwrap_or(TimeZone::Local), + }), + Err(diagnostics) => Err(Formatter::new(&self.vrl.source, diagnostics) + .to_string() + .into()), + } + } + + /// Return the type of event built by this deserializer. + pub fn output_type(&self) -> DataType { + DataType::Log + } + + /// The schema produced by the deserializer. + pub fn schema_definition(&self, log_namespace: LogNamespace) -> schema::Definition { + match log_namespace { + LogNamespace::Legacy => { + schema::Definition::empty_legacy_namespace().unknown_fields(Kind::any()) + } + LogNamespace::Vector => { + schema::Definition::new_with_default_metadata(Kind::any(), [log_namespace]) + } + } + } +} + +/// Deserializer that builds `Event`s from a byte frame containing logs compatible with VRL. 
+#[derive(Debug, Clone)] +pub struct VrlDeserializer { + program: Program, + timezone: TimeZone, +} + +fn parse_bytes(bytes: Bytes, log_namespace: LogNamespace) -> Event { + let bytes_deserializer = BytesDeserializerConfig::new().build(); + let log_event = bytes_deserializer.parse_single(bytes, log_namespace); + Event::from(log_event) +} + +impl Deserializer for VrlDeserializer { + fn parse( + &self, + bytes: Bytes, + log_namespace: LogNamespace, + ) -> vector_common::Result<SmallVec<[Event; 1]>> { + let event = parse_bytes(bytes, log_namespace); + match self.run_vrl(event, log_namespace) { + Ok(events) => Ok(events), + Err(e) => Err(e), + } + } +} + +impl VrlDeserializer { + fn run_vrl( + &self, + event: Event, + log_namespace: LogNamespace, + ) -> vector_common::Result<SmallVec<[Event; 1]>> { + let mut runtime = Runtime::default(); + let mut target = VrlTarget::new(event, self.program.info(), true); + match runtime.resolve(&mut target, &self.program, &self.timezone) { + Ok(_) => match target.into_events(log_namespace) { + TargetEvents::One(event) => Ok(smallvec![event]), + TargetEvents::Logs(events_iter) => Ok(SmallVec::from_iter(events_iter)), + TargetEvents::Traces(_) => Err("trace targets are not supported".into()), + }, + Err(e) => Err(e.to_string().into()), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use chrono::{DateTime, Utc}; + use indoc::indoc; + use vrl::btreemap; + use vrl::path::OwnedTargetPath; + use vrl::value::Value; + + fn make_decoder(source: &str) -> VrlDeserializer { + VrlDeserializerConfig { + vrl: VrlDeserializerOptions { + source: source.to_string(), + timezone: None, + }, + } + .build() + .expect("Failed to build VrlDeserializer") + } + + #[test] + fn test_json_message() { + let source = indoc!( + r#" + %m1 = "metadata" + . = string!(.) + . = parse_json!(.) + "# + ); + + let decoder = make_decoder(source); + + let log_bytes = Bytes::from(r#"{ "message": "Hello VRL" }"#); + let result = decoder.parse(log_bytes, LogNamespace::Vector).unwrap(); + assert_eq!(result.len(), 1); + let event = result.first().unwrap(); + assert_eq!( + *event.as_log().get(&OwnedTargetPath::event_root()).unwrap(), + btreemap! { "message" => "Hello VRL" }.into() + ); + assert_eq!( + *event + .as_log() + .get(&OwnedTargetPath::metadata_root()) + .unwrap(), + btreemap! { "m1" => "metadata" }.into() + ); + } + + #[test] + fn test_ignored_returned_expression() { + let source = indoc!( + r#" + . = { "a" : 1 } + { "b" : 9 } + "# + ); + + let decoder = make_decoder(source); + + let log_bytes = Bytes::from("some bytes"); + let result = decoder.parse(log_bytes, LogNamespace::Vector).unwrap(); + assert_eq!(result.len(), 1); + let event = result.first().unwrap(); + assert_eq!( + *event.as_log().get(&OwnedTargetPath::event_root()).unwrap(), + btreemap! { "a" => 1 }.into() + ); + } + + #[test] + fn test_multiple_events() { + let source = indoc!(". = [0,1,2]"); + let decoder = make_decoder(source); + let log_bytes = Bytes::from("some bytes"); + let result = decoder.parse(log_bytes, LogNamespace::Vector).unwrap(); + assert_eq!(result.len(), 3); + for (i, event) in result.iter().enumerate() { + assert_eq!( + *event.as_log().get(&OwnedTargetPath::event_root()).unwrap(), + i.into() + ); + } + } + + #[test] + fn test_syslog_and_cef_input() { + let source = indoc!( + r#" + if exists(.message) { + . = string!(.message) + } + . = parse_syslog(.) ?? parse_cef(.) ?? 
null + "# + ); + + let decoder = make_decoder(source); + + // Syslog input + let syslog_bytes = Bytes::from( + "<34>1 2024-02-06T15:04:05.000Z mymachine.example.com su - ID47 - 'su root' failed for user on /dev/pts/8", + ); + let result = decoder.parse(syslog_bytes, LogNamespace::Vector).unwrap(); + assert_eq!(result.len(), 1); + let syslog_event = result.first().unwrap(); + assert_eq!( + *syslog_event + .as_log() + .get(&OwnedTargetPath::event_root()) + .unwrap(), + btreemap! { + "appname" => "su", + "facility" => "auth", + "hostname" => "mymachine.example.com", + "message" => "'su root' failed for user on /dev/pts/8", + "msgid" => "ID47", + "severity" => "crit", + "timestamp" => "2024-02-06T15:04:05Z".parse::<DateTime<Utc>>().unwrap(), + "version" => 1 + } + .into() + ); + + // CEF input + let cef_bytes = Bytes::from("CEF:0|Security|Threat Manager|1.0|100|worm successfully stopped|10|src=10.0.0.1 dst=2.1.2.2 spt=1232"); + let result = decoder.parse(cef_bytes, LogNamespace::Vector).unwrap(); + assert_eq!(result.len(), 1); + let cef_event = result.first().unwrap(); + assert_eq!( + *cef_event + .as_log() + .get(&OwnedTargetPath::event_root()) + .unwrap(), + btreemap! { + "cefVersion" =>"0", + "deviceEventClassId" =>"100", + "deviceProduct" =>"Threat Manager", + "deviceVendor" =>"Security", + "deviceVersion" =>"1.0", + "dst" =>"2.1.2.2", + "name" =>"worm successfully stopped", + "severity" =>"10", + "spt" =>"1232", + "src" =>"10.0.0.1" + } + .into() + ); + let random_bytes = Bytes::from("a|- -| x"); + let result = decoder.parse(random_bytes, LogNamespace::Vector).unwrap(); + let random_event = result.first().unwrap(); + assert_eq!(result.len(), 1); + assert_eq!( + *random_event + .as_log() + .get(&OwnedTargetPath::event_root()) + .unwrap(), + Value::Null + ); + } + + #[test] + fn test_invalid_source() { + let error = VrlDeserializerConfig { + vrl: VrlDeserializerOptions { + source: ". ?".to_string(), + timezone: None, + }, + } + .build() + .unwrap_err() + .to_string(); + assert!(error.contains("error[E203]: syntax error")); + } + + #[test] + fn test_abort() { + let decoder = make_decoder("abort"); + let log_bytes = Bytes::from(r#"{ "message": "Hello VRL" }"#); + let error = decoder + .parse(log_bytes, LogNamespace::Vector) + .unwrap_err() + .to_string(); + assert!(error.contains("aborted")); + } +} diff --git a/lib/codecs/src/decoding/framing/length_delimited.rs b/lib/codecs/src/decoding/framing/length_delimited.rs index 8a98d9778fd8f..b72442e1ce4db 100644 --- a/lib/codecs/src/decoding/framing/length_delimited.rs +++ b/lib/codecs/src/decoding/framing/length_delimited.rs @@ -1,57 +1,43 @@ use bytes::{Bytes, BytesMut}; -use serde::{Deserialize, Serialize}; +use derivative::Derivative; use tokio_util::codec::Decoder; +use vector_config::configurable_component; + +use crate::common::length_delimited::LengthDelimitedCoderOptions; use super::BoxedFramingError; /// Config used to build a `LengthDelimitedDecoder`. -#[derive(Debug, Clone, Default, Deserialize, Serialize)] -pub struct LengthDelimitedDecoderConfig; +#[configurable_component] +#[derive(Debug, Clone, Derivative)] +#[derivative(Default)] +pub struct LengthDelimitedDecoderConfig { + /// Options for the length delimited decoder. + #[serde(skip_serializing_if = "vector_core::serde::is_default")] + pub length_delimited: LengthDelimitedCoderOptions, +} impl LengthDelimitedDecoderConfig { /// Build the `LengthDelimitedDecoder` from this configuration. 
pub fn build(&self) -> LengthDelimitedDecoder { - LengthDelimitedDecoder::new() + LengthDelimitedDecoder::new(&self.length_delimited) } } /// A codec for handling bytes sequences whose length is encoded in a frame head. -/// -/// Currently, this expects a length header in 32-bit MSB by default; options to -/// control the format of the header can be added in the future. -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct LengthDelimitedDecoder(tokio_util::codec::LengthDelimitedCodec); impl LengthDelimitedDecoder { /// Creates a new `LengthDelimitedDecoder`. - pub fn new() -> Self { - Self(tokio_util::codec::LengthDelimitedCodec::new()) + pub fn new(config: &LengthDelimitedCoderOptions) -> Self { + Self(config.build_codec()) } } impl Default for LengthDelimitedDecoder { fn default() -> Self { - Self::new() - } -} - -impl Clone for LengthDelimitedDecoder { - fn clone(&self) -> Self { - // This has been fixed with https://github.com/tokio-rs/tokio/pull/4089, - // however we are blocked on upgrading to a new release of `tokio-util` - // that includes the `Clone` implementation: - // https://github.com/vectordotdev/vector/issues/11257. - // - // This is an awful implementation for `Clone` since it resets the - // internal state. However, it works for our use case because we - // generally only clone a codec that has not been mutated yet. - // - // Ideally, `tokio_util::codec::LengthDelimitedCodec` should implement - // `Clone` and it doesn't look like it was a deliberate decision to - // leave out the implementation. All of its internal fields implement - // `Clone`, so adding an implementation for `Clone` could be contributed - // to the upstream repo easily by adding it to the `derive` macro. - Self::new() + Self(tokio_util::codec::LengthDelimitedCodec::new()) } } @@ -81,7 +67,44 @@ mod tests { #[test] fn decode_frame() { let mut input = BytesMut::from("\x00\x00\x00\x03foo"); - let mut decoder = LengthDelimitedDecoder::new(); + let mut decoder = LengthDelimitedDecoder::default(); + + assert_eq!(decoder.decode(&mut input).unwrap().unwrap(), "foo"); + assert_eq!(decoder.decode(&mut input).unwrap(), None); + } + + #[test] + fn decode_frame_2byte_length() { + let mut input = BytesMut::from("\x00\x03foo"); + let mut decoder = LengthDelimitedDecoder::new(&LengthDelimitedCoderOptions { + length_field_length: 2, + ..Default::default() + }); + + assert_eq!(decoder.decode(&mut input).unwrap().unwrap(), "foo"); + assert_eq!(decoder.decode(&mut input).unwrap(), None); + } + + #[test] + fn decode_frame_little_endian() { + let mut input = BytesMut::from("\x03\x00\x00\x00foo"); + let mut decoder = LengthDelimitedDecoder::new(&LengthDelimitedCoderOptions { + length_field_is_big_endian: false, + ..Default::default() + }); + + assert_eq!(decoder.decode(&mut input).unwrap().unwrap(), "foo"); + assert_eq!(decoder.decode(&mut input).unwrap(), None); + } + + #[test] + fn decode_frame_2byte_length_with_offset() { + let mut input = BytesMut::from("\x00\x00\x00\x03foo"); + let mut decoder = LengthDelimitedDecoder::new(&LengthDelimitedCoderOptions { + length_field_length: 2, + length_field_offset: 2, + ..Default::default() + }); assert_eq!(decoder.decode(&mut input).unwrap().unwrap(), "foo"); assert_eq!(decoder.decode(&mut input).unwrap(), None); @@ -90,7 +113,7 @@ mod tests { #[test] fn decode_frame_ignore_unexpected_eof() { let mut input = BytesMut::from("\x00\x00\x00\x03fo"); - let mut decoder = LengthDelimitedDecoder::new(); + let mut decoder = LengthDelimitedDecoder::default(); assert_eq!(decoder.decode(&mut 
input).unwrap(), None); } @@ -98,7 +121,7 @@ mod tests { #[test] fn decode_frame_ignore_exceeding_bytes_without_header() { let mut input = BytesMut::from("\x00\x00\x00\x03fooo"); - let mut decoder = LengthDelimitedDecoder::new(); + let mut decoder = LengthDelimitedDecoder::default(); assert_eq!(decoder.decode(&mut input).unwrap().unwrap(), "foo"); assert_eq!(decoder.decode(&mut input).unwrap(), None); @@ -107,7 +130,7 @@ mod tests { #[test] fn decode_frame_ignore_missing_header() { let mut input = BytesMut::from("foo"); - let mut decoder = LengthDelimitedDecoder::new(); + let mut decoder = LengthDelimitedDecoder::default(); assert_eq!(decoder.decode(&mut input).unwrap(), None); } @@ -115,7 +138,7 @@ mod tests { #[test] fn decode_frames() { let mut input = BytesMut::from("\x00\x00\x00\x03foo\x00\x00\x00\x03bar"); - let mut decoder = LengthDelimitedDecoder::new(); + let mut decoder = LengthDelimitedDecoder::default(); assert_eq!(decoder.decode(&mut input).unwrap().unwrap(), "foo"); assert_eq!(decoder.decode(&mut input).unwrap().unwrap(), "bar"); @@ -125,7 +148,7 @@ mod tests { #[test] fn decode_eof_frame() { let mut input = BytesMut::from("\x00\x00\x00\x03foo"); - let mut decoder = LengthDelimitedDecoder::new(); + let mut decoder = LengthDelimitedDecoder::default(); assert_eq!(decoder.decode_eof(&mut input).unwrap().unwrap(), "foo"); assert_eq!(decoder.decode_eof(&mut input).unwrap(), None); @@ -134,7 +157,7 @@ mod tests { #[test] fn decode_eof_frame_unexpected_eof() { let mut input = BytesMut::from("\x00\x00\x00\x03fo"); - let mut decoder = LengthDelimitedDecoder::new(); + let mut decoder = LengthDelimitedDecoder::default(); assert!(decoder.decode_eof(&mut input).is_err()); } @@ -142,7 +165,7 @@ mod tests { #[test] fn decode_eof_frame_exceeding_bytes_without_header() { let mut input = BytesMut::from("\x00\x00\x00\x03fooo"); - let mut decoder = LengthDelimitedDecoder::new(); + let mut decoder = LengthDelimitedDecoder::default(); assert_eq!(decoder.decode_eof(&mut input).unwrap().unwrap(), "foo"); assert!(decoder.decode_eof(&mut input).is_err()); @@ -151,7 +174,7 @@ mod tests { #[test] fn decode_eof_frame_missing_header() { let mut input = BytesMut::from("foo"); - let mut decoder = LengthDelimitedDecoder::new(); + let mut decoder = LengthDelimitedDecoder::default(); assert!(decoder.decode_eof(&mut input).is_err()); } @@ -159,7 +182,7 @@ mod tests { #[test] fn decode_eof_frames() { let mut input = BytesMut::from("\x00\x00\x00\x03foo\x00\x00\x00\x03bar"); - let mut decoder = LengthDelimitedDecoder::new(); + let mut decoder = LengthDelimitedDecoder::default(); assert_eq!(decoder.decode_eof(&mut input).unwrap().unwrap(), "foo"); assert_eq!(decoder.decode_eof(&mut input).unwrap().unwrap(), "bar"); diff --git a/lib/codecs/src/decoding/mod.rs b/lib/codecs/src/decoding/mod.rs index ba3fdfde4ec9d..2db0dfcfcb327 100644 --- a/lib/codecs/src/decoding/mod.rs +++ b/lib/codecs/src/decoding/mod.rs @@ -5,6 +5,7 @@ mod error; pub mod format; pub mod framing; +use crate::decoding::format::{VrlDeserializer, VrlDeserializerConfig}; use bytes::{Bytes, BytesMut}; pub use error::StreamDecodingError; pub use format::{ @@ -88,7 +89,7 @@ pub enum FramingConfig { CharacterDelimited(CharacterDelimitedDecoderConfig), /// Byte frames which are prefixed by an unsigned big-endian 32-bit integer indicating the length. - LengthDelimited, + LengthDelimited(LengthDelimitedDecoderConfig), /// Byte frames which are delimited by a newline character. 
NewlineDelimited(NewlineDelimitedDecoderConfig), @@ -112,8 +113,8 @@ impl From for FramingConfig { } impl From<LengthDelimitedDecoderConfig> for FramingConfig { - fn from(_: LengthDelimitedDecoderConfig) -> Self { - Self::LengthDelimited + fn from(config: LengthDelimitedDecoderConfig) -> Self { + Self::LengthDelimited(config) } } @@ -135,9 +136,7 @@ match self { FramingConfig::Bytes => Framer::Bytes(BytesDecoderConfig.build()), FramingConfig::CharacterDelimited(config) => Framer::CharacterDelimited(config.build()), - FramingConfig::LengthDelimited => { - Framer::LengthDelimited(LengthDelimitedDecoderConfig.build()) - } + FramingConfig::LengthDelimited(config) => Framer::LengthDelimited(config.build()), FramingConfig::NewlineDelimited(config) => Framer::NewlineDelimited(config.build()), FramingConfig::OctetCounting(config) => Framer::OctetCounting(config.build()), } @@ -260,6 +259,11 @@ pub enum DeserializerConfig { /// Apache Avro-specific encoder options. avro: AvroDeserializerOptions, }, + + /// Decodes the raw bytes as a string and passes them as input to a [VRL][vrl] program. + /// + /// [vrl]: https://vector.dev/docs/reference/vrl + Vrl(VrlDeserializerConfig), } impl From for DeserializerConfig { @@ -319,6 +323,7 @@ } DeserializerConfig::NativeJson(config) => Ok(Deserializer::NativeJson(config.build())), DeserializerConfig::Gelf(config) => Ok(Deserializer::Gelf(config.build())), + DeserializerConfig::Vrl(config) => Ok(Deserializer::Vrl(config.build()?)), } } @@ -326,7 +331,7 @@ pub fn default_stream_framing(&self) -> FramingConfig { match self { DeserializerConfig::Avro { .. } => FramingConfig::Bytes, - DeserializerConfig::Native => FramingConfig::LengthDelimited, + DeserializerConfig::Native => FramingConfig::LengthDelimited(Default::default()), DeserializerConfig::Bytes | DeserializerConfig::Json(_) | DeserializerConfig::Gelf(_) @@ -336,6 +341,7 @@ DeserializerConfig::Protobuf(_) => FramingConfig::Bytes, #[cfg(feature = "syslog")] DeserializerConfig::Syslog(_) => FramingConfig::NewlineDelimited(Default::default()), + DeserializerConfig::Vrl(_) => FramingConfig::Bytes, } } @@ -354,6 +360,7 @@ DeserializerConfig::Native => NativeDeserializerConfig.output_type(), DeserializerConfig::NativeJson(config) => config.output_type(), DeserializerConfig::Gelf(config) => config.output_type(), + DeserializerConfig::Vrl(config) => config.output_type(), } } @@ -372,6 +379,7 @@ DeserializerConfig::Native => NativeDeserializerConfig.schema_definition(log_namespace), DeserializerConfig::NativeJson(config) => config.schema_definition(log_namespace), DeserializerConfig::Gelf(config) => config.schema_definition(log_namespace), + DeserializerConfig::Vrl(config) => config.schema_definition(log_namespace), } } @@ -402,7 +410,8 @@ DeserializerConfig::Json(_) | DeserializerConfig::NativeJson(_) | DeserializerConfig::Bytes - | DeserializerConfig::Gelf(_), + | DeserializerConfig::Gelf(_) + | DeserializerConfig::Vrl(_), _, ) => "text/plain", #[cfg(feature = "syslog")] @@ -433,6 +442,8 @@ pub enum Deserializer { Boxed(BoxedDeserializer), /// Uses a `GelfDeserializer` for deserialization. Gelf(GelfDeserializer), + /// Uses a `VrlDeserializer` for deserialization. 
+ Vrl(VrlDeserializer), } impl format::Deserializer for Deserializer { @@ -452,6 +463,7 @@ Deserializer::NativeJson(deserializer) => deserializer.parse(bytes, log_namespace), Deserializer::Boxed(deserializer) => deserializer.parse(bytes, log_namespace), Deserializer::Gelf(deserializer) => deserializer.parse(bytes, log_namespace), + Deserializer::Vrl(deserializer) => deserializer.parse(bytes, log_namespace), } } } diff --git a/lib/codecs/src/encoding/format/protobuf.rs b/lib/codecs/src/encoding/format/protobuf.rs index 6c0fe8eae9e5e..1313124d0bcb7 100644 --- a/lib/codecs/src/encoding/format/protobuf.rs +++ b/lib/codecs/src/encoding/format/protobuf.rs @@ -1,10 +1,7 @@ -use crate::common::protobuf::get_message_descriptor; use crate::encoding::BuildError; use bytes::BytesMut; -use chrono::Timelike; use prost::Message; -use prost_reflect::{DynamicMessage, FieldDescriptor, Kind, MapKey, MessageDescriptor}; -use std::collections::HashMap; +use prost_reflect::MessageDescriptor; use std::path::PathBuf; use tokio_util::codec::Encoder; use vector_core::{ @@ -24,8 +21,10 @@ pub struct ProtobufSerializerConfig { impl ProtobufSerializerConfig { /// Build the `ProtobufSerializer` from this configuration. pub fn build(&self) -> Result<ProtobufSerializer, BuildError> { - let message_descriptor = - get_message_descriptor(&self.protobuf.desc_file, &self.protobuf.message_type)?; + let message_descriptor = vrl::protobuf::get_message_descriptor( + &self.protobuf.desc_file, + &self.protobuf.message_type, + )?; Ok(ProtobufSerializer { message_descriptor }) } @@ -64,133 +63,6 @@ pub struct ProtobufSerializer { message_descriptor: MessageDescriptor, } -/// Convert a single raw vector `Value` into a protobuf `Value`. -/// -/// Unlike `convert_value`, this ignores any field metadata such as cardinality. 
-fn convert_value_raw( - value: Value, - kind: &prost_reflect::Kind, -) -> Result<prost_reflect::Value, vector_common::Error> { - let kind_str = value.kind_str().to_owned(); - match (value, kind) { - (Value::Boolean(b), Kind::Bool) => Ok(prost_reflect::Value::Bool(b)), - (Value::Bytes(b), Kind::Bytes) => Ok(prost_reflect::Value::Bytes(b)), - (Value::Bytes(b), Kind::String) => Ok(prost_reflect::Value::String( - String::from_utf8_lossy(&b).into_owned(), - )), - (Value::Bytes(b), Kind::Enum(descriptor)) => { - let string = String::from_utf8_lossy(&b).into_owned(); - if let Some(d) = descriptor - .values() - .find(|v| v.name().eq_ignore_ascii_case(&string)) - { - Ok(prost_reflect::Value::EnumNumber(d.number())) - } else { - Err(format!( - "Enum `{}` has no value that matches string '{}'", - descriptor.full_name(), - string - ) - .into()) - } - } - (Value::Float(f), Kind::Double) => Ok(prost_reflect::Value::F64(f.into_inner())), - (Value::Float(f), Kind::Float) => Ok(prost_reflect::Value::F32(f.into_inner() as f32)), - (Value::Integer(i), Kind::Int32) => Ok(prost_reflect::Value::I32(i as i32)), - (Value::Integer(i), Kind::Int64) => Ok(prost_reflect::Value::I64(i)), - (Value::Integer(i), Kind::Sint32) => Ok(prost_reflect::Value::I32(i as i32)), - (Value::Integer(i), Kind::Sint64) => Ok(prost_reflect::Value::I64(i)), - (Value::Integer(i), Kind::Sfixed32) => Ok(prost_reflect::Value::I32(i as i32)), - (Value::Integer(i), Kind::Sfixed64) => Ok(prost_reflect::Value::I64(i)), - (Value::Integer(i), Kind::Uint32) => Ok(prost_reflect::Value::U32(i as u32)), - (Value::Integer(i), Kind::Uint64) => Ok(prost_reflect::Value::U64(i as u64)), - (Value::Integer(i), Kind::Fixed32) => Ok(prost_reflect::Value::U32(i as u32)), - (Value::Integer(i), Kind::Fixed64) => Ok(prost_reflect::Value::U64(i as u64)), - (Value::Integer(i), Kind::Enum(_)) => Ok(prost_reflect::Value::EnumNumber(i as i32)), - (Value::Object(o), Kind::Message(message_descriptor)) => { - if message_descriptor.is_map_entry() { - let value_field = message_descriptor - .get_field_by_name("value") - .ok_or("Internal error with proto map processing")?; - let mut map: HashMap<MapKey, prost_reflect::Value> = HashMap::new(); - for (key, val) in o.into_iter() { - match convert_value(&value_field, val) { - Ok(prost_val) => { - map.insert(MapKey::String(key.into()), prost_val); - } - Err(e) => return Err(e), - } - } - Ok(prost_reflect::Value::Map(map)) - } else { - // if it's not a map, it's an actual message - Ok(prost_reflect::Value::Message(encode_message( - message_descriptor, - Value::Object(o), - )?)) - } - } - (Value::Regex(r), Kind::String) => Ok(prost_reflect::Value::String(r.as_str().to_owned())), - (Value::Regex(r), Kind::Bytes) => Ok(prost_reflect::Value::Bytes(r.as_bytes())), - (Value::Timestamp(t), Kind::Int64) => Ok(prost_reflect::Value::I64(t.timestamp_micros())), - (Value::Timestamp(t), Kind::Message(descriptor)) - if descriptor.full_name() == "google.protobuf.Timestamp" => - { - let mut message = DynamicMessage::new(descriptor.clone()); - message.try_set_field_by_name("seconds", prost_reflect::Value::I64(t.timestamp()))?; - message - .try_set_field_by_name("nanos", prost_reflect::Value::I32(t.nanosecond() as i32))?; - Ok(prost_reflect::Value::Message(message)) - } - _ => Err(format!("Cannot encode vector `{kind_str}` into protobuf `{kind:?}`",).into()), - } -} - -/// Convert a vector `Value` into a protobuf `Value`. 
-fn convert_value( - field_descriptor: &FieldDescriptor, - value: Value, -) -> Result<prost_reflect::Value, vector_common::Error> { - if let Value::Array(a) = value { - if field_descriptor.cardinality() == prost_reflect::Cardinality::Repeated { - let repeated: Result<Vec<prost_reflect::Value>, vector_common::Error> = a - .into_iter() - .map(|v| convert_value_raw(v, &field_descriptor.kind())) - .collect(); - Ok(prost_reflect::Value::List(repeated?)) - } else { - Err("Cannot encode vector array into a non-repeated protobuf field".into()) - } - } else { - convert_value_raw(value, &field_descriptor.kind()) - } -} - -/// Convert a vector object (`Value`) into a protobuf message. -/// -/// This function can only operate on `Value::Object`s, -/// since they are the only field-based vector Value -/// and protobuf messages are defined as a collection of fields and values. -fn encode_message( - message_descriptor: &MessageDescriptor, - value: Value, -) -> Result<DynamicMessage, vector_common::Error> { - let mut message = DynamicMessage::new(message_descriptor.clone()); - if let Value::Object(map) = value { - for field in message_descriptor.fields() { - match map.get(field.name()) { - None | Some(Value::Null) => message.clear_field(&field), - Some(value) => { - message.try_set_field(&field, convert_value(&field, value.clone())?)? - } - } - } - Ok(message) - } else { - Err("ProtobufSerializer only supports serializing objects".into()) - } -} - impl ProtobufSerializer { /// Creates a new `ProtobufSerializer`. pub fn new(message_descriptor: MessageDescriptor) -> Self { @@ -208,9 +80,11 @@ impl Encoder<Event> for ProtobufSerializer { fn encode(&mut self, event: Event, buffer: &mut BytesMut) -> Result<(), Self::Error> { let message = match event { - Event::Log(log) => encode_message(&self.message_descriptor, log.into_parts().0), + Event::Log(log) => { + vrl::protobuf::encode_message(&self.message_descriptor, log.into_parts().0) + } Event::Metric(_) => unimplemented!(), - Event::Trace(trace) => encode_message( + Event::Trace(trace) => vrl::protobuf::encode_message( &self.message_descriptor, Value::Object(trace.into_parts().0), ), @@ -218,266 +92,3 @@ message.encode(buffer).map_err(Into::into) } } - -#[cfg(test)] -mod tests { - use super::*; - use bytes::Bytes; - use chrono::{DateTime, NaiveDateTime, Utc}; - use ordered_float::NotNan; - use prost_reflect::MapKey; - use std::collections::{BTreeMap, HashMap}; - - macro_rules! 
mfield { - ($m:expr, $f:expr) => { - $m.get_field_by_name($f).unwrap().into_owned() - }; - } - - fn test_data_dir() -> PathBuf { - PathBuf::from(std::env::var_os("CARGO_MANIFEST_DIR").unwrap()).join("tests/data/protobuf") - } - - fn test_message_descriptor(message_type: &str) -> MessageDescriptor { - let path = PathBuf::from(std::env::var_os("CARGO_MANIFEST_DIR").unwrap()) - .join("tests/data/protobuf/protos/test.desc"); - get_message_descriptor(&path, &format!("test.{message_type}")).unwrap() - } - - #[test] - fn test_config_input_type() { - let config = ProtobufSerializerConfig { - protobuf: ProtobufSerializerOptions { - desc_file: test_data_dir().join("test_protobuf.desc"), - message_type: "test_protobuf.Person".into(), - }, - }; - assert_eq!(config.input_type(), DataType::Log); - } - - #[test] - fn test_encode_integers() { - let message = encode_message( - &test_message_descriptor("Integers"), - Value::Object(BTreeMap::from([ - ("i32".into(), Value::Integer(-1234)), - ("i64".into(), Value::Integer(-9876)), - ("u32".into(), Value::Integer(1234)), - ("u64".into(), Value::Integer(9876)), - ])), - ) - .unwrap(); - assert_eq!(Some(-1234), mfield!(message, "i32").as_i32()); - assert_eq!(Some(-9876), mfield!(message, "i64").as_i64()); - assert_eq!(Some(1234), mfield!(message, "u32").as_u32()); - assert_eq!(Some(9876), mfield!(message, "u64").as_u64()); - } - - #[test] - fn test_encode_floats() { - let message = encode_message( - &test_message_descriptor("Floats"), - Value::Object(BTreeMap::from([ - ("d".into(), Value::Float(NotNan::new(11.0).unwrap())), - ("f".into(), Value::Float(NotNan::new(2.0).unwrap())), - ])), - ) - .unwrap(); - assert_eq!(Some(11.0), mfield!(message, "d").as_f64()); - assert_eq!(Some(2.0), mfield!(message, "f").as_f32()); - } - - #[test] - fn test_encode_bytes() { - let bytes = Bytes::from(vec![0, 1, 2, 3]); - let message = encode_message( - &test_message_descriptor("Bytes"), - Value::Object(BTreeMap::from([ - ("text".into(), Value::Bytes(Bytes::from("vector"))), - ("binary".into(), Value::Bytes(bytes.clone())), - ])), - ) - .unwrap(); - assert_eq!(Some("vector"), mfield!(message, "text").as_str()); - assert_eq!(Some(&bytes), mfield!(message, "binary").as_bytes()); - } - - #[test] - fn test_encode_map() { - let message = encode_message( - &test_message_descriptor("Map"), - Value::Object(BTreeMap::from([ - ( - "names".into(), - Value::Object(BTreeMap::from([ - ("forty-four".into(), Value::Integer(44)), - ("one".into(), Value::Integer(1)), - ])), - ), - ( - "people".into(), - Value::Object(BTreeMap::from([( - "mark".into(), - Value::Object(BTreeMap::from([ - ("nickname".into(), Value::Bytes(Bytes::from("jeff"))), - ("age".into(), Value::Integer(22)), - ])), - )])), - ), - ])), - ) - .unwrap(); - // the simpler string->primitive map - assert_eq!( - Some(&HashMap::from([ - ( - MapKey::String("forty-four".into()), - prost_reflect::Value::I32(44), - ), - (MapKey::String("one".into()), prost_reflect::Value::I32(1),), - ])), - mfield!(message, "names").as_map() - ); - // the not-simpler string->message map - let people = mfield!(message, "people").as_map().unwrap().to_owned(); - assert_eq!(1, people.len()); - assert_eq!( - Some("jeff"), - mfield!( - people[&MapKey::String("mark".into())].as_message().unwrap(), - "nickname" - ) - .as_str() - ); - assert_eq!( - Some(22), - mfield!( - people[&MapKey::String("mark".into())].as_message().unwrap(), - "age" - ) - .as_u32() - ); - } - - #[test] - fn test_encode_enum() { - let message = encode_message( - 
&test_message_descriptor("Enum"), - Value::Object(BTreeMap::from([ - ("breakfast".into(), Value::Bytes(Bytes::from("tomato"))), - ("dinner".into(), Value::Bytes(Bytes::from("OLIVE"))), - ("lunch".into(), Value::Integer(0)), - ])), - ) - .unwrap(); - assert_eq!(Some(2), mfield!(message, "breakfast").as_enum_number()); - assert_eq!(Some(0), mfield!(message, "lunch").as_enum_number()); - assert_eq!(Some(1), mfield!(message, "dinner").as_enum_number()); - } - - #[test] - fn test_encode_timestamp() { - let message = encode_message( - &test_message_descriptor("Timestamp"), - Value::Object(BTreeMap::from([( - "morning".into(), - Value::Timestamp(DateTime::from_naive_utc_and_offset( - NaiveDateTime::from_timestamp_opt(8675, 309).unwrap(), - Utc, - )), - )])), - ) - .unwrap(); - let timestamp = mfield!(message, "morning").as_message().unwrap().clone(); - assert_eq!(Some(8675), mfield!(timestamp, "seconds").as_i64()); - assert_eq!(Some(309), mfield!(timestamp, "nanos").as_i32()); - } - - #[test] - fn test_encode_repeated_primitive() { - let message = encode_message( - &test_message_descriptor("RepeatedPrimitive"), - Value::Object(BTreeMap::from([( - "numbers".into(), - Value::Array(vec![ - Value::Integer(8), - Value::Integer(6), - Value::Integer(4), - ]), - )])), - ) - .unwrap(); - let list = mfield!(message, "numbers").as_list().unwrap().to_vec(); - assert_eq!(3, list.len()); - assert_eq!(Some(8), list[0].as_i64()); - assert_eq!(Some(6), list[1].as_i64()); - assert_eq!(Some(4), list[2].as_i64()); - } - - #[test] - fn test_encode_repeated_message() { - let message = encode_message( - &test_message_descriptor("RepeatedMessage"), - Value::Object(BTreeMap::from([( - "messages".into(), - Value::Array(vec![ - Value::Object(BTreeMap::from([( - "text".into(), - Value::Bytes(Bytes::from("vector")), - )])), - Value::Object(BTreeMap::from([("index".into(), Value::Integer(4444))])), - Value::Object(BTreeMap::from([ - ("text".into(), Value::Bytes(Bytes::from("protobuf"))), - ("index".into(), Value::Integer(1)), - ])), - ]), - )])), - ) - .unwrap(); - let list = mfield!(message, "messages").as_list().unwrap().to_vec(); - assert_eq!(3, list.len()); - assert_eq!( - Some("vector"), - mfield!(list[0].as_message().unwrap(), "text").as_str() - ); - assert!(!list[0].as_message().unwrap().has_field_by_name("index")); - assert!(!list[1].as_message().unwrap().has_field_by_name("t4ext")); - assert_eq!( - Some(4444), - mfield!(list[1].as_message().unwrap(), "index").as_u32() - ); - assert_eq!( - Some("protobuf"), - mfield!(list[2].as_message().unwrap(), "text").as_str() - ); - assert_eq!( - Some(1), - mfield!(list[2].as_message().unwrap(), "index").as_u32() - ); - } - - fn run_encoding_on_decoding_test_data( - filename: &str, - message_type: &str, - ) -> Result { - let protos_dir = PathBuf::from(std::env::var_os("CARGO_MANIFEST_DIR").unwrap()) - .join("tests/data/protobuf/protos"); - let descriptor_set_path = protos_dir.join(filename); - let message_descriptor = - get_message_descriptor(&descriptor_set_path, message_type).unwrap(); - encode_message( - &message_descriptor, - Value::Object(BTreeMap::from([ - ("name".into(), Value::Bytes(Bytes::from("rope"))), - ("id".into(), Value::Integer(9271)), - ])), - ) - } - - #[test] - fn test_encode_decoding_protobuf_test_data() { - // just check for the side-effect of success - run_encoding_on_decoding_test_data("test_protobuf.desc", "test_protobuf.Person").unwrap(); - run_encoding_on_decoding_test_data("test_protobuf3.desc", "test_protobuf3.Person").unwrap(); - } -} diff --git 
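For orientation, a hedged sketch of driving the slimmed-down serializer end to end: the `Encoder` impl and the delegation to `vrl::protobuf::encode_message` come from the hunks above, while the helper below, its panic message, and how the descriptor would be obtained (e.g. via a `get_message_descriptor` helper, as in the tests that moved out of this file) are illustrative assumptions rather than part of this patch.

```rust
use bytes::BytesMut;
use tokio_util::codec::Encoder as _;
use vector_core::event::Event;

// Hypothetical helper: encode one event with a ProtobufSerializer that was
// built from a MessageDescriptor matching the event's fields.
fn encode_one(mut serializer: ProtobufSerializer, event: Event) -> BytesMut {
    let mut buffer = BytesMut::new();
    serializer
        .encode(event, &mut buffer)
        .expect("event fields must match the protobuf message type");
    buffer
}
```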
a/lib/codecs/src/encoding/framing/length_delimited.rs b/lib/codecs/src/encoding/framing/length_delimited.rs index f25cdba4edf7d..1e450073b9496 100644 --- a/lib/codecs/src/encoding/framing/length_delimited.rs +++ b/lib/codecs/src/encoding/framing/length_delimited.rs @@ -1,59 +1,43 @@ use bytes::BytesMut; -use serde::{Deserialize, Serialize}; +use derivative::Derivative; use tokio_util::codec::{Encoder, LengthDelimitedCodec}; +use vector_config::configurable_component; + +use crate::common::length_delimited::LengthDelimitedCoderOptions; use super::BoxedFramingError; /// Config used to build a `LengthDelimitedEncoder`. -#[derive(Debug, Clone, Deserialize, Serialize)] -pub struct LengthDelimitedEncoderConfig; +#[configurable_component] +#[derive(Debug, Clone, Derivative, Eq, PartialEq)] +#[derivative(Default)] +pub struct LengthDelimitedEncoderConfig { + /// Options for the length delimited decoder. + #[serde(skip_serializing_if = "vector_core::serde::is_default")] + pub length_delimited: LengthDelimitedCoderOptions, +} impl LengthDelimitedEncoderConfig { - /// Creates a `LengthDelimitedEncoderConfig`. - pub const fn new() -> Self { - Self - } - /// Build the `LengthDelimitedEncoder` from this configuration. pub fn build(&self) -> LengthDelimitedEncoder { - LengthDelimitedEncoder::new() + LengthDelimitedEncoder::new(&self.length_delimited) } } /// An encoder for handling bytes that are delimited by a length header. -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct LengthDelimitedEncoder(LengthDelimitedCodec); impl LengthDelimitedEncoder { - /// Creates a `LengthDelimitedEncoder`. - pub fn new() -> Self { - Self(LengthDelimitedCodec::new()) + /// Creates a new `LengthDelimitedEncoder`. + pub fn new(config: &LengthDelimitedCoderOptions) -> Self { + Self(config.build_codec()) } } impl Default for LengthDelimitedEncoder { fn default() -> Self { - Self::new() - } -} - -impl Clone for LengthDelimitedEncoder { - fn clone(&self) -> Self { - // This has been fixed with https://github.com/tokio-rs/tokio/pull/4089, - // however we are blocked on upgrading to a new release of `tokio-util` - // that includes the `Clone` implementation: - // https://github.com/vectordotdev/vector/issues/11257. - // - // This is an awful implementation for `Clone` since it resets the - // internal state. However, it works for our use case because we - // generally only clone a codec that has not been mutated yet. - // - // Ideally, `tokio_util::codec::LengthDelimitedCodec` should implement - // `Clone` and it doesn't look like it was a deliberate decision to - // leave out the implementation. All of its internal fields implement - // `Clone`, so adding an implementation for `Clone` could be contributed - // to the upstream repo easily by adding it to the `derive` macro. 
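The hand-rolled `Clone` removed here was a workaround; `tokio-util` now derives `Clone` for `LengthDelimitedCodec`, so the codec can be cloned directly. A minimal sketch of the kind of construction the shared `LengthDelimitedCoderOptions::build_codec` implies, assuming the length-field width is the tunable in play (the builder calls are real `tokio-util` API):

```rust
use tokio_util::codec::LengthDelimitedCodec;

// A 4-byte length field yields a big-endian u32 prefix ("abc" -> b"\0\0\0\x03abc");
// a 2-byte field yields b"\0\x03abc", as the new encode_2byte_length test shows.
fn build_codec(length_field_length: usize) -> LengthDelimitedCodec {
    LengthDelimitedCodec::builder()
        .length_field_length(length_field_length)
        .new_codec()
}
```

Note that the prefix is big-endian by default, which matches the byte expectations in these tests; the `FramingConfig::LengthDelimited` doc comment in `encoding/mod.rs` ("little endian") looks stale by comparison.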
- Self::new() + Self(LengthDelimitedCodec::new()) } } @@ -73,11 +57,24 @@ mod tests { #[test] fn encode() { - let mut codec = LengthDelimitedEncoder::new(); + let mut codec = LengthDelimitedEncoder::default(); let mut buffer = BytesMut::from("abc"); codec.encode((), &mut buffer).unwrap(); assert_eq!(&buffer[..], b"\0\0\0\x03abc"); } + + #[test] + fn encode_2byte_length() { + let mut codec = LengthDelimitedEncoder::new(&LengthDelimitedCoderOptions { + length_field_length: 2, + ..Default::default() + }); + + let mut buffer = BytesMut::from("abc"); + codec.encode((), &mut buffer).unwrap(); + + assert_eq!(&buffer[..], b"\0\x03abc"); + } } diff --git a/lib/codecs/src/encoding/mod.rs b/lib/codecs/src/encoding/mod.rs index 2f28c27ec7bf8..0d766f73e4d17 100644 --- a/lib/codecs/src/encoding/mod.rs +++ b/lib/codecs/src/encoding/mod.rs @@ -67,7 +67,7 @@ pub enum FramingConfig { /// Event data is prefixed with its length in bytes. /// /// The prefix is a 32-bit unsigned integer, little endian. - LengthDelimited, + LengthDelimited(LengthDelimitedEncoderConfig), /// Event data is delimited by a newline (LF) character. NewlineDelimited, @@ -86,8 +86,8 @@ impl From for FramingConfig { } impl From for FramingConfig { - fn from(_: LengthDelimitedEncoderConfig) -> Self { - Self::LengthDelimited + fn from(config: LengthDelimitedEncoderConfig) -> Self { + Self::LengthDelimited(config) } } @@ -103,9 +103,7 @@ impl FramingConfig { match self { FramingConfig::Bytes => Framer::Bytes(BytesEncoderConfig.build()), FramingConfig::CharacterDelimited(config) => Framer::CharacterDelimited(config.build()), - FramingConfig::LengthDelimited => { - Framer::LengthDelimited(LengthDelimitedEncoderConfig.build()) - } + FramingConfig::LengthDelimited(config) => Framer::LengthDelimited(config.build()), FramingConfig::NewlineDelimited => { Framer::NewlineDelimited(NewlineDelimitedEncoderConfig.build()) } @@ -360,7 +358,9 @@ impl SerializerConfig { // [1]: https://avro.apache.org/docs/1.11.1/specification/_print/#message-framing SerializerConfig::Avro { .. } | SerializerConfig::Native - | SerializerConfig::Protobuf(_) => FramingConfig::LengthDelimited, + | SerializerConfig::Protobuf(_) => { + FramingConfig::LengthDelimited(LengthDelimitedEncoderConfig::default()) + } SerializerConfig::Csv(_) | SerializerConfig::Gelf | SerializerConfig::Json(_) diff --git a/lib/codecs/tests/data/protobuf/protos/test.desc b/lib/codecs/tests/data/protobuf/protos/test.desc deleted file mode 100644 index f12bfa7d889b8..0000000000000 Binary files a/lib/codecs/tests/data/protobuf/protos/test.desc and /dev/null differ diff --git a/lib/codecs/tests/data/protobuf/protos/test.proto b/lib/codecs/tests/data/protobuf/protos/test.proto deleted file mode 100644 index 8e3275b7e5394..0000000000000 --- a/lib/codecs/tests/data/protobuf/protos/test.proto +++ /dev/null @@ -1,61 +0,0 @@ -// Remember to recompile `test.desc` when you update this file: -// protoc -I . 
-o test.desc test.proto google/protobuf/timestamp.proto - -syntax = "proto3"; - -package test; - -import "google/protobuf/timestamp.proto"; - -message Integers { - int32 i32 = 1; - int64 i64 = 2; - uint32 u32 = 3; - uint64 u64 = 4; -} - -message Floats { - double d = 1; - float f = 2; -} - -message Bytes { - string text = 1; - bytes binary = 2; -} - -message Map { - message Person { - string nickname = 1; - uint32 age = 2; - }; - map names = 1; - map people = 2; -} - -message Enum { - enum Fruit { - APPLE = 0; - OLIVE = 1; - TOMATO = 2; - } - Fruit breakfast = 1; - Fruit lunch = 2; - Fruit dinner = 3; -} - -message Timestamp { - google.protobuf.Timestamp morning = 1; -} - -message RepeatedPrimitive { - repeated int64 numbers = 1; -} - -message RepeatedMessage { - message EmbeddedMessage { - optional string text = 1; - optional uint32 index = 2; - } - repeated EmbeddedMessage messages = 1; -} diff --git a/lib/dnsmsg-parser/src/dns_message.rs b/lib/dnsmsg-parser/src/dns_message.rs index 58f949cb3ed8b..d22bff280631d 100644 --- a/lib/dnsmsg-parser/src/dns_message.rs +++ b/lib/dnsmsg-parser/src/dns_message.rs @@ -1,10 +1,11 @@ use hickory_proto::op::ResponseCode; +use crate::ede::EDE; + pub(super) const RTYPE_MB: u16 = 7; pub(super) const RTYPE_MG: u16 = 8; pub(super) const RTYPE_MR: u16 = 9; pub(super) const RTYPE_WKS: u16 = 11; -pub(super) const RTYPE_HINFO: u16 = 13; pub(super) const RTYPE_MINFO: u16 = 14; pub(super) const RTYPE_RP: u16 = 17; pub(super) const RTYPE_AFSDB: u16 = 18; @@ -81,6 +82,7 @@ pub struct OptPseudoSection { pub version: u8, pub dnssec_ok: bool, pub udp_max_payload_size: u16, + pub ede: Vec, pub options: Vec, } diff --git a/lib/dnsmsg-parser/src/dns_message_parser.rs b/lib/dnsmsg-parser/src/dns_message_parser.rs index 20907de4b286c..64b67be17621d 100644 --- a/lib/dnsmsg-parser/src/dns_message_parser.rs +++ b/lib/dnsmsg-parser/src/dns_message_parser.rs @@ -1,16 +1,19 @@ -use std::fmt::Write as _; use std::str::Utf8Error; +use std::{fmt::Write as _, ops::Deref}; use data_encoding::{BASE32HEX_NOPAD, BASE64, HEXUPPER}; use hickory_proto::{ error::ProtoError, - op::{message::Message as TrustDnsMessage, Edns, Query}, + op::{message::Message as TrustDnsMessage, Query}, rr::{ - dnssec::{rdata::DNSSECRData, Algorithm, SupportedAlgorithms}, + dnssec::{ + rdata::{DNSSECRData, DNSKEY, DS}, + Algorithm, SupportedAlgorithms, + }, rdata::{ caa::Value, opt::{EdnsCode, EdnsOption}, - A, AAAA, NULL, + A, AAAA, NULL, OPT, SVCB, }, record_data::RData, resource::Record, @@ -20,6 +23,8 @@ use hickory_proto::{ }; use thiserror::Error; +use crate::ede::{EDE, EDE_OPTION_CODE}; + use super::dns_message::{ self, DnsQueryMessage, DnsRecord, DnsUpdateMessage, EdnsOptionEntry, OptPseudoSection, QueryHeader, QueryQuestion, UpdateHeader, ZoneInfo, @@ -39,6 +44,27 @@ pub enum DnsMessageParserError { /// Result alias for parsing pub type DnsParserResult = Result; +/// Options for DNS message parser +#[derive(Debug, Default, Clone)] +pub struct DnsParserOptions { + /// Whether hostnames in RData should be lowercased, for consistency + pub lowercase_hostnames: bool, +} + +trait DnsParserOptionsTarget { + fn to_string_with_options(&self, options: &DnsParserOptions) -> String; +} + +impl DnsParserOptionsTarget for Name { + fn to_string_with_options(&self, options: &DnsParserOptions) -> String { + if options.lowercase_hostnames { + self.to_lowercase().to_string() + } else { + self.to_string() + } + } +} + /// A DNS message parser #[derive(Debug)] pub struct DnsMessageParser { @@ -50,6 +76,7 @@ pub struct 
DnsMessageParser { // contain compressed domain name, and store it here as a member field; for // subsequent invocations of the same call, we simply reuse this copy. raw_message_for_rdata_parsing: Option>, + options: DnsParserOptions, } impl DnsMessageParser { @@ -57,6 +84,15 @@ impl DnsMessageParser { DnsMessageParser { raw_message, raw_message_for_rdata_parsing: None, + options: DnsParserOptions::default(), + } + } + + pub fn with_options(raw_message: Vec, options: DnsParserOptions) -> Self { + DnsMessageParser { + raw_message, + raw_message_for_rdata_parsing: None, + options, } } @@ -113,7 +149,7 @@ impl DnsMessageParser { fn parse_dns_query_question(&self, question: &Query) -> QueryQuestion { QueryQuestion { - name: question.name().to_string(), + name: question.name().to_string_with_options(&self.options), class: question.query_class().to_string(), record_type: format_record_type(question.query_type()), record_type_id: u16::from(question.query_type()), @@ -148,17 +184,17 @@ impl DnsMessageParser { .collect::, _>>() } - fn parse_dns_record(&mut self, record: &Record) -> DnsParserResult { + pub(crate) fn parse_dns_record(&mut self, record: &Record) -> DnsParserResult { let record_data = match record.data() { Some(RData::Unknown { code, rdata }) => { self.format_unknown_rdata((*code).into(), rdata) } - Some(rdata) => format_rdata(rdata), + Some(rdata) => self.format_rdata(rdata), None => Ok((Some(String::from("")), None)), // NULL record }?; Ok(DnsRecord { - name: record.name().to_string(), + name: record.name().to_string_with_options(&self.options), class: record.dns_class().to_string(), record_type: format_record_type(record.record_type()), record_type_id: u16::from(record.record_type()), @@ -239,7 +275,7 @@ impl DnsMessageParser { let mut dec = BinDecoder::new(&address_vec); parse_ipv6_address(&mut dec)? 
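A hedged usage sketch of the new constructor; `with_options`, `DnsParserOptions`, and `lowercase_hostnames` appear in the hunks above, while the field access on the parsed message is an assumption based on `dns_message.rs`.

```rust
use dnsmsg_parser::dns_message_parser::{DnsMessageParser, DnsParserOptions};

// Parse a raw DNS packet with hostname lowercasing enabled, so names such as
// "WWW.Example.Com." are rendered as "www.example.com." throughout the output.
fn parse_normalized(raw_message: Vec<u8>) {
    let mut parser = DnsMessageParser::with_options(
        raw_message,
        DnsParserOptions {
            lowercase_hostnames: true,
        },
    );
    let message = parser.parse_as_query_message().expect("valid DNS message");
    println!("{:?}", message.question_section);
}
```

The repeated `let options = self.options.clone();` lines in the hunks below are not noise: `get_rdata_decoder_with_raw_message` takes `&mut self`, so the options are cloned out up front rather than held as `&self.options` across that mutable borrow.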
}; - let domain_name = parse_domain_name(&mut decoder)?; + let domain_name = Self::parse_domain_name(&mut decoder, &self.options)?; Ok(( Some(format!("{} {} {}", prefix, ipv6_address, domain_name)), None, @@ -345,57 +381,49 @@ impl DnsMessageParser { ) -> DnsParserResult<(Option, Option>)> { match code { dns_message::RTYPE_MB => { + let options = self.options.clone(); let mut decoder = self.get_rdata_decoder_with_raw_message(rdata.anything()); - let madname = parse_domain_name(&mut decoder)?; - Ok((Some(madname.to_string()), None)) + let madname = Self::parse_domain_name(&mut decoder, &options)?; + Ok((Some(madname), None)) } dns_message::RTYPE_MG => { + let options = self.options.clone(); let mut decoder = self.get_rdata_decoder_with_raw_message(rdata.anything()); - let mgname = parse_domain_name(&mut decoder)?; - Ok((Some(mgname.to_string()), None)) + let mgname = Self::parse_domain_name(&mut decoder, &options)?; + Ok((Some(mgname), None)) } dns_message::RTYPE_MR => { + let options = self.options.clone(); let mut decoder = self.get_rdata_decoder_with_raw_message(rdata.anything()); - let newname = parse_domain_name(&mut decoder)?; - Ok((Some(newname.to_string()), None)) + let newname = Self::parse_domain_name(&mut decoder, &options)?; + Ok((Some(newname), None)) } dns_message::RTYPE_WKS => self.parse_wks_rdata(rdata.anything()), - dns_message::RTYPE_HINFO => { - let mut decoder = BinDecoder::new(rdata.anything()); - let cpu = parse_character_string(&mut decoder)?; - let os = parse_character_string(&mut decoder)?; - Ok(( - Some(format!( - "\"{}\" \"{}\"", - escape_string_for_text_representation(cpu), - escape_string_for_text_representation(os) - )), - None, - )) - } - dns_message::RTYPE_MINFO => { + let options = self.options.clone(); let mut decoder = self.get_rdata_decoder_with_raw_message(rdata.anything()); - let rmailbx = parse_domain_name(&mut decoder)?; - let emailbx = parse_domain_name(&mut decoder)?; + let rmailbx = Self::parse_domain_name(&mut decoder, &options)?; + let emailbx = Self::parse_domain_name(&mut decoder, &options)?; Ok((Some(format!("{} {}", rmailbx, emailbx)), None)) } dns_message::RTYPE_RP => { + let options = self.options.clone(); let mut decoder = self.get_rdata_decoder_with_raw_message(rdata.anything()); - let mbox = parse_domain_name(&mut decoder)?; - let txt = parse_domain_name(&mut decoder)?; + let mbox = Self::parse_domain_name(&mut decoder, &options)?; + let txt = Self::parse_domain_name(&mut decoder, &options)?; Ok((Some(format!("{} {}", mbox, txt)), None)) } dns_message::RTYPE_AFSDB => { + let options = self.options.clone(); let mut decoder = self.get_rdata_decoder_with_raw_message(rdata.anything()); let subtype = parse_u16(&mut decoder)?; - let hostname = parse_domain_name(&mut decoder)?; + let hostname = Self::parse_domain_name(&mut decoder, &options)?; Ok((Some(format!("{} {}", subtype, hostname)), None)) } @@ -436,9 +464,10 @@ impl DnsMessageParser { } dns_message::RTYPE_RT => { + let options = self.options.clone(); let mut decoder = self.get_rdata_decoder_with_raw_message(rdata.anything()); let preference = parse_u16(&mut decoder)?; - let intermediate_host = parse_domain_name(&mut decoder)?; + let intermediate_host = Self::parse_domain_name(&mut decoder, &options)?; Ok((Some(format!("{} {}", preference, intermediate_host)), None)) } @@ -451,19 +480,21 @@ impl DnsMessageParser { } dns_message::RTYPE_PX => { + let options = self.options.clone(); let mut decoder = self.get_rdata_decoder_with_raw_message(rdata.anything()); let preference = parse_u16(&mut 
decoder)?; - let map822 = parse_domain_name(&mut decoder)?; - let mapx400 = parse_domain_name(&mut decoder)?; + let map822 = Self::parse_domain_name(&mut decoder, &options)?; + let mapx400 = Self::parse_domain_name(&mut decoder, &options)?; Ok((Some(format!("{} {} {}", preference, map822, mapx400)), None)) } dns_message::RTYPE_LOC => self.parse_loc_rdata(rdata.anything()), dns_message::RTYPE_KX => { + let options = self.options.clone(); let mut decoder = self.get_rdata_decoder_with_raw_message(rdata.anything()); let preference = parse_u16(&mut decoder)?; - let exchanger = parse_domain_name(&mut decoder)?; + let exchanger = Self::parse_domain_name(&mut decoder, &options)?; Ok((Some(format!("{} {}", preference, exchanger)), None)) } @@ -522,258 +553,305 @@ impl DnsMessageParser { _ => Ok((None, Some(rdata.anything().to_vec()))), } } -} -fn format_rdata(rdata: &RData) -> DnsParserResult<(Option, Option>)> { - match rdata { - RData::A(ip) => Ok((Some(ip.to_string()), None)), - RData::AAAA(ip) => Ok((Some(ip.to_string()), None)), - RData::ANAME(name) => Ok((Some(name.to_string()), None)), - RData::CNAME(name) => Ok((Some(name.to_string()), None)), - RData::MX(mx) => { - let srv_rdata = format!("{} {}", mx.preference(), mx.exchange(),); - Ok((Some(srv_rdata), None)) - } - RData::NULL(null) => Ok((Some(BASE64.encode(null.anything())), None)), - RData::NS(ns) => Ok((Some(ns.to_string()), None)), - RData::OPENPGPKEY(key) => { - if let Ok(key_string) = String::from_utf8(Vec::from(key.public_key())) { - Ok((Some(format!("({})", &key_string)), None)) - } else { - Err(DnsMessageParserError::SimpleError { - cause: String::from("Invalid OPENPGPKEY rdata"), - }) + fn format_rdata(&self, rdata: &RData) -> DnsParserResult<(Option, Option>)> { + match rdata { + RData::A(ip) => Ok((Some(ip.to_string()), None)), + RData::AAAA(ip) => Ok((Some(ip.to_string()), None)), + RData::ANAME(name) => Ok((Some(name.to_string_with_options(&self.options)), None)), + RData::CNAME(name) => Ok((Some(name.to_string_with_options(&self.options)), None)), + RData::CSYNC(csync) => { + // Using CSYNC's formatter since not all data is exposed otherwise + let csync_rdata = format!("{}", csync); + Ok((Some(csync_rdata), None)) } - } - RData::PTR(ptr) => Ok((Some(ptr.to_string()), None)), - RData::SOA(soa) => Ok(( - Some(format!( - "{} {} {} {} {} {} {}", - soa.mname(), - soa.rname(), - soa.serial(), - soa.refresh(), - soa.retry(), - soa.expire(), - soa.minimum() + RData::MX(mx) => { + let srv_rdata = format!( + "{} {}", + mx.preference(), + mx.exchange().to_string_with_options(&self.options), + ); + Ok((Some(srv_rdata), None)) + } + RData::NULL(null) => Ok((Some(BASE64.encode(null.anything())), None)), + RData::NS(ns) => Ok((Some(ns.to_string_with_options(&self.options)), None)), + RData::OPENPGPKEY(key) => { + if let Ok(key_string) = String::from_utf8(Vec::from(key.public_key())) { + Ok((Some(format!("({})", &key_string)), None)) + } else { + Err(DnsMessageParserError::SimpleError { + cause: String::from("Invalid OPENPGPKEY rdata"), + }) + } + } + RData::PTR(ptr) => Ok((Some(ptr.to_string_with_options(&self.options)), None)), + RData::SOA(soa) => Ok(( + Some(format!( + "{} {} {} {} {} {} {}", + soa.mname().to_string_with_options(&self.options), + soa.rname().to_string_with_options(&self.options), + soa.serial(), + soa.refresh(), + soa.retry(), + soa.expire(), + soa.minimum() + )), + None, )), - None, - )), - RData::SRV(srv) => { - let srv_rdata = format!( - "{} {} {} {}", - srv.priority(), - srv.weight(), - srv.port(), - srv.target() 
- ); - Ok((Some(srv_rdata), None)) - } - RData::TXT(txt) => { - let txt_rdata = txt - .txt_data() - .iter() - .map(|value| { - format!( - "\"{}\"", - escape_string_for_text_representation( - String::from_utf8_lossy(value).to_string() + RData::SRV(srv) => { + let srv_rdata = format!( + "{} {} {} {}", + srv.priority(), + srv.weight(), + srv.port(), + srv.target().to_string_with_options(&self.options) + ); + Ok((Some(srv_rdata), None)) + } + RData::TXT(txt) => { + let txt_rdata = txt + .txt_data() + .iter() + .map(|value| { + format!( + "\"{}\"", + escape_string_for_text_representation( + String::from_utf8_lossy(value).to_string() + ) ) - ) - }) - .collect::>() - .join(" "); - Ok((Some(txt_rdata), None)) - } - RData::CAA(caa) => { - let caa_rdata = format!( - "{} {} \"{}\"", - caa.issuer_critical() as u8, - caa.tag().as_str(), - match caa.value() { - Value::Url(url) => { - url.as_str().to_string() - } - Value::Issuer(option_name, vec_keyvalue) => { - let mut final_issuer = String::new(); - if let Some(name) = option_name { - final_issuer.push_str(&name.to_utf8()); - for keyvalue in vec_keyvalue.iter() { - final_issuer.push_str("; "); - final_issuer.push_str(keyvalue.key()); - final_issuer.push('='); - final_issuer.push_str(keyvalue.value()); + }) + .collect::>() + .join(" "); + Ok((Some(txt_rdata), None)) + } + RData::CAA(caa) => { + let caa_rdata = format!( + "{} {} \"{}\"", + caa.issuer_critical() as u8, + caa.tag().as_str(), + match caa.value() { + Value::Url(url) => { + url.as_str().to_string() + } + Value::Issuer(option_name, vec_keyvalue) => { + let mut final_issuer = String::new(); + if let Some(name) = option_name { + final_issuer.push_str(&name.to_string_with_options(&self.options)); + for keyvalue in vec_keyvalue.iter() { + final_issuer.push_str("; "); + final_issuer.push_str(keyvalue.key()); + final_issuer.push('='); + final_issuer.push_str(keyvalue.value()); + } } + final_issuer.trim_end().to_string() } - final_issuer.trim_end().to_string() + Value::Unknown(unknown) => std::str::from_utf8(unknown) + .map_err(|source| DnsMessageParserError::Utf8ParsingError { source })? + .to_string(), } - Value::Unknown(unknown) => std::str::from_utf8(unknown) - .map_err(|source| DnsMessageParserError::Utf8ParsingError { source })? - .to_string(), - } - ); - Ok((Some(caa_rdata), None)) - } + ); + Ok((Some(caa_rdata), None)) + } - RData::TLSA(tlsa) => { - let tlsa_rdata = format!( - "{} {} {} {}", - u8::from(tlsa.cert_usage()), - u8::from(tlsa.selector()), - u8::from(tlsa.matching()), - HEXUPPER.encode(tlsa.cert_data()) - ); - Ok((Some(tlsa_rdata), None)) - } - RData::SSHFP(sshfp) => { - let sshfp_rdata = format!( - "{} {} {}", - Into::::into(sshfp.algorithm()), - Into::::into(sshfp.fingerprint_type()), - HEXUPPER.encode(sshfp.fingerprint()) - ); - Ok((Some(sshfp_rdata), None)) - } - RData::NAPTR(naptr) => { - let naptr_rdata = format!( - r#"{} {} "{}" "{}" "{}" {}"#, - naptr.order(), - naptr.preference(), - escape_string_for_text_representation( - std::str::from_utf8(naptr.flags()) - .map_err(|source| DnsMessageParserError::Utf8ParsingError { source })? - .to_string() - ), - escape_string_for_text_representation( - std::str::from_utf8(naptr.services()) - .map_err(|source| DnsMessageParserError::Utf8ParsingError { source })? - .to_string() - ), - escape_string_for_text_representation( - std::str::from_utf8(naptr.regexp()) - .map_err(|source| DnsMessageParserError::Utf8ParsingError { source })? 
- .to_string() - ), - naptr.replacement().to_utf8() - ); - Ok((Some(naptr_rdata), None)) - } - RData::DNSSEC(dnssec) => match dnssec { - // See https://tools.ietf.org/html/rfc4034 for details - // on dnssec related rdata formats - DNSSECRData::DS(ds) => { - let ds_rdata = format!( + RData::TLSA(tlsa) => { + let tlsa_rdata = format!( "{} {} {} {}", - ds.key_tag(), - u8::from(ds.algorithm()), - u8::from(ds.digest_type()), - HEXUPPER.encode(ds.digest()) + u8::from(tlsa.cert_usage()), + u8::from(tlsa.selector()), + u8::from(tlsa.matching()), + HEXUPPER.encode(tlsa.cert_data()) ); - Ok((Some(ds_rdata), None)) + Ok((Some(tlsa_rdata), None)) } - DNSSECRData::DNSKEY(dnskey) => { - let dnskey_rdata = format!( - "{} 3 {} {}", - { - if dnskey.revoke() { - 0b0000_0000_0000_0000 - } else if dnskey.zone_key() && dnskey.secure_entry_point() { - 0b0000_0001_0000_0001 - } else { - 0b0000_0001_0000_0000 - } - }, - u8::from(dnskey.algorithm()), - BASE64.encode(dnskey.public_key()) + RData::SSHFP(sshfp) => { + let sshfp_rdata = format!( + "{} {} {}", + Into::::into(sshfp.algorithm()), + Into::::into(sshfp.fingerprint_type()), + HEXUPPER.encode(sshfp.fingerprint()) ); - Ok((Some(dnskey_rdata), None)) + Ok((Some(sshfp_rdata), None)) } - DNSSECRData::NSEC(nsec) => { - let nsec_rdata = format!( - "{} {}", - nsec.next_domain_name(), - nsec.type_bit_maps() - .iter() - .flat_map(|e| format_record_type(*e)) - .collect::>() - .join(" ") + RData::NAPTR(naptr) => { + let naptr_rdata = format!( + r#"{} {} "{}" "{}" "{}" {}"#, + naptr.order(), + naptr.preference(), + escape_string_for_text_representation( + std::str::from_utf8(naptr.flags()) + .map_err(|source| DnsMessageParserError::Utf8ParsingError { source })? + .to_string() + ), + escape_string_for_text_representation( + std::str::from_utf8(naptr.services()) + .map_err(|source| DnsMessageParserError::Utf8ParsingError { source })? + .to_string() + ), + escape_string_for_text_representation( + std::str::from_utf8(naptr.regexp()) + .map_err(|source| DnsMessageParserError::Utf8ParsingError { source })? 
+ .to_string() + ), + naptr.replacement().to_string_with_options(&self.options) ); - Ok((Some(nsec_rdata), None)) + Ok((Some(naptr_rdata), None)) } - DNSSECRData::NSEC3(nsec3) => { - let nsec3_rdata = format!( - "{} {} {} {} {} {}", - u8::from(nsec3.hash_algorithm()), - nsec3.opt_out() as u8, - nsec3.iterations(), - HEXUPPER.encode(nsec3.salt()), - BASE32HEX_NOPAD.encode(nsec3.next_hashed_owner_name()), - nsec3 - .type_bit_maps() - .iter() - .flat_map(|e| format_record_type(*e)) - .collect::>() - .join(" ") + RData::HINFO(hinfo) => { + let hinfo_data = format!( + r#""{}" "{}""#, + std::str::from_utf8(hinfo.cpu()) + .map_err(|source| DnsMessageParserError::Utf8ParsingError { source })?, + std::str::from_utf8(hinfo.os()) + .map_err(|source| DnsMessageParserError::Utf8ParsingError { source })?, ); - Ok((Some(nsec3_rdata), None)) + Ok((Some(hinfo_data), None)) } - DNSSECRData::NSEC3PARAM(nsec3param) => { - let nsec3param_rdata = format!( - "{} {} {} {}", - u8::from(nsec3param.hash_algorithm()), - nsec3param.opt_out() as u8, - nsec3param.iterations(), - HEXUPPER.encode(nsec3param.salt()), - ); - Ok((Some(nsec3param_rdata), None)) + RData::HTTPS(https) => { + let https_data = format_svcb_record(&https.0, &self.options); + Ok((Some(https_data), None)) } - - DNSSECRData::SIG(sig) => { - let sig_rdata = format!( - "{} {} {} {} {} {} {} {} {}", - match format_record_type(sig.type_covered()) { - Some(record_type) => record_type, - None => String::from("Unknown record type"), - }, - u8::from(sig.algorithm()), - sig.num_labels(), - sig.original_ttl(), - sig.sig_expiration(), // currently in epoch convert to human readable ? - sig.sig_inception(), // currently in epoch convert to human readable ? - sig.key_tag(), - sig.signer_name(), - BASE64.encode(sig.sig()) - ); - Ok((Some(sig_rdata), None)) + RData::SVCB(svcb) => { + let svcb_data = format_svcb_record(svcb, &self.options); + Ok((Some(svcb_data), None)) } - // RSIG is a derivation of SIG but choosing to keep this duplicate code in lieu of the alternative - // which is to allocate to the heap with Box in order to deref. - DNSSECRData::RRSIG(sig) => { - let sig_rdata = format!( - "{} {} {} {} {} {} {} {} {}", - match format_record_type(sig.type_covered()) { - Some(record_type) => record_type, - None => String::from("Unknown record type"), - }, - u8::from(sig.algorithm()), - sig.num_labels(), - sig.original_ttl(), - sig.sig_expiration(), // currently in epoch convert to human readable ? - sig.sig_inception(), // currently in epoch convert to human readable ? 
- sig.key_tag(), - sig.signer_name(), - BASE64.encode(sig.sig()) - ); - Ok((Some(sig_rdata), None)) + RData::OPT(opt) => { + let parsed = parse_edns_options(opt)?; + let ede_data = parsed.0.iter().map(|entry| EdnsOptionEntry { + opt_code: 15u16, + opt_name: "EDE".to_string(), + opt_data: format!( + "EDE={}({}){}", + entry.info_code(), + entry.purpose().unwrap_or(""), + entry.extra_text().unwrap_or("".to_string()) + ), + }); + let opt_data = parsed + .1 + .into_iter() + .chain(ede_data) + .map(|entry| format!("{}={}", entry.opt_name, entry.opt_data)) + .collect::>() + .join(","); + Ok((Some(opt_data), None)) } - DNSSECRData::Unknown { code: _, rdata } => Ok((None, Some(rdata.anything().to_vec()))), + RData::DNSSEC(dnssec) => match dnssec { + // See https://tools.ietf.org/html/rfc4034 for details + // on dnssec related rdata formats + DNSSECRData::CDS(cds) => Ok((Some(format_ds_record(cds.deref())), None)), + DNSSECRData::DS(ds) => Ok((Some(format_ds_record(ds)), None)), + DNSSECRData::CDNSKEY(cdnskey) => { + Ok((Some(format_dnskey_record(cdnskey.deref())), None)) + } + DNSSECRData::DNSKEY(dnskey) => Ok((Some(format_dnskey_record(dnskey)), None)), + DNSSECRData::NSEC(nsec) => { + let nsec_rdata = format!( + "{} {}", + nsec.next_domain_name() + .to_string_with_options(&self.options), + nsec.type_bit_maps() + .iter() + .flat_map(|e| format_record_type(*e)) + .collect::>() + .join(" ") + ); + Ok((Some(nsec_rdata), None)) + } + DNSSECRData::NSEC3(nsec3) => { + let nsec3_rdata = format!( + "{} {} {} {} {} {}", + u8::from(nsec3.hash_algorithm()), + nsec3.opt_out() as u8, + nsec3.iterations(), + HEXUPPER.encode(nsec3.salt()), + BASE32HEX_NOPAD.encode(nsec3.next_hashed_owner_name()), + nsec3 + .type_bit_maps() + .iter() + .flat_map(|e| format_record_type(*e)) + .collect::>() + .join(" ") + ); + Ok((Some(nsec3_rdata), None)) + } + DNSSECRData::NSEC3PARAM(nsec3param) => { + let nsec3param_rdata = format!( + "{} {} {} {}", + u8::from(nsec3param.hash_algorithm()), + nsec3param.opt_out() as u8, + nsec3param.iterations(), + HEXUPPER.encode(nsec3param.salt()), + ); + Ok((Some(nsec3param_rdata), None)) + } + + DNSSECRData::SIG(sig) => { + let sig_rdata = format!( + "{} {} {} {} {} {} {} {} {}", + match format_record_type(sig.type_covered()) { + Some(record_type) => record_type, + None => String::from("Unknown record type"), + }, + u8::from(sig.algorithm()), + sig.num_labels(), + sig.original_ttl(), + sig.sig_expiration(), // currently in epoch convert to human readable ? + sig.sig_inception(), // currently in epoch convert to human readable ? + sig.key_tag(), + sig.signer_name().to_string_with_options(&self.options), + BASE64.encode(sig.sig()) + ); + Ok((Some(sig_rdata), None)) + } + // RSIG is a derivation of SIG but choosing to keep this duplicate code in lieu of the alternative + // which is to allocate to the heap with Box in order to deref. + DNSSECRData::RRSIG(sig) => { + let sig_rdata = format!( + "{} {} {} {} {} {} {} {} {}", + match format_record_type(sig.type_covered()) { + Some(record_type) => record_type, + None => String::from("Unknown record type"), + }, + u8::from(sig.algorithm()), + sig.num_labels(), + sig.original_ttl(), + sig.sig_expiration(), // currently in epoch convert to human readable ? + sig.sig_inception(), // currently in epoch convert to human readable ? 
+ sig.key_tag(), + sig.signer_name().to_string_with_options(&self.options), + BASE64.encode(sig.sig()) + ); + Ok((Some(sig_rdata), None)) + } + DNSSECRData::KEY(key) => { + let key_rdata = format!( + "{} {} {} {}", + key.flags(), + u8::from(key.protocol()), + u8::from(key.algorithm()), + BASE64.encode(key.public_key()) + ); + Ok((Some(key_rdata), None)) + } + DNSSECRData::Unknown { code: _, rdata } => { + Ok((None, Some(rdata.anything().to_vec()))) + } + _ => Err(DnsMessageParserError::SimpleError { + cause: format!("Unsupported rdata {:?}", rdata), + }), + }, _ => Err(DnsMessageParserError::SimpleError { cause: format!("Unsupported rdata {:?}", rdata), }), - }, - _ => Err(DnsMessageParserError::SimpleError { - cause: format!("Unsupported rdata {:?}", rdata), - }), + } + } + + fn parse_domain_name( + decoder: &mut BinDecoder<'_>, + options: &DnsParserOptions, + ) -> DnsParserResult { + parse_domain_name(decoder).map(|n| n.to_string_with_options(options)) } } @@ -784,6 +862,46 @@ fn format_record_type(record_type: RecordType) -> Option { } } +fn format_svcb_record(svcb: &SVCB, options: &DnsParserOptions) -> String { + format!( + "{} {} {}", + svcb.svc_priority(), + svcb.target_name().to_string_with_options(options), + svcb.svc_params() + .iter() + .map(|(key, value)| format!(r#"{}="{}""#, key, value.to_string().trim_end_matches(','))) + .collect::>() + .join(" ") + ) +} + +fn format_dnskey_record(dnskey: &DNSKEY) -> String { + format!( + "{} 3 {} {}", + { + if dnskey.revoke() { + 0b0000_0000_0000_0000 + } else if dnskey.zone_key() && dnskey.secure_entry_point() { + 0b0000_0001_0000_0001 + } else { + 0b0000_0001_0000_0000 + } + }, + u8::from(dnskey.algorithm()), + BASE64.encode(dnskey.public_key()) + ) +} + +fn format_ds_record(ds: &DS) -> String { + format!( + "{} {} {} {}", + ds.key_tag(), + u8::from(ds.algorithm()), + u8::from(ds.digest_type()), + HEXUPPER.encode(ds.digest()) + ) +} + fn parse_response_code(rcode: u16) -> Option<&'static str> { match rcode { 0 => Some("NoError"), // 0 NoError No Error [RFC1035] @@ -845,20 +963,37 @@ fn parse_dns_update_message_header(dns_message: &TrustDnsMessage) -> UpdateHeade fn parse_edns(dns_message: &TrustDnsMessage) -> Option> { dns_message.extensions().as_ref().map(|edns| { - parse_edns_options(edns).map(|options| OptPseudoSection { + parse_edns_options(edns.options()).map(|(ede, rest)| OptPseudoSection { extended_rcode: edns.rcode_high(), version: edns.version(), dnssec_ok: edns.dnssec_ok(), udp_max_payload_size: edns.max_payload(), - options, + ede, + options: rest, }) }) } -fn parse_edns_options(edns: &Edns) -> DnsParserResult> { - edns.options() +fn parse_edns_options(edns: &OPT) -> DnsParserResult<(Vec, Vec)> { + let ede_opts: Vec = edns + .as_ref() + .iter() + .filter_map(|(_, option)| { + if let EdnsOption::Unknown(EDE_OPTION_CODE, option) = option { + Some( + EDE::from_bytes(option) + .map_err(|source| DnsMessageParserError::TrustDnsError { source }), + ) + } else { + None + } + }) + .collect::, DnsMessageParserError>>()?; + + let rest: Vec = edns .as_ref() .iter() + .filter(|(&code, _)| u16::from(code) != EDE_OPTION_CODE) .map(|(code, option)| match option { EdnsOption::DAU(algorithms) | EdnsOption::DHU(algorithms) @@ -870,7 +1005,9 @@ fn parse_edns_options(edns: &Edns) -> DnsParserResult> { .map(|bytes| parse_edns_opt(*code, &bytes)) .map_err(|source| DnsMessageParserError::TrustDnsError { source }), }) - .collect() + .collect::, DnsMessageParserError>>()?; + + Ok((ede_opts, rest)) } fn parse_edns_opt_dnssec_algorithms( @@ -897,7 +1034,7 
@@ fn parse_loc_rdata_size(data: u8) -> DnsParserResult { let base = (data & 0xF0) >> 4; if base > 9 { return Err(DnsMessageParserError::SimpleError { - cause: format!("The base shouldnt be greater than 9. Base: {}", base), + cause: format!("The base shouldn't be greater than 9. Base: {}", base), }); } @@ -905,7 +1042,7 @@ fn parse_loc_rdata_size(data: u8) -> DnsParserResult { if exponent > 9 { return Err(DnsMessageParserError::SimpleError { cause: format!( - "The exponent shouldnt be greater than 9. Exponent: {}", + "The exponent shouldn't be greater than 9. Exponent: {}", exponent ), }); @@ -1131,15 +1268,23 @@ fn format_bytes_as_hex_string(bytes: &[u8]) -> String { #[cfg(test)] mod tests { use std::{ + collections::HashMap, net::{Ipv4Addr, Ipv6Addr}, str::FromStr, }; + #[allow(deprecated)] use hickory_proto::rr::{ dnssec::{ rdata::{ - dnskey::DNSKEY, ds::DS, nsec::NSEC, nsec3::NSEC3, nsec3param::NSEC3PARAM, sig::SIG, - DNSSECRData, RRSIG, + dnskey::DNSKEY, + ds::DS, + key::{KeyTrust, KeyUsage, Protocol, UpdateScope}, + nsec::NSEC, + nsec3::NSEC3, + nsec3param::NSEC3PARAM, + sig::SIG, + DNSSECRData, KEY, RRSIG, }, Algorithm as DNSSEC_Algorithm, DigestType, Nsec3HashAlgorithm, }, @@ -1147,10 +1292,12 @@ mod tests { rdata::{ caa::KeyValue, sshfp::{Algorithm, FingerprintType}, + svcb, tlsa::{CertUsage, Matching, Selector}, - CAA, NAPTR, SSHFP, TLSA, TXT, + CAA, CSYNC, HINFO, HTTPS, NAPTR, OPT, SSHFP, TLSA, TXT, }, }; + use hickory_proto::serialize::binary::Restrict; use super::*; @@ -1160,6 +1307,17 @@ mod tests { } } + fn format_rdata(rdata: &RData) -> DnsParserResult<(Option, Option>)> { + DnsMessageParser::new(Vec::new()).format_rdata(rdata) + } + + fn format_rdata_with_options( + rdata: &RData, + options: DnsParserOptions, + ) -> DnsParserResult<(Option, Option>)> { + DnsMessageParser::with_options(Vec::new(), options).format_rdata(rdata) + } + #[test] fn test_parse_as_query_message() { let raw_dns_message = "szgAAAABAAAAAAAAAmg1B2V4YW1wbGUDY29tAAAGAAE="; @@ -1187,6 +1345,43 @@ mod tests { ); } + #[test] + fn test_parse_as_query_message_with_ede() { + let raw_dns_message = + "szgAAAABAAAAAAABAmg1B2V4YW1wbGUDY29tAAAGAAEAACkE0AEBQAAABgAPAAIAFQ=="; + let raw_query_message = BASE64 + .decode(raw_dns_message.as_bytes()) + .expect("Invalid base64 encoded data."); + let parse_result = DnsMessageParser::new(raw_query_message).parse_as_query_message(); + assert!(parse_result.is_ok()); + let message = parse_result.expect("Message is not parsed."); + let opt_pseudo_section = message.opt_pseudo_section.expect("OPT section was missing"); + assert_eq!(opt_pseudo_section.ede.len(), 1); + assert_eq!(opt_pseudo_section.ede[0].info_code(), 21u16); + assert_eq!(opt_pseudo_section.ede[0].purpose(), Some("Not Supported")); + assert_eq!(opt_pseudo_section.ede[0].extra_text(), None); + } + + #[test] + fn test_parse_as_query_message_with_ede_with_extra_text() { + let raw_dns_message = + "szgAAAABAAAAAAABAmg1B2V4YW1wbGUDY29tAAAGAAEAACkE0AEBQAAAOQAPADUACW5vIFNFUCBtYXRjaGluZyB0aGUgRFMgZm91bmQgZm9yIGRuc3NlYy1mYWlsZWQub3JnLg=="; + let raw_query_message = BASE64 + .decode(raw_dns_message.as_bytes()) + .expect("Invalid base64 encoded data."); + let parse_result = DnsMessageParser::new(raw_query_message).parse_as_query_message(); + assert!(parse_result.is_ok()); + let message = parse_result.expect("Message is not parsed."); + let opt_pseudo_section = message.opt_pseudo_section.expect("OPT section was missing"); + assert_eq!(opt_pseudo_section.ede.len(), 1); + assert_eq!(opt_pseudo_section.ede[0].info_code(), 9u16); + 
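For the EDE assertions in the surrounding tests, the option payload is a 16-bit info-code followed by optional UTF-8 extra text running to the end of the option (RFC 8914). A hedged sketch using the `EDE` type this patch adds in `lib/dnsmsg-parser/src/ede.rs`; the helper itself is illustrative:

```rust
use dnsmsg_parser::ede::EDE;
use hickory_proto::serialize::binary::BinDecodable as _;

// Decode one EDE option payload, e.g. [0x00, 0x15] => info-code 21
// ("Not Supported") with no extra text.
fn decode_ede(opt_data: &[u8]) -> EDE {
    EDE::from_bytes(opt_data).expect("well-formed EDE option")
}
```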
assert_eq!(opt_pseudo_section.ede[0].purpose(), Some("DNSKEY Missing")); + assert_eq!( + opt_pseudo_section.ede[0].extra_text(), + Some("no SEP matching the DS found for dnssec-failed.org.".to_string()) + ); + } + #[test] fn test_parse_as_query_message_with_invalid_data() { let err = DnsMessageParser::new(vec![1, 2, 3]) @@ -1216,6 +1411,41 @@ mod tests { assert_ne!(dns_query_message.answer_section[0].rdata_bytes, None); } + #[test] + fn test_parse_response_with_https_rdata() { + let raw_response_message_base64 = "Oe2BgAABAAEAAAABBGNkbnAHc2FuamFnaANjb20AAEEAAcAMAEEAAQAAASwAPQABAAABAAYCaDMCaDIABAAIrEDEHKxAxRwABgAgJgZHAADmAAAAAAAArEDEHCYGRwAA5gAAAAAAAKxAxRwAACkE0AAAAAAAHAAKABjWOVAgEGik/gEAAABlwiAuXkvEOviB1sk="; + let raw_response_message = BASE64 + .decode(raw_response_message_base64.as_bytes()) + .expect("Invalid base64 encoded data."); + let dns_response_message = DnsMessageParser::new(raw_response_message) + .parse_as_query_message() + .expect("Invalid DNS query message."); + assert_eq!( + dns_response_message.answer_section[0].rdata, + Some(r#"1 . alpn="h3,h2" ipv4hint="172.64.196.28,172.64.197.28" ipv6hint="2606:4700:e6::ac40:c41c,2606:4700:e6::ac40:c51c""#.to_string()) + ); + assert_eq!(dns_response_message.answer_section[0].record_type_id, 65u16); + assert_eq!(dns_response_message.answer_section[0].rdata_bytes, None); + } + + #[test] + fn test_parse_response_with_hinfo_rdata() { + let raw_response_message_base64 = + "wS2BgAABAAEAAAAAB3RyYWNrZXIEZGxlcgNvcmcAAP8AAcAMAA0AAQAAC64ACQdSRkM4NDgyAA=="; + let raw_response_message = BASE64 + .decode(raw_response_message_base64.as_bytes()) + .expect("Invalid base64 encoded data."); + let dns_response_message = DnsMessageParser::new(raw_response_message) + .parse_as_query_message() + .expect("Invalid DNS query message."); + assert_eq!( + dns_response_message.answer_section[0].rdata, + Some(r#""RFC8482" """#.to_string()) + ); + assert_eq!(dns_response_message.answer_section[0].record_type_id, 13u16); + assert_eq!(dns_response_message.answer_section[0].rdata_bytes, None); + } + #[test] fn test_format_bytes_as_hex_string() { assert_eq!( @@ -1337,6 +1567,24 @@ mod tests { } } + #[test] + fn test_format_rdata_for_cname_type_downcase() { + let rdata = RData::CNAME(hickory_proto::rr::rdata::CNAME( + Name::from_str("WWW.Example.Com.").unwrap(), + )); + let rdata_text = format_rdata_with_options( + &rdata, + DnsParserOptions { + lowercase_hostnames: true, + }, + ); + assert!(rdata_text.is_ok()); + if let Ok((parsed, raw_rdata)) = rdata_text { + assert!(raw_rdata.is_none()); + assert_eq!("www.example.com.", parsed.unwrap()); + } + } + #[test] fn test_format_rdata_for_txt_type() { let rdata = RData::TXT(TXT::new(vec![ @@ -1577,6 +1825,36 @@ mod tests { } } + #[test] + fn test_format_rdata_for_key_type() { + let rdata = RData::DNSSEC(DNSSECRData::KEY(KEY::new( + KeyTrust::NotPrivate, + KeyUsage::Host, + #[allow(deprecated)] + UpdateScope { + zone: false, + strong: false, + unique: true, + general: true, + }, + Protocol::DNSSEC, + DNSSEC_Algorithm::RSASHA256, + vec![ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, + 23, 24, 25, 26, 27, 28, 29, 29, 31, + ], + ))); + let rdata_text = format_rdata(&rdata); + assert!(rdata_text.is_ok()); + if let Ok((parsed, raw_rdata)) = rdata_text { + assert!(raw_rdata.is_none()); + assert_eq!( + "16387 3 8 AAECAwQFBgcICQoLDA0ODxAREhMUFRYXGBkaGxwdHR8=", + parsed.unwrap() + ); + } + } + // rsig is a derivation of the SIG record data, but the upstream crate does not handle that with an trait // 
so there isn't really a great way to reduce code duplication here. #[test] @@ -1622,9 +1900,103 @@ mod tests { } } + #[test] + fn test_format_rdata_for_svcb_type() { + let rdata = RData::SVCB(svcb::SVCB::new( + 1, + Name::root(), + vec![ + ( + svcb::SvcParamKey::Alpn, + svcb::SvcParamValue::Alpn(svcb::Alpn(vec!["h3".to_string(), "h2".to_string()])), + ), + ( + svcb::SvcParamKey::Ipv4Hint, + svcb::SvcParamValue::Ipv4Hint(svcb::IpHint(vec![ + A(Ipv4Addr::new(104, 18, 36, 155)), + A(Ipv4Addr::new(172, 64, 151, 101)), + ])), + ), + ], + )); + let rdata_text = format_rdata(&rdata); + assert!(rdata_text.is_ok()); + if let Ok((parsed, raw_rdata)) = rdata_text { + assert!(raw_rdata.is_none()); + assert_eq!( + r#"1 . alpn="h3,h2" ipv4hint="104.18.36.155,172.64.151.101""#, + parsed.unwrap() + ); + } + } + + #[test] + fn test_format_rdata_for_https_type() { + let rdata = RData::HTTPS(HTTPS(svcb::SVCB::new( + 1, + Name::root(), + vec![ + ( + svcb::SvcParamKey::Alpn, + svcb::SvcParamValue::Alpn(svcb::Alpn(vec!["h3".to_string(), "h2".to_string()])), + ), + ( + svcb::SvcParamKey::Ipv4Hint, + svcb::SvcParamValue::Ipv4Hint(svcb::IpHint(vec![ + A(Ipv4Addr::new(104, 18, 36, 155)), + A(Ipv4Addr::new(172, 64, 151, 101)), + ])), + ), + ], + ))); + let rdata_text = format_rdata(&rdata); + assert!(rdata_text.is_ok()); + if let Ok((parsed, raw_rdata)) = rdata_text { + assert!(raw_rdata.is_none()); + assert_eq!( + r#"1 . alpn="h3,h2" ipv4hint="104.18.36.155,172.64.151.101""#, + parsed.unwrap() + ); + } + } + #[test] fn test_format_rdata_for_hinfo_type() { - test_format_rdata("BWludGVsBWxpbnV4", 13, "\"intel\" \"linux\""); + let rdata = RData::HINFO(HINFO::new("intel".to_string(), "linux".to_string())); + let rdata_text = format_rdata(&rdata); + assert!(rdata_text.is_ok()); + if let Ok((parsed, raw_rdata)) = rdata_text { + assert!(raw_rdata.is_none()); + assert_eq!(r#""intel" "linux""#, parsed.unwrap()); + } + } + + #[test] + fn test_format_rdata_for_csync_type() { + let types = vec![RecordType::A, RecordType::NS, RecordType::AAAA]; + let rdata = RData::CSYNC(CSYNC::new(123, true, true, types)); + let rdata_text = format_rdata(&rdata); + assert!(rdata_text.is_ok()); + if let Ok((parsed, raw_rdata)) = rdata_text { + assert!(raw_rdata.is_none()); + assert_eq!("123 3 A NS AAAA", parsed.unwrap()); + } + } + + #[test] + fn test_format_rdata_for_opt_type() { + let mut options = HashMap::new(); + options.insert( + EdnsCode::LLQ, + EdnsOption::Unknown(u16::from(EdnsCode::LLQ), vec![0x01; 18]), + ); + let rdata = RData::OPT(OPT::new(options)); + let rdata_text = format_rdata(&rdata); + assert!(rdata_text.is_ok()); + if let Ok((parsed, raw_rdata)) = rdata_text { + assert!(raw_rdata.is_none()); + assert_eq!("LLQ=AQEBAQEBAQEBAQEBAQEBAQEB", parsed.unwrap()); + } } #[test] @@ -1829,11 +2201,22 @@ mod tests { let raw_rdata = BASE64 .decode(raw_data.as_bytes()) .expect("Invalid base64 encoded rdata."); - let record_rdata = NULL::with(raw_rdata); - let rdata_text = - DnsMessageParser::new(Vec::::new()).format_unknown_rdata(code, &record_rdata); + let mut decoder = BinDecoder::new(&raw_rdata); + let record = Record::from_rdata( + Name::new(), + 1, + RData::read( + &mut decoder, + RecordType::from(code), + Restrict::new(raw_rdata.len() as u16), + ) + .unwrap(), + ); + let rdata_text = DnsMessageParser::new(Vec::::new()) + .parse_dns_record(&record) + .map(|r| r.rdata); assert!(rdata_text.is_ok()); - assert_eq!(expected_output, rdata_text.unwrap().0.unwrap()); + assert_eq!(expected_output, rdata_text.unwrap().unwrap()); } fn 
test_format_rdata_with_compressed_domain_names(
diff --git a/lib/dnsmsg-parser/src/ede.rs b/lib/dnsmsg-parser/src/ede.rs
new file mode 100644
index 0000000000000..7bd3e7c0439ee
--- /dev/null
+++ b/lib/dnsmsg-parser/src/ede.rs
@@ -0,0 +1,93 @@
+use hickory_proto::{
+    error::ProtoResult,
+    serialize::binary::{BinDecodable, BinDecoder, BinEncodable, BinEncoder},
+};
+
+pub const EDE_OPTION_CODE: u16 = 15u16;
+
+#[derive(Debug, Clone)]
+pub struct EDE {
+    info_code: u16,
+    extra_text: Option<String>,
+}
+
+impl EDE {
+    pub fn new(info_code: u16, extra_text: Option<String>) -> Self {
+        Self {
+            info_code,
+            extra_text,
+        }
+    }
+
+    // https://www.iana.org/assignments/dns-parameters/dns-parameters.xhtml#extended-dns-error-codes
+    pub fn purpose(&self) -> Option<&str> {
+        match self.info_code {
+            0 => Some("Other Error"),
+            1 => Some("Unsupported DNSKEY Algorithm"),
+            2 => Some("Unsupported DS Digest Type"),
+            3 => Some("Stale Answer"),
+            4 => Some("Forged Answer"),
+            5 => Some("DNSSEC Indeterminate"),
+            6 => Some("DNSSEC Bogus"),
+            7 => Some("Signature Expired"),
+            8 => Some("Signature Not Yet Valid"),
+            9 => Some("DNSKEY Missing"),
+            10 => Some("RRSIGs Missing"),
+            11 => Some("No Zone Key Bit Set"),
+            12 => Some("NSEC Missing"),
+            13 => Some("Cached Error"),
+            14 => Some("Not Ready"),
+            15 => Some("Blocked"),
+            16 => Some("Censored"),
+            17 => Some("Filtered"),
+            18 => Some("Prohibited"),
+            19 => Some("Stale NXDomain Answer"),
+            20 => Some("Not Authoritative"),
+            21 => Some("Not Supported"),
+            22 => Some("No Reachable Authority"),
+            23 => Some("Network Error"),
+            24 => Some("Invalid Data"),
+            25 => Some("Signature Expired before Valid"),
+            26 => Some("Too Early"),
+            27 => Some("Unsupported NSEC3 Iterations Value"),
+            28 => Some("Unable to conform to policy"),
+            29 => Some("Synthesized"),
+            _ => None,
+        }
+    }
+
+    pub fn info_code(&self) -> u16 {
+        self.info_code
+    }
+
+    pub fn extra_text(&self) -> Option<String> {
+        self.extra_text.clone()
+    }
+}
+
+impl BinEncodable for EDE {
+    fn emit(&self, encoder: &mut BinEncoder<'_>) -> ProtoResult<()> {
+        encoder.emit_u16(self.info_code)?;
+        if let Some(extra_text) = &self.extra_text {
+            encoder.emit_vec(extra_text.as_bytes())?;
+        }
+        Ok(())
+    }
+}
+
+impl<'a> BinDecodable<'a> for EDE {
+    fn read(decoder: &mut BinDecoder<'a>) -> ProtoResult<Self> {
+        let info_code = decoder.read_u16()?.unverified();
+        let extra_text = if decoder.is_empty() {
+            None
+        } else {
+            Some(String::from_utf8(
+                decoder.read_vec(decoder.len())?.unverified(),
+            )?)
+        };
+        Ok(Self {
+            info_code,
+            extra_text,
+        })
+    }
+}
diff --git a/lib/dnsmsg-parser/src/lib.rs b/lib/dnsmsg-parser/src/lib.rs
index f78b7099deb4b..d332fd0d3fae3 100644
--- a/lib/dnsmsg-parser/src/lib.rs
+++ b/lib/dnsmsg-parser/src/lib.rs
@@ -9,3 +9,4 @@
 pub mod dns_message;
 pub mod dns_message_parser;
+pub mod ede;
diff --git a/lib/docs-renderer/Cargo.toml b/lib/docs-renderer/Cargo.toml
index cbe6eef56a194..971b93d4038cf 100644
--- a/lib/docs-renderer/Cargo.toml
+++ b/lib/docs-renderer/Cargo.toml
@@ -6,8 +6,8 @@ edition = "2021"
 publish = false

 [dependencies]
-anyhow = { version = "1.0.79", default-features = false, features = ["std"] }
-serde = { version = "1.0", default-features = false }
+anyhow = { version = "1.0.82", default-features = false, features = ["std"] }
+serde.workspace = true
 serde_json.workspace = true
 snafu = { version = "0.7.5", default-features = false }
 tracing = { version = "0.1.34", default-features = false }
diff --git a/lib/enrichment/Cargo.toml b/lib/enrichment/Cargo.toml
index e88d71a6e2a73..923bd375ec23e 100644
--- a/lib/enrichment/Cargo.toml
+++ b/lib/enrichment/Cargo.toml
@@ -6,7 +6,7 @@ edition = "2021"
 publish = false

 [dependencies]
-arc-swap = { version = "1.6.0", default-features = false }
+arc-swap = { version = "1.7.1", default-features = false }
 chrono.workspace = true
-dyn-clone = { version = "1.0.16", default-features = false }
+dyn-clone = { version = "1.0.17", default-features = false }
 vrl.workspace = true
diff --git a/lib/fakedata/Cargo.toml b/lib/fakedata/Cargo.toml
index cdd6824ec8c75..8fe5f51459cb1 100644
--- a/lib/fakedata/Cargo.toml
+++ b/lib/fakedata/Cargo.toml
@@ -8,5 +8,5 @@ license = "MPL-2.0"

 [dependencies]
 chrono.workspace = true
-fakedata_generator = "0.4.0"
+fakedata_generator = "0.5.0"
 rand = "0.8.5"
diff --git a/lib/file-source/Cargo.toml b/lib/file-source/Cargo.toml
index 3e1e68155bc6c..ab0944c7caede 100644
--- a/lib/file-source/Cargo.toml
+++ b/lib/file-source/Cargo.toml
@@ -11,7 +11,7 @@ libc = "0.2"
 winapi = { version = "0.3", features = ["winioctl"] }

 [dependencies]
-crc = "3.0.1"
+crc = "3.2.1"
 glob = "0.3.1"
 scan_fmt = "0.2.6"
 vector-config = { path = "../vector-config", default-features = false }
@@ -24,7 +24,7 @@ default-features = false
 features = []

 [dependencies.bytes]
-version = "1.5.0"
+version = "1.6.0"
 default-features = false
 features = []
@@ -39,7 +39,7 @@ default-features = false
 features = []

 [dependencies.indexmap]
-version = "2.1.0"
+version = "2.2.6"
 default-features = false
 features = ["serde"]
@@ -69,14 +69,14 @@ default-features = false
 features = []

 [dependencies.tokio]
-version = "1.35.1"
+version = "1.37.0"
 default-features = false
 features = ["full"]

 [dev-dependencies]
 criterion = "0.5"
 quickcheck = "1"
-tempfile = "3.9.0"
+tempfile = "3.10.1"
 similar-asserts = "1.5.0"

 [[bench]]
diff --git a/lib/file-source/src/file_server.rs b/lib/file-source/src/file_server.rs
index 59604037797c6..350664af478cc 100644
--- a/lib/file-source/src/file_server.rs
+++ b/lib/file-source/src/file_server.rs
@@ -52,6 +52,7 @@ where
     pub remove_after: Option<Duration>,
     pub emitter: E,
     pub handle: tokio::runtime::Handle,
+    pub rotate_wait: Duration,
 }

 /// `FileServer` as Source
@@ -292,11 +293,18 @@ where
             }
         }

+        for (_, watcher) in &mut fp_map {
+            if !watcher.file_findable() && watcher.last_seen().elapsed() > self.rotate_wait {
+                watcher.set_dead();
+            }
+        }
+
         // A FileWatcher is dead when the underlying file has disappeared.
         // If the FileWatcher is dead we don't retain it; it will be deallocated.
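The pass just added above, together with the `retain` that follows, implements the new `rotate_wait` behavior: a watcher whose file has been unfindable for longer than `rotate_wait` is marked dead, and dead watchers are then dropped, reporting whether they had read their file to EOF. A self-contained toy of the same shape, with illustrative types standing in for `FileWatcher`:

```rust
use std::time::{Duration, Instant};

struct Watcher {
    findable: bool,
    last_seen: Instant,
    dead: bool,
}

fn reap(watchers: &mut Vec<Watcher>, rotate_wait: Duration) {
    // Pass 1: a file unfindable for longer than rotate_wait is presumed
    // rotated away for good, so its watcher is marked dead.
    for watcher in watchers.iter_mut() {
        if !watcher.findable && watcher.last_seen.elapsed() > rotate_wait {
            watcher.dead = true;
        }
    }
    // Pass 2: drop dead watchers, as fp_map.retain does just below.
    watchers.retain(|watcher| !watcher.dead);
}
```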
         fp_map.retain(|file_id, watcher| {
             if watcher.dead() {
-                self.emitter.emit_file_unwatched(&watcher.path);
+                self.emitter
+                    .emit_file_unwatched(&watcher.path, watcher.reached_eof());
                 checkpoints.set_dead(*file_id);
                 false
             } else {
diff --git a/lib/file-source/src/file_watcher/mod.rs b/lib/file-source/src/file_watcher/mod.rs
index f39df392748cf..7ac50173ea2b7 100644
--- a/lib/file-source/src/file_watcher/mod.rs
+++ b/lib/file-source/src/file_watcher/mod.rs
@@ -42,8 +42,10 @@ pub struct FileWatcher {
     devno: u64,
     inode: u64,
     is_dead: bool,
+    reached_eof: bool,
     last_read_attempt: Instant,
     last_read_success: Instant,
+    last_seen: Instant,
     max_line_bytes: usize,
     line_delimiter: Bytes,
     buf: BytesMut,
@@ -143,8 +145,10 @@ impl FileWatcher {
             devno,
             inode: ino,
             is_dead: false,
+            reached_eof: false,
             last_read_attempt: ts,
             last_read_success: ts,
+            last_seen: ts,
             max_line_bytes,
             line_delimiter,
             buf: BytesMut::new(),
@@ -176,6 +180,9 @@ impl FileWatcher {
     pub fn set_file_findable(&mut self, f: bool) {
         self.findable = f;
+        if f {
+            self.last_seen = Instant::now();
+        }
     }

     pub fn file_findable(&self) -> bool {
@@ -228,6 +235,7 @@ impl FileWatcher {
                 let buf = self.buf.split().freeze();
                 if buf.is_empty() {
                     // EOF
+                    self.reached_eof = true;
                     Ok(None)
                 } else {
                     Ok(Some(RawLine {
@@ -236,6 +244,7 @@ impl FileWatcher {
                     }))
                 }
             } else {
+                self.reached_eof = true;
                 Ok(None)
             }
         }
@@ -268,6 +277,16 @@ impl FileWatcher {
         self.last_read_success.elapsed() < Duration::from_secs(10)
             || self.last_read_attempt.elapsed() > Duration::from_secs(10)
     }
+
+    #[inline]
+    pub fn last_seen(&self) -> Instant {
+        self.last_seen
+    }
+
+    #[inline]
+    pub fn reached_eof(&self) -> bool {
+        self.reached_eof
+    }
 }

 fn is_gzipped(r: &mut io::BufReader<fs::File>) -> io::Result<bool> {
diff --git a/lib/file-source/src/fingerprinter.rs b/lib/file-source/src/fingerprinter.rs
index de83749eb9d22..30086da1cd479 100644
--- a/lib/file-source/src/fingerprinter.rs
+++ b/lib/file-source/src/fingerprinter.rs
@@ -529,7 +529,7 @@ mod test {
             panic!();
         }

-        fn emit_file_unwatched(&self, _: &Path) {}
+        fn emit_file_unwatched(&self, _: &Path, _: bool) {}

         fn emit_file_deleted(&self, _: &Path) {}
diff --git a/lib/file-source/src/internal_events.rs b/lib/file-source/src/internal_events.rs
index 20195bb5deb22..9eb60e65397a1 100644
--- a/lib/file-source/src/internal_events.rs
+++ b/lib/file-source/src/internal_events.rs
@@ -9,7 +9,7 @@ pub trait FileSourceInternalEvents: Send + Sync + Clone + 'static {
     fn emit_file_watch_error(&self, path: &Path, error: Error);

-    fn emit_file_unwatched(&self, path: &Path);
+    fn emit_file_unwatched(&self, path: &Path, reached_eof: bool);

     fn emit_file_deleted(&self, path: &Path);
diff --git a/lib/k8s-e2e-tests/Cargo.toml b/lib/k8s-e2e-tests/Cargo.toml
index 37c45cba7d2d0..7efad957d2cd0 100644
--- a/lib/k8s-e2e-tests/Cargo.toml
+++ b/lib/k8s-e2e-tests/Cargo.toml
@@ -12,10 +12,10 @@ futures = "0.3"
 k8s-openapi = { version = "0.16.0", default-features = false, features = ["v1_19"] }
 k8s-test-framework = { version = "0.1", path = "../k8s-test-framework" }
 regex = "1"
-reqwest = { version = "0.11.23", features = ["json"] }
+reqwest = { version = "0.11.26", features = ["json"] }
 serde_json.workspace = true
-tokio = { version = "1.35.1", features = ["full"] }
-indoc = "2.0.4"
+tokio = { version = "1.37.0", features = ["full"] }
+indoc = "2.0.5"
 env_logger = "0.10"
 tracing = { version = "0.1", features = ["log"] }
 rand = "0.8"
diff --git a/lib/k8s-e2e-tests/src/lib.rs b/lib/k8s-e2e-tests/src/lib.rs
index 6d5788cfa767c..076d2015f1713 100644
--- a/lib/k8s-e2e-tests/src/lib.rs
+++ b/lib/k8s-e2e-tests/src/lib.rs
@@ -307,7 +307,7 @@ where
     Ok(())
 }

-/// Create a pod for our other pods to have an affinity to to ensure they are all deployed on
+/// Create a pod for our other pods to have an affinity to ensure they are all deployed on
 /// the same node.
 pub async fn create_affinity_pod(
     framework: &Framework,
diff --git a/lib/k8s-e2e-tests/tests/vector-agent.rs b/lib/k8s-e2e-tests/tests/vector-agent.rs
index 0c3c2c221445e..e15b690ed74e6 100644
--- a/lib/k8s-e2e-tests/tests/vector-agent.rs
+++ b/lib/k8s-e2e-tests/tests/vector-agent.rs
@@ -1614,7 +1614,7 @@ async fn multiple_ns() -> Result<(), Box<dyn std::error::Error>> {
         expected_namespaces.insert(name);
     }

-    // Create a pod for our other pods to have an affinity to to ensure they are all deployed on
+    // Create a pod for our other pods to have an affinity to ensure they are all deployed on
     // the same node.
     let affinity_ns_name = format!("{}-affinity", pod_namespace);
     let affinity_ns = framework
diff --git a/lib/k8s-test-framework/Cargo.toml b/lib/k8s-test-framework/Cargo.toml
index c2e5f7cedd76e..18154156896e9 100644
--- a/lib/k8s-test-framework/Cargo.toml
+++ b/lib/k8s-test-framework/Cargo.toml
@@ -11,5 +11,5 @@
 k8s-openapi = { version = "0.16.0", default-features = false, features = ["v1_19"] }
 serde_json.workspace = true
 tempfile = "3"
-tokio = { version = "1.35.1", features = ["full"] }
+tokio = { version = "1.37.0", features = ["full"] }
 log = "0.4"
diff --git a/lib/loki-logproto/Cargo.toml b/lib/loki-logproto/Cargo.toml
index f7b8bb28ad29f..4f8887c8f617c 100644
--- a/lib/loki-logproto/Cargo.toml
+++ b/lib/loki-logproto/Cargo.toml
@@ -10,7 +10,7 @@ publish = false

 [dependencies]
 prost = { version = "0.12", default-features = false, features = ["std"] }
 prost-types = { version = "0.12", default-features = false, features = ["std"] }
-bytes = { version = "1.5.0", default-features = false }
+bytes = { version = "1.6.0", default-features = false }
 snap = { version = "1.1.1", default-features = false }

 [dev-dependencies]
diff --git a/lib/opentelemetry-proto/Cargo.toml b/lib/opentelemetry-proto/Cargo.toml
index e106bea46ad44..7877005e539bb 100644
--- a/lib/opentelemetry-proto/Cargo.toml
+++ b/lib/opentelemetry-proto/Cargo.toml
@@ -10,7 +10,7 @@ prost-build = { version = "0.12", default-features = false}
 tonic-build = { version = "0.10", default-features = false, features = ["prost", "transport"] }

 [dependencies]
-bytes = { version = "1.5.0", default-features = false, features = ["serde"] }
+bytes = { version = "1.6.0", default-features = false, features = ["serde"] }
 chrono.workspace = true
 hex = { version = "0.4.3", default-features = false, features = ["std"] }
 lookup = { package = "vector-lookup", path = "../vector-lookup", default-features = false }
diff --git a/lib/prometheus-parser/Cargo.toml b/lib/prometheus-parser/Cargo.toml
index f6aaca1e0817b..154620ec33138 100644
--- a/lib/prometheus-parser/Cargo.toml
+++ b/lib/prometheus-parser/Cargo.toml
@@ -9,7 +9,7 @@ license = "MPL-2.0"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

 [dependencies]
-indexmap = "2.1.0"
+indexmap.workspace = true
 nom = "7.1.3"
 num_enum = "0.7.2"
 prost = "0.12"
diff --git a/lib/tracing-limit/Cargo.toml b/lib/tracing-limit/Cargo.toml
index 0103b94a1445a..bfbf9f008502d 100644
--- a/lib/tracing-limit/Cargo.toml
+++ b/lib/tracing-limit/Cargo.toml
@@ -14,7 +14,7 @@ dashmap = { version = "5.5.3", default-features = false }

 [dev-dependencies]
 criterion = "0.5"
 tracing = 
"0.1.34" -mock_instant = { version = "0.3" } +mock_instant = { version = "0.4" } tracing-subscriber = { version = "0.3.18", default-features = false, features = ["env-filter", "fmt"] } [[bench]] diff --git a/lib/vector-api-client/Cargo.toml b/lib/vector-api-client/Cargo.toml index 7fa5cddc591a2..e004265c3cd92 100644 --- a/lib/vector-api-client/Cargo.toml +++ b/lib/vector-api-client/Cargo.toml @@ -9,23 +9,22 @@ license = "MPL-2.0" [dependencies] # Serde -serde = { version = "1.0.195", default-features = false, features = ["derive"] } +serde.workspace = true serde_json.workspace = true # Error handling -anyhow = { version = "1.0.79", default-features = false, features = ["std"] } +anyhow = { version = "1.0.82", default-features = false, features = ["std"] } # Tokio / Futures -async-trait = { version = "0.1", default-features = false } futures = { version = "0.3", default-features = false, features = ["compat", "io-compat"] } -tokio = { version = "1.35.1", default-features = false, features = ["macros", "rt", "sync"] } -tokio-stream = { version = "0.1.14", default-features = false, features = ["sync"] } +tokio = { version = "1.37.0", default-features = false, features = ["macros", "rt", "sync"] } +tokio-stream = { version = "0.1.15", default-features = false, features = ["sync"] } # GraphQL -graphql_client = { version = "0.13.0", default-features = false, features = ["graphql_query_derive"] } +graphql_client = { version = "0.14.0", default-features = false, features = ["graphql_query_derive"] } # HTTP / WebSockets -reqwest = { version = "0.11.23", default-features = false, features = ["json"] } +reqwest = { version = "0.11.26", default-features = false, features = ["json"] } tokio-tungstenite = { version = "0.20.1", default-features = false, features = ["connect", "rustls"] } # External libs @@ -33,4 +32,4 @@ chrono.workspace = true clap.workspace = true url = { version = "2.5.0", default-features = false } uuid = { version = "1", default-features = false, features = ["serde", "v4"] } -indoc = { version = "2.0.4", default-features = false } +indoc = { version = "2.0.5", default-features = false } diff --git a/lib/vector-api-client/src/gql/components.rs b/lib/vector-api-client/src/gql/components.rs index 00dcb2f8a72e5..a2bfde4cdf487 100644 --- a/lib/vector-api-client/src/gql/components.rs +++ b/lib/vector-api-client/src/gql/components.rs @@ -1,6 +1,5 @@ use std::fmt; -use async_trait::async_trait; use graphql_client::GraphQLQuery; use crate::{BoxedSubscription, QueryResult}; @@ -32,12 +31,10 @@ pub struct ComponentAddedSubscription; )] pub struct ComponentRemovedSubscription; -#[async_trait] pub trait ComponentsQueryExt { async fn components_query(&self, first: i64) -> crate::QueryResult; } -#[async_trait] impl ComponentsQueryExt for crate::Client { async fn components_query(&self, first: i64) -> QueryResult { let request_body = ComponentsQuery::build_query(components_query::Variables { first }); @@ -50,7 +47,6 @@ pub trait ComponentsSubscriptionExt { fn component_removed(&self) -> crate::BoxedSubscription; } -#[async_trait] impl ComponentsSubscriptionExt for crate::SubscriptionClient { /// Subscription for when a component has been added fn component_added(&self) -> BoxedSubscription { diff --git a/lib/vector-api-client/src/gql/health.rs b/lib/vector-api-client/src/gql/health.rs index baa83b6450eb4..f3bd4968c8b30 100644 --- a/lib/vector-api-client/src/gql/health.rs +++ b/lib/vector-api-client/src/gql/health.rs @@ -1,6 +1,5 @@ //! 
Health queries/subscriptions, for asserting a GraphQL API server is alive. -use async_trait::async_trait; use graphql_client::GraphQLQuery; /// Shorthand for a Chrono datetime, set to UTC. @@ -29,13 +28,11 @@ pub struct HealthQuery; pub struct HeartbeatSubscription; /// Extension methods for health queries. -#[async_trait] pub trait HealthQueryExt { /// Executes a health query. async fn health_query(&self) -> crate::QueryResult<health_query::ResponseData>; } -#[async_trait] impl HealthQueryExt for crate::Client { /// Executes a health query. async fn health_query(&self) -> crate::QueryResult<health_query::ResponseData> { diff --git a/lib/vector-api-client/src/gql/meta.rs b/lib/vector-api-client/src/gql/meta.rs index c25dc41c3d091..d8d9a734342f3 100644 --- a/lib/vector-api-client/src/gql/meta.rs +++ b/lib/vector-api-client/src/gql/meta.rs @@ -1,4 +1,3 @@ -use async_trait::async_trait; use graphql_client::GraphQLQuery; /// MetaVersionStringQuery returns the version string of the queried Vector instance. @@ -11,13 +10,11 @@ use graphql_client::GraphQLQuery; pub struct MetaVersionStringQuery; /// Extension methods for meta queries. -#[async_trait] pub trait MetaQueryExt { /// Executes a meta version string query. async fn meta_version_string(&self) -> crate::QueryResult<meta_version_string_query::ResponseData>; } -#[async_trait] impl MetaQueryExt for crate::Client { /// Executes a meta version string query. async fn meta_version_string(&self) -> crate::QueryResult<meta_version_string_query::ResponseData> { diff --git a/lib/vector-api-client/src/lib.rs b/lib/vector-api-client/src/lib.rs index 6f3a2f4c39ac4..e31401172bdc9 100644 --- a/lib/vector-api-client/src/lib.rs +++ b/lib/vector-api-client/src/lib.rs @@ -11,6 +11,7 @@ #![deny(warnings)] #![deny(missing_debug_implementations, missing_copy_implementations)] +#![allow(async_fn_in_trait)] mod client; /// GraphQL queries diff --git a/lib/vector-api-client/src/test/mod.rs b/lib/vector-api-client/src/test/mod.rs index 57590e30ced65..1f7716d96214f 100644 --- a/lib/vector-api-client/src/test/mod.rs +++ b/lib/vector-api-client/src/test/mod.rs @@ -1,4 +1,3 @@ -use async_trait::async_trait; use graphql_client::GraphQLQuery; use crate::{BoxedSubscription, QueryResult}; @@ -48,7 +47,6 @@ pub struct ComponentByComponentKeyQuery; )] pub struct ComponentsConnectionQuery; -#[async_trait] pub trait TestQueryExt { async fn component_links_query( &self, @@ -77,7 +75,6 @@ pub trait TestQueryExt { ) -> crate::QueryResult; } -#[async_trait] impl TestQueryExt for crate::Client { async fn component_links_query( &self, diff --git a/lib/vector-buffers/Cargo.toml b/lib/vector-buffers/Cargo.toml index c0119084a0f72..f496343e95f84 100644 --- a/lib/vector-buffers/Cargo.toml +++ b/lib/vector-buffers/Cargo.toml @@ -6,12 +6,12 @@ edition = "2021" publish = false [dependencies] -async-recursion = "1.0.5" +async-recursion = "1.1.0" async-stream = "0.3.5" async-trait = { version = "0.1", default-features = false } bytecheck = { version = "0.6.9", default-features = false, features = ["std"] } -bytes = { version = "1.5.0", default-features = false } -crc32fast = { version = "1.3.2", default-features = false } +bytes = { version = "1.6.0", default-features = false } +crc32fast = { version = "1.4.0", default-features = false } crossbeam-queue = { version = "0.3.11", default-features = false, features = ["std"] } crossbeam-utils = { version = "0.8.19", default-features = false } derivative = { version = "2.2.0", default-features = false } @@ -19,19 +19,19 @@ fslock = { version = "0.2.1", default-features = false, features = ["std"] } futures = { version = "0.3.30", default-features = false, features = ["std"] }
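# A small sketch of the `*.workspace = true` pattern these manifests are converging on; the
# root-manifest entries below are illustrative, not the repository's actual pins.
#
# [workspace.dependencies]          # in the workspace root Cargo.toml
# serde = { version = "1.0", default-features = false, features = ["derive"] }
# indexmap = { version = "2.2.6", default-features = false, features = ["std"] }
#
# [dependencies]                    # in a member crate
# serde.workspace = true            # inherits the workspace version and features
# indexmap = { workspace = true, features = ["serde"] }   # a member may add features on top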
memmap2 = { version = "0.9.4", default-features = false } metrics = "0.21.1" -num-traits = { version = "0.2.17", default-features = false } +num-traits = { version = "0.2.18", default-features = false } paste = "1.0.14" pin-project.workspace = true -rkyv = { version = "0.7.43", default-features = false, features = ["size_32", "std", "strict", "validation"] } -serde = { version = "1.0.195", default-features = false, features = ["derive"] } +rkyv = { version = "0.7.44", default-features = false, features = ["size_32", "std", "strict", "validation"] } +serde.workspace = true snafu = { version = "0.7.5", default-features = false, features = ["std"] } tokio-util = { version = "0.7.0", default-features = false } -tokio = { version = "1.35.1", default-features = false, features = ["rt", "macros", "rt-multi-thread", "sync", "fs", "io-util", "time"] } +tokio = { version = "1.37.0", default-features = false, features = ["rt", "macros", "rt-multi-thread", "sync", "fs", "io-util", "time"] } tracing = { version = "0.1.34", default-features = false, features = ["attributes"] } vector-config = { path = "../vector-config", default-features = false } vector-config-common = { path = "../vector-config-common", default-features = false } vector-config-macros = { path = "../vector-config-macros", default-features = false } -vector-common = { path = "../vector-common", default-features = false, features = ["byte_size_of", "serde"] } +vector-common = { path = "../vector-common", default-features = false, features = ["byte_size_of"] } [dev-dependencies] clap.workspace = true @@ -45,8 +45,8 @@ proptest = "1.4" quickcheck = "1.0" rand = "0.8.5" serde_yaml = { version = "0.9", default-features = false } -temp-dir = "0.1.12" -tokio-test = "0.4.3" +temp-dir = "0.1.13" +tokio-test = "0.4.4" tracing-fluent-assertions = { version = "0.3" } tracing-subscriber = { version = "0.3.18", default-features = false, features = ["env-filter", "fmt", "registry", "std", "ansi"] } diff --git a/lib/vector-buffers/examples/buffer_perf.rs b/lib/vector-buffers/examples/buffer_perf.rs index c853ae5dcb946..c571c8b74e7f7 100644 --- a/lib/vector-buffers/examples/buffer_perf.rs +++ b/lib/vector-buffers/examples/buffer_perf.rs @@ -283,7 +283,7 @@ where variant .add_to_builder(&mut builder, Some(data_dir), id) - .expect("should not fail to to add variant to builder"); + .expect("should not fail to add variant to builder"); builder .build(String::from("buffer_perf"), Span::none()) diff --git a/lib/vector-buffers/src/lib.rs b/lib/vector-buffers/src/lib.rs index 0e0460e80d351..d8b28c278ac75 100644 --- a/lib/vector-buffers/src/lib.rs +++ b/lib/vector-buffers/src/lib.rs @@ -9,6 +9,7 @@ #![allow(clippy::module_name_repetitions)] #![allow(clippy::type_complexity)] // long-types happen, especially in async code #![allow(clippy::must_use_candidate)] +#![allow(async_fn_in_trait)] #[macro_use] extern crate tracing; @@ -74,7 +75,7 @@ pub enum WhenFull { impl Arbitrary for WhenFull { fn arbitrary(g: &mut Gen) -> Self { // TODO: We explicitly avoid generating "overflow" as a possible value because nothing yet - // supports handling it, and will be defaulted to to using "block" if they encounter + // supports handling it, and will be defaulted to using "block" if they encounter // "overflow". Thus, there's no reason to emit it here... yet. 
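// The `#![allow(async_fn_in_trait)]` added at this crate root is what permits the traits in
// this PR to drop the `#[async_trait]` macro. A minimal sketch of the pattern with
// illustrative names (not an API from this crate):
trait Fetch {
    // Natively async trait method, stable since Rust 1.75. Unlike `#[async_trait]`, the
    // returned future is not boxed, but its `Send`-ness is left unspecified, which is
    // exactly what the allowed lint warns about.
    async fn fetch(&self, key: &str) -> std::io::Result<Vec<u8>>;
}

struct InMemory;

impl Fetch for InMemory {
    async fn fetch(&self, key: &str) -> std::io::Result<Vec<u8>> {
        Ok(key.as_bytes().to_vec())
    }
}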
if bool::arbitrary(g) { WhenFull::Block diff --git a/lib/vector-buffers/src/topology/channel/receiver.rs b/lib/vector-buffers/src/topology/channel/receiver.rs index f6b7120d23323..d21aa1ed67e4f 100644 --- a/lib/vector-buffers/src/topology/channel/receiver.rs +++ b/lib/vector-buffers/src/topology/channel/receiver.rs @@ -160,7 +160,7 @@ impl<T: Bufferable> BufferReceiver<T> { enum StreamState<T> { Idle(BufferReceiver<T>), Polling, - Closed(BufferReceiver<T>), + Closed, } pub struct BufferReceiverStream<T> { @@ -183,7 +183,7 @@ impl<T: Bufferable> Stream for BufferReceiverStream<T> { fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { loop { match mem::replace(&mut self.state, StreamState::Polling) { - s @ StreamState::Closed(_) => { + s @ StreamState::Closed => { self.state = s; return Poll::Ready(None); } @@ -193,7 +193,7 @@ StreamState::Polling => { let (result, receiver) = ready!(self.recv_fut.poll(cx)); self.state = if result.is_none() { - StreamState::Closed(receiver) + StreamState::Closed } else { StreamState::Idle(receiver) }; diff --git a/lib/vector-buffers/src/topology/test_util.rs b/lib/vector-buffers/src/topology/test_util.rs index 10d5aa2fb7a0e..ec0acac7416d2 100644 --- a/lib/vector-buffers/src/topology/test_util.rs +++ b/lib/vector-buffers/src/topology/test_util.rs @@ -72,6 +72,7 @@ impl EventCount for Sample { } #[derive(Debug)] +#[allow(dead_code)] // The inner _is_ read by the `Debug` impl, but that's ignored pub struct BasicError(pub(crate) String); impl fmt::Display for BasicError { diff --git a/lib/vector-buffers/src/variants/disk_v2/io.rs b/lib/vector-buffers/src/variants/disk_v2/io.rs index e81ec5c80a69b..a63b46ba1bc7d 100644 --- a/lib/vector-buffers/src/variants/disk_v2/io.rs +++ b/lib/vector-buffers/src/variants/disk_v2/io.rs @@ -1,6 +1,5 @@ use std::{io, path::Path}; -use async_trait::async_trait; use tokio::{ fs::OpenOptions, io::{AsyncRead, AsyncWrite}, }; @@ -22,7 +21,6 @@ impl Metadata { } /// Generalized interface for opening and deleting files from a filesystem. -#[async_trait] pub trait Filesystem: Send + Sync { type File: AsyncFile; type MemoryMap: ReadableMemoryMap; @@ -89,7 +87,6 @@ pub trait Filesystem: Send + Sync { async fn delete_file(&self, path: &Path) -> io::Result<()>; } -#[async_trait] pub trait AsyncFile: AsyncRead + AsyncWrite + Send + Sync { /// Queries metadata about the underlying file.
/// @@ -128,7 +125,6 @@ pub trait WritableMemoryMap: ReadableMemoryMap { #[derive(Clone, Debug)] pub struct ProductionFilesystem; -#[async_trait] impl Filesystem for ProductionFilesystem { type File = tokio::fs::File; type MemoryMap = memmap2::Mmap; @@ -217,7 +213,6 @@ fn open_readable_file_options() -> OpenOptions { open_options } -#[async_trait] impl AsyncFile for tokio::fs::File { async fn metadata(&self) -> io::Result<Metadata> { let metadata = self.metadata().await?; diff --git a/lib/vector-buffers/src/variants/disk_v2/tests/mod.rs b/lib/vector-buffers/src/variants/disk_v2/tests/mod.rs index fa304951aca94..dfdf4120ddc4b 100644 --- a/lib/vector-buffers/src/variants/disk_v2/tests/mod.rs +++ b/lib/vector-buffers/src/variants/disk_v2/tests/mod.rs @@ -4,7 +4,6 @@ use std::{ sync::Arc, }; -use async_trait::async_trait; use tokio::{ fs::OpenOptions, io::{AsyncWriteExt, DuplexStream}, @@ -32,7 +31,6 @@ mod model; mod record; mod size_limits; -#[async_trait] impl AsyncFile for DuplexStream { async fn metadata(&self) -> io::Result<Metadata> { Ok(Metadata { len: 0 }) @@ -43,7 +41,6 @@ } } -#[async_trait] impl AsyncFile for Cursor<Vec<u8>> { async fn metadata(&self) -> io::Result<Metadata> { Ok(Metadata { len: 0 }) diff --git a/lib/vector-buffers/src/variants/disk_v2/tests/model/filesystem.rs b/lib/vector-buffers/src/variants/disk_v2/tests/model/filesystem.rs index db14ec92b2819..f6d5d11d5d079 100644 --- a/lib/vector-buffers/src/variants/disk_v2/tests/model/filesystem.rs +++ b/lib/vector-buffers/src/variants/disk_v2/tests/model/filesystem.rs @@ -8,7 +8,6 @@ use std::{ task::{Context, Poll}, }; -use async_trait::async_trait; use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; use crate::variants::disk_v2::{ @@ -206,7 +205,6 @@ impl AsyncWrite for TestFile { } } -#[async_trait] impl AsyncFile for TestFile { #[instrument(skip(self), level = "debug")] async fn metadata(&self) -> io::Result<Metadata> { @@ -304,7 +302,6 @@ impl Default for TestFilesystem { } } -#[async_trait] impl Filesystem for TestFilesystem { type File = TestFile; type MemoryMap = TestMmap; diff --git a/lib/vector-common/Cargo.toml b/lib/vector-common/Cargo.toml index 3410c12de04da..280503dfc9753 100644 --- a/lib/vector-common/Cargo.toml +++ b/lib/vector-common/Cargo.toml @@ -25,7 +25,6 @@ conversion = [ ] encoding = [ - "serde", "dep:nom", "dep:snafu", "btreemap" ] @@ -41,25 +40,25 @@ tokenize = [ [dependencies] async-stream = "0.3.5" -bytes = { version = "1.5.0", default-features = false, optional = true } -chrono-tz = { version = "0.8.5", default-features = false, features = ["serde"] } +bytes = { version = "1.6.0", default-features = false, optional = true } +chrono-tz = { version = "0.8.6", default-features = false, features = ["serde"] } chrono.workspace = true crossbeam-utils = { version = "0.8.19", default-features = false } derivative = { version = "2.2.0", default-features = false } futures = { version = "0.3.30", default-features = false, features = ["std"] } -indexmap = { version = "2.1.0", default-features = false, features = ["std"] } +indexmap.workspace = true metrics = "0.21.1" nom = { version = "7", optional = true } ordered-float = { version = "4.2.0", default-features = false } paste = "1.0.14" pin-project.workspace = true ryu = { version = "1", default-features = false } +serde.workspace = true serde_json.workspace = true -serde = { version = "1.0.195", optional = true, features = ["derive"] } smallvec = { version = "1", default-features = false } snafu = { version = "0.7", optional = true } stream-cancel = { version = "0.8.2",
default-features = false } -tokio = { version = "1.35.1", default-features = false, features = ["macros", "time"] } +tokio = { version = "1.37.0", default-features = false, features = ["macros", "time"] } tracing = { version = "0.1.34", default-features = false } vrl.workspace = true vector-config = { path = "../vector-config" } @@ -68,6 +67,6 @@ vector-config-macros = { path = "../vector-config-macros" } [dev-dependencies] futures = { version = "0.3.30", default-features = false, features = ["async-await", "std"] } -tokio = { version = "1.35.1", default-features = false, features = ["rt", "time"] } +tokio = { version = "1.37.0", default-features = false, features = ["rt", "time"] } quickcheck = "1" quickcheck_macros = "1" diff --git a/lib/vector-common/src/config.rs b/lib/vector-common/src/config.rs index 3e1c7e2d77e67..621915e322ce7 100644 --- a/lib/vector-common/src/config.rs +++ b/lib/vector-common/src/config.rs @@ -7,11 +7,8 @@ use vector_config::{configurable_component, ConfigurableString}; /// Component identifier. #[configurable_component(no_deser, no_ser)] -#[cfg_attr( - feature = "serde", - derive(::serde::Deserialize, ::serde::Serialize), - serde(from = "String", into = "String") -)] +#[derive(::serde::Deserialize, ::serde::Serialize)] +#[serde(from = "String", into = "String")] #[derive(Clone, Debug, Eq, Hash, PartialEq)] pub struct ComponentKey { /// Component ID. diff --git a/lib/vector-common/src/sensitive_string.rs b/lib/vector-common/src/sensitive_string.rs index b6522889025bd..0b290d1327919 100644 --- a/lib/vector-common/src/sensitive_string.rs +++ b/lib/vector-common/src/sensitive_string.rs @@ -2,11 +2,8 @@ use vector_config::{configurable_component, ConfigurableString}; /// Wrapper for sensitive strings containing credentials #[configurable_component(no_deser, no_ser)] -#[cfg_attr( - feature = "serde", - derive(::serde::Deserialize, ::serde::Serialize), - serde(from = "String", into = "String") -)] +#[derive(::serde::Deserialize, ::serde::Serialize)] +#[serde(from = "String", into = "String")] #[configurable(metadata(sensitive))] #[derive(Clone, Default, PartialEq, Eq)] pub struct SensitiveString(String); diff --git a/lib/vector-config-common/Cargo.toml b/lib/vector-config-common/Cargo.toml index c031b8ecbdab3..c00b0f43eea2f 100644 --- a/lib/vector-config-common/Cargo.toml +++ b/lib/vector-config-common/Cargo.toml @@ -9,7 +9,7 @@ convert_case = { version = "0.6", default-features = false } darling = { version = "0.20", default-features = false, features = ["suggestions"] } once_cell = { version = "1", default-features = false, features = ["std"] } proc-macro2 = { version = "1.0", default-features = false } -serde = { version = "1.0", default-features = false, features = ["derive"] } +serde.workspace = true serde_json.workspace = true syn = { version = "2.0", features = ["full", "extra-traits", "visit-mut", "visit"] } tracing = { version = "0.1.34", default-features = false } diff --git a/lib/vector-config-macros/Cargo.toml b/lib/vector-config-macros/Cargo.toml index 641c135adaff1..8e35ce6ecdae9 100644 --- a/lib/vector-config-macros/Cargo.toml +++ b/lib/vector-config-macros/Cargo.toml @@ -16,5 +16,5 @@ syn = { version = "2.0", default-features = false, features = ["full", "extra-tr vector-config-common = { path = "../vector-config-common" } [dev-dependencies] -serde = { version = "1.0.195", default-features = false } +serde.workspace = true vector-config = { path = "../vector-config" } diff --git a/lib/vector-config/Cargo.toml b/lib/vector-config/Cargo.toml index 
b0847da7bc88e..5ebde30a79cbc 100644 --- a/lib/vector-config/Cargo.toml +++ b/lib/vector-config/Cargo.toml @@ -12,17 +12,17 @@ path = "tests/integration/lib.rs" [dependencies] chrono.workspace = true -chrono-tz = { version = "0.8.5", default-features = false } +chrono-tz = { version = "0.8.6", default-features = false } encoding_rs = { version = "0.8", default-features = false, features = ["alloc", "serde"] } -indexmap = { version = "2.1.0", default-features = false, features = ["std"] } +indexmap.workspace = true inventory = { version = "0.3" } no-proxy = { version = "0.3.4", default-features = false, features = ["serialize"] } -num-traits = { version = "0.2.17", default-features = false } -serde = { version = "1.0", default-features = false } +num-traits = { version = "0.2.18", default-features = false } +serde.workspace = true serde_json.workspace = true -serde_with = { version = "3.5.0", default-features = false, features = ["std"] } +serde_with = { version = "3.7.0", default-features = false, features = ["std"] } snafu = { version = "0.7.5", default-features = false } -toml = { version = "0.8.8", default-features = false } +toml.workspace = true tracing = { version = "0.1.34", default-features = false } url = { version = "2.5.0", default-features = false, features = ["serde"] } http = { version = "0.2.9", default-features = false } @@ -32,5 +32,5 @@ vector-config-macros = { path = "../vector-config-macros" } [dev-dependencies] assert-json-diff = { version = "2", default-features = false } -serde_with = { version = "3.5.0", default-features = false, features = ["std", "macros"] } +serde_with = { version = "3.7.0", default-features = false, features = ["std", "macros"] } vector-core = { path = "../vector-core", default-features = false, features = ["test"] } diff --git a/lib/vector-core/Cargo.toml b/lib/vector-core/Cargo.toml index 2777d05221e41..34a9b707ebc0c 100644 --- a/lib/vector-core/Cargo.toml +++ b/lib/vector-core/Cargo.toml @@ -6,56 +6,57 @@ edition = "2021" publish = false [dependencies] -async-graphql = { version = "7.0.1", default-features = false, features = ["playground" ], optional = true } +async-graphql = { version = "7.0.3", default-features = false, features = ["playground" ], optional = true } async-trait = { version = "0.1", default-features = false } bitmask-enum = { version = "2.2.3", default-features = false } -bytes = { version = "1.5.0", default-features = false, features = ["serde"] } +bytes = { version = "1.6.0", default-features = false, features = ["serde"] } chrono.workspace = true crossbeam-utils = { version = "0.8.19", default-features = false } db-key = { version = "0.0.5", default-features = false, optional = true } -dyn-clone = { version = "1.0.16", default-features = false } +dyn-clone = { version = "1.0.17", default-features = false } enrichment = { path = "../enrichment", optional = true } -enumflags2 = { version = "0.7.8", default-features = false } +enumflags2 = { version = "0.7.9", default-features = false } float_eq = { version = "1.0", default-features = false } futures = { version = "0.3.30", default-features = false, features = ["std"] } futures-util = { version = "0.3.29", default-features = false, features = ["std"] } headers = { version = "0.3.9", default-features = false } http = { version = "0.2.9", default-features = false } hyper-proxy = { version = "0.9.1", default-features = false, features = ["openssl-tls"] } -indexmap = { version = "2.1.0", default-features = false, features = ["serde", "std"] } +indexmap.workspace = true +ipnet = { 
version = "2", default-features = false, features = ["serde", "std"] } lookup = { package = "vector-lookup", path = "../vector-lookup" } metrics = "0.21.1" metrics-tracing-context = { version = "0.14.0", default-features = false } metrics-util = { version = "0.15.1", default-features = false, features = ["registry"] } -mlua = { version = "0.9.5", default-features = false, features = ["lua54", "send", "vendored"], optional = true } +mlua = { version = "0.9.7", default-features = false, features = ["lua54", "send", "vendored"], optional = true } no-proxy = { version = "0.3.4", default-features = false, features = ["serialize"] } once_cell = { version = "1.19", default-features = false } ordered-float = { version = "4.2.0", default-features = false } -openssl = { version = "0.10.63", default-features = false, features = ["vendored"] } +openssl = { version = "0.10.64", default-features = false, features = ["vendored"] } parking_lot = { version = "0.12.1", default-features = false } pin-project.workspace = true proptest = { version = "1.4", optional = true } prost-types = { version = "0.12", default-features = false } prost = { version = "0.12", default-features = false, features = ["std"] } -quanta = { version = "0.12.2", default-features = false } -regex = { version = "1.10.3", default-features = false, features = ["std", "perf"] } +quanta = { version = "0.12.3", default-features = false } +regex = { version = "1.10.4", default-features = false, features = ["std", "perf"] } ryu = { version = "1", default-features = false } -serde = { version = "1.0.195", default-features = false, features = ["derive", "rc"] } +serde.workspace = true serde_json.workspace = true -serde_with = { version = "3.5.0", default-features = false, features = ["std", "macros"] } +serde_with = { version = "3.7.0", default-features = false, features = ["std", "macros"] } smallvec = { version = "1", default-features = false, features = ["serde", "const_generics"] } snafu = { version = "0.7.5", default-features = false } -socket2 = { version = "0.5.5", default-features = false } -tokio = { version = "1.35.1", default-features = false, features = ["net"] } +socket2 = { version = "0.5.6", default-features = false } +tokio = { version = "1.37.0", default-features = false, features = ["net"] } tokio-openssl = { version = "0.6.4", default-features = false } tokio-stream = { version = "0.1", default-features = false, features = ["time"], optional = true } tokio-util = { version = "0.7.0", default-features = false, features = ["time"] } -toml = { version = "0.8.8", default-features = false } +toml.workspace = true tonic = { version = "0.10", default-features = false, features = ["transport"] } tracing = { version = "0.1.34", default-features = false } tracing-core = { version = "0.1.26", default-features = false } tracing-subscriber = { version = "0.3.18", default-features = false, features = ["std"] } -typetag = { version = "0.2.15", default-features = false } +typetag = { version = "0.2.16", default-features = false } url = { version = "2", default-features = false } vector-buffers = { path = "../vector-buffers", default-features = false } vector-common = { path = "../vector-common" } @@ -65,7 +66,7 @@ vector-config-macros = { path = "../vector-config-macros" } vrl.workspace = true [target.'cfg(target_os = "macos")'.dependencies] -security-framework = "2.9.2" +security-framework = "2.10.0" [target.'cfg(windows)'.dependencies] schannel = "0.1.23" @@ -74,22 +75,22 @@ schannel = "0.1.23" prost-build = "0.12" [dev-dependencies] 
-base64 = "0.21.7" -chrono-tz = { version = "0.8.5", default-features = false } +base64 = "0.22.0" +chrono-tz = { version = "0.8.6", default-features = false } criterion = { version = "0.5.1", features = ["html_reports"] } env-test-util = "1.0.1" quickcheck = "1" quickcheck_macros = "1" proptest = "1.4" similar-asserts = "1.5.0" -tokio-test = "0.4.3" -toml = { version = "0.8.8", default-features = false, features = ["parse"] } +tokio-test = "0.4.4" +toml.workspace = true ndarray = "0.15.6" ndarray-stats = "0.5.1" noisy_float = "0.2.0" rand = "0.8.5" rand_distr = "0.4.3" -serde_yaml = { version = "0.9.30", default-features = false } +serde_yaml = { version = "0.9.34", default-features = false } tracing-subscriber = { version = "0.3.18", default-features = false, features = ["env-filter", "fmt", "ansi", "registry"] } vector-common = { path = "../vector-common", default-features = false, features = ["test"] } diff --git a/lib/vector-core/src/event/log_event.rs b/lib/vector-core/src/event/log_event.rs index 43ca2ccb3944d..6c0229b8922c4 100644 --- a/lib/vector-core/src/event/log_event.rs +++ b/lib/vector-core/src/event/log_event.rs @@ -318,18 +318,26 @@ impl LogEvent { /// aware that if the field has been dropped and then somehow re-added, we still fetch /// the dropped value here. pub fn get_by_meaning(&self, meaning: impl AsRef<str>) -> Option<&Value> { - if let Some(dropped) = self.metadata().dropped_field(&meaning) { - Some(dropped) - } else { + self.metadata().dropped_field(&meaning).or_else(|| { self.metadata() .schema_definition() .meaning_path(meaning.as_ref()) .and_then(|path| self.get(path)) - } + }) + } + + /// Retrieves the mutable value of a field based on its meaning. + /// Note that this does _not_ check the dropped fields, unlike `get_by_meaning`, since the + /// purpose of the mutable reference is to be able to modify the value and modifying the dropped + /// fields has no effect on the resulting event. + pub fn get_mut_by_meaning(&mut self, meaning: impl AsRef<str>) -> Option<&mut Value> { + Arc::clone(self.metadata.schema_definition()) + .meaning_path(meaning.as_ref()) + .and_then(|path| self.get_mut(path)) } /// Retrieves the target path of a field based on the specified `meaning`. - fn find_key_by_meaning(&self, meaning: impl AsRef<str>) -> Option<&OwnedTargetPath> { + pub fn find_key_by_meaning(&self, meaning: impl AsRef<str>) -> Option<&OwnedTargetPath> { self.metadata() .schema_definition() .meaning_path(meaning.as_ref()) diff --git a/lib/vector-core/src/event/metadata.rs b/lib/vector-core/src/event/metadata.rs index a9ae6e2d16fd1..49e9677dae874 100644 --- a/lib/vector-core/src/event/metadata.rs +++ b/lib/vector-core/src/event/metadata.rs @@ -337,8 +337,8 @@ impl EventMetadata { } /// Get the schema definition. - pub fn schema_definition(&self) -> &schema::Definition { - self.schema_definition.as_ref() + pub fn schema_definition(&self) -> &Arc<schema::Definition> { + &self.schema_definition } /// Set the schema definition. diff --git a/lib/vector-core/src/event/test/common.rs b/lib/vector-core/src/event/test/common.rs index d258e1fa3b7ff..431a4f3eee417 100644 --- a/lib/vector-core/src/event/test/common.rs +++ b/lib/vector-core/src/event/test/common.rs @@ -1,6 +1,6 @@ use std::{collections::BTreeSet, iter}; -use chrono::{DateTime, NaiveDateTime, Utc}; +use chrono::{DateTime, Utc}; use quickcheck::{empty_shrinker, Arbitrary, Gen}; use vrl::value::{ObjectMap, Value}; @@ -50,9 +50,7 @@ fn datetime(g: &mut Gen) -> DateTime<Utc> { // are. We just sort of arbitrarily restrict things.
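// A minimal sketch of the chrono migration in this hunk, with an illustrative timestamp:
// `NaiveDateTime::from_timestamp_opt(secs, nsecs).expect(..).and_utc()` is deprecated in
// favor of `DateTime::from_timestamp`, which yields `Option<DateTime<Utc>>` directly.
use chrono::{DateTime, Utc};

fn example_timestamp() -> DateTime<Utc> {
    DateTime::from_timestamp(1_700_000_000, 0).expect("timestamp in range")
}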
let secs = i64::arbitrary(g) % 32_000; let nanosecs = u32::arbitrary(g) % 32_000; - NaiveDateTime::from_timestamp_opt(secs, nanosecs) - .expect("invalid timestamp") - .and_utc() + DateTime::from_timestamp(secs, nanosecs).expect("invalid timestamp") } impl Arbitrary for Event { diff --git a/lib/vector-core/src/event/trace.rs b/lib/vector-core/src/event/trace.rs index d52295822e323..3956a9532315b 100644 --- a/lib/vector-core/src/event/trace.rs +++ b/lib/vector-core/src/event/trace.rs @@ -112,6 +112,10 @@ impl TraceEvent { } None } + + pub fn remove<'a>(&mut self, key: impl TargetPath<'a>) -> Option<Value> { + self.0.remove(key) + } } impl From<LogEvent> for TraceEvent { diff --git a/lib/vector-core/src/fanout.rs b/lib/vector-core/src/fanout.rs index bcd69a778b2e9..22871704215b8 100644 --- a/lib/vector-core/src/fanout.rs +++ b/lib/vector-core/src/fanout.rs @@ -74,7 +74,7 @@ impl Fanout { fn remove(&mut self, id: &ComponentKey) { assert!( - self.senders.remove(id).is_some(), + self.senders.shift_remove(id).is_some(), "Removing nonexistent sink from fanout: {id}" ); } @@ -349,7 +349,7 @@ impl<'a> SendGroup<'a> { // to also detach the send future for the sender if it exists, otherwise we'd be hanging // around still trying to send to it. assert!( - self.senders.remove(id).is_some(), + self.senders.shift_remove(id).is_some(), "Removing nonexistent sink from fanout: {id}" ); diff --git a/lib/vector-core/src/ipallowlist.rs b/lib/vector-core/src/ipallowlist.rs new file mode 100644 index 0000000000000..e1e59803834fd --- /dev/null +++ b/lib/vector-core/src/ipallowlist.rs @@ -0,0 +1,56 @@ +use serde::{Deserialize, Serialize}; +use std::cell::RefCell; +use vector_config::GenerateError; + +use ipnet::IpNet; +use vector_config::{configurable_component, Configurable, Metadata, ToValue}; +use vector_config_common::schema::{InstanceType, SchemaGenerator, SchemaObject}; + +/// List of allowed origin IP networks. IP addresses must be in CIDR notation.
+#[configurable_component] +#[derive(Clone, Debug, PartialEq, Eq)] +#[serde(deny_unknown_fields, transparent)] +#[configurable(metadata(docs::human_name = "Allowed IP network origins"))] +#[configurable(metadata(docs::examples = "ip_allow_list_example()"))] +pub struct IpAllowlistConfig(pub Vec<IpNetConfig>); + +const fn ip_allow_list_example() -> [&'static str; 4] { + [ + "192.168.0.0/16", + "127.0.0.1/32", + "::1/128", + "9876:9ca3:99ab::23/128", + ] +} + +/// IP network +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(deny_unknown_fields, transparent)] +pub struct IpNetConfig(pub IpNet); + +impl ToValue for IpNetConfig { + fn to_value(&self) -> serde_json::Value { + serde_json::Value::String(self.0.to_string()) + } +} + +impl Configurable for IpNetConfig { + fn generate_schema( + _: &RefCell<SchemaGenerator>, + ) -> std::result::Result<SchemaObject, GenerateError> { + Ok(SchemaObject { + instance_type: Some(InstanceType::String.into()), + ..Default::default() + }) + } + + fn metadata() -> Metadata { + Metadata::with_description("IP network") + } +} + +impl From<IpAllowlistConfig> for Vec<IpNet> { + fn from(value: IpAllowlistConfig) -> Self { + value.0.iter().map(|net| net.0).collect() + } +} diff --git a/lib/vector-core/src/lib.rs b/lib/vector-core/src/lib.rs index 21d68b5084c36..2e71d42d8a1c3 100644 --- a/lib/vector-core/src/lib.rs +++ b/lib/vector-core/src/lib.rs @@ -30,6 +30,7 @@ pub mod config; pub mod event; pub mod fanout; +pub mod ipallowlist; pub mod metrics; pub mod partition; pub mod schema; @@ -43,7 +44,7 @@ pub mod time; pub mod tls; pub mod transform; #[cfg(feature = "vrl")] -mod vrl; +pub mod vrl; use float_eq::FloatEq; use std::path::PathBuf; diff --git a/lib/vector-core/src/schema/meaning.rs b/lib/vector-core/src/schema/meaning.rs index ab766b0986924..450cfc7c9442b 100644 --- a/lib/vector-core/src/schema/meaning.rs +++ b/lib/vector-core/src/schema/meaning.rs @@ -12,6 +12,9 @@ pub const TIMESTAMP: &str = "timestamp"; /// The hostname of the machine where the event was generated. pub const HOST: &str = "host"; +/// The tags of an event, generally a key-value paired list.
+pub const TAGS: &str = "tags"; + pub const SOURCE: &str = "source"; pub const SEVERITY: &str = "severity"; pub const TRACE_ID: &str = "trace_id"; diff --git a/lib/vector-core/src/sink.rs b/lib/vector-core/src/sink.rs index a3e2e66e08c17..436ecc2cf7c01 100644 --- a/lib/vector-core/src/sink.rs +++ b/lib/vector-core/src/sink.rs @@ -1,6 +1,5 @@ use std::{fmt, iter::IntoIterator, pin::Pin}; -use async_trait::async_trait; use futures::{stream, task::Context, task::Poll, Sink, SinkExt, Stream, StreamExt}; use crate::event::{into_event_stream, Event, EventArray, EventContainer}; @@ -86,7 +85,7 @@ impl fmt::Debug for VectorSink { // === StreamSink === -#[async_trait] +#[async_trait::async_trait] pub trait StreamSink<T> { async fn run(self: Box<Self>, input: stream::BoxStream<'_, T>) -> Result<(), ()>; } @@ -172,7 +171,7 @@ struct EventStream<T> { sink: Box<T>, } -#[async_trait] +#[async_trait::async_trait] impl<T: StreamSink<Event> + Send> StreamSink<EventArray> for EventStream<T> { async fn run(self: Box<Self>, input: stream::BoxStream<'_, EventArray>) -> Result<(), ()> { let input = Box::pin(input.flat_map(into_event_stream)); diff --git a/lib/vector-core/src/tls/incoming.rs b/lib/vector-core/src/tls/incoming.rs index 2f988fef2427c..ca1d1f7dbc185 100644 --- a/lib/vector-core/src/tls/incoming.rs +++ b/lib/vector-core/src/tls/incoming.rs @@ -1,3 +1,4 @@ +use ipnet::IpNet; use std::{ collections::HashMap, future::Future, @@ -48,24 +49,64 @@ impl MaybeTlsSettings { Self::Raw(()) => None, }; - Ok(MaybeTlsListener { listener, acceptor }) + Ok(MaybeTlsListener { + listener, + acceptor, + origin_filter: None, + }) + } + + pub async fn bind_with_allowlist( + &self, + addr: &SocketAddr, + allow_origin: Vec<IpNet>, + ) -> crate::tls::Result<MaybeTlsListener> { + let listener = TcpListener::bind(addr).await.context(TcpBindSnafu)?; + + let acceptor = match self { + Self::Tls(tls) => Some(tls.acceptor()?), + Self::Raw(()) => None, + }; + + Ok(MaybeTlsListener { + listener, + acceptor, + origin_filter: Some(allow_origin), + }) } } pub struct MaybeTlsListener { listener: TcpListener, acceptor: Option<SslAcceptor>, + origin_filter: Option<Vec<IpNet>>, } impl MaybeTlsListener { pub async fn accept(&mut self) -> crate::tls::Result<MaybeTlsIncomingStream<TcpStream>> { - self.listener + let listener = self + .listener .accept() .await .map(|(stream, peer_addr)| { MaybeTlsIncomingStream::new(stream, peer_addr, self.acceptor.clone()) }) - .context(IncomingListenerSnafu) + .context(IncomingListenerSnafu)?; + + if let Some(origin_filter) = &self.origin_filter { + if origin_filter + .iter() + .any(|net| net.contains(&listener.peer_addr().ip())) + { + Ok(listener) + } else { + Err(TlsError::Connect { + source: std::io::ErrorKind::ConnectionRefused.into(), + }) + } + } else { + Ok(listener) + } } async fn into_accept( @@ -127,6 +168,12 @@ impl MaybeTlsListener { pub fn local_addr(&self) -> Result<SocketAddr, std::io::Error> { self.listener.local_addr() } + + #[must_use] + pub fn with_allowlist(mut self, allowlist: Option<Vec<IpNet>>) -> Self { + self.origin_filter = allowlist; + self + } } impl From<TcpListener> for MaybeTlsListener { @@ -134,6 +181,7 @@ impl From<TcpListener> for MaybeTlsListener { Self { listener, acceptor: None, + origin_filter: None, } } } diff --git a/lib/vector-core/src/tls/settings.rs b/lib/vector-core/src/tls/settings.rs index 5d96c51abbaa6..23725e47496dd 100644 --- a/lib/vector-core/src/tls/settings.rs +++ b/lib/vector-core/src/tls/settings.rs @@ -85,15 +85,15 @@ pub struct TlsSourceConfig { #[derive(Clone, Debug, Default)] #[serde(deny_unknown_fields)] pub struct TlsConfig { - /// Enables certificate verification. + /// Enables certificate verification.
For components that create a server, this requires that the + /// client connections have a valid client certificate. For components that initiate requests, + /// this validates that the upstream has a valid certificate. /// /// If enabled, certificates must not be expired and must be issued by a trusted /// issuer. This verification operates in a hierarchical manner, checking that the leaf certificate (the /// certificate presented by the client/server) is not only valid, but that the issuer of that certificate is also valid, and /// so on until the verification process reaches a root certificate. /// - /// Relevant for both incoming and outgoing connections. - /// /// Do NOT set this to `false` unless you understand the risks of not verifying the validity of certificates. pub verify_certificate: Option, diff --git a/lib/vector-lib/Cargo.toml b/lib/vector-lib/Cargo.toml index d97a25a644a1d..4f4c6abb2d334 100644 --- a/lib/vector-lib/Cargo.toml +++ b/lib/vector-lib/Cargo.toml @@ -26,6 +26,7 @@ lua = ["vector-core/lua"] file-source = ["dep:file-source"] opentelemetry = ["dep:opentelemetry-proto"] prometheus = ["dep:prometheus-parser"] +proptest = ["vector-lookup/proptest"] syslog = ["codecs/syslog"] test = ["vector-core/test"] vrl = ["vector-core/vrl"] diff --git a/lib/vector-lib/src/lib.rs b/lib/vector-lib/src/lib.rs index 9ffa72947f373..74e041ec7c0c7 100644 --- a/lib/vector-lib/src/lib.rs +++ b/lib/vector-lib/src/lib.rs @@ -18,8 +18,8 @@ pub use vector_config::impl_generate_config_from_default; #[cfg(feature = "vrl")] pub use vector_core::compile_vrl; pub use vector_core::{ - buckets, default_data_dir, emit, event, fanout, metric_tags, metrics, partition, quantiles, - register, samples, schema, serde, sink, source, tcp, tls, transform, update_counter, + buckets, default_data_dir, emit, event, fanout, ipallowlist, metric_tags, metrics, partition, + quantiles, register, samples, schema, serde, sink, source, tcp, tls, transform, update_counter, EstimatedJsonEncodedSizeOf, }; pub use vector_lookup as lookup; diff --git a/lib/vector-lookup/Cargo.toml b/lib/vector-lookup/Cargo.toml index b4325aa309737..ac2bf2ccd1503 100644 --- a/lib/vector-lookup/Cargo.toml +++ b/lib/vector-lookup/Cargo.toml @@ -7,10 +7,15 @@ publish = false license = "MPL-2.0" [dependencies] -serde = { version = "1.0.195", default-features = false, features = ["derive", "alloc"] } +proptest = { workspace = true, optional = true } +proptest-derive = { workspace = true, optional = true } +serde.workspace = true vector-config = { path = "../vector-config" } vector-config-macros = { path = "../vector-config-macros" } vrl.workspace = true +[dev-dependencies] + [features] test = [] +proptest = ["dep:proptest", "dep:proptest-derive"] diff --git a/lib/vector-lookup/src/lookup_v2/mod.rs b/lib/vector-lookup/src/lookup_v2/mod.rs index f6ad7dfa4c806..a6d5be15b37f6 100644 --- a/lib/vector-lookup/src/lookup_v2/mod.rs +++ b/lib/vector-lookup/src/lookup_v2/mod.rs @@ -15,6 +15,7 @@ use vrl::value::KeyString; /// use [optional_path::OptionalValuePath]. 
#[configurable_component] #[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] +#[cfg_attr(feature = "proptest", derive(proptest_derive::Arbitrary))] #[serde(try_from = "String", into = "String")] pub struct ConfigValuePath(pub OwnedValuePath); @@ -59,6 +60,7 @@ impl From<&str> for ConfigValuePath { /// with prefix default to `PathPrefix::Event` #[configurable_component] #[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] +#[cfg_attr(feature = "proptest", derive(proptest_derive::Arbitrary))] #[serde(try_from = "String", into = "String")] pub struct ConfigTargetPath(pub OwnedTargetPath); diff --git a/lib/vector-lookup/src/lookup_v2/optional_path.rs b/lib/vector-lookup/src/lookup_v2/optional_path.rs index de5f5d9d67ac0..22dcf2bb05acd 100644 --- a/lib/vector-lookup/src/lookup_v2/optional_path.rs +++ b/lib/vector-lookup/src/lookup_v2/optional_path.rs @@ -7,6 +7,7 @@ use crate::{OwnedTargetPath, OwnedValuePath}; #[configurable_component] #[derive(Debug, Clone, PartialEq, Eq, Default, Hash, PartialOrd, Ord)] +#[cfg_attr(feature = "proptest", derive(proptest_derive::Arbitrary))] #[serde(try_from = "String", into = "String")] /// An optional path that deserializes an empty string to `None`. pub struct OptionalTargetPath { @@ -67,6 +68,7 @@ impl From for OptionalTargetPath { #[configurable_component] #[derive(Debug, Clone, PartialEq, Eq, Default, Hash, PartialOrd, Ord)] +#[cfg_attr(feature = "proptest", derive(proptest_derive::Arbitrary))] #[serde(try_from = "String", into = "String")] /// An optional path that deserializes an empty string to `None`. pub struct OptionalValuePath { diff --git a/lib/vector-stream/Cargo.toml b/lib/vector-stream/Cargo.toml index 8a56eb1d3ec23..ca01a7e0aa55c 100644 --- a/lib/vector-stream/Cargo.toml +++ b/lib/vector-stream/Cargo.toml @@ -10,7 +10,7 @@ async-stream = { version = "0.3.5", default-features = false } futures = { version = "0.3.30", default-features = false, features = ["std"] } futures-util = { version = "0.3.29", default-features = false, features = ["std"] } pin-project.workspace = true -tokio = { version = "1.35.1", default-features = false, features = ["net"] } +tokio = { version = "1.37.0", default-features = false, features = ["net"] } tokio-util = { version = "0.7.0", default-features = false, features = ["time"] } tower = { version = "0.4", default-features = false, features = ["util"] } tracing = { version = "0.1.34", default-features = false } diff --git a/lib/vector-stream/src/driver.rs b/lib/vector-stream/src/driver.rs index 7a7b8386f8250..89d87101eae39 100644 --- a/lib/vector-stream/src/driver.rs +++ b/lib/vector-stream/src/driver.rs @@ -272,7 +272,7 @@ mod tests { type Counter = Arc<AtomicUsize>; #[derive(Debug)] - struct DelayRequest(usize, EventFinalizers, RequestMetadata); + struct DelayRequest(EventFinalizers, RequestMetadata); impl DelayRequest { fn new(value: usize, counter: &Counter) -> Self { @@ -283,7 +283,6 @@ counter.fetch_add(value, Ordering::Relaxed); }); Self( - value, EventFinalizers::new(EventFinalizer::new(batch)), RequestMetadata::default(), ) @@ -292,17 +291,17 @@ impl Finalizable for DelayRequest { fn take_finalizers(&mut self) -> vector_core::event::EventFinalizers { - std::mem::take(&mut self.1) + std::mem::take(&mut self.0) } } impl MetaDescriptive for DelayRequest { fn get_metadata(&self) -> &RequestMetadata { - &self.2 + &self.1 } fn metadata_mut(&mut self) -> &mut RequestMetadata { - &mut self.2 + &mut self.1 } } diff --git a/lib/vector-vrl/tests/Cargo.toml
b/lib/vector-vrl/tests/Cargo.toml index 9c69e6cedf738..3af10530df8de 100644 --- a/lib/vector-vrl/tests/Cargo.toml +++ b/lib/vector-vrl/tests/Cargo.toml @@ -17,7 +17,7 @@ clap.workspace = true glob = "0.3" prettydiff = "0.6" regex = "1" -serde = "1" +serde.workspace = true serde_json.workspace = true tracing-subscriber = { version = "0.3.18", default-features = false, features = ["fmt"] } diff --git a/lib/vector-vrl/tests/resources/protobuf_descriptor_set.desc b/lib/vector-vrl/tests/resources/protobuf_descriptor_set.desc new file mode 100644 index 0000000000000..43e7acf6cf771 Binary files /dev/null and b/lib/vector-vrl/tests/resources/protobuf_descriptor_set.desc differ diff --git a/lib/vector-vrl/tests/src/docs.rs b/lib/vector-vrl/tests/src/docs.rs index 55cf8e27a2079..ec694ce316f6e 100644 --- a/lib/vector-vrl/tests/src/docs.rs +++ b/lib/vector-vrl/tests/src/docs.rs @@ -16,6 +16,7 @@ const SKIP_FUNCTION_EXAMPLES: &[&str] = &[ "type_def", // Not supported on VM runtime "random_bytes", "uuid_v4", + "uuid_v7", "strip_ansi_escape_codes", "get_hostname", "now", @@ -190,5 +191,6 @@ fn test_from_cue_example(category: &'static str, name: String, example: Example) result_approx: false, skip, read_only_paths: vec![], + check_diagnostics: false, } } diff --git a/lib/vector-vrl/web-playground/Cargo.toml b/lib/vector-vrl/web-playground/Cargo.toml index ae693c042d2ef..90fa530a50b3c 100644 --- a/lib/vector-vrl/web-playground/Cargo.toml +++ b/lib/vector-vrl/web-playground/Cargo.toml @@ -11,7 +11,7 @@ crate-type = ["cdylib"] [dependencies] wasm-bindgen = "0.2" vrl.workspace = true -serde = { version = "1.0", features = ["derive"] } +serde.workspace = true serde-wasm-bindgen = "0.6" gloo-utils = { version = "0.2", features = ["serde"] } getrandom = { version = "0.2", features = ["js"] } @@ -19,4 +19,4 @@ vector-vrl-functions = { path = "../functions" } enrichment = { path = "../../enrichment" } [build-dependencies] -cargo_toml = "0.19.0" +cargo_toml = "0.20.2" diff --git a/regression/cases/datadog_agent_remap_datadog_logs/vector/vector.yaml b/regression/cases/datadog_agent_remap_datadog_logs/vector/vector.yaml index 4691ea851f14c..11e10dfb2f902 100644 --- a/regression/cases/datadog_agent_remap_datadog_logs/vector/vector.yaml +++ b/regression/cases/datadog_agent_remap_datadog_logs/vector/vector.yaml @@ -42,6 +42,7 @@ sinks: inputs: [ "parse_message" ] endpoint: "http://localhost:8080" default_api_key: "DEADBEEF" + compression: "gzip" healthcheck: enabled: false buffer: diff --git a/regression/cases/datadog_agent_remap_datadog_logs_acks/vector/vector.yaml b/regression/cases/datadog_agent_remap_datadog_logs_acks/vector/vector.yaml index 2e9d42fd6db20..e55ba16bcf509 100644 --- a/regression/cases/datadog_agent_remap_datadog_logs_acks/vector/vector.yaml +++ b/regression/cases/datadog_agent_remap_datadog_logs_acks/vector/vector.yaml @@ -42,6 +42,7 @@ sinks: inputs: [ "parse_message" ] endpoint: "http://localhost:8080" default_api_key: "DEADBEEF" + compression: "gzip" healthcheck: enabled: false buffer: diff --git a/regression/cases/enterprise_http_to_http/vector/vector.yaml b/regression/cases/enterprise_http_to_http/vector/vector.yaml index 3e9784c339ffa..c092bdd84fdc9 100644 --- a/regression/cases/enterprise_http_to_http/vector/vector.yaml +++ b/regression/cases/enterprise_http_to_http/vector/vector.yaml @@ -4,8 +4,8 @@ data_dir: "/var/lib/vector" ## Enterprise ## enterprise: - api_key: "${DD_API_KEY}" - configuration_key: "${DD_CONFIGURATION_KEY}" + api_key: "${DD_API_KEY-}" + configuration_key: 
"${DD_CONFIGURATION_KEY-}" endpoint: "http://localhost:8080" ## diff --git a/rfcs/2020-03-06-1999-api-extensions-for-lua-transform.md b/rfcs/2020-03-06-1999-api-extensions-for-lua-transform.md index fbc130122a115..fc2afcc4733a3 100644 --- a/rfcs/2020-03-06-1999-api-extensions-for-lua-transform.md +++ b/rfcs/2020-03-06-1999-api-extensions-for-lua-transform.md @@ -503,7 +503,7 @@ Both log and metrics events are encoded using [external tagging](https://serde.r If a log event is created by the user inside the transform is a table, then, if default fields named according to the [global schema](https://vector.dev/docs/reference/global-options/#log_schema) are not present in such a table, then they are automatically added to the event. This rule does not apply to events having `userdata` type. **Example 1** - > The global schema is configured so that `message_key` is `"message"`, `timestamp_key` is `"timestamp"`, and `host_key` is is `"instance_id"`. + > The global schema is configured so that `message_key` is `"message"`, `timestamp_key` is `"timestamp"`, and `host_key` is `"instance_id"`. > > If a new event is created inside the user-defined Lua code as a table > @@ -652,7 +652,7 @@ The mapping between Vector data types and Lua data types is the following: | [`Timestamp`](https://vector.dev/docs/about/data-model/log/#timestamps) | [`userdata`](https://www.lua.org/pil/28.1.html) | There is no dedicated timestamp type in Lua. However, there is a standard library function [`os.date`](https://www.lua.org/manual/5.1/manual.html#pdf-os.date) which returns a table with fields `year`, `month`, `day`, `hour`, `min`, `sec`, and some others. Other standard library functions, such as [`os.time`](https://www.lua.org/manual/5.1/manual.html#pdf-os.time), support tables with these fields as arguments. Because of that, Vector timestamps passed to the transform are represented as `userdata` with the same set of accessible fields. In order to have one-to-one correspondence between Vector timestamps and Lua timestamps, `os.date` function from the standard library is patched to return not a table, but `userdata` with the same set of fields as it usually would return instead. This approach makes it possible to have both compatibility with the standard library functions and a dedicated data type for timestamps. | | [`Null`](https://vector.dev/docs/about/data-model/log/#null-values) | empty string | In Lua setting a table field to `nil` means deletion of this field. Furthermore, setting an array element to `nil` leads to deletion of this element. In order to avoid inconsistencies, already present `Null` values are visible represented as empty strings from Lua code, and it is impossible to create a new `Null` value in the user-defined code. | | [`Map`](https://vector.dev/docs/about/data-model/log/#maps) | [`userdata`](https://www.lua.org/pil/28.1.html) or [`table`](https://www.lua.org/pil/2.5.html) | Maps which are parts of events passed to the transform from Vector have `userdata` type. User-created maps have `table` type. Both types are converted to Vector's `Map` type when they are emitted from the transform. | -| [`Array`](https://vector.dev/docs/about/data-model/log/#arrays) | [`sequence`](https://www.lua.org/pil/11.1.html) | Sequences in Lua are a special case of tables. Because of that fact, the indexes can in principle start from any number. However, the convention in Lua is to to start indexes from 1 instead of 0, so Vector should adhere it. 
| +| [`Array`](https://vector.dev/docs/about/data-model/log/#arrays) | [`sequence`](https://www.lua.org/pil/11.1.html) | Sequences in Lua are a special case of tables. Because of that fact, the indexes can in principle start from any number. However, the convention in Lua is to start indexes from 1 instead of 0, so Vector should adhere to it. | ### Configuration diff --git a/rfcs/2023-02-08-7496-vrl-return.md b/rfcs/2023-02-08-7496-vrl-return.md new file mode 100644 index 0000000000000..ca4916e88163b --- /dev/null +++ b/rfcs/2023-02-08-7496-vrl-return.md @@ -0,0 +1,74 @@ +# RFC 7496 - 2023-02-02 - VRL Return Keyword + +Add a `return` expression to the Vector Remap Language. + +## Context + +- #7496 + +## Cross cutting concerns + +- None. + +## Scope + +### In scope + +- Adding a return expression to VRL. +- The `return` keyword can optionally take an expression as an argument, and the result of that expression will be returned. + +### Out of scope + +- Adding new keywords for similar purposes, such as `drop`. +- Defining semantics of keywords that are usually used for other purposes in other languages, such as `break`. +- Implementation of `return` expressions inside closures. + +## Pain + +- Aborting while keeping modifications made to the event cannot easily be done. +- VRL code is often unnecessarily indented because of the lack of early returns. + +## Proposal + +### User Experience + +- A `return` expression causes the VRL program to terminate, keeping any modifications made to the event. +- A `return` expression must always be followed by another expression, whose value will be used as the emitted event. +- The keyword cannot be used inside a closure. Trying to do that will result in a compilation error. + +### Implementation + +- Implementation will be similar to the current `abort` keyword when `drop_on_abort` is set to `false`. The only difference is that the returned value will be taken from the provided expression and not from the original input. +- `drop_on_abort` will have no effect on return calls, and configuration such as `drop_on_return` will not be added. + +## Rationale + +- It will be possible to write VRL with less indentation, making it more readable. +- `return` is already a reserved word, so it can be used without introducing a breaking change. + +## Drawbacks + +- The `return` keyword will be given a semantic meaning that will have to be supported going forward. + +## Prior Art + +- Most languages have a way to return early. +- To my knowledge, there has been no prior attempt to implement returns in VRL. + +## Alternatives + +- New keywords that are not currently reserved could be added to the language. This would, however, constitute a breaking change. +- This feature could also be rejected, as it does not add any functionality that cannot currently be expressed. + +## Outstanding Questions + +## Plan Of Attack + +Incremental steps to execute this change. These will be converted to issues after the RFC is approved: + +- [ ] Submit a PR with implementation of returns. + +## Future Improvements + +- Adding a `drop` keyword for explicit drop as an alternative to pre-configured `abort`, for full control over passing the events to output unchanged, passing them changed, or routing them to the dropped output. +- Adding `return` to closures.
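A hypothetical sketch of the proposed semantics (the event paths and values are illustrative; the final syntax is whatever the implementation lands on):

```
.normalized = true
if .status == "ok" {
    # Terminate early, emitting the event with the modifications made so far.
    return .
}
# Only non-ok events reach the heavier processing below.
.needs_review = true
```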
diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 1c8cfba9f594b..60e2197ed01bb 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,3 +1,3 @@ [toolchain] -channel = "1.75.0" +channel = "1.77.2" profile = "default" diff --git a/scripts/build-docker.sh b/scripts/build-docker.sh index 7c97470950995..14a53e4a9722a 100755 --- a/scripts/build-docker.sh +++ b/scripts/build-docker.sh @@ -17,10 +17,36 @@ PLATFORM="${PLATFORM:-}" PUSH="${PUSH:-"true"}" REPO="${REPO:-"timberio/vector"}" +IFS=, read -ra REQUESTED_PLATFORMS <<< "$PLATFORM" +declare -A SUPPORTED_PLATFORMS=( + [debian]="linux/amd64,linux/arm/v6,linux/arm/v7,linux/arm64/v8" + [alpine]="linux/amd64,linux/arm/v6,linux/arm/v7,linux/arm64/v8" + [distroless-static]="linux/amd64,linux/arm/v7,linux/arm64/v8" + [distroless-libc]="linux/amd64,linux/arm/v7,linux/arm64/v8" +) + # # Functions # +evaluate_supported_platforms_for_base() { + local BASE="$1" + IFS=, read -ra SUPPORTED_PLATFORMS_FOR_BASE <<< "${SUPPORTED_PLATFORMS["$BASE"]}" + + local BUILDABLE_PLATFORMS="" + for platform in "${REQUESTED_PLATFORMS[@]}" + do + if [[ ${SUPPORTED_PLATFORMS_FOR_BASE[*]} =~ $platform ]] + then + BUILDABLE_PLATFORMS+="$platform," + else + >&2 echo "WARN: skipping $platform for $BASE, no base image for platform" + fi + done + + echo "${BUILDABLE_PLATFORMS%?}" +} + build() { local BASE="$1" local VERSION="$2" @@ -34,8 +60,11 @@ build() { ARGS+=(--push) fi + local BUILDABLE_PLATFORMS + BUILDABLE_PLATFORMS=$(evaluate_supported_platforms_for_base "$BASE") + docker buildx build \ - --platform="$PLATFORM" \ + --platform="$BUILDABLE_PLATFORMS" \ --tag "$TAG" \ target/artifacts \ -f "$DOCKERFILE" \ diff --git a/scripts/check-style.sh b/scripts/check-style.sh index bf4f18fdf65ad..f905b6f22d34f 100755 --- a/scripts/check-style.sh +++ b/scripts/check-style.sh @@ -33,6 +33,7 @@ for FILE in $(git ls-files); do *ico) continue;; *sig) continue;; *html) continue;; + *desc) continue;; tests/data*) continue;; lib/codecs/tests/data*) continue;; lib/vector-core/tests/data*) continue;; diff --git a/scripts/check_changelog_fragments.sh b/scripts/check_changelog_fragments.sh index 3110f31d7a7c4..0cb2718d4764a 100755 --- a/scripts/check_changelog_fragments.sh +++ b/scripts/check_changelog_fragments.sh @@ -17,7 +17,7 @@ if [ ! -d "${CHANGELOG_DIR}" ]; then fi # diff-filter=A lists only added files -FRAGMENTS=$(git diff --name-only --diff-filter=A origin/master ${CHANGELOG_DIR}) +FRAGMENTS=$(git diff --name-only --diff-filter=A --merge-base origin/master ${CHANGELOG_DIR}) if [ -z "$FRAGMENTS" ]; then echo "No changelog fragments detected" @@ -61,7 +61,13 @@ while IFS= read -r fname; do # used for external contributor PRs. if [[ $1 == "--authors" ]]; then last=$( tail -n 1 "${CHANGELOG_DIR}/${fname}" ) - if ! [[ "${last}" =~ ^(authors: .*)$ ]]; then + if [[ "${last}" == "authors: "*@* ]]; then + echo "invalid fragment contents: author should not be prefixed with @" + exit 1 + elif [[ "${last}" == "authors: "*,* ]]; then + echo "invalid fragment contents: authors should be space delimited, not comma delimited." + exit 1 + elif ! [[ "${last}" =~ ^(authors: .*)$ ]]; then echo "invalid fragment contents: author option was specified but fragment ${fname} contains no authors." 
exit 1 fi diff --git a/scripts/ci-integration-test.sh b/scripts/ci-int-e2e-test.sh similarity index 60% rename from scripts/ci-integration-test.sh rename to scripts/ci-int-e2e-test.sh index 9687f1c1eafe6..3b569db530f5f 100755 --- a/scripts/ci-integration-test.sh +++ b/scripts/ci-int-e2e-test.sh @@ -11,20 +11,21 @@ if [[ -z "${CI:-}" ]]; then exit 1 fi -if [ $# -ne 1 ] +if [ $# -ne 2 ] then - echo "usage: $0 INTEGRATION" + echo "usage: $0 [int|e2e] TEST_NAME" exit 1 fi set -x -INTEGRATION=$1 +TEST_TYPE=$1 # either "int" or "e2e" +TEST_NAME=$2 -cargo vdev -v int start "${INTEGRATION}" +cargo vdev -v "${TEST_TYPE}" start -a "${TEST_NAME}" sleep 30 -cargo vdev -v int test --retries 2 -a "${INTEGRATION}" +cargo vdev -v "${TEST_TYPE}" test --retries 2 -a "${TEST_NAME}" RET=$? -cargo vdev -v int stop -a "${INTEGRATION}" +cargo vdev -v "${TEST_TYPE}" stop -a "${TEST_NAME}" ./scripts/upload-test-results.sh exit $RET diff --git a/scripts/cross/arm-unknown-linux-gnueabi.dockerfile b/scripts/cross/arm-unknown-linux-gnueabi.dockerfile new file mode 100644 index 0000000000000..fa728d6f4a4b1 --- /dev/null +++ b/scripts/cross/arm-unknown-linux-gnueabi.dockerfile @@ -0,0 +1,4 @@ +FROM ghcr.io/cross-rs/arm-unknown-linux-gnueabi:0.2.5 + +COPY scripts/cross/bootstrap-ubuntu.sh scripts/environment/install-protoc.sh / +RUN /bootstrap-ubuntu.sh && bash /install-protoc.sh diff --git a/scripts/cross/arm-unknown-linux-musleabi.dockerfile b/scripts/cross/arm-unknown-linux-musleabi.dockerfile new file mode 100644 index 0000000000000..ad80b289a8b28 --- /dev/null +++ b/scripts/cross/arm-unknown-linux-musleabi.dockerfile @@ -0,0 +1,8 @@ +FROM ghcr.io/cross-rs/arm-unknown-linux-musleabi:0.2.5 + +COPY scripts/cross/bootstrap-ubuntu.sh scripts/environment/install-protoc.sh / +RUN /bootstrap-ubuntu.sh && bash /install-protoc.sh + +# Stick `libstdc++` somewhere it can be found other than it's normal location, otherwise we end up using the wrong version +# of _other_ libraries, which ultimately just breaks linking. We'll set `/lib/native-libs` as a search path in `.cargo/config.toml`. +RUN mkdir -p /lib/native-libs && cp /usr/local/arm-linux-musleabi/lib/libstdc++.a /lib/native-libs/ diff --git a/scripts/e2e/Dockerfile b/scripts/e2e/Dockerfile new file mode 100644 index 0000000000000..f976155d47e8d --- /dev/null +++ b/scripts/e2e/Dockerfile @@ -0,0 +1,46 @@ +ARG RUST_VERSION +ARG FEATURES +ARG DEBIAN_RELEASE=slim-bookworm + +FROM docker.io/rust:${RUST_VERSION}-${DEBIAN_RELEASE} + +RUN apt-get update && apt-get -y --no-install-recommends install \ + build-essential \ + cmake \ + curl \ + git \ + clang \ + libclang-dev \ + libsasl2-dev \ + libstdc++-11-dev \ + libssl-dev \ + libxxhash-dev \ + unzip \ + zlib1g-dev \ + zlib1g + +RUN git clone https://github.com/rui314/mold.git \ + && mkdir mold/build \ + && cd mold/build \ + && git checkout v2.0.0 \ + && ../install-build-deps.sh \ + && cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_COMPILER=c++ .. \ + && cmake --build . -j $(nproc) \ + && cmake --install . + +RUN rustup run "${RUST_VERSION}" cargo install cargo-nextest --version 0.9.64 --locked + +COPY scripts/environment/install-protoc.sh / +COPY tests/data/ca/certs /certs +RUN bash /install-protoc.sh + +WORKDIR /vector +COPY . . 
+ARG FEATURES
+
+RUN --mount=type=cache,target=/vector/target \
+    --mount=type=cache,target=/usr/local/cargo/registry \
+    --mount=type=cache,target=/usr/local/cargo/git \
+    /usr/local/bin/mold -run cargo build --tests --lib --bin vector \
+    --no-default-features --features $FEATURES && \
+    cp target/debug/vector /usr/bin/vector
diff --git a/scripts/e2e/README.md b/scripts/e2e/README.md
new file mode 100644
index 0000000000000..62e7f8e9f64c7
--- /dev/null
+++ b/scripts/e2e/README.md
@@ -0,0 +1,9 @@
+This directory contains a set of end-to-end test frameworks for Vector, which are executed by the
+`vdev` tool.
+
+These end-to-end (e2e) tests are executed with the `vdev e2e` subcommand, which behaves
+identically to the `vdev integration` subcommand. See the README in the `scripts/integration`
+subdirectory for more information.
+
+The e2e tests are more of a black-box test, in which we spin up a full Vector instance as one
+of the compose services that runs alongside the others.
diff --git a/scripts/e2e/datadog-logs/README.md b/scripts/e2e/datadog-logs/README.md
new file mode 100644
index 0000000000000..7b3eaac8d9407
--- /dev/null
+++ b/scripts/e2e/datadog-logs/README.md
@@ -0,0 +1,19 @@
+This e2e test covers the `datadog_agent` source and the
+`datadog_logs` sink.
+
+Fake logs are generated in the emitter service and written
+to a file.
+
+Two Agent containers are spun up to read the log file, one
+for the Agent-only case and one for the Agent -> Vector case.
+
+In the Agent-only case, the Agent sends the logs to `fakeintake`
+(another service) directly. This is the baseline.
+
+In the Agent-Vector case, the Agent sends the logs to the vector
+service, and the `datadog_logs` sink sends to a separate
+`fakeintake` service. This is the comparison case.
+
+The two sets of data should be shaped the same in terms of when
+the events were received and the content of the events, but the
+timestamps themselves are not guaranteed to align.
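Since the README above points at the `vdev e2e` subcommand, a local run of this suite might look like the following, mirroring the start/test/stop sequence from `scripts/ci-int-e2e-test.sh` shown earlier. The test name is assumed to match this directory, and the placeholder API key is an assumption on the grounds that payloads terminate at fakeintake rather than at Datadog.

```shell
# Sketch of a local run, mirroring scripts/ci-int-e2e-test.sh.
# Assumptions: the test name matches the directory name, and a placeholder
# API key suffices since payloads go to fakeintake rather than Datadog.
export TEST_DATADOG_API_KEY="dummy"

cargo vdev -v e2e start -a datadog-logs        # bring up the compose services
cargo vdev -v e2e test --retries 2 -a datadog-logs
cargo vdev -v e2e stop -a datadog-logs         # tear the services down
```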
diff --git a/scripts/e2e/datadog-logs/compose.yaml b/scripts/e2e/datadog-logs/compose.yaml new file mode 100644 index 0000000000000..56fb68350c004 --- /dev/null +++ b/scripts/e2e/datadog-logs/compose.yaml @@ -0,0 +1,102 @@ +version: '3' + +services: + # Generates random log data for consumption by the custom Agent check + log_generator: + image: docker.io/mingrammer/flog + depends_on: + - datadog-agent-vector + - datadog-agent + command: + - "-f" + - "json" + - "-n" + - "1000" + - "-t" + - "log" + - "-o" + - "/var/log/a_custom.log" + volumes: + - log_path:/var/log/ + + # Tails a custom log created by `log_generator` and sends log data to + # the `fakeintake-agent` service + datadog-agent: + image: docker.io/datadog/agent:${CONFIG_AGENT_VERSION} + depends_on: + - fakeintake-agent + environment: + - DD_API_KEY=${TEST_DATADOG_API_KEY:?TEST_DATADOG_API_KEY required} + - DD_HOSTNAME=datadog-agent + - DD_ENABLE_PAYLOADS_EVENTS=false + - DD_ENABLE_PAYLOADS_SERVICE_CHECKS=false + - DD_CONTAINER_EXCLUDE="name:.*" + volumes: + # The Agent config file + - ${PWD}/tests/data/e2e/datadog/logs/agent_only.yaml:/etc/datadog-agent/datadog.yaml + # The custom logs check + - ${PWD}/tests/data/e2e/datadog/logs/logs.conf.d:/conf.d:ro + # The custom log to tail, created by the `log_generator` service + - log_path:/var/log/ + + # Tails a custom log created by `log_generator` and sends log data to + # the `vector` service + datadog-agent-vector: + image: docker.io/datadog/agent:${CONFIG_AGENT_VERSION} + depends_on: + - vector + environment: + - DD_API_KEY=${TEST_DATADOG_API_KEY:?TEST_DATADOG_API_KEY required} + - DD_HOSTNAME=datadog-agent-vector + - DD_ENABLE_PAYLOADS_EVENTS=false + - DD_ENABLE_PAYLOADS_SERVICE_CHECKS=false + - DD_CONTAINER_EXCLUDE="name:.*" + volumes: + # The Agent config file + - ${PWD}/tests/data/e2e/datadog/logs/agent_vector.yaml:/etc/datadog-agent/datadog.yaml + # The custom logs check + - ${PWD}/tests/data/e2e/datadog/logs/logs.conf.d:/conf.d:ro + # The custom log to tail, created by the `log_generator` service + - log_path:/var/log/ + + # Receives log data from the `datadog-agent-vector` service and sends + # to the `fakeintake-vector` service. + vector: + depends_on: + - fakeintake-vector + build: + context: ${PWD} + # re-using the integration test runner image since it already has + # compiled vector on it. + image: ${CONFIG_VECTOR_IMAGE} + environment: + - FEATURES=e2e-tests-datadog + working_dir: /home/vector + network_mode: host + command: + - "/usr/bin/vector" + - "-vvv" + - "-c" + - "/home/vector/tests/data/e2e/datadog/logs/vector.toml" + volumes: + - ${PWD}:/home/vector + + # Receives log data from the `datadog-agent` service. Is queried by the test runner + # which does the validation of consistency with the other fakeintake service. + fakeintake-agent: + # TODO: temporarily pegging the image as latest results in failures + image: docker.io/datadog/fakeintake:v77a06f2b + + # Receives log data from the `datadog-agent-vector` service. Is queried by the test runner + # which does the validation of consistency with the other fakeintake service. 
+  fakeintake-vector:
+    # TODO: temporarily pegging the image as latest results in failures
+    image: docker.io/datadog/fakeintake:v77a06f2b
+
+networks:
+  default:
+    name: ${VECTOR_NETWORK}
+
+volumes:
+  log_path: {}
+  target: {}
diff --git a/scripts/e2e/datadog-logs/test.yaml b/scripts/e2e/datadog-logs/test.yaml
new file mode 100644
index 0000000000000..0610bb939ec2c
--- /dev/null
+++ b/scripts/e2e/datadog-logs/test.yaml
@@ -0,0 +1,33 @@
+features:
+- e2e-tests-datadog
+
+test: "e2e"
+
+test_filter: "datadog::logs::"
+
+runner:
+  env:
+    EXPECTED_LOG_EVENTS: '1000'
+    VECTOR_RECEIVE_PORT: '8081'
+    FAKE_INTAKE_AGENT_ENDPOINT: 'http://fakeintake-agent:80'
+    FAKE_INTAKE_VECTOR_ENDPOINT: 'http://fakeintake-vector:80'
+
+matrix:
+  # validate against the latest Agent nightly and also stable v6 and v7
+  agent_version: ['latest', '6', '7']
+
+
+# changes to these files/paths will invoke the integration test in CI
+# expressions are evaluated using https://github.com/micromatch/picomatch
+paths:
+- "src/common/datadog.rs"
+- "src/sources/datadog_agent/**"
+- "src/internal_events/datadog_*"
+- "src/sinks/datadog/logs/**"
+- "src/sinks/util/**"
+# NOTE: currently we need the prefix 'e2e' even though it looks redundant,
+# because the vdev code does not otherwise have a way to distinguish it from
+# the other `datadog-logs` int test.
+# But once GH issue 18829 is completed, this will become unnecessary.
+- "scripts/e2e/e2e-datadog-logs/**"
+- "tests/data/e2e/datadog/logs/**"
diff --git a/scripts/e2e/datadog-metrics/README.md b/scripts/e2e/datadog-metrics/README.md
new file mode 100644
index 0000000000000..07f0361f9ac6a
--- /dev/null
+++ b/scripts/e2e/datadog-metrics/README.md
@@ -0,0 +1,19 @@
+This e2e test covers the `datadog_agent` source and the
+`datadog_metrics` sink.
+
+An emitter compose service runs a Python DogStatsD program
+to generate various metric types for the test cases.
+
+Two Agent containers are spun up to receive the metrics, one
+for the Agent-only case and one for the Agent -> Vector case.
+
+In the Agent-only case, the Agent sends the metrics to `fakeintake`
+(another service) directly. This is the baseline.
+
+In the Agent-Vector case, the Agent sends the metrics to the vector
+service, and the `datadog_metrics` sink sends to a separate
+`fakeintake` service. This is the comparison case.
+
+The two sets of data should be shaped the same in terms of when
+the events were received and the content of the events, but the
+timestamps themselves are not guaranteed to align.
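Both READMEs above rely on the test runner querying fakeintake to compare the two capture paths. As a rough sketch of what that inspection looks like, fakeintake exposes a payloads route; the exact path, the `/api/v2/series` filter, and the fact that these hostnames resolve only inside the compose network are assumptions drawn from fakeintake's documented API, not from this diff.

```shell
# Hypothetical manual inspection of captured payloads, run from a container
# inside the compose network (the hosts below are compose service names).
# The /fakeintake/payloads route and endpoint filter come from fakeintake's
# documented API; treat the exact paths as assumptions.
curl -s "http://fakeintake-agent/fakeintake/payloads?endpoint=/api/v2/series"
curl -s "http://fakeintake-vector/fakeintake/payloads?endpoint=/api/v2/series"
```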
diff --git a/scripts/e2e/datadog-metrics/compose.yaml b/scripts/e2e/datadog-metrics/compose.yaml new file mode 100644 index 0000000000000..5942fda1011e9 --- /dev/null +++ b/scripts/e2e/datadog-metrics/compose.yaml @@ -0,0 +1,84 @@ +version: '3' + +services: + + # Emits metrics to the Agent only path + dogstatsd-client-agent: + build: ./dogstatsd_client + environment: + - STATSD_HOST=datadog-agent + depends_on: + - datadog-agent + + # Emits metrics to the Agent-Vector path + dogstatsd-client-vector: + build: ./dogstatsd_client + environment: + - STATSD_HOST=datadog-agent-vector + depends_on: + - datadog-agent-vector + + # Sends metric data received from the Emitter to the `fakeintake-agent` service + datadog-agent: + image: docker.io/datadog/agent:${CONFIG_AGENT_VERSION} + depends_on: + - fakeintake-agent + environment: + - DD_API_KEY=${TEST_DATADOG_API_KEY:?TEST_DATADOG_API_KEY required} + - DD_HOSTNAME=datadog-agent + volumes: + # The Agent config file + - ${PWD}/tests/data/e2e/datadog/metrics/agent_only.yaml:/etc/datadog-agent/datadog.yaml + + # Sends metric data received from the Emitter to the `vector` service + datadog-agent-vector: + image: docker.io/datadog/agent:${CONFIG_AGENT_VERSION} + depends_on: + - vector + environment: + - DD_API_KEY=${TEST_DATADOG_API_KEY:?TEST_DATADOG_API_KEY required} + - DD_HOSTNAME=datadog-agent-vector + volumes: + # The Agent config file + - ${PWD}/tests/data/e2e/datadog/metrics/agent_vector.yaml:/etc/datadog-agent/datadog.yaml + + # Receives metric data from the `datadog-agent-vector` service and sends + # to the `fakeintake-vector` service. + vector: + depends_on: + - fakeintake-vector + build: + context: ${PWD} + # re-using the integration test runner image since it already has + # compiled vector on it. + image: ${CONFIG_VECTOR_IMAGE} + environment: + - FEATURES=e2e-tests-datadog + working_dir: /home/vector + network_mode: host + command: + - "/usr/bin/vector" + - "-vvv" + - "-c" + - "/home/vector/tests/data/e2e/datadog/metrics/vector.toml" + volumes: + - ${PWD}:/home/vector + + # Receives metric data from the `datadog-agent` service. Is queried by the test runner + # which does the validation of consistency with the other fakeintake service. + fakeintake-agent: + # TODO: temporarily pegging the image as latest results in failures + image: docker.io/datadog/fakeintake:v77a06f2b + + # Receives metric data from the `datadog-agent-vector` service. Is queried by the test runner + # which does the validation of consistency with the other fakeintake service. + fakeintake-vector: + # TODO: temporarily pegging the image as latest results in failures + image: docker.io/datadog/fakeintake:v77a06f2b + +networks: + default: + name: ${VECTOR_NETWORK} + +volumes: + target: {} diff --git a/scripts/e2e/datadog-metrics/dogstatsd_client/Dockerfile b/scripts/e2e/datadog-metrics/dogstatsd_client/Dockerfile new file mode 100644 index 0000000000000..e8769539c8092 --- /dev/null +++ b/scripts/e2e/datadog-metrics/dogstatsd_client/Dockerfile @@ -0,0 +1,8 @@ +FROM python:3.7-alpine + +COPY . 
/app +WORKDIR /app + +RUN pip install -r requirements.txt + +CMD [ "python3", "./client.py"] diff --git a/scripts/e2e/datadog-metrics/dogstatsd_client/client.py b/scripts/e2e/datadog-metrics/dogstatsd_client/client.py new file mode 100644 index 0000000000000..b4a9b7390be6b --- /dev/null +++ b/scripts/e2e/datadog-metrics/dogstatsd_client/client.py @@ -0,0 +1,56 @@ +from datadog import initialize, statsd +import time +import os +import random + +STATSD_HOST = os.getenv('STATSD_HOST') + +print(f"initializing for {STATSD_HOST}") + +options = { + 'statsd_host':STATSD_HOST, + 'statsd_port':8125 +} + +initialize(**options) + +# Give the Agent time to actually spin up. +# The container may return "ready" but the +# Agent process is still booting. +time.sleep(10) + +hist_data = [ + 9, 5, 0, 2, 16, 17, 8, 16, 10, 13, + 15, 3, 9, 13, 11, 17, 5, 18, 14, 9, + 4, 16, 9, 17, 4, 11, 7, 14, 8, 12, + 10, 9, 11, 3, 18, 12, 17, 12, 3, 19, + 9, 11, 19, 9, 15, 2, 7, 10, 4, 14 +] + +dist_data = [ + 18, 5, 19, 0, 13, 12, 5, 12, 10, 4, + 1, 5, 7, 1, 14, 16, 20, 0, 8, 2, 4, + 20, 8, 4, 20, 6, 20, 3, 10, 11, 12, + 15, 2, 12, 5, 19, 19, 5, 9, 6, 18, + 19, 11, 6, 17, 5, 0, 1, 17, 17 +] + +for i in range(50): + print("rate") + statsd.increment('foo_metric.rate', tags=['a_tag:1']) + + print("gauge") + statsd.gauge('foo_metric.gauge', i, tags=["a_tag:2"]) + + print("set") + statsd.set('foo_metric.set', i, tags=["a_tag:3"]) + + print("histogram") + statsd.histogram('foo_metric.histogram', hist_data[i], tags=["a_tag:4"]) + + print("distribution") + statsd.distribution('foo_metric.distribution', dist_data[i], tags=["a_tag:5"]) + + statsd.flush() + time.sleep(0.01) + diff --git a/scripts/e2e/datadog-metrics/dogstatsd_client/requirements.txt b/scripts/e2e/datadog-metrics/dogstatsd_client/requirements.txt new file mode 100644 index 0000000000000..43023b823d991 --- /dev/null +++ b/scripts/e2e/datadog-metrics/dogstatsd_client/requirements.txt @@ -0,0 +1 @@ +datadog diff --git a/scripts/e2e/datadog-metrics/test.yaml b/scripts/e2e/datadog-metrics/test.yaml new file mode 100644 index 0000000000000..b9025d08ebf3d --- /dev/null +++ b/scripts/e2e/datadog-metrics/test.yaml @@ -0,0 +1,27 @@ +features: +- e2e-tests-datadog + +test: "e2e" + +test_filter: 'datadog::metrics::' + +runner: + env: + VECTOR_RECEIVE_PORT: '8081' + FAKE_INTAKE_AGENT_ENDPOINT: 'http://fakeintake-agent:80' + FAKE_INTAKE_VECTOR_ENDPOINT: 'http://fakeintake-vector:80' + +matrix: + # validate against the Agent latest nightly and also stable v6 and v7 + agent_version: ['latest', '6', '7'] + +# changes to these files/paths will invoke the integration test in CI +# expressions are evaluated using https://github.com/micromatch/picomatch +paths: +- "src/common/datadog.rs" +- "src/sources/datadog_agent/**" +- "src/internal_events/datadog_*" +- "src/sinks/datadog/metrics/**" +- "src/sinks/util/**" +- "scripts/integration/datadog-e2e/metrics/**" +- "tests/data/e2e/datadog/metrics/**" diff --git a/scripts/ensure-wasm-pack-installed.sh b/scripts/ensure-wasm-pack-installed.sh old mode 100644 new mode 100755 diff --git a/scripts/environment/bootstrap-ubuntu-20.04.sh b/scripts/environment/bootstrap-ubuntu-20.04.sh index be6182e6cacd5..7ce1541fdb348 100755 --- a/scripts/environment/bootstrap-ubuntu-20.04.sh +++ b/scripts/environment/bootstrap-ubuntu-20.04.sh @@ -20,8 +20,6 @@ apt-get install --yes \ apt-utils \ apt-transport-https -apt-get upgrade --yes - # Deps apt-get install --yes --no-install-recommends \ awscli \ @@ -41,7 +39,6 @@ apt-get install --yes --no-install-recommends 
\ llvm \ locales \ pkg-config \ - python3-pip \ rename \ rpm \ ruby-bundler \ diff --git a/scripts/environment/prepare.sh b/scripts/environment/prepare.sh index 3f73ec9b30844..c621871024eba 100755 --- a/scripts/environment/prepare.sh +++ b/scripts/environment/prepare.sh @@ -21,6 +21,15 @@ if ! dd-rust-license-tool --help >& /dev/null ; then rustup run stable cargo install dd-rust-license-tool --version 1.0.2 --force --locked fi +if [[ "$(wasm-pack --version)" != "wasm-pack 0.10.3" ]] ; then + echo "wasm-pack version 0.10.3 is not installed" + # We are using the version from git due to the bug: https://github.com/vectordotdev/vector/pull/16060#issuecomment-1428429602 + echo "running cargo install --git https://github.com/rustwasm/wasm-pack.git --rev e3582b7 wasm-pack" + cargo install --force --git https://github.com/rustwasm/wasm-pack.git --rev e3582b7 wasm-pack +else + echo "wasm-pack version 0.10.3 is installed already" +fi + # Currently fixing this to version 0.30 since version 0.31 has introduced # a change that means it only works with versions of node > 10. # https://github.com/igorshubovych/markdownlint-cli/issues/258 @@ -29,8 +38,5 @@ fi sudo npm -g install markdownlint-cli@0.30 sudo npm -g install @datadog/datadog-ci -pip3 install jsonschema==3.2.0 -pip3 install remarshal==0.11.2 - # Make sure our release build settings are present. . scripts/environment/release-flags.sh diff --git a/scripts/generate-release-cue.rb b/scripts/generate-release-cue.rb index c152b86d1e249..c308f44f8bb43 100755 --- a/scripts/generate-release-cue.rb +++ b/scripts/generate-release-cue.rb @@ -145,16 +145,13 @@ def generate_changelog!(new_version) contributors = Array.new if last.start_with?("authors: ") - authors_str = last[9..] - authors_str = authors_str.delete(" \t\r\n") - authors_arr = authors_str.split(",") - authors_arr.each { |author| contributors.push(author) } + contributors = last[9..].split(" ").map(&:strip) # remove that line from the description lines.pop() end - description = lines.join("") + description = lines.join("").strip() # get the PR number of the changelog fragment. # the fragment type is not used in the Vector release currently. 
@@ -195,7 +192,7 @@ def generate_changelog!(new_version) entry = "{\n" + "type: #{type.to_json}\n" + "description: \"\"\"\n" + - "#{description}" + + "#{description}\n" + "\"\"\"\n" if contributors.length() > 0 diff --git a/scripts/integration/databend/test.yaml b/scripts/integration/databend/test.yaml index f84a979cb4b11..545813974eef5 100644 --- a/scripts/integration/databend/test.yaml +++ b/scripts/integration/databend/test.yaml @@ -5,9 +5,7 @@ test_filter: '::databend::' runner: env: - DATABEND_ENDPOINT: http://databend:8000 - DATABEND_USER: vector - DATABEND_PASSWORD: vector + DATABEND_ENDPOINT: databend://vector:vector@databend:8000?sslmode=disable&presign=detect matrix: version: ['latest'] diff --git a/scripts/integration/datadog-logs/test.yaml b/scripts/integration/datadog-logs/test.yaml index 30a99f8a87ae7..7937db5d87158 100644 --- a/scripts/integration/datadog-logs/test.yaml +++ b/scripts/integration/datadog-logs/test.yaml @@ -1,7 +1,7 @@ features: - datadog-logs-integration-tests -test_filter: '::datadog::logs::' +test_filter: '::datadog::logs::integration_tests::' runner: env: diff --git a/scripts/integration/mqtt/compose.yaml b/scripts/integration/mqtt/compose.yaml new file mode 100644 index 0000000000000..ab44771709155 --- /dev/null +++ b/scripts/integration/mqtt/compose.yaml @@ -0,0 +1,11 @@ +version: '3' + +services: + emqx: + image: docker.io/emqx:${CONFIG_VERSION} + ports: + - 1883:1883 + +networks: + default: + name: ${VECTOR_NETWORK} diff --git a/scripts/integration/mqtt/test.yaml b/scripts/integration/mqtt/test.yaml new file mode 100644 index 0000000000000..607da45eeb5e1 --- /dev/null +++ b/scripts/integration/mqtt/test.yaml @@ -0,0 +1,12 @@ +features: +- mqtt-integration-tests + +test_filter: '::mqtt::' + +matrix: + version: ['5.0.15'] + +paths: +- "src/internal_events/mqtt.rs" +- "src/sinks/mqtt/**" +- "src/sinks/util/**" diff --git a/src/api/schema/metrics/mod.rs b/src/api/schema/metrics/mod.rs index 1a273dc5e9bee..08baa7cc12fd7 100644 --- a/src/api/schema/metrics/mod.rs +++ b/src/api/schema/metrics/mod.rs @@ -15,7 +15,7 @@ mod uptime; mod host; pub use allocated_bytes::{AllocatedBytes, ComponentAllocatedBytes}; -use async_graphql::{Interface, Object, Subscription}; +use async_graphql::{Interface, Subscription}; use chrono::{DateTime, Utc}; pub use errors::{ComponentErrorsTotal, ErrorsTotal}; pub use filter::*; @@ -45,9 +45,9 @@ pub enum MetricType { #[derive(Default)] pub struct MetricsQuery; -#[Object] +#[cfg(feature = "sources-host_metrics")] +#[async_graphql::Object] impl MetricsQuery { - #[cfg(feature = "sources-host_metrics")] /// Vector host metrics async fn host_metrics(&self) -> host::HostMetrics { host::HostMetrics::new() diff --git a/src/api/schema/mod.rs b/src/api/schema/mod.rs index d3e664f000d94..58c22a03b687f 100644 --- a/src/api/schema/mod.rs +++ b/src/api/schema/mod.rs @@ -13,7 +13,7 @@ use async_graphql::{EmptyMutation, MergedObject, MergedSubscription, Schema, Sch pub struct Query( health::HealthQuery, components::ComponentsQuery, - metrics::MetricsQuery, + #[cfg(feature = "sources-host_metrics")] metrics::MetricsQuery, meta::MetaQuery, ); diff --git a/src/app.rs b/src/app.rs index 8cb968145b1d6..3ec8d26af1412 100644 --- a/src/app.rs +++ b/src/app.rs @@ -7,7 +7,7 @@ use futures::StreamExt; use futures_util::future::BoxFuture; use once_cell::race::OnceNonZeroUsize; use tokio::runtime::{self, Runtime}; -use tokio::sync::broadcast::error::RecvError; +use tokio::sync::{broadcast::error::RecvError, MutexGuard}; use 
tokio_stream::wrappers::UnboundedReceiverStream; #[cfg(feature = "enterprise")] @@ -22,7 +22,7 @@ use crate::{ cli::{handle_config_errors, LogFormat, Opts, RootOpts}, config::{self, Config, ConfigPath}, heartbeat, - internal_events::{VectorQuit, VectorStarted, VectorStopped}, + internal_events::{VectorConfigLoadError, VectorQuit, VectorStarted, VectorStopped}, signal::{SignalHandler, SignalPair, SignalRx, SignalTo}, topology::{ ReloadOutcome, RunningTopology, SharedTopologyController, ShutdownErrorReceiver, @@ -222,7 +222,7 @@ impl Application { let config = runtime.block_on(ApplicationConfig::from_opts( &opts.root, &mut signals.handler, - extra_context.clone(), + extra_context, ))?; Ok(( @@ -340,12 +340,8 @@ async fn handle_signal( ) -> Option { match signal { Ok(SignalTo::ReloadFromConfigBuilder(config_builder)) => { - let mut topology_controller = topology_controller.lock().await; - let new_config = config_builder.build().map_err(handle_config_errors).ok(); - match topology_controller.reload(new_config).await { - ReloadOutcome::FatalError(error) => Some(SignalTo::Shutdown(Some(error))), - _ => None, - } + let topology_controller = topology_controller.lock().await; + reload_config_from_result(topology_controller, config_builder.build()).await } Ok(SignalTo::ReloadFromDisk) => { let mut topology_controller = topology_controller.lock().await; @@ -361,14 +357,9 @@ async fn handle_signal( signal_handler, allow_empty_config, ) - .await - .map_err(handle_config_errors) - .ok(); + .await; - match topology_controller.reload(new_config).await { - ReloadOutcome::FatalError(error) => Some(SignalTo::Shutdown(Some(error))), - _ => None, - } + reload_config_from_result(topology_controller, new_config).await } Err(RecvError::Lagged(amt)) => { warn!("Overflow, dropped {} signals.", amt); @@ -379,6 +370,23 @@ async fn handle_signal( } } +async fn reload_config_from_result( + mut topology_controller: MutexGuard<'_, TopologyController>, + config: Result>, +) -> Option { + match config { + Ok(new_config) => match topology_controller.reload(new_config).await { + ReloadOutcome::FatalError(error) => Some(SignalTo::Shutdown(Some(error))), + _ => None, + }, + Err(errors) => { + handle_config_errors(errors); + emit!(VectorConfigLoadError); + None + } + } +} + pub struct FinishedApplication { pub signal: SignalTo, pub signal_rx: SignalRx, diff --git a/src/aws/auth.rs b/src/aws/auth.rs index d71b9289633e4..c0e65b5dc85ba 100644 --- a/src/aws/auth.rs +++ b/src/aws/auth.rs @@ -13,14 +13,12 @@ use aws_config::{ sts::AssumeRoleProviderBuilder, }; use aws_credential_types::{provider::SharedCredentialsProvider, Credentials}; -use aws_smithy_runtime::client::http::hyper_014::HyperClientBuilder; +use aws_smithy_async::time::SystemTimeSource; use aws_smithy_runtime_api::client::identity::SharedIdentityCache; -use aws_types::region::Region; +use aws_types::{region::Region, SdkConfig}; use serde_with::serde_as; +use vector_lib::configurable::configurable_component; use vector_lib::{config::proxy::ProxyConfig, sensitive_string::SensitiveString, tls::TlsConfig}; -use vector_lib::{configurable::configurable_component, tls::MaybeTlsSettings}; - -use crate::http::{build_proxy_connector, build_tls_connector}; // matches default load timeout from the SDK as of 0.10.1, but lets us confidently document the // default rather than relying on the SDK default to not change @@ -208,6 +206,33 @@ impl AwsAuthentication { } } + /// Create the AssumeRoleProviderBuilder, ensuring we create the HTTP client with + /// the correct proxy and TLS 
options. + fn assume_role_provider_builder( + proxy: &ProxyConfig, + tls_options: &Option, + region: &Region, + assume_role: &str, + external_id: Option<&str>, + ) -> crate::Result { + let connector = super::connector(proxy, tls_options)?; + let config = SdkConfig::builder() + .http_client(connector) + .region(region.clone()) + .time_source(SystemTimeSource::new()) + .build(); + + let mut builder = AssumeRoleProviderBuilder::new(assume_role) + .region(region.clone()) + .configure(&config); + + if let Some(external_id) = external_id { + builder = builder.external_id(external_id) + } + + Ok(builder) + } + /// Returns the provider for the credentials based on the authentication mechanism chosen. pub async fn credentials_provider( &self, @@ -230,12 +255,13 @@ impl AwsAuthentication { )); if let Some(assume_role) = assume_role { let auth_region = region.clone().map(Region::new).unwrap_or(service_region); - let mut builder = - AssumeRoleProviderBuilder::new(assume_role).region(auth_region); - - if let Some(external_id) = external_id { - builder = builder.external_id(external_id) - } + let builder = Self::assume_role_provider_builder( + proxy, + tls_options, + &auth_region, + assume_role, + external_id.as_deref(), + )?; let provider = builder.build_from_provider(provider).await; @@ -247,14 +273,20 @@ impl AwsAuthentication { credentials_file, profile, } => { + let connector = super::connector(proxy, tls_options)?; + // The SDK uses the default profile out of the box, but doesn't provide an optional // type in the builder. We can just hardcode it so that everything works. let profile_files = ProfileFiles::builder() .with_file(ProfileFileKind::Credentials, credentials_file) .build(); + + let provider_config = ProviderConfig::empty().with_http_client(connector); + let profile_provider = ProfileFileCredentialsProvider::builder() .profile_files(profile_files) .profile_name(profile) + .configure(&provider_config) .build(); Ok(SharedCredentialsProvider::new(profile_provider)) } @@ -266,12 +298,13 @@ impl AwsAuthentication { .. 
} => { let auth_region = region.clone().map(Region::new).unwrap_or(service_region); - let mut builder = - AssumeRoleProviderBuilder::new(assume_role).region(auth_region.clone()); - - if let Some(external_id) = external_id { - builder = builder.external_id(external_id) - } + let builder = Self::assume_role_provider_builder( + proxy, + tls_options, + &auth_region, + assume_role, + external_id.as_deref(), + )?; let provider = builder .build_from_provider( @@ -313,14 +346,7 @@ async fn default_credentials_provider( tls_options: &Option, imds: ImdsAuthentication, ) -> crate::Result { - let tls_settings = MaybeTlsSettings::tls_client(tls_options)?; - let connector = if proxy.enabled { - let proxy = build_proxy_connector(tls_settings, proxy)?; - HyperClientBuilder::new().build(proxy) - } else { - let tls_connector = build_tls_connector(tls_settings)?; - HyperClientBuilder::new().build(tls_connector) - }; + let connector = super::connector(proxy, tls_options)?; let provider_config = ProviderConfig::empty() .with_region(Some(region.clone())) diff --git a/src/aws/mod.rs b/src/aws/mod.rs index a3bb665ed6d68..2f4ea2678a637 100644 --- a/src/aws/mod.rs +++ b/src/aws/mod.rs @@ -21,6 +21,7 @@ use aws_smithy_runtime_api::client::{ runtime_components::RuntimeComponents, }; use aws_smithy_types::body::SdkBody; +use aws_types::sdk_config::SharedHttpClient; use bytes::Bytes; use futures_util::FutureExt; use http::HeaderMap; @@ -94,6 +95,24 @@ fn check_response(res: &HttpResponse) -> bool { || (status.is_client_error() && re.is_match(response_body.as_ref())) } +/// Creates the http connector that has been configured to use the given proxy and TLS settings. +/// All AWS requests should use this connector as the aws crates by default use RustTLS which we +/// have turned off as we want to consistently use openssl. +fn connector( + proxy: &ProxyConfig, + tls_options: &Option, +) -> crate::Result { + let tls_settings = MaybeTlsSettings::tls_client(tls_options)?; + + if proxy.enabled { + let proxy = build_proxy_connector(tls_settings, proxy)?; + Ok(HyperClientBuilder::new().build(proxy)) + } else { + let tls_connector = build_tls_connector(tls_settings)?; + Ok(HyperClientBuilder::new().build(tls_connector)) + } +} + /// Implement for each AWS service to create the appropriate AWS sdk client. pub trait ClientBuilder { /// The type of the client in the SDK. @@ -103,10 +122,32 @@ pub trait ClientBuilder { fn build(config: &SdkConfig) -> Self::Client; } -async fn resolve_region(region: Option) -> crate::Result { +fn region_provider( + proxy: &ProxyConfig, + tls_options: &Option, +) -> crate::Result { + let config = aws_config::provider_config::ProviderConfig::default() + .with_http_client(connector(proxy, tls_options)?); + + Ok(aws_config::meta::region::RegionProviderChain::first_try( + aws_config::environment::EnvironmentVariableRegionProvider::new(), + ) + .or_else(aws_config::profile::ProfileFileRegionProvider::builder().build()) + .or_else( + aws_config::imds::region::ImdsRegionProvider::builder() + .configure(&config) + .build(), + )) +} + +async fn resolve_region( + proxy: &ProxyConfig, + tls_options: &Option, + region: Option, +) -> crate::Result { match region { Some(region) => Ok(region), - None => aws_config::default_provider::region::default_provider() + None => region_provider(proxy, tls_options)? 
.region() .await .ok_or_else(|| { @@ -140,20 +181,12 @@ pub async fn create_client_and_region( // The default credentials chains will look for a region if not given but we'd like to // error up front if later SDK calls will fail due to lack of region configuration - let region = resolve_region(region).await?; + let region = resolve_region(proxy, tls_options, region).await?; let provider_config = aws_config::provider_config::ProviderConfig::empty().with_region(Some(region.clone())); - let tls_settings = MaybeTlsSettings::tls_client(tls_options)?; - - let connector = if proxy.enabled { - let proxy = build_proxy_connector(tls_settings, proxy)?; - HyperClientBuilder::new().build(proxy) - } else { - let tls_connector = build_tls_connector(tls_settings)?; - HyperClientBuilder::new().build(tls_connector) - }; + let connector = connector(proxy, tls_options)?; // Create a custom http connector that will emit the required metrics for us. let connector = AwsHttpClient { diff --git a/src/cli.rs b/src/cli.rs index 441cce9f720df..c1092d3cab996 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -213,11 +213,20 @@ pub struct RootOpts { #[arg(long, env = "VECTOR_ALLOW_EMPTY_CONFIG", default_value = "false")] pub allow_empty_config: bool, - /// Turn on strict mode for environment variable interpolation. When set, interpolation of a - /// missing environment variable in configuration files will cause an error instead of a - /// warning, which will result in a failure to load any such configuration file. This defaults - /// to false, but that default is deprecated and will be changed to strict in future versions. - #[arg(long, env = "VECTOR_STRICT_ENV_VARS", default_value = "false")] + /// Turn on strict mode for environment variable interpolation. When set, interpolation of + /// a missing environment variable in configuration files will cause an error instead of + /// a warning, which will result in a failure to load any such configuration file. This option + /// is deprecated and will be removed in a future version to remove the ability to downgrade + /// missing environment variables to warnings. 
+ #[arg( + long, + env = "VECTOR_STRICT_ENV_VARS", + default_value = "true", + default_missing_value = "true", + num_args = 0..=1, + require_equals = true, + action = ArgAction::Set + )] pub strict_env_vars: bool, } diff --git a/src/codecs/encoding/config.rs b/src/codecs/encoding/config.rs index d16ec78b627e4..4a124f8987060 100644 --- a/src/codecs/encoding/config.rs +++ b/src/codecs/encoding/config.rs @@ -105,7 +105,7 @@ impl EncodingConfigWithFraming { SinkType::MessageBased => CharacterDelimitedEncoder::new(b',').into(), }, (None, Serializer::Avro(_) | Serializer::Native(_)) => { - LengthDelimitedEncoder::new().into() + LengthDelimitedEncoder::default().into() } (None, Serializer::Gelf(_)) => { // Graylog/GELF always uses null byte delimiter on TCP, see @@ -115,7 +115,7 @@ impl EncodingConfigWithFraming { (None, Serializer::Protobuf(_)) => { // Protobuf uses length-delimited messages, see: // https://developers.google.com/protocol-buffers/docs/techniques#streaming - LengthDelimitedEncoder::new().into() + LengthDelimitedEncoder::default().into() } ( None, diff --git a/src/common/datadog.rs b/src/common/datadog.rs index 7f968a1433dfe..cd5dfb46700e0 100644 --- a/src/common/datadog.rs +++ b/src/common/datadog.rs @@ -4,44 +4,77 @@ #![allow(dead_code)] #![allow(unreachable_pub)] use serde::{Deserialize, Serialize}; -use vector_lib::{event::DatadogMetricOriginMetadata, sensitive_string::SensitiveString}; +use vector_lib::{ + event::DatadogMetricOriginMetadata, schema::meaning, sensitive_string::SensitiveString, +}; pub(crate) const DD_US_SITE: &str = "datadoghq.com"; pub(crate) const DD_EU_SITE: &str = "datadoghq.eu"; +/// The datadog tags event path. +pub const DDTAGS: &str = "ddtags"; + +/// Mapping of the semantic meaning of well known Datadog reserved attributes +/// to the field name that Datadog intake expects. 
+// https://docs.datadoghq.com/logs/log_configuration/attributes_naming_convention/?s=severity#reserved-attributes +pub const DD_RESERVED_SEMANTIC_ATTRS: [(&str, &str); 6] = [ + (meaning::SEVERITY, "status"), // status is intentionally semantically defined as severity + (meaning::TIMESTAMP, "timestamp"), + (meaning::HOST, "hostname"), + (meaning::SERVICE, "service"), + (meaning::SOURCE, "ddsource"), + (meaning::TAGS, DDTAGS), +]; + +/// DatadogSeriesMetric #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] -pub(crate) struct DatadogSeriesMetric { - pub(crate) metric: String, - pub(crate) r#type: DatadogMetricType, - pub(crate) interval: Option, - pub(crate) points: Vec>, - pub(crate) tags: Option>, +pub struct DatadogSeriesMetric { + /// metric + pub metric: String, + /// metric type + pub r#type: DatadogMetricType, + /// interval + pub interval: Option, + /// points + pub points: Vec>, + /// tags + pub tags: Option>, + /// host #[serde(skip_serializing_if = "Option::is_none")] - pub(crate) host: Option, + pub host: Option, + /// source_type_name #[serde(skip_serializing_if = "Option::is_none")] - pub(crate) source_type_name: Option, + pub source_type_name: Option, + /// device #[serde(skip_serializing_if = "Option::is_none")] - pub(crate) device: Option, + pub device: Option, + /// metadata #[serde(skip_serializing_if = "Option::is_none")] - pub(crate) metadata: Option, + pub metadata: Option, } +/// Datadog series metric metadata #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] -pub(crate) struct DatadogSeriesMetricMetadata { +pub struct DatadogSeriesMetricMetadata { #[serde(skip_serializing_if = "Option::is_none")] pub(crate) origin: Option, } +/// Datadog Metric Type #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] #[serde(rename_all = "snake_case")] -pub(crate) enum DatadogMetricType { +pub enum DatadogMetricType { + /// Gauge Gauge, + /// Count Count, + /// Rate Rate, } +/// Datadog Point #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] -pub(crate) struct DatadogPoint(pub(crate) i64, pub(crate) T); +pub struct DatadogPoint(pub i64, pub T); /// Gets the base API endpoint to use for any calls to Datadog. /// diff --git a/src/components/validation/mod.rs b/src/components/validation/mod.rs index 37f5d6668cf18..90fc349f0a1ef 100644 --- a/src/components/validation/mod.rs +++ b/src/components/validation/mod.rs @@ -6,14 +6,35 @@ mod test_case; pub mod util; mod validators; +use vector_lib::config::LogNamespace; + use crate::config::{BoxedSink, BoxedSource, BoxedTransform}; +/// For components implementing `ValidatableComponent` +pub mod prelude { + pub use super::ComponentTestCaseConfig; + pub use super::ExternalResource; + pub use super::HttpResourceConfig; + pub use super::ResourceDirection; + pub use super::ValidatableComponent; + pub use super::ValidationConfiguration; + pub use crate::register_validatable_component; +} + pub use self::resources::*; #[cfg(feature = "component-validation-runner")] pub use self::runner::*; pub use self::test_case::{TestCase, TestCaseExpectation}; pub use self::validators::*; +pub mod component_names { + pub const TEST_SOURCE_NAME: &str = "test_source"; + pub const TEST_SINK_NAME: &str = "test_sink"; + pub const TEST_TRANSFORM_NAME: &str = "test_transform"; + pub const TEST_INPUT_SOURCE_NAME: &str = "input_source"; + pub const TEST_OUTPUT_SINK_NAME: &str = "output_sink"; +} + /// Component types that can be validated. 
// TODO: We should centralize this in `vector-common` or something, where both this code and the // configuration schema stuff (namely the proc macros that use this) can share it. @@ -49,6 +70,51 @@ pub enum ComponentConfiguration { Sink(BoxedSink), } +/// Component configuration for a test case. +#[derive(Clone)] +pub struct ComponentTestCaseConfig { + config: ComponentConfiguration, + /// If specified, this name must match the `config_name` field of at least one of the test case events. + test_case: Option, + external_resource: Option, +} + +impl ComponentTestCaseConfig { + pub fn from_source>( + config: C, + test_case: Option, + external_resource: Option, + ) -> Self { + Self { + config: ComponentConfiguration::Source(config.into()), + test_case, + external_resource, + } + } + pub fn from_transform>( + config: C, + test_case: Option, + external_resource: Option, + ) -> Self { + Self { + config: ComponentConfiguration::Transform(config.into()), + test_case, + external_resource, + } + } + pub fn from_sink>( + config: C, + test_case: Option, + external_resource: Option, + ) -> Self { + Self { + config: ComponentConfiguration::Sink(config.into()), + test_case, + external_resource, + } + } +} + /// Configuration for validating a component. /// /// This type encompasses all of the required information for configuring and validating a @@ -58,46 +124,52 @@ pub enum ComponentConfiguration { pub struct ValidationConfiguration { component_name: &'static str, component_type: ComponentType, - component_configuration: ComponentConfiguration, - external_resource: Option, + /// There may be only one `ComponentTestCaseConfig` necessary to execute all test cases, but some cases + /// require more advanced configuration in order to hit the code path desired. + component_configurations: Vec, + log_namespace: LogNamespace, } impl ValidationConfiguration { /// Creates a new `ValidationConfiguration` for a source. - pub fn from_source>( + pub fn from_source( component_name: &'static str, - config: C, - external_resource: Option, + log_namespace: LogNamespace, + component_configurations: Vec, ) -> Self { Self { component_name, component_type: ComponentType::Source, - component_configuration: ComponentConfiguration::Source(config.into()), - external_resource, + component_configurations, + log_namespace, } } /// Creates a new `ValidationConfiguration` for a transform. - pub fn from_transform(component_name: &'static str, config: impl Into) -> Self { + pub fn from_transform( + component_name: &'static str, + log_namespace: LogNamespace, + component_configurations: Vec, + ) -> Self { Self { component_name, component_type: ComponentType::Transform, - component_configuration: ComponentConfiguration::Transform(config.into()), - external_resource: None, + component_configurations, + log_namespace, } } /// Creates a new `ValidationConfiguration` for a sink. - pub fn from_sink>( + pub fn from_sink( component_name: &'static str, - config: C, - external_resource: Option, + log_namespace: LogNamespace, + component_configurations: Vec, ) -> Self { Self { component_name, component_type: ComponentType::Sink, - component_configuration: ComponentConfiguration::Sink(config.into()), - external_resource, + component_configurations, + log_namespace, } } @@ -112,13 +184,36 @@ impl ValidationConfiguration { } /// Gets the configuration of the component. 
- pub fn component_configuration(&self) -> ComponentConfiguration { - self.component_configuration.clone() + pub fn component_configurations(&self) -> Vec { + self.component_configurations.clone() + } + + /// Gets the LogNamespace that the component is using. + pub const fn log_namespace(&self) -> LogNamespace { + self.log_namespace + } + + fn get_comp_test_case(&self, test_case: Option<&String>) -> Option { + let empty = String::from(""); + let test_case = test_case.unwrap_or(&empty); + self.component_configurations + .clone() + .into_iter() + .find(|c| c.test_case.as_ref().unwrap_or(&String::from("")) == test_case) + } + + /// Gets the configuration of the component. + pub fn component_configuration_for_test_case( + &self, + test_case: Option<&String>, + ) -> Option { + self.get_comp_test_case(test_case).map(|c| c.config) } /// Gets the external resource definition for validating the component, if any. - pub fn external_resource(&self) -> Option { - self.external_resource.clone() + pub fn external_resource(&self, test_case: Option<&String>) -> Option { + self.get_comp_test_case(test_case) + .and_then(|c| c.external_resource) } } @@ -173,191 +268,192 @@ macro_rules! register_validatable_component { /// Input and Output runners populate this structure as they send and receive events. /// The structure is passed into the validator to use as the expected values for the /// metrics that the components under test actually output. -#[derive(Default)] +#[derive(Default, Debug)] pub struct RunnerMetrics { pub received_events_total: u64, pub received_event_bytes_total: u64, pub received_bytes_total: u64, - pub sent_bytes_total: u64, // a reciprocal for received_bytes_total + pub sent_bytes_total: u64, pub sent_event_bytes_total: u64, pub sent_events_total: u64, pub errors_total: u64, + pub discarded_events_total: u64, } -#[cfg(all(test, feature = "component-validation-tests"))] -mod tests { - use std::{ - collections::VecDeque, - path::{Component, Path, PathBuf}, - }; - - use test_generator::test_resources; - - use crate::components::validation::{Runner, StandardValidators}; - use crate::extra_context::ExtraContext; - - use super::{ComponentType, ValidatableComponentDescription, ValidationConfiguration}; +#[cfg(feature = "component-validation-runner")] +fn run_validation(configuration: ValidationConfiguration, test_case_data_path: std::path::PathBuf) { + let component_name = configuration.component_name(); + info!( + "Running validation for component '{}' (type: {:?})...", + component_name, + configuration.component_type() + ); + + let rt = tokio::runtime::Builder::new_current_thread() + .enable_all() + .build() + .unwrap(); + + rt.block_on(async { + let mut runner = Runner::from_configuration( + configuration, + test_case_data_path, + crate::extra_context::ExtraContext::default(), + ); + runner.add_validator(StandardValidators::ComponentSpec); + + match runner.run_validation().await { + Ok(test_case_results) => { + let mut details = Vec::new(); + let mut had_failures = false; + + for test_case_result in test_case_results.into_iter() { + for validator_result in test_case_result.validator_results() { + match validator_result { + Ok(success) => { + if success.is_empty() { + details.push(format!( + " test case '{}': passed", + test_case_result.test_name() + )); + } else { + let formatted = success + .iter() + .map(|s| format!(" - {}\n", s)) + .collect::>(); + + details.push(format!( + " test case '{}': passed\n{}", + test_case_result.test_name(), + formatted.join("") + )); + } + } + Err(failure) => { 
+ had_failures = true; + + if failure.is_empty() { + details.push(format!( + " test case '{}': failed", + test_case_result.test_name() + )); + } else { + let formatted = failure + .iter() + .map(|s| format!(" - {}\n", s)) + .collect::>(); + + details.push(format!( + " test case '{}': failed\n{}", + test_case_result.test_name(), + formatted.join("") + )); + } + } + } + } + } - #[test_resources("tests/validation/components/**/*.yaml")] - fn validate_component(test_case_data_path: &str) { - let test_case_data_path = PathBuf::from(test_case_data_path.to_string()); - if !test_case_data_path.exists() { - panic!("Component validation test invoked with path to test case data that could not be found: {}", test_case_data_path.to_string_lossy()); + if had_failures { + panic!( + "Failed to validate component '{}':\n{}", + component_name, + details.join("") + ); + } else { + info!( + "Successfully validated component '{}':\n{}", + component_name, + details.join("") + ); + } + } + Err(e) => panic!( + "Failed to complete validation run for component '{}': {}", + component_name, e + ), } + }); +} - let configuration = get_validation_configuration_from_test_case_path(&test_case_data_path) - .expect("Failed to find validation configuration from given test case data path."); - - run_validation(configuration, test_case_data_path); +#[cfg(feature = "component-validation-runner")] +fn get_validation_configuration_from_test_case_path( + test_case_data_path: &std::path::Path, +) -> Result { + // The test case data path should follow a fixed structure where the 2nd to last segment is + // the component type, and the last segment -- when the extension is removed -- is the + // component name. + let mut path_segments = test_case_data_path + .components() + .filter_map(|c| match c { + std::path::Component::Normal(path) => Some(std::path::Path::new(path)), + _ => None, + }) + .collect::>(); + if path_segments.len() <= 2 { + return Err(format!( + "Test case data path contained {} normal path segment(s), expected at least 2 or more.", + path_segments.len() + )); } - fn get_validation_configuration_from_test_case_path( - test_case_data_path: &Path, - ) -> Result { - // The test case data path should follow a fixed structure where the 2nd to last segment is - // the component type, and the last segment -- when the extension is removed -- is the - // component name. 
- let mut path_segments = test_case_data_path - .components() - .filter_map(|c| match c { - Component::Normal(path) => Some(Path::new(path)), - _ => None, - }) - .collect::>(); - if path_segments.len() <= 2 { - return Err(format!("Test case data path contained {} normal path segment(s), expected at least 2 or more.", path_segments.len())); - } + let component_name = path_segments + .pop_back() + .and_then(|segment| segment.file_stem().map(|s| s.to_string_lossy().to_string())) + .ok_or(format!( + "Test case data path '{}' contained unexpected or invalid filename.", + test_case_data_path.as_os_str().to_string_lossy() + ))?; + + let component_type = path_segments + .pop_back() + .map(|segment| { + segment + .as_os_str() + .to_string_lossy() + .to_string() + .to_ascii_lowercase() + }) + .and_then(|segment| match segment.as_str() { + "sources" => Some(ComponentType::Source), + "transforms" => Some(ComponentType::Transform), + "sinks" => Some(ComponentType::Sink), + _ => None, + }) + .ok_or(format!( + "Test case data path '{}' contained unexpected or invalid component type.", + test_case_data_path.as_os_str().to_string_lossy() + ))?; + + // Now that we've theoretically got the component type and component name, try to query the + // validatable component descriptions to find it. + ValidatableComponentDescription::query(&component_name, component_type).ok_or(format!( + "No validation configuration for component '{}' with component type '{}'.", + component_name, + component_type.as_str() + )) +} - let component_name = path_segments - .pop_back() - .and_then(|segment| segment.file_stem().map(|s| s.to_string_lossy().to_string())) - .ok_or(format!( - "Test case data path '{}' contained unexpected or invalid filename.", - test_case_data_path.as_os_str().to_string_lossy() - ))?; - - let component_type = path_segments - .pop_back() - .map(|segment| { - segment - .as_os_str() - .to_string_lossy() - .to_string() - .to_ascii_lowercase() - }) - .and_then(|segment| match segment.as_str() { - "sources" => Some(ComponentType::Source), - "transforms" => Some(ComponentType::Transform), - "sinks" => Some(ComponentType::Sink), - _ => None, - }) - .ok_or(format!( - "Test case data path '{}' contained unexpected or invalid component type.", - test_case_data_path.as_os_str().to_string_lossy() - ))?; - - // Now that we've theoretically got the component type and component name, try to query the - // validatable component descriptions to find it. 
- ValidatableComponentDescription::query(&component_name, component_type).ok_or(format!( - "No validation configuration for component '{}' with component type '{}'.", - component_name, - component_type.as_str() - )) +#[cfg(feature = "component-validation-runner")] +pub fn validate_component(test_case_data_path: std::path::PathBuf) { + if !test_case_data_path.exists() { + panic!("Component validation test invoked with path to test case data that could not be found: {}", test_case_data_path.to_string_lossy()); } - fn run_validation(configuration: ValidationConfiguration, test_case_data_path: PathBuf) { - crate::test_util::trace_init(); + let configuration = get_validation_configuration_from_test_case_path(&test_case_data_path) + .expect("Failed to find validation configuration from given test case data path."); - let component_name = configuration.component_name(); - info!( - "Running validation for component '{}' (type: {:?})...", - component_name, - configuration.component_type() - ); + run_validation(configuration, test_case_data_path); +} - let rt = tokio::runtime::Builder::new_current_thread() - .enable_all() - .build() - .unwrap(); - rt.block_on(async { - let mut runner = Runner::from_configuration( - configuration, - test_case_data_path, - ExtraContext::default(), - ); - runner.add_validator(StandardValidators::ComponentSpec); - - match runner.run_validation().await { - Ok(test_case_results) => { - let mut details = Vec::new(); - let mut had_failures = false; - - for test_case_result in test_case_results.into_iter() { - for validator_result in test_case_result.validator_results() { - match validator_result { - Ok(success) => { - if success.is_empty() { - details.push(format!( - " test case '{}': passed", - test_case_result.test_name() - )); - } else { - let formatted = success - .iter() - .map(|s| format!(" - {}\n", s)) - .collect::>(); - - details.push(format!( - " test case '{}': passed\n{}", - test_case_result.test_name(), - formatted.join("") - )); - } - } - Err(failure) => { - had_failures = true; - - if failure.is_empty() { - details.push(format!( - " test case '{}': failed", - test_case_result.test_name() - )); - } else { - let formatted = failure - .iter() - .map(|s| format!(" - {}\n", s)) - .collect::>(); - - details.push(format!( - " test case '{}': failed\n{}", - test_case_result.test_name(), - formatted.join("") - )); - } - } - } - } - } +#[cfg(all(test, feature = "component-validation-tests"))] +mod tests { + #[test_generator::test_resources("tests/validation/components/**/*.yaml")] + pub fn validate_component(test_case_data_path: &str) { + crate::test_util::trace_init(); - if had_failures { - panic!( - "Failed to validate component '{}':\n{}", - component_name, - details.join("") - ); - } else { - info!( - "Successfully validated component '{}':\n{}", - component_name, - details.join("") - ); - } - } - Err(e) => panic!( - "Failed to complete validation run for component '{}': {}", - component_name, e - ), - } - }); + let test_case_data_path = std::path::PathBuf::from(test_case_data_path.to_string()); + + super::validate_component(test_case_data_path); } } diff --git a/src/components/validation/resources/event.rs b/src/components/validation/resources/event.rs index 4bce09acd4ce5..fa4564a6076c0 100644 --- a/src/components/validation/resources/event.rs +++ b/src/components/validation/resources/event.rs @@ -1,5 +1,8 @@ +use std::collections::HashMap; + use bytes::BytesMut; use serde::Deserialize; +use serde_json::Value; use snafu::Snafu; use tokio_util::codec::Encoder as _; 
@@ -27,14 +30,21 @@ pub enum RawTestEvent {
     ///
     /// For transforms and sinks, generally, the only way to cause an error is if the event itself
     /// is malformed in some way, which can be achieved without this test event variant.
-    Modified { modified: bool, event: EventData },
+    AlternateEncoder { fail_encoding_of: EventData },
+
+    /// The event will be rejected by the external resource.
+    ResourceReject {
+        external_resource_rejects: EventData,
+    },
 }
 
 #[derive(Clone, Debug, Deserialize)]
-#[serde(untagged)]
+#[serde(rename_all = "snake_case")]
 pub enum EventData {
-    /// A log event.
+    /// A simple log event.
     Log(String),
+    /// A log event built from key-value pairs
+    LogBuilder(HashMap<String, Value>),
 }
 
 impl EventData {
@@ -42,6 +52,15 @@ impl EventData {
     pub fn into_event(self) -> Event {
         match self {
             Self::Log(message) => Event::Log(LogEvent::from_bytes_legacy(&message.into())),
+            Self::LogBuilder(data) => {
+                let mut log_event = LogEvent::default();
+                for (k, v) in data {
+                    log_event
+                        .parse_path_and_insert(&k, v)
+                        .unwrap_or_else(|_| panic!("Unable to build log event for {}", &k));
+                }
+                Event::Log(log_event)
+            }
         }
     }
 }
@@ -52,6 +71,9 @@ impl EventData {
 /// metrics collection is based on the same event. Namely, one issue that can arise from creating the event
 /// from the event data twice (once for the expected and once for actual) is that it can result in a timestamp in
 /// the event which may or may not have the same millisecond precision as its counterpart.
+///
+/// For transforms and sinks, generally, the only way to cause an error is if the event itself
+/// is malformed in some way, which can be achieved without this test event variant.
 #[derive(Clone, Debug, Deserialize)]
 #[serde(from = "RawTestEvent")]
 #[serde(untagged)]
@@ -59,16 +81,13 @@ pub enum TestEvent {
     /// The event is used, as-is, without modification.
     Passthrough(Event),
 
-    /// The event is potentially modified by the external resource.
-    ///
-    /// The modification made is dependent on the external resource, but this mode is made available
-    /// for when a test case wants to exercise the failure path, but cannot cause a failure simply
-    /// by constructing the event in a certain way i.e. adding an invalid field, or removing a
-    /// required field, or using an invalid field value, and so on.
-    ///
-    /// For transforms and sinks, generally, the only way to cause an error is if the event itself
-    /// is malformed in some way, which can be achieved without this test event variant.
-    Modified { modified: bool, event: Event },
+    /// The event is encoded using an encoding that differs from the component's
+    /// configured encoding, which should cause an error when the event is decoded.
+    FailWithAlternateEncoder(Event),
+
+    /// The event encodes successfully but when the external resource receives that event, it should
+    /// throw a failure.
+    FailWithExternalResource(Event),
 }
 
 impl TestEvent {
@@ -76,7 +95,42 @@ impl TestEvent {
     pub fn into_event(self) -> Event {
         match self {
             Self::Passthrough(event) => event,
-            Self::Modified { event, .. } => event,
+            Self::FailWithAlternateEncoder(event) => event,
+            Self::FailWithExternalResource(event) => event,
+        }
+    }
+
+    pub fn get_event(&mut self) -> &mut Event {
+        match self {
+            Self::Passthrough(event) => event,
+            Self::FailWithAlternateEncoder(event) => event,
+            Self::FailWithExternalResource(event) => event,
+        }
+    }
+
+    /// (should_fail, event)
+    pub fn get(self) -> (bool, Event) {
+        match self {
+            Self::Passthrough(event) => (false, event),
+            Self::FailWithAlternateEncoder(event) => (true, event),
+            Self::FailWithExternalResource(event) => (true, event),
+        }
+    }
+
+    /// True if the event should fail, false otherwise.
+    pub const fn should_fail(&self) -> bool {
+        match self {
+            Self::Passthrough(_) => false,
+            Self::FailWithAlternateEncoder(_) | Self::FailWithExternalResource(_) => true,
+        }
+    }
+
+    /// True if the event should be rejected by the external resource in order to
+    /// trigger a failure path.
+    pub const fn should_reject(&self) -> bool {
+        match self {
+            Self::Passthrough(_) | Self::FailWithAlternateEncoder(_) => false,
+            Self::FailWithExternalResource(_) => true,
         }
     }
 }
@@ -90,10 +144,12 @@ impl From<RawTestEvent> for TestEvent {
             RawTestEvent::Passthrough(event_data) => {
                 TestEvent::Passthrough(event_data.into_event())
             }
-            RawTestEvent::Modified { modified, event } => TestEvent::Modified {
-                modified,
-                event: event.into_event(),
-            },
+            RawTestEvent::AlternateEncoder {
+                fail_encoding_of: event_data,
+            } => TestEvent::FailWithAlternateEncoder(event_data.into_event()),
+            RawTestEvent::ResourceReject {
+                external_resource_rejects: event_data,
+            } => TestEvent::FailWithExternalResource(event_data.into_event()),
         }
     }
 }
@@ -104,20 +160,20 @@ pub fn encode_test_event(
     event: TestEvent,
 ) {
     match event {
-        TestEvent::Passthrough(event) => {
+        TestEvent::Passthrough(event) | TestEvent::FailWithExternalResource(event) => {
             // Encode the event normally.
             encoder
                 .encode(event, buf)
                 .expect("should not fail to encode input event");
         }
-        TestEvent::Modified { event, .. } => {
+        TestEvent::FailWithAlternateEncoder(event) => {
             // This is a little fragile, but we check what serializer this encoder uses, and based
             // on `Serializer::supports_json`, we choose an opposing codec. For example, if the
             // encoder supports JSON, we'll use a serializer that doesn't support JSON, and vice
             // versa.
             let mut alt_encoder = if encoder.serializer().supports_json() {
                 Encoder::<Framer>::new(
-                    LengthDelimitedEncoder::new().into(),
+                    LengthDelimitedEncoder::default().into(),
                     LogfmtSerializer::new().into(),
                 )
             } else {
diff --git a/src/components/validation/resources/http.rs b/src/components/validation/resources/http.rs
index 052dabec52cf8..2bfc04deba62f 100644
--- a/src/components/validation/resources/http.rs
+++ b/src/components/validation/resources/http.rs
@@ -1,5 +1,5 @@
 use std::{
-    collections::VecDeque,
+    collections::{HashMap, VecDeque},
     future::Future,
     net::{IpAddr, SocketAddr},
     str::FromStr,
@@ -11,7 +11,7 @@ use axum::{
     routing::{MethodFilter, MethodRouter},
     Router,
 };
-use bytes::BytesMut;
+use bytes::{BufMut as _, BytesMut};
 use http::{Method, Request, StatusCode, Uri};
 use hyper::{Body, Client, Server};
 use tokio::{
@@ -20,8 +20,15 @@ use tokio::{
 };
 use tokio_util::codec::Decoder;
 
-use crate::components::validation::sync::{Configuring, TaskCoordinator};
-use vector_lib::event::Event;
+use crate::components::validation::{
+    sync::{Configuring, TaskCoordinator},
+    RunnerMetrics,
+};
+use vector_lib::{
+    codecs::encoding::Framer, codecs::encoding::Serializer::Json,
+    codecs::CharacterDelimitedEncoder, config::LogNamespace, event::Event,
+    EstimatedJsonEncodedSizeOf,
+};
 
 use super::{encode_test_event, ResourceCodec, ResourceDirection, TestEvent};
 
@@ -30,11 +37,21 @@ use super::{encode_test_event, ResourceCodec, ResourceDirection, TestEvent};
 pub struct HttpResourceConfig {
     uri: Uri,
     method: Option<Method>,
+    headers: Option<HashMap<String, String>>,
 }
 
 impl HttpResourceConfig {
     pub const fn from_parts(uri: Uri, method: Option<Method>) -> Self {
-        Self { uri, method }
+        Self {
+            uri,
+            method,
+            headers: None,
+        }
+    }
+
+    pub fn with_headers(mut self, headers: HashMap<String, String>) -> Self {
+        self.headers = Some(headers);
+        self
     }
 
     pub fn spawn_as_input(
@@ -43,38 +60,26 @@ impl HttpResourceConfig {
         codec: ResourceCodec,
         input_rx: mpsc::Receiver<TestEvent>,
         task_coordinator: &TaskCoordinator<Configuring>,
+        runner_metrics: &Arc<Mutex<RunnerMetrics>>,
     ) {
         match direction {
             // The source will pull data from us.
             ResourceDirection::Pull => {
-                spawn_input_http_server(self, codec, input_rx, task_coordinator)
+                spawn_input_http_server(self, codec, input_rx, task_coordinator, runner_metrics)
             }
             // We'll push data to the source.
             ResourceDirection::Push => {
-                spawn_input_http_client(self, codec, input_rx, task_coordinator)
+                spawn_input_http_client(self, codec, input_rx, task_coordinator, runner_metrics)
             }
         }
     }
 
-    pub fn spawn_as_output(
-        self,
-        direction: ResourceDirection,
-        codec: ResourceCodec,
-        output_tx: mpsc::Sender<Vec<Event>>,
-        task_coordinator: &TaskCoordinator<Configuring>,
-    ) -> vector_lib::Result<()> {
-        match direction {
+    pub fn spawn_as_output(self, ctx: HttpResourceOutputContext) -> vector_lib::Result<()> {
+        match ctx.direction {
             // We'll pull data from the sink.
-            ResourceDirection::Pull => Ok(spawn_output_http_client(
-                self,
-                codec,
-                output_tx,
-                task_coordinator,
-            )),
+            ResourceDirection::Pull => Ok(ctx.spawn_output_http_client(self)),
             // The sink will push data to us.
-            ResourceDirection::Push => {
-                spawn_output_http_server(self, codec, output_tx, task_coordinator)
-            }
+            ResourceDirection::Push => ctx.spawn_output_http_server(self),
         }
     }
 }
@@ -86,6 +91,7 @@ fn spawn_input_http_server(
     codec: ResourceCodec,
     mut input_rx: mpsc::Receiver<TestEvent>,
     task_coordinator: &TaskCoordinator<Configuring>,
+    runner_metrics: &Arc<Mutex<RunnerMetrics>>,
 ) {
     // This HTTP server will poll the input receiver for input events and buffer them. When a
     // request comes in on the right path/method, one buffered input event will be sent back. If no
@@ -97,8 +103,11 @@ fn spawn_input_http_server(
     let encoder = codec.into_encoder();
     let sendable_events = Arc::clone(&outstanding_events);
 
-    let (resource_notifier, http_server_shutdown_tx) =
-        spawn_http_server(task_coordinator, &config, move |_| {
+    let (resource_notifier, http_server_shutdown_tx) = spawn_http_server(
+        task_coordinator,
+        &config,
+        runner_metrics,
+        move |_request, _runner_metrics| {
             let sendable_events = Arc::clone(&sendable_events);
             let mut encoder = encoder.clone();
 
@@ -116,15 +125,18 @@ fn spawn_input_http_server(
                     StatusCode::OK.into_response()
                 }
             }
-        });
+        },
+    );
 
     // Now we'll create and spawn the resource's core logic loop which drives the buffering of input
     // events and working with the HTTP server as they're consumed.
     let resource_started = task_coordinator.track_started();
     let resource_completed = task_coordinator.track_completed();
+    let mut resource_shutdown_rx = task_coordinator.register_for_shutdown();
+
     tokio::spawn(async move {
         resource_started.mark_as_done();
-        debug!("HTTP server external input resource started.");
+        info!("HTTP server external input resource started.");
 
         let mut input_finished = false;
 
@@ -140,7 +152,10 @@ fn spawn_input_http_server(
                         let mut outstanding_events = outstanding_events.lock().await;
                         outstanding_events.push_back(event);
                     },
-                    None => input_finished = true,
+                    None => {
+                        info!("HTTP server external input resource input is finished.");
+                        input_finished = true;
+                    },
                 },
 
                 _ = resource_notifier.notified() => {
@@ -159,13 +174,20 @@ fn spawn_input_http_server(
                 },
             }
         }
-
         // Mark ourselves as completed now that we've sent all inputs to the source, and
         // additionally signal the HTTP server to also gracefully shutdown.
+        info!("HTTP server external input resource signalling ready for shutdown.");
+
+        // Wait for the runner to signal us to shutdown
+        resource_shutdown_rx.wait().await;
+
+        // Shutdown the server
         _ = http_server_shutdown_tx.send(());
+
+        info!("HTTP server external input resource marking as done.");
         resource_completed.mark_as_done();
 
-        debug!("HTTP server external input resource completed.");
+        info!("HTTP server external input resource completed.");
     });
 }
 
@@ -175,32 +197,65 @@ fn spawn_input_http_client(
     codec: ResourceCodec,
     mut input_rx: mpsc::Receiver<TestEvent>,
     task_coordinator: &TaskCoordinator<Configuring>,
+    runner_metrics: &Arc<Mutex<RunnerMetrics>>,
 ) {
     // Spin up an HTTP client that will push the input data to the source on a
     // request-per-input-item basis. This runs serially and has no parallelism.
     let started = task_coordinator.track_started();
     let completed = task_coordinator.track_completed();
     let mut encoder = codec.into_encoder();
+    let runner_metrics = Arc::clone(runner_metrics);
 
     tokio::spawn(async move {
         // Mark ourselves as started. We don't actually do anything until we get our first input
        // message, though.
         started.mark_as_done();
-        debug!("HTTP client external input resource started.");
+        info!("HTTP client external input resource started.");
 
         let client = Client::builder().build_http::<Body>();
         let request_uri = config.uri;
         let request_method = config.method.unwrap_or(Method::POST);
+        let headers = config.headers.unwrap_or_default();
 
         while let Some(event) = input_rx.recv().await {
             debug!("Got event to send from runner.");
 
             let mut buffer = BytesMut::new();
+
+            let is_json = matches!(encoder.serializer(), Json(_))
+                && matches!(
+                    encoder.framer(),
+                    Framer::CharacterDelimited(CharacterDelimitedEncoder { delimiter: b',' })
+                );
+
+            if is_json {
+                buffer.put_u8(b'[');
+            }
+
             encode_test_event(&mut encoder, &mut buffer, event);
 
-            let request = Request::builder()
+            if is_json {
+                if !buffer.is_empty() {
+                    // remove trailing comma from last record
+                    buffer.truncate(buffer.len() - 1);
+                }
+                buffer.put_u8(b']');
+
+                // in this edge case we have removed the trailing comma (one byte) and added
+                // opening and closing brackets (2 bytes) for a net add of one byte.
+                let mut runner_metrics = runner_metrics.lock().await;
+                runner_metrics.sent_bytes_total += 1;
+            }
+
+            let mut request_builder = Request::builder()
                 .uri(request_uri.clone())
-                .method(request_method.clone())
+                .method(request_method.clone());
+
+            for (key, value) in &headers {
+                request_builder = request_builder.header(key, value);
+            }
+
+            let request = request_builder
                 .body(buffer.freeze().into())
                 .expect("should not fail to build request");
 
@@ -219,90 +274,155 @@ fn spawn_input_http_client(
 
         // Mark ourselves as completed now that we've sent all inputs to the source.
         completed.mark_as_done();
 
-        debug!("HTTP client external input resource completed.");
+        info!("HTTP client external input resource completed.");
     });
 }
 
-/// Spawns an HTTP server that accepts events sent by a sink.
-#[allow(clippy::missing_const_for_fn)]
-fn spawn_output_http_server(
-    config: HttpResourceConfig,
-    codec: ResourceCodec,
-    output_tx: mpsc::Sender<Vec<Event>>,
-    task_coordinator: &TaskCoordinator<Configuring>,
-) -> vector_lib::Result<()> {
-    // This HTTP server will wait for events to be sent by a sink, and collect them and send them on
-    // via an output sender. We accept/collect events until we're told to shutdown.
-
-    // First, we'll build and spawn our HTTP server.
-    let decoder = codec.into_decoder()?;
+/// Anything that the output side HTTP external resource needs
+pub struct HttpResourceOutputContext<'a> {
+    pub direction: ResourceDirection,
+    pub codec: ResourceCodec,
+    pub output_tx: mpsc::Sender<Vec<Event>>,
+    pub task_coordinator: &'a TaskCoordinator<Configuring>,
+    pub input_events: Vec<TestEvent>,
+    pub runner_metrics: &'a Arc<Mutex<RunnerMetrics>>,
+    pub log_namespace: LogNamespace,
+}
 
-    let (_, http_server_shutdown_tx) =
-        spawn_http_server(task_coordinator, &config, move |request| {
-            let output_tx = output_tx.clone();
-            let mut decoder = decoder.clone();
+impl HttpResourceOutputContext<'_> {
+    /// Spawns an HTTP server that accepts events sent by a sink.
+    #[allow(clippy::missing_const_for_fn)]
+    fn spawn_output_http_server(&self, config: HttpResourceConfig) -> vector_lib::Result<()> {
+        // This HTTP server will wait for events to be sent by a sink, and collect them and send them on
+        // via an output sender. We accept/collect events until we're told to shutdown.
+
+        // First, we'll build and spawn our HTTP server.
+        let decoder = self.codec.into_decoder(self.log_namespace)?;
+
+        // Note that we currently don't differentiate which events should and shouldn't be rejected:
+        // we reject all events in this server if any are marked for rejection.
+        // In the future it might be useful to be able to select which to reject. That will involve
+        // adding logic to the test case which is passed down here, and to the event itself. Since
+        // we can't guarantee the order of events, we'd need a way to flag which ones need to be
+        // rejected.
+        let should_reject = self
+            .input_events
+            .iter()
+            .filter(|te| te.should_reject())
+            .count()
+            > 0;
+
+        let output_tx = self.output_tx.clone();
+        let (_, http_server_shutdown_tx) = spawn_http_server(
+            self.task_coordinator,
+            &config,
+            self.runner_metrics,
+            move |request, output_runner_metrics| {
+                let output_tx = output_tx.clone();
+                let mut decoder = decoder.clone();
 
-            async move {
-                match hyper::body::to_bytes(request.into_body()).await {
-                    Err(_) => StatusCode::INTERNAL_SERVER_ERROR.into_response(),
-                    Ok(body) => {
-                        let mut body = BytesMut::from(&body[..]);
-                        loop {
-                            match decoder.decode_eof(&mut body) {
-                                Ok(Some((events, _byte_size))) => {
-                                    output_tx
-                                        .send(events.to_vec())
-                                        .await
-                                        .expect("should not fail to send output event");
+                async move {
+                    match hyper::body::to_bytes(request.into_body()).await {
+                        Err(_) => StatusCode::INTERNAL_SERVER_ERROR.into_response(),
+                        Ok(body) => {
+                            let mut body = BytesMut::from(&body[..]);
+                            loop {
+                                match decoder.decode_eof(&mut body) {
+                                    Ok(Some((events, byte_size))) => {
+                                        if should_reject {
+                                            info!("HTTP server external output resource decoded {byte_size} bytes but test case configured to reject.");
+                                        } else {
+                                            let mut output_runner_metrics =
+                                                output_runner_metrics.lock().await;
+                                            info!("HTTP server external output resource decoded {byte_size} bytes.");
+
+                                            // Update the runner metrics for the received events. This will later
+                                            // be used in the Validators, as the "expected" case.
+                                            output_runner_metrics.received_bytes_total +=
+                                                byte_size as u64;
+
+                                            output_runner_metrics.received_events_total +=
+                                                events.len() as u64;
+
+                                            events.iter().for_each(|event| {
+                                                output_runner_metrics.received_event_bytes_total +=
+                                                    event.estimated_json_encoded_size_of().get()
+                                                        as u64;
+                                            });
+
+                                            output_tx
+                                                .send(events.to_vec())
+                                                .await
+                                                .expect("should not fail to send output event");
+                                        }
+                                    }
+                                    Ok(None) => {
+                                        if should_reject {
+                                            // This status code is not retried and should result in the component under test
+                                            // emitting error events
+                                            return StatusCode::BAD_REQUEST.into_response();
+                                        } else {
+                                            return StatusCode::OK.into_response();
+                                        }
+                                    }
+                                    Err(_) => {
+                                        error!(
+                                            "HTTP server failed to decode {:?}",
+                                            String::from_utf8_lossy(&body)
+                                        );
+                                        return StatusCode::INTERNAL_SERVER_ERROR.into_response();
+                                    }
                                 }
-                                Ok(None) => return StatusCode::OK.into_response(),
-                                Err(_) => return StatusCode::INTERNAL_SERVER_ERROR.into_response(),
                             }
                         }
                     }
                 }
-            }
-        });
+            },
+        );
 
-    // Now we'll create and spawn the resource's core logic loop which simply waits for the runner
-    // to instruct us to shutdown, and when that happens, cascades to shutting down the HTTP server.
-    let resource_started = task_coordinator.track_started();
-    let resource_completed = task_coordinator.track_completed();
-    let mut resource_shutdown_rx = task_coordinator.register_for_shutdown();
-    tokio::spawn(async move {
-        resource_started.mark_as_done();
-        debug!("HTTP server external output resource started.");
+        // Now we'll create and spawn the resource's core logic loop which simply waits for the runner
+        // to instruct us to shutdown, and when that happens, cascades to shutting down the HTTP server.
+        let resource_started = self.task_coordinator.track_started();
+        let resource_completed = self.task_coordinator.track_completed();
+        let mut resource_shutdown_rx = self.task_coordinator.register_for_shutdown();
 
-        resource_shutdown_rx.wait().await;
-        _ = http_server_shutdown_tx.send(());
-        resource_completed.mark_as_done();
+        tokio::spawn(async move {
+            resource_started.mark_as_done();
+            info!("HTTP server external output resource started.");
 
-        debug!("HTTP server external output resource completed.");
-    });
-    Ok(())
-}
+            // Wait for the runner to tell us to shutdown
+            resource_shutdown_rx.wait().await;
 
-/// Spawns an HTTP client that pulls events by making requests to an HTTP server driven by a sink.
-#[allow(clippy::missing_const_for_fn)]
-fn spawn_output_http_client(
-    _config: HttpResourceConfig,
-    _codec: ResourceCodec,
-    _output_tx: mpsc::Sender<Vec<Event>>,
-    _task_coordinator: &TaskCoordinator<Configuring>,
-) {
-    // TODO: The `prometheus_exporter` sink is the only sink that exposes an HTTP server which must be
-    // scraped... but since we need special logic to aggregate/deduplicate scraped metrics, we can't
-    // use this generically for that purpose.
-    todo!()
+            // signal the server to shutdown
+            let _ = http_server_shutdown_tx.send(());
+
+            // mark ourselves as done
+            resource_completed.mark_as_done();
+
+            info!("HTTP server external output resource completed.");
+        });
+
+        Ok(())
+    }
+
+    /// Spawns an HTTP client that pulls events by making requests to an HTTP server driven by a sink.
+    #[allow(clippy::missing_const_for_fn)]
+    fn spawn_output_http_client(&self, _config: HttpResourceConfig) {
+        // TODO: The `prometheus_exporter` sink is the only sink that exposes an HTTP server which must be
+        // scraped... but since we need special logic to aggregate/deduplicate scraped metrics, we can't
+        // use this generically for that purpose.
+        todo!()
+    }
 }
 
 fn spawn_http_server<H, F, R>(
     task_coordinator: &TaskCoordinator<Configuring>,
     config: &HttpResourceConfig,
+    runner_metrics: &Arc<Mutex<RunnerMetrics>>,
     handler: H,
 ) -> (Arc<Notify>, oneshot::Sender<()>)
 where
-    H: Fn(Request<Body>) -> F + Clone + Send + 'static,
+    H: Fn(Request<Body>, Arc<Mutex<RunnerMetrics>>) -> F + Clone + Send + 'static,
     F: Future<Output = R> + Send,
     R: IntoResponse,
 {
@@ -327,6 +447,8 @@ where
     let resource_notifier = Arc::new(Notify::new());
     let server_notifier = Arc::clone(&resource_notifier);
 
+    let output_runner_metrics = Arc::clone(runner_metrics);
+
    tokio::spawn(async move {
         // Create our HTTP server by binding as early as possible to return an error if we can't
         // actually bind.
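As a rough illustration of the new handler contract above, every handler now receives the shared runner metrics alongside the request. The following is a minimal, hypothetical handler under the bound `H: Fn(Request<Body>, Arc<Mutex<RunnerMetrics>>) -> F` with `F: Future<Output = R> + Send` and `R: IntoResponse` taken from the signature above; the closure body and its bookkeeping are invented for illustration only:

    // Hypothetical no-op handler that acknowledges every request.
    let handler = move |_request: Request<Body>, metrics: Arc<Mutex<RunnerMetrics>>| async move {
        // This is a tokio::sync::Mutex, so acquiring the lock is an await point.
        let _metrics = metrics.lock().await;
        StatusCode::OK // axum's IntoResponse is implemented for StatusCode
    };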
@@ -353,7 +475,7 @@ where
                     StatusCode::METHOD_NOT_ALLOWED
                 })
                 .on(method_filter, move |request: Request<Body>| {
-                    let request_handler = handler(request);
+                    let request_handler = handler(request, output_runner_metrics);
                     let notifier = Arc::clone(&server_notifier);
 
                     async move {
@@ -363,7 +485,12 @@ where
             }
         });
 
-        let router = Router::new().route(&request_path, method_router);
+        let router = Router::new().route(&request_path, method_router).fallback(
+            |req: Request<Body>| async move {
+                error!(?req, "Component sent request the server could not route.");
+                StatusCode::NOT_FOUND
+            },
+        );
 
         // Now actually run/drive the HTTP server and process requests until we're told to shutdown.
         http_server_started.mark_as_done();
diff --git a/src/components/validation/resources/mod.rs b/src/components/validation/resources/mod.rs
index 61ae25d02435c..82d5ea2b906be 100644
--- a/src/components/validation/resources/mod.rs
+++ b/src/components/validation/resources/mod.rs
@@ -1,13 +1,19 @@
 mod event;
 mod http;
 
-use tokio::sync::mpsc;
-use vector_lib::codecs::{
-    decoding::{self, DeserializerConfig},
-    encoding::{
-        self, Framer, FramingConfig, JsonSerializerConfig, SerializerConfig, TextSerializerConfig,
+use std::sync::Arc;
+
+use tokio::sync::{mpsc, Mutex};
+use vector_lib::{
+    codecs::{
+        decoding::{self, DeserializerConfig},
+        encoding::{
+            self, Framer, FramingConfig, JsonSerializerConfig, SerializerConfig,
+            TextSerializerConfig,
+        },
+        BytesEncoder,
     },
-    BytesEncoder,
+    config::LogNamespace,
 };
 use vector_lib::{config::DataType, event::Event};
 
@@ -15,8 +21,12 @@ use crate::codecs::{Decoder, DecodingConfig, Encoder, EncodingConfig, EncodingConfigWithFraming};
 
 pub use self::event::{encode_test_event, TestEvent};
 pub use self::http::HttpResourceConfig;
+use self::http::HttpResourceOutputContext;
 
-use super::sync::{Configuring, TaskCoordinator};
+use super::{
+    sync::{Configuring, TaskCoordinator},
+    RunnerMetrics,
+};
 
 /// The codec used by the external resource.
 ///
@@ -96,7 +106,7 @@ impl ResourceCodec {
     ///
     /// The decoder is generated as an inverse to the input codec: if an encoding configuration was
     /// given, we generate a decoder that satisfies that encoding configuration, and vice versa.
-    pub fn into_decoder(&self) -> vector_lib::Result<Decoder> {
+    pub fn into_decoder(&self, log_namespace: LogNamespace) -> vector_lib::Result<Decoder> {
         let (framer, deserializer) = match self {
             Self::Decoding(config) => return config.build(),
             Self::Encoding(config) => (
@@ -113,7 +123,7 @@ impl ResourceCodec {
             }
         };
 
-        Ok(Decoder::new(framer, deserializer))
+        Ok(Decoder::new(framer, deserializer).with_log_namespace(log_namespace))
     }
 }
 
@@ -159,6 +169,7 @@ fn deserializer_config_to_serializer(config: &DeserializerConfig) -> encoding::Serializer {
         DeserializerConfig::NativeJson { .. } => SerializerConfig::NativeJson,
         DeserializerConfig::Gelf { .. } => SerializerConfig::Gelf,
         DeserializerConfig::Avro { avro } => SerializerConfig::Avro { avro: avro.into() },
+        DeserializerConfig::Vrl { .. } => unimplemented!(),
     };
 
     serializer_config
@@ -176,7 +187,11 @@ fn decoder_framing_to_encoding_framer(framing: &decoding::FramingConfig) -> encoding::Framer {
                 },
             })
         }
-        decoding::FramingConfig::LengthDelimited => encoding::FramingConfig::LengthDelimited,
+        decoding::FramingConfig::LengthDelimited(config) => {
+            encoding::FramingConfig::LengthDelimited(encoding::LengthDelimitedEncoderConfig {
+                length_delimited: config.length_delimited.clone(),
+            })
+        }
         decoding::FramingConfig::NewlineDelimited(_) => encoding::FramingConfig::NewlineDelimited,
         // TODO: There's no equivalent octet counting framer for encoding... although
         // there's no particular reason that would make it hard to write.
@@ -222,7 +237,11 @@ fn encoder_framing_to_decoding_framer(framing: encoding::FramingConfig) -> decoding::Framer {
                 },
             })
         }
-        encoding::FramingConfig::LengthDelimited => decoding::FramingConfig::LengthDelimited,
+        encoding::FramingConfig::LengthDelimited(config) => {
+            decoding::FramingConfig::LengthDelimited(decoding::LengthDelimitedDecoderConfig {
+                length_delimited: config.length_delimited.clone(),
+            })
+        }
         encoding::FramingConfig::NewlineDelimited => {
             decoding::FramingConfig::NewlineDelimited(Default::default())
         }
@@ -292,7 +311,7 @@ impl From<HttpResourceConfig> for ResourceDefinition {
 /// the external resource must pull the data from the sink.
 #[derive(Clone)]
 pub struct ExternalResource {
-    direction: ResourceDirection,
+    pub direction: ResourceDirection,
     definition: ResourceDefinition,
     pub codec: ResourceCodec,
 }
@@ -316,11 +335,16 @@ impl ExternalResource {
         self,
         input_rx: mpsc::Receiver<TestEvent>,
         task_coordinator: &TaskCoordinator<Configuring>,
+        runner_metrics: &Arc<Mutex<RunnerMetrics>>,
     ) {
         match self.definition {
-            ResourceDefinition::Http(http_config) => {
-                http_config.spawn_as_input(self.direction, self.codec, input_rx, task_coordinator)
-            }
+            ResourceDefinition::Http(http_config) => http_config.spawn_as_input(
+                self.direction,
+                self.codec,
+                input_rx,
+                task_coordinator,
+                runner_metrics,
+            ),
         }
     }
 
@@ -329,10 +353,21 @@ impl ExternalResource {
         self,
         output_tx: mpsc::Sender<Vec<Event>>,
         task_coordinator: &TaskCoordinator<Configuring>,
+        input_events: Vec<TestEvent>,
+        runner_metrics: &Arc<Mutex<RunnerMetrics>>,
+        log_namespace: LogNamespace,
     ) -> vector_lib::Result<()> {
         match self.definition {
             ResourceDefinition::Http(http_config) => {
-                http_config.spawn_as_output(self.direction, self.codec, output_tx, task_coordinator)
+                http_config.spawn_as_output(HttpResourceOutputContext {
+                    direction: self.direction,
+                    codec: self.codec,
+                    output_tx,
+                    task_coordinator,
+                    input_events,
+                    runner_metrics,
+                    log_namespace,
+                })
             }
         }
     }
diff --git a/src/components/validation/runner/config.rs b/src/components/validation/runner/config.rs
index f80746ab21df1..5f70f56670e99 100644
--- a/src/components/validation/runner/config.rs
+++ b/src/components/validation/runner/config.rs
@@ -1,5 +1,8 @@
+use vector_lib::config::LogNamespace;
+
 use crate::{
     components::validation::{
+        component_names::*,
         sync::{Configuring, TaskCoordinator},
         util::GrpcAddress,
         ComponentConfiguration, ComponentType, ValidationConfiguration,
@@ -21,30 +24,33 @@ pub struct TopologyBuilder {
     output_edge: Option<OutputEdge>,
 }
 
-pub const TEST_SOURCE_NAME: &str = "test_source";
-pub const TEST_SINK_NAME: &str = "test_sink";
-pub const TEST_TRANSFORM_NAME: &str = "test_transform";
-pub const TEST_INPUT_SOURCE_NAME: &str = "input_source";
-pub const TEST_OUTPUT_SINK_NAME: &str = "output_sink";
-
 impl TopologyBuilder {
     /// Creates a component topology for the given component configuration.
-    pub fn from_configuration(configuration: &ValidationConfiguration) -> Self {
-        let component_configuration = configuration.component_configuration();
-        match component_configuration {
+    pub fn from_configuration(
+        configuration: &ValidationConfiguration,
+        config_name: Option<&String>,
+    ) -> Result<Self, String> {
+        let component_configuration = configuration
+            .component_configuration_for_test_case(config_name)
+            .ok_or(format!(
+                "No test case name defined for configuration {:?}.",
+                config_name
+            ))?;
+
+        Ok(match component_configuration {
             ComponentConfiguration::Source(source) => {
                 debug_assert_eq!(configuration.component_type(), ComponentType::Source);
                 Self::from_source(source)
             }
             ComponentConfiguration::Transform(transform) => {
                 debug_assert_eq!(configuration.component_type(), ComponentType::Transform);
-                Self::from_transform(transform)
+                Self::from_transform(transform, configuration.log_namespace)
             }
             ComponentConfiguration::Sink(sink) => {
                 debug_assert_eq!(configuration.component_type(), ComponentType::Sink);
-                Self::from_sink(sink)
+                Self::from_sink(sink, configuration.log_namespace)
             }
-        }
+        })
     }
 
     /// Creates a component topology for validating a source.
@@ -62,8 +68,8 @@ impl TopologyBuilder {
         }
     }
 
-    fn from_transform(transform: BoxedTransform) -> Self {
-        let (input_edge, input_source) = build_input_edge();
+    fn from_transform(transform: BoxedTransform, log_namespace: LogNamespace) -> Self {
+        let (input_edge, input_source) = build_input_edge(log_namespace);
         let (output_edge, output_sink) = build_output_edge();
 
         let mut config_builder = ConfigBuilder::default();
@@ -78,8 +84,8 @@ impl TopologyBuilder {
         }
     }
 
-    fn from_sink(sink: BoxedSink) -> Self {
-        let (input_edge, input_source) = build_input_edge();
+    fn from_sink(sink: BoxedSink, log_namespace: LogNamespace) -> Self {
+        let (input_edge, input_source) = build_input_edge(log_namespace);
 
         let mut config_builder = ConfigBuilder::default();
         config_builder.add_source(TEST_INPUT_SOURCE_NAME, input_source);
@@ -120,11 +126,14 @@ impl TopologyBuilder {
     }
 }
 
-fn build_input_edge() -> (InputEdge, impl Into<Sources>) {
+fn build_input_edge(log_namespace: LogNamespace) -> (InputEdge, impl Into<Sources>) {
     let input_listen_addr = GrpcAddress::from(next_addr());
     debug!(listen_addr = %input_listen_addr, "Creating controlled input edge.");
 
-    let input_source = VectorSourceConfig::from_address(input_listen_addr.as_socket_addr());
+    let mut input_source = VectorSourceConfig::from_address(input_listen_addr.as_socket_addr());
+
+    input_source.log_namespace = Some(log_namespace == LogNamespace::Vector);
+
     let input_edge = InputEdge::from_address(input_listen_addr);
 
     (input_edge, input_source)
diff --git a/src/components/validation/runner/mod.rs b/src/components/validation/runner/mod.rs
index a01ed4f13e495..8c5937bbf86cd 100644
--- a/src/components/validation/runner/mod.rs
+++ b/src/components/validation/runner/mod.rs
@@ -2,24 +2,24 @@ pub mod config;
 mod io;
 mod telemetry;
 
-use std::{
-    collections::HashMap,
-    path::PathBuf,
-    sync::{Arc, Mutex},
-    time::Duration,
-};
+use std::{collections::HashMap, path::PathBuf, sync::Arc, time::Duration};
 
 use bytes::BytesMut;
+use chrono::Utc;
 use tokio::{
     runtime::Builder,
     select,
-    sync::mpsc::{self, Receiver, Sender},
+    sync::{
+        mpsc::{self, Receiver, Sender},
+        Mutex,
+    },
     task::JoinHandle,
 };
 use tokio_util::codec::Encoder as _;
-use vector_lib::codecs::encoding;
-use vector_lib::{event::Event, EstimatedJsonEncodedSizeOf};
+use vector_lib::{
+    codecs::encoding, config::LogNamespace, event::Event, EstimatedJsonEncodedSizeOf,
+};
 
 use crate::{
     codecs::Encoder,
@@ -203,17 +203,25 @@ impl Runner {
 
         let component_type = self.configuration.component_type();
 
-        let test_cases = load_component_test_cases(self.test_case_data_path)?;
+        let test_cases = load_component_test_cases(&self.test_case_data_path)?;
         for test_case in test_cases {
+            println!("");
+            println!("");
+            info!(
+                "Running test case '{}' for component '{}' (type: {:?})...",
+                test_case.name,
+                self.configuration.component_name,
+                self.configuration.component_type()
+            );
             // Create a task coordinator for each relevant phase of the test.
             //
             // This provides us the granularity to know when the tasks associated with each phase
             // (inputs, component topology, outputs/telemetry, etc) have started, and the ability to
             // trigger them to shutdown and then wait until the associated tasks have completed.
-            let input_task_coordinator = TaskCoordinator::new();
-            let output_task_coordinator = TaskCoordinator::new();
-            let topology_task_coordinator = TaskCoordinator::new();
-            let telemetry_task_coordinator = TaskCoordinator::new();
+            let input_task_coordinator = TaskCoordinator::new("Input");
+            let output_task_coordinator = TaskCoordinator::new("Output");
+            let topology_task_coordinator = TaskCoordinator::new("Topology");
+            let telemetry_task_coordinator = TaskCoordinator::new("Telemetry");
 
             // First, we get a topology builder for the given component being validated.
             //
@@ -225,7 +233,10 @@ impl Runner {
             // We then finalize the topology builder to get our actual `ConfigBuilder`, as well as
             // any controlled edges (channel sender/receiver to the aforementioned filler
             // components) and a telemetry client for collecting internal telemetry.
-            let topology_builder = TopologyBuilder::from_configuration(&self.configuration);
+            let topology_builder = TopologyBuilder::from_configuration(
+                &self.configuration,
+                test_case.config_name.as_ref(),
+            )?;
             let (config_builder, controlled_edges, telemetry_collector) = topology_builder
                 .finalize(
                     &input_task_coordinator,
@@ -234,7 +245,7 @@ impl Runner {
                 )
                 .await;
 
-            debug!("Component topology configuration built and telemetry collector spawned.");
+            info!("Component topology configuration built and telemetry collector spawned.");
 
             // Create the data structure that the input and output runners will use to store
             // their received/sent metrics. This is then shared with the Validator for comparison
@@ -251,26 +262,29 @@ impl Runner {
             // For example, if we're validating a source, we would have added a filler sink for our
             // controlled output edge, which means we then need a server task listening for the
             // events sent by that sink.
+
             let (runner_input, runner_output, maybe_runner_encoder) = build_external_resource(
+                &test_case,
                 &self.configuration,
                 &input_task_coordinator,
                 &output_task_coordinator,
+                &runner_metrics,
             )?;
             let input_tx = runner_input.into_sender(controlled_edges.input);
             let output_rx = runner_output.into_receiver(controlled_edges.output);
 
-            debug!("External resource (if any) and controlled edges built and spawned.");
+            info!("External resource (if any) and controlled edges built and spawned.");
 
             // Now with any external resource spawned, as well as any tasks for handling controlled
             // edges, we'll wait for all of those tasks to report that they're ready to go and
             // listening, etc.
-            let input_task_coordinator = input_task_coordinator.started().await;
-            debug!("All input task(s) started.");
+            let mut input_task_coordinator = input_task_coordinator.started().await;
+            info!("All input task(s) started.");
 
-            let telemetry_task_coordinator = telemetry_task_coordinator.started().await;
-            debug!("All telemetry task(s) started.");
+            let mut telemetry_task_coordinator = telemetry_task_coordinator.started().await;
+            info!("All telemetry task(s) started.");
 
-            let output_task_coordinator = output_task_coordinator.started().await;
-            debug!("All output task(s) started.");
+            let mut output_task_coordinator = output_task_coordinator.started().await;
+            info!("All output task(s) started.");
 
             // At this point, we need to actually spawn the configured component topology so that it
             // runs, and make sure we have a way to tell it when to shutdown so that we can properly
@@ -280,7 +294,7 @@ impl Runner {
                 &topology_task_coordinator,
                 self.extra_context.clone(),
             );
-            let topology_task_coordinator = topology_task_coordinator.started().await;
+            let mut topology_task_coordinator = topology_task_coordinator.started().await;
 
             // Now we'll spawn two tasks: one for sending inputs, and one for collecting outputs.
             //
@@ -307,12 +321,23 @@ impl Runner {
                 input_tx,
                 &runner_metrics,
                 maybe_runner_encoder.as_ref().cloned(),
+                self.configuration.component_type,
+                self.configuration.log_namespace(),
             );
 
+            // the number of events we expect to receive from the output.
+            let expected_output_events = test_case
+                .events
+                .iter()
+                .filter(|te| !te.should_fail())
+                .count();
+
             let output_driver = spawn_output_driver(
                 output_rx,
                 &runner_metrics,
                 maybe_runner_encoder.as_ref().cloned(),
+                self.configuration.component_type,
+                expected_output_events,
             );
 
             // At this point, the component topology is running, and all input/output/telemetry
@@ -328,21 +353,26 @@ impl Runner {
                 .await
                 .expect("input driver task should not have panicked");
 
+            // Synchronize the shutdown of all tasks, and get the resulting output events.
+            // We drive the shutdown by ensuring that the output events have been
+            // processed by the external resource, which ensures that the input events have travelled
+            // all the way through the pipeline, and that the telemetry events have been processed
+            // before shutting down the telemetry and topology tasks.
             input_task_coordinator.shutdown().await;
-            debug!("Input task(s) have been shutdown.");
 
-            telemetry_task_coordinator.shutdown().await;
-            debug!("Telemetry task(s) have been shutdown.");
-
-            topology_task_coordinator.shutdown().await;
-            debug!("Component topology task has been shutdown.");
+            let output_events = output_driver
+                .await
+                .expect("output driver task should not have panicked");
 
+            // Now that all output events have been received, we can shutdown the controlled edge/sink
             output_task_coordinator.shutdown().await;
-            debug!("Output task(s) have been shutdown.");
 
-            let output_events = output_driver
-                .await
-                .expect("input driver task should not have panicked");
+            // as well as the telemetry and topology
+            telemetry_task_coordinator.shutdown().await;
+            topology_task_coordinator.shutdown().await;
+
+            info!("Collected runner metrics: {:?}", runner_metrics);
+            let final_runner_metrics = runner_metrics.lock().await;
 
             // Run the relevant data -- inputs, outputs, telemetry, etc -- through each validator to
             // get the validation results for this test.
@@ -350,6 +380,7 @@ impl Runner {
                 name: test_name,
                 expectation,
                 events: input_events,
+                ..
             } = test_case;
 
             let telemetry_events = telemetry_collector.collect().await;
@@ -363,7 +394,7 @@ impl Runner {
                     &input_events,
                     &output_events,
                     &telemetry_events,
-                    &runner_metrics.lock().unwrap(),
+                    &final_runner_metrics,
                 )
             })
             .collect();
@@ -399,7 +430,7 @@ impl Runner {
 /// during deserialization of the test case file, whether the error is I/O related in nature or due
 /// to invalid YAML, or not representing valid serialized test cases, then an error variant will be
 /// returned explaining the cause.
-fn load_component_test_cases(test_case_data_path: PathBuf) -> Result<Vec<TestCase>, String> {
+fn load_component_test_cases(test_case_data_path: &PathBuf) -> Result<Vec<TestCase>, String> {
     std::fs::File::open(test_case_data_path)
         .map_err(|e| {
             format!(
@@ -418,15 +449,21 @@ fn build_external_resource(
+    test_case: &TestCase,
     configuration: &ValidationConfiguration,
     input_task_coordinator: &TaskCoordinator<Configuring>,
     output_task_coordinator: &TaskCoordinator<Configuring>,
+    runner_metrics: &Arc<Mutex<RunnerMetrics>>,
 ) -> Result<(RunnerInput, RunnerOutput, Option<Encoder<encoding::Framer>>), vector_lib::Error> {
     let component_type = configuration.component_type();
-    let maybe_external_resource = configuration.external_resource();
-    let maybe_encoder = maybe_external_resource
+    let maybe_external_resource = configuration.external_resource(test_case.config_name.as_ref());
+
+    let resource_codec = maybe_external_resource
         .as_ref()
-        .map(|resource| resource.codec.into_encoder());
+        .map(|resource| resource.codec.clone());
+
+    let maybe_encoder = resource_codec.as_ref().map(|codec| codec.into_encoder());
+
     match component_type {
         ComponentType::Source => {
             // As an external resource for a source, we create a channel that the validation runner
@@ -436,7 +473,7 @@ fn build_external_resource(
             let (tx, rx) = mpsc::channel(1024);
             let resource =
                 maybe_external_resource.expect("a source must always have an external resource");
-            resource.spawn_as_input(rx, input_task_coordinator);
+            resource.spawn_as_input(rx, input_task_coordinator, runner_metrics);
 
             Ok((
                 RunnerInput::External(tx),
@@ -456,7 +493,14 @@ fn build_external_resource(
             let (tx, rx) = mpsc::channel(1024);
             let resource =
                 maybe_external_resource.expect("a sink must always have an external resource");
-            resource.spawn_as_output(tx, output_task_coordinator)?;
+
+            resource.spawn_as_output(
+                tx,
+                output_task_coordinator,
+                test_case.events.clone(),
+                runner_metrics,
+                configuration.log_namespace(),
+            )?;
 
             Ok((
                 RunnerInput::Controlled,
@@ -479,7 +523,11 @@ fn spawn_component_topology(
     let mut config = config_builder
         .build()
         .expect("config should not have any errors");
-    config.healthchecks.set_require_healthy(Some(true));
+
+    // It's possible we could extend the framework to allow specifying logic to
+    // handle that, but I don't see much value currently since the healthcheck is
+    // not enforced for components, and it doesn't impact the internal telemetry.
+    config.healthchecks.enabled = false;
 
     _ = std::thread::spawn(move || {
         let test_runtime = Builder::new_current_thread()
@@ -488,22 +536,22 @@ fn spawn_component_topology(
             .expect("should not fail to build current-thread runtime");
 
         test_runtime.block_on(async move {
-            debug!("Building component topology...");
+            info!("Building component topology...");
 
             let (topology, mut crash_rx) =
                 RunningTopology::start_init_validated(config, extra_context)
                     .await
                     .unwrap();
 
-            debug!("Component topology built and spawned.");
+            info!("Component topology built and spawned.");
             topology_started.mark_as_done();
 
             select! {
                 // We got the signal to shutdown, so stop the topology gracefully.
                 _ = topology_shutdown_handle.wait() => {
-                    debug!("Shutdown signal received, stopping topology...");
+                    info!("Shutdown signal received, stopping topology...");
                     topology.stop().await;
-                    debug!("Component topology stopped gracefully.")
+                    info!("Component topology stopped gracefully.")
                 },
                 _ = crash_rx.recv() => {
                     error!("Component topology under validation unexpectedly crashed.");
@@ -520,11 +568,15 @@ fn spawn_input_driver(
     input_tx: Sender<TestEvent>,
     runner_metrics: &Arc<Mutex<RunnerMetrics>>,
     mut maybe_encoder: Option<Encoder<encoding::Framer>>,
+    component_type: ComponentType,
+    log_namespace: LogNamespace,
 ) -> JoinHandle<()> {
     let input_runner_metrics = Arc::clone(runner_metrics);
 
+    let now = Utc::now();
+
     tokio::spawn(async move {
-        for input_event in input_events {
+        for mut input_event in input_events {
             input_tx
                 .send(input_event.clone())
                 .await
@@ -532,30 +584,73 @@ fn spawn_input_driver(
 
             // Update the runner metrics for the sent event. This will later
            // be used in the Validators, as the "expected" case.
-            let mut input_runner_metrics = input_runner_metrics.lock().unwrap();
+            let mut input_runner_metrics = input_runner_metrics.lock().await;
+
+            // the controlled edge (vector source) adds metadata to the event when it is received.
+            // thus we need to add it here so the expected values for the comparisons on transforms
+            // and sinks are accurate.
+            if component_type != ComponentType::Source {
+                if let Event::Log(ref mut log) = input_event.get_event() {
+                    log_namespace.insert_standard_vector_source_metadata(log, "vector", now);
+                }
+            }
+
+            let (failure_case, mut event) = input_event.clone().get();
 
             if let Some(encoder) = maybe_encoder.as_mut() {
                 let mut buffer = BytesMut::new();
-                encode_test_event(encoder, &mut buffer, input_event.clone());
+                encode_test_event(encoder, &mut buffer, input_event);
 
                 input_runner_metrics.sent_bytes_total += buffer.len() as u64;
             }
 
-            let (modified, event) = match input_event {
-                TestEvent::Passthrough(event) => (false, event),
-                TestEvent::Modified { modified, event } => (modified, event),
-            };
-
             // account for failure case
-            if modified {
+            if failure_case {
                 input_runner_metrics.errors_total += 1;
-            } else {
+                // TODO: this assumption may need to be made configurable at some point
+                if component_type == ComponentType::Sink {
+                    input_runner_metrics.discarded_events_total += 1;
+                }
+            }
+
+            if !failure_case || component_type == ComponentType::Sink {
                 input_runner_metrics.sent_events_total += 1;
 
+                // Convert unix timestamp in input events to the Datetime string.
+                // This is necessary when a source expects the incoming event to have a
+                // unix timestamp but we convert it into a datetime string in the source.
+                // For example, the `datadog_agent` source. This only takes effect when
+                // the test case YAML file defining the event, constructs it with the log
+                // builder variant, and specifies an integer in milliseconds for the timestamp.
+                if component_type == ComponentType::Source {
+                    if let Event::Log(ref mut log) = event {
+                        if let Some(ts) = log.remove_timestamp() {
+                            let ts = match ts.as_integer() {
+                                Some(ts) => chrono::DateTime::from_timestamp_millis(ts)
+                                    .expect(&format!("invalid timestamp in input test event {ts}"))
+                                    .into(),
+                                None => ts,
+                            };
+                            log.parse_path_and_insert("timestamp", ts)
+                                .expect("failed to insert timestamp");
+                        }
+                    }
+                }
+
+                // This particular metric is tricky because a component can run the
+                // EstimatedJsonSizeOf calculation on a single event or an array of
+                // events. If it's an array of events, the size calculation includes
+                // the size of bracket ('[', ']') characters... But we have no way
+                // of knowing which case it will be. Indeed, there are even components
+                // where BOTH scenarios are possible, depending on how the component
+                // is configured.
+                // This is handled in the component spec validator code where we compare
+                // the actual to the expected.
                 input_runner_metrics.sent_event_bytes_total +=
-                    vec![event].estimated_json_encoded_size_of().get() as u64;
+                    event.estimated_json_encoded_size_of().get() as u64;
             }
         }
+        info!("Input driver sent all events.");
     })
 }
 
@@ -563,33 +658,62 @@ fn spawn_output_driver(
     mut output_rx: Receiver<Vec<Event>>,
     runner_metrics: &Arc<Mutex<RunnerMetrics>>,
     maybe_encoder: Option<Encoder<encoding::Framer>>,
+    component_type: ComponentType,
+    expected_events: usize,
 ) -> JoinHandle<Vec<Event>> {
     let output_runner_metrics = Arc::clone(runner_metrics);
 
     tokio::spawn(async move {
+        let timeout = tokio::time::sleep(Duration::from_secs(8));
+        tokio::pin!(timeout);
+
         let mut output_events = Vec::new();
 
-        while let Some(events) = output_rx.recv().await {
-            output_events.extend(events.clone());
-
-            // Update the runner metrics for the received event. This will later
-            // be used in the Validators, as the "expected" case.
-            let mut output_runner_metrics = output_runner_metrics.lock().unwrap();
-
-            for output_event in events {
-                output_runner_metrics.received_events_total += 1;
-                output_runner_metrics.received_event_bytes_total += vec![output_event.clone()]
-                    .estimated_json_encoded_size_of()
-                    .get()
-                    as u64;
-
-                if let Some(encoder) = maybe_encoder.as_ref() {
-                    let mut buffer = BytesMut::new();
-                    encoder
-                        .clone()
-                        .encode(output_event, &mut buffer)
-                        .expect("should not fail to encode output event");
-
-                    output_runner_metrics.received_bytes_total += buffer.len() as u64;
+        loop {
+            tokio::select! {
+                _ = &mut timeout => {
+                    error!("Output driver timed out waiting for all events.");
+                    break
+                },
+                events = output_rx.recv() => {
+                    if let Some(events) = events {
+                        info!("Output driver received {} events.", events.len());
+                        output_events.extend(events.clone());
+
+                        // Update the runner metrics for the received events. This will later
+                        // be used in the Validators, as the "expected" case.
+                        let mut output_runner_metrics = output_runner_metrics.lock().await;
+
+                        if component_type != ComponentType::Sink {
+                            for output_event in events {
+                                // The event is wrapped in a Vec to match the actual event storage in
+                                // the real topology
+                                output_runner_metrics.received_event_bytes_total +=
+                                    vec![&output_event].estimated_json_encoded_size_of().get() as u64;
+
+                                if let Some(encoder) = maybe_encoder.as_ref() {
+                                    let mut buffer = BytesMut::new();
+                                    encoder
+                                        .clone()
+                                        .encode(output_event, &mut buffer)
+                                        .expect("should not fail to encode output event");
+
+                                    output_runner_metrics.received_events_total += 1;
+                                    output_runner_metrics.received_bytes_total += buffer.len() as u64;
+                                }
+                            }
+                        }
+                        if output_events.len() >= expected_events {
+                            info!("Output driver has received all expected events.");
+                            break
+                        }
+                    } else {
+                        // The channel closed on us.
+                        // This shouldn't happen because in the runner we should not shutdown the external
+                        // resource until this output driver task is complete.
+                        error!("Output driver channel with external resource closed.");
+                        break
+                    }
                 }
             }
         }
diff --git a/src/components/validation/runner/telemetry.rs b/src/components/validation/runner/telemetry.rs
index c415bfb05d012..83c12c02db9c4 100644
--- a/src/components/validation/runner/telemetry.rs
+++ b/src/components/validation/runner/telemetry.rs
@@ -36,7 +36,7 @@ impl Telemetry {
     /// Creates a telemetry collector by attaching the relevant components to an existing `ConfigBuilder`.
     pub fn attach_to_config(config_builder: &mut ConfigBuilder) -> Self {
         let listen_addr = GrpcAddress::from(next_addr());
-        debug!(%listen_addr, "Attaching telemetry components.");
+        info!(%listen_addr, "Attaching telemetry components.");
 
         // Attach an internal logs and internal metrics source, and send them on to a dedicated Vector
         // sink that we'll spawn a listener for to collect everything.
@@ -83,10 +83,10 @@ impl Telemetry {
         // needs to be shut down after the telemetry collector. This is because
         // the server needs to be alive to process every last incoming event
         // from the Vector sink that we're using to collect telemetry.
-        let grpc_task_coordinator = TaskCoordinator::new();
+        let grpc_task_coordinator = TaskCoordinator::new("gRPC");
         spawn_grpc_server(self.listen_addr, self.service, &grpc_task_coordinator);
-        let grpc_task_coordinator = grpc_task_coordinator.started().await;
-        debug!("All gRPC task(s) started.");
+        let mut grpc_task_coordinator = grpc_task_coordinator.started().await;
+        info!("All gRPC task(s) started.");
 
         let mut rx = self.rx;
         let driver_handle = tokio::spawn(async move {
@@ -107,7 +107,7 @@ impl Telemetry {
             // emitted. Thus, two batches ensure that all component
             // events have been emitted.
 
-            debug!("Telemetry: waiting for final internal_metrics events before shutting down.");
+            info!("Telemetry: waiting for final internal_metrics events before shutting down.");
 
             let mut batches_received = 0;
 
@@ -121,7 +121,7 @@ impl Telemetry {
                     None => break,
                     Some(telemetry_event_batch) => {
                         telemetry_events.extend(telemetry_event_batch);
-                        debug!("Telemetry: processed one batch of internal_metrics.");
+                        info!("Telemetry: processed one batch of internal_metrics.");
                         batches_received += 1;
                         if batches_received == SHUTDOWN_TICKS {
                             break;
@@ -145,7 +145,6 @@ impl Telemetry {
         }
 
         grpc_task_coordinator.shutdown().await;
-        debug!("GRPC task(s) have been shutdown.");
 
         telemetry_completed.mark_as_done();
 
diff --git a/src/components/validation/sync.rs b/src/components/validation/sync.rs
index 0e842b66eb87f..92f359af6a47a 100644
--- a/src/components/validation/sync.rs
+++ b/src/components/validation/sync.rs
@@ -180,17 +180,19 @@ pub struct Started {
 /// after waiting for all tasks to start, and so on.
 pub struct TaskCoordinator<State> {
     state: State,
+    name: String,
 }
 
 impl TaskCoordinator<()> {
     /// Creates a new `TaskCoordinator`.
-    pub fn new() -> TaskCoordinator<Configuring> {
+    pub fn new(name: &str) -> TaskCoordinator<Configuring> {
         TaskCoordinator {
             state: Configuring {
                 tasks_started: WaitGroup::new(),
                 tasks_completed: WaitGroup::new(),
                 shutdown_triggers: Mutex::new(Vec::new()),
             },
+            name: name.to_string(),
         }
     }
 }
@@ -233,27 +235,34 @@ impl TaskCoordinator<Configuring> {
                 tasks_completed: Some(tasks_completed),
                 shutdown_triggers: shutdown_triggers.into_inner().expect("poisoned"),
             },
+            name: self.name,
         }
     }
 }
 
 impl TaskCoordinator<Started> {
     /// Triggers all coordinated tasks to shutdown, and waits for them to mark themselves as completed.
-    pub async fn shutdown(mut self) {
+    pub async fn shutdown(&mut self) {
+        info!("{}: triggering task to shutdown.", self.name);
+
         // Trigger all registered shutdown handles.
         for trigger in self.state.shutdown_triggers.drain(..) {
             trigger.trigger();
-            trace!("Shutdown triggered for coordinated tasks.");
+            debug!("{}: shutdown triggered for coordinated tasks.", self.name);
         }
 
         // Now simply wait for all of them to mark themselves as completed.
-        trace!("Waiting for coordinated tasks to complete...");
+        debug!(
+            "{}: waiting for coordinated tasks to complete...",
+            self.name
+        );
         let tasks_completed = self
             .state
             .tasks_completed
             .as_mut()
             .expect("tasks completed wait group already consumed");
 
         tasks_completed.wait_for_children().await;
-        trace!("All coordinated tasks completed.");
+
+        info!("{}: task has been shutdown.", self.name);
     }
 }
diff --git a/src/components/validation/test_case.rs b/src/components/validation/test_case.rs
index 360c6513161b0..a281f6e5883f0 100644
--- a/src/components/validation/test_case.rs
+++ b/src/components/validation/test_case.rs
@@ -26,6 +26,7 @@ pub enum TestCaseExpectation {
 #[derive(Deserialize)]
 pub struct TestCase {
     pub name: String,
+    pub config_name: Option<String>,
     pub expectation: TestCaseExpectation,
     pub events: Vec<TestEvent>,
 }
diff --git a/src/components/validation/validators/component_spec/mod.rs b/src/components/validation/validators/component_spec/mod.rs
index cfd1f8a5a88f5..98db84a1ce6dd 100644
--- a/src/components/validation/validators/component_spec/mod.rs
+++ b/src/components/validation/validators/component_spec/mod.rs
@@ -1,12 +1,9 @@
-mod sources;
+use crate::components::validation::{
+    component_names::*, ComponentType, RunnerMetrics, TestCaseExpectation, TestEvent,
+};
+use vector_lib::event::{Event, Metric, MetricKind};
 
-use vector_lib::event::{Event, Metric};
-
-use crate::components::validation::{ComponentType, RunnerMetrics, TestCaseExpectation, TestEvent};
-
-use super::Validator;
-
-use self::sources::{validate_sources, SourceMetricType};
+use super::{ComponentMetricType, Validator};
 
 /// Validates that the component meets the requirements of the [Component Specification][component_spec].
 ///
@@ -33,12 +30,17 @@ impl Validator for ComponentSpecValidator {
         telemetry_events: &[Event],
         runner_metrics: &RunnerMetrics,
     ) -> Result<Vec<String>, Vec<String>> {
+        let expect_received_events = inputs
+            .iter()
+            .filter(|te| !te.should_fail() || te.should_reject())
+            .count() as u64;
+
         for input in inputs {
-            debug!("Validator observed input event: {:?}", input);
+            info!("Validator observed input event: {:?}", input);
         }
 
         for output in outputs {
-            debug!("Validator observed output event: {:?}", output);
+            info!("Validator observed output event: {:?}", output);
         }
 
         // Validate that the number of inputs/outputs matched the test case expectation.
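A quick worked example of the `expect_received_events` filter above, using the `should_fail` / `should_reject` semantics from resources/event.rs (the three-event input set is invented): a `Passthrough` event counts (`!should_fail()` is true), a `FailWithAlternateEncoder` event does not (it fails encoding before the component ever receives it), and a `FailWithExternalResource` event counts again (`should_reject()` is true, since the component does receive it and only the external resource rejects it):

    // Sketch only; `inputs` stands in for the test case's Vec<TestEvent>.
    let expect_received_events = inputs
        .iter()
        .filter(|te| !te.should_fail() || te.should_reject())
        .count() as u64;
    // [Passthrough, FailWithAlternateEncoder, FailWithExternalResource] => 2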
@@ -82,7 +84,12 @@ impl Validator for ComponentSpecValidator { format!("received {} telemetry events", telemetry_events.len()), ]; - let out = validate_telemetry(component_type, telemetry_events, runner_metrics)?; + let out = validate_telemetry( + component_type, + telemetry_events, + runner_metrics, + expect_received_events, + )?; run_out.extend(out); Ok(run_out) @@ -93,21 +100,34 @@ fn validate_telemetry( component_type: ComponentType, telemetry_events: &[Event], runner_metrics: &RunnerMetrics, + expect_received_events: u64, ) -> Result, Vec> { let mut out: Vec = Vec::new(); let mut errs: Vec = Vec::new(); - match component_type { - ComponentType::Source => { - let result = validate_sources(telemetry_events, runner_metrics); - match result { - Ok(o) => out.extend(o), - Err(e) => errs.extend(e), - } + let metric_types = [ + ComponentMetricType::EventsReceived, + ComponentMetricType::EventsReceivedBytes, + ComponentMetricType::ReceivedBytesTotal, + ComponentMetricType::SentEventsTotal, + ComponentMetricType::SentEventBytesTotal, + ComponentMetricType::SentBytesTotal, + ComponentMetricType::ErrorsTotal, + ComponentMetricType::DiscardedEventsTotal, + ]; + + metric_types.iter().for_each(|metric_type| { + match validate_metric( + telemetry_events, + runner_metrics, + metric_type, + component_type, + expect_received_events, + ) { + Err(e) => errs.extend(e), + Ok(m) => out.extend(m), } - ComponentType::Sink => {} - ComponentType::Transform => {} - } + }); if errs.is_empty() { Ok(out) @@ -116,11 +136,82 @@ fn validate_telemetry( } } +fn validate_metric( + telemetry_events: &[Event], + runner_metrics: &RunnerMetrics, + metric_type: &ComponentMetricType, + component_type: ComponentType, + expect_received_events: u64, +) -> Result, Vec> { + let component_id = match component_type { + ComponentType::Source => TEST_SOURCE_NAME, + ComponentType::Transform => TEST_TRANSFORM_NAME, + ComponentType::Sink => TEST_SINK_NAME, + }; + + let expected = match metric_type { + ComponentMetricType::EventsReceived => { + // The reciprocal metric for events received is events sent, + // so the expected value is what the input runner sent. + runner_metrics.sent_events_total + } + ComponentMetricType::EventsReceivedBytes => { + // The reciprocal metric for received_event_bytes is sent_event_bytes, + // so the expected value is what the input runner sent. + runner_metrics.sent_event_bytes_total + } + ComponentMetricType::ReceivedBytesTotal => { + // The reciprocal metric for received_bytes is sent_bytes, + // so the expected value is what the input runner sent. + if component_type == ComponentType::Sink { + 0 // sinks should not emit this metric + } else { + runner_metrics.sent_bytes_total + } + } + ComponentMetricType::SentEventsTotal => { + // The reciprocal metric for events sent is events received, + // so the expected value is what the output runner received. + runner_metrics.received_events_total + } + ComponentMetricType::SentBytesTotal => { + // The reciprocal metric for sent_bytes is received_bytes, + // so the expected value is what the output runner received. + if component_type == ComponentType::Source { + 0 // sources should not emit this metric + } else { + runner_metrics.received_bytes_total + } + } + ComponentMetricType::SentEventBytesTotal => { + // The reciprocal metric for sent_event_bytes is received_event_bytes, + // so the expected value is what the output runner received. 
+            runner_metrics.received_event_bytes_total
+        }
+        ComponentMetricType::ErrorsTotal => runner_metrics.errors_total,
+        ComponentMetricType::DiscardedEventsTotal => runner_metrics.discarded_events_total,
+    };
+
+    compare_actual_to_expected(
+        telemetry_events,
+        metric_type,
+        component_id,
+        expected,
+        expect_received_events,
+    )
+}
+
 fn filter_events_by_metric_and_component<'a>(
     telemetry_events: &'a [Event],
-    metric: &SourceMetricType,
-    component_name: &'a str,
+    metric: &ComponentMetricType,
+    component_id: &'a str,
 ) -> Vec<&'a Metric> {
+    info!(
+        "Filter looking for metric {} {}",
+        metric.to_string(),
+        component_id
+    );
+
     let metrics: Vec<&Metric> = telemetry_events
         .iter()
         .flat_map(|e| {
@@ -132,8 +223,9 @@ fn filter_events_by_metric_and_component<'a>(
         })
         .filter(|&m| {
             if m.name() == metric.to_string() {
+                debug!("{}", m);
                 if let Some(tags) = m.tags() {
-                    if tags.get("component_name").unwrap_or("") == component_name {
+                    if tags.get("component_id").unwrap_or("") == component_id {
                         return true;
                     }
                 }
@@ -143,7 +235,71 @@ fn filter_events_by_metric_and_component<'a>(
         })
         .collect();
 
-    debug!("{}: {} metrics found.", metric.to_string(), metrics.len(),);
+    info!("{}: {} metrics found.", metric.to_string(), metrics.len());
 
     metrics
 }
+
+fn sum_counters(
+    metric_name: &ComponentMetricType,
+    metrics: &[&Metric],
+) -> Result<u64, Vec<String>> {
+    let mut sum: f64 = 0.0;
+    let mut errs = Vec::new();
+
+    for m in metrics {
+        match m.value() {
+            vector_lib::event::MetricValue::Counter { value } => {
+                if let MetricKind::Absolute = m.data().kind {
+                    sum = *value;
+                } else {
+                    sum += *value;
+                }
+            }
+            _ => errs.push(format!("{}: metric value is not a counter", metric_name,)),
+        }
+    }
+
+    if errs.is_empty() {
+        Ok(sum as u64)
+    } else {
+        Err(errs)
+    }
+}
+
+fn compare_actual_to_expected(
+    telemetry_events: &[Event],
+    metric_type: &ComponentMetricType,
+    component_id: &str,
+    expected: u64,
+    expect_received_events: u64,
+) -> Result<Vec<String>, Vec<String>> {
+    let mut errs: Vec<String> = Vec::new();
+
+    let metrics =
+        filter_events_by_metric_and_component(telemetry_events, metric_type, component_id);
+
+    let actual = sum_counters(metric_type, &metrics)?;
+
+    info!("{metric_type}: expected {expected}, actual {actual}.");
+
+    if actual != expected &&
+        // This is a bit messy. The issue is that EstimatedJsonSizeOf can be called by a
+        // component on an event array or on a single event, and we have no way of knowing
+        // which it was. By default the input driver for the framework does not assume an
+        // array, so we also accept the value the array scenario would produce, which adds
+        // the size of the brackets for each event.
+        (metric_type != &ComponentMetricType::EventsReceivedBytes
+            || (actual != (expected + (expect_received_events * 2))))
+    {
+        errs.push(format!(
+            "{metric_type}: expected {expected}, actual {actual}",
+        ));
+    }
+
+    if !errs.is_empty() {
+        return Err(errs);
+    }
+
+    Ok(vec![format!("{}: {}", metric_type, actual)])
+}
diff --git a/src/components/validation/validators/component_spec/sources.rs b/src/components/validation/validators/component_spec/sources.rs
deleted file mode 100644
index e4c6d3383f688..0000000000000
--- a/src/components/validation/validators/component_spec/sources.rs
+++ /dev/null
@@ -1,242 +0,0 @@
-use std::fmt::{Display, Formatter};
-
-use vector_lib::event::{Event, MetricKind};
-
-use crate::components::validation::RunnerMetrics;
-
-use super::filter_events_by_metric_and_component;
-
-const TEST_SOURCE_NAME: &str = "test_source";
-
-pub enum SourceMetricType {
-    EventsReceived,
-    EventsReceivedBytes,
-    ReceivedBytesTotal,
-    SentEventsTotal,
-    SentEventBytesTotal,
-    ErrorsTotal,
-}
-
-impl SourceMetricType {
-    const fn name(&self) -> &'static str {
-        match self {
-            SourceMetricType::EventsReceived => "component_received_events_total",
-            SourceMetricType::EventsReceivedBytes => "component_received_event_bytes_total",
-            SourceMetricType::ReceivedBytesTotal => "component_received_bytes_total",
-            SourceMetricType::SentEventsTotal => "component_sent_events_total",
-            SourceMetricType::SentEventBytesTotal => "component_sent_event_bytes_total",
-            SourceMetricType::ErrorsTotal => "component_errors_total",
-        }
-    }
-}
-
-impl Display for SourceMetricType {
-    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
-        write!(f, "{}", self.name())
-    }
-}
-
-pub fn validate_sources(
-    telemetry_events: &[Event],
-    runner_metrics: &RunnerMetrics,
-) -> Result<Vec<String>, Vec<String>> {
-    let mut out: Vec<String> = Vec::new();
-    let mut errs: Vec<String> = Vec::new();
-
-    let validations = [
-        validate_component_received_events_total,
-        validate_component_received_event_bytes_total,
-        validate_component_received_bytes_total,
-        validate_component_sent_events_total,
-        validate_component_sent_event_bytes_total,
-        validate_component_errors_total,
-    ];
-
-    for v in validations.iter() {
-        match v(telemetry_events, runner_metrics) {
-            Err(e) => errs.extend(e),
-            Ok(m) => out.extend(m),
-        }
-    }
-
-    if errs.is_empty() {
-        Ok(out)
-    } else {
-        Err(errs)
-    }
-}
-
-fn sum_counters(
-    metric_name: &SourceMetricType,
-    metrics: &[&vector_lib::event::Metric],
-) -> Result<u64, Vec<String>> {
-    let mut sum: f64 = 0.0;
-    let mut errs = Vec::new();
-
-    for m in metrics {
-        match m.value() {
-            vector_lib::event::MetricValue::Counter { value } => {
-                if let MetricKind::Absolute = m.data().kind {
-                    sum = *value;
-                } else {
-                    sum += *value;
-                }
-            }
-            _ => errs.push(format!("{}: metric value is not a counter", metric_name,)),
-        }
-    }
-
-    if errs.is_empty() {
-        Ok(sum as u64)
-    } else {
-        Err(errs)
-    }
-}
-
-fn validate_events_total(
-    telemetry_events: &[Event],
-    metric_type: &SourceMetricType,
-    expected_events: u64,
-) -> Result<Vec<String>, Vec<String>> {
-    let mut errs: Vec<String> = Vec::new();
-
-    let metrics =
-        filter_events_by_metric_and_component(telemetry_events, metric_type, TEST_SOURCE_NAME);
-
-    let actual_events = sum_counters(metric_type, &metrics)?;
-
-    debug!(
-        "{}: {} events, {} expected events.",
-        metric_type, actual_events, expected_events,
-    );
-
-    if actual_events != expected_events {
-        errs.push(format!(
-            "{}: expected {} events, but received {}",
-            metric_type, expected_events, actual_events
-        ));
-    }
-
-    if !errs.is_empty() {
-        return Err(errs);
-    }
-
-    Ok(vec![format!("{}: {}", metric_type, actual_events)])
-}
-
-fn validate_bytes_total(
-    telemetry_events: &[Event],
-    metric_type: &SourceMetricType,
-    expected_bytes: u64,
-) -> Result<Vec<String>, Vec<String>> {
-    let mut errs: Vec<String> = Vec::new();
-
-    let metrics =
-        filter_events_by_metric_and_component(telemetry_events, metric_type, TEST_SOURCE_NAME);
-
-    let actual_bytes = sum_counters(metric_type, &metrics)?;
-
-    debug!(
-        "{}: {} bytes, {} expected bytes.",
-        metric_type, actual_bytes, expected_bytes,
-    );
-
-    if actual_bytes != expected_bytes {
-        errs.push(format!(
-            "{}: expected {} bytes, but received {}",
-            metric_type, expected_bytes, actual_bytes
-        ));
-    }
-
-    if !errs.is_empty() {
-        return Err(errs);
-    }
-
-    Ok(vec![format!("{}: {}", metric_type, actual_bytes)])
-}
-
-fn validate_component_received_events_total(
-    telemetry_events: &[Event],
-    runner_metrics: &RunnerMetrics,
-) -> Result<Vec<String>, Vec<String>> {
-    // The reciprocal metric for events received is events sent,
-    // so the expected value is what the input runner sent.
-    let expected_events = runner_metrics.sent_events_total;
-
-    validate_events_total(
-        telemetry_events,
-        &SourceMetricType::EventsReceived,
-        expected_events,
-    )
-}
-
-fn validate_component_received_event_bytes_total(
-    telemetry_events: &[Event],
-    runner_metrics: &RunnerMetrics,
-) -> Result<Vec<String>, Vec<String>> {
-    // The reciprocal metric for received_event_bytes is sent_event_bytes,
-    // so the expected value is what the input runner sent.
-    let expected_bytes = runner_metrics.sent_event_bytes_total;
-
-    validate_bytes_total(
-        telemetry_events,
-        &SourceMetricType::EventsReceivedBytes,
-        expected_bytes,
-    )
-}
-
-fn validate_component_received_bytes_total(
-    telemetry_events: &[Event],
-    runner_metrics: &RunnerMetrics,
-) -> Result<Vec<String>, Vec<String>> {
-    // The reciprocal metric for received_bytes is sent_bytes,
-    // so the expected value is what the input runner sent.
-    let expected_bytes = runner_metrics.sent_bytes_total;
-
-    validate_bytes_total(
-        telemetry_events,
-        &SourceMetricType::ReceivedBytesTotal,
-        expected_bytes,
-    )
-}
-
-fn validate_component_sent_events_total(
-    telemetry_events: &[Event],
-    runner_metrics: &RunnerMetrics,
-) -> Result<Vec<String>, Vec<String>> {
-    // The reciprocal metric for events sent is events received,
-    // so the expected value is what the output runner received.
-    let expected_events = runner_metrics.received_events_total;
-
-    validate_events_total(
-        telemetry_events,
-        &SourceMetricType::SentEventsTotal,
-        expected_events,
-    )
-}
-
-fn validate_component_sent_event_bytes_total(
-    telemetry_events: &[Event],
-    runner_metrics: &RunnerMetrics,
-) -> Result<Vec<String>, Vec<String>> {
-    // The reciprocal metric for sent_event_bytes is received_event_bytes,
-    // so the expected value is what the output runner received.
-    let expected_bytes = runner_metrics.received_event_bytes_total;
-
-    validate_bytes_total(
-        telemetry_events,
-        &SourceMetricType::SentEventBytesTotal,
-        expected_bytes,
-    )
-}
-
-fn validate_component_errors_total(
-    telemetry_events: &[Event],
-    runner_metrics: &RunnerMetrics,
-) -> Result<Vec<String>, Vec<String>> {
-    validate_events_total(
-        telemetry_events,
-        &SourceMetricType::ErrorsTotal,
-        runner_metrics.errors_total,
-    )
-}
diff --git a/src/components/validation/validators/mod.rs b/src/components/validation/validators/mod.rs
index b89441d8adc9a..13693eb5ecd0c 100644
--- a/src/components/validation/validators/mod.rs
+++ b/src/components/validation/validators/mod.rs
@@ -2,6 +2,8 @@ mod component_spec;
 
 pub use self::component_spec::ComponentSpecValidator;
 
+use std::fmt::{Display, Formatter};
+
 use vector_lib::event::Event;
 
 use super::{ComponentType, RunnerMetrics, TestCaseExpectation, TestEvent};
@@ -49,3 +51,36 @@ impl From<StandardValidators> for Box<dyn Validator> {
         }
     }
 }
+
+#[derive(PartialEq)]
+pub enum ComponentMetricType {
+    EventsReceived,
+    EventsReceivedBytes,
+    ReceivedBytesTotal,
+    SentEventsTotal,
+    SentBytesTotal,
+    SentEventBytesTotal,
+    ErrorsTotal,
+    DiscardedEventsTotal,
+}
+
+impl ComponentMetricType {
+    const fn name(&self) -> &'static str {
+        match self {
+            ComponentMetricType::EventsReceived => "component_received_events_total",
+            ComponentMetricType::EventsReceivedBytes => "component_received_event_bytes_total",
+            ComponentMetricType::ReceivedBytesTotal => "component_received_bytes_total",
+            ComponentMetricType::SentEventsTotal => "component_sent_events_total",
+            ComponentMetricType::SentBytesTotal => "component_sent_bytes_total",
+            ComponentMetricType::SentEventBytesTotal => "component_sent_event_bytes_total",
+            ComponentMetricType::ErrorsTotal => "component_errors_total",
+            ComponentMetricType::DiscardedEventsTotal => "component_discarded_events_total",
+        }
+    }
+}
+
+impl Display for ComponentMetricType {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        write!(f, "{}", self.name())
+    }
+}
diff --git a/src/conditions/mod.rs b/src/conditions/mod.rs
index 8c5244e47cb14..4475ff0bcf852 100644
--- a/src/conditions/mod.rs
+++ b/src/conditions/mod.rs
@@ -9,9 +9,9 @@ pub(crate) mod is_metric;
 pub(crate) mod is_trace;
 mod vrl;
 
+pub use self::datadog_search::{DatadogSearchConfig, DatadogSearchRunner};
 pub use self::vrl::VrlConfig;
 use self::{
-    datadog_search::{DatadogSearchConfig, DatadogSearchRunner},
     is_log::{check_is_log, check_is_log_with_context},
     is_metric::{check_is_metric, check_is_metric_with_context},
     is_trace::{check_is_trace, check_is_trace_with_context},
diff --git a/src/config/enrichment_table.rs b/src/config/enrichment_table.rs
index 7052b82005b68..5e2cd72a00858 100644
--- a/src/config/enrichment_table.rs
+++ b/src/config/enrichment_table.rs
@@ -1,4 +1,3 @@
-use async_trait::async_trait;
 use enum_dispatch::enum_dispatch;
 use vector_lib::config::GlobalOptions;
 use vector_lib::configurable::{configurable_component, NamedComponent};
@@ -22,7 +21,6 @@ impl EnrichmentTableOuter {
 }
 
 /// Generalized interface for describing and building enrichment table components.
-#[async_trait]
 #[enum_dispatch]
 pub trait EnrichmentTableConfig: NamedComponent + core::fmt::Debug + Send + Sync {
     /// Builds the enrichment table with the given globals.
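Several hunks in this patch (`EnrichmentTableConfig` here, plus `ProviderConfig` and the AWS client traits further down) drop `#[async_trait]` in favor of native async-fn-in-trait, which is why `src/lib.rs` later adds `#![allow(async_fn_in_trait)]`. A minimal sketch of the pattern, using hypothetical names (`TableConfigLike`, `ExampleConfig`) rather than the real types:

    #![allow(async_fn_in_trait)] // silence the lint about the implicit, non-Send future

    // With Rust 1.75+, traits can declare `async fn` directly; no boxing macro needed.
    trait TableConfigLike {
        async fn build(&self, path: &str) -> Result<String, String>;
    }

    struct ExampleConfig; // hypothetical stand-in for a real config type

    impl TableConfigLike for ExampleConfig {
        async fn build(&self, path: &str) -> Result<String, String> {
            // A real implementation would perform async I/O here.
            Ok(format!("loaded enrichment table from {path}"))
        }
    }

Where a `Send` bound on the returned future still matters (as in `SendRecord` and the `aws_s_s` `Client` trait below), the patch instead desugars to `fn ... -> impl Future<Output = ...> + Send`, since a plain `async fn` in a trait cannot express that bound.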
diff --git a/src/config/graph.rs b/src/config/graph.rs
index dc73ad2dfa322..f97e63cfc0ddc 100644
--- a/src/config/graph.rs
+++ b/src/config/graph.rs
@@ -228,7 +228,7 @@ impl Graph {
                 stack.insert(n.clone());
             } else {
                 // we came back to the node after exploring all its children - remove it from the stack and traversal
-                stack.remove(&n);
+                stack.shift_remove(&n);
                 traversal.pop_back();
             }
             let inputs = self
diff --git a/src/config/provider.rs b/src/config/provider.rs
index f2ae9595ee2c4..c2d7fbd462e62 100644
--- a/src/config/provider.rs
+++ b/src/config/provider.rs
@@ -1,11 +1,9 @@
-use async_trait::async_trait;
 use enum_dispatch::enum_dispatch;
 use vector_lib::configurable::NamedComponent;
 
 use crate::{providers::BuildResult, signal};
 
 /// Generalized interface for constructing a configuration from a provider.
-#[async_trait]
 #[enum_dispatch]
 pub trait ProviderConfig: NamedComponent + core::fmt::Debug + Send + Sync {
     /// Builds a configuration.
diff --git a/src/config/sink.rs b/src/config/sink.rs
index 0de8ac5c5f760..77ab8e1e8e933 100644
--- a/src/config/sink.rs
+++ b/src/config/sink.rs
@@ -238,6 +238,9 @@ pub struct SinkContext {
     pub schema: schema::Options,
     pub app_name: String,
     pub app_name_slug: String,
+
+    /// Extra context data provided by the running app and shared across all components. This can be
+    /// used to pass shared settings or other data from outside the components.
     pub extra_context: ExtraContext,
 }
diff --git a/src/config/source.rs b/src/config/source.rs
index 2ff02c3e53b17..20b2f227e7198 100644
--- a/src/config/source.rs
+++ b/src/config/source.rs
@@ -17,7 +17,7 @@ use vector_lib::{
 };
 
 use super::{schema, ComponentKey, ProxyConfig, Resource};
-use crate::{shutdown::ShutdownSignal, SourceSender};
+use crate::{extra_context::ExtraContext, shutdown::ShutdownSignal, SourceSender};
 
 pub type BoxedSource = Box<dyn SourceConfig>;
 
@@ -131,10 +131,14 @@ pub struct SourceContext {
     /// Given a source can expose multiple [`SourceOutput`] channels, the ID is tied to the identifier of
     /// that `SourceOutput`.
     pub schema_definitions: HashMap<Option<String>, schema::Definition>,
+
+    /// Extra context data provided by the running app and shared across all components. This can be
+    /// used to pass shared settings or other data from outside the components.
+    pub extra_context: ExtraContext,
 }
 
 impl SourceContext {
-    #[cfg(test)]
+    #[cfg(any(test, feature = "test-utils"))]
     pub fn new_shutdown(
         key: &ComponentKey,
         out: SourceSender,
@@ -151,12 +155,13 @@ impl SourceContext {
                 acknowledgements: false,
                 schema_definitions: HashMap::default(),
                 schema: Default::default(),
+                extra_context: Default::default(),
             },
             shutdown,
         )
     }
 
-    #[cfg(test)]
+    #[cfg(any(test, feature = "test-utils"))]
     pub fn new_test(
         out: SourceSender,
         schema_definitions: Option<HashMap<Option<String>, schema::Definition>>,
@@ -170,6 +175,7 @@ impl SourceContext {
             acknowledgements: false,
             schema_definitions: schema_definitions.unwrap_or_default(),
             schema: Default::default(),
+            extra_context: Default::default(),
         }
     }
 
diff --git a/src/config/transform.rs b/src/config/transform.rs
index b5cfdedeb0880..c7315dc80e630 100644
--- a/src/config/transform.rs
+++ b/src/config/transform.rs
@@ -19,6 +19,7 @@ use vector_lib::{
 use super::schema::Options as SchemaOptions;
 use super::OutputId;
 use super::{id::Inputs, ComponentKey};
+use crate::extra_context::ExtraContext;
 
 pub type BoxedTransform = Box<dyn TransformConfig>;
 
@@ -96,7 +97,6 @@ where
     }
 }
 
-#[derive(Debug)]
 pub struct TransformContext {
     // This is optional because currently there are a lot of places we use `TransformContext` that
     // may not have the relevant data available (e.g. tests). In the future it'd be nice to make it
@@ -121,6 +121,10 @@ pub struct TransformContext {
     pub merged_schema_definition: schema::Definition,
 
     pub schema: SchemaOptions,
+
+    /// Extra context data provided by the running app and shared across all components. This can be
+    /// used to pass shared settings or other data from outside the components.
+    pub extra_context: ExtraContext,
 }
 
 impl Default for TransformContext {
@@ -132,6 +136,7 @@ impl Default for TransformContext {
             schema_definitions: HashMap::from([(None, HashMap::new())]),
             merged_schema_definition: schema::Definition::any(),
             schema: SchemaOptions::default(),
+            extra_context: Default::default(),
         }
     }
 }
diff --git a/src/config/unit_test/mod.rs b/src/config/unit_test/mod.rs
index 78148b356825e..209f55f99bcd5 100644
--- a/src/config/unit_test/mod.rs
+++ b/src/config/unit_test/mod.rs
@@ -1,4 +1,13 @@
-#[cfg(all(test, feature = "vector-unit-test-tests"))]
+// should match vector-unit-test-tests feature
+#[cfg(all(
+    test,
+    feature = "sources-demo_logs",
+    feature = "transforms-remap",
+    feature = "transforms-route",
+    feature = "transforms-filter",
+    feature = "transforms-reduce",
+    feature = "sinks-console"
+))]
 mod tests;
 mod unit_test_components;
 
@@ -32,7 +41,7 @@ use crate::{
         self, loading, ComponentKey, Config, ConfigBuilder, ConfigPath, SinkOuter, SourceOuter,
         TestDefinition, TestInput, TestInputValue, TestOutput,
     },
-    event::{Event, LogEvent, Value},
+    event::{Event, EventMetadata, LogEvent, Value},
     signal,
     topology::{builder::TopologyPieces, RunningTopology},
 };
@@ -216,15 +225,18 @@ impl UnitTestBuildMetadata {
         Ok(inputs
             .into_iter()
             .map(|(insert_at, events)| {
-                let mut source_config = template_sources.remove(&insert_at).unwrap_or_else(|| {
-                    // At this point, all inputs should have been validated to
-                    // correspond with valid transforms, and all valid transforms
-                    // have a source attached.
-                    panic!(
-                        "Invalid input: cannot insert at {:?}",
-                        insert_at.to_string()
-                    )
-                });
+                let mut source_config =
+                    template_sources
+                        .shift_remove(&insert_at)
+                        .unwrap_or_else(|| {
+                            // At this point, all inputs should have been validated to
+                            // correspond with valid transforms, and all valid transforms
+                            // have a source attached.
+                            panic!(
+                                "Invalid input: cannot insert at {:?}",
+                                insert_at.to_string()
+                            )
+                        });
                 source_config.events.extend(events);
                 let id: &str = self
                     .source_ids
@@ -581,7 +593,12 @@ fn build_input_event(input: &TestInput) -> Result<Event, String> {
             result
                 .program
                 .resolve(&mut ctx)
-                .map(|v| Event::Log(LogEvent::from(v.clone())))
+                .map(|_| {
+                    Event::Log(LogEvent::from_parts(
+                        target.value.clone(),
+                        EventMetadata::default_with_value(target.metadata.clone()),
+                    ))
+                })
                 .map_err(|e| e.to_string())
         } else {
             Err("input type 'vrl' requires the field 'source'".to_string())
diff --git a/src/config/unit_test/tests.rs b/src/config/unit_test/tests.rs
index 225397c3ce901..04e517b70ae1f 100644
--- a/src/config/unit_test/tests.rs
+++ b/src/config/unit_test/tests.rs
@@ -5,6 +5,8 @@ use crate::config::ConfigBuilder;
 
 #[tokio::test]
 async fn parse_no_input() {
+    crate::test_util::trace_init();
+
     let config: ConfigBuilder = toml::from_str(indoc! {r#"
         [transforms.bar]
         inputs = ["foo"]
@@ -76,6 +78,8 @@ async fn parse_no_input() {
 
 #[tokio::test]
 async fn parse_no_test_input() {
+    crate::test_util::trace_init();
+
     let config: ConfigBuilder = toml::from_str(indoc! {r#"
         [transforms.bar]
         inputs = ["foo"]
@@ -107,6 +111,8 @@ async fn parse_no_test_input() {
 
 #[tokio::test]
 async fn parse_no_outputs() {
+    crate::test_util::trace_init();
+
     let config: ConfigBuilder = toml::from_str(indoc! {r#"
         [transforms.foo]
         inputs = ["ignored"]
@@ -136,6 +142,8 @@ async fn parse_no_outputs() {
 
 #[tokio::test]
 async fn parse_invalid_output_targets() {
+    crate::test_util::trace_init();
+
     let config: ConfigBuilder = toml::from_str(indoc! {r#"
         [transforms.bar]
         inputs = ["foo"]
@@ -198,6 +206,8 @@ async fn parse_invalid_output_targets() {
 
 #[tokio::test]
 async fn parse_broken_topology() {
+    crate::test_util::trace_init();
+
     let config: ConfigBuilder = toml::from_str(indoc! {r#"
         [transforms.foo]
         inputs = ["something"]
@@ -299,6 +309,8 @@ async fn parse_broken_topology() {
 
 #[tokio::test]
 async fn parse_bad_input_event() {
+    crate::test_util::trace_init();
+
     let config: ConfigBuilder = toml::from_str(indoc! {r#"
         [transforms.foo]
         inputs = ["ignored"]
@@ -335,6 +347,8 @@ async fn parse_bad_input_event() {
 
 #[tokio::test]
 async fn test_success_multi_inputs() {
+    crate::test_util::trace_init();
+
     let config: ConfigBuilder = toml::from_str(indoc! {r#"
         [transforms.foo]
         inputs = ["ignored"]
@@ -347,7 +361,7 @@ async fn test_success_multi_inputs() {
         inputs = ["ignored"]
         type = "remap"
         source = '''
-        .new_field_two = "string value"
+        .new_field_two = "second string value"
         '''
 
         [transforms.bar]
@@ -437,6 +451,8 @@ async fn test_success_multi_inputs() {
 
 #[tokio::test]
 async fn test_success() {
+    crate::test_util::trace_init();
+
     let config: ConfigBuilder = toml::from_str(indoc! {r#"
         [transforms.foo]
         inputs = ["ignored"]
@@ -504,6 +520,8 @@ async fn test_success() {
 
 #[tokio::test]
 async fn test_route() {
+    crate::test_util::trace_init();
+
     let config: ConfigBuilder = toml::from_str(indoc! {r#"
         [transforms.foo]
         inputs = ["ignored"]
@@ -566,6 +584,8 @@ async fn test_route() {
 
 #[tokio::test]
 async fn test_fail_no_outputs() {
+    crate::test_util::trace_init();
+
     let config: ConfigBuilder = toml::from_str(indoc! {r#"
         [transforms.foo]
         inputs = [ "TODO" ]
@@ -599,6 +619,8 @@ async fn test_fail_no_outputs() {
 
 #[tokio::test]
 async fn test_fail_two_output_events() {
+    crate::test_util::trace_init();
+
     let config: ConfigBuilder = toml::from_str(indoc! {r#"
{r#" [transforms.foo] inputs = [ "TODO" ] @@ -678,6 +700,8 @@ async fn test_fail_two_output_events() { #[tokio::test] async fn test_no_outputs_from() { + crate::test_util::trace_init(); + let config: ConfigBuilder = toml::from_str(indoc! {r#" [transforms.foo] inputs = [ "ignored" ] @@ -715,6 +739,8 @@ async fn test_no_outputs_from() { #[tokio::test] async fn test_no_outputs_from_chained() { + crate::test_util::trace_init(); + let config: ConfigBuilder = toml::from_str(indoc! { r#" [transforms.foo] inputs = [ "ignored" ] @@ -759,6 +785,8 @@ async fn test_no_outputs_from_chained() { #[tokio::test] async fn test_log_input() { + crate::test_util::trace_init(); + let config: ConfigBuilder = toml::from_str(indoc! { r#" [transforms.foo] inputs = ["ignored"] @@ -798,6 +826,8 @@ async fn test_log_input() { #[tokio::test] async fn test_metric_input() { + crate::test_util::trace_init(); + let config: ConfigBuilder = toml::from_str(indoc! { r#" [transforms.foo] inputs = ["ignored"] @@ -837,6 +867,8 @@ async fn test_metric_input() { #[tokio::test] async fn test_success_over_gap() { + crate::test_util::trace_init(); + let config: ConfigBuilder = toml::from_str(indoc! { r#" [transforms.foo] inputs = ["ignored"] @@ -885,6 +917,8 @@ async fn test_success_over_gap() { #[tokio::test] async fn test_success_tree() { + crate::test_util::trace_init(); + let config: ConfigBuilder = toml::from_str(indoc! { r#" [transforms.ignored] inputs = ["also_ignored"] @@ -949,6 +983,8 @@ async fn test_success_tree() { #[tokio::test] async fn test_fails() { + crate::test_util::trace_init(); + let config: ConfigBuilder = toml::from_str(indoc! { r#" [transforms.foo] inputs = ["ignored"] @@ -1033,6 +1069,8 @@ async fn test_fails() { #[tokio::test] async fn test_dropped_branch() { + crate::test_util::trace_init(); + let config: ConfigBuilder = toml::from_str(indoc! {r#" [transforms.droptest] type = "remap" @@ -1117,6 +1155,8 @@ async fn test_dropped_branch() { #[tokio::test] async fn test_task_transform() { + crate::test_util::trace_init(); + let config: ConfigBuilder = toml::from_str(indoc! {r#" [transforms.ingress1] type = "remap" @@ -1198,6 +1238,8 @@ async fn test_task_transform() { #[tokio::test] async fn test_glob_input() { + crate::test_util::trace_init(); + let config: ConfigBuilder = toml::from_str(indoc! {r#" [transforms.ingress1] type = "remap" diff --git a/src/enrichment_tables/file.rs b/src/enrichment_tables/file.rs index 5f1e63cce1214..fe932e1347614 100644 --- a/src/enrichment_tables/file.rs +++ b/src/enrichment_tables/file.rs @@ -230,7 +230,6 @@ impl FileConfig { } } -#[async_trait::async_trait] impl EnrichmentTableConfig for FileConfig { async fn build( &self, diff --git a/src/enrichment_tables/geoip.rs b/src/enrichment_tables/geoip.rs index 64c77fb75a159..84d9da62b8aae 100644 --- a/src/enrichment_tables/geoip.rs +++ b/src/enrichment_tables/geoip.rs @@ -18,8 +18,7 @@ use vrl::value::{ObjectMap, Value}; use crate::config::{EnrichmentTableConfig, GenerateConfig}; // MaxMind GeoIP database files have a type field we can use to recognize specific -// products. If we encounter one of these two types, we look for ASN/ISP information; -// otherwise we expect to be working with a City database. +// products. If it is an unknown type, an error will be returned. 
 #[derive(Copy, Clone, Debug)]
 #[allow(missing_docs)]
 pub enum DatabaseKind {
@@ -29,13 +28,16 @@ pub enum DatabaseKind {
     City,
 }
 
-impl From<&str> for DatabaseKind {
-    fn from(v: &str) -> Self {
-        match v {
-            "GeoLite2-ASN" => Self::Asn,
-            "GeoIP2-ISP" => Self::Isp,
-            "GeoIP2-Connection-Type" => Self::ConnectionType,
-            _ => Self::City,
+impl TryFrom<&str> for DatabaseKind {
+    type Error = ();
+
+    fn try_from(value: &str) -> Result<Self, Self::Error> {
+        match value {
+            "GeoLite2-ASN" => Ok(Self::Asn),
+            "GeoIP2-ISP" => Ok(Self::Isp),
+            "GeoIP2-Connection-Type" => Ok(Self::ConnectionType),
+            "GeoIP2-City" | "GeoLite2-City" => Ok(Self::City),
+            _ => Err(()),
         }
     }
 }
@@ -48,6 +50,7 @@ pub struct GeoipConfig {
     /// (**GeoLite2-City.mmdb**).
     ///
     /// Other databases, such as the country database, are not supported.
+    /// The `mmdb` enrichment table can be used for other databases.
     ///
     /// [geoip2]: https://dev.maxmind.com/geoip/geoip2/downloadable
     /// [geolite2]: https://dev.maxmind.com/geoip/geoip2/geolite2/#Download_Access
@@ -89,7 +92,6 @@ impl GenerateConfig for GeoipConfig {
     }
 }
 
-#[async_trait::async_trait]
 impl EnrichmentTableConfig for GeoipConfig {
     async fn build(
         &self,
@@ -112,7 +114,13 @@ impl Geoip {
     /// Creates a new GeoIP struct from the provided config.
     pub fn new(config: GeoipConfig) -> crate::Result<Geoip> {
         let dbreader = Arc::new(Reader::open_readfile(config.path.clone())?);
-        let dbkind = DatabaseKind::from(dbreader.metadata.database_type.as_str());
+        let dbkind =
+            DatabaseKind::try_from(dbreader.metadata.database_type.as_str()).map_err(|_| {
+                format!(
+                    "Unsupported MMDB database type ({}). Use `mmdb` enrichment table instead.",
+                    dbreader.metadata.database_type
+                )
+            })?;
 
         // Check if we can read database with dummy Ip.
         let ip = IpAddr::V4(std::net::Ipv4Addr::UNSPECIFIED);
@@ -444,6 +452,16 @@ mod tests {
         assert!(values.is_none());
     }
 
+    #[test]
+    fn custom_mmdb_type_error() {
+        let result = Geoip::new(GeoipConfig {
+            path: "tests/data/custom-type.mmdb".to_string(),
+            locale: default_locale(),
+        });
+
+        assert!(result.is_err());
+    }
+
     fn find(ip: &str, database: &str) -> Option<ObjectMap> {
         find_select(ip, database, None)
     }
diff --git a/src/enrichment_tables/mmdb.rs b/src/enrichment_tables/mmdb.rs
new file mode 100644
index 0000000000000..d4a40197d405b
--- /dev/null
+++ b/src/enrichment_tables/mmdb.rs
@@ -0,0 +1,277 @@
+//! Handles enrichment tables for `type = mmdb`.
+//! Enrichment data is loaded from any database in [MaxMind][maxmind] format.
+//!
+//! [maxmind]: https://maxmind.com
+use std::{fs, net::IpAddr, sync::Arc, time::SystemTime};
+
+use maxminddb::{MaxMindDBError, Reader};
+use vector_lib::configurable::configurable_component;
+use vector_lib::enrichment::{Case, Condition, IndexHandle, Table};
+use vrl::value::{ObjectMap, Value};
+
+use crate::config::{EnrichmentTableConfig, GenerateConfig};
+
+/// Configuration for the `mmdb` enrichment table.
+#[derive(Clone, Debug, Eq, PartialEq)]
+#[configurable_component(enrichment_table("mmdb"))]
+pub struct MmdbConfig {
+    /// Path to the [MaxMind][maxmind] database
+    ///
+    /// [maxmind]: https://maxmind.com
+    pub path: String,
+}
+
+impl GenerateConfig for MmdbConfig {
+    fn generate_config() -> toml::Value {
+        toml::Value::try_from(Self {
+            path: "/path/to/GeoLite2-City.mmdb".to_string(),
+        })
+        .unwrap()
+    }
+}
+
+impl EnrichmentTableConfig for MmdbConfig {
+    async fn build(
+        &self,
+        _: &crate::config::GlobalOptions,
+    ) -> crate::Result<Box<dyn Table + Send + Sync>> {
+        Ok(Box::new(Mmdb::new(self.clone())?))
+    }
+}
+
+#[derive(Clone)]
+/// A struct that implements [vector_lib::enrichment::Table] to handle loading enrichment data from a MaxMind database.
+pub struct Mmdb {
+    config: MmdbConfig,
+    dbreader: Arc<Reader<Vec<u8>>>,
+    last_modified: SystemTime,
+}
+
+impl Mmdb {
+    /// Creates a new Mmdb struct from the provided config.
+    pub fn new(config: MmdbConfig) -> crate::Result<Mmdb> {
+        let dbreader = Arc::new(Reader::open_readfile(config.path.clone())?);
+
+        // Check if we can read database with dummy Ip.
+        let ip = IpAddr::V4(std::net::Ipv4Addr::UNSPECIFIED);
+        let result = dbreader.lookup::<ObjectMap>(ip).map(|_| ());
+
+        match result {
+            Ok(_) | Err(MaxMindDBError::AddressNotFoundError(_)) => Ok(Mmdb {
+                last_modified: fs::metadata(&config.path)?.modified()?,
+                dbreader,
+                config,
+            }),
+            Err(error) => Err(error.into()),
+        }
+    }
+
+    fn lookup(&self, ip: IpAddr, select: Option<&[String]>) -> Option<ObjectMap> {
+        let data = self.dbreader.lookup::<ObjectMap>(ip).ok()?;
+
+        if let Some(fields) = select {
+            let mut filtered = Value::from(ObjectMap::new());
+            let mut data_value = Value::from(data);
+            for field in fields {
+                filtered.insert(
+                    field.as_str(),
+                    data_value
+                        .remove(field.as_str(), false)
+                        .unwrap_or(Value::Null),
+                );
+            }
+            filtered.into_object()
+        } else {
+            Some(data)
+        }
+    }
+}
+
+impl Table for Mmdb {
+    /// Search the enrichment table data with the given condition.
+    /// All conditions must match (AND).
+    ///
+    /// # Errors
+    /// Errors if no rows, or more than 1 row is found.
+    fn find_table_row<'a>(
+        &self,
+        case: Case,
+        condition: &'a [Condition<'a>],
+        select: Option<&[String]>,
+        index: Option<IndexHandle>,
+    ) -> Result<ObjectMap, String> {
+        let mut rows = self.find_table_rows(case, condition, select, index)?;
+
+        match rows.pop() {
+            Some(row) if rows.is_empty() => Ok(row),
+            Some(_) => Err("More than 1 row found".to_string()),
+            None => Err("IP not found".to_string()),
+        }
+    }
+
+    /// Search the enrichment table data with the given condition.
+    /// All conditions must match (AND).
+    /// Can return multiple matched records
+    fn find_table_rows<'a>(
+        &self,
+        _: Case,
+        condition: &'a [Condition<'a>],
+        select: Option<&[String]>,
+        _: Option<IndexHandle>,
+    ) -> Result<Vec<ObjectMap>, String> {
+        match condition.first() {
+            Some(_) if condition.len() > 1 => Err("Only one condition is allowed".to_string()),
+            Some(Condition::Equals { value, .. }) => {
+                let ip = value
+                    .to_string_lossy()
+                    .parse::<IpAddr>()
+                    .map_err(|_| "Invalid IP address".to_string())?;
+                Ok(self
+                    .lookup(ip, select)
+                    .map(|values| vec![values])
+                    .unwrap_or_default())
+            }
+            Some(_) => Err("Only equality condition is allowed".to_string()),
+            None => Err("IP condition must be specified".to_string()),
+        }
+    }
+
+    /// Hints to the enrichment table what data is going to be searched to allow it to index the
+    /// data in advance.
+    ///
+    /// # Errors
+    /// Errors if the fields are not in the table.
+    fn add_index(&mut self, _: Case, fields: &[&str]) -> Result<IndexHandle, String> {
+        match fields.len() {
+            0 => Err("IP field is required".to_string()),
+            1 => Ok(IndexHandle(0)),
+            _ => Err("Only one field is allowed".to_string()),
+        }
+    }
+
+    /// Returns a list of the field names that are in each index
+    fn index_fields(&self) -> Vec<(Case, Vec<String>)> {
+        Vec::new()
+    }
+
+    /// Returns true if the underlying data has changed and the table needs reloading.
+    fn needs_reload(&self) -> bool {
+        matches!(fs::metadata(&self.config.path)
+            .and_then(|metadata| metadata.modified()),
+            Ok(modified) if modified > self.last_modified)
+    }
+}
+
+impl std::fmt::Debug for Mmdb {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "Maxmind database {}", self.config.path)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use vrl::value::Value;
+
+    #[test]
+    fn city_partial_lookup() {
+        let values = find_select(
+            "2.125.160.216",
+            "tests/data/GeoIP2-City-Test.mmdb",
+            Some(&[
+                "location.latitude".to_string(),
+                "location.longitude".to_string(),
+            ]),
+        )
+        .unwrap();
+
+        let mut expected = ObjectMap::new();
+        expected.insert(
+            "location".into(),
+            ObjectMap::from([
+                ("latitude".into(), Value::from(51.75)),
+                ("longitude".into(), Value::from(-1.25)),
+            ])
+            .into(),
+        );
+
+        assert_eq!(values, expected);
+    }
+
+    #[test]
+    fn isp_lookup() {
+        let values = find("208.192.1.2", "tests/data/GeoIP2-ISP-Test.mmdb").unwrap();
+
+        let mut expected = ObjectMap::new();
+        expected.insert("autonomous_system_number".into(), 701i64.into());
+        expected.insert(
+            "autonomous_system_organization".into(),
+            "MCI Communications Services, Inc. d/b/a Verizon Business".into(),
+        );
+        expected.insert("isp".into(), "Verizon Business".into());
+        expected.insert("organization".into(), "Verizon Business".into());
+
+        assert_eq!(values, expected);
+    }
+
+    #[test]
+    fn connection_type_lookup_success() {
+        let values = find(
+            "201.243.200.1",
+            "tests/data/GeoIP2-Connection-Type-Test.mmdb",
+        )
+        .unwrap();
+
+        let mut expected = ObjectMap::new();
+        expected.insert("connection_type".into(), "Corporate".into());
+
+        assert_eq!(values, expected);
+    }
+
+    #[test]
+    fn lookup_missing() {
+        let values = find("10.1.12.1", "tests/data/custom-type.mmdb");
+
+        assert!(values.is_none());
+    }
+
+    #[test]
+    fn custom_mmdb_type() {
+        let values = find("208.192.1.2", "tests/data/custom-type.mmdb").unwrap();
+
+        let mut expected = ObjectMap::new();
+        expected.insert("hostname".into(), "custom".into());
+        expected.insert(
+            "nested".into(),
+            ObjectMap::from([
+                ("hostname".into(), "custom".into()),
+                ("original_cidr".into(), "208.192.1.2/24".into()),
+            ])
+            .into(),
+        );
+
+        assert_eq!(values, expected);
+    }
+
+    fn find(ip: &str, database: &str) -> Option<ObjectMap> {
+        find_select(ip, database, None)
+    }
+
+    fn find_select(ip: &str, database: &str, select: Option<&[String]>) -> Option<ObjectMap> {
+        Mmdb::new(MmdbConfig {
+            path: database.to_string(),
+        })
+        .unwrap()
+        .find_table_rows(
+            Case::Insensitive,
+            &[Condition::Equals {
+                field: "ip",
+                value: ip.into(),
+            }],
+            select,
+            None,
+        )
+        .unwrap()
+        .pop()
+    }
+}
diff --git a/src/enrichment_tables/mod.rs b/src/enrichment_tables/mod.rs
index 15ec912d911be..97a93b0059022 100644
--- a/src/enrichment_tables/mod.rs
+++ b/src/enrichment_tables/mod.rs
@@ -10,6 +10,9 @@ pub mod file;
 #[cfg(feature = "enrichment-tables-geoip")]
 pub mod geoip;
 
+#[cfg(feature = "enrichment-tables-mmdb")]
+pub mod mmdb;
+
 /// Configurable enrichment tables.
 #[configurable_component]
 #[derive(Clone, Debug)]
@@ -25,6 +28,12 @@ pub enum EnrichmentTables {
     /// [geoip2]: https://www.maxmind.com/en/geoip2-databases
     #[cfg(feature = "enrichment-tables-geoip")]
     Geoip(geoip::GeoipConfig),
+
+    /// Exposes data from a [MaxMind][maxmind] database as an enrichment table.
+    ///
+    /// [maxmind]: https://www.maxmind.com/
+    #[cfg(feature = "enrichment-tables-mmdb")]
+    Mmdb(mmdb::MmdbConfig),
 }
 
 // TODO: Use `enum_dispatch` here.
@@ -34,6 +43,8 @@ impl NamedComponent for EnrichmentTables {
             Self::File(config) => config.get_component_name(),
             #[cfg(feature = "enrichment-tables-geoip")]
             Self::Geoip(config) => config.get_component_name(),
+            #[cfg(feature = "enrichment-tables-mmdb")]
+            Self::Mmdb(config) => config.get_component_name(),
             #[allow(unreachable_patterns)]
             _ => unimplemented!(),
         }
diff --git a/src/extra_context.rs b/src/extra_context.rs
index 23251c88ea510..025093b2ad22d 100644
--- a/src/extra_context.rs
+++ b/src/extra_context.rs
@@ -9,19 +9,14 @@ use std::{
 /// Structure containing any extra data.
 /// The data is held in an [`Arc`] so is cheap to clone.
 #[derive(Clone, Default)]
-pub struct ExtraContext(Arc<HashMap<TypeId, Box<dyn Any + Send + Sync>>>);
+pub struct ExtraContext(Arc<HashMap<TypeId, ContextItem>>);
 
-impl ExtraContext {
-    /// Create a new `ExtraContext` with the provided [`HashMap`].
-    pub fn new(context: HashMap<TypeId, Box<dyn Any + Send + Sync>>) -> Self {
-        Self(Arc::new(context))
-    }
+type ContextItem = Box<dyn Any + Send + Sync>;
 
+impl ExtraContext {
     /// Create a new `ExtraContext` that contains the single passed in value.
     pub fn single_value<T: Any + Send + Sync>(value: T) -> Self {
-        let mut map = HashMap::new();
-        map.insert(value.type_id(), Box::new(value) as _);
-        Self(Arc::new(map))
+        [Box::new(value) as _].into_iter().collect()
     }
 
     #[cfg(test)]
@@ -41,14 +36,21 @@ impl ExtraContext {
     }
 
     /// Get an object from the context, if it doesn't exist return the default.
-    pub fn get_or_default<T>(&self) -> T
-    where
-        T: Clone + Default,
-    {
+    pub fn get_or_default<T: Clone + Default + 'static>(&self) -> T {
         self.get().cloned().unwrap_or_default()
     }
 }
 
+impl FromIterator<ContextItem> for ExtraContext {
+    fn from_iter<T: IntoIterator<Item = ContextItem>>(iter: T) -> Self {
+        Self(Arc::new(
+            iter.into_iter()
+                .map(|item| ((*item).type_id(), item))
+                .collect(),
+        ))
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
diff --git a/src/http.rs b/src/http.rs
index 11d427bee7e47..2afb9abdac38c 100644
--- a/src/http.rs
+++ b/src/http.rs
@@ -10,6 +10,7 @@ use futures::future::BoxFuture;
 use headers::{Authorization, HeaderMapExt};
 use http::{
     header::HeaderValue, request::Builder, uri::InvalidUri, HeaderMap, Request, Response, Uri,
+    Version,
 };
 use hyper::{
     body::{Body, HttpBody},
@@ -398,8 +399,12 @@ pub fn build_http_trace_layer(
 #[derive(Clone, Debug, PartialEq)]
 #[serde(deny_unknown_fields)]
 pub struct KeepaliveConfig {
-    /// The maximum amount of time a connection may exist before it is closed
-    /// by sending a `Connection: close` header on the HTTP response.
+    /// The maximum amount of time a connection may exist before it is closed by sending
+    /// a `Connection: close` header on the HTTP response. Set this to a large value like
+    /// `100000000` to effectively disable this feature.
+    ///
+    /// Only applies to HTTP/0.9, HTTP/1.0, and HTTP/1.1 requests.
     ///
     /// A random jitter configured by `max_connection_age_jitter_factor` is added
     /// to the specified duration to spread out connection storms.
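With the `FromIterator` impl above, an `ExtraContext` can now be assembled from any iterator of boxed items instead of a prebuilt `HashMap`. A hedged sketch of in-crate usage, written as a hypothetical test inside `src/extra_context.rs` since the `ContextItem` alias is private; the `Settings` type is purely illustrative:

    // Hypothetical in-crate test; `Settings` is an illustrative payload type.
    #[derive(Clone, Default, Debug, PartialEq)]
    struct Settings {
        retries: u32,
    }

    #[test]
    fn collect_context_items() {
        // Each entry is boxed as `Box<dyn Any + Send + Sync>` (the `ContextItem`
        // alias) and keyed by its `TypeId` when collected.
        let context: ExtraContext = [
            Box::new(Settings { retries: 3 }) as ContextItem,
            Box::new("app-name".to_string()) as ContextItem,
        ]
        .into_iter()
        .collect();

        // Lookup is by type; absent types fall back to `Default`.
        assert_eq!(context.get_or_default::<Settings>(), Settings { retries: 3 });
        assert_eq!(context.get_or_default::<String>(), "app-name".to_string());
    }

Because entries are keyed by `TypeId`, collecting two values of the same type keeps only one of them; distinct payload types are the intended usage.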
@@ -524,22 +529,32 @@ where
         let start_reference = self.start_reference;
         let max_connection_age = self.max_connection_age;
         let peer_addr = self.peer_addr;
+        let version = req.version();
         let future = self.service.call(req);
         Box::pin(async move {
             let mut response = future.await?;
-            if start_reference.elapsed() >= max_connection_age {
-                debug!(
-                    message = "Closing connection due to max connection age.",
-                    ?max_connection_age,
-                    connection_age = ?start_reference.elapsed(),
-                    ?peer_addr,
-                );
-                // Tell the client to close this connection.
-                // Hyper will automatically close the connection after the response is sent.
-                response.headers_mut().insert(
-                    hyper::header::CONNECTION,
-                    hyper::header::HeaderValue::from_static("close"),
-                );
+            match version {
+                Version::HTTP_09 | Version::HTTP_10 | Version::HTTP_11 => {
+                    if start_reference.elapsed() >= max_connection_age {
+                        debug!(
+                            message = "Closing connection due to max connection age.",
+                            ?max_connection_age,
+                            connection_age = ?start_reference.elapsed(),
+                            ?peer_addr,
+                        );
+                        // Tell the client to close this connection.
+                        // Hyper will automatically close the connection after the response is sent.
+                        response.headers_mut().insert(
+                            hyper::header::CONNECTION,
+                            hyper::header::HeaderValue::from_static("close"),
+                        );
+                    }
+                }
+                // TODO need to send GOAWAY frame
+                Version::HTTP_2 => (),
+                // TODO need to send GOAWAY frame
+                Version::HTTP_3 => (),
+                _ => (),
             }
             Ok(response)
         })
@@ -666,6 +681,52 @@ mod tests {
         );
     }
 
+    #[tokio::test]
+    async fn test_max_connection_age_service_http2() {
+        tokio::time::pause();
+
+        let start_reference = Instant::now();
+        let max_connection_age = Duration::from_secs(0);
+        let mut service = MaxConnectionAgeService {
+            service: tower::service_fn(|_req: Request<Body>| async {
+                Ok::<Response<Body>, hyper::Error>(Response::new(Body::empty()))
+            }),
+            start_reference,
+            max_connection_age,
+            peer_addr: "1.2.3.4:1234".parse().unwrap(),
+        };
+
+        let mut req = Request::get("http://example.com")
+            .body(Body::empty())
+            .unwrap();
+        *req.version_mut() = Version::HTTP_2;
+        let response = service.call(req).await.unwrap();
+        assert_eq!(response.headers().get("Connection"), None);
+    }
+
+    #[tokio::test]
+    async fn test_max_connection_age_service_http3() {
+        tokio::time::pause();
+
+        let start_reference = Instant::now();
+        let max_connection_age = Duration::from_secs(0);
+        let mut service = MaxConnectionAgeService {
+            service: tower::service_fn(|_req: Request<Body>| async {
+                Ok::<Response<Body>, hyper::Error>(Response::new(Body::empty()))
+            }),
+            start_reference,
+            max_connection_age,
+            peer_addr: "1.2.3.4:1234".parse().unwrap(),
+        };
+
+        let mut req = Request::get("http://example.com")
+            .body(Body::empty())
+            .unwrap();
+        *req.version_mut() = Version::HTTP_3;
+        let response = service.call(req).await.unwrap();
+        assert_eq!(response.headers().get("Connection"), None);
+    }
+
     #[tokio::test]
     async fn test_max_connection_age_service_zero_duration() {
         tokio::time::pause();
diff --git a/src/internal_events/datadog_agent.rs b/src/internal_events/datadog_agent.rs
new file mode 100644
index 0000000000000..a0846809c9afe
--- /dev/null
+++ b/src/internal_events/datadog_agent.rs
@@ -0,0 +1,26 @@
+use metrics::counter;
+
+use vector_lib::internal_event::InternalEvent;
+use vector_lib::internal_event::{error_stage, error_type};
+
+#[derive(Debug)]
+pub struct DatadogAgentJsonParseError<'a> {
+    pub error: &'a serde_json::Error,
+}
+
+impl InternalEvent for DatadogAgentJsonParseError<'_> {
+    fn emit(self) {
+        error!(
+            message = "Failed to parse JSON body.",
+            error = ?self.error,
+            error_type = error_type::PARSER_FAILED,
+            stage = error_stage::PROCESSING,
+            internal_log_rate_limit = true,
+        );
+        counter!(
+            "component_errors_total", 1,
+            "error_type" => error_type::PARSER_FAILED,
+            "stage" => error_stage::PROCESSING,
+        );
+    }
+}
diff --git a/src/internal_events/file.rs b/src/internal_events/file.rs
index fc493a1a83c78..876ffc3af0c2f 100644
--- a/src/internal_events/file.rs
+++ b/src/internal_events/file.rs
@@ -305,21 +305,27 @@ mod source {
     pub struct FileUnwatched<'a> {
         pub file: &'a Path,
         pub include_file_metric_tag: bool,
+        pub reached_eof: bool,
     }
 
     impl<'a> InternalEvent for FileUnwatched<'a> {
         fn emit(self) {
+            let reached_eof = if self.reached_eof { "true" } else { "false" };
             info!(
                 message = "Stopped watching file.",
                 file = %self.file.display(),
+                reached_eof
             );
             if self.include_file_metric_tag {
                 counter!(
                     "files_unwatched_total", 1,
                     "file" => self.file.to_string_lossy().into_owned(),
+                    "reached_eof" => reached_eof,
                 );
             } else {
-                counter!("files_unwatched_total", 1);
+                counter!("files_unwatched_total", 1,
+                    "reached_eof" => reached_eof,
+                );
             }
         }
     }
@@ -505,10 +511,11 @@ mod source {
         });
     }
 
-    fn emit_file_unwatched(&self, file: &Path) {
+    fn emit_file_unwatched(&self, file: &Path, reached_eof: bool) {
         emit!(FileUnwatched {
             file,
-            include_file_metric_tag: self.include_file_metric_tag
+            include_file_metric_tag: self.include_file_metric_tag,
+            reached_eof
         });
     }
 
diff --git a/src/internal_events/mod.rs b/src/internal_events/mod.rs
index 924215599c63e..8d69de5c7e095 100644
--- a/src/internal_events/mod.rs
+++ b/src/internal_events/mod.rs
@@ -30,11 +30,13 @@ mod batch;
 mod codecs;
 mod common;
 mod conditions;
+#[cfg(feature = "sources-datadog_agent")]
+mod datadog_agent;
 #[cfg(feature = "sinks-datadog_metrics")]
 mod datadog_metrics;
 #[cfg(feature = "sinks-datadog_traces")]
 mod datadog_traces;
-#[cfg(feature = "transforms-dedupe")]
+#[cfg(feature = "transforms-impl-dedupe")]
 mod dedupe;
 #[cfg(feature = "sources-demo_logs")]
 mod demo_logs;
@@ -87,6 +89,8 @@ mod lua;
 mod metric_to_log;
 #[cfg(feature = "sources-mongodb_metrics")]
 mod mongodb_metrics;
+#[cfg(feature = "sinks-mqtt")]
+mod mqtt;
 #[cfg(feature = "sources-nginx_metrics")]
 mod nginx_metrics;
 mod open;
@@ -100,7 +104,7 @@ mod process;
     feature = "sinks-prometheus"
 ))]
 mod prometheus;
-#[cfg(feature = "sinks-pulsar")]
+#[cfg(any(feature = "sinks-pulsar", feature = "sources-pulsar"))]
 mod pulsar;
 #[cfg(feature = "sources-redis")]
 mod redis;
@@ -163,11 +167,13 @@ pub(crate) use self::aws_kinesis_firehose::*;
 #[cfg(any(feature = "sources-aws_s3", feature = "sources-aws_sqs",))]
 pub(crate) use self::aws_sqs::*;
 pub(crate) use self::codecs::*;
+#[cfg(feature = "sources-datadog_agent")]
+pub(crate) use self::datadog_agent::*;
 #[cfg(feature = "sinks-datadog_metrics")]
 pub(crate) use self::datadog_metrics::*;
 #[cfg(feature = "sinks-datadog_traces")]
 pub(crate) use self::datadog_traces::*;
-#[cfg(feature = "transforms-dedupe")]
+#[cfg(feature = "transforms-impl-dedupe")]
 pub(crate) use self::dedupe::*;
 #[cfg(feature = "sources-demo_logs")]
 pub(crate) use self::demo_logs::*;
@@ -221,6 +227,8 @@ pub(crate) use self::loki::*;
 pub(crate) use self::lua::*;
 #[cfg(feature = "transforms-metric_to_log")]
 pub(crate) use self::metric_to_log::*;
+#[cfg(feature = "sinks-mqtt")]
+pub(crate) use self::mqtt::*;
 #[cfg(feature = "sources-nginx_metrics")]
 pub(crate) use self::nginx_metrics::*;
 #[allow(unused_imports)]
@@ -233,7 +241,7 @@ pub(crate) use self::postgresql_metrics::*;
     feature = "sinks-prometheus"
 ))]
 pub(crate) use self::prometheus::*;
-#[cfg(feature = "sinks-pulsar")] +#[cfg(any(feature = "sinks-pulsar", feature = "sources-pulsar"))] pub(crate) use self::pulsar::*; #[cfg(feature = "sources-redis")] pub(crate) use self::redis::*; @@ -241,7 +249,7 @@ pub(crate) use self::redis::*; pub(crate) use self::reduce::*; #[cfg(feature = "transforms-remap")] pub(crate) use self::remap::*; -#[cfg(feature = "transforms-sample")] +#[cfg(feature = "transforms-impl-sample")] pub(crate) use self::sample::*; #[cfg(feature = "sinks-sematext")] pub(crate) use self::sematext_metrics::*; diff --git a/src/internal_events/mqtt.rs b/src/internal_events/mqtt.rs new file mode 100644 index 0000000000000..fa80f738da827 --- /dev/null +++ b/src/internal_events/mqtt.rs @@ -0,0 +1,34 @@ +use std::fmt::Debug; + +use metrics::counter; +use rumqttc::ConnectionError; +use vector_lib::internal_event::InternalEvent; +use vector_lib::internal_event::{error_stage, error_type}; + +#[derive(Debug)] +pub struct MqttConnectionError { + pub error: ConnectionError, +} + +impl InternalEvent for MqttConnectionError { + fn emit(self) { + error!( + message = "MQTT connection error.", + error = %self.error, + error_code = "mqtt_connection_error", + error_type = error_type::WRITER_FAILED, + stage = error_stage::SENDING, + internal_log_rate_limit = true, + ); + counter!( + "component_errors_total", 1, + "error_code" => "mqtt_connection_error", + "error_type" => error_type::WRITER_FAILED, + "stage" => error_stage::SENDING, + ); + } + + fn name(&self) -> Option<&'static str> { + Some("MqttConnectionError") + } +} diff --git a/src/internal_events/pulsar.rs b/src/internal_events/pulsar.rs index 46c45356e7f2c..7006d46fba7ec 100644 --- a/src/internal_events/pulsar.rs +++ b/src/internal_events/pulsar.rs @@ -1,6 +1,9 @@ use metrics::counter; -use vector_lib::internal_event::InternalEvent; -use vector_lib::internal_event::{error_stage, error_type, ComponentEventsDropped, UNINTENTIONAL}; +#[cfg(feature = "sources-pulsar")] +use metrics::{register_counter, Counter}; +use vector_lib::internal_event::{ + error_stage, error_type, ComponentEventsDropped, InternalEvent, UNINTENTIONAL, +}; #[derive(Debug)] pub struct PulsarSendingError { @@ -52,3 +55,83 @@ impl InternalEvent for PulsarPropertyExtractionError { ); } } + +#[cfg(feature = "sources-pulsar")] +pub enum PulsarErrorEventType { + Read, + Ack, + NAck, +} + +#[cfg(feature = "sources-pulsar")] +pub struct PulsarErrorEventData { + pub msg: String, + pub error_type: PulsarErrorEventType, +} + +#[cfg(feature = "sources-pulsar")] +registered_event!( + PulsarErrorEvent => { + ack_errors: Counter = register_counter!( + "component_errors_total", + "error_code" => "acknowledge_message", + "error_type" => error_type::ACKNOWLEDGMENT_FAILED, + "stage" => error_stage::RECEIVING, + ), + + nack_errors: Counter = register_counter!( + "component_errors_total", + "error_code" => "negative_acknowledge_message", + "error_type" => error_type::ACKNOWLEDGMENT_FAILED, + "stage" => error_stage::RECEIVING, + ), + + read_errors: Counter = register_counter!( + "component_errors_total", + "error_code" => "reading_message", + "error_type" => error_type::READER_FAILED, + "stage" => error_stage::RECEIVING, + ), + } + + fn emit(&self,error:PulsarErrorEventData) { + match error.error_type{ + PulsarErrorEventType::Read => { + error!( + message = "Failed to read message.", + error = error.msg, + error_code = "reading_message", + error_type = error_type::READER_FAILED, + stage = error_stage::RECEIVING, + internal_log_rate_limit = true, + ); + + 
+                self.read_errors.increment(1_u64);
+            }
+            PulsarErrorEventType::Ack => {
+                error!(
+                    message = "Failed to acknowledge message.",
+                    error = error.msg,
+                    error_code = "acknowledge_message",
+                    error_type = error_type::ACKNOWLEDGMENT_FAILED,
+                    stage = error_stage::RECEIVING,
+                    internal_log_rate_limit = true,
+                );
+
+                self.ack_errors.increment(1_u64);
+            }
+            PulsarErrorEventType::NAck => {
+                error!(
+                    message = "Failed to negatively acknowledge message.",
+                    error = error.msg,
+                    error_code = "negative_acknowledge_message",
+                    error_type = error_type::ACKNOWLEDGMENT_FAILED,
+                    stage = error_stage::RECEIVING,
+                    internal_log_rate_limit = true,
+                );
+
+                self.nack_errors.increment(1_u64);
+            }
+        }
+    }
+);
diff --git a/src/internal_events/socket.rs b/src/internal_events/socket.rs
index 157084d52f340..10a144c8eed22 100644
--- a/src/internal_events/socket.rs
+++ b/src/internal_events/socket.rs
@@ -1,4 +1,4 @@
-use metrics::counter;
+use metrics::{counter, histogram};
 use vector_lib::internal_event::{ComponentEventsDropped, InternalEvent, UNINTENTIONAL};
 use vector_lib::{
     internal_event::{error_stage, error_type},
@@ -40,6 +40,7 @@ impl InternalEvent for SocketBytesReceived {
             "component_received_bytes_total", self.byte_size as u64,
             "protocol" => protocol,
         );
+        histogram!("component_received_bytes", self.byte_size as f64);
     }
 }
 
@@ -61,6 +62,7 @@ impl InternalEvent for SocketEventsReceived {
         );
         counter!("component_received_events_total", self.count as u64, "mode" => mode);
         counter!("component_received_event_bytes_total", self.byte_size.get() as u64, "mode" => mode);
+        histogram!("component_received_bytes", self.byte_size.get() as f64, "mode" => mode);
     }
 }
 
diff --git a/src/internal_events/tcp.rs b/src/internal_events/tcp.rs
index 8372e5fd45082..2f73a97be6181 100644
--- a/src/internal_events/tcp.rs
+++ b/src/internal_events/tcp.rs
@@ -44,6 +44,30 @@ impl InternalEvent for TcpSocketConnectionShutdown {
     }
 }
 
+#[derive(Debug)]
+pub struct TcpSocketError<'a, E> {
+    pub(crate) error: &'a E,
+    pub peer_addr: SocketAddr,
+}
+
+impl<E: std::fmt::Display> InternalEvent for TcpSocketError<'_, E> {
+    fn emit(self) {
+        error!(
+            message = "TCP socket error.",
+            error = %self.error,
+            peer_addr = ?self.peer_addr,
+            error_type = error_type::CONNECTION_FAILED,
+            stage = error_stage::PROCESSING,
+            internal_log_rate_limit = true,
+        );
+        counter!(
+            "component_errors_total", 1,
+            "error_type" => error_type::CONNECTION_FAILED,
+            "stage" => error_stage::PROCESSING,
+        );
+    }
+}
+
 #[derive(Debug)]
 pub struct TcpSocketTlsConnectionError {
     pub error: TlsError,
diff --git a/src/lib.rs b/src/lib.rs
index 4e710e3429385..9943ce45b68c1 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -7,6 +7,7 @@
 #![deny(warnings)]
 #![deny(missing_docs)]
 #![cfg_attr(docsrs, feature(doc_cfg), deny(rustdoc::broken_intra_doc_links))]
+#![allow(async_fn_in_trait)]
 #![allow(clippy::approx_constant)]
 #![allow(clippy::float_cmp)]
 #![allow(clippy::match_wild_err_arm)]
diff --git a/src/providers/http.rs b/src/providers/http.rs
index 95bce42ea1547..a41e6404395a6 100644
--- a/src/providers/http.rs
+++ b/src/providers/http.rs
@@ -169,7 +169,6 @@ fn poll_http(
     }
 }
 
-#[async_trait::async_trait]
 impl ProviderConfig for HttpConfig {
     async fn build(&mut self, signal_handler: &mut signal::SignalHandler) -> BuildResult {
         let url = self
diff --git a/src/sinks/amqp/config.rs b/src/sinks/amqp/config.rs
index c24fcdc110ff3..1c5794b0022dd 100644
--- a/src/sinks/amqp/config.rs
+++ b/src/sinks/amqp/config.rs
@@ -12,12 +12,13 @@ use super::sink::AmqpSink;
 
 #[derive(Clone, Debug, Default)]
 pub struct AmqpPropertiesConfig {
     /// Content-Type for the AMQP messages.
-    #[configurable(derived)]
     pub(crate) content_type: Option<String>,
 
     /// Content-Encoding for the AMQP messages.
-    #[configurable(derived)]
     pub(crate) content_encoding: Option<String>,
+
+    /// Expiration for AMQP messages (in milliseconds).
+    pub(crate) expiration_ms: Option<u64>,
 }
 
 impl AmqpPropertiesConfig {
@@ -29,6 +30,9 @@ impl AmqpPropertiesConfig {
         if let Some(content_encoding) = &self.content_encoding {
             prop = prop.with_content_encoding(ShortString::from(content_encoding.clone()));
         }
+        if let Some(expiration_ms) = &self.expiration_ms {
+            prop = prop.with_expiration(ShortString::from(expiration_ms.to_string()));
+        }
         prop
     }
 }
diff --git a/src/sinks/aws_kinesis/firehose/record.rs b/src/sinks/aws_kinesis/firehose/record.rs
index 0b57c55ae5d98..24704aa3b50d5 100644
--- a/src/sinks/aws_kinesis/firehose/record.rs
+++ b/src/sinks/aws_kinesis/firehose/record.rs
@@ -41,7 +41,6 @@ pub struct KinesisFirehoseClient {
     pub client: KinesisClient,
 }
 
-#[async_trait::async_trait]
 impl SendRecord for KinesisFirehoseClient {
     type T = KinesisRecord;
     type E = KinesisError;
diff --git a/src/sinks/aws_kinesis/record.rs b/src/sinks/aws_kinesis/record.rs
index 3a3521f113e6d..2af9f9de59949 100644
--- a/src/sinks/aws_kinesis/record.rs
+++ b/src/sinks/aws_kinesis/record.rs
@@ -1,4 +1,5 @@
-use async_trait::async_trait;
+use std::future::Future;
+
 use aws_smithy_runtime_api::client::{orchestrator::HttpResponse, result::SdkError};
 use bytes::Bytes;
 
@@ -19,15 +20,14 @@ pub trait Record {
 }
 
 /// Capable of sending records.
-#[async_trait]
 pub trait SendRecord {
     type T;
     type E;
 
     /// Sends the records.
-    async fn send(
+    fn send(
         &self,
         records: Vec<Self::T>,
         stream_name: String,
-    ) -> Result<KinesisResponse, SdkError<Self::E, HttpResponse>>;
+    ) -> impl Future<Output = Result<KinesisResponse, SdkError<Self::E, HttpResponse>>> + Send;
 }
diff --git a/src/sinks/aws_kinesis/streams/record.rs b/src/sinks/aws_kinesis/streams/record.rs
index 4bb90e3e5c963..16bf89515c277 100644
--- a/src/sinks/aws_kinesis/streams/record.rs
+++ b/src/sinks/aws_kinesis/streams/record.rs
@@ -51,7 +51,6 @@ pub struct KinesisStreamClient {
     pub client: KinesisClient,
 }
 
-#[async_trait::async_trait]
 impl SendRecord for KinesisStreamClient {
     type T = KinesisRecord;
     type E = KinesisError;
diff --git a/src/sinks/aws_s3/integration_tests.rs b/src/sinks/aws_s3/integration_tests.rs
index 8d723f09b9587..d6255933847a4 100644
--- a/src/sinks/aws_s3/integration_tests.rs
+++ b/src/sinks/aws_s3/integration_tests.rs
@@ -388,7 +388,11 @@ async fn s3_healthchecks() {
         .create_service(&ProxyConfig::from_env())
         .await
         .unwrap();
-    config.build_healthcheck(service.client()).unwrap();
+    config
+        .build_healthcheck(service.client())
+        .unwrap()
+        .await
+        .unwrap();
 }
 
 #[tokio::test]
diff --git a/src/sinks/aws_s_s/client.rs b/src/sinks/aws_s_s/client.rs
index ad8907aa503b0..855d7ce0e6da3 100644
--- a/src/sinks/aws_s_s/client.rs
+++ b/src/sinks/aws_s_s/client.rs
@@ -1,15 +1,16 @@
+use std::future::Future;
+
 use aws_smithy_runtime_api::client::{orchestrator::HttpResponse, result::SdkError};
 
 use super::{request_builder::SendMessageEntry, service::SendMessageResponse};
 
-#[async_trait::async_trait]
 pub(super) trait Client<R>
 where
     R: std::fmt::Debug + std::fmt::Display + std::error::Error,
 {
-    async fn send_message(
+    fn send_message(
         &self,
         entry: SendMessageEntry,
         byte_size: usize,
-    ) -> Result<SendMessageResponse, SdkError<R, HttpResponse>>;
+    ) -> impl Future<Output = Result<SendMessageResponse, SdkError<R, HttpResponse>>> + Send;
 }
diff --git a/src/sinks/aws_s_s/sns/client.rs b/src/sinks/aws_s_s/sns/client.rs
index bbcacf30474e5..9890ee2a73645 100644
--- a/src/sinks/aws_s_s/sns/client.rs
+++ b/src/sinks/aws_s_s/sns/client.rs
@@ -16,7 +16,7 @@ impl SnsMessagePublisher {
     }
 }
 
-#[async_trait::async_trait]
 impl Client<PublishError> for SnsMessagePublisher {
     async fn send_message(
         &self,
diff --git a/src/sinks/aws_s_s/sqs/client.rs b/src/sinks/aws_s_s/sqs/client.rs
index 0e107af13fb28..f50bce6d053d6 100644
--- a/src/sinks/aws_s_s/sqs/client.rs
+++ b/src/sinks/aws_s_s/sqs/client.rs
@@ -16,7 +16,7 @@ impl SqsMessagePublisher {
     }
 }
 
-#[async_trait::async_trait]
 impl Client<SendMessageError> for SqsMessagePublisher {
     async fn send_message(
         &self,
diff --git a/src/sinks/azure_blob/integration_tests.rs b/src/sinks/azure_blob/integration_tests.rs
index 754425806d16d..9e36ed1a6acd7 100644
--- a/src/sinks/azure_blob/integration_tests.rs
+++ b/src/sinks/azure_blob/integration_tests.rs
@@ -40,9 +40,10 @@ async fn azure_blob_healthcheck_passed() {
     )
     .expect("Failed to create client");
 
-    let response = azure_common::config::build_healthcheck(config.container_name, client);
-
-    response.expect("Failed to pass healthcheck");
+    azure_common::config::build_healthcheck(config.container_name, client)
+        .expect("Failed to build healthcheck")
+        .await
+        .expect("Failed to pass healthcheck");
 }
 
 #[tokio::test]
diff --git a/src/sinks/clickhouse/config.rs b/src/sinks/clickhouse/config.rs
index a377dc3f7e11a..f50f49ca510af 100644
--- a/src/sinks/clickhouse/config.rs
+++ b/src/sinks/clickhouse/config.rs
@@ -1,17 +1,53 @@
-use http::{Request, StatusCode, Uri};
-use hyper::Body;
+//! Configuration for the `Clickhouse` sink.
 
 use super::{
-    service::{ClickhouseRetryLogic, ClickhouseService},
-    sink::ClickhouseSink,
+    request_builder::ClickhouseRequestBuilder,
+    service::{ClickhouseRetryLogic, ClickhouseServiceRequestBuilder},
+    sink::{ClickhouseSink, PartitionKey},
 };
 use crate::{
-    http::{get_http_scheme_from_uri, Auth, HttpClient, MaybeAuth},
+    http::{Auth, HttpClient, MaybeAuth},
     sinks::{
         prelude::*,
-        util::{RealtimeSizeBasedDefaultBatchSettings, UriSerde},
+        util::{http::HttpService, RealtimeSizeBasedDefaultBatchSettings, UriSerde},
     },
 };
+use http::{Request, StatusCode, Uri};
+use hyper::Body;
+use std::fmt;
+use vector_lib::codecs::{encoding::Framer, JsonSerializerConfig, NewlineDelimitedEncoderConfig};
+
+/// Data format.
+///
+/// The format used to parse input/output data.
+///
+/// [formats]: https://clickhouse.com/docs/en/interfaces/formats
+#[configurable_component]
+#[derive(Clone, Copy, Debug, Derivative, Eq, PartialEq, Hash)]
+#[serde(rename_all = "snake_case")]
+#[derivative(Default)]
+#[allow(clippy::enum_variant_names)]
+pub enum Format {
+    #[derivative(Default)]
+    /// JSONEachRow.
+    JsonEachRow,
+
+    /// JSONAsObject.
+    JsonAsObject,
+
+    /// JSONAsString.
+    JsonAsString,
+}
+
+impl fmt::Display for Format {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            Format::JsonEachRow => write!(f, "JSONEachRow"),
+            Format::JsonAsObject => write!(f, "JSONAsObject"),
+            Format::JsonAsString => write!(f, "JSONAsString"),
+        }
+    }
+}
 
 /// Configuration for the `clickhouse` sink.
 #[configurable_component(sink("clickhouse", "Deliver log data to a ClickHouse database."))]
@@ -31,6 +67,10 @@ pub struct ClickhouseConfig {
     #[configurable(metadata(docs::examples = "mydatabase"))]
     pub database: Option