From 0e1b1d3a9b9a63e1399fbaf6cd3bc454ddbd2948 Mon Sep 17 00:00:00 2001 From: Daniel Jaglowski Date: Tue, 5 Nov 2024 11:00:46 -0500 Subject: [PATCH 1/9] [receiver/windowseventlog] Fix panic when rendering excessively long offset (#36179) A user observed a panic in the receiver after updating to v0.112.0, where rendering info is expanded by default: ``` goroutine 69 [running]: github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/input/windows.(*Buffer).ReadBytes(0x37?, 0x19?) C:/Users/runneradmin/go/pkg/mod/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza@v0.112.0/operator/input/windows/buffer.go:26 +0x165 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/input/windows.(*Buffer).ReadWideChars(...) C:/Users/runneradmin/go/pkg/mod/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza@v0.112.0/operator/input/windows/buffer.go:37 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/input/windows.(*Event).RenderDeep(0xc002aaf998, {{0xc003120000, 0x4000, 0x4000}}, {0x8c1b918?}) C:/Users/runneradmin/go/pkg/mod/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza@v0.112.0/operator/input/windows/event.go:117 +0x155 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/input/windows.(*Input).renderDeepAndSend(0xc001ee3dc0, {0x9977160, 0xc00312c460}, {0x19}, {0x13b480ccd58?}) ``` @pjanotti This is a quick fix that avoids the panic by reading only up to the end of the buffer rather than attempting to read past it. There may be a better behavior than this, but I am not familiar with how the buffer size is established, so I am proposing to just read the full content for now. --- .chloggen/fix-panic-wel.yaml | 27 +++++++++++++++++++ pkg/stanza/operator/input/windows/buffer.go | 3 +++ .../operator/input/windows/buffer_test.go | 11 ++++++++ 3 files changed, 41 insertions(+) create mode 100644 .chloggen/fix-panic-wel.yaml diff --git a/.chloggen/fix-panic-wel.yaml b/.chloggen/fix-panic-wel.yaml new file mode 100644 index 000000000000..01dfa4f218df --- /dev/null +++ b/.chloggen/fix-panic-wel.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: bug_fix + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: receiver/windowseventlog + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Fix panic when rendering long event messages. + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [36179] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. 
+# Default: '[user]' +change_logs: [] diff --git a/pkg/stanza/operator/input/windows/buffer.go b/pkg/stanza/operator/input/windows/buffer.go index 149eade2b4dd..3d083bb76693 100644 --- a/pkg/stanza/operator/input/windows/buffer.go +++ b/pkg/stanza/operator/input/windows/buffer.go @@ -23,6 +23,9 @@ type Buffer struct { // ReadBytes will read UTF-8 bytes from the buffer, where offset is the number of bytes to be read func (b *Buffer) ReadBytes(offset uint32) ([]byte, error) { + if offset > uint32(len(b.buffer)) { + offset = uint32(len(b.buffer)) + } utf16 := b.buffer[:offset] utf8, err := unicode.UTF16(unicode.LittleEndian, unicode.UseBOM).NewDecoder().Bytes(utf16) if err != nil { diff --git a/pkg/stanza/operator/input/windows/buffer_test.go b/pkg/stanza/operator/input/windows/buffer_test.go index f8376706ec78..78f654742495 100644 --- a/pkg/stanza/operator/input/windows/buffer_test.go +++ b/pkg/stanza/operator/input/windows/buffer_test.go @@ -21,6 +21,17 @@ func TestBufferReadBytes(t *testing.T) { require.Equal(t, utf8, bytes) } +func TestBufferReadBytesOverflow(t *testing.T) { + buffer := NewBuffer() + utf8 := []byte("test") + utf16, _ := unicode.UTF16(unicode.LittleEndian, unicode.UseBOM).NewEncoder().Bytes(utf8) + copy(buffer.buffer, utf16) + offset := uint32(len(utf16)) + bytes, err := buffer.ReadBytes(offset * 2) + require.NoError(t, err) + require.Equal(t, utf8, bytes) +} + func TestBufferReadWideBytes(t *testing.T) { buffer := NewBuffer() utf8 := []byte("test") From 13c0c3b1915dc87f0193f5c644cb59195540a0f4 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 5 Nov 2024 09:14:53 -0700 Subject: [PATCH 2/9] fix(deps): update module github.com/snowflakedb/gosnowflake to v1.12.0 (#36200) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Change | Age | Adoption | Passing | Confidence | |---|---|---|---|---|---| | [github.com/snowflakedb/gosnowflake](https://redirect.github.com/snowflakedb/gosnowflake) | `v1.11.2` -> `v1.12.0` | [![age](https://developer.mend.io/api/mc/badges/age/go/github.com%2fsnowflakedb%2fgosnowflake/v1.12.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/go/github.com%2fsnowflakedb%2fgosnowflake/v1.12.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/go/github.com%2fsnowflakedb%2fgosnowflake/v1.11.2/v1.12.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/go/github.com%2fsnowflakedb%2fgosnowflake/v1.11.2/v1.12.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | --- > [!WARNING] > Some dependencies could not be looked up. Check the Dependency Dashboard for more information. --- ### Release Notes
snowflakedb/gosnowflake (github.com/snowflakedb/gosnowflake) ### [`v1.12.0`](https://redirect.github.com/snowflakedb/gosnowflake/releases/tag/v1.12.0): Release [Compare Source](https://redirect.github.com/snowflakedb/gosnowflake/compare/v1.11.2...v1.12.0) - Please check Snowflake [Go Snowflake for release notes](https://docs.snowflake.com/en/release-notes/clients-drivers/golang).
--- ### Configuration 📅 **Schedule**: Branch creation - "on tuesday" (UTC), Automerge - At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR was generated by [Mend Renovate](https://mend.io/renovate/). View the [repository job log](https://developer.mend.io/github/open-telemetry/opentelemetry-collector-contrib). --------- Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Co-authored-by: opentelemetrybot <107717825+opentelemetrybot@users.noreply.github.com> Co-authored-by: Yang Song --- internal/sqlquery/go.mod | 5 +++-- internal/sqlquery/go.sum | 10 ++++++---- receiver/snowflakereceiver/go.mod | 5 +++-- receiver/snowflakereceiver/go.sum | 10 ++++++---- receiver/sqlqueryreceiver/go.mod | 3 ++- receiver/sqlqueryreceiver/go.sum | 6 ++++-- receiver/sqlserverreceiver/go.mod | 5 +++-- receiver/sqlserverreceiver/go.sum | 10 ++++++---- 8 files changed, 33 insertions(+), 21 deletions(-) diff --git a/internal/sqlquery/go.mod b/internal/sqlquery/go.mod index 65f2b6bb277c..a0045b802619 100644 --- a/internal/sqlquery/go.mod +++ b/internal/sqlquery/go.mod @@ -8,7 +8,7 @@ require ( github.com/lib/pq v1.10.9 github.com/microsoft/go-mssqldb v1.7.2 github.com/sijms/go-ora/v2 v2.8.22 - github.com/snowflakedb/gosnowflake v1.11.2 + github.com/snowflakedb/gosnowflake v1.12.0 github.com/stretchr/testify v1.9.0 go.opentelemetry.io/collector/component v0.112.0 go.opentelemetry.io/collector/pdata v1.18.0 @@ -24,6 +24,7 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0 // indirect + github.com/BurntSushi/toml v1.4.0 // indirect github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c // indirect github.com/apache/arrow/go/v15 v15.0.0 // indirect github.com/aws/aws-sdk-go-v2 v1.26.1 // indirect @@ -72,7 +73,7 @@ require ( github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.12.0 // indirect - github.com/sirupsen/logrus v1.9.0 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect github.com/zeebo/xxh3 v1.0.2 // indirect go.opentelemetry.io/collector/config/configtelemetry v0.112.0 // indirect go.opentelemetry.io/collector/consumer v0.112.0 // indirect diff --git a/internal/sqlquery/go.sum b/internal/sqlquery/go.sum index 9eed4500d56e..4f0b715ee51f 100644 --- a/internal/sqlquery/go.sum +++ b/internal/sqlquery/go.sum @@ -18,6 +18,8 @@ github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0 h1:u/LLAOFgsMv7HmNL4 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0/go.mod h1:2e8rMJtl2+2j+HXbTBwnyGpm5Nou7KhvSfxOq8JpTag= github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 h1:DzHpqpoJVaCgOUdVHxE8QB52S6NiVdDQvGlny1qvPqA= github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0= +github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c 
h1:RGWPOewvKIROun94nF7v2cua9qP+thov/7M50KEoeSU= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= github.com/SAP/go-hdb v1.12.4 h1:Oje5j/Ua6Yh5qwSy5xhXpJoooEi6vaj/ubbk3q5DWJQ= @@ -162,10 +164,10 @@ github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/sijms/go-ora/v2 v2.8.22 h1:3ABgRzVKxS439cEgSLjFKutIwOyhnyi4oOSBywEdOlU= github.com/sijms/go-ora/v2 v2.8.22/go.mod h1:QgFInVi3ZWyqAiJwzBQA+nbKYKH77tdp1PYoCqhR2dU= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= -github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/snowflakedb/gosnowflake v1.11.2 h1:eAMsxrCiC6ij5wX3dHx1TQCBOdDmCK062Ir8rndUkRg= -github.com/snowflakedb/gosnowflake v1.11.2/go.mod h1:WFe+8mpsapDaQjHX6BqJBKtfQCGlGD3lHKeDsKfpx2A= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/snowflakedb/gosnowflake v1.12.0 h1:Saez8egtn5xAoVMBxFaMu9MYfAG9SS9dpAEXD1/ECIo= +github.com/snowflakedb/gosnowflake v1.12.0/go.mod h1:wHfYmZi3zvtWItojesAhWWXBN7+niex2R1h/S7QCZYg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= diff --git a/receiver/snowflakereceiver/go.mod b/receiver/snowflakereceiver/go.mod index 06caef1342ed..75f48e6172bb 100644 --- a/receiver/snowflakereceiver/go.mod +++ b/receiver/snowflakereceiver/go.mod @@ -7,7 +7,7 @@ require ( github.com/google/go-cmp v0.6.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.112.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.112.0 - github.com/snowflakedb/gosnowflake v1.11.2 + github.com/snowflakedb/gosnowflake v1.12.0 github.com/stretchr/testify v1.9.0 go.opentelemetry.io/collector/component v0.112.0 go.opentelemetry.io/collector/config/configopaque v1.18.0 @@ -28,6 +28,7 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 // indirect + github.com/BurntSushi/toml v1.4.0 // indirect github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c // indirect github.com/apache/arrow/go/v15 v15.0.0 // indirect github.com/aws/aws-sdk-go-v2 v1.26.1 // indirect @@ -75,7 +76,7 @@ require ( github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.12.0 // indirect - github.com/sirupsen/logrus v1.9.0 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect github.com/zeebo/xxh3 v1.0.2 // indirect go.opentelemetry.io/collector/config/configtelemetry v0.112.0 // indirect go.opentelemetry.io/collector/consumer/consumererror v0.112.0 // indirect diff --git a/receiver/snowflakereceiver/go.sum b/receiver/snowflakereceiver/go.sum index f655e52a67b3..47b2906b0dae 100644 --- a/receiver/snowflakereceiver/go.sum +++ b/receiver/snowflakereceiver/go.sum @@ -14,6 +14,8 @@ github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 h1:gggzg0SUMs6SQbEw+ 
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0/go.mod h1:+6KLcKIVgxoBDMqMO/Nvy7bZ9a0nbU3I1DtFQK3YvB4= github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkMqLPymWEppkm7vgPQY2XsHoEkaMQ0AdZY= github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o= +github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0= +github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c h1:RGWPOewvKIROun94nF7v2cua9qP+thov/7M50KEoeSU= @@ -144,10 +146,10 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= -github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/snowflakedb/gosnowflake v1.11.2 h1:eAMsxrCiC6ij5wX3dHx1TQCBOdDmCK062Ir8rndUkRg= -github.com/snowflakedb/gosnowflake v1.11.2/go.mod h1:WFe+8mpsapDaQjHX6BqJBKtfQCGlGD3lHKeDsKfpx2A= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/snowflakedb/gosnowflake v1.12.0 h1:Saez8egtn5xAoVMBxFaMu9MYfAG9SS9dpAEXD1/ECIo= +github.com/snowflakedb/gosnowflake v1.12.0/go.mod h1:wHfYmZi3zvtWItojesAhWWXBN7+niex2R1h/S7QCZYg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= diff --git a/receiver/sqlqueryreceiver/go.mod b/receiver/sqlqueryreceiver/go.mod index baed0a7ec648..c6e8cd341d63 100644 --- a/receiver/sqlqueryreceiver/go.mod +++ b/receiver/sqlqueryreceiver/go.mod @@ -38,6 +38,7 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 // indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect + github.com/BurntSushi/toml v1.4.0 // indirect github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/SAP/go-hdb v1.12.4 // indirect @@ -131,7 +132,7 @@ require ( github.com/shoenig/go-m1cpu v0.1.6 // indirect github.com/sijms/go-ora/v2 v2.8.22 // indirect github.com/sirupsen/logrus v1.9.3 // indirect - github.com/snowflakedb/gosnowflake v1.11.2 // indirect + github.com/snowflakedb/gosnowflake v1.12.0 // indirect github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect github.com/valyala/fastjson v1.6.4 // indirect diff --git a/receiver/sqlqueryreceiver/go.sum b/receiver/sqlqueryreceiver/go.sum index 5e79d9b112e5..af8131a4814c 100644 --- a/receiver/sqlqueryreceiver/go.sum +++ b/receiver/sqlqueryreceiver/go.sum @@ -26,6 +26,8 @@ github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 
h1:UQHMgLO+TxOEl github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 h1:DzHpqpoJVaCgOUdVHxE8QB52S6NiVdDQvGlny1qvPqA= github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0= +github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c h1:RGWPOewvKIROun94nF7v2cua9qP+thov/7M50KEoeSU= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= @@ -258,8 +260,8 @@ github.com/sijms/go-ora/v2 v2.8.22 h1:3ABgRzVKxS439cEgSLjFKutIwOyhnyi4oOSBywEdOl github.com/sijms/go-ora/v2 v2.8.22/go.mod h1:QgFInVi3ZWyqAiJwzBQA+nbKYKH77tdp1PYoCqhR2dU= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/snowflakedb/gosnowflake v1.11.2 h1:eAMsxrCiC6ij5wX3dHx1TQCBOdDmCK062Ir8rndUkRg= -github.com/snowflakedb/gosnowflake v1.11.2/go.mod h1:WFe+8mpsapDaQjHX6BqJBKtfQCGlGD3lHKeDsKfpx2A= +github.com/snowflakedb/gosnowflake v1.12.0 h1:Saez8egtn5xAoVMBxFaMu9MYfAG9SS9dpAEXD1/ECIo= +github.com/snowflakedb/gosnowflake v1.12.0/go.mod h1:wHfYmZi3zvtWItojesAhWWXBN7+niex2R1h/S7QCZYg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= diff --git a/receiver/sqlserverreceiver/go.mod b/receiver/sqlserverreceiver/go.mod index 5753a4941669..d5c488db4d05 100644 --- a/receiver/sqlserverreceiver/go.mod +++ b/receiver/sqlserverreceiver/go.mod @@ -29,6 +29,7 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0 // indirect + github.com/BurntSushi/toml v1.4.0 // indirect github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c // indirect github.com/SAP/go-hdb v1.12.4 // indirect github.com/apache/arrow/go/v15 v15.0.0 // indirect @@ -89,8 +90,8 @@ require ( github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/sijms/go-ora/v2 v2.8.22 // indirect - github.com/sirupsen/logrus v1.9.0 // indirect - github.com/snowflakedb/gosnowflake v1.11.2 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/snowflakedb/gosnowflake v1.12.0 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/zeebo/xxh3 v1.0.2 // indirect go.opentelemetry.io/collector/config/configtelemetry v0.112.0 // indirect diff --git a/receiver/sqlserverreceiver/go.sum b/receiver/sqlserverreceiver/go.sum index 7796590ae610..1c8cfeeab23d 100644 --- a/receiver/sqlserverreceiver/go.sum +++ b/receiver/sqlserverreceiver/go.sum @@ -18,6 +18,8 @@ github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0 h1:u/LLAOFgsMv7HmNL4 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0/go.mod h1:2e8rMJtl2+2j+HXbTBwnyGpm5Nou7KhvSfxOq8JpTag= 
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 h1:DzHpqpoJVaCgOUdVHxE8QB52S6NiVdDQvGlny1qvPqA= github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0= +github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c h1:RGWPOewvKIROun94nF7v2cua9qP+thov/7M50KEoeSU= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= github.com/SAP/go-hdb v1.12.4 h1:Oje5j/Ua6Yh5qwSy5xhXpJoooEi6vaj/ubbk3q5DWJQ= @@ -176,10 +178,10 @@ github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/sijms/go-ora/v2 v2.8.22 h1:3ABgRzVKxS439cEgSLjFKutIwOyhnyi4oOSBywEdOlU= github.com/sijms/go-ora/v2 v2.8.22/go.mod h1:QgFInVi3ZWyqAiJwzBQA+nbKYKH77tdp1PYoCqhR2dU= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= -github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/snowflakedb/gosnowflake v1.11.2 h1:eAMsxrCiC6ij5wX3dHx1TQCBOdDmCK062Ir8rndUkRg= -github.com/snowflakedb/gosnowflake v1.11.2/go.mod h1:WFe+8mpsapDaQjHX6BqJBKtfQCGlGD3lHKeDsKfpx2A= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/snowflakedb/gosnowflake v1.12.0 h1:Saez8egtn5xAoVMBxFaMu9MYfAG9SS9dpAEXD1/ECIo= +github.com/snowflakedb/gosnowflake v1.12.0/go.mod h1:wHfYmZi3zvtWItojesAhWWXBN7+niex2R1h/S7QCZYg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= From d7eb6ff2cae7273707435097037c6381444ea3b2 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 5 Nov 2024 09:15:20 -0700 Subject: [PATCH 3/9] fix(deps): update module google.golang.org/api to v0.204.0 (#36201) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Change | Age | Adoption | Passing | Confidence | |---|---|---|---|---|---| | [google.golang.org/api](https://redirect.github.com/googleapis/google-api-go-client) | `v0.201.0` -> `v0.204.0` | [![age](https://developer.mend.io/api/mc/badges/age/go/google.golang.org%2fapi/v0.204.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/go/google.golang.org%2fapi/v0.204.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/go/google.golang.org%2fapi/v0.201.0/v0.204.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/go/google.golang.org%2fapi/v0.201.0/v0.204.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | | [google.golang.org/api](https://redirect.github.com/googleapis/google-api-go-client) | `v0.203.0` -> `v0.204.0` | 
[![age](https://developer.mend.io/api/mc/badges/age/go/google.golang.org%2fapi/v0.204.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/go/google.golang.org%2fapi/v0.204.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/go/google.golang.org%2fapi/v0.203.0/v0.204.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/go/google.golang.org%2fapi/v0.203.0/v0.204.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | --- > [!WARNING] > Some dependencies could not be looked up. Check the Dependency Dashboard for more information. --- ### Release Notes
googleapis/google-api-go-client (google.golang.org/api) ### [`v0.204.0`](https://redirect.github.com/googleapis/google-api-go-client/releases/tag/v0.204.0) [Compare Source](https://redirect.github.com/googleapis/google-api-go-client/compare/v0.203.0...v0.204.0) ##### Features - **all:** Auto-regenerate discovery clients ([#​2837](https://redirect.github.com/googleapis/google-api-go-client/issues/2837)) ([343ae0e](https://redirect.github.com/googleapis/google-api-go-client/commit/343ae0e8222988b33ce388661056cd3a834a54a6)) - **all:** Auto-regenerate discovery clients ([#​2839](https://redirect.github.com/googleapis/google-api-go-client/issues/2839)) ([37b271e](https://redirect.github.com/googleapis/google-api-go-client/commit/37b271e98c5041584a0c4fc0741c2798ab05dbca)) - **all:** Auto-regenerate discovery clients ([#​2841](https://redirect.github.com/googleapis/google-api-go-client/issues/2841)) ([fb3747b](https://redirect.github.com/googleapis/google-api-go-client/commit/fb3747b8f9c99f0cf41b5830322192bcb0092f0a)) - **all:** Auto-regenerate discovery clients ([#​2842](https://redirect.github.com/googleapis/google-api-go-client/issues/2842)) ([7221d2c](https://redirect.github.com/googleapis/google-api-go-client/commit/7221d2c798d6ae77f88c60de23e5871e92fe03d3)) - **all:** Auto-regenerate discovery clients ([#​2844](https://redirect.github.com/googleapis/google-api-go-client/issues/2844)) ([56c5ddb](https://redirect.github.com/googleapis/google-api-go-client/commit/56c5ddb8eaf93f57891f285d913b21fb357b87a0)) - **all:** Auto-regenerate discovery clients ([#​2845](https://redirect.github.com/googleapis/google-api-go-client/issues/2845)) ([9d5f008](https://redirect.github.com/googleapis/google-api-go-client/commit/9d5f008642ce0996af3e4c84f9a017b64c6eb3d3)) - **all:** Auto-regenerate discovery clients ([#​2848](https://redirect.github.com/googleapis/google-api-go-client/issues/2848)) ([5bc448d](https://redirect.github.com/googleapis/google-api-go-client/commit/5bc448d6497f0c355b530d7b17d9f034a02aee6f)) - **all:** Auto-regenerate discovery clients ([#​2850](https://redirect.github.com/googleapis/google-api-go-client/issues/2850)) ([7955ec4](https://redirect.github.com/googleapis/google-api-go-client/commit/7955ec454d23d0a36a8ae1c0253b95f7fa3a930c)) ##### Bug Fixes - **transport/grpc:** Pass through cert source to new auth lib ([#​2840](https://redirect.github.com/googleapis/google-api-go-client/issues/2840)) ([c67e7c0](https://redirect.github.com/googleapis/google-api-go-client/commit/c67e7c09f9cbe3aea06cfe54f4e550ac16725c6c)) ##### Documentation - Mark WithUniverseDomain as stable ([#​2847](https://redirect.github.com/googleapis/google-api-go-client/issues/2847)) ([29e20f6](https://redirect.github.com/googleapis/google-api-go-client/commit/29e20f602923858630f5e00d5fbd303bcf421f13)) ### [`v0.203.0`](https://redirect.github.com/googleapis/google-api-go-client/releases/tag/v0.203.0) [Compare Source](https://redirect.github.com/googleapis/google-api-go-client/compare/v0.202.0...v0.203.0) ##### Features - **all:** Auto-regenerate discovery clients ([#​2834](https://redirect.github.com/googleapis/google-api-go-client/issues/2834)) ([c77b5f4](https://redirect.github.com/googleapis/google-api-go-client/commit/c77b5f4cd277b56cefe5fee3ede469443e32dcec)) ### [`v0.202.0`](https://redirect.github.com/googleapis/google-api-go-client/releases/tag/v0.202.0) [Compare Source](https://redirect.github.com/googleapis/google-api-go-client/compare/v0.201.0...v0.202.0) ##### Features - **all:** Auto-regenerate discovery 
clients ([#​2827](https://redirect.github.com/googleapis/google-api-go-client/issues/2827)) ([cb3f24b](https://redirect.github.com/googleapis/google-api-go-client/commit/cb3f24b55f38fe2b307013857a9a057a178091d3)) - **all:** Auto-regenerate discovery clients ([#​2829](https://redirect.github.com/googleapis/google-api-go-client/issues/2829)) ([5384fb9](https://redirect.github.com/googleapis/google-api-go-client/commit/5384fb98878c7abd9693777ed2caa780c80e5db4)) - **all:** Auto-regenerate discovery clients ([#​2830](https://redirect.github.com/googleapis/google-api-go-client/issues/2830)) ([64042ed](https://redirect.github.com/googleapis/google-api-go-client/commit/64042ed340420f8555b6be3faf9a3b3beab3dd41)) - **all:** Auto-regenerate discovery clients ([#​2831](https://redirect.github.com/googleapis/google-api-go-client/issues/2831)) ([d094279](https://redirect.github.com/googleapis/google-api-go-client/commit/d09427918c62c75f4dc62e35752531f54a3f0892)) - **all:** Auto-regenerate discovery clients ([#​2833](https://redirect.github.com/googleapis/google-api-go-client/issues/2833)) ([87e652f](https://redirect.github.com/googleapis/google-api-go-client/commit/87e652f5bf2409927b060aee030c2340aac4afbd))
--- ### Configuration 📅 **Schedule**: Branch creation - "on tuesday" (UTC), Automerge - At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about these updates again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR was generated by [Mend Renovate](https://mend.io/renovate/). View the [repository job log](https://developer.mend.io/github/open-telemetry/opentelemetry-collector-contrib). --------- Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Co-authored-by: opentelemetrybot <107717825+opentelemetrybot@users.noreply.github.com> Co-authored-by: Yang Song --- exporter/googlecloudpubsubexporter/go.mod | 12 +++++----- exporter/googlecloudpubsubexporter/go.sum | 24 +++++++++---------- receiver/googlecloudmonitoringreceiver/go.mod | 12 +++++----- receiver/googlecloudmonitoringreceiver/go.sum | 24 +++++++++---------- receiver/googlecloudpubsubreceiver/go.mod | 12 +++++----- receiver/googlecloudpubsubreceiver/go.sum | 24 +++++++++---------- receiver/googlecloudspannerreceiver/go.mod | 12 +++++----- receiver/googlecloudspannerreceiver/go.sum | 24 +++++++++---------- 8 files changed, 72 insertions(+), 72 deletions(-) diff --git a/exporter/googlecloudpubsubexporter/go.mod b/exporter/googlecloudpubsubexporter/go.mod index f057f1822b34..196a9695c89a 100644 --- a/exporter/googlecloudpubsubexporter/go.mod +++ b/exporter/googlecloudpubsubexporter/go.mod @@ -14,14 +14,14 @@ require ( go.opentelemetry.io/collector/exporter/exportertest v0.112.0 go.opentelemetry.io/collector/pdata v1.18.0 go.uber.org/zap v1.27.0 - google.golang.org/api v0.203.0 + google.golang.org/api v0.204.0 google.golang.org/grpc v1.67.1 ) require ( cloud.google.com/go v0.116.0 // indirect - cloud.google.com/go/auth v0.9.9 // indirect - cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect + cloud.google.com/go/auth v0.10.0 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.5 // indirect cloud.google.com/go/compute/metadata v0.5.2 // indirect cloud.google.com/go/iam v1.2.1 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect @@ -74,9 +74,9 @@ require ( golang.org/x/sys v0.26.0 // indirect golang.org/x/text v0.19.0 // indirect golang.org/x/time v0.7.0 // indirect - google.golang.org/genproto v0.0.0-20241015192408-796eee8c2d53 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect + google.golang.org/genproto v0.0.0-20241021214115-324edc3d5d38 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 // indirect google.golang.org/protobuf v1.35.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/exporter/googlecloudpubsubexporter/go.sum b/exporter/googlecloudpubsubexporter/go.sum index ec8c0effdaa1..dc4476261a2b 100644 --- a/exporter/googlecloudpubsubexporter/go.sum +++ b/exporter/googlecloudpubsubexporter/go.sum @@ -1,10 +1,10 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.116.0 h1:B3fRrSDkLRt5qSHWe40ERJvhvnQwdZiHu0bJOpldweE= cloud.google.com/go v0.116.0/go.mod h1:cEPSRWPzZEswwdr9BxE6ChEn01dWlTaF05LiC2Xs70U= 
-cloud.google.com/go/auth v0.9.9 h1:BmtbpNQozo8ZwW2t7QJjnrQtdganSdmqeIBxHxNkEZQ= -cloud.google.com/go/auth v0.9.9/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI= -cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= -cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= +cloud.google.com/go/auth v0.10.0 h1:tWlkvFAh+wwTOzXIjrwM64karR1iTBZ/GRr0S/DULYo= +cloud.google.com/go/auth v0.10.0/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI= +cloud.google.com/go/auth/oauth2adapt v0.2.5 h1:2p29+dePqsCHPP1bqDJcKj4qxRyYCcbzKpFyKGt3MTk= +cloud.google.com/go/auth/oauth2adapt v0.2.5/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8= cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= cloud.google.com/go/iam v1.2.1 h1:QFct02HRb7H12J/3utj0qf5tobFh9V4vR6h9eX5EBRU= @@ -226,19 +226,19 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.203.0 h1:SrEeuwU3S11Wlscsn+LA1kb/Y5xT8uggJSkIhD08NAU= -google.golang.org/api v0.203.0/go.mod h1:BuOVyCSYEPwJb3npWvDnNmFI92f3GeRnHNkETneT3SI= +google.golang.org/api v0.204.0 h1:3PjmQQEDkR/ENVZZwIYB4W/KzYtN8OrqnNcHWpeR8E4= +google.golang.org/api v0.204.0/go.mod h1:69y8QSoKIbL9F94bWgWAq6wGqGwyjBgi2y8rAK8zLag= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20241015192408-796eee8c2d53 h1:Df6WuGvthPzc+JiQ/G+m+sNX24kc0aTBqoDN/0yyykE= -google.golang.org/genproto v0.0.0-20241015192408-796eee8c2d53/go.mod h1:fheguH3Am2dGp1LfXkrvwqC/KlFq8F0nLq3LryOMrrE= -google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 h1:T6rh4haD3GVYsgEfWExoCZA2o2FmbNyKpTuAxbEFPTg= -google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9/go.mod h1:wp2WsuBYj6j8wUdo3ToZsdxxixbvQNAHqVJrTgi5E5M= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/genproto v0.0.0-20241021214115-324edc3d5d38 h1:Q3nlH8iSQSRUwOskjbcSMcF2jiYMNiQYZ0c2KEJLKKU= +google.golang.org/genproto v0.0.0-20241021214115-324edc3d5d38/go.mod h1:xBI+tzfqGGN2JBeSebfKXFSdBpWVQ7sLW40PTupVRm4= +google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 h1:fVoAXEKA4+yufmbdVYv+SE73+cPZbbbe8paLsHfkK+U= +google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53/go.mod h1:riSXTwQ4+nqmPGtobMFyW5FqVAmIs0St6VPp4Ug7CE4= 
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 h1:zciRKQ4kBpFgpfC5QQCVtnnNAcLIqweL7plyZRQHVpI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= diff --git a/receiver/googlecloudmonitoringreceiver/go.mod b/receiver/googlecloudmonitoringreceiver/go.mod index 66295fe84ed0..2a8efa3a1ecb 100644 --- a/receiver/googlecloudmonitoringreceiver/go.mod +++ b/receiver/googlecloudmonitoringreceiver/go.mod @@ -12,12 +12,12 @@ require ( go.opentelemetry.io/collector/receiver v0.112.0 go.uber.org/zap v1.27.0 golang.org/x/oauth2 v0.23.0 - google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 + google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 ) require ( - cloud.google.com/go/auth v0.9.9 // indirect - cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect + cloud.google.com/go/auth v0.10.0 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.5 // indirect cloud.google.com/go/compute/metadata v0.5.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/google/s2a-go v0.1.8 // indirect @@ -31,7 +31,7 @@ require ( golang.org/x/crypto v0.28.0 // indirect golang.org/x/sync v0.8.0 // indirect golang.org/x/time v0.7.0 // indirect - google.golang.org/genproto v0.0.0-20241015192408-796eee8c2d53 // indirect + google.golang.org/genproto v0.0.0-20241021214115-324edc3d5d38 // indirect ) require ( @@ -64,8 +64,8 @@ require ( golang.org/x/net v0.30.0 // indirect golang.org/x/sys v0.26.0 // indirect golang.org/x/text v0.19.0 // indirect - google.golang.org/api v0.203.0 - google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect + google.golang.org/api v0.204.0 + google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 // indirect google.golang.org/grpc v1.67.1 // indirect google.golang.org/protobuf v1.35.1 gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/receiver/googlecloudmonitoringreceiver/go.sum b/receiver/googlecloudmonitoringreceiver/go.sum index eaa011dd16a8..e4045abe6ec4 100644 --- a/receiver/googlecloudmonitoringreceiver/go.sum +++ b/receiver/googlecloudmonitoringreceiver/go.sum @@ -1,8 +1,8 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go/auth v0.9.9 h1:BmtbpNQozo8ZwW2t7QJjnrQtdganSdmqeIBxHxNkEZQ= -cloud.google.com/go/auth v0.9.9/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI= -cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= -cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= +cloud.google.com/go/auth v0.10.0 h1:tWlkvFAh+wwTOzXIjrwM64karR1iTBZ/GRr0S/DULYo= +cloud.google.com/go/auth v0.10.0/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI= +cloud.google.com/go/auth/oauth2adapt v0.2.5 h1:2p29+dePqsCHPP1bqDJcKj4qxRyYCcbzKpFyKGt3MTk= +cloud.google.com/go/auth/oauth2adapt v0.2.5/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8= cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= 
cloud.google.com/go/monitoring v1.21.2 h1:FChwVtClH19E7pJ+e0xUhJPGksctZNVOk2UhMmblmdU= @@ -206,19 +206,19 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.203.0 h1:SrEeuwU3S11Wlscsn+LA1kb/Y5xT8uggJSkIhD08NAU= -google.golang.org/api v0.203.0/go.mod h1:BuOVyCSYEPwJb3npWvDnNmFI92f3GeRnHNkETneT3SI= +google.golang.org/api v0.204.0 h1:3PjmQQEDkR/ENVZZwIYB4W/KzYtN8OrqnNcHWpeR8E4= +google.golang.org/api v0.204.0/go.mod h1:69y8QSoKIbL9F94bWgWAq6wGqGwyjBgi2y8rAK8zLag= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20241015192408-796eee8c2d53 h1:Df6WuGvthPzc+JiQ/G+m+sNX24kc0aTBqoDN/0yyykE= -google.golang.org/genproto v0.0.0-20241015192408-796eee8c2d53/go.mod h1:fheguH3Am2dGp1LfXkrvwqC/KlFq8F0nLq3LryOMrrE= -google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 h1:T6rh4haD3GVYsgEfWExoCZA2o2FmbNyKpTuAxbEFPTg= -google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9/go.mod h1:wp2WsuBYj6j8wUdo3ToZsdxxixbvQNAHqVJrTgi5E5M= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/genproto v0.0.0-20241021214115-324edc3d5d38 h1:Q3nlH8iSQSRUwOskjbcSMcF2jiYMNiQYZ0c2KEJLKKU= +google.golang.org/genproto v0.0.0-20241021214115-324edc3d5d38/go.mod h1:xBI+tzfqGGN2JBeSebfKXFSdBpWVQ7sLW40PTupVRm4= +google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 h1:fVoAXEKA4+yufmbdVYv+SE73+cPZbbbe8paLsHfkK+U= +google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53/go.mod h1:riSXTwQ4+nqmPGtobMFyW5FqVAmIs0St6VPp4Ug7CE4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 h1:zciRKQ4kBpFgpfC5QQCVtnnNAcLIqweL7plyZRQHVpI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= diff --git a/receiver/googlecloudpubsubreceiver/go.mod b/receiver/googlecloudpubsubreceiver/go.mod index e1a38d4895fa..4a51e056884e 100644 --- a/receiver/googlecloudpubsubreceiver/go.mod +++ b/receiver/googlecloudpubsubreceiver/go.mod @@ -19,17 +19,17 @@ require ( go.uber.org/goleak v1.3.0 go.uber.org/multierr v1.11.0 go.uber.org/zap v1.27.0 - google.golang.org/api v0.203.0 - google.golang.org/genproto 
v0.0.0-20241015192408-796eee8c2d53 - google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 + google.golang.org/api v0.204.0 + google.golang.org/genproto v0.0.0-20241021214115-324edc3d5d38 + google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 google.golang.org/grpc v1.67.1 google.golang.org/protobuf v1.35.1 ) require ( cloud.google.com/go v0.116.0 // indirect - cloud.google.com/go/auth v0.9.9 // indirect - cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect + cloud.google.com/go/auth v0.10.0 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.5 // indirect cloud.google.com/go/compute/metadata v0.5.2 // indirect cloud.google.com/go/iam v1.2.1 // indirect cloud.google.com/go/longrunning v0.6.1 // indirect @@ -79,7 +79,7 @@ require ( golang.org/x/sys v0.26.0 // indirect golang.org/x/text v0.19.0 // indirect golang.org/x/time v0.7.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/receiver/googlecloudpubsubreceiver/go.sum b/receiver/googlecloudpubsubreceiver/go.sum index 84e8f51a25cc..ec4d4f3249ef 100644 --- a/receiver/googlecloudpubsubreceiver/go.sum +++ b/receiver/googlecloudpubsubreceiver/go.sum @@ -1,10 +1,10 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.116.0 h1:B3fRrSDkLRt5qSHWe40ERJvhvnQwdZiHu0bJOpldweE= cloud.google.com/go v0.116.0/go.mod h1:cEPSRWPzZEswwdr9BxE6ChEn01dWlTaF05LiC2Xs70U= -cloud.google.com/go/auth v0.9.9 h1:BmtbpNQozo8ZwW2t7QJjnrQtdganSdmqeIBxHxNkEZQ= -cloud.google.com/go/auth v0.9.9/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI= -cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= -cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= +cloud.google.com/go/auth v0.10.0 h1:tWlkvFAh+wwTOzXIjrwM64karR1iTBZ/GRr0S/DULYo= +cloud.google.com/go/auth v0.10.0/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI= +cloud.google.com/go/auth/oauth2adapt v0.2.5 h1:2p29+dePqsCHPP1bqDJcKj4qxRyYCcbzKpFyKGt3MTk= +cloud.google.com/go/auth/oauth2adapt v0.2.5/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8= cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= cloud.google.com/go/iam v1.2.1 h1:QFct02HRb7H12J/3utj0qf5tobFh9V4vR6h9eX5EBRU= @@ -232,19 +232,19 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.203.0 h1:SrEeuwU3S11Wlscsn+LA1kb/Y5xT8uggJSkIhD08NAU= -google.golang.org/api v0.203.0/go.mod h1:BuOVyCSYEPwJb3npWvDnNmFI92f3GeRnHNkETneT3SI= +google.golang.org/api v0.204.0 h1:3PjmQQEDkR/ENVZZwIYB4W/KzYtN8OrqnNcHWpeR8E4= +google.golang.org/api v0.204.0/go.mod h1:69y8QSoKIbL9F94bWgWAq6wGqGwyjBgi2y8rAK8zLag= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod 
h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20241015192408-796eee8c2d53 h1:Df6WuGvthPzc+JiQ/G+m+sNX24kc0aTBqoDN/0yyykE= -google.golang.org/genproto v0.0.0-20241015192408-796eee8c2d53/go.mod h1:fheguH3Am2dGp1LfXkrvwqC/KlFq8F0nLq3LryOMrrE= -google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 h1:T6rh4haD3GVYsgEfWExoCZA2o2FmbNyKpTuAxbEFPTg= -google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9/go.mod h1:wp2WsuBYj6j8wUdo3ToZsdxxixbvQNAHqVJrTgi5E5M= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/genproto v0.0.0-20241021214115-324edc3d5d38 h1:Q3nlH8iSQSRUwOskjbcSMcF2jiYMNiQYZ0c2KEJLKKU= +google.golang.org/genproto v0.0.0-20241021214115-324edc3d5d38/go.mod h1:xBI+tzfqGGN2JBeSebfKXFSdBpWVQ7sLW40PTupVRm4= +google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 h1:fVoAXEKA4+yufmbdVYv+SE73+cPZbbbe8paLsHfkK+U= +google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53/go.mod h1:riSXTwQ4+nqmPGtobMFyW5FqVAmIs0St6VPp4Ug7CE4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 h1:zciRKQ4kBpFgpfC5QQCVtnnNAcLIqweL7plyZRQHVpI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= diff --git a/receiver/googlecloudspannerreceiver/go.mod b/receiver/googlecloudspannerreceiver/go.mod index 568e4f547206..db71e4fad617 100644 --- a/receiver/googlecloudspannerreceiver/go.mod +++ b/receiver/googlecloudspannerreceiver/go.mod @@ -16,7 +16,7 @@ require ( go.uber.org/goleak v1.3.0 go.uber.org/multierr v1.11.0 go.uber.org/zap v1.27.0 - google.golang.org/api v0.201.0 + google.golang.org/api v0.204.0 google.golang.org/grpc v1.67.1 gopkg.in/yaml.v3 v3.0.1 ) @@ -24,8 +24,8 @@ require ( require ( cel.dev/expr v0.16.0 // indirect cloud.google.com/go v0.116.0 // indirect - cloud.google.com/go/auth v0.9.8 // indirect - cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect + cloud.google.com/go/auth v0.10.0 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.5 // indirect cloud.google.com/go/compute/metadata v0.5.2 // indirect cloud.google.com/go/iam v1.2.1 // indirect cloud.google.com/go/longrunning v0.6.1 // indirect @@ -82,9 +82,9 @@ require ( golang.org/x/sys v0.26.0 // indirect golang.org/x/text v0.19.0 // indirect golang.org/x/time v0.7.0 // indirect - google.golang.org/genproto v0.0.0-20241007155032-5fefd90f89a9 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9 // indirect + google.golang.org/genproto v0.0.0-20241021214115-324edc3d5d38 
// indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 // indirect google.golang.org/protobuf v1.35.1 // indirect ) diff --git a/receiver/googlecloudspannerreceiver/go.sum b/receiver/googlecloudspannerreceiver/go.sum index 17f0ea86d735..1bf6fec126d8 100644 --- a/receiver/googlecloudspannerreceiver/go.sum +++ b/receiver/googlecloudspannerreceiver/go.sum @@ -101,10 +101,10 @@ cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVo cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= -cloud.google.com/go/auth v0.9.8 h1:+CSJ0Gw9iVeSENVCKJoLHhdUykDgXSc4Qn+gu2BRtR8= -cloud.google.com/go/auth v0.9.8/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI= -cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= -cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= +cloud.google.com/go/auth v0.10.0 h1:tWlkvFAh+wwTOzXIjrwM64karR1iTBZ/GRr0S/DULYo= +cloud.google.com/go/auth v0.10.0/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI= +cloud.google.com/go/auth/oauth2adapt v0.2.5 h1:2p29+dePqsCHPP1bqDJcKj4qxRyYCcbzKpFyKGt3MTk= +cloud.google.com/go/auth/oauth2adapt v0.2.5/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8= cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= @@ -1423,8 +1423,8 @@ google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/ google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0= google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= -google.golang.org/api v0.201.0 h1:+7AD9JNM3tREtawRMu8sOjSbb8VYcYXJG/2eEOmfDu0= -google.golang.org/api v0.201.0/go.mod h1:HVY0FCHVs89xIW9fzf/pBvOEm+OolHa86G/txFezyq4= +google.golang.org/api v0.204.0 h1:3PjmQQEDkR/ENVZZwIYB4W/KzYtN8OrqnNcHWpeR8E4= +google.golang.org/api v0.204.0/go.mod h1:69y8QSoKIbL9F94bWgWAq6wGqGwyjBgi2y8rAK8zLag= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1564,12 +1564,12 @@ google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOl google.golang.org/genproto v0.0.0-20230330154414-c0448cd141ea/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= -google.golang.org/genproto v0.0.0-20241007155032-5fefd90f89a9 h1:nFS3IivktIU5Mk6KQa+v6RKkHUpdQpphqGNLxqNnbEk= -google.golang.org/genproto v0.0.0-20241007155032-5fefd90f89a9/go.mod h1:tEzYTYZxbmVNOu0OAFH9HzdJtLn6h4Aj89zzlBCdHms= 
-google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f h1:jTm13A2itBi3La6yTGqn8bVSrc3ZZ1r8ENHlIXBfnRA= -google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f/go.mod h1:CLGoBuH1VHxAUXVPP8FfPwPEVJB6lz3URE5mY2SuayE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9 h1:QCqS/PdaHTSWGvupk2F/ehwHtGc0/GYkT+3GAcR1CCc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/genproto v0.0.0-20241021214115-324edc3d5d38 h1:Q3nlH8iSQSRUwOskjbcSMcF2jiYMNiQYZ0c2KEJLKKU= +google.golang.org/genproto v0.0.0-20241021214115-324edc3d5d38/go.mod h1:xBI+tzfqGGN2JBeSebfKXFSdBpWVQ7sLW40PTupVRm4= +google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 h1:fVoAXEKA4+yufmbdVYv+SE73+cPZbbbe8paLsHfkK+U= +google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53/go.mod h1:riSXTwQ4+nqmPGtobMFyW5FqVAmIs0St6VPp4Ug7CE4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 h1:zciRKQ4kBpFgpfC5QQCVtnnNAcLIqweL7plyZRQHVpI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= From 4901ce608a52ab8249e4963380b59e4c6c34422b Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 5 Nov 2024 10:14:23 -0700 Subject: [PATCH 4/9] fix(deps): update module github.com/docker/docker to v27.3.1+incompatible (#36211) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Change | Age | Adoption | Passing | Confidence | |---|---|---|---|---|---| | [github.com/docker/docker](https://redirect.github.com/docker/docker) | `v27.1.1+incompatible` -> `v27.3.1+incompatible` | [![age](https://developer.mend.io/api/mc/badges/age/go/github.com%2fdocker%2fdocker/v27.3.1+incompatible?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/go/github.com%2fdocker%2fdocker/v27.3.1+incompatible?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/go/github.com%2fdocker%2fdocker/v27.1.1+incompatible/v27.3.1+incompatible?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/go/github.com%2fdocker%2fdocker/v27.1.1+incompatible/v27.3.1+incompatible?slim=true)](https://docs.renovatebot.com/merge-confidence/) | --- > [!WARNING] > Some dependencies could not be looked up. Check the Dependency Dashboard for more information. --- ### Release Notes
docker/docker (github.com/docker/docker) ### [`v27.3.1+incompatible`](https://redirect.github.com/docker/docker/compare/v27.3.0...v27.3.1) [Compare Source](https://redirect.github.com/docker/docker/compare/v27.3.0...v27.3.1) ### [`v27.3.0+incompatible`](https://redirect.github.com/docker/docker/compare/v27.2.1...v27.3.0) [Compare Source](https://redirect.github.com/docker/docker/compare/v27.2.1...v27.3.0) ### [`v27.2.1+incompatible`](https://redirect.github.com/docker/docker/compare/v27.2.0...v27.2.1) [Compare Source](https://redirect.github.com/docker/docker/compare/v27.2.0...v27.2.1) ### [`v27.2.0+incompatible`](https://redirect.github.com/docker/docker/compare/v27.1.2...v27.2.0) [Compare Source](https://redirect.github.com/docker/docker/compare/v27.1.2...v27.2.0) ### [`v27.1.2+incompatible`](https://redirect.github.com/docker/docker/compare/v27.1.1...v27.1.2) [Compare Source](https://redirect.github.com/docker/docker/compare/v27.1.1...v27.1.2)
--- ### Configuration 📅 **Schedule**: Branch creation - "on tuesday" (UTC), Automerge - At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR was generated by [Mend Renovate](https://mend.io/renovate/). View the [repository job log](https://developer.mend.io/github/open-telemetry/opentelemetry-collector-contrib). --------- Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Co-authored-by: opentelemetrybot <107717825+opentelemetrybot@users.noreply.github.com> Co-authored-by: Yang Song --- extension/storage/dbstorage/go.mod | 7 +++++-- extension/storage/dbstorage/go.sum | 9 ++++----- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/extension/storage/dbstorage/go.mod b/extension/storage/dbstorage/go.mod index 491a3f06c5e8..838c58c9157e 100644 --- a/extension/storage/dbstorage/go.mod +++ b/extension/storage/dbstorage/go.mod @@ -3,7 +3,7 @@ module github.com/open-telemetry/opentelemetry-collector-contrib/extension/stora go 1.22.0 require ( - github.com/docker/docker v27.1.1+incompatible + github.com/docker/docker v27.3.1+incompatible github.com/docker/go-connections v0.5.0 github.com/jackc/pgx/v5 v5.7.1 github.com/mattn/go-sqlite3 v1.14.24 @@ -19,10 +19,10 @@ require ( require ( dario.cat/mergo v1.0.0 // indirect + github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect - github.com/containerd/containerd v1.7.18 // indirect github.com/containerd/log v0.1.0 // indirect github.com/containerd/platforms v0.2.1 // indirect github.com/cpuguy83/dockercfg v0.3.1 // indirect @@ -51,6 +51,7 @@ require ( github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect + github.com/moby/sys/userns v0.1.0 // indirect github.com/moby/term v0.5.0 // indirect github.com/morikuni/aec v1.0.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect @@ -68,6 +69,7 @@ require ( go.opentelemetry.io/collector/pdata v1.18.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect go.opentelemetry.io/otel v1.31.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 // indirect go.opentelemetry.io/otel/metric v1.31.0 // indirect go.opentelemetry.io/otel/sdk v1.31.0 // indirect go.opentelemetry.io/otel/sdk/metric v1.31.0 // indirect @@ -78,6 +80,7 @@ require ( golang.org/x/sync v0.8.0 // indirect golang.org/x/sys v0.26.0 // indirect golang.org/x/text v0.18.0 // indirect + golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 // indirect google.golang.org/grpc v1.67.1 // indirect google.golang.org/protobuf v1.35.1 // indirect diff --git a/extension/storage/dbstorage/go.sum b/extension/storage/dbstorage/go.sum index b560c69a3857..2223194e8145 100644 --- a/extension/storage/dbstorage/go.sum +++ b/extension/storage/dbstorage/go.sum @@ -8,8 +8,6 @@ github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERo 
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/containerd/containerd v1.7.18 h1:jqjZTQNfXGoEaZdW1WwPU0RqSn1Bm2Ay/KJPUuO8nao= -github.com/containerd/containerd v1.7.18/go.mod h1:IYEk9/IO6wAPUz2bCMVUbsfXjzw5UNP5fLz4PsUygQ4= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= @@ -23,8 +21,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v27.1.1+incompatible h1:hO/M4MtV36kzKldqnA37IWhebRA+LnqqcqDja6kVaKY= -github.com/docker/docker v27.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI= +github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -90,6 +88,8 @@ github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5 github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= github.com/moby/sys/user v0.1.0 h1:WmZ93f5Ux6het5iituh9x2zAG7NFY9Aqi49jjE1PaQg= github.com/moby/sys/user v0.1.0/go.mod h1:fKJhFOnsCN6xZ5gSfbM6zaHGgDJMrqt9/reuj4T7MmU= +github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g= +github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= @@ -216,7 +216,6 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto v0.0.0-20230920204549-e6e6cdab5c13 h1:vlzZttNJGVqTsRFU9AmdnrcO1Znh8Ew9kCD//yjigk0= google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 h1:wKguEg1hsxI2/L3hUYrpo1RVi48K+uTyzKqprwLXsb8= google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo= google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 h1:e7S5W7MGGLaSu8j3YjdezkZ+m1/Nm0uRVRMEMGk26Xs= From 5a3892e8d6e802c70ee4366f8194948adbda7ae9 Mon Sep 17 00:00:00 2001 From: jvoravong 
<47871238+jvoravong@users.noreply.github.com> Date: Tue, 5 Nov 2024 12:51:23 -0700 Subject: [PATCH 5/9] [k8scluster/receiver] Add more e2e test coverage (#36114) #### Description - Add more e2e test coverage for cronjob, hpa, job, and statefulset objects - Updated the golden file (expected.yaml) used for test validation - Added some test helpers to help create and delete multiple Kubernetes test objects - No .chloggen entry is needed for this PR because all the changes are for testing enhancements #### Testing - Tested locally and in Github CI/CD --------- Co-authored-by: Antoine Toulme Co-authored-by: Tyler Helmuth <12352919+TylerHelmuth@users.noreply.github.com> --- internal/k8stest/k8s_objects.go | 35 + receiver/k8sclusterreceiver/e2e_test.go | 84 +- .../testdata/e2e/expected.yaml | 1476 ++++++++++++----- .../testdata/e2e/testobjects/cronjob.yaml | 19 + .../testdata/e2e/testobjects/hpa.yaml | 13 + .../testdata/e2e/testobjects/job.yaml | 17 + .../testdata/e2e/testobjects/statefulset.yaml | 29 + 7 files changed, 1234 insertions(+), 439 deletions(-) create mode 100644 receiver/k8sclusterreceiver/testdata/e2e/testobjects/cronjob.yaml create mode 100644 receiver/k8sclusterreceiver/testdata/e2e/testobjects/hpa.yaml create mode 100644 receiver/k8sclusterreceiver/testdata/e2e/testobjects/job.yaml create mode 100644 receiver/k8sclusterreceiver/testdata/e2e/testobjects/statefulset.yaml diff --git a/internal/k8stest/k8s_objects.go b/internal/k8stest/k8s_objects.go index bf4b559d0994..a9acca588f84 100644 --- a/internal/k8stest/k8s_objects.go +++ b/internal/k8stest/k8s_objects.go @@ -5,6 +5,8 @@ package k8stest // import "github.com/open-telemetry/opentelemetry-collector-con import ( "context" + "os" + "path/filepath" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -53,3 +55,36 @@ func DeleteObject(client *K8sClient, obj *unstructured.Unstructured) error { PropagationPolicy: &deletePolicy, }) } + +func CreateObjects(client *K8sClient, dir string) ([]*unstructured.Unstructured, error) { + var objs []*unstructured.Unstructured + files, err := os.ReadDir(dir) + if err != nil { + return nil, err + } + + for _, file := range files { + if file.IsDir() { + continue // Skip directories + } + manifest, err := os.ReadFile(filepath.Join(dir, file.Name())) + if err != nil { + return nil, err + } + obj, err := CreateObject(client, manifest) + if err != nil { + return nil, err + } + objs = append(objs, obj) + } + return objs, nil +} + +func DeleteObjects(client *K8sClient, objs []*unstructured.Unstructured) error { + for _, obj := range objs { + if err := DeleteObject(client, obj); err != nil { + return err + } + } + return nil +} diff --git a/receiver/k8sclusterreceiver/e2e_test.go b/receiver/k8sclusterreceiver/e2e_test.go index 8f685047816f..97259ee4b675 100644 --- a/receiver/k8sclusterreceiver/e2e_test.go +++ b/receiver/k8sclusterreceiver/e2e_test.go @@ -7,7 +7,6 @@ package k8sclusterreceiver import ( "context" - "path/filepath" "strings" "testing" "time" @@ -26,7 +25,9 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest/pmetrictest" ) +const expectedFile = "./testdata/e2e/expected.yaml" const testKubeConfig = "/tmp/kube-config-otelcol-e2e-testing" +const testObjectsDir = "./testdata/e2e/testobjects/" // TestE2E tests the k8s cluster receiver with a real k8s cluster. 
// The test requires a prebuilt otelcontribcol image uploaded to a kind k8s cluster defined in @@ -38,13 +39,20 @@ const testKubeConfig = "/tmp/kube-config-otelcol-e2e-testing" func TestE2E(t *testing.T) { var expected pmetric.Metrics - expectedFile := filepath.Join("testdata", "e2e", "expected.yaml") expected, err := golden.ReadMetrics(expectedFile) require.NoError(t, err) k8sClient, err := k8stest.NewK8sClient(testKubeConfig) require.NoError(t, err) + // k8s test objs + testObjs, err := k8stest.CreateObjects(k8sClient, testObjectsDir) + require.NoErrorf(t, err, "failed to create objects") + + t.Cleanup(func() { + require.NoErrorf(t, k8stest.DeleteObjects(k8sClient, testObjs), "failed to delete objects") + }) + metricsConsumer := new(consumertest.MetricsSink) shutdownSink := startUpSink(t, metricsConsumer) defer shutdownSink() @@ -52,53 +60,89 @@ func TestE2E(t *testing.T) { testID := uuid.NewString()[:8] collectorObjs := k8stest.CreateCollectorObjects(t, k8sClient, testID, "") - defer func() { + t.Cleanup(func() { for _, obj := range append(collectorObjs) { require.NoErrorf(t, k8stest.DeleteObject(k8sClient, obj), "failed to delete object %s", obj.GetName()) } - }() + }) wantEntries := 10 // Minimal number of metrics to wait for. waitForData(t, wantEntries, metricsConsumer) + // golden.WriteMetrics(t, expectedFile, metricsConsumer.AllMetrics()[len(metricsConsumer.AllMetrics())-1]) replaceWithStar := func(string) string { return "*" } shortenNames := func(value string) string { + if strings.HasPrefix(value, "coredns") { + return "coredns" + } + if strings.HasPrefix(value, "kindnet") { + return "kindnet" + } + if strings.HasPrefix(value, "kube-apiserver") { + return "kube-apiserver" + } if strings.HasPrefix(value, "kube-proxy") { return "kube-proxy" } - if strings.HasPrefix(value, "local-path-provisioner") { - return "local-path-provisioner" + if strings.HasPrefix(value, "kube-scheduler") { + return "kube-scheduler" } - if strings.HasPrefix(value, "kindnet") { - return "kindnet" + if strings.HasPrefix(value, "kube-controller-manager") { + return "kube-controller-manager" } - if strings.HasPrefix(value, "coredns") { - return "coredns" + if strings.HasPrefix(value, "local-path-provisioner") { + return "local-path-provisioner" } if strings.HasPrefix(value, "otelcol") { return "otelcol" } + if strings.HasPrefix(value, "test-k8scluster-receiver-cronjob") { + return "test-k8scluster-receiver-cronjob" + } + if strings.HasPrefix(value, "test-k8scluster-receiver-job") { + return "test-k8scluster-receiver-job" + } return value } containerImageShorten := func(value string) string { - return value[(strings.LastIndex(value, "/") + 1):] + // Extracts the image name by removing the repository prefix. + // Also removes any architecture identifier suffix, if present, by applying shortenNames. 
+ return shortenNames(value[(strings.LastIndex(value, "/") + 1):]) } + require.NoError(t, pmetrictest.CompareMetrics(expected, metricsConsumer.AllMetrics()[len(metricsConsumer.AllMetrics())-1], pmetrictest.IgnoreTimestamp(), pmetrictest.IgnoreStartTimestamp(), - pmetrictest.IgnoreMetricValues("k8s.deployment.desired", "k8s.deployment.available", "k8s.container.restarts", "k8s.container.cpu_request", "k8s.container.memory_request", "k8s.container.memory_limit"), + pmetrictest.IgnoreMetricValues( + "k8s.container.cpu_request", + "k8s.container.memory_limit", + "k8s.container.memory_request", + "k8s.container.restarts", + "k8s.cronjob.active_jobs", + "k8s.deployment.available", + "k8s.deployment.desired", + "k8s.job.active_pods", + "k8s.job.desired_successful_pods", + "k8s.job.failed_pods", + "k8s.job.max_parallel_pods", + "k8s.job.successful_pods"), + pmetrictest.ChangeResourceAttributeValue("container.id", replaceWithStar), + pmetrictest.ChangeResourceAttributeValue("container.image.name", containerImageShorten), + pmetrictest.ChangeResourceAttributeValue("container.image.tag", replaceWithStar), + pmetrictest.ChangeResourceAttributeValue("k8s.cronjob.uid", replaceWithStar), + pmetrictest.ChangeResourceAttributeValue("k8s.daemonset.uid", replaceWithStar), pmetrictest.ChangeResourceAttributeValue("k8s.deployment.name", shortenNames), - pmetrictest.ChangeResourceAttributeValue("k8s.pod.name", shortenNames), - pmetrictest.ChangeResourceAttributeValue("k8s.replicaset.name", shortenNames), pmetrictest.ChangeResourceAttributeValue("k8s.deployment.uid", replaceWithStar), + pmetrictest.ChangeResourceAttributeValue("k8s.hpa.uid", replaceWithStar), + pmetrictest.ChangeResourceAttributeValue("k8s.job.name", shortenNames), + pmetrictest.ChangeResourceAttributeValue("k8s.job.uid", replaceWithStar), + pmetrictest.ChangeResourceAttributeValue("k8s.namespace.uid", replaceWithStar), + pmetrictest.ChangeResourceAttributeValue("k8s.node.uid", replaceWithStar), + pmetrictest.ChangeResourceAttributeValue("k8s.pod.name", shortenNames), pmetrictest.ChangeResourceAttributeValue("k8s.pod.uid", replaceWithStar), + pmetrictest.ChangeResourceAttributeValue("k8s.replicaset.name", shortenNames), pmetrictest.ChangeResourceAttributeValue("k8s.replicaset.uid", replaceWithStar), - pmetrictest.ChangeResourceAttributeValue("container.id", replaceWithStar), - pmetrictest.ChangeResourceAttributeValue("container.image.tag", replaceWithStar), - pmetrictest.ChangeResourceAttributeValue("k8s.node.uid", replaceWithStar), - pmetrictest.ChangeResourceAttributeValue("k8s.namespace.uid", replaceWithStar), - pmetrictest.ChangeResourceAttributeValue("k8s.daemonset.uid", replaceWithStar), - pmetrictest.ChangeResourceAttributeValue("container.image.name", containerImageShorten), + pmetrictest.ChangeResourceAttributeValue("k8s.statefulset.uid", replaceWithStar), pmetrictest.IgnoreScopeVersion(), pmetrictest.IgnoreResourceMetricsOrder(), pmetrictest.IgnoreMetricsOrder(), diff --git a/receiver/k8sclusterreceiver/testdata/e2e/expected.yaml b/receiver/k8sclusterreceiver/testdata/e2e/expected.yaml index bbd6a95685b9..9f7aed414369 100644 --- a/receiver/k8sclusterreceiver/testdata/e2e/expected.yaml +++ b/receiver/k8sclusterreceiver/testdata/e2e/expected.yaml @@ -3,20 +3,20 @@ resourceMetrics: attributes: - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: default - key: k8s.namespace.uid value: - stringValue: 3604b135-20f2-404b-9c1a-175ef649793e - schemaUrl: "https://opentelemetry.io/schemas/1.18.0" + stringValue: 
feb94a85-d29f-4693-a6d7-ca5206a5141e + schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: - description: The current phase of namespaces (1 for active and 0 for terminating) gauge: dataPoints: - asInt: "1" - timeUnixNano: "1686772769034865545" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" name: k8s.namespace.phase - unit: "" scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver version: latest @@ -24,20 +24,20 @@ resourceMetrics: attributes: - key: k8s.namespace.name value: - stringValue: local-path-storage + stringValue: kube-node-lease - key: k8s.namespace.uid value: - stringValue: 414da07d-33d0-4043-ae7c-d6b264d134e5 - schemaUrl: "https://opentelemetry.io/schemas/1.18.0" + stringValue: ff852fe4-f42e-48d7-883d-3df03ab5741c + schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: - description: The current phase of namespaces (1 for active and 0 for terminating) gauge: dataPoints: - asInt: "1" - timeUnixNano: "1686772769034865545" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" name: k8s.namespace.phase - unit: "" scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver version: latest @@ -48,17 +48,17 @@ resourceMetrics: stringValue: kube-public - key: k8s.namespace.uid value: - stringValue: 7516afba-1597-49e3-8569-9732b7b94865 - schemaUrl: "https://opentelemetry.io/schemas/1.18.0" + stringValue: 66be991c-1e7d-4a14-af98-4f421bee9ec4 + schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: - description: The current phase of namespaces (1 for active and 0 for terminating) gauge: dataPoints: - asInt: "1" - timeUnixNano: "1686772769034865545" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" name: k8s.namespace.phase - unit: "" scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver version: latest @@ -66,20 +66,20 @@ resourceMetrics: attributes: - key: k8s.namespace.name value: - stringValue: kube-node-lease + stringValue: kube-system - key: k8s.namespace.uid value: - stringValue: 8dd32894-d0ff-4cff-bd75-b818c20fc72b - schemaUrl: "https://opentelemetry.io/schemas/1.18.0" + stringValue: 1fdcff4f-01e0-459a-baaa-463b5f52eaa2 + schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: - description: The current phase of namespaces (1 for active and 0 for terminating) gauge: dataPoints: - asInt: "1" - timeUnixNano: "1686772769034865545" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" name: k8s.namespace.phase - unit: "" scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver version: latest @@ -87,20 +87,20 @@ resourceMetrics: attributes: - key: k8s.namespace.name value: - stringValue: default + stringValue: local-path-storage - key: k8s.namespace.uid value: - stringValue: caa467a2-d3e8-4e66-8b76-a155464bac79 - schemaUrl: "https://opentelemetry.io/schemas/1.18.0" + stringValue: cf6c8796-7d4b-4e61-ae41-9c90207c7c06 + schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: - description: The current phase of namespaces (1 for active and 0 for terminating) gauge: dataPoints: - asInt: "1" - timeUnixNano: "1686772769034865545" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" name: k8s.namespace.phase - unit: "" scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver version: latest @@ -111,17 +111,41 @@ resourceMetrics: stringValue: 
kind-control-plane - key: k8s.node.uid value: - stringValue: afd51338-8dbe-4234-aed3-0d1a9b3ee38e - schemaUrl: "https://opentelemetry.io/schemas/1.18.0" + stringValue: 080365b3-8b82-48dc-9885-d88364004eb3 + schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: - description: Ready condition status of the node (true=1, false=0, unknown=-1) gauge: dataPoints: - asInt: "1" - timeUnixNano: "1686772769034865545" + timeUnixNano: "1000000" name: k8s.node.condition_ready - unit: "" + scope: + name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver + version: latest + - resource: + attributes: + - key: k8s.cronjob.name + value: + stringValue: test-k8scluster-receiver-cronjob + - key: k8s.cronjob.uid + value: + stringValue: 6a3c3e99-5db1-481f-9d5d-782ae9de9f58 + - key: k8s.namespace.name + value: + stringValue: default + schemaUrl: https://opentelemetry.io/schemas/1.18.0 + scopeMetrics: + - metrics: + - description: The number of actively running jobs for a cronjob + gauge: + dataPoints: + - asInt: "2" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.cronjob.active_jobs + unit: '{job}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver version: latest @@ -132,41 +156,45 @@ resourceMetrics: stringValue: kindnet - key: k8s.daemonset.uid value: - stringValue: e7f2def1-dc2a-42f1-800e-187a4d408359 + stringValue: 4b389825-8fb0-4c66-a774-c9dfcba9d813 - key: k8s.namespace.name value: stringValue: kube-system - schemaUrl: "https://opentelemetry.io/schemas/1.18.0" + schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: - description: Number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod gauge: dataPoints: - asInt: "1" - timeUnixNano: "1686772769034865545" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" name: k8s.daemonset.current_scheduled_nodes - unit: "{node}" + unit: '{node}' - description: Number of nodes that should be running the daemon pod (including nodes currently running the daemon pod) gauge: dataPoints: - asInt: "1" - timeUnixNano: "1686772769034865545" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" name: k8s.daemonset.desired_scheduled_nodes - unit: "{node}" + unit: '{node}' - description: Number of nodes that are running the daemon pod, but are not supposed to run the daemon pod gauge: dataPoints: - asInt: "0" - timeUnixNano: "1686772769034865545" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" name: k8s.daemonset.misscheduled_nodes - unit: "{node}" + unit: '{node}' - description: Number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready gauge: dataPoints: - asInt: "1" - timeUnixNano: "1686772769034865545" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" name: k8s.daemonset.ready_nodes - unit: "{node}" + unit: '{node}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver version: latest @@ -177,41 +205,45 @@ resourceMetrics: stringValue: kube-proxy - key: k8s.daemonset.uid value: - stringValue: d84cd585-d6bb-44af-b070-a9cb363fa903 + stringValue: b88aca8b-5776-4f6a-b1f4-d430f972e7fc - key: k8s.namespace.name value: stringValue: kube-system - schemaUrl: "https://opentelemetry.io/schemas/1.18.0" + schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: - description: Number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod 
gauge: dataPoints: - asInt: "1" - timeUnixNano: "1686772769034865545" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" name: k8s.daemonset.current_scheduled_nodes - unit: "{node}" + unit: '{node}' - description: Number of nodes that should be running the daemon pod (including nodes currently running the daemon pod) gauge: dataPoints: - asInt: "1" - timeUnixNano: "1686772769034865545" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" name: k8s.daemonset.desired_scheduled_nodes - unit: "{node}" + unit: '{node}' - description: Number of nodes that are running the daemon pod, but are not supposed to run the daemon pod gauge: dataPoints: - asInt: "0" - timeUnixNano: "1686772769034865545" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" name: k8s.daemonset.misscheduled_nodes - unit: "{node}" + unit: '{node}' - description: Number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready gauge: dataPoints: - asInt: "1" - timeUnixNano: "1686772769034865545" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" name: k8s.daemonset.ready_nodes - unit: "{node}" + unit: '{node}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver version: latest @@ -222,27 +254,29 @@ resourceMetrics: stringValue: coredns - key: k8s.deployment.uid value: - stringValue: 2c83cf0c-8b3d-4106-a54c-4c84f9b6e755 + stringValue: 40f70689-1d8b-4eaf-b1b9-c7f1604ad616 - key: k8s.namespace.name value: stringValue: kube-system - schemaUrl: "https://opentelemetry.io/schemas/1.18.0" + schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: - - description: Number of desired pods in this deployment + - description: Total number of available pods (ready for at least minReadySeconds) targeted by this deployment gauge: dataPoints: - asInt: "2" - timeUnixNano: "1686772769034865545" - name: k8s.deployment.desired - unit: "{pod}" - - description: Total number of available pods (ready for at least minReadySeconds) targeted by this deployment + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.deployment.available + unit: '{pod}' + - description: Number of desired pods in this deployment gauge: dataPoints: - asInt: "2" - timeUnixNano: "1686772769034865545" - name: k8s.deployment.available - unit: "{pod}" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.deployment.desired + unit: '{pod}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver version: latest @@ -253,27 +287,29 @@ resourceMetrics: stringValue: local-path-provisioner - key: k8s.deployment.uid value: - stringValue: 998d752c-e947-4784-95a8-373e587ae6be + stringValue: c97a7ce6-7bc2-475b-ad74-ccbd1c464e17 - key: k8s.namespace.name value: stringValue: local-path-storage - schemaUrl: "https://opentelemetry.io/schemas/1.18.0" + schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: - - description: Number of desired pods in this deployment + - description: Total number of available pods (ready for at least minReadySeconds) targeted by this deployment gauge: dataPoints: - asInt: "1" - timeUnixNano: "1686772769034865545" - name: k8s.deployment.desired - unit: "{pod}" - - description: Total number of available pods (ready for at least minReadySeconds) targeted by this deployment + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.deployment.available + unit: '{pod}' + - description: Number of desired pods in this deployment gauge: 
dataPoints: - asInt: "1" - timeUnixNano: "1686772769034865545" - name: k8s.deployment.available - unit: "{pod}" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.deployment.desired + unit: '{pod}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver version: latest @@ -281,30 +317,252 @@ resourceMetrics: attributes: - key: k8s.deployment.name value: - stringValue: otelcol-5ffb893c + stringValue: otelcol-786b94f3 - key: k8s.deployment.uid value: - stringValue: ed2f7c36-acb7-4348-9eaa-6e86d17b3e70 + stringValue: 6433ed08-d04b-458d-b3db-f526238a1e65 - key: k8s.namespace.name value: stringValue: default - schemaUrl: "https://opentelemetry.io/schemas/1.18.0" + schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: + - description: Total number of available pods (ready for at least minReadySeconds) targeted by this deployment + gauge: + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.deployment.available + unit: '{pod}' - description: Number of desired pods in this deployment gauge: dataPoints: - asInt: "1" - timeUnixNano: "1686772769034865545" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" name: k8s.deployment.desired - unit: "{pod}" - - description: Total number of available pods (ready for at least minReadySeconds) targeted by this deployment + unit: '{pod}' + scope: + name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver + version: latest + - resource: + attributes: + - key: k8s.hpa.name + value: + stringValue: test-k8scluster-receiver-hpa + - key: k8s.hpa.uid + value: + stringValue: 963572dc-4663-4fb2-930a-e143320a03c3 + - key: k8s.namespace.name + value: + stringValue: default + schemaUrl: https://opentelemetry.io/schemas/1.18.0 + scopeMetrics: + - metrics: + - description: Current number of pod replicas managed by this autoscaler. gauge: dataPoints: - asInt: "1" - timeUnixNano: "1686772769034865545" - name: k8s.deployment.available - unit: "{pod}" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.hpa.current_replicas + unit: '{pod}' + - description: Desired number of pod replicas managed by this autoscaler. + gauge: + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.hpa.desired_replicas + unit: '{pod}' + - description: Maximum number of replicas to which the autoscaler can scale up. + gauge: + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.hpa.max_replicas + unit: '{pod}' + - description: Minimum number of replicas to which the autoscaler can scale up. 
+ gauge: + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.hpa.min_replicas + unit: '{pod}' + scope: + name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver + version: latest + - resource: + attributes: + - key: k8s.job.name + value: + stringValue: test-k8scluster-receiver-cronjob-28839770 + - key: k8s.job.uid + value: + stringValue: a38da134-af71-4bc1-a585-c9e0342f9aab + - key: k8s.namespace.name + value: + stringValue: default + schemaUrl: https://opentelemetry.io/schemas/1.18.0 + scopeMetrics: + - metrics: + - description: The number of actively running pods for a job + gauge: + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.job.active_pods + unit: '{pod}' + - description: The desired number of successfully finished pods the job should be run with + gauge: + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.job.desired_successful_pods + unit: '{pod}' + - description: The number of pods which reached phase Failed for a job + gauge: + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.job.failed_pods + unit: '{pod}' + - description: The max desired number of pods the job should run at any given time + gauge: + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.job.max_parallel_pods + unit: '{pod}' + - description: The number of pods which reached phase Succeeded for a job + gauge: + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.job.successful_pods + unit: '{pod}' + scope: + name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver + version: latest + - resource: + attributes: + - key: k8s.job.name + value: + stringValue: test-k8scluster-receiver-cronjob-28839771 + - key: k8s.job.uid + value: + stringValue: 37a9e0cc-5315-4e89-bb2b-5221849ff483 + - key: k8s.namespace.name + value: + stringValue: default + schemaUrl: https://opentelemetry.io/schemas/1.18.0 + scopeMetrics: + - metrics: + - description: The number of actively running pods for a job + gauge: + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.job.active_pods + unit: '{pod}' + - description: The desired number of successfully finished pods the job should be run with + gauge: + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.job.desired_successful_pods + unit: '{pod}' + - description: The number of pods which reached phase Failed for a job + gauge: + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.job.failed_pods + unit: '{pod}' + - description: The max desired number of pods the job should run at any given time + gauge: + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.job.max_parallel_pods + unit: '{pod}' + - description: The number of pods which reached phase Succeeded for a job + gauge: + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.job.successful_pods + unit: '{pod}' + scope: + name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver + version: latest + - resource: + attributes: + - key: k8s.job.name + value: + stringValue: test-k8scluster-receiver-job + - key: k8s.job.uid + value: + stringValue: 
b7ecbf9e-8e1a-4d70-beda-aab183645382 + - key: k8s.namespace.name + value: + stringValue: default + schemaUrl: https://opentelemetry.io/schemas/1.18.0 + scopeMetrics: + - metrics: + - description: The number of actively running pods for a job + gauge: + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.job.active_pods + unit: '{pod}' + - description: The desired number of successfully finished pods the job should be run with + gauge: + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.job.desired_successful_pods + unit: '{pod}' + - description: The number of pods which reached phase Failed for a job + gauge: + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.job.failed_pods + unit: '{pod}' + - description: The max desired number of pods the job should run at any given time + gauge: + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.job.max_parallel_pods + unit: '{pod}' + - description: The number of pods which reached phase Succeeded for a job + gauge: + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.job.successful_pods + unit: '{pod}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver version: latest @@ -315,27 +573,78 @@ resourceMetrics: stringValue: default - key: k8s.replicaset.name value: - stringValue: otelcol-5ffb893c-5459b589fd + stringValue: otelcol-786b94f3-67cf69944f - key: k8s.replicaset.uid value: - stringValue: fafc728a-82c7-49d6-a816-6bff81a191b4 - schemaUrl: "https://opentelemetry.io/schemas/1.18.0" + stringValue: d532dd9c-0490-4f85-be78-fd21d8a1b56f + schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: + - description: Total number of available pods (ready for at least minReadySeconds) targeted by this replicaset + gauge: + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.replicaset.available + unit: '{pod}' - description: Number of desired pods in this replicaset gauge: dataPoints: - asInt: "1" - timeUnixNano: "1686772769034865545" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" name: k8s.replicaset.desired - unit: "{pod}" - - description: Total number of available pods (ready for at least minReadySeconds) targeted by this replicaset + unit: '{pod}' + scope: + name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver + version: latest + - resource: + attributes: + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.statefulset.name + value: + stringValue: test-k8scluster-receiver-statefulset + - key: k8s.statefulset.uid + value: + stringValue: 5ceb9f10-fc64-4d70-b6f8-228b4a0cfd3c + schemaUrl: https://opentelemetry.io/schemas/1.18.0 + scopeMetrics: + - metrics: + - description: The number of pods created by the StatefulSet controller from the StatefulSet version gauge: dataPoints: - asInt: "1" - timeUnixNano: "1686772769034865545" - name: k8s.replicaset.available - unit: "{pod}" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.statefulset.current_pods + unit: '{pod}' + - description: Number of desired pods in the stateful set (the `spec.replicas` field) + gauge: + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.statefulset.desired_pods + unit: '{pod}' + - description: Number of pods created by the 
stateful set that have the `Ready` condition + gauge: + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.statefulset.ready_pods + unit: '{pod}' + - description: Number of pods created by the StatefulSet controller from the StatefulSet version + gauge: + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.statefulset.updated_pods + unit: '{pod}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver version: latest @@ -346,27 +655,29 @@ resourceMetrics: stringValue: kube-system - key: k8s.replicaset.name value: - stringValue: coredns-565d847f94 + stringValue: coredns-7db6d8ff4d - key: k8s.replicaset.uid value: - stringValue: 8477bceb-33de-4072-9bb1-fbc762defdda - schemaUrl: "https://opentelemetry.io/schemas/1.18.0" + stringValue: 2c8fee82-58d4-46c4-ae5e-81afcc5f9948 + schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: - - description: Number of desired pods in this replicaset + - description: Total number of available pods (ready for at least minReadySeconds) targeted by this replicaset gauge: dataPoints: - asInt: "2" - timeUnixNano: "1686772769034865545" - name: k8s.replicaset.desired - unit: "{pod}" - - description: Total number of available pods (ready for at least minReadySeconds) targeted by this replicaset + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.replicaset.available + unit: '{pod}' + - description: Number of desired pods in this replicaset gauge: dataPoints: - asInt: "2" - timeUnixNano: "1686772769034865545" - name: k8s.replicaset.available - unit: "{pod}" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.replicaset.desired + unit: '{pod}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver version: latest @@ -377,27 +688,29 @@ resourceMetrics: stringValue: local-path-storage - key: k8s.replicaset.name value: - stringValue: local-path-provisioner-684f458cdd + stringValue: local-path-provisioner-988d74bc - key: k8s.replicaset.uid value: - stringValue: 59e21dbf-09e1-4053-851d-90aad70bfb01 - schemaUrl: "https://opentelemetry.io/schemas/1.18.0" + stringValue: e58f8ba2-8df8-425e-8a2a-c07cf351bbd8 + schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: - - description: Number of desired pods in this replicaset + - description: Total number of available pods (ready for at least minReadySeconds) targeted by this replicaset gauge: dataPoints: - asInt: "1" - timeUnixNano: "1686772769034865545" - name: k8s.replicaset.desired - unit: "{pod}" - - description: Total number of available pods (ready for at least minReadySeconds) targeted by this replicaset + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.replicaset.available + unit: '{pod}' + - description: Number of desired pods in this replicaset gauge: dataPoints: - asInt: "1" - timeUnixNano: "1686772769034865545" - name: k8s.replicaset.available - unit: "{pod}" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.replicaset.desired + unit: '{pod}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver version: latest @@ -411,20 +724,20 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: otelcol-5ffb893c-5459b589fd-lrbpq + stringValue: otelcol-786b94f3-67cf69944f-6zv25 - key: k8s.pod.uid value: - stringValue: 5e4d1b29-35e5-4ff6-9779-b02921adcace - 
schemaUrl: "https://opentelemetry.io/schemas/1.18.0" + stringValue: 1fb8be2b-ae32-41c2-a172-e6cb9beb7c37 + schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: - description: Current phase of the pod (1 - Pending, 2 - Running, 3 - Succeeded, 4 - Failed, 5 - Unknown) gauge: dataPoints: - asInt: "2" - timeUnixNano: "1686772769034865545" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" name: k8s.pod.phase - unit: "" scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver version: latest @@ -432,26 +745,26 @@ resourceMetrics: attributes: - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: default - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: coredns-565d847f94-kt4s4 + stringValue: test-k8scluster-receiver-cronjob-28839770-9pp7g - key: k8s.pod.uid value: - stringValue: ebd4da01-4a19-4ed8-bb2b-a75fa9c66160 - schemaUrl: "https://opentelemetry.io/schemas/1.18.0" + stringValue: e388cfa8-06c3-47b6-a7a6-113d7cdda849 + schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: - description: Current phase of the pod (1 - Pending, 2 - Running, 3 - Succeeded, 4 - Failed, 5 - Unknown) gauge: dataPoints: - asInt: "2" - timeUnixNano: "1686772769034865545" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" name: k8s.pod.phase - unit: "" scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver version: latest @@ -459,26 +772,26 @@ resourceMetrics: attributes: - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: default - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: coredns-565d847f94-v6kmv + stringValue: test-k8scluster-receiver-cronjob-28839771-llccr - key: k8s.pod.uid value: - stringValue: 2c672907-5d69-4f91-85e0-f1792164cadc - schemaUrl: "https://opentelemetry.io/schemas/1.18.0" + stringValue: 0c2351b3-842c-4632-95c2-e7b061128a98 + schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: - description: Current phase of the pod (1 - Pending, 2 - Running, 3 - Succeeded, 4 - Failed, 5 - Unknown) gauge: dataPoints: - asInt: "2" - timeUnixNano: "1686772769034865545" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" name: k8s.pod.phase - unit: "" scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver version: latest @@ -486,26 +799,26 @@ resourceMetrics: attributes: - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: default - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: etcd-kind-control-plane + stringValue: test-k8scluster-receiver-job-bzjrh - key: k8s.pod.uid value: - stringValue: 16463557-8966-458d-b356-54f16895a1dd - schemaUrl: "https://opentelemetry.io/schemas/1.18.0" + stringValue: 7e8bdace-4bce-4750-bd8c-d7359bb3e56b + schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: - description: Current phase of the pod (1 - Pending, 2 - Running, 3 - Succeeded, 4 - Failed, 5 - Unknown) gauge: dataPoints: - asInt: "2" - timeUnixNano: "1686772769034865545" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" name: k8s.pod.phase - unit: "" scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver version: latest @@ -513,26 +826,26 @@ resourceMetrics: attributes: - key: k8s.namespace.name value: - stringValue: 
kube-system + stringValue: default - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kindnet-kjb8z + stringValue: test-k8scluster-receiver-statefulset-0 - key: k8s.pod.uid value: - stringValue: 9405ca8b-7b7d-4271-80d1-41901f84c9e8 - schemaUrl: "https://opentelemetry.io/schemas/1.18.0" + stringValue: f1ea5486-77b7-41c6-a3be-d03650011801 + schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: - description: Current phase of the pod (1 - Pending, 2 - Running, 3 - Succeeded, 4 - Failed, 5 - Unknown) gauge: dataPoints: - asInt: "2" - timeUnixNano: "1686772769034865545" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" name: k8s.pod.phase - unit: "" scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver version: latest @@ -546,20 +859,20 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kube-apiserver-kind-control-plane + stringValue: coredns-7db6d8ff4d-5kh78 - key: k8s.pod.uid value: - stringValue: 4ce29152-4749-43a7-89b4-b8265bf35b09 - schemaUrl: "https://opentelemetry.io/schemas/1.18.0" + stringValue: 2c5b60e0-a01e-4312-8818-d85f94ab841e + schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: - description: Current phase of the pod (1 - Pending, 2 - Running, 3 - Succeeded, 4 - Failed, 5 - Unknown) gauge: dataPoints: - asInt: "2" - timeUnixNano: "1686772769034865545" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" name: k8s.pod.phase - unit: "" scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver version: latest @@ -573,20 +886,20 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kube-controller-manager-kind-control-plane + stringValue: coredns-7db6d8ff4d-p89tc - key: k8s.pod.uid value: - stringValue: 5ebe0d65-e661-4e6b-a053-a3a22adec893 - schemaUrl: "https://opentelemetry.io/schemas/1.18.0" + stringValue: f3494708-493a-4f0f-965c-dcedfdca253f + schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: - description: Current phase of the pod (1 - Pending, 2 - Running, 3 - Succeeded, 4 - Failed, 5 - Unknown) gauge: dataPoints: - asInt: "2" - timeUnixNano: "1686772769034865545" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" name: k8s.pod.phase - unit: "" scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver version: latest @@ -600,20 +913,20 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kube-proxy-twxhf + stringValue: etcd-kind-control-plane - key: k8s.pod.uid value: - stringValue: 38e3c8d5-0c3e-465f-8a79-4117dbcd7607 - schemaUrl: "https://opentelemetry.io/schemas/1.18.0" + stringValue: 40e8f13b-bec6-4dae-98d9-fd86939dfc4c + schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: - description: Current phase of the pod (1 - Pending, 2 - Running, 3 - Succeeded, 4 - Failed, 5 - Unknown) gauge: dataPoints: - asInt: "2" - timeUnixNano: "1686772769034865545" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" name: k8s.pod.phase - unit: "" scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver version: latest @@ -627,20 +940,20 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kube-scheduler-kind-control-plane + stringValue: kindnet-qwzhw - key: k8s.pod.uid value: - stringValue: 
d966df8b-e9d3-41d5-9b25-6c1a5ec9d3dc - schemaUrl: "https://opentelemetry.io/schemas/1.18.0" + stringValue: 955e1f8c-2fe3-4a1d-85e6-31ff7410dc00 + schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: - description: Current phase of the pod (1 - Pending, 2 - Running, 3 - Succeeded, 4 - Failed, 5 - Unknown) gauge: dataPoints: - asInt: "2" - timeUnixNano: "1686772769034865545" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" name: k8s.pod.phase - unit: "" scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver version: latest @@ -648,43 +961,31 @@ resourceMetrics: attributes: - key: k8s.namespace.name value: - stringValue: local-path-storage + stringValue: kube-system - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: local-path-provisioner-684f458cdd-v726j + stringValue: kube-apiserver-kind-control-plane - key: k8s.pod.uid value: - stringValue: 22a22d93-0ec2-4c90-91b1-29a0b3ea9173 - schemaUrl: "https://opentelemetry.io/schemas/1.18.0" + stringValue: d2032a9e-8c7c-4d9c-bbcb-526bd1a7b4f7 + schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: - description: Current phase of the pod (1 - Pending, 2 - Running, 3 - Succeeded, 4 - Failed, 5 - Unknown) gauge: dataPoints: - asInt: "2" - timeUnixNano: "1686772769034865545" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" name: k8s.pod.phase - unit: "" scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver version: latest - resource: attributes: - - key: container.id - value: - stringValue: 065c7c8b8e35d285df3e05ada86520ab9a55dd5cb25331c1fb0e39739ae7fdfa - - key: container.image.name - value: - stringValue: registry.k8s.io/etcd - - key: container.image.tag - value: - stringValue: 3.5.4-0 - - key: k8s.container.name - value: - stringValue: etcd - key: k8s.namespace.name value: stringValue: kube-system @@ -693,58 +994,25 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: etcd-kind-control-plane + stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 16463557-8966-458d-b356-54f16895a1dd - schemaUrl: "https://opentelemetry.io/schemas/1.18.0" + stringValue: e3e6d44a-5bc6-4687-85f1-37eb42c42c05 + schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: - - description: How many times the container has restarted in the recent past. This value is pulled directly from the K8s API and the value can go indefinitely high and be reset to 0 at any time depending on how your kubelet is configured to prune dead containers. It is best to not depend too much on the exact value but rather look at it as either == 0, in which case you can conclude there were no restarts in the recent past, or > 0, in which case you can conclude there were restarts in the recent past, and not try and analyze the value beyond that. - gauge: - dataPoints: - - asInt: "0" - timeUnixNano: "1686772769034865545" - name: k8s.container.restarts - unit: "{restart}" - - description: Whether a container has passed its readiness probe (0 for no, 1 for yes) - gauge: - dataPoints: - - asInt: "1" - timeUnixNano: "1686772769034865545" - name: k8s.container.ready - unit: "" - - description: Resource requested for the container. 
See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details - gauge: - dataPoints: - - asDouble: 0.1 - timeUnixNano: "1686772769034865545" - name: k8s.container.cpu_request - unit: "{cpu}" - - description: Resource requested for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details + - description: Current phase of the pod (1 - Pending, 2 - Running, 3 - Succeeded, 4 - Failed, 5 - Unknown) gauge: dataPoints: - - asInt: "104857600" - timeUnixNano: "1686772769034865545" - name: k8s.container.memory_request - unit: "By" + - asInt: "2" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.pod.phase scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver version: latest - resource: attributes: - - key: container.id - value: - stringValue: 077787bc155f57b4bc991cbc069732fbe95c67df5e30b15d97144b0897828f4b - - key: container.image.name - value: - stringValue: docker.io/kindest/kindnetd - - key: container.image.tag - value: - stringValue: v20221004-44d545d1 - - key: k8s.container.name - value: - stringValue: kindnet-cni - key: k8s.namespace.name value: stringValue: kube-system @@ -753,55 +1021,74 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kindnet-kjb8z + stringValue: kube-proxy-kktz6 - key: k8s.pod.uid value: - stringValue: 9405ca8b-7b7d-4271-80d1-41901f84c9e8 - schemaUrl: "https://opentelemetry.io/schemas/1.18.0" + stringValue: c347e316-1bab-4b4d-bc37-4f526fca19a4 + schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: - - description: How many times the container has restarted in the recent past. This value is pulled directly from the K8s API and the value can go indefinitely high and be reset to 0 at any time depending on how your kubelet is configured to prune dead containers. It is best to not depend too much on the exact value but rather look at it as either == 0, in which case you can conclude there were no restarts in the recent past, or > 0, in which case you can conclude there were restarts in the recent past, and not try and analyze the value beyond that. + - description: Current phase of the pod (1 - Pending, 2 - Running, 3 - Succeeded, 4 - Failed, 5 - Unknown) gauge: dataPoints: - - asInt: "0" - timeUnixNano: "1686772769034865545" - name: k8s.container.restarts - unit: "{restart}" - - description: Whether a container has passed its readiness probe (0 for no, 1 for yes) - gauge: - dataPoints: - - asInt: "1" - timeUnixNano: "1686772769034865545" - name: k8s.container.ready - unit: "" - - description: Resource requested for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details - gauge: - dataPoints: - - asDouble: 0.1 - timeUnixNano: "1686772769034865545" - name: k8s.container.cpu_request - unit: "{cpu}" - - description: Resource requested for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details - gauge: - dataPoints: - - asInt: "52428800" - timeUnixNano: "1686772769034865545" - name: k8s.container.memory_request - unit: "By" - - description: Maximum resource limit set for the container. 
See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details + - asInt: "2" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.pod.phase + scope: + name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver + version: latest + - resource: + attributes: + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: kube-scheduler-kind-control-plane + - key: k8s.pod.uid + value: + stringValue: 991bbf5d-d6b9-4e33-8954-2a5f3505ff2d + schemaUrl: https://opentelemetry.io/schemas/1.18.0 + scopeMetrics: + - metrics: + - description: Current phase of the pod (1 - Pending, 2 - Running, 3 - Succeeded, 4 - Failed, 5 - Unknown) gauge: dataPoints: - - asDouble: 0.1 - timeUnixNano: "1686772769034865545" - name: k8s.container.cpu_limit - unit: "{cpu}" - - description: Maximum resource limit set for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details + - asInt: "2" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.pod.phase + scope: + name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver + version: latest + - resource: + attributes: + - key: k8s.namespace.name + value: + stringValue: local-path-storage + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: local-path-provisioner-988d74bc-c2wx7 + - key: k8s.pod.uid + value: + stringValue: 1169e7ae-031e-4535-bb94-aee23b0b7df3 + schemaUrl: https://opentelemetry.io/schemas/1.18.0 + scopeMetrics: + - metrics: + - description: Current phase of the pod (1 - Pending, 2 - Running, 3 - Succeeded, 4 - Failed, 5 - Unknown) gauge: dataPoints: - - asInt: "52428800" - timeUnixNano: "1686772769034865545" - name: k8s.container.memory_limit - unit: "By" + - asInt: "2" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.pod.phase scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver version: latest @@ -809,52 +1096,93 @@ resourceMetrics: attributes: - key: container.id value: - stringValue: 1a5b9c371c8a7c5d8b0e56a82395aeee88523b1e2d96f17b4a6ae22bf11936bb + stringValue: 10c9bec31ac94fc58e65ce5ed809455727eee9daae8ea80668990e848a7e7da0 - key: container.image.name value: - stringValue: registry.k8s.io/kube-apiserver-amd64 + stringValue: docker.io/library/alpine - key: container.image.tag value: - stringValue: v1.25.3 + stringValue: latest - key: k8s.container.name value: - stringValue: kube-apiserver + stringValue: alpine - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: default - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kube-apiserver-kind-control-plane + stringValue: test-k8scluster-receiver-cronjob-28839771-llccr - key: k8s.pod.uid value: - stringValue: 4ce29152-4749-43a7-89b4-b8265bf35b09 - schemaUrl: "https://opentelemetry.io/schemas/1.18.0" + stringValue: 0c2351b3-842c-4632-95c2-e7b061128a98 + schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: + - description: Whether a container has passed its readiness probe (0 for no, 1 for yes) + gauge: + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.container.ready - description: How many times the container has 
restarted in the recent past. This value is pulled directly from the K8s API and the value can go indefinitely high and be reset to 0 at any time depending on how your kubelet is configured to prune dead containers. It is best to not depend too much on the exact value but rather look at it as either == 0, in which case you can conclude there were no restarts in the recent past, or > 0, in which case you can conclude there were restarts in the recent past, and not try and analyze the value beyond that. gauge: dataPoints: - asInt: "0" - timeUnixNano: "1686772769034865545" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" name: k8s.container.restarts - unit: "{restart}" + unit: '{restart}' + scope: + name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver + version: latest + - resource: + attributes: + - key: container.id + value: + stringValue: 1f493fa217d539d5b74ffc4579e887f904f630d320105a2b83a987105342ae80 + - key: container.image.name + value: + stringValue: registry.k8s.io/kube-proxy-arm64 + - key: container.image.tag + value: + stringValue: v1.30.0 + - key: k8s.container.name + value: + stringValue: kube-proxy + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: kube-proxy-kktz6 + - key: k8s.pod.uid + value: + stringValue: c347e316-1bab-4b4d-bc37-4f526fca19a4 + schemaUrl: https://opentelemetry.io/schemas/1.18.0 + scopeMetrics: + - metrics: - description: Whether a container has passed its readiness probe (0 for no, 1 for yes) gauge: dataPoints: - asInt: "1" - timeUnixNano: "1686772769034865545" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" name: k8s.container.ready - unit: "" - - description: Resource requested for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details + - description: How many times the container has restarted in the recent past. This value is pulled directly from the K8s API and the value can go indefinitely high and be reset to 0 at any time depending on how your kubelet is configured to prune dead containers. It is best to not depend too much on the exact value but rather look at it as either == 0, in which case you can conclude there were no restarts in the recent past, or > 0, in which case you can conclude there were restarts in the recent past, and not try and analyze the value beyond that. 
gauge: dataPoints: - - asDouble: 0.25 - timeUnixNano: "1686772769034865545" - name: k8s.container.cpu_request - unit: "{cpu}" + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.container.restarts + unit: '{restart}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver version: latest @@ -862,52 +1190,93 @@ resourceMetrics: attributes: - key: container.id value: - stringValue: 2e506922310bbf1ffb8dbbf56c04e540306f272b794d89ffbe776fe5e2fc148e + stringValue: 2cb1cb272a301a00f50020c3e4751bfa9a281496a6dc35f02a5546451e894e93 - key: container.image.name value: - stringValue: registry.k8s.io/kube-scheduler-amd64 + stringValue: docker.io/library/nginx - key: container.image.tag value: - stringValue: v1.25.3 + stringValue: latest - key: k8s.container.name value: - stringValue: kube-scheduler + stringValue: nginx - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: default - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kube-scheduler-kind-control-plane + stringValue: test-k8scluster-receiver-statefulset-0 - key: k8s.pod.uid value: - stringValue: d966df8b-e9d3-41d5-9b25-6c1a5ec9d3dc - schemaUrl: "https://opentelemetry.io/schemas/1.18.0" + stringValue: f1ea5486-77b7-41c6-a3be-d03650011801 + schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: + - description: Whether a container has passed its readiness probe (0 for no, 1 for yes) + gauge: + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.container.ready - description: How many times the container has restarted in the recent past. This value is pulled directly from the K8s API and the value can go indefinitely high and be reset to 0 at any time depending on how your kubelet is configured to prune dead containers. It is best to not depend too much on the exact value but rather look at it as either == 0, in which case you can conclude there were no restarts in the recent past, or > 0, in which case you can conclude there were restarts in the recent past, and not try and analyze the value beyond that. gauge: dataPoints: - asInt: "0" - timeUnixNano: "1686772769034865545" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" name: k8s.container.restarts - unit: "{restart}" + unit: '{restart}' + scope: + name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver + version: latest + - resource: + attributes: + - key: container.id + value: + stringValue: 567cd0ad83d68987dfb4dbffd056732b25bd2fc89e912605c16a5d1a4cd2b54c + - key: container.image.name + value: + stringValue: docker.io/library/alpine + - key: container.image.tag + value: + stringValue: latest + - key: k8s.container.name + value: + stringValue: alpine + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: test-k8scluster-receiver-job-bzjrh + - key: k8s.pod.uid + value: + stringValue: 7e8bdace-4bce-4750-bd8c-d7359bb3e56b + schemaUrl: https://opentelemetry.io/schemas/1.18.0 + scopeMetrics: + - metrics: - description: Whether a container has passed its readiness probe (0 for no, 1 for yes) gauge: dataPoints: - asInt: "1" - timeUnixNano: "1686772769034865545" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" name: k8s.container.ready - unit: "" - - description: Resource requested for the container. 
See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details + - description: How many times the container has restarted in the recent past. This value is pulled directly from the K8s API and the value can go indefinitely high and be reset to 0 at any time depending on how your kubelet is configured to prune dead containers. It is best to not depend too much on the exact value but rather look at it as either == 0, in which case you can conclude there were no restarts in the recent past, or > 0, in which case you can conclude there were restarts in the recent past, and not try and analyze the value beyond that. gauge: dataPoints: - - asDouble: 0.1 - timeUnixNano: "1686772769034865545" - name: k8s.container.cpu_request - unit: "{cpu}" + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.container.restarts + unit: '{restart}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver version: latest @@ -915,16 +1284,16 @@ resourceMetrics: attributes: - key: container.id value: - stringValue: 3baa03c525095d74e7ee24a5c4c42a4680b131f9b8a68f5e2e853ae569d97e4c + stringValue: 6af7be5c276ef225d046ad0de442ee450c39122a12991f9da82c9629f949967b - key: container.image.name value: - stringValue: registry.k8s.io/kube-controller-manager-amd64 + stringValue: registry.k8s.io/coredns/coredns - key: container.image.tag value: - stringValue: v1.25.3 + stringValue: v1.11.1 - key: k8s.container.name value: - stringValue: kube-controller-manager + stringValue: coredns - key: k8s.namespace.name value: stringValue: kube-system @@ -933,34 +1302,52 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kube-controller-manager-kind-control-plane + stringValue: coredns-7db6d8ff4d-5kh78 - key: k8s.pod.uid value: - stringValue: 5ebe0d65-e661-4e6b-a053-a3a22adec893 - schemaUrl: "https://opentelemetry.io/schemas/1.18.0" + stringValue: 2c5b60e0-a01e-4312-8818-d85f94ab841e + schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: - - description: How many times the container has restarted in the recent past. This value is pulled directly from the K8s API and the value can go indefinitely high and be reset to 0 at any time depending on how your kubelet is configured to prune dead containers. It is best to not depend too much on the exact value but rather look at it as either == 0, in which case you can conclude there were no restarts in the recent past, or > 0, in which case you can conclude there were restarts in the recent past, and not try and analyze the value beyond that. + - description: Resource requested for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details gauge: dataPoints: - - asInt: "0" - timeUnixNano: "1686772769034865545" - name: k8s.container.restarts - unit: "{restart}" + - asDouble: 0.1 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.container.cpu_request + unit: '{cpu}' + - description: Maximum resource limit set for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details + gauge: + dataPoints: + - asInt: "178257920" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.container.memory_limit + unit: By + - description: Resource requested for the container. 
See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details + gauge: + dataPoints: + - asInt: "73400320" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.container.memory_request + unit: By - description: Whether a container has passed its readiness probe (0 for no, 1 for yes) gauge: dataPoints: - asInt: "1" - timeUnixNano: "1686772769034865545" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" name: k8s.container.ready - unit: "" - - description: Resource requested for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details + - description: How many times the container has restarted in the recent past. This value is pulled directly from the K8s API and the value can go indefinitely high and be reset to 0 at any time depending on how your kubelet is configured to prune dead containers. It is best to not depend too much on the exact value but rather look at it as either == 0, in which case you can conclude there were no restarts in the recent past, or > 0, in which case you can conclude there were restarts in the recent past, and not try and analyze the value beyond that. gauge: dataPoints: - - asDouble: 0.2 - timeUnixNano: "1686772769034865545" - name: k8s.container.cpu_request - unit: "{cpu}" + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.container.restarts + unit: '{restart}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver version: latest @@ -968,16 +1355,16 @@ resourceMetrics: attributes: - key: container.id value: - stringValue: 5cfead143bc88798f93fae8e05586b1191771477030fe89ed7bca288bb82c0aa + stringValue: 7349de0618283fb11a957febc6689a0fbbfd9b52af1106bb3608bc4278a27ecf - key: container.image.name value: - stringValue: registry.k8s.io/kube-proxy-amd64 + stringValue: registry.k8s.io/kube-scheduler-arm64 - key: container.image.tag value: - stringValue: v1.25.3 + stringValue: v1.30.0 - key: k8s.container.name value: - stringValue: kube-proxy + stringValue: kube-scheduler - key: k8s.namespace.name value: stringValue: kube-system @@ -986,27 +1373,36 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kube-proxy-twxhf + stringValue: kube-scheduler-kind-control-plane - key: k8s.pod.uid value: - stringValue: 38e3c8d5-0c3e-465f-8a79-4117dbcd7607 - schemaUrl: "https://opentelemetry.io/schemas/1.18.0" + stringValue: 991bbf5d-d6b9-4e33-8954-2a5f3505ff2d + schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: - - description: How many times the container has restarted in the recent past. This value is pulled directly from the K8s API and the value can go indefinitely high and be reset to 0 at any time depending on how your kubelet is configured to prune dead containers. It is best to not depend too much on the exact value but rather look at it as either == 0, in which case you can conclude there were no restarts in the recent past, or > 0, in which case you can conclude there were restarts in the recent past, and not try and analyze the value beyond that. + - description: Resource requested for the container. 
See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details gauge: dataPoints: - - asInt: "0" - timeUnixNano: "1686772769034865545" - name: k8s.container.restarts - unit: "{restart}" + - asDouble: 0.1 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.container.cpu_request + unit: '{cpu}' - description: Whether a container has passed its readiness probe (0 for no, 1 for yes) gauge: dataPoints: - asInt: "1" - timeUnixNano: "1686772769034865545" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" name: k8s.container.ready - unit: "" + - description: How many times the container has restarted in the recent past. This value is pulled directly from the K8s API and the value can go indefinitely high and be reset to 0 at any time depending on how your kubelet is configured to prune dead containers. It is best to not depend too much on the exact value but rather look at it as either == 0, in which case you can conclude there were no restarts in the recent past, or > 0, in which case you can conclude there were restarts in the recent past, and not try and analyze the value beyond that. + gauge: + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.container.restarts + unit: '{restart}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver version: latest @@ -1014,16 +1410,16 @@ resourceMetrics: attributes: - key: container.id value: - stringValue: 6963960c145745e079a94ccf5d9775339ac8b3ba42209d452597c145c5ddb4d4 + stringValue: 9c70b20960c36ddb400607a354058cd7525ec491251379c5aa84c359c5d518d7 - key: container.image.name value: - stringValue: registry.k8s.io/coredns/coredns + stringValue: registry.k8s.io/etcd - key: container.image.tag value: - stringValue: v1.9.3 + stringValue: 3.5.12-0 - key: k8s.container.name value: - stringValue: coredns + stringValue: etcd - key: k8s.namespace.name value: stringValue: kube-system @@ -1032,48 +1428,115 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: coredns-565d847f94-kt4s4 + stringValue: etcd-kind-control-plane - key: k8s.pod.uid value: - stringValue: ebd4da01-4a19-4ed8-bb2b-a75fa9c66160 - schemaUrl: "https://opentelemetry.io/schemas/1.18.0" + stringValue: 40e8f13b-bec6-4dae-98d9-fd86939dfc4c + schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: - - description: How many times the container has restarted in the recent past. This value is pulled directly from the K8s API and the value can go indefinitely high and be reset to 0 at any time depending on how your kubelet is configured to prune dead containers. It is best to not depend too much on the exact value but rather look at it as either == 0, in which case you can conclude there were no restarts in the recent past, or > 0, in which case you can conclude there were restarts in the recent past, and not try and analyze the value beyond that. + - description: Resource requested for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details gauge: dataPoints: - - asInt: "0" - timeUnixNano: "1686772769034865545" - name: k8s.container.restarts - unit: "{restart}" + - asDouble: 0.1 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.container.cpu_request + unit: '{cpu}' + - description: Resource requested for the container. 
See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details + gauge: + dataPoints: + - asInt: "104857600" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.container.memory_request + unit: By - description: Whether a container has passed its readiness probe (0 for no, 1 for yes) gauge: dataPoints: - asInt: "1" - timeUnixNano: "1686772769034865545" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" name: k8s.container.ready - unit: "" + - description: How many times the container has restarted in the recent past. This value is pulled directly from the K8s API and the value can go indefinitely high and be reset to 0 at any time depending on how your kubelet is configured to prune dead containers. It is best to not depend too much on the exact value but rather look at it as either == 0, in which case you can conclude there were no restarts in the recent past, or > 0, in which case you can conclude there were restarts in the recent past, and not try and analyze the value beyond that. + gauge: + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.container.restarts + unit: '{restart}' + scope: + name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver + version: latest + - resource: + attributes: + - key: container.id + value: + stringValue: 9c9e2d8cc660d21018432215b93bd4b9f26fbb0b0dfe71dca8c7089997cce23e + - key: container.image.name + value: + stringValue: registry.k8s.io/coredns/coredns + - key: container.image.tag + value: + stringValue: v1.11.1 + - key: k8s.container.name + value: + stringValue: coredns + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: coredns-7db6d8ff4d-p89tc + - key: k8s.pod.uid + value: + stringValue: f3494708-493a-4f0f-965c-dcedfdca253f + schemaUrl: https://opentelemetry.io/schemas/1.18.0 + scopeMetrics: + - metrics: - description: Resource requested for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details gauge: dataPoints: - asDouble: 0.1 - timeUnixNano: "1686772769034865545" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" name: k8s.container.cpu_request - unit: "{cpu}" + unit: '{cpu}' + - description: Maximum resource limit set for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details + gauge: + dataPoints: + - asInt: "178257920" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.container.memory_limit + unit: By - description: Resource requested for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details gauge: dataPoints: - asInt: "73400320" - timeUnixNano: "1686772769034865545" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" name: k8s.container.memory_request - unit: "By" - - description: Maximum resource limit set for the container. 
See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details + unit: By + - description: Whether a container has passed its readiness probe (0 for no, 1 for yes) gauge: dataPoints: - - asInt: "178257920" - timeUnixNano: "1686772769034865545" - name: k8s.container.memory_limit - unit: "By" + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.container.ready + - description: How many times the container has restarted in the recent past. This value is pulled directly from the K8s API and the value can go indefinitely high and be reset to 0 at any time depending on how your kubelet is configured to prune dead containers. It is best to not depend too much on the exact value but rather look at it as either == 0, in which case you can conclude there were no restarts in the recent past, or > 0, in which case you can conclude there were restarts in the recent past, and not try and analyze the value beyond that. + gauge: + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.container.restarts + unit: '{restart}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver version: latest @@ -1081,73 +1544,156 @@ resourceMetrics: attributes: - key: container.id value: - stringValue: 7c34e046e14a5c952a3fdc5ba539fbb65b1f56192d6c320f69e28563afede0fd + stringValue: acef2130e48fde6137e919c9eebc876435ff8a6a22031754fc1dde00cb6dae92 - key: container.image.name value: - stringValue: docker.io/library/otelcontribcol + stringValue: docker.io/kindest/local-path-provisioner - key: container.image.tag value: - stringValue: latest + stringValue: v20240202-8f1494ea - key: k8s.container.name value: - stringValue: opentelemetry-collector + stringValue: local-path-provisioner - key: k8s.namespace.name value: - stringValue: default + stringValue: local-path-storage - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: otelcol-5ffb893c-5459b589fd-lrbpq + stringValue: local-path-provisioner-988d74bc-c2wx7 - key: k8s.pod.uid value: - stringValue: 5e4d1b29-35e5-4ff6-9779-b02921adcace - schemaUrl: "https://opentelemetry.io/schemas/1.18.0" + stringValue: 1169e7ae-031e-4535-bb94-aee23b0b7df3 + schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: + - description: Whether a container has passed its readiness probe (0 for no, 1 for yes) + gauge: + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.container.ready - description: How many times the container has restarted in the recent past. This value is pulled directly from the K8s API and the value can go indefinitely high and be reset to 0 at any time depending on how your kubelet is configured to prune dead containers. It is best to not depend too much on the exact value but rather look at it as either == 0, in which case you can conclude there were no restarts in the recent past, or > 0, in which case you can conclude there were restarts in the recent past, and not try and analyze the value beyond that. 
gauge: dataPoints: - asInt: "0" - timeUnixNano: "1686772769034865545" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" name: k8s.container.restarts - unit: "{restart}" + unit: '{restart}' + scope: + name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver + version: latest + - resource: + attributes: + - key: container.id + value: + stringValue: bd25536854ec1e582f0bb3ac0f79ce761ae97317d9ba1f7b256f3e833bcba862 + - key: container.image.name + value: + stringValue: registry.k8s.io/kube-apiserver-arm64 + - key: container.image.tag + value: + stringValue: v1.30.0 + - key: k8s.container.name + value: + stringValue: kube-apiserver + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: kube-apiserver-kind-control-plane + - key: k8s.pod.uid + value: + stringValue: d2032a9e-8c7c-4d9c-bbcb-526bd1a7b4f7 + schemaUrl: https://opentelemetry.io/schemas/1.18.0 + scopeMetrics: + - metrics: + - description: Resource requested for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details + gauge: + dataPoints: + - asDouble: 0.25 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.container.cpu_request + unit: '{cpu}' - description: Whether a container has passed its readiness probe (0 for no, 1 for yes) gauge: dataPoints: - asInt: "1" - timeUnixNano: "1686772769034865545" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" name: k8s.container.ready - unit: "" - - description: Resource requested for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details + - description: How many times the container has restarted in the recent past. This value is pulled directly from the K8s API and the value can go indefinitely high and be reset to 0 at any time depending on how your kubelet is configured to prune dead containers. It is best to not depend too much on the exact value but rather look at it as either == 0, in which case you can conclude there were no restarts in the recent past, or > 0, in which case you can conclude there were restarts in the recent past, and not try and analyze the value beyond that. gauge: dataPoints: - - asInt: "268435456" - timeUnixNano: "1686772769034865545" - name: k8s.container.memory_request - unit: "By" + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.container.restarts + unit: '{restart}' + scope: + name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver + version: latest + - resource: + attributes: + - key: container.id + value: + stringValue: cc67e9bcb82cbeed83bc8dec9cf2b0c7915d921e793efb0d21da5225dfeb907d + - key: container.image.name + value: + stringValue: registry.k8s.io/kube-controller-manager-arm64 + - key: container.image.tag + value: + stringValue: v1.30.0 + - key: k8s.container.name + value: + stringValue: kube-controller-manager + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: kube-controller-manager-kind-control-plane + - key: k8s.pod.uid + value: + stringValue: e3e6d44a-5bc6-4687-85f1-37eb42c42c05 + schemaUrl: https://opentelemetry.io/schemas/1.18.0 + scopeMetrics: + - metrics: - description: Resource requested for the container. 
See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details gauge: dataPoints: - - asDouble: 0.128 - timeUnixNano: "1686772769034865545" + - asDouble: 0.2 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" name: k8s.container.cpu_request - unit: "{cpu}" - - description: Maximum resource limit set for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details + unit: '{cpu}' + - description: Whether a container has passed its readiness probe (0 for no, 1 for yes) gauge: dataPoints: - - asDouble: 0.128 - timeUnixNano: "1686772769034865545" - name: k8s.container.cpu_limit - unit: "{cpu}" - - description: Maximum resource limit set for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.container.ready + - description: How many times the container has restarted in the recent past. This value is pulled directly from the K8s API and the value can go indefinitely high and be reset to 0 at any time depending on how your kubelet is configured to prune dead containers. It is best to not depend too much on the exact value but rather look at it as either == 0, in which case you can conclude there were no restarts in the recent past, or > 0, in which case you can conclude there were restarts in the recent past, and not try and analyze the value beyond that. gauge: dataPoints: - - asInt: "268435456" - timeUnixNano: "1686772769034865545" - name: k8s.container.memory_limit - unit: "By" + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.container.restarts + unit: '{restart}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver version: latest @@ -1155,45 +1701,78 @@ resourceMetrics: attributes: - key: container.id value: - stringValue: cadc2e45454bec4fbe1bec28ab5ba391be414e20dbd927745e4350b728409c50 + stringValue: e14e6f08e774618b74202d19334266e4c65c1feb0b26ef7e8b7807644754f730 - key: container.image.name value: - stringValue: docker.io/kindest/local-path-provisioner + stringValue: docker.io/library/otelcontribcol - key: container.image.tag value: - stringValue: v0.0.22-kind.0 + stringValue: latest - key: k8s.container.name value: - stringValue: local-path-provisioner + stringValue: opentelemetry-collector - key: k8s.namespace.name value: - stringValue: local-path-storage + stringValue: default - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: local-path-provisioner-684f458cdd-v726j + stringValue: otelcol-786b94f3-67cf69944f-6zv25 - key: k8s.pod.uid value: - stringValue: 22a22d93-0ec2-4c90-91b1-29a0b3ea9173 - schemaUrl: "https://opentelemetry.io/schemas/1.18.0" + stringValue: 1fb8be2b-ae32-41c2-a172-e6cb9beb7c37 + schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: - - description: How many times the container has restarted in the recent past. This value is pulled directly from the K8s API and the value can go indefinitely high and be reset to 0 at any time depending on how your kubelet is configured to prune dead containers. 
It is best to not depend too much on the exact value but rather look at it as either == 0, in which case you can conclude there were no restarts in the recent past, or > 0, in which case you can conclude there were restarts in the recent past, and not try and analyze the value beyond that. + - description: Maximum resource limit set for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details gauge: dataPoints: - - asInt: "0" - timeUnixNano: "1686772769034865545" - name: k8s.container.restarts - unit: "{restart}" + - asDouble: 0.128 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.container.cpu_limit + unit: '{cpu}' + - description: Resource requested for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details + gauge: + dataPoints: + - asDouble: 0.128 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.container.cpu_request + unit: '{cpu}' + - description: Maximum resource limit set for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details + gauge: + dataPoints: + - asInt: "268435456" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.container.memory_limit + unit: By + - description: Resource requested for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details + gauge: + dataPoints: + - asInt: "268435456" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.container.memory_request + unit: By - description: Whether a container has passed its readiness probe (0 for no, 1 for yes) gauge: dataPoints: - asInt: "1" - timeUnixNano: "1686772769034865545" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" name: k8s.container.ready - unit: "" + - description: How many times the container has restarted in the recent past. This value is pulled directly from the K8s API and the value can go indefinitely high and be reset to 0 at any time depending on how your kubelet is configured to prune dead containers. It is best to not depend too much on the exact value but rather look at it as either == 0, in which case you can conclude there were no restarts in the recent past, or > 0, in which case you can conclude there were restarts in the recent past, and not try and analyze the value beyond that. 
+ gauge: + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.container.restarts + unit: '{restart}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver version: latest @@ -1201,16 +1780,16 @@ resourceMetrics: attributes: - key: container.id value: - stringValue: d174ef52b51e0896b08fb5128589c747f4fbe112bcd6aaced727783fe79d8d2f + stringValue: ed3ab86077c3de40d6d9125bf4f25dbf1734c58c9c3a864e5ccc1ce3bcfc1d30 - key: container.image.name value: - stringValue: registry.k8s.io/coredns/coredns + stringValue: docker.io/kindest/kindnetd - key: container.image.tag value: - stringValue: v1.9.3 + stringValue: v20240202-8f1494ea - key: k8s.container.name value: - stringValue: coredns + stringValue: kindnet-cni - key: k8s.namespace.name value: stringValue: kube-system @@ -1219,48 +1798,107 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: coredns-565d847f94-v6kmv + stringValue: kindnet-qwzhw - key: k8s.pod.uid value: - stringValue: 2c672907-5d69-4f91-85e0-f1792164cadc - schemaUrl: "https://opentelemetry.io/schemas/1.18.0" + stringValue: 955e1f8c-2fe3-4a1d-85e6-31ff7410dc00 + schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: - - description: How many times the container has restarted in the recent past. This value is pulled directly from the K8s API and the value can go indefinitely high and be reset to 0 at any time depending on how your kubelet is configured to prune dead containers. It is best to not depend too much on the exact value but rather look at it as either == 0, in which case you can conclude there were no restarts in the recent past, or > 0, in which case you can conclude there were restarts in the recent past, and not try and analyze the value beyond that. - gauge: - dataPoints: - - asInt: "0" - timeUnixNano: "1686772769034865545" - name: k8s.container.restarts - unit: "{restart}" - - description: Whether a container has passed its readiness probe (0 for no, 1 for yes) + - description: Maximum resource limit set for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details gauge: dataPoints: - - asInt: "1" - timeUnixNano: "1686772769034865545" - name: k8s.container.ready - unit: "" + - asDouble: 0.1 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.container.cpu_limit + unit: '{cpu}' - description: Resource requested for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details gauge: dataPoints: - asDouble: 0.1 - timeUnixNano: "1686772769034865545" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" name: k8s.container.cpu_request - unit: "{cpu}" + unit: '{cpu}' + - description: Maximum resource limit set for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details + gauge: + dataPoints: + - asInt: "52428800" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.container.memory_limit + unit: By - description: Resource requested for the container. 
See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details gauge: dataPoints: - - asInt: "73400320" - timeUnixNano: "1686772769034865545" + - asInt: "52428800" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" name: k8s.container.memory_request - unit: "By" - - description: Maximum resource limit set for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details + unit: By + - description: Whether a container has passed its readiness probe (0 for no, 1 for yes) gauge: dataPoints: - - asInt: "178257920" - timeUnixNano: "1686772769034865545" - name: k8s.container.memory_limit - unit: "By" + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.container.ready + - description: How many times the container has restarted in the recent past. This value is pulled directly from the K8s API and the value can go indefinitely high and be reset to 0 at any time depending on how your kubelet is configured to prune dead containers. It is best to not depend too much on the exact value but rather look at it as either == 0, in which case you can conclude there were no restarts in the recent past, or > 0, in which case you can conclude there were restarts in the recent past, and not try and analyze the value beyond that. + gauge: + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.container.restarts + unit: '{restart}' + scope: + name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver + version: latest + - resource: + attributes: + - key: container.id + value: + stringValue: f01b9f5343f9ba34db396889c75d6128dace385b8f0c7aed2d39866ddd0df826 + - key: container.image.name + value: + stringValue: docker.io/library/alpine + - key: container.image.tag + value: + stringValue: latest + - key: k8s.container.name + value: + stringValue: alpine + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: test-k8scluster-receiver-cronjob-28839770-9pp7g + - key: k8s.pod.uid + value: + stringValue: e388cfa8-06c3-47b6-a7a6-113d7cdda849 + schemaUrl: https://opentelemetry.io/schemas/1.18.0 + scopeMetrics: + - metrics: + - description: Whether a container has passed its readiness probe (0 for no, 1 for yes) + gauge: + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.container.ready + - description: How many times the container has restarted in the recent past. This value is pulled directly from the K8s API and the value can go indefinitely high and be reset to 0 at any time depending on how your kubelet is configured to prune dead containers. It is best to not depend too much on the exact value but rather look at it as either == 0, in which case you can conclude there were no restarts in the recent past, or > 0, in which case you can conclude there were restarts in the recent past, and not try and analyze the value beyond that. 
+ gauge: + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.container.restarts + unit: '{restart}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver version: latest diff --git a/receiver/k8sclusterreceiver/testdata/e2e/testobjects/cronjob.yaml b/receiver/k8sclusterreceiver/testdata/e2e/testobjects/cronjob.yaml new file mode 100644 index 000000000000..706bc90f26df --- /dev/null +++ b/receiver/k8sclusterreceiver/testdata/e2e/testobjects/cronjob.yaml @@ -0,0 +1,19 @@ +kind: CronJob +apiVersion: batch/v1 +metadata: + name: test-k8scluster-receiver-cronjob + namespace: default +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + containers: + - name: alpine + image: alpine + args: + - /bin/sh + - -c + - "echo Running; sleep 120" + restartPolicy: OnFailure diff --git a/receiver/k8sclusterreceiver/testdata/e2e/testobjects/hpa.yaml b/receiver/k8sclusterreceiver/testdata/e2e/testobjects/hpa.yaml new file mode 100644 index 000000000000..7730ec2abb51 --- /dev/null +++ b/receiver/k8sclusterreceiver/testdata/e2e/testobjects/hpa.yaml @@ -0,0 +1,13 @@ +apiVersion: autoscaling/v1 +kind: HorizontalPodAutoscaler +metadata: + name: test-k8scluster-receiver-hpa + namespace: default +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: StatefulSet + name: test-k8scluster-receiver-statefulset + minReplicas: 1 + maxReplicas: 1 + targetCPUUtilizationPercentage: 50 diff --git a/receiver/k8sclusterreceiver/testdata/e2e/testobjects/job.yaml b/receiver/k8sclusterreceiver/testdata/e2e/testobjects/job.yaml new file mode 100644 index 000000000000..b0851afedf6e --- /dev/null +++ b/receiver/k8sclusterreceiver/testdata/e2e/testobjects/job.yaml @@ -0,0 +1,17 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: test-k8scluster-receiver-job + namespace: default +spec: + template: + spec: + containers: + - name: alpine + image: alpine + args: + - /bin/sh + - -c + - "echo Hello from Job; sleep 600" + restartPolicy: Never + backoffLimit: 3 diff --git a/receiver/k8sclusterreceiver/testdata/e2e/testobjects/statefulset.yaml b/receiver/k8sclusterreceiver/testdata/e2e/testobjects/statefulset.yaml new file mode 100644 index 000000000000..52eb7110c68c --- /dev/null +++ b/receiver/k8sclusterreceiver/testdata/e2e/testobjects/statefulset.yaml @@ -0,0 +1,29 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-k8scluster-receiver-statefulset + namespace: default +spec: + serviceName: "test-k8scluster-receiver-statefulset-service" + replicas: 1 + selector: + matchLabels: + app: test-k8scluster-receiver-statefulset + template: + metadata: + labels: + app: test-k8scluster-receiver-statefulset + spec: + containers: + - name: nginx + image: nginx + ports: + - containerPort: 80 + volumeClaimTemplates: + - metadata: + name: test-k8scluster-receiver-statefulset-pvc + spec: + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 100Mi From 3cbc9f22310ffe1655586232016dbdd1bb2cb5d6 Mon Sep 17 00:00:00 2001 From: Tyler Helmuth <12352919+TylerHelmuth@users.noreply.github.com> Date: Tue, 5 Nov 2024 18:05:09 -0700 Subject: [PATCH 6/9] [chore] Pin markdown-link-check version to 3.12.2 (#36223) #### Description the changelog workflow has started failing, likely due to regressions in the tool's latest release. 
Pinning until fixed #### Link to tracking issue probably related to - https://github.com/tcort/markdown-link-check/issues/369 - https://github.com/tcort/markdown-link-check/issues/370 --- .github/workflows/changelog.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/changelog.yml b/.github/workflows/changelog.yml index 468dbb930d15..fe0c50398f0f 100644 --- a/.github/workflows/changelog.yml +++ b/.github/workflows/changelog.yml @@ -84,7 +84,7 @@ jobs: run: make chlog-preview > changelog_preview.md - name: Install markdown-link-check if: ${{ !contains(github.event.pull_request.labels.*.name, 'dependencies') && !contains(github.event.pull_request.labels.*.name, 'Skip Changelog') && !contains(github.event.pull_request.title, '[chore]')}} - run: npm install -g markdown-link-check + run: npm install -g markdown-link-check@3.12.2 - name: Run markdown-link-check if: ${{ !contains(github.event.pull_request.labels.*.name, 'dependencies') && !contains(github.event.pull_request.labels.*.name, 'Skip Changelog') && !contains(github.event.pull_request.title, '[chore]')}} run: | From 106eda95ef2e410c114ff0fc7ae4e71388d0392c Mon Sep 17 00:00:00 2001 From: Antoine Toulme Date: Tue, 5 Nov 2024 17:25:10 -0800 Subject: [PATCH 7/9] [chore] fix codeowners (#36224) Co-authored-by: Tyler Helmuth <12352919+TylerHelmuth@users.noreply.github.com> --- .github/CODEOWNERS | 2 +- cmd/githubgen/allowlist.txt | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 3f75b9466ad6..57cf64645b42 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -105,7 +105,7 @@ extension/httpforwarderextension/ @open-teleme extension/jaegerremotesampling/ @open-telemetry/collector-contrib-approvers @yurishkuro @frzifus extension/oauth2clientauthextension/ @open-telemetry/collector-contrib-approvers @pavankrish123 @jpkrohling extension/observer/ @open-telemetry/collector-contrib-approvers @dmitryax -extension/observer/cfgardenobserver/ @open-telemetry/collector-contrib-approvers @crobert-1 @cemdk @tomasmota @m1rp @jriguera +extension/observer/cfgardenobserver/ @open-telemetry/collector-contrib-approvers @crobert-1 @cemdk @m1rp @jriguera extension/observer/dockerobserver/ @open-telemetry/collector-contrib-approvers @MovieStoreGuy extension/observer/ecsobserver/ @open-telemetry/collector-contrib-approvers @dmitryax extension/observer/hostobserver/ @open-telemetry/collector-contrib-approvers @MovieStoreGuy diff --git a/cmd/githubgen/allowlist.txt b/cmd/githubgen/allowlist.txt index 7a8183addc22..c9090977501f 100644 --- a/cmd/githubgen/allowlist.txt +++ b/cmd/githubgen/allowlist.txt @@ -18,7 +18,6 @@ zpzhuSplunk thmshmm galrose cemdk -tomasmota m1rp jriguera abhishek-at-cloudwerx From 07a7ca94b813b4e4d59043a1bbd01f0142f09713 Mon Sep 17 00:00:00 2001 From: Daniel Jaglowski Date: Tue, 5 Nov 2024 21:12:07 -0500 Subject: [PATCH 8/9] [connector/routing] Add ability to route metrics and traces by request context (#36143) --- .chloggen/routing-connector-by-request.yaml | 2 +- connector/routingconnector/README.md | 1 - connector/routingconnector/logs_test.go | 12 +- connector/routingconnector/metrics.go | 5 + connector/routingconnector/metrics_test.go | 218 +++++++++++++++----- connector/routingconnector/traces.go | 5 + connector/routingconnector/traces_test.go | 184 ++++++++++++++--- 7 files changed, 342 insertions(+), 85 deletions(-) diff --git a/.chloggen/routing-connector-by-request.yaml b/.chloggen/routing-connector-by-request.yaml index 
77cdfec98d48..0ef130b131ec 100644 --- a/.chloggen/routing-connector-by-request.yaml +++ b/.chloggen/routing-connector-by-request.yaml @@ -7,7 +7,7 @@ change_type: enhancement component: routingconnector # A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). -note: Add ability to route logs by request metadata. +note: Add ability to route by request metadata. # Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. issues: [19738] diff --git a/connector/routingconnector/README.md b/connector/routingconnector/README.md index e22814c4c7dd..b4f06f607a6b 100644 --- a/connector/routingconnector/README.md +++ b/connector/routingconnector/README.md @@ -44,7 +44,6 @@ The following settings are available: ### Limitations - The `match_once` setting is only supported when using the `resource` context. If any routes use `log` or `request` context, `match_once` must be set to `true`. -- The `request` context is only supported for logs at this time. - The `request` context requires use of the `condition` setting, and relies on a very limited grammar. Conditions must be in the form of `request["key"] == "value"` or `request["key"] != "value"`. (In the future, this grammar may be expanded to support more complex conditions.) ### Supported [OTTL] functions diff --git a/connector/routingconnector/logs_test.go b/connector/routingconnector/logs_test.go index 2fefc28b4b97..24747154c213 100644 --- a/connector/routingconnector/logs_test.go +++ b/connector/routingconnector/logs_test.go @@ -819,7 +819,7 @@ func TestLogsConnectorDetailed(t *testing.T) { ), }, { - name: "match_resource_then_logs", + name: "mixed/match_resource_then_logs", cfg: testConfig( withRoute("resource", isResourceA, idSink0), withRoute("log", isLogE, idSink1), @@ -831,7 +831,7 @@ func TestLogsConnectorDetailed(t *testing.T) { expectSinkD: plogutiltest.NewLogs("B", "CD", "F"), }, { - name: "match_logs_then_resource", + name: "mixed/match_logs_then_resource", cfg: testConfig( withRoute("log", isLogE, idSink0), withRoute("resource", isResourceB, idSink1), @@ -843,7 +843,7 @@ func TestLogsConnectorDetailed(t *testing.T) { expectSinkD: plogutiltest.NewLogs("A", "CD", "F"), }, { - name: "match_resource_then_grpc_request", + name: "mixed/match_resource_then_grpc_request", cfg: testConfig( withRoute("resource", isResourceA, idSink0), withRoute("request", isAcme, idSink1), @@ -856,7 +856,7 @@ func TestLogsConnectorDetailed(t *testing.T) { expectSinkD: plog.Logs{}, }, { - name: "match_logs_then_grpc_request", + name: "mixed/match_logs_then_grpc_request", cfg: testConfig( withRoute("log", isLogF, idSink0), withRoute("request", isAcme, idSink1), @@ -869,7 +869,7 @@ func TestLogsConnectorDetailed(t *testing.T) { expectSinkD: plog.Logs{}, }, { - name: "match_resource_then_http_request", + name: "mixed/match_resource_then_http_request", cfg: testConfig( withRoute("resource", isResourceA, idSink0), withRoute("request", isAcme, idSink1), @@ -882,7 +882,7 @@ func TestLogsConnectorDetailed(t *testing.T) { expectSinkD: plog.Logs{}, }, { - name: "match_logs_then_http_request", + name: "mixed/match_logs_then_http_request", cfg: testConfig( withRoute("log", isLogF, idSink0), withRoute("request", isAcme, idSink1), diff --git a/connector/routingconnector/metrics.go b/connector/routingconnector/metrics.go index 025a5bc95bb5..8f25c586bf71 100644 --- a/connector/routingconnector/metrics.go +++ b/connector/routingconnector/metrics.go @@ -74,6 +74,11 @@ 
func (c *metricsConnector) switchMetrics(ctx context.Context, md pmetric.Metrics route := c.router.routeSlice[i] matchedMetrics := pmetric.NewMetrics() switch route.statementContext { + case "request": + if route.requestCondition.matchRequest(ctx) { + groupAllMetrics(groups, route.consumer, md) + md = pmetric.NewMetrics() // all metrics have been routed + } case "", "resource": pmetricutil.MoveResourcesIf(md, matchedMetrics, func(rs pmetric.ResourceMetrics) bool { diff --git a/connector/routingconnector/metrics_test.go b/connector/routingconnector/metrics_test.go index 661d182c7760..0fba4eabc748 100644 --- a/connector/routingconnector/metrics_test.go +++ b/connector/routingconnector/metrics_test.go @@ -503,114 +503,233 @@ func TestMetricsConnectorDetailed(t *testing.T) { idSink1 := pipeline.NewIDWithName(pipeline.SignalMetrics, "1") idSinkD := pipeline.NewIDWithName(pipeline.SignalMetrics, "default") - isNotNil := `attributes["resourceName"] != nil` - isA := `attributes["resourceName"] == "resourceA"` - isB := `attributes["resourceName"] == "resourceB"` - isX := `attributes["resourceName"] == "resourceX"` - isY := `attributes["resourceName"] == "resourceY"` + isAcme := `request["X-Tenant"] == "acme"` + + isAnyResource := `attributes["resourceName"] != nil` + isResourceA := `attributes["resourceName"] == "resourceA"` + isResourceB := `attributes["resourceName"] == "resourceB"` + isResourceX := `attributes["resourceName"] == "resourceX"` + isResourceY := `attributes["resourceName"] == "resourceY"` testCases := []struct { name string cfg *Config + ctx context.Context input pmetric.Metrics expectSink0 pmetric.Metrics expectSink1 pmetric.Metrics expectSinkD pmetric.Metrics }{ { - name: "all_match_first_only", + name: "request/no_request_values", cfg: testConfig( - withRoute("resource", isNotNil, idSink0), - withRoute("resource", isY, idSink1), + withRoute("request", isAcme, idSink0), withDefault(idSinkD), ), - input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "FG"), - expectSink0: pmetricutiltest.NewMetrics("AB", "CD", "EF", "FG"), + ctx: context.Background(), + input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink0: pmetric.Metrics{}, + expectSink1: pmetric.Metrics{}, + expectSinkD: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + }, + { + name: "request/match_any_value", + cfg: testConfig( + withRoute("request", isAcme, idSink0), + withDefault(idSinkD), + ), + ctx: withGRPCMetadata( + withHTTPMetadata( + context.Background(), + map[string][]string{"X-Tenant": {"acme"}}, + ), + map[string]string{"X-Tenant": "notacme"}, + ), + input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink0: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), expectSink1: pmetric.Metrics{}, expectSinkD: pmetric.Metrics{}, }, { - name: "all_match_last_only", + name: "request/match_grpc_value", cfg: testConfig( - withRoute("resource", isX, idSink0), - withRoute("resource", isNotNil, idSink1), + withRoute("request", isAcme, idSink0), withDefault(idSinkD), ), - input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "FG"), + ctx: withGRPCMetadata(context.Background(), map[string]string{"X-Tenant": "acme"}), + input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink0: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink1: pmetric.Metrics{}, + expectSinkD: pmetric.Metrics{}, + }, + { + name: "request/match_no_grpc_value", + cfg: testConfig( + withRoute("request", isAcme, idSink0), + withDefault(idSinkD), + ), + ctx: withGRPCMetadata(context.Background(), 
map[string]string{"X-Tenant": "notacme"}), + input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), expectSink0: pmetric.Metrics{}, - expectSink1: pmetricutiltest.NewMetrics("AB", "CD", "EF", "FG"), + expectSink1: pmetric.Metrics{}, + expectSinkD: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + }, + { + name: "request/match_http_value", + cfg: testConfig( + withRoute("request", isAcme, idSink0), + withDefault(idSinkD), + ), + ctx: withHTTPMetadata(context.Background(), map[string][]string{"X-Tenant": {"acme"}}), + input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink0: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink1: pmetric.Metrics{}, expectSinkD: pmetric.Metrics{}, }, { - name: "all_match_only_once", + name: "request/match_http_value2", cfg: testConfig( - withRoute("resource", isNotNil, idSink0), - withRoute("resource", isA+" or "+isB, idSink1), + withRoute("request", isAcme, idSink0), withDefault(idSinkD), ), - input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "FG"), - expectSink0: pmetricutiltest.NewMetrics("AB", "CD", "EF", "FG"), + ctx: withHTTPMetadata(context.Background(), map[string][]string{"X-Tenant": {"notacme", "acme"}}), + input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink0: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), expectSink1: pmetric.Metrics{}, expectSinkD: pmetric.Metrics{}, }, { - name: "each_matches_one", + name: "request/match_no_http_value", + cfg: testConfig( + withRoute("request", isAcme, idSink0), + withDefault(idSinkD), + ), + ctx: withHTTPMetadata(context.Background(), map[string][]string{"X-Tenant": {"notacme"}}), + input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink0: pmetric.Metrics{}, + expectSink1: pmetric.Metrics{}, + expectSinkD: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + }, + { + name: "resource/all_match_first_only", cfg: testConfig( - withRoute("resource", isA, idSink0), - withRoute("resource", isB, idSink1), + withRoute("resource", isAnyResource, idSink0), + withRoute("resource", isResourceY, idSink1), withDefault(idSinkD), ), - input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "FG"), - expectSink0: pmetricutiltest.NewMetrics("A", "CD", "EF", "FG"), - expectSink1: pmetricutiltest.NewMetrics("B", "CD", "EF", "FG"), + input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink0: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink1: pmetric.Metrics{}, expectSinkD: pmetric.Metrics{}, }, { - name: "some_match_with_default", + name: "resource/all_match_last_only", cfg: testConfig( - withRoute("resource", isX, idSink0), - withRoute("resource", isB, idSink1), + withRoute("resource", isResourceX, idSink0), + withRoute("resource", isAnyResource, idSink1), withDefault(idSinkD), ), - input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "FG"), + input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), expectSink0: pmetric.Metrics{}, - expectSink1: pmetricutiltest.NewMetrics("B", "CD", "EF", "FG"), - expectSinkD: pmetricutiltest.NewMetrics("A", "CD", "EF", "FG"), + expectSink1: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSinkD: pmetric.Metrics{}, }, { - name: "some_match_without_default", + name: "resource/all_match_only_once", cfg: testConfig( - withRoute("resource", isX, idSink0), - withRoute("resource", isB, idSink1), + withRoute("resource", isAnyResource, idSink0), + withRoute("resource", isResourceA+" or "+isResourceB, idSink1), + withDefault(idSinkD), ), - input: pmetricutiltest.NewMetrics("AB", "CD", "EF", 
"FG"), + input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink0: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink1: pmetric.Metrics{}, + expectSinkD: pmetric.Metrics{}, + }, + { + name: "resource/each_matches_one", + cfg: testConfig( + withRoute("resource", isResourceA, idSink0), + withRoute("resource", isResourceB, idSink1), + withDefault(idSinkD), + ), + input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink0: pmetricutiltest.NewMetrics("A", "CD", "EF", "GH"), + expectSink1: pmetricutiltest.NewMetrics("B", "CD", "EF", "GH"), + expectSinkD: pmetric.Metrics{}, + }, + { + name: "resource/some_match_with_default", + cfg: testConfig( + withRoute("resource", isResourceX, idSink0), + withRoute("resource", isResourceB, idSink1), + withDefault(idSinkD), + ), + input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink0: pmetric.Metrics{}, + expectSink1: pmetricutiltest.NewMetrics("B", "CD", "EF", "GH"), + expectSinkD: pmetricutiltest.NewMetrics("A", "CD", "EF", "GH"), + }, + { + name: "resource/some_match_without_default", + cfg: testConfig( + withRoute("resource", isResourceX, idSink0), + withRoute("resource", isResourceB, idSink1), + ), + input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), expectSink0: pmetric.Metrics{}, - expectSink1: pmetricutiltest.NewMetrics("B", "CD", "EF", "FG"), + expectSink1: pmetricutiltest.NewMetrics("B", "CD", "EF", "GH"), expectSinkD: pmetric.Metrics{}, }, { - name: "match_none_with_default", + name: "resource/match_none_with_default", cfg: testConfig( - withRoute("resource", isX, idSink0), - withRoute("resource", isY, idSink1), + withRoute("resource", isResourceX, idSink0), + withRoute("resource", isResourceY, idSink1), withDefault(idSinkD), ), - input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "FG"), + input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), expectSink0: pmetric.Metrics{}, expectSink1: pmetric.Metrics{}, - expectSinkD: pmetricutiltest.NewMetrics("AB", "CD", "EF", "FG"), + expectSinkD: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), }, { - name: "match_none_without_default", + name: "resource/match_none_without_default", cfg: testConfig( - withRoute("resource", isX, idSink0), - withRoute("resource", isY, idSink1), + withRoute("resource", isResourceX, idSink0), + withRoute("resource", isResourceY, idSink1), ), - input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "FG"), + input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), expectSink0: pmetric.Metrics{}, expectSink1: pmetric.Metrics{}, expectSinkD: pmetric.Metrics{}, }, + { + name: "mixed/match_resource_then_grpc_request", + cfg: testConfig( + withRoute("resource", isResourceA, idSink0), + withRoute("request", isAcme, idSink1), + withDefault(idSinkD), + ), + ctx: withGRPCMetadata(context.Background(), map[string]string{"X-Tenant": "acme"}), + input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink0: pmetricutiltest.NewMetrics("A", "CD", "EF", "GH"), + expectSink1: pmetricutiltest.NewMetrics("B", "CD", "EF", "GH"), + expectSinkD: pmetric.Metrics{}, + }, + { + name: "mixed/match_resource_then_http_request", + cfg: testConfig( + withRoute("resource", isResourceA, idSink0), + withRoute("request", isAcme, idSink1), + withDefault(idSinkD), + ), + ctx: withHTTPMetadata(context.Background(), map[string][]string{"X-Tenant": {"acme"}}), + input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink0: pmetricutiltest.NewMetrics("A", "CD", "EF", "GH"), + expectSink1: pmetricutiltest.NewMetrics("B", 
"CD", "EF", "GH"), + expectSinkD: pmetric.Metrics{}, + }, } for _, tt := range testCases { @@ -630,7 +749,12 @@ func TestMetricsConnectorDetailed(t *testing.T) { ) require.NoError(t, err) - require.NoError(t, conn.ConsumeMetrics(context.Background(), tt.input)) + ctx := context.Background() + if tt.ctx != nil { + ctx = tt.ctx + } + + require.NoError(t, conn.ConsumeMetrics(ctx, tt.input)) assertExpected := func(sink *consumertest.MetricsSink, expected pmetric.Metrics, name string) { if expected == (pmetric.Metrics{}) { diff --git a/connector/routingconnector/traces.go b/connector/routingconnector/traces.go index dd5966e6a66d..a82ee85a9973 100644 --- a/connector/routingconnector/traces.go +++ b/connector/routingconnector/traces.go @@ -74,6 +74,11 @@ func (c *tracesConnector) switchTraces(ctx context.Context, td ptrace.Traces) er route := c.router.routeSlice[i] matchedSpans := ptrace.NewTraces() switch route.statementContext { + case "request": + if route.requestCondition.matchRequest(ctx) { + groupAllTraces(groups, route.consumer, td) + td = ptrace.NewTraces() // all traces have been routed + } case "", "resource": ptraceutil.MoveResourcesIf(td, matchedSpans, func(rs ptrace.ResourceSpans) bool { diff --git a/connector/routingconnector/traces_test.go b/connector/routingconnector/traces_test.go index dd6c862d965a..291e8fd230af 100644 --- a/connector/routingconnector/traces_test.go +++ b/connector/routingconnector/traces_test.go @@ -427,25 +427,118 @@ func TestTracesConnectorDetailed(t *testing.T) { idSink1 := pipeline.NewIDWithName(pipeline.SignalTraces, "1") idSinkD := pipeline.NewIDWithName(pipeline.SignalTraces, "default") - isNotNil := `attributes["resourceName"] != nil` - isA := `attributes["resourceName"] == "resourceA"` - isB := `attributes["resourceName"] == "resourceB"` - isX := `attributes["resourceName"] == "resourceX"` - isY := `attributes["resourceName"] == "resourceY"` + isAcme := `request["X-Tenant"] == "acme"` + + isAnyResource := `attributes["resourceName"] != nil` + isResourceA := `attributes["resourceName"] == "resourceA"` + isResourceB := `attributes["resourceName"] == "resourceB"` + isResourceX := `attributes["resourceName"] == "resourceX"` + isResourceY := `attributes["resourceName"] == "resourceY"` testCases := []struct { name string cfg *Config + ctx context.Context input ptrace.Traces expectSink0 ptrace.Traces expectSink1 ptrace.Traces expectSinkD ptrace.Traces }{ { - name: "all_match_first_only", + name: "request/no_request_values", cfg: testConfig( - withRoute("resource", isNotNil, idSink0), - withRoute("resource", isY, idSink1), + withRoute("request", isAcme, idSink0), + withDefault(idSinkD), + ), + ctx: context.Background(), + input: ptraceutiltest.NewTraces("AB", "CD", "EF", "GH"), + expectSink0: ptrace.Traces{}, + expectSink1: ptrace.Traces{}, + expectSinkD: ptraceutiltest.NewTraces("AB", "CD", "EF", "GH"), + }, + { + name: "request/match_any_value", + cfg: testConfig( + withRoute("request", isAcme, idSink0), + withDefault(idSinkD), + ), + ctx: withGRPCMetadata( + withHTTPMetadata( + context.Background(), + map[string][]string{"X-Tenant": {"acme"}}, + ), + map[string]string{"X-Tenant": "notacme"}, + ), + input: ptraceutiltest.NewTraces("AB", "CD", "EF", "GH"), + expectSink0: ptraceutiltest.NewTraces("AB", "CD", "EF", "GH"), + expectSink1: ptrace.Traces{}, + expectSinkD: ptrace.Traces{}, + }, + { + name: "request/match_grpc_value", + cfg: testConfig( + withRoute("request", isAcme, idSink0), + withDefault(idSinkD), + ), + ctx: withGRPCMetadata(context.Background(), 
+			input: ptraceutiltest.NewTraces("AB", "CD", "EF", "GH"),
+			expectSink0: ptraceutiltest.NewTraces("AB", "CD", "EF", "GH"),
+			expectSink1: ptrace.Traces{},
+			expectSinkD: ptrace.Traces{},
+		},
+		{
+			name: "request/match_no_grpc_value",
+			cfg: testConfig(
+				withRoute("request", isAcme, idSink0),
+				withDefault(idSinkD),
+			),
+			ctx: withGRPCMetadata(context.Background(), map[string]string{"X-Tenant": "notacme"}),
+			input: ptraceutiltest.NewTraces("AB", "CD", "EF", "GH"),
+			expectSink0: ptrace.Traces{},
+			expectSink1: ptrace.Traces{},
+			expectSinkD: ptraceutiltest.NewTraces("AB", "CD", "EF", "GH"),
+		},
+		{
+			name: "request/match_http_value",
+			cfg: testConfig(
+				withRoute("request", isAcme, idSink0),
+				withDefault(idSinkD),
+			),
+			ctx: withHTTPMetadata(context.Background(), map[string][]string{"X-Tenant": {"acme"}}),
+			input: ptraceutiltest.NewTraces("AB", "CD", "EF", "GH"),
+			expectSink0: ptraceutiltest.NewTraces("AB", "CD", "EF", "GH"),
+			expectSink1: ptrace.Traces{},
+			expectSinkD: ptrace.Traces{},
+		},
+		{
+			name: "request/match_http_value2",
+			cfg: testConfig(
+				withRoute("request", isAcme, idSink0),
+				withDefault(idSinkD),
+			),
+			ctx: withHTTPMetadata(context.Background(), map[string][]string{"X-Tenant": {"notacme", "acme"}}),
+			input: ptraceutiltest.NewTraces("AB", "CD", "EF", "GH"),
+			expectSink0: ptraceutiltest.NewTraces("AB", "CD", "EF", "GH"),
+			expectSink1: ptrace.Traces{},
+			expectSinkD: ptrace.Traces{},
+		},
+		{
+			name: "request/match_no_http_value",
+			cfg: testConfig(
+				withRoute("request", isAcme, idSink0),
+				withDefault(idSinkD),
+			),
+			ctx: withHTTPMetadata(context.Background(), map[string][]string{"X-Tenant": {"notacme"}}),
+			input: ptraceutiltest.NewTraces("AB", "CD", "EF", "GH"),
+			expectSink0: ptrace.Traces{},
+			expectSink1: ptrace.Traces{},
+			expectSinkD: ptraceutiltest.NewTraces("AB", "CD", "EF", "GH"),
+		},
+		{
+			name: "resource/all_match_first_only",
+			cfg: testConfig(
+				withRoute("resource", isAnyResource, idSink0),
+				withRoute("resource", isResourceY, idSink1),
 				withDefault(idSinkD),
 			),
 			input: ptraceutiltest.NewTraces("AB", "CD", "EF", "FG"),
@@ -454,10 +547,10 @@ func TestTracesConnectorDetailed(t *testing.T) {
 			expectSinkD: ptrace.Traces{},
 		},
 		{
-			name: "all_match_last_only",
+			name: "resource/all_match_last_only",
 			cfg: testConfig(
-				withRoute("resource", isX, idSink0),
-				withRoute("resource", isNotNil, idSink1),
+				withRoute("resource", isResourceX, idSink0),
+				withRoute("resource", isAnyResource, idSink1),
 				withDefault(idSinkD),
 			),
 			input: ptraceutiltest.NewTraces("AB", "CD", "EF", "FG"),
@@ -466,10 +559,10 @@ func TestTracesConnectorDetailed(t *testing.T) {
 			expectSinkD: ptrace.Traces{},
 		},
 		{
-			name: "all_match_only_once",
+			name: "resource/all_match_only_once",
 			cfg: testConfig(
-				withRoute("resource", isNotNil, idSink0),
-				withRoute("resource", isA+" or "+isB, idSink1),
+				withRoute("resource", isAnyResource, idSink0),
+				withRoute("resource", isResourceA+" or "+isResourceB, idSink1),
 				withDefault(idSinkD),
 			),
 			input: ptraceutiltest.NewTraces("AB", "CD", "EF", "FG"),
@@ -478,10 +571,10 @@ func TestTracesConnectorDetailed(t *testing.T) {
 			expectSinkD: ptrace.Traces{},
 		},
 		{
-			name: "each_matches_one",
+			name: "resource/each_matches_one",
 			cfg: testConfig(
-				withRoute("resource", isA, idSink0),
-				withRoute("resource", isB, idSink1),
+				withRoute("resource", isResourceA, idSink0),
+				withRoute("resource", isResourceB, idSink1),
 				withDefault(idSinkD),
 			),
 			input: ptraceutiltest.NewTraces("AB", "CD", "EF", "FG"),
@@ -490,10 +583,10 @@ func TestTracesConnectorDetailed(t *testing.T) {
 			expectSinkD: ptrace.Traces{},
 		},
 		{
-			name: "some_match_with_default",
+			name: "resource/some_match_with_default",
 			cfg: testConfig(
-				withRoute("resource", isX, idSink0),
-				withRoute("resource", isB, idSink1),
+				withRoute("resource", isResourceX, idSink0),
+				withRoute("resource", isResourceB, idSink1),
 				withDefault(idSinkD),
 			),
 			input: ptraceutiltest.NewTraces("AB", "CD", "EF", "FG"),
@@ -502,10 +595,10 @@ func TestTracesConnectorDetailed(t *testing.T) {
 			expectSinkD: ptraceutiltest.NewTraces("A", "CD", "EF", "FG"),
 		},
 		{
-			name: "some_match_without_default",
+			name: "resource/some_match_without_default",
 			cfg: testConfig(
-				withRoute("resource", isX, idSink0),
-				withRoute("resource", isB, idSink1),
+				withRoute("resource", isResourceX, idSink0),
+				withRoute("resource", isResourceB, idSink1),
 			),
 			input: ptraceutiltest.NewTraces("AB", "CD", "EF", "FG"),
 			expectSink0: ptrace.Traces{},
@@ -513,10 +606,10 @@ func TestTracesConnectorDetailed(t *testing.T) {
 			expectSinkD: ptrace.Traces{},
 		},
 		{
-			name: "match_none_with_default",
+			name: "resource/match_none_with_default",
 			cfg: testConfig(
-				withRoute("resource", isX, idSink0),
-				withRoute("resource", isY, idSink1),
+				withRoute("resource", isResourceX, idSink0),
+				withRoute("resource", isResourceY, idSink1),
 				withDefault(idSinkD),
 			),
 			input: ptraceutiltest.NewTraces("AB", "CD", "EF", "FG"),
@@ -525,16 +618,42 @@ func TestTracesConnectorDetailed(t *testing.T) {
 			expectSink0: ptrace.Traces{},
 			expectSink1: ptrace.Traces{},
-			expectSinkD: ptraceutiltest.NewTraces("AB", "CD", "EF", "FG"),
+			expectSinkD: ptraceutiltest.NewTraces("AB", "CD", "EF", "GH"),
 		},
 		{
-			name: "match_none_without_default",
+			name: "resource/match_none_without_default",
 			cfg: testConfig(
-				withRoute("resource", isX, idSink0),
-				withRoute("resource", isY, idSink1),
+				withRoute("resource", isResourceX, idSink0),
+				withRoute("resource", isResourceY, idSink1),
 			),
 			input: ptraceutiltest.NewTraces("AB", "CD", "EF", "FG"),
 			expectSink0: ptrace.Traces{},
 			expectSink1: ptrace.Traces{},
 			expectSinkD: ptrace.Traces{},
 		},
+		{
+			name: "mixed/match_resource_then_grpc_request",
+			cfg: testConfig(
+				withRoute("resource", isResourceA, idSink0),
+				withRoute("request", isAcme, idSink1),
+				withDefault(idSinkD),
+			),
+			ctx: withGRPCMetadata(context.Background(), map[string]string{"X-Tenant": "acme"}),
+			input: ptraceutiltest.NewTraces("AB", "CD", "EF", "GH"),
+			expectSink0: ptraceutiltest.NewTraces("A", "CD", "EF", "GH"),
+			expectSink1: ptraceutiltest.NewTraces("B", "CD", "EF", "GH"),
+			expectSinkD: ptrace.Traces{},
+		},
+		{
+			name: "mixed/match_resource_then_http_request",
+			cfg: testConfig(
+				withRoute("resource", isResourceA, idSink0),
+				withRoute("request", isAcme, idSink1),
+				withDefault(idSinkD),
+			),
+			ctx: withHTTPMetadata(context.Background(), map[string][]string{"X-Tenant": {"acme"}}),
+			input: ptraceutiltest.NewTraces("AB", "CD", "EF", "GH"),
+			expectSink0: ptraceutiltest.NewTraces("A", "CD", "EF", "GH"),
+			expectSink1: ptraceutiltest.NewTraces("B", "CD", "EF", "GH"),
+			expectSinkD: ptrace.Traces{},
+		},
 	}

 	for _, tt := range testCases {
@@ -554,7 +673,12 @@ func TestTracesConnectorDetailed(t *testing.T) {
 			)
 			require.NoError(t, err)

-			require.NoError(t, conn.ConsumeTraces(context.Background(), tt.input))
+			ctx := context.Background()
+			if tt.ctx != nil {
+				ctx = tt.ctx
+			}
+
+			require.NoError(t, conn.ConsumeTraces(ctx, tt.input))

 			assertExpected := func(sink *consumertest.TracesSink, expected ptrace.Traces, name string) {
 				if expected == (ptrace.Traces{}) {

From 8daf9629198191ba712a85a553d04b4e2d7cf14a Mon Sep 17 00:00:00 2001
From: odubajDT <93584209+odubajDT@users.noreply.github.com>
Date: Wed, 6 Nov 2024 11:42:14 +0100
Subject: [PATCH 9/9] [chore] Pin markdown-link-check version to 3.12.2
 (#36231)

## Description

Follow-up of
https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/36223

Signed-off-by: odubajDT
---
 .github/workflows/check-links.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/check-links.yaml b/.github/workflows/check-links.yaml
index 83817d99f1f5..224549ba628a 100644
--- a/.github/workflows/check-links.yaml
+++ b/.github/workflows/check-links.yaml
@@ -36,7 +36,7 @@ jobs:
           fetch-depth: 0

       - name: Install markdown-link-check
-        run: npm install -g markdown-link-check
+        run: npm install -g markdown-link-check@3.12.2

       - name: Run markdown-link-check
         run: |