From 99611b2b2d4aeb02c4b85088b09d4b26bfe595ae Mon Sep 17 00:00:00 2001
From: Alexander Diana
Date: Tue, 24 Oct 2023 23:37:36 +0000
Subject: [PATCH] update vendor

---
 go.mod | 8 -
 go.sum | 30 +-
 vendor/github.com/Shopify/sarama/.gitignore | 8 +-
 .../github.com/Shopify/sarama/.golangci.yml | 43 +-
 vendor/github.com/Shopify/sarama/CHANGELOG.md | 308 +-
 .../Shopify/sarama/Dockerfile.kafka | 27 +
 vendor/github.com/Shopify/sarama/Makefile | 24 +-
 vendor/github.com/Shopify/sarama/README.md | 15 +-
 .../github.com/Shopify/sarama/acl_bindings.go | 6 +-
 .../Shopify/sarama/acl_create_request.go | 4 +-
 .../Shopify/sarama/acl_create_response.go | 4 +-
 .../Shopify/sarama/acl_delete_request.go | 4 +-
 .../Shopify/sarama/acl_delete_response.go | 6 +-
 .../Shopify/sarama/acl_describe_request.go | 2 +-
 .../Shopify/sarama/acl_describe_response.go | 2 +-
 .../github.com/Shopify/sarama/acl_filter.go | 1 -
 vendor/github.com/Shopify/sarama/acl_types.go | 183 +
 .../sarama/add_offsets_to_txn_request.go | 2 +-
 .../sarama/add_offsets_to_txn_response.go | 2 +-
 .../sarama/add_partitions_to_txn_request.go | 2 +-
 .../sarama/add_partitions_to_txn_response.go | 4 +-
 vendor/github.com/Shopify/sarama/admin.go | 424 +-
 .../sarama/alter_client_quotas_request.go | 194 +
 .../sarama/alter_client_quotas_response.go | 145 +
 .../Shopify/sarama/alter_configs_request.go | 4 +-
 .../Shopify/sarama/alter_configs_response.go | 75 +-
 .../alter_user_scram_credentials_request.go | 142 +
 .../alter_user_scram_credentials_response.go | 94 +
 .../Shopify/sarama/api_versions_request.go | 58 +-
 .../Shopify/sarama/api_versions_response.go | 137 +-
 .../Shopify/sarama/async_producer.go | 423 +-
 .../Shopify/sarama/balance_strategy.go | 188 +-
 vendor/github.com/Shopify/sarama/broker.go | 897 ++--
 vendor/github.com/Shopify/sarama/client.go | 362 +-
 vendor/github.com/Shopify/sarama/compress.go | 4 +-
 vendor/github.com/Shopify/sarama/config.go | 121 +-
 vendor/github.com/Shopify/sarama/consumer.go | 348 +-
 .../Shopify/sarama/consumer_group.go | 372 +-
 .../Shopify/sarama/consumer_group_members.go | 53 +-
 .../sarama/consumer_metadata_request.go | 2 +-
 .../sarama/consumer_metadata_response.go | 2 +-
 .../Shopify/sarama/control_record.go | 30 +-
 .../github.com/Shopify/sarama/crc32_field.go | 1 +
 .../sarama/create_partitions_response.go | 4 +
 .../Shopify/sarama/create_topics_response.go | 4 +
 .../github.com/Shopify/sarama/decompress.go | 40 +-
 .../Shopify/sarama/delete_offsets_request.go | 92 +
 .../Shopify/sarama/delete_offsets_response.go | 112 +
 .../sarama/describe_client_quotas_request.go | 141 +
 .../sarama/describe_client_quotas_response.go | 235 +
 .../sarama/describe_configs_request.go | 1 -
 .../sarama/describe_configs_response.go | 8 +-
 .../Shopify/sarama/describe_groups_request.go | 29 +-
 .../sarama/describe_groups_response.go | 189 +-
 ...describe_user_scram_credentials_request.go | 70 +
 ...escribe_user_scram_credentials_response.go | 168 +
 vendor/github.com/Shopify/sarama/dev.yml | 2 +-
 .../Shopify/sarama/docker-compose.yml | 220 +-
 .../Shopify/sarama/encoder_decoder.go | 22 +-
 .../Shopify/sarama/end_txn_response.go | 2 +-
 .../github.com/Shopify/sarama/entrypoint.sh | 26 +
 vendor/github.com/Shopify/sarama/errors.go | 306 +-
 .../Shopify/sarama/fetch_request.go | 64 +-
 .../Shopify/sarama/fetch_response.go | 77 +-
 .../Shopify/sarama/gssapi_kerberos.go | 39 +-
 .../Shopify/sarama/heartbeat_request.go | 26 +-
 .../Shopify/sarama/heartbeat_response.go | 20 +-
 .../incremental_alter_configs_request.go | 173 +
 .../incremental_alter_configs_response.go | 66 +
 .../sarama/init_producer_id_request.go | 67 +-
 .../sarama/init_producer_id_response.go | 30 +-
 .../Shopify/sarama/join_group_request.go | 16 +-
 .../Shopify/sarama/join_group_response.go | 40 +-
 .../Shopify/sarama/kerberos_client.go | 12 +-
 .../Shopify/sarama/leave_group_request.go | 58 +-
 .../Shopify/sarama/leave_group_response.go | 60 +-
 .../Shopify/sarama/list_groups_request.go | 3 +-
 vendor/github.com/Shopify/sarama/message.go | 44 +-
 .../github.com/Shopify/sarama/message_set.go | 11 +-
 .../Shopify/sarama/metadata_request.go | 43 +-
 .../Shopify/sarama/metadata_response.go | 215 +-
 vendor/github.com/Shopify/sarama/metrics.go | 77 +
 .../github.com/Shopify/sarama/mockbroker.go | 25 +-
 .../github.com/Shopify/sarama/mockkerberos.go | 16 +-
 .../Shopify/sarama/mockresponses.go | 359 +-
 .../Shopify/sarama/offset_commit_request.go | 53 +-
 .../Shopify/sarama/offset_commit_response.go | 2 +
 .../Shopify/sarama/offset_fetch_request.go | 137 +-
 .../Shopify/sarama/offset_fetch_response.go | 111 +-
 .../Shopify/sarama/offset_manager.go | 97 +-
 .../Shopify/sarama/offset_request.go | 19 +
 .../Shopify/sarama/offset_response.go | 19 +-
 .../Shopify/sarama/packet_decoder.go | 7 +
 .../Shopify/sarama/packet_encoder.go | 2 +
 .../github.com/Shopify/sarama/partitioner.go | 8 +-
 .../github.com/Shopify/sarama/prep_encoder.go | 9 +
 .../github.com/Shopify/sarama/produce_set.go | 13 +-
 .../github.com/Shopify/sarama/quota_types.go | 21 +
 .../github.com/Shopify/sarama/real_decoder.go | 74 +-
 .../github.com/Shopify/sarama/real_encoder.go | 11 +
 vendor/github.com/Shopify/sarama/record.go | 4 +-
 .../github.com/Shopify/sarama/record_batch.go | 7 +-
 vendor/github.com/Shopify/sarama/records.go | 17 +-
 vendor/github.com/Shopify/sarama/request.go | 22 +-
 .../Shopify/sarama/response_header.go | 3 -
 vendor/github.com/Shopify/sarama/sarama.go | 79 +-
 .../sarama/sasl_authenticate_request.go | 12 +-
 .../sarama/sasl_authenticate_response.go | 35 +-
 .../Shopify/sarama/scram_formatter.go | 78 +
 .../sarama/sticky_assignor_user_data.go | 4 +-
 .../Shopify/sarama/sync_group_request.go | 132 +-
 .../Shopify/sarama/sync_group_response.go | 27 +-
 .../Shopify/sarama/sync_producer.go | 64 +-
 .../Shopify/sarama/transaction_manager.go | 887 ++++
 vendor/github.com/Shopify/sarama/utils.go | 90 +-
 vendor/github.com/Shopify/sarama/version.go | 27 +
 vendor/github.com/Shopify/sarama/zstd.go | 76 +-
 .../aws/aws-sdk-go/aws/endpoints/defaults.go | 39 +
 .../github.com/aws/aws-sdk-go/aws/version.go | 2 +-
 .../protocol/kafka_sarama/v2/coverage.tmp | 1 +
 .../sdk-go/protocol/kafka_sarama/v2/doc.go | 5 +
 .../protocol/kafka_sarama/v2/message.go | 7 +-
 .../sdk-go/protocol/kafka_sarama/v2/option.go | 5 +
 .../protocol/kafka_sarama/v2/protocol.go | 5 +
 .../protocol/kafka_sarama/v2/receiver.go | 61 +-
 .../sdk-go/protocol/kafka_sarama/v2/sender.go | 10 +
 .../kafka_sarama/v2/write_producer_message.go | 5 +
 .../github.com/cloudevents/sdk-go/v2/alias.go | 38 +-
 .../sdk-go/v2/binding/binary_writer.go | 5 +
 .../cloudevents/sdk-go/v2/binding/doc.go | 5 +
 .../cloudevents/sdk-go/v2/binding/encoding.go | 10 +
 .../sdk-go/v2/binding/event_message.go | 7 +-
 .../sdk-go/v2/binding/finish_message.go | 5 +
 .../sdk-go/v2/binding/format/doc.go | 5 +
 .../sdk-go/v2/binding/format/format.go | 27 +
 .../cloudevents/sdk-go/v2/binding/message.go | 12 +
 .../sdk-go/v2/binding/spec/attributes.go | 5 +
 .../cloudevents/sdk-go/v2/binding/spec/doc.go | 5 +
 .../v2/binding/spec/match_exact_version.go | 5 +
 .../sdk-go/v2/binding/spec/spec.go | 5 +
 .../sdk-go/v2/binding/structured_writer.go | 5 +
 .../cloudevents/sdk-go/v2/binding/to_event.go | 37 +-
 .../sdk-go/v2/binding/transformer.go | 5 +
 .../cloudevents/sdk-go/v2/binding/write.go | 5 +
 .../cloudevents/sdk-go/v2/client/client.go | 89 +-
 .../sdk-go/v2/client/client_default.go | 26 -
 .../sdk-go/v2/client/client_http.go | 35 +
 .../sdk-go/v2/client/client_observed.go | 105 +-
 .../sdk-go/v2/client/defaulters.go | 5 +
 .../cloudevents/sdk-go/v2/client/doc.go | 5 +
 .../sdk-go/v2/client/http_receiver.go | 36 +-
 .../cloudevents/sdk-go/v2/client/invoker.go | 50 +-
 .../sdk-go/v2/client/observability.go | 118 +-
 .../cloudevents/sdk-go/v2/client/options.go | 49 +-
 .../cloudevents/sdk-go/v2/client/receiver.go | 27 +-
 .../cloudevents/sdk-go/v2/context/context.go | 5 +
 .../sdk-go/v2/context/delegating.go | 25 +
 .../cloudevents/sdk-go/v2/context/doc.go | 5 +
 .../cloudevents/sdk-go/v2/context/logger.go | 5 +
 .../cloudevents/sdk-go/v2/context/retry.go | 5 +
 .../sdk-go/v2/event/content_type.go | 5 +
 .../sdk-go/v2/event/data_content_encoding.go | 5 +
 .../sdk-go/v2/event/datacodec/codec.go | 5 +
 .../v2/event/datacodec/codec_observed.go | 50 -
 .../sdk-go/v2/event/datacodec/doc.go | 5 +
 .../sdk-go/v2/event/datacodec/json/data.go | 5 +
 .../v2/event/datacodec/json/data_observed.go | 30 -
 .../sdk-go/v2/event/datacodec/json/doc.go | 5 +
 .../v2/event/datacodec/json/observability.go | 51 -
 .../v2/event/datacodec/observability.go | 51 -
 .../sdk-go/v2/event/datacodec/text/data.go | 5 +
 .../v2/event/datacodec/text/data_observed.go | 30 -
 .../sdk-go/v2/event/datacodec/text/doc.go | 5 +
 .../v2/event/datacodec/text/observability.go | 51 -
 .../sdk-go/v2/event/datacodec/xml/data.go | 5 +
 .../v2/event/datacodec/xml/data_observed.go | 30 -
 .../sdk-go/v2/event/datacodec/xml/doc.go | 5 +
 .../v2/event/datacodec/xml/observability.go | 51 -
 .../cloudevents/sdk-go/v2/event/doc.go | 5 +
 .../cloudevents/sdk-go/v2/event/event.go | 18 +-
 .../cloudevents/sdk-go/v2/event/event_data.go | 7 +-
 .../sdk-go/v2/event/event_interface.go | 5 +
 .../sdk-go/v2/event/event_marshal.go | 441 +-
 .../sdk-go/v2/event/event_observability.go | 77 -
 .../sdk-go/v2/event/event_reader.go | 5 +
 .../sdk-go/v2/event/event_unmarshal.go | 480 ++
 .../sdk-go/v2/event/event_validation.go | 5 +
 .../sdk-go/v2/event/event_writer.go | 5 +
 .../sdk-go/v2/event/eventcontext.go | 5 +
 .../sdk-go/v2/event/eventcontext_v03.go | 24 +-
 .../v2/event/eventcontext_v03_reader.go | 16 +-
 .../v2/event/eventcontext_v03_writer.go | 5 +
 .../sdk-go/v2/event/eventcontext_v1.go | 36 +-
 .../sdk-go/v2/event/eventcontext_v1_reader.go | 16 +-
 .../sdk-go/v2/event/eventcontext_v1_writer.go | 5 +
 .../cloudevents/sdk-go/v2/event/extensions.go | 37 +-
 .../distributed_tracing_extension.go | 164 -
 .../cloudevents/sdk-go/v2/extensions/doc.go | 2 -
 .../sdk-go/v2/observability/doc.go | 4 -
 .../sdk-go/v2/observability/keys.go | 22 -
 .../sdk-go/v2/observability/observer.go | 87 -
 .../cloudevents/sdk-go/v2/protocol/doc.go | 5 +
 .../cloudevents/sdk-go/v2/protocol/error.go | 5 +
 .../v2/protocol/http/abuse_protection.go | 7 +
 .../sdk-go/v2/protocol/http/context.go | 48 +
 .../sdk-go/v2/protocol/http/doc.go | 5 +
 .../sdk-go/v2/protocol/http/headers.go | 22 +
 .../sdk-go/v2/protocol/http/message.go | 19 +-
 .../sdk-go/v2/protocol/http/options.go | 110 +-
 .../sdk-go/v2/protocol/http/protocol.go | 90 +-
 .../v2/protocol/http/protocol_lifecycle.go | 54 +-
 .../sdk-go/v2/protocol/http/protocol_rate.go | 34 +
 .../sdk-go/v2/protocol/http/protocol_retry.go | 58 +-
 .../sdk-go/v2/protocol/http/result.go | 5 +
 .../sdk-go/v2/protocol/http/retries_result.go | 5 +
 .../sdk-go/v2/protocol/http/utility.go | 89 +
 .../sdk-go/v2/protocol/http/write_request.go | 5 +
 .../v2/protocol/http/write_responsewriter.go | 5 +
 .../cloudevents/sdk-go/v2/protocol/inbound.go | 9 +-
 .../sdk-go/v2/protocol/lifecycle.go | 5 +
 .../sdk-go/v2/protocol/outbound.go | 5 +
 .../cloudevents/sdk-go/v2/protocol/result.go | 7 +-
 .../cloudevents/sdk-go/v2/types/allocate.go | 5 +
 .../cloudevents/sdk-go/v2/types/doc.go | 5 +
 .../cloudevents/sdk-go/v2/types/timestamp.go | 5 +
 .../cloudevents/sdk-go/v2/types/uri.go | 9 +
 .../cloudevents/sdk-go/v2/types/uriref.go | 5 +
 .../cloudevents/sdk-go/v2/types/value.go | 5 +
 .../cpuguy83/go-md2man/v2/md2man/md2man.go | 6 +-
 .../cpuguy83/go-md2man/v2/md2man/roff.go | 30 +-
 .../eapache/go-xerial-snappy/fuzz.go | 16 -
 .../eapache/go-xerial-snappy/snappy.go | 18 +-
 vendor/github.com/golang/groupcache/LICENSE | 191 -
 .../github.com/golang/groupcache/lru/lru.go | 133 -
 vendor/github.com/golang/snappy/AUTHORS | 3 +
 vendor/github.com/golang/snappy/CONTRIBUTORS | 4 +
 vendor/github.com/golang/snappy/decode.go | 87 +-
 .../github.com/golang/snappy/decode_arm64.s | 494 ++
 .../snappy/{decode_amd64.go => decode_asm.go} | 1 +
 .../github.com/golang/snappy/decode_other.go | 24 +-
 vendor/github.com/golang/snappy/encode.go | 4 +
 .../github.com/golang/snappy/encode_arm64.s | 722 +++
 .../snappy/{encode_amd64.go => encode_asm.go} | 1 +
 .../github.com/golang/snappy/encode_other.go | 2 +-
 vendor/github.com/hashicorp/errwrap/LICENSE | 354 ++
 vendor/github.com/hashicorp/errwrap/README.md | 89 +
 .../github.com/hashicorp/errwrap/errwrap.go | 169 +
 .../hashicorp/go-multierror/LICENSE | 353 ++
 .../hashicorp/go-multierror/Makefile | 31 +
 .../hashicorp/go-multierror/README.md | 150 +
 .../hashicorp/go-multierror/append.go | 43 +
 .../hashicorp/go-multierror/flatten.go | 26 +
 .../hashicorp/go-multierror/format.go | 27 +
 .../hashicorp/go-multierror/group.go | 38 +
 .../hashicorp/go-multierror/multierror.go | 121 +
 .../hashicorp/go-multierror/prefix.go | 37 +
 .../hashicorp/go-multierror/sort.go | 16 +
 vendor/github.com/hashicorp/go-uuid/LICENSE | 2 +
 .../jcmturner/aescts/v2}/LICENSE | 0
 .../jcmturner/aescts/v2}/aescts.go | 12 +-
 .../jcmturner/dnsutils/v2}/LICENSE | 0
 .../jcmturner/dnsutils/v2}/srv.go | 0
 .../jcmturner/gokrb5/v8}/LICENSE | 0
 .../jcmturner/gokrb5/v8}/asn1tools/tools.go | 0
 .../jcmturner/gokrb5/v8}/client/ASExchange.go | 37 +-
 .../gokrb5/v8}/client/TGSExchange.go | 17 +-
 .../jcmturner/gokrb5/v8}/client/cache.go | 32 +-
 .../jcmturner/gokrb5/v8}/client/client.go | 136 +-
 .../jcmturner/gokrb5/v8}/client/network.go | 171 +-
 .../jcmturner/gokrb5/v8}/client/passwd.go | 44 +-
 .../jcmturner/gokrb5/v8}/client/session.go | 52 +-
 .../jcmturner/gokrb5/v8}/client/settings.go | 28 +-
 .../jcmturner/gokrb5/v8}/config/error.go | 0
 .../jcmturner/gokrb5/v8}/config/hosts.go | 2 +-
 .../jcmturner/gokrb5/v8}/config/krb5conf.go | 52 +-
 .../gokrb5/v8}/credentials/ccache.go | 23 +-
 .../gokrb5/v8}/credentials/credentials.go | 145 +-
 .../v8}/crypto/aes128-cts-hmac-sha1-96.go | 54 +-
 .../v8}/crypto/aes128-cts-hmac-sha256-128.go | 15 +-
 .../v8}/crypto/aes256-cts-hmac-sha1-96.go | 54 +-
 .../v8}/crypto/aes256-cts-hmac-sha384-192.go | 15 +-
 .../gokrb5/v8}/crypto/common/common.go | 21 +-
 .../jcmturner/gokrb5/v8}/crypto/crypto.go | 10 +-
 .../gokrb5/v8}/crypto/des3-cbc-sha1-kd.go | 43 +-
 .../jcmturner/gokrb5/v8/crypto/etype/etype.go | 29 +
 .../jcmturner/gokrb5/v8}/crypto/rc4-hmac.go | 12 +-
 .../gokrb5/v8}/crypto/rfc3961/encryption.go | 10 +-
 .../v8}/crypto/rfc3961/keyDerivation.go | 13 +-
 .../gokrb5/v8}/crypto/rfc3961/nfold.go | 23 +-
 .../gokrb5/v8}/crypto/rfc3962/encryption.go | 6 +-
 .../v8}/crypto/rfc3962/keyDerivation.go | 15 +-
 .../gokrb5/v8}/crypto/rfc4757/checksum.go | 0
 .../gokrb5/v8}/crypto/rfc4757/encryption.go | 2 +-
 .../v8}/crypto/rfc4757/keyDerivation.go | 15 -
 .../gokrb5/v8}/crypto/rfc4757/msgtype.go | 0
 .../gokrb5/v8}/crypto/rfc8009/encryption.go | 15 +-
 .../v8}/crypto/rfc8009/keyDerivation.go | 17 +-
 .../jcmturner/gokrb5/v8}/gssapi/MICToken.go | 36 +-
 .../jcmturner/gokrb5/v8}/gssapi/README.md | 0
 .../gokrb5/v8}/gssapi/contextFlags.go | 4 +-
 .../jcmturner/gokrb5/v8}/gssapi/gssapi.go | 5 +-
 .../jcmturner/gokrb5/v8}/gssapi/wrapToken.go | 56 +-
 .../gokrb5/v8}/iana/addrtype/constants.go | 0
 .../gokrb5/v8}/iana/adtype/constants.go | 0
 .../gokrb5/v8}/iana/asnAppTag/constants.go | 0
 .../gokrb5/v8}/iana/chksumtype/constants.go | 0
 .../jcmturner/gokrb5/v8}/iana/constants.go | 0
 .../gokrb5/v8}/iana/errorcode/constants.go | 0
 .../gokrb5/v8}/iana/etypeID/constants.go | 0
 .../gokrb5/v8}/iana/flags/constants.go | 0
 .../gokrb5/v8}/iana/keyusage/constants.go | 0
 .../gokrb5/v8}/iana/msgtype/constants.go | 0
 .../gokrb5/v8}/iana/nametype/constants.go | 0
 .../gokrb5/v8}/iana/patype/constants.go | 0
 .../gokrb5/v8}/kadmin/changepasswddata.go | 2 +-
 .../jcmturner/gokrb5/v8}/kadmin/message.go | 4 +-
 .../jcmturner/gokrb5/v8}/kadmin/passwd.go | 8 +-
 .../jcmturner/gokrb5/v8}/keytab/keytab.go | 106 +-
 .../jcmturner/gokrb5/v8}/krberror/error.go | 4 +-
 .../jcmturner/gokrb5/v8}/messages/APRep.go | 23 +-
 .../jcmturner/gokrb5/v8}/messages/APReq.go | 57 +-
 .../jcmturner/gokrb5/v8}/messages/KDCRep.go | 120 +-
 .../jcmturner/gokrb5/v8}/messages/KDCReq.go | 24 +-
 .../jcmturner/gokrb5/v8}/messages/KRBCred.go | 12 +-
 .../jcmturner/gokrb5/v8}/messages/KRBError.go | 23 +-
 .../jcmturner/gokrb5/v8}/messages/KRBPriv.go | 16 +-
 .../jcmturner/gokrb5/v8}/messages/KRBSafe.go | 26 +-
 .../jcmturner/gokrb5/v8}/messages/Ticket.go | 41 +-
 .../jcmturner/gokrb5/v8}/pac/client_claims.go | 5 +-
 .../jcmturner/gokrb5/v8}/pac/client_info.go | 2 +-
 .../gokrb5/v8}/pac/credentials_info.go | 18 +-
 .../jcmturner/gokrb5/v8}/pac/device_claims.go | 5 +-
 .../jcmturner/gokrb5/v8}/pac/device_info.go | 4 +-
 .../gokrb5/v8}/pac/kerb_validation_info.go | 9 +-
 .../jcmturner/gokrb5/v8}/pac/pac_type.go | 8 +-
 .../gokrb5/v8}/pac/s4u_delegation_info.go | 4 +-
 .../gokrb5/v8}/pac/signature_data.go | 16 +-
 .../gokrb5/v8}/pac/supplemental_cred.go | 7 +-
 .../jcmturner/gokrb5/v8}/pac/upn_dns_info.go | 2 +-
 .../gokrb5/v8}/types/Authenticator.go | 33 +-
 .../gokrb5/v8/types/AuthorizationData.go | 55 +
 .../gokrb5/v8}/types/Cryptosystem.go | 19 +-
 .../jcmturner/gokrb5/v8}/types/HostAddress.go | 26 +-
 .../gokrb5/v8/types/KerberosFlags.go | 68 +
 .../jcmturner/gokrb5/v8}/types/PAData.go | 2 +-
 .../gokrb5/v8}/types/PrincipalName.go | 5 +-
 .../jcmturner/gokrb5/v8}/types/TypedData.go | 0
 .../jcmturner/rpc/v2}/LICENSE | 0
 .../jcmturner/rpc/v2}/mstypes/claims.go | 32 +-
 .../jcmturner/rpc/v2}/mstypes/common.go | 2 +
 .../jcmturner/rpc/v2}/mstypes/filetime.go | 0
 .../rpc/v2}/mstypes/group_membership.go | 0
 .../v2}/mstypes/kerb_sid_and_attributes.go | 0
 .../jcmturner/rpc/v2}/mstypes/reader.go | 0
 .../rpc/v2}/mstypes/rpc_unicode_string.go | 0
 .../jcmturner/rpc/v2}/mstypes/sid.go | 16 +-
 .../rpc/v2}/mstypes/user_session_key.go | 0
 .../jcmturner/rpc/v2}/ndr/arrays.go | 0
 .../jcmturner/rpc/v2}/ndr/decoder.go | 0
 .../jcmturner/rpc/v2}/ndr/error.go | 0
 .../jcmturner/rpc/v2}/ndr/header.go | 0
 .../jcmturner/rpc/v2}/ndr/pipe.go | 0
 .../jcmturner/rpc/v2}/ndr/primitives.go | 0
 .../jcmturner/rpc/v2}/ndr/rawbytes.go | 0
 .../jcmturner/rpc/v2}/ndr/strings.go | 0
 .../jcmturner/rpc/v2}/ndr/tags.go | 0
 .../jcmturner/rpc/v2}/ndr/union.go | 0
 .../github.com/json-iterator/go/.codecov.yml | 3 +
 vendor/github.com/json-iterator/go/.gitignore | 4 +
 .../github.com/json-iterator/go/.travis.yml | 14 +
 vendor/github.com/json-iterator/go/Gopkg.lock | 21 +
 vendor/github.com/json-iterator/go/Gopkg.toml | 26 +
 .../go}/LICENSE | 4 +-
 vendor/github.com/json-iterator/go/README.md | 85 +
 vendor/github.com/json-iterator/go/adapter.go | 150 +
 vendor/github.com/json-iterator/go/any.go | 325 ++
 .../github.com/json-iterator/go/any_array.go | 278 ++
 .../github.com/json-iterator/go/any_bool.go | 137 +
 .../github.com/json-iterator/go/any_float.go | 83 +
 .../github.com/json-iterator/go/any_int32.go | 74 +
 .../github.com/json-iterator/go/any_int64.go | 74 +
 .../json-iterator/go/any_invalid.go | 82 +
 vendor/github.com/json-iterator/go/any_nil.go | 69 +
 .../github.com/json-iterator/go/any_number.go | 123 +
 .../github.com/json-iterator/go/any_object.go | 374 ++
 vendor/github.com/json-iterator/go/any_str.go | 166 +
 .../github.com/json-iterator/go/any_uint32.go | 74 +
 .../github.com/json-iterator/go/any_uint64.go | 74 +
 vendor/github.com/json-iterator/go/build.sh | 12 +
 vendor/github.com/json-iterator/go/config.go | 375 ++
 .../go/fuzzy_mode_convert_table.md | 7 +
 vendor/github.com/json-iterator/go/iter.go | 349 ++
 .../github.com/json-iterator/go/iter_array.go | 64 +
 .../github.com/json-iterator/go/iter_float.go | 342 ++
 .../github.com/json-iterator/go/iter_int.go | 346 ++
 .../json-iterator/go/iter_object.go | 267 ++
 .../github.com/json-iterator/go/iter_skip.go | 130 +
 .../json-iterator/go/iter_skip_sloppy.go | 163 +
 .../json-iterator/go/iter_skip_strict.go | 99 +
 .../github.com/json-iterator/go/iter_str.go | 215 +
 .../github.com/json-iterator/go/jsoniter.go | 18 +
 vendor/github.com/json-iterator/go/pool.go | 42 +
 vendor/github.com/json-iterator/go/reflect.go | 337 ++
 .../json-iterator/go/reflect_array.go | 104 +
 .../json-iterator/go/reflect_dynamic.go | 70 +
 .../json-iterator/go/reflect_extension.go | 483 ++
 .../json-iterator/go/reflect_json_number.go | 112 +
 .../go/reflect_json_raw_message.go | 76 +
 .../json-iterator/go/reflect_map.go | 346 ++
 .../json-iterator/go/reflect_marshaler.go | 225 +
 .../json-iterator/go/reflect_native.go | 453 ++
 .../json-iterator/go/reflect_optional.go | 129 +
 .../json-iterator/go/reflect_slice.go | 99 +
 .../go/reflect_struct_decoder.go | 1097 +++++
 .../go/reflect_struct_encoder.go | 211 +
 vendor/github.com/json-iterator/go/stream.go | 210 +
 .../json-iterator/go/stream_float.go | 111 +
 .../github.com/json-iterator/go/stream_int.go | 190 +
 .../github.com/json-iterator/go/stream_str.go | 372 ++
 vendor/github.com/json-iterator/go/test.sh | 12 +
 .../github.com/klauspost/compress/.gitignore | 7 +
 .../klauspost/compress/.goreleaser.yml | 4 +
 .../github.com/klauspost/compress/README.md | 171 +-
 .../klauspost/compress/huff0/bitreader.go | 122 +-
 .../klauspost/compress/huff0/bitwriter.go | 115 -
 .../klauspost/compress/huff0/bytereader.go | 10 -
 .../klauspost/compress/huff0/compress.go | 34 +-
 .../klauspost/compress/huff0/decompress.go | 740 ++-
 .../compress/huff0/decompress_amd64.go | 226 +
 .../compress/huff0/decompress_amd64.s | 846 ++++
 .../compress/huff0/decompress_generic.go | 299 ++
 .../klauspost/compress/huff0/huff0.go | 2 +
 .../compress/internal/cpuinfo/cpuinfo.go | 34 +
 .../internal/cpuinfo/cpuinfo_amd64.go | 11 +
 .../compress/internal/cpuinfo/cpuinfo_amd64.s | 36 +
 .../compress/internal/snapref/encode_other.go | 6 +-
 .../klauspost/compress/zstd/README.md | 172 +-
 .../klauspost/compress/zstd/bitreader.go | 12 +-
 .../klauspost/compress/zstd/bitwriter.go | 98 +-
 .../klauspost/compress/zstd/blockdec.go | 511 +--
 .../klauspost/compress/zstd/blockenc.go | 108 +-
 .../klauspost/compress/zstd/bytebuf.go | 23 +-
 .../klauspost/compress/zstd/bytereader.go | 6 -
 .../klauspost/compress/zstd/decodeheader.go | 93 +-
 .../klauspost/compress/zstd/decoder.go | 674 ++-
 .../compress/zstd/decoder_options.go | 79 +-
 .../klauspost/compress/zstd/dict.go | 5 +-
 .../klauspost/compress/zstd/enc_base.go | 14 +-
 .../klauspost/compress/zstd/enc_best.go | 64 +-
 .../klauspost/compress/zstd/enc_better.go | 43 +-
 .../klauspost/compress/zstd/enc_dfast.go | 33 +-
 .../klauspost/compress/zstd/enc_fast.go | 163 +-
 .../klauspost/compress/zstd/encoder.go | 107 +-
 .../compress/zstd/encoder_options.go | 28 +-
 .../klauspost/compress/zstd/framedec.go | 329 +-
 .../klauspost/compress/zstd/fse_decoder.go | 128 +-
 .../compress/zstd/fse_decoder_amd64.go | 65 +
 .../compress/zstd/fse_decoder_amd64.s | 126 +
 .../compress/zstd/fse_decoder_generic.go | 72 +
 .../klauspost/compress/zstd/fse_encoder.go | 28 +-
 .../klauspost/compress/zstd/hash.go | 6 -
 .../klauspost/compress/zstd/history.go | 67 +-
 .../compress/zstd/internal/xxhash/README.md | 49 +-
 .../compress/zstd/internal/xxhash/xxhash.go | 47 +-
 .../zstd/internal/xxhash/xxhash_amd64.go | 12 -
 .../zstd/internal/xxhash/xxhash_amd64.s | 337 +-
 .../zstd/internal/xxhash/xxhash_arm64.s | 184 +
 .../zstd/internal/xxhash/xxhash_asm.go | 16 +
 .../zstd/internal/xxhash/xxhash_other.go | 23 +-
 .../klauspost/compress/zstd/seqdec.go | 327 +-
 .../klauspost/compress/zstd/seqdec_amd64.go | 379 ++
 .../klauspost/compress/zstd/seqdec_amd64.s | 4079 +++++++++++++++++
 .../klauspost/compress/zstd/seqdec_generic.go | 237 +
 .../github.com/klauspost/compress/zstd/zip.go | 69 +-
 .../klauspost/compress/zstd/zstd.go | 50 +-
 .../tracecontext.go/traceparent/package.go | 192 -
 .../tracecontext.go/tracestate/package.go | 123 -
 .../modern-go/concurrent/.gitignore | 1 +
 .../modern-go/concurrent/.travis.yml | 14 +
 .../modern-go/concurrent}/LICENSE | 3 +-
 .../github.com/modern-go/concurrent/README.md | 49 +
 .../modern-go/concurrent/executor.go | 14 +
 .../modern-go/concurrent/go_above_19.go | 15 +
 .../modern-go/concurrent/go_below_19.go | 33 +
 vendor/github.com/modern-go/concurrent/log.go | 13 +
 .../github.com/modern-go/concurrent/test.sh | 12 +
 .../concurrent/unbounded_executor.go | 119 +
 .../github.com/modern-go/reflect2/.gitignore | 2 +
 .../github.com/modern-go/reflect2/.travis.yml | 15 +
 .../github.com/modern-go/reflect2/Gopkg.lock | 9 +
 .../github.com/modern-go/reflect2/Gopkg.toml | 31 +
 vendor/github.com/modern-go/reflect2/LICENSE | 201 +
 .../github.com/modern-go/reflect2/README.md | 71 +
 .../modern-go/reflect2/go_above_118.go | 23 +
 .../modern-go/reflect2/go_above_19.go | 17 +
 .../modern-go/reflect2/go_below_118.go | 21 +
 .../github.com/modern-go/reflect2/reflect2.go | 300 ++
 .../modern-go/reflect2/reflect2_amd64.s | 0
 .../modern-go/reflect2/reflect2_kind.go | 30 +
 .../modern-go/reflect2/relfect2_386.s | 0
 .../modern-go/reflect2/relfect2_amd64p32.s | 0
 .../modern-go/reflect2/relfect2_arm.s | 0
 .../modern-go/reflect2/relfect2_arm64.s | 0
 .../modern-go/reflect2/relfect2_mips64x.s | 0
 .../modern-go/reflect2/relfect2_mipsx.s | 0
 .../modern-go/reflect2/relfect2_ppc64x.s | 0
 .../modern-go/reflect2/relfect2_s390x.s | 0
 .../modern-go/reflect2/safe_field.go | 58 +
 .../github.com/modern-go/reflect2/safe_map.go | 101 +
 .../modern-go/reflect2/safe_slice.go | 92 +
 .../modern-go/reflect2/safe_struct.go | 29 +
 .../modern-go/reflect2/safe_type.go | 78 +
 .../github.com/modern-go/reflect2/type_map.go | 70 +
 .../modern-go/reflect2/unsafe_array.go | 65 +
 .../modern-go/reflect2/unsafe_eface.go | 59 +
 .../modern-go/reflect2/unsafe_field.go | 74 +
 .../modern-go/reflect2/unsafe_iface.go | 64 +
 .../modern-go/reflect2/unsafe_link.go | 76 +
 .../modern-go/reflect2/unsafe_map.go | 130 +
 .../modern-go/reflect2/unsafe_ptr.go | 46 +
 .../modern-go/reflect2/unsafe_slice.go | 177 +
 .../modern-go/reflect2/unsafe_struct.go | 59 +
 .../modern-go/reflect2/unsafe_type.go | 85 +
 vendor/github.com/pierrec/lz4/.travis.yml | 24 -
 vendor/github.com/pierrec/lz4/debug.go | 23 -
 vendor/github.com/pierrec/lz4/debug_stub.go | 7 -
 vendor/github.com/pierrec/lz4/decode_amd64.go | 8 -
 vendor/github.com/pierrec/lz4/decode_other.go | 98 -
 vendor/github.com/pierrec/lz4/errors.go | 30 -
 vendor/github.com/pierrec/lz4/lz4.go | 113 -
 vendor/github.com/pierrec/lz4/lz4_go1.10.go | 29 -
 .../github.com/pierrec/lz4/lz4_notgo1.10.go | 29 -
 vendor/github.com/pierrec/lz4/reader.go | 335 --
 .../pierrec/lz4/{ => v4}/.gitignore | 4 +-
 .../github.com/pierrec/lz4/{ => v4}/LICENSE | 0
 .../github.com/pierrec/lz4/{ => v4}/README.md | 10 +-
 .../lz4/{ => v4/internal/lz4block}/block.go | 244 +-
 .../lz4/v4/internal/lz4block/blocks.go | 90 +
 .../{ => v4/internal/lz4block}/decode_amd64.s | 327 +-
 .../lz4/v4/internal/lz4block/decode_arm.s | 231 +
 .../lz4/v4/internal/lz4block/decode_arm64.s | 230 +
 .../lz4/v4/internal/lz4block/decode_asm.go | 10 +
 .../lz4/v4/internal/lz4block/decode_other.go | 139 +
 .../lz4/v4/internal/lz4errors/errors.go | 19 +
 .../lz4/v4/internal/lz4stream/block.go | 350 ++
 .../lz4/v4/internal/lz4stream/frame.go | 204 +
 .../lz4/v4/internal/lz4stream/frame_gen.go | 103 +
 .../lz4/{ => v4}/internal/xxh32/xxh32zero.go | 75 +-
 .../lz4/v4/internal/xxh32/xxh32zero_arm.go | 11 +
 .../lz4/v4/internal/xxh32/xxh32zero_arm.s | 251 +
 .../lz4/v4/internal/xxh32/xxh32zero_other.go | 10 +
 vendor/github.com/pierrec/lz4/v4/lz4.go | 157 +
 vendor/github.com/pierrec/lz4/v4/options.go | 214 +
 .../github.com/pierrec/lz4/v4/options_gen.go | 92 +
 vendor/github.com/pierrec/lz4/v4/reader.go | 275 ++
 vendor/github.com/pierrec/lz4/v4/state.go | 75 +
 vendor/github.com/pierrec/lz4/v4/state_gen.go | 28 +
 vendor/github.com/pierrec/lz4/v4/writer.go | 238 +
 vendor/github.com/pierrec/lz4/writer.go | 408 --
 .../rcrowley/go-metrics/.travis.yml | 1 +
 vendor/go.opencensus.io/.gitignore | 9 -
 vendor/go.opencensus.io/.travis.yml | 17 -
 vendor/go.opencensus.io/AUTHORS | 1 -
 vendor/go.opencensus.io/CONTRIBUTING.md | 63 -
 vendor/go.opencensus.io/Makefile | 97 -
 vendor/go.opencensus.io/README.md | 267 --
 vendor/go.opencensus.io/appveyor.yml | 24 -
 vendor/go.opencensus.io/internal/internal.go | 37 -
 vendor/go.opencensus.io/internal/sanitize.go | 50 -
 .../internal/tagencoding/tagencoding.go | 75 -
 .../internal/traceinternals.go | 53 -
 .../go.opencensus.io/metric/metricdata/doc.go | 19 -
 .../metric/metricdata/exemplar.go | 38 -
 .../metric/metricdata/label.go | 35 -
 .../metric/metricdata/metric.go | 46 -
 .../metric/metricdata/point.go | 193 -
 .../metric/metricdata/type_string.go | 16 -
 .../metric/metricdata/unit.go | 27 -
 .../metric/metricproducer/manager.go | 78 -
 .../metric/metricproducer/producer.go | 28 -
 vendor/go.opencensus.io/opencensus.go | 21 -
 .../go.opencensus.io/plugin/ochttp/client.go | 117 -
 .../plugin/ochttp/client_stats.go | 143 -
 vendor/go.opencensus.io/plugin/ochttp/doc.go | 19 -
 .../plugin/ochttp/propagation/b3/b3.go | 123 -
 .../propagation/tracecontext/propagation.go | 187 -
 .../go.opencensus.io/plugin/ochttp/route.go | 61 -
 .../go.opencensus.io/plugin/ochttp/server.go | 453 --
 .../ochttp/span_annotating_client_trace.go | 169 -
 .../go.opencensus.io/plugin/ochttp/stats.go | 292 --
 .../go.opencensus.io/plugin/ochttp/trace.go | 244 -
 .../plugin/ochttp/wrapped_body.go | 44 -
 vendor/go.opencensus.io/resource/resource.go | 164 -
 vendor/go.opencensus.io/stats/doc.go | 69 -
 .../go.opencensus.io/stats/internal/record.go | 25 -
 vendor/go.opencensus.io/stats/measure.go | 109 -
 .../go.opencensus.io/stats/measure_float64.go | 55 -
 .../go.opencensus.io/stats/measure_int64.go | 55 -
 vendor/go.opencensus.io/stats/record.go | 137 -
 vendor/go.opencensus.io/stats/units.go | 26 -
 .../stats/view/aggregation.go | 121 -
 .../stats/view/aggregation_data.go | 293 --
 .../go.opencensus.io/stats/view/collector.go | 86 -
 vendor/go.opencensus.io/stats/view/doc.go | 47 -
 vendor/go.opencensus.io/stats/view/export.go | 45 -
 vendor/go.opencensus.io/stats/view/view.go | 221 -
 .../stats/view/view_to_metric.go | 152 -
 vendor/go.opencensus.io/stats/view/worker.go | 413 -
 .../stats/view/worker_commands.go | 186 -
 vendor/go.opencensus.io/tag/context.go | 43 -
 vendor/go.opencensus.io/tag/doc.go | 26 -
 vendor/go.opencensus.io/tag/key.go | 44 -
 vendor/go.opencensus.io/tag/map.go | 229 -
 vendor/go.opencensus.io/tag/map_codec.go | 239 -
 vendor/go.opencensus.io/tag/metadata.go | 52 -
 vendor/go.opencensus.io/tag/profile_19.go | 31 -
 vendor/go.opencensus.io/tag/profile_not19.go | 23 -
 vendor/go.opencensus.io/tag/validate.go | 56 -
 vendor/go.opencensus.io/trace/basetypes.go | 119 -
 vendor/go.opencensus.io/trace/config.go | 86 -
 vendor/go.opencensus.io/trace/doc.go | 53 -
 vendor/go.opencensus.io/trace/evictedqueue.go | 38 -
 vendor/go.opencensus.io/trace/export.go | 97 -
 .../trace/internal/internal.go | 22 -
 vendor/go.opencensus.io/trace/lrumap.go | 61 -
 .../trace/propagation/propagation.go | 108 -
 vendor/go.opencensus.io/trace/sampling.go | 75 -
 vendor/go.opencensus.io/trace/spanbucket.go | 130 -
 vendor/go.opencensus.io/trace/spanstore.go | 306 --
 vendor/go.opencensus.io/trace/status_codes.go | 37 -
 vendor/go.opencensus.io/trace/trace.go | 598 ---
 vendor/go.opencensus.io/trace/trace_go11.go | 32 -
 .../go.opencensus.io/trace/trace_nongo11.go | 25 -
 .../trace/tracestate/tracestate.go | 147 -
 .../gopkg.in/jcmturner/aescts.v1/.gitignore | 14 -
 vendor/gopkg.in/jcmturner/aescts.v1/README.md | 16 -
 .../gopkg.in/jcmturner/dnsutils.v1/.gitignore | 14 -
 .../jcmturner/dnsutils.v1/.travis.yml | 24 -
 .../jcmturner/gokrb5.v7/crypto/etype/etype.go | 29 -
 .../gokrb5.v7/types/AuthorizationData.go | 123 -
 .../gokrb5.v7/types/KerberosFlags.go | 124 -
 vendor/modules.txt | 177 +-
 642 files changed, 37995 insertions(+), 17092 deletions(-)
 create mode 100644 vendor/github.com/Shopify/sarama/Dockerfile.kafka
 create mode 100644 vendor/github.com/Shopify/sarama/alter_client_quotas_request.go
 create mode 100644 vendor/github.com/Shopify/sarama/alter_client_quotas_response.go
 create mode 100644 vendor/github.com/Shopify/sarama/alter_user_scram_credentials_request.go
 create mode 100644 vendor/github.com/Shopify/sarama/alter_user_scram_credentials_response.go
 create mode 100644 vendor/github.com/Shopify/sarama/delete_offsets_request.go
 create mode 100644 vendor/github.com/Shopify/sarama/delete_offsets_response.go
 create mode 100644 vendor/github.com/Shopify/sarama/describe_client_quotas_request.go
 create mode 100644 vendor/github.com/Shopify/sarama/describe_client_quotas_response.go
 create mode 100644 vendor/github.com/Shopify/sarama/describe_user_scram_credentials_request.go
 create mode 100644 vendor/github.com/Shopify/sarama/describe_user_scram_credentials_response.go
 create mode 100644 vendor/github.com/Shopify/sarama/entrypoint.sh
 create mode 100644 vendor/github.com/Shopify/sarama/incremental_alter_configs_request.go
 create mode 100644 vendor/github.com/Shopify/sarama/incremental_alter_configs_response.go
 create mode 100644 vendor/github.com/Shopify/sarama/quota_types.go
 create mode 100644 vendor/github.com/Shopify/sarama/scram_formatter.go
 create mode 100644 vendor/github.com/Shopify/sarama/transaction_manager.go
 create mode 100644 vendor/github.com/Shopify/sarama/version.go
 create mode 100644 vendor/github.com/cloudevents/sdk-go/protocol/kafka_sarama/v2/coverage.tmp
 delete mode 100644 vendor/github.com/cloudevents/sdk-go/v2/client/client_default.go
 create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/client/client_http.go
 create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/context/delegating.go
 delete mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/codec_observed.go
 delete mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/data_observed.go
 delete mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/observability.go
 delete mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/observability.go
 delete mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/data_observed.go
 delete mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/observability.go
 delete mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/data_observed.go
 delete mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/observability.go
 delete mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/event_observability.go
 create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/event_unmarshal.go
 delete mode 100644 vendor/github.com/cloudevents/sdk-go/v2/extensions/distributed_tracing_extension.go
 delete mode 100644 vendor/github.com/cloudevents/sdk-go/v2/extensions/doc.go
 delete mode 100644 vendor/github.com/cloudevents/sdk-go/v2/observability/doc.go
 delete mode 100644 vendor/github.com/cloudevents/sdk-go/v2/observability/keys.go
 delete mode 100644 vendor/github.com/cloudevents/sdk-go/v2/observability/observer.go
 create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/http/context.go
 create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_rate.go
 create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/http/utility.go
 delete mode 100644 vendor/github.com/eapache/go-xerial-snappy/fuzz.go
 delete mode 100644 vendor/github.com/golang/groupcache/LICENSE
 delete mode 100644 vendor/github.com/golang/groupcache/lru/lru.go
 create mode 100644 vendor/github.com/golang/snappy/decode_arm64.s
 rename vendor/github.com/golang/snappy/{decode_amd64.go => decode_asm.go} (93%)
 create mode 100644 vendor/github.com/golang/snappy/encode_arm64.s
 rename vendor/github.com/golang/snappy/{encode_amd64.go => encode_asm.go} (97%)
 create mode 100644 vendor/github.com/hashicorp/errwrap/LICENSE
 create mode 100644 vendor/github.com/hashicorp/errwrap/README.md
 create mode 100644 vendor/github.com/hashicorp/errwrap/errwrap.go
 create mode 100644 vendor/github.com/hashicorp/go-multierror/LICENSE
 create mode 100644 vendor/github.com/hashicorp/go-multierror/Makefile
 create mode 100644 vendor/github.com/hashicorp/go-multierror/README.md
 create mode 100644 vendor/github.com/hashicorp/go-multierror/append.go
 create mode 100644 vendor/github.com/hashicorp/go-multierror/flatten.go
 create mode 100644 vendor/github.com/hashicorp/go-multierror/format.go
 create mode 100644 vendor/github.com/hashicorp/go-multierror/group.go
 create mode 100644 vendor/github.com/hashicorp/go-multierror/multierror.go
 create mode 100644 vendor/github.com/hashicorp/go-multierror/prefix.go
 create mode 100644 vendor/github.com/hashicorp/go-multierror/sort.go
 rename vendor/{gopkg.in/jcmturner/aescts.v1 => github.com/jcmturner/aescts/v2}/LICENSE (100%)
 rename vendor/{gopkg.in/jcmturner/aescts.v1 => github.com/jcmturner/aescts/v2}/aescts.go (93%)
 rename vendor/{gopkg.in/jcmturner/dnsutils.v1 => github.com/jcmturner/dnsutils/v2}/LICENSE (100%)
 rename vendor/{gopkg.in/jcmturner/dnsutils.v1 => github.com/jcmturner/dnsutils/v2}/srv.go (100%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/LICENSE (100%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/asn1tools/tools.go (100%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/client/ASExchange.go (85%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/client/TGSExchange.go (92%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/client/cache.go (79%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/client/client.go (61%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/client/network.go (62%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/client/passwd.go (67%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/client/session.go (84%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/client/settings.go (73%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/config/error.go (100%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/config/hosts.go (98%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/config/krb5conf.go (95%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/credentials/ccache.go (89%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/credentials/credentials.go (70%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/crypto/aes128-cts-hmac-sha1-96.go (55%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/crypto/aes128-cts-hmac-sha256-128.go (89%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/crypto/aes256-cts-hmac-sha1-96.go (55%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/crypto/aes256-cts-hmac-sha384-192.go (89%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/crypto/common/common.go (77%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/crypto/crypto.go (95%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/crypto/des3-cbc-sha1-kd.go (76%)
 create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/crypto/etype/etype.go
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/crypto/rc4-hmac.go (88%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/crypto/rfc3961/encryption.go (89%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/crypto/rfc3961/keyDerivation.go (91%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/crypto/rfc3961/nfold.go (72%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/crypto/rfc3962/encryption.go (95%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/crypto/rfc3962/keyDerivation.go (75%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/crypto/rfc4757/checksum.go (100%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/crypto/rfc4757/encryption.go (98%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/crypto/rfc4757/keyDerivation.go (72%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/crypto/rfc4757/msgtype.go (100%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/crypto/rfc8009/encryption.go (90%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/crypto/rfc8009/keyDerivation.go (88%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/gssapi/MICToken.go (79%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/gssapi/README.md (100%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/gssapi/contextFlags.go (81%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/gssapi/gssapi.go (95%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/gssapi/wrapToken.go (71%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/iana/addrtype/constants.go (100%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/iana/adtype/constants.go (100%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/iana/asnAppTag/constants.go (100%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/iana/chksumtype/constants.go (100%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/iana/constants.go (100%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/iana/errorcode/constants.go (100%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/iana/etypeID/constants.go (100%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/iana/flags/constants.go (100%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/iana/keyusage/constants.go (100%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/iana/msgtype/constants.go (100%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/iana/nametype/constants.go (100%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/iana/patype/constants.go (100%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/kadmin/changepasswddata.go (93%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/kadmin/message.go (96%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/kadmin/passwd.go (91%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/keytab/keytab.go (79%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/krberror/error.go (94%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/messages/APRep.go (72%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/messages/APReq.go (78%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/messages/KDCRep.go (83%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/messages/KDCReq.go (96%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/messages/KRBCred.go (93%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/messages/KRBError.go (82%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/messages/KRBPriv.go (90%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/messages/KRBSafe.go (68%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/messages/Ticket.go (91%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/pac/client_claims.go (90%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/pac/client_info.go (96%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/pac/credentials_info.go (76%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/pac/device_claims.go (90%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/pac/device_info.go (97%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/pac/kerb_validation_info.go (90%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/pac/pac_type.go (97%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/pac/s4u_delegation_info.go (91%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/pac/signature_data.go (59%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/pac/supplemental_cred.go (89%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/pac/upn_dns_info.go (98%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/types/Authenticator.go (69%)
 create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/types/AuthorizationData.go
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/types/Cryptosystem.go (73%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/types/HostAddress.go (85%)
 create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/types/KerberosFlags.go
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/types/PAData.go (98%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/types/PrincipalName.go (94%)
 rename vendor/{gopkg.in/jcmturner/gokrb5.v7 => github.com/jcmturner/gokrb5/v8}/types/TypedData.go (100%)
 rename vendor/{gopkg.in/jcmturner/rpc.v1 => github.com/jcmturner/rpc/v2}/LICENSE (100%)
 rename vendor/{gopkg.in/jcmturner/rpc.v1 => github.com/jcmturner/rpc/v2}/mstypes/claims.go (75%)
 rename vendor/{gopkg.in/jcmturner/rpc.v1 => github.com/jcmturner/rpc/v2}/mstypes/common.go (52%)
 rename vendor/{gopkg.in/jcmturner/rpc.v1 => github.com/jcmturner/rpc/v2}/mstypes/filetime.go (100%)
 rename vendor/{gopkg.in/jcmturner/rpc.v1 => github.com/jcmturner/rpc/v2}/mstypes/group_membership.go (100%)
 rename vendor/{gopkg.in/jcmturner/rpc.v1 => github.com/jcmturner/rpc/v2}/mstypes/kerb_sid_and_attributes.go (100%)
 rename vendor/{gopkg.in/jcmturner/rpc.v1 => github.com/jcmturner/rpc/v2}/mstypes/reader.go (100%)
 rename vendor/{gopkg.in/jcmturner/rpc.v1 => github.com/jcmturner/rpc/v2}/mstypes/rpc_unicode_string.go (100%)
 rename vendor/{gopkg.in/jcmturner/rpc.v1 => github.com/jcmturner/rpc/v2}/mstypes/sid.go (83%)
 rename vendor/{gopkg.in/jcmturner/rpc.v1 => github.com/jcmturner/rpc/v2}/mstypes/user_session_key.go (100%)
 rename vendor/{gopkg.in/jcmturner/rpc.v1 => github.com/jcmturner/rpc/v2}/ndr/arrays.go (100%)
 rename vendor/{gopkg.in/jcmturner/rpc.v1 => github.com/jcmturner/rpc/v2}/ndr/decoder.go (100%)
 rename vendor/{gopkg.in/jcmturner/rpc.v1 => github.com/jcmturner/rpc/v2}/ndr/error.go (100%)
 rename vendor/{gopkg.in/jcmturner/rpc.v1 => github.com/jcmturner/rpc/v2}/ndr/header.go (100%)
 rename vendor/{gopkg.in/jcmturner/rpc.v1 => github.com/jcmturner/rpc/v2}/ndr/pipe.go (100%)
 rename vendor/{gopkg.in/jcmturner/rpc.v1 => github.com/jcmturner/rpc/v2}/ndr/primitives.go (100%)
 rename vendor/{gopkg.in/jcmturner/rpc.v1 => github.com/jcmturner/rpc/v2}/ndr/rawbytes.go (100%)
 rename vendor/{gopkg.in/jcmturner/rpc.v1 => github.com/jcmturner/rpc/v2}/ndr/strings.go (100%)
 rename vendor/{gopkg.in/jcmturner/rpc.v1 => github.com/jcmturner/rpc/v2}/ndr/tags.go (100%)
 rename vendor/{gopkg.in/jcmturner/rpc.v1 => github.com/jcmturner/rpc/v2}/ndr/union.go (100%)
 create mode 100644 vendor/github.com/json-iterator/go/.codecov.yml
 create mode 100644 vendor/github.com/json-iterator/go/.gitignore
 create mode 100644 vendor/github.com/json-iterator/go/.travis.yml
 create mode 100644 vendor/github.com/json-iterator/go/Gopkg.lock
 create mode 100644 vendor/github.com/json-iterator/go/Gopkg.toml
 rename vendor/github.com/{lightstep/tracecontext.go => json-iterator/go}/LICENSE (95%)
 create mode 100644 vendor/github.com/json-iterator/go/README.md
 create mode 100644 vendor/github.com/json-iterator/go/adapter.go
 create mode 100644 vendor/github.com/json-iterator/go/any.go
 create mode 100644 vendor/github.com/json-iterator/go/any_array.go
 create mode 100644 vendor/github.com/json-iterator/go/any_bool.go
 create mode 100644 vendor/github.com/json-iterator/go/any_float.go
 create mode 100644 vendor/github.com/json-iterator/go/any_int32.go
 create mode 100644 vendor/github.com/json-iterator/go/any_int64.go
 create mode 100644 vendor/github.com/json-iterator/go/any_invalid.go
 create mode 100644 vendor/github.com/json-iterator/go/any_nil.go
 create mode 100644 vendor/github.com/json-iterator/go/any_number.go
 create mode 100644 vendor/github.com/json-iterator/go/any_object.go
 create mode 100644 vendor/github.com/json-iterator/go/any_str.go
 create mode 100644 vendor/github.com/json-iterator/go/any_uint32.go
 create mode 100644 vendor/github.com/json-iterator/go/any_uint64.go
 create mode 100644 vendor/github.com/json-iterator/go/build.sh
 create mode 100644 vendor/github.com/json-iterator/go/config.go
 create mode 100644 vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md
 create mode 100644 vendor/github.com/json-iterator/go/iter.go
 create mode 100644 vendor/github.com/json-iterator/go/iter_array.go
 create mode 100644 vendor/github.com/json-iterator/go/iter_float.go
 create mode 100644 vendor/github.com/json-iterator/go/iter_int.go
 create mode 100644 vendor/github.com/json-iterator/go/iter_object.go
 create mode 100644 vendor/github.com/json-iterator/go/iter_skip.go
 create mode 100644 vendor/github.com/json-iterator/go/iter_skip_sloppy.go
 create mode 100644 vendor/github.com/json-iterator/go/iter_skip_strict.go
 create mode 100644 vendor/github.com/json-iterator/go/iter_str.go
 create mode 100644 vendor/github.com/json-iterator/go/jsoniter.go
 create mode 100644 vendor/github.com/json-iterator/go/pool.go
 create mode 100644 vendor/github.com/json-iterator/go/reflect.go
 create mode 100644 vendor/github.com/json-iterator/go/reflect_array.go
 create mode 100644 vendor/github.com/json-iterator/go/reflect_dynamic.go
 create mode 100644 vendor/github.com/json-iterator/go/reflect_extension.go
 create mode 100644 vendor/github.com/json-iterator/go/reflect_json_number.go
 create mode 100644 vendor/github.com/json-iterator/go/reflect_json_raw_message.go
 create mode 100644 vendor/github.com/json-iterator/go/reflect_map.go
 create mode 100644 vendor/github.com/json-iterator/go/reflect_marshaler.go
 create mode 100644 vendor/github.com/json-iterator/go/reflect_native.go
 create mode 100644 vendor/github.com/json-iterator/go/reflect_optional.go
 create mode 100644 vendor/github.com/json-iterator/go/reflect_slice.go
 create mode 100644 vendor/github.com/json-iterator/go/reflect_struct_decoder.go
 create mode 100644 vendor/github.com/json-iterator/go/reflect_struct_encoder.go
 create mode 100644 vendor/github.com/json-iterator/go/stream.go
 create mode 100644 vendor/github.com/json-iterator/go/stream_float.go
 create mode 100644 vendor/github.com/json-iterator/go/stream_int.go
 create mode 100644 vendor/github.com/json-iterator/go/stream_str.go
 create mode 100644 vendor/github.com/json-iterator/go/test.sh
 create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
 create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
 create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_generic.go
 create mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go
 create mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go
 create mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s
 create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go
 create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s
 create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go
 delete mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go
 create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s
 create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go
 create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
 create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
 create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_generic.go
 delete mode 100644 vendor/github.com/lightstep/tracecontext.go/traceparent/package.go
 delete mode 100644 vendor/github.com/lightstep/tracecontext.go/tracestate/package.go
 create mode 100644 vendor/github.com/modern-go/concurrent/.gitignore
 create mode 100644 vendor/github.com/modern-go/concurrent/.travis.yml
 rename vendor/{go.opencensus.io => github.com/modern-go/concurrent}/LICENSE (99%)
 create mode 100644 vendor/github.com/modern-go/concurrent/README.md
 create mode 100644 vendor/github.com/modern-go/concurrent/executor.go
 create mode 100644 vendor/github.com/modern-go/concurrent/go_above_19.go
 create mode 100644 vendor/github.com/modern-go/concurrent/go_below_19.go
 create mode 100644 vendor/github.com/modern-go/concurrent/log.go
 create mode 100644 vendor/github.com/modern-go/concurrent/test.sh
 create mode 100644 vendor/github.com/modern-go/concurrent/unbounded_executor.go
 create mode 100644 vendor/github.com/modern-go/reflect2/.gitignore
 create mode 100644 vendor/github.com/modern-go/reflect2/.travis.yml
 create mode 100644 vendor/github.com/modern-go/reflect2/Gopkg.lock
 create mode 100644 vendor/github.com/modern-go/reflect2/Gopkg.toml
 create mode 100644 vendor/github.com/modern-go/reflect2/LICENSE
 create mode 100644 vendor/github.com/modern-go/reflect2/README.md
 create mode 100644 vendor/github.com/modern-go/reflect2/go_above_118.go
 create mode 100644 vendor/github.com/modern-go/reflect2/go_above_19.go
 create mode 100644 vendor/github.com/modern-go/reflect2/go_below_118.go
 create mode 100644 vendor/github.com/modern-go/reflect2/reflect2.go
 create mode 100644 vendor/github.com/modern-go/reflect2/reflect2_amd64.s
 create mode 100644 vendor/github.com/modern-go/reflect2/reflect2_kind.go
 create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_386.s
 create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_amd64p32.s
 create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_arm.s
 create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_arm64.s
 create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_mips64x.s
 create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_mipsx.s
 create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_ppc64x.s
 create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_s390x.s
 create mode 100644 vendor/github.com/modern-go/reflect2/safe_field.go
 create mode 100644 vendor/github.com/modern-go/reflect2/safe_map.go
 create mode 100644 vendor/github.com/modern-go/reflect2/safe_slice.go
 create mode 100644 vendor/github.com/modern-go/reflect2/safe_struct.go
 create mode 100644 vendor/github.com/modern-go/reflect2/safe_type.go
 create mode 100644 vendor/github.com/modern-go/reflect2/type_map.go
 create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_array.go
 create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_eface.go
 create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_field.go
 create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_iface.go
 create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_link.go
 create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_map.go
 create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_ptr.go
 create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_slice.go
 create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_struct.go
 create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_type.go
 delete mode 100644 vendor/github.com/pierrec/lz4/.travis.yml
 delete mode 100644 vendor/github.com/pierrec/lz4/debug.go
 delete mode 100644 vendor/github.com/pierrec/lz4/debug_stub.go
 delete mode 100644 vendor/github.com/pierrec/lz4/decode_amd64.go
 delete mode 100644 vendor/github.com/pierrec/lz4/decode_other.go
 delete mode 100644 vendor/github.com/pierrec/lz4/errors.go
 delete mode 100644 vendor/github.com/pierrec/lz4/lz4.go
 delete mode 100644 vendor/github.com/pierrec/lz4/lz4_go1.10.go
 delete mode 100644 vendor/github.com/pierrec/lz4/lz4_notgo1.10.go
 delete mode 100644 vendor/github.com/pierrec/lz4/reader.go
 rename vendor/github.com/pierrec/lz4/{ => v4}/.gitignore (96%)
 rename vendor/github.com/pierrec/lz4/{ => v4}/LICENSE (100%)
 rename vendor/github.com/pierrec/lz4/{ => v4}/README.md (83%)
 rename vendor/github.com/pierrec/lz4/{ => v4/internal/lz4block}/block.go (58%)
 create mode 100644 vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go
 rename vendor/github.com/pierrec/lz4/{ => v4/internal/lz4block}/decode_amd64.s (52%)
 create mode 100644 vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm.s
 create mode 100644 vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s
 create mode 100644 vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_asm.go
 create mode 100644 vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go
 create mode 100644 vendor/github.com/pierrec/lz4/v4/internal/lz4errors/errors.go
 create mode 100644 vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go
 create mode 100644 vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame.go
 create mode 100644 vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame_gen.go
 rename vendor/github.com/pierrec/lz4/{ => v4}/internal/xxh32/xxh32zero.go (79%)
 create mode 100644 vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.go
 create mode 100644 vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.s
 create mode 100644 vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_other.go
 create mode 100644 vendor/github.com/pierrec/lz4/v4/lz4.go
 create mode 100644 vendor/github.com/pierrec/lz4/v4/options.go
 create mode 100644 vendor/github.com/pierrec/lz4/v4/options_gen.go
 create mode 100644 vendor/github.com/pierrec/lz4/v4/reader.go
 create mode 100644 vendor/github.com/pierrec/lz4/v4/state.go
 create mode 100644 vendor/github.com/pierrec/lz4/v4/state_gen.go
 create mode 100644 vendor/github.com/pierrec/lz4/v4/writer.go
 delete mode 100644 vendor/github.com/pierrec/lz4/writer.go
 delete mode 100644 vendor/go.opencensus.io/.gitignore
 delete mode 100644 vendor/go.opencensus.io/.travis.yml
 delete mode 100644 vendor/go.opencensus.io/AUTHORS
 delete mode 100644 vendor/go.opencensus.io/CONTRIBUTING.md
 delete mode 100644 vendor/go.opencensus.io/Makefile
 delete mode 100644 vendor/go.opencensus.io/README.md
 delete mode 100644 vendor/go.opencensus.io/appveyor.yml
 delete mode 100644 vendor/go.opencensus.io/internal/internal.go
 delete mode 100644 vendor/go.opencensus.io/internal/sanitize.go
 delete mode 100644 vendor/go.opencensus.io/internal/tagencoding/tagencoding.go
 delete mode 100644 vendor/go.opencensus.io/internal/traceinternals.go
 delete mode 100644 vendor/go.opencensus.io/metric/metricdata/doc.go
 delete mode 100644 vendor/go.opencensus.io/metric/metricdata/exemplar.go
 delete mode 100644 vendor/go.opencensus.io/metric/metricdata/label.go
 delete mode 100644 vendor/go.opencensus.io/metric/metricdata/metric.go
 delete mode 100644 vendor/go.opencensus.io/metric/metricdata/point.go
 delete mode 100644 vendor/go.opencensus.io/metric/metricdata/type_string.go
 delete mode 100644 vendor/go.opencensus.io/metric/metricdata/unit.go
 delete mode 100644 vendor/go.opencensus.io/metric/metricproducer/manager.go
 delete mode 100644 vendor/go.opencensus.io/metric/metricproducer/producer.go
 delete mode 100644 vendor/go.opencensus.io/opencensus.go
 delete mode 100644 vendor/go.opencensus.io/plugin/ochttp/client.go
 delete mode 100644 vendor/go.opencensus.io/plugin/ochttp/client_stats.go
 delete mode 100644 vendor/go.opencensus.io/plugin/ochttp/doc.go
 delete mode 100644 vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go
 delete mode 100644 vendor/go.opencensus.io/plugin/ochttp/propagation/tracecontext/propagation.go
 delete mode 100644 vendor/go.opencensus.io/plugin/ochttp/route.go
 delete mode 100644 vendor/go.opencensus.io/plugin/ochttp/server.go
 delete mode 100644 vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go
 delete mode 100644 vendor/go.opencensus.io/plugin/ochttp/stats.go
 delete mode 100644 vendor/go.opencensus.io/plugin/ochttp/trace.go
 delete mode 100644 vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go
 delete mode 100644 vendor/go.opencensus.io/resource/resource.go
 delete mode 100644 vendor/go.opencensus.io/stats/doc.go
 delete mode 100644 vendor/go.opencensus.io/stats/internal/record.go
 delete mode 100644 vendor/go.opencensus.io/stats/measure.go
 delete mode 100644 vendor/go.opencensus.io/stats/measure_float64.go
 delete mode 100644 vendor/go.opencensus.io/stats/measure_int64.go
 delete mode 100644 vendor/go.opencensus.io/stats/record.go
 delete mode 100644 vendor/go.opencensus.io/stats/units.go
 delete mode 100644 vendor/go.opencensus.io/stats/view/aggregation.go
 delete mode 100644 vendor/go.opencensus.io/stats/view/aggregation_data.go
 delete mode 100644 vendor/go.opencensus.io/stats/view/collector.go
 delete mode 100644 vendor/go.opencensus.io/stats/view/doc.go
 delete mode 100644 vendor/go.opencensus.io/stats/view/export.go
 delete mode 100644 vendor/go.opencensus.io/stats/view/view.go
 delete mode 100644 vendor/go.opencensus.io/stats/view/view_to_metric.go
 delete mode 100644 vendor/go.opencensus.io/stats/view/worker.go
 delete mode 100644 vendor/go.opencensus.io/stats/view/worker_commands.go
 delete mode 100644 vendor/go.opencensus.io/tag/context.go
 delete mode 100644 vendor/go.opencensus.io/tag/doc.go
 delete mode 100644 vendor/go.opencensus.io/tag/key.go
 delete mode 100644 vendor/go.opencensus.io/tag/map.go
 delete mode 100644 vendor/go.opencensus.io/tag/map_codec.go
 delete mode 100644 vendor/go.opencensus.io/tag/metadata.go
 delete mode 100644 vendor/go.opencensus.io/tag/profile_19.go
 delete mode 100644 vendor/go.opencensus.io/tag/profile_not19.go
 delete mode 100644 vendor/go.opencensus.io/tag/validate.go
 delete mode 100644 vendor/go.opencensus.io/trace/basetypes.go
 delete mode 100644 vendor/go.opencensus.io/trace/config.go
 delete mode 100644 vendor/go.opencensus.io/trace/doc.go
 delete mode 100644 vendor/go.opencensus.io/trace/evictedqueue.go
 delete mode 100644 vendor/go.opencensus.io/trace/export.go
 delete mode 100644 vendor/go.opencensus.io/trace/internal/internal.go
 delete mode 100644 vendor/go.opencensus.io/trace/lrumap.go
 delete mode 100644 vendor/go.opencensus.io/trace/propagation/propagation.go
 delete mode 100644 vendor/go.opencensus.io/trace/sampling.go
 delete mode 100644 vendor/go.opencensus.io/trace/spanbucket.go
 delete mode 100644 vendor/go.opencensus.io/trace/spanstore.go
 delete mode 100644 vendor/go.opencensus.io/trace/status_codes.go
 delete mode 100644 vendor/go.opencensus.io/trace/trace.go
 delete mode 100644 vendor/go.opencensus.io/trace/trace_go11.go
 delete mode 100644 vendor/go.opencensus.io/trace/trace_nongo11.go
 delete mode 100644 vendor/go.opencensus.io/trace/tracestate/tracestate.go
 delete mode 100644 vendor/gopkg.in/jcmturner/aescts.v1/.gitignore
 delete mode 100644 vendor/gopkg.in/jcmturner/aescts.v1/README.md
 delete mode 100644 vendor/gopkg.in/jcmturner/dnsutils.v1/.gitignore
 delete mode 100644 vendor/gopkg.in/jcmturner/dnsutils.v1/.travis.yml
 delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/etype/etype.go
 delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v7/types/AuthorizationData.go
 delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v7/types/KerberosFlags.go

diff --git a/go.mod b/go.mod
index 8efc3f459..074060aaf 100644
--- a/go.mod
+++ b/go.mod
@@ -56,7 +56,6 @@ require (
 	github.com/fsnotify/fsnotify v1.4.9 // indirect
 	github.com/go-logr/logr v1.2.4 // indirect
 	github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
-	github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
 	github.com/golang/protobuf v1.5.3 // indirect
 	github.com/golang/snappy v0.0.4 // indirect
 	github.com/google/go-cmp v0.6.0 // indirect
@@ -73,7 +72,6 @@ require (
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/klauspost/compress v1.15.14 // indirect
-	github.com/lightstep/tracecontext.go v0.0.0-20181129014701-1757c391b1ac // indirect
 	github.com/mattn/go-colorable v0.1.12 // indirect
 	github.com/mattn/go-isatty v0.0.14 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
@@ -81,7 +79,6 @@ require (
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe // indirect
 	github.com/nxadm/tail v1.4.8 // indirect
-	github.com/pierrec/lz4 v2.5.2+incompatible // indirect
 	github.com/pierrec/lz4/v4 v4.1.17 // indirect
 	github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 // indirect
 	github.com/prometheus/common v0.44.0 // indirect
@@ -95,7 +92,6 @@ require (
 	github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c // indirect
 	github.com/xdg/stringprep v1.0.0 // indirect
 	github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect
-	go.opencensus.io v0.22.4 // indirect
 	go.uber.org/atomic v1.7.0 // indirect
 	go.uber.org/dig v1.17.0 // indirect
 	go.uber.org/multierr v1.6.0 // indirect
@@ -110,10 +106,6 @@ require (
 	google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect
 	google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect
 	google.golang.org/protobuf v1.31.0 // indirect
-	gopkg.in/jcmturner/aescts.v1 v1.0.1 // indirect
-	gopkg.in/jcmturner/dnsutils.v1 v1.0.1 // indirect
-	gopkg.in/jcmturner/gokrb5.v7 v7.5.0 // indirect
-	gopkg.in/jcmturner/rpc.v1 v1.1.0 // indirect
 	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
diff --git a/go.sum b/go.sum
index cfe2cd5be..b67f2e864 100644
--- a/go.sum
+++ b/go.sum
@@ -2,18 +2,17 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
 github.com/Shopify/sarama v1.25.0/go.mod h1:y/CFFTO9eaMTNriwu/Q+W4eioLqiDMGkA1W+gmdfj8w=
-github.com/Shopify/sarama v1.27.0 h1:tqo2zmyzPf1+gwTTwhI6W+EXDw4PVSczynpHKFtVAmo=
 github.com/Shopify/sarama v1.27.0/go.mod h1:aCdj6ymI8uyPEux1JJ9gcaDT6cinjGhNCAhs54taSUo=
 github.com/Shopify/sarama v1.38.1 h1:lqqPUPQZ7zPqYlWpTh+LQ9bhYNu2xJL6k1SJN4WVe2A=
 github.com/Shopify/sarama v1.38.1/go.mod h1:iwv9a67Ha8VNa+TifujYoWGxWnu2kNVAQdSdZ4X2o5g=
 github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc=
 github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
+github.com/Shopify/toxiproxy/v2 v2.5.0 h1:i4LPT+qrSlKNtQf5QliVjdP08GyAH8+BUIc9gT0eahc=
+github.com/Shopify/toxiproxy/v2 v2.5.0/go.mod h1:yhM2epWtAmel9CB8r2+L+PCmhH6yH2pITaPAo7jxJl0=
 github.com/ant0ine/go-json-rest
v3.3.2+incompatible h1:nBixrkLFiDNAW0hauKDLc8yJI6XfrQumWvytE1Hk14E= github.com/ant0ine/go-json-rest v3.3.2+incompatible/go.mod h1:q6aCt0GfU6LhpBsnZ/2U+mwe+0XB5WStbmwyoPfc+sk= github.com/avast/retry-go v3.0.0+incompatible h1:4SOWQ7Qs+oroOTQOYnAHqelpCO0biHSxpiH9JdtuBj0= github.com/avast/retry-go v3.0.0+incompatible/go.mod h1:XtSnn+n/sHqQIpZ10K1qAevBhOOCWBLXXy3hyiqqBrY= -github.com/aws/aws-sdk-go v1.46.2 h1:XZbOmjtN1VCfEtQq7QNFsbxIqO+bB+bRhiOBjp6AzWc= -github.com/aws/aws-sdk-go v1.46.2/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-sdk-go v1.46.3 h1:zcrCu14ANOji6m38bUTxYdPqne4EXIvJQ2KXZ5oi9k0= github.com/aws/aws-sdk-go v1.46.3/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= @@ -28,16 +27,13 @@ github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWR github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudevents/sdk-go/protocol/kafka_sarama/v2 v2.2.0 h1:PB+tnuauZWE2RnfDJnyv8wuLc1tkWk/p8mYiy2xzdvQ= github.com/cloudevents/sdk-go/protocol/kafka_sarama/v2 v2.2.0/go.mod h1:XbBXL/a5TGNGs5N4UreDCIK7F71MNrXURJeVXury+XY= github.com/cloudevents/sdk-go/protocol/kafka_sarama/v2 v2.14.0 h1:1MCVOxNZySIYOWMI1+6Z7YR0PK3AmDi/Fklk1KdFIv8= github.com/cloudevents/sdk-go/protocol/kafka_sarama/v2 v2.14.0/go.mod h1:/B8nchIwQlr00jtE9bR0aoKaag7bO67xPM7r1DXCH4I= github.com/cloudevents/sdk-go/v2 v2.0.0/go.mod h1:3CTrpB4+u7Iaj6fd7E2Xvm5IxMdRoaAhqaRVnOr2rCU= -github.com/cloudevents/sdk-go/v2 v2.2.0 h1:FlBJg7W0QywbOjuZGmRXUyFk8qkCHx2euETp+tuopSU= github.com/cloudevents/sdk-go/v2 v2.2.0/go.mod h1:3CTrpB4+u7Iaj6fd7E2Xvm5IxMdRoaAhqaRVnOr2rCU= github.com/cloudevents/sdk-go/v2 v2.14.0 h1:Nrob4FwVgi5L4tV9lhjzZcjYqFVyJzsA56CwPaPfv6s= github.com/cloudevents/sdk-go/v2 v2.14.0/go.mod h1:xDmKfzNjM8gBvjaF8ijFjM1VYOVUEeUfapHMUX1T5To= -github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= @@ -59,11 +55,9 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm github.com/dvsekhvalnov/jose2go v1.5.0 h1:3j8ya4Z4kMCwT5nXIKFSV84YS+HdqSSO0VsTQxaLAeM= github.com/dvsekhvalnov/jose2go v1.5.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q= github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-resiliency v1.3.0 h1:RRL0nge+cWGlxXbUzJ7yMcq6w2XBEr19dCN6HECGaT0= github.com/eapache/go-resiliency v1.3.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/go-xerial-snappy 
v0.0.0-20230111030713-bf00bc1b83b6 h1:8yY/I9ndfrgrXUbOGObLHKBR4Fl3nZXwM2c7OYTT8hM= github.com/eapache/go-xerial-snappy v0.0.0-20230111030713-bf00bc1b83b6/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0= @@ -74,7 +68,6 @@ github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/frankban/quicktest v1.4.1/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ= -github.com/frankban/quicktest v1.10.0 h1:Gfh+GAJZOAoKZsIZeZbdn2JF10kN1XHNvjsvQK8gVkE= github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= @@ -112,9 +105,6 @@ github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzq github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= @@ -133,7 +123,6 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -165,7 +154,6 @@ github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brv github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= @@ -178,10 +166,10 @@ github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90 github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= 
github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= -github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg= github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo= +github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= github.com/jcmturner/gokrb5/v8 v8.4.3 h1:iTonLeSJOn7MVUtyMT+arAn5AKAPrkilzhGw8wE/Tq8= github.com/jcmturner/gokrb5/v8 v8.4.3/go.mod h1:dqRwJGXznQrzw6cWmyo6kH+E7jksEQG/CyVWsJEsJO0= @@ -199,7 +187,6 @@ github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dv github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.10.10/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.14 h1:i7WCKDToww0wA+9qrUZ1xOjp218vfFo3nTU6UHp+gOc= github.com/klauspost/compress v1.15.14/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= @@ -230,7 +217,6 @@ github.com/lestrrat-go/iter v1.0.2/go.mod h1:Momfcq3AnRlRjI5b5O8/G5/BvpzrhoFTZcn github.com/lestrrat-go/jwx v1.2.7/go.mod h1:bw24IXWbavc0R2RsOtpXL7RtMyP589yZ1+L7kd09ZGA= github.com/lestrrat-go/jwx v1.2.24/go.mod h1:zoNuZymNl5lgdcu6P7K6ie2QRll5HVfF4xwxBBK1NxY= github.com/lestrrat-go/option v1.0.0/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I= -github.com/lightstep/tracecontext.go v0.0.0-20181129014701-1757c391b1ac h1:+2b6iGRJe3hvV/yVXrd41yVEjxuFHxasJqDhkIjS4gk= github.com/lightstep/tracecontext.go v0.0.0-20181129014701-1757c391b1ac/go.mod h1:Frd2bnT3w5FB5q49ENTfVlztJES+1k/7lyWX2+9gq/M= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -282,7 +268,6 @@ github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1y github.com/onsi/gomega v1.28.1 h1:MijcGUbfYuznzK/5R4CPNoUP/9Xvuo20sXfEm6XxoTA= github.com/onsi/gomega v1.28.1/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= github.com/pierrec/lz4 v2.2.6+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI= github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4/v4 v4.1.17 h1:kV4Ip+/hUBC+8T6+2EgburRtkE9ef4nbY3f4dFhGjMc= github.com/pierrec/lz4/v4 v4.1.17/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= @@ -302,7 +287,6 @@ github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/ github.com/radovskyb/watcher v1.0.7 h1:AYePLih6dpmS32vlHfhCeli8127LzkIgwJGcwwe8tUE= github.com/radovskyb/watcher v1.0.7/go.mod h1:78okwvY5wPdzcb1UYnip1pvrZNIVEIh/Cm+ZuvsUYIg= 
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= @@ -366,8 +350,6 @@ github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5t go.mongodb.org/mongo-driver v1.12.1 h1:nLkghSU8fQNaK7oUmDhQFsnrtcoNy7Z6LVFKsEecqgE= go.mongodb.org/mongo-driver v1.12.1/go.mod h1:/rGBTebI3XYboVmgz+Wv3Bcbl3aD0QF9zl6kDDw18rQ= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.4 h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -510,6 +492,7 @@ golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220411224347-583f2d630306 h1:+gHMid33q6pen7kv9xvT+JRinntgeXO2AeZVd0AWD3w= golang.org/x/time v0.0.0-20220411224347-583f2d630306/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -566,16 +549,11 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE= -gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw= gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= -gopkg.in/jcmturner/dnsutils.v1 v1.0.1 h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM= gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q= -gopkg.in/jcmturner/goidentity.v3 v3.0.0 h1:1duIyWiTaYvVx3YX2CYtpJbUFd7/UuPYCfgXtQ3VTbI= gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4= gopkg.in/jcmturner/gokrb5.v7 v7.2.3/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= -gopkg.in/jcmturner/gokrb5.v7 v7.5.0 h1:a9tsXlIDD9SKxotJMK3niV7rPZAJeX2aD/0yg3qlIrg= gopkg.in/jcmturner/gokrb5.v7 v7.5.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= -gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU= gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= gopkg.in/tomb.v1 
v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= diff --git a/vendor/github.com/Shopify/sarama/.gitignore b/vendor/github.com/Shopify/sarama/.gitignore index 2c9adc20b..f88795748 100644 --- a/vendor/github.com/Shopify/sarama/.gitignore +++ b/vendor/github.com/Shopify/sarama/.gitignore @@ -23,7 +23,9 @@ _testmain.go *.exe -coverage.txt -profile.out +/bin +/coverage.txt +/profile.out +/output.json -simplest-uncommitted-msg-0.1-jar-with-dependencies.jar +.idea diff --git a/vendor/github.com/Shopify/sarama/.golangci.yml b/vendor/github.com/Shopify/sarama/.golangci.yml index ce2b5230d..0b419abbf 100644 --- a/vendor/github.com/Shopify/sarama/.golangci.yml +++ b/vendor/github.com/Shopify/sarama/.golangci.yml @@ -23,13 +23,18 @@ linters-settings: gocritic: enabled-tags: - diagnostic - - experimental - - opinionated - - performance - - style + # - experimental + # - opinionated + # - performance + # - style disabled-checks: - - wrapperFunc + - assignOp + - appendAssign + - commentedOutCode + - ifElseChain + - singleCaseSwitch + - sloppyReassign + - wrapperFunc funlen: lines: 300 statements: 300 @@ -40,11 +45,15 @@ linters: - bodyclose - deadcode - depguard + - exportloopref - dogsled # - dupl - errcheck + - errorlint - funlen - # - gocritic + - gochecknoinits + # - goconst + - gocritic - gocyclo - gofmt - goimports @@ -53,11 +62,12 @@ linters: # - gosimple - govet # - ineffassign - - interfacer - # - misspell + - misspell # - nakedret + - nilerr + # - paralleltest # - scopelint - # - staticcheck + - staticcheck - structcheck # - stylecheck - typecheck @@ -65,13 +75,14 @@ linters: - unused - varcheck - whitespace - # - goconst - - gochecknoinits issues: exclude: - - consider giving a name to these results - - include an explanation for nolint directive - - Potential Integer overflow made by strconv.Atoi result conversion to int16/32 - - Use of weak random number generator - - TLS MinVersion too low + - "G404: Use of weak random number generator" + exclude-rules: + # exclude some linters from running on certain files. + - path: functional.*_test\.go + linters: + - paralleltest + # maximum count of issues with the same text. set to 0 for unlimited. default is 3.
+ max-same-issues: 0 diff --git a/vendor/github.com/Shopify/sarama/CHANGELOG.md b/vendor/github.com/Shopify/sarama/CHANGELOG.md index 53204b9de..c2f92ec9a 100644 --- a/vendor/github.com/Shopify/sarama/CHANGELOG.md +++ b/vendor/github.com/Shopify/sarama/CHANGELOG.md @@ -1,10 +1,218 @@ # Changelog -#### Unreleased +## Version 1.31.1 (2022-02-01) + +- #2126 - @bai - Populate missing kafka versions +- #2124 - @bai - Add Kafka 3.1.0 to CI matrix, migrate to bitnami kafka image +- #2123 - @bai - Update klauspost/compress to 0.14 +- #2122 - @dnwe - fix(test): make it simpler to re-use toxiproxy +- #2119 - @bai - Add Kafka 3.1.0 version number +- #2005 - @raulnegreiros - feat: add methods to pause/resume consumer's consumption +- #2051 - @seveas - Expose the TLS connection state of a broker connection +- #2117 - @wuhuizuo - feat: add method MockApiVersionsResponse.SetApiKeys +- #2110 - @dnwe - fix: ensure heartbeats only stop after cleanup +- #2113 - @mosceo - Fix typo + +## Version 1.31.0 (2022-01-18) + +## What's Changed +### :tada: New Features / Improvements +* feat: expose IncrementalAlterConfigs API in admin.go by @fengyinqiao in https://github.com/Shopify/sarama/pull/2088 +* feat: allow AsyncProducer to have MaxOpenRequests inflight produce requests per broker by @xujianhai666 in https://github.com/Shopify/sarama/pull/1686 +* Support request pipelining in AsyncProducer by @slaunay in https://github.com/Shopify/sarama/pull/2094 +### :bug: Fixes +* fix(test): add fluent interface for mocks where missing by @grongor in https://github.com/Shopify/sarama/pull/2080 +* fix(test): test for ConsumePartition with OffsetOldest by @grongor in https://github.com/Shopify/sarama/pull/2081 +* fix: set HWMO during creation of partitionConsumer (fix incorrect HWMO before first fetch) by @grongor in https://github.com/Shopify/sarama/pull/2082 +* fix: ignore non-nil but empty error strings in Describe/Alter client quotas responses by @agriffaut in https://github.com/Shopify/sarama/pull/2096 +* fix: skip over KIP-482 tagged fields by @dnwe in https://github.com/Shopify/sarama/pull/2107 +* fix: clear preferredReadReplica if broker shutdown by @dnwe in https://github.com/Shopify/sarama/pull/2108 +* fix(test): correct wrong offsets in mock Consumer by @grongor in https://github.com/Shopify/sarama/pull/2078 +* fix: correct bugs in DescribeGroupsResponse by @dnwe in https://github.com/Shopify/sarama/pull/2111 +### :wrench: Maintenance +* chore: bump runtime and test dependencies by @dnwe in https://github.com/Shopify/sarama/pull/2100 +### :memo: Documentation +* docs: refresh README.md for Kafka 3.0.0 by @dnwe in https://github.com/Shopify/sarama/pull/2099 +### :heavy_plus_sign: Other Changes +* Fix typo by @mosceo in https://github.com/Shopify/sarama/pull/2084 + +## New Contributors +* @grongor made their first contribution in https://github.com/Shopify/sarama/pull/2080 +* @fengyinqiao made their first contribution in https://github.com/Shopify/sarama/pull/2088 +* @xujianhai666 made their first contribution in https://github.com/Shopify/sarama/pull/1686 +* @mosceo made their first contribution in https://github.com/Shopify/sarama/pull/2084 + +**Full Changelog**: https://github.com/Shopify/sarama/compare/v1.30.1...v1.31.0 + +## Version 1.30.1 (2021-12-04) + +## What's Changed +### :tada: New Features / Improvements +* feat(zstd): pass level param through to compress/zstd encoder by @lizthegrey in https://github.com/Shopify/sarama/pull/2045 +### :bug: Fixes +* fix: set min-go-version to 1.16 by @troyanov in 
https://github.com/Shopify/sarama/pull/2048 +* logger: fix debug logs' formatting directives by @utrack in https://github.com/Shopify/sarama/pull/2054 +* fix: stuck on the batch with zero records length by @pachmu in https://github.com/Shopify/sarama/pull/2057 +* fix: only update preferredReadReplica if valid by @dnwe in https://github.com/Shopify/sarama/pull/2076 +### :wrench: Maintenance +* chore: add release notes configuration by @dnwe in https://github.com/Shopify/sarama/pull/2046 +* chore: confluent platform version bump by @lizthegrey in https://github.com/Shopify/sarama/pull/2070 + +## Notes +* ℹ️ from Sarama 1.30.x onward the minimum version of Go toolchain required is 1.16.x + +## New Contributors +* @troyanov made their first contribution in https://github.com/Shopify/sarama/pull/2048 +* @lizthegrey made their first contribution in https://github.com/Shopify/sarama/pull/2045 +* @utrack made their first contribution in https://github.com/Shopify/sarama/pull/2054 +* @pachmu made their first contribution in https://github.com/Shopify/sarama/pull/2057 + +**Full Changelog**: https://github.com/Shopify/sarama/compare/v1.30.0...v1.30.1 + +## Version 1.30.0 (2021-09-29) + +⚠️ This release has been superseded by v1.30.1 and should _not_ be used. + +**regression**: enabling rackawareness causes severe throughput drops (#2071) — fixed in v1.30.1 via #2076 + +--- + +ℹ️ **Note: from Sarama 1.30.0 the minimum version of Go toolchain required is 1.16.x** + +--- + +# New Features / Improvements + +- #1983 - @zifengyu - allow configure AllowAutoTopicCreation argument in metadata refresh +- #2000 - @matzew - Using xdg-go module for SCRAM +- #2003 - @gdm85 - feat: add counter metrics for consumer group join/sync and their failures +- #1992 - @zhaomoran - feat: support SaslHandshakeRequest v0 for SCRAM +- #2006 - @faillefer - Add support for DeleteOffsets operation +- #1909 - @agriffaut - KIP-546 Client quota APIs +- #1633 - @aldelucca1 - feat: allow balance strategies to provide initial state +- #1275 - @dnwe - log: add a DebugLogger that proxies to Logger +- #2018 - @dnwe - feat: use DebugLogger reference for goldenpath log +- #2019 - @dnwe - feat: add logging & a metric for producer throttle +- #2023 - @dnwe - feat: add Controller() to ClusterAdmin interface +- #2025 - @dnwe - feat: support ApiVersionsRequest V3 protocol +- #2028 - @dnwe - feat: send ApiVersionsRequest on broker open +- #2034 - @bai - Add support for kafka 3.0.0 -#### Version 1.27.0 (2020-08-11) +# Fixes + +- #1990 - @doxsch - fix: correctly pass ValidateOnly through to CreatePartitionsRequest +- #1988 - @LubergAlexander - fix: correct WithCustomFallbackPartitioner implementation +- #2001 - @HurSungYun - docs: inform AsyncProducer Close pitfalls +- #1973 - @qiangmzsx - fix: metrics still taking up too much memory when metrics.UseNilMetrics=true +- #2007 - @bai - Add support for Go 1.17 +- #2009 - @dnwe - fix: enable nilerr linter and fix iferr checks +- #2010 - @dnwe - chore: enable exportloopref and misspell linters +- #2013 - @faillefer - fix(test): disable encoded response/request check when map contains multiple elements +- #2015 - @bai - Change default branch to main +- #1718 - @crivera-fastly - fix: correct the error handling in client.InitProducerID() +- #1984 - @null-sleep - fix(test): bump confluentPlatformVersion from 6.1.1 to 6.2.0 +- #2016 - @dnwe - chore: replace deprecated Go calls +- #2017 - @dnwe - chore: delete legacy vagrant script +- #2020 - @dnwe - fix(test): remove testLogger from TrackLeader test +- #2024 - 
@dnwe - chore: bump toxiproxy container to v2.1.5 +- #2033 - @bai - Update dependencies +- #2031 - @gdm85 - docs: do not mention buffered messages in sync producer Close method +- #2035 - @dnwe - chore: populate the missing kafka versions +- #2038 - @dnwe - feat: add a fuzzing workflow to github actions + +## New Contributors +* @zifengyu made their first contribution in https://github.com/Shopify/sarama/pull/1983 +* @doxsch made their first contribution in https://github.com/Shopify/sarama/pull/1990 +* @LubergAlexander made their first contribution in https://github.com/Shopify/sarama/pull/1988 +* @HurSungYun made their first contribution in https://github.com/Shopify/sarama/pull/2001 +* @gdm85 made their first contribution in https://github.com/Shopify/sarama/pull/2003 +* @qiangmzsx made their first contribution in https://github.com/Shopify/sarama/pull/1973 +* @zhaomoran made their first contribution in https://github.com/Shopify/sarama/pull/1992 +* @faillefer made their first contribution in https://github.com/Shopify/sarama/pull/2006 +* @crivera-fastly made their first contribution in https://github.com/Shopify/sarama/pull/1718 +* @null-sleep made their first contribution in https://github.com/Shopify/sarama/pull/1984 + +**Full Changelog**: https://github.com/Shopify/sarama/compare/v1.29.1...v1.30.0 + +## Version 1.29.1 (2021-06-24) + +# New Features / Improvements + +- #1966 - @ajanikow - KIP-339: Add Incremental Config updates API +- #1964 - @ajanikow - Add DelegationToken ResourceType + +# Fixes + +- #1962 - @hanxiaolin - fix(consumer): call interceptors when MaxProcessingTime expire +- #1971 - @KerryJava - fix kafka-producer-performance throughput panic +- #1968 - @dnwe - chore: bump golang.org/x versions +- #1956 - @joewreschnig - Allow checking the entire `ProducerMessage` in the mock producers +- #1963 - @dnwe - fix: ensure backoff timer is re-used +- #1949 - @dnwe - fix: explicitly use uint64 for payload length + +## Version 1.29.0 (2021-05-07) + +### New Features / Improvements + +- #1917 - @arkady-emelyanov - KIP-554: Add Broker-side SCRAM Config API +- #1869 - @wyndhblb - zstd: encode+decode performance improvements +- #1541 - @izolight - add String, (Un)MarshalText for acl types. +- #1921 - @bai - Add support for Kafka 2.8.0 + +### Fixes +- #1936 - @dnwe - fix(consumer): follow preferred broker +- #1933 - @ozzieba - Use gofork for encoding/asn1 to fix ASN errors during Kerberos authentication +- #1929 - @celrenheit - Handle isolation level in Offset(Request|Response) and require stable offset in FetchOffset(Request|Response) +- #1926 - @dnwe - fix: correct initial CodeQL findings +- #1925 - @bai - Test out CodeQL +- #1923 - @bestgopher - Remove redundant switch-case, fix doc typos +- #1922 - @bai - Update go dependencies +- #1898 - @mmaslankaprv - Parsing only known control batches value +- #1887 - @withshubh - Fix: issues affecting code quality + +## Version 1.28.0 (2021-02-15) + +**Note that with this release we change `RoundRobinBalancer` strategy to match Java client behavior. 
See #1788 for details.** + +- #1870 - @kvch - Update Kerberos library to latest major +- #1876 - @bai - Update docs, reference pkg.go.dev +- #1846 - @wclaeys - Do not ignore Consumer.Offsets.AutoCommit.Enable config on Close +- #1747 - @XSAM - fix: mock sync producer does not handle the offset while sending messages +- #1863 - @bai - Add support for Kafka 2.7.0 + update lz4 and klauspost/compress dependencies +- #1788 - @kzinglzy - feat[balance_strategy]: announcing a new round robin balance strategy +- #1862 - @bai - Fix CI setenv permissions issues +- #1832 - @ilyakaznacheev - Update Godoc link to pkg.go.dev +- #1822 - @danp - KIP-392: Allow consumers to fetch from closest replica + +## Version 1.27.2 (2020-10-21) + +### Improvements -# Improvements +#1750 - @krantideep95 Adds missing mock responses for mocking consumer group + +## Fixes + +#1817 - reverts #1785 - Add private method to Client interface to prevent implementation + +## Version 1.27.1 (2020-10-07) + +### Improvements + +#1775 - @d1egoaz - Adds a Producer Interceptor example +#1781 - @justin-chen - Refresh brokers given list of seed brokers +#1784 - @justin-chen - Add randomize seed broker method +#1790 - @d1egoaz - remove example binary +#1798 - @bai - Test against Go 1.15 +#1785 - @justin-chen - Add private method to Client interface to prevent implementation +#1802 - @uvw - Support Go 1.13 error unwrapping + +## Fixes + +#1791 - @stanislavkozlovski - bump default version to 1.0.0 + +## Version 1.27.0 (2020-08-11) + +### Improvements #1466 - @rubenvp8510 - Expose kerberos fast negotiation configuration #1695 - @KJTsanaktsidis - Use docker-compose to run the functional tests @@ -18,41 +226,41 @@ #1763 - @alrs - remove deprecated tls options from test #1769 - @bai - Add support for Kafka 2.6.0 -# Fixes +## Fixes #1697 - @kvch - Use gofork for encoding/asn1 to fix ASN errors during Kerberos authentication #1744 - @alrs - Fix isBalanced Function Signature -#### Version 1.26.4 (2020-05-19) +## Version 1.26.4 (2020-05-19) -# Fixes +## Fixes - #1701 - @d1egoaz - Set server name only for the current broker - #1694 - @dnwe - testfix: set KAFKA_HEAP_OPTS for zk and kafka -#### Version 1.26.3 (2020-05-07) +## Version 1.26.3 (2020-05-07) -# Fixes +## Fixes - #1692 - @d1egoaz - Set tls ServerName to fix issue: either ServerName or InsecureSkipVerify must be specified in the tls.Config -#### Version 1.26.2 (2020-05-06) +## Version 1.26.2 (2020-05-06) -# ⚠️ Known Issues +## ⚠️ Known Issues This release has been marked as not ready for production and may be unstable, please use v1.26.4. 
-# Improvements +### Improvements - #1560 - @iyacontrol - add sync pool for gzip 1-9 - #1605 - @dnwe - feat: protocol support for V11 fetch w/ rackID -- #1617 - @sladkoff / @dwi-di / @random-dwi - Add support for alter/list partition reassignements APIs +- #1617 - @sladkoff / @dwi-di / @random-dwi - Add support for alter/list partition reassignments APIs - #1632 - @bai - Add support for Go 1.14 - #1640 - @random-dwi - Feature/fix list partition reassignments - #1646 - @mimaison - Add DescribeLogDirs to admin client - #1667 - @bai - Add support for kafka 2.5.0 -# Fixes +## Fixes - #1594 - @sladkoff - Sets ConfigEntry.Default flag in addition to the ConfigEntry.Source for Kafka versions > V1_1_0_0 - #1601 - @alrs - fix: remove use of testing.T.FailNow() inside goroutine @@ -69,7 +277,7 @@ This release has been marked as not ready for production and may be unstable, pl - #1661 - @KJTsanaktsidis - Fix "broker received out of order sequence" when brokers die - #1666 - @KevinJCross - Bugfix: Allow TLS connections to work over socks proxy. -#### Version 1.26.1 (2020-02-04) +## Version 1.26.1 (2020-02-04) Improvements: - Add requests-in-flight metric ([1539](https://github.com/Shopify/sarama/pull/1539)) @@ -81,7 +289,7 @@ Bug Fixes: - Adds back Consumer.Offsets.CommitInterval to fix API ([1590](https://github.com/Shopify/sarama/pull/1590)) - Fix error message s/CommitInterval/AutoCommit.Interval ([1589](https://github.com/Shopify/sarama/pull/1589)) -#### Version 1.26.0 (2020-01-24) +## Version 1.26.0 (2020-01-24) New Features: - Enable zstd compression @@ -112,7 +320,7 @@ Bug Fixes: - Retry topic request on ControllerNotAvailable ([1586](https://github.com/Shopify/sarama/pull/1586)). -#### Version 1.25.0 (2020-01-13) +## Version 1.25.0 (2020-01-13) New Features: - Support TLS protocol in kafka-producer-performance @@ -136,7 +344,7 @@ Bug Fixes: - Fix possible faulty metrics in TestFuncProducing ([1545](https://github.com/Shopify/sarama/pull/1545)). -#### Version 1.24.1 (2019-10-31) +## Version 1.24.1 (2019-10-31) New Features: - Add DescribeLogDirs Request/Response pair @@ -150,7 +358,7 @@ Bug Fixes: - Ensure consistent use of read/write deadlines ([1529](https://github.com/Shopify/sarama/pull/1529)). -#### Version 1.24.0 (2019-10-09) +## Version 1.24.0 (2019-10-09) New Features: - Add sticky partition assignor @@ -192,7 +400,7 @@ Known Issues: - Please **don't** use Zstd, as it doesn't work right now. See https://github.com/Shopify/sarama/issues/1252 -#### Version 1.23.1 (2019-07-22) +## Version 1.23.1 (2019-07-22) Bug Fixes: - Fix fetch delete bug record @@ -200,7 +408,7 @@ Bug Fixes: - Handle SASL/OAUTHBEARER token rejection ([1428](https://github.com/Shopify/sarama/pull/1428)). -#### Version 1.23.0 (2019-07-02) +## Version 1.23.0 (2019-07-02) New Features: - Add support for Kafka 2.3.0 @@ -236,7 +444,7 @@ Bug Fixes: - Refactor misspelled word Resouce to Resource ([1368](https://github.com/Shopify/sarama/pull/1368)). -#### Version 1.22.1 (2019-04-29) +## Version 1.22.1 (2019-04-29) Improvements: - Use zstd 1.3.8 @@ -252,7 +460,7 @@ Bug Fixes: - Fix AllowAutoTopicCreation for MetadataRequest greater than v3 ([1344](https://github.com/Shopify/sarama/pull/1344)). -#### Version 1.22.0 (2019-04-09) +## Version 1.22.0 (2019-04-09) New Features: - Add Offline Replicas Operation to Client @@ -296,7 +504,7 @@ Bug Fixes: - Fix rate condition in PartitionConsumer ([1156](https://github.com/Shopify/sarama/pull/1156)).
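For readers of this changelog section: the zstd support that v1.26.0 enables above is opt-in through the producer config, and zstd needs brokers on Kafka 2.1.0 or newer. A minimal sketch (the version constant and helper name are illustrative, not part of the patch):

```go
package main

import "github.com/Shopify/sarama"

// newZstdConfig sketches how the zstd compression enabled in v1.26.0
// is switched on. Sarama rejects zstd unless config.Version is at
// least V2_1_0_0, matching the Kafka release that introduced zstd.
func newZstdConfig() *sarama.Config {
	config := sarama.NewConfig()
	config.Version = sarama.V2_1_0_0 // illustrative; match your brokers
	config.Producer.Compression = sarama.CompressionZSTD
	return config
}
```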
-#### Version 1.21.0 (2019-02-24) +## Version 1.21.0 (2019-02-24) New Features: - Add CreateAclRequest, DescribeAclRequest, DeleteAclRequest @@ -332,7 +540,7 @@ Bug Fixes: - Update kafka test version ([1273](https://github.com/Shopify/sarama/pull/1273)). -#### Version 1.20.1 (2019-01-10) +## Version 1.20.1 (2019-01-10) New Features: - Add optional replica id in offset request @@ -356,7 +564,7 @@ Bug Fixes: - Respect MaxMessageBytes limit for uncompressed messages ([1141](https://github.com/Shopify/sarama/pull/1141)). -#### Version 1.20.0 (2018-12-10) +## Version 1.20.0 (2018-12-10) New Features: - Add support for zstd compression @@ -386,7 +594,7 @@ Bug Fixes: - Fix typos in code and comments ([#1228](https://github.com/Shopify/sarama/pull/1228)). -#### Version 1.19.0 (2018-09-27) +## Version 1.19.0 (2018-09-27) New Features: - Implement a higher-level consumer group @@ -402,7 +610,7 @@ Bug Fixes: - Fix race condition in mock async producer ([#1174](https://github.com/Shopify/sarama/pull/1174)). -#### Version 1.18.0 (2018-09-07) +## Version 1.18.0 (2018-09-07) New Features: - Make `Partitioner.RequiresConsistency` vary per-message @@ -438,7 +646,7 @@ Bug Fixes: - Fix producer topic metadata on-demand fetch when topic error happens in metadata response ([#1125](https://github.com/Shopify/sarama/pull/1125)). -#### Version 1.17.0 (2018-05-30) +## Version 1.17.0 (2018-05-30) New Features: - Add support for gzip compression levels @@ -476,7 +684,7 @@ Bug Fixes: - Make `PartitionConsumer.Close` idempotent ([#1092](https://github.com/Shopify/sarama/pull/1092)). -#### Version 1.16.0 (2018-02-12) +## Version 1.16.0 (2018-02-12) New Features: - Add support for the Create/Delete Topics request/response pairs @@ -518,7 +726,7 @@ Bug Fixes: - Fix expectation-checking in the mock of `SyncProducer.SendMessages` ([#1035](https://github.com/Shopify/sarama/pull/1035)). -#### Version 1.15.0 (2017-12-08) +## Version 1.15.0 (2017-12-08) New Features: - Claim official support for Kafka 1.0, though it did already work @@ -544,7 +752,7 @@ Bug Fixes: - Fix leaking metrics when closing brokers ([#991](https://github.com/Shopify/sarama/pull/991)). -#### Version 1.14.0 (2017-11-13) +## Version 1.14.0 (2017-11-13) New Features: - Add support for the new Kafka 0.11 record-batch format, including the wire @@ -561,7 +769,7 @@ Bug Fixes: - Return partial replicas list when we have it ([#975](https://github.com/Shopify/sarama/pull/975)). -#### Version 1.13.0 (2017-10-04) +## Version 1.13.0 (2017-10-04) New Features: - Support for FetchRequest version 3 @@ -590,7 +798,7 @@ Bug Fixes: - Raise a proper error when encountering an unknown message version ([#940](https://github.com/Shopify/sarama/pull/940)). -#### Version 1.12.0 (2017-05-08) +## Version 1.12.0 (2017-05-08) New Features: - Added support for the `ApiVersions` request and response pair, and Kafka @@ -624,7 +832,7 @@ Bug Fixes: - Fix an alignment-related issue with atomics on 32-bit architectures ([#859](https://github.com/Shopify/sarama/pull/859)). -#### Version 1.11.0 (2016-12-20) +## Version 1.11.0 (2016-12-20) _Important:_ As of Sarama 1.11 it is necessary to set the config value of `Producer.Return.Successes` to true in order to use the SyncProducer. Previous @@ -656,7 +864,7 @@ Bug Fixes: - Fix crash on SASL initialization failure ([#795](https://github.com/Shopify/sarama/pull/795)). 
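The `Producer.Return.Successes` requirement called out in the 1.11.0 note above looks like this in practice. A minimal sketch, assuming a local broker at localhost:9092 and a topic named example-topic (both illustrative):

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	// As of Sarama 1.11 the SyncProducer requires successes to be
	// returned, since it waits on them to report the final offset.
	config := sarama.NewConfig()
	config.Producer.Return.Successes = true

	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, config)
	if err != nil {
		log.Fatalln(err)
	}
	defer producer.Close()

	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "example-topic",
		Value: sarama.StringEncoder("hello"),
	})
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("stored on partition %d at offset %d", partition, offset)
}
```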
-#### Version 1.10.1 (2016-08-30) +## Version 1.10.1 (2016-08-30) Bug Fixes: - Fix the documentation for `HashPartitioner` which was incorrect @@ -671,7 +879,7 @@ Bug Fixes: - Handle consuming compressed relative offsets with Kafka 0.10 ([#735](https://github.com/Shopify/sarama/pull/735)). -#### Version 1.10.0 (2016-08-02) +## Version 1.10.0 (2016-08-02) _Important:_ As of Sarama 1.10 it is necessary to tell Sarama the version of Kafka you are running against (via the `config.Version` value) in order to use @@ -720,7 +928,7 @@ Bug Fixes: - Fix possible negative partition value from the HashPartitioner ([#709](https://github.com/Shopify/sarama/pull/709)). -#### Version 1.9.0 (2016-05-16) +## Version 1.9.0 (2016-05-16) New Features: - Add support for custom offset manager retention durations @@ -744,7 +952,7 @@ Bug Fixes: - Fix race condition shutting down the OffsetManager ([#658](https://github.com/Shopify/sarama/pull/658)). -#### Version 1.8.0 (2016-02-01) +## Version 1.8.0 (2016-02-01) New Features: - Full support for Kafka 0.9: @@ -763,7 +971,7 @@ Improvements: - Automatically retry `InvalidMessage` errors to match upstream behaviour ([#589](https://github.com/Shopify/sarama/pull/589)). -#### Version 1.7.0 (2015-12-11) +## Version 1.7.0 (2015-12-11) New Features: - Preliminary support for Kafka 0.9 @@ -794,13 +1002,13 @@ Bug Fixes: - Fix race condition in consumer test mock ([#553](https://github.com/Shopify/sarama/pull/553)). -#### Version 1.6.1 (2015-09-25) +## Version 1.6.1 (2015-09-25) Bug Fixes: - Fix panic that could occur if a user-supplied message value failed to encode ([#449](https://github.com/Shopify/sarama/pull/449)). -#### Version 1.6.0 (2015-09-04) +## Version 1.6.0 (2015-09-04) New Features: - Implementation of a consumer offset manager using the APIs introduced in @@ -822,7 +1030,7 @@ Bug Fixes: - Fix a potential race condition panic in the consumer on shutdown ([#529](https://github.com/Shopify/sarama/pull/529)). -#### Version 1.5.0 (2015-08-17) +## Version 1.5.0 (2015-08-17) New Features: - TLS-encrypted network connections are now supported. This feature is subject @@ -843,7 +1051,7 @@ Bug Fixes: - Fix a potential deadlock in the consumer on shutdown ([#475](https://github.com/Shopify/sarama/pull/475)). -#### Version 1.4.3 (2015-07-21) +## Version 1.4.3 (2015-07-21) Bug Fixes: - Don't include the partitioner in the producer's "fetch partitions" @@ -853,13 +1061,13 @@ Bug Fixes: - Update the import path for snappy-go, it has moved again and the API has changed slightly ([#486](https://github.com/Shopify/sarama/pull/486)). -#### Version 1.4.2 (2015-05-27) +## Version 1.4.2 (2015-05-27) Bug Fixes: - Update the import path for snappy-go, it has moved from google code to github ([#456](https://github.com/Shopify/sarama/pull/456)). -#### Version 1.4.1 (2015-05-25) +## Version 1.4.1 (2015-05-25) Improvements: - Optimizations when decoding snappy messages, thanks to John Potocny @@ -870,7 +1078,7 @@ Bug Fixes: ([#450](https://github.com/Shopify/sarama/pull/450), [#451](https://github.com/Shopify/sarama/pull/451)). -#### Version 1.4.0 (2015-05-01) +## Version 1.4.0 (2015-05-01) New Features: - The consumer now implements `Topics()` and `Partitions()` methods to enable @@ -900,7 +1108,7 @@ Bug Fixes: making it much more resilient to specific user code ordering ([#325](https://github.com/Shopify/sarama/pull/325)). 
-#### Version 1.3.0 (2015-04-16) +## Version 1.3.0 (2015-04-16) New Features: - The client now tracks consumer group coordinators using @@ -928,7 +1136,7 @@ Bug Fixes: it happens to be activated while the client is being closed ([#422](https://github.com/Shopify/sarama/pull/422)). -#### Version 1.2.0 (2015-04-07) +## Version 1.2.0 (2015-04-07) Improvements: - The producer's behaviour when `Flush.Frequency` is set is now more intuitive @@ -949,7 +1157,7 @@ Bug Fixes: API versions ([#390](https://github.com/Shopify/sarama/pull/390), [#400](https://github.com/Shopify/sarama/pull/400)). -#### Version 1.1.0 (2015-03-20) +## Version 1.1.0 (2015-03-20) Improvements: - Wrap the producer's partitioner call in a circuit-breaker so that repeatedly @@ -967,7 +1175,7 @@ Bug Fixes: metadata for a non-existant topic ([#372](https://github.com/Shopify/sarama/pull/372)). -#### Version 1.0.0 (2015-03-17) +## Version 1.0.0 (2015-03-17) Version 1.0.0 is the first tagged version, and is almost a complete rewrite. The primary differences with previous untagged versions are: diff --git a/vendor/github.com/Shopify/sarama/Dockerfile.kafka b/vendor/github.com/Shopify/sarama/Dockerfile.kafka new file mode 100644 index 000000000..48a9c178a --- /dev/null +++ b/vendor/github.com/Shopify/sarama/Dockerfile.kafka @@ -0,0 +1,27 @@ +FROM registry.access.redhat.com/ubi8/ubi-minimal:latest + +USER root + +RUN microdnf update \ + && microdnf install curl gzip java-11-openjdk-headless tar \ + && microdnf clean all + +ENV JAVA_HOME=/usr/lib/jvm/jre-11 + +# https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html +# Ensure Java doesn't cache any dns results +RUN cd /etc/java/java-11-openjdk/*/conf/security \ + && sed -e '/networkaddress.cache.ttl/d' -e '/networkaddress.cache.negative.ttl/d' -i java.security \ + && echo 'networkaddress.cache.ttl=0' >> java.security \ + && echo 'networkaddress.cache.negative.ttl=0' >> java.security + +# https://github.com/apache/kafka/blob/53eeaad946cd053e9eb1a762972d4efeacb8e4fc/tests/docker/Dockerfile#L65-L69 +ARG KAFKA_MIRROR="https://s3-us-west-2.amazonaws.com/kafka-packages" +RUN mkdir -p "/opt/kafka-2.8.2" && chmod a+rw /opt/kafka-2.8.2 && curl -s "$KAFKA_MIRROR/kafka_2.12-2.8.2.tgz" | tar xz --strip-components=1 -C "/opt/kafka-2.8.2" +RUN mkdir -p "/opt/kafka-3.1.2" && chmod a+rw /opt/kafka-3.1.2 && curl -s "$KAFKA_MIRROR/kafka_2.12-3.1.2.tgz" | tar xz --strip-components=1 -C "/opt/kafka-3.1.2" +RUN mkdir -p "/opt/kafka-3.2.3" && chmod a+rw /opt/kafka-3.2.3 && curl -s "$KAFKA_MIRROR/kafka_2.12-3.2.3.tgz" | tar xz --strip-components=1 -C "/opt/kafka-3.2.3" +RUN mkdir -p "/opt/kafka-3.3.1" && chmod a+rw /opt/kafka-3.3.1 && curl -s "$KAFKA_MIRROR/kafka_2.12-3.3.1.tgz" | tar xz --strip-components=1 -C "/opt/kafka-3.3.1" + +COPY entrypoint.sh / + +ENTRYPOINT ["/entrypoint.sh"] diff --git a/vendor/github.com/Shopify/sarama/Makefile b/vendor/github.com/Shopify/sarama/Makefile index a05863480..7cefc2a2c 100644 --- a/vendor/github.com/Shopify/sarama/Makefile +++ b/vendor/github.com/Shopify/sarama/Makefile @@ -1,19 +1,22 @@ default: fmt get update test lint -GO := GO111MODULE=on GOPRIVATE=github.com/linkedin GOSUMDB=off go +GO := go +GOBIN := $(shell pwd)/bin GOBUILD := CGO_ENABLED=0 $(GO) build $(BUILD_FLAG) -GOTEST := $(GO) test -gcflags='-l' -p 3 -v -race -timeout 6m -coverprofile=profile.out -covermode=atomic +GOTEST := $(GO) test -v -race -coverprofile=profile.out -covermode=atomic FILES := $(shell find . 
-name '*.go' -type f -not -name '*.pb.go' -not -name '*_generated.go' -not -name '*_test.go') TESTS := $(shell find . -name '*.go' -type f -not -name '*.pb.go' -not -name '*_generated.go' -name '*_test.go') +$(GOBIN)/tparse: + GOBIN=$(GOBIN) go install github.com/mfridman/tparse@v0.11.1 get: $(GO) get ./... $(GO) mod verify $(GO) mod tidy update: - $(GO) get -u -v all + $(GO) get -u -v ./... $(GO) mod verify $(GO) mod tidy @@ -23,9 +26,14 @@ fmt: lint: GOFLAGS="-tags=functional" golangci-lint run -test: - $(GOTEST) ./... - +test: $(GOBIN)/tparse + $(GOTEST) -timeout 2m -json ./... \ + | tee output.json | $(GOBIN)/tparse -follow -all + [ -z "$${GITHUB_STEP_SUMMARY}" ] \ + || NO_COLOR=1 $(GOBIN)/tparse -format markdown -file output.json -all >"$${GITHUB_STEP_SUMMARY:-/dev/null}" .PHONY: test_functional -test_functional: - $(GOTEST) -tags=functional ./... +test_functional: $(GOBIN)/tparse + $(GOTEST) -timeout 15m -tags=functional -json ./... \ + | tee output.json | $(GOBIN)/tparse -follow -all + [ -z "$${GITHUB_STEP_SUMMARY:-}" ] \ + || NO_COLOR=1 $(GOBIN)/tparse -format markdown -file output.json -all >"$${GITHUB_STEP_SUMMARY:-/dev/null}" diff --git a/vendor/github.com/Shopify/sarama/README.md b/vendor/github.com/Shopify/sarama/README.md index 38d39695b..0ee6e6a7f 100644 --- a/vendor/github.com/Shopify/sarama/README.md +++ b/vendor/github.com/Shopify/sarama/README.md @@ -1,14 +1,13 @@ # sarama -[![GoDoc](https://godoc.org/github.com/Shopify/sarama?status.svg)](https://godoc.org/github.com/Shopify/sarama) -[![Build Status](https://travis-ci.org/Shopify/sarama.svg?branch=master)](https://travis-ci.org/Shopify/sarama) -[![Coverage](https://codecov.io/gh/Shopify/sarama/branch/master/graph/badge.svg)](https://codecov.io/gh/Shopify/sarama) +[![Go Reference](https://pkg.go.dev/badge/github.com/Shopify/sarama.svg)](https://pkg.go.dev/github.com/Shopify/sarama) +[![Coverage](https://codecov.io/gh/Shopify/sarama/branch/main/graph/badge.svg)](https://codecov.io/gh/Shopify/sarama) -Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apache.org/) version 0.8 (and later). +Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apache.org/). ## Getting started -- API documentation and examples are available via [godoc](https://godoc.org/github.com/Shopify/sarama). +- API documentation and examples are available via [pkg.go.dev](https://pkg.go.dev/github.com/Shopify/sarama). - Mocks for testing are available in the [mocks](./mocks) subpackage. - The [examples](./examples) directory contains more elaborate example applications. - The [tools](./tools) directory contains command line tools that can be useful for testing, diagnostics, and instrumentation. @@ -19,9 +18,7 @@ You might also want to look at the [Frequently Asked Questions](https://github.c Sarama provides a "2 releases + 2 months" compatibility guarantee: we support the two latest stable releases of Kafka and Go, and we provide a two month -grace period for older releases. This means we currently officially support -Go 1.13 through 1.14, and Kafka 2.4 through 2.6, although older releases are -still likely to work. +grace period for older releases. However, older releases of Kafka are still likely to work. Sarama follows semantic versioning and provides API stability via the gopkg.in service. You can import a version with a guaranteed stable API via http://gopkg.in/Shopify/sarama.v1. @@ -29,7 +26,7 @@ A changelog is available [here](CHANGELOG.md). 
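As a sketch of the gopkg.in versioning scheme this README paragraph keeps describing: the v1 import path pins the stable API surface, while module-based projects typically import the plain path and pin the version in go.mod instead. Both forms below are illustrative:

```go
package main

// Per the README note above, "gopkg.in/Shopify/sarama.v1" pins the
// stable v1 API; with Go modules most projects instead import
// "github.com/Shopify/sarama" and record the version in go.mod.
import sarama "gopkg.in/Shopify/sarama.v1"

func main() {
	config := sarama.NewConfig()
	_ = config // placeholder: configure a producer or consumer here
}
```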
## Contributing -- Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/master/.github/CONTRIBUTING.md). +- Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/main/.github/CONTRIBUTING.md). - Read the [Sarama wiki](https://github.com/Shopify/sarama/wiki) for more technical and design details. - The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol) contains a wealth of useful information. - For more general issues, there is [a google group](https://groups.google.com/forum/#!forum/kafka-clients) for Kafka client developers. diff --git a/vendor/github.com/Shopify/sarama/acl_bindings.go b/vendor/github.com/Shopify/sarama/acl_bindings.go index 50b689d1d..13440be67 100644 --- a/vendor/github.com/Shopify/sarama/acl_bindings.go +++ b/vendor/github.com/Shopify/sarama/acl_bindings.go @@ -1,6 +1,6 @@ package sarama -//Resource holds information about acl resource type +// Resource holds information about acl resource type type Resource struct { ResourceType AclResourceType ResourceName string @@ -46,7 +46,7 @@ func (r *Resource) decode(pd packetDecoder, version int16) (err error) { return nil } -//Acl holds information about acl type +// Acl holds information about acl type type Acl struct { Principal string Host string @@ -93,7 +93,7 @@ func (a *Acl) decode(pd packetDecoder, version int16) (err error) { return nil } -//ResourceAcls is an acl resource type +// ResourceAcls is an acl resource type type ResourceAcls struct { Resource Acls []*Acl diff --git a/vendor/github.com/Shopify/sarama/acl_create_request.go b/vendor/github.com/Shopify/sarama/acl_create_request.go index 6d8a70e1a..449102f74 100644 --- a/vendor/github.com/Shopify/sarama/acl_create_request.go +++ b/vendor/github.com/Shopify/sarama/acl_create_request.go @@ -1,6 +1,6 @@ package sarama -//CreateAclsRequest is an acl creation request +// CreateAclsRequest is an acl creation request type CreateAclsRequest struct { Version int16 AclCreations []*AclCreation @@ -60,7 +60,7 @@ func (c *CreateAclsRequest) requiredVersion() KafkaVersion { } } -//AclCreation is a wrapper around Resource and Acl type +// AclCreation is a wrapper around Resource and Acl type type AclCreation struct { Resource Acl diff --git a/vendor/github.com/Shopify/sarama/acl_create_response.go b/vendor/github.com/Shopify/sarama/acl_create_response.go index 14b1b9e13..21d6c340c 100644 --- a/vendor/github.com/Shopify/sarama/acl_create_response.go +++ b/vendor/github.com/Shopify/sarama/acl_create_response.go @@ -2,7 +2,7 @@ package sarama import "time" -//CreateAclsResponse is a an acl response creation type +// CreateAclsResponse is an acl response creation type type CreateAclsResponse struct { ThrottleTime time.Duration AclCreationResponses []*AclCreationResponse @@ -63,7 +63,7 @@ func (c *CreateAclsResponse) requiredVersion() KafkaVersion { return V0_11_0_0 } -//AclCreationResponse is an acl creation response type +// AclCreationResponse is an acl creation response type type AclCreationResponse struct { Err KError ErrMsg *string diff --git a/vendor/github.com/Shopify/sarama/acl_delete_request.go b/vendor/github.com/Shopify/sarama/acl_delete_request.go index 415252259..5e5c03bc2 100644 --- a/vendor/github.com/Shopify/sarama/acl_delete_request.go +++ b/vendor/github.com/Shopify/sarama/acl_delete_request.go @@ -1,6 +1,6 @@ package sarama -//DeleteAclsRequest is a delete acl request +// DeleteAclsRequest is a delete acl request type
DeleteAclsRequest struct { Version int Filters []*AclFilter @@ -48,7 +48,7 @@ func (d *DeleteAclsRequest) version() int16 { return int16(d.Version) } -func (c *DeleteAclsRequest) headerVersion() int16 { +func (d *DeleteAclsRequest) headerVersion() int16 { return 1 } diff --git a/vendor/github.com/Shopify/sarama/acl_delete_response.go b/vendor/github.com/Shopify/sarama/acl_delete_response.go index cb6308826..cd33749d5 100644 --- a/vendor/github.com/Shopify/sarama/acl_delete_response.go +++ b/vendor/github.com/Shopify/sarama/acl_delete_response.go @@ -2,7 +2,7 @@ package sarama import "time" -//DeleteAclsResponse is a delete acl response +// DeleteAclsResponse is a delete acl response type DeleteAclsResponse struct { Version int16 ThrottleTime time.Duration @@ -64,7 +64,7 @@ func (d *DeleteAclsResponse) requiredVersion() KafkaVersion { return V0_11_0_0 } -//FilterResponse is a filter response type +// FilterResponse is a filter response type type FilterResponse struct { Err KError ErrMsg *string @@ -115,7 +115,7 @@ func (f *FilterResponse) decode(pd packetDecoder, version int16) (err error) { return nil } -//MatchingAcl is a matching acl type +// MatchingAcl is a matching acl type type MatchingAcl struct { Err KError ErrMsg *string diff --git a/vendor/github.com/Shopify/sarama/acl_describe_request.go b/vendor/github.com/Shopify/sarama/acl_describe_request.go index 29841a5ce..e0fe9023a 100644 --- a/vendor/github.com/Shopify/sarama/acl_describe_request.go +++ b/vendor/github.com/Shopify/sarama/acl_describe_request.go @@ -1,6 +1,6 @@ package sarama -//DescribeAclsRequest is a secribe acl request type +// DescribeAclsRequest is a describe acl request type type DescribeAclsRequest struct { Version int AclFilter diff --git a/vendor/github.com/Shopify/sarama/acl_describe_response.go b/vendor/github.com/Shopify/sarama/acl_describe_response.go index c43408b24..3255fd485 100644 --- a/vendor/github.com/Shopify/sarama/acl_describe_response.go +++ b/vendor/github.com/Shopify/sarama/acl_describe_response.go @@ -2,7 +2,7 @@ package sarama import "time" -//DescribeAclsResponse is a describe acl response type +// DescribeAclsResponse is a describe acl response type type DescribeAclsResponse struct { Version int16 ThrottleTime time.Duration diff --git a/vendor/github.com/Shopify/sarama/acl_filter.go b/vendor/github.com/Shopify/sarama/acl_filter.go index fad555875..b380161aa 100644 --- a/vendor/github.com/Shopify/sarama/acl_filter.go +++ b/vendor/github.com/Shopify/sarama/acl_filter.go @@ -46,7 +46,6 @@ func (a *AclFilter) decode(pd packetDecoder, version int16) (err error) { if a.Version == 1 { pattern, err := pd.getInt8() - if err != nil { return err } diff --git a/vendor/github.com/Shopify/sarama/acl_types.go b/vendor/github.com/Shopify/sarama/acl_types.go index c10ad7b90..c3ba8ddcf 100644 --- a/vendor/github.com/Shopify/sarama/acl_types.go +++ b/vendor/github.com/Shopify/sarama/acl_types.go @@ -1,5 +1,10 @@ package sarama +import ( + "fmt" + "strings" +) + type ( AclOperation int @@ -27,6 +32,61 @@ const ( AclOperationIdempotentWrite ) +func (a *AclOperation) String() string { + mapping := map[AclOperation]string{ + AclOperationUnknown: "Unknown", + AclOperationAny: "Any", + AclOperationAll: "All", + AclOperationRead: "Read", + AclOperationWrite: "Write", + AclOperationCreate: "Create", + AclOperationDelete: "Delete", + AclOperationAlter: "Alter", + AclOperationDescribe: "Describe", + AclOperationClusterAction: "ClusterAction", + AclOperationDescribeConfigs: "DescribeConfigs", + AclOperationAlterConfigs:
"AlterConfigs", + AclOperationIdempotentWrite: "IdempotentWrite", + } + s, ok := mapping[*a] + if !ok { + s = mapping[AclOperationUnknown] + } + return s +} + +// MarshalText returns the text form of the AclOperation (name without prefix) +func (a *AclOperation) MarshalText() ([]byte, error) { + return []byte(a.String()), nil +} + +// UnmarshalText takes a text reprentation of the operation and converts it to an AclOperation +func (a *AclOperation) UnmarshalText(text []byte) error { + normalized := strings.ToLower(string(text)) + mapping := map[string]AclOperation{ + "unknown": AclOperationUnknown, + "any": AclOperationAny, + "all": AclOperationAll, + "read": AclOperationRead, + "write": AclOperationWrite, + "create": AclOperationCreate, + "delete": AclOperationDelete, + "alter": AclOperationAlter, + "describe": AclOperationDescribe, + "clusteraction": AclOperationClusterAction, + "describeconfigs": AclOperationDescribeConfigs, + "alterconfigs": AclOperationAlterConfigs, + "idempotentwrite": AclOperationIdempotentWrite, + } + ao, ok := mapping[normalized] + if !ok { + *a = AclOperationUnknown + return fmt.Errorf("no acl operation with name %s", normalized) + } + *a = ao + return nil +} + // ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclPermissionType.java const ( AclPermissionUnknown AclPermissionType = iota @@ -35,6 +95,44 @@ const ( AclPermissionAllow ) +func (a *AclPermissionType) String() string { + mapping := map[AclPermissionType]string{ + AclPermissionUnknown: "Unknown", + AclPermissionAny: "Any", + AclPermissionDeny: "Deny", + AclPermissionAllow: "Allow", + } + s, ok := mapping[*a] + if !ok { + s = mapping[AclPermissionUnknown] + } + return s +} + +// MarshalText returns the text form of the AclPermissionType (name without prefix) +func (a *AclPermissionType) MarshalText() ([]byte, error) { + return []byte(a.String()), nil +} + +// UnmarshalText takes a text reprentation of the permission type and converts it to an AclPermissionType +func (a *AclPermissionType) UnmarshalText(text []byte) error { + normalized := strings.ToLower(string(text)) + mapping := map[string]AclPermissionType{ + "unknown": AclPermissionUnknown, + "any": AclPermissionAny, + "deny": AclPermissionDeny, + "allow": AclPermissionAllow, + } + + apt, ok := mapping[normalized] + if !ok { + *a = AclPermissionUnknown + return fmt.Errorf("no acl permission with name %s", normalized) + } + *a = apt + return nil +} + // ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/ResourceType.java const ( AclResourceUnknown AclResourceType = iota @@ -43,8 +141,53 @@ const ( AclResourceGroup AclResourceCluster AclResourceTransactionalID + AclResourceDelegationToken ) +func (a *AclResourceType) String() string { + mapping := map[AclResourceType]string{ + AclResourceUnknown: "Unknown", + AclResourceAny: "Any", + AclResourceTopic: "Topic", + AclResourceGroup: "Group", + AclResourceCluster: "Cluster", + AclResourceTransactionalID: "TransactionalID", + AclResourceDelegationToken: "DelegationToken", + } + s, ok := mapping[*a] + if !ok { + s = mapping[AclResourceUnknown] + } + return s +} + +// MarshalText returns the text form of the AclResourceType (name without prefix) +func (a *AclResourceType) MarshalText() ([]byte, error) { + return []byte(a.String()), nil +} + +// UnmarshalText takes a text reprentation of the resource type and converts it to an AclResourceType +func (a *AclResourceType) UnmarshalText(text []byte) error { + 
normalized := strings.ToLower(string(text)) + mapping := map[string]AclResourceType{ + "unknown": AclResourceUnknown, + "any": AclResourceAny, + "topic": AclResourceTopic, + "group": AclResourceGroup, + "cluster": AclResourceCluster, + "transactionalid": AclResourceTransactionalID, + "delegationtoken": AclResourceDelegationToken, + } + + art, ok := mapping[normalized] + if !ok { + *a = AclResourceUnknown + return fmt.Errorf("no acl resource with name %s", normalized) + } + *a = art + return nil +} + // ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/PatternType.java const ( AclPatternUnknown AclResourcePatternType = iota @@ -53,3 +196,43 @@ const ( AclPatternLiteral AclPatternPrefixed ) + +func (a *AclResourcePatternType) String() string { + mapping := map[AclResourcePatternType]string{ + AclPatternUnknown: "Unknown", + AclPatternAny: "Any", + AclPatternMatch: "Match", + AclPatternLiteral: "Literal", + AclPatternPrefixed: "Prefixed", + } + s, ok := mapping[*a] + if !ok { + s = mapping[AclPatternUnknown] + } + return s +} + +// MarshalText returns the text form of the AclResourcePatternType (name without prefix) +func (a *AclResourcePatternType) MarshalText() ([]byte, error) { + return []byte(a.String()), nil +} + +// UnmarshalText takes a text representation of the resource pattern type and converts it to an AclResourcePatternType +func (a *AclResourcePatternType) UnmarshalText(text []byte) error { + normalized := strings.ToLower(string(text)) + mapping := map[string]AclResourcePatternType{ + "unknown": AclPatternUnknown, + "any": AclPatternAny, + "match": AclPatternMatch, + "literal": AclPatternLiteral, + "prefixed": AclPatternPrefixed, + } + + arpt, ok := mapping[normalized] + if !ok { + *a = AclPatternUnknown + return fmt.Errorf("no acl resource pattern with name %s", normalized) + } + *a = arpt + return nil +} diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go index 95586f9a1..a96af9341 100644 --- a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go +++ b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go @@ -1,6 +1,6 @@ package sarama -//AddOffsetsToTxnRequest adds offsets to a transaction request +// AddOffsetsToTxnRequest adds offsets to a transaction request type AddOffsetsToTxnRequest struct { TransactionalID string ProducerID int64 diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go index bdb184419..bb61973d1 100644 --- a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go +++ b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go @@ -4,7 +4,7 @@ import ( "time" ) -//AddOffsetsToTxnResponse is a response type for adding offsets to txns +// AddOffsetsToTxnResponse is a response type for adding offsets to txns type AddOffsetsToTxnResponse struct { ThrottleTime time.Duration Err KError diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go index 6289f4514..57ecf6488 100644 --- a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go +++ b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go @@ -1,6 +1,6 @@ package sarama -//AddPartitionsToTxnRequest is a add paartition request +// AddPartitionsToTxnRequest is an add partition request type AddPartitionsToTxnRequest struct { TransactionalID
string ProducerID int64 diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go index 73b73b07f..098956507 100644 --- a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go +++ b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go @@ -4,7 +4,7 @@ import ( "time" ) -//AddPartitionsToTxnResponse is a partition errors to transaction type +// AddPartitionsToTxnResponse is a response carrying the partition errors for a transaction type AddPartitionsToTxnResponse struct { ThrottleTime time.Duration Errors map[string][]*PartitionError @@ -87,7 +87,7 @@ func (a *AddPartitionsToTxnResponse) requiredVersion() KafkaVersion { return V0_11_0_0 } -//PartitionError is a partition error type +// PartitionError is a partition error type type PartitionError struct { Partition int32 Err KError diff --git a/vendor/github.com/Shopify/sarama/admin.go b/vendor/github.com/Shopify/sarama/admin.go index 9dea0255f..a334daff5 100644 --- a/vendor/github.com/Shopify/sarama/admin.go +++ b/vendor/github.com/Shopify/sarama/admin.go @@ -70,11 +70,24 @@ type ClusterAdmin interface { // for some resources while fail for others. The configs for a particular resource are updated automatically. AlterConfig(resourceType ConfigResourceType, name string, entries map[string]*string, validateOnly bool) error + // IncrementalAlterConfig incrementally updates the configuration for the specified resources with the default options. + // This operation is supported by brokers with version 2.3.0.0 or higher. + // Updates are not transactional so they may succeed for some resources while fail for others. + // The configs for a particular resource are updated atomically. + IncrementalAlterConfig(resourceType ConfigResourceType, name string, entries map[string]IncrementalAlterConfigsEntry, validateOnly bool) error + + // Creates an access control list (ACL) which is bound to a specific resource. + // This operation is not transactional so it may succeed or fail. + // If you attempt to add an ACL that duplicates an existing ACL, no error will be raised, but + // no changes will be made. This operation is supported by brokers with version 0.11.0.0 or higher. + // Deprecated: Use CreateACLs instead. + CreateACL(resource Resource, acl Acl) error + // Creates access control lists (ACLs) which are bound to specific resources. // This operation is not transactional so it may succeed for some ACLs while fail for others. // If you attempt to add an ACL that duplicates an existing ACL, no error will be raised, but // no changes will be made. This operation is supported by brokers with version 0.11.0.0 or higher. - CreateACL(resource Resource, acl Acl) error + CreateACLs([]*ResourceAcls) error // Lists access control lists (ACLs) according to the supplied filter. // it may take some time for changes made by createAcls or deleteAcls to be reflected in the output of ListAcls @@ -95,6 +108,9 @@ type ClusterAdmin interface { // List the consumer group offsets available in the cluster. ListConsumerGroupOffsets(group string, topicPartitions map[string][]int32) (*OffsetFetchResponse, error) + // Deletes a consumer group offset. + DeleteConsumerGroupOffset(group string, topic string, partition int32) error + // Delete a consumer group.
DeleteConsumerGroup(group string) error @@ -104,6 +120,32 @@ type ClusterAdmin interface { // Get information about all log directories on the given set of brokers DescribeLogDirs(brokers []int32) (map[int32][]DescribeLogDirsResponseDirMetadata, error) + // Get information about SCRAM users + DescribeUserScramCredentials(users []string) ([]*DescribeUserScramCredentialsResult, error) + + // Delete SCRAM users + DeleteUserScramCredentials(delete []AlterUserScramCredentialsDelete) ([]*AlterUserScramCredentialsResult, error) + + // Upsert SCRAM users + UpsertUserScramCredentials(upsert []AlterUserScramCredentialsUpsert) ([]*AlterUserScramCredentialsResult, error) + + // Get client quota configurations corresponding to the specified filter. + // This operation is supported by brokers with version 2.6.0.0 or higher. + DescribeClientQuotas(components []QuotaFilterComponent, strict bool) ([]DescribeClientQuotasEntry, error) + + // Alters client quota configurations with the specified alterations. + // This operation is supported by brokers with version 2.6.0.0 or higher. + AlterClientQuotas(entity []QuotaEntityComponent, op ClientQuotasOp, validateOnly bool) error + + // Controller returns the cluster controller broker. It will return a + // locally cached value if it's available. + Controller() (*Broker, error) + + // Remove members from the consumer group by the given member identities. + // This operation is supported by brokers with version 2.3 or higher. + // This is for the static membership feature (KIP-345). + RemoveMemberFromConsumerGroup(groupId string, groupInstanceIds []string) (*LeaveGroupResponse, error) + // Close shuts down the admin and closes underlying client. Close() error } @@ -119,13 +161,17 @@ func NewClusterAdmin(addrs []string, conf *Config) (ClusterAdmin, error) { if err != nil { return nil, err } - return NewClusterAdminFromClient(client) + admin, err := NewClusterAdminFromClient(client) + if err != nil { + client.Close() + } + return admin, err } // NewClusterAdminFromClient creates a new ClusterAdmin using the given client. // Note that underlying client will also be closed on admin's Close() call.
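//
// A minimal usage sketch (the broker address below is a placeholder assumption,
// not part of the upstream documentation):
//
//	client, err := sarama.NewClient([]string{"localhost:9092"}, sarama.NewConfig())
//	if err != nil {
//		log.Fatalln(err)
//	}
//	admin, err := sarama.NewClusterAdminFromClient(client)
//	if err != nil {
//		log.Fatalln(err)
//	}
//	defer admin.Close() // also closes the underlying client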
func NewClusterAdminFromClient(client Client) (ClusterAdmin, error) { - //make sure we can retrieve the controller + // make sure we can retrieve the controller _, err := client.Controller() if err != nil { return nil, err @@ -153,26 +199,18 @@ func (ca *clusterAdmin) refreshController() (*Broker, error) { // isErrNoController returns `true` if the given error type unwraps to an // `ErrNotController` response from Kafka func isErrNoController(err error) bool { - switch e := err.(type) { - case *TopicError: - return e.Err == ErrNotController - case *TopicPartitionError: - return e.Err == ErrNotController - case KError: - return e == ErrNotController - } - return false + return errors.Is(err, ErrNotController) } // retryOnError will repeatedly call the given (error-returning) func in the -// case that its response is non-nil and retriable (as determined by the -// provided retriable func) up to the maximum number of tries permitted by +// case that its response is non-nil and retryable (as determined by the +// provided retryable func) up to the maximum number of tries permitted by // the admin client configuration -func (ca *clusterAdmin) retryOnError(retriable func(error) bool, fn func() error) error { +func (ca *clusterAdmin) retryOnError(retryable func(error) bool, fn func() error) error { var err error for attempt := 0; attempt < ca.conf.Admin.Retry.Max; attempt++ { err = fn() - if err == nil || !retriable(err) { + if err == nil || !retryable(err) { return err } Logger.Printf( @@ -225,8 +263,8 @@ func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateO return ErrIncompleteResponse } - if topicErr.Err != ErrNoError { - if topicErr.Err == ErrNotController { + if !errors.Is(topicErr.Err, ErrNoError) { + if errors.Is(topicErr.Err, ErrNotController) { _, _ = ca.refreshController() } return topicErr @@ -242,17 +280,7 @@ func (ca *clusterAdmin) DescribeTopics(topics []string) (metadata []*TopicMetada return nil, err } - request := &MetadataRequest{ - Topics: topics, - AllowAutoTopicCreation: false, - } - - if ca.conf.Version.IsAtLeast(V1_0_0_0) { - request.Version = 5 - } else if ca.conf.Version.IsAtLeast(V0_11_0_0) { - request.Version = 4 - } - + request := NewMetadataRequest(ca.conf.Version, topics) response, err := controller.GetMetadata(request) if err != nil { return nil, err @@ -266,14 +294,7 @@ func (ca *clusterAdmin) DescribeCluster() (brokers []*Broker, controllerID int32 return nil, int32(0), err } - request := &MetadataRequest{ - Topics: []string{}, - } - - if ca.conf.Version.IsAtLeast(V0_10_0_0) { - request.Version = 1 - } - + request := NewMetadataRequest(ca.conf.Version, nil) response, err := controller.GetMetadata(request) if err != nil { return nil, int32(0), err @@ -314,7 +335,7 @@ func (ca *clusterAdmin) ListTopics() (map[string]TopicDetail, error) { } _ = b.Open(ca.client.Config()) - metadataReq := &MetadataRequest{} + metadataReq := NewMetadataRequest(ca.conf.Version, nil) metadataResp, err := b.GetMetadata(metadataReq) if err != nil { return nil, err @@ -412,8 +433,8 @@ func (ca *clusterAdmin) DeleteTopic(topic string) error { return ErrIncompleteResponse } - if topicErr != ErrNoError { - if topicErr == ErrNotController { + if !errors.Is(topicErr, ErrNoError) { + if errors.Is(topicErr, ErrNotController) { _, _ = ca.refreshController() } return topicErr @@ -434,6 +455,7 @@ func (ca *clusterAdmin) CreatePartitions(topic string, count int32, assignment [ request := &CreatePartitionsRequest{ TopicPartitions: topicPartitions, Timeout: 
ca.conf.Admin.Timeout, + ValidateOnly: validateOnly, } return ca.retryOnError(isErrNoController, func() error { @@ -452,8 +474,8 @@ func (ca *clusterAdmin) CreatePartitions(topic string, count int32, assignment [ return ErrIncompleteResponse } - if topicErr.Err != ErrNoError { - if topicErr.Err == ErrNotController { + if !errors.Is(topicErr.Err, ErrNoError) { + if errors.Is(topicErr.Err, ErrNotController) { _, _ = ca.refreshController() } return topicErr @@ -491,21 +513,20 @@ func (ca *clusterAdmin) AlterPartitionReassignments(topic string, assignment [][ errs = append(errs, err) } else { if rsp.ErrorCode > 0 { - errs = append(errs, errors.New(rsp.ErrorCode.Error())) + errs = append(errs, rsp.ErrorCode) } for topic, topicErrors := range rsp.Errors { for partition, partitionError := range topicErrors { - if partitionError.errorCode != ErrNoError { - errStr := fmt.Sprintf("[%s-%d]: %s", topic, partition, partitionError.errorCode.Error()) - errs = append(errs, errors.New(errStr)) + if !errors.Is(partitionError.errorCode, ErrNoError) { + errs = append(errs, fmt.Errorf("[%s-%d]: %w", topic, partition, partitionError.errorCode)) } } } } if len(errs) > 0 { - return ErrReassignPartitions{MultiError{&errs}} + return Wrap(ErrReassignPartitions, errs...) } return nil @@ -543,52 +564,53 @@ func (ca *clusterAdmin) DeleteRecords(topic string, partitionOffsets map[int32]i if topic == "" { return ErrInvalidTopic } + errs := make([]error, 0) partitionPerBroker := make(map[*Broker][]int32) for partition := range partitionOffsets { broker, err := ca.client.Leader(topic, partition) if err != nil { - return err - } - if _, ok := partitionPerBroker[broker]; ok { - partitionPerBroker[broker] = append(partitionPerBroker[broker], partition) - } else { - partitionPerBroker[broker] = []int32{partition} + errs = append(errs, err) + continue } + partitionPerBroker[broker] = append(partitionPerBroker[broker], partition) } - errs := make([]error, 0) for broker, partitions := range partitionPerBroker { topics := make(map[string]*DeleteRecordsRequestTopic) recordsToDelete := make(map[int32]int64) for _, p := range partitions { recordsToDelete[p] = partitionOffsets[p] } - topics[topic] = &DeleteRecordsRequestTopic{PartitionOffsets: recordsToDelete} + topics[topic] = &DeleteRecordsRequestTopic{ + PartitionOffsets: recordsToDelete, + } request := &DeleteRecordsRequest{ Topics: topics, Timeout: ca.conf.Admin.Timeout, } - rsp, err := broker.DeleteRecords(request) if err != nil { errs = append(errs, err) - } else { - deleteRecordsResponseTopic, ok := rsp.Topics[topic] - if !ok { - errs = append(errs, ErrIncompleteResponse) - } else { - for _, deleteRecordsResponsePartition := range deleteRecordsResponseTopic.Partitions { - if deleteRecordsResponsePartition.Err != ErrNoError { - errs = append(errs, errors.New(deleteRecordsResponsePartition.Err.Error())) - } - } + continue + } + + deleteRecordsResponseTopic, ok := rsp.Topics[topic] + if !ok { + errs = append(errs, ErrIncompleteResponse) + continue + } + + for _, deleteRecordsResponsePartition := range deleteRecordsResponseTopic.Partitions { + if !errors.Is(deleteRecordsResponsePartition.Err, ErrNoError) { + errs = append(errs, deleteRecordsResponsePartition.Err) + continue } } } if len(errs) > 0 { - return ErrDeleteRecords{MultiError{&errs}} + return Wrap(ErrDeleteRecords, errs...) 
} - //todo since we are dealing with couple of partitions it would be good if we return slice of errors -//for each partition instead of one error + // todo: since we are dealing with a couple of partitions it would be good if we returned a slice of errors + // for each partition instead of one error return nil } @@ -623,7 +645,11 @@ func (ca *clusterAdmin) DescribeConfig(resource ConfigResource) ([]ConfigEntry, // DescribeConfig of broker/broker logger must be sent to the broker in question if dependsOnSpecificNode(resource) { - id, _ := strconv.Atoi(resource.Name) + var id int64 + id, err = strconv.ParseInt(resource.Name, 10, 32) + if err != nil { + return nil, err + } b, err = ca.findBroker(int32(id)) } else { b, err = ca.findAnyBroker() } @@ -674,7 +700,11 @@ func (ca *clusterAdmin) AlterConfig(resourceType ConfigResourceType, name string // AlterConfig of broker/broker logger must be sent to the broker in question if dependsOnSpecificNode(ConfigResource{Name: name, Type: resourceType}) { - id, _ := strconv.Atoi(name) + var id int64 + id, err = strconv.ParseInt(name, 10, 32) + if err != nil { + return err + } b, err = ca.findBroker(int32(id)) } else { b, err = ca.findAnyBroker() } @@ -702,6 +732,58 @@ func (ca *clusterAdmin) AlterConfig(resourceType ConfigResourceType, name string return nil } +func (ca *clusterAdmin) IncrementalAlterConfig(resourceType ConfigResourceType, name string, entries map[string]IncrementalAlterConfigsEntry, validateOnly bool) error { + var resources []*IncrementalAlterConfigsResource + resources = append(resources, &IncrementalAlterConfigsResource{ + Type: resourceType, + Name: name, + ConfigEntries: entries, + }) + + request := &IncrementalAlterConfigsRequest{ + Resources: resources, + ValidateOnly: validateOnly, + } + + var ( + b *Broker + err error + ) + + // AlterConfig of broker/broker logger must be sent to the broker in question + if dependsOnSpecificNode(ConfigResource{Name: name, Type: resourceType}) { + var id int64 + id, err = strconv.ParseInt(name, 10, 32) + if err != nil { + return err + } + b, err = ca.findBroker(int32(id)) + } else { + b, err = ca.findAnyBroker() + } + if err != nil { + return err + } + + _ = b.Open(ca.client.Config()) + rsp, err := b.IncrementalAlterConfigs(request) + if err != nil { + return err + } + + for _, rspResource := range rsp.Resources { + if rspResource.Name == name { + if rspResource.ErrorMsg != "" { + return errors.New(rspResource.ErrorMsg) + } + if rspResource.ErrorCode != 0 { + return KError(rspResource.ErrorCode) + } + } + } + return nil +} + func (ca *clusterAdmin) CreateACL(resource Resource, acl Acl) error { var acls []*AclCreation acls = append(acls, &AclCreation{resource, acl}) @@ -720,6 +802,28 @@ func (ca *clusterAdmin) CreateACL(resource Resource, acl Acl) error { return err } +func (ca *clusterAdmin) CreateACLs(resourceACLs []*ResourceAcls) error { + var acls []*AclCreation + for _, resourceACL := range resourceACLs { + for _, acl := range resourceACL.Acls { + acls = append(acls, &AclCreation{resourceACL.Resource, *acl}) + } + } + request := &CreateAclsRequest{AclCreations: acls} + + if ca.conf.Version.IsAtLeast(V2_0_0_0) { + request.Version = 1 + } + + b, err := ca.Controller() + if err != nil { + return err + } + + _, err = b.CreateAcls(request) + return err +} + func (ca *clusterAdmin) ListAcls(filter AclFilter) ([]ResourceAcls, error) { request := &DescribeAclsRequest{AclFilter: filter} @@ -784,9 +888,13 @@ func (ca *clusterAdmin) DescribeConsumerGroups(groups []string) (result []*Group } for broker,
brokerGroups := range groupsPerBroker { - response, err := broker.DescribeGroups(&DescribeGroupsRequest{ + describeReq := &DescribeGroupsRequest{ Groups: brokerGroups, - }) + } + if ca.conf.Version.IsAtLeast(V2_3_0_0) { + describeReq.Version = 4 + } + response, err := broker.DescribeGroups(describeReq) if err != nil { return nil, err } @@ -861,6 +969,34 @@ func (ca *clusterAdmin) ListConsumerGroupOffsets(group string, topicPartitions m return coordinator.FetchOffset(request) } +func (ca *clusterAdmin) DeleteConsumerGroupOffset(group string, topic string, partition int32) error { + coordinator, err := ca.client.Coordinator(group) + if err != nil { + return err + } + + request := &DeleteOffsetsRequest{ + Group: group, + partitions: map[string][]int32{ + topic: {partition}, + }, + } + + resp, err := coordinator.DeleteOffsets(request) + if err != nil { + return err + } + + if !errors.Is(resp.ErrorCode, ErrNoError) { + return resp.ErrorCode + } + + if !errors.Is(resp.Errors[topic][partition], ErrNoError) { + return resp.Errors[topic][partition] + } + return nil +} + func (ca *clusterAdmin) DeleteConsumerGroup(group string) error { coordinator, err := ca.client.Coordinator(group) if err != nil { @@ -881,7 +1017,7 @@ func (ca *clusterAdmin) DeleteConsumerGroup(group string) error { return ErrIncompleteResponse } - if groupErr != ErrNoError { + if !errors.Is(groupErr, ErrNoError) { return groupErr } @@ -897,12 +1033,12 @@ func (ca *clusterAdmin) DescribeLogDirs(brokerIds []int32) (allLogDirs map[int32 wg := sync.WaitGroup{} for _, b := range brokerIds { - wg.Add(1) broker, err := ca.findBroker(b) if err != nil { Logger.Printf("Unable to find broker with ID = %v\n", b) continue } + wg.Add(1) go func(b *Broker, conf *Config) { defer wg.Done() _ = b.Open(conf) // Ensure that broker is opened @@ -932,3 +1068,141 @@ func (ca *clusterAdmin) DescribeLogDirs(brokerIds []int32) (allLogDirs map[int32 err = <-errChan return } + +func (ca *clusterAdmin) DescribeUserScramCredentials(users []string) ([]*DescribeUserScramCredentialsResult, error) { + req := &DescribeUserScramCredentialsRequest{} + for _, u := range users { + req.DescribeUsers = append(req.DescribeUsers, DescribeUserScramCredentialsRequestUser{ + Name: u, + }) + } + + b, err := ca.Controller() + if err != nil { + return nil, err + } + + rsp, err := b.DescribeUserScramCredentials(req) + if err != nil { + return nil, err + } + + return rsp.Results, nil +} + +func (ca *clusterAdmin) UpsertUserScramCredentials(upsert []AlterUserScramCredentialsUpsert) ([]*AlterUserScramCredentialsResult, error) { + res, err := ca.AlterUserScramCredentials(upsert, nil) + if err != nil { + return nil, err + } + + return res, nil +} + +func (ca *clusterAdmin) DeleteUserScramCredentials(delete []AlterUserScramCredentialsDelete) ([]*AlterUserScramCredentialsResult, error) { + res, err := ca.AlterUserScramCredentials(nil, delete) + if err != nil { + return nil, err + } + + return res, nil +} + +func (ca *clusterAdmin) AlterUserScramCredentials(u []AlterUserScramCredentialsUpsert, d []AlterUserScramCredentialsDelete) ([]*AlterUserScramCredentialsResult, error) { + req := &AlterUserScramCredentialsRequest{ + Deletions: d, + Upsertions: u, + } + + b, err := ca.Controller() + if err != nil { + return nil, err + } + + rsp, err := b.AlterUserScramCredentials(req) + if err != nil { + return nil, err + } + + return rsp.Results, nil +} + +// Describe All : use an empty/nil components slice + strict = false +// Contains components: strict = false +// Contains only components: strict 
= true +func (ca *clusterAdmin) DescribeClientQuotas(components []QuotaFilterComponent, strict bool) ([]DescribeClientQuotasEntry, error) { + request := &DescribeClientQuotasRequest{ + Components: components, + Strict: strict, + } + + b, err := ca.Controller() + if err != nil { + return nil, err + } + + rsp, err := b.DescribeClientQuotas(request) + if err != nil { + return nil, err + } + + if rsp.ErrorMsg != nil && len(*rsp.ErrorMsg) > 0 { + return nil, errors.New(*rsp.ErrorMsg) + } + if !errors.Is(rsp.ErrorCode, ErrNoError) { + return nil, rsp.ErrorCode + } + + return rsp.Entries, nil +} + +func (ca *clusterAdmin) AlterClientQuotas(entity []QuotaEntityComponent, op ClientQuotasOp, validateOnly bool) error { + entry := AlterClientQuotasEntry{ + Entity: entity, + Ops: []ClientQuotasOp{op}, + } + + request := &AlterClientQuotasRequest{ + Entries: []AlterClientQuotasEntry{entry}, + ValidateOnly: validateOnly, + } + + b, err := ca.Controller() + if err != nil { + return err + } + + rsp, err := b.AlterClientQuotas(request) + if err != nil { + return err + } + + for _, entry := range rsp.Entries { + if entry.ErrorMsg != nil && len(*entry.ErrorMsg) > 0 { + return errors.New(*entry.ErrorMsg) + } + if !errors.Is(entry.ErrorCode, ErrNoError) { + return entry.ErrorCode + } + } + + return nil +} + +func (ca *clusterAdmin) RemoveMemberFromConsumerGroup(groupId string, groupInstanceIds []string) (*LeaveGroupResponse, error) { + controller, err := ca.client.Coordinator(groupId) + if err != nil { + return nil, err + } + request := &LeaveGroupRequest{ + Version: 3, + GroupId: groupId, + } + for _, instanceId := range groupInstanceIds { + groupInstanceId := instanceId + request.Members = append(request.Members, MemberIdentity{ + GroupInstanceId: &groupInstanceId, + }) + } + return controller.LeaveGroup(request) +} diff --git a/vendor/github.com/Shopify/sarama/alter_client_quotas_request.go b/vendor/github.com/Shopify/sarama/alter_client_quotas_request.go new file mode 100644 index 000000000..f528512d0 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/alter_client_quotas_request.go @@ -0,0 +1,194 @@ +package sarama + +// AlterClientQuotas Request (Version: 0) => [entries] validate_only +// entries => [entity] [ops] +// entity => entity_type entity_name +// entity_type => STRING +// entity_name => NULLABLE_STRING +// ops => key value remove +// key => STRING +// value => FLOAT64 +// remove => BOOLEAN +// validate_only => BOOLEAN + +type AlterClientQuotasRequest struct { + Entries []AlterClientQuotasEntry // The quota configuration entries to alter. + ValidateOnly bool // Whether the alteration should be validated, but not performed. +} + +type AlterClientQuotasEntry struct { + Entity []QuotaEntityComponent // The quota entity to alter. + Ops []ClientQuotasOp // An individual quota configuration entry to alter. +} + +type ClientQuotasOp struct { + Key string // The quota configuration key. + Value float64 // The value to set, otherwise ignored if the value is to be removed. + Remove bool // Whether the quota configuration value should be removed, otherwise set. 
+} + +func (a *AlterClientQuotasRequest) encode(pe packetEncoder) error { + // Entries + if err := pe.putArrayLength(len(a.Entries)); err != nil { + return err + } + for _, e := range a.Entries { + if err := e.encode(pe); err != nil { + return err + } + } + + // ValidateOnly + pe.putBool(a.ValidateOnly) + + return nil +} + +func (a *AlterClientQuotasRequest) decode(pd packetDecoder, version int16) error { + // Entries + entryCount, err := pd.getArrayLength() + if err != nil { + return err + } + if entryCount > 0 { + a.Entries = make([]AlterClientQuotasEntry, entryCount) + for i := range a.Entries { + e := AlterClientQuotasEntry{} + if err = e.decode(pd, version); err != nil { + return err + } + a.Entries[i] = e + } + } else { + a.Entries = []AlterClientQuotasEntry{} + } + + // ValidateOnly + validateOnly, err := pd.getBool() + if err != nil { + return err + } + a.ValidateOnly = validateOnly + + return nil +} + +func (a *AlterClientQuotasEntry) encode(pe packetEncoder) error { + // Entity + if err := pe.putArrayLength(len(a.Entity)); err != nil { + return err + } + for _, component := range a.Entity { + if err := component.encode(pe); err != nil { + return err + } + } + + // Ops + if err := pe.putArrayLength(len(a.Ops)); err != nil { + return err + } + for _, o := range a.Ops { + if err := o.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (a *AlterClientQuotasEntry) decode(pd packetDecoder, version int16) error { + // Entity + componentCount, err := pd.getArrayLength() + if err != nil { + return err + } + if componentCount > 0 { + a.Entity = make([]QuotaEntityComponent, componentCount) + for i := 0; i < componentCount; i++ { + component := QuotaEntityComponent{} + if err := component.decode(pd, version); err != nil { + return err + } + a.Entity[i] = component + } + } else { + a.Entity = []QuotaEntityComponent{} + } + + // Ops + opCount, err := pd.getArrayLength() + if err != nil { + return err + } + if opCount > 0 { + a.Ops = make([]ClientQuotasOp, opCount) + for i := range a.Ops { + c := ClientQuotasOp{} + if err = c.decode(pd, version); err != nil { + return err + } + a.Ops[i] = c + } + } else { + a.Ops = []ClientQuotasOp{} + } + + return nil +} + +func (c *ClientQuotasOp) encode(pe packetEncoder) error { + // Key + if err := pe.putString(c.Key); err != nil { + return err + } + + // Value + pe.putFloat64(c.Value) + + // Remove + pe.putBool(c.Remove) + + return nil +} + +func (c *ClientQuotasOp) decode(pd packetDecoder, version int16) error { + // Key + key, err := pd.getString() + if err != nil { + return err + } + c.Key = key + + // Value + value, err := pd.getFloat64() + if err != nil { + return err + } + c.Value = value + + // Remove + remove, err := pd.getBool() + if err != nil { + return err + } + c.Remove = remove + + return nil +} + +func (a *AlterClientQuotasRequest) key() int16 { + return 49 +} + +func (a *AlterClientQuotasRequest) version() int16 { + return 0 +} + +func (a *AlterClientQuotasRequest) headerVersion() int16 { + return 1 +} + +func (a *AlterClientQuotasRequest) requiredVersion() KafkaVersion { + return V2_6_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/alter_client_quotas_response.go b/vendor/github.com/Shopify/sarama/alter_client_quotas_response.go new file mode 100644 index 000000000..ccd27d5f5 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/alter_client_quotas_response.go @@ -0,0 +1,145 @@ +package sarama + +import ( + "time" +) + +// AlterClientQuotas Response (Version: 0) => throttle_time_ms [entries] +// throttle_time_ms => 
INT32 +// entries => error_code error_message [entity] +// error_code => INT16 +// error_message => NULLABLE_STRING +// entity => entity_type entity_name +// entity_type => STRING +// entity_name => NULLABLE_STRING + +type AlterClientQuotasResponse struct { + ThrottleTime time.Duration // The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. + Entries []AlterClientQuotasEntryResponse // The quota configuration entries altered. +} + +type AlterClientQuotasEntryResponse struct { + ErrorCode KError // The error code, or `0` if the quota alteration succeeded. + ErrorMsg *string // The error message, or `null` if the quota alteration succeeded. + Entity []QuotaEntityComponent // The quota entity altered. +} + +func (a *AlterClientQuotasResponse) encode(pe packetEncoder) error { + // ThrottleTime + pe.putInt32(int32(a.ThrottleTime / time.Millisecond)) + + // Entries + if err := pe.putArrayLength(len(a.Entries)); err != nil { + return err + } + for _, e := range a.Entries { + if err := e.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (a *AlterClientQuotasResponse) decode(pd packetDecoder, version int16) error { + // ThrottleTime + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + // Entries + entryCount, err := pd.getArrayLength() + if err != nil { + return err + } + if entryCount > 0 { + a.Entries = make([]AlterClientQuotasEntryResponse, entryCount) + for i := range a.Entries { + e := AlterClientQuotasEntryResponse{} + if err = e.decode(pd, version); err != nil { + return err + } + a.Entries[i] = e + } + } else { + a.Entries = []AlterClientQuotasEntryResponse{} + } + + return nil +} + +func (a *AlterClientQuotasEntryResponse) encode(pe packetEncoder) error { + // ErrorCode + pe.putInt16(int16(a.ErrorCode)) + + // ErrorMsg + if err := pe.putNullableString(a.ErrorMsg); err != nil { + return err + } + + // Entity + if err := pe.putArrayLength(len(a.Entity)); err != nil { + return err + } + for _, component := range a.Entity { + if err := component.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (a *AlterClientQuotasEntryResponse) decode(pd packetDecoder, version int16) error { + // ErrorCode + errCode, err := pd.getInt16() + if err != nil { + return err + } + a.ErrorCode = KError(errCode) + + // ErrorMsg + errMsg, err := pd.getNullableString() + if err != nil { + return err + } + a.ErrorMsg = errMsg + + // Entity + componentCount, err := pd.getArrayLength() + if err != nil { + return err + } + if componentCount > 0 { + a.Entity = make([]QuotaEntityComponent, componentCount) + for i := 0; i < componentCount; i++ { + component := QuotaEntityComponent{} + if err := component.decode(pd, version); err != nil { + return err + } + a.Entity[i] = component + } + } else { + a.Entity = []QuotaEntityComponent{} + } + + return nil +} + +func (a *AlterClientQuotasResponse) key() int16 { + return 49 +} + +func (a *AlterClientQuotasResponse) version() int16 { + return 0 +} + +func (a *AlterClientQuotasResponse) headerVersion() int16 { + return 0 +} + +func (a *AlterClientQuotasResponse) requiredVersion() KafkaVersion { + return V2_6_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/alter_configs_request.go b/vendor/github.com/Shopify/sarama/alter_configs_request.go index c88bb604a..8b94b1f3f 100644 --- a/vendor/github.com/Shopify/sarama/alter_configs_request.go +++ 
b/vendor/github.com/Shopify/sarama/alter_configs_request.go @@ -1,12 +1,12 @@ package sarama -//AlterConfigsRequest is an alter config request type +// AlterConfigsRequest is an alter config request type type AlterConfigsRequest struct { Resources []*AlterConfigsResource ValidateOnly bool } -//AlterConfigsResource is an alter config resource type +// AlterConfigsResource is an alter config resource type type AlterConfigsResource struct { Type ConfigResourceType Name string diff --git a/vendor/github.com/Shopify/sarama/alter_configs_response.go b/vendor/github.com/Shopify/sarama/alter_configs_response.go index 3266f9274..84cd86c72 100644 --- a/vendor/github.com/Shopify/sarama/alter_configs_response.go +++ b/vendor/github.com/Shopify/sarama/alter_configs_response.go @@ -2,13 +2,13 @@ package sarama import "time" -//AlterConfigsResponse is a response type for alter config +// AlterConfigsResponse is a response type for alter config type AlterConfigsResponse struct { ThrottleTime time.Duration Resources []*AlterConfigsResourceResponse } -//AlterConfigsResourceResponse is a response type for alter config resource +// AlterConfigsResourceResponse is a response type for alter config resource type AlterConfigsResourceResponse struct { ErrorCode int16 ErrorMsg string @@ -23,16 +23,9 @@ func (a *AlterConfigsResponse) encode(pe packetEncoder) error { return err } - for i := range a.Resources { - pe.putInt16(a.Resources[i].ErrorCode) - err := pe.putString(a.Resources[i].ErrorMsg) - if err != nil { - return nil - } - pe.putInt8(int8(a.Resources[i].Type)) - err = pe.putString(a.Resources[i].Name) - if err != nil { - return nil + for _, v := range a.Resources { + if err := v.encode(pe); err != nil { + return err } } @@ -56,30 +49,52 @@ func (a *AlterConfigsResponse) decode(pd packetDecoder, version int16) error { for i := range a.Resources { a.Resources[i] = new(AlterConfigsResourceResponse) - errCode, err := pd.getInt16() - if err != nil { + if err := a.Resources[i].decode(pd, version); err != nil { return err } - a.Resources[i].ErrorCode = errCode + } - e, err := pd.getString() - if err != nil { - return err - } - a.Resources[i].ErrorMsg = e + return nil +} - t, err := pd.getInt8() - if err != nil { - return err - } - a.Resources[i].Type = ConfigResourceType(t) +func (a *AlterConfigsResourceResponse) encode(pe packetEncoder) error { + pe.putInt16(a.ErrorCode) + err := pe.putString(a.ErrorMsg) + if err != nil { + return err + } + pe.putInt8(int8(a.Type)) + err = pe.putString(a.Name) + if err != nil { + return err + } + return nil +} - name, err := pd.getString() - if err != nil { - return err - } - a.Resources[i].Name = name +func (a *AlterConfigsResourceResponse) decode(pd packetDecoder, version int16) error { + errCode, err := pd.getInt16() + if err != nil { + return err + } + a.ErrorCode = errCode + + e, err := pd.getString() + if err != nil { + return err + } + a.ErrorMsg = e + + t, err := pd.getInt8() + if err != nil { + return err + } + a.Type = ConfigResourceType(t) + + name, err := pd.getString() + if err != nil { + return err } + a.Name = name return nil } diff --git a/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_request.go b/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_request.go new file mode 100644 index 000000000..0530d8946 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_request.go @@ -0,0 +1,142 @@ +package sarama + +type AlterUserScramCredentialsRequest struct { + Version int16 + + // Deletions represent list of SCRAM 
credentials to remove + Deletions []AlterUserScramCredentialsDelete + + // Upsertions represent list of SCRAM credentials to update/insert + Upsertions []AlterUserScramCredentialsUpsert +} + +type AlterUserScramCredentialsDelete struct { + Name string + Mechanism ScramMechanismType +} + +type AlterUserScramCredentialsUpsert struct { + Name string + Mechanism ScramMechanismType + Iterations int32 + Salt []byte + saltedPassword []byte + + // This field is never transmitted over the wire + // @see: https://tools.ietf.org/html/rfc5802 + Password []byte +} + +func (r *AlterUserScramCredentialsRequest) encode(pe packetEncoder) error { + pe.putCompactArrayLength(len(r.Deletions)) + for _, d := range r.Deletions { + if err := pe.putCompactString(d.Name); err != nil { + return err + } + pe.putInt8(int8(d.Mechanism)) + pe.putEmptyTaggedFieldArray() + } + + pe.putCompactArrayLength(len(r.Upsertions)) + for _, u := range r.Upsertions { + if err := pe.putCompactString(u.Name); err != nil { + return err + } + pe.putInt8(int8(u.Mechanism)) + pe.putInt32(u.Iterations) + + if err := pe.putCompactBytes(u.Salt); err != nil { + return err + } + + // do not transmit the password over the wire + formatter := scramFormatter{mechanism: u.Mechanism} + salted, err := formatter.saltedPassword(u.Password, u.Salt, int(u.Iterations)) + if err != nil { + return err + } + + if err := pe.putCompactBytes(salted); err != nil { + return err + } + pe.putEmptyTaggedFieldArray() + } + + pe.putEmptyTaggedFieldArray() + return nil +} + +func (r *AlterUserScramCredentialsRequest) decode(pd packetDecoder, version int16) error { + numDeletions, err := pd.getCompactArrayLength() + if err != nil { + return err + } + + r.Deletions = make([]AlterUserScramCredentialsDelete, numDeletions) + for i := 0; i < numDeletions; i++ { + r.Deletions[i] = AlterUserScramCredentialsDelete{} + if r.Deletions[i].Name, err = pd.getCompactString(); err != nil { + return err + } + mechanism, err := pd.getInt8() + if err != nil { + return err + } + r.Deletions[i].Mechanism = ScramMechanismType(mechanism) + if _, err = pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + + numUpsertions, err := pd.getCompactArrayLength() + if err != nil { + return err + } + + r.Upsertions = make([]AlterUserScramCredentialsUpsert, numUpsertions) + for i := 0; i < numUpsertions; i++ { + r.Upsertions[i] = AlterUserScramCredentialsUpsert{} + if r.Upsertions[i].Name, err = pd.getCompactString(); err != nil { + return err + } + mechanism, err := pd.getInt8() + if err != nil { + return err + } + + r.Upsertions[i].Mechanism = ScramMechanismType(mechanism) + if r.Upsertions[i].Iterations, err = pd.getInt32(); err != nil { + return err + } + if r.Upsertions[i].Salt, err = pd.getCompactBytes(); err != nil { + return err + } + if r.Upsertions[i].saltedPassword, err = pd.getCompactBytes(); err != nil { + return err + } + if _, err = pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + + if _, err = pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + return nil +} + +func (r *AlterUserScramCredentialsRequest) key() int16 { + return 51 +} + +func (r *AlterUserScramCredentialsRequest) version() int16 { + return r.Version +} + +func (r *AlterUserScramCredentialsRequest) headerVersion() int16 { + return 2 +} + +func (r *AlterUserScramCredentialsRequest) requiredVersion() KafkaVersion { + return V2_7_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_response.go 
b/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_response.go new file mode 100644 index 000000000..31e167b5e --- /dev/null +++ b/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_response.go @@ -0,0 +1,94 @@ +package sarama + +import "time" + +type AlterUserScramCredentialsResponse struct { + Version int16 + + ThrottleTime time.Duration + + Results []*AlterUserScramCredentialsResult +} + +type AlterUserScramCredentialsResult struct { + User string + + ErrorCode KError + ErrorMessage *string +} + +func (r *AlterUserScramCredentialsResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) + pe.putCompactArrayLength(len(r.Results)) + + for _, u := range r.Results { + if err := pe.putCompactString(u.User); err != nil { + return err + } + pe.putInt16(int16(u.ErrorCode)) + if err := pe.putNullableCompactString(u.ErrorMessage); err != nil { + return err + } + pe.putEmptyTaggedFieldArray() + } + + pe.putEmptyTaggedFieldArray() + return nil +} + +func (r *AlterUserScramCredentialsResponse) decode(pd packetDecoder, version int16) error { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + numResults, err := pd.getCompactArrayLength() + if err != nil { + return err + } + + if numResults > 0 { + r.Results = make([]*AlterUserScramCredentialsResult, numResults) + for i := 0; i < numResults; i++ { + r.Results[i] = &AlterUserScramCredentialsResult{} + if r.Results[i].User, err = pd.getCompactString(); err != nil { + return err + } + + kerr, err := pd.getInt16() + if err != nil { + return err + } + + r.Results[i].ErrorCode = KError(kerr) + if r.Results[i].ErrorMessage, err = pd.getCompactNullableString(); err != nil { + return err + } + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + } + + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + return nil +} + +func (r *AlterUserScramCredentialsResponse) key() int16 { + return 51 +} + +func (r *AlterUserScramCredentialsResponse) version() int16 { + return r.Version +} + +func (r *AlterUserScramCredentialsResponse) headerVersion() int16 { + return 2 +} + +func (r *AlterUserScramCredentialsResponse) requiredVersion() KafkaVersion { + return V2_7_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/api_versions_request.go b/vendor/github.com/Shopify/sarama/api_versions_request.go index d67c5e1e5..e5b3baf64 100644 --- a/vendor/github.com/Shopify/sarama/api_versions_request.go +++ b/vendor/github.com/Shopify/sarama/api_versions_request.go @@ -1,29 +1,69 @@ package sarama -//ApiVersionsRequest ... +const defaultClientSoftwareName = "sarama" + type ApiVersionsRequest struct { + // Version defines the protocol version to use for encode and decode + Version int16 + // ClientSoftwareName contains the name of the client. + ClientSoftwareName string + // ClientSoftwareVersion contains the version of the client. 
+ ClientSoftwareVersion string } -func (a *ApiVersionsRequest) encode(pe packetEncoder) error { +func (r *ApiVersionsRequest) encode(pe packetEncoder) (err error) { + if r.Version >= 3 { + if err := pe.putCompactString(r.ClientSoftwareName); err != nil { + return err + } + if err := pe.putCompactString(r.ClientSoftwareVersion); err != nil { + return err + } + pe.putEmptyTaggedFieldArray() + } + return nil } -func (a *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) { +func (r *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + if r.Version >= 3 { + if r.ClientSoftwareName, err = pd.getCompactString(); err != nil { + return err + } + if r.ClientSoftwareVersion, err = pd.getCompactString(); err != nil { + return err + } + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + return nil } -func (a *ApiVersionsRequest) key() int16 { +func (r *ApiVersionsRequest) key() int16 { return 18 } -func (a *ApiVersionsRequest) version() int16 { - return 0 +func (r *ApiVersionsRequest) version() int16 { + return r.Version } -func (a *ApiVersionsRequest) headerVersion() int16 { +func (r *ApiVersionsRequest) headerVersion() int16 { + if r.Version >= 3 { + return 2 + } return 1 } -func (a *ApiVersionsRequest) requiredVersion() KafkaVersion { - return V0_10_0_0 +func (r *ApiVersionsRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 0: + return V0_10_0_0 + case 3: + return V2_4_0_0 + default: + return V0_10_0_0 + } } diff --git a/vendor/github.com/Shopify/sarama/api_versions_response.go b/vendor/github.com/Shopify/sarama/api_versions_response.go index d09e8d9e1..ade911c59 100644 --- a/vendor/github.com/Shopify/sarama/api_versions_response.go +++ b/vendor/github.com/Shopify/sarama/api_versions_response.go @@ -1,76 +1,130 @@ package sarama -//ApiVersionsResponseBlock is an api version response block type -type ApiVersionsResponseBlock struct { - ApiKey int16 +// ApiVersionsResponseKey contains the APIs supported by the broker. +type ApiVersionsResponseKey struct { + // Version defines the protocol version to use for encode and decode + Version int16 + // ApiKey contains the API index. + ApiKey int16 + // MinVersion contains the minimum supported version, inclusive. MinVersion int16 + // MaxVersion contains the maximum supported version, inclusive. 
MaxVersion int16 } -func (b *ApiVersionsResponseBlock) encode(pe packetEncoder) error { - pe.putInt16(b.ApiKey) - pe.putInt16(b.MinVersion) - pe.putInt16(b.MaxVersion) +func (a *ApiVersionsResponseKey) encode(pe packetEncoder, version int16) (err error) { + a.Version = version + pe.putInt16(a.ApiKey) + + pe.putInt16(a.MinVersion) + + pe.putInt16(a.MaxVersion) + + if version >= 3 { + pe.putEmptyTaggedFieldArray() + } + return nil } -func (b *ApiVersionsResponseBlock) decode(pd packetDecoder) error { - var err error - - if b.ApiKey, err = pd.getInt16(); err != nil { +func (a *ApiVersionsResponseKey) decode(pd packetDecoder, version int16) (err error) { + a.Version = version + if a.ApiKey, err = pd.getInt16(); err != nil { return err } - if b.MinVersion, err = pd.getInt16(); err != nil { + if a.MinVersion, err = pd.getInt16(); err != nil { return err } - if b.MaxVersion, err = pd.getInt16(); err != nil { + if a.MaxVersion, err = pd.getInt16(); err != nil { return err } + if version >= 3 { + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + return nil } -//ApiVersionsResponse is an api version response type type ApiVersionsResponse struct { - Err KError - ApiVersions []*ApiVersionsResponseBlock + // Version defines the protocol version to use for encode and decode + Version int16 + // ErrorCode contains the top-level error code. + ErrorCode int16 + // ApiKeys contains the APIs supported by the broker. + ApiKeys []ApiVersionsResponseKey + // ThrottleTimeMs contains the duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. + ThrottleTimeMs int32 } -func (r *ApiVersionsResponse) encode(pe packetEncoder) error { - pe.putInt16(int16(r.Err)) - if err := pe.putArrayLength(len(r.ApiVersions)); err != nil { - return err +func (r *ApiVersionsResponse) encode(pe packetEncoder) (err error) { + pe.putInt16(r.ErrorCode) + + if r.Version >= 3 { + pe.putCompactArrayLength(len(r.ApiKeys)) + } else { + if err := pe.putArrayLength(len(r.ApiKeys)); err != nil { + return err + } } - for _, apiVersion := range r.ApiVersions { - if err := apiVersion.encode(pe); err != nil { + for _, block := range r.ApiKeys { + if err := block.encode(pe, r.Version); err != nil { return err } } + + if r.Version >= 1 { + pe.putInt32(r.ThrottleTimeMs) + } + + if r.Version >= 3 { + pe.putEmptyTaggedFieldArray() + } + return nil } -func (r *ApiVersionsResponse) decode(pd packetDecoder, version int16) error { - kerr, err := pd.getInt16() - if err != nil { +func (r *ApiVersionsResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + if r.ErrorCode, err = pd.getInt16(); err != nil { return err } - r.Err = KError(kerr) + var numApiKeys int + if r.Version >= 3 { + numApiKeys, err = pd.getCompactArrayLength() + if err != nil { + return err + } + } else { + numApiKeys, err = pd.getArrayLength() + if err != nil { + return err + } + } + r.ApiKeys = make([]ApiVersionsResponseKey, numApiKeys) + for i := 0; i < numApiKeys; i++ { + var block ApiVersionsResponseKey + if err = block.decode(pd, r.Version); err != nil { + return err + } + r.ApiKeys[i] = block + } - numBlocks, err := pd.getArrayLength() - if err != nil { - return err + if r.Version >= 1 { + if r.ThrottleTimeMs, err = pd.getInt32(); err != nil { + return err + } } - r.ApiVersions = make([]*ApiVersionsResponseBlock, numBlocks) - for i := 0; i < numBlocks; i++ { - block := new(ApiVersionsResponseBlock) - if err := block.decode(pd); err != nil { + if 
r.Version >= 3 { + if _, err = pd.getEmptyTaggedFieldArray(); err != nil { return err } - r.ApiVersions[i] = block } return nil @@ -81,13 +135,22 @@ func (r *ApiVersionsResponse) key() int16 { } func (r *ApiVersionsResponse) version() int16 { - return 0 + return r.Version } -func (a *ApiVersionsResponse) headerVersion() int16 { +func (r *ApiVersionsResponse) headerVersion() int16 { + // ApiVersionsResponse always includes a v0 header. + // See KIP-511 for details return 0 } func (r *ApiVersionsResponse) requiredVersion() KafkaVersion { - return V0_10_0_0 + switch r.Version { + case 0: + return V0_10_0_0 + case 3: + return V2_4_0_0 + default: + return V0_10_0_0 + } } diff --git a/vendor/github.com/Shopify/sarama/async_producer.go b/vendor/github.com/Shopify/sarama/async_producer.go index f1ffc8f92..50f226f8e 100644 --- a/vendor/github.com/Shopify/sarama/async_producer.go +++ b/vendor/github.com/Shopify/sarama/async_producer.go @@ -2,20 +2,23 @@ package sarama import ( "encoding/binary" + "errors" "fmt" + "math" "sync" "time" "github.com/eapache/go-resiliency/breaker" "github.com/eapache/queue" + "github.com/rcrowley/go-metrics" ) // AsyncProducer publishes Kafka messages using a non-blocking API. It routes messages // to the correct broker for the provided topic-partition, refreshing metadata as appropriate, // and parses responses for errors. You must read from the Errors() channel or the // producer will deadlock. You must call Close() or AsyncClose() on a producer to avoid -// leaks: it will not be garbage-collected automatically when it passes out of -// scope. +// leaks and message loss: it will not be garbage-collected automatically when it passes +// out of scope and buffered messages may not be flushed. type AsyncProducer interface { // AsyncClose triggers a shutdown of the producer. The shutdown has completed @@ -26,7 +29,8 @@ type AsyncProducer interface { // Close shuts down the producer and waits for any buffered messages to be // flushed. You must call this function before a producer object passes out of - // scope, as it may otherwise leak memory. You must call this before calling + // scope, as it may otherwise leak memory. You must call this before the process + // shuts down, or you may lose messages. You must call this before calling // Close on the underlying client. Close() error @@ -45,65 +49,27 @@ type AsyncProducer interface { // you can set Producer.Return.Errors in your config to false, which prevents // errors to be returned. Errors() <-chan *ProducerError -} -// transactionManager keeps the state necessary to ensure idempotent production -type transactionManager struct { - producerID int64 - producerEpoch int16 - sequenceNumbers map[string]int32 - mutex sync.Mutex -} + // IsTransactional returns true when the current producer is transactional. + IsTransactional() bool -const ( - noProducerID = -1 - noProducerEpoch = -1 -) + // TxnStatus returns the current producer transaction status. + TxnStatus() ProducerTxnStatusFlag -func (t *transactionManager) getAndIncrementSequenceNumber(topic string, partition int32) (int32, int16) { - key := fmt.Sprintf("%s-%d", topic, partition) - t.mutex.Lock() - defer t.mutex.Unlock() - sequence := t.sequenceNumbers[key] - t.sequenceNumbers[key] = sequence + 1 - return sequence, t.producerEpoch -} + // BeginTxn marks the current transaction as ready.
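//
// The intended end-to-end flow, sketched from the methods added in this patch
// (the config fields and broker address are assumptions, error handling elided):
//
//	cfg := sarama.NewConfig()
//	cfg.Version = sarama.V0_11_0_0
//	cfg.Producer.Idempotent = true
//	cfg.Producer.RequiredAcks = sarama.WaitForAll
//	cfg.Net.MaxOpenRequests = 1
//	cfg.Producer.Transaction.ID = "my-txn-id" // placeholder transactional id
//	producer, _ := sarama.NewAsyncProducer([]string{"localhost:9092"}, cfg)
//	_ = producer.BeginTxn()
//	producer.Input() <- &sarama.ProducerMessage{Topic: "t", Value: sarama.StringEncoder("v")}
//	_ = producer.CommitTxn() // or AbortTxn() on failure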
+ BeginTxn() error -func (t *transactionManager) bumpEpoch() { - t.mutex.Lock() - defer t.mutex.Unlock() - t.producerEpoch++ - for k := range t.sequenceNumbers { - t.sequenceNumbers[k] = 0 - } -} + // CommitTxn commits the current transaction. + CommitTxn() error -func (t *transactionManager) getProducerID() (int64, int16) { - t.mutex.Lock() - defer t.mutex.Unlock() - return t.producerID, t.producerEpoch -} + // AbortTxn aborts the current transaction. + AbortTxn() error -func newTransactionManager(conf *Config, client Client) (*transactionManager, error) { - txnmgr := &transactionManager{ - producerID: noProducerID, - producerEpoch: noProducerEpoch, - } + // AddOffsetsToTxn adds associated offsets to the current transaction. + AddOffsetsToTxn(offsets map[string][]*PartitionOffsetMetadata, groupId string) error - if conf.Producer.Idempotent { - initProducerIDResponse, err := client.InitProducerID() - if err != nil { - return nil, err - } - txnmgr.producerID = initProducerIDResponse.ProducerID - txnmgr.producerEpoch = initProducerIDResponse.ProducerEpoch - txnmgr.sequenceNumbers = make(map[string]int32) - txnmgr.mutex = sync.Mutex{} - - Logger.Printf("Obtained a ProducerId: %d and ProducerEpoch: %d\n", txnmgr.producerID, txnmgr.producerEpoch) - } - - return txnmgr, nil + // AddMessageToTxn adds message offsets to the current transaction. + AddMessageToTxn(msg *ConsumerMessage, groupId string, metadata *string) error } type asyncProducer struct { @@ -119,6 +85,9 @@ type asyncProducer struct { brokerLock sync.Mutex txnmgr *transactionManager + txLock sync.Mutex + + metricsRegistry metrics.Registry } // NewAsyncProducer creates a new AsyncProducer using the given broker addresses and configuration. @@ -151,15 +120,16 @@ func newAsyncProducer(client Client) (AsyncProducer, error) { } p := &asyncProducer{ - client: client, - conf: client.Config(), - errors: make(chan *ProducerError), - input: make(chan *ProducerMessage), - successes: make(chan *ProducerMessage), - retries: make(chan *ProducerMessage), - brokers: make(map[*Broker]*brokerProducer), - brokerRefs: make(map[*brokerProducer]int), - txnmgr: txnmgr, + client: client, + conf: client.Config(), + errors: make(chan *ProducerError), + input: make(chan *ProducerMessage), + successes: make(chan *ProducerMessage), + retries: make(chan *ProducerMessage), + brokers: make(map[*Broker]*brokerProducer), + brokerRefs: make(map[*brokerProducer]int), + txnmgr: txnmgr, + metricsRegistry: newCleanupRegistry(client.Config().MetricRegistry), } // launch our singleton dispatchers @@ -172,9 +142,12 @@ func newAsyncProducer(client Client) (AsyncProducer, error) { type flagSet int8 const ( - syn flagSet = 1 << iota // first message from partitionProducer to brokerProducer - fin // final message from partitionProducer to brokerProducer and back - shutdown // start the shutdown process + syn flagSet = 1 << iota // first message from partitionProducer to brokerProducer + fin // final message from partitionProducer to brokerProducer and back + shutdown // start the shutdown process + endtxn // end the current transaction + committxn // commit the current transaction + aborttxn // abort the current transaction ) // ProducerMessage is the collection of elements passed to the Producer in order to send a message. @@ -206,7 +179,7 @@ type ProducerMessage struct { // Partition is the partition that the message was sent to. This is only // guaranteed to be defined if the message was successfully delivered.
Partition int32 - // Timestamp can vary in behaviour depending on broker configuration, being + // Timestamp can vary in behavior depending on broker configuration, being // in either one of the CreateTime or LogAppendTime modes (default CreateTime), // and requiring version at least 0.10.0. // @@ -229,7 +202,7 @@ type ProducerMessage struct { const producerMessageOverhead = 26 // the metadata overhead of CRC, flags, etc. -func (m *ProducerMessage) byteSize(version int) int { +func (m *ProducerMessage) ByteSize(version int) int { var size int if version >= 2 { size = maximumRecordOverhead @@ -267,6 +240,10 @@ func (pe ProducerError) Error() string { return fmt.Sprintf("kafka: Failed to produce message to topic %s: %s", pe.Msg.Topic, pe.Err) } +func (pe ProducerError) Unwrap() error { + return pe.Err +} + // ProducerErrors is a type that wraps a batch of "ProducerError"s and implements the Error interface. // It can be returned from the Producer's Close method to avoid the need to manually drain the Errors channel // when closing a producer. @@ -276,6 +253,97 @@ func (pe ProducerErrors) Error() string { return fmt.Sprintf("kafka: Failed to deliver %d messages.", len(pe)) } +func (p *asyncProducer) IsTransactional() bool { + return p.txnmgr.isTransactional() +} + +func (p *asyncProducer) AddMessageToTxn(msg *ConsumerMessage, groupId string, metadata *string) error { + offsets := make(map[string][]*PartitionOffsetMetadata) + offsets[msg.Topic] = []*PartitionOffsetMetadata{ + { + Partition: msg.Partition, + Offset: msg.Offset + 1, + Metadata: metadata, + }, + } + return p.AddOffsetsToTxn(offsets, groupId) +} + +func (p *asyncProducer) AddOffsetsToTxn(offsets map[string][]*PartitionOffsetMetadata, groupId string) error { + p.txLock.Lock() + defer p.txLock.Unlock() + + if !p.IsTransactional() { + DebugLogger.Printf("producer/txnmgr [%s] attempt to call AddOffsetsToTxn on a non-transactional producer\n", p.txnmgr.transactionalID) + return ErrNonTransactedProducer + } + + DebugLogger.Printf("producer/txnmgr [%s] add offsets to transaction\n", p.txnmgr.transactionalID) + return p.txnmgr.addOffsetsToTxn(offsets, groupId) +} + +func (p *asyncProducer) TxnStatus() ProducerTxnStatusFlag { + return p.txnmgr.currentTxnStatus() +} + +func (p *asyncProducer) BeginTxn() error { + p.txLock.Lock() + defer p.txLock.Unlock() + + if !p.IsTransactional() { + DebugLogger.Println("producer/txnmgr attempt to call BeginTxn on a non-transactional producer") + return ErrNonTransactedProducer + } + + return p.txnmgr.transitionTo(ProducerTxnFlagInTransaction, nil) +} + +func (p *asyncProducer) CommitTxn() error { + p.txLock.Lock() + defer p.txLock.Unlock() + + if !p.IsTransactional() { + DebugLogger.Printf("producer/txnmgr [%s] attempt to call CommitTxn on a non-transactional producer\n", p.txnmgr.transactionalID) + return ErrNonTransactedProducer + } + + DebugLogger.Printf("producer/txnmgr [%s] committing transaction\n", p.txnmgr.transactionalID) + err := p.finishTransaction(true) + if err != nil { + return err + } + DebugLogger.Printf("producer/txnmgr [%s] transaction committed\n", p.txnmgr.transactionalID) + return nil +} + +func (p *asyncProducer) AbortTxn() error { + p.txLock.Lock() + defer p.txLock.Unlock() + + if !p.IsTransactional() { + DebugLogger.Printf("producer/txnmgr [%s] attempt to call AbortTxn on a non-transactional producer\n", p.txnmgr.transactionalID) + return ErrNonTransactedProducer + } + DebugLogger.Printf("producer/txnmgr [%s] aborting transaction\n", p.txnmgr.transactionalID) + err := 
p.finishTransaction(false) + if err != nil { + return err + } + DebugLogger.Printf("producer/txnmgr [%s] transaction aborted\n", p.txnmgr.transactionalID) + return nil +} + +func (p *asyncProducer) finishTransaction(commit bool) error { + p.inFlight.Add(1) + if commit { + p.input <- &ProducerMessage{flags: endtxn | committxn} + } else { + p.input <- &ProducerMessage{flags: endtxn | aborttxn} + } + p.inFlight.Wait() + return p.txnmgr.finishTransaction(commit) +} + func (p *asyncProducer) Errors() <-chan *ProducerError { return p.errors } @@ -329,11 +397,27 @@ func (p *asyncProducer) dispatcher() { continue } + if msg.flags&endtxn != 0 { + var err error + if msg.flags&committxn != 0 { + err = p.txnmgr.transitionTo(ProducerTxnFlagEndTransaction|ProducerTxnFlagCommittingTransaction, nil) + } else { + err = p.txnmgr.transitionTo(ProducerTxnFlagEndTransaction|ProducerTxnFlagAbortingTransaction, nil) + } + if err != nil { + Logger.Printf("producer/txnmgr unable to end transaction %s", err) + } + p.inFlight.Done() + continue + } + if msg.flags&shutdown != 0 { shuttingDown = true p.inFlight.Done() continue - } else if msg.retries == 0 { + } + + if msg.retries == 0 { if shuttingDown { // we can't just call returnError here because that decrements the wait group, // which hasn't been incremented yet for this message, and shouldn't be @@ -346,6 +430,13 @@ func (p *asyncProducer) dispatcher() { continue } p.inFlight.Add(1) + // Ignore retried msgs, they are already in the txn. + // Can't produce new record when transaction is not started. + if p.IsTransactional() && p.txnmgr.currentTxnStatus()&ProducerTxnFlagInTransaction == 0 { + Logger.Printf("attempt to send message when transaction is not started or is in ending state, got %d, expect %d\n", p.txnmgr.currentTxnStatus(), ProducerTxnFlagInTransaction) + p.returnError(msg, ErrTransactionNotReady) + continue + } } for _, interceptor := range p.conf.Producer.Interceptors { @@ -359,7 +450,7 @@ func (p *asyncProducer) dispatcher() { p.returnError(msg, ConfigurationError("Producing headers requires Kafka at least v0.11")) continue } - if msg.byteSize(version) > p.conf.Producer.MaxMessageBytes { + if msg.ByteSize(version) > p.conf.Producer.MaxMessageBytes { p.returnError(msg, ErrMessageSizeTooLarge) continue } @@ -445,7 +536,6 @@ func (tp *topicProducer) partitionMessage(msg *ProducerMessage) error { } return }) - if err != nil { return err } @@ -523,6 +613,18 @@ func (pp *partitionProducer) backoff(retries int) { } } +func (pp *partitionProducer) updateLeaderIfBrokerProducerIsNil(msg *ProducerMessage) error { + if pp.brokerProducer == nil { + if err := pp.updateLeader(); err != nil { + pp.parent.returnError(msg, err) + pp.backoff(msg.retries) + return err + } + Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID()) + } + return nil +} + func (pp *partitionProducer) dispatch() { // try to prefetch the leader; if this doesn't work, we'll do a proper call to `updateLeader` // on the first message @@ -554,6 +656,9 @@ func (pp *partitionProducer) dispatch() { } if msg.retries > pp.highWatermark { + if err := pp.updateLeaderIfBrokerProducerIsNil(msg); err != nil { + continue + } // a new, higher, retry level; handle it and then back off pp.newHighWatermark(msg.retries) pp.backoff(msg.retries) @@ -580,14 +685,8 @@ func (pp *partitionProducer) dispatch() { // if we made it this far then the current msg contains real data, and can be sent to the next goroutine // without breaking any of our ordering guarantees - - if
pp.brokerProducer == nil { - if err := pp.updateLeader(); err != nil { - pp.parent.returnError(msg, err) - pp.backoff(msg.retries) - continue - } - Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID()) + if err := pp.updateLeaderIfBrokerProducerIsNil(msg); err != nil { + continue } // Now that we know we have a broker to actually try and send this message to, generate the sequence @@ -599,6 +698,10 @@ func (pp *partitionProducer) dispatch() { msg.hasSequence = true } + if pp.parent.IsTransactional() { + pp.parent.txnmgr.maybeAddPartitionToCurrentTxn(pp.topic, pp.partition) + } + pp.brokerProducer.input <- msg } } @@ -671,6 +774,7 @@ func (p *asyncProducer) newBrokerProducer(broker *Broker) *brokerProducer { var ( input = make(chan *ProducerMessage) bridge = make(chan *produceSet) + pending = make(chan *brokerProducerResponse) responses = make(chan *brokerProducerResponse) ) @@ -680,7 +784,6 @@ func (p *asyncProducer) newBrokerProducer(broker *Broker) *brokerProducer { input: input, output: bridge, responses: responses, - stopchan: make(chan struct{}), buffer: newProduceSet(p), currentRetries: make(map[string]map[int32]error), } @@ -688,18 +791,88 @@ func (p *asyncProducer) newBrokerProducer(broker *Broker) *brokerProducer { // minimal bridge to make the network response `select`able go withRecover(func() { + // Use a wait group to know if we still have in flight requests + var wg sync.WaitGroup + for set := range bridge { request := set.buildRequest() - response, err := broker.Produce(request) + // Count the in flight requests to know when we can close the pending channel safely + wg.Add(1) + // Capture the current set to forward in the callback + sendResponse := func(set *produceSet) ProduceCallback { + return func(response *ProduceResponse, err error) { + // Forward the response to make sure we do not block the responseReceiver + pending <- &brokerProducerResponse{ + set: set, + err: err, + res: response, + } + wg.Done() + } + }(set) + + if p.IsTransactional() { + // Add partition to tx before sending current batch + err := p.txnmgr.publishTxnPartitions() + if err != nil { + // Request failed to be sent + sendResponse(nil, err) + continue + } + } - responses <- &brokerProducerResponse{ - set: set, - err: err, - res: response, + // Use AsyncProduce vs Produce to not block waiting for the response + // so that we can pipeline multiple produce requests and achieve higher throughput, see: + // https://kafka.apache.org/protocol#protocol_network + err := broker.AsyncProduce(request, sendResponse) + if err != nil { + // Request failed to be sent + sendResponse(nil, err) + continue + } + // Callback is not called when using NoResponse + if p.conf.Producer.RequiredAcks == NoResponse { + // Provide the expected nil response + sendResponse(nil, nil) + } + } + // Wait for all in flight requests to close the pending channel safely + wg.Wait() + close(pending) + }) + + // In order to avoid a deadlock when closing the broker on network or malformed response error + // we use an intermediate channel to buffer and send pending responses in order + // This is because the AsyncProduce callback inside the bridge is invoked from the broker + // responseReceiver goroutine and closing the broker requires such goroutine to be finished + go withRecover(func() { + buf := queue.New() + for { + if buf.Length() == 0 { + res, ok := <-pending + if !ok { + // We are done forwarding the last pending response + close(responses) + return + } + buf.Add(res) + } + // Send the 
head pending response or buffer another one + // so that we never block the callback + headRes := buf.Peek().(*brokerProducerResponse) + select { + case res, ok := <-pending: + if !ok { + continue + } + buf.Add(res) + continue + case responses <- headRes: + buf.Remove() + continue } } - close(responses) }) if p.conf.Producer.Retry.Max <= 0 { @@ -725,10 +898,9 @@ type brokerProducer struct { output chan<- *produceSet responses <-chan *brokerProducerResponse abandoned chan struct{} - stopchan chan struct{} buffer *produceSet - timer <-chan time.Time + timer *time.Timer timerFired bool closing error @@ -737,6 +909,7 @@ type brokerProducer struct { func (bp *brokerProducer) run() { var output chan<- *produceSet + var timerChan <-chan time.Time Logger.Printf("producer/broker/%d starting up\n", bp.broker.ID()) for { @@ -776,6 +949,14 @@ func (bp *brokerProducer) run() { continue } + if msg.flags&fin == fin { + // New broker producer that was caught up by the retry loop + bp.parent.retryMessage(msg, ErrShuttingDown) + DebugLogger.Printf("producer/broker/%d state change to [dying-%d] on %s/%d\n", + bp.broker.ID(), msg.retries, msg.Topic, msg.Partition) + continue + } + if bp.buffer.wouldOverflow(msg) { Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID()) if err := bp.waitForSpace(msg, false); err != nil { @@ -798,20 +979,18 @@ func (bp *brokerProducer) run() { } if bp.parent.conf.Producer.Flush.Frequency > 0 && bp.timer == nil { - bp.timer = time.After(bp.parent.conf.Producer.Flush.Frequency) + bp.timer = time.NewTimer(bp.parent.conf.Producer.Flush.Frequency) + timerChan = bp.timer.C } - case <-bp.timer: + case <-timerChan: bp.timerFired = true case output <- bp.buffer: bp.rollOver() + timerChan = nil case response, ok := <-bp.responses: if ok { bp.handleResponse(response) } - case <-bp.stopchan: - Logger.Printf( - "producer/broker/%d run loop asked to stop\n", bp.broker.ID()) - return } if bp.timerFired || bp.buffer.readyToFlush() { @@ -832,10 +1011,11 @@ func (bp *brokerProducer) shutdown() { } } close(bp.output) + // Drain responses from the bridge goroutine for response := range bp.responses { bp.handleResponse(response) } - close(bp.stopchan) + // No more brokerProducer related goroutine should be running Logger.Printf("producer/broker/%d shut down\n", bp.broker.ID()) } @@ -866,6 +1046,9 @@ func (bp *brokerProducer) waitForSpace(msg *ProducerMessage, forceRollover bool) } func (bp *brokerProducer) rollOver() { + if bp.timer != nil { + bp.timer.Stop() + } bp.timer = nil bp.timerFired = false bp.buffer = newProduceSet(bp.parent) @@ -978,7 +1161,7 @@ func (p *asyncProducer) retryBatch(topic string, partition int32, pSet *partitio produceSet.bufferCount += len(pSet.msgs) for _, msg := range pSet.msgs { if msg.retries >= p.conf.Producer.Retry.Max { - p.returnError(msg, kerr) + p.returnErrors(pSet.msgs, kerr) return } msg.retries++ @@ -995,15 +1178,16 @@ func (p *asyncProducer) retryBatch(topic string, partition int32, pSet *partitio } bp := p.getBrokerProducer(leader) bp.output <- produceSet + p.unrefBrokerProducer(leader, bp) } func (bp *brokerProducer) handleError(sent *produceSet, err error) { - switch err.(type) { - case PacketEncodingError: + var target PacketEncodingError + if errors.As(err, &target) { sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) { bp.parent.returnErrors(pSet.msgs, err) }) - default: + } else { Logger.Printf("producer/broker/%d state change to [closing] because %s\n", bp.broker.ID(), err) 
bp.parent.abandonBrokerConnection(bp.broker) _ = bp.broker.Close() @@ -1063,15 +1247,50 @@ func (p *asyncProducer) shutdown() { close(p.retries) close(p.errors) close(p.successes) + + p.metricsRegistry.UnregisterAll() +} + +func (p *asyncProducer) bumpIdempotentProducerEpoch() { + _, epoch := p.txnmgr.getProducerID() + if epoch == math.MaxInt16 { + Logger.Println("producer/txnmanager epoch exhausted, requesting new producer ID") + txnmgr, err := newTransactionManager(p.conf, p.client) + if err != nil { + Logger.Println(err) + return + } + + p.txnmgr = txnmgr + } else { + p.txnmgr.bumpEpoch() + } +} + +func (p *asyncProducer) maybeTransitionToErrorState(err error) error { + if errors.Is(err, ErrClusterAuthorizationFailed) || + errors.Is(err, ErrProducerFenced) || + errors.Is(err, ErrUnsupportedVersion) || + errors.Is(err, ErrTransactionalIDAuthorizationFailed) { + return p.txnmgr.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagFatalError, err) + } + if p.txnmgr.coordinatorSupportsBumpingEpoch && p.txnmgr.currentTxnStatus()&ProducerTxnFlagEndTransaction == 0 { + p.txnmgr.epochBumpRequired = true + } + return p.txnmgr.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagAbortableError, err) } func (p *asyncProducer) returnError(msg *ProducerMessage, err error) { + if p.IsTransactional() { + _ = p.maybeTransitionToErrorState(err) + } // We need to reset the producer ID epoch if we set a sequence number on it, because the broker // will never see a message with this number, so we can never continue the sequence. - if msg.hasSequence { + if !p.IsTransactional() && msg.hasSequence { Logger.Printf("producer/txnmanager rolling over epoch due to publish failure on %s/%d", msg.Topic, msg.Partition) - p.txnmgr.bumpEpoch() + p.bumpIdempotentProducerEpoch() } + msg.clear() pErr := &ProducerError{Msg: msg, Err: err} if p.conf.Producer.Return.Errors { diff --git a/vendor/github.com/Shopify/sarama/balance_strategy.go b/vendor/github.com/Shopify/sarama/balance_strategy.go index 0ce7fea1f..4594df6f6 100644 --- a/vendor/github.com/Shopify/sarama/balance_strategy.go +++ b/vendor/github.com/Shopify/sarama/balance_strategy.go @@ -2,6 +2,8 @@ package sarama import ( "container/heap" + "errors" + "fmt" "math" "sort" "strings" @@ -56,48 +58,45 @@ type BalanceStrategy interface { // -------------------------------------------------------------------- // BalanceStrategyRange is the default and assigns partitions as ranges to consumer group members. 
-// Example with one topic T with six partitions (0..5) and two members (M1, M2): -// M1: {T: [0, 1, 2]} -// M2: {T: [3, 4, 5]} +// This follows the same logic as +// https://kafka.apache.org/31/javadoc/org/apache/kafka/clients/consumer/RangeAssignor.html +// +// Example with two topics T1 and T2 with six partitions each (0..5) and two members (M1, M2): +// +// M1: {T1: [0, 1, 2], T2: [0, 1, 2]} +// M2: {T1: [3, 4, 5], T2: [3, 4, 5]} var BalanceStrategyRange = &balanceStrategy{ name: RangeBalanceStrategyName, coreFn: func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) { - step := float64(len(partitions)) / float64(len(memberIDs)) + partitionsPerConsumer := len(partitions) / len(memberIDs) + consumersWithExtraPartition := len(partitions) % len(memberIDs) + + sort.Strings(memberIDs) for i, memberID := range memberIDs { - pos := float64(i) - min := int(math.Floor(pos*step + 0.5)) - max := int(math.Floor((pos+1)*step + 0.5)) + min := i*partitionsPerConsumer + int(math.Min(float64(consumersWithExtraPartition), float64(i))) + extra := 0 + if i < consumersWithExtraPartition { + extra = 1 + } + max := min + partitionsPerConsumer + extra plan.Add(memberID, topic, partitions[min:max]...) } }, } -// BalanceStrategyRoundRobin assigns partitions to members in alternating order. -// Example with topic T with six partitions (0..5) and two members (M1, M2): -// M1: {T: [0, 2, 4]} -// M2: {T: [1, 3, 5]} -var BalanceStrategyRoundRobin = &balanceStrategy{ - name: RoundRobinBalanceStrategyName, - coreFn: func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) { - for i, part := range partitions { - memberID := memberIDs[i%len(memberIDs)] - plan.Add(memberID, topic, part) - } - }, -} - // BalanceStrategySticky assigns partitions to members with an attempt to preserve earlier assignments // while maintaining a balanced partition distribution.
// Example with topic T with six partitions (0..5) and two members (M1, M2): -// M1: {T: [0, 2, 4]} -// M2: {T: [1, 3, 5]} +// +// M1: {T: [0, 2, 4]} +// M2: {T: [1, 3, 5]} // // On reassignment with an additional consumer, you might get an assignment plan like: -// M1: {T: [0, 2]} -// M2: {T: [1, 3]} -// M3: {T: [4, 5]} // +// M1: {T: [0, 2]} +// M2: {T: [1, 3]} +// M3: {T: [4, 5]} var BalanceStrategySticky = &stickyBalanceStrategy{} // -------------------------------------------------------------------- @@ -120,18 +119,27 @@ func (s *balanceStrategy) Plan(members map[string]ConsumerGroupMemberMetadata, t } } - // Sort members for each topic - for topic, memberIDs := range mbt { - sort.Sort(&balanceStrategySortable{ - topic: topic, - memberIDs: memberIDs, - }) + // func to sort and de-duplicate a StringSlice + uniq := func(ss sort.StringSlice) []string { + if ss.Len() < 2 { + return ss + } + sort.Sort(ss) + var i, j int + for i = 1; i < ss.Len(); i++ { + if ss[i] == ss[j] { + continue + } + j++ + ss.Swap(i, j) + } + return ss[:j+1] } // Assemble plan plan := make(BalanceStrategyPlan, len(members)) for topic, memberIDs := range mbt { - s.coreFn(plan, memberIDs, topic, topics[topic]) + s.coreFn(plan, uniq(memberIDs), topic, topics[topic]) } return plan, nil } @@ -141,30 +149,6 @@ func (s *balanceStrategy) AssignmentData(memberID string, topics map[string][]in return nil, nil } -type balanceStrategySortable struct { - topic string - memberIDs []string -} - -func (p balanceStrategySortable) Len() int { return len(p.memberIDs) } -func (p balanceStrategySortable) Swap(i, j int) { - p.memberIDs[i], p.memberIDs[j] = p.memberIDs[j], p.memberIDs[i] -} -func (p balanceStrategySortable) Less(i, j int) bool { - return balanceStrategyHashValue(p.topic, p.memberIDs[i]) < balanceStrategyHashValue(p.topic, p.memberIDs[j]) -} - -func balanceStrategyHashValue(vv ...string) uint32 { - h := uint32(2166136261) - for _, s := range vv { - for _, c := range s { - h ^= uint32(c) - h *= 16777619 - } - } - return h -} - type stickyBalanceStrategy struct { movements partitionMovements } @@ -353,6 +337,92 @@ func (s *stickyBalanceStrategy) balance(currentAssignment map[string][]topicPart } } +// BalanceStrategyRoundRobin assigns partitions to members in alternating order. 
+// For example, there are two topics (t0, t1) and two consumers (m0, m1), and each topic has three partitions (p0, p1, p2): +// m0: [t0p0, t0p2, t1p1] +// m1: [t0p1, t1p0, t1p2] +var BalanceStrategyRoundRobin = new(roundRobinBalancer) + +type roundRobinBalancer struct{} + +func (b *roundRobinBalancer) Name() string { + return RoundRobinBalanceStrategyName +} + +func (b *roundRobinBalancer) Plan(memberAndMetadata map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) { + if len(memberAndMetadata) == 0 || len(topics) == 0 { + return nil, errors.New("members and topics are not provided") + } + // sort partitions + var topicPartitions []topicAndPartition + for topic, partitions := range topics { + for _, partition := range partitions { + topicPartitions = append(topicPartitions, topicAndPartition{topic: topic, partition: partition}) + } + } + sort.SliceStable(topicPartitions, func(i, j int) bool { + pi := topicPartitions[i] + pj := topicPartitions[j] + return pi.comparedValue() < pj.comparedValue() + }) + + // sort members + var members []memberAndTopic + for memberID, meta := range memberAndMetadata { + m := memberAndTopic{ + memberID: memberID, + topics: make(map[string]struct{}), + } + for _, t := range meta.Topics { + m.topics[t] = struct{}{} + } + members = append(members, m) + } + sort.SliceStable(members, func(i, j int) bool { + mi := members[i] + mj := members[j] + return mi.memberID < mj.memberID + }) + + // assign partitions + plan := make(BalanceStrategyPlan, len(members)) + i := 0 + n := len(members) + for _, tp := range topicPartitions { + m := members[i%n] + for !m.hasTopic(tp.topic) { + i++ + m = members[i%n] + } + plan.Add(m.memberID, tp.topic, tp.partition) + i++ + } + return plan, nil +} + +func (b *roundRobinBalancer) AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error) { + return nil, nil // do nothing for now +} + +type topicAndPartition struct { + topic string + partition int32 +} + +func (tp *topicAndPartition) comparedValue() string { + return fmt.Sprintf("%s-%d", tp.topic, tp.partition) +} + +type memberAndTopic struct { + memberID string + topics map[string]struct{} +} + +func (m *memberAndTopic) hasTopic(topic string) bool { + _, isExist := m.topics[topic] + return isExist +} + // Calculate the balance score of the given assignment, as the sum of assigned partitions size difference of all consumer pairs. // A perfectly balanced assignment (with all consumers getting the same number of partitions) has a balance score of 0. // Lower balance score indicates a more balanced assignment. @@ -539,9 +609,9 @@ func assignPartition(partition topicPartitionAssignment, sortedCurrentSubscripti // Deserialize topic partition assignment data to aid with creation of a sticky assignment.
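To make the new round-robin assignor concrete, here is a minimal sketch (not part of the patch) that runs the example from the comment above through the exported BalanceStrategyRoundRobin; the member and topic names are illustrative assumptions:

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	// Two members subscribed to both topics, three partitions per topic,
	// mirroring the example in the BalanceStrategyRoundRobin doc comment.
	members := map[string]sarama.ConsumerGroupMemberMetadata{
		"m0": {Topics: []string{"t0", "t1"}},
		"m1": {Topics: []string{"t0", "t1"}},
	}
	topics := map[string][]int32{
		"t0": {0, 1, 2},
		"t1": {0, 1, 2},
	}
	plan, err := sarama.BalanceStrategyRoundRobin.Plan(members, topics)
	if err != nil {
		panic(err)
	}
	// Partitions alternate between the sorted member IDs:
	// m0 -> t0:[0 2] t1:[1], m1 -> t0:[1] t1:[0 2]
	fmt.Println(plan)
}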
func deserializeTopicPartitionAssignment(userDataBytes []byte) (StickyAssignorUserData, error) { userDataV1 := &StickyAssignorUserDataV1{} - if err := decode(userDataBytes, userDataV1); err != nil { + if err := decode(userDataBytes, userDataV1, nil); err != nil { userDataV0 := &StickyAssignorUserDataV0{} - if err := decode(userDataBytes, userDataV0); err != nil { + if err := decode(userDataBytes, userDataV0, nil); err != nil { return nil, err } return userDataV0, nil diff --git a/vendor/github.com/Shopify/sarama/broker.go b/vendor/github.com/Shopify/sarama/broker.go index 5858a23c0..d049e9b47 100644 --- a/vendor/github.com/Shopify/sarama/broker.go +++ b/vendor/github.com/Shopify/sarama/broker.go @@ -3,8 +3,10 @@ package sarama import ( "crypto/tls" "encoding/binary" + "errors" "fmt" "io" + "math/rand" "net" "sort" "strconv" @@ -28,29 +30,34 @@ type Broker struct { connErr error lock sync.Mutex opened int32 - responses chan responsePromise + responses chan *responsePromise done chan bool - registeredMetrics []string - - incomingByteRate metrics.Meter - requestRate metrics.Meter - requestSize metrics.Histogram - requestLatency metrics.Histogram - outgoingByteRate metrics.Meter - responseRate metrics.Meter - responseSize metrics.Histogram - requestsInFlight metrics.Counter - brokerIncomingByteRate metrics.Meter - brokerRequestRate metrics.Meter - brokerRequestSize metrics.Histogram - brokerRequestLatency metrics.Histogram - brokerOutgoingByteRate metrics.Meter - brokerResponseRate metrics.Meter - brokerResponseSize metrics.Histogram - brokerRequestsInFlight metrics.Counter - - kerberosAuthenticator GSSAPIKerberosAuth + metricRegistry metrics.Registry + incomingByteRate metrics.Meter + requestRate metrics.Meter + fetchRate metrics.Meter + requestSize metrics.Histogram + requestLatency metrics.Histogram + outgoingByteRate metrics.Meter + responseRate metrics.Meter + responseSize metrics.Histogram + requestsInFlight metrics.Counter + protocolRequestsRate map[int16]metrics.Meter + brokerIncomingByteRate metrics.Meter + brokerRequestRate metrics.Meter + brokerFetchRate metrics.Meter + brokerRequestSize metrics.Histogram + brokerRequestLatency metrics.Histogram + brokerOutgoingByteRate metrics.Meter + brokerResponseRate metrics.Meter + brokerResponseSize metrics.Histogram + brokerRequestsInFlight metrics.Counter + brokerThrottleTime metrics.Histogram + brokerProtocolRequestsRate map[int16]metrics.Meter + + kerberosAuthenticator GSSAPIKerberosAuth + clientSessionReauthenticationTimeMs int64 } // SASLMechanism specifies the SASL mechanism the client uses to authenticate with the broker @@ -120,10 +127,25 @@ type responsePromise struct { requestTime time.Time correlationID int32 headerVersion int16 + handler func([]byte, error) packets chan []byte errors chan error } +func (p *responsePromise) handle(packets []byte, err error) { + // Use callback when provided + if p.handler != nil { + p.handler(packets, err) + return + } + // Otherwise fallback to using channels + if err != nil { + p.errors <- err + return + } + p.packets <- packets +} + // NewBroker creates and returns a Broker targeting the given host:port address. // This does not attempt to actually connect, you have to call Open() for that. 
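The handler field added to responsePromise above is what lets AsyncProduce bypass the packets/errors channel pair. A self-contained sketch of that callback-or-channels dispatch, using hypothetical names rather than sarama's internals:

package main

import "fmt"

// promise mirrors the shape of sarama's responsePromise: an optional
// callback takes precedence over the packets/errors channel pair.
type promise struct {
	handler func([]byte, error)
	packets chan []byte
	errors  chan error
}

func (p *promise) handle(packets []byte, err error) {
	if p.handler != nil { // callback path (used by AsyncProduce)
		p.handler(packets, err)
		return
	}
	if err != nil { // channel path (used by sendAndReceive)
		p.errors <- err
		return
	}
	p.packets <- packets
}

func main() {
	// Callback-style consumer.
	cb := &promise{handler: func(b []byte, err error) { fmt.Println("cb:", string(b), err) }}
	cb.handle([]byte("response"), nil)

	// Channel-style consumer (buffered so handle does not block here).
	ch := &promise{packets: make(chan []byte, 1), errors: make(chan error, 1)}
	ch.handle([]byte("response"), nil)
	fmt.Println("ch:", string(<-ch.packets))
}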
func NewBroker(addr string) *Broker { @@ -149,11 +171,30 @@ func (b *Broker) Open(conf *Config) error { return err } + usingApiVersionsRequests := conf.Version.IsAtLeast(V2_4_0_0) && conf.ApiVersionsRequest + b.lock.Lock() - go withRecover(func() { - defer b.lock.Unlock() + b.metricRegistry = newCleanupRegistry(conf.MetricRegistry) + go withRecover(func() { + defer func() { + b.lock.Unlock() + + // Send an ApiVersionsRequest to identify the client (KIP-511). + // Ideally Sarama would use the response to control protocol versions, + // but for now we just fire-and-forget to send the client software name and version + if usingApiVersionsRequests { + _, err = b.ApiVersions(&ApiVersionsRequest{ + Version: 3, + ClientSoftwareName: defaultClientSoftwareName, + ClientSoftwareVersion: version(), + }) + if err != nil { + Logger.Printf("Error while sending ApiVersionsRequest to broker %s: %s\n", b.addr, err) + } + } + }() dialer := conf.getDialer() b.conn, b.connErr = dialer.Dial("tcp", b.addr) if b.connErr != nil { @@ -170,27 +211,34 @@ func (b *Broker) Open(conf *Config) error { b.conf = conf // Create or reuse the global metrics shared between brokers - b.incomingByteRate = metrics.GetOrRegisterMeter("incoming-byte-rate", conf.MetricRegistry) - b.requestRate = metrics.GetOrRegisterMeter("request-rate", conf.MetricRegistry) - b.requestSize = getOrRegisterHistogram("request-size", conf.MetricRegistry) - b.requestLatency = getOrRegisterHistogram("request-latency-in-ms", conf.MetricRegistry) - b.outgoingByteRate = metrics.GetOrRegisterMeter("outgoing-byte-rate", conf.MetricRegistry) - b.responseRate = metrics.GetOrRegisterMeter("response-rate", conf.MetricRegistry) - b.responseSize = getOrRegisterHistogram("response-size", conf.MetricRegistry) - b.requestsInFlight = metrics.GetOrRegisterCounter("requests-in-flight", conf.MetricRegistry) + b.incomingByteRate = metrics.GetOrRegisterMeter("incoming-byte-rate", b.metricRegistry) + b.requestRate = metrics.GetOrRegisterMeter("request-rate", b.metricRegistry) + b.fetchRate = metrics.GetOrRegisterMeter("consumer-fetch-rate", b.metricRegistry) + b.requestSize = getOrRegisterHistogram("request-size", b.metricRegistry) + b.requestLatency = getOrRegisterHistogram("request-latency-in-ms", b.metricRegistry) + b.outgoingByteRate = metrics.GetOrRegisterMeter("outgoing-byte-rate", b.metricRegistry) + b.responseRate = metrics.GetOrRegisterMeter("response-rate", b.metricRegistry) + b.responseSize = getOrRegisterHistogram("response-size", b.metricRegistry) + b.requestsInFlight = metrics.GetOrRegisterCounter("requests-in-flight", b.metricRegistry) + b.protocolRequestsRate = map[int16]metrics.Meter{} // Do not gather metrics for seeded broker (only used during bootstrap) because they share // the same id (-1) and are already exposed through the global metrics above - if b.id >= 0 { + if b.id >= 0 && !metrics.UseNilMetrics { b.registerMetrics() } - if conf.Net.SASL.Enable { - b.connErr = b.authenticateViaSASL() + if conf.Net.SASL.Mechanism == SASLTypeOAuth && conf.Net.SASL.Version == SASLHandshakeV0 { + conf.Net.SASL.Version = SASLHandshakeV1 + } + + useSaslV0 := conf.Net.SASL.Version == SASLHandshakeV0 || conf.Net.SASL.Mechanism == SASLTypeGSSAPI + if conf.Net.SASL.Enable && useSaslV0 { + b.connErr = b.authenticateViaSASLv0() if b.connErr != nil { err = b.conn.Close() if err == nil { - Logger.Printf("Closed connection to broker %s\n", b.addr) + DebugLogger.Printf("Closed connection to broker %s\n", b.addr) } else { Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err) } @@ -201,
+249,41 @@ func (b *Broker) Open(conf *Config) error { } b.done = make(chan bool) - b.responses = make(chan responsePromise, b.conf.Net.MaxOpenRequests-1) + b.responses = make(chan *responsePromise, b.conf.Net.MaxOpenRequests-1) + go withRecover(b.responseReceiver) + if conf.Net.SASL.Enable && !useSaslV0 { + b.connErr = b.authenticateViaSASLv1() + if b.connErr != nil { + close(b.responses) + err = b.conn.Close() + if err == nil { + DebugLogger.Printf("Closed connection to broker %s\n", b.addr) + } else { + Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err) + } + b.conn = nil + atomic.StoreInt32(&b.opened, 0) + return + } + } if b.id >= 0 { - Logger.Printf("Connected to broker at %s (registered as #%d)\n", b.addr, b.id) + DebugLogger.Printf("Connected to broker at %s (registered as #%d)\n", b.addr, b.id) } else { - Logger.Printf("Connected to broker at %s (unregistered)\n", b.addr) + DebugLogger.Printf("Connected to broker at %s (unregistered)\n", b.addr) } - go withRecover(b.responseReceiver) }) return nil } +func (b *Broker) ResponseSize() int { + b.lock.Lock() + defer b.lock.Unlock() + + return len(b.responses) +} + // Connected returns true if the broker is connected and false otherwise. If the broker is not // connected but it had tried to connect, the error from that connection attempt is also returned. func (b *Broker) Connected() (bool, error) { @@ -223,7 +293,25 @@ func (b *Broker) Connected() (bool, error) { return b.conn != nil, b.connErr } -//Close closes the broker resources +// TLSConnectionState returns the client's TLS connection state. The second return value is false if this is not a tls connection or the connection has not yet been established. +func (b *Broker) TLSConnectionState() (state tls.ConnectionState, ok bool) { + b.lock.Lock() + defer b.lock.Unlock() + + if b.conn == nil { + return state, false + } + conn := b.conn + if bconn, ok := b.conn.(*bufConn); ok { + conn = bconn.Conn + } + if tc, ok := conn.(*tls.Conn); ok { + return tc.ConnectionState(), true + } + return state, false +} + +// Close closes the broker resources func (b *Broker) Close() error { b.lock.Lock() defer b.lock.Unlock() @@ -242,10 +330,10 @@ func (b *Broker) Close() error { b.done = nil b.responses = nil - b.unregisterMetrics() + b.metricRegistry.UnregisterAll() if err == nil { - Logger.Printf("Closed connection to broker %s\n", b.addr) + DebugLogger.Printf("Closed connection to broker %s\n", b.addr) } else { Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err) } @@ -276,12 +364,11 @@ func (b *Broker) Rack() string { return *b.rack } -//GetMetadata send a metadata request and returns a metadata response or error +// GetMetadata send a metadata request and returns a metadata response or error func (b *Broker) GetMetadata(request *MetadataRequest) (*MetadataResponse, error) { response := new(MetadataResponse) err := b.sendAndReceive(request, response) - if err != nil { return nil, err } @@ -289,12 +376,11 @@ func (b *Broker) GetMetadata(request *MetadataRequest) (*MetadataResponse, error return response, nil } -//GetConsumerMetadata send a consumer metadata request and returns a consumer metadata response or error +// GetConsumerMetadata send a consumer metadata request and returns a consumer metadata response or error func (b *Broker) GetConsumerMetadata(request *ConsumerMetadataRequest) (*ConsumerMetadataResponse, error) { response := new(ConsumerMetadataResponse) err := b.sendAndReceive(request, response) - if err != nil { return nil, err 
} @@ -302,12 +388,11 @@ func (b *Broker) GetConsumerMetadata(request *ConsumerMetadataRequest) (*Consume return response, nil } -//FindCoordinator sends a find coordinate request and returns a response or error +// FindCoordinator sends a find coordinator request and returns a response or error func (b *Broker) FindCoordinator(request *FindCoordinatorRequest) (*FindCoordinatorResponse, error) { response := new(FindCoordinatorResponse) err := b.sendAndReceive(request, response) - if err != nil { return nil, err } @@ -315,12 +400,11 @@ func (b *Broker) FindCoordinator(request *FindCoordinatorRequest) (*FindCoordina return response, nil } -//GetAvailableOffsets return an offset response or error +// GetAvailableOffsets returns an offset response or error func (b *Broker) GetAvailableOffsets(request *OffsetRequest) (*OffsetResponse, error) { response := new(OffsetResponse) err := b.sendAndReceive(request, response) - if err != nil { return nil, err } @@ -328,7 +412,60 @@ func (b *Broker) GetAvailableOffsets(request *OffsetRequest) (*OffsetResponse, e return response, nil } -//Produce returns a produce response or error +// ProduceCallback function is called once the produce response has been parsed +// or could not be read. +type ProduceCallback func(*ProduceResponse, error) + +// AsyncProduce sends a produce request and eventually calls the provided callback +// with a produce response or an error. +// +// Contrary to using Produce, waiting for the response is generally not blocking. +// If the configured maximum number of in-flight requests is reached, then +// the request will block until a previous response has been received. +// +// When configured with RequiredAcks == NoResponse, the callback will not be invoked. +// If an error is returned because the request could not be sent then the callback +// will not be invoked either. +// +// Make sure not to Close the broker in the callback as it will lead to a deadlock.
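Given the callback contract documented above, a plausible way to drive AsyncProduce directly could look like the sketch below; broker setup and request construction are assumed, and it presumes RequiredAcks != NoResponse so that the callback actually fires:

package sketch

import (
	"log"
	"sync"

	"github.com/Shopify/sarama"
)

// produceOnce sends one request on an already-opened broker and waits for
// the callback. With RequiredAcks == NoResponse the callback never fires
// and wg.Wait() would block forever, so this sketch assumes acks are on.
func produceOnce(b *sarama.Broker, req *sarama.ProduceRequest) error {
	var wg sync.WaitGroup
	wg.Add(1)
	err := b.AsyncProduce(req, func(res *sarama.ProduceResponse, err error) {
		defer wg.Done() // never Close the broker from inside this callback
		if err != nil {
			log.Println("produce failed:", err)
			return
		}
		log.Println("produce acked, throttle:", res.ThrottleTime)
	})
	if err != nil {
		wg.Done() // the request was never sent; the callback will not be invoked
	}
	wg.Wait()
	return err
}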
+func (b *Broker) AsyncProduce(request *ProduceRequest, cb ProduceCallback) error { + metricRegistry := b.metricRegistry + needAcks := request.RequiredAcks != NoResponse + // Use a nil promise when no acks is required + var promise *responsePromise + + if needAcks { + // Create ProduceResponse early to provide the header version + res := new(ProduceResponse) + promise = &responsePromise{ + headerVersion: res.headerVersion(), + // Packets will be converted to a ProduceResponse in the responseReceiver goroutine + handler: func(packets []byte, err error) { + if err != nil { + // Failed request + cb(nil, err) + return + } + + if err := versionedDecode(packets, res, request.version(), metricRegistry); err != nil { + // Malformed response + cb(nil, err) + return + } + + // Wellformed response + b.updateThrottleMetric(res.ThrottleTime) + cb(res, nil) + }, + } + } + + b.lock.Lock() + defer b.lock.Unlock() + return b.sendWithPromise(request, promise) +} + +// Produce returns a produce response or error func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) { var ( response *ProduceResponse @@ -340,6 +477,7 @@ func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) { } else { response = new(ProduceResponse) err = b.sendAndReceive(request, response) + b.updateThrottleMetric(response.ThrottleTime) } if err != nil { @@ -349,8 +487,17 @@ func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) { return response, nil } -//Fetch returns a FetchResponse or error +// Fetch returns a FetchResponse or error func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) { + defer func() { + if b.fetchRate != nil { + b.fetchRate.Mark(1) + } + if b.brokerFetchRate != nil { + b.brokerFetchRate.Mark(1) + } + }() + response := new(FetchResponse) err := b.sendAndReceive(request, response) @@ -361,7 +508,7 @@ func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) { return response, nil } -//CommitOffset return an Offset commit response or error +// CommitOffset return an Offset commit response or error func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitResponse, error) { response := new(OffsetCommitResponse) @@ -373,9 +520,10 @@ func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitRespon return response, nil } -//FetchOffset returns an offset fetch response or error +// FetchOffset returns an offset fetch response or error func (b *Broker) FetchOffset(request *OffsetFetchRequest) (*OffsetFetchResponse, error) { response := new(OffsetFetchResponse) + response.Version = request.Version // needed to handle the two header versions err := b.sendAndReceive(request, response) if err != nil { @@ -385,7 +533,7 @@ func (b *Broker) FetchOffset(request *OffsetFetchRequest) (*OffsetFetchResponse, return response, nil } -//JoinGroup returns a join group response or error +// JoinGroup returns a join group response or error func (b *Broker) JoinGroup(request *JoinGroupRequest) (*JoinGroupResponse, error) { response := new(JoinGroupResponse) @@ -397,7 +545,7 @@ func (b *Broker) JoinGroup(request *JoinGroupRequest) (*JoinGroupResponse, error return response, nil } -//SyncGroup returns a sync group response or error +// SyncGroup returns a sync group response or error func (b *Broker) SyncGroup(request *SyncGroupRequest) (*SyncGroupResponse, error) { response := new(SyncGroupResponse) @@ -409,7 +557,7 @@ func (b *Broker) SyncGroup(request *SyncGroupRequest) (*SyncGroupResponse, error return response, nil } 
-//LeaveGroup return a leave group response or error +// LeaveGroup return a leave group response or error func (b *Broker) LeaveGroup(request *LeaveGroupRequest) (*LeaveGroupResponse, error) { response := new(LeaveGroupResponse) @@ -421,7 +569,7 @@ func (b *Broker) LeaveGroup(request *LeaveGroupRequest) (*LeaveGroupResponse, er return response, nil } -//Heartbeat returns a heartbeat response or error +// Heartbeat returns a heartbeat response or error func (b *Broker) Heartbeat(request *HeartbeatRequest) (*HeartbeatResponse, error) { response := new(HeartbeatResponse) @@ -433,7 +581,7 @@ func (b *Broker) Heartbeat(request *HeartbeatRequest) (*HeartbeatResponse, error return response, nil } -//ListGroups return a list group response or error +// ListGroups return a list group response or error func (b *Broker) ListGroups(request *ListGroupsRequest) (*ListGroupsResponse, error) { response := new(ListGroupsResponse) @@ -445,7 +593,7 @@ func (b *Broker) ListGroups(request *ListGroupsRequest) (*ListGroupsResponse, er return response, nil } -//DescribeGroups return describe group response or error +// DescribeGroups return describe group response or error func (b *Broker) DescribeGroups(request *DescribeGroupsRequest) (*DescribeGroupsResponse, error) { response := new(DescribeGroupsResponse) @@ -457,7 +605,7 @@ func (b *Broker) DescribeGroups(request *DescribeGroupsRequest) (*DescribeGroups return response, nil } -//ApiVersions return api version response or error +// ApiVersions return api version response or error func (b *Broker) ApiVersions(request *ApiVersionsRequest) (*ApiVersionsResponse, error) { response := new(ApiVersionsResponse) @@ -469,7 +617,7 @@ func (b *Broker) ApiVersions(request *ApiVersionsRequest) (*ApiVersionsResponse, return response, nil } -//CreateTopics send a create topic request and returns create topic response +// CreateTopics send a create topic request and returns create topic response func (b *Broker) CreateTopics(request *CreateTopicsRequest) (*CreateTopicsResponse, error) { response := new(CreateTopicsResponse) @@ -481,7 +629,7 @@ func (b *Broker) CreateTopics(request *CreateTopicsRequest) (*CreateTopicsRespon return response, nil } -//DeleteTopics sends a delete topic request and returns delete topic response +// DeleteTopics sends a delete topic request and returns delete topic response func (b *Broker) DeleteTopics(request *DeleteTopicsRequest) (*DeleteTopicsResponse, error) { response := new(DeleteTopicsResponse) @@ -493,8 +641,8 @@ func (b *Broker) DeleteTopics(request *DeleteTopicsRequest) (*DeleteTopicsRespon return response, nil } -//CreatePartitions sends a create partition request and returns create -//partitions response or error +// CreatePartitions sends a create partition request and returns create +// partitions response or error func (b *Broker) CreatePartitions(request *CreatePartitionsRequest) (*CreatePartitionsResponse, error) { response := new(CreatePartitionsResponse) @@ -506,8 +654,8 @@ func (b *Broker) CreatePartitions(request *CreatePartitionsRequest) (*CreatePart return response, nil } -//AlterPartitionReassignments sends a alter partition reassignments request and -//returns alter partition reassignments response +// AlterPartitionReassignments sends a alter partition reassignments request and +// returns alter partition reassignments response func (b *Broker) AlterPartitionReassignments(request *AlterPartitionReassignmentsRequest) (*AlterPartitionReassignmentsResponse, error) { response := new(AlterPartitionReassignmentsResponse) @@ 
-519,8 +667,8 @@ func (b *Broker) AlterPartitionReassignments(request *AlterPartitionReassignment return response, nil } -//ListPartitionReassignments sends a list partition reassignments request and -//returns list partition reassignments response +// ListPartitionReassignments sends a list partition reassignments request and +// returns list partition reassignments response func (b *Broker) ListPartitionReassignments(request *ListPartitionReassignmentsRequest) (*ListPartitionReassignmentsResponse, error) { response := new(ListPartitionReassignmentsResponse) @@ -532,8 +680,8 @@ func (b *Broker) ListPartitionReassignments(request *ListPartitionReassignmentsR return response, nil } -//DeleteRecords send a request to delete records and return delete record -//response or error +// DeleteRecords send a request to delete records and return delete record +// response or error func (b *Broker) DeleteRecords(request *DeleteRecordsRequest) (*DeleteRecordsResponse, error) { response := new(DeleteRecordsResponse) @@ -545,7 +693,7 @@ func (b *Broker) DeleteRecords(request *DeleteRecordsRequest) (*DeleteRecordsRes return response, nil } -//DescribeAcls sends a describe acl request and returns a response or error +// DescribeAcls sends a describe acl request and returns a response or error func (b *Broker) DescribeAcls(request *DescribeAclsRequest) (*DescribeAclsResponse, error) { response := new(DescribeAclsResponse) @@ -557,7 +705,7 @@ func (b *Broker) DescribeAcls(request *DescribeAclsRequest) (*DescribeAclsRespon return response, nil } -//CreateAcls sends a create acl request and returns a response or error +// CreateAcls sends a create acl request and returns a response or error func (b *Broker) CreateAcls(request *CreateAclsRequest) (*CreateAclsResponse, error) { response := new(CreateAclsResponse) @@ -566,10 +714,21 @@ func (b *Broker) CreateAcls(request *CreateAclsRequest) (*CreateAclsResponse, er return nil, err } + errs := make([]error, 0) + for _, res := range response.AclCreationResponses { + if !errors.Is(res.Err, ErrNoError) { + errs = append(errs, res.Err) + } + } + + if len(errs) > 0 { + return response, Wrap(ErrCreateACLs, errs...) 
+ } + return response, nil } -//DeleteAcls sends a delete acl request and returns a response or error +// DeleteAcls sends a delete acl request and returns a response or error func (b *Broker) DeleteAcls(request *DeleteAclsRequest) (*DeleteAclsResponse, error) { response := new(DeleteAclsResponse) @@ -581,9 +740,10 @@ func (b *Broker) DeleteAcls(request *DeleteAclsRequest) (*DeleteAclsResponse, er return response, nil } -//InitProducerID sends an init producer request and returns a response or error +// InitProducerID sends an init producer request and returns a response or error func (b *Broker) InitProducerID(request *InitProducerIDRequest) (*InitProducerIDResponse, error) { response := new(InitProducerIDResponse) + response.Version = request.version() err := b.sendAndReceive(request, response) if err != nil { @@ -593,8 +753,8 @@ func (b *Broker) InitProducerID(request *InitProducerIDRequest) (*InitProducerID return response, nil } -//AddPartitionsToTxn send a request to add partition to txn and returns -//a response or error +// AddPartitionsToTxn send a request to add partition to txn and returns +// a response or error func (b *Broker) AddPartitionsToTxn(request *AddPartitionsToTxnRequest) (*AddPartitionsToTxnResponse, error) { response := new(AddPartitionsToTxnResponse) @@ -606,8 +766,8 @@ func (b *Broker) AddPartitionsToTxn(request *AddPartitionsToTxnRequest) (*AddPar return response, nil } -//AddOffsetsToTxn sends a request to add offsets to txn and returns a response -//or error +// AddOffsetsToTxn sends a request to add offsets to txn and returns a response +// or error func (b *Broker) AddOffsetsToTxn(request *AddOffsetsToTxnRequest) (*AddOffsetsToTxnResponse, error) { response := new(AddOffsetsToTxnResponse) @@ -619,7 +779,7 @@ func (b *Broker) AddOffsetsToTxn(request *AddOffsetsToTxnRequest) (*AddOffsetsTo return response, nil } -//EndTxn sends a request to end txn and returns a response or error +// EndTxn sends a request to end txn and returns a response or error func (b *Broker) EndTxn(request *EndTxnRequest) (*EndTxnResponse, error) { response := new(EndTxnResponse) @@ -631,8 +791,8 @@ func (b *Broker) EndTxn(request *EndTxnRequest) (*EndTxnResponse, error) { return response, nil } -//TxnOffsetCommit sends a request to commit transaction offsets and returns -//a response or error +// TxnOffsetCommit sends a request to commit transaction offsets and returns +// a response or error func (b *Broker) TxnOffsetCommit(request *TxnOffsetCommitRequest) (*TxnOffsetCommitResponse, error) { response := new(TxnOffsetCommitResponse) @@ -644,8 +804,8 @@ func (b *Broker) TxnOffsetCommit(request *TxnOffsetCommitRequest) (*TxnOffsetCom return response, nil } -//DescribeConfigs sends a request to describe config and returns a response or -//error +// DescribeConfigs sends a request to describe config and returns a response or +// error func (b *Broker) DescribeConfigs(request *DescribeConfigsRequest) (*DescribeConfigsResponse, error) { response := new(DescribeConfigsResponse) @@ -657,7 +817,7 @@ func (b *Broker) DescribeConfigs(request *DescribeConfigsRequest) (*DescribeConf return response, nil } -//AlterConfigs sends a request to alter config and return a response or error +// AlterConfigs sends a request to alter config and return a response or error func (b *Broker) AlterConfigs(request *AlterConfigsRequest) (*AlterConfigsResponse, error) { response := new(AlterConfigsResponse) @@ -669,7 +829,19 @@ func (b *Broker) AlterConfigs(request *AlterConfigsRequest) (*AlterConfigsRespon return 
response, nil } -//DeleteGroups sends a request to delete groups and returns a response or error +// IncrementalAlterConfigs sends a request to incrementally alter configs and returns a response or error +func (b *Broker) IncrementalAlterConfigs(request *IncrementalAlterConfigsRequest) (*IncrementalAlterConfigsResponse, error) { + response := new(IncrementalAlterConfigsResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +// DeleteGroups sends a request to delete groups and returns a response or error func (b *Broker) DeleteGroups(request *DeleteGroupsRequest) (*DeleteGroupsResponse, error) { response := new(DeleteGroupsResponse) @@ -680,7 +852,18 @@ func (b *Broker) DeleteGroups(request *DeleteGroupsRequest) (*DeleteGroupsRespon return response, nil } -//DescribeLogDirs sends a request to get the broker's log dir paths and sizes +// DeleteOffsets sends a request to delete group offsets and returns a response or error +func (b *Broker) DeleteOffsets(request *DeleteOffsetsRequest) (*DeleteOffsetsResponse, error) { + response := new(DeleteOffsetsResponse) + + if err := b.sendAndReceive(request, response); err != nil { + return nil, err + } + + return response, nil +} + +// DescribeLogDirs sends a request to get the broker's log dir paths and sizes func (b *Broker) DescribeLogDirs(request *DescribeLogDirsRequest) (*DescribeLogDirsResponse, error) { response := new(DescribeLogDirsResponse) @@ -692,6 +875,53 @@ func (b *Broker) DescribeLogDirs(request *DescribeLogDirsRequest) (*DescribeLogD return response, nil } +// DescribeUserScramCredentials sends a request to get SCRAM users +func (b *Broker) DescribeUserScramCredentials(req *DescribeUserScramCredentialsRequest) (*DescribeUserScramCredentialsResponse, error) { + res := new(DescribeUserScramCredentialsResponse) + + err := b.sendAndReceive(req, res) + if err != nil { + return nil, err + } + + return res, err +} + +func (b *Broker) AlterUserScramCredentials(req *AlterUserScramCredentialsRequest) (*AlterUserScramCredentialsResponse, error) { + res := new(AlterUserScramCredentialsResponse) + + err := b.sendAndReceive(req, res) + if err != nil { + return nil, err + } + + return res, nil +} + +// DescribeClientQuotas sends a request to get the broker's quotas +func (b *Broker) DescribeClientQuotas(request *DescribeClientQuotasRequest) (*DescribeClientQuotasResponse, error) { + response := new(DescribeClientQuotasResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +// AlterClientQuotas sends a request to alter the broker's quotas +func (b *Broker) AlterClientQuotas(request *AlterClientQuotasRequest) (*AlterClientQuotasResponse, error) { + response := new(AlterClientQuotasResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + // readFull ensures the conn ReadDeadline has been setup before making a // call to io.ReadFull func (b *Broker) readFull(buf []byte) (n int, err error) { @@ -712,25 +942,60 @@ func (b *Broker) write(buf []byte) (n int, err error) { return b.conn.Write(buf) } +// b.lock must be held by caller func (b *Broker) send(rb protocolBody, promiseResponse bool, responseHeaderVersion int16) (*responsePromise, error) { - b.lock.Lock() - defer b.lock.Unlock() + var promise *responsePromise + if promiseResponse { + // Packets or error will be sent to the following channels + // once the response is received + promise =
makeResponsePromise(responseHeaderVersion) + } + + if err := b.sendWithPromise(rb, promise); err != nil { + return nil, err + } + + return promise, nil +} + +func makeResponsePromise(responseHeaderVersion int16) *responsePromise { + promise := &responsePromise{ + headerVersion: responseHeaderVersion, + packets: make(chan []byte), + errors: make(chan error), + } + return promise +} +// b.lock must be held by caller +func (b *Broker) sendWithPromise(rb protocolBody, promise *responsePromise) error { if b.conn == nil { if b.connErr != nil { - return nil, b.connErr + return b.connErr + } + return ErrNotConnected + } + + if b.clientSessionReauthenticationTimeMs > 0 && currentUnixMilli() > b.clientSessionReauthenticationTimeMs { + err := b.authenticateViaSASLv1() + if err != nil { + return err } - return nil, ErrNotConnected } + return b.sendInternal(rb, promise) +} + +// b.lock must be held by caller +func (b *Broker) sendInternal(rb protocolBody, promise *responsePromise) error { if !b.conf.Version.IsAtLeast(rb.requiredVersion()) { - return nil, ErrUnsupportedVersion + return ErrUnsupportedVersion } req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb} - buf, err := encode(req, b.conf.MetricRegistry) + buf, err := encode(req, b.metricRegistry) if err != nil { - return nil, err + return err } requestTime := time.Now() @@ -738,25 +1003,29 @@ func (b *Broker) send(rb protocolBody, promiseResponse bool, responseHeaderVersi b.addRequestInFlightMetrics(1) bytes, err := b.write(buf) b.updateOutgoingCommunicationMetrics(bytes) + b.updateProtocolMetrics(rb) if err != nil { b.addRequestInFlightMetrics(-1) - return nil, err + return err } b.correlationID++ - if !promiseResponse { + if promise == nil { // Record request latency without the response b.updateRequestLatencyAndInFlightMetrics(time.Since(requestTime)) - return nil, nil + return nil } - promise := responsePromise{requestTime, req.correlationID, responseHeaderVersion, make(chan []byte), make(chan error)} + promise.requestTime = requestTime + promise.correlationID = req.correlationID b.responses <- promise - return &promise, nil + return nil } func (b *Broker) sendAndReceive(req protocolBody, res protocolBody) error { + b.lock.Lock() + defer b.lock.Unlock() responseHeaderVersion := int16(-1) if res != nil { responseHeaderVersion = res.headerVersion() @@ -771,10 +1040,14 @@ func (b *Broker) sendAndReceive(req protocolBody, res protocolBody) error { return nil } + return handleResponsePromise(req, res, promise, b.metricRegistry) +} + +func handleResponsePromise(req protocolBody, res protocolBody, promise *responsePromise, metricRegistry metrics.Registry) error { select { case buf := <-promise.packets: - return versionedDecode(buf, res, req.version()) - case err = <-promise.errors: + return versionedDecode(buf, res, req.version(), metricRegistry) + case err := <-promise.errors: return err } } @@ -816,7 +1089,7 @@ func (b *Broker) encode(pe packetEncoder, version int16) (err error) { return err } - port, err := strconv.Atoi(portstr) + port, err := strconv.ParseInt(portstr, 10, 32) if err != nil { return err } @@ -848,11 +1121,11 @@ func (b *Broker) responseReceiver() { // This was previously incremented in send() and // we are not calling updateIncomingCommunicationMetrics() b.addRequestInFlightMetrics(-1) - response.errors <- dead + response.handle(nil, dead) continue } - var headerLength = getHeaderLength(response.headerVersion) + headerLength := getHeaderLength(response.headerVersion) header := make([]byte, 
headerLength) bytesReadHeader, err := b.readFull(header) @@ -860,16 +1133,16 @@ func (b *Broker) responseReceiver() { if err != nil { b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency) dead = err - response.errors <- err + response.handle(nil, err) continue } decodedHeader := responseHeader{} - err = versionedDecode(header, &decodedHeader, response.headerVersion) + err = versionedDecode(header, &decodedHeader, response.headerVersion, b.metricRegistry) if err != nil { b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency) dead = err - response.errors <- err + response.handle(nil, err) continue } if decodedHeader.correlationID != response.correlationID { @@ -877,7 +1150,7 @@ func (b *Broker) responseReceiver() { // TODO if decoded ID < cur ID, discard until we catch up // TODO if decoded ID > cur ID, save it so when cur ID catches up we have a response dead = PacketDecodingError{fmt.Sprintf("correlation ID didn't match, wanted %d, got %d", response.correlationID, decodedHeader.correlationID)} - response.errors <- dead + response.handle(nil, dead) continue } @@ -886,11 +1159,11 @@ func (b *Broker) responseReceiver() { b.updateIncomingCommunicationMetrics(bytesReadHeader+bytesReadBody, requestLatency) if err != nil { dead = err - response.errors <- err + response.handle(nil, err) continue } - response.packets <- buf + response.handle(buf, nil) } close(b.done) } @@ -904,16 +1177,75 @@ func getHeaderLength(headerVersion int16) int8 { } } -func (b *Broker) authenticateViaSASL() error { +func (b *Broker) authenticateViaSASLv0() error { switch b.conf.Net.SASL.Mechanism { - case SASLTypeOAuth: - return b.sendAndReceiveSASLOAuth(b.conf.Net.SASL.TokenProvider) case SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512: - return b.sendAndReceiveSASLSCRAMv1() + return b.sendAndReceiveSASLSCRAMv0() case SASLTypeGSSAPI: return b.sendAndReceiveKerberos() default: - return b.sendAndReceiveSASLPlainAuth() + return b.sendAndReceiveSASLPlainAuthV0() + } +} + +func (b *Broker) authenticateViaSASLv1() error { + metricRegistry := b.metricRegistry + if b.conf.Net.SASL.Handshake { + handshakeRequest := &SaslHandshakeRequest{Mechanism: string(b.conf.Net.SASL.Mechanism), Version: b.conf.Net.SASL.Version} + handshakeResponse := new(SaslHandshakeResponse) + prom := makeResponsePromise(handshakeResponse.version()) + + handshakeErr := b.sendInternal(handshakeRequest, prom) + if handshakeErr != nil { + Logger.Printf("Error while performing SASL handshake %s\n", b.addr) + return handshakeErr + } + handshakeErr = handleResponsePromise(handshakeRequest, handshakeResponse, prom, metricRegistry) + if handshakeErr != nil { + Logger.Printf("Error while performing SASL handshake %s\n", b.addr) + return handshakeErr + } + + if !errors.Is(handshakeResponse.Err, ErrNoError) { + return handshakeResponse.Err + } + } + + authSendReceiver := func(authBytes []byte) (*SaslAuthenticateResponse, error) { + authenticateRequest := b.createSaslAuthenticateRequest(authBytes) + authenticateResponse := new(SaslAuthenticateResponse) + prom := makeResponsePromise(authenticateResponse.version()) + authErr := b.sendInternal(authenticateRequest, prom) + if authErr != nil { + Logger.Printf("Error while performing SASL Auth %s\n", b.addr) + return nil, authErr + } + authErr = handleResponsePromise(authenticateRequest, authenticateResponse, prom, metricRegistry) + if authErr != nil { + Logger.Printf("Error while performing SASL Auth %s\n", b.addr) + return nil, authErr + } + + if !errors.Is(authenticateResponse.Err, ErrNoError) { + 
var err error = authenticateResponse.Err + if authenticateResponse.ErrorMessage != nil { + err = Wrap(authenticateResponse.Err, errors.New(*authenticateResponse.ErrorMessage)) + } + return nil, err + } + + b.computeSaslSessionLifetime(authenticateResponse) + return authenticateResponse, nil + } + + switch b.conf.Net.SASL.Mechanism { + case SASLTypeOAuth: + provider := b.conf.Net.SASL.TokenProvider + return b.sendAndReceiveSASLOAuth(authSendReceiver, provider) + case SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512: + return b.sendAndReceiveSASLSCRAMv1(authSendReceiver, b.conf.Net.SASL.SCRAMClientGeneratorFunc()) + default: + return b.sendAndReceiveSASLPlainAuthV1(authSendReceiver) } } @@ -929,7 +1261,7 @@ func (b *Broker) sendAndReceiveSASLHandshake(saslType SASLMechanism, version int rb := &SaslHandshakeRequest{Mechanism: string(saslType), Version: version} req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb} - buf, err := encode(req, b.conf.MetricRegistry) + buf, err := encode(req, b.metricRegistry) if err != nil { return err } @@ -966,25 +1298,21 @@ func (b *Broker) sendAndReceiveSASLHandshake(saslType SASLMechanism, version int b.updateIncomingCommunicationMetrics(n+8, time.Since(requestTime)) res := &SaslHandshakeResponse{} - err = versionedDecode(payload, res, 0) + err = versionedDecode(payload, res, 0, b.metricRegistry) if err != nil { Logger.Printf("Failed to parse SASL handshake : %s\n", err.Error()) return err } - if res.Err != ErrNoError { + if !errors.Is(res.Err, ErrNoError) { Logger.Printf("Invalid SASL Mechanism : %s\n", res.Err.Error()) return res.Err } - Logger.Print("Successful SASL handshake. Available mechanisms: ", res.EnabledMechanisms) + DebugLogger.Print("Completed pre-auth SASL handshake. Available mechanisms: ", res.EnabledMechanisms) return nil } -// Kafka 0.10.x supported SASL PLAIN/Kerberos via KAFKA-3149 (KIP-43). -// Kafka 1.x.x onward added a SaslAuthenticate request/response message which -// wraps the SASL flow in the Kafka protocol, which allows for returning -// meaningful errors on authentication failure. // // In SASL Plain, Kafka expects the auth header to be in the following format // Message format (from https://tools.ietf.org/html/rfc4616): @@ -998,14 +1326,15 @@ func (b *Broker) sendAndReceiveSASLHandshake(saslType SASLMechanism, version int // SAFE = UTF1 / UTF2 / UTF3 / UTF4 // ;; any UTF-8 encoded Unicode character except NUL // +// + +// Kafka 0.10.x supported SASL PLAIN/Kerberos via KAFKA-3149 (KIP-43). +// sendAndReceiveSASLPlainAuthV0 flows the v0 sasl auth NOT wrapped in the kafka protocol +// // With SASL v0 handshake and auth then: // When credentials are valid, Kafka returns a 4 byte array of null characters. // When credentials are invalid, Kafka closes the connection. -// -// With SASL v1 handshake and auth then: -// When credentials are invalid, Kafka replies with a SaslAuthenticate response -// containing an error code and message detailing the authentication failure. 
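As a concrete illustration of the v0 framing described above, the sketch below builds the RFC 4616 PLAIN message and prepends the 4-byte big-endian length header, mirroring the framing in sendAndReceiveSASLPlainAuthV0 further down. The helper name and credentials are illustrative only, not part of this patch:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// buildPlainAuthV0 frames an RFC 4616 PLAIN message ([authzid] NUL authcid
// NUL passwd) with a 4-byte big-endian length header, as Kafka expects for
// the v0 (non-protocol-wrapped) SASL flow.
func buildPlainAuthV0(authzid, user, password string) []byte {
	msg := authzid + "\x00" + user + "\x00" + password
	buf := make([]byte, len(msg)+4) // 4 byte length header + auth data
	binary.BigEndian.PutUint32(buf, uint32(len(msg)))
	copy(buf[4:], msg)
	return buf
}

func main() {
	// prints: 00 00 00 0d 00 61 6c 69 63 65 00 73 65 63 72 65 74
	fmt.Printf("% x\n", buildPlainAuthV0("", "alice", "secret"))
}
```

On success the broker answers this raw frame with four null bytes; on bad credentials it simply closes the connection, which is why the v1 protocol-wrapped flow below exists.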
-func (b *Broker) sendAndReceiveSASLPlainAuth() error { +func (b *Broker) sendAndReceiveSASLPlainAuthV0() error { // default to V0 to allow for backward compatibility when SASL is enabled // but not the handshake if b.conf.Net.SASL.Handshake { @@ -1016,16 +1345,8 @@ func (b *Broker) sendAndReceiveSASLPlainAuth() error { } } - if b.conf.Net.SASL.Version == SASLHandshakeV1 { - return b.sendAndReceiveV1SASLPlainAuth() - } - return b.sendAndReceiveV0SASLPlainAuth() -} - -// sendAndReceiveV0SASLPlainAuth flows the v0 sasl auth NOT wrapped in the kafka protocol -func (b *Broker) sendAndReceiveV0SASLPlainAuth() error { length := len(b.conf.Net.SASL.AuthIdentity) + 1 + len(b.conf.Net.SASL.User) + 1 + len(b.conf.Net.SASL.Password) - authBytes := make([]byte, length+4) //4 byte length header + auth data + authBytes := make([]byte, length+4) // 4 byte length header + auth data binary.BigEndian.PutUint32(authBytes, uint32(length)) copy(authBytes[4:], b.conf.Net.SASL.AuthIdentity+"\x00"+b.conf.Net.SASL.User+"\x00"+b.conf.Net.SASL.Password) @@ -1050,48 +1371,29 @@ func (b *Broker) sendAndReceiveV0SASLPlainAuth() error { return err } - Logger.Printf("SASL authentication successful with broker %s:%v - %v\n", b.addr, n, header) + DebugLogger.Printf("SASL authentication successful with broker %s:%v - %v\n", b.addr, n, header) return nil } -// sendAndReceiveV1SASLPlainAuth flows the v1 sasl authentication using the kafka protocol -func (b *Broker) sendAndReceiveV1SASLPlainAuth() error { - correlationID := b.correlationID - - requestTime := time.Now() - - // Will be decremented in updateIncomingCommunicationMetrics (except error) - b.addRequestInFlightMetrics(1) - bytesWritten, err := b.sendSASLPlainAuthClientResponse(correlationID) - b.updateOutgoingCommunicationMetrics(bytesWritten) - - if err != nil { - b.addRequestInFlightMetrics(-1) - Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error()) - return err - } - - b.correlationID++ - - bytesRead, err := b.receiveSASLServerResponse(&SaslAuthenticateResponse{}, correlationID) - b.updateIncomingCommunicationMetrics(bytesRead, time.Since(requestTime)) - - // With v1 sasl we get an error message set in the response we can return +// Kafka 1.x.x onward added a SaslAuthenticate request/response message which +// wraps the SASL flow in the Kafka protocol, which allows for returning +// meaningful errors on authentication failure. 
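+// For example, with user "alice", password "secret" and an empty authzid, the
+// v1 flow hands []byte("\x00alice\x00secret") to authSendReceiver, which wraps
+// it in a SaslAuthenticateRequest rather than writing a raw length-prefixed
+// frame as the v0 path does. (Illustrative credentials, not part of the change.)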
+func (b *Broker) sendAndReceiveSASLPlainAuthV1(authSendReceiver func(authBytes []byte) (*SaslAuthenticateResponse, error)) error { + authBytes := []byte(b.conf.Net.SASL.AuthIdentity + "\x00" + b.conf.Net.SASL.User + "\x00" + b.conf.Net.SASL.Password) + _, err := authSendReceiver(authBytes) if err != nil { - Logger.Printf("Error returned from broker during SASL flow %s: %s\n", b.addr, err.Error()) return err } + return err +} - return nil +func currentUnixMilli() int64 { + return time.Now().UnixNano() / int64(time.Millisecond) } // sendAndReceiveSASLOAuth performs the authentication flow as described by KIP-255 // https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=75968876 -func (b *Broker) sendAndReceiveSASLOAuth(provider AccessTokenProvider) error { - if err := b.sendAndReceiveSASLHandshake(SASLTypeOAuth, SASLHandshakeV1); err != nil { - return err - } - +func (b *Broker) sendAndReceiveSASLOAuth(authSendReceiver func(authBytes []byte) (*SaslAuthenticateResponse, error), provider AccessTokenProvider) error { token, err := provider.Token() if err != nil { return err @@ -1102,142 +1404,111 @@ func (b *Broker) sendAndReceiveSASLOAuth(provider AccessTokenProvider) error { return err } - challenged, err := b.sendClientMessage(message) + res, err := authSendReceiver(message) if err != nil { return err } + isChallenge := len(res.SaslAuthBytes) > 0 - if challenged { + if isChallenge { // Abort the token exchange. The broker returns the failure code. - _, err = b.sendClientMessage([]byte(`\x01`)) + _, err = authSendReceiver([]byte(`\x01`)) } - return err } -// sendClientMessage sends a SASL/OAUTHBEARER client message and returns true -// if the broker responds with a challenge, in which case the token is -// rejected. -func (b *Broker) sendClientMessage(message []byte) (bool, error) { - requestTime := time.Now() - // Will be decremented in updateIncomingCommunicationMetrics (except error) - b.addRequestInFlightMetrics(1) - correlationID := b.correlationID - - bytesWritten, err := b.sendSASLOAuthBearerClientMessage(message, correlationID) - b.updateOutgoingCommunicationMetrics(bytesWritten) - if err != nil { - b.addRequestInFlightMetrics(-1) - return false, err - } - - b.correlationID++ - - res := &SaslAuthenticateResponse{} - bytesRead, err := b.receiveSASLServerResponse(res, correlationID) - - requestLatency := time.Since(requestTime) - b.updateIncomingCommunicationMetrics(bytesRead, requestLatency) - - isChallenge := len(res.SaslAuthBytes) > 0 - - if isChallenge && err != nil { - Logger.Printf("Broker rejected authentication token: %s", res.SaslAuthBytes) - } - - return isChallenge, err -} - -func (b *Broker) sendAndReceiveSASLSCRAMv1() error { - if err := b.sendAndReceiveSASLHandshake(b.conf.Net.SASL.Mechanism, SASLHandshakeV1); err != nil { +func (b *Broker) sendAndReceiveSASLSCRAMv0() error { + if err := b.sendAndReceiveSASLHandshake(b.conf.Net.SASL.Mechanism, SASLHandshakeV0); err != nil { return err } scramClient := b.conf.Net.SASL.SCRAMClientGeneratorFunc() if err := scramClient.Begin(b.conf.Net.SASL.User, b.conf.Net.SASL.Password, b.conf.Net.SASL.SCRAMAuthzID); err != nil { - return fmt.Errorf("failed to start SCRAM exchange with the server: %s", err.Error()) + return fmt.Errorf("failed to start SCRAM exchange with the server: %w", err) } msg, err := scramClient.Step("") if err != nil { - return fmt.Errorf("failed to advance the SCRAM exchange: %s", err.Error()) + return fmt.Errorf("failed to advance the SCRAM exchange: %w", err) } for !scramClient.Done() { requestTime := 
time.Now() // Will be decremented in updateIncomingCommunicationMetrics (except error) b.addRequestInFlightMetrics(1) - correlationID := b.correlationID - bytesWritten, err := b.sendSaslAuthenticateRequest(correlationID, []byte(msg)) - b.updateOutgoingCommunicationMetrics(bytesWritten) + length := len(msg) + authBytes := make([]byte, length+4) // 4 byte length header + auth data + binary.BigEndian.PutUint32(authBytes, uint32(length)) + copy(authBytes[4:], []byte(msg)) + _, err := b.write(authBytes) + b.updateOutgoingCommunicationMetrics(length + 4) if err != nil { b.addRequestInFlightMetrics(-1) Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error()) return err } - b.correlationID++ - challenge, err := b.receiveSaslAuthenticateResponse(correlationID) + header := make([]byte, 4) + _, err = b.readFull(header) if err != nil { b.addRequestInFlightMetrics(-1) - Logger.Printf("Failed to read response while authenticating with SASL to broker %s: %s\n", b.addr, err.Error()) + Logger.Printf("Failed to read response header while authenticating with SASL to broker %s: %s\n", b.addr, err.Error()) return err } - - b.updateIncomingCommunicationMetrics(len(challenge), time.Since(requestTime)) - msg, err = scramClient.Step(string(challenge)) + payload := make([]byte, int32(binary.BigEndian.Uint32(header))) + n, err := b.readFull(payload) + if err != nil { + b.addRequestInFlightMetrics(-1) + Logger.Printf("Failed to read response payload while authenticating with SASL to broker %s: %s\n", b.addr, err.Error()) + return err + } + b.updateIncomingCommunicationMetrics(n+4, time.Since(requestTime)) + msg, err = scramClient.Step(string(payload)) if err != nil { Logger.Println("SASL authentication failed", err) return err } } - Logger.Println("SASL authentication succeeded") + DebugLogger.Println("SASL authentication succeeded") return nil } -func (b *Broker) sendSaslAuthenticateRequest(correlationID int32, msg []byte) (int, error) { - rb := &SaslAuthenticateRequest{msg} - req := &request{correlationID: correlationID, clientID: b.conf.ClientID, body: rb} - buf, err := encode(req, b.conf.MetricRegistry) - if err != nil { - return 0, err +func (b *Broker) sendAndReceiveSASLSCRAMv1(authSendReceiver func(authBytes []byte) (*SaslAuthenticateResponse, error), scramClient SCRAMClient) error { + if err := scramClient.Begin(b.conf.Net.SASL.User, b.conf.Net.SASL.Password, b.conf.Net.SASL.SCRAMAuthzID); err != nil { + return fmt.Errorf("failed to start SCRAM exchange with the server: %w", err) } - return b.write(buf) -} - -func (b *Broker) receiveSaslAuthenticateResponse(correlationID int32) ([]byte, error) { - buf := make([]byte, responseLengthSize+correlationIDSize) - _, err := b.readFull(buf) + msg, err := scramClient.Step("") if err != nil { - return nil, err + return fmt.Errorf("failed to advance the SCRAM exchange: %w", err) } - header := responseHeader{} - err = versionedDecode(buf, &header, 0) - if err != nil { - return nil, err - } + for !scramClient.Done() { + res, err := authSendReceiver([]byte(msg)) + if err != nil { + return err + } - if header.correlationID != correlationID { - return nil, fmt.Errorf("correlation ID didn't match, wanted %d, got %d", b.correlationID, header.correlationID) + msg, err = scramClient.Step(string(res.SaslAuthBytes)) + if err != nil { + Logger.Println("SASL authentication failed", err) + return err + } } - buf = make([]byte, header.length-correlationIDSize) - _, err = b.readFull(buf) - if err != nil { - return nil, err - } + DebugLogger.Println("SASL 
authentication succeeded") - res := &SaslAuthenticateResponse{} - if err := versionedDecode(buf, res, 0); err != nil { - return nil, err - } - if res.Err != ErrNoError { - return nil, res.Err + return nil +} + +func (b *Broker) createSaslAuthenticateRequest(msg []byte) *SaslAuthenticateRequest { + authenticateRequest := SaslAuthenticateRequest{SaslAuthBytes: msg} + if b.conf.Version.IsAtLeast(V2_2_0_0) { + authenticateRequest.Version = 1 } - return res.SaslAuthBytes, nil + + return &authenticateRequest } // Build SASL/OAUTHBEARER initial client response as described by RFC-7628 @@ -1271,64 +1542,21 @@ func mapToString(extensions map[string]string, keyValSep string, elemSep string) return strings.Join(buf, elemSep) } -func (b *Broker) sendSASLPlainAuthClientResponse(correlationID int32) (int, error) { - authBytes := []byte(b.conf.Net.SASL.AuthIdentity + "\x00" + b.conf.Net.SASL.User + "\x00" + b.conf.Net.SASL.Password) - rb := &SaslAuthenticateRequest{authBytes} - req := &request{correlationID: correlationID, clientID: b.conf.ClientID, body: rb} - buf, err := encode(req, b.conf.MetricRegistry) - if err != nil { - return 0, err - } - - return b.write(buf) -} - -func (b *Broker) sendSASLOAuthBearerClientMessage(initialResp []byte, correlationID int32) (int, error) { - rb := &SaslAuthenticateRequest{initialResp} - - req := &request{correlationID: correlationID, clientID: b.conf.ClientID, body: rb} - - buf, err := encode(req, b.conf.MetricRegistry) - if err != nil { - return 0, err - } - - return b.write(buf) -} - -func (b *Broker) receiveSASLServerResponse(res *SaslAuthenticateResponse, correlationID int32) (int, error) { - buf := make([]byte, responseLengthSize+correlationIDSize) - bytesRead, err := b.readFull(buf) - if err != nil { - return bytesRead, err - } - - header := responseHeader{} - err = versionedDecode(buf, &header, 0) - if err != nil { - return bytesRead, err - } - - if header.correlationID != correlationID { - return bytesRead, fmt.Errorf("correlation ID didn't match, wanted %d, got %d", b.correlationID, header.correlationID) - } - - buf = make([]byte, header.length-correlationIDSize) - c, err := b.readFull(buf) - bytesRead += c - if err != nil { - return bytesRead, err - } - - if err := versionedDecode(buf, res, 0); err != nil { - return bytesRead, err - } - - if res.Err != ErrNoError { - return bytesRead, res.Err +func (b *Broker) computeSaslSessionLifetime(res *SaslAuthenticateResponse) { + if res.SessionLifetimeMs > 0 { + // Follows the Java Kafka implementation from SaslClientAuthenticator.ReauthInfo#setAuthenticationEndAndSessionReauthenticationTimes + // pick a random percentage between 85% and 95% for session re-authentication + positiveSessionLifetimeMs := res.SessionLifetimeMs + authenticationEndMs := currentUnixMilli() + pctWindowFactorToTakeNetworkLatencyAndClockDriftIntoAccount := 0.85 + pctWindowJitterToAvoidReauthenticationStormAcrossManyChannelsSimultaneously := 0.10 + pctToUse := pctWindowFactorToTakeNetworkLatencyAndClockDriftIntoAccount + rand.Float64()*pctWindowJitterToAvoidReauthenticationStormAcrossManyChannelsSimultaneously + sessionLifetimeMsToUse := int64(float64(positiveSessionLifetimeMs) * pctToUse) + DebugLogger.Printf("Session expiration in %d ms and session re-authentication on or after %d ms", positiveSessionLifetimeMs, sessionLifetimeMsToUse) + b.clientSessionReauthenticationTimeMs = authenticationEndMs + sessionLifetimeMsToUse + } else { + b.clientSessionReauthenticationTimeMs = 0 } - - return bytesRead, nil } func (b *Broker) 
updateIncomingCommunicationMetrics(bytes int, requestLatency time.Duration) { @@ -1387,45 +1615,70 @@ func (b *Broker) updateOutgoingCommunicationMetrics(bytes int) { } } +func (b *Broker) updateProtocolMetrics(rb protocolBody) { + protocolRequestsRate := b.protocolRequestsRate[rb.key()] + if protocolRequestsRate == nil { + protocolRequestsRate = metrics.GetOrRegisterMeter(fmt.Sprintf("protocol-requests-rate-%d", rb.key()), b.metricRegistry) + b.protocolRequestsRate[rb.key()] = protocolRequestsRate + } + protocolRequestsRate.Mark(1) + + if b.brokerProtocolRequestsRate != nil { + brokerProtocolRequestsRate := b.brokerProtocolRequestsRate[rb.key()] + if brokerProtocolRequestsRate == nil { + brokerProtocolRequestsRate = b.registerMeter(fmt.Sprintf("protocol-requests-rate-%d", rb.key())) + b.brokerProtocolRequestsRate[rb.key()] = brokerProtocolRequestsRate + } + brokerProtocolRequestsRate.Mark(1) + } +} + +func (b *Broker) updateThrottleMetric(throttleTime time.Duration) { + if throttleTime != time.Duration(0) { + DebugLogger.Printf( + "producer/broker/%d ProduceResponse throttled %v\n", + b.ID(), throttleTime) + if b.brokerThrottleTime != nil { + throttleTimeInMs := int64(throttleTime / time.Millisecond) + b.brokerThrottleTime.Update(throttleTimeInMs) + } + } +} + func (b *Broker) registerMetrics() { b.brokerIncomingByteRate = b.registerMeter("incoming-byte-rate") b.brokerRequestRate = b.registerMeter("request-rate") + b.brokerFetchRate = b.registerMeter("consumer-fetch-rate") b.brokerRequestSize = b.registerHistogram("request-size") b.brokerRequestLatency = b.registerHistogram("request-latency-in-ms") b.brokerOutgoingByteRate = b.registerMeter("outgoing-byte-rate") b.brokerResponseRate = b.registerMeter("response-rate") b.brokerResponseSize = b.registerHistogram("response-size") b.brokerRequestsInFlight = b.registerCounter("requests-in-flight") -} - -func (b *Broker) unregisterMetrics() { - for _, name := range b.registeredMetrics { - b.conf.MetricRegistry.Unregister(name) - } - b.registeredMetrics = nil + b.brokerThrottleTime = b.registerHistogram("throttle-time-in-ms") + b.brokerProtocolRequestsRate = map[int16]metrics.Meter{} } func (b *Broker) registerMeter(name string) metrics.Meter { nameForBroker := getMetricNameForBroker(name, b) - b.registeredMetrics = append(b.registeredMetrics, nameForBroker) - return metrics.GetOrRegisterMeter(nameForBroker, b.conf.MetricRegistry) + return metrics.GetOrRegisterMeter(nameForBroker, b.metricRegistry) } func (b *Broker) registerHistogram(name string) metrics.Histogram { nameForBroker := getMetricNameForBroker(name, b) - b.registeredMetrics = append(b.registeredMetrics, nameForBroker) - return getOrRegisterHistogram(nameForBroker, b.conf.MetricRegistry) + return getOrRegisterHistogram(nameForBroker, b.metricRegistry) } func (b *Broker) registerCounter(name string) metrics.Counter { nameForBroker := getMetricNameForBroker(name, b) - b.registeredMetrics = append(b.registeredMetrics, nameForBroker) - return metrics.GetOrRegisterCounter(nameForBroker, b.conf.MetricRegistry) + return metrics.GetOrRegisterCounter(nameForBroker, b.metricRegistry) } func validServerNameTLS(addr string, cfg *tls.Config) *tls.Config { if cfg == nil { - cfg = &tls.Config{} + cfg = &tls.Config{ + MinVersion: tls.VersionTLS12, + } } if cfg.ServerName != "" { return cfg diff --git a/vendor/github.com/Shopify/sarama/client.go b/vendor/github.com/Shopify/sarama/client.go index b29519342..f7872a1b3 100644 --- a/vendor/github.com/Shopify/sarama/client.go +++ 
b/vendor/github.com/Shopify/sarama/client.go @@ -1,9 +1,12 @@ package sarama import ( + "errors" + "math" "math/rand" "sort" "sync" + "sync/atomic" "time" ) @@ -29,6 +32,9 @@ type Client interface { // Brokers returns the current set of active brokers as retrieved from cluster metadata. Brokers() []*Broker + // Broker returns the active Broker if available for the broker ID. + Broker(brokerID int32) (*Broker, error) + // Topics returns the set of available topics as retrieved from cluster metadata. Topics() ([]string, error) @@ -44,6 +50,10 @@ type Client interface { // topic/partition, as determined by querying the cluster metadata. Leader(topic string, partitionID int32) (*Broker, error) + // LeaderAndEpoch returns the leader and its epoch for the current + // topic/partition, as determined by querying the cluster metadata. + LeaderAndEpoch(topic string, partitionID int32) (*Broker, int32, error) + // Replicas returns the set of all replica IDs for the given partition. Replicas(topic string, partitionID int32) ([]int32, error) @@ -56,6 +66,11 @@ type Client interface { // partition. Offline replicas are replicas which are offline OfflineReplicas(topic string, partitionID int32) ([]int32, error) + // RefreshBrokers takes a list of addresses to be used as seed brokers. + // Existing broker connections are closed and the updated list of seed brokers + // will be used for the next metadata fetch. + RefreshBrokers(addrs []string) error + // RefreshMetadata takes a list of topics and queries the cluster to refresh the // available metadata for those topics. If no topics are provided, it will refresh // metadata for all topics. @@ -77,9 +92,22 @@ type Client interface { // in local cache. This function only works on Kafka 0.8.2 and higher. RefreshCoordinator(consumerGroup string) error + // TransactionCoordinator returns the coordinating broker for a transaction id. It will + // return a locally cached value if it's available. You can call + // RefreshTransactionCoordinator to update the cached value. This function only works on + // Kafka 0.11.0.0 and higher. + TransactionCoordinator(transactionID string) (*Broker, error) + + // RefreshTransactionCoordinator retrieves the coordinator for a transaction id and stores it + // in local cache. This function only works on Kafka 0.11.0.0 and higher. + RefreshTransactionCoordinator(transactionID string) error + // InitProducerID retrieves information required for Idempotent Producer InitProducerID() (*InitProducerIDResponse, error) + // LeastLoadedBroker retrieves the broker that has the fewest responses pending + LeastLoadedBroker() *Broker + // Close shuts down all broker connections managed by this client. It is required // to call this function before a client object passes out of scope, as it will // otherwise leak memory. You must close any Producers or Consumers using a client @@ -104,6 +132,11 @@ const ( ) type client struct { + // updateMetaDataMs stores the time at which metadata was last updated. + // Note: this is accessed atomically so must be the first word in the struct + // as per golang/go#41970 + updateMetaDataMs int64 + conf *Config closer, closed chan none // for shutting down background metadata updater @@ -113,24 +146,26 @@ type client struct { seedBrokers []*Broker deadSeeds []*Broker - controllerID int32 // cluster controller broker id - brokers map[int32]*Broker // maps broker ids to brokers - metadata map[string]map[int32]*PartitionMetadata // maps topics to partition ids to metadata - metadataTopics map[string]none // topics that need to collect metadata - coordinators map[string]int32 // Maps consumer group names to coordinating broker IDs + controllerID int32 // cluster controller broker id + brokers map[int32]*Broker // maps broker ids to brokers + metadata map[string]map[int32]*PartitionMetadata // maps topics to partition ids to metadata + metadataTopics map[string]none // topics that need to collect metadata + coordinators map[string]int32 // Maps consumer group names to coordinating broker IDs + transactionCoordinators map[string]int32 // Maps transaction ids to coordinating broker IDs // If the number of partitions is large, we can get some churn calling cachedPartitions, // so the result is cached. It is important to update this value whenever metadata is changed cachedPartitionsResults map[string][maxPartitionIndex][]int32 lock sync.RWMutex // protects access to the maps that hold cluster state. + } // NewClient creates a new Client. It connects to one of the given broker addresses // and uses that broker to automatically fetch metadata on the rest of the kafka cluster. If metadata cannot // be retrieved from any of the given broker addresses, the client is not created. func NewClient(addrs []string, conf *Config) (Client, error) { - Logger.Println("Initializing new client") + DebugLogger.Println("Initializing new client") if conf == nil { conf = NewConfig() @@ -153,23 +188,19 @@ func NewClient(addrs []string, conf *Config) (Client, error) { metadataTopics: make(map[string]none), cachedPartitionsResults: make(map[string][maxPartitionIndex][]int32), coordinators: make(map[string]int32), + transactionCoordinators: make(map[string]int32), } - random := rand.New(rand.NewSource(time.Now().UnixNano())) - for _, index := range random.Perm(len(addrs)) { - client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index])) - } + client.randomizeSeedBrokers(addrs) if conf.Metadata.Full { // do an initial fetch of all cluster metadata by specifying an empty list of topics err := client.RefreshMetadata() - switch err { - case nil: - break - case ErrLeaderNotAvailable, ErrReplicaNotAvailable, ErrTopicAuthorizationFailed, ErrClusterAuthorizationFailed: + if err == nil { + } else if errors.Is(err, ErrLeaderNotAvailable) || errors.Is(err, ErrReplicaNotAvailable) || errors.Is(err, ErrTopicAuthorizationFailed) || errors.Is(err, ErrClusterAuthorizationFailed) { // indicates that maybe part of the cluster is down, but is not fatal to creating the client Logger.Println(err) - default: + } else { close(client.closed) // we haven't started the background updater yet, so we have to do this manually _ = client.Close() return nil, err @@ -177,7 +208,7 @@ func NewClient(addrs []string, conf *Config) (Client, error) { } go withRecover(client.backgroundMetadataUpdater) - Logger.Println("Successfully initialized new client") + DebugLogger.Println("Successfully initialized new client") return client, nil } @@ -196,23 +227,36 @@ func (client *client) Brokers() []*Broker { return 
brokers } +func (client *client) Broker(brokerID int32) (*Broker, error) { + client.lock.RLock() + defer client.lock.RUnlock() + broker, ok := client.brokers[brokerID] + if !ok { + return nil, ErrBrokerNotFound + } + _ = broker.Open(client.conf) + return broker, nil +} + func (client *client) InitProducerID() (*InitProducerIDResponse, error) { - var err error - for broker := client.any(); broker != nil; broker = client.any() { + brokerErrors := make([]error, 0) + for broker := client.anyBroker(); broker != nil; broker = client.anyBroker() { + var response *InitProducerIDResponse req := &InitProducerIDRequest{} response, err := broker.InitProducerID(req) - switch err.(type) { - case nil: + if err == nil { return response, nil - default: + } else { // some error, remove that broker and try again Logger.Printf("Client got error from broker %d when issuing InitProducerID : %v\n", broker.ID(), err) _ = broker.Close() + brokerErrors = append(brokerErrors, err) client.deregisterBroker(broker) } } - return nil, err + + return nil, Wrap(ErrOutOfBrokers, brokerErrors...) } func (client *client) Close() error { @@ -229,7 +273,7 @@ func (client *client) Close() error { client.lock.Lock() defer client.lock.Unlock() - Logger.Println("Closing Client") + DebugLogger.Println("Closing Client") for _, broker := range client.brokers { safeAsyncClose(broker) @@ -355,7 +399,7 @@ func (client *client) Replicas(topic string, partitionID int32) ([]int32, error) return nil, ErrUnknownTopicOrPartition } - if metadata.Err == ErrReplicaNotAvailable { + if errors.Is(metadata.Err, ErrReplicaNotAvailable) { return dupInt32Slice(metadata.Replicas), metadata.Err } return dupInt32Slice(metadata.Replicas), nil @@ -380,7 +424,7 @@ func (client *client) InSyncReplicas(topic string, partitionID int32) ([]int32, return nil, ErrUnknownTopicOrPartition } - if metadata.Err == ErrReplicaNotAvailable { + if errors.Is(metadata.Err, ErrReplicaNotAvailable) { return dupInt32Slice(metadata.Isr), metadata.Err } return dupInt32Slice(metadata.Isr), nil @@ -405,28 +449,61 @@ func (client *client) OfflineReplicas(topic string, partitionID int32) ([]int32, return nil, ErrUnknownTopicOrPartition } - if metadata.Err == ErrReplicaNotAvailable { + if errors.Is(metadata.Err, ErrReplicaNotAvailable) { return dupInt32Slice(metadata.OfflineReplicas), metadata.Err } return dupInt32Slice(metadata.OfflineReplicas), nil } func (client *client) Leader(topic string, partitionID int32) (*Broker, error) { + leader, _, err := client.LeaderAndEpoch(topic, partitionID) + return leader, err +} + +func (client *client) LeaderAndEpoch(topic string, partitionID int32) (*Broker, int32, error) { if client.Closed() { - return nil, ErrClosedClient + return nil, -1, ErrClosedClient } - leader, err := client.cachedLeader(topic, partitionID) - + leader, epoch, err := client.cachedLeader(topic, partitionID) if leader == nil { err = client.RefreshMetadata(topic) if err != nil { - return nil, err + return nil, -1, err } - leader, err = client.cachedLeader(topic, partitionID) + leader, epoch, err = client.cachedLeader(topic, partitionID) } - return leader, err + return leader, epoch, err +} + +func (client *client) RefreshBrokers(addrs []string) error { + if client.Closed() { + return ErrClosedClient + } + + client.lock.Lock() + defer client.lock.Unlock() + + for _, broker := range client.brokers { + _ = broker.Close() + delete(client.brokers, broker.ID()) + } + + for _, broker := range client.seedBrokers { + _ = broker.Close() + } + + for _, broker := range client.deadSeeds { 
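+ // dead seed brokers also hold connections, so close them too before the
+ // seed list is rebuilt from the new addresses below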
+ _ = broker.Close() + } + + client.seedBrokers = nil + client.deadSeeds = nil + + client.randomizeSeedBrokers(addrs) + + return nil } func (client *client) RefreshMetadata(topics ...string) error { @@ -438,7 +515,7 @@ func (client *client) RefreshMetadata(topics ...string) error { // error. This handles the case by returning an error instead of sending it // off to Kafka. See: https://github.com/Shopify/sarama/pull/38#issuecomment-26362310 for _, topic := range topics { - if len(topic) == 0 { + if topic == "" { return ErrInvalidTopic // this is the error that 0.8.2 and later correctly return } } @@ -456,7 +533,6 @@ func (client *client) GetOffset(topic string, partitionID int32, time int64) (in } offset, err := client.getOffset(topic, partitionID, time) - if err != nil { if err := client.RefreshMetadata(topic); err != nil { return -1, err @@ -496,7 +572,10 @@ func (client *client) Controller() (*Broker, error) { func (client *client) deregisterController() { client.lock.Lock() defer client.lock.Unlock() - delete(client.brokers, client.controllerID) + if controller, ok := client.brokers[client.controllerID]; ok { + _ = controller.Close() + delete(client.brokers, client.controllerID) + } } // RefreshController retrieves the cluster controller from fresh metadata @@ -548,7 +627,7 @@ func (client *client) RefreshCoordinator(consumerGroup string) error { return ErrClosedClient } - response, err := client.getConsumerMetadata(consumerGroup, client.conf.Metadata.Retry.Max) + response, err := client.findCoordinator(consumerGroup, CoordinatorGroup, client.conf.Metadata.Retry.Max) if err != nil { return err } @@ -560,16 +639,62 @@ func (client *client) RefreshCoordinator(consumerGroup string) error { return nil } +func (client *client) TransactionCoordinator(transactionID string) (*Broker, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + coordinator := client.cachedTransactionCoordinator(transactionID) + + if coordinator == nil { + if err := client.RefreshTransactionCoordinator(transactionID); err != nil { + return nil, err + } + coordinator = client.cachedTransactionCoordinator(transactionID) + } + + if coordinator == nil { + return nil, ErrConsumerCoordinatorNotAvailable + } + + _ = coordinator.Open(client.conf) + return coordinator, nil +} + +func (client *client) RefreshTransactionCoordinator(transactionID string) error { + if client.Closed() { + return ErrClosedClient + } + + response, err := client.findCoordinator(transactionID, CoordinatorTransaction, client.conf.Metadata.Retry.Max) + if err != nil { + return err + } + + client.lock.Lock() + defer client.lock.Unlock() + client.registerBroker(response.Coordinator) + client.transactionCoordinators[transactionID] = response.Coordinator.ID() + return nil +} + // private broker management helpers +func (client *client) randomizeSeedBrokers(addrs []string) { + random := rand.New(rand.NewSource(time.Now().UnixNano())) + for _, index := range random.Perm(len(addrs)) { + client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index])) + } +} + func (client *client) updateBroker(brokers []*Broker) { - var currentBroker = make(map[int32]*Broker, len(brokers)) + currentBroker := make(map[int32]*Broker, len(brokers)) for _, broker := range brokers { currentBroker[broker.ID()] = broker if client.brokers[broker.ID()] == nil { // add new broker client.brokers[broker.ID()] = broker - Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr()) + DebugLogger.Printf("client/brokers registered new 
broker #%d at %s", broker.ID(), broker.Addr()) } else if broker.Addr() != client.brokers[broker.ID()].Addr() { // replace broker with new address safeAsyncClose(client.brokers[broker.ID()]) client.brokers[broker.ID()] = broker @@ -597,7 +722,7 @@ func (client *client) registerBroker(broker *Broker) { if client.brokers[broker.ID()] == nil { client.brokers[broker.ID()] = broker - Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr()) + DebugLogger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr()) } else if broker.Addr() != client.brokers[broker.ID()].Addr() { safeAsyncClose(client.brokers[broker.ID()]) client.brokers[broker.ID()] = broker @@ -619,7 +744,7 @@ func (client *client) deregisterBroker(broker *Broker) { // but we really shouldn't have to; once that loop is made better this case can be // removed, and the function generally can be renamed from `deregisterBroker` to // `nextSeedBroker` or something - Logger.Printf("client/brokers deregistered broker #%d at %s", broker.ID(), broker.Addr()) + DebugLogger.Printf("client/brokers deregistered broker #%d at %s", broker.ID(), broker.Addr()) delete(client.brokers, broker.ID()) } } @@ -633,7 +758,7 @@ func (client *client) resurrectDeadBrokers() { client.deadSeeds = nil } -func (client *client) any() *Broker { +func (client *client) anyBroker() *Broker { client.lock.RLock() defer client.lock.RUnlock() @@ -651,6 +776,30 @@ func (client *client) any() *Broker { return nil } +func (client *client) LeastLoadedBroker() *Broker { + client.lock.RLock() + defer client.lock.RUnlock() + + if len(client.seedBrokers) > 0 { + _ = client.seedBrokers[0].Open(client.conf) + return client.seedBrokers[0] + } + + var leastLoadedBroker *Broker + pendingRequests := math.MaxInt + for _, broker := range client.brokers { + if pendingRequests > broker.ResponseSize() { + pendingRequests = broker.ResponseSize() + leastLoadedBroker = broker + } + } + + if leastLoadedBroker != nil { + _ = leastLoadedBroker.Open(client.conf) + } + return leastLoadedBroker +} + // private caching/lazy metadata helpers type partitionType int @@ -697,7 +846,7 @@ func (client *client) setPartitionCache(topic string, partitionSet partitionType ret := make([]int32, 0, len(partitions)) for _, partition := range partitions { - if partitionSet == writablePartitions && partition.Err == ErrLeaderNotAvailable { + if partitionSet == writablePartitions && errors.Is(partition.Err, ErrLeaderNotAvailable) { continue } ret = append(ret, partition.ID) @@ -707,7 +856,7 @@ func (client *client) setPartitionCache(topic string, partitionSet partitionType return ret } -func (client *client) cachedLeader(topic string, partitionID int32) (*Broker, error) { +func (client *client) cachedLeader(topic string, partitionID int32) (*Broker, int32, error) { client.lock.RLock() defer client.lock.RUnlock() @@ -715,19 +864,19 @@ func (client *client) cachedLeader(topic string, partitionID int32) (*Broker, er if partitions != nil { metadata, ok := partitions[partitionID] if ok { - if metadata.Err == ErrLeaderNotAvailable { - return nil, ErrLeaderNotAvailable + if errors.Is(metadata.Err, ErrLeaderNotAvailable) { + return nil, -1, ErrLeaderNotAvailable } b := client.brokers[metadata.Leader] if b == nil { - return nil, ErrLeaderNotAvailable + return nil, -1, ErrLeaderNotAvailable } _ = b.Open(client.conf) - return b, nil + return b, metadata.LeaderEpoch, nil } } - return nil, ErrUnknownTopicOrPartition + return nil, -1, ErrUnknownTopicOrPartition } func 
(client *client) getOffset(topic string, partitionID int32, time int64) (int64, error) { @@ -753,7 +902,7 @@ func (client *client) getOffset(topic string, partitionID int32, time int64) (in _ = broker.Close() return -1, ErrIncompleteResponse } - if block.Err != ErrNoError { + if !errors.Is(block.Err, ErrNoError) { return -1, block.Err } if len(block.Offsets) != 1 { @@ -822,34 +971,43 @@ func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int, Logger.Println("client/metadata skipping last retries as we would go past the metadata timeout") return err } - Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining) if backoff > 0 { time.Sleep(backoff) } + + t := atomic.LoadInt64(&client.updateMetaDataMs) + if time.Since(time.Unix(t/1e3, 0)) < backoff { + return err + } + Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining) + return client.tryRefreshMetadata(topics, attemptsRemaining-1, deadline) } return err } - broker := client.any() - for ; broker != nil && !pastDeadline(0); broker = client.any() { - allowAutoTopicCreation := true + broker := client.anyBroker() + brokerErrors := make([]error, 0) + for ; broker != nil && !pastDeadline(0); broker = client.anyBroker() { + allowAutoTopicCreation := client.conf.Metadata.AllowAutoTopicCreation if len(topics) > 0 { - Logger.Printf("client/metadata fetching metadata for %v from broker %s\n", topics, broker.addr) + DebugLogger.Printf("client/metadata fetching metadata for %v from broker %s\n", topics, broker.addr) } else { allowAutoTopicCreation = false - Logger.Printf("client/metadata fetching metadata for all topics from broker %s\n", broker.addr) + DebugLogger.Printf("client/metadata fetching metadata for all topics from broker %s\n", broker.addr) } - req := &MetadataRequest{Topics: topics, AllowAutoTopicCreation: allowAutoTopicCreation} - if client.conf.Version.IsAtLeast(V1_0_0_0) { - req.Version = 5 - } else if client.conf.Version.IsAtLeast(V0_10_0_0) { - req.Version = 1 + req := NewMetadataRequest(client.conf.Version, topics) + req.AllowAutoTopicCreation = allowAutoTopicCreation + t := atomic.LoadInt64(&client.updateMetaDataMs) + if !atomic.CompareAndSwapInt64(&client.updateMetaDataMs, t, time.Now().UnixNano()/int64(time.Millisecond)) { + return nil } + response, err := broker.GetMetadata(req) - switch err.(type) { - case nil: + var kerror KError + var packetEncodingError PacketEncodingError + if err == nil { allKnownMetaData := len(topics) == 0 // valid response, use it shouldRetry, err := client.updateMetadata(response, allKnownMetaData) @@ -858,19 +1016,17 @@ func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int, return retry(err) // note: err can be nil } return err - - case PacketEncodingError: + } else if errors.As(err, &packetEncodingError) { // didn't even send, return the error return err - - case KError: + } else if errors.As(err, &kerror) { // if SASL auth error return as this _should_ be a non retryable err for all brokers - if err.(KError) == ErrSASLAuthenticationFailed { + if errors.Is(err, ErrSASLAuthenticationFailed) { Logger.Println("client/metadata failed SASL authentication") return err } - if err.(KError) == ErrTopicAuthorizationFailed { + if errors.Is(err, ErrTopicAuthorizationFailed) { Logger.Println("client is not authorized to access this topic. 
The topics were: ", topics) return err } @@ -878,23 +1034,24 @@ func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int, Logger.Printf("client/metadata got error from broker %d while fetching metadata: %v\n", broker.ID(), err) _ = broker.Close() client.deregisterBroker(broker) - - default: + } else { // some other error, remove that broker and try again Logger.Printf("client/metadata got error from broker %d while fetching metadata: %v\n", broker.ID(), err) + brokerErrors = append(brokerErrors, err) _ = broker.Close() client.deregisterBroker(broker) } } + error := Wrap(ErrOutOfBrokers, brokerErrors...) if broker != nil { Logger.Printf("client/metadata not fetching metadata from broker %s as we would go past the metadata timeout\n", broker.addr) - return retry(ErrOutOfBrokers) + return retry(error) } Logger.Println("client/metadata no available broker to send metadata request to") client.resurrectDeadBrokers() - return retry(ErrOutOfBrokers) + return retry(error) } // if no fatal error, returns a list of topics that need retrying due to ErrLeaderNotAvailable @@ -951,7 +1108,7 @@ func (client *client) updateMetadata(data *MetadataResponse, allKnownMetaData bo client.metadata[topic.Name] = make(map[int32]*PartitionMetadata, len(topic.Partitions)) for _, partition := range topic.Partitions { client.metadata[topic.Name][partition.ID] = partition - if partition.Err == ErrLeaderNotAvailable { + if errors.Is(partition.Err, ErrLeaderNotAvailable) { retry = true } } @@ -974,6 +1131,15 @@ func (client *client) cachedCoordinator(consumerGroup string) *Broker { return nil } +func (client *client) cachedTransactionCoordinator(transactionID string) *Broker { + client.lock.RLock() + defer client.lock.RUnlock() + if coordinatorID, ok := client.transactionCoordinators[transactionID]; ok { + return client.brokers[coordinatorID] + } + return nil +} + func (client *client) cachedController() *Broker { client.lock.RLock() defer client.lock.RUnlock() @@ -990,46 +1156,49 @@ func (client *client) computeBackoff(attemptsRemaining int) time.Duration { return client.conf.Metadata.Retry.Backoff } -func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemaining int) (*FindCoordinatorResponse, error) { +func (client *client) findCoordinator(coordinatorKey string, coordinatorType CoordinatorType, attemptsRemaining int) (*FindCoordinatorResponse, error) { retry := func(err error) (*FindCoordinatorResponse, error) { if attemptsRemaining > 0 { backoff := client.computeBackoff(attemptsRemaining) Logger.Printf("client/coordinator retrying after %dms... 
(%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining) time.Sleep(backoff) - return client.getConsumerMetadata(consumerGroup, attemptsRemaining-1) + return client.findCoordinator(coordinatorKey, coordinatorType, attemptsRemaining-1) } return nil, err } - for broker := client.any(); broker != nil; broker = client.any() { - Logger.Printf("client/coordinator requesting coordinator for consumergroup %s from %s\n", consumerGroup, broker.Addr()) + brokerErrors := make([]error, 0) + for broker := client.anyBroker(); broker != nil; broker = client.anyBroker() { + DebugLogger.Printf("client/coordinator requesting coordinator for %s from %s\n", coordinatorKey, broker.Addr()) request := new(FindCoordinatorRequest) - request.CoordinatorKey = consumerGroup - request.CoordinatorType = CoordinatorGroup + request.CoordinatorKey = coordinatorKey + request.CoordinatorType = coordinatorType - response, err := broker.FindCoordinator(request) + if client.conf.Version.IsAtLeast(V0_11_0_0) { + request.Version = 1 + } + response, err := broker.FindCoordinator(request) if err != nil { Logger.Printf("client/coordinator request to broker %s failed: %s\n", broker.Addr(), err) - switch err.(type) { - case PacketEncodingError: + var packetEncodingError PacketEncodingError + if errors.As(err, &packetEncodingError) { return nil, err - default: + } else { _ = broker.Close() + brokerErrors = append(brokerErrors, err) client.deregisterBroker(broker) continue } } - switch response.Err { - case ErrNoError: - Logger.Printf("client/coordinator coordinator for consumergroup %s is #%d (%s)\n", consumerGroup, response.Coordinator.ID(), response.Coordinator.Addr()) + if errors.Is(response.Err, ErrNoError) { + DebugLogger.Printf("client/coordinator coordinator for %s is #%d (%s)\n", coordinatorKey, response.Coordinator.ID(), response.Coordinator.Addr()) return response, nil - - case ErrConsumerCoordinatorNotAvailable: - Logger.Printf("client/coordinator coordinator for consumer group %s is not available\n", consumerGroup) + } else if errors.Is(response.Err, ErrConsumerCoordinatorNotAvailable) { + Logger.Printf("client/coordinator coordinator for %s is not available\n", coordinatorKey) // This is very ugly, but this scenario will only happen once per cluster. // The __consumer_offsets topic only has to be created one time. @@ -1038,20 +1207,25 @@ func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemainin Logger.Printf("client/coordinator the __consumer_offsets topic is not initialized completely yet. Waiting 2 seconds...\n") time.Sleep(2 * time.Second) } + if coordinatorType == CoordinatorTransaction { + if _, err := client.Leader("__transaction_state", 0); err != nil { + Logger.Printf("client/coordinator the __transaction_state topic is not initialized completely yet. 
Waiting 2 seconds...\n") + time.Sleep(2 * time.Second) + } + } return retry(ErrConsumerCoordinatorNotAvailable) - case ErrGroupAuthorizationFailed: - Logger.Printf("client was not authorized to access group %s while attempting to find coordinator", consumerGroup) + } else if errors.Is(response.Err, ErrGroupAuthorizationFailed) { + Logger.Printf("client was not authorized to access group %s while attempting to find coordinator", coordinatorKey) return retry(ErrGroupAuthorizationFailed) - - default: + } else { return nil, response.Err } } Logger.Println("client/coordinator no available broker to send consumer metadata request to") client.resurrectDeadBrokers() - return retry(ErrOutOfBrokers) + return retry(Wrap(ErrOutOfBrokers, brokerErrors...)) } // nopCloserClient embeds an existing Client, but disables diff --git a/vendor/github.com/Shopify/sarama/compress.go b/vendor/github.com/Shopify/sarama/compress.go index 12cd7c3d5..504007a49 100644 --- a/vendor/github.com/Shopify/sarama/compress.go +++ b/vendor/github.com/Shopify/sarama/compress.go @@ -7,7 +7,7 @@ import ( "sync" snappy "github.com/eapache/go-xerial-snappy" - "github.com/pierrec/lz4" + "github.com/pierrec/lz4/v4" ) var ( @@ -187,7 +187,7 @@ func compress(cc CompressionCodec, level int, data []byte) ([]byte, error) { } return buf.Bytes(), nil case CompressionZSTD: - return zstdCompress(nil, data) + return zstdCompress(ZstdEncoderParams{level}, nil, data) default: return nil, PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", cc)} } diff --git a/vendor/github.com/Shopify/sarama/config.go b/vendor/github.com/Shopify/sarama/config.go index 9b7ce7aeb..b07034434 100644 --- a/vendor/github.com/Shopify/sarama/config.go +++ b/vendor/github.com/Shopify/sarama/config.go @@ -4,7 +4,7 @@ import ( "compress/gzip" "crypto/tls" "fmt" - "io/ioutil" + "io" "net" "regexp" "time" @@ -38,6 +38,9 @@ type Config struct { Net struct { // How many outstanding requests a connection is allowed to have before // sending on it blocks (default 5). + // Throughput can improve but message ordering is not guaranteed if Producer.Idempotent is disabled, see: + // https://kafka.apache.org/protocol#protocol_network + // https://kafka.apache.org/28/documentation.html#producerconfigs_max.in.flight.requests.per.connection MaxOpenRequests int // All three of the below configurations are similar to the @@ -148,6 +151,11 @@ type Config struct { // `Net.[Dial|Read]Timeout * BrokerCount * (Metadata.Retry.Max + 1) + Metadata.Retry.Backoff * Metadata.Retry.Max` // to fail. Timeout time.Duration + + // Whether to allow auto-create topics in metadata refresh. If set to true, + // the broker may auto-create topics that we requested which do not already exist, + // if it is configured to do so (`auto.create.topics.enable` is true). Defaults to true. + AllowAutoTopicCreation bool } // Producer is the namespace for configuration related to producing messages, @@ -180,6 +188,28 @@ type Config struct { // If enabled, the producer will ensure that exactly one copy of each message is // written. Idempotent bool + // Transaction specifies the transactional producer settings + Transaction struct { + // Used in transactions to identify an instance of a producer through restarts + ID string + // Amount of time a transaction can remain unresolved (neither committed nor aborted) + // default is 1 min + Timeout time.Duration + + Retry struct { + // The total number of times to retry sending a message (default 50). + // Similar to the `message.send.max.retries` setting of the JVM producer. 
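+ // With the defaults (Max = 50, Backoff = 100ms) a failed transactional
+ // request is retried for roughly five seconds in total, assuming no
+ // BackoffFunc is set to override the fixed backoff.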
+ Max int + // How long to wait for the cluster to settle between retries + // (default 10ms). Similar to the `retry.backoff.ms` setting of the + // JVM producer. + Backoff time.Duration + // Called to compute backoff time dynamically. Useful for implementing + // more sophisticated backoff strategies. This takes precedence over + // `Backoff` if set. + BackoffFunc func(retries, maxRetries int) time.Duration + } + } // Return specifies what channels will be populated. If they are set to true, // you must read from the respective channels to prevent deadlock. If, @@ -265,7 +295,16 @@ type Config struct { } Rebalance struct { // Strategy for allocating topic partitions to members (default BalanceStrategyRange) + // Deprecated: Strategy exists for historical compatibility + // and should not be used. Please use GroupStrategies. Strategy BalanceStrategy + + // GroupStrategies is the priority-ordered list of client-side consumer group + // balancing strategies that will be offered to the coordinator. The first + // strategy that all group members support will be chosen by the leader. + // default: [BalanceStrategyRange] + GroupStrategies []BalanceStrategy + // The maximum allowed time for each worker to join the group once a rebalance has begun. // This is basically a limit on the amount of time needed for all tasks to flush any pending // data and commit offsets. If the timeout is exceeded, then the worker will be removed from @@ -288,6 +327,17 @@ type Config struct { // coordinator for the group. UserData []byte } + + // support KIP-345 + InstanceId string + + // If true, consumer offsets will be automatically reset to configured Initial value + // if the fetched consumer offset is out of range of available offsets. Out of range + // can happen if the data has been deleted from the server, or during situations of + // under-replication where a replica does not have all the data yet. It can be + // dangerous to reset the offset automatically, particularly in the latter case. Defaults + // to true to maintain existing behavior. + ResetInvalidOffsets bool } Retry struct { @@ -422,6 +472,11 @@ type Config struct { // in the background while user code is working, greatly improving throughput. // Defaults to 256. ChannelBufferSize int + // ApiVersionsRequest determines whether Sarama should send an + // ApiVersionsRequest message to each broker as part of its initial + // connection. This defaults to `true` to match the official Java client + // and most 3rdparty ones. + ApiVersionsRequest bool // The version of Kafka that Sarama will assume it is running against. // Defaults to the oldest supported stable version. 
Since Kafka provides // backwards-compatibility, setting it to a version older than you have @@ -456,6 +511,7 @@ func NewConfig() *Config { c.Metadata.Retry.Backoff = 250 * time.Millisecond c.Metadata.RefreshFrequency = 10 * time.Minute c.Metadata.Full = true + c.Metadata.AllowAutoTopicCreation = true c.Producer.MaxMessageBytes = 1000000 c.Producer.RequiredAcks = WaitForLocal @@ -466,10 +522,14 @@ func NewConfig() *Config { c.Producer.Return.Errors = true c.Producer.CompressionLevel = CompressionLevelDefault + c.Producer.Transaction.Timeout = 1 * time.Minute + c.Producer.Transaction.Retry.Max = 50 + c.Producer.Transaction.Retry.Backoff = 100 * time.Millisecond + c.Consumer.Fetch.Min = 1 c.Consumer.Fetch.Default = 1024 * 1024 c.Consumer.Retry.Backoff = 2 * time.Second - c.Consumer.MaxWaitTime = 250 * time.Millisecond + c.Consumer.MaxWaitTime = 500 * time.Millisecond c.Consumer.MaxProcessingTime = 100 * time.Millisecond c.Consumer.Return.Errors = false c.Consumer.Offsets.AutoCommit.Enable = true @@ -479,14 +539,16 @@ func NewConfig() *Config { c.Consumer.Group.Session.Timeout = 10 * time.Second c.Consumer.Group.Heartbeat.Interval = 3 * time.Second - c.Consumer.Group.Rebalance.Strategy = BalanceStrategyRange + c.Consumer.Group.Rebalance.GroupStrategies = []BalanceStrategy{BalanceStrategyRange} c.Consumer.Group.Rebalance.Timeout = 60 * time.Second c.Consumer.Group.Rebalance.Retry.Max = 4 c.Consumer.Group.Rebalance.Retry.Backoff = 2 * time.Second + c.Consumer.Group.ResetInvalidOffsets = true c.ClientID = defaultClientID c.ChannelBufferSize = 256 - c.Version = MinVersion + c.ApiVersionsRequest = true + c.Version = DefaultVersion c.MetricRegistry = metrics.NewRegistry() return c @@ -494,6 +556,8 @@ func NewConfig() *Config { // Validate checks a Config instance. It will return a // ConfigurationError if the specified values don't make sense. +// +//nolint:gocyclo // This function's cyclomatic complexity has gone beyond 100 func (c *Config) Validate() error { // some configuration values should be warned on but not fail completely, do those first if !c.Net.TLS.Enable && c.Net.TLS.Config != nil { @@ -663,7 +727,7 @@ func (c *Config) Validate() error { if c.Producer.Compression == CompressionGZIP { if c.Producer.CompressionLevel != CompressionLevelDefault { - if _, err := gzip.NewWriterLevel(ioutil.Discard, c.Producer.CompressionLevel); err != nil { + if _, err := gzip.NewWriterLevel(io.Discard, c.Producer.CompressionLevel); err != nil { return ConfigurationError(fmt.Sprintf("gzip compression does not work with level %d: %v", c.Producer.CompressionLevel, err)) } } @@ -688,6 +752,10 @@ func (c *Config) Validate() error { } } + if c.Producer.Transaction.ID != "" && !c.Producer.Idempotent { + return ConfigurationError("Transactional producer requires Idempotent to be true") + } + // validate the Consumer values switch { case c.Consumer.Fetch.Min <= 0: @@ -716,6 +784,10 @@ func (c *Config) Validate() error { Logger.Println("Deprecation warning: Consumer.Offsets.CommitInterval exists for historical compatibility" + " and should not be used. Please use Consumer.Offsets.AutoCommit, the current value will be ignored") } + if c.Consumer.Group.Rebalance.Strategy != nil { + Logger.Println("Deprecation warning: Consumer.Group.Rebalance.Strategy exists for historical compatibility" + + " and should not be used. 
Please use Consumer.Group.Rebalance.GroupStrategies") + } // validate IsolationLevel if c.Consumer.IsolationLevel == ReadCommitted && !c.Version.IsAtLeast(V0_11_0_0) { @@ -730,8 +802,8 @@ func (c *Config) Validate() error { return ConfigurationError("Consumer.Group.Heartbeat.Interval must be >= 1ms") case c.Consumer.Group.Heartbeat.Interval >= c.Consumer.Group.Session.Timeout: return ConfigurationError("Consumer.Group.Heartbeat.Interval must be < Consumer.Group.Session.Timeout") - case c.Consumer.Group.Rebalance.Strategy == nil: - return ConfigurationError("Consumer.Group.Rebalance.Strategy must not be empty") + case c.Consumer.Group.Rebalance.Strategy == nil && len(c.Consumer.Group.Rebalance.GroupStrategies) == 0: + return ConfigurationError("Consumer.Group.Rebalance.GroupStrategies or Consumer.Group.Rebalance.Strategy must not be empty") case c.Consumer.Group.Rebalance.Timeout <= time.Millisecond: return ConfigurationError("Consumer.Group.Rebalance.Timeout must be >= 1ms") case c.Consumer.Group.Rebalance.Retry.Max < 0: @@ -740,6 +812,21 @@ func (c *Config) Validate() error { return ConfigurationError("Consumer.Group.Rebalance.Retry.Backoff must be >= 0") } + for _, strategy := range c.Consumer.Group.Rebalance.GroupStrategies { + if strategy == nil { + return ConfigurationError("elements in Consumer.Group.Rebalance.GroupStrategies must not be empty") + } + } + + if c.Consumer.Group.InstanceId != "" { + if !c.Version.IsAtLeast(V2_3_0_0) { + return ConfigurationError("Consumer.Group.InstanceId needs Version >= 2.3") + } + if err := validateGroupInstanceId(c.Consumer.Group.InstanceId); err != nil { + return err + } + } + // validate misc shared values switch { case c.ChannelBufferSize < 0: @@ -763,3 +850,23 @@ func (c *Config) getDialer() proxy.Dialer { } } } + +const MAX_GROUP_INSTANCE_ID_LENGTH = 249 + +var GROUP_INSTANCE_ID_REGEXP = regexp.MustCompile(`^[0-9a-zA-Z\._\-]+$`) + +func validateGroupInstanceId(id string) error { + if id == "" { + return ConfigurationError("Group instance id must be a non-empty string") + } + if id == "." || id == ".." { + return ConfigurationError(`Group instance id cannot be "." or ".."`) + } + if len(id) > MAX_GROUP_INSTANCE_ID_LENGTH { + return ConfigurationError(fmt.Sprintf(`Group instance id cannot be longer than %v characters: %s`, MAX_GROUP_INSTANCE_ID_LENGTH, id)) + } + if !GROUP_INSTANCE_ID_REGEXP.MatchString(id) { + return ConfigurationError(fmt.Sprintf(`Group instance id %s is illegal: it contains a character other than alphanumerics, '.', '_' and '-'`, id)) + } + return nil +} diff --git a/vendor/github.com/Shopify/sarama/consumer.go b/vendor/github.com/Shopify/sarama/consumer.go index 5d9922344..eb27df8d7 100644 --- a/vendor/github.com/Shopify/sarama/consumer.go +++ b/vendor/github.com/Shopify/sarama/consumer.go @@ -35,6 +35,10 @@ func (ce ConsumerError) Error() string { return fmt.Sprintf("kafka: error while consuming %s/%d: %s", ce.Topic, ce.Partition, ce.Err) } +func (ce ConsumerError) Unwrap() error { + return ce.Err +} + // ConsumerErrors is a type that wraps a batch of errors and implements the Error interface. // It can be returned from the PartitionConsumer's Close methods to avoid the need to manually drain errors // when stopping. @@ -70,13 +74,37 @@ type Consumer interface { // Close shuts down the consumer. It must be called after all child // PartitionConsumers have already been closed. Close() error + + // Pause suspends fetching from the requested partitions.
Future calls to the broker will not return any + // records from these partitions until they have been resumed using Resume()/ResumeAll(). + // Note that this method does not affect partition subscription. + // In particular, it does not cause a group rebalance when automatic assignment is used. + Pause(topicPartitions map[string][]int32) + + // Resume resumes specified partitions which have been paused with Pause()/PauseAll(). + // New calls to the broker will return records from these partitions if there are any to be fetched. + Resume(topicPartitions map[string][]int32) + + // PauseAll suspends fetching from all partitions. Future calls to the broker will not return any + // records from these partitions until they have been resumed using Resume()/ResumeAll(). + // Note that this method does not affect partition subscription. + // In particular, it does not cause a group rebalance when automatic assignment is used. + PauseAll() + + // ResumeAll resumes all partitions which have been paused with Pause()/PauseAll(). + // New calls to the broker will return records from these partitions if there are any to be fetched. + ResumeAll() } +// max time to wait for more partition subscriptions +const partitionConsumersBatchTimeout = 100 * time.Millisecond + type consumer struct { conf *Config children map[string]map[int32]*partitionConsumer brokerConsumers map[*Broker]*brokerConsumer client Client + metricRegistry metrics.Registry lock sync.Mutex } @@ -109,12 +137,14 @@ func newConsumer(client Client) (Consumer, error) { conf: client.Config(), children: make(map[string]map[int32]*partitionConsumer), brokerConsumers: make(map[*Broker]*brokerConsumer), + metricRegistry: newCleanupRegistry(client.Config().MetricRegistry), } return c, nil } func (c *consumer) Close() error { + c.metricRegistry.UnregisterAll() return c.client.Close() } @@ -128,25 +158,26 @@ func (c *consumer) Partitions(topic string) ([]int32, error) { func (c *consumer) ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) { child := &partitionConsumer{ - consumer: c, - conf: c.conf, - topic: topic, - partition: partition, - messages: make(chan *ConsumerMessage, c.conf.ChannelBufferSize), - errors: make(chan *ConsumerError, c.conf.ChannelBufferSize), - feeder: make(chan *FetchResponse, 1), - trigger: make(chan none, 1), - dying: make(chan none), - fetchSize: c.conf.Consumer.Fetch.Default, + consumer: c, + conf: c.conf, + topic: topic, + partition: partition, + messages: make(chan *ConsumerMessage, c.conf.ChannelBufferSize), + errors: make(chan *ConsumerError, c.conf.ChannelBufferSize), + feeder: make(chan *FetchResponse, 1), + leaderEpoch: invalidLeaderEpoch, + preferredReadReplica: invalidPreferredReplicaID, + trigger: make(chan none, 1), + dying: make(chan none), + fetchSize: c.conf.Consumer.Fetch.Default, } if err := child.chooseStartingOffset(offset); err != nil { return nil, err } - var leader *Broker - var err error - if leader, err = c.client.Leader(child.topic, child.partition); err != nil { + leader, epoch, err := c.client.LeaderAndEpoch(child.topic, child.partition) + if err != nil { return nil, err } @@ -157,6 +188,7 @@ func (c *consumer) ConsumePartition(topic string, partition int32, offset int64) go withRecover(child.dispatcher) go withRecover(child.responseFeeder) + child.leaderEpoch = epoch child.broker = c.refBrokerConsumer(leader) child.broker.input <- child @@ -240,6 +272,62 @@ func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) { delete(c.brokerConsumers,
brokerWorker.broker) } +// Pause implements Consumer. +func (c *consumer) Pause(topicPartitions map[string][]int32) { + c.lock.Lock() + defer c.lock.Unlock() + + for topic, partitions := range topicPartitions { + for _, partition := range partitions { + if topicConsumers, ok := c.children[topic]; ok { + if partitionConsumer, ok := topicConsumers[partition]; ok { + partitionConsumer.Pause() + } + } + } + } +} + +// Resume implements Consumer. +func (c *consumer) Resume(topicPartitions map[string][]int32) { + c.lock.Lock() + defer c.lock.Unlock() + + for topic, partitions := range topicPartitions { + for _, partition := range partitions { + if topicConsumers, ok := c.children[topic]; ok { + if partitionConsumer, ok := topicConsumers[partition]; ok { + partitionConsumer.Resume() + } + } + } + } +} + +// PauseAll implements Consumer. +func (c *consumer) PauseAll() { + c.lock.Lock() + defer c.lock.Unlock() + + for _, partitions := range c.children { + for _, partitionConsumer := range partitions { + partitionConsumer.Pause() + } + } +} + +// ResumeAll implements Consumer. +func (c *consumer) ResumeAll() { + c.lock.Lock() + defer c.lock.Unlock() + + for _, partitions := range c.children { + for _, partitionConsumer := range partitions { + partitionConsumer.Resume() + } + } +} + // PartitionConsumer // PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call one of Close() or @@ -287,6 +375,20 @@ type PartitionConsumer interface { // i.e. the offset that will be used for the next message that will be produced. // You can use this to determine how far behind the processing is. HighWaterMarkOffset() int64 + + // Pause suspends fetching from this partition. Future calls to the broker will not return + // any records from this partition until it has been resumed using Resume(). + // Note that this method does not affect partition subscription. + // In particular, it does not cause a group rebalance when automatic assignment is used. + Pause() + + // Resume resumes this partition which has been paused with Pause(). + // New calls to the broker will return records from this partition if there are any to be fetched. + // If the partition was not previously paused, this method is a no-op.
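+ //
+ // A minimal usage sketch (illustrative only; pc is assumed to be a
+ // PartitionConsumer obtained from ConsumePartition):
+ //
+ //	pc.Pause()          // fetching from this partition stops
+ //	if pc.IsPaused() {  // observe the paused state
+ //		pc.Resume() // fetching picks up again at the current offset
+ //	}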
+ Resume() + + // IsPaused indicates if this partition consumer is paused or not + IsPaused() bool } type partitionConsumer struct { @@ -299,6 +401,9 @@ type partitionConsumer struct { errors chan *ConsumerError feeder chan *FetchResponse + leaderEpoch int32 + preferredReadReplica int32 + trigger, dying chan none closeOnce sync.Once topic string @@ -307,6 +412,8 @@ type partitionConsumer struct { fetchSize int32 offset int64 retries int32 + + paused int32 } var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing @@ -344,7 +451,6 @@ func (child *partitionConsumer) dispatcher() { child.broker = nil } - Logger.Printf("consumer/%s/%d finding new broker\n", child.topic, child.partition) if err := child.dispatch(); err != nil { child.sendError(err) child.trigger <- none{} @@ -359,19 +465,38 @@ func (child *partitionConsumer) dispatcher() { close(child.feeder) } +func (child *partitionConsumer) preferredBroker() (*Broker, int32, error) { + if child.preferredReadReplica >= 0 { + broker, err := child.consumer.client.Broker(child.preferredReadReplica) + if err == nil { + return broker, child.leaderEpoch, nil + } + Logger.Printf( + "consumer/%s/%d failed to find active broker for preferred read replica %d - will fallback to leader", + child.topic, child.partition, child.preferredReadReplica) + + // if we couldn't find it, discard the replica preference and trigger a + // metadata refresh whilst falling back to consuming from the leader again + child.preferredReadReplica = invalidPreferredReplicaID + _ = child.consumer.client.RefreshMetadata(child.topic) + } + + // if preferred replica cannot be found fallback to leader + return child.consumer.client.LeaderAndEpoch(child.topic, child.partition) +} + func (child *partitionConsumer) dispatch() error { if err := child.consumer.client.RefreshMetadata(child.topic); err != nil { return err } - var leader *Broker - var err error - if leader, err = child.consumer.client.Leader(child.topic, child.partition); err != nil { + broker, epoch, err := child.preferredBroker() + if err != nil { return err } - child.broker = child.consumer.refBrokerConsumer(leader) - + child.leaderEpoch = epoch + child.broker = child.consumer.refBrokerConsumer(broker) child.broker.input <- child return nil @@ -382,6 +507,9 @@ func (child *partitionConsumer) chooseStartingOffset(offset int64) error { if err != nil { return err } + + child.highWaterMarkOffset = newestOffset + oldestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetOldest) if err != nil { return err @@ -451,9 +579,7 @@ feederLoop: } for i, msg := range msgs { - for _, interceptor := range child.conf.Consumer.Interceptors { - msg.safelyApplyInterceptor(interceptor) - } + child.interceptors(msg) messageSelect: select { case <-child.dying: @@ -467,6 +593,7 @@ feederLoop: child.broker.acks.Done() remainingLoop: for _, msg = range msgs[i:] { + child.interceptors(msg) select { case child.messages <- msg: case <-child.dying: @@ -556,13 +683,9 @@ func (child *partitionConsumer) parseRecords(batch *RecordBatch) ([]*ConsumerMes } func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) { - var ( - metricRegistry = child.conf.MetricRegistry - consumerBatchSizeMetric metrics.Histogram - ) - - if metricRegistry != nil { - consumerBatchSizeMetric = getOrRegisterHistogram("consumer-batch-size", metricRegistry) + var consumerBatchSizeMetric metrics.Histogram + if child.consumer != nil && child.consumer.metricRegistry != nil { + 
consumerBatchSizeMetric = getOrRegisterHistogram("consumer-batch-size", child.consumer.metricRegistry) } // If request was throttled and empty we log and return without error @@ -578,7 +701,7 @@ func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*Consu return nil, ErrIncompleteResponse } - if block.Err != ErrNoError { + if !errors.Is(block.Err, ErrNoError) { return nil, block.Err } @@ -587,7 +710,13 @@ func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*Consu return nil, err } - consumerBatchSizeMetric.Update(int64(nRecs)) + if consumerBatchSizeMetric != nil { + consumerBatchSizeMetric.Update(int64(nRecs)) + } + + if block.PreferredReadReplica != invalidPreferredReplicaID { + child.preferredReadReplica = block.PreferredReadReplica + } if nRecs == 0 { partialTrailingMessage, err := block.isPartial() @@ -611,6 +740,10 @@ func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*Consu child.fetchSize = child.conf.Consumer.Fetch.Max } } + } else if block.LastRecordsBatchOffset != nil && *block.LastRecordsBatchOffset < block.HighWaterMarkOffset { + // check last record offset to avoid getting stuck if the high watermark was not reached + Logger.Printf("consumer/broker/%d received batch with zero records but high watermark was not reached, topic %s, partition %d, offset %d\n", child.broker.broker.ID(), child.topic, child.partition, *block.LastRecordsBatchOffset) + child.offset = *block.LastRecordsBatchOffset + 1 } return nil, nil @@ -696,13 +829,33 @@ func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*Consu return messages, nil } +func (child *partitionConsumer) interceptors(msg *ConsumerMessage) { + for _, interceptor := range child.conf.Consumer.Interceptors { + msg.safelyApplyInterceptor(interceptor) + } +} + +// Pause implements PartitionConsumer. +func (child *partitionConsumer) Pause() { + atomic.StoreInt32(&child.paused, 1) +} + +// Resume implements PartitionConsumer. +func (child *partitionConsumer) Resume() { + atomic.StoreInt32(&child.paused, 0) +} + +// IsPaused implements PartitionConsumer. +func (child *partitionConsumer) IsPaused() bool { + return atomic.LoadInt32(&child.paused) == 1 +} + type brokerConsumer struct { consumer *consumer broker *Broker input chan *partitionConsumer newSubscriptions chan []*partitionConsumer subscriptions map[*partitionConsumer]none - wait chan none acks sync.WaitGroup refs int } @@ -713,7 +866,6 @@ func (c *consumer) newBrokerConsumer(broker *Broker) *brokerConsumer { broker: broker, input: make(chan *partitionConsumer), newSubscriptions: make(chan []*partitionConsumer), - wait: make(chan none), subscriptions: make(map[*partitionConsumer]none), refs: 0, } @@ -727,67 +879,84 @@ func (c *consumer) newBrokerConsumer(broker *Broker) *brokerConsumer { // The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer // goroutine is in the middle of a network request) and batches it up. The main worker goroutine picks // up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give -// it nil if no new subscriptions are available. We also write to `wait` only when new subscriptions is available, -// so the main goroutine can block waiting for work if it has none. +// it nil if no new subscriptions are available.
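+//
+// A rough timeline of the batching behaviour (illustrative sketch, based on
+// the partitionConsumersBatchTimeout of 100ms defined above):
+//
+//	t=0       the first subscription arrives on `input`
+//	t<100ms   further subscriptions are drained into the same batch
+//	t=100ms   the batch is handed to the fetch loop via `newSubscriptions`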
func (bc *brokerConsumer) subscriptionManager() { - var buffer []*partitionConsumer + defer close(bc.newSubscriptions) for { - if len(buffer) > 0 { - select { - case event, ok := <-bc.input: - if !ok { - goto done - } - buffer = append(buffer, event) - case bc.newSubscriptions <- buffer: - buffer = nil - case bc.wait <- none{}: + var partitionConsumers []*partitionConsumer + + // Check for any partition consumer asking to subscribe; if there aren't + // any, trigger the network request (to fetch Kafka messages) by sending "nil" to the + // newSubscriptions channel + select { + case pc, ok := <-bc.input: + if !ok { + return } - } else { + partitionConsumers = append(partitionConsumers, pc) + case bc.newSubscriptions <- nil: + continue + } + + // drain input of any further incoming subscriptions + timer := time.NewTimer(partitionConsumersBatchTimeout) + for batchComplete := false; !batchComplete; { select { - case event, ok := <-bc.input: - if !ok { - goto done - } - buffer = append(buffer, event) - case bc.newSubscriptions <- nil: + case pc := <-bc.input: + partitionConsumers = append(partitionConsumers, pc) + case <-timer.C: + batchComplete = true } } - } + timer.Stop() -done: - close(bc.wait) - if len(buffer) > 0 { - bc.newSubscriptions <- buffer + Logger.Printf( + "consumer/broker/%d accumulated %d new subscriptions\n", + bc.broker.ID(), len(partitionConsumers)) + + bc.newSubscriptions <- partitionConsumers } - close(bc.newSubscriptions) } -//subscriptionConsumer ensures we will get nil right away if no new subscriptions is available +// subscriptionConsumer ensures we will get nil right away if no new subscriptions are available +// this is the main loop that fetches Kafka messages func (bc *brokerConsumer) subscriptionConsumer() { - <-bc.wait // wait for our first piece of work - for newSubscriptions := range bc.newSubscriptions { bc.updateSubscriptions(newSubscriptions) if len(bc.subscriptions) == 0 { // We're about to be shut down or we're about to receive more subscriptions. - // Either way, the signal just hasn't propagated to our goroutine yet. - <-bc.wait + // Take a small nap to avoid burning the CPU.
+ time.Sleep(partitionConsumersBatchTimeout) continue } response, err := bc.fetchNewMessages() - if err != nil { Logger.Printf("consumer/broker/%d disconnecting due to error processing FetchRequest: %s\n", bc.broker.ID(), err) bc.abort(err) return } + // if there is no response, it means that no fetch was made + // so we don't need to handle any response + if response == nil { + continue + } + bc.acks.Add(len(bc.subscriptions)) for child := range bc.subscriptions { + if _, ok := response.Blocks[child.topic]; !ok { + bc.acks.Done() + continue + } + + if _, ok := response.Blocks[child.topic][child.partition]; !ok { + bc.acks.Done() + continue + } + child.feeder <- response } bc.acks.Wait() @@ -813,33 +982,52 @@ func (bc *brokerConsumer) updateSubscriptions(newSubscriptions []*partitionConsu } } -//handleResponses handles the response codes left for us by our subscriptions, and abandons ones that have been closed +// handleResponses handles the response codes left for us by our subscriptions, and abandons ones that have been closed func (bc *brokerConsumer) handleResponses() { for child := range bc.subscriptions { result := child.responseResult child.responseResult = nil - switch result { - case nil: - // no-op - case errTimedOut: + if result == nil { + if preferredBroker, _, err := child.preferredBroker(); err == nil { + if bc.broker.ID() != preferredBroker.ID() { + // not an error but needs redispatching to consume from preferred replica + Logger.Printf( + "consumer/broker/%d abandoned in favor of preferred replica broker/%d\n", + bc.broker.ID(), preferredBroker.ID()) + child.trigger <- none{} + delete(bc.subscriptions, child) + } + } + continue + } + + // Discard any replica preference. + child.preferredReadReplica = invalidPreferredReplicaID + + if errors.Is(result, errTimedOut) { Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because consuming was taking too long\n", bc.broker.ID(), child.topic, child.partition) delete(bc.subscriptions, child) - case ErrOffsetOutOfRange: + } else if errors.Is(result, ErrOffsetOutOfRange) { // there's no point in retrying this it will just fail the same way again // shut it down and force the user to choose what to do child.sendError(result) Logger.Printf("consumer/%s/%d shutting down because %s\n", child.topic, child.partition, result) close(child.trigger) delete(bc.subscriptions, child) - case ErrUnknownTopicOrPartition, ErrNotLeaderForPartition, ErrLeaderNotAvailable, ErrReplicaNotAvailable: + } else if errors.Is(result, ErrUnknownTopicOrPartition) || + errors.Is(result, ErrNotLeaderForPartition) || + errors.Is(result, ErrLeaderNotAvailable) || + errors.Is(result, ErrReplicaNotAvailable) || + errors.Is(result, ErrFencedLeaderEpoch) || + errors.Is(result, ErrUnknownLeaderEpoch) { // not an error, but does need redispatching Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n", bc.broker.ID(), child.topic, child.partition, result) child.trigger <- none{} delete(bc.subscriptions, child) - default: + } else { // dunno, tell the user and try redispatching child.sendError(result) Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n", @@ -861,7 +1049,8 @@ func (bc *brokerConsumer) abort(err error) { for newSubscriptions := range bc.newSubscriptions { if len(newSubscriptions) == 0 { - <-bc.wait + // Take a small nap to avoid burning the CPU.
+ time.Sleep(partitionConsumersBatchTimeout) continue } for _, child := range newSubscriptions { child.sendError(err) child.trigger <- none{} } } } +// fetchNewMessages returns a nil response when no fetch was made; this can occur when +// all partitions are paused func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) { request := &FetchRequest{ MinBytes: bc.consumer.conf.Consumer.Fetch.Min, @@ -907,7 +1098,14 @@ func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) { } for child := range bc.subscriptions { - request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize) + if !child.IsPaused() { + request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize, child.leaderEpoch) + } + } + + // avoid fetching when there are no blocks + if len(request.blocks) == 0 { + return nil, nil } return bc.broker.Fetch(request) diff --git a/vendor/github.com/Shopify/sarama/consumer_group.go b/vendor/github.com/Shopify/sarama/consumer_group.go index fcc5792ea..ecdbcfa68 100644 --- a/vendor/github.com/Shopify/sarama/consumer_group.go +++ b/vendor/github.com/Shopify/sarama/consumer_group.go @@ -7,6 +7,8 @@ import ( "sort" "sync" "time" + + "github.com/rcrowley/go-metrics" ) // ErrClosedConsumerGroup is the error returned when a method is called on a consumer group that has been closed. @@ -28,7 +30,7 @@ type ConsumerGroup interface { // in a separate goroutine which requires it to be thread-safe. Any state must be carefully protected // from concurrent reads/writes. // 4. The session will persist until one of the ConsumeClaim() functions exits. This can be either when the - // parent context is cancelled or when a server-side rebalance cycle is initiated. + // parent context is canceled or when a server-side rebalance cycle is initiated. // 5. Once all the ConsumeClaim() loops have exited, the handler's Cleanup() hook is called // to allow the user to perform any final tasks before a rebalance. // 6. Finally, marked offsets are committed one last time before claims are released. @@ -52,22 +54,46 @@ type ConsumerGroup interface { // Close stops the ConsumerGroup and detaches any running sessions. It is required to call // this function before the object passes out of scope, as it will otherwise leak memory. Close() error + + // Pause suspends fetching from the requested partitions. Future calls to the broker will not return any + // records from these partitions until they have been resumed using Resume()/ResumeAll(). + // Note that this method does not affect partition subscription. + // In particular, it does not cause a group rebalance when automatic assignment is used. + Pause(partitions map[string][]int32) + + // Resume resumes specified partitions which have been paused with Pause()/PauseAll(). + // New calls to the broker will return records from these partitions if there are any to be fetched. + Resume(partitions map[string][]int32) + + // PauseAll suspends fetching from all partitions. Future calls to the broker will not return any + // records from these partitions until they have been resumed using Resume()/ResumeAll(). + // Note that this method does not affect partition subscription. + // In particular, it does not cause a group rebalance when automatic assignment is used. + PauseAll() + + // ResumeAll resumes all partitions which have been paused with Pause()/PauseAll(). + // New calls to the broker will return records from these partitions if there are any to be fetched.
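+ //
+ // A usage sketch (illustrative only; group is assumed to be a ConsumerGroup
+ // and "my-topic" a hypothetical topic):
+ //
+ //	group.Pause(map[string][]int32{"my-topic": {0, 1}}) // pause two partitions
+ //	group.PauseAll()                                    // or pause everything
+ //	defer group.ResumeAll()                             // resume when done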
+ ResumeAll() } type consumerGroup struct { client Client - config *Config - consumer Consumer - groupID string - memberID string - errors chan error + config *Config + consumer Consumer + groupID string + groupInstanceId *string + memberID string + errors chan error - lock sync.Mutex - closed chan none - closeOnce sync.Once + lock sync.Mutex + errorsLock sync.RWMutex + closed chan none + closeOnce sync.Once userData []byte + + metricRegistry metrics.Registry } // NewConsumerGroup creates a new consumer group the given broker addresses and configuration. @@ -100,19 +126,25 @@ func newConsumerGroup(groupID string, client Client) (ConsumerGroup, error) { return nil, ConfigurationError("consumer groups require Version to be >= V0_10_2_0") } - consumer, err := NewConsumerFromClient(client) + consumer, err := newConsumer(client) if err != nil { return nil, err } - return &consumerGroup{ - client: client, - consumer: consumer, - config: config, - groupID: groupID, - errors: make(chan error, config.ChannelBufferSize), - closed: make(chan none), - }, nil + cg := &consumerGroup{ + client: client, + consumer: consumer, + config: config, + groupID: groupID, + errors: make(chan error, config.ChannelBufferSize), + closed: make(chan none), + userData: config.Consumer.Group.Member.UserData, + metricRegistry: newCleanupRegistry(config.MetricRegistry), + } + if client.Config().Consumer.Group.InstanceId != "" && config.Version.IsAtLeast(V2_3_0_0) { + cg.groupInstanceId = &client.Config().Consumer.Group.InstanceId + } + return cg, nil } // Errors implements ConsumerGroup. @@ -128,10 +160,13 @@ func (c *consumerGroup) Close() (err error) { err = e } - // drain errors go func() { + c.errorsLock.Lock() + defer c.errorsLock.Unlock() close(c.errors) }() + + // drain errors for e := range c.errors { err = e } @@ -139,6 +174,8 @@ func (c *consumerGroup) Close() (err error) { if e := c.client.Close(); e != nil { err = e } + + c.metricRegistry.UnregisterAll() }) return } @@ -167,7 +204,7 @@ func (c *consumerGroup) Consume(ctx context.Context, topics []string, handler Co // Init session sess, err := c.newSession(ctx, topics, handler, c.config.Consumer.Group.Rebalance.Retry.Max) - if err == ErrClosedClient { + if errors.Is(err, ErrClosedClient) { return ErrClosedConsumerGroup } else if err != nil { return err @@ -185,6 +222,26 @@ func (c *consumerGroup) Consume(ctx context.Context, topics []string, handler Co return sess.release(true) } +// Pause implements ConsumerGroup. +func (c *consumerGroup) Pause(partitions map[string][]int32) { + c.consumer.Pause(partitions) +} + +// Resume implements ConsumerGroup. +func (c *consumerGroup) Resume(partitions map[string][]int32) { + c.consumer.Resume(partitions) +} + +// PauseAll implements ConsumerGroup. +func (c *consumerGroup) PauseAll() { + c.consumer.PauseAll() +} + +// ResumeAll implements ConsumerGroup. 
+func (c *consumerGroup) ResumeAll() { + c.consumer.ResumeAll() +} + func (c *consumerGroup) retryNewSession(ctx context.Context, topics []string, handler ConsumerGroupHandler, retries int, refreshCoordinator bool) (*consumerGroupSession, error) { select { case <-c.closed: @@ -212,84 +269,147 @@ func (c *consumerGroup) newSession(ctx context.Context, topics []string, handler return c.retryNewSession(ctx, topics, handler, retries, true) } + var ( + metricRegistry = c.metricRegistry + consumerGroupJoinTotal metrics.Counter + consumerGroupJoinFailed metrics.Counter + consumerGroupSyncTotal metrics.Counter + consumerGroupSyncFailed metrics.Counter + ) + + if metricRegistry != nil { + consumerGroupJoinTotal = metrics.GetOrRegisterCounter(fmt.Sprintf("consumer-group-join-total-%s", c.groupID), metricRegistry) + consumerGroupJoinFailed = metrics.GetOrRegisterCounter(fmt.Sprintf("consumer-group-join-failed-%s", c.groupID), metricRegistry) + consumerGroupSyncTotal = metrics.GetOrRegisterCounter(fmt.Sprintf("consumer-group-sync-total-%s", c.groupID), metricRegistry) + consumerGroupSyncFailed = metrics.GetOrRegisterCounter(fmt.Sprintf("consumer-group-sync-failed-%s", c.groupID), metricRegistry) + } + // Join consumer group join, err := c.joinGroupRequest(coordinator, topics) + if consumerGroupJoinTotal != nil { + consumerGroupJoinTotal.Inc(1) + } if err != nil { _ = coordinator.Close() + if consumerGroupJoinFailed != nil { + consumerGroupJoinFailed.Inc(1) + } return nil, err } + if !errors.Is(join.Err, ErrNoError) { + if consumerGroupJoinFailed != nil { + consumerGroupJoinFailed.Inc(1) + } + } switch join.Err { case ErrNoError: c.memberID = join.MemberId - case ErrUnknownMemberId, ErrIllegalGeneration: // reset member ID and retry immediately + case ErrUnknownMemberId, ErrIllegalGeneration: + // reset member ID and retry immediately c.memberID = "" return c.newSession(ctx, topics, handler, retries) - case ErrNotCoordinatorForConsumer: // retry after backoff with coordinator refresh + case ErrNotCoordinatorForConsumer, ErrRebalanceInProgress, ErrOffsetsLoadInProgress: + // retry after backoff if retries <= 0 { return nil, join.Err } - return c.retryNewSession(ctx, topics, handler, retries, true) - case ErrRebalanceInProgress: // retry after backoff - if retries <= 0 { - return nil, join.Err + case ErrMemberIdRequired: + // from JoinGroupRequest v4, if the client starts with an empty member id, + // it needs to get the member id from the response and send another join request to join the group + c.memberID = join.MemberId + return c.retryNewSession(ctx, topics, handler, retries+1 /* keep the retry count unchanged */, false) + case ErrFencedInstancedId: + if c.groupInstanceId != nil { + Logger.Printf("JoinGroup failed: group instance id %s has been fenced\n", *c.groupInstanceId) } - - return c.retryNewSession(ctx, topics, handler, retries, false) + return nil, join.Err default: return nil, join.Err } + var strategy BalanceStrategy + var ok bool + if strategy = c.config.Consumer.Group.Rebalance.Strategy; strategy == nil { + strategy, ok = c.findStrategy(join.GroupProtocol, c.config.Consumer.Group.Rebalance.GroupStrategies) + if !ok { + // this case shouldn't happen in practice, since the leader will choose the protocol + // that all the members support + return nil, fmt.Errorf("unable to find selected strategy: %s", join.GroupProtocol) + } + } + // Prepare distribution plan if we joined as the leader var plan BalanceStrategyPlan + var members map[string]ConsumerGroupMemberMetadata if join.LeaderId == join.MemberId { - members, err :=
join.GetMembers() + members, err = join.GetMembers() if err != nil { return nil, err } - plan, err = c.balance(members) + plan, err = c.balance(strategy, members) if err != nil { return nil, err } } // Sync consumer group - groupRequest, err := c.syncGroupRequest(coordinator, plan, join.GenerationId) + syncGroupResponse, err := c.syncGroupRequest(coordinator, members, plan, join.GenerationId, strategy) + if consumerGroupSyncTotal != nil { + consumerGroupSyncTotal.Inc(1) + } if err != nil { _ = coordinator.Close() + if consumerGroupSyncFailed != nil { + consumerGroupSyncFailed.Inc(1) + } return nil, err } - switch groupRequest.Err { + if !errors.Is(syncGroupResponse.Err, ErrNoError) { + if consumerGroupSyncFailed != nil { + consumerGroupSyncFailed.Inc(1) + } + } + + switch syncGroupResponse.Err { case ErrNoError: - case ErrUnknownMemberId, ErrIllegalGeneration: // reset member ID and retry immediately + case ErrUnknownMemberId, ErrIllegalGeneration: + // reset member ID and retry immediately c.memberID = "" return c.newSession(ctx, topics, handler, retries) - case ErrNotCoordinatorForConsumer: // retry after backoff with coordinator refresh + case ErrNotCoordinatorForConsumer, ErrRebalanceInProgress, ErrOffsetsLoadInProgress: + // retry after backoff if retries <= 0 { - return nil, groupRequest.Err + return nil, syncGroupResponse.Err } - return c.retryNewSession(ctx, topics, handler, retries, true) - case ErrRebalanceInProgress: // retry after backoff - if retries <= 0 { - return nil, groupRequest.Err + case ErrFencedInstancedId: + if c.groupInstanceId != nil { + Logger.Printf("SyncGroup failed: group instance id %s has been fenced\n", *c.groupInstanceId) } - - return c.retryNewSession(ctx, topics, handler, retries, false) + return nil, syncGroupResponse.Err default: - return nil, groupRequest.Err + return nil, syncGroupResponse.Err } // Retrieve and sort claims var claims map[string][]int32 - if len(groupRequest.MemberAssignment) > 0 { - members, err := groupRequest.GetMemberAssignment() + if len(syncGroupResponse.MemberAssignment) > 0 { + members, err := syncGroupResponse.GetMemberAssignment() if err != nil { return nil, err } claims = members.Topics - c.userData = members.UserData + + // in the case of stateful balance strategies, hold on to the returned + // assignment metadata; otherwise, reset the statically defined consumer + // group metadata + if members.UserData != nil { + c.userData = members.UserData + } else { + c.userData = c.config.Consumer.Group.Member.UserData + } for _, partitions := range claims { sort.Sort(int32Slice(partitions)) @@ -310,31 +430,61 @@ func (c *consumerGroup) joinGroupRequest(coordinator *Broker, topics []string) ( req.Version = 1 req.RebalanceTimeout = int32(c.config.Consumer.Group.Rebalance.Timeout / time.Millisecond) } - - // use static user-data if configured, otherwise use consumer-group userdata from the last sync - userData := c.config.Consumer.Group.Member.UserData - if len(userData) == 0 { - userData = c.userData + if c.groupInstanceId != nil { + req.Version = 5 + req.GroupInstanceId = c.groupInstanceId } + meta := &ConsumerGroupMemberMetadata{ Topics: topics, - UserData: userData, + UserData: c.userData, } - strategy := c.config.Consumer.Group.Rebalance.Strategy - if err := req.AddGroupProtocolMetadata(strategy.Name(), meta); err != nil { - return nil, err + var strategy BalanceStrategy + if strategy = c.config.Consumer.Group.Rebalance.Strategy; strategy != nil { + if err := req.AddGroupProtocolMetadata(strategy.Name(), meta); err != nil {
return nil, err + } + } else { + for _, strategy = range c.config.Consumer.Group.Rebalance.GroupStrategies { + if err := req.AddGroupProtocolMetadata(strategy.Name(), meta); err != nil { + return nil, err + } + } } return coordinator.JoinGroup(req) } -func (c *consumerGroup) syncGroupRequest(coordinator *Broker, plan BalanceStrategyPlan, generationID int32) (*SyncGroupResponse, error) { +// findStrategy returns the BalanceStrategy with the specified protocolName +// from the slice provided. +func (c *consumerGroup) findStrategy(name string, groupStrategies []BalanceStrategy) (BalanceStrategy, bool) { + for _, strategy := range groupStrategies { + if strategy.Name() == name { + return strategy, true + } + } + return nil, false +} + +func (c *consumerGroup) syncGroupRequest( + coordinator *Broker, + members map[string]ConsumerGroupMemberMetadata, + plan BalanceStrategyPlan, + generationID int32, + strategy BalanceStrategy, +) (*SyncGroupResponse, error) { req := &SyncGroupRequest{ GroupId: c.groupID, MemberId: c.memberID, GenerationId: generationID, } - strategy := c.config.Consumer.Group.Rebalance.Strategy + + if c.config.Version.IsAtLeast(V2_3_0_0) { + req.Version = 3 + } + if c.groupInstanceId != nil { + req.GroupInstanceId = c.groupInstanceId + } for memberID, topics := range plan { assignment := &ConsumerGroupMemberAssignment{Topics: topics} userDataBytes, err := strategy.AssignmentData(memberID, topics, generationID) @@ -345,7 +495,15 @@ func (c *consumerGroup) syncGroupRequest(coordinator *Broker, plan BalanceStrate if err := req.AddGroupAssignmentMember(memberID, assignment); err != nil { return nil, err } + delete(members, memberID) + } + // add empty assignments for any remaining members + for memberID := range members { + if err := req.AddGroupAssignmentMember(memberID, &ConsumerGroupMemberAssignment{}); err != nil { + return nil, err + } } + return coordinator.SyncGroup(req) } @@ -355,11 +513,15 @@ func (c *consumerGroup) heartbeatRequest(coordinator *Broker, memberID string, g MemberId: memberID, GenerationId: generationID, } + if c.groupInstanceId != nil { + req.Version = 3 + req.GroupInstanceId = c.groupInstanceId + } return coordinator.Heartbeat(req) } -func (c *consumerGroup) balance(members map[string]ConsumerGroupMemberMetadata) (BalanceStrategyPlan, error) { +func (c *consumerGroup) balance(strategy BalanceStrategy, members map[string]ConsumerGroupMemberMetadata) (BalanceStrategyPlan, error) { topics := make(map[string][]int32) for _, meta := range members { for _, topic := range meta.Topics { @@ -375,7 +537,6 @@ func (c *consumerGroup) balance(members map[string]ConsumerGroupMemberMetadata) topics[topic] = partitions } - strategy := c.config.Consumer.Group.Rebalance.Strategy return strategy.Plan(members, topics) } @@ -392,29 +553,37 @@ func (c *consumerGroup) leave() error { return err } - resp, err := coordinator.LeaveGroup(&LeaveGroupRequest{ - GroupId: c.groupID, - MemberId: c.memberID, - }) - if err != nil { - _ = coordinator.Close() - return err - } + // KIP-345: if groupInstanceId is set, do not leave the group when the consumer is closed.
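+ // For reference, a static member is configured roughly as follows
+ // (illustrative sketch; the instance id value is hypothetical):
+ //
+ //	cfg := NewConfig()
+ //	cfg.Version = V2_3_0_0 // Consumer.Group.InstanceId requires >= 2.3.0
+ //	cfg.Consumer.Group.InstanceId = "worker-1"
+ //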
+ // Since we do not discover ApiVersion for brokers, LeaveGroupRequest still uses the old request version for now + if c.groupInstanceId == nil { + resp, err := coordinator.LeaveGroup(&LeaveGroupRequest{ + GroupId: c.groupID, + MemberId: c.memberID, + }) + if err != nil { + _ = coordinator.Close() + return err + } - // Unset memberID - c.memberID = "" + // Unset memberID + c.memberID = "" - // Check response - switch resp.Err { - case ErrRebalanceInProgress, ErrUnknownMemberId, ErrNoError: - return nil - default: - return resp.Err + // Check response + switch resp.Err { + case ErrRebalanceInProgress, ErrUnknownMemberId, ErrNoError: + return nil + default: + return resp.Err + } + } else { + c.memberID = "" } + return nil } func (c *consumerGroup) handleError(err error, topic string, partition int32) { - if _, ok := err.(*ConsumerError); !ok && topic != "" && partition > -1 { + var consumerError *ConsumerError + if ok := errors.As(err, &consumerError); !ok && topic != "" && partition > -1 { err = &ConsumerError{ Topic: topic, Partition: partition, @@ -427,9 +596,11 @@ func (c *consumerGroup) handleError(err error, topic string, partition int32) { return } + c.errorsLock.RLock() + defer c.errorsLock.RUnlock() select { case <-c.closed: - //consumer is closed + // consumer is closed return default: } @@ -442,6 +613,9 @@ func (c *consumerGroup) handleError(err error, topic string, partition int32) { } func (c *consumerGroup) loopCheckPartitionNumbers(topics []string, session *consumerGroupSession) { + if c.config.Metadata.RefreshFrequency == time.Duration(0) { + return + } pause := time.NewTicker(c.config.Metadata.RefreshFrequency) defer session.cancel() defer pause.Stop() @@ -463,7 +637,9 @@ func (c *consumerGroup) loopCheckPartitionNumbers(topics []string, session *cons select { case <-pause.C: case <-session.ctx.Done(): - Logger.Printf("loop check partition number coroutine will exit, topics %s", topics) + Logger.Printf( + "consumergroup/%s loop check partition number coroutine will exit, topics %s\n", + c.groupID, topics) // if session closed by other, should be exited return case <-c.closed: @@ -476,7 +652,9 @@ func (c *consumerGroup) topicToPartitionNumbers(topics []string) (map[string]int topicToPartitionNum := make(map[string]int, len(topics)) for _, topic := range topics { if partitionNum, err := c.client.Partitions(topic); err != nil { - Logger.Printf("Consumer Group topic %s get partition number failed %v", topic, err) + Logger.Printf( + "consumergroup/%s topic %s get partition number failed due to '%v'\n", + c.groupID, topic, err) return nil, err } else { topicToPartitionNum[topic] = len(partitionNum) @@ -549,15 +727,15 @@ type consumerGroupSession struct { } func newConsumerGroupSession(ctx context.Context, parent *consumerGroup, claims map[string][]int32, memberID string, generationID int32, handler ConsumerGroupHandler) (*consumerGroupSession, error) { + // init context + ctx, cancel := context.WithCancel(ctx) + // init offset manager - offsets, err := newOffsetManagerFromClient(parent.groupID, memberID, generationID, parent.client) + offsets, err := newOffsetManagerFromClient(parent.groupID, memberID, generationID, parent.client, cancel) if err != nil { return nil, err } - // init context - ctx, cancel := context.WithCancel(ctx) - // init session sess := &consumerGroupSession{ parent: parent, @@ -722,16 +900,28 @@ func (s *consumerGroupSession) release(withCleanup bool) (err error) { <-s.hbDead }) + Logger.Printf( + "consumergroup/session/%s/%d released\n", + s.MemberID(),
s.GenerationID()) + return } func (s *consumerGroupSession) heartbeatLoop() { defer close(s.hbDead) defer s.cancel() // trigger the end of the session on exit + defer func() { + Logger.Printf( + "consumergroup/session/%s/%d heartbeat loop stopped\n", + s.MemberID(), s.GenerationID()) + }() pause := time.NewTicker(s.parent.config.Consumer.Group.Heartbeat.Interval) defer pause.Stop() + retryBackoff := time.NewTimer(s.parent.config.Metadata.Retry.Backoff) + defer retryBackoff.Stop() + retries := s.parent.config.Metadata.Retry.Max for { coordinator, err := s.parent.client.Coordinator(s.parent.groupID) @@ -740,11 +930,11 @@ func (s *consumerGroupSession) heartbeatLoop() { s.parent.handleError(err, "", -1) return } - + retryBackoff.Reset(s.parent.config.Metadata.Retry.Backoff) select { case <-s.hbDying: return - case <-time.After(s.parent.config.Metadata.Retry.Backoff): + case <-retryBackoff.C: retries-- } continue @@ -766,7 +956,16 @@ func (s *consumerGroupSession) heartbeatLoop() { switch resp.Err { case ErrNoError: retries = s.parent.config.Metadata.Retry.Max - case ErrRebalanceInProgress, ErrUnknownMemberId, ErrIllegalGeneration: + case ErrRebalanceInProgress: + retries = s.parent.config.Metadata.Retry.Max + s.cancel() + case ErrUnknownMemberId, ErrIllegalGeneration: + return + case ErrFencedInstancedId: + if s.parent.groupInstanceId != nil { + Logger.Printf("Heartbeat failed: group instance id %s has been fenced\n", *s.parent.groupInstanceId) + } + s.parent.handleError(resp.Err, "", -1) return default: s.parent.handleError(resp.Err, "", -1) @@ -836,7 +1035,8 @@ type consumerGroupClaim struct { } func newConsumerGroupClaim(sess *consumerGroupSession, topic string, partition int32, offset int64) (*consumerGroupClaim, error) { pcm, err := sess.parent.consumer.ConsumePartition(topic, partition, offset) - if err == ErrOffsetOutOfRange { + + if errors.Is(err, ErrOffsetOutOfRange) && sess.parent.config.Consumer.Group.ResetInvalidOffsets { offset = sess.parent.config.Consumer.Offsets.Initial pcm, err = sess.parent.consumer.ConsumePartition(topic, partition, offset) } diff --git a/vendor/github.com/Shopify/sarama/consumer_group_members.go b/vendor/github.com/Shopify/sarama/consumer_group_members.go index 2d02cc386..3b8ca36f6 100644 --- a/vendor/github.com/Shopify/sarama/consumer_group_members.go +++ b/vendor/github.com/Shopify/sarama/consumer_group_members.go @@ -1,10 +1,14 @@ package sarama -//ConsumerGroupMemberMetadata holds the metadata for consumer group +import "errors" + +// ConsumerGroupMemberMetadata holds the metadata for a consumer group +// https://github.com/apache/kafka/blob/trunk/clients/src/main/resources/common/message/ConsumerProtocolSubscription.json type ConsumerGroupMemberMetadata struct { - Version int16 - Topics []string - UserData []byte + Version int16 + Topics []string + UserData []byte + OwnedPartitions []*OwnedPartition } func (m *ConsumerGroupMemberMetadata) encode(pe packetEncoder) error { @@ -33,11 +37,50 @@ func (m *ConsumerGroupMemberMetadata) decode(pd packetDecoder) (err error) { if m.UserData, err = pd.getBytes(); err != nil { return } + if m.Version >= 1 { + n, err := pd.getArrayLength() + if err != nil { + // permit missing data here in case of misbehaving 3rd party + // clients who incorrectly marked the member metadata as V1 in + // their JoinGroup request + if errors.Is(err, ErrInsufficientData) { + return nil + } + return err + } + if n == 0 { + return nil + } + m.OwnedPartitions = make([]*OwnedPartition, n) + for i := 0; i < n; i++ { + m.OwnedPartitions[i] =
&OwnedPartition{} + if err := m.OwnedPartitions[i].decode(pd); err != nil { + return err + } + } + } + + return nil +} + +type OwnedPartition struct { + Topic string + Partitions []int32 +} + +func (m *OwnedPartition) decode(pd packetDecoder) (err error) { + if m.Topic, err = pd.getString(); err != nil { + return err + } + if m.Partitions, err = pd.getInt32Array(); err != nil { + return err + } return nil } -//ConsumerGroupMemberAssignment holds the member assignment for a consume group +// ConsumerGroupMemberAssignment holds the member assignment for a consumer group +// https://github.com/apache/kafka/blob/trunk/clients/src/main/resources/common/message/ConsumerProtocolAssignment.json type ConsumerGroupMemberAssignment struct { Version int16 Topics map[string][]int32 diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_request.go b/vendor/github.com/Shopify/sarama/consumer_metadata_request.go index e5ebdaef5..5c18e048a 100644 --- a/vendor/github.com/Shopify/sarama/consumer_metadata_request.go +++ b/vendor/github.com/Shopify/sarama/consumer_metadata_request.go @@ -1,6 +1,6 @@ package sarama -//ConsumerMetadataRequest is used for metadata requests +// ConsumerMetadataRequest is used for metadata requests type ConsumerMetadataRequest struct { ConsumerGroup string } diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_response.go b/vendor/github.com/Shopify/sarama/consumer_metadata_response.go index 1b5d00d22..7fe0cf971 100644 --- a/vendor/github.com/Shopify/sarama/consumer_metadata_response.go +++ b/vendor/github.com/Shopify/sarama/consumer_metadata_response.go @@ -5,7 +5,7 @@ import ( "strconv" ) -//ConsumerMetadataResponse holds the response for a consumer group meta data requests +// ConsumerMetadataResponse holds the response for a consumer group metadata request type ConsumerMetadataResponse struct { Err KError Coordinator *Broker diff --git a/vendor/github.com/Shopify/sarama/control_record.go b/vendor/github.com/Shopify/sarama/control_record.go index 9b75ab53b..244a82136 100644 --- a/vendor/github.com/Shopify/sarama/control_record.go +++ b/vendor/github.com/Shopify/sarama/control_record.go @@ -1,14 +1,14 @@ package sarama -//ControlRecordType ... +// ControlRecordType ... type ControlRecordType int const ( - //ControlRecordAbort is a control record for abort + // ControlRecordAbort is a control record for abort ControlRecordAbort ControlRecordType = iota - //ControlRecordCommit is a control record for commit + // ControlRecordCommit is a control record for commit ControlRecordCommit - //ControlRecordUnknown is a control record of unknown type + // ControlRecordUnknown is a control record of unknown type ControlRecordUnknown ) @@ -23,16 +23,6 @@ type ControlRecord struct { func (cr *ControlRecord) decode(key, value packetDecoder) error { var err error - cr.Version, err = value.getInt16() - if err != nil { - return err - } - - cr.CoordinatorEpoch, err = value.getInt32() - if err != nil { - return err - } - // There a version for the value part AND the key part.
And I have no idea if they are supposed to match or not // Either way, all these version can only be 0 for now cr.Version, err = key.getInt16() @@ -55,6 +45,18 @@ func (cr *ControlRecord) decode(key, value packetDecoder) error { // UNKNOWN is used to indicate a control type which the client is not aware of and should be ignored cr.Type = ControlRecordUnknown } + // we want to parse value only if we are decoding control record of known type + if cr.Type != ControlRecordUnknown { + cr.Version, err = value.getInt16() + if err != nil { + return err + } + + cr.CoordinatorEpoch, err = value.getInt32() + if err != nil { + return err + } + } return nil } diff --git a/vendor/github.com/Shopify/sarama/crc32_field.go b/vendor/github.com/Shopify/sarama/crc32_field.go index 38189a3cd..32236e50f 100644 --- a/vendor/github.com/Shopify/sarama/crc32_field.go +++ b/vendor/github.com/Shopify/sarama/crc32_field.go @@ -72,6 +72,7 @@ func (c *crc32Field) check(curOffset int, buf []byte) error { return nil } + func (c *crc32Field) crc(curOffset int, buf []byte) (uint32, error) { var tab *crc32.Table switch c.polynomial { diff --git a/vendor/github.com/Shopify/sarama/create_partitions_response.go b/vendor/github.com/Shopify/sarama/create_partitions_response.go index 12ce78857..235787f13 100644 --- a/vendor/github.com/Shopify/sarama/create_partitions_response.go +++ b/vendor/github.com/Shopify/sarama/create_partitions_response.go @@ -84,6 +84,10 @@ func (t *TopicPartitionError) Error() string { return text } +func (t *TopicPartitionError) Unwrap() error { + return t.Err +} + func (t *TopicPartitionError) encode(pe packetEncoder) error { pe.putInt16(int16(t.Err)) diff --git a/vendor/github.com/Shopify/sarama/create_topics_response.go b/vendor/github.com/Shopify/sarama/create_topics_response.go index 7e1448a66..6b940bff0 100644 --- a/vendor/github.com/Shopify/sarama/create_topics_response.go +++ b/vendor/github.com/Shopify/sarama/create_topics_response.go @@ -98,6 +98,10 @@ func (t *TopicError) Error() string { return text } +func (t *TopicError) Unwrap() error { + return t.Err +} + func (t *TopicError) encode(pe packetEncoder, version int16) error { pe.putInt16(int16(t.Err)) diff --git a/vendor/github.com/Shopify/sarama/decompress.go b/vendor/github.com/Shopify/sarama/decompress.go index e4dc3c185..aa7fb7498 100644 --- a/vendor/github.com/Shopify/sarama/decompress.go +++ b/vendor/github.com/Shopify/sarama/decompress.go @@ -4,11 +4,11 @@ import ( "bytes" "compress/gzip" "fmt" - "io/ioutil" + "io" "sync" snappy "github.com/eapache/go-xerial-snappy" - "github.com/pierrec/lz4" + "github.com/pierrec/lz4/v4" ) var ( @@ -26,37 +26,35 @@ func decompress(cc CompressionCodec, data []byte) ([]byte, error) { case CompressionNone: return data, nil case CompressionGZIP: - var ( - err error - reader *gzip.Reader - readerIntf = gzipReaderPool.Get() - ) - if readerIntf != nil { - reader = readerIntf.(*gzip.Reader) - } else { + var err error + reader, ok := gzipReaderPool.Get().(*gzip.Reader) + if !ok { reader, err = gzip.NewReader(bytes.NewReader(data)) - if err != nil { - return nil, err - } + } else { + err = reader.Reset(bytes.NewReader(data)) } - defer gzipReaderPool.Put(reader) - - if err := reader.Reset(bytes.NewReader(data)); err != nil { + if err != nil { return nil, err } - return ioutil.ReadAll(reader) + defer gzipReaderPool.Put(reader) + + return io.ReadAll(reader) case CompressionSnappy: return snappy.Decode(data) case CompressionLZ4: - reader := lz4ReaderPool.Get().(*lz4.Reader) + reader, ok := 
lz4ReaderPool.Get().(*lz4.Reader) + if !ok { + reader = lz4.NewReader(bytes.NewReader(data)) + } else { + reader.Reset(bytes.NewReader(data)) + } defer lz4ReaderPool.Put(reader) - reader.Reset(bytes.NewReader(data)) - return ioutil.ReadAll(reader) + return io.ReadAll(reader) case CompressionZSTD: - return zstdDecompress(nil, data) + return zstdDecompress(ZstdDecoderParams{}, nil, data) default: return nil, PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", cc)} } diff --git a/vendor/github.com/Shopify/sarama/delete_offsets_request.go b/vendor/github.com/Shopify/sarama/delete_offsets_request.go new file mode 100644 index 000000000..339c7857c --- /dev/null +++ b/vendor/github.com/Shopify/sarama/delete_offsets_request.go @@ -0,0 +1,92 @@ +package sarama + +type DeleteOffsetsRequest struct { + Group string + partitions map[string][]int32 +} + +func (r *DeleteOffsetsRequest) encode(pe packetEncoder) (err error) { + err = pe.putString(r.Group) + if err != nil { + return err + } + + if r.partitions == nil { + pe.putInt32(0) + } else { + if err = pe.putArrayLength(len(r.partitions)); err != nil { + return err + } + } + for topic, partitions := range r.partitions { + err = pe.putString(topic) + if err != nil { + return err + } + err = pe.putInt32Array(partitions) + if err != nil { + return err + } + } + return +} + +func (r *DeleteOffsetsRequest) decode(pd packetDecoder, version int16) (err error) { + r.Group, err = pd.getString() + if err != nil { + return err + } + var partitionCount int + + partitionCount, err = pd.getArrayLength() + if err != nil { + return err + } + + if (partitionCount == 0 && version < 2) || partitionCount < 0 { + return nil + } + + r.partitions = make(map[string][]int32, partitionCount) + for i := 0; i < partitionCount; i++ { + var topic string + topic, err = pd.getString() + if err != nil { + return err + } + + var partitions []int32 + partitions, err = pd.getInt32Array() + if err != nil { + return err + } + + r.partitions[topic] = partitions + } + + return nil +} + +func (r *DeleteOffsetsRequest) key() int16 { + return 47 +} + +func (r *DeleteOffsetsRequest) version() int16 { + return 0 +} + +func (r *DeleteOffsetsRequest) headerVersion() int16 { + return 1 +} + +func (r *DeleteOffsetsRequest) requiredVersion() KafkaVersion { + return V2_4_0_0 +} + +func (r *DeleteOffsetsRequest) AddPartition(topic string, partitionID int32) { + if r.partitions == nil { + r.partitions = make(map[string][]int32) + } + + r.partitions[topic] = append(r.partitions[topic], partitionID) +} diff --git a/vendor/github.com/Shopify/sarama/delete_offsets_response.go b/vendor/github.com/Shopify/sarama/delete_offsets_response.go new file mode 100644 index 000000000..d59ae0f8c --- /dev/null +++ b/vendor/github.com/Shopify/sarama/delete_offsets_response.go @@ -0,0 +1,112 @@ +package sarama + +import ( + "time" +) + +type DeleteOffsetsResponse struct { + // The top-level error code, or 0 if there was no error. + ErrorCode KError + ThrottleTime time.Duration + // The responses for each partition of the topics. 
+ Errors map[string]map[int32]KError +} + +func (r *DeleteOffsetsResponse) AddError(topic string, partition int32, errorCode KError) { + if r.Errors == nil { + r.Errors = make(map[string]map[int32]KError) + } + partitions := r.Errors[topic] + if partitions == nil { + partitions = make(map[int32]KError) + r.Errors[topic] = partitions + } + partitions[partition] = errorCode +} + +func (r *DeleteOffsetsResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(r.ErrorCode)) + pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) + + if err := pe.putArrayLength(len(r.Errors)); err != nil { + return err + } + for topic, partitions := range r.Errors { + if err := pe.putString(topic); err != nil { + return err + } + if err := pe.putArrayLength(len(partitions)); err != nil { + return err + } + for partition, errorCode := range partitions { + pe.putInt32(partition) + pe.putInt16(int16(errorCode)) + } + } + return nil +} + +func (r *DeleteOffsetsResponse) decode(pd packetDecoder, version int16) error { + tmpErr, err := pd.getInt16() + if err != nil { + return err + } + r.ErrorCode = KError(tmpErr) + + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + numTopics, err := pd.getArrayLength() + if err != nil || numTopics == 0 { + return err + } + + r.Errors = make(map[string]map[int32]KError, numTopics) + for i := 0; i < numTopics; i++ { + name, err := pd.getString() + if err != nil { + return err + } + + numErrors, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Errors[name] = make(map[int32]KError, numErrors) + + for j := 0; j < numErrors; j++ { + id, err := pd.getInt32() + if err != nil { + return err + } + + tmp, err := pd.getInt16() + if err != nil { + return err + } + r.Errors[name][id] = KError(tmp) + } + } + + return nil +} + +func (r *DeleteOffsetsResponse) key() int16 { + return 47 +} + +func (r *DeleteOffsetsResponse) version() int16 { + return 0 +} + +func (r *DeleteOffsetsResponse) headerVersion() int16 { + return 0 +} + +func (r *DeleteOffsetsResponse) requiredVersion() KafkaVersion { + return V2_4_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/describe_client_quotas_request.go b/vendor/github.com/Shopify/sarama/describe_client_quotas_request.go new file mode 100644 index 000000000..17a82051c --- /dev/null +++ b/vendor/github.com/Shopify/sarama/describe_client_quotas_request.go @@ -0,0 +1,141 @@ +package sarama + +// DescribeClientQuotas Request (Version: 0) => [components] strict +// components => entity_type match_type match +// entity_type => STRING +// match_type => INT8 +// match => NULLABLE_STRING +// strict => BOOLEAN + +// A filter to be applied to matching client quotas. +// Components: the components to filter on +// Strict: whether the filter only includes specified components +type DescribeClientQuotasRequest struct { + Components []QuotaFilterComponent + Strict bool +} + +// Describe a component for applying a client quota filter. 
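+// For example, a component matching exactly the user "alice" could look like
+// (illustrative sketch; QuotaEntityUser and QuotaMatchExact are the constants
+// from quota_types.go):
+//
+//	QuotaFilterComponent{EntityType: QuotaEntityUser, MatchType: QuotaMatchExact, Match: "alice"}
+//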
+// EntityType: the entity type the filter component applies to ("user", "client-id", "ip") +// MatchType: the match type of the filter component (any, exact, default) +// Match: the name that's matched exactly (used when MatchType is QuotaMatchExact) +type QuotaFilterComponent struct { + EntityType QuotaEntityType + MatchType QuotaMatchType + Match string +} + +func (d *DescribeClientQuotasRequest) encode(pe packetEncoder) error { + // Components + if err := pe.putArrayLength(len(d.Components)); err != nil { + return err + } + for _, c := range d.Components { + if err := c.encode(pe); err != nil { + return err + } + } + + // Strict + pe.putBool(d.Strict) + + return nil +} + +func (d *DescribeClientQuotasRequest) decode(pd packetDecoder, version int16) error { + // Components + componentCount, err := pd.getArrayLength() + if err != nil { + return err + } + if componentCount > 0 { + d.Components = make([]QuotaFilterComponent, componentCount) + for i := range d.Components { + c := QuotaFilterComponent{} + if err = c.decode(pd, version); err != nil { + return err + } + d.Components[i] = c + } + } else { + d.Components = []QuotaFilterComponent{} + } + + // Strict + strict, err := pd.getBool() + if err != nil { + return err + } + d.Strict = strict + + return nil +} + +func (d *QuotaFilterComponent) encode(pe packetEncoder) error { + // EntityType + if err := pe.putString(string(d.EntityType)); err != nil { + return err + } + + // MatchType + pe.putInt8(int8(d.MatchType)) + + // Match + if d.MatchType == QuotaMatchAny { + if err := pe.putNullableString(nil); err != nil { + return err + } + } else if d.MatchType == QuotaMatchDefault { + if err := pe.putString(""); err != nil { + return err + } + } else { + if err := pe.putString(d.Match); err != nil { + return err + } + } + + return nil +} + +func (d *QuotaFilterComponent) decode(pd packetDecoder, version int16) error { + // EntityType + entityType, err := pd.getString() + if err != nil { + return err + } + d.EntityType = QuotaEntityType(entityType) + + // MatchType + matchType, err := pd.getInt8() + if err != nil { + return err + } + d.MatchType = QuotaMatchType(matchType) + + // Match + match, err := pd.getNullableString() + if err != nil { + return err + } + if match != nil { + d.Match = *match + } + return nil +} + +func (d *DescribeClientQuotasRequest) key() int16 { + return 48 +} + +func (d *DescribeClientQuotasRequest) version() int16 { + return 0 +} + +func (d *DescribeClientQuotasRequest) headerVersion() int16 { + return 1 +} + +func (d *DescribeClientQuotasRequest) requiredVersion() KafkaVersion { + return V2_6_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/describe_client_quotas_response.go b/vendor/github.com/Shopify/sarama/describe_client_quotas_response.go new file mode 100644 index 000000000..555da0c48 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/describe_client_quotas_response.go @@ -0,0 +1,235 @@ +package sarama + +import ( + "time" +) + +// DescribeClientQuotas Response (Version: 0) => throttle_time_ms error_code error_message [entries] +// throttle_time_ms => INT32 +// error_code => INT16 +// error_message => NULLABLE_STRING +// entries => [entity] [values] +// entity => entity_type entity_name +// entity_type => STRING +// entity_name => NULLABLE_STRING +// values => key value +// key => STRING +// value => FLOAT64 + +type DescribeClientQuotasResponse struct { + ThrottleTime time.Duration // The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not 
violate any quota. + ErrorCode KError // The error code, or `0` if the quota description succeeded. + ErrorMsg *string // The error message, or `null` if the quota description succeeded. + Entries []DescribeClientQuotasEntry // A result entry. +} + +type DescribeClientQuotasEntry struct { + Entity []QuotaEntityComponent // The quota entity description. + Values map[string]float64 // The quota values for the entity. +} + +type QuotaEntityComponent struct { + EntityType QuotaEntityType + MatchType QuotaMatchType + Name string +} + +func (d *DescribeClientQuotasResponse) encode(pe packetEncoder) error { + // ThrottleTime + pe.putInt32(int32(d.ThrottleTime / time.Millisecond)) + + // ErrorCode + pe.putInt16(int16(d.ErrorCode)) + + // ErrorMsg + if err := pe.putNullableString(d.ErrorMsg); err != nil { + return err + } + + // Entries + if err := pe.putArrayLength(len(d.Entries)); err != nil { + return err + } + for _, e := range d.Entries { + if err := e.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (d *DescribeClientQuotasResponse) decode(pd packetDecoder, version int16) error { + // ThrottleTime + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + // ErrorCode + errCode, err := pd.getInt16() + if err != nil { + return err + } + d.ErrorCode = KError(errCode) + + // ErrorMsg + errMsg, err := pd.getNullableString() + if err != nil { + return err + } + d.ErrorMsg = errMsg + + // Entries + entryCount, err := pd.getArrayLength() + if err != nil { + return err + } + if entryCount > 0 { + d.Entries = make([]DescribeClientQuotasEntry, entryCount) + for i := range d.Entries { + e := DescribeClientQuotasEntry{} + if err = e.decode(pd, version); err != nil { + return err + } + d.Entries[i] = e + } + } else { + d.Entries = []DescribeClientQuotasEntry{} + } + + return nil +} + +func (d *DescribeClientQuotasEntry) encode(pe packetEncoder) error { + // Entity + if err := pe.putArrayLength(len(d.Entity)); err != nil { + return err + } + for _, e := range d.Entity { + if err := e.encode(pe); err != nil { + return err + } + } + + // Values + if err := pe.putArrayLength(len(d.Values)); err != nil { + return err + } + for key, value := range d.Values { + // key + if err := pe.putString(key); err != nil { + return err + } + // value + pe.putFloat64(value) + } + + return nil +} + +func (d *DescribeClientQuotasEntry) decode(pd packetDecoder, version int16) error { + // Entity + componentCount, err := pd.getArrayLength() + if err != nil { + return err + } + if componentCount > 0 { + d.Entity = make([]QuotaEntityComponent, componentCount) + for i := 0; i < componentCount; i++ { + component := QuotaEntityComponent{} + if err := component.decode(pd, version); err != nil { + return err + } + d.Entity[i] = component + } + } else { + d.Entity = []QuotaEntityComponent{} + } + + // Values + valueCount, err := pd.getArrayLength() + if err != nil { + return err + } + if valueCount > 0 { + d.Values = make(map[string]float64, valueCount) + for i := 0; i < valueCount; i++ { + // key + key, err := pd.getString() + if err != nil { + return err + } + // value + value, err := pd.getFloat64() + if err != nil { + return err + } + d.Values[key] = value + } + } else { + d.Values = map[string]float64{} + } + + return nil +} + +func (c *QuotaEntityComponent) encode(pe packetEncoder) error { + // entity_type + if err := pe.putString(string(c.EntityType)); err != nil { + return err + } + // entity_name + if c.MatchType == 
QuotaMatchDefault { + if err := pe.putNullableString(nil); err != nil { + return err + } + } else { + if err := pe.putString(c.Name); err != nil { + return err + } + } + + return nil +} + +func (c *QuotaEntityComponent) decode(pd packetDecoder, version int16) error { + // entity_type + entityType, err := pd.getString() + if err != nil { + return err + } + c.EntityType = QuotaEntityType(entityType) + + // entity_name + entityName, err := pd.getNullableString() + if err != nil { + return err + } + + if entityName == nil { + c.MatchType = QuotaMatchDefault + } else { + c.MatchType = QuotaMatchExact + c.Name = *entityName + } + + return nil +} + +func (d *DescribeClientQuotasResponse) key() int16 { + return 48 +} + +func (d *DescribeClientQuotasResponse) version() int16 { + return 0 +} + +func (d *DescribeClientQuotasResponse) headerVersion() int16 { + return 0 +} + +func (d *DescribeClientQuotasResponse) requiredVersion() KafkaVersion { + return V2_6_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/describe_configs_request.go b/vendor/github.com/Shopify/sarama/describe_configs_request.go index d0c735280..4c3488031 100644 --- a/vendor/github.com/Shopify/sarama/describe_configs_request.go +++ b/vendor/github.com/Shopify/sarama/describe_configs_request.go @@ -61,7 +61,6 @@ func (r *DescribeConfigsRequest) decode(pd packetDecoder, version int16) (err er r.Resources[i].Name = name confLength, err := pd.getArrayLength() - if err != nil { return err } diff --git a/vendor/github.com/Shopify/sarama/describe_configs_response.go b/vendor/github.com/Shopify/sarama/describe_configs_response.go index 063ae9112..4968f4854 100644 --- a/vendor/github.com/Shopify/sarama/describe_configs_response.go +++ b/vendor/github.com/Shopify/sarama/describe_configs_response.go @@ -224,7 +224,7 @@ func (r *ConfigEntry) encode(pe packetEncoder, version int16) (err error) { return nil } -//https://cwiki.apache.org/confluence/display/KAFKA/KIP-226+-+Dynamic+Broker+Configuration +// https://cwiki.apache.org/confluence/display/KAFKA/KIP-226+-+Dynamic+Broker+Configuration func (r *ConfigEntry) decode(pd packetDecoder, version int16) (err error) { if version == 0 { r.Source = SourceUnknown @@ -308,19 +308,19 @@ func (c *ConfigSynonym) encode(pe packetEncoder, version int16) (err error) { func (c *ConfigSynonym) decode(pd packetDecoder, version int16) error { name, err := pd.getString() if err != nil { - return nil + return err } c.ConfigName = name value, err := pd.getString() if err != nil { - return nil + return err } c.ConfigValue = value source, err := pd.getInt8() if err != nil { - return nil + return err } c.Source = ConfigSource(source) return nil diff --git a/vendor/github.com/Shopify/sarama/describe_groups_request.go b/vendor/github.com/Shopify/sarama/describe_groups_request.go index f8962da58..f81f69ac4 100644 --- a/vendor/github.com/Shopify/sarama/describe_groups_request.go +++ b/vendor/github.com/Shopify/sarama/describe_groups_request.go @@ -1,16 +1,33 @@ package sarama type DescribeGroupsRequest struct { - Groups []string + Version int16 + Groups []string + IncludeAuthorizedOperations bool } func (r *DescribeGroupsRequest) encode(pe packetEncoder) error { - return pe.putStringArray(r.Groups) + if err := pe.putStringArray(r.Groups); err != nil { + return err + } + if r.Version >= 3 { + pe.putBool(r.IncludeAuthorizedOperations) + } + return nil } func (r *DescribeGroupsRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version r.Groups, err = pd.getStringArray() - return + if err != nil { 
+ return err + } + if r.Version >= 3 { + if r.IncludeAuthorizedOperations, err = pd.getBool(); err != nil { + return err + } + } + return nil } func (r *DescribeGroupsRequest) key() int16 { @@ -18,7 +35,7 @@ func (r *DescribeGroupsRequest) key() int16 { } func (r *DescribeGroupsRequest) version() int16 { - return 0 + return r.Version } func (r *DescribeGroupsRequest) headerVersion() int16 { @@ -26,6 +43,10 @@ func (r *DescribeGroupsRequest) headerVersion() int16 { } func (r *DescribeGroupsRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 1, 2, 3, 4: + return V2_3_0_0 + } return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/describe_groups_response.go b/vendor/github.com/Shopify/sarama/describe_groups_response.go index bc242e421..09052e431 100644 --- a/vendor/github.com/Shopify/sarama/describe_groups_response.go +++ b/vendor/github.com/Shopify/sarama/describe_groups_response.go @@ -1,16 +1,26 @@ package sarama type DescribeGroupsResponse struct { + // Version defines the protocol version to use for encode and decode + Version int16 + // ThrottleTimeMs contains the duration in milliseconds for which the + // request was throttled due to a quota violation, or zero if the request + // did not violate any quota. + ThrottleTimeMs int32 + // Groups contains each described group. Groups []*GroupDescription } -func (r *DescribeGroupsResponse) encode(pe packetEncoder) error { +func (r *DescribeGroupsResponse) encode(pe packetEncoder) (err error) { + if r.Version >= 1 { + pe.putInt32(r.ThrottleTimeMs) + } if err := pe.putArrayLength(len(r.Groups)); err != nil { return err } - for _, groupDescription := range r.Groups { - if err := groupDescription.encode(pe); err != nil { + for _, block := range r.Groups { + if err := block.encode(pe, r.Version); err != nil { return err } } @@ -19,18 +29,24 @@ func (r *DescribeGroupsResponse) encode(pe packetEncoder) error { } func (r *DescribeGroupsResponse) decode(pd packetDecoder, version int16) (err error) { - n, err := pd.getArrayLength() - if err != nil { - return err - } - - r.Groups = make([]*GroupDescription, n) - for i := 0; i < n; i++ { - r.Groups[i] = new(GroupDescription) - if err := r.Groups[i].decode(pd); err != nil { + r.Version = version + if r.Version >= 1 { + if r.ThrottleTimeMs, err = pd.getInt32(); err != nil { return err } } + if numGroups, err := pd.getArrayLength(); err != nil { + return err + } else if numGroups > 0 { + r.Groups = make([]*GroupDescription, numGroups) + for i := 0; i < numGroups; i++ { + block := &GroupDescription{} + if err := block.decode(pd, r.Version); err != nil { + return err + } + r.Groups[i] = block + } + } return nil } @@ -40,7 +56,7 @@ func (r *DescribeGroupsResponse) key() int16 { } func (r *DescribeGroupsResponse) version() int16 { - return 0 + return r.Version } func (r *DescribeGroupsResponse) headerVersion() int16 { @@ -48,20 +64,39 @@ func (r *DescribeGroupsResponse) headerVersion() int16 { } func (r *DescribeGroupsResponse) requiredVersion() KafkaVersion { + switch r.Version { + case 1, 2, 3, 4: + return V2_3_0_0 + } return V0_9_0_0 } +// GroupDescription contains each described group. type GroupDescription struct { - Err KError - GroupId string - State string + // Version defines the protocol version to use for encode and decode + Version int16 + // Err contains the describe error as the KError type. + Err KError + // ErrorCode contains the describe error, or 0 if there was no error. + ErrorCode int16 + // GroupId contains the group ID string. 
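+	// It is the same group ID that was supplied in
+	// DescribeGroupsRequest.Groups.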
+ GroupId string + // State contains the group state string, or the empty string. + State string + // ProtocolType contains the group protocol type, or the empty string. ProtocolType string - Protocol string - Members map[string]*GroupMemberDescription + // Protocol contains the group protocol data, or the empty string. + Protocol string + // Members contains the group members. + Members map[string]*GroupMemberDescription + // AuthorizedOperations contains a 32-bit bitfield to represent authorized + // operations for this group. + AuthorizedOperations int32 } -func (gd *GroupDescription) encode(pe packetEncoder) error { - pe.putInt16(int16(gd.Err)) +func (gd *GroupDescription) encode(pe packetEncoder, version int16) (err error) { + gd.Version = version + pe.putInt16(gd.ErrorCode) if err := pe.putString(gd.GroupId); err != nil { return err @@ -80,56 +115,55 @@ func (gd *GroupDescription) encode(pe packetEncoder) error { return err } - for memberId, groupMemberDescription := range gd.Members { - if err := pe.putString(memberId); err != nil { - return err - } - if err := groupMemberDescription.encode(pe); err != nil { + for _, block := range gd.Members { + if err := block.encode(pe, gd.Version); err != nil { return err } } + if gd.Version >= 3 { + pe.putInt32(gd.AuthorizedOperations) + } + return nil } -func (gd *GroupDescription) decode(pd packetDecoder) (err error) { - kerr, err := pd.getInt16() - if err != nil { +func (gd *GroupDescription) decode(pd packetDecoder, version int16) (err error) { + gd.Version = version + if gd.ErrorCode, err = pd.getInt16(); err != nil { return err } - gd.Err = KError(kerr) + gd.Err = KError(gd.ErrorCode) if gd.GroupId, err = pd.getString(); err != nil { - return + return err } if gd.State, err = pd.getString(); err != nil { - return + return err } if gd.ProtocolType, err = pd.getString(); err != nil { - return + return err } if gd.Protocol, err = pd.getString(); err != nil { - return - } - - n, err := pd.getArrayLength() - if err != nil { return err } - if n == 0 { - return nil - } - gd.Members = make(map[string]*GroupMemberDescription) - for i := 0; i < n; i++ { - memberId, err := pd.getString() - if err != nil { - return err + if numMembers, err := pd.getArrayLength(); err != nil { + return err + } else if numMembers > 0 { + gd.Members = make(map[string]*GroupMemberDescription, numMembers) + for i := 0; i < numMembers; i++ { + block := &GroupMemberDescription{} + if err := block.decode(pd, gd.Version); err != nil { + return err + } + gd.Members[block.MemberId] = block } + } - gd.Members[memberId] = new(GroupMemberDescription) - if err := gd.Members[memberId].decode(pd); err != nil { + if gd.Version >= 3 { + if gd.AuthorizedOperations, err = pd.getInt32(); err != nil { return err } } @@ -137,14 +171,38 @@ func (gd *GroupDescription) decode(pd packetDecoder) (err error) { return nil } +// GroupMemberDescription contains the group members. type GroupMemberDescription struct { - ClientId string - ClientHost string - MemberMetadata []byte + // Version defines the protocol version to use for encode and decode + Version int16 + // MemberId contains the member ID assigned by the group coordinator. + MemberId string + // GroupInstanceId contains the unique identifier of the consumer instance + // provided by end user. + GroupInstanceId *string + // ClientId contains the client ID used in the member's latest join group + // request. + ClientId string + // ClientHost contains the client host. 
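+	// (typically the host or IP address the member connected from)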
+ ClientHost string + // MemberMetadata contains the metadata corresponding to the current group + // protocol in use. + MemberMetadata []byte + // MemberAssignment contains the current assignment provided by the group + // leader. MemberAssignment []byte } -func (gmd *GroupMemberDescription) encode(pe packetEncoder) error { +func (gmd *GroupMemberDescription) encode(pe packetEncoder, version int16) (err error) { + gmd.Version = version + if err := pe.putString(gmd.MemberId); err != nil { + return err + } + if gmd.Version >= 4 { + if err := pe.putNullableString(gmd.GroupInstanceId); err != nil { + return err + } + } if err := pe.putString(gmd.ClientId); err != nil { return err } @@ -161,31 +219,46 @@ func (gmd *GroupMemberDescription) encode(pe packetEncoder) error { return nil } -func (gmd *GroupMemberDescription) decode(pd packetDecoder) (err error) { +func (gmd *GroupMemberDescription) decode(pd packetDecoder, version int16) (err error) { + gmd.Version = version + if gmd.MemberId, err = pd.getString(); err != nil { + return err + } + if gmd.Version >= 4 { + if gmd.GroupInstanceId, err = pd.getNullableString(); err != nil { + return err + } + } if gmd.ClientId, err = pd.getString(); err != nil { - return + return err } if gmd.ClientHost, err = pd.getString(); err != nil { - return + return err } if gmd.MemberMetadata, err = pd.getBytes(); err != nil { - return + return err } if gmd.MemberAssignment, err = pd.getBytes(); err != nil { - return + return err } return nil } func (gmd *GroupMemberDescription) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) { + if len(gmd.MemberAssignment) == 0 { + return nil, nil + } assignment := new(ConsumerGroupMemberAssignment) - err := decode(gmd.MemberAssignment, assignment) + err := decode(gmd.MemberAssignment, assignment, nil) return assignment, err } func (gmd *GroupMemberDescription) GetMemberMetadata() (*ConsumerGroupMemberMetadata, error) { + if len(gmd.MemberMetadata) == 0 { + return nil, nil + } metadata := new(ConsumerGroupMemberMetadata) - err := decode(gmd.MemberMetadata, metadata) + err := decode(gmd.MemberMetadata, metadata, nil) return metadata, err } diff --git a/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_request.go b/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_request.go new file mode 100644 index 000000000..b5b59404b --- /dev/null +++ b/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_request.go @@ -0,0 +1,70 @@ +package sarama + +// DescribeUserScramCredentialsRequest is a request to get list of SCRAM user names +type DescribeUserScramCredentialsRequest struct { + // Version 0 is currently only supported + Version int16 + + // If this is an empty array, all users will be queried + DescribeUsers []DescribeUserScramCredentialsRequestUser +} + +// DescribeUserScramCredentialsRequestUser is a describe request about specific user name +type DescribeUserScramCredentialsRequestUser struct { + Name string +} + +func (r *DescribeUserScramCredentialsRequest) encode(pe packetEncoder) error { + pe.putCompactArrayLength(len(r.DescribeUsers)) + for _, d := range r.DescribeUsers { + if err := pe.putCompactString(d.Name); err != nil { + return err + } + pe.putEmptyTaggedFieldArray() + } + + pe.putEmptyTaggedFieldArray() + return nil +} + +func (r *DescribeUserScramCredentialsRequest) decode(pd packetDecoder, version int16) error { + n, err := pd.getCompactArrayLength() + if err != nil { + return err + } + if n == -1 { + n = 0 + } + + r.DescribeUsers = 
make([]DescribeUserScramCredentialsRequestUser, n) + for i := 0; i < n; i++ { + r.DescribeUsers[i] = DescribeUserScramCredentialsRequestUser{} + if r.DescribeUsers[i].Name, err = pd.getCompactString(); err != nil { + return err + } + if _, err = pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + + if _, err = pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + return nil +} + +func (r *DescribeUserScramCredentialsRequest) key() int16 { + return 50 +} + +func (r *DescribeUserScramCredentialsRequest) version() int16 { + return r.Version +} + +func (r *DescribeUserScramCredentialsRequest) headerVersion() int16 { + return 2 +} + +func (r *DescribeUserScramCredentialsRequest) requiredVersion() KafkaVersion { + return V2_7_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_response.go b/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_response.go new file mode 100644 index 000000000..2656c2faa --- /dev/null +++ b/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_response.go @@ -0,0 +1,168 @@ +package sarama + +import "time" + +type ScramMechanismType int8 + +const ( + SCRAM_MECHANISM_UNKNOWN ScramMechanismType = iota // 0 + SCRAM_MECHANISM_SHA_256 // 1 + SCRAM_MECHANISM_SHA_512 // 2 +) + +func (s ScramMechanismType) String() string { + switch s { + case 1: + return SASLTypeSCRAMSHA256 + case 2: + return SASLTypeSCRAMSHA512 + default: + return "Unknown" + } +} + +type DescribeUserScramCredentialsResponse struct { + // Version 0 is currently only supported + Version int16 + + ThrottleTime time.Duration + + ErrorCode KError + ErrorMessage *string + + Results []*DescribeUserScramCredentialsResult +} + +type DescribeUserScramCredentialsResult struct { + User string + + ErrorCode KError + ErrorMessage *string + + CredentialInfos []*UserScramCredentialsResponseInfo +} + +type UserScramCredentialsResponseInfo struct { + Mechanism ScramMechanismType + Iterations int32 +} + +func (r *DescribeUserScramCredentialsResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) + + pe.putInt16(int16(r.ErrorCode)) + if err := pe.putNullableCompactString(r.ErrorMessage); err != nil { + return err + } + + pe.putCompactArrayLength(len(r.Results)) + for _, u := range r.Results { + if err := pe.putCompactString(u.User); err != nil { + return err + } + pe.putInt16(int16(u.ErrorCode)) + if err := pe.putNullableCompactString(u.ErrorMessage); err != nil { + return err + } + + pe.putCompactArrayLength(len(u.CredentialInfos)) + for _, c := range u.CredentialInfos { + pe.putInt8(int8(c.Mechanism)) + pe.putInt32(c.Iterations) + pe.putEmptyTaggedFieldArray() + } + + pe.putEmptyTaggedFieldArray() + } + + pe.putEmptyTaggedFieldArray() + return nil +} + +func (r *DescribeUserScramCredentialsResponse) decode(pd packetDecoder, version int16) error { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + kerr, err := pd.getInt16() + if err != nil { + return err + } + + r.ErrorCode = KError(kerr) + if r.ErrorMessage, err = pd.getCompactNullableString(); err != nil { + return err + } + + numUsers, err := pd.getCompactArrayLength() + if err != nil { + return err + } + + if numUsers > 0 { + r.Results = make([]*DescribeUserScramCredentialsResult, numUsers) + for i := 0; i < numUsers; i++ { + r.Results[i] = &DescribeUserScramCredentialsResult{} + if r.Results[i].User, err = pd.getCompactString(); err != nil { + return err + } 
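+			// each per-user result carries its own error code and optional
+			// message, distinct from the top-level ErrorCode and ErrorMessage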
+ + errorCode, err := pd.getInt16() + if err != nil { + return err + } + r.Results[i].ErrorCode = KError(errorCode) + if r.Results[i].ErrorMessage, err = pd.getCompactNullableString(); err != nil { + return err + } + + numCredentialInfos, err := pd.getCompactArrayLength() + if err != nil { + return err + } + + r.Results[i].CredentialInfos = make([]*UserScramCredentialsResponseInfo, numCredentialInfos) + for j := 0; j < numCredentialInfos; j++ { + r.Results[i].CredentialInfos[j] = &UserScramCredentialsResponseInfo{} + scramMechanism, err := pd.getInt8() + if err != nil { + return err + } + r.Results[i].CredentialInfos[j].Mechanism = ScramMechanismType(scramMechanism) + if r.Results[i].CredentialInfos[j].Iterations, err = pd.getInt32(); err != nil { + return err + } + if _, err = pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + + if _, err = pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + } + + if _, err = pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + return nil +} + +func (r *DescribeUserScramCredentialsResponse) key() int16 { + return 50 +} + +func (r *DescribeUserScramCredentialsResponse) version() int16 { + return r.Version +} + +func (r *DescribeUserScramCredentialsResponse) headerVersion() int16 { + return 2 +} + +func (r *DescribeUserScramCredentialsResponse) requiredVersion() KafkaVersion { + return V2_7_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/dev.yml b/vendor/github.com/Shopify/sarama/dev.yml index 6468f1549..e2acb38bb 100644 --- a/vendor/github.com/Shopify/sarama/dev.yml +++ b/vendor/github.com/Shopify/sarama/dev.yml @@ -2,7 +2,7 @@ name: sarama up: - go: - version: '1.14.3' + version: '1.17.6' commands: test: diff --git a/vendor/github.com/Shopify/sarama/docker-compose.yml b/vendor/github.com/Shopify/sarama/docker-compose.yml index 25593fd3b..e1119c87f 100644 --- a/vendor/github.com/Shopify/sarama/docker-compose.yml +++ b/vendor/github.com/Shopify/sarama/docker-compose.yml @@ -1,130 +1,152 @@ version: '3.7' services: zookeeper-1: - image: 'confluentinc/cp-zookeeper:${CONFLUENT_PLATFORM_VERSION:-5.5.0}' + image: 'docker.io/library/zookeeper:3.6.3' restart: always environment: - ZOOKEEPER_SERVER_ID: '1' - ZOOKEEPER_SERVERS: 'zookeeper-1:2888:3888;zookeeper-2:2888:3888;zookeeper-3:2888:3888' - ZOOKEEPER_CLIENT_PORT: '2181' - ZOOKEEPER_PEER_PORT: '2888' - ZOOKEEPER_LEADER_PORT: '3888' - ZOOKEEPER_INIT_LIMIT: '10' - ZOOKEEPER_SYNC_LIMIT: '5' - ZOOKEEPER_MAX_CLIENT_CONNS: '0' + ZOO_MY_ID: '1' + ZOO_SERVERS: 'server.1=zookeeper-1:2888:3888 server.2=zookeeper-2:2888:3888 server.3=zookeeper-3:2888:3888' + ZOO_CFG_EXTRA: 'clientPort=2181 peerPort=2888 leaderPort=3888' + ZOO_INIT_LIMIT: '10' + ZOO_SYNC_LIMIT: '5' + ZOO_MAX_CLIENT_CNXNS: '0' + ZOO_4LW_COMMANDS_WHITELIST: 'mntr,conf,ruok' zookeeper-2: - image: 'confluentinc/cp-zookeeper:${CONFLUENT_PLATFORM_VERSION:-5.5.0}' + image: 'docker.io/library/zookeeper:3.6.3' restart: always environment: - ZOOKEEPER_SERVER_ID: '2' - ZOOKEEPER_SERVERS: 'zookeeper-1:2888:3888;zookeeper-2:2888:3888;zookeeper-3:2888:3888' - ZOOKEEPER_CLIENT_PORT: '2181' - ZOOKEEPER_PEER_PORT: '2888' - ZOOKEEPER_LEADER_PORT: '3888' - ZOOKEEPER_INIT_LIMIT: '10' - ZOOKEEPER_SYNC_LIMIT: '5' - ZOOKEEPER_MAX_CLIENT_CONNS: '0' + ZOO_MY_ID: '2' + ZOO_SERVERS: 'server.1=zookeeper-1:2888:3888 server.2=zookeeper-2:2888:3888 server.3=zookeeper-3:2888:3888' + ZOO_CFG_EXTRA: 'clientPort=2181 peerPort=2888 leaderPort=3888' + ZOO_INIT_LIMIT: '10' + ZOO_SYNC_LIMIT: '5' + ZOO_MAX_CLIENT_CNXNS: '0' + 
ZOO_4LW_COMMANDS_WHITELIST: 'mntr,conf,ruok' zookeeper-3: - image: 'confluentinc/cp-zookeeper:${CONFLUENT_PLATFORM_VERSION:-5.5.0}' + image: 'docker.io/library/zookeeper:3.6.3' restart: always environment: - ZOOKEEPER_SERVER_ID: '3' - ZOOKEEPER_SERVERS: 'zookeeper-1:2888:3888;zookeeper-2:2888:3888;zookeeper-3:2888:3888' - ZOOKEEPER_CLIENT_PORT: '2181' - ZOOKEEPER_PEER_PORT: '2888' - ZOOKEEPER_LEADER_PORT: '3888' - ZOOKEEPER_INIT_LIMIT: '10' - ZOOKEEPER_SYNC_LIMIT: '5' - ZOOKEEPER_MAX_CLIENT_CONNS: '0' + ZOO_MY_ID: '3' + ZOO_SERVERS: 'server.1=zookeeper-1:2888:3888 server.2=zookeeper-2:2888:3888 server.3=zookeeper-3:2888:3888' + ZOO_CFG_EXTRA: 'clientPort=2181 peerPort=2888 leaderPort=3888' + ZOO_INIT_LIMIT: '10' + ZOO_SYNC_LIMIT: '5' + ZOO_MAX_CLIENT_CNXNS: '0' + ZOO_4LW_COMMANDS_WHITELIST: 'mntr,conf,ruok' kafka-1: - image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-5.5.0}' + image: 'sarama/fv-kafka' + build: + context: . + dockerfile: Dockerfile.kafka restart: always environment: - KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' - KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29091' - KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-1:9091,LISTENER_LOCAL://localhost:29091' - KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' - KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' - KAFKA_DEFAULT_REPLICATION_FACTOR: '2' - KAFKA_BROKER_ID: '1' - KAFKA_BROKER_RACK: '1' - KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000' - KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000' - KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' - KAFKA_DELETE_TOPIC_ENABLE: 'true' - KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false' + KAFKA_VERSION: ${KAFKA_VERSION:-3.3.1} + KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' + KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29091' + KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-1:9091,LISTENER_LOCAL://localhost:29091' + KAFKA_CFG_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' + KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' + KAFKA_CFG_DEFAULT_REPLICATION_FACTOR: '2' + KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: '2' + KAFKA_CFG_BROKER_ID: '1' + KAFKA_CFG_BROKER_RACK: '1' + KAFKA_CFG_ZOOKEEPER_SESSION_TIMEOUT_MS: '6000' + KAFKA_CFG_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '6000' + KAFKA_CFG_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' + KAFKA_CFG_DELETE_TOPIC_ENABLE: 'true' + KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'false' kafka-2: - image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-5.5.0}' + image: 'sarama/fv-kafka' + build: + context: . 
+ dockerfile: Dockerfile.kafka restart: always environment: - KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' - KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29092' - KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-2:9091,LISTENER_LOCAL://localhost:29092' - KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' - KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' - KAFKA_DEFAULT_REPLICATION_FACTOR: '2' - KAFKA_BROKER_ID: '2' - KAFKA_BROKER_RACK: '2' - KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000' - KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000' - KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' - KAFKA_DELETE_TOPIC_ENABLE: 'true' - KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false' + KAFKA_VERSION: ${KAFKA_VERSION:-3.3.1} + KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' + KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29092' + KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-2:9091,LISTENER_LOCAL://localhost:29092' + KAFKA_CFG_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' + KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' + KAFKA_CFG_DEFAULT_REPLICATION_FACTOR: '2' + KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: '2' + KAFKA_CFG_BROKER_ID: '2' + KAFKA_CFG_BROKER_RACK: '2' + KAFKA_CFG_ZOOKEEPER_SESSION_TIMEOUT_MS: '6000' + KAFKA_CFG_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '6000' + KAFKA_CFG_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' + KAFKA_CFG_DELETE_TOPIC_ENABLE: 'true' + KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'false' kafka-3: - image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-5.5.0}' + image: 'sarama/fv-kafka' + build: + context: . 
+ dockerfile: Dockerfile.kafka restart: always environment: - KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' - KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29093' - KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-3:9091,LISTENER_LOCAL://localhost:29093' - KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' - KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' - KAFKA_DEFAULT_REPLICATION_FACTOR: '2' - KAFKA_BROKER_ID: '3' - KAFKA_BROKER_RACK: '3' - KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000' - KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000' - KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' - KAFKA_DELETE_TOPIC_ENABLE: 'true' - KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false' + KAFKA_VERSION: ${KAFKA_VERSION:-3.3.1} + KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' + KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29093' + KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-3:9091,LISTENER_LOCAL://localhost:29093' + KAFKA_CFG_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' + KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' + KAFKA_CFG_DEFAULT_REPLICATION_FACTOR: '2' + KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: '2' + KAFKA_CFG_BROKER_ID: '3' + KAFKA_CFG_BROKER_RACK: '3' + KAFKA_CFG_ZOOKEEPER_SESSION_TIMEOUT_MS: '6000' + KAFKA_CFG_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '6000' + KAFKA_CFG_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' + KAFKA_CFG_DELETE_TOPIC_ENABLE: 'true' + KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'false' kafka-4: - image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-5.5.0}' + image: 'sarama/fv-kafka' + build: + context: . 
+ dockerfile: Dockerfile.kafka restart: always environment: - KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' - KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29094' - KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-4:9091,LISTENER_LOCAL://localhost:29094' - KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' - KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' - KAFKA_DEFAULT_REPLICATION_FACTOR: '2' - KAFKA_BROKER_ID: '4' - KAFKA_BROKER_RACK: '4' - KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000' - KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000' - KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' - KAFKA_DELETE_TOPIC_ENABLE: 'true' - KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false' + KAFKA_VERSION: ${KAFKA_VERSION:-3.3.1} + KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' + KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29094' + KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-4:9091,LISTENER_LOCAL://localhost:29094' + KAFKA_CFG_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' + KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' + KAFKA_CFG_DEFAULT_REPLICATION_FACTOR: '2' + KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: '2' + KAFKA_CFG_BROKER_ID: '4' + KAFKA_CFG_BROKER_RACK: '4' + KAFKA_CFG_ZOOKEEPER_SESSION_TIMEOUT_MS: '6000' + KAFKA_CFG_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '6000' + KAFKA_CFG_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' + KAFKA_CFG_DELETE_TOPIC_ENABLE: 'true' + KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'false' kafka-5: - image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-5.5.0}' + image: 'sarama/fv-kafka' + build: + context: . 
+ dockerfile: Dockerfile.kafka restart: always environment: - KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' - KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29095' - KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-5:9091,LISTENER_LOCAL://localhost:29095' - KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' - KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' - KAFKA_DEFAULT_REPLICATION_FACTOR: '2' - KAFKA_BROKER_ID: '5' - KAFKA_BROKER_RACK: '5' - KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000' - KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000' - KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' - KAFKA_DELETE_TOPIC_ENABLE: 'true' - KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false' + KAFKA_VERSION: ${KAFKA_VERSION:-3.3.1} + KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' + KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29095' + KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-5:9091,LISTENER_LOCAL://localhost:29095' + KAFKA_CFG_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' + KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' + KAFKA_CFG_DEFAULT_REPLICATION_FACTOR: '2' + KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: '2' + KAFKA_CFG_BROKER_ID: '5' + KAFKA_CFG_BROKER_RACK: '5' + KAFKA_CFG_ZOOKEEPER_SESSION_TIMEOUT_MS: '6000' + KAFKA_CFG_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '6000' + KAFKA_CFG_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' + KAFKA_CFG_DELETE_TOPIC_ENABLE: 'true' + KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'false' toxiproxy: - image: 'shopify/toxiproxy:2.1.4' + image: 'ghcr.io/shopify/toxiproxy:2.4.0' ports: - # The tests themselves actually start the proies on these ports + # The tests themselves actually start the proxies on these ports - '29091:29091' - '29092:29092' - '29093:29093' diff --git a/vendor/github.com/Shopify/sarama/encoder_decoder.go b/vendor/github.com/Shopify/sarama/encoder_decoder.go index 025bad61f..4ee76c6d7 100644 --- a/vendor/github.com/Shopify/sarama/encoder_decoder.go +++ b/vendor/github.com/Shopify/sarama/encoder_decoder.go @@ -45,7 +45,7 @@ func encode(e encoder, metricRegistry metrics.Registry) ([]byte, error) { return realEnc.raw, nil } -// Decoder is the interface that wraps the basic Decode method. +// decoder is the interface that wraps the basic Decode method. // Anything implementing Decoder can be extracted from bytes using Kafka's encoding rules. type decoder interface { decode(pd packetDecoder) error @@ -55,14 +55,17 @@ type versionedDecoder interface { decode(pd packetDecoder, version int16) error } -// Decode takes bytes and a Decoder and fills the fields of the decoder from the bytes, +// decode takes bytes and a decoder and fills the fields of the decoder from the bytes, // interpreted using Kafka's encoding rules. 
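+// A nil metricRegistry is passed by callers that have no registry to record
+// into (as GetMemberAssignment and GetMemberMetadata in
+// describe_groups_response.go do).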
-func decode(buf []byte, in decoder) error { +func decode(buf []byte, in decoder, metricRegistry metrics.Registry) error { if buf == nil { return nil } - helper := realDecoder{raw: buf} + helper := realDecoder{ + raw: buf, + registry: metricRegistry, + } err := in.decode(&helper) if err != nil { return err @@ -75,19 +78,24 @@ func decode(buf []byte, in decoder) error { return nil } -func versionedDecode(buf []byte, in versionedDecoder, version int16) error { +func versionedDecode(buf []byte, in versionedDecoder, version int16, metricRegistry metrics.Registry) error { if buf == nil { return nil } - helper := realDecoder{raw: buf} + helper := realDecoder{ + raw: buf, + registry: metricRegistry, + } err := in.decode(&helper, version) if err != nil { return err } if helper.off != len(buf) { - return PacketDecodingError{"invalid length"} + return PacketDecodingError{ + Info: fmt.Sprintf("invalid length (off=%d, len=%d)", helper.off, len(buf)), + } } return nil diff --git a/vendor/github.com/Shopify/sarama/end_txn_response.go b/vendor/github.com/Shopify/sarama/end_txn_response.go index 763976726..dd2a04504 100644 --- a/vendor/github.com/Shopify/sarama/end_txn_response.go +++ b/vendor/github.com/Shopify/sarama/end_txn_response.go @@ -32,7 +32,7 @@ func (e *EndTxnResponse) decode(pd packetDecoder, version int16) (err error) { } func (e *EndTxnResponse) key() int16 { - return 25 + return 26 } func (e *EndTxnResponse) version() int16 { diff --git a/vendor/github.com/Shopify/sarama/entrypoint.sh b/vendor/github.com/Shopify/sarama/entrypoint.sh new file mode 100644 index 000000000..8cd2efcb9 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/entrypoint.sh @@ -0,0 +1,26 @@ +#!/bin/bash + +KAFKA_VERSION="${KAFKA_VERSION:-3.3.1}" +KAFKA_HOME="/opt/kafka-${KAFKA_VERSION}" + +if [ ! -d "${KAFKA_HOME}" ]; then + echo 'Error: KAFKA_VERSION '$KAFKA_VERSION' not available in this image at '$KAFKA_HOME + exit 1 +fi + +cd "${KAFKA_HOME}" || exit 1 + +# discard all empty/commented lines +sed -e '/^#/d' -e '/^$/d' -i"" config/server.properties + +# emulate kafka_configure_from_environment_variables from bitnami/bitnami-docker-kafka +for var in "${!KAFKA_CFG_@}"; do + key="$(echo "$var" | sed -e 's/^KAFKA_CFG_//g' -e 's/_/\./g' -e 's/.*/\L&/')" + sed -e '/^'$key'/d' -i"" config/server.properties + value="${!var}" + echo "$key=$value" >>config/server.properties +done + +sort config/server.properties + +exec bin/kafka-server-start.sh config/server.properties diff --git a/vendor/github.com/Shopify/sarama/errors.go b/vendor/github.com/Shopify/sarama/errors.go index ca621b092..27977f166 100644 --- a/vendor/github.com/Shopify/sarama/errors.go +++ b/vendor/github.com/Shopify/sarama/errors.go @@ -3,11 +3,17 @@ package sarama import ( "errors" "fmt" + "strings" + + "github.com/hashicorp/go-multierror" ) // ErrOutOfBrokers is the error returned when the client has run out of brokers to talk to because all of them errored // or otherwise failed to respond. -var ErrOutOfBrokers = errors.New("kafka: client has run out of available brokers to talk to (Is your cluster reachable?)") +var ErrOutOfBrokers = errors.New("kafka: client has run out of available brokers to talk to") + +// ErrBrokerNotFound is the error returned when there's no broker found for the requested ID. +var ErrBrokerNotFound = errors.New("kafka: broker for ID is not found") // ErrClosedClient is the error returned when a method is called on a client that has been closed. 
 var ErrClosedClient = errors.New("kafka: tried to use a client that was closed")
@@ -49,6 +55,90 @@ var ErrControllerNotAvailable = errors.New("kafka: controller is not available")
 // the metadata.
 var ErrNoTopicsToUpdateMetadata = errors.New("kafka: no specific topics to update metadata")
 
+// ErrUnknownScramMechanism is returned when a user tries to AlterUserScramCredentials with an unknown SCRAM mechanism
+var ErrUnknownScramMechanism = errors.New("kafka: unknown SCRAM mechanism provided")
+
+// ErrReassignPartitions is returned when altering partition assignments for a topic fails
+var ErrReassignPartitions = errors.New("failed to reassign partitions for topic")
+
+// ErrDeleteRecords is the type of error returned when deleting the required records fails
+var ErrDeleteRecords = errors.New("kafka server: failed to delete records")
+
+// ErrCreateACLs is the type of error returned when ACL creation failed
+var ErrCreateACLs = errors.New("kafka server: failed to create one or more ACL rules")
+
+// ErrAddPartitionsToTxn is returned when AddPartitionsToTxn failed multiple times
+var ErrAddPartitionsToTxn = errors.New("transaction manager: failed to send partitions to transaction")
+
+// ErrTxnOffsetCommit is returned when TxnOffsetCommit failed multiple times
+var ErrTxnOffsetCommit = errors.New("transaction manager: failed to send offsets to transaction")
+
+// ErrTransactionNotReady is returned when the transaction status is invalid for the current action.
+var ErrTransactionNotReady = errors.New("transaction manager: transaction is not ready")
+
+// ErrNonTransactedProducer is returned when calling BeginTxn, CommitTxn or AbortTxn on a non-transactional producer.
+var ErrNonTransactedProducer = errors.New("transaction manager: you need to add TransactionalID to producer")
+
+// ErrTransitionNotAllowed is returned when a txnmgr state transition is not valid.
+var ErrTransitionNotAllowed = errors.New("transaction manager: invalid transition attempted")
+
+// ErrCannotTransitionNilError is returned when a transition is attempted with a nil error.
+var ErrCannotTransitionNilError = errors.New("transaction manager: cannot transition with a nil error")
+
+// ErrTxnUnableToParseResponse is returned when the response is nil
+var ErrTxnUnableToParseResponse = errors.New("transaction manager: unable to parse response")
+
+// MultiErrorFormat specifies the formatter applied to format multierrors. The
+// default implementation is a condensed version of the hashicorp/go-multierror
+// default one
+var MultiErrorFormat multierror.ErrorFormatFunc = func(es []error) string {
+	if len(es) == 1 {
+		return es[0].Error()
+	}
+
+	points := make([]string, len(es))
+	for i, err := range es {
+		points[i] = fmt.Sprintf("* %s", err)
+	}
+
+	return fmt.Sprintf(
+		"%d errors occurred:\n\t%s\n",
+		len(es), strings.Join(points, "\n\t"))
+}
+
+type sentinelError struct {
+	sentinel error
+	wrapped  error
+}
+
+func (err sentinelError) Error() string {
+	if err.wrapped != nil {
+		return fmt.Sprintf("%s: %v", err.sentinel, err.wrapped)
+	} else {
+		return fmt.Sprintf("%s", err.sentinel)
+	}
+}
+
+func (err sentinelError) Is(target error) bool {
+	return errors.Is(err.sentinel, target) || errors.Is(err.wrapped, target)
+}
+
+func (err sentinelError) Unwrap() error {
+	return err.wrapped
+}
+
+func Wrap(sentinel error, wrapped ...error) sentinelError {
+	return sentinelError{sentinel: sentinel, wrapped: multiError(wrapped...)}
+}
+
+func multiError(wrapped ...error) error {
+	merr := multierror.Append(nil, wrapped...)
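+	// apply the package-level MultiErrorFormat, if one is configured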
+ if MultiErrorFormat != nil { + merr.ErrorFormat = MultiErrorFormat + } + return merr.ErrorOrNil() +} + // PacketEncodingError is returned from a failure while encoding a Kafka packet. This can happen, for example, // if you try to encode a string over 2^15 characters in length, since Kafka's encoding rules do not permit that. type PacketEncodingError struct { @@ -81,44 +171,6 @@ func (err ConfigurationError) Error() string { // See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes type KError int16 -// MultiError is used to contain multi error. -type MultiError struct { - Errors *[]error -} - -func (mErr MultiError) Error() string { - var errString = "" - for _, err := range *mErr.Errors { - errString += err.Error() + "," - } - return errString -} - -func (mErr MultiError) PrettyError() string { - var errString = "" - for _, err := range *mErr.Errors { - errString += err.Error() + "\n" - } - return errString -} - -// ErrDeleteRecords is the type of error returned when fail to delete the required records -type ErrDeleteRecords struct { - MultiError -} - -func (err ErrDeleteRecords) Error() string { - return "kafka server: failed to delete records " + err.MultiError.Error() -} - -type ErrReassignPartitions struct { - MultiError -} - -func (err ErrReassignPartitions) Error() string { - return fmt.Sprintf("failed to reassign partitions for topic: \n%s", err.MultiError.PrettyError()) -} - // Numeric error codes returned by the Kafka server. const ( ErrNoError KError = 0 @@ -205,6 +257,14 @@ const ( ErrPreferredLeaderNotAvailable KError = 80 ErrGroupMaxSizeReached KError = 81 ErrFencedInstancedId KError = 82 + ErrEligibleLeadersNotAvailable KError = 83 + ErrElectionNotNeeded KError = 84 + ErrNoReassignmentInProgress KError = 85 + ErrGroupSubscribedToTopic KError = 86 + ErrInvalidRecord KError = 87 + ErrUnstableOffsetCommit KError = 88 + ErrThrottlingQuotaExceeded KError = 89 + ErrProducerFenced KError = 90 ) func (err KError) Error() string { @@ -214,159 +274,159 @@ func (err KError) Error() string { case ErrNoError: return "kafka server: Not an error, why are you printing me?" case ErrUnknown: - return "kafka server: Unexpected (unknown?) server error." + return "kafka server: Unexpected (unknown?) server error" case ErrOffsetOutOfRange: - return "kafka server: The requested offset is outside the range of offsets maintained by the server for the given topic/partition." + return "kafka server: The requested offset is outside the range of offsets maintained by the server for the given topic/partition" case ErrInvalidMessage: - return "kafka server: Message contents does not match its CRC." + return "kafka server: Message contents does not match its CRC" case ErrUnknownTopicOrPartition: - return "kafka server: Request was for a topic or partition that does not exist on this broker." + return "kafka server: Request was for a topic or partition that does not exist on this broker" case ErrInvalidMessageSize: - return "kafka server: The message has a negative size." + return "kafka server: The message has a negative size" case ErrLeaderNotAvailable: - return "kafka server: In the middle of a leadership election, there is currently no leader for this partition and hence it is unavailable for writes." 
+ return "kafka server: In the middle of a leadership election, there is currently no leader for this partition and hence it is unavailable for writes" case ErrNotLeaderForPartition: - return "kafka server: Tried to send a message to a replica that is not the leader for some partition. Your metadata is out of date." + return "kafka server: Tried to send a message to a replica that is not the leader for some partition. Your metadata is out of date" case ErrRequestTimedOut: - return "kafka server: Request exceeded the user-specified time limit in the request." + return "kafka server: Request exceeded the user-specified time limit in the request" case ErrBrokerNotAvailable: return "kafka server: Broker not available. Not a client facing error, we should never receive this!!!" case ErrReplicaNotAvailable: - return "kafka server: Replica information not available, one or more brokers are down." + return "kafka server: Replica information not available, one or more brokers are down" case ErrMessageSizeTooLarge: - return "kafka server: Message was too large, server rejected it to avoid allocation error." + return "kafka server: Message was too large, server rejected it to avoid allocation error" case ErrStaleControllerEpochCode: - return "kafka server: StaleControllerEpochCode (internal error code for broker-to-broker communication)." + return "kafka server: StaleControllerEpochCode (internal error code for broker-to-broker communication)" case ErrOffsetMetadataTooLarge: - return "kafka server: Specified a string larger than the configured maximum for offset metadata." + return "kafka server: Specified a string larger than the configured maximum for offset metadata" case ErrNetworkException: - return "kafka server: The server disconnected before a response was received." + return "kafka server: The server disconnected before a response was received" case ErrOffsetsLoadInProgress: - return "kafka server: The broker is still loading offsets after a leader change for that offset's topic partition." + return "kafka server: The broker is still loading offsets after a leader change for that offset's topic partition" case ErrConsumerCoordinatorNotAvailable: - return "kafka server: Offset's topic has not yet been created." + return "kafka server: Offset's topic has not yet been created" case ErrNotCoordinatorForConsumer: - return "kafka server: Request was for a consumer group that is not coordinated by this broker." + return "kafka server: Request was for a consumer group that is not coordinated by this broker" case ErrInvalidTopic: - return "kafka server: The request attempted to perform an operation on an invalid topic." + return "kafka server: The request attempted to perform an operation on an invalid topic" case ErrMessageSetSizeTooLarge: - return "kafka server: The request included message batch larger than the configured segment size on the server." + return "kafka server: The request included message batch larger than the configured segment size on the server" case ErrNotEnoughReplicas: - return "kafka server: Messages are rejected since there are fewer in-sync replicas than required." + return "kafka server: Messages are rejected since there are fewer in-sync replicas than required" case ErrNotEnoughReplicasAfterAppend: - return "kafka server: Messages are written to the log, but to fewer in-sync replicas than required." 
+ return "kafka server: Messages are written to the log, but to fewer in-sync replicas than required" case ErrInvalidRequiredAcks: - return "kafka server: The number of required acks is invalid (should be either -1, 0, or 1)." + return "kafka server: The number of required acks is invalid (should be either -1, 0, or 1)" case ErrIllegalGeneration: - return "kafka server: The provided generation id is not the current generation." + return "kafka server: The provided generation id is not the current generation" case ErrInconsistentGroupProtocol: - return "kafka server: The provider group protocol type is incompatible with the other members." + return "kafka server: The provider group protocol type is incompatible with the other members" case ErrInvalidGroupId: - return "kafka server: The provided group id was empty." + return "kafka server: The provided group id was empty" case ErrUnknownMemberId: - return "kafka server: The provided member is not known in the current generation." + return "kafka server: The provided member is not known in the current generation" case ErrInvalidSessionTimeout: - return "kafka server: The provided session timeout is outside the allowed range." + return "kafka server: The provided session timeout is outside the allowed range" case ErrRebalanceInProgress: - return "kafka server: A rebalance for the group is in progress. Please re-join the group." + return "kafka server: A rebalance for the group is in progress. Please re-join the group" case ErrInvalidCommitOffsetSize: - return "kafka server: The provided commit metadata was too large." + return "kafka server: The provided commit metadata was too large" case ErrTopicAuthorizationFailed: - return "kafka server: The client is not authorized to access this topic." + return "kafka server: The client is not authorized to access this topic" case ErrGroupAuthorizationFailed: - return "kafka server: The client is not authorized to access this group." + return "kafka server: The client is not authorized to access this group" case ErrClusterAuthorizationFailed: - return "kafka server: The client is not authorized to send this request type." + return "kafka server: The client is not authorized to send this request type" case ErrInvalidTimestamp: - return "kafka server: The timestamp of the message is out of acceptable range." + return "kafka server: The timestamp of the message is out of acceptable range" case ErrUnsupportedSASLMechanism: - return "kafka server: The broker does not support the requested SASL mechanism." + return "kafka server: The broker does not support the requested SASL mechanism" case ErrIllegalSASLState: - return "kafka server: Request is not valid given the current SASL state." + return "kafka server: Request is not valid given the current SASL state" case ErrUnsupportedVersion: - return "kafka server: The version of API is not supported." + return "kafka server: The version of API is not supported" case ErrTopicAlreadyExists: - return "kafka server: Topic with this name already exists." + return "kafka server: Topic with this name already exists" case ErrInvalidPartitions: - return "kafka server: Number of partitions is invalid." + return "kafka server: Number of partitions is invalid" case ErrInvalidReplicationFactor: - return "kafka server: Replication-factor is invalid." + return "kafka server: Replication-factor is invalid" case ErrInvalidReplicaAssignment: - return "kafka server: Replica assignment is invalid." 
+ return "kafka server: Replica assignment is invalid" case ErrInvalidConfig: - return "kafka server: Configuration is invalid." + return "kafka server: Configuration is invalid" case ErrNotController: - return "kafka server: This is not the correct controller for this cluster." + return "kafka server: This is not the correct controller for this cluster" case ErrInvalidRequest: - return "kafka server: This most likely occurs because of a request being malformed by the client library or the message was sent to an incompatible broker. See the broker logs for more details." + return "kafka server: This most likely occurs because of a request being malformed by the client library or the message was sent to an incompatible broker. See the broker logs for more details" case ErrUnsupportedForMessageFormat: - return "kafka server: The requested operation is not supported by the message format version." + return "kafka server: The requested operation is not supported by the message format version" case ErrPolicyViolation: - return "kafka server: Request parameters do not satisfy the configured policy." + return "kafka server: Request parameters do not satisfy the configured policy" case ErrOutOfOrderSequenceNumber: - return "kafka server: The broker received an out of order sequence number." + return "kafka server: The broker received an out of order sequence number" case ErrDuplicateSequenceNumber: - return "kafka server: The broker received a duplicate sequence number." + return "kafka server: The broker received a duplicate sequence number" case ErrInvalidProducerEpoch: - return "kafka server: Producer attempted an operation with an old epoch." + return "kafka server: Producer attempted an operation with an old epoch" case ErrInvalidTxnState: - return "kafka server: The producer attempted a transactional operation in an invalid state." + return "kafka server: The producer attempted a transactional operation in an invalid state" case ErrInvalidProducerIDMapping: - return "kafka server: The producer attempted to use a producer id which is not currently assigned to its transactional id." + return "kafka server: The producer attempted to use a producer id which is not currently assigned to its transactional id" case ErrInvalidTransactionTimeout: - return "kafka server: The transaction timeout is larger than the maximum value allowed by the broker (as configured by max.transaction.timeout.ms)." + return "kafka server: The transaction timeout is larger than the maximum value allowed by the broker (as configured by max.transaction.timeout.ms)" case ErrConcurrentTransactions: - return "kafka server: The producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing." + return "kafka server: The producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing" case ErrTransactionCoordinatorFenced: - return "kafka server: The transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer." + return "kafka server: The transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer" case ErrTransactionalIDAuthorizationFailed: - return "kafka server: Transactional ID authorization failed." + return "kafka server: Transactional ID authorization failed" case ErrSecurityDisabled: - return "kafka server: Security features are disabled." 
+ return "kafka server: Security features are disabled" case ErrOperationNotAttempted: - return "kafka server: The broker did not attempt to execute this operation." + return "kafka server: The broker did not attempt to execute this operation" case ErrKafkaStorageError: - return "kafka server: Disk error when trying to access log file on the disk." + return "kafka server: Disk error when trying to access log file on the disk" case ErrLogDirNotFound: - return "kafka server: The specified log directory is not found in the broker config." + return "kafka server: The specified log directory is not found in the broker config" case ErrSASLAuthenticationFailed: - return "kafka server: SASL Authentication failed." + return "kafka server: SASL Authentication failed" case ErrUnknownProducerID: - return "kafka server: The broker could not locate the producer metadata associated with the Producer ID." + return "kafka server: The broker could not locate the producer metadata associated with the Producer ID" case ErrReassignmentInProgress: - return "kafka server: A partition reassignment is in progress." + return "kafka server: A partition reassignment is in progress" case ErrDelegationTokenAuthDisabled: - return "kafka server: Delegation Token feature is not enabled." + return "kafka server: Delegation Token feature is not enabled" case ErrDelegationTokenNotFound: - return "kafka server: Delegation Token is not found on server." + return "kafka server: Delegation Token is not found on server" case ErrDelegationTokenOwnerMismatch: - return "kafka server: Specified Principal is not valid Owner/Renewer." + return "kafka server: Specified Principal is not valid Owner/Renewer" case ErrDelegationTokenRequestNotAllowed: - return "kafka server: Delegation Token requests are not allowed on PLAINTEXT/1-way SSL channels and on delegation token authenticated channels." + return "kafka server: Delegation Token requests are not allowed on PLAINTEXT/1-way SSL channels and on delegation token authenticated channels" case ErrDelegationTokenAuthorizationFailed: - return "kafka server: Delegation Token authorization failed." + return "kafka server: Delegation Token authorization failed" case ErrDelegationTokenExpired: - return "kafka server: Delegation Token is expired." + return "kafka server: Delegation Token is expired" case ErrInvalidPrincipalType: - return "kafka server: Supplied principalType is not supported." + return "kafka server: Supplied principalType is not supported" case ErrNonEmptyGroup: - return "kafka server: The group is not empty." + return "kafka server: The group is not empty" case ErrGroupIDNotFound: - return "kafka server: The group id does not exist." + return "kafka server: The group id does not exist" case ErrFetchSessionIDNotFound: - return "kafka server: The fetch session ID was not found." + return "kafka server: The fetch session ID was not found" case ErrInvalidFetchSessionEpoch: - return "kafka server: The fetch session epoch is invalid." + return "kafka server: The fetch session epoch is invalid" case ErrListenerNotFound: - return "kafka server: There is no listener on the leader broker that matches the listener on which metadata request was processed." + return "kafka server: There is no listener on the leader broker that matches the listener on which metadata request was processed" case ErrTopicDeletionDisabled: - return "kafka server: Topic deletion is disabled." 
+ return "kafka server: Topic deletion is disabled" case ErrFencedLeaderEpoch: - return "kafka server: The leader epoch in the request is older than the epoch on the broker." + return "kafka server: The leader epoch in the request is older than the epoch on the broker" case ErrUnknownLeaderEpoch: - return "kafka server: The leader epoch in the request is newer than the epoch on the broker." + return "kafka server: The leader epoch in the request is newer than the epoch on the broker" case ErrUnsupportedCompressionType: - return "kafka server: The requesting client does not support the compression type of given partition." + return "kafka server: The requesting client does not support the compression type of given partition" case ErrStaleBrokerEpoch: return "kafka server: Broker epoch has changed" case ErrOffsetNotAvailable: @@ -376,9 +436,21 @@ func (err KError) Error() string { case ErrPreferredLeaderNotAvailable: return "kafka server: The preferred leader was not available" case ErrGroupMaxSizeReached: - return "kafka server: Consumer group The consumer group has reached its max size. already has the configured maximum number of members." + return "kafka server: Consumer group The consumer group has reached its max size. already has the configured maximum number of members" case ErrFencedInstancedId: - return "kafka server: The broker rejected this static consumer since another consumer with the same group.instance.id has registered with a different member.id." + return "kafka server: The broker rejected this static consumer since another consumer with the same group.instance.id has registered with a different member.id" + case ErrEligibleLeadersNotAvailable: + return "kafka server: Eligible topic partition leaders are not available" + case ErrElectionNotNeeded: + return "kafka server: Leader election not needed for topic partition" + case ErrNoReassignmentInProgress: + return "kafka server: No partition reassignment is in progress" + case ErrGroupSubscribedToTopic: + return "kafka server: Deleting offsets of a topic is forbidden while the consumer group is actively subscribed to it" + case ErrInvalidRecord: + return "kafka server: This record has failed the validation on broker and hence will be rejected" + case ErrUnstableOffsetCommit: + return "kafka server: There are unstable offsets that need to be cleared" } return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err) diff --git a/vendor/github.com/Shopify/sarama/fetch_request.go b/vendor/github.com/Shopify/sarama/fetch_request.go index f893aeff7..26adead4e 100644 --- a/vendor/github.com/Shopify/sarama/fetch_request.go +++ b/vendor/github.com/Shopify/sarama/fetch_request.go @@ -1,11 +1,18 @@ package sarama type fetchRequestBlock struct { - Version int16 + Version int16 + // currentLeaderEpoch contains the current leader epoch of the partition. currentLeaderEpoch int32 - fetchOffset int64 - logStartOffset int64 - maxBytes int32 + // fetchOffset contains the message offset. + fetchOffset int64 + // logStartOffset contains the earliest available offset of the follower + // replica. The field is only used when the request is sent by the + // follower. + logStartOffset int64 + // maxBytes contains the maximum bytes to fetch from this partition. See + // KIP-74 for cases where this limit may not be honored. 
+ maxBytes int32 } func (b *fetchRequestBlock) encode(pe packetEncoder, version int16) error { @@ -46,16 +53,38 @@ func (b *fetchRequestBlock) decode(pd packetDecoder, version int16) (err error) // https://issues.apache.org/jira/browse/KAFKA-2063 for a discussion of the issues leading up to that. The KIP is at // https://cwiki.apache.org/confluence/display/KAFKA/KIP-74%3A+Add+Fetch+Response+Size+Limit+in+Bytes type FetchRequest struct { - MaxWaitTime int32 - MinBytes int32 - MaxBytes int32 - Version int16 - Isolation IsolationLevel - SessionID int32 + // Version defines the protocol version to use for encode and decode + Version int16 + // ReplicaID contains the broker ID of the follower, or -1 if this request + // is from a consumer. + // ReplicaID int32 + // MaxWaitTime contains the maximum time in milliseconds to wait for the response. + MaxWaitTime int32 + // MinBytes contains the minimum bytes to accumulate in the response. + MinBytes int32 + // MaxBytes contains the maximum bytes to fetch. See KIP-74 for cases + // where this limit may not be honored. + MaxBytes int32 + // Isolation controls the visibility of + // transactional records. Using READ_UNCOMMITTED (isolation_level = 0) + // makes all records visible. With READ_COMMITTED (isolation_level = 1), + // non-transactional and COMMITTED transactional records are visible. To be + // more concrete, READ_COMMITTED returns all data from offsets smaller than + // the current LSO (last stable offset), and enables the inclusion of the + // list of aborted transactions in the result, which allows consumers to + // discard ABORTED transactional records. + Isolation IsolationLevel + // SessionID contains the fetch session ID. + SessionID int32 + // SessionEpoch contains the epoch of the partition leader as known to the + // follower replica or a consumer. SessionEpoch int32 - blocks map[string]map[int32]*fetchRequestBlock - forgotten map[string][]int32 - RackID string + // blocks contains the topics to fetch. + blocks map[string]map[int32]*fetchRequestBlock + // forgotten contains, in an incremental fetch request, the partitions to remove.
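The Isolation doc comment above corresponds to sarama's public consumer setting; a minimal sketch of opting into READ_COMMITTED from application code (hypothetical usage, not part of this patch):

package main

import "github.com/Shopify/sarama"

// newReadCommittedConfig builds a consumer config that only sees records
// below the last stable offset, matching the READ_COMMITTED semantics
// described in the comment above.
func newReadCommittedConfig() *sarama.Config {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V0_11_0_0 // transactional records (and the LSO) need v0.11+
	cfg.Consumer.IsolationLevel = sarama.ReadCommitted
	return cfg
}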
+ forgotten map[string][]int32 + // RackID contains the rack ID of the consumer making this request. + RackID string } type IsolationLevel int8 @@ -66,7 +95,9 @@ const ( ) func (r *FetchRequest) encode(pe packetEncoder) (err error) { - pe.putInt32(-1) // replica ID is always -1 for clients + metricRegistry := pe.metricRegistry() + + pe.putInt32(-1) // ReplicaID is always -1 for clients pe.putInt32(r.MaxWaitTime) pe.putInt32(r.MinBytes) if r.Version >= 3 { @@ -99,6 +130,7 @@ func (r *FetchRequest) encode(pe packetEncoder) (err error) { return err } } + getOrRegisterTopicMeter("consumer-fetch-rate", topic, metricRegistry).Mark(1) } if r.Version >= 7 { err = pe.putArrayLength(len(r.forgotten)) @@ -270,7 +302,7 @@ func (r *FetchRequest) requiredVersion() KafkaVersion { } } -func (r *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int64, maxBytes int32) { +func (r *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int64, maxBytes int32, leaderEpoch int32) { if r.blocks == nil { r.blocks = make(map[string]map[int32]*fetchRequestBlock) } @@ -288,7 +320,7 @@ func (r *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int tmp.maxBytes = maxBytes tmp.fetchOffset = fetchOffset if r.Version >= 9 { - tmp.currentLeaderEpoch = int32(-1) + tmp.currentLeaderEpoch = leaderEpoch } r.blocks[topic][partitionID] = tmp diff --git a/vendor/github.com/Shopify/sarama/fetch_response.go b/vendor/github.com/Shopify/sarama/fetch_response.go index ca6d78832..3d449c85e 100644 --- a/vendor/github.com/Shopify/sarama/fetch_response.go +++ b/vendor/github.com/Shopify/sarama/fetch_response.go @@ -1,12 +1,22 @@ package sarama import ( + "errors" "sort" "time" + + "github.com/rcrowley/go-metrics" +) + +const ( + invalidLeaderEpoch = -1 + invalidPreferredReplicaID = -1 ) type AbortedTransaction struct { - ProducerID int64 + // ProducerID contains the producer id associated with the aborted transaction. + ProducerID int64 + // FirstOffset contains the first offset in the aborted transaction. FirstOffset int64 } @@ -30,18 +40,37 @@ func (t *AbortedTransaction) encode(pe packetEncoder) (err error) { } type FetchResponseBlock struct { - Err KError - HighWaterMarkOffset int64 - LastStableOffset int64 - LogStartOffset int64 - AbortedTransactions []*AbortedTransaction + // Err contains the error code, or 0 if there was no fetch error. + Err KError + // HighWaterMarkOffset contains the current high water mark. + HighWaterMarkOffset int64 + // LastStableOffset contains the last stable offset (or LSO) of the + // partition. This is the last offset such that the state of all + // transactional records prior to this offset has been decided (ABORTED or + // COMMITTED). + LastStableOffset int64 + LastRecordsBatchOffset *int64 + // LogStartOffset contains the current log start offset. + LogStartOffset int64 + // AbortedTransactions contains the aborted transactions. + AbortedTransactions []*AbortedTransaction + // PreferredReadReplica contains the preferred read replica for the + // consumer to use on its next fetch request. PreferredReadReplica int32 - Records *Records // deprecated: use FetchResponseBlock.RecordsSet - RecordsSet []*Records - Partial bool + // RecordsSet contains the record data.
+ RecordsSet []*Records + + Partial bool + Records *Records // deprecated: use FetchResponseBlock.RecordsSet } func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error) { + metricRegistry := pd.metricRegistry() + var sizeMetric metrics.Histogram + if metricRegistry != nil { + sizeMetric = getOrRegisterHistogram("consumer-fetch-response-size", metricRegistry) + } + tmp, err := pd.getInt16() if err != nil { return err @@ -89,12 +118,17 @@ func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error) if err != nil { return err } + } else { + b.PreferredReadReplica = -1 } recordsSize, err := pd.getInt32() if err != nil { return err } + if sizeMetric != nil { + sizeMetric.Update(int64(recordsSize)) + } recordsDecoder, err := pd.getSubset(int(recordsSize)) if err != nil { @@ -107,7 +141,7 @@ func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error) records := &Records{} if err := records.decode(recordsDecoder); err != nil { // If we have at least one decoded records, this is not an error - if err == ErrInsufficientData { + if errors.Is(err, ErrInsufficientData) { if len(b.RecordsSet) == 0 { b.Partial = true } @@ -116,6 +150,11 @@ func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error) return err } + b.LastRecordsBatchOffset, err = records.recordsOffset() + if err != nil { + return err + } + partial, err := records.isPartial() if err != nil { return err @@ -222,11 +261,19 @@ func (b *FetchResponseBlock) getAbortedTransactions() []*AbortedTransaction { } type FetchResponse struct { - Blocks map[string]map[int32]*FetchResponseBlock - ThrottleTime time.Duration - ErrorCode int16 - SessionID int32 - Version int16 + // Version defines the protocol version to use for encode and decode + Version int16 + // ThrottleTime contains the duration in milliseconds for which the request + // was throttled due to a quota violation, or zero if the request did not + // violate any quota. + ThrottleTime time.Duration + // ErrorCode contains the top level response error code. + ErrorCode int16 + // SessionID contains the fetch session ID, or 0 if this is not part of a fetch session. + SessionID int32 + // Blocks contains the response topics. 
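The consumer-fetch-rate meter and consumer-fetch-response-size histogram registered in the hunks above are plain go-metrics instruments; a sketch of reading them back out of the client's registry (hypothetical code, assuming the default Config.MetricRegistry):

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
	"github.com/rcrowley/go-metrics"
)

// dumpFetchMetrics prints whatever fetch instruments the client has registered.
func dumpFetchMetrics(cfg *sarama.Config) {
	cfg.MetricRegistry.Each(func(name string, m interface{}) {
		switch v := m.(type) {
		case metrics.Histogram:
			fmt.Printf("%s: mean=%.0f bytes\n", name, v.Mean())
		case metrics.Meter:
			fmt.Printf("%s: 1m rate=%.2f/s\n", name, v.Rate1())
		}
	})
}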
+ Blocks map[string]map[int32]*FetchResponseBlock + LogAppendTime bool Timestamp time.Time } diff --git a/vendor/github.com/Shopify/sarama/gssapi_kerberos.go b/vendor/github.com/Shopify/sarama/gssapi_kerberos.go index 1993fc08f..ab8b70196 100644 --- a/vendor/github.com/Shopify/sarama/gssapi_kerberos.go +++ b/vendor/github.com/Shopify/sarama/gssapi_kerberos.go @@ -2,18 +2,20 @@ package sarama import ( "encoding/binary" + "errors" "fmt" "io" + "math" "strings" "time" "github.com/jcmturner/gofork/encoding/asn1" - "gopkg.in/jcmturner/gokrb5.v7/asn1tools" - "gopkg.in/jcmturner/gokrb5.v7/gssapi" - "gopkg.in/jcmturner/gokrb5.v7/iana/chksumtype" - "gopkg.in/jcmturner/gokrb5.v7/iana/keyusage" - "gopkg.in/jcmturner/gokrb5.v7/messages" - "gopkg.in/jcmturner/gokrb5.v7/types" + "github.com/jcmturner/gokrb5/v8/asn1tools" + "github.com/jcmturner/gokrb5/v8/gssapi" + "github.com/jcmturner/gokrb5/v8/iana/chksumtype" + "github.com/jcmturner/gokrb5/v8/iana/keyusage" + "github.com/jcmturner/gokrb5/v8/messages" + "github.com/jcmturner/gokrb5/v8/types" ) const ( @@ -53,15 +55,14 @@ type KerberosClient interface { Destroy() } -/* -* -* Appends length in big endian before payload, and send it to kafka -* - */ - +// writePackage appends length in big endian before the payload, and sends it to kafka func (krbAuth *GSSAPIKerberosAuth) writePackage(broker *Broker, payload []byte) (int, error) { - length := len(payload) - finalPackage := make([]byte, length+4) //4 byte length header + payload + length := uint64(len(payload)) + size := length + 4 // 4 byte length header + payload + if size > math.MaxInt32 { + return 0, errors.New("payload too large, will overflow int32") + } + finalPackage := make([]byte, size) copy(finalPackage[4:], payload) binary.BigEndian.PutUint32(finalPackage, uint32(length)) bytes, err := broker.conn.Write(finalPackage) @@ -71,12 +72,7 @@ func (krbAuth *GSSAPIKerberosAuth) writePackage(broker *Broker, payload []byte) return bytes, nil } -/* -* -* Read length (4 bytes) and then read the payload -* - */ - +// readPackage reads payload length (4 bytes) and then reads the payload into []byte func (krbAuth *GSSAPIKerberosAuth) readPackage(broker *Broker) ([]byte, int, error) { bytesRead := 0 lengthInBytes := make([]byte, 4) @@ -154,7 +150,7 @@ func (krbAuth *GSSAPIKerberosAuth) createKrb5Token( * */ func (krbAuth *GSSAPIKerberosAuth) appendGSSAPIHeader(payload []byte) ([]byte, error) { - oidBytes, err := asn1.Marshal(gssapi.OID(gssapi.OIDKRB5)) + oidBytes, err := asn1.Marshal(gssapi.OIDKRB5.OID()) if err != nil { return nil, err } @@ -219,7 +215,6 @@ func (krbAuth *GSSAPIKerberosAuth) Authorize(broker *Broker) error { spn := fmt.Sprintf("%s/%s", broker.conf.Net.SASL.GSSAPI.ServiceName, host) ticket, encKey, err := kerberosClient.GetServiceTicket(spn) - if err != nil { Logger.Printf("Error getting Kerberos service ticket : %s", err) return err diff --git a/vendor/github.com/Shopify/sarama/heartbeat_request.go b/vendor/github.com/Shopify/sarama/heartbeat_request.go index e9d9af191..511910e71 100644 --- a/vendor/github.com/Shopify/sarama/heartbeat_request.go +++ b/vendor/github.com/Shopify/sarama/heartbeat_request.go @@ -1,9 +1,11 @@ package sarama type HeartbeatRequest struct { - GroupId string - GenerationId int32 - MemberId string + Version int16 + GroupId string + GenerationId int32 + MemberId string + GroupInstanceId *string } func (r *HeartbeatRequest) encode(pe packetEncoder) error { @@ -17,10 +19,17 @@ func (r *HeartbeatRequest) encode(pe packetEncoder) error { return err } + if r.Version >= 3 { 
+ if err := pe.putNullableString(r.GroupInstanceId); err != nil { + return err + } + } + return nil } func (r *HeartbeatRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version if r.GroupId, err = pd.getString(); err != nil { return } @@ -30,6 +39,11 @@ func (r *HeartbeatRequest) decode(pd packetDecoder, version int16) (err error) { if r.MemberId, err = pd.getString(); err != nil { return } + if r.Version >= 3 { + if r.GroupInstanceId, err = pd.getNullableString(); err != nil { + return + } + } return nil } @@ -39,7 +53,7 @@ func (r *HeartbeatRequest) key() int16 { } func (r *HeartbeatRequest) version() int16 { - return 0 + return r.Version } func (r *HeartbeatRequest) headerVersion() int16 { @@ -47,5 +61,9 @@ func (r *HeartbeatRequest) headerVersion() int16 { } func (r *HeartbeatRequest) requiredVersion() KafkaVersion { + switch { + case r.Version >= 3: + return V2_3_0_0 + } return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/heartbeat_response.go b/vendor/github.com/Shopify/sarama/heartbeat_response.go index 577ab72e5..95ef97f47 100644 --- a/vendor/github.com/Shopify/sarama/heartbeat_response.go +++ b/vendor/github.com/Shopify/sarama/heartbeat_response.go @@ -1,15 +1,27 @@ package sarama type HeartbeatResponse struct { - Err KError + Version int16 + ThrottleTime int32 + Err KError } func (r *HeartbeatResponse) encode(pe packetEncoder) error { + if r.Version >= 1 { + pe.putInt32(r.ThrottleTime) + } pe.putInt16(int16(r.Err)) return nil } func (r *HeartbeatResponse) decode(pd packetDecoder, version int16) error { + var err error + r.Version = version + if r.Version >= 1 { + if r.ThrottleTime, err = pd.getInt32(); err != nil { + return err + } + } kerr, err := pd.getInt16() if err != nil { return err @@ -24,7 +36,7 @@ func (r *HeartbeatResponse) key() int16 { } func (r *HeartbeatResponse) version() int16 { - return 0 + return r.Version } func (r *HeartbeatResponse) headerVersion() int16 { @@ -32,5 +44,9 @@ func (r *HeartbeatResponse) headerVersion() int16 { } func (r *HeartbeatResponse) requiredVersion() KafkaVersion { + switch r.Version { + case 1, 2, 3: + return V2_3_0_0 + } return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/incremental_alter_configs_request.go b/vendor/github.com/Shopify/sarama/incremental_alter_configs_request.go new file mode 100644 index 000000000..c4d05a972 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/incremental_alter_configs_request.go @@ -0,0 +1,173 @@ +package sarama + +type IncrementalAlterConfigsOperation int8 + +const ( + IncrementalAlterConfigsOperationSet IncrementalAlterConfigsOperation = iota + IncrementalAlterConfigsOperationDelete + IncrementalAlterConfigsOperationAppend + IncrementalAlterConfigsOperationSubtract +) + +// IncrementalAlterConfigsRequest is an incremental alter config request type +type IncrementalAlterConfigsRequest struct { + Resources []*IncrementalAlterConfigsResource + ValidateOnly bool +} + +type IncrementalAlterConfigsResource struct { + Type ConfigResourceType + Name string + ConfigEntries map[string]IncrementalAlterConfigsEntry +} + +type IncrementalAlterConfigsEntry struct { + Operation IncrementalAlterConfigsOperation + Value *string +} + +func (a *IncrementalAlterConfigsRequest) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(a.Resources)); err != nil { + return err + } + + for _, r := range a.Resources { + if err := r.encode(pe); err != nil { + return err + } + } + + pe.putBool(a.ValidateOnly) + return nil +} + +func (a 
*IncrementalAlterConfigsRequest) decode(pd packetDecoder, version int16) error { + resourceCount, err := pd.getArrayLength() + if err != nil { + return err + } + + a.Resources = make([]*IncrementalAlterConfigsResource, resourceCount) + for i := range a.Resources { + r := &IncrementalAlterConfigsResource{} + err = r.decode(pd, version) + if err != nil { + return err + } + a.Resources[i] = r + } + + validateOnly, err := pd.getBool() + if err != nil { + return err + } + + a.ValidateOnly = validateOnly + + return nil +} + +func (a *IncrementalAlterConfigsResource) encode(pe packetEncoder) error { + pe.putInt8(int8(a.Type)) + + if err := pe.putString(a.Name); err != nil { + return err + } + + if err := pe.putArrayLength(len(a.ConfigEntries)); err != nil { + return err + } + + for name, e := range a.ConfigEntries { + if err := pe.putString(name); err != nil { + return err + } + + if err := e.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (a *IncrementalAlterConfigsResource) decode(pd packetDecoder, version int16) error { + t, err := pd.getInt8() + if err != nil { + return err + } + a.Type = ConfigResourceType(t) + + name, err := pd.getString() + if err != nil { + return err + } + a.Name = name + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + if n > 0 { + a.ConfigEntries = make(map[string]IncrementalAlterConfigsEntry, n) + for i := 0; i < n; i++ { + name, err := pd.getString() + if err != nil { + return err + } + + var v IncrementalAlterConfigsEntry + + if err := v.decode(pd, version); err != nil { + return err + } + + a.ConfigEntries[name] = v + } + } + return err +} + +func (a *IncrementalAlterConfigsEntry) encode(pe packetEncoder) error { + pe.putInt8(int8(a.Operation)) + + if err := pe.putNullableString(a.Value); err != nil { + return err + } + + return nil +} + +func (a *IncrementalAlterConfigsEntry) decode(pd packetDecoder, version int16) error { + t, err := pd.getInt8() + if err != nil { + return err + } + a.Operation = IncrementalAlterConfigsOperation(t) + + s, err := pd.getNullableString() + if err != nil { + return err + } + + a.Value = s + + return nil +} + +func (a *IncrementalAlterConfigsRequest) key() int16 { + return 44 +} + +func (a *IncrementalAlterConfigsRequest) version() int16 { + return 0 +} + +func (a *IncrementalAlterConfigsRequest) headerVersion() int16 { + return 1 +} + +func (a *IncrementalAlterConfigsRequest) requiredVersion() KafkaVersion { + return V2_3_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/incremental_alter_configs_response.go b/vendor/github.com/Shopify/sarama/incremental_alter_configs_response.go new file mode 100644 index 000000000..3e8c4500c --- /dev/null +++ b/vendor/github.com/Shopify/sarama/incremental_alter_configs_response.go @@ -0,0 +1,66 @@ +package sarama + +import "time" + +// IncrementalAlterConfigsResponse is a response type for incremental alter config +type IncrementalAlterConfigsResponse struct { + ThrottleTime time.Duration + Resources []*AlterConfigsResourceResponse +} + +func (a *IncrementalAlterConfigsResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(a.ThrottleTime / time.Millisecond)) + + if err := pe.putArrayLength(len(a.Resources)); err != nil { + return err + } + + for _, v := range a.Resources { + if err := v.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (a *IncrementalAlterConfigsResponse) decode(pd packetDecoder, version int16) error { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + a.ThrottleTime = 
time.Duration(throttleTime) * time.Millisecond + + responseCount, err := pd.getArrayLength() + if err != nil { + return err + } + + a.Resources = make([]*AlterConfigsResourceResponse, responseCount) + + for i := range a.Resources { + a.Resources[i] = new(AlterConfigsResourceResponse) + + if err := a.Resources[i].decode(pd, version); err != nil { + return err + } + } + + return nil +} + +func (a *IncrementalAlterConfigsResponse) key() int16 { + return 44 +} + +func (a *IncrementalAlterConfigsResponse) version() int16 { + return 0 +} + +func (a *IncrementalAlterConfigsResponse) headerVersion() int16 { + return 0 +} + +func (a *IncrementalAlterConfigsResponse) requiredVersion() KafkaVersion { + return V2_3_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_request.go b/vendor/github.com/Shopify/sarama/init_producer_id_request.go index 689444397..33ce5fa41 100644 --- a/vendor/github.com/Shopify/sarama/init_producer_id_request.go +++ b/vendor/github.com/Shopify/sarama/init_producer_id_request.go @@ -3,22 +3,45 @@ package sarama import "time" type InitProducerIDRequest struct { + Version int16 TransactionalID *string TransactionTimeout time.Duration + ProducerID int64 + ProducerEpoch int16 } func (i *InitProducerIDRequest) encode(pe packetEncoder) error { - if err := pe.putNullableString(i.TransactionalID); err != nil { - return err + if i.Version < 2 { + if err := pe.putNullableString(i.TransactionalID); err != nil { + return err + } + } else { + if err := pe.putNullableCompactString(i.TransactionalID); err != nil { + return err + } } pe.putInt32(int32(i.TransactionTimeout / time.Millisecond)) + if i.Version >= 3 { + pe.putInt64(i.ProducerID) + pe.putInt16(i.ProducerEpoch) + } + if i.Version >= 2 { + pe.putEmptyTaggedFieldArray() + } return nil } func (i *InitProducerIDRequest) decode(pd packetDecoder, version int16) (err error) { - if i.TransactionalID, err = pd.getNullableString(); err != nil { - return err + i.Version = version + if i.Version < 2 { + if i.TransactionalID, err = pd.getNullableString(); err != nil { + return err + } + } else { + if i.TransactionalID, err = pd.getCompactNullableString(); err != nil { + return err + } } timeout, err := pd.getInt32() @@ -26,6 +49,21 @@ func (i *InitProducerIDRequest) decode(pd packetDecoder, version int16) (err err return err } i.TransactionTimeout = time.Duration(timeout) * time.Millisecond + if i.Version >= 3 { + if i.ProducerID, err = pd.getInt64(); err != nil { + return err + } + + if i.ProducerEpoch, err = pd.getInt16(); err != nil { + return err + } + } + + if i.Version >= 2 { + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } return nil } @@ -35,13 +73,30 @@ func (i *InitProducerIDRequest) key() int16 { } func (i *InitProducerIDRequest) version() int16 { - return 0 + return i.Version } func (i *InitProducerIDRequest) headerVersion() int16 { + if i.Version >= 2 { + return 2 + } + return 1 } func (i *InitProducerIDRequest) requiredVersion() KafkaVersion { - return V0_11_0_0 + switch i.Version { + case 2: + // Added tagged fields + return V2_4_0_0 + case 3: + // Added ProducerID/Epoch + return V2_5_0_0 + case 0: + fallthrough + case 1: + fallthrough + default: + return V0_11_0_0 + } } diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_response.go b/vendor/github.com/Shopify/sarama/init_producer_id_response.go index 3e1242bf6..006070189 100644 --- a/vendor/github.com/Shopify/sarama/init_producer_id_response.go +++ b/vendor/github.com/Shopify/sarama/init_producer_id_response.go @@ -5,6 
+5,7 @@ import "time" type InitProducerIDResponse struct { ThrottleTime time.Duration Err KError + Version int16 ProducerID int64 ProducerEpoch int16 } @@ -15,10 +16,15 @@ func (i *InitProducerIDResponse) encode(pe packetEncoder) error { pe.putInt64(i.ProducerID) pe.putInt16(i.ProducerEpoch) + if i.Version >= 2 { + pe.putEmptyTaggedFieldArray() + } + return nil } func (i *InitProducerIDResponse) decode(pd packetDecoder, version int16) (err error) { + i.Version = version throttleTime, err := pd.getInt32() if err != nil { return err @@ -39,6 +45,12 @@ func (i *InitProducerIDResponse) decode(pd packetDecoder, version int16) (err er return err } + if i.Version >= 2 { + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + return nil } @@ -47,13 +59,27 @@ func (i *InitProducerIDResponse) key() int16 { } func (i *InitProducerIDResponse) version() int16 { - return 0 + return i.Version } func (i *InitProducerIDResponse) headerVersion() int16 { + if i.Version >= 2 { + return 1 + } return 0 } func (i *InitProducerIDResponse) requiredVersion() KafkaVersion { - return V0_11_0_0 + switch i.Version { + case 2: + fallthrough + case 3: + return V2_4_0_0 + case 0: + fallthrough + case 1: + fallthrough + default: + return V0_11_0_0 + } } diff --git a/vendor/github.com/Shopify/sarama/join_group_request.go b/vendor/github.com/Shopify/sarama/join_group_request.go index 3734e82e4..432338cd5 100644 --- a/vendor/github.com/Shopify/sarama/join_group_request.go +++ b/vendor/github.com/Shopify/sarama/join_group_request.go @@ -30,6 +30,7 @@ type JoinGroupRequest struct { SessionTimeout int32 RebalanceTimeout int32 MemberId string + GroupInstanceId *string ProtocolType string GroupProtocols map[string][]byte // deprecated; use OrderedGroupProtocols OrderedGroupProtocols []*GroupProtocol @@ -46,6 +47,11 @@ func (r *JoinGroupRequest) encode(pe packetEncoder) error { if err := pe.putString(r.MemberId); err != nil { return err } + if r.Version >= 5 { + if err := pe.putNullableString(r.GroupInstanceId); err != nil { + return err + } + } if err := pe.putString(r.ProtocolType); err != nil { return err } @@ -101,6 +107,12 @@ func (r *JoinGroupRequest) decode(pd packetDecoder, version int16) (err error) { return } + if version >= 5 { + if r.GroupInstanceId, err = pd.getNullableString(); err != nil { + return + } + } + if r.ProtocolType, err = pd.getString(); err != nil { return } @@ -140,7 +152,9 @@ func (r *JoinGroupRequest) headerVersion() int16 { func (r *JoinGroupRequest) requiredVersion() KafkaVersion { switch r.Version { - case 2: + case 4, 5: + return V2_3_0_0 + case 2, 3: return V0_11_0_0 case 1: return V0_10_1_0 diff --git a/vendor/github.com/Shopify/sarama/join_group_response.go b/vendor/github.com/Shopify/sarama/join_group_response.go index 54b0a45c2..d8aa1f002 100644 --- a/vendor/github.com/Shopify/sarama/join_group_response.go +++ b/vendor/github.com/Shopify/sarama/join_group_response.go @@ -8,17 +8,23 @@ type JoinGroupResponse struct { GroupProtocol string LeaderId string MemberId string - Members map[string][]byte + Members []GroupMember +} + +type GroupMember struct { + MemberId string + GroupInstanceId *string + Metadata []byte } func (r *JoinGroupResponse) GetMembers() (map[string]ConsumerGroupMemberMetadata, error) { members := make(map[string]ConsumerGroupMemberMetadata, len(r.Members)) - for id, bin := range r.Members { + for _, member := range r.Members { meta := new(ConsumerGroupMemberMetadata) - if err := decode(bin, meta); err != nil { + if err := decode(member.Metadata, meta, 
nil); err != nil { return nil, err } - members[id] = *meta + members[member.MemberId] = *meta } return members, nil } @@ -44,12 +50,16 @@ func (r *JoinGroupResponse) encode(pe packetEncoder) error { return err } - for memberId, memberMetadata := range r.Members { - if err := pe.putString(memberId); err != nil { + for _, member := range r.Members { + if err := pe.putString(member.MemberId); err != nil { return err } - - if err := pe.putBytes(memberMetadata); err != nil { + if r.Version >= 5 { + if err := pe.putNullableString(member.GroupInstanceId); err != nil { + return err + } + } + if err := pe.putBytes(member.Metadata); err != nil { return err } } @@ -97,19 +107,27 @@ func (r *JoinGroupResponse) decode(pd packetDecoder, version int16) (err error) return nil } - r.Members = make(map[string][]byte) + r.Members = make([]GroupMember, n) for i := 0; i < n; i++ { memberId, err := pd.getString() if err != nil { return err } + var groupInstanceId *string = nil + if r.Version >= 5 { + groupInstanceId, err = pd.getNullableString() + if err != nil { + return err + } + } + memberMetadata, err := pd.getBytes() if err != nil { return err } - r.Members[memberId] = memberMetadata + r.Members[i] = GroupMember{MemberId: memberId, GroupInstanceId: groupInstanceId, Metadata: memberMetadata} } return nil @@ -129,6 +147,8 @@ func (r *JoinGroupResponse) headerVersion() int16 { func (r *JoinGroupResponse) requiredVersion() KafkaVersion { switch r.Version { + case 3, 4, 5: + return V2_3_0_0 case 2: return V0_11_0_0 case 1: diff --git a/vendor/github.com/Shopify/sarama/kerberos_client.go b/vendor/github.com/Shopify/sarama/kerberos_client.go index ebc114179..01a53193b 100644 --- a/vendor/github.com/Shopify/sarama/kerberos_client.go +++ b/vendor/github.com/Shopify/sarama/kerberos_client.go @@ -1,10 +1,10 @@ package sarama import ( - krb5client "gopkg.in/jcmturner/gokrb5.v7/client" - krb5config "gopkg.in/jcmturner/gokrb5.v7/config" - "gopkg.in/jcmturner/gokrb5.v7/keytab" - "gopkg.in/jcmturner/gokrb5.v7/types" + krb5client "github.com/jcmturner/gokrb5/v8/client" + krb5config "github.com/jcmturner/gokrb5/v8/config" + "github.com/jcmturner/gokrb5/v8/keytab" + "github.com/jcmturner/gokrb5/v8/types" ) type KerberosGoKrb5Client struct { @@ -37,9 +37,9 @@ func createClient(config *GSSAPIConfig, cfg *krb5config.Config) (KerberosClient, if err != nil { return nil, err } - client = krb5client.NewClientWithKeytab(config.Username, config.Realm, kt, cfg, krb5client.DisablePAFXFAST(config.DisablePAFXFAST)) + client = krb5client.NewWithKeytab(config.Username, config.Realm, kt, cfg, krb5client.DisablePAFXFAST(config.DisablePAFXFAST)) } else { - client = krb5client.NewClientWithPassword(config.Username, + client = krb5client.NewWithPassword(config.Username, config.Realm, config.Password, cfg, krb5client.DisablePAFXFAST(config.DisablePAFXFAST)) } return &KerberosGoKrb5Client{*client}, nil diff --git a/vendor/github.com/Shopify/sarama/leave_group_request.go b/vendor/github.com/Shopify/sarama/leave_group_request.go index d7789b68d..741b7290a 100644 --- a/vendor/github.com/Shopify/sarama/leave_group_request.go +++ b/vendor/github.com/Shopify/sarama/leave_group_request.go @@ -1,27 +1,69 @@ package sarama +type MemberIdentity struct { + MemberId string + GroupInstanceId *string +} + type LeaveGroupRequest struct { + Version int16 GroupId string - MemberId string + MemberId string // Removed in Version 3 + Members []MemberIdentity // Added in Version 3 } func (r *LeaveGroupRequest) encode(pe packetEncoder) error { if err := 
pe.putString(r.GroupId); err != nil { return err } - if err := pe.putString(r.MemberId); err != nil { - return err + if r.Version < 3 { + if err := pe.putString(r.MemberId); err != nil { + return err + } + } + if r.Version >= 3 { + if err := pe.putArrayLength(len(r.Members)); err != nil { + return err + } + for _, member := range r.Members { + if err := pe.putString(member.MemberId); err != nil { + return err + } + if err := pe.putNullableString(member.GroupInstanceId); err != nil { + return err + } + } } return nil } func (r *LeaveGroupRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version if r.GroupId, err = pd.getString(); err != nil { return } - if r.MemberId, err = pd.getString(); err != nil { - return + if r.Version < 3 { + if r.MemberId, err = pd.getString(); err != nil { + return + } + } + if r.Version >= 3 { + memberCount, err := pd.getArrayLength() + if err != nil { + return err + } + r.Members = make([]MemberIdentity, memberCount) + for i := 0; i < memberCount; i++ { + memberIdentity := MemberIdentity{} + if memberIdentity.MemberId, err = pd.getString(); err != nil { + return err + } + if memberIdentity.GroupInstanceId, err = pd.getNullableString(); err != nil { + return err + } + r.Members[i] = memberIdentity + } } return nil @@ -32,7 +74,7 @@ func (r *LeaveGroupRequest) key() int16 { } func (r *LeaveGroupRequest) version() int16 { - return 0 + return r.Version } func (r *LeaveGroupRequest) headerVersion() int16 { @@ -40,5 +82,9 @@ func (r *LeaveGroupRequest) headerVersion() int16 { } func (r *LeaveGroupRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 1, 2, 3: + return V2_3_0_0 + } return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/leave_group_response.go b/vendor/github.com/Shopify/sarama/leave_group_response.go index 25f8d5eb3..18ed357e8 100644 --- a/vendor/github.com/Shopify/sarama/leave_group_response.go +++ b/vendor/github.com/Shopify/sarama/leave_group_response.go @@ -1,21 +1,73 @@ package sarama +type MemberResponse struct { + MemberId string + GroupInstanceId *string + Err KError +} type LeaveGroupResponse struct { - Err KError + Version int16 + ThrottleTime int32 + Err KError + Members []MemberResponse } func (r *LeaveGroupResponse) encode(pe packetEncoder) error { + if r.Version >= 1 { + pe.putInt32(r.ThrottleTime) + } pe.putInt16(int16(r.Err)) + if r.Version >= 3 { + if err := pe.putArrayLength(len(r.Members)); err != nil { + return err + } + for _, member := range r.Members { + if err := pe.putString(member.MemberId); err != nil { + return err + } + if err := pe.putNullableString(member.GroupInstanceId); err != nil { + return err + } + pe.putInt16(int16(member.Err)) + } + } return nil } func (r *LeaveGroupResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + if r.Version >= 1 { + if r.ThrottleTime, err = pd.getInt32(); err != nil { + return err + } + } kerr, err := pd.getInt16() if err != nil { return err } r.Err = KError(kerr) + if r.Version >= 3 { + membersLen, err := pd.getArrayLength() + if err != nil { + return err + } + r.Members = make([]MemberResponse, membersLen) + for i := 0; i < len(r.Members); i++ { + if r.Members[i].MemberId, err = pd.getString(); err != nil { + return err + } + if r.Members[i].GroupInstanceId, err = pd.getNullableString(); err != nil { + return err + } + if memberErr, err := pd.getInt16(); err != nil { + return err + } else { + r.Members[i].Err = KError(memberErr) + } + } + } + return nil } @@ -24,7 +76,7 @@ func (r *LeaveGroupResponse) 
key() int16 { } func (r *LeaveGroupResponse) version() int16 { - return 0 + return r.Version } func (r *LeaveGroupResponse) headerVersion() int16 { @@ -32,5 +84,9 @@ func (r *LeaveGroupResponse) headerVersion() int16 { } func (r *LeaveGroupResponse) requiredVersion() KafkaVersion { + switch r.Version { + case 1, 2, 3: + return V2_3_0_0 + } return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/list_groups_request.go b/vendor/github.com/Shopify/sarama/list_groups_request.go index ed44cc27e..4553b2d2e 100644 --- a/vendor/github.com/Shopify/sarama/list_groups_request.go +++ b/vendor/github.com/Shopify/sarama/list_groups_request.go @@ -1,7 +1,6 @@ package sarama -type ListGroupsRequest struct { -} +type ListGroupsRequest struct{} func (r *ListGroupsRequest) encode(pe packetEncoder) error { return nil diff --git a/vendor/github.com/Shopify/sarama/message.go b/vendor/github.com/Shopify/sarama/message.go index e48566b37..c6f35a3f5 100644 --- a/vendor/github.com/Shopify/sarama/message.go +++ b/vendor/github.com/Shopify/sarama/message.go @@ -6,15 +6,15 @@ import ( ) const ( - //CompressionNone no compression + // CompressionNone no compression CompressionNone CompressionCodec = iota - //CompressionGZIP compression using GZIP + // CompressionGZIP compression using GZIP CompressionGZIP - //CompressionSnappy compression using snappy + // CompressionSnappy compression using snappy CompressionSnappy - //CompressionLZ4 compression using LZ4 + // CompressionLZ4 compression using LZ4 CompressionLZ4 - //CompressionZSTD compression using ZSTD + // CompressionZSTD compression using ZSTD CompressionZSTD // The lowest 3 bits contain the compression codec used for the message @@ -42,7 +42,29 @@ func (cc CompressionCodec) String() string { }[int(cc)] } -//Message is a kafka message type +// UnmarshalText returns a CompressionCodec from its string representation. +func (cc *CompressionCodec) UnmarshalText(text []byte) error { + codecs := map[string]CompressionCodec{ + "none": CompressionNone, + "gzip": CompressionGZIP, + "snappy": CompressionSnappy, + "lz4": CompressionLZ4, + "zstd": CompressionZSTD, + } + codec, ok := codecs[string(text)] + if !ok { + return fmt.Errorf("cannot parse %q as a compression codec", string(text)) + } + *cc = codec + return nil +} + +// MarshalText transforms a CompressionCodec into its string representation. 
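The UnmarshalText method above (and its MarshalText counterpart that follows) lets CompressionCodec participate in encoding.TextUnmarshaler-based config loading; a small round-trip sketch (hypothetical usage):

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	var codec sarama.CompressionCodec
	// Parse a codec name as it might appear in a config file...
	if err := codec.UnmarshalText([]byte("zstd")); err != nil {
		panic(err)
	}
	// ...and render it back to text.
	text, _ := codec.MarshalText()
	fmt.Println(string(text)) // prints "zstd"
}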
+func (cc CompressionCodec) MarshalText() ([]byte, error) { + return []byte(cc.String()), nil +} + +// Message is a Kafka message type type Message struct { Codec CompressionCodec // codec used to compress the message contents CompressionLevel int // compression level @@ -146,18 +168,12 @@ func (m *Message) decode(pd packetDecoder) (err error) { // for future metrics about the compression ratio in fetch requests m.compressedSize = len(m.Value) - switch m.Codec { - case CompressionNone: - // nothing to do - default: - if m.Value == nil { - break - } - + if m.Value != nil && m.Codec != CompressionNone { m.Value, err = decompress(m.Codec, m.Value) if err != nil { return err } + if err := m.decodeSet(); err != nil { return err } diff --git a/vendor/github.com/Shopify/sarama/message_set.go b/vendor/github.com/Shopify/sarama/message_set.go index 6523ec2f7..5bc211294 100644 --- a/vendor/github.com/Shopify/sarama/message_set.go +++ b/vendor/github.com/Shopify/sarama/message_set.go @@ -1,5 +1,7 @@ package sarama +import "errors" + type MessageBlock struct { Offset int64 Msg *Message @@ -70,7 +72,7 @@ func (ms *MessageSet) decode(pd packetDecoder) (err error) { for pd.remaining() > 0 { magic, err := magicValue(pd) if err != nil { - if err == ErrInsufficientData { + if errors.Is(err, ErrInsufficientData) { ms.PartialTrailingMessage = true return nil } @@ -83,10 +85,9 @@ func (ms *MessageSet) decode(pd packetDecoder) (err error) { msb := new(MessageBlock) err = msb.decode(pd) - switch err { - case nil: + if err == nil { ms.Messages = append(ms.Messages, msb) - case ErrInsufficientData: + } else if errors.Is(err, ErrInsufficientData) { // As an optimization the server is allowed to return a partial message at the // end of the message set. Clients should handle this case. So we just ignore such things. if msb.Offset == -1 { @@ -96,7 +97,7 @@ func (ms *MessageSet) decode(pd packetDecoder) (err error) { ms.PartialTrailingMessage = true } return nil - default: + } else { return err } } diff --git a/vendor/github.com/Shopify/sarama/metadata_request.go b/vendor/github.com/Shopify/sarama/metadata_request.go index e835f5a9c..a1b6ac09c 100644 --- a/vendor/github.com/Shopify/sarama/metadata_request.go +++ b/vendor/github.com/Shopify/sarama/metadata_request.go @@ -1,13 +1,30 @@ package sarama type MetadataRequest struct { - Version int16 - Topics []string + // Version defines the protocol version to use for encode and decode + Version int16 + // Topics contains the topics to fetch metadata for. + Topics []string + // AllowAutoTopicCreation indicates whether the broker may auto-create topics that we requested which do not already exist, if it is configured to do so.
AllowAutoTopicCreation bool } -func (r *MetadataRequest) encode(pe packetEncoder) error { - if r.Version < 0 || r.Version > 5 { +func NewMetadataRequest(version KafkaVersion, topics []string) *MetadataRequest { + m := &MetadataRequest{Topics: topics} + if version.IsAtLeast(V2_1_0_0) { + m.Version = 7 + } else if version.IsAtLeast(V2_0_0_0) { + m.Version = 6 + } else if version.IsAtLeast(V1_0_0_0) { + m.Version = 5 + } else if version.IsAtLeast(V0_10_0_0) { + m.Version = 1 + } + return m +} + +func (r *MetadataRequest) encode(pe packetEncoder) (err error) { + if r.Version < 0 || r.Version > 12 { return PacketEncodingError{"invalid or unsupported MetadataRequest version field"} } if r.Version == 0 || len(r.Topics) > 0 { @@ -25,13 +42,15 @@ func (r *MetadataRequest) encode(pe packetEncoder) error { } else { pe.putInt32(-1) } - if r.Version > 3 { + + if r.Version >= 4 { pe.putBool(r.AllowAutoTopicCreation) } + return nil } -func (r *MetadataRequest) decode(pd packetDecoder, version int16) error { +func (r *MetadataRequest) decode(pd packetDecoder, version int16) (err error) { r.Version = version size, err := pd.getInt32() if err != nil { @@ -47,13 +66,13 @@ func (r *MetadataRequest) decode(pd packetDecoder, version int16) error { r.Topics[i] = topic } } - if r.Version > 3 { - autoCreation, err := pd.getBool() - if err != nil { + + if r.Version >= 4 { + if r.AllowAutoTopicCreation, err = pd.getBool(); err != nil { return err } - r.AllowAutoTopicCreation = autoCreation } + return nil } @@ -79,6 +98,10 @@ func (r *MetadataRequest) requiredVersion() KafkaVersion { return V0_11_0_0 case 5: return V1_0_0_0 + case 6: + return V2_0_0_0 + case 7: + return V2_1_0_0 default: return MinVersion } diff --git a/vendor/github.com/Shopify/sarama/metadata_response.go b/vendor/github.com/Shopify/sarama/metadata_response.go index 0bb8702cc..10a56877d 100644 --- a/vendor/github.com/Shopify/sarama/metadata_response.go +++ b/vendor/github.com/Shopify/sarama/metadata_response.go @@ -1,44 +1,57 @@ package sarama +// PartitionMetadata contains each partition in the topic. type PartitionMetadata struct { - Err KError - ID int32 - Leader int32 - Replicas []int32 - Isr []int32 + // Version defines the protocol version to use for encode and decode + Version int16 + // Err contains the partition error, or 0 if there was no error. + Err KError + // ID contains the partition index. + ID int32 + // Leader contains the ID of the leader broker. + Leader int32 + // LeaderEpoch contains the leader epoch of this partition. + LeaderEpoch int32 + // Replicas contains the set of all nodes that host this partition. + Replicas []int32 + // Isr contains the set of nodes that are in sync with the leader for this partition. + Isr []int32 + // OfflineReplicas contains the set of offline replicas of this partition. 
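The NewMetadataRequest constructor above selects the highest wire version that the negotiated cluster version can serve; a sketch of pairing it with Broker.GetMetadata (hypothetical, assuming an already-connected broker):

package main

import "github.com/Shopify/sarama"

// fetchTopicMetadata issues a MetadataRequest at the version matching the
// configured cluster version (7 for V2_1_0_0 and newer, per the mapping above).
func fetchTopicMetadata(broker *sarama.Broker, topic string) (*sarama.MetadataResponse, error) {
	req := sarama.NewMetadataRequest(sarama.V2_1_0_0, []string{topic})
	return broker.GetMetadata(req)
}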
OfflineReplicas []int32 } -func (pm *PartitionMetadata) decode(pd packetDecoder, version int16) (err error) { +func (p *PartitionMetadata) decode(pd packetDecoder, version int16) (err error) { + p.Version = version tmp, err := pd.getInt16() if err != nil { return err } - pm.Err = KError(tmp) + p.Err = KError(tmp) - pm.ID, err = pd.getInt32() - if err != nil { + if p.ID, err = pd.getInt32(); err != nil { return err } - pm.Leader, err = pd.getInt32() - if err != nil { + if p.Leader, err = pd.getInt32(); err != nil { return err } - pm.Replicas, err = pd.getInt32Array() - if err != nil { + if p.Version >= 7 { + if p.LeaderEpoch, err = pd.getInt32(); err != nil { + return err + } + } + + if p.Replicas, err = pd.getInt32Array(); err != nil { return err } - pm.Isr, err = pd.getInt32Array() - if err != nil { + if p.Isr, err = pd.getInt32Array(); err != nil { return err } - if version >= 5 { - pm.OfflineReplicas, err = pd.getInt32Array() - if err != nil { + if p.Version >= 5 { + if p.OfflineReplicas, err = pd.getInt32Array(); err != nil { return err } } @@ -46,24 +59,28 @@ func (pm *PartitionMetadata) decode(pd packetDecoder, version int16) (err error) return nil } -func (pm *PartitionMetadata) encode(pe packetEncoder, version int16) (err error) { - pe.putInt16(int16(pm.Err)) - pe.putInt32(pm.ID) - pe.putInt32(pm.Leader) +func (p *PartitionMetadata) encode(pe packetEncoder, version int16) (err error) { + p.Version = version + pe.putInt16(int16(p.Err)) - err = pe.putInt32Array(pm.Replicas) - if err != nil { + pe.putInt32(p.ID) + + pe.putInt32(p.Leader) + + if p.Version >= 7 { + pe.putInt32(p.LeaderEpoch) + } + + if err := pe.putInt32Array(p.Replicas); err != nil { return err } - err = pe.putInt32Array(pm.Isr) - if err != nil { + if err := pe.putInt32Array(p.Isr); err != nil { return err } - if version >= 5 { - err = pe.putInt32Array(pm.OfflineReplicas) - if err != nil { + if p.Version >= 5 { + if err := pe.putInt32Array(p.OfflineReplicas); err != nil { return err } } @@ -71,68 +88,71 @@ func (pm *PartitionMetadata) encode(pe packetEncoder, version int16) (err error) return nil } +// TopicMetadata contains each topic in the response. type TopicMetadata struct { - Err KError - Name string - IsInternal bool // Only valid for Version >= 1 + // Version defines the protocol version to use for encode and decode + Version int16 + // Err contains the topic error, or 0 if there was no error. + Err KError + // Name contains the topic name. + Name string + // IsInternal is true if the topic is internal. + IsInternal bool + // Partitions contains each partition in the topic.
Partitions []*PartitionMetadata } -func (tm *TopicMetadata) decode(pd packetDecoder, version int16) (err error) { +func (t *TopicMetadata) decode(pd packetDecoder, version int16) (err error) { + t.Version = version tmp, err := pd.getInt16() if err != nil { return err } - tm.Err = KError(tmp) + t.Err = KError(tmp) - tm.Name, err = pd.getString() - if err != nil { + if t.Name, err = pd.getString(); err != nil { return err } - if version >= 1 { - tm.IsInternal, err = pd.getBool() - if err != nil { + if t.Version >= 1 { + if t.IsInternal, err = pd.getBool(); err != nil { return err } } - n, err := pd.getArrayLength() - if err != nil { + if numPartitions, err := pd.getArrayLength(); err != nil { return err - } - tm.Partitions = make([]*PartitionMetadata, n) - for i := 0; i < n; i++ { - tm.Partitions[i] = new(PartitionMetadata) - err = tm.Partitions[i].decode(pd, version) - if err != nil { - return err + } else { + t.Partitions = make([]*PartitionMetadata, numPartitions) + for i := 0; i < numPartitions; i++ { + block := &PartitionMetadata{} + if err := block.decode(pd, t.Version); err != nil { + return err + } + t.Partitions[i] = block } } return nil } -func (tm *TopicMetadata) encode(pe packetEncoder, version int16) (err error) { - pe.putInt16(int16(tm.Err)) +func (t *TopicMetadata) encode(pe packetEncoder, version int16) (err error) { + t.Version = version + pe.putInt16(int16(t.Err)) - err = pe.putString(tm.Name) - if err != nil { + if err := pe.putString(t.Name); err != nil { return err } - if version >= 1 { - pe.putBool(tm.IsInternal) + if t.Version >= 1 { + pe.putBool(t.IsInternal) } - err = pe.putArrayLength(len(tm.Partitions)) - if err != nil { + if err := pe.putArrayLength(len(t.Partitions)); err != nil { return err } - - for _, pm := range tm.Partitions { - err = pm.encode(pe, version) - if err != nil { + for _, block := range t.Partitions { + if err := block.encode(pe, t.Version); err != nil { return err } } @@ -141,20 +161,24 @@ func (tm *TopicMetadata) encode(pe packetEncoder, version int16) (err error) { } type MetadataResponse struct { - Version int16 + // Version defines the protocol version to use for encode and decode + Version int16 + // ThrottleTimeMs contains the duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. ThrottleTimeMs int32 - Brokers []*Broker - ClusterID *string - ControllerID int32 - Topics []*TopicMetadata + // Brokers contains each broker in the response. + Brokers []*Broker + // ClusterID contains the cluster ID that responding broker belongs to. + ClusterID *string + // ControllerID contains the ID of the controller broker. + ControllerID int32 + // Topics contains each topic in the response. 
+ Topics []*TopicMetadata } func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) { r.Version = version - - if version >= 3 { - r.ThrottleTimeMs, err = pd.getInt32() - if err != nil { + if r.Version >= 3 { + if r.ThrottleTimeMs, err = pd.getInt32(); err != nil { return err } } @@ -163,7 +187,6 @@ func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) { if err != nil { return err } - r.Brokers = make([]*Broker, n) for i := 0; i < n; i++ { r.Brokers[i] = new(Broker) @@ -173,46 +196,40 @@ func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) { } } - if version >= 2 { - r.ClusterID, err = pd.getNullableString() - if err != nil { + if r.Version >= 2 { + if r.ClusterID, err = pd.getNullableString(); err != nil { return err } } - if version >= 1 { - r.ControllerID, err = pd.getInt32() - if err != nil { + if r.Version >= 1 { + if r.ControllerID, err = pd.getInt32(); err != nil { return err } - } else { - r.ControllerID = -1 } - n, err = pd.getArrayLength() - if err != nil { + if numTopics, err := pd.getArrayLength(); err != nil { return err - } - - r.Topics = make([]*TopicMetadata, n) - for i := 0; i < n; i++ { - r.Topics[i] = new(TopicMetadata) - err = r.Topics[i].decode(pd, version) - if err != nil { - return err + } else { + r.Topics = make([]*TopicMetadata, numTopics) + for i := 0; i < numTopics; i++ { + block := &TopicMetadata{} + if err := block.decode(pd, r.Version); err != nil { + return err + } + r.Topics[i] = block } } return nil } -func (r *MetadataResponse) encode(pe packetEncoder) error { +func (r *MetadataResponse) encode(pe packetEncoder) (err error) { if r.Version >= 3 { pe.putInt32(r.ThrottleTimeMs) } - err := pe.putArrayLength(len(r.Brokers)) - if err != nil { + if err := pe.putArrayLength(len(r.Brokers)); err != nil { return err } for _, broker := range r.Brokers { @@ -223,8 +240,7 @@ func (r *MetadataResponse) encode(pe packetEncoder) error { } if r.Version >= 2 { - err := pe.putNullableString(r.ClusterID) - if err != nil { + if err := pe.putNullableString(r.ClusterID); err != nil { return err } } @@ -233,13 +249,11 @@ func (r *MetadataResponse) encode(pe packetEncoder) error { pe.putInt32(r.ControllerID) } - err = pe.putArrayLength(len(r.Topics)) - if err != nil { + if err := pe.putArrayLength(len(r.Topics)); err != nil { return err } - for _, tm := range r.Topics { - err = tm.encode(pe, r.Version) - if err != nil { + for _, block := range r.Topics { + if err := block.encode(pe, r.Version); err != nil { return err } } @@ -269,6 +283,10 @@ func (r *MetadataResponse) requiredVersion() KafkaVersion { return V0_11_0_0 case 5: return V1_0_0_0 + case 6: + return V2_0_0_0 + case 7: + return V2_1_0_0 default: return MinVersion } @@ -316,7 +334,6 @@ func (r *MetadataResponse) AddTopicPartition(topic string, partition, brokerID i tmatch.Partitions = append(tmatch.Partitions, pmatch) foundPartition: - pmatch.Leader = brokerID pmatch.Replicas = replicas pmatch.Isr = isr diff --git a/vendor/github.com/Shopify/sarama/metrics.go b/vendor/github.com/Shopify/sarama/metrics.go index 90e5a87f4..7b7705f2e 100644 --- a/vendor/github.com/Shopify/sarama/metrics.go +++ b/vendor/github.com/Shopify/sarama/metrics.go @@ -3,6 +3,7 @@ package sarama import ( "fmt" "strings" + "sync" "github.com/rcrowley/go-metrics" ) @@ -41,3 +42,79 @@ func getOrRegisterTopicMeter(name string, topic string, r metrics.Registry) metr func getOrRegisterTopicHistogram(name string, topic string, r metrics.Registry) metrics.Histogram { return 
getOrRegisterHistogram(getMetricNameForTopic(name, topic), r) } + +// cleanupRegistry is an implementation of metrics.Registry that allows +// unregistering from the parent registry only those metrics +// that have been registered in cleanupRegistry +type cleanupRegistry struct { + parent metrics.Registry + metrics map[string]struct{} + mutex sync.RWMutex +} + +func newCleanupRegistry(parent metrics.Registry) metrics.Registry { + return &cleanupRegistry{ + parent: parent, + metrics: map[string]struct{}{}, + } +} + +func (r *cleanupRegistry) Each(fn func(string, interface{})) { + r.mutex.RLock() + defer r.mutex.RUnlock() + wrappedFn := func(name string, iface interface{}) { + if _, ok := r.metrics[name]; ok { + fn(name, iface) + } + } + r.parent.Each(wrappedFn) +} + +func (r *cleanupRegistry) Get(name string) interface{} { + r.mutex.RLock() + defer r.mutex.RUnlock() + if _, ok := r.metrics[name]; ok { + return r.parent.Get(name) + } + return nil +} + +func (r *cleanupRegistry) GetOrRegister(name string, metric interface{}) interface{} { + r.mutex.Lock() + defer r.mutex.Unlock() + r.metrics[name] = struct{}{} + return r.parent.GetOrRegister(name, metric) +} + +func (r *cleanupRegistry) Register(name string, metric interface{}) error { + r.mutex.Lock() + defer r.mutex.Unlock() + r.metrics[name] = struct{}{} + return r.parent.Register(name, metric) +} + +func (r *cleanupRegistry) RunHealthchecks() { + r.parent.RunHealthchecks() +} + +func (r *cleanupRegistry) GetAll() map[string]map[string]interface{} { + return r.parent.GetAll() +} + +func (r *cleanupRegistry) Unregister(name string) { + r.mutex.Lock() + defer r.mutex.Unlock() + if _, ok := r.metrics[name]; ok { + delete(r.metrics, name) + r.parent.Unregister(name) + } +} + +func (r *cleanupRegistry) UnregisterAll() { + r.mutex.Lock() + defer r.mutex.Unlock() + for name := range r.metrics { + delete(r.metrics, name) + r.parent.Unregister(name) + } +} diff --git a/vendor/github.com/Shopify/sarama/mockbroker.go b/vendor/github.com/Shopify/sarama/mockbroker.go index ff5a68ae7..628c3cb90 100644 --- a/vendor/github.com/Shopify/sarama/mockbroker.go +++ b/vendor/github.com/Shopify/sarama/mockbroker.go @@ -3,6 +3,7 @@ package sarama import ( "bytes" "encoding/binary" + "errors" "fmt" "io" "net" @@ -30,9 +31,9 @@ type RequestNotifierFunc func(bytesRead, bytesWritten int) // to facilitate testing of higher level or specialized consumers and producers // built on top of Sarama. Note that it does not 'mimic' the Kafka API protocol, // but rather provides a facility to do that. It takes care of the TCP -// transport, request unmarshaling, response marshaling, and makes it the test +// transport, request unmarshalling, response marshalling, and makes it the test // writer responsibility to program correct according to the Kafka API protocol -// MockBroker behaviour. +// MockBroker behavior. // // MockBroker is implemented as a TCP server listening on a kernel-selected // localhost port that can accept many connections. It reads Kafka requests @@ -83,9 +84,13 @@ func (b *MockBroker) SetLatency(latency time.Duration) { // and uses the found MockResponse instance to generate an appropriate reply. // If the request type is not found in the map then nothing is sent.
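Because cleanupRegistry and newCleanupRegistry are unexported, the cleanup behavior is only reachable from inside the package; a hypothetical in-package test sketch of the intent:

package sarama

import (
	"testing"

	"github.com/rcrowley/go-metrics"
)

// Sketch: registrations made through the cleanup registry land in the parent,
// but UnregisterAll removes only the metrics this registry created.
func TestCleanupRegistrySketch(t *testing.T) {
	parent := metrics.NewRegistry()
	metrics.GetOrRegisterCounter("pre-existing", parent).Inc(1)

	scoped := newCleanupRegistry(parent)
	metrics.GetOrRegisterCounter("scoped", scoped).Inc(1)

	scoped.UnregisterAll()

	if parent.Get("scoped") != nil {
		t.Fatal("scoped metric should have been removed from the parent")
	}
	if parent.Get("pre-existing") == nil {
		t.Fatal("pre-existing metric should survive UnregisterAll")
	}
}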
func (b *MockBroker) SetHandlerByMap(handlerMap map[string]MockResponse) { + fnMap := make(map[string]MockResponse) + for k, v := range handlerMap { + fnMap[k] = v + } b.setHandler(func(req *request) (res encoderWithHeader) { reqTypeName := reflect.TypeOf(req.body).Elem().Name() - mockResponse := handlerMap[reqTypeName] + mockResponse := fnMap[reqTypeName] if mockResponse == nil { return nil } @@ -218,6 +223,8 @@ func (b *MockBroker) handleRequests(conn io.ReadWriteCloser, idx int, wg *sync.W defer func() { _ = conn.Close() }() + s := spew.NewDefaultConfig() + s.MaxDepth = 1 Logger.Printf("*** mockbroker/%d/%d: connection opened", b.BrokerID(), idx) var err error @@ -264,7 +271,12 @@ func (b *MockBroker) handleRequests(conn io.ReadWriteCloser, idx int, wg *sync.W Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(req)) continue } - Logger.Printf("*** mockbroker/%d/%d: served %v -> %v", b.brokerID, idx, req, res) + Logger.Printf( + "*** mockbroker/%d/%d: replied to %T with %T\n-> %s\n-> %s", + b.brokerID, idx, req.body, res, + s.Sprintf("%#v", req.body), + s.Sprintf("%#v", res), + ) encodedRes, err := encode(res, nil) if err != nil { @@ -348,9 +360,10 @@ func (b *MockBroker) defaultRequestHandler(req *request) (res encoderWithHeader) func (b *MockBroker) serverError(err error) { isConnectionClosedError := false - if _, ok := err.(*net.OpError); ok { + opError := &net.OpError{} + if errors.As(err, &opError) { isConnectionClosedError = true - } else if err == io.EOF { + } else if errors.Is(err, io.EOF) { isConnectionClosedError = true } else if err.Error() == "use of closed network connection" { isConnectionClosedError = true diff --git a/vendor/github.com/Shopify/sarama/mockkerberos.go b/vendor/github.com/Shopify/sarama/mockkerberos.go index d36649d8a..a43607e1c 100644 --- a/vendor/github.com/Shopify/sarama/mockkerberos.go +++ b/vendor/github.com/Shopify/sarama/mockkerberos.go @@ -4,11 +4,11 @@ import ( "encoding/binary" "encoding/hex" - "gopkg.in/jcmturner/gokrb5.v7/credentials" - "gopkg.in/jcmturner/gokrb5.v7/gssapi" - "gopkg.in/jcmturner/gokrb5.v7/iana/keyusage" - "gopkg.in/jcmturner/gokrb5.v7/messages" - "gopkg.in/jcmturner/gokrb5.v7/types" + "github.com/jcmturner/gokrb5/v8/credentials" + "github.com/jcmturner/gokrb5/v8/gssapi" + "github.com/jcmturner/gokrb5/v8/iana/keyusage" + "github.com/jcmturner/gokrb5/v8/messages" + "github.com/jcmturner/gokrb5/v8/types" ) type KafkaGSSAPIHandler struct { @@ -27,7 +27,7 @@ func (h *KafkaGSSAPIHandler) MockKafkaGSSAPI(buffer []byte) []byte { return []byte{0x00, 0x00, 0x00, 0x01, 0xAD} } - var pack = gssapi.WrapToken{ + pack := gssapi.WrapToken{ Flags: KRB5_USER_AUTH, EC: 12, RRC: 0, @@ -108,8 +108,9 @@ func (c *MockKerberosClient) GetServiceTicket(spn string) (messages.Ticket, type func (c *MockKerberosClient) Domain() string { return "EXAMPLE.COM" } + func (c *MockKerberosClient) CName() types.PrincipalName { - var p = types.PrincipalName{ + p := types.PrincipalName{ NameType: KRB5_USER_AUTH, NameString: []string{ "kafka", @@ -118,6 +119,7 @@ func (c *MockKerberosClient) CName() types.PrincipalName { } return p } + func (c *MockKerberosClient) Destroy() { // Do nothing. 
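// (The mock client holds no credential or session state; every ticket
// and principal above is fabricated per call, so there is nothing to release.)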
} diff --git a/vendor/github.com/Shopify/sarama/mockresponses.go b/vendor/github.com/Shopify/sarama/mockresponses.go index be8af8545..15b4367f9 100644 --- a/vendor/github.com/Shopify/sarama/mockresponses.go +++ b/vendor/github.com/Shopify/sarama/mockresponses.go @@ -3,6 +3,7 @@ package sarama import ( "fmt" "strings" + "sync" ) // TestReporter has methods matching go's testing.T to avoid importing @@ -203,7 +204,6 @@ func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoderWithHeader type MockOffsetResponse struct { offsets map[string]map[int32]map[int64]int64 t TestReporter - version int16 } func NewMockOffsetResponse(t TestReporter) *MockOffsetResponse { @@ -213,11 +213,6 @@ func NewMockOffsetResponse(t TestReporter) *MockOffsetResponse { } } -func (mor *MockOffsetResponse) SetVersion(version int16) *MockOffsetResponse { - mor.version = version - return mor -} - func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, offset int64) *MockOffsetResponse { partitions := mor.offsets[topic] if partitions == nil { @@ -235,7 +230,7 @@ func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, of func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoderWithHeader { offsetRequest := reqBody.(*OffsetRequest) - offsetResponse := &OffsetResponse{Version: mor.version} + offsetResponse := &OffsetResponse{Version: offsetRequest.Version} for topic, partitions := range offsetRequest.blocks { for partition, block := range partitions { offset := mor.getOffset(topic, partition, block.time) @@ -261,41 +256,56 @@ func (mor *MockOffsetResponse) getOffset(topic string, partition int32, time int return offset } +// mockMessage is a mocked key/value pair used when building a `FetchResponse` +type mockMessage struct { + key Encoder + msg Encoder +} + +func newMockMessage(key, msg Encoder) *mockMessage { + return &mockMessage{ + key: key, + msg: msg, + } +} + // MockFetchResponse is a `FetchResponse` builder.
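// The builder keeps its message map behind messagesLock so tests can keep
// adding messages while the mock broker serves fetches concurrently. A
// hedged sketch of typical usage (topic, offsets, and payloads are
// illustrative only):
//
//	fetch := NewMockFetchResponse(t, 1).
//		SetMessageWithKey("my_topic", 0, 123, StringEncoder("k"), StringEncoder("v")).
//		SetHighWaterMark("my_topic", 0, 124)
//	broker.SetHandlerByMap(map[string]MockResponse{"FetchRequest": fetch})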
type MockFetchResponse struct { - messages map[string]map[int32]map[int64]Encoder + messages map[string]map[int32]map[int64]*mockMessage + messagesLock *sync.RWMutex highWaterMarks map[string]map[int32]int64 t TestReporter batchSize int - version int16 } func NewMockFetchResponse(t TestReporter, batchSize int) *MockFetchResponse { return &MockFetchResponse{ - messages: make(map[string]map[int32]map[int64]Encoder), + messages: make(map[string]map[int32]map[int64]*mockMessage), + messagesLock: &sync.RWMutex{}, highWaterMarks: make(map[string]map[int32]int64), t: t, batchSize: batchSize, } } -func (mfr *MockFetchResponse) SetVersion(version int16) *MockFetchResponse { - mfr.version = version - return mfr +func (mfr *MockFetchResponse) SetMessage(topic string, partition int32, offset int64, msg Encoder) *MockFetchResponse { + return mfr.SetMessageWithKey(topic, partition, offset, nil, msg) } -func (mfr *MockFetchResponse) SetMessage(topic string, partition int32, offset int64, msg Encoder) *MockFetchResponse { +func (mfr *MockFetchResponse) SetMessageWithKey(topic string, partition int32, offset int64, key, msg Encoder) *MockFetchResponse { + mfr.messagesLock.Lock() + defer mfr.messagesLock.Unlock() partitions := mfr.messages[topic] if partitions == nil { - partitions = make(map[int32]map[int64]Encoder) + partitions = make(map[int32]map[int64]*mockMessage) mfr.messages[topic] = partitions } messages := partitions[partition] if messages == nil { - messages = make(map[int64]Encoder) + messages = make(map[int64]*mockMessage) partitions[partition] = messages } - messages[offset] = msg + messages[offset] = newMockMessage(key, msg) return mfr } @@ -312,7 +322,7 @@ func (mfr *MockFetchResponse) SetHighWaterMark(topic string, partition int32, of func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoderWithHeader { fetchRequest := reqBody.(*FetchRequest) res := &FetchResponse{ - Version: mfr.version, + Version: fetchRequest.Version, } for topic, partitions := range fetchRequest.blocks { for partition, block := range partitions { @@ -322,7 +332,7 @@ func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoderWithHeader { for i := 0; i < mfr.batchSize && offset < maxOffset; { msg := mfr.getMessage(topic, partition, offset) if msg != nil { - res.AddMessage(topic, partition, nil, msg, offset) + res.AddMessage(topic, partition, msg.key, msg.msg, offset) i++ } offset++ @@ -338,7 +348,9 @@ func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoderWithHeader { return res } -func (mfr *MockFetchResponse) getMessage(topic string, partition int32, offset int64) Encoder { +func (mfr *MockFetchResponse) getMessage(topic string, partition int32, offset int64) *mockMessage { + mfr.messagesLock.RLock() + defer mfr.messagesLock.RUnlock() partitions := mfr.messages[topic] if partitions == nil { return nil @@ -351,6 +363,8 @@ func (mfr *MockFetchResponse) getMessage(topic string, partition int32, offset i } func (mfr *MockFetchResponse) getMessageCount(topic string, partition int32) int { + mfr.messagesLock.RLock() + defer mfr.messagesLock.RUnlock() partitions := mfr.messages[topic] if partitions == nil { return 0 @@ -445,6 +459,7 @@ func (mr *MockFindCoordinatorResponse) SetError(coordinatorType CoordinatorType, func (mr *MockFindCoordinatorResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*FindCoordinatorRequest) res := &FindCoordinatorResponse{} + res.Version = req.Version var v interface{} switch req.CoordinatorType { case CoordinatorGroup: @@ -805,7 +820,8 @@ func 
(mr *MockDescribeConfigsResponse) For(reqBody versionedDecoder) encoderWith Configs: configEntries, }) case TopicResource: - maxMessageBytes := &ConfigEntry{Name: "max.message.bytes", + maxMessageBytes := &ConfigEntry{ + Name: "max.message.bytes", Value: "1000000", ReadOnly: false, Default: !includeSource, @@ -822,7 +838,8 @@ func (mr *MockDescribeConfigsResponse) For(reqBody versionedDecoder) encoderWith }, } } - retentionMs := &ConfigEntry{Name: "retention.ms", + retentionMs := &ConfigEntry{ + Name: "retention.ms", Value: "5000", ReadOnly: false, Default: false, @@ -836,7 +853,8 @@ func (mr *MockDescribeConfigsResponse) For(reqBody versionedDecoder) encoderWith }, } } - password := &ConfigEntry{Name: "password", + password := &ConfigEntry{ + Name: "password", Value: "12345", ReadOnly: false, Default: false, @@ -891,7 +909,8 @@ func (mr *MockAlterConfigsResponse) For(reqBody versionedDecoder) encoderWithHea res := &AlterConfigsResponse{} for _, r := range req.Resources { - res.Resources = append(res.Resources, &AlterConfigsResourceResponse{Name: r.Name, + res.Resources = append(res.Resources, &AlterConfigsResourceResponse{ + Name: r.Name, Type: r.Type, ErrorMsg: "", }) @@ -922,6 +941,51 @@ func (mr *MockAlterConfigsResponseWithErrorCode) For(reqBody versionedDecoder) e return res } +type MockIncrementalAlterConfigsResponse struct { + t TestReporter +} + +func NewMockIncrementalAlterConfigsResponse(t TestReporter) *MockIncrementalAlterConfigsResponse { + return &MockIncrementalAlterConfigsResponse{t: t} +} + +func (mr *MockIncrementalAlterConfigsResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*IncrementalAlterConfigsRequest) + res := &IncrementalAlterConfigsResponse{} + + for _, r := range req.Resources { + res.Resources = append(res.Resources, &AlterConfigsResourceResponse{ + Name: r.Name, + Type: r.Type, + ErrorMsg: "", + }) + } + return res +} + +type MockIncrementalAlterConfigsResponseWithErrorCode struct { + t TestReporter +} + +func NewMockIncrementalAlterConfigsResponseWithErrorCode(t TestReporter) *MockIncrementalAlterConfigsResponseWithErrorCode { + return &MockIncrementalAlterConfigsResponseWithErrorCode{t: t} +} + +func (mr *MockIncrementalAlterConfigsResponseWithErrorCode) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*IncrementalAlterConfigsRequest) + res := &IncrementalAlterConfigsResponse{} + + for _, r := range req.Resources { + res.Resources = append(res.Resources, &AlterConfigsResourceResponse{ + Name: r.Name, + Type: r.Type, + ErrorCode: 83, + ErrorMsg: "", + }) + } + return res +} + type MockCreateAclsResponse struct { t TestReporter } @@ -940,6 +1004,24 @@ func (mr *MockCreateAclsResponse) For(reqBody versionedDecoder) encoderWithHeade return res } +type MockCreateAclsResponseError struct { + t TestReporter +} + +func NewMockCreateAclsResponseWithError(t TestReporter) *MockCreateAclsResponseError { + return &MockCreateAclsResponseError{t: t} +} + +func (mr *MockCreateAclsResponseError) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*CreateAclsRequest) + res := &CreateAclsResponse{} + + for range req.AclCreations { + res.AclCreationResponses = append(res.AclCreationResponses, &AclCreationResponse{Err: ErrInvalidRequest}) + } + return res +} + type MockListAclsResponse struct { t TestReporter } @@ -981,9 +1063,10 @@ func (mr *MockListAclsResponse) For(reqBody versionedDecoder) encoderWithHeader } type MockSaslAuthenticateResponse struct { - t TestReporter - kerror KError - saslAuthBytes []byte + t 
TestReporter + kerror KError + saslAuthBytes []byte + sessionLifetimeMs int64 } func NewMockSaslAuthenticateResponse(t TestReporter) *MockSaslAuthenticateResponse { @@ -991,9 +1074,12 @@ func NewMockSaslAuthenticateResponse(t TestReporter) *MockSaslAuthenticateRespon } func (msar *MockSaslAuthenticateResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*SaslAuthenticateRequest) res := &SaslAuthenticateResponse{} + res.Version = req.Version res.Err = msar.kerror res.SaslAuthBytes = msar.saslAuthBytes + res.SessionLifetimeMs = msar.sessionLifetimeMs return res } @@ -1007,6 +1093,11 @@ func (msar *MockSaslAuthenticateResponse) SetAuthBytes(saslAuthBytes []byte) *Mo return msar } +func (msar *MockSaslAuthenticateResponse) SetSessionLifetimeMs(sessionLifetimeMs int64) *MockSaslAuthenticateResponse { + msar.sessionLifetimeMs = sessionLifetimeMs + return msar +} + type MockDeleteAclsResponse struct { t TestReporter } @@ -1078,6 +1169,183 @@ func (m *MockDeleteGroupsResponse) For(reqBody versionedDecoder) encoderWithHead return resp } +type MockDeleteOffsetResponse struct { + errorCode KError + topic string + partition int32 + errorPartition KError +} + +func NewMockDeleteOffsetRequest(t TestReporter) *MockDeleteOffsetResponse { + return &MockDeleteOffsetResponse{} +} + +func (m *MockDeleteOffsetResponse) SetDeletedOffset(errorCode KError, topic string, partition int32, errorPartition KError) *MockDeleteOffsetResponse { + m.errorCode = errorCode + m.topic = topic + m.partition = partition + m.errorPartition = errorPartition + return m +} + +func (m *MockDeleteOffsetResponse) For(reqBody versionedDecoder) encoderWithHeader { + resp := &DeleteOffsetsResponse{ + ErrorCode: m.errorCode, + Errors: map[string]map[int32]KError{ + m.topic: {m.partition: m.errorPartition}, + }, + } + return resp +} + +type MockJoinGroupResponse struct { + t TestReporter + + ThrottleTime int32 + Err KError + GenerationId int32 + GroupProtocol string + LeaderId string + MemberId string + Members []GroupMember +} + +func NewMockJoinGroupResponse(t TestReporter) *MockJoinGroupResponse { + return &MockJoinGroupResponse{ + t: t, + Members: make([]GroupMember, 0), + } +} + +func (m *MockJoinGroupResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*JoinGroupRequest) + resp := &JoinGroupResponse{ + Version: req.Version, + ThrottleTime: m.ThrottleTime, + Err: m.Err, + GenerationId: m.GenerationId, + GroupProtocol: m.GroupProtocol, + LeaderId: m.LeaderId, + MemberId: m.MemberId, + Members: m.Members, + } + return resp +} + +func (m *MockJoinGroupResponse) SetThrottleTime(t int32) *MockJoinGroupResponse { + m.ThrottleTime = t + return m +} + +func (m *MockJoinGroupResponse) SetError(kerr KError) *MockJoinGroupResponse { + m.Err = kerr + return m +} + +func (m *MockJoinGroupResponse) SetGenerationId(id int32) *MockJoinGroupResponse { + m.GenerationId = id + return m +} + +func (m *MockJoinGroupResponse) SetGroupProtocol(proto string) *MockJoinGroupResponse { + m.GroupProtocol = proto + return m +} + +func (m *MockJoinGroupResponse) SetLeaderId(id string) *MockJoinGroupResponse { + m.LeaderId = id + return m +} + +func (m *MockJoinGroupResponse) SetMemberId(id string) *MockJoinGroupResponse { + m.MemberId = id + return m +} + +func (m *MockJoinGroupResponse) SetMember(id string, meta *ConsumerGroupMemberMetadata) *MockJoinGroupResponse { + bin, err := encode(meta, nil) + if err != nil { + panic(fmt.Sprintf("error encoding member metadata: %v", err)) + } + m.Members = append(m.Members, 
GroupMember{MemberId: id, Metadata: bin}) + return m +} + +type MockLeaveGroupResponse struct { + t TestReporter + + Err KError +} + +func NewMockLeaveGroupResponse(t TestReporter) *MockLeaveGroupResponse { + return &MockLeaveGroupResponse{t: t} +} + +func (m *MockLeaveGroupResponse) For(reqBody versionedDecoder) encoderWithHeader { + resp := &LeaveGroupResponse{ + Err: m.Err, + } + return resp +} + +func (m *MockLeaveGroupResponse) SetError(kerr KError) *MockLeaveGroupResponse { + m.Err = kerr + return m +} + +type MockSyncGroupResponse struct { + t TestReporter + + Err KError + MemberAssignment []byte +} + +func NewMockSyncGroupResponse(t TestReporter) *MockSyncGroupResponse { + return &MockSyncGroupResponse{t: t} +} + +func (m *MockSyncGroupResponse) For(reqBody versionedDecoder) encoderWithHeader { + resp := &SyncGroupResponse{ + Err: m.Err, + MemberAssignment: m.MemberAssignment, + } + return resp +} + +func (m *MockSyncGroupResponse) SetError(kerr KError) *MockSyncGroupResponse { + m.Err = kerr + return m +} + +func (m *MockSyncGroupResponse) SetMemberAssignment(assignment *ConsumerGroupMemberAssignment) *MockSyncGroupResponse { + bin, err := encode(assignment, nil) + if err != nil { + panic(fmt.Sprintf("error encoding member assignment: %v", err)) + } + m.MemberAssignment = bin + return m +} + +type MockHeartbeatResponse struct { + t TestReporter + + Err KError +} + +func NewMockHeartbeatResponse(t TestReporter) *MockHeartbeatResponse { + return &MockHeartbeatResponse{t: t} +} + +func (m *MockHeartbeatResponse) For(reqBody versionedDecoder) encoderWithHeader { + resp := &HeartbeatResponse{} + return resp +} + +func (m *MockHeartbeatResponse) SetError(kerr KError) *MockHeartbeatResponse { + m.Err = kerr + return m +} + type MockDescribeLogDirsResponse struct { t TestReporter logDirs []DescribeLogDirsResponseDirMetadata @@ -1119,3 +1387,40 @@ func (m *MockDescribeLogDirsResponse) For(reqBody versionedDecoder) encoderWithH } return resp } + +type MockApiVersionsResponse struct { + t TestReporter + apiKeys []ApiVersionsResponseKey +} + +func NewMockApiVersionsResponse(t TestReporter) *MockApiVersionsResponse { + return &MockApiVersionsResponse{ + t: t, + apiKeys: []ApiVersionsResponseKey{ + { + ApiKey: 0, + MinVersion: 5, + MaxVersion: 8, + }, + { + ApiKey: 1, + MinVersion: 7, + MaxVersion: 11, + }, + }, + } +} + +func (m *MockApiVersionsResponse) SetApiKeys(apiKeys []ApiVersionsResponseKey) *MockApiVersionsResponse { + m.apiKeys = apiKeys + return m +} + +func (m *MockApiVersionsResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*ApiVersionsRequest) + res := &ApiVersionsResponse{ + Version: req.Version, + ApiKeys: m.apiKeys, + } + return res +} diff --git a/vendor/github.com/Shopify/sarama/offset_commit_request.go b/vendor/github.com/Shopify/sarama/offset_commit_request.go index 9931cade5..5dd88220d 100644 --- a/vendor/github.com/Shopify/sarama/offset_commit_request.go +++ b/vendor/github.com/Shopify/sarama/offset_commit_request.go @@ -13,9 +13,10 @@ const ReceiveTime int64 = -1 const GroupGenerationUndefined = -1 type offsetCommitRequestBlock struct { - offset int64 - timestamp int64 - metadata string + offset int64 + timestamp int64 + committedLeaderEpoch int32 + metadata string } func (b *offsetCommitRequestBlock) encode(pe packetEncoder, version int16) error { @@ -25,6 +26,9 @@ func (b *offsetCommitRequestBlock) encode(pe packetEncoder, version int16) error } else if b.timestamp != 0 { Logger.Println("Non-zero timestamp specified for OffsetCommitRequest 
not v1, it will be ignored") } + if version >= 6 { + pe.putInt32(b.committedLeaderEpoch) + } return pe.putString(b.metadata) } @@ -38,15 +42,22 @@ func (b *offsetCommitRequestBlock) decode(pd packetDecoder, version int16) (err return err } } + if version >= 6 { + if b.committedLeaderEpoch, err = pd.getInt32(); err != nil { + return err + } + } + b.metadata, err = pd.getString() return err } type OffsetCommitRequest struct { ConsumerGroup string - ConsumerGroupGeneration int32 // v1 or later - ConsumerID string // v1 or later - RetentionTime int64 // v2 or later + ConsumerGroupGeneration int32 // v1 or later + ConsumerID string // v1 or later + GroupInstanceId *string // v7 or later + RetentionTime int64 // v2 or later // Version can be: // - 0 (kafka 0.8.1 and later) @@ -54,12 +65,14 @@ type OffsetCommitRequest struct { // - 2 (kafka 0.9.0 and later) // - 3 (kafka 0.11.0 and later) // - 4 (kafka 2.0.0 and later) + // - 5&6 (kafka 2.1.0 and later) + // - 7 (kafka 2.3.0 and later) Version int16 blocks map[string]map[int32]*offsetCommitRequestBlock } func (r *OffsetCommitRequest) encode(pe packetEncoder) error { - if r.Version < 0 || r.Version > 4 { + if r.Version < 0 || r.Version > 7 { return PacketEncodingError{"invalid or unsupported OffsetCommitRequest version field"} } @@ -81,12 +94,19 @@ func (r *OffsetCommitRequest) encode(pe packetEncoder) error { } } - if r.Version >= 2 { + // Version 5 removes RetentionTime, which is now controlled only by a broker configuration. + if r.Version >= 2 && r.Version <= 4 { pe.putInt64(r.RetentionTime) } else if r.RetentionTime != 0 { Logger.Println("Non-zero RetentionTime specified for OffsetCommitRequest version <2, it will be ignored") } + if r.Version >= 7 { + if err := pe.putNullableString(r.GroupInstanceId); err != nil { + return err + } + } + if err := pe.putArrayLength(len(r.blocks)); err != nil { return err } @@ -123,12 +143,19 @@ func (r *OffsetCommitRequest) decode(pd packetDecoder, version int16) (err error } } - if r.Version >= 2 { + // Version 5 removes RetentionTime, which is now controlled only by a broker configuration. 
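+ // (With v5 and later, retention is governed by the broker's offsets.retention.minutes setting; see KIP-211.)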
+ if r.Version >= 2 && r.Version <= 4 { if r.RetentionTime, err = pd.getInt64(); err != nil { return err } } + if r.Version >= 7 { + if r.GroupInstanceId, err = pd.getNullableString(); err != nil { + return err + } + } + topicCount, err := pd.getArrayLength() if err != nil { return err @@ -184,12 +211,16 @@ func (r *OffsetCommitRequest) requiredVersion() KafkaVersion { return V0_11_0_0 case 4: return V2_0_0_0 + case 5, 6: + return V2_1_0_0 + case 7: + return V2_3_0_0 default: return MinVersion } } -func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, timestamp int64, metadata string) { +func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, leaderEpoch int32, timestamp int64, metadata string) { if r.blocks == nil { r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock) } @@ -198,7 +229,7 @@ func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset i r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock) } - r.blocks[topic][partitionID] = &offsetCommitRequestBlock{offset, timestamp, metadata} + r.blocks[topic][partitionID] = &offsetCommitRequestBlock{offset, timestamp, leaderEpoch, metadata} } func (r *OffsetCommitRequest) Offset(topic string, partitionID int32) (int64, string, error) { diff --git a/vendor/github.com/Shopify/sarama/offset_commit_response.go b/vendor/github.com/Shopify/sarama/offset_commit_response.go index 342260ef5..4bed269aa 100644 --- a/vendor/github.com/Shopify/sarama/offset_commit_response.go +++ b/vendor/github.com/Shopify/sarama/offset_commit_response.go @@ -108,6 +108,8 @@ func (r *OffsetCommitResponse) requiredVersion() KafkaVersion { return V0_11_0_0 case 4: return V2_0_0_0 + case 5, 6, 7: + return V2_3_0_0 default: return MinVersion } diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_request.go b/vendor/github.com/Shopify/sarama/offset_fetch_request.go index 51e9faa3f..7e147eb60 100644 --- a/vendor/github.com/Shopify/sarama/offset_fetch_request.go +++ b/vendor/github.com/Shopify/sarama/offset_fetch_request.go @@ -3,60 +3,155 @@ package sarama type OffsetFetchRequest struct { Version int16 ConsumerGroup string + RequireStable bool // requires v7+ partitions map[string][]int32 } func (r *OffsetFetchRequest) encode(pe packetEncoder) (err error) { - if r.Version < 0 || r.Version > 5 { + if r.Version < 0 || r.Version > 7 { return PacketEncodingError{"invalid or unsupported OffsetFetchRequest version field"} } - if err = pe.putString(r.ConsumerGroup); err != nil { + isFlexible := r.Version >= 6 + + if isFlexible { + err = pe.putCompactString(r.ConsumerGroup) + } else { + err = pe.putString(r.ConsumerGroup) + } + if err != nil { return err } - if r.Version >= 2 && r.partitions == nil { - pe.putInt32(-1) - } else { - if err = pe.putArrayLength(len(r.partitions)); err != nil { - return err + if isFlexible { + if r.partitions == nil { + pe.putUVarint(0) + } else { + pe.putCompactArrayLength(len(r.partitions)) } - for topic, partitions := range r.partitions { - if err = pe.putString(topic); err != nil { - return err - } - if err = pe.putInt32Array(partitions); err != nil { + } else { + if r.partitions == nil && r.Version >= 2 { + pe.putInt32(-1) + } else { + if err = pe.putArrayLength(len(r.partitions)); err != nil { return err } } } + + for topic, partitions := range r.partitions { + if isFlexible { + err = pe.putCompactString(topic) + } else { + err = pe.putString(topic) + } + if err != nil { + return err + } + + // + + if isFlexible { + err = 
pe.putCompactInt32Array(partitions) + } else { + err = pe.putInt32Array(partitions) + } + if err != nil { + return err + } + + if isFlexible { + pe.putEmptyTaggedFieldArray() + } + } + + if r.RequireStable && r.Version < 7 { + return PacketEncodingError{"requireStable is not supported. use version 7 or later"} + } + + if r.Version >= 7 { + pe.putBool(r.RequireStable) + } + + if isFlexible { + pe.putEmptyTaggedFieldArray() + } + return nil } func (r *OffsetFetchRequest) decode(pd packetDecoder, version int16) (err error) { r.Version = version - if r.ConsumerGroup, err = pd.getString(); err != nil { + isFlexible := r.Version >= 6 + if isFlexible { + r.ConsumerGroup, err = pd.getCompactString() + } else { + r.ConsumerGroup, err = pd.getString() + } + if err != nil { return err } - partitionCount, err := pd.getArrayLength() + + var partitionCount int + + if isFlexible { + partitionCount, err = pd.getCompactArrayLength() + } else { + partitionCount, err = pd.getArrayLength() + } if err != nil { return err } + if (partitionCount == 0 && version < 2) || partitionCount < 0 { return nil } - r.partitions = make(map[string][]int32) + + r.partitions = make(map[string][]int32, partitionCount) for i := 0; i < partitionCount; i++ { - topic, err := pd.getString() + var topic string + if isFlexible { + topic, err = pd.getCompactString() + } else { + topic, err = pd.getString() + } if err != nil { return err } - partitions, err := pd.getInt32Array() + + var partitions []int32 + if isFlexible { + partitions, err = pd.getCompactInt32Array() + } else { + partitions, err = pd.getInt32Array() + } if err != nil { return err } + if isFlexible { + _, err = pd.getEmptyTaggedFieldArray() + if err != nil { + return err + } + } + r.partitions[topic] = partitions } + + if r.Version >= 7 { + r.RequireStable, err = pd.getBool() + if err != nil { + return err + } + } + + if isFlexible { + _, err = pd.getEmptyTaggedFieldArray() + if err != nil { + return err + } + } + return nil } @@ -69,6 +164,10 @@ func (r *OffsetFetchRequest) version() int16 { } func (r *OffsetFetchRequest) headerVersion() int16 { + if r.Version >= 6 { + return 2 + } + return 1 } @@ -84,6 +183,10 @@ func (r *OffsetFetchRequest) requiredVersion() KafkaVersion { return V2_0_0_0 case 5: return V2_1_0_0 + case 6: + return V2_4_0_0 + case 7: + return V2_5_0_0 default: return MinVersion } diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_response.go b/vendor/github.com/Shopify/sarama/offset_fetch_response.go index 9c64e0708..19449220f 100644 --- a/vendor/github.com/Shopify/sarama/offset_fetch_response.go +++ b/vendor/github.com/Shopify/sarama/offset_fetch_response.go @@ -8,6 +8,8 @@ type OffsetFetchResponseBlock struct { } func (b *OffsetFetchResponseBlock) decode(pd packetDecoder, version int16) (err error) { + isFlexible := version >= 6 + b.Offset, err = pd.getInt64() if err != nil { return err @@ -20,7 +22,11 @@ func (b *OffsetFetchResponseBlock) decode(pd packetDecoder, version int16) (err } } - b.Metadata, err = pd.getString() + if isFlexible { + b.Metadata, err = pd.getCompactString() + } else { + b.Metadata, err = pd.getString() + } if err != nil { return err } @@ -31,23 +37,37 @@ func (b *OffsetFetchResponseBlock) decode(pd packetDecoder, version int16) (err } b.Err = KError(tmp) + if isFlexible { + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + return nil } func (b *OffsetFetchResponseBlock) encode(pe packetEncoder, version int16) (err error) { + isFlexible := version >= 6 pe.putInt64(b.Offset) if version >= 5 
{ pe.putInt32(b.LeaderEpoch) } - - err = pe.putString(b.Metadata) + if isFlexible { + err = pe.putCompactString(b.Metadata) + } else { + err = pe.putString(b.Metadata) + } if err != nil { return err } pe.putInt16(int16(b.Err)) + if isFlexible { + pe.putEmptyTaggedFieldArray() + } + return nil } @@ -58,19 +78,37 @@ type OffsetFetchResponse struct { Err KError } -func (r *OffsetFetchResponse) encode(pe packetEncoder) error { +func (r *OffsetFetchResponse) encode(pe packetEncoder) (err error) { + isFlexible := r.Version >= 6 + if r.Version >= 3 { pe.putInt32(r.ThrottleTimeMs) } - - if err := pe.putArrayLength(len(r.Blocks)); err != nil { + if isFlexible { + pe.putCompactArrayLength(len(r.Blocks)) + } else { + err = pe.putArrayLength(len(r.Blocks)) + } + if err != nil { return err } + for topic, partitions := range r.Blocks { - if err := pe.putString(topic); err != nil { + if isFlexible { + err = pe.putCompactString(topic) + } else { + err = pe.putString(topic) + } + if err != nil { return err } - if err := pe.putArrayLength(len(partitions)); err != nil { + + if isFlexible { + pe.putCompactArrayLength(len(partitions)) + } else { + err = pe.putArrayLength(len(partitions)) + } + if err != nil { return err } for partition, block := range partitions { @@ -79,15 +117,22 @@ func (r *OffsetFetchResponse) encode(pe packetEncoder) error { return err } } + if isFlexible { + pe.putEmptyTaggedFieldArray() + } } if r.Version >= 2 { pe.putInt16(int16(r.Err)) } + if isFlexible { + pe.putEmptyTaggedFieldArray() + } return nil } func (r *OffsetFetchResponse) decode(pd packetDecoder, version int16) (err error) { r.Version = version + isFlexible := version >= 6 if version >= 3 { r.ThrottleTimeMs, err = pd.getInt32() @@ -96,7 +141,12 @@ func (r *OffsetFetchResponse) decode(pd packetDecoder, version int16) (err error } } - numTopics, err := pd.getArrayLength() + var numTopics int + if isFlexible { + numTopics, err = pd.getCompactArrayLength() + } else { + numTopics, err = pd.getArrayLength() + } if err != nil { return err } @@ -104,22 +154,30 @@ func (r *OffsetFetchResponse) decode(pd packetDecoder, version int16) (err error if numTopics > 0 { r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock, numTopics) for i := 0; i < numTopics; i++ { - name, err := pd.getString() + var name string + if isFlexible { + name, err = pd.getCompactString() + } else { + name, err = pd.getString() + } if err != nil { return err } - numBlocks, err := pd.getArrayLength() + var numBlocks int + if isFlexible { + numBlocks, err = pd.getCompactArrayLength() + } else { + numBlocks, err = pd.getArrayLength() + } if err != nil { return err } - if numBlocks == 0 { - r.Blocks[name] = nil - continue + r.Blocks[name] = nil + if numBlocks > 0 { + r.Blocks[name] = make(map[int32]*OffsetFetchResponseBlock, numBlocks) } - r.Blocks[name] = make(map[int32]*OffsetFetchResponseBlock, numBlocks) - for j := 0; j < numBlocks; j++ { id, err := pd.getInt32() if err != nil { @@ -131,8 +189,15 @@ func (r *OffsetFetchResponse) decode(pd packetDecoder, version int16) (err error if err != nil { return err } + r.Blocks[name][id] = block } + + if isFlexible { + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } } } @@ -144,6 +209,12 @@ func (r *OffsetFetchResponse) decode(pd packetDecoder, version int16) (err error r.Err = KError(kerr) } + if isFlexible { + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + return nil } @@ -156,6 +227,10 @@ func (r *OffsetFetchResponse) version() int16 { } func (r 
*OffsetFetchResponse) headerVersion() int16 { + if r.Version >= 6 { + return 1 + } + return 0 } @@ -171,6 +246,10 @@ func (r *OffsetFetchResponse) requiredVersion() KafkaVersion { return V2_0_0_0 case 5: return V2_1_0_0 + case 6: + return V2_4_0_0 + case 7: + return V2_5_0_0 default: return MinVersion } diff --git a/vendor/github.com/Shopify/sarama/offset_manager.go b/vendor/github.com/Shopify/sarama/offset_manager.go index b4fea8226..1ea15ff93 100644 --- a/vendor/github.com/Shopify/sarama/offset_manager.go +++ b/vendor/github.com/Shopify/sarama/offset_manager.go @@ -26,13 +26,15 @@ type OffsetManager interface { } type offsetManager struct { - client Client - conf *Config - group string - ticker *time.Ticker + client Client + conf *Config + group string + ticker *time.Ticker + sessionCanceler func() - memberID string - generation int32 + memberID string + groupInstanceId *string + generation int32 broker *Broker brokerLock sync.RWMutex @@ -48,10 +50,10 @@ type offsetManager struct { // NewOffsetManagerFromClient creates a new OffsetManager from the given client. // It is still necessary to call Close() on the underlying client when finished with the partition manager. func NewOffsetManagerFromClient(group string, client Client) (OffsetManager, error) { - return newOffsetManagerFromClient(group, "", GroupGenerationUndefined, client) + return newOffsetManagerFromClient(group, "", GroupGenerationUndefined, client, nil) } -func newOffsetManagerFromClient(group, memberID string, generation int32, client Client) (*offsetManager, error) { +func newOffsetManagerFromClient(group, memberID string, generation int32, client Client, sessionCanceler func()) (*offsetManager, error) { // Check that we are not dealing with a closed Client before processing any other arguments if client.Closed() { return nil, ErrClosedClient @@ -59,10 +61,11 @@ func newOffsetManagerFromClient(group, memberID string, generation int32, client conf := client.Config() om := &offsetManager{ - client: client, - conf: conf, - group: group, - poms: make(map[string]map[int32]*partitionOffsetManager), + client: client, + conf: conf, + group: group, + poms: make(map[string]map[int32]*partitionOffsetManager), + sessionCanceler: sessionCanceler, memberID: memberID, generation: generation, @@ -70,6 +73,9 @@ func newOffsetManagerFromClient(group, memberID string, generation int32, client closing: make(chan none), closed: make(chan none), } + if conf.Consumer.Group.InstanceId != "" { + om.groupInstanceId = &conf.Consumer.Group.InstanceId + } if conf.Consumer.Offsets.AutoCommit.Enable { om.ticker = time.NewTicker(conf.Consumer.Offsets.AutoCommit.Interval) go withRecover(om.mainLoop) @@ -113,10 +119,12 @@ func (om *offsetManager) Close() error { om.asyncClosePOMs() // flush one last time - for attempt := 0; attempt <= om.conf.Consumer.Offsets.Retry.Max; attempt++ { - om.flushToBroker() - if om.releasePOMs(false) == 0 { - break + if om.conf.Consumer.Offsets.AutoCommit.Enable { + for attempt := 0; attempt <= om.conf.Consumer.Offsets.Retry.Max; attempt++ { + om.flushToBroker() + if om.releasePOMs(false) == 0 { + break + } } } @@ -136,11 +144,11 @@ func (om *offsetManager) computeBackoff(retries int) time.Duration { } } -func (om *offsetManager) fetchInitialOffset(topic string, partition int32, retries int) (int64, string, error) { +func (om *offsetManager) fetchInitialOffset(topic string, partition int32, retries int) (int64, int32, string, error) { broker, err := om.coordinator() if err != nil { if retries <= 0 { - return 0, "", err + return 
0, 0, "", err } return om.fetchInitialOffset(topic, partition, retries-1) } @@ -153,7 +161,7 @@ func (om *offsetManager) fetchInitialOffset(topic string, partition int32, retri resp, err := broker.FetchOffset(req) if err != nil { if retries <= 0 { - return 0, "", err + return 0, 0, "", err } om.releaseCoordinator(broker) return om.fetchInitialOffset(topic, partition, retries-1) @@ -161,31 +169,31 @@ func (om *offsetManager) fetchInitialOffset(topic string, partition int32, retri block := resp.GetBlock(topic, partition) if block == nil { - return 0, "", ErrIncompleteResponse + return 0, 0, "", ErrIncompleteResponse } switch block.Err { case ErrNoError: - return block.Offset, block.Metadata, nil + return block.Offset, block.LeaderEpoch, block.Metadata, nil case ErrNotCoordinatorForConsumer: if retries <= 0 { - return 0, "", block.Err + return 0, 0, "", block.Err } om.releaseCoordinator(broker) return om.fetchInitialOffset(topic, partition, retries-1) case ErrOffsetsLoadInProgress: if retries <= 0 { - return 0, "", block.Err + return 0, 0, "", block.Err } backoff := om.computeBackoff(retries) select { case <-om.closing: - return 0, "", block.Err + return 0, 0, "", block.Err case <-time.After(backoff): } return om.fetchInitialOffset(topic, partition, retries-1) default: - return 0, "", block.Err + return 0, 0, "", block.Err } } @@ -296,12 +304,17 @@ func (om *offsetManager) constructRequest() *OffsetCommitRequest { for _, pom := range topicManagers { pom.lock.Lock() if pom.dirty { - r.AddBlock(pom.topic, pom.partition, pom.offset, perPartitionTimestamp, pom.metadata) + r.AddBlock(pom.topic, pom.partition, pom.offset, pom.leaderEpoch, perPartitionTimestamp, pom.metadata) } pom.lock.Unlock() } } + if om.groupInstanceId != nil { + r.Version = 7 + r.GroupInstanceId = om.groupInstanceId + } + if len(r.blocks) > 0 { return r } @@ -344,6 +357,10 @@ func (om *offsetManager) handleResponse(broker *Broker, req *OffsetCommitRequest pom.handleError(err) case ErrOffsetsLoadInProgress: // nothing wrong but we didn't commit, we'll get it next time round + case ErrFencedInstancedId: + pom.handleError(err) + // TODO: close the whole consumer when the group instance is fenced + om.tryCancelSession() case ErrUnknownTopicOrPartition: // let the user know *and* try redispatching - if topic-auto-create is // enabled, redispatching should trigger a metadata req and create the @@ -418,6 +435,12 @@ func (om *offsetManager) findPOM(topic string, partition int32) *partitionOffset return nil } +func (om *offsetManager) tryCancelSession() { + if om.sessionCanceler != nil { + om.sessionCanceler() + } +} + // Partition Offset Manager // PartitionOffsetManager uses Kafka to store and fetch consumed partition offsets.
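// With this change, each partition's manager also records the leader epoch
// of its committed offset (OffsetCommit v6+), so the epoch stored broker-side
// can later help detect log truncation after a leader change (KIP-320).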
You MUST call Close() @@ -474,9 +497,10 @@ type PartitionOffsetManager interface { } type partitionOffsetManager struct { - parent *offsetManager - topic string - partition int32 + parent *offsetManager + topic string + partition int32 + leaderEpoch int32 lock sync.Mutex offset int64 @@ -489,18 +513,19 @@ type partitionOffsetManager struct { } func (om *offsetManager) newPartitionOffsetManager(topic string, partition int32) (*partitionOffsetManager, error) { - offset, metadata, err := om.fetchInitialOffset(topic, partition, om.conf.Metadata.Retry.Max) + offset, leaderEpoch, metadata, err := om.fetchInitialOffset(topic, partition, om.conf.Metadata.Retry.Max) if err != nil { return nil, err } return &partitionOffsetManager{ - parent: om, - topic: topic, - partition: partition, - errors: make(chan *ConsumerError, om.conf.ChannelBufferSize), - offset: offset, - metadata: metadata, + parent: om, + topic: topic, + partition: partition, + leaderEpoch: leaderEpoch, + errors: make(chan *ConsumerError, om.conf.ChannelBufferSize), + offset: offset, + metadata: metadata, }, nil } diff --git a/vendor/github.com/Shopify/sarama/offset_request.go b/vendor/github.com/Shopify/sarama/offset_request.go index c0b3305f6..4c9ce4df5 100644 --- a/vendor/github.com/Shopify/sarama/offset_request.go +++ b/vendor/github.com/Shopify/sarama/offset_request.go @@ -28,6 +28,7 @@ func (b *offsetRequestBlock) decode(pd packetDecoder, version int16) (err error) type OffsetRequest struct { Version int16 + IsolationLevel IsolationLevel replicaID int32 isReplicaIDSet bool blocks map[string]map[int32]*offsetRequestBlock @@ -41,6 +42,10 @@ func (r *OffsetRequest) encode(pe packetEncoder) error { pe.putInt32(-1) } + if r.Version >= 2 { + pe.putBool(r.IsolationLevel == ReadCommitted) + } + err := pe.putArrayLength(len(r.blocks)) if err != nil { return err @@ -75,6 +80,18 @@ func (r *OffsetRequest) decode(pd packetDecoder, version int16) error { r.SetReplicaID(replicaID) } + if r.Version >= 2 { + tmp, err := pd.getBool() + if err != nil { + return err + } + + r.IsolationLevel = ReadUncommitted + if tmp { + r.IsolationLevel = ReadCommitted + } + } + blockCount, err := pd.getArrayLength() if err != nil { return err @@ -124,6 +141,8 @@ func (r *OffsetRequest) requiredVersion() KafkaVersion { switch r.Version { case 1: return V0_10_1_0 + case 2: + return V0_11_0_0 default: return MinVersion } diff --git a/vendor/github.com/Shopify/sarama/offset_response.go b/vendor/github.com/Shopify/sarama/offset_response.go index ead3ebbcc..ffe84664c 100644 --- a/vendor/github.com/Shopify/sarama/offset_response.go +++ b/vendor/github.com/Shopify/sarama/offset_response.go @@ -50,11 +50,19 @@ func (b *OffsetResponseBlock) encode(pe packetEncoder, version int16) (err error } type OffsetResponse struct { - Version int16 - Blocks map[string]map[int32]*OffsetResponseBlock + Version int16 + ThrottleTimeMs int32 + Blocks map[string]map[int32]*OffsetResponseBlock } func (r *OffsetResponse) decode(pd packetDecoder, version int16) (err error) { + if version >= 2 { + r.ThrottleTimeMs, err = pd.getInt32() + if err != nil { + return err + } + } + numTopics, err := pd.getArrayLength() if err != nil { return err @@ -117,9 +125,12 @@ func (r *OffsetResponse) GetBlock(topic string, partition int32) *OffsetResponse 105 99 0 0 0 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 1 1 1] - */ func (r *OffsetResponse) encode(pe packetEncoder) (err error) { + if r.Version >= 2 { + pe.putInt32(r.ThrottleTimeMs) + } + if err = pe.putArrayLength(len(r.Blocks)); err != nil { return err } @@ -158,6 
+169,8 @@ func (r *OffsetResponse) requiredVersion() KafkaVersion { switch r.Version { case 1: return V0_10_1_0 + case 2: + return V0_11_0_0 default: return MinVersion } diff --git a/vendor/github.com/Shopify/sarama/packet_decoder.go b/vendor/github.com/Shopify/sarama/packet_decoder.go index ed00ba350..b8cae5350 100644 --- a/vendor/github.com/Shopify/sarama/packet_decoder.go +++ b/vendor/github.com/Shopify/sarama/packet_decoder.go @@ -1,5 +1,7 @@ package sarama +import "github.com/rcrowley/go-metrics" + // PacketDecoder is the interface providing helpers for reading with Kafka's encoding rules. // Types implementing Decoder only need to worry about calling methods like GetString, // not about how a string is represented in Kafka. @@ -11,6 +13,7 @@ type packetDecoder interface { getInt64() (int64, error) getVarint() (int64, error) getUVarint() (uint64, error) + getFloat64() (float64, error) getArrayLength() (int, error) getCompactArrayLength() (int, error) getBool() (bool, error) @@ -19,6 +22,7 @@ type packetDecoder interface { // Collections getBytes() ([]byte, error) getVarintBytes() ([]byte, error) + getCompactBytes() ([]byte, error) getRawBytes(length int) ([]byte, error) getString() (string, error) getNullableString() (*string, error) @@ -38,6 +42,9 @@ type packetDecoder interface { // Stacks, see PushDecoder push(in pushDecoder) error pop() error + + // To record metrics when provided + metricRegistry() metrics.Registry } // PushDecoder is the interface for decoding fields like CRCs and lengths where the validity diff --git a/vendor/github.com/Shopify/sarama/packet_encoder.go b/vendor/github.com/Shopify/sarama/packet_encoder.go index 50c735c04..5016e09a6 100644 --- a/vendor/github.com/Shopify/sarama/packet_encoder.go +++ b/vendor/github.com/Shopify/sarama/packet_encoder.go @@ -13,6 +13,7 @@ type packetEncoder interface { putInt64(in int64) putVarint(in int64) putUVarint(in uint64) + putFloat64(in float64) putCompactArrayLength(in int) putArrayLength(in int) error putBool(in bool) @@ -20,6 +21,7 @@ type packetEncoder interface { // Collections putBytes(in []byte) error putVarintBytes(in []byte) error + putCompactBytes(in []byte) error putRawBytes(in []byte) error putCompactString(in string) error putNullableCompactString(in *string) error diff --git a/vendor/github.com/Shopify/sarama/partitioner.go b/vendor/github.com/Shopify/sarama/partitioner.go index 6a708e729..57377760a 100644 --- a/vendor/github.com/Shopify/sarama/partitioner.go +++ b/vendor/github.com/Shopify/sarama/partitioner.go @@ -42,7 +42,7 @@ type PartitionerConstructor func(topic string) Partitioner type manualPartitioner struct{} -// HashPartitionOption lets you modify default values of the partitioner +// HashPartitionerOption lets you modify default values of the partitioner type HashPartitionerOption func(*hashPartitioner) // WithAbsFirst means that the partitioner handles absolute values @@ -61,9 +61,9 @@ func WithCustomHashFunction(hasher func() hash.Hash32) HashPartitionerOption { } // WithCustomFallbackPartitioner lets you specify what HashPartitioner should be used in case a Distribution Key is empty -func WithCustomFallbackPartitioner(randomHP *hashPartitioner) HashPartitionerOption { +func WithCustomFallbackPartitioner(randomHP Partitioner) HashPartitionerOption { return func(hp *hashPartitioner) { - hp.random = hp + hp.random = randomHP } } @@ -169,7 +169,7 @@ func NewHashPartitioner(topic string) Partitioner { // NewReferenceHashPartitioner is like NewHashPartitioner except that it handles absolute values // 
in the same way as the reference Java implementation. NewHashPartitioner was supposed to do -// that but it had a mistake and now there are people depending on both behaviours. This will +// that but it had a mistake and now there are people depending on both behaviors. This will // all go away on the next major version bump. func NewReferenceHashPartitioner(topic string) Partitioner { p := new(hashPartitioner) diff --git a/vendor/github.com/Shopify/sarama/prep_encoder.go b/vendor/github.com/Shopify/sarama/prep_encoder.go index 827542c50..1602fcb3f 100644 --- a/vendor/github.com/Shopify/sarama/prep_encoder.go +++ b/vendor/github.com/Shopify/sarama/prep_encoder.go @@ -42,6 +42,10 @@ func (pe *prepEncoder) putUVarint(in uint64) { pe.length += binary.PutUvarint(buf[:], in) } +func (pe *prepEncoder) putFloat64(in float64) { + pe.length += 8 +} + func (pe *prepEncoder) putArrayLength(in int) error { if in > math.MaxInt32 { return PacketEncodingError{fmt.Sprintf("array too long (%d)", in)} @@ -77,6 +81,11 @@ func (pe *prepEncoder) putVarintBytes(in []byte) error { return pe.putRawBytes(in) } +func (pe *prepEncoder) putCompactBytes(in []byte) error { + pe.putUVarint(uint64(len(in) + 1)) + return pe.putRawBytes(in) +} + func (pe *prepEncoder) putCompactString(in string) error { pe.putCompactArrayLength(len(in)) return pe.putRawBytes([]byte(in)) diff --git a/vendor/github.com/Shopify/sarama/produce_set.go b/vendor/github.com/Shopify/sarama/produce_set.go index 9c70f8180..8d6980479 100644 --- a/vendor/github.com/Shopify/sarama/produce_set.go +++ b/vendor/github.com/Shopify/sarama/produce_set.go @@ -137,6 +137,9 @@ func (ps *produceSet) buildRequest() *ProduceRequest { } if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { req.Version = 3 + if ps.parent.IsTransactional() { + req.TransactionalID = &ps.parent.conf.Producer.Transaction.ID + } } if ps.parent.conf.Producer.Compression == CompressionZSTD && ps.parent.conf.Version.IsAtLeast(V2_1_0_0) { @@ -160,6 +163,10 @@ func (ps *produceSet) buildRequest() *ProduceRequest { record.OffsetDelta = int64(i) } } + + // Set the batch as transactional when a transactionalID is set + rb.IsTransactional = ps.parent.IsTransactional() + req.AddBatch(topic, partition, rb) continue } @@ -181,7 +188,7 @@ func (ps *produceSet) buildRequest() *ProduceRequest { msg.Offset = int64(i) } } - payload, err := encode(set.recordsToSend.MsgSet, ps.parent.conf.MetricRegistry) + payload, err := encode(set.recordsToSend.MsgSet, ps.parent.metricsRegistry) if err != nil { Logger.Println(err) // if this happens, it's basically our fault. panic(err) @@ -235,11 +242,11 @@ func (ps *produceSet) wouldOverflow(msg *ProducerMessage) bool { switch { // Would we overflow our maximum possible size-on-the-wire? 10KiB is arbitrary overhead for safety. - case ps.bufferBytes+msg.byteSize(version) >= int(MaxRequestSize-(10*1024)): + case ps.bufferBytes+msg.ByteSize(version) >= int(MaxRequestSize-(10*1024)): return true // Would we overflow the size-limit of a message-batch for this partition? case ps.msgs[msg.Topic] != nil && ps.msgs[msg.Topic][msg.Partition] != nil && - ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.byteSize(version) >= ps.parent.conf.Producer.MaxMessageBytes: + ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.ByteSize(version) >= ps.parent.conf.Producer.MaxMessageBytes: return true // Would we overflow simply in number of messages? 
case ps.parent.conf.Producer.Flush.MaxMessages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.MaxMessages: diff --git a/vendor/github.com/Shopify/sarama/quota_types.go b/vendor/github.com/Shopify/sarama/quota_types.go new file mode 100644 index 000000000..4f33af0bc --- /dev/null +++ b/vendor/github.com/Shopify/sarama/quota_types.go @@ -0,0 +1,21 @@ +package sarama + +type ( + QuotaEntityType string + + QuotaMatchType int +) + +// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/quota/ClientQuotaEntity.java +const ( + QuotaEntityUser QuotaEntityType = "user" + QuotaEntityClientID QuotaEntityType = "client-id" + QuotaEntityIP QuotaEntityType = "ip" +) + +// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/requests/DescribeClientQuotasRequest.java +const ( + QuotaMatchExact QuotaMatchType = iota + QuotaMatchDefault + QuotaMatchAny +) diff --git a/vendor/github.com/Shopify/sarama/real_decoder.go b/vendor/github.com/Shopify/sarama/real_decoder.go index 8ac576db2..7e37641f9 100644 --- a/vendor/github.com/Shopify/sarama/real_decoder.go +++ b/vendor/github.com/Shopify/sarama/real_decoder.go @@ -3,20 +3,24 @@ package sarama import ( "encoding/binary" "math" + + "github.com/rcrowley/go-metrics" ) -var errInvalidArrayLength = PacketDecodingError{"invalid array length"} -var errInvalidByteSliceLength = PacketDecodingError{"invalid byteslice length"} -var errInvalidStringLength = PacketDecodingError{"invalid string length"} -var errVarintOverflow = PacketDecodingError{"varint overflow"} -var errUVarintOverflow = PacketDecodingError{"uvarint overflow"} -var errInvalidBool = PacketDecodingError{"invalid bool"} -var errUnsupportedTaggedFields = PacketDecodingError{"non-empty tagged fields are not supported yet"} +var ( + errInvalidArrayLength = PacketDecodingError{"invalid array length"} + errInvalidByteSliceLength = PacketDecodingError{"invalid byteslice length"} + errInvalidStringLength = PacketDecodingError{"invalid string length"} + errVarintOverflow = PacketDecodingError{"varint overflow"} + errUVarintOverflow = PacketDecodingError{"uvarint overflow"} + errInvalidBool = PacketDecodingError{"invalid bool"} +) type realDecoder struct { - raw []byte - off int - stack []pushDecoder + raw []byte + off int + stack []pushDecoder + registry metrics.Registry } // primitives @@ -91,6 +95,16 @@ func (rd *realDecoder) getUVarint() (uint64, error) { return tmp, nil } +func (rd *realDecoder) getFloat64() (float64, error) { + if rd.remaining() < 8 { + rd.off = len(rd.raw) + return -1, ErrInsufficientData + } + tmp := math.Float64frombits(binary.BigEndian.Uint64(rd.raw[rd.off:])) + rd.off += 8 + return tmp, nil +} + func (rd *realDecoder) getArrayLength() (int, error) { if rd.remaining() < 4 { rd.off = len(rd.raw) @@ -137,8 +151,21 @@ func (rd *realDecoder) getEmptyTaggedFieldArray() (int, error) { return 0, err } - if tagCount != 0 { - return 0, errUnsupportedTaggedFields + // skip over any tagged fields without deserializing them + // as we don't currently support doing anything with them + for i := uint64(0); i < tagCount; i++ { + // fetch and ignore tag identifier + _, err := rd.getUVarint() + if err != nil { + return 0, err + } + length, err := rd.getUVarint() + if err != nil { + return 0, err + } + if _, err := rd.getRawBytes(int(length)); err != nil { + return 0, err + } } return 0, nil @@ -170,6 +197,16 @@ func (rd *realDecoder) getVarintBytes() ([]byte, error) { return rd.getRawBytes(int(tmp)) } +func (rd 
*realDecoder) getCompactBytes() ([]byte, error) { + n, err := rd.getUVarint() + if err != nil { + return nil, err + } + + length := int(n - 1) + return rd.getRawBytes(length) +} + func (rd *realDecoder) getStringLength() (int, error) { length, err := rd.getInt16() if err != nil { @@ -217,8 +254,10 @@ func (rd *realDecoder) getCompactString() (string, error) { return "", err } - var length = int(n - 1) - + length := int(n - 1) + if length < 0 { + return "", errInvalidByteSliceLength + } tmpStr := string(rd.raw[rd.off : rd.off+length]) rd.off += length return tmpStr, nil @@ -226,12 +265,11 @@ func (rd *realDecoder) getCompactString() (string, error) { func (rd *realDecoder) getCompactNullableString() (*string, error) { n, err := rd.getUVarint() - if err != nil { return nil, err } - var length = int(n - 1) + length := int(n - 1) if length < 0 { return nil, err @@ -424,3 +462,7 @@ func (rd *realDecoder) pop() error { return in.check(rd.off, rd.raw) } + +func (rd *realDecoder) metricRegistry() metrics.Registry { + return rd.registry +} diff --git a/vendor/github.com/Shopify/sarama/real_encoder.go b/vendor/github.com/Shopify/sarama/real_encoder.go index ba073f7d3..d6a0ddf12 100644 --- a/vendor/github.com/Shopify/sarama/real_encoder.go +++ b/vendor/github.com/Shopify/sarama/real_encoder.go @@ -3,6 +3,7 @@ package sarama import ( "encoding/binary" "errors" + "math" "github.com/rcrowley/go-metrics" ) @@ -44,6 +45,11 @@ func (re *realEncoder) putUVarint(in uint64) { re.off += binary.PutUvarint(re.raw[re.off:], in) } +func (re *realEncoder) putFloat64(in float64) { + binary.BigEndian.PutUint64(re.raw[re.off:], math.Float64bits(in)) + re.off += 8 +} + func (re *realEncoder) putArrayLength(in int) error { re.putInt32(int32(in)) return nil @@ -88,6 +94,11 @@ func (re *realEncoder) putVarintBytes(in []byte) error { return re.putRawBytes(in) } +func (re *realEncoder) putCompactBytes(in []byte) error { + re.putUVarint(uint64(len(in) + 1)) + return re.putRawBytes(in) +} + func (re *realEncoder) putCompactString(in string) error { re.putCompactArrayLength(len(in)) return re.putRawBytes([]byte(in)) diff --git a/vendor/github.com/Shopify/sarama/record.go b/vendor/github.com/Shopify/sarama/record.go index cdccfe322..a3fe8c061 100644 --- a/vendor/github.com/Shopify/sarama/record.go +++ b/vendor/github.com/Shopify/sarama/record.go @@ -11,7 +11,7 @@ const ( maximumRecordOverhead = 5*binary.MaxVarintLen32 + binary.MaxVarintLen64 + 1 ) -//RecordHeader stores key and value for a record header +// RecordHeader stores key and value for a record header type RecordHeader struct { Key []byte Value []byte @@ -35,7 +35,7 @@ func (h *RecordHeader) decode(pd packetDecoder) (err error) { return nil } -//Record is kafka record type +// Record is kafka record type type Record struct { Headers []*RecordHeader diff --git a/vendor/github.com/Shopify/sarama/record_batch.go b/vendor/github.com/Shopify/sarama/record_batch.go index c653763ec..d382ca488 100644 --- a/vendor/github.com/Shopify/sarama/record_batch.go +++ b/vendor/github.com/Shopify/sarama/record_batch.go @@ -1,6 +1,7 @@ package sarama import ( + "errors" "fmt" "time" ) @@ -167,7 +168,7 @@ func (b *RecordBatch) decode(pd packetDecoder) (err error) { bufSize := int(batchLen) - recordBatchOverhead recBuffer, err := pd.getRawBytes(bufSize) if err != nil { - if err == ErrInsufficientData { + if errors.Is(err, ErrInsufficientData) { b.PartialTrailingRecord = true b.Records = nil return nil @@ -185,8 +186,8 @@ func (b *RecordBatch) decode(pd packetDecoder) (err error) { } 
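// Note: like the short read handled above, the records array decoded below
// may be cut off mid-batch; that case is surfaced as PartialTrailingRecord
// rather than as an error, since a fetch response may legitimately end with
// a truncated batch.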
b.recordsLen = len(recBuffer) - err = decode(recBuffer, recordsArray(b.Records)) - if err == ErrInsufficientData { + err = decode(recBuffer, recordsArray(b.Records), nil) + if errors.Is(err, ErrInsufficientData) { b.PartialTrailingRecord = true b.Records = nil return nil diff --git a/vendor/github.com/Shopify/sarama/records.go b/vendor/github.com/Shopify/sarama/records.go index f4c5e95f1..d0c45d7f3 100644 --- a/vendor/github.com/Shopify/sarama/records.go +++ b/vendor/github.com/Shopify/sarama/records.go @@ -183,12 +183,27 @@ func (r *Records) isOverflow() (bool, error) { return false, fmt.Errorf("unknown records type: %v", r.recordsType) } +func (r *Records) recordsOffset() (*int64, error) { + switch r.recordsType { + case unknownRecords: + return nil, nil + case legacyRecords: + return nil, nil + case defaultRecords: + if r.RecordBatch == nil { + return nil, nil + } + return &r.RecordBatch.FirstOffset, nil + } + return nil, fmt.Errorf("unknown records type: %v", r.recordsType) +} + func magicValue(pd packetDecoder) (int8, error) { return pd.peekInt8(magicOffset) } func (r *Records) getControlRecord() (ControlRecord, error) { - if r.RecordBatch == nil || len(r.RecordBatch.Records) <= 0 { + if r.RecordBatch == nil || len(r.RecordBatch.Records) == 0 { return ControlRecord{}, fmt.Errorf("cannot get control record, record batch is empty") } diff --git a/vendor/github.com/Shopify/sarama/request.go b/vendor/github.com/Shopify/sarama/request.go index dcfd3946c..1e3923de7 100644 --- a/vendor/github.com/Shopify/sarama/request.go +++ b/vendor/github.com/Shopify/sarama/request.go @@ -109,7 +109,7 @@ func decodeRequest(r io.Reader) (*request, int, error) { bytesRead += len(encodedReq) req := &request{} - if err := decode(encodedReq, req); err != nil { + if err := decode(encodedReq, req, nil); err != nil { return nil, bytesRead, err } @@ -125,11 +125,11 @@ func allocateBody(key, version int16) protocolBody { case 2: return &OffsetRequest{Version: version} case 3: - return &MetadataRequest{} + return &MetadataRequest{Version: version} case 8: return &OffsetCommitRequest{Version: version} case 9: - return &OffsetFetchRequest{} + return &OffsetFetchRequest{Version: version} case 10: return &FindCoordinatorRequest{} case 11: @@ -147,7 +147,7 @@ func allocateBody(key, version int16) protocolBody { case 17: return &SaslHandshakeRequest{} case 18: - return &ApiVersionsRequest{} + return &ApiVersionsRequest{Version: version} case 19: return &CreateTopicsRequest{} case 20: @@ -155,7 +155,7 @@ func allocateBody(key, version int16) protocolBody { case 21: return &DeleteRecordsRequest{} case 22: - return &InitProducerIDRequest{} + return &InitProducerIDRequest{Version: version} case 24: return &AddPartitionsToTxnRequest{} case 25: @@ -182,10 +182,22 @@ func allocateBody(key, version int16) protocolBody { return &CreatePartitionsRequest{} case 42: return &DeleteGroupsRequest{} + case 44: + return &IncrementalAlterConfigsRequest{} case 45: return &AlterPartitionReassignmentsRequest{} case 46: return &ListPartitionReassignmentsRequest{} + case 47: + return &DeleteOffsetsRequest{} + case 48: + return &DescribeClientQuotasRequest{} + case 49: + return &AlterClientQuotasRequest{} + case 50: + return &DescribeUserScramCredentialsRequest{} + case 51: + return &AlterUserScramCredentialsRequest{} } return nil } diff --git a/vendor/github.com/Shopify/sarama/response_header.go b/vendor/github.com/Shopify/sarama/response_header.go index 5dffb75be..4ced93c13 100644 --- a/vendor/github.com/Shopify/sarama/response_header.go +++ 
b/vendor/github.com/Shopify/sarama/response_header.go @@ -2,9 +2,6 @@ package sarama import "fmt" -const responseLengthSize = 4 -const correlationIDSize = 4 - type responseHeader struct { length int32 correlationID int32 diff --git a/vendor/github.com/Shopify/sarama/sarama.go b/vendor/github.com/Shopify/sarama/sarama.go index 48f362d28..a42bc075a 100644 --- a/vendor/github.com/Shopify/sarama/sarama.go +++ b/vendor/github.com/Shopify/sarama/sarama.go @@ -22,28 +22,32 @@ Metrics are exposed through https://github.com/rcrowley/go-metrics library in a Broker related metrics: - +----------------------------------------------+------------+---------------------------------------------------------------+ - | Name | Type | Description | - +----------------------------------------------+------------+---------------------------------------------------------------+ - | incoming-byte-rate | meter | Bytes/second read off all brokers | - | incoming-byte-rate-for-broker- | meter | Bytes/second read off a given broker | - | outgoing-byte-rate | meter | Bytes/second written off all brokers | - | outgoing-byte-rate-for-broker- | meter | Bytes/second written off a given broker | - | request-rate | meter | Requests/second sent to all brokers | - | request-rate-for-broker- | meter | Requests/second sent to a given broker | - | request-size | histogram | Distribution of the request size in bytes for all brokers | - | request-size-for-broker- | histogram | Distribution of the request size in bytes for a given broker | - | request-latency-in-ms | histogram | Distribution of the request latency in ms for all brokers | - | request-latency-in-ms-for-broker- | histogram | Distribution of the request latency in ms for a given broker | - | response-rate | meter | Responses/second received from all brokers | - | response-rate-for-broker- | meter | Responses/second received from a given broker | - | response-size | histogram | Distribution of the response size in bytes for all brokers | - | response-size-for-broker- | histogram | Distribution of the response size in bytes for a given broker | - | requests-in-flight | counter | The current number of in-flight requests awaiting a response | - | | | for all brokers | - | requests-in-flight-for-broker- | counter | The current number of in-flight requests awaiting a response | - | | | for a given broker | - +----------------------------------------------+------------+---------------------------------------------------------------+ + +---------------------------------------------------------+------------+---------------------------------------------------------------+ + | Name | Type | Description | + +---------------------------------------------------------+------------+---------------------------------------------------------------+ + | incoming-byte-rate | meter | Bytes/second read off all brokers | + | incoming-byte-rate-for-broker- | meter | Bytes/second read off a given broker | + | outgoing-byte-rate | meter | Bytes/second written off all brokers | + | outgoing-byte-rate-for-broker- | meter | Bytes/second written off a given broker | + | request-rate | meter | Requests/second sent to all brokers | + | request-rate-for-broker- | meter | Requests/second sent to a given broker | + | request-size | histogram | Distribution of the request size in bytes for all brokers | + | request-size-for-broker- | histogram | Distribution of the request size in bytes for a given broker | + | request-latency-in-ms | histogram | Distribution of the request latency in ms for all brokers | + 
| request-latency-in-ms-for-broker- | histogram | Distribution of the request latency in ms for a given broker | + | response-rate | meter | Responses/second received from all brokers | + | response-rate-for-broker- | meter | Responses/second received from a given broker | + | response-size | histogram | Distribution of the response size in bytes for all brokers | + | response-size-for-broker- | histogram | Distribution of the response size in bytes for a given broker | + | requests-in-flight | counter | The current number of in-flight requests awaiting a response | + | | | for all brokers | + | requests-in-flight-for-broker- | counter | The current number of in-flight requests awaiting a response | + | | | for a given broker | + | protocol-requests-rate- | meter | Number of api requests sent to the brokers for all brokers | + | | | https://kafka.apache.org/protocol.html#protocol_api_keys | | + | protocol-requests-rate--for-broker- | meter | Number of packets sent to the brokers by api-key for a given | + | | | broker | + +---------------------------------------------------------+------------+---------------------------------------------------------------+ Note that we do not gather specific metrics for seed brokers but they are part of the "all brokers" metrics. @@ -68,13 +72,20 @@ Consumer related metrics: | Name | Type | Description | +-------------------------------------------+------------+--------------------------------------------------------------------------------------+ | consumer-batch-size | histogram | Distribution of the number of messages in a batch | + | consumer-fetch-rate | meter | Fetch requests/second sent to all brokers | + | consumer-fetch-rate-for-broker- | meter | Fetch requests/second sent to a given broker | + | consumer-fetch-rate-for-topic- | meter | Fetch requests/second sent for a given topic | + | consumer-fetch-response-size | histogram | Distribution of the fetch response size in bytes | + | consumer-group-join-total- | counter | Total count of consumer group join attempts | + | consumer-group-join-failed- | counter | Total count of consumer group join failures | + | consumer-group-sync-total- | counter | Total count of consumer group sync attempts | + | consumer-group-sync-failed- | counter | Total count of consumer group sync failures | +-------------------------------------------+------------+--------------------------------------------------------------------------------------+ - */ package sarama import ( - "io/ioutil" + "io" "log" ) @@ -82,7 +93,7 @@ var ( // Logger is the instance of a StdLogger interface that Sarama writes connection // management events to. By default it is set to discard all log messages via ioutil.Discard, // but you can set it to redirect wherever you want. - Logger StdLogger = log.New(ioutil.Discard, "[Sarama] ", log.LstdFlags) + Logger StdLogger = log.New(io.Discard, "[Sarama] ", log.LstdFlags) // PanicHandler is called for recovering from panics spawned internally to the library (and thus // not recoverable by the caller's goroutine). Defaults to nil, which means panics are not recovered. @@ -108,3 +119,21 @@ type StdLogger interface { Printf(format string, v ...interface{}) Println(v ...interface{}) } + +type debugLogger struct{} + +func (d *debugLogger) Print(v ...interface{}) { + Logger.Print(v...) +} +func (d *debugLogger) Printf(format string, v ...interface{}) { + Logger.Printf(format, v...) +} +func (d *debugLogger) Println(v ...interface{}) { + Logger.Println(v...) 
+} + +// DebugLogger is the instance of a StdLogger that Sarama writes more verbose +// debug information to. By default it is set to redirect all debug to the +// default Logger above, but you can optionally set it to another StdLogger +// instance (e.g., to discard the debug information entirely). +var DebugLogger StdLogger = &debugLogger{} diff --git a/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go b/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go index 90504df6f..5bb0988ea 100644 --- a/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go +++ b/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go @@ -1,6 +1,8 @@ package sarama type SaslAuthenticateRequest struct { + // Version defines the protocol version to use for encode and decode + Version int16 SaslAuthBytes []byte } @@ -12,6 +14,7 @@ func (r *SaslAuthenticateRequest) encode(pe packetEncoder) error { } func (r *SaslAuthenticateRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version r.SaslAuthBytes, err = pd.getBytes() return err } @@ -21,7 +24,7 @@ func (r *SaslAuthenticateRequest) key() int16 { } func (r *SaslAuthenticateRequest) version() int16 { - return 0 + return r.Version } func (r *SaslAuthenticateRequest) headerVersion() int16 { @@ -29,5 +32,10 @@ func (r *SaslAuthenticateRequest) requiredVersion() KafkaVersion { - return V1_0_0_0 + switch r.Version { + case 1: + return V2_2_0_0 + default: + return V1_0_0_0 + } } diff --git a/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go b/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go index 3ef57b5af..37c8e45da 100644 --- a/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go +++ b/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go @@ -1,9 +1,12 @@ package sarama type SaslAuthenticateResponse struct { - Err KError - ErrorMessage *string - SaslAuthBytes []byte + // Version defines the protocol version to use for encode and decode + Version int16 + Err KError + ErrorMessage *string + SaslAuthBytes []byte + SessionLifetimeMs int64 } func (r *SaslAuthenticateResponse) encode(pe packetEncoder) error { @@ -11,10 +14,17 @@ func (r *SaslAuthenticateResponse) encode(pe packetEncoder) error { if err := pe.putNullableString(r.ErrorMessage); err != nil { return err } - return pe.putBytes(r.SaslAuthBytes) + if err := pe.putBytes(r.SaslAuthBytes); err != nil { + return err + } + if r.Version > 0 { + pe.putInt64(r.SessionLifetimeMs) + } + return nil } func (r *SaslAuthenticateResponse) decode(pd packetDecoder, version int16) error { + r.Version = version kerr, err := pd.getInt16() if err != nil { return err @@ -26,7 +36,13 @@ func (r *SaslAuthenticateResponse) decode(pd packetDecoder, version int16) error return err } - r.SaslAuthBytes, err = pd.getBytes() + if r.SaslAuthBytes, err = pd.getBytes(); err != nil { + return err + } + + if version > 0 { + r.SessionLifetimeMs, err = pd.getInt64() + } return err } @@ -36,7 +52,7 @@ func (r *SaslAuthenticateResponse) key() int16 { } func (r *SaslAuthenticateResponse) version() int16 { - return 0 + return r.Version } func (r *SaslAuthenticateResponse) headerVersion() int16 { @@ -44,5 +60,10 @@ func (r *SaslAuthenticateResponse) requiredVersion() KafkaVersion { - return V1_0_0_0 + switch r.Version { + case 1: + return V2_2_0_0 + default: + return V1_0_0_0 + } } diff --git a/vendor/github.com/Shopify/sarama/scram_formatter.go
b/vendor/github.com/Shopify/sarama/scram_formatter.go new file mode 100644 index 000000000..2af9e4a69 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/scram_formatter.go @@ -0,0 +1,78 @@ +package sarama + +import ( + "crypto/hmac" + "crypto/sha256" + "crypto/sha512" + "hash" +) + +// ScramFormatter implementation +// @see: https://github.com/apache/kafka/blob/99b9b3e84f4e98c3f07714e1de6a139a004cbc5b/clients/src/main/java/org/apache/kafka/common/security/scram/internals/ScramFormatter.java#L93 +type scramFormatter struct { + mechanism ScramMechanismType +} + +func (s scramFormatter) mac(key []byte) (hash.Hash, error) { + var m hash.Hash + + switch s.mechanism { + case SCRAM_MECHANISM_SHA_256: + m = hmac.New(sha256.New, key) + + case SCRAM_MECHANISM_SHA_512: + m = hmac.New(sha512.New, key) + default: + return nil, ErrUnknownScramMechanism + } + + return m, nil +} + +func (s scramFormatter) hmac(key []byte, extra []byte) ([]byte, error) { + mac, err := s.mac(key) + if err != nil { + return nil, err + } + + if _, err := mac.Write(extra); err != nil { + return nil, err + } + return mac.Sum(nil), nil +} + +func (s scramFormatter) xor(result []byte, second []byte) { + for i := 0; i < len(result); i++ { + result[i] = result[i] ^ second[i] + } +} + +func (s scramFormatter) saltedPassword(password []byte, salt []byte, iterations int) ([]byte, error) { + mac, err := s.mac(password) + if err != nil { + return nil, err + } + + if _, err := mac.Write(salt); err != nil { + return nil, err + } + if _, err := mac.Write([]byte{0, 0, 0, 1}); err != nil { + return nil, err + } + + u1 := mac.Sum(nil) + prev := u1 + result := u1 + + for i := 2; i <= iterations; i++ { + ui, err := s.hmac(password, prev) + if err != nil { + return nil, err + } + + s.xor(result, ui) + prev = ui + } + + return result, nil +} diff --git a/vendor/github.com/Shopify/sarama/sticky_assignor_user_data.go b/vendor/github.com/Shopify/sarama/sticky_assignor_user_data.go index bb0c82c34..161233fc3 100644 --- a/vendor/github.com/Shopify/sarama/sticky_assignor_user_data.go +++ b/vendor/github.com/Shopify/sarama/sticky_assignor_user_data.go @@ -11,7 +11,7 @@ type StickyAssignorUserData interface { generation() int } -//StickyAssignorUserDataV0 holds topic partition information for an assignment +// StickyAssignorUserDataV0 holds topic partition information for an assignment type StickyAssignorUserDataV0 struct { Topics map[string][]int32 @@ -58,7 +58,7 @@ func (m *StickyAssignorUserDataV0) partitions() []topicPartitionAssignment { ret func (m *StickyAssignorUserDataV0) hasGeneration() bool { return false } func (m *StickyAssignorUserDataV0) generation() int { return defaultGeneration } -//StickyAssignorUserDataV1 holds topic partition information for an assignment +// StickyAssignorUserDataV1 holds topic partition information for an assignment type StickyAssignorUserDataV1 struct { Topics map[string][]int32 Generation int32 diff --git a/vendor/github.com/Shopify/sarama/sync_group_request.go b/vendor/github.com/Shopify/sarama/sync_group_request.go index ac6ecb13e..33ed3bacc 100644 --- a/vendor/github.com/Shopify/sarama/sync_group_request.go +++ b/vendor/github.com/Shopify/sarama/sync_group_request.go @@ -1,31 +1,73 @@ package sarama -type SyncGroupRequest struct { - GroupId string - GenerationId int32 - MemberId string - GroupAssignments map[string][]byte +type SyncGroupRequestAssignment struct { + // MemberId contains the ID of the member to assign. + MemberId string + // Assignment contains the member assignment. 
+ Assignment []byte } -func (r *SyncGroupRequest) encode(pe packetEncoder) error { - if err := pe.putString(r.GroupId); err != nil { +func (a *SyncGroupRequestAssignment) encode(pe packetEncoder, version int16) (err error) { + if err := pe.putString(a.MemberId); err != nil { return err } - pe.putInt32(r.GenerationId) + if err := pe.putBytes(a.Assignment); err != nil { + return err + } + + return nil +} + +func (a *SyncGroupRequestAssignment) decode(pd packetDecoder, version int16) (err error) { + if a.MemberId, err = pd.getString(); err != nil { + return err + } - if err := pe.putString(r.MemberId); err != nil { + if a.Assignment, err = pd.getBytes(); err != nil { return err } - if err := pe.putArrayLength(len(r.GroupAssignments)); err != nil { + return nil +} + +type SyncGroupRequest struct { + // Version defines the protocol version to use for encode and decode + Version int16 + // GroupId contains the unique group identifier. + GroupId string + // GenerationId contains the generation of the group. + GenerationId int32 + // MemberId contains the member ID assigned by the group. + MemberId string + // GroupInstanceId contains the unique identifier of the consumer instance provided by end user. + GroupInstanceId *string + // GroupAssignments contains each assignment. + GroupAssignments []SyncGroupRequestAssignment +} + +func (s *SyncGroupRequest) encode(pe packetEncoder) (err error) { + if err := pe.putString(s.GroupId); err != nil { + return err + } + + pe.putInt32(s.GenerationId) + + if err := pe.putString(s.MemberId); err != nil { return err } - for memberId, memberAssignment := range r.GroupAssignments { - if err := pe.putString(memberId); err != nil { + + if s.Version >= 3 { + if err := pe.putNullableString(s.GroupInstanceId); err != nil { return err } - if err := pe.putBytes(memberAssignment); err != nil { + } + + if err := pe.putArrayLength(len(s.GroupAssignments)); err != nil { + return err + } + for _, block := range s.GroupAssignments { + if err := block.encode(pe, s.Version); err != nil { return err } } @@ -33,37 +75,37 @@ func (r *SyncGroupRequest) encode(pe packetEncoder) error { return nil } -func (r *SyncGroupRequest) decode(pd packetDecoder, version int16) (err error) { - if r.GroupId, err = pd.getString(); err != nil { - return - } - if r.GenerationId, err = pd.getInt32(); err != nil { - return - } - if r.MemberId, err = pd.getString(); err != nil { - return +func (s *SyncGroupRequest) decode(pd packetDecoder, version int16) (err error) { + s.Version = version + if s.GroupId, err = pd.getString(); err != nil { + return err } - n, err := pd.getArrayLength() - if err != nil { + if s.GenerationId, err = pd.getInt32(); err != nil { return err } - if n == 0 { - return nil + + if s.MemberId, err = pd.getString(); err != nil { + return err } - r.GroupAssignments = make(map[string][]byte) - for i := 0; i < n; i++ { - memberId, err := pd.getString() - if err != nil { - return err - } - memberAssignment, err := pd.getBytes() - if err != nil { + if s.Version >= 3 { + if s.GroupInstanceId, err = pd.getNullableString(); err != nil { return err } + } - r.GroupAssignments[memberId] = memberAssignment + if numAssignments, err := pd.getArrayLength(); err != nil { + return err + } else if numAssignments > 0 { + s.GroupAssignments = make([]SyncGroupRequestAssignment, numAssignments) + for i := 0; i < numAssignments; i++ { + var block SyncGroupRequestAssignment + if err := block.decode(pd, s.Version); err != nil { + return err + } + s.GroupAssignments[i] = block + } } return nil @@ -74,7 
+116,7 @@ func (r *SyncGroupRequest) key() int16 { } func (r *SyncGroupRequest) version() int16 { - return 0 + return r.Version } func (r *SyncGroupRequest) headerVersion() int16 { @@ -82,18 +124,24 @@ func (r *SyncGroupRequest) headerVersion() int16 { } func (r *SyncGroupRequest) requiredVersion() KafkaVersion { + switch { + case r.Version >= 3: + return V2_3_0_0 + } return V0_9_0_0 } func (r *SyncGroupRequest) AddGroupAssignment(memberId string, memberAssignment []byte) { - if r.GroupAssignments == nil { - r.GroupAssignments = make(map[string][]byte) - } - - r.GroupAssignments[memberId] = memberAssignment + r.GroupAssignments = append(r.GroupAssignments, SyncGroupRequestAssignment{ + MemberId: memberId, + Assignment: memberAssignment, + }) } -func (r *SyncGroupRequest) AddGroupAssignmentMember(memberId string, memberAssignment *ConsumerGroupMemberAssignment) error { +func (r *SyncGroupRequest) AddGroupAssignmentMember( + memberId string, + memberAssignment *ConsumerGroupMemberAssignment, +) error { bin, err := encode(memberAssignment, nil) if err != nil { return err diff --git a/vendor/github.com/Shopify/sarama/sync_group_response.go b/vendor/github.com/Shopify/sarama/sync_group_response.go index af019c42f..41b63b3d0 100644 --- a/vendor/github.com/Shopify/sarama/sync_group_response.go +++ b/vendor/github.com/Shopify/sarama/sync_group_response.go @@ -1,22 +1,39 @@ package sarama type SyncGroupResponse struct { - Err KError + // Version defines the protocol version to use for encode and decode + Version int16 + // ThrottleTimeMs contains the duration in milliseconds for which the + // request was throttled due to a quota violation, or zero if the request + // did not violate any quota. + ThrottleTime int32 + // Err contains the error code, or 0 if there was no error. + Err KError + // MemberAssignment contains the member assignment. MemberAssignment []byte } func (r *SyncGroupResponse) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) { assignment := new(ConsumerGroupMemberAssignment) - err := decode(r.MemberAssignment, assignment) + err := decode(r.MemberAssignment, assignment, nil) return assignment, err } func (r *SyncGroupResponse) encode(pe packetEncoder) error { + if r.Version >= 1 { + pe.putInt32(r.ThrottleTime) + } pe.putInt16(int16(r.Err)) return pe.putBytes(r.MemberAssignment) } func (r *SyncGroupResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + if r.Version >= 1 { + if r.ThrottleTime, err = pd.getInt32(); err != nil { + return err + } + } kerr, err := pd.getInt16() if err != nil { return err @@ -33,7 +50,7 @@ func (r *SyncGroupResponse) key() int16 { } func (r *SyncGroupResponse) version() int16 { - return 0 + return r.Version } func (r *SyncGroupResponse) headerVersion() int16 { @@ -41,5 +58,9 @@ func (r *SyncGroupResponse) headerVersion() int16 { } func (r *SyncGroupResponse) requiredVersion() KafkaVersion { + switch r.Version { + case 1, 2, 3: + return V2_3_0_0 + } return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/sync_producer.go b/vendor/github.com/Shopify/sarama/sync_producer.go index 021c5a010..8765ac336 100644 --- a/vendor/github.com/Shopify/sarama/sync_producer.go +++ b/vendor/github.com/Shopify/sarama/sync_producer.go @@ -25,11 +25,31 @@ type SyncProducer interface { // SendMessages will return an error. SendMessages(msgs []*ProducerMessage) error - // Close shuts down the producer and waits for any buffered messages to be - // flushed. 
You must call this function before a producer object passes out of - scope, as it may otherwise leak memory. You must call this before calling - Close on the underlying client. + // Close shuts down the producer; you must call this function before a producer + // object passes out of scope, as it may otherwise leak memory. + // You must call this before calling Close on the underlying client. Close() error + + // TxnStatus returns the current producer transaction status. + TxnStatus() ProducerTxnStatusFlag + + // IsTransactional returns true when the current producer is transactional. + IsTransactional() bool + + // BeginTxn marks the current transaction as ready. + BeginTxn() error + + // CommitTxn commits the current transaction. + CommitTxn() error + + // AbortTxn aborts the current transaction. + AbortTxn() error + + // AddOffsetsToTxn adds the associated offsets to the current transaction. + AddOffsetsToTxn(offsets map[string][]*PartitionOffsetMetadata, groupId string) error + + // AddMessageToTxn adds the message offsets to the current transaction. + AddMessageToTxn(msg *ConsumerMessage, groupId string, metadata *string) error } type syncProducer struct { @@ -94,8 +114,8 @@ func (sp *syncProducer) SendMessage(msg *ProducerMessage) (partition int32, offs msg.expectation = expectation sp.producer.Input() <- msg - if err := <-expectation; err != nil { - return -1, -1, err.Err + if pErr := <-expectation; pErr != nil { + return -1, -1, pErr.Err } return msg.Partition, msg.Offset, nil @@ -115,8 +135,8 @@ func (sp *syncProducer) SendMessages(msgs []*ProducerMessage) error { var errors ProducerErrors for expectation := range expectations { - if err := <-expectation; err != nil { - errors = append(errors, err) + if pErr := <-expectation; pErr != nil { + errors = append(errors, pErr) } } @@ -147,3 +167,31 @@ func (sp *syncProducer) Close() error { sp.wg.Wait() return nil } + +func (sp *syncProducer) IsTransactional() bool { + return sp.producer.IsTransactional() +} + +func (sp *syncProducer) BeginTxn() error { + return sp.producer.BeginTxn() +} + +func (sp *syncProducer) CommitTxn() error { + return sp.producer.CommitTxn() +} + +func (sp *syncProducer) AbortTxn() error { + return sp.producer.AbortTxn() +} + +func (sp *syncProducer) AddOffsetsToTxn(offsets map[string][]*PartitionOffsetMetadata, groupId string) error { + return sp.producer.AddOffsetsToTxn(offsets, groupId) +} + +func (sp *syncProducer) AddMessageToTxn(msg *ConsumerMessage, groupId string, metadata *string) error { + return sp.producer.AddMessageToTxn(msg, groupId, metadata) +} + +func (sp *syncProducer) TxnStatus() ProducerTxnStatusFlag { + return sp.producer.TxnStatus() +} diff --git a/vendor/github.com/Shopify/sarama/transaction_manager.go b/vendor/github.com/Shopify/sarama/transaction_manager.go new file mode 100644 index 000000000..e18abecd3 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/transaction_manager.go @@ -0,0 +1,887 @@ +package sarama + +import ( + "errors" + "fmt" + "strings" + "sync" + "time" +) + +// ProducerTxnStatusFlag marks the current transaction status.
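+// The flags below are bit flags: a status may combine several of them
+// (for example ProducerTxnFlagInError|ProducerTxnFlagAbortableError), and
+// callers test membership with a bitwise AND; an illustrative sketch:
+//
+//	if status&ProducerTxnFlagInError != 0 {
+//		// the manager is in an error state
+//	}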
+type ProducerTxnStatusFlag int16 + +const ( + // ProducerTxnFlagUninitialized when txnmgr is created + ProducerTxnFlagUninitialized ProducerTxnStatusFlag = 1 << iota + // ProducerTxnFlagInitializing when txnmgr is initializing + ProducerTxnFlagInitializing + // ProducerTxnFlagReady when txnmgr is ready to receive a transaction + ProducerTxnFlagReady + // ProducerTxnFlagInTransaction when a transaction is started + ProducerTxnFlagInTransaction + // ProducerTxnFlagEndTransaction when the transaction will be committed + ProducerTxnFlagEndTransaction + // ProducerTxnFlagInError when having an abortable or fatal error + ProducerTxnFlagInError + // ProducerTxnFlagCommittingTransaction when committing txn + ProducerTxnFlagCommittingTransaction + // ProducerTxnFlagAbortingTransaction when aborting txn + ProducerTxnFlagAbortingTransaction + // ProducerTxnFlagAbortableError when the producer encounters an abortable error + // Must call AbortTxn in this case. + ProducerTxnFlagAbortableError + // ProducerTxnFlagFatalError when the producer encounters a fatal error + // Must Close and recreate it. + ProducerTxnFlagFatalError +) + +func (s ProducerTxnStatusFlag) String() string { + status := make([]string, 0) + if s&ProducerTxnFlagUninitialized != 0 { + status = append(status, "ProducerTxnStateUninitialized") + } + if s&ProducerTxnFlagInitializing != 0 { + status = append(status, "ProducerTxnStateInitializing") + } + if s&ProducerTxnFlagReady != 0 { + status = append(status, "ProducerTxnStateReady") + } + if s&ProducerTxnFlagInTransaction != 0 { + status = append(status, "ProducerTxnStateInTransaction") + } + if s&ProducerTxnFlagEndTransaction != 0 { + status = append(status, "ProducerTxnStateEndTransaction") + } + if s&ProducerTxnFlagInError != 0 { + status = append(status, "ProducerTxnStateInError") + } + if s&ProducerTxnFlagCommittingTransaction != 0 { + status = append(status, "ProducerTxnStateCommittingTransaction") + } + if s&ProducerTxnFlagAbortingTransaction != 0 { + status = append(status, "ProducerTxnStateAbortingTransaction") + } + if s&ProducerTxnFlagAbortableError != 0 { + status = append(status, "ProducerTxnStateAbortableError") + } + if s&ProducerTxnFlagFatalError != 0 { + status = append(status, "ProducerTxnStateFatalError") + } + return strings.Join(status, "|") +} + +// transactionManager keeps the state necessary to ensure idempotent production +type transactionManager struct { + producerID int64 + producerEpoch int16 + sequenceNumbers map[string]int32 + mutex sync.Mutex + transactionalID string + transactionTimeout time.Duration + client Client + + // true when the kafka cluster is at least 2.5.0; + // used to recover when the producer fails. + coordinatorSupportsBumpingEpoch bool + + // True when the producer needs to bump its epoch. + epochBumpRequired bool + // Record the last seen error. + lastError error + + // Ensure that status is never accessed with a race condition. + statusLock sync.RWMutex + status ProducerTxnStatusFlag + + // Ensure that only one goroutine will update partitions in current transaction. + partitionInTxnLock sync.Mutex + pendingPartitionsInCurrentTxn topicPartitionSet + partitionsInCurrentTxn topicPartitionSet + + // Offsets to add to transaction. + offsetsInCurrentTxn map[string]topicPartitionOffsets +} + +const ( + noProducerID = -1 + noProducerEpoch = -1 + + // see publishTxnPartitions comment. + addPartitionsRetryBackoff = 20 * time.Millisecond +) + +// txnmgr allowed transitions.
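+// Each map key is matched bitwise against the manager's current status,
+// and a requested target must match one of the listed flags; everything
+// else is rejected by transitionTo with ErrTransitionNotAllowed. For
+// example, a manager in ProducerTxnFlagReady may only move to
+// ProducerTxnFlagInTransaction.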
+var producerTxnTransitions = map[ProducerTxnStatusFlag][]ProducerTxnStatusFlag{ + ProducerTxnFlagUninitialized: { + ProducerTxnFlagReady, + ProducerTxnFlagInError, + }, + // When we are initializing + ProducerTxnFlagInitializing: { + ProducerTxnFlagInitializing, + ProducerTxnFlagReady, + ProducerTxnFlagInError, + }, + // When we have initialized the transactional producer + ProducerTxnFlagReady: { + ProducerTxnFlagInTransaction, + }, + // When beginTxn has been called + ProducerTxnFlagInTransaction: { + // When calling commit or abort + ProducerTxnFlagEndTransaction, + // When got an error + ProducerTxnFlagInError, + }, + ProducerTxnFlagEndTransaction: { + // When epoch bump + ProducerTxnFlagInitializing, + // When commit is good + ProducerTxnFlagReady, + // When got an error + ProducerTxnFlagInError, + }, + // Need to abort transaction + ProducerTxnFlagAbortableError: { + // Call AbortTxn + ProducerTxnFlagAbortingTransaction, + // When got an error + ProducerTxnFlagInError, + }, + // Need to close producer + ProducerTxnFlagFatalError: { + ProducerTxnFlagFatalError, + }, +} + +type topicPartition struct { + topic string + partition int32 +} + +// to ensure that we don't do a full scan every time a partition or an offset is added. +type topicPartitionSet map[topicPartition]struct{} +type topicPartitionOffsets map[topicPartition]*PartitionOffsetMetadata + +func (s topicPartitionSet) mapToRequest() map[string][]int32 { + result := make(map[string][]int32, len(s)) + for tp := range s { + result[tp.topic] = append(result[tp.topic], tp.partition) + } + return result +} + +func (s topicPartitionOffsets) mapToRequest() map[string][]*PartitionOffsetMetadata { + result := make(map[string][]*PartitionOffsetMetadata, len(s)) + for tp, offset := range s { + result[tp.topic] = append(result[tp.topic], offset) + } + return result +} + +// Return true if the transition to target is allowed. +func (t *transactionManager) isTransitionValid(target ProducerTxnStatusFlag) bool { + for status, allowedTransitions := range producerTxnTransitions { + if status&t.status != 0 { + for _, allowedTransition := range allowedTransitions { + if allowedTransition&target != 0 { + return true + } + } + } + } + return false +} + +// Get current transaction status. +func (t *transactionManager) currentTxnStatus() ProducerTxnStatusFlag { + t.statusLock.RLock() + defer t.statusLock.RUnlock() + + return t.status +} + +// Try to transition to a valid status and return an error otherwise.
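+// When the target includes ProducerTxnFlagInError a non-nil err is
+// required and is recorded as lastError; for any other target lastError
+// is cleared. A hypothetical call site:
+//
+//	if err := t.transitionTo(ProducerTxnFlagInTransaction, nil); err != nil {
+//		return err // e.g. ErrTransitionNotAllowed
+//	}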
+func (t *transactionManager) transitionTo(target ProducerTxnStatusFlag, err error) error { + t.statusLock.Lock() + defer t.statusLock.Unlock() + + if !t.isTransitionValid(target) { + return ErrTransitionNotAllowed + } + + if target&ProducerTxnFlagInError != 0 { + if err == nil { + return ErrCannotTransitionNilError + } + t.lastError = err + } else { + t.lastError = nil + } + + DebugLogger.Printf("txnmgr/transition [%s] transition from %s to %s\n", t.transactionalID, t.status, target) + + t.status = target + return err +} + +func (t *transactionManager) getAndIncrementSequenceNumber(topic string, partition int32) (int32, int16) { + key := fmt.Sprintf("%s-%d", topic, partition) + t.mutex.Lock() + defer t.mutex.Unlock() + sequence := t.sequenceNumbers[key] + t.sequenceNumbers[key] = sequence + 1 + return sequence, t.producerEpoch +} + +func (t *transactionManager) bumpEpoch() { + t.mutex.Lock() + defer t.mutex.Unlock() + t.producerEpoch++ + for k := range t.sequenceNumbers { + t.sequenceNumbers[k] = 0 + } +} + +func (t *transactionManager) getProducerID() (int64, int16) { + t.mutex.Lock() + defer t.mutex.Unlock() + return t.producerID, t.producerEpoch +} + +// Compute the retry backoff based on how many attempts remain. +func (t *transactionManager) computeBackoff(attemptsRemaining int) time.Duration { + if t.client.Config().Producer.Transaction.Retry.BackoffFunc != nil { + maxRetries := t.client.Config().Producer.Transaction.Retry.Max + retries := maxRetries - attemptsRemaining + return t.client.Config().Producer.Transaction.Retry.BackoffFunc(retries, maxRetries) + } + return t.client.Config().Producer.Transaction.Retry.Backoff +} + +// return true if the txnmgr is transactional. +func (t *transactionManager) isTransactional() bool { + return t.transactionalID != "" +} + +// add the specified offsets to the current transaction. +func (t *transactionManager) addOffsetsToTxn(offsetsToAdd map[string][]*PartitionOffsetMetadata, groupId string) error { + t.mutex.Lock() + defer t.mutex.Unlock() + + if t.currentTxnStatus()&ProducerTxnFlagInTransaction == 0 { + return ErrTransactionNotReady + } + + if t.currentTxnStatus()&ProducerTxnFlagFatalError != 0 { + return t.lastError + } + + if _, ok := t.offsetsInCurrentTxn[groupId]; !ok { + t.offsetsInCurrentTxn[groupId] = topicPartitionOffsets{} + } + + for topic, offsets := range offsetsToAdd { + for _, offset := range offsets { + tp := topicPartition{topic: topic, partition: offset.Partition} + t.offsetsInCurrentTxn[groupId][tp] = offset + } + } + return nil +} + +// send the txnmgr's saved offsets to the transaction coordinator. +func (t *transactionManager) publishOffsetsToTxn(offsets topicPartitionOffsets, groupId string) (topicPartitionOffsets, error) { + // First AddOffsetsToTxn + attemptsRemaining := t.client.Config().Producer.Transaction.Retry.Max + exec := func(run func() (bool, error), err error) error { + for attemptsRemaining >= 0 { + var retry bool + retry, err = run() + if !retry { + return err + } + backoff := t.computeBackoff(attemptsRemaining) + Logger.Printf("txnmgr/add-offset-to-txn [%s] retrying after %dms... 
(%d attempts remaining) (%s)\n", + t.transactionalID, backoff/time.Millisecond, attemptsRemaining, err) + time.Sleep(backoff) + attemptsRemaining-- + } + return err + } + lastError := exec(func() (bool, error) { + coordinator, err := t.client.TransactionCoordinator(t.transactionalID) + if err != nil { + return true, err + } + response, err := coordinator.AddOffsetsToTxn(&AddOffsetsToTxnRequest{ + TransactionalID: t.transactionalID, + ProducerEpoch: t.producerEpoch, + ProducerID: t.producerID, + GroupID: groupId, + }) + if err != nil { + // If an error occurred try to refresh current transaction coordinator. + _ = coordinator.Close() + _ = t.client.RefreshTransactionCoordinator(t.transactionalID) + return true, err + } + if response == nil { + // If no response is returned just retry. + return true, ErrTxnUnableToParseResponse + } + if response.Err == ErrNoError { + DebugLogger.Printf("txnmgr/add-offset-to-txn [%s] successful add-offset-to-txn with group %s %+v\n", + t.transactionalID, groupId, response) + // If no error, just exit. + return false, nil + } + switch response.Err { + case ErrConsumerCoordinatorNotAvailable: + fallthrough + case ErrNotCoordinatorForConsumer: + _ = coordinator.Close() + _ = t.client.RefreshTransactionCoordinator(t.transactionalID) + fallthrough + case ErrOffsetsLoadInProgress: + fallthrough + case ErrConcurrentTransactions: + // Retry + case ErrUnknownProducerID: + fallthrough + case ErrInvalidProducerIDMapping: + return false, t.abortableErrorIfPossible(response.Err) + case ErrGroupAuthorizationFailed: + return false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagAbortableError, response.Err) + default: + // Others are fatal + return false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagFatalError, response.Err) + } + return true, response.Err + }, nil) + + if lastError != nil { + return offsets, lastError + } + + resultOffsets := offsets + // Then TxnOffsetCommit + // note the result is not completed until the TxnOffsetCommit returns + attemptsRemaining = t.client.Config().Producer.Transaction.Retry.Max + execTxnOffsetCommit := func(run func() (topicPartitionOffsets, bool, error), err error) (topicPartitionOffsets, error) { + var r topicPartitionOffsets + for attemptsRemaining >= 0 { + var retry bool + r, retry, err = run() + if !retry { + return r, err + } + backoff := t.computeBackoff(attemptsRemaining) + Logger.Printf("txnmgr/txn-offset-commit [%s] retrying after %dms... 
(%d attempts remaining) (%s)\n", + t.transactionalID, backoff/time.Millisecond, attemptsRemaining, err) + time.Sleep(backoff) + attemptsRemaining-- + } + return r, err + } + return execTxnOffsetCommit(func() (topicPartitionOffsets, bool, error) { + consumerGroupCoordinator, err := t.client.Coordinator(groupId) + if err != nil { + return resultOffsets, true, err + } + responses, err := consumerGroupCoordinator.TxnOffsetCommit(&TxnOffsetCommitRequest{ + TransactionalID: t.transactionalID, + ProducerEpoch: t.producerEpoch, + ProducerID: t.producerID, + GroupID: groupId, + Topics: offsets.mapToRequest(), + }) + if err != nil { + _ = consumerGroupCoordinator.Close() + _ = t.client.RefreshCoordinator(groupId) + return resultOffsets, true, err + } + + if responses == nil { + return resultOffsets, true, ErrTxnUnableToParseResponse + } + + var responseErrors []error + failedTxn := topicPartitionOffsets{} + for topic, partitionErrors := range responses.Topics { + for _, partitionError := range partitionErrors { + switch partitionError.Err { + case ErrNoError: + continue + // If the topic is unknown or the coordinator is loading, retry with the current coordinator + case ErrRequestTimedOut: + fallthrough + case ErrConsumerCoordinatorNotAvailable: + fallthrough + case ErrNotCoordinatorForConsumer: + _ = consumerGroupCoordinator.Close() + _ = t.client.RefreshCoordinator(groupId) + fallthrough + case ErrUnknownTopicOrPartition: + fallthrough + case ErrOffsetsLoadInProgress: + // Do nothing, just retry + case ErrIllegalGeneration: + fallthrough + case ErrUnknownMemberId: + fallthrough + case ErrFencedInstancedId: + fallthrough + case ErrGroupAuthorizationFailed: + return resultOffsets, false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagAbortableError, partitionError.Err) + default: + // Others are fatal + return resultOffsets, false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagFatalError, partitionError.Err) + } + tp := topicPartition{topic: topic, partition: partitionError.Partition} + failedTxn[tp] = offsets[tp] + responseErrors = append(responseErrors, partitionError.Err) + } + } + + resultOffsets = failedTxn + + if len(resultOffsets) == 0 { + DebugLogger.Printf("txnmgr/txn-offset-commit [%s] successful txn-offset-commit with group %s\n", + t.transactionalID, groupId) + return resultOffsets, false, nil + } + return resultOffsets, true, Wrap(ErrTxnOffsetCommit, responseErrors...) 
+ }, nil) +} + +func (t *transactionManager) initProducerId() (int64, int16, error) { + isEpochBump := false + + req := &InitProducerIDRequest{} + if t.isTransactional() { + req.TransactionalID = &t.transactionalID + req.TransactionTimeout = t.transactionTimeout + } + + if t.client.Config().Version.IsAtLeast(V2_5_0_0) { + req.Version = 3 + isEpochBump = t.producerID != noProducerID && t.producerEpoch != noProducerEpoch + t.coordinatorSupportsBumpingEpoch = true + req.ProducerID = t.producerID + req.ProducerEpoch = t.producerEpoch + } else if t.client.Config().Version.IsAtLeast(V2_4_0_0) { + req.Version = 2 + } + + if isEpochBump { + err := t.transitionTo(ProducerTxnFlagInitializing, nil) + if err != nil { + return -1, -1, err + } + DebugLogger.Printf("txnmgr/init-producer-id [%s] invoking InitProducerId with current producer ID %d and epoch %d in order to bump the epoch\n", + t.transactionalID, t.producerID, t.producerEpoch) + } else { + DebugLogger.Printf("txnmgr/init-producer-id [%s] invoking InitProducerId for the first time in order to acquire a producer ID\n", + t.transactionalID) + } + + attemptsRemaining := t.client.Config().Producer.Transaction.Retry.Max + exec := func(run func() (int64, int16, bool, error), err error) (int64, int16, error) { + pid := int64(-1) + pepoch := int16(-1) + for attemptsRemaining >= 0 { + var retry bool + pid, pepoch, retry, err = run() + if !retry { + return pid, pepoch, err + } + backoff := t.computeBackoff(attemptsRemaining) + Logger.Printf("txnmgr/init-producer-id [%s] retrying after %dms... (%d attempts remaining) (%s)\n", + t.transactionalID, backoff/time.Millisecond, attemptsRemaining, err) + time.Sleep(backoff) + attemptsRemaining-- + } + return -1, -1, err + } + return exec(func() (int64, int16, bool, error) { + var err error + var coordinator *Broker + if t.isTransactional() { + coordinator, err = t.client.TransactionCoordinator(t.transactionalID) + } else { + coordinator = t.client.LeastLoadedBroker() + } + if err != nil { + return -1, -1, true, err + } + response, err := coordinator.InitProducerID(req) + if err != nil { + if t.isTransactional() { + _ = coordinator.Close() + _ = t.client.RefreshTransactionCoordinator(t.transactionalID) + } + return -1, -1, true, err + } + if response == nil { + return -1, -1, true, ErrTxnUnableToParseResponse + } + if response.Err == ErrNoError { + if isEpochBump { + t.sequenceNumbers = make(map[string]int32) + } + err := t.transitionTo(ProducerTxnFlagReady, nil) + if err != nil { + return -1, -1, true, err + } + DebugLogger.Printf("txnmgr/init-producer-id [%s] successful init producer id %+v\n", + t.transactionalID, response) + return response.ProducerID, response.ProducerEpoch, false, nil + } + switch response.Err { + case ErrConsumerCoordinatorNotAvailable: + fallthrough + case ErrNotCoordinatorForConsumer: + if t.isTransactional() { + _ = coordinator.Close() + _ = t.client.RefreshTransactionCoordinator(t.transactionalID) + } + // Fatal errors + default: + return -1, -1, false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagFatalError, response.Err) + } + return -1, -1, true, response.Err + }, nil) +} + +// If the kafka cluster is at least 2.5.0, mark the txnmgr to bump the epoch; otherwise mark it as fatal.
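+// In the abortable case epochBumpRequired is set, so the next call to
+// initProducerId (via initializeTransactions) bumps the producer epoch
+// rather than forcing the producer to be closed and recreated.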
+func (t *transactionManager) abortableErrorIfPossible(err error) error { + if t.coordinatorSupportsBumpingEpoch { + t.epochBumpRequired = true + return t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagAbortableError, err) + } + return t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagFatalError, err) +} + +// End current transaction. +func (t *transactionManager) completeTransaction() error { + if t.epochBumpRequired { + err := t.transitionTo(ProducerTxnFlagInitializing, nil) + if err != nil { + return err + } + } else { + err := t.transitionTo(ProducerTxnFlagReady, nil) + if err != nil { + return err + } + } + + t.lastError = nil + t.epochBumpRequired = false + t.partitionsInCurrentTxn = topicPartitionSet{} + t.pendingPartitionsInCurrentTxn = topicPartitionSet{} + t.offsetsInCurrentTxn = map[string]topicPartitionOffsets{} + + return nil +} + +// send EndTxn request with commit flag. (true when committing, false otherwise) +func (t *transactionManager) endTxn(commit bool) error { + attemptsRemaining := t.client.Config().Producer.Transaction.Retry.Max + exec := func(run func() (bool, error), err error) error { + for attemptsRemaining >= 0 { + var retry bool + retry, err = run() + if !retry { + return err + } + backoff := t.computeBackoff(attemptsRemaining) + Logger.Printf("txnmgr/endtxn [%s] retrying after %dms... (%d attempts remaining) (%s)\n", + t.transactionalID, backoff/time.Millisecond, attemptsRemaining, err) + time.Sleep(backoff) + attemptsRemaining-- + } + return err + } + return exec(func() (bool, error) { + coordinator, err := t.client.TransactionCoordinator(t.transactionalID) + if err != nil { + return true, err + } + response, err := coordinator.EndTxn(&EndTxnRequest{ + TransactionalID: t.transactionalID, + ProducerEpoch: t.producerEpoch, + ProducerID: t.producerID, + TransactionResult: commit, + }) + if err != nil { + // Always retry on network error + _ = coordinator.Close() + _ = t.client.RefreshTransactionCoordinator(t.transactionalID) + return true, err + } + if response == nil { + return true, ErrTxnUnableToParseResponse + } + if response.Err == ErrNoError { + DebugLogger.Printf("txnmgr/endtxn [%s] successfully ended txn %+v\n", + t.transactionalID, response) + return false, t.completeTransaction() + } + switch response.Err { + // Need to refresh coordinator + case ErrConsumerCoordinatorNotAvailable: + fallthrough + case ErrNotCoordinatorForConsumer: + _ = coordinator.Close() + _ = t.client.RefreshTransactionCoordinator(t.transactionalID) + fallthrough + case ErrOffsetsLoadInProgress: + fallthrough + case ErrConcurrentTransactions: + // Just retry + case ErrUnknownProducerID: + fallthrough + case ErrInvalidProducerIDMapping: + return false, t.abortableErrorIfPossible(response.Err) + // Fatal errors + default: + return false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagFatalError, response.Err) + } + return true, response.Err + }, nil) +} + +// We will try to publish the associated offsets for each group, +// then send an endtxn request to mark the transaction as finished. +func (t *transactionManager) finishTransaction(commit bool) error { + t.mutex.Lock() + defer t.mutex.Unlock() + + // Ensure no error when committing or aborting + if commit && t.currentTxnStatus()&ProducerTxnFlagInError != 0 { + return t.lastError + } else if !commit && t.currentTxnStatus()&ProducerTxnFlagFatalError != 0 { + return t.lastError + } + + // if no records have been sent, don't do anything.
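+	// completeTransaction is still called so that pending partitions,
+	// offsets and lastError are reset for the next transaction.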
+ if len(t.partitionsInCurrentTxn) == 0 { + return t.completeTransaction() + } + + epochBump := t.epochBumpRequired + // If we're aborting the transaction, there is no need to add offsets. + if commit && len(t.offsetsInCurrentTxn) > 0 { + for group, offsets := range t.offsetsInCurrentTxn { + newOffsets, err := t.publishOffsetsToTxn(offsets, group) + if err != nil { + t.offsetsInCurrentTxn[group] = newOffsets + return err + } + delete(t.offsetsInCurrentTxn, group) + } + } + + if t.currentTxnStatus()&ProducerTxnFlagFatalError != 0 { + return t.lastError + } + + if !errors.Is(t.lastError, ErrInvalidProducerIDMapping) { + err := t.endTxn(commit) + if err != nil { + return err + } + if !epochBump { + return nil + } + } + // reset pid and epoch if needed. + return t.initializeTransactions() +} + +// called before sending any transactional record +// won't do anything if the current topic-partition has already been added to the transaction. +func (t *transactionManager) maybeAddPartitionToCurrentTxn(topic string, partition int32) { + if t.currentTxnStatus()&ProducerTxnFlagInError != 0 { + return + } + + tp := topicPartition{topic: topic, partition: partition} + + t.partitionInTxnLock.Lock() + defer t.partitionInTxnLock.Unlock() + if _, ok := t.partitionsInCurrentTxn[tp]; ok { + // partition is already added + return + } + + t.pendingPartitionsInCurrentTxn[tp] = struct{}{} +} + +// Makes a request to kafka to add a list of partitions to the current transaction. +func (t *transactionManager) publishTxnPartitions() error { + t.partitionInTxnLock.Lock() + defer t.partitionInTxnLock.Unlock() + + if t.currentTxnStatus()&ProducerTxnFlagInError != 0 { + return t.lastError + } + + if len(t.pendingPartitionsInCurrentTxn) == 0 { + return nil + } + + // Remove the partitions from the pending set regardless of the result. We use the presence + // of partitions in the pending set to know when it is not safe to send batches. However, if + // the partitions failed to be added and we enter an error state, we expect the batches to be + // aborted anyway. In this case, we must be able to continue sending the batches which are in + // retry for partitions that were successfully added. + removeAllPartitionsOnFatalOrAbortedError := func() { + t.pendingPartitionsInCurrentTxn = topicPartitionSet{} + } + + // We only want to reduce the backoff when retrying the first AddPartition which errored out due to a + // CONCURRENT_TRANSACTIONS error since this means that the previous transaction is still completing and + // we don't want to wait too long before trying to start the new one. + // + // This is only a temporary fix, the long term solution is being tracked in + // https://issues.apache.org/jira/browse/KAFKA-5482 + retryBackoff := t.client.Config().Producer.Transaction.Retry.Backoff + computeBackoff := func(attemptsRemaining int) time.Duration { + if t.client.Config().Producer.Transaction.Retry.BackoffFunc != nil { + maxRetries := t.client.Config().Producer.Transaction.Retry.Max + retries := maxRetries - attemptsRemaining + return t.client.Config().Producer.Transaction.Retry.BackoffFunc(retries, maxRetries) + } + return retryBackoff + } + attemptsRemaining := t.client.Config().Producer.Transaction.Retry.Max + + exec := func(run func() (bool, error), err error) error { + for attemptsRemaining >= 0 { + var retry bool + retry, err = run() + if !retry { + return err + } + backoff := computeBackoff(attemptsRemaining) + Logger.Printf("txnmgr/add-partition-to-txn retrying after %dms... 
(%d attempts remaining) (%s)\n", backoff/time.Millisecond, attemptsRemaining, err) + time.Sleep(backoff) + attemptsRemaining-- + } + return err + } + return exec(func() (bool, error) { + coordinator, err := t.client.TransactionCoordinator(t.transactionalID) + if err != nil { + return true, err + } + addPartResponse, err := coordinator.AddPartitionsToTxn(&AddPartitionsToTxnRequest{ + TransactionalID: t.transactionalID, + ProducerID: t.producerID, + ProducerEpoch: t.producerEpoch, + TopicPartitions: t.pendingPartitionsInCurrentTxn.mapToRequest(), + }) + + if err != nil { + _ = coordinator.Close() + _ = t.client.RefreshTransactionCoordinator(t.transactionalID) + return true, err + } + + if addPartResponse == nil { + return true, ErrTxnUnableToParseResponse + } + + // remove from the pending list the partitions that have been successfully added + var responseErrors []error + for topic, results := range addPartResponse.Errors { + for _, response := range results { + tp := topicPartition{topic: topic, partition: response.Partition} + switch response.Err { + case ErrNoError: + // Mark partition as added to transaction + t.partitionsInCurrentTxn[tp] = struct{}{} + delete(t.pendingPartitionsInCurrentTxn, tp) + continue + case ErrConsumerCoordinatorNotAvailable: + fallthrough + case ErrNotCoordinatorForConsumer: + _ = coordinator.Close() + _ = t.client.RefreshTransactionCoordinator(t.transactionalID) + fallthrough + case ErrUnknownTopicOrPartition: + fallthrough + case ErrOffsetsLoadInProgress: + // Retry topicPartition + case ErrConcurrentTransactions: + if len(t.partitionsInCurrentTxn) == 0 && retryBackoff > addPartitionsRetryBackoff { + retryBackoff = addPartitionsRetryBackoff + } + case ErrOperationNotAttempted: + fallthrough + case ErrTopicAuthorizationFailed: + removeAllPartitionsOnFatalOrAbortedError() + return false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagAbortableError, response.Err) + case ErrUnknownProducerID: + fallthrough + case ErrInvalidProducerIDMapping: + removeAllPartitionsOnFatalOrAbortedError() + return false, t.abortableErrorIfPossible(response.Err) + // Fatal errors + default: + removeAllPartitionsOnFatalOrAbortedError() + return false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagFatalError, response.Err) + } + responseErrors = append(responseErrors, response.Err) + } + } + + // handle end + if len(t.pendingPartitionsInCurrentTxn) == 0 { + DebugLogger.Printf("txnmgr/add-partition-to-txn [%s] successfully added partitions to txn %+v\n", + t.transactionalID, addPartResponse) + return false, nil + } + return true, Wrap(ErrAddPartitionsToTxn, responseErrors...) + }, nil) +} + +// Build a new transaction manager sharing the producer's client.
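+// A minimal sketch of the expected wiring, with a hypothetical caller
+// (NewConfig, Producer.Idempotent and Producer.Transaction.ID are the
+// regular Config surface used by the constructor below):
+//
+//	conf := NewConfig()
+//	conf.Producer.Idempotent = true
+//	conf.Producer.Transaction.ID = "my-transactional-id"
+//	txnmgr, err := newTransactionManager(conf, client)
+//	if err != nil {
+//		// initProducerId failed; the producer cannot be transactional
+//	}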
+func newTransactionManager(conf *Config, client Client) (*transactionManager, error) { + txnmgr := &transactionManager{ + producerID: noProducerID, + producerEpoch: noProducerEpoch, + client: client, + pendingPartitionsInCurrentTxn: topicPartitionSet{}, + partitionsInCurrentTxn: topicPartitionSet{}, + offsetsInCurrentTxn: make(map[string]topicPartitionOffsets), + status: ProducerTxnFlagUninitialized, + } + + if conf.Producer.Idempotent { + txnmgr.transactionalID = conf.Producer.Transaction.ID + txnmgr.transactionTimeout = conf.Producer.Transaction.Timeout + txnmgr.sequenceNumbers = make(map[string]int32) + txnmgr.mutex = sync.Mutex{} + + var err error + txnmgr.producerID, txnmgr.producerEpoch, err = txnmgr.initProducerId() + if err != nil { + return nil, err + } + Logger.Printf("txnmgr/init-producer-id [%s] obtained a ProducerId: %d and ProducerEpoch: %d\n", + txnmgr.transactionalID, txnmgr.producerID, txnmgr.producerEpoch) + } + + return txnmgr, nil +} + +// re-init producer-id and producer-epoch if needed. +func (t *transactionManager) initializeTransactions() (err error) { + t.producerID, t.producerEpoch, err = t.initProducerId() + return +} diff --git a/vendor/github.com/Shopify/sarama/utils.go b/vendor/github.com/Shopify/sarama/utils.go index 93bdeefef..819b6597c 100644 --- a/vendor/github.com/Shopify/sarama/utils.go +++ b/vendor/github.com/Shopify/sarama/utils.go @@ -122,8 +122,9 @@ func newKafkaVersion(major, minor, veryMinor, patch uint) KafkaVersion { // IsAtLeast return true if and only if the version it is called on is // greater than or equal to the version passed in: -// V1.IsAtLeast(V2) // false -// V2.IsAtLeast(V1) // true +// +// V1.IsAtLeast(V2) // false +// V2.IsAtLeast(V1) // true func (v KafkaVersion) IsAtLeast(other KafkaVersion) bool { for i := range v.version { if v.version[i] > other.version[i] { @@ -148,20 +149,50 @@ var ( V0_10_1_1 = newKafkaVersion(0, 10, 1, 1) V0_10_2_0 = newKafkaVersion(0, 10, 2, 0) V0_10_2_1 = newKafkaVersion(0, 10, 2, 1) + V0_10_2_2 = newKafkaVersion(0, 10, 2, 2) V0_11_0_0 = newKafkaVersion(0, 11, 0, 0) V0_11_0_1 = newKafkaVersion(0, 11, 0, 1) V0_11_0_2 = newKafkaVersion(0, 11, 0, 2) V1_0_0_0 = newKafkaVersion(1, 0, 0, 0) + V1_0_1_0 = newKafkaVersion(1, 0, 1, 0) + V1_0_2_0 = newKafkaVersion(1, 0, 2, 0) V1_1_0_0 = newKafkaVersion(1, 1, 0, 0) V1_1_1_0 = newKafkaVersion(1, 1, 1, 0) V2_0_0_0 = newKafkaVersion(2, 0, 0, 0) V2_0_1_0 = newKafkaVersion(2, 0, 1, 0) V2_1_0_0 = newKafkaVersion(2, 1, 0, 0) + V2_1_1_0 = newKafkaVersion(2, 1, 1, 0) V2_2_0_0 = newKafkaVersion(2, 2, 0, 0) + V2_2_1_0 = newKafkaVersion(2, 2, 1, 0) + V2_2_2_0 = newKafkaVersion(2, 2, 2, 0) V2_3_0_0 = newKafkaVersion(2, 3, 0, 0) + V2_3_1_0 = newKafkaVersion(2, 3, 1, 0) V2_4_0_0 = newKafkaVersion(2, 4, 0, 0) + V2_4_1_0 = newKafkaVersion(2, 4, 1, 0) V2_5_0_0 = newKafkaVersion(2, 5, 0, 0) + V2_5_1_0 = newKafkaVersion(2, 5, 1, 0) V2_6_0_0 = newKafkaVersion(2, 6, 0, 0) + V2_6_1_0 = newKafkaVersion(2, 6, 1, 0) + V2_6_2_0 = newKafkaVersion(2, 6, 2, 0) + V2_6_3_0 = newKafkaVersion(2, 6, 3, 0) + V2_7_0_0 = newKafkaVersion(2, 7, 0, 0) + V2_7_1_0 = newKafkaVersion(2, 7, 1, 0) + V2_7_2_0 = newKafkaVersion(2, 7, 2, 0) + V2_8_0_0 = newKafkaVersion(2, 8, 0, 0) + V2_8_1_0 = newKafkaVersion(2, 8, 1, 0) + V2_8_2_0 = newKafkaVersion(2, 8, 2, 0) + V3_0_0_0 = newKafkaVersion(3, 0, 0, 0) + V3_0_1_0 = newKafkaVersion(3, 0, 1, 0) + V3_0_2_0 = newKafkaVersion(3, 0, 2, 0) + V3_1_0_0 = newKafkaVersion(3, 1, 0, 0) + V3_1_1_0 = newKafkaVersion(3, 1, 1, 0) + V3_1_2_0 = newKafkaVersion(3, 1, 2, 0) + V3_2_0_0 = 
newKafkaVersion(3, 2, 0, 0) + V3_2_1_0 = newKafkaVersion(3, 2, 1, 0) + V3_2_2_0 = newKafkaVersion(3, 2, 2, 0) + V3_2_3_0 = newKafkaVersion(3, 2, 3, 0) + V3_3_0_0 = newKafkaVersion(3, 3, 0, 0) + V3_3_1_0 = newKafkaVersion(3, 3, 1, 0) SupportedVersions = []KafkaVersion{ V0_8_2_0, @@ -175,29 +206,74 @@ var ( V0_10_1_1, V0_10_2_0, V0_10_2_1, + V0_10_2_2, V0_11_0_0, V0_11_0_1, V0_11_0_2, V1_0_0_0, + V1_0_1_0, + V1_0_2_0, V1_1_0_0, V1_1_1_0, V2_0_0_0, V2_0_1_0, V2_1_0_0, + V2_1_1_0, V2_2_0_0, + V2_2_1_0, + V2_2_2_0, V2_3_0_0, + V2_3_1_0, V2_4_0_0, + V2_4_1_0, V2_5_0_0, + V2_5_1_0, V2_6_0_0, + V2_6_1_0, + V2_6_2_0, + V2_7_0_0, + V2_7_1_0, + V2_8_0_0, + V2_8_1_0, + V2_8_2_0, + V3_0_0_0, + V3_0_1_0, + V3_0_2_0, + V3_1_0_0, + V3_1_1_0, + V3_1_2_0, + V3_2_0_0, + V3_2_1_0, + V3_2_2_0, + V3_2_3_0, + V3_3_0_0, + V3_3_1_0, + } + MinVersion = V0_8_2_0 + MaxVersion = V3_3_1_0 + DefaultVersion = V1_0_0_0 + + // reduced set of versions to matrix test + fvtRangeVersions = []KafkaVersion{ + V0_8_2_2, + V0_10_2_2, + V1_0_2_0, + V1_1_1_0, + V2_0_1_0, + V2_2_2_0, + V2_4_1_0, + V2_6_2_0, + V2_8_2_0, + V3_1_2_0, + V3_2_3_0, + V3_3_1_0, } - MinVersion = V0_8_2_0 - MaxVersion = V2_6_0_0 ) -//ParseKafkaVersion parses and returns kafka version or error from a string +// ParseKafkaVersion parses and returns kafka version or error from a string func ParseKafkaVersion(s string) (KafkaVersion, error) { if len(s) < 5 { - return MinVersion, fmt.Errorf("invalid version `%s`", s) + return DefaultVersion, fmt.Errorf("invalid version `%s`", s) } var major, minor, veryMinor, patch uint var err error @@ -207,7 +283,7 @@ func ParseKafkaVersion(s string) (KafkaVersion, error) { err = scanKafkaVersion(s, `^\d+\.\d+\.\d+$`, "%d.%d.%d", [3]*uint{&major, &minor, &veryMinor}) } if err != nil { - return MinVersion, err + return DefaultVersion, err } return newKafkaVersion(major, minor, veryMinor, patch), nil } diff --git a/vendor/github.com/Shopify/sarama/version.go b/vendor/github.com/Shopify/sarama/version.go new file mode 100644 index 000000000..d3b9d53ff --- /dev/null +++ b/vendor/github.com/Shopify/sarama/version.go @@ -0,0 +1,27 @@ +package sarama + +import ( + "runtime/debug" + "sync" +) + +var ( + v string + vOnce sync.Once +) + +func version() string { + vOnce.Do(func() { + bi, ok := debug.ReadBuildInfo() + if ok { + v = bi.Main.Version + } + if v == "" || v == "(devel)" { + // if we can't read a go module version then they're using a git + // clone or vendored module so all we can do is report "dev" for + // the version to make a valid ApiVersions request + v = "dev" + } + }) + return v +} diff --git a/vendor/github.com/Shopify/sarama/zstd.go b/vendor/github.com/Shopify/sarama/zstd.go index 7c9951acc..6073ce7c4 100644 --- a/vendor/github.com/Shopify/sarama/zstd.go +++ b/vendor/github.com/Shopify/sarama/zstd.go @@ -6,23 +6,69 @@ import ( "github.com/klauspost/compress/zstd" ) -var ( - zstdDec *zstd.Decoder - zstdEnc *zstd.Encoder +// zstdMaxBufferedEncoders maximum number of not-in-use zstd encoders +// If the pool of encoders is exhausted then new encoders will be created on the fly +const zstdMaxBufferedEncoders = 1 - zstdEncOnce, zstdDecOnce sync.Once -) +type ZstdEncoderParams struct { + Level int +} +type ZstdDecoderParams struct { +} + +var zstdDecMap sync.Map + +var zstdAvailableEncoders sync.Map + +func getZstdEncoderChannel(params ZstdEncoderParams) chan *zstd.Encoder { + if c, ok := zstdAvailableEncoders.Load(params); ok { + return c.(chan *zstd.Encoder) + } + c, _ := zstdAvailableEncoders.LoadOrStore(params, make(chan 
*zstd.Encoder, zstdMaxBufferedEncoders)) + return c.(chan *zstd.Encoder) +} + +func getZstdEncoder(params ZstdEncoderParams) *zstd.Encoder { + select { + case enc := <-getZstdEncoderChannel(params): + return enc + default: + encoderLevel := zstd.SpeedDefault + if params.Level != CompressionLevelDefault { + encoderLevel = zstd.EncoderLevelFromZstd(params.Level) + } + zstdEnc, _ := zstd.NewWriter(nil, zstd.WithZeroFrames(true), + zstd.WithEncoderLevel(encoderLevel), + zstd.WithEncoderConcurrency(1)) + return zstdEnc + } +} + +func releaseEncoder(params ZstdEncoderParams, enc *zstd.Encoder) { + select { + case getZstdEncoderChannel(params) <- enc: + default: + } +} + +func getDecoder(params ZstdDecoderParams) *zstd.Decoder { + if ret, ok := zstdDecMap.Load(params); ok { + return ret.(*zstd.Decoder) + } + // It's possible to race and create multiple new readers. + // Only one will survive GC after use. + zstdDec, _ := zstd.NewReader(nil, zstd.WithDecoderConcurrency(0)) + zstdDecMap.Store(params, zstdDec) + return zstdDec +} -func zstdDecompress(dst, src []byte) ([]byte, error) { - zstdDecOnce.Do(func() { - zstdDec, _ = zstd.NewReader(nil) - }) - return zstdDec.DecodeAll(src, dst) +func zstdDecompress(params ZstdDecoderParams, dst, src []byte) ([]byte, error) { + return getDecoder(params).DecodeAll(src, dst) } -func zstdCompress(dst, src []byte) ([]byte, error) { - zstdEncOnce.Do(func() { - zstdEnc, _ = zstd.NewWriter(nil, zstd.WithZeroFrames(true)) - }) - return zstdEnc.EncodeAll(src, dst), nil +func zstdCompress(params ZstdEncoderParams, dst, src []byte) ([]byte, error) { + enc := getZstdEncoder(params) + out := enc.EncodeAll(src, dst) + releaseEncoder(params, enc) + return out, nil } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 05eb86229..6c26194f4 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -18208,6 +18208,9 @@ var awsPartition = partition{ endpointKey{ Region: "il-central-1", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -34389,9 +34392,21 @@ var awscnPartition = partition{ endpointKey{ Region: "cn-north-1", }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "states.cn-north-1.api.amazonwebservices.com.cn", + }, endpointKey{ Region: "cn-northwest-1", }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "states.cn-northwest-1.api.amazonwebservices.com.cn", + }, }, }, "storagegateway": service{ @@ -37313,21 +37328,45 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "glue.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-east-1", Variant: fipsVariant, }: endpoint{ Hostname: "glue-fips.us-gov-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "glue-fips.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "glue.us-gov-west-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", Variant: fipsVariant, }: endpoint{ Hostname: 
"glue-fips.us-gov-west-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "glue-fips.us-gov-west-1.api.aws", + }, }, }, "greengrass": service{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 9c69d71d8..2a896f2b9 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.46.2" +const SDKVersion = "1.46.3" diff --git a/vendor/github.com/cloudevents/sdk-go/protocol/kafka_sarama/v2/coverage.tmp b/vendor/github.com/cloudevents/sdk-go/protocol/kafka_sarama/v2/coverage.tmp new file mode 100644 index 000000000..79b28a0b6 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/protocol/kafka_sarama/v2/coverage.tmp @@ -0,0 +1 @@ +mode: atomic diff --git a/vendor/github.com/cloudevents/sdk-go/protocol/kafka_sarama/v2/doc.go b/vendor/github.com/cloudevents/sdk-go/protocol/kafka_sarama/v2/doc.go index 12dfff9fe..e42c22d7a 100644 --- a/vendor/github.com/cloudevents/sdk-go/protocol/kafka_sarama/v2/doc.go +++ b/vendor/github.com/cloudevents/sdk-go/protocol/kafka_sarama/v2/doc.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + /* Package kafka_sarama implements a Kafka binding using github.com/Shopify/sarama module */ diff --git a/vendor/github.com/cloudevents/sdk-go/protocol/kafka_sarama/v2/message.go b/vendor/github.com/cloudevents/sdk-go/protocol/kafka_sarama/v2/message.go index c6bf2ee6e..b9ff657d5 100644 --- a/vendor/github.com/cloudevents/sdk-go/protocol/kafka_sarama/v2/message.go +++ b/vendor/github.com/cloudevents/sdk-go/protocol/kafka_sarama/v2/message.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package kafka_sarama import ( @@ -115,7 +120,7 @@ func (m *Message) ReadBinary(ctx context.Context, encoder binding.BinaryWriter) } if m.Value != nil { - err = encoder.SetData(bytes.NewReader(m.Value)) + err = encoder.SetData(bytes.NewBuffer(m.Value)) } return diff --git a/vendor/github.com/cloudevents/sdk-go/protocol/kafka_sarama/v2/option.go b/vendor/github.com/cloudevents/sdk-go/protocol/kafka_sarama/v2/option.go index f14b35223..7f506a874 100644 --- a/vendor/github.com/cloudevents/sdk-go/protocol/kafka_sarama/v2/option.go +++ b/vendor/github.com/cloudevents/sdk-go/protocol/kafka_sarama/v2/option.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package kafka_sarama import ( diff --git a/vendor/github.com/cloudevents/sdk-go/protocol/kafka_sarama/v2/protocol.go b/vendor/github.com/cloudevents/sdk-go/protocol/kafka_sarama/v2/protocol.go index b8db98fd2..1c70c0852 100644 --- a/vendor/github.com/cloudevents/sdk-go/protocol/kafka_sarama/v2/protocol.go +++ b/vendor/github.com/cloudevents/sdk-go/protocol/kafka_sarama/v2/protocol.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package kafka_sarama import ( diff --git a/vendor/github.com/cloudevents/sdk-go/protocol/kafka_sarama/v2/receiver.go b/vendor/github.com/cloudevents/sdk-go/protocol/kafka_sarama/v2/receiver.go index 65f2c61d3..c8f2fdfc4 100644 --- a/vendor/github.com/cloudevents/sdk-go/protocol/kafka_sarama/v2/receiver.go +++ 
b/vendor/github.com/cloudevents/sdk-go/protocol/kafka_sarama/v2/receiver.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package kafka_sarama import ( @@ -32,30 +37,58 @@ func NewReceiver() *Receiver { } } -func (r *Receiver) Setup(sess sarama.ConsumerGroupSession) error { +func (r *Receiver) Setup(sarama.ConsumerGroupSession) error { return nil } func (r *Receiver) Cleanup(sarama.ConsumerGroupSession) error { + return nil +} + +func (r *Receiver) Close(context.Context) error { r.once.Do(func() { close(r.incoming) }) return nil } +// ConsumeClaim must start a consumer loop of ConsumerGroupClaim's Messages(). +// Also the method should return when `session.Context()` is done. +// Refer - https://github.com/Shopify/sarama/blob/5e2c2ef0e429f895c86152189f625bfdad7d3452/examples/consumergroup/main.go#L177 func (r *Receiver) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error { - for message := range claim.Messages() { - m := NewMessageFromConsumerMessage(message) - - r.incoming <- msgErr{ - msg: binding.WithFinish(m, func(err error) { - if protocol.IsACK(err) { - session.MarkMessage(message, "") - } - }), + // NOTE: + // Do not move the code below to a goroutine. + // The `ConsumeClaim` itself is called within a goroutine, see: + // https://github.com/Shopify/sarama/blob/main/consumer_group.go#L27-L29 + for { + select { + case msg, ok := <-claim.Messages(): + if !ok { + return nil + } + m := NewMessageFromConsumerMessage(msg) + + r.incoming <- msgErr{ + msg: binding.WithFinish(m, func(err error) { + if protocol.IsACK(err) { + session.MarkMessage(msg, "") + } + }), + } + + // Should return when `session.Context()` is done. + // If not, will raise `ErrRebalanceInProgress` or `read tcp :: i/o timeout` when kafka rebalance. see: + // https://github.com/Shopify/sarama/issues/1192 + // https://github.com/Shopify/sarama/issues/2118 + // Also checked Shopify/sarama code which calls this ConsumeClaim method, and don't see if there is any difference + // whether this method returns error or not. If it returns the error, as per current implementation, it could + // get printed in logs and later drained when the ConsumerGroup gets closed. 
+ // For now, to be on safer side, returning nil instead of session.Context().Err() as suggested in + // https://github.com/Shopify/sarama/blob/5e2c2ef0e429f895c86152189f625bfdad7d3452/examples/consumergroup/main.go + case <-session.Context().Done(): + return nil } } - return nil } func (r *Receiver) Receive(ctx context.Context) (binding.Message, error) { @@ -71,6 +104,7 @@ func (r *Receiver) Receive(ctx context.Context) (binding.Message, error) { } var _ protocol.Receiver = (*Receiver)(nil) +var _ protocol.Closer = (*Receiver)(nil) type Consumer struct { Receiver @@ -138,6 +172,7 @@ func (c *Consumer) OpenInbound(ctx context.Context) error { } func (c *Consumer) startConsumerGroupLoop(cg sarama.ConsumerGroup, ctx context.Context, errs chan<- error) { + defer c.Receiver.Close(ctx) // Need to be wrapped in a for loop // https://godoc.org/github.com/Shopify/sarama#ConsumerGroup for { @@ -152,7 +187,9 @@ func (c *Consumer) startConsumerGroupLoop(cg sarama.ConsumerGroup, ctx context.C return // Something else happened default: - if err == nil || err == sarama.ErrClosedClient || err == sarama.ErrClosedConsumerGroup { + if err == nil { + continue + } else if err == sarama.ErrClosedClient || err == sarama.ErrClosedConsumerGroup { // Consumer group closed correctly, we can close that loop return } else { diff --git a/vendor/github.com/cloudevents/sdk-go/protocol/kafka_sarama/v2/sender.go b/vendor/github.com/cloudevents/sdk-go/protocol/kafka_sarama/v2/sender.go index 0c92399a3..ffd04609d 100644 --- a/vendor/github.com/cloudevents/sdk-go/protocol/kafka_sarama/v2/sender.go +++ b/vendor/github.com/cloudevents/sdk-go/protocol/kafka_sarama/v2/sender.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package kafka_sarama import ( @@ -36,6 +41,11 @@ func NewSenderFromClient(client sarama.Client, topic string, options ...SenderOp return makeSender(producer, topic, options...), nil } +// NewSenderFromSyncProducer returns a binding.Sender that sends messages to a specific topic using sarama.SyncProducer +func NewSenderFromSyncProducer(topic string, syncProducer sarama.SyncProducer, options ...SenderOptionFunc) (*Sender, error) { + return makeSender(syncProducer, topic, options...), nil +} + func makeSender(syncProducer sarama.SyncProducer, topic string, options ...SenderOptionFunc) *Sender { s := &Sender{ topic: topic, diff --git a/vendor/github.com/cloudevents/sdk-go/protocol/kafka_sarama/v2/write_producer_message.go b/vendor/github.com/cloudevents/sdk-go/protocol/kafka_sarama/v2/write_producer_message.go index 3c7863471..180f2386c 100644 --- a/vendor/github.com/cloudevents/sdk-go/protocol/kafka_sarama/v2/write_producer_message.go +++ b/vendor/github.com/cloudevents/sdk-go/protocol/kafka_sarama/v2/write_producer_message.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package kafka_sarama import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/alias.go b/vendor/github.com/cloudevents/sdk-go/v2/alias.go index d60c10b26..2fbfaa9a7 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/alias.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/alias.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + // Package v2 reexports a subset of the SDK v2 API. 
package v2 @@ -9,7 +14,6 @@ import ( "github.com/cloudevents/sdk-go/v2/client" "github.com/cloudevents/sdk-go/v2/context" "github.com/cloudevents/sdk-go/v2/event" - "github.com/cloudevents/sdk-go/v2/observability" "github.com/cloudevents/sdk-go/v2/protocol" "github.com/cloudevents/sdk-go/v2/protocol/http" "github.com/cloudevents/sdk-go/v2/types" @@ -17,7 +21,7 @@ import ( // Client -type ClientOption client.Option +type ClientOption = client.Option type Client = client.Client // Event @@ -38,7 +42,7 @@ type URIRef = types.URIRef // HTTP Protocol -type HTTPOption http.Option +type HTTPOption = http.Option type HTTPProtocol = http.Protocol @@ -84,16 +88,20 @@ var ( // Client Creation - NewClient = client.New - NewClientObserved = client.NewObserved + NewClient = client.New + NewClientHTTP = client.NewHTTP + // Deprecated: please use New with the observability options. + NewClientObserved = client.NewObserved + // Deprecated: Please use NewClientHTTP with the observability options. NewDefaultClient = client.NewDefault NewHTTPReceiveHandler = client.NewHTTPReceiveHandler // Client Options - WithEventDefaulter = client.WithEventDefaulter - WithUUIDs = client.WithUUIDs - WithTimeNow = client.WithTimeNow + WithEventDefaulter = client.WithEventDefaulter + WithUUIDs = client.WithUUIDs + WithTimeNow = client.WithTimeNow + // Deprecated: this is now noop and will be removed in future releases. WithTracePropagation = client.WithTracePropagation() // Event Creation @@ -126,13 +134,19 @@ var ( ToMessage = binding.ToMessage - // HTTP Messages + // Event Creation - WriteHTTPRequest = http.WriteRequest + NewEventFromHTTPRequest = http.NewEventFromHTTPRequest + NewEventFromHTTPResponse = http.NewEventFromHTTPResponse + NewEventsFromHTTPRequest = http.NewEventsFromHTTPRequest + NewEventsFromHTTPResponse = http.NewEventsFromHTTPResponse + NewHTTPRequestFromEvent = http.NewHTTPRequestFromEvent + NewHTTPRequestFromEvents = http.NewHTTPRequestFromEvents + IsHTTPBatch = http.IsHTTPBatch - // Tracing + // HTTP Messages - EnableTracing = observability.EnableTracing + WriteHTTPRequest = http.WriteRequest // Context diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/binary_writer.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/binary_writer.go index a99cd0b70..97f2c4dd7 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/binding/binary_writer.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/binary_writer.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package binding import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/doc.go index 1176fad80..8fa999789 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/binding/doc.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/doc.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + /* Package binding defines interfaces for protocol bindings. 
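The alias.go hunk above changes ClientOption and HTTPOption from defined types (type ClientOption client.Option) to type aliases (type ClientOption = client.Option). A minimal sketch of the practical difference, using illustrative names rather than anything from the patch:

package main

import "fmt"

// Option mirrors the shape of client.Option: a functional option.
type Option func(interface{}) error

// DefinedOption shares Option's underlying type but is a distinct type,
// so values must be converted explicitly at every boundary.
type DefinedOption Option

// AliasOption is the same type as Option; values flow between the two
// names with no conversion at all.
type AliasOption = Option

func apply(target interface{}, opts ...Option) error {
	for _, o := range opts {
		if err := o(target); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	noop := func(interface{}) error { return nil }

	var a AliasOption = noop
	_ = apply(nil, a) // compiles: an AliasOption is an Option

	var d DefinedOption = noop
	_ = apply(nil, Option(d)) // defined type needs an explicit conversion
	fmt.Println("alias interoperates; defined type does not")
}

With the alias form, option values built against the client and http packages are interchangeable with the re-exported v2.ClientOption and v2.HTTPOption names, which is presumably why the patch makes the switch.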
diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/encoding.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/encoding.go index 0b6efe636..5070b7295 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/binding/encoding.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/encoding.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package binding import "errors" @@ -14,6 +19,9 @@ const ( EncodingEvent // When the encoding is unknown (which means that the message is a non-event) EncodingUnknown + + // EncodingBatch is an instance of JSON Batched Events + EncodingBatch ) func (e Encoding) String() string { @@ -24,6 +32,8 @@ func (e Encoding) String() string { return "structured" case EncodingEvent: return "event" + case EncodingBatch: + return "batch" case EncodingUnknown: return "unknown" } diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/event_message.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/event_message.go index 7afc13d16..f82c729c4 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/binding/event_message.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/event_message.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package binding import ( @@ -48,7 +53,7 @@ func (m *EventMessage) ReadBinary(ctx context.Context, b BinaryWriter) (err erro // Pass the body body := (*event.Event)(m).Data() if len(body) > 0 { - err = b.SetData(bytes.NewReader(body)) + err = b.SetData(bytes.NewBuffer(body)) if err != nil { return err } diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/finish_message.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/finish_message.go index 17445bfe5..8b51c4c61 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/binding/finish_message.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/finish_message.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package binding import "github.com/cloudevents/sdk-go/v2/binding/spec" diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/format/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/format/doc.go index ab153afbb..54c3f1a8c 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/binding/format/doc.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/format/doc.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + /* Package format formats structured events. diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/format/format.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/format/format.go index 9e2b1ec67..6bdd1842b 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/binding/format/format.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/format/format.go @@ -1,7 +1,13 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package format import ( "encoding/json" + "errors" "fmt" "strings" @@ -36,12 +42,33 @@ func (jsonFmt) Unmarshal(b []byte, e *event.Event) error { return json.Unmarshal(b, e) } +// JSONBatch is the built-in "application/cloudevents-batch+json" format. 
+var JSONBatch = jsonBatchFmt{} + +type jsonBatchFmt struct{} + +func (jb jsonBatchFmt) MediaType() string { + return event.ApplicationCloudEventsBatchJSON +} + +// Marshal will return an error for jsonBatchFmt since the Format interface doesn't support batch Marshalling, and we +// know it's structured batch json, we'll go direct to the json.UnMarshall() (see `ToEvents()`) since that is the best +// way to support batch operations for now. +func (jb jsonBatchFmt) Marshal(e *event.Event) ([]byte, error) { + return nil, errors.New("not supported for batch events") +} + +func (jb jsonBatchFmt) Unmarshal(b []byte, e *event.Event) error { + return errors.New("not supported for batch events") +} + // built-in formats var formats map[string]Format func init() { formats = map[string]Format{} Add(JSON) + Add(JSONBatch) } // Lookup returns the format for contentType, or nil if not found. diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/message.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/message.go index 7222f7154..e30e150c0 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/binding/message.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/message.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package binding import ( @@ -118,6 +123,13 @@ type ExactlyOnceMessage interface { Received(settle func(error)) } +// MessageContext interface exposes the internal context that a message might contain +// Only some Message implementations implement this interface. +type MessageContext interface { + // Get the context associated with this message + Context() context.Context +} + // MessageWrapper interface is used to walk through a decorated Message and unwrap it. type MessageWrapper interface { Message diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/attributes.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/attributes.go index 20ec1ce92..3c3021d46 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/attributes.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/attributes.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package spec import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/doc.go index 38d6fddf9..44c0b3145 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/doc.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/doc.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + /* Package spec provides spec-version metadata. 
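The JSONBatch format above registers the application/cloudevents-batch+json media type while rejecting per-event Marshal and Unmarshal: a structured batch body is a single JSON array and is decoded in one pass (the ToEvents helper later in this patch does exactly that). A minimal sketch of the decode path, assuming only the sdk-go event package:

package main

import (
	"encoding/json"
	"fmt"
	"strings"

	"github.com/cloudevents/sdk-go/v2/event"
)

func main() {
	// A structured batch is one JSON array of events, so the whole body
	// is decoded at once rather than event by event.
	body := `[
		{"specversion":"1.0","id":"1","type":"com.example.ping","source":"/demo"},
		{"specversion":"1.0","id":"2","type":"com.example.ping","source":"/demo"}
	]`

	var events []event.Event
	if err := json.NewDecoder(strings.NewReader(body)).Decode(&events); err != nil {
		panic(err)
	}
	for _, e := range events {
		fmt.Println(e.ID(), e.Type())
	}
}

This is also why jsonBatchFmt returns errors from both Format methods: the Format interface is per-event, and a batched payload has no meaningful single-event representation.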
diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/match_exact_version.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/match_exact_version.go index 5976faf12..110787ddc 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/match_exact_version.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/match_exact_version.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package spec import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/spec.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/spec.go index 4de589185..7fa0f5840 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/spec.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/spec.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package spec import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/structured_writer.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/structured_writer.go index 8cf2bbe3e..60256f2b3 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/binding/structured_writer.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/structured_writer.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package binding import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/to_event.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/to_event.go index d22a32e93..d3332c158 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/binding/to_event.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/to_event.go @@ -1,8 +1,14 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package binding import ( "bytes" "context" + "encoding/json" "errors" "fmt" "io" @@ -16,6 +22,9 @@ import ( // ErrCannotConvertToEvent is a generic error when a conversion of a Message to an Event fails var ErrCannotConvertToEvent = errors.New("cannot convert message to event") +// ErrCannotConvertToEvents is a generic error when a conversion of a Message to a Batched Event fails +var ErrCannotConvertToEvents = errors.New("cannot convert message to batched events") + // ToEvent translates a Message with a valid Structured or Binary representation to an Event. // This function returns the Event generated from the Message and the original encoding of the message or // an error that points the conversion error. @@ -56,6 +65,21 @@ func ToEvent(ctx context.Context, message MessageReader, transformers ...Transfo return &e, Transformers(transformers).Transform((*EventMessage)(&e), encoder) } +// ToEvents translates a Batch Message and corresponding Reader data to a slice of Events. +// This function returns the Events generated from the body data, or an error that points +// to the conversion issue. +func ToEvents(ctx context.Context, message MessageReader, body io.Reader) ([]event.Event, error) { + messageEncoding := message.ReadEncoding() + if messageEncoding != EncodingBatch { + return nil, ErrCannotConvertToEvents + } + + // Since Format doesn't support batch Marshalling, and we know it's structured batch json, we'll go direct to the + // json.UnMarshall(), since that is the best way to support batch operations for now. 
+ var events []event.Event + return events, json.NewDecoder(body).Decode(&events) +} + type messageToEventBuilder event.Event var _ StructuredWriter = (*messageToEventBuilder)(nil) @@ -79,12 +103,15 @@ func (b *messageToEventBuilder) End(ctx context.Context) error { } func (b *messageToEventBuilder) SetData(data io.Reader) error { - var buf bytes.Buffer - w, err := io.Copy(&buf, data) - if err != nil { - return err + buf, ok := data.(*bytes.Buffer) + if !ok { + buf = new(bytes.Buffer) + _, err := io.Copy(buf, data) + if err != nil { + return err + } } - if w != 0 { + if buf.Len() > 0 { b.DataEncoded = buf.Bytes() } return nil diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/transformer.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/transformer.go index 6ab4f1e5d..de3bec44f 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/binding/transformer.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/transformer.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package binding // Transformer is an interface that implements a transformation diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/write.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/write.go index ff7cf5fb7..cb498e62d 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/binding/write.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/write.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package binding import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/client.go b/vendor/github.com/cloudevents/sdk-go/v2/client/client.go index 062799fbb..ea8fbfbb4 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/client/client.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/client.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package client import ( @@ -50,7 +55,8 @@ type Client interface { func New(obj interface{}, opts ...Option) (Client, error) { c := &ceClient{ // Running runtime.GOMAXPROCS(0) doesn't update the value, just returns the current one - pollGoroutines: runtime.GOMAXPROCS(0), + pollGoroutines: runtime.GOMAXPROCS(0), + observabilityService: noopObservabilityService{}, } if p, ok := obj.(protocol.Sender); ok { @@ -83,11 +89,15 @@ type ceClient struct { // Optional. opener protocol.Opener + observabilityService ObservabilityService + + inboundContextDecorators []func(context.Context, binding.Message) context.Context outboundContextDecorators []func(context.Context) context.Context invoker Invoker receiverMu sync.Mutex eventDefaulterFns []EventDefaulter pollGoroutines int + blockingCallback bool } func (c *ceClient) applyOptions(opts ...Option) error { @@ -100,8 +110,10 @@ func (c *ceClient) applyOptions(opts ...Option) error { } func (c *ceClient) Send(ctx context.Context, e event.Event) protocol.Result { + var err error if c.sender == nil { - return errors.New("sender not set") + err = errors.New("sender not set") + return err } for _, f := range c.outboundContextDecorators { @@ -113,17 +125,24 @@ func (c *ceClient) Send(ctx context.Context, e event.Event) protocol.Result { e = fn(ctx, e) } } - - if err := e.Validate(); err != nil { + if err = e.Validate(); err != nil { return err } - return c.sender.Send(ctx, (*binding.EventMessage)(&e)) + // Event has been defaulted and validated, record we are going to perform send. 
+ ctx, cb := c.observabilityService.RecordSendingEvent(ctx, e) + err = c.sender.Send(ctx, (*binding.EventMessage)(&e)) + defer cb(err) + return err } func (c *ceClient) Request(ctx context.Context, e event.Event) (*event.Event, protocol.Result) { + var resp *event.Event + var err error + if c.requester == nil { - return nil, errors.New("requester not set") + err = errors.New("requester not set") + return nil, err } for _, f := range c.outboundContextDecorators { ctx = f(ctx) @@ -135,13 +154,16 @@ func (c *ceClient) Request(ctx context.Context, e event.Event) (*event.Event, pr } } - if err := e.Validate(); err != nil { + if err = e.Validate(); err != nil { return nil, err } + // Event has been defaulted and validated, record we are going to perform request. + ctx, cb := c.observabilityService.RecordRequestEvent(ctx, e) + // If provided a requester, use it to do request/response. - var resp *event.Event - msg, err := c.requester.Request(ctx, (*binding.EventMessage)(&e)) + var msg binding.Message + msg, err = c.requester.Request(ctx, (*binding.EventMessage)(&e)) if msg != nil { defer func() { if err := msg.Finish(err); err != nil { @@ -159,17 +181,20 @@ func (c *ceClient) Request(ctx context.Context, e event.Event) (*event.Event, pr // If the protocol returns no error, it is an ACK on the request, but we had // issues turning the response into an event, so make an ACK Result and pass // down the ToEvent error as well. - err = protocol.NewReceipt(true, "failed to convert response into event: %s\n%w", rserr.Error(), err) + err = protocol.NewReceipt(true, "failed to convert response into event: %v\n%w", rserr, err) } else { resp = rs } - + defer cb(err, resp) return resp, err } // StartReceiver sets up the given fn to handle Receive. // See Client.StartReceiver for details. This is a blocking call. func (c *ceClient) StartReceiver(ctx context.Context, fn interface{}) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + c.receiverMu.Lock() defer c.receiverMu.Unlock() @@ -177,7 +202,7 @@ func (c *ceClient) StartReceiver(ctx context.Context, fn interface{}) error { return fmt.Errorf("client already has a receiver") } - invoker, err := newReceiveInvoker(fn, c.eventDefaulterFns...) // TODO: this will have to pick between a observed invoker or not. + invoker, err := newReceiveInvoker(fn, c.observabilityService, c.inboundContextDecorators, c.eventDefaulterFns...) if err != nil { return err } @@ -197,15 +222,6 @@ func (c *ceClient) StartReceiver(ctx context.Context, fn interface{}) error { c.invoker = nil }() - // Start the opener, if set. - if c.opener != nil { - go func() { - if err := c.opener.OpenInbound(ctx); err != nil { - cecontext.LoggerFrom(ctx).Errorf("Error while opening the inbound connection: %s", err) - } - }() - } - // Start Polling. wg := sync.WaitGroup{} for i := 0; i < c.pollGoroutines; i++ { @@ -229,18 +245,41 @@ func (c *ceClient) StartReceiver(ctx context.Context, fn interface{}) error { } if err != nil { - cecontext.LoggerFrom(ctx).Warnf("Error while receiving a message: %s", err) + cecontext.LoggerFrom(ctx).Warn("Error while receiving a message: ", err) continue } - if err := c.invoker.Invoke(ctx, msg, respFn); err != nil { - cecontext.LoggerFrom(ctx).Warnf("Error while handling a message: %s", err) + callback := func() { + if err := c.invoker.Invoke(ctx, msg, respFn); err != nil { + cecontext.LoggerFrom(ctx).Warn("Error while handling a message: ", err) + } + } + + if c.blockingCallback { + callback() + } else { + // Do not block on the invoker. 
+ wg.Add(1) + go func() { + defer wg.Done() + callback() + }() } } }() } + + // Start the opener, if set. + if c.opener != nil { + if err = c.opener.OpenInbound(ctx); err != nil { + err = fmt.Errorf("error while opening the inbound connection: %w", err) + cancel() + } + } + wg.Wait() - return nil + + return err } // noRespFn is used to simply forward the protocol.Result for receivers that aren't responders diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/client_default.go b/vendor/github.com/cloudevents/sdk-go/v2/client/client_default.go deleted file mode 100644 index 82877d679..000000000 --- a/vendor/github.com/cloudevents/sdk-go/v2/client/client_default.go +++ /dev/null @@ -1,26 +0,0 @@ -package client - -import ( - "github.com/cloudevents/sdk-go/v2/protocol/http" -) - -// NewDefault provides the good defaults for the common case using an HTTP -// Protocol client. The http transport has had WithBinaryEncoding http -// transport option applied to it. The client will always send Binary -// encoding but will inspect the outbound event context and match the version. -// The WithTimeNow, and WithUUIDs client options are also applied to the -// client, all outbound events will have a time and id set if not already -// present. -func NewDefault() (Client, error) { - p, err := http.New() - if err != nil { - return nil, err - } - - c, err := NewObserved(p, WithTimeNow(), WithUUIDs()) - if err != nil { - return nil, err - } - - return c, nil -} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/client_http.go b/vendor/github.com/cloudevents/sdk-go/v2/client/client_http.go new file mode 100644 index 000000000..d48cc2042 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/client_http.go @@ -0,0 +1,35 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "github.com/cloudevents/sdk-go/v2/protocol/http" +) + +// NewHTTP provides the good defaults for the common case using an HTTP +// Protocol client. +// The WithTimeNow, and WithUUIDs client options are also applied to the +// client, all outbound events will have a time and id set if not already +// present. +func NewHTTP(opts ...http.Option) (Client, error) { + p, err := http.New(opts...) + if err != nil { + return nil, err + } + + c, err := New(p, WithTimeNow(), WithUUIDs()) + if err != nil { + return nil, err + } + + return c, nil +} + +// NewDefault has been replaced by NewHTTP +// Deprecated. To get the same as NewDefault provided, please use NewHTTP with +// the observability service passed as an option, or client.NewClientHTTP from +// package github.com/cloudevents/sdk-go/observability/opencensus/v2/client +var NewDefault = NewHTTP diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/client_observed.go b/vendor/github.com/cloudevents/sdk-go/v2/client/client_observed.go index 5feb4f8d0..82985b8a7 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/client/client_observed.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/client_observed.go @@ -1,101 +1,12 @@ -package client +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ -import ( - "context" - "github.com/cloudevents/sdk-go/v2/event" - "github.com/cloudevents/sdk-go/v2/extensions" - "github.com/cloudevents/sdk-go/v2/observability" - "github.com/cloudevents/sdk-go/v2/protocol" - "go.opencensus.io/trace" -) +package client // NewObserved produces a new client with the provided transport object and applied // client options. 
-func NewObserved(protocol interface{}, opts ...Option) (Client, error) { - client, err := New(protocol, opts...) - if err != nil { - return nil, err - } - - c := &obsClient{client: client} - - if err := c.applyOptions(opts...); err != nil { - return nil, err - } - return c, nil -} - -type obsClient struct { - client Client - - addTracing bool -} - -func (c *obsClient) applyOptions(opts ...Option) error { - for _, fn := range opts { - if err := fn(c); err != nil { - return err - } - } - return nil -} - -// Send transmits the provided event on a preconfigured Protocol. Send returns -// an error if there was an an issue validating the outbound event or the -// transport returns an error. -func (c *obsClient) Send(ctx context.Context, e event.Event) protocol.Result { - ctx, r := observability.NewReporter(ctx, reportSend) - ctx, span := trace.StartSpan(ctx, observability.ClientSpanName, trace.WithSpanKind(trace.SpanKindClient)) - defer span.End() - if span.IsRecordingEvents() { - span.AddAttributes(EventTraceAttributes(&e)...) - } - - if c.addTracing { - e.Context = e.Context.Clone() - extensions.FromSpanContext(span.SpanContext()).AddTracingAttributes(&e) - } - - result := c.client.Send(ctx, e) - - if protocol.IsACK(result) { - r.OK() - } else { - r.Error() - } - return result -} - -func (c *obsClient) Request(ctx context.Context, e event.Event) (*event.Event, protocol.Result) { - ctx, r := observability.NewReporter(ctx, reportRequest) - ctx, span := trace.StartSpan(ctx, observability.ClientSpanName, trace.WithSpanKind(trace.SpanKindClient)) - defer span.End() - if span.IsRecordingEvents() { - span.AddAttributes(EventTraceAttributes(&e)...) - } - - resp, result := c.client.Request(ctx, e) - - if protocol.IsACK(result) { - r.OK() - } else { - r.Error() - } - - return resp, result -} - -// StartReceiver sets up the given fn to handle Receive. -// See Client.StartReceiver for details. This is a blocking call. -func (c *obsClient) StartReceiver(ctx context.Context, fn interface{}) error { - ctx, r := observability.NewReporter(ctx, reportStartReceiver) - - err := c.client.StartReceiver(ctx, fn) - - if err != nil { - r.Error() - } else { - r.OK() - } - return err -} +// Deprecated: This now has the same behaviour of New, and will be removed in future releases. +// As New, you must provide the observability service to use. +var NewObserved = New diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/defaulters.go b/vendor/github.com/cloudevents/sdk-go/v2/client/defaulters.go index 5d0d7bc94..7bfebf35c 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/client/defaulters.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/defaulters.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package client import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/client/doc.go index a6a602bb4..e09962ce6 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/client/doc.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/doc.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + /* Package client holds the recommended entry points for interacting with the CloudEvents Golang SDK. The client wraps a selected transport. 
The client adds validation and defaulting for sending events, and flexible receiver method diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/http_receiver.go b/vendor/github.com/cloudevents/sdk-go/v2/client/http_receiver.go index 467cff9e5..94a4b4e65 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/client/http_receiver.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/http_receiver.go @@ -1,15 +1,20 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package client import ( "context" - "net/http" - "sync" - + cecontext "github.com/cloudevents/sdk-go/v2/context" thttp "github.com/cloudevents/sdk-go/v2/protocol/http" + "go.uber.org/zap" + "net/http" ) func NewHTTPReceiveHandler(ctx context.Context, p *thttp.Protocol, fn interface{}) (*EventReceiver, error) { - invoker, err := newReceiveInvoker(fn) + invoker, err := newReceiveInvoker(fn, noopObservabilityService{}, nil) //TODO(slinkydeveloper) maybe not nil? if err != nil { return nil, err } @@ -26,20 +31,15 @@ type EventReceiver struct { } func (r *EventReceiver) ServeHTTP(rw http.ResponseWriter, req *http.Request) { - wg := sync.WaitGroup{} - wg.Add(1) + // Prepare to handle the message if there's one (context cancellation will ensure this closes) go func() { - r.p.ServeHTTP(rw, req) - wg.Done() + ctx := req.Context() + msg, respFn, err := r.p.Respond(ctx) + if err != nil { + cecontext.LoggerFrom(context.TODO()).Debugw("failed to call Respond", zap.Error(err)) + } else if err := r.invoker.Invoke(ctx, msg, respFn); err != nil { + cecontext.LoggerFrom(context.TODO()).Debugw("failed to call Invoke", zap.Error(err)) + } }() - - ctx := req.Context() - msg, respFn, err := r.p.Respond(ctx) - if err != nil { - //lint:ignore SA9003 TODO: Branch left empty - } else if err := r.invoker.Invoke(ctx, msg, respFn); err != nil { - // TODO - } - // Block until ServeHTTP has returned - wg.Wait() + r.p.ServeHTTP(rw, req) } diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/invoker.go b/vendor/github.com/cloudevents/sdk-go/v2/client/invoker.go index 162ae27e2..403fb0f55 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/client/invoker.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/invoker.go @@ -1,7 +1,13 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package client import ( "context" + "fmt" "github.com/cloudevents/sdk-go/v2/binding" cecontext "github.com/cloudevents/sdk-go/v2/context" @@ -17,9 +23,11 @@ type Invoker interface { var _ Invoker = (*receiveInvoker)(nil) -func newReceiveInvoker(fn interface{}, fns ...EventDefaulter) (Invoker, error) { +func newReceiveInvoker(fn interface{}, observabilityService ObservabilityService, inboundContextDecorators []func(context.Context, binding.Message) context.Context, fns ...EventDefaulter) (Invoker, error) { r := &receiveInvoker{ - eventDefaulterFns: fns, + eventDefaulterFns: fns, + observabilityService: observabilityService, + inboundContextDecorators: inboundContextDecorators, } if fn, err := receiver(fn); err != nil { @@ -32,8 +40,10 @@ func newReceiveInvoker(fn interface{}, fns ...EventDefaulter) (Invoker, error) { } type receiveInvoker struct { - fn *receiverFn - eventDefaulterFns []EventDefaulter + fn *receiverFn + observabilityService ObservabilityService + eventDefaulterFns []EventDefaulter + inboundContextDecorators []func(context.Context, binding.Message) context.Context } func (r *receiveInvoker) Invoke(ctx context.Context, m binding.Message, respFn protocol.ResponseFn) (err error) 
{ @@ -47,18 +57,35 @@ func (r *receiveInvoker) Invoke(ctx context.Context, m binding.Message, respFn p e, eventErr := binding.ToEvent(ctx, m) switch { case eventErr != nil && r.fn.hasEventIn: + r.observabilityService.RecordReceivedMalformedEvent(ctx, eventErr) return respFn(ctx, nil, protocol.NewReceipt(false, "failed to convert Message to Event: %w", eventErr)) case r.fn != nil: // Check if event is valid before invoking the receiver function if e != nil { if validationErr := e.Validate(); validationErr != nil { + r.observabilityService.RecordReceivedMalformedEvent(ctx, validationErr) return respFn(ctx, nil, protocol.NewReceipt(false, "validation error in incoming event: %w", validationErr)) } } // Let's invoke the receiver fn var resp *event.Event - resp, result = r.fn.invoke(ctx, e) + resp, result = func() (resp *event.Event, result protocol.Result) { + defer func() { + if r := recover(); r != nil { + result = fmt.Errorf("call to Invoker.Invoke(...) has panicked: %v", r) + cecontext.LoggerFrom(ctx).Error(result) + } + }() + ctx = computeInboundContext(m, ctx, r.inboundContextDecorators) + + var cb func(error) + ctx, cb = r.observabilityService.RecordCallingInvoker(ctx, e) + + resp, result = r.fn.invoke(ctx, e) + defer cb(result) + return + }() if respFn == nil { break @@ -71,7 +98,7 @@ func (r *receiveInvoker) Invoke(ctx context.Context, m binding.Message, respFn p } // Validate the event conforms to the CloudEvents Spec. if vErr := resp.Validate(); vErr != nil { - cecontext.LoggerFrom(ctx).Errorf("cloudevent validation failed on response event: %w", vErr) + cecontext.LoggerFrom(ctx).Errorf("cloudevent validation failed on response event: %v", vErr) } } @@ -97,3 +124,14 @@ func (r *receiveInvoker) IsReceiver() bool { func (r *receiveInvoker) IsResponder() bool { return r.fn.hasEventOut } + +func computeInboundContext(message binding.Message, fallback context.Context, inboundContextDecorators []func(context.Context, binding.Message) context.Context) context.Context { + result := fallback + if mctx, ok := message.(binding.MessageContext); ok { + result = cecontext.ValuesDelegating(mctx.Context(), fallback) + } + for _, f := range inboundContextDecorators { + result = f(result, message) + } + return result +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/observability.go b/vendor/github.com/cloudevents/sdk-go/v2/client/observability.go index 4c1905957..75005d3bb 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/client/observability.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/observability.go @@ -1,96 +1,54 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package client import ( "context" - "github.com/cloudevents/sdk-go/v2/event" - "github.com/cloudevents/sdk-go/v2/extensions" - "github.com/cloudevents/sdk-go/v2/observability" - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/trace" -) -var ( - // LatencyMs measures the latency in milliseconds for the CloudEvents - // client methods. - LatencyMs = stats.Float64("cloudevents.io/sdk-go/client/latency", "The latency in milliseconds for the CloudEvents client methods.", "ms") -) - -var ( - // LatencyView is an OpenCensus view that shows client method latency. 
- LatencyView = &view.View{ - Name: "client/latency", - Measure: LatencyMs, - Description: "The distribution of latency inside of client for CloudEvents.", - Aggregation: view.Distribution(0, .01, .1, 1, 10, 100, 1000, 10000), - TagKeys: observability.LatencyTags(), - } + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/event" ) -type observed int32 - -// Adheres to Observable -var _ observability.Observable = observed(0) +// ObservabilityService is an interface users can implement to record metrics, create tracing spans, and plug other observability tools in the Client +type ObservabilityService interface { + // InboundContextDecorators is a method that returns the InboundContextDecorators that must be mounted in the Client to properly propagate some tracing informations. + InboundContextDecorators() []func(context.Context, binding.Message) context.Context + + // RecordReceivedMalformedEvent is invoked when an event was received but it's malformed or invalid. + RecordReceivedMalformedEvent(ctx context.Context, err error) + // RecordCallingInvoker is invoked before the user function is invoked. + // The returned callback will be invoked after the user finishes to process the event with the eventual processing error + // The error provided to the callback could be both a processing error, or a result + RecordCallingInvoker(ctx context.Context, event *event.Event) (context.Context, func(errOrResult error)) + // RecordSendingEvent is invoked before the event is sent. + // The returned callback will be invoked when the response is received + // The error provided to the callback could be both a processing error, or a result + RecordSendingEvent(ctx context.Context, event event.Event) (context.Context, func(errOrResult error)) + + // RecordRequestEvent is invoked before the event is requested. 
+ // The returned callback will be invoked when the response is received + RecordRequestEvent(ctx context.Context, event event.Event) (context.Context, func(errOrResult error, event *event.Event)) +} -const ( - specversionAttr = "cloudevents.specversion" - idAttr = "cloudevents.id" - typeAttr = "cloudevents.type" - sourceAttr = "cloudevents.source" - subjectAttr = "cloudevents.subject" - datacontenttypeAttr = "cloudevents.datacontenttype" +type noopObservabilityService struct{} - reportSend observed = iota - reportRequest - reportStartReceiver -) - -// MethodName implements Observable.MethodName -func (o observed) MethodName() string { - switch o { - case reportSend: - return "send" - case reportRequest: - return "request" - case reportStartReceiver: - return "start_receiver" - default: - return "unknown" - } +func (n noopObservabilityService) InboundContextDecorators() []func(context.Context, binding.Message) context.Context { + return nil } -// LatencyMs implements Observable.LatencyMs -func (o observed) LatencyMs() *stats.Float64Measure { - return LatencyMs +func (n noopObservabilityService) RecordReceivedMalformedEvent(ctx context.Context, err error) {} + +func (n noopObservabilityService) RecordCallingInvoker(ctx context.Context, event *event.Event) (context.Context, func(errOrResult error)) { + return ctx, func(errOrResult error) {} } -func EventTraceAttributes(e event.EventReader) []trace.Attribute { - as := []trace.Attribute{ - trace.StringAttribute(specversionAttr, e.SpecVersion()), - trace.StringAttribute(idAttr, e.ID()), - trace.StringAttribute(typeAttr, e.Type()), - trace.StringAttribute(sourceAttr, e.Source()), - } - if sub := e.Subject(); sub != "" { - as = append(as, trace.StringAttribute(subjectAttr, sub)) - } - if dct := e.DataContentType(); dct != "" { - as = append(as, trace.StringAttribute(datacontenttypeAttr, dct)) - } - return as +func (n noopObservabilityService) RecordSendingEvent(ctx context.Context, event event.Event) (context.Context, func(errOrResult error)) { + return ctx, func(errOrResult error) {} } -// TraceSpan returns context and trace.Span based on event. Caller must call span.End() -func TraceSpan(ctx context.Context, e event.Event) (context.Context, *trace.Span) { - var span *trace.Span - if ext, ok := extensions.GetDistributedTracingExtension(e); ok { - ctx, span = ext.StartChildSpan(ctx, observability.ClientSpanName, trace.WithSpanKind(trace.SpanKindServer)) - } - if span == nil { - ctx, span = trace.StartSpan(ctx, observability.ClientSpanName, trace.WithSpanKind(trace.SpanKindServer)) - } - if span.IsRecordingEvents() { - span.AddAttributes(EventTraceAttributes(&e)...) - } - return ctx, span +func (n noopObservabilityService) RecordRequestEvent(ctx context.Context, e event.Event) (context.Context, func(errOrResult error, event *event.Event)) { + return ctx, func(errOrResult error, event *event.Event) {} } diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/options.go b/vendor/github.com/cloudevents/sdk-go/v2/client/options.go index aeec1eb28..938478162 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/client/options.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/options.go @@ -1,7 +1,14 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package client import ( + "context" "fmt" + "github.com/cloudevents/sdk-go/v2/binding" ) @@ -63,11 +70,11 @@ func WithTimeNow() Option { // WithTracePropagation enables trace propagation via the distributed tracing // extension. 
+// Deprecated: this is now noop and will be removed in future releases. +// Don't use distributed tracing extension to propagate traces: +// https://github.com/cloudevents/spec/blob/v1.0.1/extensions/distributed-tracing.md#using-the-distributed-tracing-extension func WithTracePropagation() Option { return func(i interface{}) error { - if c, ok := i.(*obsClient); ok { - c.addTracing = true - } return nil } } @@ -83,3 +90,39 @@ func WithPollGoroutines(pollGoroutines int) Option { return nil } } + +// WithObservabilityService configures the observability service to use +// to record traces and metrics +func WithObservabilityService(service ObservabilityService) Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + c.observabilityService = service + c.inboundContextDecorators = append(c.inboundContextDecorators, service.InboundContextDecorators()...) + } + return nil + } +} + +// WithInboundContextDecorator configures a new inbound context decorator. +// Inbound context decorators are invoked to wrap additional informations from the binding.Message +// and propagate these informations in the context passed to the event receiver. +func WithInboundContextDecorator(dec func(context.Context, binding.Message) context.Context) Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + c.inboundContextDecorators = append(c.inboundContextDecorators, dec) + } + return nil + } +} + +// WithBlockingCallback makes the callback passed into StartReceiver is executed as a blocking call, +// i.e. in each poll go routine, the next event will not be received until the callback on current event completes. +// To make event processing serialized (no concurrency), use this option along with WithPollGoroutines(1) +func WithBlockingCallback() Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + c.blockingCallback = true + } + return nil + } +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/receiver.go b/vendor/github.com/cloudevents/sdk-go/v2/client/receiver.go index e1d1544c6..b1ab532d7 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/client/receiver.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/receiver.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package client import ( @@ -41,17 +46,17 @@ var ( // validate and invoke the provided function. 
// Valid fn signatures are: // * func() -// * func() error +// * func() protocol.Result // * func(context.Context) -// * func(context.Context) transport.Result +// * func(context.Context) protocol.Result // * func(event.Event) // * func(event.Event) transport.Result // * func(context.Context, event.Event) -// * func(context.Context, event.Event) transport.Result +// * func(context.Context, event.Event) protocol.Result // * func(event.Event) *event.Event -// * func(event.Event) (*event.Event, transport.Result) -// * func(context.Context, event.Event, *event.Event -// * func(context.Context, event.Event) (*event.Event, transport.Result) +// * func(event.Event) (*event.Event, protocol.Result) +// * func(context.Context, event.Event) *event.Event +// * func(context.Context, event.Event) (*event.Event, protocol.Result) // func receiver(fn interface{}) (*receiverFn, error) { fnType := reflect.TypeOf(fn) @@ -113,16 +118,16 @@ func (r *receiverFn) validateInParamSignature(fnType reflect.Type) error { switch fnType.NumIn() { case 2: // has to be (context.Context, event.Event) - if !fnType.In(1).ConvertibleTo(eventType) { - return fmt.Errorf("%s; cannot convert parameter 2 from %s to event.Event", inParamUsage, fnType.In(1)) + if !eventType.ConvertibleTo(fnType.In(1)) { + return fmt.Errorf("%s; cannot convert parameter 2 to %s from event.Event", inParamUsage, fnType.In(1)) } else { r.hasEventIn = true } fallthrough case 1: - if !fnType.In(0).ConvertibleTo(contextType) { - if !fnType.In(0).ConvertibleTo(eventType) { - return fmt.Errorf("%s; cannot convert parameter 1 from %s to context.Context or event.Event", inParamUsage, fnType.In(0)) + if !contextType.ConvertibleTo(fnType.In(0)) { + if !eventType.ConvertibleTo(fnType.In(0)) { + return fmt.Errorf("%s; cannot convert parameter 1 to %s from context.Context or event.Event", inParamUsage, fnType.In(0)) } else if r.hasEventIn { return fmt.Errorf("%s; duplicate parameter of type event.Event", inParamUsage) } else { diff --git a/vendor/github.com/cloudevents/sdk-go/v2/context/context.go b/vendor/github.com/cloudevents/sdk-go/v2/context/context.go index f9843dd61..fc9ef0315 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/context/context.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/context/context.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package context import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/context/delegating.go b/vendor/github.com/cloudevents/sdk-go/v2/context/delegating.go new file mode 100644 index 000000000..434a4da7a --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/context/delegating.go @@ -0,0 +1,25 @@ +package context + +import "context" + +type valuesDelegating struct { + context.Context + parent context.Context +} + +// ValuesDelegating wraps a child and parent context. It will perform Value() +// lookups first on the child, and then fall back to the child. All other calls +// go solely to the child context. 
+func ValuesDelegating(child, parent context.Context) context.Context { + return &valuesDelegating{ + Context: child, + parent: parent, + } +} + +func (c *valuesDelegating) Value(key interface{}) interface{} { + if val := c.Context.Value(key); val != nil { + return val + } + return c.parent.Value(key) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/context/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/context/doc.go index 377cab850..0b2dcaf70 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/context/doc.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/context/doc.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + /* Package context holds the last resort overrides and fyi objects that can be passed to clients and transports added to context.Context objects. diff --git a/vendor/github.com/cloudevents/sdk-go/v2/context/logger.go b/vendor/github.com/cloudevents/sdk-go/v2/context/logger.go index 996f72057..b3087a79f 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/context/logger.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/context/logger.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package context import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/context/retry.go b/vendor/github.com/cloudevents/sdk-go/v2/context/retry.go index f590d4662..ec17df72e 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/context/retry.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/context/retry.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package context import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/content_type.go b/vendor/github.com/cloudevents/sdk-go/v2/event/content_type.go index 591878e5d..a49522f82 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/content_type.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/content_type.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package event const ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/data_content_encoding.go b/vendor/github.com/cloudevents/sdk-go/v2/event/data_content_encoding.go index 24c4094fc..cf2152693 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/data_content_encoding.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/data_content_encoding.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package event const ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/codec.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/codec.go index fd68ca559..3e077740b 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/codec.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/codec.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package datacodec import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/codec_observed.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/codec_observed.go deleted file mode 100644 index b14e6f8b6..000000000 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/codec_observed.go +++ /dev/null @@ -1,50 +0,0 @@ -package datacodec - -import ( - "context" - - "github.com/cloudevents/sdk-go/v2/event/datacodec/json" - "github.com/cloudevents/sdk-go/v2/event/datacodec/text" - 
"github.com/cloudevents/sdk-go/v2/event/datacodec/xml" - "github.com/cloudevents/sdk-go/v2/observability" -) - -func SetObservedCodecs() { - AddDecoder("", json.DecodeObserved) - AddDecoder("application/json", json.DecodeObserved) - AddDecoder("text/json", json.DecodeObserved) - AddDecoder("application/xml", xml.DecodeObserved) - AddDecoder("text/xml", xml.DecodeObserved) - AddDecoder("text/plain", text.DecodeObserved) - - AddEncoder("", json.Encode) - AddEncoder("application/json", json.EncodeObserved) - AddEncoder("text/json", json.EncodeObserved) - AddEncoder("application/xml", xml.EncodeObserved) - AddEncoder("text/xml", xml.EncodeObserved) - AddEncoder("text/plain", text.EncodeObserved) -} - -// DecodeObserved calls Decode and records the result. -func DecodeObserved(ctx context.Context, contentType string, in []byte, out interface{}) error { - _, r := observability.NewReporter(ctx, reportDecode) - err := Decode(ctx, contentType, in, out) - if err != nil { - r.Error() - } else { - r.OK() - } - return err -} - -// EncodeObserved calls Encode and records the result. -func EncodeObserved(ctx context.Context, contentType string, in interface{}) ([]byte, error) { - _, r := observability.NewReporter(ctx, reportEncode) - b, err := Encode(ctx, contentType, in) - if err != nil { - r.Error() - } else { - r.OK() - } - return b, err -} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/doc.go index 9e401534e..b681af887 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/doc.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/doc.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + /* Package datacodec holds the data codec registry and adds known encoders and decoders supporting media types such as `application/json` and `application/xml`. diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/data.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/data.go index f40869b34..734ade59f 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/data.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/data.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package json import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/data_observed.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/data_observed.go deleted file mode 100644 index 21308ce86..000000000 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/data_observed.go +++ /dev/null @@ -1,30 +0,0 @@ -package json - -import ( - "context" - "github.com/cloudevents/sdk-go/v2/observability" -) - -// DecodeObserved calls Decode and records the results. -func DecodeObserved(ctx context.Context, in []byte, out interface{}) error { - _, r := observability.NewReporter(ctx, reportDecode) - err := Decode(ctx, in, out) - if err != nil { - r.Error() - } else { - r.OK() - } - return err -} - -// EncodeObserved calls Encode and records the results. 
-func EncodeObserved(ctx context.Context, in interface{}) ([]byte, error) { - _, r := observability.NewReporter(ctx, reportEncode) - b, err := Encode(ctx, in) - if err != nil { - r.Error() - } else { - r.OK() - } - return b, err -} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/doc.go index 86772c2e3..33e1323c7 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/doc.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/doc.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + /* Package json holds the encoder/decoder implementation for `application/json`. */ diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/observability.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/observability.go deleted file mode 100644 index 7ff796590..000000000 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/observability.go +++ /dev/null @@ -1,51 +0,0 @@ -package json - -import ( - "github.com/cloudevents/sdk-go/v2/observability" - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" -) - -var ( - // LatencyMs measures the latency in milliseconds for the CloudEvents json - // data codec methods. - LatencyMs = stats.Float64("cloudevents.io/sdk-go/datacodec/json/latency", "The latency in milliseconds for the CloudEvents json data codec methods.", "ms") -) - -var ( - // LatencyView is an OpenCensus view that shows data codec json method latency. - LatencyView = &view.View{ - Name: "datacodec/json/latency", - Measure: LatencyMs, - Description: "The distribution of latency inside of the json data codec for CloudEvents.", - Aggregation: view.Distribution(0, .01, .1, 1, 10, 100, 1000, 10000), - TagKeys: observability.LatencyTags(), - } -) - -type observed int32 - -// Adheres to Observable -var _ observability.Observable = observed(0) - -const ( - reportEncode observed = iota - reportDecode -) - -// MethodName implements Observable.MethodName -func (o observed) MethodName() string { - switch o { - case reportEncode: - return "encode" - case reportDecode: - return "decode" - default: - return "unknown" - } -} - -// LatencyMs implements Observable.LatencyMs -func (o observed) LatencyMs() *stats.Float64Measure { - return LatencyMs -} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/observability.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/observability.go deleted file mode 100644 index 870ec5dfe..000000000 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/observability.go +++ /dev/null @@ -1,51 +0,0 @@ -package datacodec - -import ( - "github.com/cloudevents/sdk-go/v2/observability" - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" -) - -var ( - // LatencyMs measures the latency in milliseconds for the CloudEvents generic - // codec data methods. - LatencyMs = stats.Float64("cloudevents.io/sdk-go/datacodec/latency", "The latency in milliseconds for the CloudEvents generic data codec methods.", "ms") -) - -var ( - // LatencyView is an OpenCensus view that shows data codec method latency. 
- LatencyView = &view.View{ - Name: "datacodec/latency", - Measure: LatencyMs, - Description: "The distribution of latency inside of the generic data codec for CloudEvents.", - Aggregation: view.Distribution(0, .01, .1, 1, 10, 100, 1000, 10000), - TagKeys: observability.LatencyTags(), - } -) - -type observed int32 - -// Adheres to Observable -var _ observability.Observable = observed(0) - -const ( - reportEncode observed = iota - reportDecode -) - -// MethodName implements Observable.MethodName -func (o observed) MethodName() string { - switch o { - case reportEncode: - return "encode" - case reportDecode: - return "decode" - default: - return "unknown" - } -} - -// LatencyMs implements Observable.LatencyMs -func (o observed) LatencyMs() *stats.Float64Measure { - return LatencyMs -} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/data.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/data.go index 5e6ddc080..761a10113 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/data.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/data.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package text import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/data_observed.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/data_observed.go deleted file mode 100644 index 2897ea6b2..000000000 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/data_observed.go +++ /dev/null @@ -1,30 +0,0 @@ -package text - -import ( - "context" - "github.com/cloudevents/sdk-go/v2/observability" -) - -// DecodeObserved calls Decode and records the results. -func DecodeObserved(ctx context.Context, in []byte, out interface{}) error { - _, r := observability.NewReporter(ctx, reportDecode) - err := Decode(ctx, in, out) - if err != nil { - r.Error() - } else { - r.OK() - } - return err -} - -// EncodeObserved calls Encode and records the results. -func EncodeObserved(ctx context.Context, in interface{}) ([]byte, error) { - _, r := observability.NewReporter(ctx, reportEncode) - b, err := Encode(ctx, in) - if err != nil { - r.Error() - } else { - r.OK() - } - return b, err -} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/doc.go index 13316702e..af10577aa 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/doc.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/doc.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + /* Package text holds the encoder/decoder implementation for `text/plain`. */ diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/observability.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/observability.go deleted file mode 100644 index ede85a2ad..000000000 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/observability.go +++ /dev/null @@ -1,51 +0,0 @@ -package text - -import ( - "github.com/cloudevents/sdk-go/v2/observability" - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" -) - -var ( - // LatencyMs measures the latency in milliseconds for the CloudEvents xml data - // codec methods. 
- LatencyMs = stats.Float64("cloudevents.io/sdk-go/datacodec/text/latency", "The latency in milliseconds for the CloudEvents text data codec methods.", "ms") -) - -var ( - // LatencyView is an OpenCensus view that shows data codec xml method latency. - LatencyView = &view.View{ - Name: "datacodec/text/latency", - Measure: LatencyMs, - Description: "The distribution of latency inside of the text data codec for CloudEvents.", - Aggregation: view.Distribution(0, .01, .1, 1, 10, 100, 1000, 10000), - TagKeys: observability.LatencyTags(), - } -) - -type observed int32 - -// Adheres to Observable -var _ observability.Observable = observed(0) - -const ( - reportEncode observed = iota - reportDecode -) - -// MethodName implements Observable.MethodName -func (o observed) MethodName() string { - switch o { - case reportEncode: - return "encode" - case reportDecode: - return "decode" - default: - return "unknown" - } -} - -// LatencyMs implements Observable.LatencyMs -func (o observed) LatencyMs() *stats.Float64Measure { - return LatencyMs -} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/data.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/data.go index 13045e03d..de68ec3dc 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/data.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/data.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package xml import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/data_observed.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/data_observed.go deleted file mode 100644 index 14f6c2824..000000000 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/data_observed.go +++ /dev/null @@ -1,30 +0,0 @@ -package xml - -import ( - "context" - "github.com/cloudevents/sdk-go/v2/observability" -) - -// DecodeObserved calls Decode and records the result. -func DecodeObserved(ctx context.Context, in []byte, out interface{}) error { - _, r := observability.NewReporter(ctx, reportDecode) - err := Decode(ctx, in, out) - if err != nil { - r.Error() - } else { - r.OK() - } - return err -} - -// EncodeObserved calls Encode and records the result. -func EncodeObserved(ctx context.Context, in interface{}) ([]byte, error) { - _, r := observability.NewReporter(ctx, reportEncode) - b, err := Encode(ctx, in) - if err != nil { - r.Error() - } else { - r.OK() - } - return b, err -} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/doc.go index d90b7c444..c8d73213f 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/doc.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/doc.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + /* Package xml holds the encoder/decoder implementation for `application/xml`. 
*/ diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/observability.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/observability.go deleted file mode 100644 index b0f4c935d..000000000 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/observability.go +++ /dev/null @@ -1,51 +0,0 @@ -package xml - -import ( - "github.com/cloudevents/sdk-go/v2/observability" - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" -) - -var ( - // LatencyMs measures the latency in milliseconds for the CloudEvents xml data - // codec methods. - LatencyMs = stats.Float64("cloudevents.io/sdk-go/datacodec/xml/latency", "The latency in milliseconds for the CloudEvents xml data codec methods.", "ms") -) - -var ( - // LatencyView is an OpenCensus view that shows data codec xml method latency. - LatencyView = &view.View{ - Name: "datacodec/xml/latency", - Measure: LatencyMs, - Description: "The distribution of latency inside of the xml data codec for CloudEvents.", - Aggregation: view.Distribution(0, .01, .1, 1, 10, 100, 1000, 10000), - TagKeys: observability.LatencyTags(), - } -) - -type observed int32 - -// Adheres to Observable -var _ observability.Observable = observed(0) - -const ( - reportEncode observed = iota - reportDecode -) - -// MethodName implements Observable.MethodName -func (o observed) MethodName() string { - switch o { - case reportEncode: - return "encode" - case reportDecode: - return "decode" - default: - return "unknown" - } -} - -// LatencyMs implements Observable.LatencyMs -func (o observed) LatencyMs() *stats.Float64Measure { - return LatencyMs -} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/event/doc.go index eebbeb4ef..31c22ce67 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/doc.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/doc.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + /* Package event provides primitives to work with CloudEvents specification: https://github.com/cloudevents/spec. 
*/ diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event.go index 3f8215a07..94b5aa0ad 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/event.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event.go @@ -1,9 +1,13 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package event import ( "bytes" "encoding/json" - "fmt" "strings" ) @@ -66,18 +70,6 @@ func (e Event) ExtensionAs(name string, obj interface{}) error { func (e Event) String() string { b := strings.Builder{} - b.WriteString("Validation: ") - - valid := e.Validate() - if valid == nil { - b.WriteString("valid\n") - } else { - b.WriteString("invalid\n") - } - if valid != nil { - b.WriteString(fmt.Sprintf("Validation Error: \n%s\n", valid.Error())) - } - b.WriteString(e.Context.String()) if e.DataEncoded != nil { diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_data.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_data.go index c85fe7e52..8fc449ed9 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/event_data.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_data.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package event import ( @@ -68,7 +73,7 @@ func (e Event) Data() []byte { } // DataAs attempts to populate the provided data object with the event payload. -// data should be a pointer type. +// obj should be a pointer type. func (e Event) DataAs(obj interface{}) error { data := e.Data() diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_interface.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_interface.go index af87454d8..2809fed57 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/event_interface.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_interface.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package event import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_marshal.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_marshal.go index 2289774f0..c5f2dc03c 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/event_marshal.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_marshal.go @@ -1,324 +1,203 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package event import ( - "context" - "encoding/json" + "bytes" + "encoding/base64" "fmt" + "io" "strings" - "github.com/cloudevents/sdk-go/v2/observability" + jsoniter "github.com/json-iterator/go" ) -// MarshalJSON implements a custom json marshal method used when this type is -// marshaled using json.Marshal. -func (e Event) MarshalJSON() ([]byte, error) { - _, r := observability.NewReporter(context.Background(), eventJSONObserved{o: reportMarshal, v: e.SpecVersion()}) - - if err := e.Validate(); err != nil { - r.Error() - return nil, err - } - - var b []byte - var err error - - switch e.SpecVersion() { - case CloudEventsVersionV03: - b, err = JsonEncodeLegacy(e) - case CloudEventsVersionV1: - b, err = JsonEncode(e) - default: - return nil, ValidationError{"specversion": fmt.Errorf("unknown : %q", e.SpecVersion())} - } - - // Report the observable - if err != nil { - r.Error() - return nil, err - } else { - r.OK() - } - - return b, nil -} - -// UnmarshalJSON implements the json unmarshal method used when this type is -// unmarshaled using json.Unmarshal. 
-func (e *Event) UnmarshalJSON(b []byte) error { - raw := make(map[string]json.RawMessage) - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - - version := versionFromRawMessage(raw) - - _, r := observability.NewReporter(context.Background(), eventJSONObserved{o: reportUnmarshal, v: version}) - - var err error - switch version { - case CloudEventsVersionV03: - err = e.JsonDecodeV03(b, raw) - case CloudEventsVersionV1: - err = e.JsonDecodeV1(b, raw) - default: - return ValidationError{"specversion": fmt.Errorf("unknown : %q", version)} - } - - // Report the observable - if err != nil { - r.Error() - return err - } else { - r.OK() - } - return nil -} +// WriteJson writes the in event in the provided writer. +// Note: this function assumes the input event is valid. +func WriteJson(in *Event, writer io.Writer) error { + stream := jsoniter.ConfigFastest.BorrowStream(writer) + defer jsoniter.ConfigFastest.ReturnStream(stream) + stream.WriteObjectStart() + + var ext map[string]interface{} + var dct *string + var isBase64 bool + + // Write the context (without the extensions) + switch eventContext := in.Context.(type) { + case *EventContextV03: + // Set a bunch of variables we need later + ext = eventContext.Extensions + dct = eventContext.DataContentType + + stream.WriteObjectField("specversion") + stream.WriteString(CloudEventsVersionV03) + stream.WriteMore() + + stream.WriteObjectField("id") + stream.WriteString(eventContext.ID) + stream.WriteMore() + + stream.WriteObjectField("source") + stream.WriteString(eventContext.Source.String()) + stream.WriteMore() + + stream.WriteObjectField("type") + stream.WriteString(eventContext.Type) + + if eventContext.Subject != nil { + stream.WriteMore() + stream.WriteObjectField("subject") + stream.WriteString(*eventContext.Subject) + } -func versionFromRawMessage(raw map[string]json.RawMessage) string { - // v0.2 and after - if v, ok := raw["specversion"]; ok { - var version string - if err := json.Unmarshal(v, &version); err != nil { - return "" + if eventContext.DataContentEncoding != nil { + isBase64 = true + stream.WriteMore() + stream.WriteObjectField("datacontentencoding") + stream.WriteString(*eventContext.DataContentEncoding) } - return version - } - return "" -} -// JsonEncode encodes an event to JSON -func JsonEncode(e Event) ([]byte, error) { - return jsonEncode(e.Context, e.DataEncoded, e.DataBase64) -} + if eventContext.DataContentType != nil { + stream.WriteMore() + stream.WriteObjectField("datacontenttype") + stream.WriteString(*eventContext.DataContentType) + } -// JsonEncodeLegacy performs legacy JSON encoding -func JsonEncodeLegacy(e Event) ([]byte, error) { - isBase64 := e.Context.DeprecatedGetDataContentEncoding() == Base64 - return jsonEncode(e.Context, e.DataEncoded, isBase64) -} + if eventContext.SchemaURL != nil { + stream.WriteMore() + stream.WriteObjectField("schemaurl") + stream.WriteString(eventContext.SchemaURL.String()) + } -func jsonEncode(ctx EventContextReader, data []byte, shouldEncodeToBase64 bool) ([]byte, error) { - var b map[string]json.RawMessage - var err error + if eventContext.Time != nil { + stream.WriteMore() + stream.WriteObjectField("time") + stream.WriteString(eventContext.Time.String()) + } + case *EventContextV1: + // Set a bunch of variables we need later + ext = eventContext.Extensions + dct = eventContext.DataContentType + isBase64 = in.DataBase64 + + stream.WriteObjectField("specversion") + stream.WriteString(CloudEventsVersionV1) + stream.WriteMore() + + stream.WriteObjectField("id") + 
stream.WriteString(eventContext.ID) + stream.WriteMore() + + stream.WriteObjectField("source") + stream.WriteString(eventContext.Source.String()) + stream.WriteMore() + + stream.WriteObjectField("type") + stream.WriteString(eventContext.Type) + + if eventContext.Subject != nil { + stream.WriteMore() + stream.WriteObjectField("subject") + stream.WriteString(*eventContext.Subject) + } - b, err = marshalEvent(ctx, ctx.GetExtensions()) - if err != nil { - return nil, err - } + if eventContext.DataContentType != nil { + stream.WriteMore() + stream.WriteObjectField("datacontenttype") + stream.WriteString(*eventContext.DataContentType) + } - if data != nil { - // data here is a serialized version of whatever payload. - // If we need to write the payload as base64, shouldEncodeToBase64 is true. - mediaType, err := ctx.GetDataMediaType() - if err != nil { - return nil, err + if eventContext.DataSchema != nil { + stream.WriteMore() + stream.WriteObjectField("dataschema") + stream.WriteString(eventContext.DataSchema.String()) } - isJson := mediaType == "" || mediaType == ApplicationJSON || mediaType == TextJSON - // If isJson and no encoding to base64, we don't need to perform additional steps - if isJson && !shouldEncodeToBase64 { - b["data"] = data - } else { - var dataKey = "data" - if ctx.GetSpecVersion() == CloudEventsVersionV1 && shouldEncodeToBase64 { - dataKey = "data_base64" - } - var dataPointer []byte - if shouldEncodeToBase64 { - dataPointer, err = json.Marshal(data) - } else { - dataPointer, err = json.Marshal(string(data)) - } - if err != nil { - return nil, err - } - b[dataKey] = dataPointer + if eventContext.Time != nil { + stream.WriteMore() + stream.WriteObjectField("time") + stream.WriteString(eventContext.Time.String()) } + default: + return fmt.Errorf("missing event context") } - body, err := json.Marshal(b) - if err != nil { - return nil, err + // Let's do a check on the error + if stream.Error != nil { + return fmt.Errorf("error while writing the event attributes: %w", stream.Error) } - return body, nil -} - -// JsonDecodeV03 takes in the byte representation of a version 0.3 structured json CloudEvent and returns a -// cloudevent.Event or an error if there are parsing errors. 
-func (e *Event) JsonDecodeV03(body []byte, raw map[string]json.RawMessage) error { - ec := EventContextV03{} - if err := json.Unmarshal(body, &ec); err != nil { - return err - } + // Let's write the body + if in.DataEncoded != nil { + stream.WriteMore() - delete(raw, "specversion") - delete(raw, "type") - delete(raw, "source") - delete(raw, "subject") - delete(raw, "id") - delete(raw, "time") - delete(raw, "schemaurl") - delete(raw, "datacontenttype") - delete(raw, "datacontentencoding") - - var data []byte - if d, ok := raw["data"]; ok { - data = d - - // Decode the Base64 if we have a base64 payload - if ec.DeprecatedGetDataContentEncoding() == Base64 { - var tmp []byte - if err := json.Unmarshal(d, &tmp); err != nil { - return err - } - e.DataBase64 = true - e.DataEncoded = tmp + // We need to figure out the media type first + var mediaType string + if dct == nil { + mediaType = ApplicationJSON } else { - if ec.DataContentType != nil { - ct := *ec.DataContentType - if ct != ApplicationJSON && ct != TextJSON { - var dataStr string - err := json.Unmarshal(d, &dataStr) - if err != nil { - return err - } - - data = []byte(dataStr) - } + // This code is required to extract the media type from the full content type string (which might contain encoding and stuff) + contentType := *dct + i := strings.IndexRune(contentType, ';') + if i == -1 { + i = len(contentType) } - e.DataEncoded = data - e.DataBase64 = false + mediaType = strings.TrimSpace(strings.ToLower(contentType[0:i])) } - } - delete(raw, "data") - - if len(raw) > 0 { - extensions := make(map[string]interface{}, len(raw)) - ec.Extensions = extensions - for k, v := range raw { - k = strings.ToLower(k) - var tmp interface{} - if err := json.Unmarshal(v, &tmp); err != nil { - return err - } - if err := ec.SetExtension(k, tmp); err != nil { - return fmt.Errorf("%w: Cannot set extension with key %s", err, k) - } - } - } - e.Context = &ec - - return nil -} - -// JsonDecodeV1 takes in the byte representation of a version 1.0 structured json CloudEvent and returns a -// cloudevent.Event or an error if there are parsing errors. 
-func (e *Event) JsonDecodeV1(body []byte, raw map[string]json.RawMessage) error { - ec := EventContextV1{} - if err := json.Unmarshal(body, &ec); err != nil { - return err - } + isJson := mediaType == "" || mediaType == ApplicationJSON || mediaType == TextJSON - delete(raw, "specversion") - delete(raw, "type") - delete(raw, "source") - delete(raw, "subject") - delete(raw, "id") - delete(raw, "time") - delete(raw, "dataschema") - delete(raw, "datacontenttype") - - var data []byte - if d, ok := raw["data"]; ok { - data = d - if ec.DataContentType != nil { - ct := *ec.DataContentType - if ct != ApplicationJSON && ct != TextJSON { - var dataStr string - err := json.Unmarshal(d, &dataStr) - if err != nil { - return err - } - - data = []byte(dataStr) + // If isJson and no encoding to base64, we don't need to perform additional steps + if isJson && !isBase64 { + stream.WriteObjectField("data") + _, err := stream.Write(in.DataEncoded) + if err != nil { + return fmt.Errorf("error while writing data: %w", err) } - } - } - delete(raw, "data") - - var dataBase64 []byte - if d, ok := raw["data_base64"]; ok { - var tmp []byte - if err := json.Unmarshal(d, &tmp); err != nil { - return err - } - dataBase64 = tmp - - } - delete(raw, "data_base64") - - if data != nil && dataBase64 != nil { - return ValidationError{"data": fmt.Errorf("found both 'data', and 'data_base64' in JSON payload")} - } - if data != nil { - e.DataEncoded = data - e.DataBase64 = false - } else if dataBase64 != nil { - e.DataEncoded = dataBase64 - e.DataBase64 = true - } - - if len(raw) > 0 { - extensions := make(map[string]interface{}, len(raw)) - ec.Extensions = extensions - for k, v := range raw { - k = strings.ToLower(k) - var tmp interface{} - if err := json.Unmarshal(v, &tmp); err != nil { - return err + } else { + if in.Context.GetSpecVersion() == CloudEventsVersionV1 && isBase64 { + stream.WriteObjectField("data_base64") + } else { + stream.WriteObjectField("data") } - if err := ec.SetExtension(k, tmp); err != nil { - return fmt.Errorf("%w: Cannot set extension with key %s", err, k) + // At this point of we need to write to base 64 string, or we just need to write the plain string + if isBase64 { + stream.WriteString(base64.StdEncoding.EncodeToString(in.DataEncoded)) + } else { + stream.WriteString(string(in.DataEncoded)) } } - } - - e.Context = &ec - return nil -} - -func marshalEvent(eventCtx EventContextReader, extensions map[string]interface{}) (map[string]json.RawMessage, error) { - b, err := json.Marshal(eventCtx) - if err != nil { - return nil, err } - brm := map[string]json.RawMessage{} - if err := json.Unmarshal(b, &brm); err != nil { - return nil, err + // Let's do a check on the error + if stream.Error != nil { + return fmt.Errorf("error while writing the event data: %w", stream.Error) } - sv, err := json.Marshal(eventCtx.GetSpecVersion()) - if err != nil { - return nil, err + for k, v := range ext { + stream.WriteMore() + stream.WriteObjectField(k) + stream.WriteVal(v) } - brm["specversion"] = sv + stream.WriteObjectEnd() - for k, v := range extensions { - k = strings.ToLower(k) - vb, err := json.Marshal(v) - if err != nil { - return nil, err - } - // Don't overwrite spec keys. 
- if _, ok := brm[k]; !ok { - brm[k] = vb - } + // Let's do a check on the error + if stream.Error != nil { + return fmt.Errorf("error while writing the event extensions: %w", stream.Error) } + return stream.Flush() +} - return brm, nil +// MarshalJSON implements a custom json marshal method used when this type is +// marshaled using json.Marshal. +func (e Event) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + err := WriteJson(&e, &buf) + return buf.Bytes(), err } diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_observability.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_observability.go deleted file mode 100644 index e21a845f1..000000000 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/event_observability.go +++ /dev/null @@ -1,77 +0,0 @@ -package event - -import ( - "fmt" - - "github.com/cloudevents/sdk-go/v2/observability" - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" -) - -var ( - // EventMarshalLatencyMs measures the latency in milliseconds for the - // CloudEvents.Event marshal/unmarshalJSON methods. - EventMarshalLatencyMs = stats.Float64( - "cloudevents.io/sdk-go/event/json/latency", - "The latency in milliseconds of (un)marshalJSON methods for CloudEvents.Event.", - "ms") -) - -var ( - // LatencyView is an OpenCensus view that shows CloudEvents.Event (un)marshalJSON method latency. - EventMarshalLatencyView = &view.View{ - Name: "event/json/latency", - Measure: EventMarshalLatencyMs, - Description: "The distribution of latency inside of (un)marshalJSON methods for CloudEvents.Event.", - Aggregation: view.Distribution(0, .01, .1, 1, 10, 100, 1000, 10000), - TagKeys: observability.LatencyTags(), - } -) - -type observed int32 - -// Adheres to Observable -var _ observability.Observable = observed(0) - -const ( - reportMarshal observed = iota - reportUnmarshal -) - -// MethodName implements Observable.MethodName -func (o observed) MethodName() string { - switch o { - case reportMarshal: - return "marshaljson" - case reportUnmarshal: - return "unmarshaljson" - default: - return "unknown" - } -} - -// LatencyMs implements Observable.LatencyMs -func (o observed) LatencyMs() *stats.Float64Measure { - return EventMarshalLatencyMs -} - -// eventJSONObserved is a wrapper to append version to observed. 
-type eventJSONObserved struct { - // Method - o observed - // Version - v string -} - -// Adheres to Observable -var _ observability.Observable = (*eventJSONObserved)(nil) - -// MethodName implements Observable.MethodName -func (c eventJSONObserved) MethodName() string { - return fmt.Sprintf("%s/%s", c.o.MethodName(), c.v) -} - -// LatencyMs implements Observable.LatencyMs -func (c eventJSONObserved) LatencyMs() *stats.Float64Measure { - return c.o.LatencyMs() -} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_reader.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_reader.go index 86ca609b4..9d1aeeb65 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/event_reader.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_reader.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package event import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_unmarshal.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_unmarshal.go new file mode 100644 index 000000000..0dd88ae5a --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_unmarshal.go @@ -0,0 +1,480 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "encoding/base64" + "errors" + "fmt" + "io" + "sync" + + jsoniter "github.com/json-iterator/go" + + "github.com/cloudevents/sdk-go/v2/types" +) + +const specVersionV03Flag uint8 = 1 << 4 +const specVersionV1Flag uint8 = 1 << 5 +const dataBase64Flag uint8 = 1 << 6 +const dataContentTypeFlag uint8 = 1 << 7 + +func checkFlag(state uint8, flag uint8) bool { + return state&flag != 0 +} + +func appendFlag(state *uint8, flag uint8) { + *state = (*state) | flag +} + +var iterPool = sync.Pool{ + New: func() interface{} { + return jsoniter.Parse(jsoniter.ConfigFastest, nil, 1024) + }, +} + +func borrowIterator(reader io.Reader) *jsoniter.Iterator { + iter := iterPool.Get().(*jsoniter.Iterator) + iter.Reset(reader) + return iter +} + +func returnIterator(iter *jsoniter.Iterator) { + iter.Error = nil + iter.Attachment = nil + iterPool.Put(iter) +} + +func ReadJson(out *Event, reader io.Reader) error { + iterator := borrowIterator(reader) + defer returnIterator(iterator) + + return readJsonFromIterator(out, iterator) +} + +// ReadJson allows you to read the bytes reader as an event +func readJsonFromIterator(out *Event, iterator *jsoniter.Iterator) error { + // Parsing dependency graph: + // SpecVersion + // ^ ^ + // | +--------------+ + // + + + // All Attributes datacontenttype (and datacontentencoding for v0.3) + // (except datacontenttype) ^ + // | + // | + // + + // Data + + var state uint8 = 0 + var cachedData []byte + + var ( + // Universally parseable fields. + id string + typ string + source types.URIRef + subject *string + time *types.Timestamp + datacontenttype *string + extensions = make(map[string]interface{}) + + // These fields require knowledge about the specversion to be parsed. 
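+		// They are buffered as raw jsoniter.Any values here and only
+		// interpreted once the specversion key has been read.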
+ schemaurl jsoniter.Any + datacontentencoding jsoniter.Any + dataschema jsoniter.Any + dataBase64 jsoniter.Any + ) + + for key := iterator.ReadObject(); key != ""; key = iterator.ReadObject() { + // Check if we have some error in our error cache + if iterator.Error != nil { + return iterator.Error + } + + // We have a key, now we need to figure out what to do + // depending on the parsing state + + // If it's a specversion, trigger state change + if key == "specversion" { + if checkFlag(state, specVersionV1Flag|specVersionV03Flag) { + return fmt.Errorf("specversion was already provided") + } + sv := iterator.ReadString() + + // Check proper specversion + switch sv { + case CloudEventsVersionV1: + con := &EventContextV1{ + ID: id, + Type: typ, + Source: source, + Subject: subject, + Time: time, + DataContentType: datacontenttype, + } + + // Add the fields relevant for the version ... + if dataschema != nil { + var err error + con.DataSchema, err = toUriPtr(dataschema) + if err != nil { + return err + } + } + if dataBase64 != nil { + stream := jsoniter.ConfigFastest.BorrowStream(nil) + defer jsoniter.ConfigFastest.ReturnStream(stream) + dataBase64.WriteTo(stream) + cachedData = stream.Buffer() + if stream.Error != nil { + return stream.Error + } + appendFlag(&state, dataBase64Flag) + } + + // ... add all remaining fields as extensions. + if schemaurl != nil { + extensions["schemaurl"] = schemaurl.GetInterface() + } + if datacontentencoding != nil { + extensions["datacontentencoding"] = datacontentencoding.GetInterface() + } + + out.Context = con + appendFlag(&state, specVersionV1Flag) + case CloudEventsVersionV03: + con := &EventContextV03{ + ID: id, + Type: typ, + Source: source, + Subject: subject, + Time: time, + DataContentType: datacontenttype, + } + var err error + // Add the fields relevant for the version ... + if schemaurl != nil { + con.SchemaURL, err = toUriRefPtr(schemaurl) + if err != nil { + return err + } + } + if datacontentencoding != nil { + con.DataContentEncoding, err = toStrPtr(datacontentencoding) + if *con.DataContentEncoding != Base64 { + err = ValidationError{"datacontentencoding": errors.New("invalid datacontentencoding value, the only allowed value is 'base64'")} + } + if err != nil { + return err + } + appendFlag(&state, dataBase64Flag) + } + + // ... add all remaining fields as extensions. + if dataschema != nil { + extensions["dataschema"] = dataschema.GetInterface() + } + if dataBase64 != nil { + extensions["data_base64"] = dataBase64.GetInterface() + } + + out.Context = con + appendFlag(&state, specVersionV03Flag) + default: + return ValidationError{"specversion": errors.New("unknown value: " + sv)} + } + + // Apply all extensions to the context object. + for key, val := range extensions { + if err := out.Context.SetExtension(key, val); err != nil { + return err + } + } + continue + } + + // If no specversion ... 
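+		// ... the attribute values cannot be assigned to a context yet, so they
+		// are parked in the local variables declared above until it shows up.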
+ if !checkFlag(state, specVersionV03Flag|specVersionV1Flag) { + switch key { + case "id": + id = iterator.ReadString() + case "type": + typ = iterator.ReadString() + case "source": + source = readUriRef(iterator) + case "subject": + subject = readStrPtr(iterator) + case "time": + time = readTimestamp(iterator) + case "datacontenttype": + datacontenttype = readStrPtr(iterator) + appendFlag(&state, dataContentTypeFlag) + case "data": + cachedData = iterator.SkipAndReturnBytes() + case "data_base64": + dataBase64 = iterator.ReadAny() + case "dataschema": + dataschema = iterator.ReadAny() + case "schemaurl": + schemaurl = iterator.ReadAny() + case "datacontentencoding": + datacontentencoding = iterator.ReadAny() + default: + extensions[key] = iterator.Read() + } + continue + } + + // From this point downward -> we can assume the event has a context pointer non nil + + // If it's a datacontenttype, trigger state change + if key == "datacontenttype" { + if checkFlag(state, dataContentTypeFlag) { + return fmt.Errorf("datacontenttype was already provided") + } + + dct := iterator.ReadString() + + switch ctx := out.Context.(type) { + case *EventContextV03: + ctx.DataContentType = &dct + case *EventContextV1: + ctx.DataContentType = &dct + } + appendFlag(&state, dataContentTypeFlag) + continue + } + + // If it's a datacontentencoding and it's v0.3, trigger state change + if checkFlag(state, specVersionV03Flag) && key == "datacontentencoding" { + if checkFlag(state, dataBase64Flag) { + return ValidationError{"datacontentencoding": errors.New("datacontentencoding was specified twice")} + } + + dce := iterator.ReadString() + + if dce != Base64 { + return ValidationError{"datacontentencoding": errors.New("invalid datacontentencoding value, the only allowed value is 'base64'")} + } + + out.Context.(*EventContextV03).DataContentEncoding = &dce + appendFlag(&state, dataBase64Flag) + continue + } + + // We can parse all attributes, except data. 
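+		// (Illustrative arithmetic for the check below: with only specVersionV1Flag
+		// set, state&(specVersionV1Flag|dataContentTypeFlag) == specVersionV1Flag,
+		// meaning datacontenttype has not been seen, so "data" must be cached.)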
+		// If it's data or data_base64 and we don't have the attributes to process it, then we cache it
+		// The expanded form of this condition is:
+		// (checkFlag(state, specVersionV1Flag) && !checkFlag(state, dataContentTypeFlag) && (key == "data" || key == "data_base64")) ||
+		// (checkFlag(state, specVersionV03Flag) && !(checkFlag(state, dataContentTypeFlag) && checkFlag(state, dataBase64Flag)) && key == "data")
+		if (state&(specVersionV1Flag|dataContentTypeFlag) == specVersionV1Flag && (key == "data" || key == "data_base64")) ||
+			((state&specVersionV03Flag == specVersionV03Flag) && (state&(dataContentTypeFlag|dataBase64Flag) != (dataContentTypeFlag | dataBase64Flag)) && key == "data") {
+			if key == "data_base64" {
+				appendFlag(&state, dataBase64Flag)
+			}
+			cachedData = iterator.SkipAndReturnBytes()
+			continue
+		}
+
+		// At this point, either this value is an attribute (excluding datacontenttype and datacontentencoding), or it is data and this condition holds:
+		// (specVersionV1Flag & dataContentTypeFlag) || (specVersionV03Flag & dataContentTypeFlag & dataBase64Flag)
+		switch eventContext := out.Context.(type) {
+		case *EventContextV03:
+			switch key {
+			case "id":
+				eventContext.ID = iterator.ReadString()
+			case "type":
+				eventContext.Type = iterator.ReadString()
+			case "source":
+				eventContext.Source = readUriRef(iterator)
+			case "subject":
+				eventContext.Subject = readStrPtr(iterator)
+			case "time":
+				eventContext.Time = readTimestamp(iterator)
+			case "schemaurl":
+				eventContext.SchemaURL = readUriRefPtr(iterator)
+			case "data":
+				iterator.Error = consumeData(out, checkFlag(state, dataBase64Flag), iterator)
+			default:
+				if eventContext.Extensions == nil {
+					eventContext.Extensions = make(map[string]interface{}, 1)
+				}
+				iterator.Error = eventContext.SetExtension(key, iterator.Read())
+			}
+		case *EventContextV1:
+			switch key {
+			case "id":
+				eventContext.ID = iterator.ReadString()
+			case "type":
+				eventContext.Type = iterator.ReadString()
+			case "source":
+				eventContext.Source = readUriRef(iterator)
+			case "subject":
+				eventContext.Subject = readStrPtr(iterator)
+			case "time":
+				eventContext.Time = readTimestamp(iterator)
+			case "dataschema":
+				eventContext.DataSchema = readUriPtr(iterator)
+			case "data":
+				iterator.Error = consumeData(out, false, iterator)
+			case "data_base64":
+				iterator.Error = consumeData(out, true, iterator)
+			default:
+				if eventContext.Extensions == nil {
+					eventContext.Extensions = make(map[string]interface{}, 1)
+				}
+				iterator.Error = eventContext.SetExtension(key, iterator.Read())
+			}
+		}
+	}
+
+	if state&(specVersionV03Flag|specVersionV1Flag) == 0 {
+		return ValidationError{"specversion": errors.New("no specversion")}
+	}
+
+	if iterator.Error != nil {
+		return iterator.Error
+	}
+
+	// If there is cached data, its processing is always deferred to the end,
+	// because neither datacontenttype nor datacontentencoding is mandatory.
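+	// (For example, "data" may legitimately appear before "datacontenttype"
+	// in the incoming JSON object, so only now can it be interpreted.)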
+ if cachedData != nil { + return consumeDataAsBytes(out, checkFlag(state, dataBase64Flag), cachedData) + } + return nil +} + +func consumeDataAsBytes(e *Event, isBase64 bool, b []byte) error { + if isBase64 { + e.DataBase64 = true + + // Allocate payload byte buffer + base64Encoded := b[1 : len(b)-1] // remove quotes + e.DataEncoded = make([]byte, base64.StdEncoding.DecodedLen(len(base64Encoded))) + length, err := base64.StdEncoding.Decode(e.DataEncoded, base64Encoded) + if err != nil { + return err + } + e.DataEncoded = e.DataEncoded[0:length] + return nil + } + + mt, _ := e.Context.GetDataMediaType() + // Empty content type assumes json + if mt != "" && mt != ApplicationJSON && mt != TextJSON { + // If not json, then data is encoded as string + iter := jsoniter.ParseBytes(jsoniter.ConfigFastest, b) + src := iter.ReadString() // handles escaping + e.DataEncoded = []byte(src) + if iter.Error != nil { + return fmt.Errorf("unexpected data payload for media type %q, expected a string: %w", mt, iter.Error) + } + return nil + } + + e.DataEncoded = b + return nil +} + +func consumeData(e *Event, isBase64 bool, iter *jsoniter.Iterator) error { + if isBase64 { + e.DataBase64 = true + + // Allocate payload byte buffer + base64Encoded := iter.ReadStringAsSlice() + e.DataEncoded = make([]byte, base64.StdEncoding.DecodedLen(len(base64Encoded))) + length, err := base64.StdEncoding.Decode(e.DataEncoded, base64Encoded) + if err != nil { + return err + } + e.DataEncoded = e.DataEncoded[0:length] + return nil + } + + mt, _ := e.Context.GetDataMediaType() + if mt != ApplicationJSON && mt != TextJSON { + // If not json, then data is encoded as string + src := iter.ReadString() // handles escaping + e.DataEncoded = []byte(src) + if iter.Error != nil { + return fmt.Errorf("unexpected data payload for media type %q, expected a string: %w", mt, iter.Error) + } + return nil + } + + e.DataEncoded = iter.SkipAndReturnBytes() + return nil +} + +func readUriRef(iter *jsoniter.Iterator) types.URIRef { + str := iter.ReadString() + uriRef := types.ParseURIRef(str) + if uriRef == nil { + iter.Error = fmt.Errorf("cannot parse uri ref: %v", str) + return types.URIRef{} + } + return *uriRef +} + +func readStrPtr(iter *jsoniter.Iterator) *string { + str := iter.ReadString() + if str == "" { + return nil + } + return &str +} + +func readUriRefPtr(iter *jsoniter.Iterator) *types.URIRef { + return types.ParseURIRef(iter.ReadString()) +} + +func readUriPtr(iter *jsoniter.Iterator) *types.URI { + return types.ParseURI(iter.ReadString()) +} + +func readTimestamp(iter *jsoniter.Iterator) *types.Timestamp { + t, err := types.ParseTimestamp(iter.ReadString()) + if err != nil { + iter.Error = err + } + return t +} + +func toStrPtr(val jsoniter.Any) (*string, error) { + str := val.ToString() + if val.LastError() != nil { + return nil, val.LastError() + } + if str == "" { + return nil, nil + } + return &str, nil +} + +func toUriRefPtr(val jsoniter.Any) (*types.URIRef, error) { + str := val.ToString() + if val.LastError() != nil { + return nil, val.LastError() + } + return types.ParseURIRef(str), nil +} + +func toUriPtr(val jsoniter.Any) (*types.URI, error) { + str := val.ToString() + if val.LastError() != nil { + return nil, val.LastError() + } + return types.ParseURI(str), nil +} + +// UnmarshalJSON implements the json unmarshal method used when this type is +// unmarshaled using json.Unmarshal. 
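+//
+// A minimal usage sketch (the payload variable is hypothetical):
+//
+//	var e Event
+//	err := json.Unmarshal(payload, &e) // dispatches to this method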
+func (e *Event) UnmarshalJSON(b []byte) error { + iterator := jsoniter.ConfigFastest.BorrowIterator(b) + defer jsoniter.ConfigFastest.ReturnIterator(iterator) + return readJsonFromIterator(e, iterator) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_validation.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_validation.go index b5759fa4e..958ecc47d 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/event_validation.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_validation.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package event import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_writer.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_writer.go index 00018cbdb..ddfb1be38 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/event_writer.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_writer.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package event import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext.go index 2d0611215..a39565afa 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package event import "time" diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03.go index c626311df..c511c81c4 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package event import ( @@ -15,6 +20,17 @@ const ( CloudEventsVersionV03 = "0.3" ) +var specV03Attributes = map[string]struct{}{ + "type": {}, + "source": {}, + "subject": {}, + "id": {}, + "time": {}, + "schemaurl": {}, + "datacontenttype": {}, + "datacontentencoding": {}, +} + // EventContextV03 represents the non-data attributes of a CloudEvents v0.3 // event. type EventContextV03 struct { @@ -73,11 +89,17 @@ func (ec EventContextV03) ExtensionAs(name string, obj interface{}) error { } } -// SetExtension adds the extension 'name' with value 'value' to the CloudEvents context. +// SetExtension adds the extension 'name' with value 'value' to the CloudEvents +// context. This function fails if the name uses a reserved event context key. 
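+// For example (illustrative), SetExtension("schemaurl", v) is rejected because
+// "schemaurl" is a v0.3 spec attribute, while SetExtension("myext", v) succeeds.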
func (ec *EventContextV03) SetExtension(name string, value interface{}) error { if ec.Extensions == nil { ec.Extensions = make(map[string]interface{}) } + + if _, ok := specV03Attributes[strings.ToLower(name)]; ok { + return fmt.Errorf("bad key %q: CloudEvents spec attribute MUST NOT be overwritten by extension", name) + } + if value == nil { delete(ec.Extensions, name) if len(ec.Extensions) == 0 { diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_reader.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_reader.go index 8e6eec5ca..2cd27a705 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_reader.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_reader.go @@ -1,8 +1,13 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package event import ( "fmt" - "mime" + "strings" "time" ) @@ -22,11 +27,12 @@ func (ec EventContextV03) GetDataContentType() string { // GetDataMediaType implements EventContextReader.GetDataMediaType func (ec EventContextV03) GetDataMediaType() (string, error) { if ec.DataContentType != nil { - mediaType, _, err := mime.ParseMediaType(*ec.DataContentType) - if err != nil { - return "", err + dct := *ec.DataContentType + i := strings.IndexRune(dct, ';') + if i == -1 { + return dct, nil } - return mediaType, nil + return strings.TrimSpace(dct[0:i]), nil } return "", nil } diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_writer.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_writer.go index 94748c67c..5d664635e 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_writer.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_writer.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package event import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1.go index f7e09ed63..8f164502b 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1.go @@ -1,7 +1,11 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package event import ( - "errors" "fmt" "mime" "sort" @@ -17,6 +21,17 @@ const ( CloudEventsVersionV1 = "1.0" ) +var specV1Attributes = map[string]struct{}{ + "id": {}, + "source": {}, + "type": {}, + "datacontenttype": {}, + "subject": {}, + "time": {}, + "specversion": {}, + "dataschema": {}, +} + // EventContextV1 represents the non-data attributes of a CloudEvents v1.0 // event. type EventContextV1 struct { @@ -69,11 +84,16 @@ func (ec EventContextV1) ExtensionAs(name string, obj interface{}) error { return fmt.Errorf("unknown extension type %T", obj) } -// SetExtension adds the extension 'name' with value 'value' to the CloudEvents context. -// This function fails if the name doesn't respect the regex ^[a-zA-Z0-9]+$ +// SetExtension adds the extension 'name' with value 'value' to the CloudEvents +// context. This function fails if the name doesn't respect the regex +// ^[a-zA-Z0-9]+$ or if the name uses a reserved event context key. 
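+// For example (illustrative), SetExtension("dataschema", v) is rejected because
+// "dataschema" is a v1.0 spec attribute, while SetExtension("myext", v) succeeds.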
func (ec *EventContextV1) SetExtension(name string, value interface{}) error { - if !IsAlphaNumeric(name) { - return errors.New("bad key, CloudEvents attribute names MUST consist of lower-case letters ('a' to 'z') or digits ('0' to '9') from the ASCII character set") + if err := validateExtensionName(name); err != nil { + return err + } + + if _, ok := specV1Attributes[strings.ToLower(name)]; ok { + return fmt.Errorf("bad key %q: CloudEvents spec attribute MUST NOT be overwritten by extension", name) } name = strings.ToLower(name) @@ -226,10 +246,8 @@ func (ec EventContextV1) Validate() ValidationError { // OPTIONAL // If present, MUST adhere to the format specified in RFC 3986 if ec.DataSchema != nil { - dataSchema := strings.TrimSpace(ec.DataSchema.String()) - // empty string is not RFC 3986 compatible. - if dataSchema == "" { - errors["dataschema"] = fmt.Errorf("if present, MUST adhere to the format specified in RFC 3986") + if !ec.DataSchema.Validate() { + errors["dataschema"] = fmt.Errorf("if present, MUST adhere to the format specified in RFC 3986, Section 4.3. Absolute URI") } } diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_reader.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_reader.go index 64f1a919b..74f73b029 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_reader.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_reader.go @@ -1,8 +1,13 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package event import ( "fmt" - "mime" + "strings" "time" ) @@ -22,11 +27,12 @@ func (ec EventContextV1) GetDataContentType() string { // GetDataMediaType implements EventContextReader.GetDataMediaType func (ec EventContextV1) GetDataMediaType() (string, error) { if ec.DataContentType != nil { - mediaType, _, err := mime.ParseMediaType(*ec.DataContentType) - if err != nil { - return "", err + dct := *ec.DataContentType + i := strings.IndexRune(dct, ';') + if i == -1 { + return dct, nil } - return mediaType, nil + return strings.TrimSpace(dct[0:i]), nil } return "", nil } diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_writer.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_writer.go index 1ec29e65e..5f2aca763 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_writer.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_writer.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package event import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/extensions.go b/vendor/github.com/cloudevents/sdk-go/v2/event/extensions.go index 4a202e5e4..72d0e757a 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/extensions.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/extensions.go @@ -1,7 +1,13 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package event import ( - "regexp" + "errors" + "fmt" "strings" ) @@ -11,6 +17,12 @@ const ( DataContentEncodingKey = "datacontentencoding" ) +var ( + // This determines the behavior of validateExtensionName(). 
For MaxExtensionNameLength > 0, an error will be returned, + // if len(key) > MaxExtensionNameLength + MaxExtensionNameLength = 0 +) + func caseInsensitiveSearch(key string, space map[string]interface{}) (interface{}, bool) { lkey := strings.ToLower(key) for k, v := range space { @@ -21,4 +33,25 @@ func caseInsensitiveSearch(key string, space map[string]interface{}) (interface{ return nil, false } -var IsAlphaNumeric = regexp.MustCompile(`^[a-zA-Z0-9]+$`).MatchString +func IsExtensionNameValid(key string) bool { + if err := validateExtensionName(key); err != nil { + return false + } + return true +} + +func validateExtensionName(key string) error { + if len(key) < 1 { + return errors.New("bad key, CloudEvents attribute names MUST NOT be empty") + } + if MaxExtensionNameLength > 0 && len(key) > MaxExtensionNameLength { + return fmt.Errorf("bad key, CloudEvents attribute name '%s' is longer than %d characters", key, MaxExtensionNameLength) + } + + for _, c := range key { + if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9')) { + return errors.New("bad key, CloudEvents attribute names MUST consist of lower-case letters ('a' to 'z'), upper-case letters ('A' to 'Z') or digits ('0' to '9') from the ASCII character set") + } + } + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/extensions/distributed_tracing_extension.go b/vendor/github.com/cloudevents/sdk-go/v2/extensions/distributed_tracing_extension.go deleted file mode 100644 index 7988b65f2..000000000 --- a/vendor/github.com/cloudevents/sdk-go/v2/extensions/distributed_tracing_extension.go +++ /dev/null @@ -1,164 +0,0 @@ -package extensions - -import ( - "context" - "reflect" - "strings" - - "github.com/cloudevents/sdk-go/v2/binding" - "github.com/cloudevents/sdk-go/v2/event" - - "github.com/cloudevents/sdk-go/v2/types" - "github.com/lightstep/tracecontext.go/traceparent" - "github.com/lightstep/tracecontext.go/tracestate" - "go.opencensus.io/trace" - octs "go.opencensus.io/trace/tracestate" -) - -const ( - TraceParentExtension = "traceparent" - TraceStateExtension = "tracestate" -) - -// DistributedTracingExtension represents the extension for cloudevents context -type DistributedTracingExtension struct { - TraceParent string `json:"traceparent"` - TraceState string `json:"tracestate"` -} - -// AddTracingAttributes adds the tracing attributes traceparent and tracestate to the cloudevents context -func (d DistributedTracingExtension) AddTracingAttributes(e event.EventWriter) { - if d.TraceParent != "" { - value := reflect.ValueOf(d) - typeOf := value.Type() - - for i := 0; i < value.NumField(); i++ { - k := strings.ToLower(typeOf.Field(i).Name) - v := value.Field(i).Interface() - if k == TraceStateExtension && v == "" { - continue - } - e.SetExtension(k, v) - } - } -} - -func GetDistributedTracingExtension(event event.Event) (DistributedTracingExtension, bool) { - if tp, ok := event.Extensions()[TraceParentExtension]; ok { - if tpStr, err := types.ToString(tp); err == nil { - var tsStr string - if ts, ok := event.Extensions()[TraceStateExtension]; ok { - tsStr, _ = types.ToString(ts) - } - return DistributedTracingExtension{TraceParent: tpStr, TraceState: tsStr}, true - } - } - return DistributedTracingExtension{}, false -} - -func (d *DistributedTracingExtension) ReadTransformer() binding.TransformerFunc { - return func(reader binding.MessageMetadataReader, writer binding.MessageMetadataWriter) error { - tp := reader.GetExtension(TraceParentExtension) - if tp != nil { - tpFormatted, err := 
types.Format(tp) - if err != nil { - return err - } - d.TraceParent = tpFormatted - } - ts := reader.GetExtension(TraceStateExtension) - if ts != nil { - tsFormatted, err := types.Format(ts) - if err != nil { - return err - } - d.TraceState = tsFormatted - } - return nil - } -} - -func (d *DistributedTracingExtension) WriteTransformer() binding.TransformerFunc { - return func(reader binding.MessageMetadataReader, writer binding.MessageMetadataWriter) error { - err := writer.SetExtension(TraceParentExtension, d.TraceParent) - if err != nil { - return nil - } - if d.TraceState != "" { - return writer.SetExtension(TraceStateExtension, d.TraceState) - } - return nil - } -} - -// FromSpanContext populates DistributedTracingExtension from a SpanContext. -func FromSpanContext(sc trace.SpanContext) DistributedTracingExtension { - tp := traceparent.TraceParent{ - TraceID: sc.TraceID, - SpanID: sc.SpanID, - Flags: traceparent.Flags{ - Recorded: sc.IsSampled(), - }, - } - - entries := make([]string, 0, len(sc.Tracestate.Entries())) - for _, entry := range sc.Tracestate.Entries() { - entries = append(entries, strings.Join([]string{entry.Key, entry.Value}, "=")) - } - - return DistributedTracingExtension{ - TraceParent: tp.String(), - TraceState: strings.Join(entries, ","), - } -} - -// ToSpanContext creates a SpanContext from a DistributedTracingExtension instance. -func (d DistributedTracingExtension) ToSpanContext() (trace.SpanContext, error) { - tp, err := traceparent.ParseString(d.TraceParent) - if err != nil { - return trace.SpanContext{}, err - } - sc := trace.SpanContext{ - TraceID: tp.TraceID, - SpanID: tp.SpanID, - } - if tp.Flags.Recorded { - sc.TraceOptions |= 1 - } - - if ts, err := tracestate.ParseString(d.TraceState); err == nil { - entries := make([]octs.Entry, 0, len(ts)) - for _, member := range ts { - var key string - if member.Tenant != "" { - // Due to github.com/lightstep/tracecontext.go/issues/6, - // the meaning of Vendor and Tenant are swapped here. - key = member.Vendor + "@" + member.Tenant - } else { - key = member.Vendor - } - entries = append(entries, octs.Entry{Key: key, Value: member.Value}) - } - sc.Tracestate, _ = octs.New(nil, entries...) - } - - return sc, nil -} - -func (d DistributedTracingExtension) StartChildSpan(ctx context.Context, name string, opts ...trace.StartOption) (context.Context, *trace.Span) { - if sc, err := d.ToSpanContext(); err == nil { - tSpan := trace.FromContext(ctx) - ctx, span := trace.StartSpanWithRemoteParent(ctx, name, sc, opts...) - if tSpan != nil { - // Add link to the previous in-process trace. - tsc := tSpan.SpanContext() - span.AddLink(trace.Link{ - TraceID: tsc.TraceID, - SpanID: tsc.SpanID, - Type: trace.LinkTypeParent, - }) - } - return ctx, span - } - return ctx, nil -} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/extensions/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/extensions/doc.go deleted file mode 100644 index d71269042..000000000 --- a/vendor/github.com/cloudevents/sdk-go/v2/extensions/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package extensions provides implementations of common event extensions. -package extensions diff --git a/vendor/github.com/cloudevents/sdk-go/v2/observability/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/observability/doc.go deleted file mode 100644 index 3067ebe7e..000000000 --- a/vendor/github.com/cloudevents/sdk-go/v2/observability/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -/* -Package observability holds metrics and tracing recording implementations. 
-*/ -package observability diff --git a/vendor/github.com/cloudevents/sdk-go/v2/observability/keys.go b/vendor/github.com/cloudevents/sdk-go/v2/observability/keys.go deleted file mode 100644 index afadddcf5..000000000 --- a/vendor/github.com/cloudevents/sdk-go/v2/observability/keys.go +++ /dev/null @@ -1,22 +0,0 @@ -package observability - -import ( - "go.opencensus.io/tag" -) - -var ( - // KeyMethod is the tag used for marking method on a metric. - KeyMethod, _ = tag.NewKey("method") - // KeyResult is the tag used for marking result on a metric. - KeyResult, _ = tag.NewKey("result") -) - -const ( - // ClientSpanName is the key used to start spans from the client. - ClientSpanName = "cloudevents.client" - - // ResultError is a shared result tag value for error. - ResultError = "error" - // ResultOK is a shared result tag value for success. - ResultOK = "success" -) diff --git a/vendor/github.com/cloudevents/sdk-go/v2/observability/observer.go b/vendor/github.com/cloudevents/sdk-go/v2/observability/observer.go deleted file mode 100644 index bedf3e442..000000000 --- a/vendor/github.com/cloudevents/sdk-go/v2/observability/observer.go +++ /dev/null @@ -1,87 +0,0 @@ -package observability - -import ( - "context" - "sync" - "time" - - "go.opencensus.io/stats" - "go.opencensus.io/tag" -) - -// Observable represents the the customization used by the Reporter for a given -// measurement and trace for a single method. -type Observable interface { - MethodName() string - LatencyMs() *stats.Float64Measure -} - -// Reporter represents a running latency counter. When Error or OK are -// called, the latency is calculated. Error or OK are only allowed to -// be called once. -type Reporter interface { - Error() - OK() -} - -type reporter struct { - ctx context.Context - on Observable - start time.Time - once sync.Once -} - -// LatencyTags returns all tags used for Latency measurements. -func LatencyTags() []tag.Key { - return []tag.Key{KeyMethod, KeyResult} -} - -// EnableTracing is deprecated. Tracing is always enabled. -func EnableTracing(enabled bool) {} - -// NewReporter creates and returns a reporter wrapping the provided Observable. -func NewReporter(ctx context.Context, on Observable) (context.Context, Reporter) { - r := &reporter{ - ctx: ctx, - on: on, - start: time.Now(), - } - r.tagMethod() - return ctx, r -} - -func (r *reporter) tagMethod() { - var err error - r.ctx, err = tag.New(r.ctx, tag.Insert(KeyMethod, r.on.MethodName())) - if err != nil { - panic(err) // or ignore? - } -} - -func (r *reporter) record() { - ms := float64(time.Since(r.start) / time.Millisecond) - stats.Record(r.ctx, r.on.LatencyMs().M(ms)) -} - -// Error records the result as an error. -func (r *reporter) Error() { - r.once.Do(func() { - r.result(ResultError) - }) -} - -// OK records the result as a success. -func (r *reporter) OK() { - r.once.Do(func() { - r.result(ResultOK) - }) -} - -func (r *reporter) result(v string) { - var err error - r.ctx, err = tag.New(r.ctx, tag.Insert(KeyResult, v)) - if err != nil { - panic(err) // or ignore? 
- } - r.record() -} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/doc.go index d14bf7f98..f826a1841 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/protocol/doc.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/doc.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + /* Package protocol defines interfaces to decouple the client package from protocol implementations. diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/error.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/error.go index 0c9530d19..a3f335261 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/protocol/error.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/error.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package protocol import "fmt" diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/abuse_protection.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/abuse_protection.go index eb004101f..48f03fb6c 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/abuse_protection.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/abuse_protection.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package http import ( @@ -7,6 +12,7 @@ import ( "net/http" "strconv" "strings" + "time" ) type WebhookConfig struct { @@ -18,6 +24,7 @@ type WebhookConfig struct { const ( DefaultAllowedRate = 1000 + DefaultTimeout = time.Second * 600 ) // TODO: implement rate limiting. diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/context.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/context.go new file mode 100644 index 000000000..0eec396a1 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/context.go @@ -0,0 +1,48 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "context" + + nethttp "net/http" + "net/url" +) + +type requestKey struct{} + +// RequestData holds the http.Request information subset that can be +// used to retrieve HTTP information for an incoming CloudEvent. +type RequestData struct { + URL *url.URL + Header nethttp.Header + RemoteAddr string + Host string +} + +// WithRequestDataAtContext uses the http.Request to add RequestData +// information to the Context. +func WithRequestDataAtContext(ctx context.Context, r *nethttp.Request) context.Context { + if r == nil { + return ctx + } + + return context.WithValue(ctx, requestKey{}, &RequestData{ + URL: r.URL, + Header: r.Header, + RemoteAddr: r.RemoteAddr, + Host: r.Host, + }) +} + +// RequestDataFromContext retrieves RequestData from the Context. +// If not set nil is returned. 
+func RequestDataFromContext(ctx context.Context) *RequestData { + if req := ctx.Value(requestKey{}); req != nil { + return req.(*RequestData) + } + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/doc.go index 5c04b88af..3428ea387 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/doc.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/doc.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + /* Package http implements an HTTP binding using net/http module */ diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/headers.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/headers.go index f60c50445..055a5c4dd 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/headers.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/headers.go @@ -1,6 +1,14 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package http import ( + "context" + "github.com/cloudevents/sdk-go/v2/binding" + "net/http" "net/textproto" "strings" "unicode" @@ -10,6 +18,12 @@ import ( var attributeHeadersMapping map[string]string +type customHeaderKey int + +const ( + headerKey customHeaderKey = iota +) + func init() { attributeHeadersMapping = make(map[string]string) for _, v := range specs.Versions() { @@ -31,3 +45,11 @@ func extNameToHeaderName(name string) string { b.WriteString(name[1:]) return b.String() } + +func HeaderFrom(ctx context.Context) http.Header { + return binding.GetOrDefaultFromCtx(ctx, headerKey, make(http.Header)).(http.Header) +} + +func WithCustomHeader(ctx context.Context, header http.Header) context.Context { + return context.WithValue(ctx, headerKey, header) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/message.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/message.go index 451506ce5..7a7c36f9b 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/message.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/message.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package http import ( @@ -37,12 +42,15 @@ type Message struct { BodyReader io.ReadCloser OnFinish func(error) error + ctx context.Context + format format.Format version spec.Version } // Check if http.Message implements binding.Message var _ binding.Message = (*Message)(nil) +var _ binding.MessageContext = (*Message)(nil) var _ binding.MessageMetadataReader = (*Message)(nil) // NewMessage returns a binding.Message with header and data. @@ -64,7 +72,9 @@ func NewMessageFromHttpRequest(req *nethttp.Request) *Message { if req == nil { return nil } - return NewMessage(req.Header, req.Body) + message := NewMessage(req.Header, req.Body) + message.ctx = req.Context() + return message } // NewMessageFromHttpResponse returns a binding.Message with header and data. 
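[Reviewer note, not part of the upstream patch] The vendor bump above introduces two pairs of context helpers on the HTTP protocol: WithCustomHeader/HeaderFrom for attaching extra headers to outbound requests, and RequestData/RequestDataFromContext for inspecting the inbound request. A minimal sketch of how calling code might use them; the client wiring, handler names, and header values below are illustrative assumptions, not APIs added by this patch (only the cehttp helpers themselves come from it):

package example

import (
	"context"
	"log"
	nethttp "net/http"

	cloudevents "github.com/cloudevents/sdk-go/v2"
	cehttp "github.com/cloudevents/sdk-go/v2/protocol/http"
)

// sendWithHeader shows the send side: headers stored with
// WithCustomHeader are read back by makeRequest via HeaderFrom(ctx)
// instead of starting from an empty header map.
func sendWithHeader(c cloudevents.Client, e cloudevents.Event) {
	h := nethttp.Header{}
	h.Set("Authorization", "Bearer example-token") // illustrative value
	ctx := cehttp.WithCustomHeader(context.Background(), h)
	ctx = cloudevents.ContextWithTarget(ctx, "http://localhost:8080/")
	if result := c.Send(ctx, e); cloudevents.IsUndelivered(result) {
		log.Printf("send failed: %v", result)
	}
}

// receive shows the receive side: RequestData is only present when the
// protocol is built with WithRequestDataAtContextMiddleware(), which is
// added further down in this patch.
func receive(ctx context.Context, e cloudevents.Event) {
	if rd := cehttp.RequestDataFromContext(ctx); rd != nil {
		log.Printf("event %s from %s (host %s)", e.ID(), rd.RemoteAddr, rd.Host)
	}
}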
@@ -82,6 +92,9 @@ func (m *Message) ReadEncoding() binding.Encoding { return binding.EncodingBinary } if m.format != nil { + if m.format == format.JSONBatch { + return binding.EncodingBatch + } return binding.EncodingStructured } return binding.EncodingUnknown @@ -147,6 +160,10 @@ func (m *Message) GetExtension(name string) interface{} { return nil } +func (m *Message) Context() context.Context { + return m.ctx +} + func (m *Message) Finish(err error) error { if m.BodyReader != nil { _ = m.BodyReader.Close() diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/options.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/options.go index 71f88fcb2..5e400905a 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/options.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/options.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package http import ( @@ -69,18 +74,18 @@ func WithHeader(key, value string) Option { // WithShutdownTimeout sets the shutdown timeout when the http server is being shutdown. func WithShutdownTimeout(timeout time.Duration) Option { - return func(t *Protocol) error { - if t == nil { + return func(p *Protocol) error { + if p == nil { return fmt.Errorf("http shutdown timeout option can not set nil protocol") } - t.ShutdownTimeout = timeout + p.ShutdownTimeout = timeout return nil } } -func checkListen(t *Protocol, prefix string) error { +func checkListen(p *Protocol, prefix string) error { switch { - case t.listener.Load() != nil: + case p.listener.Load() != nil: return fmt.Errorf("error setting %v: listener already set", prefix) } return nil @@ -89,17 +94,17 @@ func checkListen(t *Protocol, prefix string) error { // WithPort sets the listening port for StartReceiver. // Only one of WithListener or WithPort is allowed. func WithPort(port int) Option { - return func(t *Protocol) error { - if t == nil { + return func(p *Protocol) error { + if p == nil { return fmt.Errorf("http port option can not set nil protocol") } if port < 0 || port > 65535 { return fmt.Errorf("http port option was given an invalid port: %d", port) } - if err := checkListen(t, "http port option"); err != nil { + if err := checkListen(p, "http port option"); err != nil { return err } - t.Port = port + p.Port = port return nil } } @@ -107,29 +112,29 @@ func WithPort(port int) Option { // WithListener sets the listener for StartReceiver. // Only one of WithListener or WithPort is allowed. func WithListener(l net.Listener) Option { - return func(t *Protocol) error { - if t == nil { + return func(p *Protocol) error { + if p == nil { return fmt.Errorf("http listener option can not set nil protocol") } - if err := checkListen(t, "http listener"); err != nil { + if err := checkListen(p, "http listener"); err != nil { return err } - t.listener.Store(l) + p.listener.Store(l) return nil } } // WithPath sets the path to receive cloudevents on for HTTP transports. func WithPath(path string) Option { - return func(t *Protocol) error { - if t == nil { + return func(p *Protocol) error { + if p == nil { return fmt.Errorf("http path option can not set nil protocol") } path = strings.TrimSpace(path) if len(path) == 0 { return fmt.Errorf("http path option was given an invalid path: %q", path) } - t.Path = path + p.Path = path return nil } } @@ -162,22 +167,40 @@ type Middleware func(next nethttp.Handler) nethttp.Handler // Middleware is applied to everything before it. 
For example // `NewClient(WithMiddleware(foo), WithMiddleware(bar))` would result in `bar(foo(original))`. func WithMiddleware(middleware Middleware) Option { - return func(t *Protocol) error { - if t == nil { + return func(p *Protocol) error { + if p == nil { return fmt.Errorf("http middleware option can not set nil protocol") } - t.middleware = append(t.middleware, middleware) + p.middleware = append(p.middleware, middleware) return nil } } // WithRoundTripper sets the HTTP RoundTripper. func WithRoundTripper(roundTripper nethttp.RoundTripper) Option { - return func(t *Protocol) error { - if t == nil { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http round tripper option can not set nil protocol") + } + p.roundTripper = roundTripper + return nil + } +} + +// WithRoundTripperDecorator decorates the default HTTP RoundTripper chosen. +func WithRoundTripperDecorator(decorator func(roundTripper nethttp.RoundTripper) nethttp.RoundTripper) Option { + return func(p *Protocol) error { + if p == nil { return fmt.Errorf("http round tripper option can not set nil protocol") } - t.roundTripper = roundTripper + if p.roundTripper == nil { + if p.Client == nil { + p.roundTripper = nethttp.DefaultTransport + } else { + p.roundTripper = p.Client.Transport + } + } + p.roundTripper = decorator(p.roundTripper) return nil } } @@ -225,7 +248,7 @@ func WithDefaultOptionsHandlerFunc(methods []string, rate int, origins []string, if p == nil { return fmt.Errorf("http OPTIONS handler func can not set nil protocol") } - p.OptionsHandlerFn = p.DeleteHandlerFn + p.OptionsHandlerFn = p.OptionsHandler p.WebhookConfig = &WebhookConfig{ AllowedMethods: methods, AllowedRate: &rate, @@ -235,3 +258,44 @@ func WithDefaultOptionsHandlerFunc(methods []string, rate int, origins []string, return nil } } + +// IsRetriable is a custom function that can be used to override the +// default retriable status codes. +type IsRetriable func(statusCode int) bool + +// WithIsRetriableFunc sets the function that gets called to determine if an +// error should be retried. If not set, the defaultIsRetriableFunc is used. +func WithIsRetriableFunc(isRetriable IsRetriable) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("isRetriable handler func can not set nil protocol") + } + if isRetriable == nil { + return fmt.Errorf("isRetriable handler can not be nil") + } + p.isRetriableFunc = isRetriable + return nil + } +} + +func WithRateLimiter(rl RateLimiter) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http OPTIONS handler func can not set nil protocol") + } + p.limiter = rl + return nil + } +} + +// WithRequestDataAtContextMiddleware adds to the Context RequestData. +// This enables a user's dispatch handler to inspect HTTP request information by +// retrieving it from the Context. 
+func WithRequestDataAtContextMiddleware() Option { + return WithMiddleware(func(next nethttp.Handler) nethttp.Handler { + return nethttp.HandlerFunc(func(w nethttp.ResponseWriter, r *nethttp.Request) { + ctx := WithRequestDataAtContext(r.Context(), r) + next.ServeHTTP(w, r.WithContext(ctx)) + }) + }) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol.go index 97372b4b7..dba6fd7ba 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol.go @@ -1,12 +1,19 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package http import ( + "bytes" "context" "errors" "fmt" "io" "net/http" "net/url" + "strconv" "sync" "sync/atomic" "time" @@ -28,6 +35,19 @@ type msgErr struct { err error } +// Default error codes that we retry on - string isn't used, it's just there so +// people know what each error code's title is. +// To modify this use Option +var defaultRetriableErrors = map[int]string{ + 404: "Not Found", + 413: "Payload Too Large", + 425: "Too Early", + 429: "Too Many Requests", + 502: "Bad Gateway", + 503: "Service Unavailable", + 504: "Gateway Timeout", +} + // Protocol acts as both a http client and a http handler. type Protocol struct { Target *url.URL @@ -47,7 +67,7 @@ type Protocol struct { // To support Opener: // ShutdownTimeout defines the timeout given to the http.Server when calling Shutdown. - // If nil, DefaultShutdownTimeout is used. + // If 0, DefaultShutdownTimeout is used. ShutdownTimeout time.Duration // Port is the port configured to bind the receiver to. Defaults to 8080. @@ -67,6 +87,9 @@ type Protocol struct { server *http.Server handlerRegistered bool middleware []Middleware + limiter RateLimiter + + isRetriableFunc IsRetriable } func New(opts ...Option) (*Protocol, error) { @@ -90,9 +113,22 @@ func New(opts ...Option) (*Protocol, error) { p.ShutdownTimeout = DefaultShutdownTimeout } + if p.isRetriableFunc == nil { + p.isRetriableFunc = defaultIsRetriableFunc + } + + if p.limiter == nil { + p.limiter = noOpLimiter{} + } + return p, nil } +// NewObserved creates an HTTP protocol with trace propagating middleware. +// Deprecated: now this behaves like New and it will be removed in future releases, +// setup the http observed protocol using the opencensus separate module NewObservedHttp +var NewObserved = New + func (p *Protocol) applyOptions(opts ...Option) error { for _, fn := range opts { if err := fn(p); err != nil { @@ -110,7 +146,28 @@ func (p *Protocol) Send(ctx context.Context, m binding.Message, transformers ... return fmt.Errorf("nil Message") } - _, err := p.Request(ctx, m, transformers...) + msg, err := p.Request(ctx, m, transformers...) + if msg != nil { + defer func() { _ = msg.Finish(err) }() + } + if err != nil && !protocol.IsACK(err) { + var res *Result + if protocol.ResultAs(err, &res) { + if message, ok := msg.(*Message); ok { + buf := new(bytes.Buffer) + buf.ReadFrom(message.BodyReader) + errorStr := buf.String() + // If the error is not wrapped, then append the original error string. 
+ if og, ok := err.(*Result); ok { + og.Format = og.Format + "%s" + og.Args = append(og.Args, errorStr) + err = og + } else { + err = NewResult(res.StatusCode, "%w: %s", err, errorStr) + } + } + } + } return err } @@ -139,11 +196,9 @@ func (p *Protocol) Request(ctx context.Context, m binding.Message, transformers } func (p *Protocol) makeRequest(ctx context.Context) *http.Request { - // TODO: support custom headers from context? req := &http.Request{ Method: http.MethodPost, - Header: make(http.Header), - // TODO: HeaderFrom(ctx), + Header: HeaderFrom(ctx), } if p.RequestTemplate != nil { @@ -235,6 +290,20 @@ func (p *Protocol) Respond(ctx context.Context) (binding.Message, protocol.Respo // ServeHTTP implements http.Handler. // Blocks until ResponseFn is invoked. func (p *Protocol) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + // always apply limiter first using req context + ok, reset, err := p.limiter.Allow(req.Context(), req) + if err != nil { + p.incoming <- msgErr{msg: nil, err: fmt.Errorf("unable to acquire rate limit token: %w", err)} + rw.WriteHeader(http.StatusInternalServerError) + return + } + + if !ok { + rw.Header().Add("Retry-After", strconv.Itoa(int(reset))) + http.Error(rw, "limit exceeded", 429) + return + } + // Filter the GET style methods: switch req.Method { case http.MethodOptions: @@ -290,6 +359,7 @@ func (p *Protocol) ServeHTTP(rw http.ResponseWriter, req *http.Request) { } status := http.StatusOK + var errMsg string if res != nil { var result *Result switch { @@ -297,7 +367,7 @@ func (p *Protocol) ServeHTTP(rw http.ResponseWriter, req *http.Request) { if result.StatusCode > 100 && result.StatusCode < 600 { status = result.StatusCode } - + errMsg = fmt.Errorf(result.Format, result.Args...).Error() case !protocol.IsACK(res): // Map client errors to http status code validationError := event.ValidationError{} @@ -321,6 +391,9 @@ func (p *Protocol) ServeHTTP(rw http.ResponseWriter, req *http.Request) { } rw.WriteHeader(status) + if _, err := rw.Write([]byte(errMsg)); err != nil { + return err + } return nil } @@ -328,3 +401,8 @@ func (p *Protocol) ServeHTTP(rw http.ResponseWriter, req *http.Request) { // Block until ResponseFn is invoked wg.Wait() } + +func defaultIsRetriableFunc(sc int) bool { + _, ok := defaultRetriableErrors[sc] + return ok +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_lifecycle.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_lifecycle.go index f3aafbd4d..04ef96915 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_lifecycle.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_lifecycle.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package http import ( @@ -8,8 +13,6 @@ import ( "strings" "github.com/cloudevents/sdk-go/v2/protocol" - "go.opencensus.io/plugin/ochttp" - "go.opencensus.io/plugin/ochttp/propagation/tracecontext" ) var _ protocol.Opener = (*Protocol)(nil) @@ -35,12 +38,10 @@ func (p *Protocol) OpenInbound(ctx context.Context) error { } p.server = &http.Server{ - Addr: listener.Addr().String(), - Handler: &ochttp.Handler{ - Propagation: &tracecontext.HTTPFormat{}, - Handler: attachMiddleware(p.Handler, p.middleware), - FormatSpanName: formatSpanName, - }, + Addr: listener.Addr().String(), + Handler: attachMiddleware(p.Handler, p.middleware), + ReadTimeout: DefaultTimeout, + WriteTimeout: DefaultTimeout, } // Shutdown @@ -49,7 +50,7 @@ func (p *Protocol) OpenInbound(ctx 
context.Context) error { p.server = nil }() - errChan := make(chan error, 1) + errChan := make(chan error) go func() { errChan <- p.server.Serve(listener) }() @@ -57,14 +58,35 @@ func (p *Protocol) OpenInbound(ctx context.Context) error { // wait for the server to return or ctx.Done(). select { case <-ctx.Done(): - // Try a gracefully shutdown. + // Try a graceful shutdown. ctx, cancel := context.WithTimeout(context.Background(), p.ShutdownTimeout) defer cancel() - err := p.server.Shutdown(ctx) - <-errChan // Wait for server goroutine to exit - return err + + shdwnErr := p.server.Shutdown(ctx) + if shdwnErr != nil { + shdwnErr = fmt.Errorf("shutting down HTTP server: %w", shdwnErr) + } + + // Wait for server goroutine to exit + rntmErr := <-errChan + if rntmErr != nil && rntmErr != http.ErrServerClosed { + rntmErr = fmt.Errorf("server failed during shutdown: %w", rntmErr) + + if shdwnErr != nil { + return fmt.Errorf("combined error during shutdown of HTTP server: %w, %v", + shdwnErr, rntmErr) + } + + return rntmErr + } + + return shdwnErr + case err := <-errChan: - return err + if err != nil { + return fmt.Errorf("during runtime of HTTP server: %w", err) + } + return nil } } @@ -79,10 +101,6 @@ func (p *Protocol) GetListeningPort() int { return -1 } -func formatSpanName(r *http.Request) string { - return "cloudevents.http." + r.URL.Path -} - // listen if not already listening, update t.Port func (p *Protocol) listen() (net.Listener, error) { if p.listener.Load() == nil { diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_rate.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_rate.go new file mode 100644 index 000000000..9c4c10a29 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_rate.go @@ -0,0 +1,34 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "context" + "net/http" +) + +type RateLimiter interface { + // Allow attempts to take one token from the rate limiter for the specified + // request. It returns ok when this operation was successful. In case ok is + // false, reset will indicate the time in seconds when it is safe to perform + // another attempt. An error is returned when this operation failed, e.g. due to + // a backend error. + Allow(ctx context.Context, r *http.Request) (ok bool, reset uint64, err error) + // Close terminates rate limiter and cleans up any data structures or + // connections that may remain open. After a store is stopped, Take() should + // always return zero values. 
+ Close(ctx context.Context) error +} + +type noOpLimiter struct{} + +func (n noOpLimiter) Allow(ctx context.Context, r *http.Request) (bool, uint64, error) { + return true, 0, nil +} + +func (n noOpLimiter) Close(ctx context.Context) error { + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_retry.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_retry.go index d3444b7cb..71e7346f3 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_retry.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_retry.go @@ -1,13 +1,22 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package http import ( + "bytes" "context" "errors" - "go.uber.org/zap" + "io" + "io/ioutil" "net/http" "net/url" "time" + "go.uber.org/zap" + "github.com/cloudevents/sdk-go/v2/binding" cecontext "github.com/cloudevents/sdk-go/v2/context" "github.com/cloudevents/sdk-go/v2/protocol" @@ -47,6 +56,24 @@ func (p *Protocol) doWithRetry(ctx context.Context, params *cecontext.RetryParam retry := 0 results := make([]protocol.Result, 0) + var ( + body []byte + err error + ) + + if req != nil && req.Body != nil { + defer func() { + if err = req.Body.Close(); err != nil { + cecontext.LoggerFrom(ctx).Warnw("could not close request body", zap.Error(err)) + } + }() + body, err = ioutil.ReadAll(req.Body) + if err != nil { + panic(err) + } + resetBody(req, body) + } + for { msg, result := p.doOnce(req) @@ -69,16 +96,8 @@ func (p *Protocol) doWithRetry(ctx context.Context, params *cecontext.RetryParam { var httpResult *Result if errors.As(result, &httpResult) { - // Potentially retry when: - // - 404 Not Found - // - 413 Payload Too Large with Retry-After (NOT SUPPORTED) - // - 425 Too Early - // - 429 Too Many Requests - // - 503 Service Unavailable (with or without Retry-After) (IGNORE Retry-After) - // - 504 Gateway Timeout - sc := httpResult.StatusCode - if sc == 404 || sc == 425 || sc == 429 || sc == 503 || sc == 504 { + if p.isRetriableFunc(sc) { // retry! goto DoBackoff } else { @@ -92,6 +111,8 @@ func (p *Protocol) doWithRetry(ctx context.Context, params *cecontext.RetryParam } DoBackoff: + resetBody(req, body) + // Wait for the correct amount of backoff time. // total tries = retry + 1 @@ -105,3 +126,20 @@ func (p *Protocol) doWithRetry(ctx context.Context, params *cecontext.RetryParam results = append(results, result) } } + +// reset body to allow it to be read multiple times, e.g. 
when retrying http +// requests +func resetBody(req *http.Request, body []byte) { + if req == nil || req.Body == nil { + return + } + + req.Body = ioutil.NopCloser(bytes.NewReader(body)) + + // do not modify existing GetBody function + if req.GetBody == nil { + req.GetBody = func() (io.ReadCloser, error) { + return ioutil.NopCloser(bytes.NewReader(body)), nil + } + } +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/result.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/result.go index 149e6872c..7a0b2626c 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/result.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/result.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package http import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/retries_result.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/retries_result.go index 0f25f7059..f4046d522 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/retries_result.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/retries_result.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package http import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/utility.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/utility.go new file mode 100644 index 000000000..350fc1cf6 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/utility.go @@ -0,0 +1,89 @@ +/* + Copyright 2022 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "bytes" + "context" + "encoding/json" + nethttp "net/http" + + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/event" +) + +// NewEventFromHTTPRequest returns an Event. +func NewEventFromHTTPRequest(req *nethttp.Request) (*event.Event, error) { + msg := NewMessageFromHttpRequest(req) + return binding.ToEvent(context.Background(), msg) +} + +// NewEventFromHTTPResponse returns an Event. +func NewEventFromHTTPResponse(resp *nethttp.Response) (*event.Event, error) { + msg := NewMessageFromHttpResponse(resp) + return binding.ToEvent(context.Background(), msg) +} + +// NewEventsFromHTTPRequest returns a batched set of Events from a HTTP Request +func NewEventsFromHTTPRequest(req *nethttp.Request) ([]event.Event, error) { + msg := NewMessageFromHttpRequest(req) + return binding.ToEvents(context.Background(), msg, msg.BodyReader) +} + +// NewEventsFromHTTPResponse returns a batched set of Events from a HTTP Response +func NewEventsFromHTTPResponse(resp *nethttp.Response) ([]event.Event, error) { + msg := NewMessageFromHttpResponse(resp) + return binding.ToEvents(context.Background(), msg, msg.BodyReader) +} + +// NewHTTPRequestFromEvent creates a http.Request object that can be used with any http.Client for a singular event. +// This is an HTTP POST action to the provided url. 
+func NewHTTPRequestFromEvent(ctx context.Context, url string, event event.Event) (*nethttp.Request, error) { + if err := event.Validate(); err != nil { + return nil, err + } + + req, err := nethttp.NewRequestWithContext(ctx, nethttp.MethodPost, url, nil) + if err != nil { + return nil, err + } + if err := WriteRequest(ctx, (*binding.EventMessage)(&event), req); err != nil { + return nil, err + } + + return req, nil +} + +// NewHTTPRequestFromEvents creates a http.Request object that can be used with any http.Client for sending +// a batched set of events. This is an HTTP POST action to the provided url. +func NewHTTPRequestFromEvents(ctx context.Context, url string, events []event.Event) (*nethttp.Request, error) { + // Sending batch events is quite straightforward, as there is only JSON format, so a simple implementation. + for _, e := range events { + if err := e.Validate(); err != nil { + return nil, err + } + } + var buffer bytes.Buffer + err := json.NewEncoder(&buffer).Encode(events) + if err != nil { + return nil, err + } + + request, err := nethttp.NewRequestWithContext(ctx, nethttp.MethodPost, url, &buffer) + if err != nil { + return nil, err + } + + request.Header.Set(ContentType, event.ApplicationCloudEventsBatchJSON) + + return request, nil +} + +// IsHTTPBatch returns if the current http.Request or http.Response is a batch event operation, by checking the +// header `Content-Type` value. +func IsHTTPBatch(header nethttp.Header) bool { + return header.Get(ContentType) == event.ApplicationCloudEventsBatchJSON +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_request.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_request.go index e0c0d3072..43ad36180 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_request.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_request.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package http import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_responsewriter.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_responsewriter.go index 9646ca49f..41385dab1 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_responsewriter.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_responsewriter.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package http import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/inbound.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/inbound.go index e67ed8acd..e7a74294d 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/protocol/inbound.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/inbound.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package protocol import ( @@ -9,6 +14,7 @@ import ( // Receiver receives messages. type Receiver interface { // Receive blocks till a message is received or ctx expires. + // Receive can be invoked safely from different goroutines. // // A non-nil error means the receiver is closed. // io.EOF means it closed cleanly, any other value indicates an error. @@ -29,7 +35,8 @@ type ResponseFn func(ctx context.Context, m binding.Message, r Result, transform // Responder receives messages and is given a callback to respond. type Responder interface { - // Receive blocks till a message is received or ctx expires. 
+ // Respond blocks till a message is received or ctx expires. + // Respond can be invoked safely from different goroutines. // // A non-nil error means the receiver is closed. // io.EOF means it closed cleanly, any other value indicates an error. diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/lifecycle.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/lifecycle.go index 22ae08e09..4a058c962 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/protocol/lifecycle.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/lifecycle.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package protocol import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/outbound.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/outbound.go index b0a87761e..e44fa432a 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/protocol/outbound.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/outbound.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package protocol import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/result.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/result.go index f9bd9f27b..eae64e018 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/protocol/result.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/result.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package protocol import ( @@ -5,7 +10,7 @@ import ( "fmt" ) -// Result leverages go's 1.13 error wrapping. +// Result leverages go's error wrapping. type Result error // ResultIs reports whether any error in err's chain matches target. diff --git a/vendor/github.com/cloudevents/sdk-go/v2/types/allocate.go b/vendor/github.com/cloudevents/sdk-go/v2/types/allocate.go index c38f71177..814626874 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/types/allocate.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/types/allocate.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package types import "reflect" diff --git a/vendor/github.com/cloudevents/sdk-go/v2/types/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/types/doc.go index b1d9c29da..cf7a94f35 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/types/doc.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/types/doc.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + /* Package types implements the CloudEvents type system. 
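[Reviewer note, not part of the upstream patch] The new protocol/http/utility.go earlier in this patch adds bridges between plain net/http and CloudEvents. A short sketch of both directions, assuming an ordinary handler and a hypothetical target URL; everything except the cehttp helpers is illustrative:

package example

import (
	"context"
	"log"
	nethttp "net/http"

	cloudevents "github.com/cloudevents/sdk-go/v2"
	cehttp "github.com/cloudevents/sdk-go/v2/protocol/http"
)

// handler decodes a single CloudEvent out of a vanilla net/http request.
func handler(w nethttp.ResponseWriter, r *nethttp.Request) {
	e, err := cehttp.NewEventFromHTTPRequest(r)
	if err != nil {
		nethttp.Error(w, err.Error(), nethttp.StatusBadRequest)
		return
	}
	log.Printf("received event %s", e.ID())
}

// sendBatch posts several events in one request using the JSON batch
// format (Content-Type: application/cloudevents-batch+json).
func sendBatch(ctx context.Context, events []cloudevents.Event) error {
	req, err := cehttp.NewHTTPRequestFromEvents(ctx, "http://localhost:8080/", events)
	if err != nil {
		return err
	}
	resp, err := nethttp.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	return resp.Body.Close()
}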
diff --git a/vendor/github.com/cloudevents/sdk-go/v2/types/timestamp.go b/vendor/github.com/cloudevents/sdk-go/v2/types/timestamp.go index 3ae1c7def..ff049727d 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/types/timestamp.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/types/timestamp.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package types import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/types/uri.go b/vendor/github.com/cloudevents/sdk-go/v2/types/uri.go index 97248a24d..bed608094 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/types/uri.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/types/uri.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package types import ( @@ -68,6 +73,10 @@ func (u *URI) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { return nil } +func (u URI) Validate() bool { + return u.IsAbs() +} + // String returns the full string representation of the URI-Reference. func (u *URI) String() string { if u == nil { diff --git a/vendor/github.com/cloudevents/sdk-go/v2/types/uriref.go b/vendor/github.com/cloudevents/sdk-go/v2/types/uriref.go index e19a1dbb7..22fa12314 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/types/uriref.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/types/uriref.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package types import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/types/value.go b/vendor/github.com/cloudevents/sdk-go/v2/types/value.go index adfbdd687..f643d0aa5 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/types/value.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/types/value.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package types import ( diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go index b48005673..42bf32aab 100644 --- a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go +++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go @@ -9,6 +9,8 @@ func Render(doc []byte) []byte { renderer := NewRoffRenderer() return blackfriday.Run(doc, - []blackfriday.Option{blackfriday.WithRenderer(renderer), - blackfriday.WithExtensions(renderer.GetExtensions())}...) + []blackfriday.Option{ + blackfriday.WithRenderer(renderer), + blackfriday.WithExtensions(renderer.GetExtensions()), + }...) 
} diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go index be2b34360..4b19188d9 100644 --- a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go +++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go @@ -1,6 +1,7 @@ package md2man import ( + "bytes" "fmt" "io" "os" @@ -34,10 +35,10 @@ const ( hruleTag = "\n.ti 0\n\\l'\\n(.lu'\n" linkTag = "\n\\[la]" linkCloseTag = "\\[ra]" - codespanTag = "\\fB\\fC" + codespanTag = "\\fB" codespanCloseTag = "\\fR" - codeTag = "\n.PP\n.RS\n\n.nf\n" - codeCloseTag = "\n.fi\n.RE\n" + codeTag = "\n.EX\n" + codeCloseTag = "\n.EE\n" quoteTag = "\n.PP\n.RS\n" quoteCloseTag = "\n.RE\n" listTag = "\n.RS\n" @@ -86,8 +87,7 @@ func (r *roffRenderer) RenderFooter(w io.Writer, ast *blackfriday.Node) { // RenderNode is called for each node in a markdown document; based on the node // type the equivalent roff output is sent to the writer func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus { - - var walkAction = blackfriday.GoToNext + walkAction := blackfriday.GoToNext switch node.Type { case blackfriday.Text: @@ -109,9 +109,16 @@ func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering out(w, strongCloseTag) } case blackfriday.Link: - if !entering { - out(w, linkTag+string(node.LinkData.Destination)+linkCloseTag) + // Don't render the link text for automatic links, because this + // will only duplicate the URL in the roff output. + // See https://daringfireball.net/projects/markdown/syntax#autolink + if !bytes.Equal(node.LinkData.Destination, node.FirstChild.Literal) { + out(w, string(node.FirstChild.Literal)) } + // Hyphens in a link must be escaped to avoid word-wrap in the rendered man page. + escapedLink := strings.ReplaceAll(string(node.LinkData.Destination), "-", "\\-") + out(w, linkTag+escapedLink+linkCloseTag) + walkAction = blackfriday.SkipChildren case blackfriday.Image: // ignore images walkAction = blackfriday.SkipChildren @@ -160,6 +167,11 @@ func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering r.handleTableCell(w, node, entering) case blackfriday.HTMLSpan: // ignore other HTML tags + case blackfriday.HTMLBlock: + if bytes.HasPrefix(node.Literal, []byte(" KERB_CHECKSUM_HMAC_MD5 -- Does not support RC4-HMAC and supports AES256 --> HMAC_SHA1_96_AES256 -- Does not support RC4-HMAC or AES256-CTS-HMAC-SHA1-96, and supports AES128-CTS-HMAC-SHA1-96 --> HMAC_SHA1_96_AES128 -- Does not support RC4-HMAC, AES128-CTS-HMAC-SHA1-96 or AES256-CTS-HMAC-SHA1-96 --> None. The checksum operation will fail. 
*/ // SignatureData implements https://msdn.microsoft.com/en-us/library/cc237955.aspx diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/supplemental_cred.go b/vendor/github.com/jcmturner/gokrb5/v8/pac/supplemental_cred.go similarity index 89% rename from vendor/gopkg.in/jcmturner/gokrb5.v7/pac/supplemental_cred.go rename to vendor/github.com/jcmturner/gokrb5/v8/pac/supplemental_cred.go index 5f4f93c28..d40679d49 100644 --- a/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/supplemental_cred.go +++ b/vendor/github.com/jcmturner/gokrb5/v8/pac/supplemental_cred.go @@ -6,8 +6,8 @@ import ( "errors" "fmt" - "gopkg.in/jcmturner/rpc.v1/mstypes" - "gopkg.in/jcmturner/rpc.v1/ndr" + "github.com/jcmturner/rpc/v2/mstypes" + "github.com/jcmturner/rpc/v2/ndr" ) const ( @@ -70,9 +70,6 @@ func isFlagSet(f uint32, i uint32) bool { } // SECPKGSupplementalCred implements https://msdn.microsoft.com/en-us/library/cc237956.aspx -// The SECPKG_SUPPLEMENTAL_CRED structure defines the name of the security package that requires -// supplemental credentials and the credential buffer for that package. -// The SECPKG_SUPPLEMENTAL_CRED structure is marshaled by RPC. type SECPKGSupplementalCred struct { PackageName mstypes.RPCUnicodeString CredentialSize uint32 diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/upn_dns_info.go b/vendor/github.com/jcmturner/gokrb5/v8/pac/upn_dns_info.go similarity index 98% rename from vendor/gopkg.in/jcmturner/gokrb5.v7/pac/upn_dns_info.go rename to vendor/github.com/jcmturner/gokrb5/v8/pac/upn_dns_info.go index ee0e6bfd4..d374b9630 100644 --- a/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/upn_dns_info.go +++ b/vendor/github.com/jcmturner/gokrb5/v8/pac/upn_dns_info.go @@ -3,7 +3,7 @@ package pac import ( "bytes" - "gopkg.in/jcmturner/rpc.v1/mstypes" + "github.com/jcmturner/rpc/v2/mstypes" ) // UPNDNSInfo implements https://msdn.microsoft.com/en-us/library/dd240468.aspx diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/types/Authenticator.go b/vendor/github.com/jcmturner/gokrb5/v8/types/Authenticator.go similarity index 69% rename from vendor/gopkg.in/jcmturner/gokrb5.v7/types/Authenticator.go rename to vendor/github.com/jcmturner/gokrb5/v8/types/Authenticator.go index 500e03461..115a02ae3 100644 --- a/vendor/gopkg.in/jcmturner/gokrb5.v7/types/Authenticator.go +++ b/vendor/github.com/jcmturner/gokrb5/v8/types/Authenticator.go @@ -9,32 +9,13 @@ import ( "time" "github.com/jcmturner/gofork/encoding/asn1" - "gopkg.in/jcmturner/gokrb5.v7/asn1tools" - "gopkg.in/jcmturner/gokrb5.v7/iana" - "gopkg.in/jcmturner/gokrb5.v7/iana/asnAppTag" + "github.com/jcmturner/gokrb5/v8/asn1tools" + "github.com/jcmturner/gokrb5/v8/iana" + "github.com/jcmturner/gokrb5/v8/iana/asnAppTag" ) -/*Authenticator ::= [APPLICATION 2] SEQUENCE { -authenticator-vno [0] INTEGER (5), -crealm [1] Realm, -cname [2] PrincipalName, -cksum [3] Checksum OPTIONAL, -cusec [4] Microseconds, -ctime [5] KerberosTime, -subkey [6] EncryptionKey OPTIONAL, -seq-number [7] UInt32 OPTIONAL, -authorization-data [8] AuthorizationData OPTIONAL -} - - cksum - This field contains a checksum of the application data that - accompanies the KRB_AP_REQ, computed using a key usage value of 10 - in normal application exchanges, or 6 when used in the TGS-REQ - PA-TGS-REQ AP-DATA field. - -*/ - -// Authenticator - A record containing information that can be shown to have been recently generated using the session key known only by the client and server. 
+// Authenticator - A record containing information that can be shown to have been recently generated using the session +// key known only by the client and server. // https://tools.ietf.org/html/rfc4120#section-5.5.1 type Authenticator struct { AVNO int `asn1:"explicit,tag:0"` @@ -62,7 +43,7 @@ func NewAuthenticator(realm string, cname PrincipalName) (Authenticator, error) Cksum: Checksum{}, Cusec: int((t.UnixNano() / int64(time.Microsecond)) - (t.Unix() * 1e6)), CTime: t, - SeqNumber: seq.Int64(), + SeqNumber: seq.Int64() & 0x3fffffff, }, nil } @@ -72,7 +53,7 @@ func (a *Authenticator) GenerateSeqNumberAndSubKey(keyType int32, keySize int) e if err != nil { return err } - a.SeqNumber = seq.Int64() + a.SeqNumber = seq.Int64() & 0x3fffffff //Generate subkey value sk := make([]byte, keySize, keySize) rand.Read(sk) diff --git a/vendor/github.com/jcmturner/gokrb5/v8/types/AuthorizationData.go b/vendor/github.com/jcmturner/gokrb5/v8/types/AuthorizationData.go new file mode 100644 index 000000000..80c477cef --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/types/AuthorizationData.go @@ -0,0 +1,55 @@ +package types + +import ( + "github.com/jcmturner/gofork/encoding/asn1" +) + +// Reference: https://www.ietf.org/rfc/rfc4120.txt +// Section: 5.2.6 + +// AuthorizationData implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.6 +type AuthorizationData []AuthorizationDataEntry + +// AuthorizationDataEntry implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.6 +type AuthorizationDataEntry struct { + ADType int32 `asn1:"explicit,tag:0"` + ADData []byte `asn1:"explicit,tag:1"` +} + +// ADIfRelevant implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.6.1 +type ADIfRelevant AuthorizationData + +// ADKDCIssued implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.6.2 +type ADKDCIssued struct { + ADChecksum Checksum `asn1:"explicit,tag:0"` + IRealm string `asn1:"optional,generalstring,explicit,tag:1"` + Isname PrincipalName `asn1:"optional,explicit,tag:2"` + Elements AuthorizationData `asn1:"explicit,tag:3"` +} + +// ADAndOr implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.6.3 +type ADAndOr struct { + ConditionCount int32 `asn1:"explicit,tag:0"` + Elements AuthorizationData `asn1:"explicit,tag:1"` +} + +// ADMandatoryForKDC implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.6.4 +type ADMandatoryForKDC AuthorizationData + +// Unmarshal bytes into the ADKDCIssued. +func (a *ADKDCIssued) Unmarshal(b []byte) error { + _, err := asn1.Unmarshal(b, a) + return err +} + +// Unmarshal bytes into the AuthorizationData. +func (a *AuthorizationData) Unmarshal(b []byte) error { + _, err := asn1.Unmarshal(b, a) + return err +} + +// Unmarshal bytes into the AuthorizationDataEntry. 
+func (a *AuthorizationDataEntry) Unmarshal(b []byte) error { + _, err := asn1.Unmarshal(b, a) + return err +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/types/Cryptosystem.go b/vendor/github.com/jcmturner/gokrb5/v8/types/Cryptosystem.go similarity index 73% rename from vendor/gopkg.in/jcmturner/gokrb5.v7/types/Cryptosystem.go rename to vendor/github.com/jcmturner/gokrb5/v8/types/Cryptosystem.go index 7e8b4ab29..2f354ea7a 100644 --- a/vendor/gopkg.in/jcmturner/gokrb5.v7/types/Cryptosystem.go +++ b/vendor/github.com/jcmturner/gokrb5/v8/types/Cryptosystem.go @@ -1,7 +1,10 @@ package types import ( + "crypto/rand" + "github.com/jcmturner/gofork/encoding/asn1" + "github.com/jcmturner/gokrb5/v8/crypto/etype" ) // Reference: https://www.ietf.org/rfc/rfc4120.txt @@ -18,7 +21,7 @@ type EncryptedData struct { // AKA KeyBlock type EncryptionKey struct { KeyType int32 `asn1:"explicit,tag:0"` - KeyValue []byte `asn1:"explicit,tag:1"` + KeyValue []byte `asn1:"explicit,tag:1" json:"-"` } // Checksum implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.9 @@ -53,3 +56,17 @@ func (a *Checksum) Unmarshal(b []byte) error { _, err := asn1.Unmarshal(b, a) return err } + +// GenerateEncryptionKey creates a new EncryptionKey with a random key value. +func GenerateEncryptionKey(etype etype.EType) (EncryptionKey, error) { + k := EncryptionKey{ + KeyType: etype.GetETypeID(), + } + b := make([]byte, etype.GetKeyByteSize(), etype.GetKeyByteSize()) + _, err := rand.Read(b) + if err != nil { + return k, err + } + k.KeyValue = b + return k, nil +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/types/HostAddress.go b/vendor/github.com/jcmturner/gokrb5/v8/types/HostAddress.go similarity index 85% rename from vendor/gopkg.in/jcmturner/gokrb5.v7/types/HostAddress.go rename to vendor/github.com/jcmturner/gokrb5/v8/types/HostAddress.go index 2f6a5a7cc..895fe8053 100644 --- a/vendor/gopkg.in/jcmturner/gokrb5.v7/types/HostAddress.go +++ b/vendor/github.com/jcmturner/gokrb5/v8/types/HostAddress.go @@ -9,33 +9,9 @@ import ( "net" "github.com/jcmturner/gofork/encoding/asn1" - "gopkg.in/jcmturner/gokrb5.v7/iana/addrtype" + "github.com/jcmturner/gokrb5/v8/iana/addrtype" ) -/* -HostAddress and HostAddresses - -HostAddress ::= SEQUENCE { - addr-type [0] Int32, - address [1] OCTET STRING -} - --- NOTE: HostAddresses is always used as an OPTIONAL field and --- should not be empty. -HostAddresses -- NOTE: subtly different from rfc1510, - -- but has a value mapping and encodes the same - ::= SEQUENCE OF HostAddress - -The host address encodings consist of two fields: - -addr-type - This field specifies the type of address that follows. Pre- - defined values for this field are specified in Section 7.5.3. - -address - This field encodes a single address of type addr-type. -*/ - // HostAddresses implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.5 type HostAddresses []HostAddress diff --git a/vendor/github.com/jcmturner/gokrb5/v8/types/KerberosFlags.go b/vendor/github.com/jcmturner/gokrb5/v8/types/KerberosFlags.go new file mode 100644 index 000000000..0f2038340 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/types/KerberosFlags.go @@ -0,0 +1,68 @@ +package types + +// Reference: https://www.ietf.org/rfc/rfc4120.txt +// Section: 5.2.8 + +import ( + "github.com/jcmturner/gofork/encoding/asn1" +) + +// NewKrbFlags returns an ASN1 BitString struct of the right size for KrbFlags. 
+func NewKrbFlags() asn1.BitString { + f := asn1.BitString{} + f.Bytes = make([]byte, 4) + f.BitLength = len(f.Bytes) * 8 + return f +} + +// SetFlags sets the flags of an ASN1 BitString. +func SetFlags(f *asn1.BitString, j []int) { + for _, i := range j { + SetFlag(f, i) + } +} + +// SetFlag sets a flag in an ASN1 BitString. +func SetFlag(f *asn1.BitString, i int) { + for l := len(f.Bytes); l < 4; l++ { + (*f).Bytes = append((*f).Bytes, byte(0)) + (*f).BitLength = len((*f).Bytes) * 8 + } + //Which byte? + b := i / 8 + //Which bit in byte + p := uint(7 - (i - 8*b)) + (*f).Bytes[b] = (*f).Bytes[b] | (1 << p) +} + +// UnsetFlags unsets flags in an ASN1 BitString. +func UnsetFlags(f *asn1.BitString, j []int) { + for _, i := range j { + UnsetFlag(f, i) + } +} + +// UnsetFlag unsets a flag in an ASN1 BitString. +func UnsetFlag(f *asn1.BitString, i int) { + for l := len(f.Bytes); l < 4; l++ { + (*f).Bytes = append((*f).Bytes, byte(0)) + (*f).BitLength = len((*f).Bytes) * 8 + } + //Which byte? + b := i / 8 + //Which bit in byte + p := uint(7 - (i - 8*b)) + (*f).Bytes[b] = (*f).Bytes[b] &^ (1 << p) +} + +// IsFlagSet tests if a flag is set in the ASN1 BitString. +func IsFlagSet(f *asn1.BitString, i int) bool { + //Which byte? + b := i / 8 + //Which bit in byte + p := uint(7 - (i - 8*b)) + if (*f).Bytes[b]&(1<= 4294967296 { - str = fmt.Sprintf("S-1-0x%s", hex.EncodeToString(s.IdentifierAuthority[:])) + if i > math.MaxUint32 { + fmt.Fprintf(&strb, "0x%s", hex.EncodeToString(s.IdentifierAuthority[:])) } else { - str = fmt.Sprintf("S-1-%d", i) + fmt.Fprintf(&strb, "%d", i) } for _, sub := range s.SubAuthority { - str = fmt.Sprintf("%s-%d", str, sub) + fmt.Fprintf(&strb, "-%d", sub) } - return str + return strb.String() } diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/user_session_key.go b/vendor/github.com/jcmturner/rpc/v2/mstypes/user_session_key.go similarity index 100% rename from vendor/gopkg.in/jcmturner/rpc.v1/mstypes/user_session_key.go rename to vendor/github.com/jcmturner/rpc/v2/mstypes/user_session_key.go diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/arrays.go b/vendor/github.com/jcmturner/rpc/v2/ndr/arrays.go similarity index 100% rename from vendor/gopkg.in/jcmturner/rpc.v1/ndr/arrays.go rename to vendor/github.com/jcmturner/rpc/v2/ndr/arrays.go diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/decoder.go b/vendor/github.com/jcmturner/rpc/v2/ndr/decoder.go similarity index 100% rename from vendor/gopkg.in/jcmturner/rpc.v1/ndr/decoder.go rename to vendor/github.com/jcmturner/rpc/v2/ndr/decoder.go diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/error.go b/vendor/github.com/jcmturner/rpc/v2/ndr/error.go similarity index 100% rename from vendor/gopkg.in/jcmturner/rpc.v1/ndr/error.go rename to vendor/github.com/jcmturner/rpc/v2/ndr/error.go diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/header.go b/vendor/github.com/jcmturner/rpc/v2/ndr/header.go similarity index 100% rename from vendor/gopkg.in/jcmturner/rpc.v1/ndr/header.go rename to vendor/github.com/jcmturner/rpc/v2/ndr/header.go diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/pipe.go b/vendor/github.com/jcmturner/rpc/v2/ndr/pipe.go similarity index 100% rename from vendor/gopkg.in/jcmturner/rpc.v1/ndr/pipe.go rename to vendor/github.com/jcmturner/rpc/v2/ndr/pipe.go diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/primitives.go b/vendor/github.com/jcmturner/rpc/v2/ndr/primitives.go similarity index 100% rename from vendor/gopkg.in/jcmturner/rpc.v1/ndr/primitives.go rename to 
vendor/github.com/jcmturner/rpc/v2/ndr/primitives.go diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/rawbytes.go b/vendor/github.com/jcmturner/rpc/v2/ndr/rawbytes.go similarity index 100% rename from vendor/gopkg.in/jcmturner/rpc.v1/ndr/rawbytes.go rename to vendor/github.com/jcmturner/rpc/v2/ndr/rawbytes.go diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/strings.go b/vendor/github.com/jcmturner/rpc/v2/ndr/strings.go similarity index 100% rename from vendor/gopkg.in/jcmturner/rpc.v1/ndr/strings.go rename to vendor/github.com/jcmturner/rpc/v2/ndr/strings.go diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/tags.go b/vendor/github.com/jcmturner/rpc/v2/ndr/tags.go similarity index 100% rename from vendor/gopkg.in/jcmturner/rpc.v1/ndr/tags.go rename to vendor/github.com/jcmturner/rpc/v2/ndr/tags.go diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/union.go b/vendor/github.com/jcmturner/rpc/v2/ndr/union.go similarity index 100% rename from vendor/gopkg.in/jcmturner/rpc.v1/ndr/union.go rename to vendor/github.com/jcmturner/rpc/v2/ndr/union.go diff --git a/vendor/github.com/json-iterator/go/.codecov.yml b/vendor/github.com/json-iterator/go/.codecov.yml new file mode 100644 index 000000000..955dc0be5 --- /dev/null +++ b/vendor/github.com/json-iterator/go/.codecov.yml @@ -0,0 +1,3 @@ +ignore: + - "output_tests/.*" + diff --git a/vendor/github.com/json-iterator/go/.gitignore b/vendor/github.com/json-iterator/go/.gitignore new file mode 100644 index 000000000..15556530a --- /dev/null +++ b/vendor/github.com/json-iterator/go/.gitignore @@ -0,0 +1,4 @@ +/vendor +/bug_test.go +/coverage.txt +/.idea diff --git a/vendor/github.com/json-iterator/go/.travis.yml b/vendor/github.com/json-iterator/go/.travis.yml new file mode 100644 index 000000000..449e67cd0 --- /dev/null +++ b/vendor/github.com/json-iterator/go/.travis.yml @@ -0,0 +1,14 @@ +language: go + +go: + - 1.8.x + - 1.x + +before_install: + - go get -t -v ./... + +script: + - ./test.sh + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/json-iterator/go/Gopkg.lock b/vendor/github.com/json-iterator/go/Gopkg.lock new file mode 100644 index 000000000..c8a9fbb38 --- /dev/null +++ b/vendor/github.com/json-iterator/go/Gopkg.lock @@ -0,0 +1,21 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + name = "github.com/modern-go/concurrent" + packages = ["."] + revision = "e0a39a4cb4216ea8db28e22a69f4ec25610d513a" + version = "1.0.0" + +[[projects]] + name = "github.com/modern-go/reflect2" + packages = ["."] + revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" + version = "1.0.1" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "ea54a775e5a354cb015502d2e7aa4b74230fc77e894f34a838b268c25ec8eeb8" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/json-iterator/go/Gopkg.toml b/vendor/github.com/json-iterator/go/Gopkg.toml new file mode 100644 index 000000000..313a0f887 --- /dev/null +++ b/vendor/github.com/json-iterator/go/Gopkg.toml @@ -0,0 +1,26 @@ +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. 
+# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" + +ignored = ["github.com/davecgh/go-spew*","github.com/google/gofuzz*","github.com/stretchr/testify*"] + +[[constraint]] + name = "github.com/modern-go/reflect2" + version = "1.0.1" diff --git a/vendor/github.com/lightstep/tracecontext.go/LICENSE b/vendor/github.com/json-iterator/go/LICENSE similarity index 95% rename from vendor/github.com/lightstep/tracecontext.go/LICENSE rename to vendor/github.com/json-iterator/go/LICENSE index 853b46db1..2cf4f5ab2 100644 --- a/vendor/github.com/lightstep/tracecontext.go/LICENSE +++ b/vendor/github.com/json-iterator/go/LICENSE @@ -1,6 +1,6 @@ -The MIT License (MIT) +MIT License -Copyright (c) 2016 +Copyright (c) 2016 json-iterator Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/github.com/json-iterator/go/README.md b/vendor/github.com/json-iterator/go/README.md new file mode 100644 index 000000000..c589addf9 --- /dev/null +++ b/vendor/github.com/json-iterator/go/README.md @@ -0,0 +1,85 @@ +[![Sourcegraph](https://sourcegraph.com/github.com/json-iterator/go/-/badge.svg)](https://sourcegraph.com/github.com/json-iterator/go?badge) +[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://pkg.go.dev/github.com/json-iterator/go) +[![Build Status](https://travis-ci.org/json-iterator/go.svg?branch=master)](https://travis-ci.org/json-iterator/go) +[![codecov](https://codecov.io/gh/json-iterator/go/branch/master/graph/badge.svg)](https://codecov.io/gh/json-iterator/go) +[![rcard](https://goreportcard.com/badge/github.com/json-iterator/go)](https://goreportcard.com/report/github.com/json-iterator/go) +[![License](http://img.shields.io/badge/license-mit-blue.svg?style=flat-square)](https://raw.githubusercontent.com/json-iterator/go/master/LICENSE) +[![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby) + +A high-performance 100% compatible drop-in replacement of "encoding/json" + +# Benchmark + +![benchmark](http://jsoniter.com/benchmarks/go-benchmark.png) + +Source code: https://github.com/json-iterator/go-benchmark/blob/master/src/github.com/json-iterator/go-benchmark/benchmark_medium_payload_test.go + +Raw Result (easyjson requires static code generation) + +| | ns/op | allocation bytes | allocation times | +| --------------- | ----------- | ---------------- | ---------------- | +| std decode | 35510 ns/op | 1960 B/op | 99 allocs/op | +| easyjson decode | 8499 ns/op | 160 B/op | 4 allocs/op | +| jsoniter decode | 5623 ns/op | 160 B/op | 3 allocs/op | +| std encode | 2213 ns/op | 712 B/op | 5 allocs/op | +| easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op | +| jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op | + +Always benchmark with your own workload. +The result depends heavily on the data input. 
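+For a quick check on your own payload, a minimal `testing` benchmark along these lines can be used (a sketch only: the package name `bench` and `samplePayload` are placeholders for your own test package and data, not part of this repository):
+
+```go
+package bench
+
+import (
+	"encoding/json"
+	"testing"
+
+	jsoniter "github.com/json-iterator/go"
+)
+
+// samplePayload stands in for whatever structure your service actually encodes.
+var samplePayload = map[string]interface{}{
+	"id":   1,
+	"name": "example",
+	"tags": []string{"a", "b"},
+}
+
+// BenchmarkStdMarshal times encoding/json on the sample payload.
+func BenchmarkStdMarshal(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		if _, err := json.Marshal(samplePayload); err != nil {
+			b.Fatal(err)
+		}
+	}
+}
+
+// BenchmarkJsoniterMarshal times jsoniter on the same payload.
+func BenchmarkJsoniterMarshal(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		if _, err := jsoniter.Marshal(samplePayload); err != nil {
+			b.Fatal(err)
+		}
+	}
+}
+```
+
+Run it with `go test -bench=. -benchmem` and compare ns/op and allocs/op for your workload.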
+ +# Usage + +100% compatibility with standard lib + +Replace + +```go +import "encoding/json" +json.Marshal(&data) +``` + +with + +```go +import jsoniter "github.com/json-iterator/go" + +var json = jsoniter.ConfigCompatibleWithStandardLibrary +json.Marshal(&data) +``` + +Replace + +```go +import "encoding/json" +json.Unmarshal(input, &data) +``` + +with + +```go +import jsoniter "github.com/json-iterator/go" + +var json = jsoniter.ConfigCompatibleWithStandardLibrary +json.Unmarshal(input, &data) +``` + +[More documentation](http://jsoniter.com/migrate-from-go-std.html) + +# How to get + +``` +go get github.com/json-iterator/go +``` + +# Contribution Welcomed ! + +Contributors + +- [thockin](https://github.com/thockin) +- [mattn](https://github.com/mattn) +- [cch123](https://github.com/cch123) +- [Oleg Shaldybin](https://github.com/olegshaldybin) +- [Jason Toffaletti](https://github.com/toffaletti) + +Report issue or pull request, or email taowen@gmail.com, or [![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby) diff --git a/vendor/github.com/json-iterator/go/adapter.go b/vendor/github.com/json-iterator/go/adapter.go new file mode 100644 index 000000000..92d2cc4a3 --- /dev/null +++ b/vendor/github.com/json-iterator/go/adapter.go @@ -0,0 +1,150 @@ +package jsoniter + +import ( + "bytes" + "io" +) + +// RawMessage to make replace json with jsoniter +type RawMessage []byte + +// Unmarshal adapts to json/encoding Unmarshal API +// +// Unmarshal parses the JSON-encoded data and stores the result in the value pointed to by v. +// Refer to https://godoc.org/encoding/json#Unmarshal for more information +func Unmarshal(data []byte, v interface{}) error { + return ConfigDefault.Unmarshal(data, v) +} + +// UnmarshalFromString is a convenient method to read from string instead of []byte +func UnmarshalFromString(str string, v interface{}) error { + return ConfigDefault.UnmarshalFromString(str, v) +} + +// Get quick method to get value from deeply nested JSON structure +func Get(data []byte, path ...interface{}) Any { + return ConfigDefault.Get(data, path...) +} + +// Marshal adapts to json/encoding Marshal API +// +// Marshal returns the JSON encoding of v, adapts to json/encoding Marshal API +// Refer to https://godoc.org/encoding/json#Marshal for more information +func Marshal(v interface{}) ([]byte, error) { + return ConfigDefault.Marshal(v) +} + +// MarshalIndent same as json.MarshalIndent. Prefix is not supported. +func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + return ConfigDefault.MarshalIndent(v, prefix, indent) +} + +// MarshalToString convenient method to write as string instead of []byte +func MarshalToString(v interface{}) (string, error) { + return ConfigDefault.MarshalToString(v) +} + +// NewDecoder adapts to json/stream NewDecoder API. +// +// NewDecoder returns a new decoder that reads from r. +// +// Instead of a json/encoding Decoder, an Decoder is returned +// Refer to https://godoc.org/encoding/json#NewDecoder for more information +func NewDecoder(reader io.Reader) *Decoder { + return ConfigDefault.NewDecoder(reader) +} + +// Decoder reads and decodes JSON values from an input stream. 
+// Decoder provides identical APIs with json/stream Decoder (Token() and UseNumber() are in progress) +type Decoder struct { + iter *Iterator +} + +// Decode decode JSON into interface{} +func (adapter *Decoder) Decode(obj interface{}) error { + if adapter.iter.head == adapter.iter.tail && adapter.iter.reader != nil { + if !adapter.iter.loadMore() { + return io.EOF + } + } + adapter.iter.ReadVal(obj) + err := adapter.iter.Error + if err == io.EOF { + return nil + } + return adapter.iter.Error +} + +// More is there more? +func (adapter *Decoder) More() bool { + iter := adapter.iter + if iter.Error != nil { + return false + } + c := iter.nextToken() + if c == 0 { + return false + } + iter.unreadByte() + return c != ']' && c != '}' +} + +// Buffered remaining buffer +func (adapter *Decoder) Buffered() io.Reader { + remaining := adapter.iter.buf[adapter.iter.head:adapter.iter.tail] + return bytes.NewReader(remaining) +} + +// UseNumber causes the Decoder to unmarshal a number into an interface{} as a +// Number instead of as a float64. +func (adapter *Decoder) UseNumber() { + cfg := adapter.iter.cfg.configBeforeFrozen + cfg.UseNumber = true + adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions) +} + +// DisallowUnknownFields causes the Decoder to return an error when the destination +// is a struct and the input contains object keys which do not match any +// non-ignored, exported fields in the destination. +func (adapter *Decoder) DisallowUnknownFields() { + cfg := adapter.iter.cfg.configBeforeFrozen + cfg.DisallowUnknownFields = true + adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions) +} + +// NewEncoder same as json.NewEncoder +func NewEncoder(writer io.Writer) *Encoder { + return ConfigDefault.NewEncoder(writer) +} + +// Encoder same as json.Encoder +type Encoder struct { + stream *Stream +} + +// Encode encode interface{} as JSON to io.Writer +func (adapter *Encoder) Encode(val interface{}) error { + adapter.stream.WriteVal(val) + adapter.stream.WriteRaw("\n") + adapter.stream.Flush() + return adapter.stream.Error +} + +// SetIndent set the indention. Prefix is not supported +func (adapter *Encoder) SetIndent(prefix, indent string) { + config := adapter.stream.cfg.configBeforeFrozen + config.IndentionStep = len(indent) + adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions) +} + +// SetEscapeHTML escape html by default, set to false to disable +func (adapter *Encoder) SetEscapeHTML(escapeHTML bool) { + config := adapter.stream.cfg.configBeforeFrozen + config.EscapeHTML = escapeHTML + adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions) +} + +// Valid reports whether data is a valid JSON encoding. +func Valid(data []byte) bool { + return ConfigDefault.Valid(data) +} diff --git a/vendor/github.com/json-iterator/go/any.go b/vendor/github.com/json-iterator/go/any.go new file mode 100644 index 000000000..f6b8aeab0 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any.go @@ -0,0 +1,325 @@ +package jsoniter + +import ( + "errors" + "fmt" + "github.com/modern-go/reflect2" + "io" + "reflect" + "strconv" + "unsafe" +) + +// Any generic object representation. +// The lazy json implementation holds []byte and parse lazily. 
+type Any interface { + LastError() error + ValueType() ValueType + MustBeValid() Any + ToBool() bool + ToInt() int + ToInt32() int32 + ToInt64() int64 + ToUint() uint + ToUint32() uint32 + ToUint64() uint64 + ToFloat32() float32 + ToFloat64() float64 + ToString() string + ToVal(val interface{}) + Get(path ...interface{}) Any + Size() int + Keys() []string + GetInterface() interface{} + WriteTo(stream *Stream) +} + +type baseAny struct{} + +func (any *baseAny) Get(path ...interface{}) Any { + return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)} +} + +func (any *baseAny) Size() int { + return 0 +} + +func (any *baseAny) Keys() []string { + return []string{} +} + +func (any *baseAny) ToVal(obj interface{}) { + panic("not implemented") +} + +// WrapInt32 turn int32 into Any interface +func WrapInt32(val int32) Any { + return &int32Any{baseAny{}, val} +} + +// WrapInt64 turn int64 into Any interface +func WrapInt64(val int64) Any { + return &int64Any{baseAny{}, val} +} + +// WrapUint32 turn uint32 into Any interface +func WrapUint32(val uint32) Any { + return &uint32Any{baseAny{}, val} +} + +// WrapUint64 turn uint64 into Any interface +func WrapUint64(val uint64) Any { + return &uint64Any{baseAny{}, val} +} + +// WrapFloat64 turn float64 into Any interface +func WrapFloat64(val float64) Any { + return &floatAny{baseAny{}, val} +} + +// WrapString turn string into Any interface +func WrapString(val string) Any { + return &stringAny{baseAny{}, val} +} + +// Wrap turn a go object into Any interface +func Wrap(val interface{}) Any { + if val == nil { + return &nilAny{} + } + asAny, isAny := val.(Any) + if isAny { + return asAny + } + typ := reflect2.TypeOf(val) + switch typ.Kind() { + case reflect.Slice: + return wrapArray(val) + case reflect.Struct: + return wrapStruct(val) + case reflect.Map: + return wrapMap(val) + case reflect.String: + return WrapString(val.(string)) + case reflect.Int: + if strconv.IntSize == 32 { + return WrapInt32(int32(val.(int))) + } + return WrapInt64(int64(val.(int))) + case reflect.Int8: + return WrapInt32(int32(val.(int8))) + case reflect.Int16: + return WrapInt32(int32(val.(int16))) + case reflect.Int32: + return WrapInt32(val.(int32)) + case reflect.Int64: + return WrapInt64(val.(int64)) + case reflect.Uint: + if strconv.IntSize == 32 { + return WrapUint32(uint32(val.(uint))) + } + return WrapUint64(uint64(val.(uint))) + case reflect.Uintptr: + if ptrSize == 32 { + return WrapUint32(uint32(val.(uintptr))) + } + return WrapUint64(uint64(val.(uintptr))) + case reflect.Uint8: + return WrapUint32(uint32(val.(uint8))) + case reflect.Uint16: + return WrapUint32(uint32(val.(uint16))) + case reflect.Uint32: + return WrapUint32(uint32(val.(uint32))) + case reflect.Uint64: + return WrapUint64(val.(uint64)) + case reflect.Float32: + return WrapFloat64(float64(val.(float32))) + case reflect.Float64: + return WrapFloat64(val.(float64)) + case reflect.Bool: + if val.(bool) == true { + return &trueAny{} + } + return &falseAny{} + } + return &invalidAny{baseAny{}, fmt.Errorf("unsupported type: %v", typ)} +} + +// ReadAny read next JSON element as an Any object. It is a better json.RawMessage. 
+func (iter *Iterator) ReadAny() Any { + return iter.readAny() +} + +func (iter *Iterator) readAny() Any { + c := iter.nextToken() + switch c { + case '"': + iter.unreadByte() + return &stringAny{baseAny{}, iter.ReadString()} + case 'n': + iter.skipThreeBytes('u', 'l', 'l') // null + return &nilAny{} + case 't': + iter.skipThreeBytes('r', 'u', 'e') // true + return &trueAny{} + case 'f': + iter.skipFourBytes('a', 'l', 's', 'e') // false + return &falseAny{} + case '{': + return iter.readObjectAny() + case '[': + return iter.readArrayAny() + case '-': + return iter.readNumberAny(false) + case 0: + return &invalidAny{baseAny{}, errors.New("input is empty")} + default: + return iter.readNumberAny(true) + } +} + +func (iter *Iterator) readNumberAny(positive bool) Any { + iter.startCapture(iter.head - 1) + iter.skipNumber() + lazyBuf := iter.stopCapture() + return &numberLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func (iter *Iterator) readObjectAny() Any { + iter.startCapture(iter.head - 1) + iter.skipObject() + lazyBuf := iter.stopCapture() + return &objectLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func (iter *Iterator) readArrayAny() Any { + iter.startCapture(iter.head - 1) + iter.skipArray() + lazyBuf := iter.stopCapture() + return &arrayLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func locateObjectField(iter *Iterator, target string) []byte { + var found []byte + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + if field == target { + found = iter.SkipAndReturnBytes() + return false + } + iter.Skip() + return true + }) + return found +} + +func locateArrayElement(iter *Iterator, target int) []byte { + var found []byte + n := 0 + iter.ReadArrayCB(func(iter *Iterator) bool { + if n == target { + found = iter.SkipAndReturnBytes() + return false + } + iter.Skip() + n++ + return true + }) + return found +} + +func locatePath(iter *Iterator, path []interface{}) Any { + for i, pathKeyObj := range path { + switch pathKey := pathKeyObj.(type) { + case string: + valueBytes := locateObjectField(iter, pathKey) + if valueBytes == nil { + return newInvalidAny(path[i:]) + } + iter.ResetBytes(valueBytes) + case int: + valueBytes := locateArrayElement(iter, pathKey) + if valueBytes == nil { + return newInvalidAny(path[i:]) + } + iter.ResetBytes(valueBytes) + case int32: + if '*' == pathKey { + return iter.readAny().Get(path[i:]...) 
+ } + return newInvalidAny(path[i:]) + default: + return newInvalidAny(path[i:]) + } + } + if iter.Error != nil && iter.Error != io.EOF { + return &invalidAny{baseAny{}, iter.Error} + } + return iter.readAny() +} + +var anyType = reflect2.TypeOfPtr((*Any)(nil)).Elem() + +func createDecoderOfAny(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ == anyType { + return &directAnyCodec{} + } + if typ.Implements(anyType) { + return &anyCodec{ + valType: typ, + } + } + return nil +} + +func createEncoderOfAny(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ == anyType { + return &directAnyCodec{} + } + if typ.Implements(anyType) { + return &anyCodec{ + valType: typ, + } + } + return nil +} + +type anyCodec struct { + valType reflect2.Type +} + +func (codec *anyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + panic("not implemented") +} + +func (codec *anyCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := codec.valType.UnsafeIndirect(ptr) + any := obj.(Any) + any.WriteTo(stream) +} + +func (codec *anyCodec) IsEmpty(ptr unsafe.Pointer) bool { + obj := codec.valType.UnsafeIndirect(ptr) + any := obj.(Any) + return any.Size() == 0 +} + +type directAnyCodec struct { +} + +func (codec *directAnyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *(*Any)(ptr) = iter.readAny() +} + +func (codec *directAnyCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + any := *(*Any)(ptr) + if any == nil { + stream.WriteNil() + return + } + any.WriteTo(stream) +} + +func (codec *directAnyCodec) IsEmpty(ptr unsafe.Pointer) bool { + any := *(*Any)(ptr) + return any.Size() == 0 +} diff --git a/vendor/github.com/json-iterator/go/any_array.go b/vendor/github.com/json-iterator/go/any_array.go new file mode 100644 index 000000000..0449e9aa4 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_array.go @@ -0,0 +1,278 @@ +package jsoniter + +import ( + "reflect" + "unsafe" +) + +type arrayLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + err error +} + +func (any *arrayLazyAny) ValueType() ValueType { + return ArrayValue +} + +func (any *arrayLazyAny) MustBeValid() Any { + return any +} + +func (any *arrayLazyAny) LastError() error { + return any.err +} + +func (any *arrayLazyAny) ToBool() bool { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.ReadArray() +} + +func (any *arrayLazyAny) ToInt() int { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToInt32() int32 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToInt64() int64 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToUint() uint { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToUint32() uint32 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToUint64() uint64 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToFloat32() float32 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToFloat64() float64 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *arrayLazyAny) ToVal(val interface{}) { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadVal(val) +} + +func (any *arrayLazyAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case int: + iter := any.cfg.BorrowIterator(any.buf) 
+ defer any.cfg.ReturnIterator(iter) + valueBytes := locateArrayElement(iter, firstPath) + if valueBytes == nil { + return newInvalidAny(path) + } + iter.ResetBytes(valueBytes) + return locatePath(iter, path[1:]) + case int32: + if '*' == firstPath { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + arr := make([]Any, 0) + iter.ReadArrayCB(func(iter *Iterator) bool { + found := iter.readAny().Get(path[1:]...) + if found.ValueType() != InvalidValue { + arr = append(arr, found) + } + return true + }) + return wrapArray(arr) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *arrayLazyAny) Size() int { + size := 0 + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadArrayCB(func(iter *Iterator) bool { + size++ + iter.Skip() + return true + }) + return size +} + +func (any *arrayLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *arrayLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} + +type arrayAny struct { + baseAny + val reflect.Value +} + +func wrapArray(val interface{}) *arrayAny { + return &arrayAny{baseAny{}, reflect.ValueOf(val)} +} + +func (any *arrayAny) ValueType() ValueType { + return ArrayValue +} + +func (any *arrayAny) MustBeValid() Any { + return any +} + +func (any *arrayAny) LastError() error { + return nil +} + +func (any *arrayAny) ToBool() bool { + return any.val.Len() != 0 +} + +func (any *arrayAny) ToInt() int { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToInt32() int32 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToInt64() int64 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToUint() uint { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToUint32() uint32 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToUint64() uint64 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToFloat32() float32 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToFloat64() float64 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToString() string { + str, _ := MarshalToString(any.val.Interface()) + return str +} + +func (any *arrayAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case int: + if firstPath < 0 || firstPath >= any.val.Len() { + return newInvalidAny(path) + } + return Wrap(any.val.Index(firstPath).Interface()) + case int32: + if '*' == firstPath { + mappedAll := make([]Any, 0) + for i := 0; i < any.val.Len(); i++ { + mapped := Wrap(any.val.Index(i).Interface()).Get(path[1:]...) 
+ if mapped.ValueType() != InvalidValue { + mappedAll = append(mappedAll, mapped) + } + } + return wrapArray(mappedAll) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *arrayAny) Size() int { + return any.val.Len() +} + +func (any *arrayAny) WriteTo(stream *Stream) { + stream.WriteVal(any.val) +} + +func (any *arrayAny) GetInterface() interface{} { + return any.val.Interface() +} diff --git a/vendor/github.com/json-iterator/go/any_bool.go b/vendor/github.com/json-iterator/go/any_bool.go new file mode 100644 index 000000000..9452324af --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_bool.go @@ -0,0 +1,137 @@ +package jsoniter + +type trueAny struct { + baseAny +} + +func (any *trueAny) LastError() error { + return nil +} + +func (any *trueAny) ToBool() bool { + return true +} + +func (any *trueAny) ToInt() int { + return 1 +} + +func (any *trueAny) ToInt32() int32 { + return 1 +} + +func (any *trueAny) ToInt64() int64 { + return 1 +} + +func (any *trueAny) ToUint() uint { + return 1 +} + +func (any *trueAny) ToUint32() uint32 { + return 1 +} + +func (any *trueAny) ToUint64() uint64 { + return 1 +} + +func (any *trueAny) ToFloat32() float32 { + return 1 +} + +func (any *trueAny) ToFloat64() float64 { + return 1 +} + +func (any *trueAny) ToString() string { + return "true" +} + +func (any *trueAny) WriteTo(stream *Stream) { + stream.WriteTrue() +} + +func (any *trueAny) Parse() *Iterator { + return nil +} + +func (any *trueAny) GetInterface() interface{} { + return true +} + +func (any *trueAny) ValueType() ValueType { + return BoolValue +} + +func (any *trueAny) MustBeValid() Any { + return any +} + +type falseAny struct { + baseAny +} + +func (any *falseAny) LastError() error { + return nil +} + +func (any *falseAny) ToBool() bool { + return false +} + +func (any *falseAny) ToInt() int { + return 0 +} + +func (any *falseAny) ToInt32() int32 { + return 0 +} + +func (any *falseAny) ToInt64() int64 { + return 0 +} + +func (any *falseAny) ToUint() uint { + return 0 +} + +func (any *falseAny) ToUint32() uint32 { + return 0 +} + +func (any *falseAny) ToUint64() uint64 { + return 0 +} + +func (any *falseAny) ToFloat32() float32 { + return 0 +} + +func (any *falseAny) ToFloat64() float64 { + return 0 +} + +func (any *falseAny) ToString() string { + return "false" +} + +func (any *falseAny) WriteTo(stream *Stream) { + stream.WriteFalse() +} + +func (any *falseAny) Parse() *Iterator { + return nil +} + +func (any *falseAny) GetInterface() interface{} { + return false +} + +func (any *falseAny) ValueType() ValueType { + return BoolValue +} + +func (any *falseAny) MustBeValid() Any { + return any +} diff --git a/vendor/github.com/json-iterator/go/any_float.go b/vendor/github.com/json-iterator/go/any_float.go new file mode 100644 index 000000000..35fdb0949 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_float.go @@ -0,0 +1,83 @@ +package jsoniter + +import ( + "strconv" +) + +type floatAny struct { + baseAny + val float64 +} + +func (any *floatAny) Parse() *Iterator { + return nil +} + +func (any *floatAny) ValueType() ValueType { + return NumberValue +} + +func (any *floatAny) MustBeValid() Any { + return any +} + +func (any *floatAny) LastError() error { + return nil +} + +func (any *floatAny) ToBool() bool { + return any.ToFloat64() != 0 +} + +func (any *floatAny) ToInt() int { + return int(any.val) +} + +func (any *floatAny) ToInt32() int32 { + return int32(any.val) +} + +func (any *floatAny) ToInt64() int64 { + return int64(any.val) 
+} + +func (any *floatAny) ToUint() uint { + if any.val > 0 { + return uint(any.val) + } + return 0 +} + +func (any *floatAny) ToUint32() uint32 { + if any.val > 0 { + return uint32(any.val) + } + return 0 +} + +func (any *floatAny) ToUint64() uint64 { + if any.val > 0 { + return uint64(any.val) + } + return 0 +} + +func (any *floatAny) ToFloat32() float32 { + return float32(any.val) +} + +func (any *floatAny) ToFloat64() float64 { + return any.val +} + +func (any *floatAny) ToString() string { + return strconv.FormatFloat(any.val, 'E', -1, 64) +} + +func (any *floatAny) WriteTo(stream *Stream) { + stream.WriteFloat64(any.val) +} + +func (any *floatAny) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/any_int32.go b/vendor/github.com/json-iterator/go/any_int32.go new file mode 100644 index 000000000..1b56f3991 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_int32.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type int32Any struct { + baseAny + val int32 +} + +func (any *int32Any) LastError() error { + return nil +} + +func (any *int32Any) ValueType() ValueType { + return NumberValue +} + +func (any *int32Any) MustBeValid() Any { + return any +} + +func (any *int32Any) ToBool() bool { + return any.val != 0 +} + +func (any *int32Any) ToInt() int { + return int(any.val) +} + +func (any *int32Any) ToInt32() int32 { + return any.val +} + +func (any *int32Any) ToInt64() int64 { + return int64(any.val) +} + +func (any *int32Any) ToUint() uint { + return uint(any.val) +} + +func (any *int32Any) ToUint32() uint32 { + return uint32(any.val) +} + +func (any *int32Any) ToUint64() uint64 { + return uint64(any.val) +} + +func (any *int32Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *int32Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *int32Any) ToString() string { + return strconv.FormatInt(int64(any.val), 10) +} + +func (any *int32Any) WriteTo(stream *Stream) { + stream.WriteInt32(any.val) +} + +func (any *int32Any) Parse() *Iterator { + return nil +} + +func (any *int32Any) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/any_int64.go b/vendor/github.com/json-iterator/go/any_int64.go new file mode 100644 index 000000000..c440d72b6 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_int64.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type int64Any struct { + baseAny + val int64 +} + +func (any *int64Any) LastError() error { + return nil +} + +func (any *int64Any) ValueType() ValueType { + return NumberValue +} + +func (any *int64Any) MustBeValid() Any { + return any +} + +func (any *int64Any) ToBool() bool { + return any.val != 0 +} + +func (any *int64Any) ToInt() int { + return int(any.val) +} + +func (any *int64Any) ToInt32() int32 { + return int32(any.val) +} + +func (any *int64Any) ToInt64() int64 { + return any.val +} + +func (any *int64Any) ToUint() uint { + return uint(any.val) +} + +func (any *int64Any) ToUint32() uint32 { + return uint32(any.val) +} + +func (any *int64Any) ToUint64() uint64 { + return uint64(any.val) +} + +func (any *int64Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *int64Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *int64Any) ToString() string { + return strconv.FormatInt(any.val, 10) +} + +func (any *int64Any) WriteTo(stream *Stream) { + stream.WriteInt64(any.val) +} + +func (any *int64Any) Parse() *Iterator { + return nil +} + 
+func (any *int64Any) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/any_invalid.go b/vendor/github.com/json-iterator/go/any_invalid.go new file mode 100644 index 000000000..1d859eac3 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_invalid.go @@ -0,0 +1,82 @@ +package jsoniter + +import "fmt" + +type invalidAny struct { + baseAny + err error +} + +func newInvalidAny(path []interface{}) *invalidAny { + return &invalidAny{baseAny{}, fmt.Errorf("%v not found", path)} +} + +func (any *invalidAny) LastError() error { + return any.err +} + +func (any *invalidAny) ValueType() ValueType { + return InvalidValue +} + +func (any *invalidAny) MustBeValid() Any { + panic(any.err) +} + +func (any *invalidAny) ToBool() bool { + return false +} + +func (any *invalidAny) ToInt() int { + return 0 +} + +func (any *invalidAny) ToInt32() int32 { + return 0 +} + +func (any *invalidAny) ToInt64() int64 { + return 0 +} + +func (any *invalidAny) ToUint() uint { + return 0 +} + +func (any *invalidAny) ToUint32() uint32 { + return 0 +} + +func (any *invalidAny) ToUint64() uint64 { + return 0 +} + +func (any *invalidAny) ToFloat32() float32 { + return 0 +} + +func (any *invalidAny) ToFloat64() float64 { + return 0 +} + +func (any *invalidAny) ToString() string { + return "" +} + +func (any *invalidAny) WriteTo(stream *Stream) { +} + +func (any *invalidAny) Get(path ...interface{}) Any { + if any.err == nil { + return &invalidAny{baseAny{}, fmt.Errorf("get %v from invalid", path)} + } + return &invalidAny{baseAny{}, fmt.Errorf("%v, get %v from invalid", any.err, path)} +} + +func (any *invalidAny) Parse() *Iterator { + return nil +} + +func (any *invalidAny) GetInterface() interface{} { + return nil +} diff --git a/vendor/github.com/json-iterator/go/any_nil.go b/vendor/github.com/json-iterator/go/any_nil.go new file mode 100644 index 000000000..d04cb54c1 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_nil.go @@ -0,0 +1,69 @@ +package jsoniter + +type nilAny struct { + baseAny +} + +func (any *nilAny) LastError() error { + return nil +} + +func (any *nilAny) ValueType() ValueType { + return NilValue +} + +func (any *nilAny) MustBeValid() Any { + return any +} + +func (any *nilAny) ToBool() bool { + return false +} + +func (any *nilAny) ToInt() int { + return 0 +} + +func (any *nilAny) ToInt32() int32 { + return 0 +} + +func (any *nilAny) ToInt64() int64 { + return 0 +} + +func (any *nilAny) ToUint() uint { + return 0 +} + +func (any *nilAny) ToUint32() uint32 { + return 0 +} + +func (any *nilAny) ToUint64() uint64 { + return 0 +} + +func (any *nilAny) ToFloat32() float32 { + return 0 +} + +func (any *nilAny) ToFloat64() float64 { + return 0 +} + +func (any *nilAny) ToString() string { + return "" +} + +func (any *nilAny) WriteTo(stream *Stream) { + stream.WriteNil() +} + +func (any *nilAny) Parse() *Iterator { + return nil +} + +func (any *nilAny) GetInterface() interface{} { + return nil +} diff --git a/vendor/github.com/json-iterator/go/any_number.go b/vendor/github.com/json-iterator/go/any_number.go new file mode 100644 index 000000000..9d1e901a6 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_number.go @@ -0,0 +1,123 @@ +package jsoniter + +import ( + "io" + "unsafe" +) + +type numberLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + err error +} + +func (any *numberLazyAny) ValueType() ValueType { + return NumberValue +} + +func (any *numberLazyAny) MustBeValid() Any { + return any +} + +func (any *numberLazyAny) 
LastError() error { + return any.err +} + +func (any *numberLazyAny) ToBool() bool { + return any.ToFloat64() != 0 +} + +func (any *numberLazyAny) ToInt() int { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToInt32() int32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt32() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToInt64() int64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt64() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToUint() uint { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToUint32() uint32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint32() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToUint64() uint64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint64() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToFloat32() float32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadFloat32() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToFloat64() float64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadFloat64() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *numberLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *numberLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} diff --git a/vendor/github.com/json-iterator/go/any_object.go b/vendor/github.com/json-iterator/go/any_object.go new file mode 100644 index 000000000..c44ef5c98 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_object.go @@ -0,0 +1,374 @@ +package jsoniter + +import ( + "reflect" + "unsafe" +) + +type objectLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + err error +} + +func (any *objectLazyAny) ValueType() ValueType { + return ObjectValue +} + +func (any *objectLazyAny) MustBeValid() Any { + return any +} + +func (any *objectLazyAny) LastError() error { + return any.err +} + +func (any *objectLazyAny) ToBool() bool { + return true +} + +func (any *objectLazyAny) ToInt() int { + return 0 +} + +func (any *objectLazyAny) ToInt32() int32 { + return 0 +} + +func (any *objectLazyAny) ToInt64() int64 { + return 0 +} + +func (any *objectLazyAny) ToUint() uint { + return 0 +} + +func (any *objectLazyAny) ToUint32() uint32 { + return 0 +} + +func (any *objectLazyAny) ToUint64() uint64 { + return 0 +} + +func (any *objectLazyAny) ToFloat32() float32 { + return 0 +} + +func (any *objectLazyAny) 
ToFloat64() float64 { + return 0 +} + +func (any *objectLazyAny) ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *objectLazyAny) ToVal(obj interface{}) { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadVal(obj) +} + +func (any *objectLazyAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case string: + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + valueBytes := locateObjectField(iter, firstPath) + if valueBytes == nil { + return newInvalidAny(path) + } + iter.ResetBytes(valueBytes) + return locatePath(iter, path[1:]) + case int32: + if '*' == firstPath { + mappedAll := map[string]Any{} + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadMapCB(func(iter *Iterator, field string) bool { + mapped := locatePath(iter, path[1:]) + if mapped.ValueType() != InvalidValue { + mappedAll[field] = mapped + } + return true + }) + return wrapMap(mappedAll) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *objectLazyAny) Keys() []string { + keys := []string{} + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadMapCB(func(iter *Iterator, field string) bool { + iter.Skip() + keys = append(keys, field) + return true + }) + return keys +} + +func (any *objectLazyAny) Size() int { + size := 0 + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + iter.Skip() + size++ + return true + }) + return size +} + +func (any *objectLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *objectLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} + +type objectAny struct { + baseAny + err error + val reflect.Value +} + +func wrapStruct(val interface{}) *objectAny { + return &objectAny{baseAny{}, nil, reflect.ValueOf(val)} +} + +func (any *objectAny) ValueType() ValueType { + return ObjectValue +} + +func (any *objectAny) MustBeValid() Any { + return any +} + +func (any *objectAny) Parse() *Iterator { + return nil +} + +func (any *objectAny) LastError() error { + return any.err +} + +func (any *objectAny) ToBool() bool { + return any.val.NumField() != 0 +} + +func (any *objectAny) ToInt() int { + return 0 +} + +func (any *objectAny) ToInt32() int32 { + return 0 +} + +func (any *objectAny) ToInt64() int64 { + return 0 +} + +func (any *objectAny) ToUint() uint { + return 0 +} + +func (any *objectAny) ToUint32() uint32 { + return 0 +} + +func (any *objectAny) ToUint64() uint64 { + return 0 +} + +func (any *objectAny) ToFloat32() float32 { + return 0 +} + +func (any *objectAny) ToFloat64() float64 { + return 0 +} + +func (any *objectAny) ToString() string { + str, err := MarshalToString(any.val.Interface()) + any.err = err + return str +} + +func (any *objectAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case string: + field := any.val.FieldByName(firstPath) + if !field.IsValid() { + return newInvalidAny(path) + } + return Wrap(field.Interface()) + case int32: + if '*' == firstPath { + mappedAll := map[string]Any{} + for i := 0; i < any.val.NumField(); i++ { + field := any.val.Field(i) + if field.CanInterface() { + mapped := Wrap(field.Interface()).Get(path[1:]...) 
+ if mapped.ValueType() != InvalidValue { + mappedAll[any.val.Type().Field(i).Name] = mapped + } + } + } + return wrapMap(mappedAll) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *objectAny) Keys() []string { + keys := make([]string, 0, any.val.NumField()) + for i := 0; i < any.val.NumField(); i++ { + keys = append(keys, any.val.Type().Field(i).Name) + } + return keys +} + +func (any *objectAny) Size() int { + return any.val.NumField() +} + +func (any *objectAny) WriteTo(stream *Stream) { + stream.WriteVal(any.val) +} + +func (any *objectAny) GetInterface() interface{} { + return any.val.Interface() +} + +type mapAny struct { + baseAny + err error + val reflect.Value +} + +func wrapMap(val interface{}) *mapAny { + return &mapAny{baseAny{}, nil, reflect.ValueOf(val)} +} + +func (any *mapAny) ValueType() ValueType { + return ObjectValue +} + +func (any *mapAny) MustBeValid() Any { + return any +} + +func (any *mapAny) Parse() *Iterator { + return nil +} + +func (any *mapAny) LastError() error { + return any.err +} + +func (any *mapAny) ToBool() bool { + return true +} + +func (any *mapAny) ToInt() int { + return 0 +} + +func (any *mapAny) ToInt32() int32 { + return 0 +} + +func (any *mapAny) ToInt64() int64 { + return 0 +} + +func (any *mapAny) ToUint() uint { + return 0 +} + +func (any *mapAny) ToUint32() uint32 { + return 0 +} + +func (any *mapAny) ToUint64() uint64 { + return 0 +} + +func (any *mapAny) ToFloat32() float32 { + return 0 +} + +func (any *mapAny) ToFloat64() float64 { + return 0 +} + +func (any *mapAny) ToString() string { + str, err := MarshalToString(any.val.Interface()) + any.err = err + return str +} + +func (any *mapAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case int32: + if '*' == firstPath { + mappedAll := map[string]Any{} + for _, key := range any.val.MapKeys() { + keyAsStr := key.String() + element := Wrap(any.val.MapIndex(key).Interface()) + mapped := element.Get(path[1:]...) 
+ if mapped.ValueType() != InvalidValue { + mappedAll[keyAsStr] = mapped + } + } + return wrapMap(mappedAll) + } + return newInvalidAny(path) + default: + value := any.val.MapIndex(reflect.ValueOf(firstPath)) + if !value.IsValid() { + return newInvalidAny(path) + } + return Wrap(value.Interface()) + } +} + +func (any *mapAny) Keys() []string { + keys := make([]string, 0, any.val.Len()) + for _, key := range any.val.MapKeys() { + keys = append(keys, key.String()) + } + return keys +} + +func (any *mapAny) Size() int { + return any.val.Len() +} + +func (any *mapAny) WriteTo(stream *Stream) { + stream.WriteVal(any.val) +} + +func (any *mapAny) GetInterface() interface{} { + return any.val.Interface() +} diff --git a/vendor/github.com/json-iterator/go/any_str.go b/vendor/github.com/json-iterator/go/any_str.go new file mode 100644 index 000000000..1f12f6612 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_str.go @@ -0,0 +1,166 @@ +package jsoniter + +import ( + "fmt" + "strconv" +) + +type stringAny struct { + baseAny + val string +} + +func (any *stringAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)} +} + +func (any *stringAny) Parse() *Iterator { + return nil +} + +func (any *stringAny) ValueType() ValueType { + return StringValue +} + +func (any *stringAny) MustBeValid() Any { + return any +} + +func (any *stringAny) LastError() error { + return nil +} + +func (any *stringAny) ToBool() bool { + str := any.ToString() + if str == "0" { + return false + } + for _, c := range str { + switch c { + case ' ', '\n', '\r', '\t': + default: + return true + } + } + return false +} + +func (any *stringAny) ToInt() int { + return int(any.ToInt64()) + +} + +func (any *stringAny) ToInt32() int32 { + return int32(any.ToInt64()) +} + +func (any *stringAny) ToInt64() int64 { + if any.val == "" { + return 0 + } + + flag := 1 + startPos := 0 + if any.val[0] == '+' || any.val[0] == '-' { + startPos = 1 + } + + if any.val[0] == '-' { + flag = -1 + } + + endPos := startPos + for i := startPos; i < len(any.val); i++ { + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + break + } + } + parsed, _ := strconv.ParseInt(any.val[startPos:endPos], 10, 64) + return int64(flag) * parsed +} + +func (any *stringAny) ToUint() uint { + return uint(any.ToUint64()) +} + +func (any *stringAny) ToUint32() uint32 { + return uint32(any.ToUint64()) +} + +func (any *stringAny) ToUint64() uint64 { + if any.val == "" { + return 0 + } + + startPos := 0 + + if any.val[0] == '-' { + return 0 + } + if any.val[0] == '+' { + startPos = 1 + } + + endPos := startPos + for i := startPos; i < len(any.val); i++ { + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + break + } + } + parsed, _ := strconv.ParseUint(any.val[startPos:endPos], 10, 64) + return parsed +} + +func (any *stringAny) ToFloat32() float32 { + return float32(any.ToFloat64()) +} + +func (any *stringAny) ToFloat64() float64 { + if len(any.val) == 0 { + return 0 + } + + // first char invalid + if any.val[0] != '+' && any.val[0] != '-' && (any.val[0] > '9' || any.val[0] < '0') { + return 0 + } + + // extract valid num expression from string + // eg 123true => 123, -12.12xxa => -12.12 + endPos := 1 + for i := 1; i < len(any.val); i++ { + if any.val[i] == '.' 
|| any.val[i] == 'e' || any.val[i] == 'E' || any.val[i] == '+' || any.val[i] == '-' { + endPos = i + 1 + continue + } + + // end position is the first char which is not digit + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + endPos = i + break + } + } + parsed, _ := strconv.ParseFloat(any.val[:endPos], 64) + return parsed +} + +func (any *stringAny) ToString() string { + return any.val +} + +func (any *stringAny) WriteTo(stream *Stream) { + stream.WriteString(any.val) +} + +func (any *stringAny) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/any_uint32.go b/vendor/github.com/json-iterator/go/any_uint32.go new file mode 100644 index 000000000..656bbd33d --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_uint32.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type uint32Any struct { + baseAny + val uint32 +} + +func (any *uint32Any) LastError() error { + return nil +} + +func (any *uint32Any) ValueType() ValueType { + return NumberValue +} + +func (any *uint32Any) MustBeValid() Any { + return any +} + +func (any *uint32Any) ToBool() bool { + return any.val != 0 +} + +func (any *uint32Any) ToInt() int { + return int(any.val) +} + +func (any *uint32Any) ToInt32() int32 { + return int32(any.val) +} + +func (any *uint32Any) ToInt64() int64 { + return int64(any.val) +} + +func (any *uint32Any) ToUint() uint { + return uint(any.val) +} + +func (any *uint32Any) ToUint32() uint32 { + return any.val +} + +func (any *uint32Any) ToUint64() uint64 { + return uint64(any.val) +} + +func (any *uint32Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *uint32Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *uint32Any) ToString() string { + return strconv.FormatInt(int64(any.val), 10) +} + +func (any *uint32Any) WriteTo(stream *Stream) { + stream.WriteUint32(any.val) +} + +func (any *uint32Any) Parse() *Iterator { + return nil +} + +func (any *uint32Any) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/any_uint64.go b/vendor/github.com/json-iterator/go/any_uint64.go new file mode 100644 index 000000000..7df2fce33 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_uint64.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type uint64Any struct { + baseAny + val uint64 +} + +func (any *uint64Any) LastError() error { + return nil +} + +func (any *uint64Any) ValueType() ValueType { + return NumberValue +} + +func (any *uint64Any) MustBeValid() Any { + return any +} + +func (any *uint64Any) ToBool() bool { + return any.val != 0 +} + +func (any *uint64Any) ToInt() int { + return int(any.val) +} + +func (any *uint64Any) ToInt32() int32 { + return int32(any.val) +} + +func (any *uint64Any) ToInt64() int64 { + return int64(any.val) +} + +func (any *uint64Any) ToUint() uint { + return uint(any.val) +} + +func (any *uint64Any) ToUint32() uint32 { + return uint32(any.val) +} + +func (any *uint64Any) ToUint64() uint64 { + return any.val +} + +func (any *uint64Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *uint64Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *uint64Any) ToString() string { + return strconv.FormatUint(any.val, 10) +} + +func (any *uint64Any) WriteTo(stream *Stream) { + stream.WriteUint64(any.val) +} + +func (any *uint64Any) Parse() *Iterator { + return nil +} + +func (any *uint64Any) GetInterface() interface{} { + return any.val +} diff --git 
a/vendor/github.com/json-iterator/go/build.sh b/vendor/github.com/json-iterator/go/build.sh new file mode 100644 index 000000000..b45ef6883 --- /dev/null +++ b/vendor/github.com/json-iterator/go/build.sh @@ -0,0 +1,12 @@ +#!/bin/bash +set -e +set -x + +if [ ! -d /tmp/build-golang/src/github.com/json-iterator ]; then + mkdir -p /tmp/build-golang/src/github.com/json-iterator + ln -s $PWD /tmp/build-golang/src/github.com/json-iterator/go +fi +export GOPATH=/tmp/build-golang +go get -u github.com/golang/dep/cmd/dep +cd /tmp/build-golang/src/github.com/json-iterator/go +exec $GOPATH/bin/dep ensure -update diff --git a/vendor/github.com/json-iterator/go/config.go b/vendor/github.com/json-iterator/go/config.go new file mode 100644 index 000000000..2adcdc3b7 --- /dev/null +++ b/vendor/github.com/json-iterator/go/config.go @@ -0,0 +1,375 @@ +package jsoniter + +import ( + "encoding/json" + "io" + "reflect" + "sync" + "unsafe" + + "github.com/modern-go/concurrent" + "github.com/modern-go/reflect2" +) + +// Config customize how the API should behave. +// The API is created from Config by Froze. +type Config struct { + IndentionStep int + MarshalFloatWith6Digits bool + EscapeHTML bool + SortMapKeys bool + UseNumber bool + DisallowUnknownFields bool + TagKey string + OnlyTaggedField bool + ValidateJsonRawMessage bool + ObjectFieldMustBeSimpleString bool + CaseSensitive bool +} + +// API the public interface of this package. +// Primary Marshal and Unmarshal. +type API interface { + IteratorPool + StreamPool + MarshalToString(v interface{}) (string, error) + Marshal(v interface{}) ([]byte, error) + MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) + UnmarshalFromString(str string, v interface{}) error + Unmarshal(data []byte, v interface{}) error + Get(data []byte, path ...interface{}) Any + NewEncoder(writer io.Writer) *Encoder + NewDecoder(reader io.Reader) *Decoder + Valid(data []byte) bool + RegisterExtension(extension Extension) + DecoderOf(typ reflect2.Type) ValDecoder + EncoderOf(typ reflect2.Type) ValEncoder +} + +// ConfigDefault the default API +var ConfigDefault = Config{ + EscapeHTML: true, +}.Froze() + +// ConfigCompatibleWithStandardLibrary tries to be 100% compatible with standard library behavior +var ConfigCompatibleWithStandardLibrary = Config{ + EscapeHTML: true, + SortMapKeys: true, + ValidateJsonRawMessage: true, +}.Froze() + +// ConfigFastest marshals float with only 6 digits precision +var ConfigFastest = Config{ + EscapeHTML: false, + MarshalFloatWith6Digits: true, // will lose precession + ObjectFieldMustBeSimpleString: true, // do not unescape object field +}.Froze() + +type frozenConfig struct { + configBeforeFrozen Config + sortMapKeys bool + indentionStep int + objectFieldMustBeSimpleString bool + onlyTaggedField bool + disallowUnknownFields bool + decoderCache *concurrent.Map + encoderCache *concurrent.Map + encoderExtension Extension + decoderExtension Extension + extraExtensions []Extension + streamPool *sync.Pool + iteratorPool *sync.Pool + caseSensitive bool +} + +func (cfg *frozenConfig) initCache() { + cfg.decoderCache = concurrent.NewMap() + cfg.encoderCache = concurrent.NewMap() +} + +func (cfg *frozenConfig) addDecoderToCache(cacheKey uintptr, decoder ValDecoder) { + cfg.decoderCache.Store(cacheKey, decoder) +} + +func (cfg *frozenConfig) addEncoderToCache(cacheKey uintptr, encoder ValEncoder) { + cfg.encoderCache.Store(cacheKey, encoder) +} + +func (cfg *frozenConfig) getDecoderFromCache(cacheKey uintptr) ValDecoder { + decoder, found := 
cfg.decoderCache.Load(cacheKey) + if found { + return decoder.(ValDecoder) + } + return nil +} + +func (cfg *frozenConfig) getEncoderFromCache(cacheKey uintptr) ValEncoder { + encoder, found := cfg.encoderCache.Load(cacheKey) + if found { + return encoder.(ValEncoder) + } + return nil +} + +var cfgCache = concurrent.NewMap() + +func getFrozenConfigFromCache(cfg Config) *frozenConfig { + obj, found := cfgCache.Load(cfg) + if found { + return obj.(*frozenConfig) + } + return nil +} + +func addFrozenConfigToCache(cfg Config, frozenConfig *frozenConfig) { + cfgCache.Store(cfg, frozenConfig) +} + +// Froze forge API from config +func (cfg Config) Froze() API { + api := &frozenConfig{ + sortMapKeys: cfg.SortMapKeys, + indentionStep: cfg.IndentionStep, + objectFieldMustBeSimpleString: cfg.ObjectFieldMustBeSimpleString, + onlyTaggedField: cfg.OnlyTaggedField, + disallowUnknownFields: cfg.DisallowUnknownFields, + caseSensitive: cfg.CaseSensitive, + } + api.streamPool = &sync.Pool{ + New: func() interface{} { + return NewStream(api, nil, 512) + }, + } + api.iteratorPool = &sync.Pool{ + New: func() interface{} { + return NewIterator(api) + }, + } + api.initCache() + encoderExtension := EncoderExtension{} + decoderExtension := DecoderExtension{} + if cfg.MarshalFloatWith6Digits { + api.marshalFloatWith6Digits(encoderExtension) + } + if cfg.EscapeHTML { + api.escapeHTML(encoderExtension) + } + if cfg.UseNumber { + api.useNumber(decoderExtension) + } + if cfg.ValidateJsonRawMessage { + api.validateJsonRawMessage(encoderExtension) + } + api.encoderExtension = encoderExtension + api.decoderExtension = decoderExtension + api.configBeforeFrozen = cfg + return api +} + +func (cfg Config) frozeWithCacheReuse(extraExtensions []Extension) *frozenConfig { + api := getFrozenConfigFromCache(cfg) + if api != nil { + return api + } + api = cfg.Froze().(*frozenConfig) + for _, extension := range extraExtensions { + api.RegisterExtension(extension) + } + addFrozenConfigToCache(cfg, api) + return api +} + +func (cfg *frozenConfig) validateJsonRawMessage(extension EncoderExtension) { + encoder := &funcEncoder{func(ptr unsafe.Pointer, stream *Stream) { + rawMessage := *(*json.RawMessage)(ptr) + iter := cfg.BorrowIterator([]byte(rawMessage)) + defer cfg.ReturnIterator(iter) + iter.Read() + if iter.Error != nil && iter.Error != io.EOF { + stream.WriteRaw("null") + } else { + stream.WriteRaw(string(rawMessage)) + } + }, func(ptr unsafe.Pointer) bool { + return len(*((*json.RawMessage)(ptr))) == 0 + }} + extension[reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem()] = encoder + extension[reflect2.TypeOfPtr((*RawMessage)(nil)).Elem()] = encoder +} + +func (cfg *frozenConfig) useNumber(extension DecoderExtension) { + extension[reflect2.TypeOfPtr((*interface{})(nil)).Elem()] = &funcDecoder{func(ptr unsafe.Pointer, iter *Iterator) { + exitingValue := *((*interface{})(ptr)) + if exitingValue != nil && reflect.TypeOf(exitingValue).Kind() == reflect.Ptr { + iter.ReadVal(exitingValue) + return + } + if iter.WhatIsNext() == NumberValue { + *((*interface{})(ptr)) = json.Number(iter.readNumberAsString()) + } else { + *((*interface{})(ptr)) = iter.Read() + } + }} +} +func (cfg *frozenConfig) getTagKey() string { + tagKey := cfg.configBeforeFrozen.TagKey + if tagKey == "" { + return "json" + } + return tagKey +} + +func (cfg *frozenConfig) RegisterExtension(extension Extension) { + cfg.extraExtensions = append(cfg.extraExtensions, extension) + copied := cfg.configBeforeFrozen + cfg.configBeforeFrozen = copied +} + +type 
lossyFloat32Encoder struct { +} + +func (encoder *lossyFloat32Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat32Lossy(*((*float32)(ptr))) +} + +func (encoder *lossyFloat32Encoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float32)(ptr)) == 0 +} + +type lossyFloat64Encoder struct { +} + +func (encoder *lossyFloat64Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat64Lossy(*((*float64)(ptr))) +} + +func (encoder *lossyFloat64Encoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float64)(ptr)) == 0 +} + +// EnableLossyFloatMarshalling keeps 10**(-6) precision +// for float variables for better performance. +func (cfg *frozenConfig) marshalFloatWith6Digits(extension EncoderExtension) { + // for better performance + extension[reflect2.TypeOfPtr((*float32)(nil)).Elem()] = &lossyFloat32Encoder{} + extension[reflect2.TypeOfPtr((*float64)(nil)).Elem()] = &lossyFloat64Encoder{} +} + +type htmlEscapedStringEncoder struct { +} + +func (encoder *htmlEscapedStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + str := *((*string)(ptr)) + stream.WriteStringWithHTMLEscaped(str) +} + +func (encoder *htmlEscapedStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*string)(ptr)) == "" +} + +func (cfg *frozenConfig) escapeHTML(encoderExtension EncoderExtension) { + encoderExtension[reflect2.TypeOfPtr((*string)(nil)).Elem()] = &htmlEscapedStringEncoder{} +} + +func (cfg *frozenConfig) cleanDecoders() { + typeDecoders = map[string]ValDecoder{} + fieldDecoders = map[string]ValDecoder{} + *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig)) +} + +func (cfg *frozenConfig) cleanEncoders() { + typeEncoders = map[string]ValEncoder{} + fieldEncoders = map[string]ValEncoder{} + *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig)) +} + +func (cfg *frozenConfig) MarshalToString(v interface{}) (string, error) { + stream := cfg.BorrowStream(nil) + defer cfg.ReturnStream(stream) + stream.WriteVal(v) + if stream.Error != nil { + return "", stream.Error + } + return string(stream.Buffer()), nil +} + +func (cfg *frozenConfig) Marshal(v interface{}) ([]byte, error) { + stream := cfg.BorrowStream(nil) + defer cfg.ReturnStream(stream) + stream.WriteVal(v) + if stream.Error != nil { + return nil, stream.Error + } + result := stream.Buffer() + copied := make([]byte, len(result)) + copy(copied, result) + return copied, nil +} + +func (cfg *frozenConfig) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + if prefix != "" { + panic("prefix is not supported") + } + for _, r := range indent { + if r != ' ' { + panic("indent can only be space") + } + } + newCfg := cfg.configBeforeFrozen + newCfg.IndentionStep = len(indent) + return newCfg.frozeWithCacheReuse(cfg.extraExtensions).Marshal(v) +} + +func (cfg *frozenConfig) UnmarshalFromString(str string, v interface{}) error { + data := []byte(str) + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + iter.ReadVal(v) + c := iter.nextToken() + if c == 0 { + if iter.Error == io.EOF { + return nil + } + return iter.Error + } + iter.ReportError("Unmarshal", "there are bytes left after unmarshal") + return iter.Error +} + +func (cfg *frozenConfig) Get(data []byte, path ...interface{}) Any { + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + return locatePath(iter, path) +} + +func (cfg *frozenConfig) Unmarshal(data []byte, v interface{}) error { + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + iter.ReadVal(v) + c := iter.nextToken() + if c == 
0 { + if iter.Error == io.EOF { + return nil + } + return iter.Error + } + iter.ReportError("Unmarshal", "there are bytes left after unmarshal") + return iter.Error +} + +func (cfg *frozenConfig) NewEncoder(writer io.Writer) *Encoder { + stream := NewStream(cfg, writer, 512) + return &Encoder{stream} +} + +func (cfg *frozenConfig) NewDecoder(reader io.Reader) *Decoder { + iter := Parse(cfg, reader, 512) + return &Decoder{iter} +} + +func (cfg *frozenConfig) Valid(data []byte) bool { + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + iter.Skip() + return iter.Error == nil +} diff --git a/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md b/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md new file mode 100644 index 000000000..3095662b0 --- /dev/null +++ b/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md @@ -0,0 +1,7 @@ +| json type \ dest type | bool | int | uint | float |string| +| --- | --- | --- | --- |--|--| +| number | positive => true
<br/> negative => true <br/> zero => false| 23.2 => 23 <br/> -32.1 => -32| 12.1 => 12 <br/> -12.1 => 0|as normal|same as origin|
+| string | empty string => false <br/> string "0" => false <br/> other strings => true | "123.32" => 123 <br/> "-123.4" => -123 <br/> "123.23xxxw" => 123 <br/> "abcde12" => 0 <br/> "-32.1" => -32| 13.2 => 13 <br/> -1.1 => 0 |12.1 => 12.1 <br/> -12.3 => -12.3 <br/> 12.4xxa => 12.4 <br/> +1.1e2 => 110 |same as origin|
+| bool | true => true <br/> false => false| true => 1 <br/> false => 0 | true => 1 <br/> false => 0 |true => 1 <br/> false => 0|true => "true" <br/> false => "false"|
+| object | true | 0 | 0 |0|original json|
+| array | empty array => false <br/> nonempty array => true| [] => 0 <br/> [1,2] => 1 | [] => 0 <br/> [1,2] => 1 |[] => 0 <br/>
[1,2] => 1|original json| \ No newline at end of file diff --git a/vendor/github.com/json-iterator/go/iter.go b/vendor/github.com/json-iterator/go/iter.go new file mode 100644 index 000000000..29b31cf78 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter.go @@ -0,0 +1,349 @@ +package jsoniter + +import ( + "encoding/json" + "fmt" + "io" +) + +// ValueType the type for JSON element +type ValueType int + +const ( + // InvalidValue invalid JSON element + InvalidValue ValueType = iota + // StringValue JSON element "string" + StringValue + // NumberValue JSON element 100 or 0.10 + NumberValue + // NilValue JSON element null + NilValue + // BoolValue JSON element true or false + BoolValue + // ArrayValue JSON element [] + ArrayValue + // ObjectValue JSON element {} + ObjectValue +) + +var hexDigits []byte +var valueTypes []ValueType + +func init() { + hexDigits = make([]byte, 256) + for i := 0; i < len(hexDigits); i++ { + hexDigits[i] = 255 + } + for i := '0'; i <= '9'; i++ { + hexDigits[i] = byte(i - '0') + } + for i := 'a'; i <= 'f'; i++ { + hexDigits[i] = byte((i - 'a') + 10) + } + for i := 'A'; i <= 'F'; i++ { + hexDigits[i] = byte((i - 'A') + 10) + } + valueTypes = make([]ValueType, 256) + for i := 0; i < len(valueTypes); i++ { + valueTypes[i] = InvalidValue + } + valueTypes['"'] = StringValue + valueTypes['-'] = NumberValue + valueTypes['0'] = NumberValue + valueTypes['1'] = NumberValue + valueTypes['2'] = NumberValue + valueTypes['3'] = NumberValue + valueTypes['4'] = NumberValue + valueTypes['5'] = NumberValue + valueTypes['6'] = NumberValue + valueTypes['7'] = NumberValue + valueTypes['8'] = NumberValue + valueTypes['9'] = NumberValue + valueTypes['t'] = BoolValue + valueTypes['f'] = BoolValue + valueTypes['n'] = NilValue + valueTypes['['] = ArrayValue + valueTypes['{'] = ObjectValue +} + +// Iterator is a io.Reader like object, with JSON specific read functions. +// Error is not returned as return value, but stored as Error member on this iterator instance. 
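+// A short usage sketch (an illustrative assumption, not part of the original
+// file): read fields with the iterator and check Error once at the end.
+//
+//	iter := ParseString(ConfigDefault, `{"size": 10}`)
+//	for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
+//		fmt.Println(field, iter.ReadInt())
+//	}
+//	if iter.Error != nil && iter.Error != io.EOF {
+//		log.Fatal(iter.Error)
+//	}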
+type Iterator struct { + cfg *frozenConfig + reader io.Reader + buf []byte + head int + tail int + depth int + captureStartedAt int + captured []byte + Error error + Attachment interface{} // open for customized decoder +} + +// NewIterator creates an empty Iterator instance +func NewIterator(cfg API) *Iterator { + return &Iterator{ + cfg: cfg.(*frozenConfig), + reader: nil, + buf: nil, + head: 0, + tail: 0, + depth: 0, + } +} + +// Parse creates an Iterator instance from io.Reader +func Parse(cfg API, reader io.Reader, bufSize int) *Iterator { + return &Iterator{ + cfg: cfg.(*frozenConfig), + reader: reader, + buf: make([]byte, bufSize), + head: 0, + tail: 0, + depth: 0, + } +} + +// ParseBytes creates an Iterator instance from byte array +func ParseBytes(cfg API, input []byte) *Iterator { + return &Iterator{ + cfg: cfg.(*frozenConfig), + reader: nil, + buf: input, + head: 0, + tail: len(input), + depth: 0, + } +} + +// ParseString creates an Iterator instance from string +func ParseString(cfg API, input string) *Iterator { + return ParseBytes(cfg, []byte(input)) +} + +// Pool returns a pool can provide more iterator with same configuration +func (iter *Iterator) Pool() IteratorPool { + return iter.cfg +} + +// Reset reuse iterator instance by specifying another reader +func (iter *Iterator) Reset(reader io.Reader) *Iterator { + iter.reader = reader + iter.head = 0 + iter.tail = 0 + iter.depth = 0 + return iter +} + +// ResetBytes reuse iterator instance by specifying another byte array as input +func (iter *Iterator) ResetBytes(input []byte) *Iterator { + iter.reader = nil + iter.buf = input + iter.head = 0 + iter.tail = len(input) + iter.depth = 0 + return iter +} + +// WhatIsNext gets ValueType of relatively next json element +func (iter *Iterator) WhatIsNext() ValueType { + valueType := valueTypes[iter.nextToken()] + iter.unreadByte() + return valueType +} + +func (iter *Iterator) skipWhitespacesWithoutLoadMore() bool { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case ' ', '\n', '\t', '\r': + continue + } + iter.head = i + return false + } + return true +} + +func (iter *Iterator) isObjectEnd() bool { + c := iter.nextToken() + if c == ',' { + return false + } + if c == '}' { + return true + } + iter.ReportError("isObjectEnd", "object ended prematurely, unexpected char "+string([]byte{c})) + return true +} + +func (iter *Iterator) nextToken() byte { + // a variation of skip whitespaces, returning the next non-whitespace token + for { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case ' ', '\n', '\t', '\r': + continue + } + iter.head = i + 1 + return c + } + if !iter.loadMore() { + return 0 + } + } +} + +// ReportError record a error in iterator instance with current position. 
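+// Only the first failure is recorded: if Error is already set to something
+// other than io.EOF, later calls are no-ops, so the earliest error position
+// is preserved.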
+func (iter *Iterator) ReportError(operation string, msg string) { + if iter.Error != nil { + if iter.Error != io.EOF { + return + } + } + peekStart := iter.head - 10 + if peekStart < 0 { + peekStart = 0 + } + peekEnd := iter.head + 10 + if peekEnd > iter.tail { + peekEnd = iter.tail + } + parsing := string(iter.buf[peekStart:peekEnd]) + contextStart := iter.head - 50 + if contextStart < 0 { + contextStart = 0 + } + contextEnd := iter.head + 50 + if contextEnd > iter.tail { + contextEnd = iter.tail + } + context := string(iter.buf[contextStart:contextEnd]) + iter.Error = fmt.Errorf("%s: %s, error found in #%v byte of ...|%s|..., bigger context ...|%s|...", + operation, msg, iter.head-peekStart, parsing, context) +} + +// CurrentBuffer gets current buffer as string for debugging purpose +func (iter *Iterator) CurrentBuffer() string { + peekStart := iter.head - 10 + if peekStart < 0 { + peekStart = 0 + } + return fmt.Sprintf("parsing #%v byte, around ...|%s|..., whole buffer ...|%s|...", iter.head, + string(iter.buf[peekStart:iter.head]), string(iter.buf[0:iter.tail])) +} + +func (iter *Iterator) readByte() (ret byte) { + if iter.head == iter.tail { + if iter.loadMore() { + ret = iter.buf[iter.head] + iter.head++ + return ret + } + return 0 + } + ret = iter.buf[iter.head] + iter.head++ + return ret +} + +func (iter *Iterator) loadMore() bool { + if iter.reader == nil { + if iter.Error == nil { + iter.head = iter.tail + iter.Error = io.EOF + } + return false + } + if iter.captured != nil { + iter.captured = append(iter.captured, + iter.buf[iter.captureStartedAt:iter.tail]...) + iter.captureStartedAt = 0 + } + for { + n, err := iter.reader.Read(iter.buf) + if n == 0 { + if err != nil { + if iter.Error == nil { + iter.Error = err + } + return false + } + } else { + iter.head = 0 + iter.tail = n + return true + } + } +} + +func (iter *Iterator) unreadByte() { + if iter.Error != nil { + return + } + iter.head-- + return +} + +// Read read the next JSON element as generic interface{}. 
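+// Concrete result types, as implemented below: string -> string, number ->
+// float64 (json.Number when Config.UseNumber is set), null -> nil, bool ->
+// bool, array -> []interface{}, object -> map[string]interface{}.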
+func (iter *Iterator) Read() interface{} { + valueType := iter.WhatIsNext() + switch valueType { + case StringValue: + return iter.ReadString() + case NumberValue: + if iter.cfg.configBeforeFrozen.UseNumber { + return json.Number(iter.readNumberAsString()) + } + return iter.ReadFloat64() + case NilValue: + iter.skipFourBytes('n', 'u', 'l', 'l') + return nil + case BoolValue: + return iter.ReadBool() + case ArrayValue: + arr := []interface{}{} + iter.ReadArrayCB(func(iter *Iterator) bool { + var elem interface{} + iter.ReadVal(&elem) + arr = append(arr, elem) + return true + }) + return arr + case ObjectValue: + obj := map[string]interface{}{} + iter.ReadMapCB(func(Iter *Iterator, field string) bool { + var elem interface{} + iter.ReadVal(&elem) + obj[field] = elem + return true + }) + return obj + default: + iter.ReportError("Read", fmt.Sprintf("unexpected value type: %v", valueType)) + return nil + } +} + +// limit maximum depth of nesting, as allowed by https://tools.ietf.org/html/rfc7159#section-9 +const maxDepth = 10000 + +func (iter *Iterator) incrementDepth() (success bool) { + iter.depth++ + if iter.depth <= maxDepth { + return true + } + iter.ReportError("incrementDepth", "exceeded max depth") + return false +} + +func (iter *Iterator) decrementDepth() (success bool) { + iter.depth-- + if iter.depth >= 0 { + return true + } + iter.ReportError("decrementDepth", "unexpected negative nesting") + return false +} diff --git a/vendor/github.com/json-iterator/go/iter_array.go b/vendor/github.com/json-iterator/go/iter_array.go new file mode 100644 index 000000000..204fe0e09 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_array.go @@ -0,0 +1,64 @@ +package jsoniter + +// ReadArray read array element, tells if the array has more element to read. 
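+// Typical loop (an illustrative sketch, not part of the original file):
+//
+//	iter := ParseString(ConfigDefault, `[1, 2, 3]`)
+//	for iter.ReadArray() {
+//		fmt.Println(iter.ReadInt())
+//	}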
+func (iter *Iterator) ReadArray() (ret bool) { + c := iter.nextToken() + switch c { + case 'n': + iter.skipThreeBytes('u', 'l', 'l') + return false // null + case '[': + c = iter.nextToken() + if c != ']' { + iter.unreadByte() + return true + } + return false + case ']': + return false + case ',': + return true + default: + iter.ReportError("ReadArray", "expect [ or , or ] or n, but found "+string([]byte{c})) + return + } +} + +// ReadArrayCB read array with callback +func (iter *Iterator) ReadArrayCB(callback func(*Iterator) bool) (ret bool) { + c := iter.nextToken() + if c == '[' { + if !iter.incrementDepth() { + return false + } + c = iter.nextToken() + if c != ']' { + iter.unreadByte() + if !callback(iter) { + iter.decrementDepth() + return false + } + c = iter.nextToken() + for c == ',' { + if !callback(iter) { + iter.decrementDepth() + return false + } + c = iter.nextToken() + } + if c != ']' { + iter.ReportError("ReadArrayCB", "expect ] in the end, but found "+string([]byte{c})) + iter.decrementDepth() + return false + } + return iter.decrementDepth() + } + return iter.decrementDepth() + } + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return true // null + } + iter.ReportError("ReadArrayCB", "expect [ or n, but found "+string([]byte{c})) + return false +} diff --git a/vendor/github.com/json-iterator/go/iter_float.go b/vendor/github.com/json-iterator/go/iter_float.go new file mode 100644 index 000000000..8a3d8b6fb --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_float.go @@ -0,0 +1,342 @@ +package jsoniter + +import ( + "encoding/json" + "io" + "math/big" + "strconv" + "strings" + "unsafe" +) + +var floatDigits []int8 + +const invalidCharForNumber = int8(-1) +const endOfNumber = int8(-2) +const dotInNumber = int8(-3) + +func init() { + floatDigits = make([]int8, 256) + for i := 0; i < len(floatDigits); i++ { + floatDigits[i] = invalidCharForNumber + } + for i := int8('0'); i <= int8('9'); i++ { + floatDigits[i] = i - int8('0') + } + floatDigits[','] = endOfNumber + floatDigits[']'] = endOfNumber + floatDigits['}'] = endOfNumber + floatDigits[' '] = endOfNumber + floatDigits['\t'] = endOfNumber + floatDigits['\n'] = endOfNumber + floatDigits['.'] = dotInNumber +} + +// ReadBigFloat read big.Float +func (iter *Iterator) ReadBigFloat() (ret *big.Float) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return nil + } + prec := 64 + if len(str) > prec { + prec = len(str) + } + val, _, err := big.ParseFloat(str, 10, uint(prec), big.ToZero) + if err != nil { + iter.Error = err + return nil + } + return val +} + +// ReadBigInt read big.Int +func (iter *Iterator) ReadBigInt() (ret *big.Int) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return nil + } + ret = big.NewInt(0) + var success bool + ret, success = ret.SetString(str, 10) + if !success { + iter.ReportError("ReadBigInt", "invalid big int") + return nil + } + return ret +} + +//ReadFloat32 read float32 +func (iter *Iterator) ReadFloat32() (ret float32) { + c := iter.nextToken() + if c == '-' { + return -iter.readPositiveFloat32() + } + iter.unreadByte() + return iter.readPositiveFloat32() +} + +func (iter *Iterator) readPositiveFloat32() (ret float32) { + i := iter.head + // first char + if i == iter.tail { + return iter.readFloat32SlowPath() + } + c := iter.buf[i] + i++ + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat32SlowPath() + case endOfNumber: + iter.ReportError("readFloat32", "empty 
number") + return + case dotInNumber: + iter.ReportError("readFloat32", "leading dot is invalid") + return + case 0: + if i == iter.tail { + return iter.readFloat32SlowPath() + } + c = iter.buf[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + iter.ReportError("readFloat32", "leading zero is invalid") + return + } + } + value := uint64(ind) + // chars before dot +non_decimal_loop: + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat32SlowPath() + case endOfNumber: + iter.head = i + return float32(value) + case dotInNumber: + break non_decimal_loop + } + if value > uint64SafeToMultiple10 { + return iter.readFloat32SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind; + } + // chars after dot + if c == '.' { + i++ + decimalPlaces := 0 + if i == iter.tail { + return iter.readFloat32SlowPath() + } + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case endOfNumber: + if decimalPlaces > 0 && decimalPlaces < len(pow10) { + iter.head = i + return float32(float64(value) / float64(pow10[decimalPlaces])) + } + // too many decimal places + return iter.readFloat32SlowPath() + case invalidCharForNumber, dotInNumber: + return iter.readFloat32SlowPath() + } + decimalPlaces++ + if value > uint64SafeToMultiple10 { + return iter.readFloat32SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) + } + } + return iter.readFloat32SlowPath() +} + +func (iter *Iterator) readNumberAsString() (ret string) { + strBuf := [16]byte{} + str := strBuf[0:0] +load_loop: + for { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case '+', '-', '.', 'e', 'E', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + str = append(str, c) + continue + default: + iter.head = i + break load_loop + } + } + if !iter.loadMore() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + return + } + if len(str) == 0 { + iter.ReportError("readNumberAsString", "invalid number") + } + return *(*string)(unsafe.Pointer(&str)) +} + +func (iter *Iterator) readFloat32SlowPath() (ret float32) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return + } + errMsg := validateFloat(str) + if errMsg != "" { + iter.ReportError("readFloat32SlowPath", errMsg) + return + } + val, err := strconv.ParseFloat(str, 32) + if err != nil { + iter.Error = err + return + } + return float32(val) +} + +// ReadFloat64 read float64 +func (iter *Iterator) ReadFloat64() (ret float64) { + c := iter.nextToken() + if c == '-' { + return -iter.readPositiveFloat64() + } + iter.unreadByte() + return iter.readPositiveFloat64() +} + +func (iter *Iterator) readPositiveFloat64() (ret float64) { + i := iter.head + // first char + if i == iter.tail { + return iter.readFloat64SlowPath() + } + c := iter.buf[i] + i++ + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat64SlowPath() + case endOfNumber: + iter.ReportError("readFloat64", "empty number") + return + case dotInNumber: + iter.ReportError("readFloat64", "leading dot is invalid") + return + case 0: + if i == iter.tail { + return iter.readFloat64SlowPath() + } + c = iter.buf[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + iter.ReportError("readFloat64", "leading zero is invalid") + return + } + } + value := uint64(ind) + // chars before dot +non_decimal_loop: + for ; i < iter.tail; i++ { + c = 
iter.buf[i] + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat64SlowPath() + case endOfNumber: + iter.head = i + return float64(value) + case dotInNumber: + break non_decimal_loop + } + if value > uint64SafeToMultiple10 { + return iter.readFloat64SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind; + } + // chars after dot + if c == '.' { + i++ + decimalPlaces := 0 + if i == iter.tail { + return iter.readFloat64SlowPath() + } + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case endOfNumber: + if decimalPlaces > 0 && decimalPlaces < len(pow10) { + iter.head = i + return float64(value) / float64(pow10[decimalPlaces]) + } + // too many decimal places + return iter.readFloat64SlowPath() + case invalidCharForNumber, dotInNumber: + return iter.readFloat64SlowPath() + } + decimalPlaces++ + if value > uint64SafeToMultiple10 { + return iter.readFloat64SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) + if value > maxFloat64 { + return iter.readFloat64SlowPath() + } + } + } + return iter.readFloat64SlowPath() +} + +func (iter *Iterator) readFloat64SlowPath() (ret float64) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return + } + errMsg := validateFloat(str) + if errMsg != "" { + iter.ReportError("readFloat64SlowPath", errMsg) + return + } + val, err := strconv.ParseFloat(str, 64) + if err != nil { + iter.Error = err + return + } + return val +} + +func validateFloat(str string) string { + // strconv.ParseFloat is not validating `1.` or `1.e1` + if len(str) == 0 { + return "empty number" + } + if str[0] == '-' { + return "-- is not valid" + } + dotPos := strings.IndexByte(str, '.') + if dotPos != -1 { + if dotPos == len(str)-1 { + return "dot can not be last character" + } + switch str[dotPos+1] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + default: + return "missing digit after dot" + } + } + return "" +} + +// ReadNumber read json.Number +func (iter *Iterator) ReadNumber() (ret json.Number) { + return json.Number(iter.readNumberAsString()) +} diff --git a/vendor/github.com/json-iterator/go/iter_int.go b/vendor/github.com/json-iterator/go/iter_int.go new file mode 100644 index 000000000..d786a89fe --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_int.go @@ -0,0 +1,346 @@ +package jsoniter + +import ( + "math" + "strconv" +) + +var intDigits []int8 + +const uint32SafeToMultiply10 = uint32(0xffffffff)/10 - 1 +const uint64SafeToMultiple10 = uint64(0xffffffffffffffff)/10 - 1 +const maxFloat64 = 1<<53 - 1 + +func init() { + intDigits = make([]int8, 256) + for i := 0; i < len(intDigits); i++ { + intDigits[i] = invalidCharForNumber + } + for i := int8('0'); i <= int8('9'); i++ { + intDigits[i] = i - int8('0') + } +} + +// ReadUint read uint +func (iter *Iterator) ReadUint() uint { + if strconv.IntSize == 32 { + return uint(iter.ReadUint32()) + } + return uint(iter.ReadUint64()) +} + +// ReadInt read int +func (iter *Iterator) ReadInt() int { + if strconv.IntSize == 32 { + return int(iter.ReadInt32()) + } + return int(iter.ReadInt64()) +} + +// ReadInt8 read int8 +func (iter *Iterator) ReadInt8() (ret int8) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint32(iter.readByte()) + if val > math.MaxInt8+1 { + iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return -int8(val) + } + val := iter.readUint32(c) + if val > math.MaxInt8 { + 
iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return int8(val) +} + +// ReadUint8 read uint8 +func (iter *Iterator) ReadUint8() (ret uint8) { + val := iter.readUint32(iter.nextToken()) + if val > math.MaxUint8 { + iter.ReportError("ReadUint8", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return uint8(val) +} + +// ReadInt16 read int16 +func (iter *Iterator) ReadInt16() (ret int16) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint32(iter.readByte()) + if val > math.MaxInt16+1 { + iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return -int16(val) + } + val := iter.readUint32(c) + if val > math.MaxInt16 { + iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return int16(val) +} + +// ReadUint16 read uint16 +func (iter *Iterator) ReadUint16() (ret uint16) { + val := iter.readUint32(iter.nextToken()) + if val > math.MaxUint16 { + iter.ReportError("ReadUint16", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return uint16(val) +} + +// ReadInt32 read int32 +func (iter *Iterator) ReadInt32() (ret int32) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint32(iter.readByte()) + if val > math.MaxInt32+1 { + iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return -int32(val) + } + val := iter.readUint32(c) + if val > math.MaxInt32 { + iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return int32(val) +} + +// ReadUint32 read uint32 +func (iter *Iterator) ReadUint32() (ret uint32) { + return iter.readUint32(iter.nextToken()) +} + +func (iter *Iterator) readUint32(c byte) (ret uint32) { + ind := intDigits[c] + if ind == 0 { + iter.assertInteger() + return 0 // single zero + } + if ind == invalidCharForNumber { + iter.ReportError("readUint32", "unexpected character: "+string([]byte{byte(ind)})) + return + } + value := uint32(ind) + if iter.tail-iter.head > 10 { + i := iter.head + ind2 := intDigits[iter.buf[i]] + if ind2 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value + } + i++ + ind3 := intDigits[iter.buf[i]] + if ind3 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10 + uint32(ind2) + } + //iter.head = i + 1 + //value = value * 100 + uint32(ind2) * 10 + uint32(ind3) + i++ + ind4 := intDigits[iter.buf[i]] + if ind4 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100 + uint32(ind2)*10 + uint32(ind3) + } + i++ + ind5 := intDigits[iter.buf[i]] + if ind5 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000 + uint32(ind2)*100 + uint32(ind3)*10 + uint32(ind4) + } + i++ + ind6 := intDigits[iter.buf[i]] + if ind6 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10000 + uint32(ind2)*1000 + uint32(ind3)*100 + uint32(ind4)*10 + uint32(ind5) + } + i++ + ind7 := intDigits[iter.buf[i]] + if ind7 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100000 + uint32(ind2)*10000 + uint32(ind3)*1000 + uint32(ind4)*100 + uint32(ind5)*10 + uint32(ind6) + } + i++ + ind8 := intDigits[iter.buf[i]] + if ind8 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000000 + uint32(ind2)*100000 + uint32(ind3)*10000 + uint32(ind4)*1000 + uint32(ind5)*100 + uint32(ind6)*10 + uint32(ind7) + } + i++ + ind9 := intDigits[iter.buf[i]] + value = 
value*10000000 + uint32(ind2)*1000000 + uint32(ind3)*100000 + uint32(ind4)*10000 + uint32(ind5)*1000 + uint32(ind6)*100 + uint32(ind7)*10 + uint32(ind8) + iter.head = i + if ind9 == invalidCharForNumber { + iter.assertInteger() + return value + } + } + for { + for i := iter.head; i < iter.tail; i++ { + ind = intDigits[iter.buf[i]] + if ind == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value + } + if value > uint32SafeToMultiply10 { + value2 := (value << 3) + (value << 1) + uint32(ind) + if value2 < value { + iter.ReportError("readUint32", "overflow") + return + } + value = value2 + continue + } + value = (value << 3) + (value << 1) + uint32(ind) + } + if !iter.loadMore() { + iter.assertInteger() + return value + } + } +} + +// ReadInt64 read int64 +func (iter *Iterator) ReadInt64() (ret int64) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint64(iter.readByte()) + if val > math.MaxInt64+1 { + iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10)) + return + } + return -int64(val) + } + val := iter.readUint64(c) + if val > math.MaxInt64 { + iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10)) + return + } + return int64(val) +} + +// ReadUint64 read uint64 +func (iter *Iterator) ReadUint64() uint64 { + return iter.readUint64(iter.nextToken()) +} + +func (iter *Iterator) readUint64(c byte) (ret uint64) { + ind := intDigits[c] + if ind == 0 { + iter.assertInteger() + return 0 // single zero + } + if ind == invalidCharForNumber { + iter.ReportError("readUint64", "unexpected character: "+string([]byte{byte(ind)})) + return + } + value := uint64(ind) + if iter.tail-iter.head > 10 { + i := iter.head + ind2 := intDigits[iter.buf[i]] + if ind2 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value + } + i++ + ind3 := intDigits[iter.buf[i]] + if ind3 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10 + uint64(ind2) + } + //iter.head = i + 1 + //value = value * 100 + uint32(ind2) * 10 + uint32(ind3) + i++ + ind4 := intDigits[iter.buf[i]] + if ind4 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100 + uint64(ind2)*10 + uint64(ind3) + } + i++ + ind5 := intDigits[iter.buf[i]] + if ind5 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000 + uint64(ind2)*100 + uint64(ind3)*10 + uint64(ind4) + } + i++ + ind6 := intDigits[iter.buf[i]] + if ind6 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10000 + uint64(ind2)*1000 + uint64(ind3)*100 + uint64(ind4)*10 + uint64(ind5) + } + i++ + ind7 := intDigits[iter.buf[i]] + if ind7 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100000 + uint64(ind2)*10000 + uint64(ind3)*1000 + uint64(ind4)*100 + uint64(ind5)*10 + uint64(ind6) + } + i++ + ind8 := intDigits[iter.buf[i]] + if ind8 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000000 + uint64(ind2)*100000 + uint64(ind3)*10000 + uint64(ind4)*1000 + uint64(ind5)*100 + uint64(ind6)*10 + uint64(ind7) + } + i++ + ind9 := intDigits[iter.buf[i]] + value = value*10000000 + uint64(ind2)*1000000 + uint64(ind3)*100000 + uint64(ind4)*10000 + uint64(ind5)*1000 + uint64(ind6)*100 + uint64(ind7)*10 + uint64(ind8) + iter.head = i + if ind9 == invalidCharForNumber { + iter.assertInteger() + return value + } + } + for { + for i := iter.head; i < iter.tail; i++ { + ind = intDigits[iter.buf[i]] + if ind == invalidCharForNumber { + 
iter.head = i + iter.assertInteger() + return value + } + if value > uint64SafeToMultiple10 { + value2 := (value << 3) + (value << 1) + uint64(ind) + if value2 < value { + iter.ReportError("readUint64", "overflow") + return + } + value = value2 + continue + } + value = (value << 3) + (value << 1) + uint64(ind) + } + if !iter.loadMore() { + iter.assertInteger() + return value + } + } +} + +func (iter *Iterator) assertInteger() { + if iter.head < iter.tail && iter.buf[iter.head] == '.' { + iter.ReportError("assertInteger", "can not decode float as int") + } +} diff --git a/vendor/github.com/json-iterator/go/iter_object.go b/vendor/github.com/json-iterator/go/iter_object.go new file mode 100644 index 000000000..58ee89c84 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_object.go @@ -0,0 +1,267 @@ +package jsoniter + +import ( + "fmt" + "strings" +) + +// ReadObject read one field from object. +// If object ended, returns empty string. +// Otherwise, returns the field name. +func (iter *Iterator) ReadObject() (ret string) { + c := iter.nextToken() + switch c { + case 'n': + iter.skipThreeBytes('u', 'l', 'l') + return "" // null + case '{': + c = iter.nextToken() + if c == '"' { + iter.unreadByte() + field := iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + return field + } + if c == '}' { + return "" // end of object + } + iter.ReportError("ReadObject", `expect " after {, but found `+string([]byte{c})) + return + case ',': + field := iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + return field + case '}': + return "" // end of object + default: + iter.ReportError("ReadObject", fmt.Sprintf(`expect { or , or } or n, but found %s`, string([]byte{c}))) + return + } +} + +// CaseInsensitive +func (iter *Iterator) readFieldHash() int64 { + hash := int64(0x811c9dc5) + c := iter.nextToken() + if c != '"' { + iter.ReportError("readFieldHash", `expect ", but found `+string([]byte{c})) + return 0 + } + for { + for i := iter.head; i < iter.tail; i++ { + // require ascii string and no escape + b := iter.buf[i] + if b == '\\' { + iter.head = i + for _, b := range iter.readStringSlowPath() { + if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive { + b += 'a' - 'A' + } + hash ^= int64(b) + hash *= 0x1000193 + } + c = iter.nextToken() + if c != ':' { + iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c})) + return 0 + } + return hash + } + if b == '"' { + iter.head = i + 1 + c = iter.nextToken() + if c != ':' { + iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c})) + return 0 + } + return hash + } + if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive { + b += 'a' - 'A' + } + hash ^= int64(b) + hash *= 0x1000193 + } + if !iter.loadMore() { + iter.ReportError("readFieldHash", `incomplete field name`) + return 0 + } + } +} + +func calcHash(str string, caseSensitive bool) int64 { + if !caseSensitive { + str = strings.ToLower(str) + } + hash := int64(0x811c9dc5) + for _, b := range []byte(str) { + hash ^= int64(b) + hash *= 0x1000193 + } + return int64(hash) +} + +// ReadObjectCB read object with callback, the key is ascii only and field name not copied +func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { + c := iter.nextToken() + var field string + if c == '{' { + if !iter.incrementDepth() { + return false + } + c = 
iter.nextToken() + if c == '"' { + iter.unreadByte() + field = iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + if !callback(iter, field) { + iter.decrementDepth() + return false + } + c = iter.nextToken() + for c == ',' { + field = iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + if !callback(iter, field) { + iter.decrementDepth() + return false + } + c = iter.nextToken() + } + if c != '}' { + iter.ReportError("ReadObjectCB", `object not ended with }`) + iter.decrementDepth() + return false + } + return iter.decrementDepth() + } + if c == '}' { + return iter.decrementDepth() + } + iter.ReportError("ReadObjectCB", `expect " after {, but found `+string([]byte{c})) + iter.decrementDepth() + return false + } + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return true // null + } + iter.ReportError("ReadObjectCB", `expect { or n, but found `+string([]byte{c})) + return false +} + +// ReadMapCB read map with callback, the key can be any string +func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool { + c := iter.nextToken() + if c == '{' { + if !iter.incrementDepth() { + return false + } + c = iter.nextToken() + if c == '"' { + iter.unreadByte() + field := iter.ReadString() + if iter.nextToken() != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + iter.decrementDepth() + return false + } + if !callback(iter, field) { + iter.decrementDepth() + return false + } + c = iter.nextToken() + for c == ',' { + field = iter.ReadString() + if iter.nextToken() != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + iter.decrementDepth() + return false + } + if !callback(iter, field) { + iter.decrementDepth() + return false + } + c = iter.nextToken() + } + if c != '}' { + iter.ReportError("ReadMapCB", `object not ended with }`) + iter.decrementDepth() + return false + } + return iter.decrementDepth() + } + if c == '}' { + return iter.decrementDepth() + } + iter.ReportError("ReadMapCB", `expect " after {, but found `+string([]byte{c})) + iter.decrementDepth() + return false + } + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return true // null + } + iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c})) + return false +} + +func (iter *Iterator) readObjectStart() bool { + c := iter.nextToken() + if c == '{' { + c = iter.nextToken() + if c == '}' { + return false + } + iter.unreadByte() + return true + } else if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return false + } + iter.ReportError("readObjectStart", "expect { or n, but found "+string([]byte{c})) + return false +} + +func (iter *Iterator) readObjectFieldAsBytes() (ret []byte) { + str := iter.ReadStringAsSlice() + if iter.skipWhitespacesWithoutLoadMore() { + if ret == nil { + ret = make([]byte, len(str)) + copy(ret, str) + } + if !iter.loadMore() { + return + } + } + if iter.buf[iter.head] != ':' { + iter.ReportError("readObjectFieldAsBytes", "expect : after object field, but found "+string([]byte{iter.buf[iter.head]})) + return + } + iter.head++ + if iter.skipWhitespacesWithoutLoadMore() { + if ret == nil { + ret = make([]byte, len(str)) + copy(ret, str) + } + if !iter.loadMore() { + return + } + } + if ret == nil { + return str + } + return ret +} diff --git 
a/vendor/github.com/json-iterator/go/iter_skip.go b/vendor/github.com/json-iterator/go/iter_skip.go new file mode 100644 index 000000000..e91eefb15 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_skip.go @@ -0,0 +1,130 @@ +package jsoniter + +import "fmt" + +// ReadNil reads a json object as nil and +// returns whether it's a nil or not +func (iter *Iterator) ReadNil() (ret bool) { + c := iter.nextToken() + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') // null + return true + } + iter.unreadByte() + return false +} + +// ReadBool reads a json object as BoolValue +func (iter *Iterator) ReadBool() (ret bool) { + c := iter.nextToken() + if c == 't' { + iter.skipThreeBytes('r', 'u', 'e') + return true + } + if c == 'f' { + iter.skipFourBytes('a', 'l', 's', 'e') + return false + } + iter.ReportError("ReadBool", "expect t or f, but found "+string([]byte{c})) + return +} + +// SkipAndReturnBytes skip next JSON element, and return its content as []byte. +// The []byte can be kept, it is a copy of data. +func (iter *Iterator) SkipAndReturnBytes() []byte { + iter.startCapture(iter.head) + iter.Skip() + return iter.stopCapture() +} + +// SkipAndAppendBytes skips next JSON element and appends its content to +// buffer, returning the result. +func (iter *Iterator) SkipAndAppendBytes(buf []byte) []byte { + iter.startCaptureTo(buf, iter.head) + iter.Skip() + return iter.stopCapture() +} + +func (iter *Iterator) startCaptureTo(buf []byte, captureStartedAt int) { + if iter.captured != nil { + panic("already in capture mode") + } + iter.captureStartedAt = captureStartedAt + iter.captured = buf +} + +func (iter *Iterator) startCapture(captureStartedAt int) { + iter.startCaptureTo(make([]byte, 0, 32), captureStartedAt) +} + +func (iter *Iterator) stopCapture() []byte { + if iter.captured == nil { + panic("not in capture mode") + } + captured := iter.captured + remaining := iter.buf[iter.captureStartedAt:iter.head] + iter.captureStartedAt = -1 + iter.captured = nil + return append(captured, remaining...) 
+} + +// Skip skips a json object and positions to relatively the next json object +func (iter *Iterator) Skip() { + c := iter.nextToken() + switch c { + case '"': + iter.skipString() + case 'n': + iter.skipThreeBytes('u', 'l', 'l') // null + case 't': + iter.skipThreeBytes('r', 'u', 'e') // true + case 'f': + iter.skipFourBytes('a', 'l', 's', 'e') // false + case '0': + iter.unreadByte() + iter.ReadFloat32() + case '-', '1', '2', '3', '4', '5', '6', '7', '8', '9': + iter.skipNumber() + case '[': + iter.skipArray() + case '{': + iter.skipObject() + default: + iter.ReportError("Skip", fmt.Sprintf("do not know how to skip: %v", c)) + return + } +} + +func (iter *Iterator) skipFourBytes(b1, b2, b3, b4 byte) { + if iter.readByte() != b1 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } + if iter.readByte() != b2 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } + if iter.readByte() != b3 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } + if iter.readByte() != b4 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } +} + +func (iter *Iterator) skipThreeBytes(b1, b2, b3 byte) { + if iter.readByte() != b1 { + iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) + return + } + if iter.readByte() != b2 { + iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) + return + } + if iter.readByte() != b3 { + iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) + return + } +} diff --git a/vendor/github.com/json-iterator/go/iter_skip_sloppy.go b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go new file mode 100644 index 000000000..9303de41e --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go @@ -0,0 +1,163 @@ +//+build jsoniter_sloppy + +package jsoniter + +// sloppy but faster implementation, do not validate the input json + +func (iter *Iterator) skipNumber() { + for { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case ' ', '\n', '\r', '\t', ',', '}', ']': + iter.head = i + return + } + } + if !iter.loadMore() { + return + } + } +} + +func (iter *Iterator) skipArray() { + level := 1 + if !iter.incrementDepth() { + return + } + for { + for i := iter.head; i < iter.tail; i++ { + switch iter.buf[i] { + case '"': // If inside string, skip it + iter.head = i + 1 + iter.skipString() + i = iter.head - 1 // it will be i++ soon + case '[': // If open symbol, increase level + level++ + if !iter.incrementDepth() { + return + } + case ']': // If close symbol, increase level + level-- + if !iter.decrementDepth() { + return + } + + // If we have returned to the original level, we're done + if level == 0 { + iter.head = i + 1 + return + } + } + } + if !iter.loadMore() { + iter.ReportError("skipObject", "incomplete array") + return + } + } +} + +func (iter *Iterator) skipObject() { + level := 1 + if !iter.incrementDepth() { + return + } + + for { + for i := iter.head; i < iter.tail; i++ { + switch iter.buf[i] { + case '"': // If inside string, skip it + iter.head = i + 1 + iter.skipString() + i = iter.head - 1 // it will be i++ soon + case '{': // If open symbol, increase level + level++ + if !iter.incrementDepth() { + return + } + case '}': // If close symbol, increase level + level-- + if !iter.decrementDepth() { + return + 
} + + // If we have returned to the original level, we're done + if level == 0 { + iter.head = i + 1 + return + } + } + } + if !iter.loadMore() { + iter.ReportError("skipObject", "incomplete object") + return + } + } +} + +func (iter *Iterator) skipString() { + for { + end, escaped := iter.findStringEnd() + if end == -1 { + if !iter.loadMore() { + iter.ReportError("skipString", "incomplete string") + return + } + if escaped { + iter.head = 1 // skip the first char as last char read is \ + } + } else { + iter.head = end + return + } + } +} + +// adapted from: https://github.com/buger/jsonparser/blob/master/parser.go +// Tries to find the end of string +// Support if string contains escaped quote symbols. +func (iter *Iterator) findStringEnd() (int, bool) { + escaped := false + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + if c == '"' { + if !escaped { + return i + 1, false + } + j := i - 1 + for { + if j < iter.head || iter.buf[j] != '\\' { + // even number of backslashes + // either end of buffer, or " found + return i + 1, true + } + j-- + if j < iter.head || iter.buf[j] != '\\' { + // odd number of backslashes + // it is \" or \\\" + break + } + j-- + } + } else if c == '\\' { + escaped = true + } + } + j := iter.tail - 1 + for { + if j < iter.head || iter.buf[j] != '\\' { + // even number of backslashes + // either end of buffer, or " found + return -1, false // do not end with \ + } + j-- + if j < iter.head || iter.buf[j] != '\\' { + // odd number of backslashes + // it is \" or \\\" + break + } + j-- + + } + return -1, true // end with \ +} diff --git a/vendor/github.com/json-iterator/go/iter_skip_strict.go b/vendor/github.com/json-iterator/go/iter_skip_strict.go new file mode 100644 index 000000000..6cf66d043 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_skip_strict.go @@ -0,0 +1,99 @@ +//+build !jsoniter_sloppy + +package jsoniter + +import ( + "fmt" + "io" +) + +func (iter *Iterator) skipNumber() { + if !iter.trySkipNumber() { + iter.unreadByte() + if iter.Error != nil && iter.Error != io.EOF { + return + } + iter.ReadFloat64() + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = nil + iter.ReadBigFloat() + } + } +} + +func (iter *Iterator) trySkipNumber() bool { + dotFound := false + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + case '.': + if dotFound { + iter.ReportError("validateNumber", `more than one dot found in number`) + return true // already failed + } + if i+1 == iter.tail { + return false + } + c = iter.buf[i+1] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + default: + iter.ReportError("validateNumber", `missing digit after dot`) + return true // already failed + } + dotFound = true + default: + switch c { + case ',', ']', '}', ' ', '\t', '\n', '\r': + if iter.head == i { + return false // if - without following digits + } + iter.head = i + return true // must be valid + } + return false // may be invalid + } + } + return false +} + +func (iter *Iterator) skipString() { + if !iter.trySkipString() { + iter.unreadByte() + iter.ReadString() + } +} + +func (iter *Iterator) trySkipString() bool { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + if c == '"' { + iter.head = i + 1 + return true // valid + } else if c == '\\' { + return false + } else if c < ' ' { + iter.ReportError("trySkipString", + fmt.Sprintf(`invalid control character found: %d`, c)) + return true // already failed + } + } + return false +} + 
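+// Because this strict build (compiled without the jsoniter_sloppy tag)
+// validates numbers and strings while skipping, Valid rejects malformed
+// input that the sloppy build would pass over. An illustrative sketch
+// (assumption, not part of the original file):
+//
+//	ConfigDefault.Valid([]byte(`{"a": 1.0}`)) // true
+//	ConfigDefault.Valid([]byte(`{"a": 1.}`))  // false: missing digit after dot
+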
+func (iter *Iterator) skipObject() { + iter.unreadByte() + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + iter.Skip() + return true + }) +} + +func (iter *Iterator) skipArray() { + iter.unreadByte() + iter.ReadArrayCB(func(iter *Iterator) bool { + iter.Skip() + return true + }) +} diff --git a/vendor/github.com/json-iterator/go/iter_str.go b/vendor/github.com/json-iterator/go/iter_str.go new file mode 100644 index 000000000..adc487ea8 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_str.go @@ -0,0 +1,215 @@ +package jsoniter + +import ( + "fmt" + "unicode/utf16" +) + +// ReadString read string from iterator +func (iter *Iterator) ReadString() (ret string) { + c := iter.nextToken() + if c == '"' { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + if c == '"' { + ret = string(iter.buf[iter.head:i]) + iter.head = i + 1 + return ret + } else if c == '\\' { + break + } else if c < ' ' { + iter.ReportError("ReadString", + fmt.Sprintf(`invalid control character found: %d`, c)) + return + } + } + return iter.readStringSlowPath() + } else if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return "" + } + iter.ReportError("ReadString", `expects " or n, but found `+string([]byte{c})) + return +} + +func (iter *Iterator) readStringSlowPath() (ret string) { + var str []byte + var c byte + for iter.Error == nil { + c = iter.readByte() + if c == '"' { + return string(str) + } + if c == '\\' { + c = iter.readByte() + str = iter.readEscapedChar(c, str) + } else { + str = append(str, c) + } + } + iter.ReportError("readStringSlowPath", "unexpected end of input") + return +} + +func (iter *Iterator) readEscapedChar(c byte, str []byte) []byte { + switch c { + case 'u': + r := iter.readU4() + if utf16.IsSurrogate(r) { + c = iter.readByte() + if iter.Error != nil { + return nil + } + if c != '\\' { + iter.unreadByte() + str = appendRune(str, r) + return str + } + c = iter.readByte() + if iter.Error != nil { + return nil + } + if c != 'u' { + str = appendRune(str, r) + return iter.readEscapedChar(c, str) + } + r2 := iter.readU4() + if iter.Error != nil { + return nil + } + combined := utf16.DecodeRune(r, r2) + if combined == '\uFFFD' { + str = appendRune(str, r) + str = appendRune(str, r2) + } else { + str = appendRune(str, combined) + } + } else { + str = appendRune(str, r) + } + case '"': + str = append(str, '"') + case '\\': + str = append(str, '\\') + case '/': + str = append(str, '/') + case 'b': + str = append(str, '\b') + case 'f': + str = append(str, '\f') + case 'n': + str = append(str, '\n') + case 'r': + str = append(str, '\r') + case 't': + str = append(str, '\t') + default: + iter.ReportError("readEscapedChar", + `invalid escape char after \`) + return nil + } + return str +} + +// ReadStringAsSlice read string from iterator without copying into string form. +// The []byte can not be kept, as it will change after next iterator call. 
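+// If the bytes must outlive the next read, copy them first (illustrative):
+//
+//	b := append([]byte(nil), iter.ReadStringAsSlice()...)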
+func (iter *Iterator) ReadStringAsSlice() (ret []byte) { + c := iter.nextToken() + if c == '"' { + for i := iter.head; i < iter.tail; i++ { + // require ascii string and no escape + // for: field name, base64, number + if iter.buf[i] == '"' { + // fast path: reuse the underlying buffer + ret = iter.buf[iter.head:i] + iter.head = i + 1 + return ret + } + } + readLen := iter.tail - iter.head + copied := make([]byte, readLen, readLen*2) + copy(copied, iter.buf[iter.head:iter.tail]) + iter.head = iter.tail + for iter.Error == nil { + c := iter.readByte() + if c == '"' { + return copied + } + copied = append(copied, c) + } + return copied + } + iter.ReportError("ReadStringAsSlice", `expects " or n, but found `+string([]byte{c})) + return +} + +func (iter *Iterator) readU4() (ret rune) { + for i := 0; i < 4; i++ { + c := iter.readByte() + if iter.Error != nil { + return + } + if c >= '0' && c <= '9' { + ret = ret*16 + rune(c-'0') + } else if c >= 'a' && c <= 'f' { + ret = ret*16 + rune(c-'a'+10) + } else if c >= 'A' && c <= 'F' { + ret = ret*16 + rune(c-'A'+10) + } else { + iter.ReportError("readU4", "expects 0~9 or a~f, but found "+string([]byte{c})) + return + } + } + return ret +} + +const ( + t1 = 0x00 // 0000 0000 + tx = 0x80 // 1000 0000 + t2 = 0xC0 // 1100 0000 + t3 = 0xE0 // 1110 0000 + t4 = 0xF0 // 1111 0000 + t5 = 0xF8 // 1111 1000 + + maskx = 0x3F // 0011 1111 + mask2 = 0x1F // 0001 1111 + mask3 = 0x0F // 0000 1111 + mask4 = 0x07 // 0000 0111 + + rune1Max = 1<<7 - 1 + rune2Max = 1<<11 - 1 + rune3Max = 1<<16 - 1 + + surrogateMin = 0xD800 + surrogateMax = 0xDFFF + + maxRune = '\U0010FFFF' // Maximum valid Unicode code point. + runeError = '\uFFFD' // the "error" Rune or "Unicode replacement character" +) + +func appendRune(p []byte, r rune) []byte { + // Negative values are erroneous. Making it unsigned addresses the problem. + switch i := uint32(r); { + case i <= rune1Max: + p = append(p, byte(r)) + return p + case i <= rune2Max: + p = append(p, t2|byte(r>>6)) + p = append(p, tx|byte(r)&maskx) + return p + case i > maxRune, surrogateMin <= i && i <= surrogateMax: + r = runeError + fallthrough + case i <= rune3Max: + p = append(p, t3|byte(r>>12)) + p = append(p, tx|byte(r>>6)&maskx) + p = append(p, tx|byte(r)&maskx) + return p + default: + p = append(p, t4|byte(r>>18)) + p = append(p, tx|byte(r>>12)&maskx) + p = append(p, tx|byte(r>>6)&maskx) + p = append(p, tx|byte(r)&maskx) + return p + } +} diff --git a/vendor/github.com/json-iterator/go/jsoniter.go b/vendor/github.com/json-iterator/go/jsoniter.go new file mode 100644 index 000000000..c2934f916 --- /dev/null +++ b/vendor/github.com/json-iterator/go/jsoniter.go @@ -0,0 +1,18 @@ +// Package jsoniter implements encoding and decoding of JSON as defined in +// RFC 4627 and provides interfaces with identical syntax of standard lib encoding/json. +// Converting from encoding/json to jsoniter is no more than replacing the package with jsoniter +// and variable type declarations (if any). +// jsoniter interfaces gives 100% compatibility with code using standard lib. +// +// "JSON and Go" +// (https://golang.org/doc/articles/json_and_go.html) +// gives a description of how Marshal/Unmarshal operate +// between arbitrary or predefined json objects and bytes, +// and it applies to jsoniter.Marshal/Unmarshal as well. +// +// Besides, jsoniter.Iterator provides a different set of interfaces +// iterating given bytes/string/reader +// and yielding parsed elements one by one. 
+// This set of interfaces reads input as required and gives +// better performance. +package jsoniter diff --git a/vendor/github.com/json-iterator/go/pool.go b/vendor/github.com/json-iterator/go/pool.go new file mode 100644 index 000000000..e2389b56c --- /dev/null +++ b/vendor/github.com/json-iterator/go/pool.go @@ -0,0 +1,42 @@ +package jsoniter + +import ( + "io" +) + +// IteratorPool a thread safe pool of iterators with same configuration +type IteratorPool interface { + BorrowIterator(data []byte) *Iterator + ReturnIterator(iter *Iterator) +} + +// StreamPool a thread safe pool of streams with same configuration +type StreamPool interface { + BorrowStream(writer io.Writer) *Stream + ReturnStream(stream *Stream) +} + +func (cfg *frozenConfig) BorrowStream(writer io.Writer) *Stream { + stream := cfg.streamPool.Get().(*Stream) + stream.Reset(writer) + return stream +} + +func (cfg *frozenConfig) ReturnStream(stream *Stream) { + stream.out = nil + stream.Error = nil + stream.Attachment = nil + cfg.streamPool.Put(stream) +} + +func (cfg *frozenConfig) BorrowIterator(data []byte) *Iterator { + iter := cfg.iteratorPool.Get().(*Iterator) + iter.ResetBytes(data) + return iter +} + +func (cfg *frozenConfig) ReturnIterator(iter *Iterator) { + iter.Error = nil + iter.Attachment = nil + cfg.iteratorPool.Put(iter) +} diff --git a/vendor/github.com/json-iterator/go/reflect.go b/vendor/github.com/json-iterator/go/reflect.go new file mode 100644 index 000000000..39acb320a --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect.go @@ -0,0 +1,337 @@ +package jsoniter + +import ( + "fmt" + "reflect" + "unsafe" + + "github.com/modern-go/reflect2" +) + +// ValDecoder is an internal type registered to cache as needed. +// Don't confuse jsoniter.ValDecoder with json.Decoder. +// For json.Decoder's adapter, refer to jsoniter.AdapterDecoder(todo link). +// +// Reflection on type to create decoders, which is then cached +// Reflection on value is avoided as we can, as the reflect.Value itself will allocate, with following exceptions +// 1. create instance of new value, for example *int will need a int to be allocated +// 2. append to slice, if the existing cap is not enough, allocate will be done using Reflect.New +// 3. assignment to map, both key and value will be reflect.Value +// For a simple struct binding, it will be reflect.Value free and allocation free +type ValDecoder interface { + Decode(ptr unsafe.Pointer, iter *Iterator) +} + +// ValEncoder is an internal type registered to cache as needed. +// Don't confuse jsoniter.ValEncoder with json.Encoder. +// For json.Encoder's adapter, refer to jsoniter.AdapterEncoder(todo godoc link). 
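+// A minimal custom encoder sketch (an assumption for illustration only; the
+// WriteInt calls are taken to come from this package's Stream API):
+//
+//	type boolAsIntEncoder struct{}
+//
+//	func (e *boolAsIntEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+//		return !*(*bool)(ptr)
+//	}
+//
+//	func (e *boolAsIntEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+//		if *(*bool)(ptr) {
+//			stream.WriteInt(1)
+//		} else {
+//			stream.WriteInt(0)
+//		}
+//	}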
+type ValEncoder interface { + IsEmpty(ptr unsafe.Pointer) bool + Encode(ptr unsafe.Pointer, stream *Stream) +} + +type checkIsEmpty interface { + IsEmpty(ptr unsafe.Pointer) bool +} + +type ctx struct { + *frozenConfig + prefix string + encoders map[reflect2.Type]ValEncoder + decoders map[reflect2.Type]ValDecoder +} + +func (b *ctx) caseSensitive() bool { + if b.frozenConfig == nil { + // default is case-insensitive + return false + } + return b.frozenConfig.caseSensitive +} + +func (b *ctx) append(prefix string) *ctx { + return &ctx{ + frozenConfig: b.frozenConfig, + prefix: b.prefix + " " + prefix, + encoders: b.encoders, + decoders: b.decoders, + } +} + +// ReadVal copy the underlying JSON into go interface, same as json.Unmarshal +func (iter *Iterator) ReadVal(obj interface{}) { + depth := iter.depth + cacheKey := reflect2.RTypeOf(obj) + decoder := iter.cfg.getDecoderFromCache(cacheKey) + if decoder == nil { + typ := reflect2.TypeOf(obj) + if typ == nil || typ.Kind() != reflect.Ptr { + iter.ReportError("ReadVal", "can only unmarshal into pointer") + return + } + decoder = iter.cfg.DecoderOf(typ) + } + ptr := reflect2.PtrOf(obj) + if ptr == nil { + iter.ReportError("ReadVal", "can not read into nil pointer") + return + } + decoder.Decode(ptr, iter) + if iter.depth != depth { + iter.ReportError("ReadVal", "unexpected mismatched nesting") + return + } +} + +// WriteVal copy the go interface into underlying JSON, same as json.Marshal +func (stream *Stream) WriteVal(val interface{}) { + if nil == val { + stream.WriteNil() + return + } + cacheKey := reflect2.RTypeOf(val) + encoder := stream.cfg.getEncoderFromCache(cacheKey) + if encoder == nil { + typ := reflect2.TypeOf(val) + encoder = stream.cfg.EncoderOf(typ) + } + encoder.Encode(reflect2.PtrOf(val), stream) +} + +func (cfg *frozenConfig) DecoderOf(typ reflect2.Type) ValDecoder { + cacheKey := typ.RType() + decoder := cfg.getDecoderFromCache(cacheKey) + if decoder != nil { + return decoder + } + ctx := &ctx{ + frozenConfig: cfg, + prefix: "", + decoders: map[reflect2.Type]ValDecoder{}, + encoders: map[reflect2.Type]ValEncoder{}, + } + ptrType := typ.(*reflect2.UnsafePtrType) + decoder = decoderOfType(ctx, ptrType.Elem()) + cfg.addDecoderToCache(cacheKey, decoder) + return decoder +} + +func decoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := getTypeDecoderFromExtension(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfType(ctx, typ) + for _, extension := range extensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder) + for _, extension := range ctx.extraExtensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + return decoder +} + +func createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := ctx.decoders[typ] + if decoder != nil { + return decoder + } + placeholder := &placeholderDecoder{} + ctx.decoders[typ] = placeholder + decoder = _createDecoderOfType(ctx, typ) + placeholder.decoder = decoder + return decoder +} + +func _createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := createDecoderOfJsonRawMessage(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfJsonNumber(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfMarshaler(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfAny(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfNative(ctx, typ) + if 
decoder != nil { + return decoder + } + switch typ.Kind() { + case reflect.Interface: + ifaceType, isIFace := typ.(*reflect2.UnsafeIFaceType) + if isIFace { + return &ifaceDecoder{valType: ifaceType} + } + return &efaceDecoder{} + case reflect.Struct: + return decoderOfStruct(ctx, typ) + case reflect.Array: + return decoderOfArray(ctx, typ) + case reflect.Slice: + return decoderOfSlice(ctx, typ) + case reflect.Map: + return decoderOfMap(ctx, typ) + case reflect.Ptr: + return decoderOfOptional(ctx, typ) + default: + return &lazyErrorDecoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())} + } +} + +func (cfg *frozenConfig) EncoderOf(typ reflect2.Type) ValEncoder { + cacheKey := typ.RType() + encoder := cfg.getEncoderFromCache(cacheKey) + if encoder != nil { + return encoder + } + ctx := &ctx{ + frozenConfig: cfg, + prefix: "", + decoders: map[reflect2.Type]ValDecoder{}, + encoders: map[reflect2.Type]ValEncoder{}, + } + encoder = encoderOfType(ctx, typ) + if typ.LikePtr() { + encoder = &onePtrEncoder{encoder} + } + cfg.addEncoderToCache(cacheKey, encoder) + return encoder +} + +type onePtrEncoder struct { + encoder ValEncoder +} + +func (encoder *onePtrEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr)) +} + +func (encoder *onePtrEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.encoder.Encode(unsafe.Pointer(&ptr), stream) +} + +func encoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := getTypeEncoderFromExtension(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfType(ctx, typ) + for _, extension := range extensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder) + for _, extension := range ctx.extraExtensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + return encoder +} + +func createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := ctx.encoders[typ] + if encoder != nil { + return encoder + } + placeholder := &placeholderEncoder{} + ctx.encoders[typ] = placeholder + encoder = _createEncoderOfType(ctx, typ) + placeholder.encoder = encoder + return encoder +} +func _createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := createEncoderOfJsonRawMessage(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfJsonNumber(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfMarshaler(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfAny(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfNative(ctx, typ) + if encoder != nil { + return encoder + } + kind := typ.Kind() + switch kind { + case reflect.Interface: + return &dynamicEncoder{typ} + case reflect.Struct: + return encoderOfStruct(ctx, typ) + case reflect.Array: + return encoderOfArray(ctx, typ) + case reflect.Slice: + return encoderOfSlice(ctx, typ) + case reflect.Map: + return encoderOfMap(ctx, typ) + case reflect.Ptr: + return encoderOfOptional(ctx, typ) + default: + return &lazyErrorEncoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())} + } +} + +type lazyErrorDecoder struct { + err error +} + +func (decoder *lazyErrorDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.WhatIsNext() != NilValue { + if iter.Error == nil { + iter.Error = decoder.err + } + } else { + iter.Skip() + } +} + +type lazyErrorEncoder struct { + err error +} + +func (encoder 
*lazyErrorEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if ptr == nil { + stream.WriteNil() + } else if stream.Error == nil { + stream.Error = encoder.err + } +} + +func (encoder *lazyErrorEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type placeholderDecoder struct { + decoder ValDecoder +} + +func (decoder *placeholderDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.decoder.Decode(ptr, iter) +} + +type placeholderEncoder struct { + encoder ValEncoder +} + +func (encoder *placeholderEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.encoder.Encode(ptr, stream) +} + +func (encoder *placeholderEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.encoder.IsEmpty(ptr) +} diff --git a/vendor/github.com/json-iterator/go/reflect_array.go b/vendor/github.com/json-iterator/go/reflect_array.go new file mode 100644 index 000000000..13a0b7b08 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_array.go @@ -0,0 +1,104 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "unsafe" +) + +func decoderOfArray(ctx *ctx, typ reflect2.Type) ValDecoder { + arrayType := typ.(*reflect2.UnsafeArrayType) + decoder := decoderOfType(ctx.append("[arrayElem]"), arrayType.Elem()) + return &arrayDecoder{arrayType, decoder} +} + +func encoderOfArray(ctx *ctx, typ reflect2.Type) ValEncoder { + arrayType := typ.(*reflect2.UnsafeArrayType) + if arrayType.Len() == 0 { + return emptyArrayEncoder{} + } + encoder := encoderOfType(ctx.append("[arrayElem]"), arrayType.Elem()) + return &arrayEncoder{arrayType, encoder} +} + +type emptyArrayEncoder struct{} + +func (encoder emptyArrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteEmptyArray() +} + +func (encoder emptyArrayEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return true +} + +type arrayEncoder struct { + arrayType *reflect2.UnsafeArrayType + elemEncoder ValEncoder +} + +func (encoder *arrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteArrayStart() + elemPtr := unsafe.Pointer(ptr) + encoder.elemEncoder.Encode(elemPtr, stream) + for i := 1; i < encoder.arrayType.Len(); i++ { + stream.WriteMore() + elemPtr = encoder.arrayType.UnsafeGetIndex(ptr, i) + encoder.elemEncoder.Encode(elemPtr, stream) + } + stream.WriteArrayEnd() + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%v: %s", encoder.arrayType, stream.Error.Error()) + } +} + +func (encoder *arrayEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type arrayDecoder struct { + arrayType *reflect2.UnsafeArrayType + elemDecoder ValDecoder +} + +func (decoder *arrayDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.doDecode(ptr, iter) + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.arrayType, iter.Error.Error()) + } +} + +func (decoder *arrayDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + arrayType := decoder.arrayType + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return + } + if c != '[' { + iter.ReportError("decode array", "expect [ or n, but found "+string([]byte{c})) + return + } + c = iter.nextToken() + if c == ']' { + return + } + iter.unreadByte() + elemPtr := arrayType.UnsafeGetIndex(ptr, 0) + decoder.elemDecoder.Decode(elemPtr, iter) + length := 1 + for c = iter.nextToken(); c == ','; c = iter.nextToken() { + if length >= arrayType.Len() { + iter.Skip() + continue + } + idx := length + length += 1 + elemPtr = 
arrayType.UnsafeGetIndex(ptr, idx) + decoder.elemDecoder.Decode(elemPtr, iter) + } + if c != ']' { + iter.ReportError("decode array", "expect ], but found "+string([]byte{c})) + return + } +} diff --git a/vendor/github.com/json-iterator/go/reflect_dynamic.go b/vendor/github.com/json-iterator/go/reflect_dynamic.go new file mode 100644 index 000000000..8b6bc8b43 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_dynamic.go @@ -0,0 +1,70 @@ +package jsoniter + +import ( + "github.com/modern-go/reflect2" + "reflect" + "unsafe" +) + +type dynamicEncoder struct { + valType reflect2.Type +} + +func (encoder *dynamicEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + stream.WriteVal(obj) +} + +func (encoder *dynamicEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.valType.UnsafeIndirect(ptr) == nil +} + +type efaceDecoder struct { +} + +func (decoder *efaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + pObj := (*interface{})(ptr) + obj := *pObj + if obj == nil { + *pObj = iter.Read() + return + } + typ := reflect2.TypeOf(obj) + if typ.Kind() != reflect.Ptr { + *pObj = iter.Read() + return + } + ptrType := typ.(*reflect2.UnsafePtrType) + ptrElemType := ptrType.Elem() + if iter.WhatIsNext() == NilValue { + if ptrElemType.Kind() != reflect.Ptr { + iter.skipFourBytes('n', 'u', 'l', 'l') + *pObj = nil + return + } + } + if reflect2.IsNil(obj) { + obj := ptrElemType.New() + iter.ReadVal(obj) + *pObj = obj + return + } + iter.ReadVal(obj) +} + +type ifaceDecoder struct { + valType *reflect2.UnsafeIFaceType +} + +func (decoder *ifaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + decoder.valType.UnsafeSet(ptr, decoder.valType.UnsafeNew()) + return + } + obj := decoder.valType.UnsafeIndirect(ptr) + if reflect2.IsNil(obj) { + iter.ReportError("decode non empty interface", "can not unmarshal into nil") + return + } + iter.ReadVal(obj) +} diff --git a/vendor/github.com/json-iterator/go/reflect_extension.go b/vendor/github.com/json-iterator/go/reflect_extension.go new file mode 100644 index 000000000..74a97bfe5 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_extension.go @@ -0,0 +1,483 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "reflect" + "sort" + "strings" + "unicode" + "unsafe" +) + +var typeDecoders = map[string]ValDecoder{} +var fieldDecoders = map[string]ValDecoder{} +var typeEncoders = map[string]ValEncoder{} +var fieldEncoders = map[string]ValEncoder{} +var extensions = []Extension{} + +// StructDescriptor describes how a struct should be encoded and decoded +type StructDescriptor struct { + Type reflect2.Type + Fields []*Binding +} + +// GetField gets one field from the descriptor by its name. +// A map is not used here so that field order is preserved. +func (structDescriptor *StructDescriptor) GetField(fieldName string) *Binding { + for _, binding := range structDescriptor.Fields { + if binding.Field.Name() == fieldName { + return binding + } + } + return nil +} + +// Binding describes how a struct field should be encoded and decoded +type Binding struct { + levels []int + Field reflect2.StructField + FromNames []string + ToNames []string + Encoder ValEncoder + Decoder ValDecoder +} + +// Extension is the single SPI for all customization. Customize encoding/decoding by specifying alternate encoders/decoders. +// Fields can also be renamed via UpdateStructDescriptor. 
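+// +// Example (an illustrative sketch): embed DummyExtension to inherit no-op hooks, +// override only the hooks you need, and install the extension once with RegisterExtension: +// +// type renameExtension struct{ DummyExtension } +// +// func (e *renameExtension) UpdateStructDescriptor(sd *StructDescriptor) { +// if f := sd.GetField("Name"); f != nil { +// f.ToNames = []string{"name"} +// } +// } +// +// func init() { RegisterExtension(&renameExtension{}) } 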
+type Extension interface { + UpdateStructDescriptor(structDescriptor *StructDescriptor) + CreateMapKeyDecoder(typ reflect2.Type) ValDecoder + CreateMapKeyEncoder(typ reflect2.Type) ValEncoder + CreateDecoder(typ reflect2.Type) ValDecoder + CreateEncoder(typ reflect2.Type) ValEncoder + DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder + DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder +} + +// DummyExtension provides no-op implementations of every Extension method; embed it to implement only the hooks you need. +type DummyExtension struct { +} + +// UpdateStructDescriptor No-op +func (extension *DummyExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { +} + +// CreateMapKeyDecoder No-op +func (extension *DummyExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateMapKeyEncoder No-op +func (extension *DummyExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// CreateDecoder No-op +func (extension *DummyExtension) CreateDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateEncoder No-op +func (extension *DummyExtension) CreateEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// DecorateDecoder No-op +func (extension *DummyExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { + return decoder +} + +// DecorateEncoder No-op +func (extension *DummyExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { + return encoder +} + +// EncoderExtension resolves encoders by looking them up in a map keyed by type. +type EncoderExtension map[reflect2.Type]ValEncoder + +// UpdateStructDescriptor No-op +func (extension EncoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { +} + +// CreateDecoder No-op +func (extension EncoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateEncoder gets the encoder from the map +func (extension EncoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder { + return extension[typ] +} + +// CreateMapKeyDecoder No-op +func (extension EncoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateMapKeyEncoder No-op +func (extension EncoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// DecorateDecoder No-op +func (extension EncoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { + return decoder +} + +// DecorateEncoder No-op +func (extension EncoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { + return encoder +} + +// DecoderExtension resolves decoders by looking them up in a map keyed by type. +type DecoderExtension map[reflect2.Type]ValDecoder + +// UpdateStructDescriptor No-op +func (extension DecoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { +} + +// CreateMapKeyDecoder No-op +func (extension DecoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateMapKeyEncoder No-op +func (extension DecoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// CreateDecoder gets the decoder from the map +func (extension DecoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder { + return extension[typ] +} + +// CreateEncoder No-op +func (extension DecoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// DecorateDecoder No-op +func (extension DecoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { + return decoder +} + +// DecorateEncoder No-op +func (extension DecoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { + return encoder 
+} + +type funcDecoder struct { + fun DecoderFunc +} + +func (decoder *funcDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.fun(ptr, iter) +} + +type funcEncoder struct { + fun EncoderFunc + isEmptyFunc func(ptr unsafe.Pointer) bool +} + +func (encoder *funcEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.fun(ptr, stream) +} + +func (encoder *funcEncoder) IsEmpty(ptr unsafe.Pointer) bool { + if encoder.isEmptyFunc == nil { + return false + } + return encoder.isEmptyFunc(ptr) +} + +// DecoderFunc is the function form of a TypeDecoder +type DecoderFunc func(ptr unsafe.Pointer, iter *Iterator) + +// EncoderFunc is the function form of a TypeEncoder +type EncoderFunc func(ptr unsafe.Pointer, stream *Stream) + +// RegisterTypeDecoderFunc registers a TypeDecoder for a type, given as a function +func RegisterTypeDecoderFunc(typ string, fun DecoderFunc) { + typeDecoders[typ] = &funcDecoder{fun} +} + +// RegisterTypeDecoder registers a TypeDecoder for a type +func RegisterTypeDecoder(typ string, decoder ValDecoder) { + typeDecoders[typ] = decoder +} + +// RegisterFieldDecoderFunc registers a TypeDecoder for a struct field, given as a function +func RegisterFieldDecoderFunc(typ string, field string, fun DecoderFunc) { + RegisterFieldDecoder(typ, field, &funcDecoder{fun}) +} + +// RegisterFieldDecoder registers a TypeDecoder for a struct field +func RegisterFieldDecoder(typ string, field string, decoder ValDecoder) { + fieldDecoders[fmt.Sprintf("%s/%s", typ, field)] = decoder +} + +// RegisterTypeEncoderFunc registers a TypeEncoder for a type, given encode/isEmpty functions +func RegisterTypeEncoderFunc(typ string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) { + typeEncoders[typ] = &funcEncoder{fun, isEmptyFunc} +} + +// RegisterTypeEncoder registers a TypeEncoder for a type +func RegisterTypeEncoder(typ string, encoder ValEncoder) { + typeEncoders[typ] = encoder +} + +// RegisterFieldEncoderFunc registers a TypeEncoder for a struct field, given encode/isEmpty functions +func RegisterFieldEncoderFunc(typ string, field string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) { + RegisterFieldEncoder(typ, field, &funcEncoder{fun, isEmptyFunc}) +} + +// RegisterFieldEncoder registers a TypeEncoder for a struct field +func RegisterFieldEncoder(typ string, field string, encoder ValEncoder) { + fieldEncoders[fmt.Sprintf("%s/%s", typ, field)] = encoder +} + +// RegisterExtension registers an extension +func RegisterExtension(extension Extension) { + extensions = append(extensions, extension) +} + +func getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := _getTypeDecoderFromExtension(ctx, typ) + if decoder != nil { + for _, extension := range extensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder) + for _, extension := range ctx.extraExtensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + } + return decoder +} +func _getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder { + for _, extension := range extensions { + decoder := extension.CreateDecoder(typ) + if decoder != nil { + return decoder + } + } + decoder := ctx.decoderExtension.CreateDecoder(typ) + if decoder != nil { + return decoder + } + for _, extension := range ctx.extraExtensions { + decoder := extension.CreateDecoder(typ) + if decoder != nil { + return decoder + } + } + typeName := typ.String() + decoder = typeDecoders[typeName] + if decoder != nil { + return decoder + } + if typ.Kind() == reflect.Ptr { + ptrType := 
typ.(*reflect2.UnsafePtrType) + decoder := typeDecoders[ptrType.Elem().String()] + if decoder != nil { + return &OptionalDecoder{ptrType.Elem(), decoder} + } + } + return nil +} + +func getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := _getTypeEncoderFromExtension(ctx, typ) + if encoder != nil { + for _, extension := range extensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder) + for _, extension := range ctx.extraExtensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + } + return encoder +} + +func _getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder { + for _, extension := range extensions { + encoder := extension.CreateEncoder(typ) + if encoder != nil { + return encoder + } + } + encoder := ctx.encoderExtension.CreateEncoder(typ) + if encoder != nil { + return encoder + } + for _, extension := range ctx.extraExtensions { + encoder := extension.CreateEncoder(typ) + if encoder != nil { + return encoder + } + } + typeName := typ.String() + encoder = typeEncoders[typeName] + if encoder != nil { + return encoder + } + if typ.Kind() == reflect.Ptr { + typePtr := typ.(*reflect2.UnsafePtrType) + encoder := typeEncoders[typePtr.Elem().String()] + if encoder != nil { + return &OptionalEncoder{encoder} + } + } + return nil +} + +func describeStruct(ctx *ctx, typ reflect2.Type) *StructDescriptor { + structType := typ.(*reflect2.UnsafeStructType) + embeddedBindings := []*Binding{} + bindings := []*Binding{} + for i := 0; i < structType.NumField(); i++ { + field := structType.Field(i) + tag, hastag := field.Tag().Lookup(ctx.getTagKey()) + if ctx.onlyTaggedField && !hastag && !field.Anonymous() { + continue + } + if tag == "-" || field.Name() == "_" { + continue + } + tagParts := strings.Split(tag, ",") + if field.Anonymous() && (tag == "" || tagParts[0] == "") { + if field.Type().Kind() == reflect.Struct { + structDescriptor := describeStruct(ctx, field.Type()) + for _, binding := range structDescriptor.Fields { + binding.levels = append([]int{i}, binding.levels...) + omitempty := binding.Encoder.(*structFieldEncoder).omitempty + binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty} + binding.Decoder = &structFieldDecoder{field, binding.Decoder} + embeddedBindings = append(embeddedBindings, binding) + } + continue + } else if field.Type().Kind() == reflect.Ptr { + ptrType := field.Type().(*reflect2.UnsafePtrType) + if ptrType.Elem().Kind() == reflect.Struct { + structDescriptor := describeStruct(ctx, ptrType.Elem()) + for _, binding := range structDescriptor.Fields { + binding.levels = append([]int{i}, binding.levels...) 
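+ // promote the embedded pointer's fields: each binding's codecs are wrapped below so the pointer is dereferenced before the inner struct-field encoder/decoder runs 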
+ omitempty := binding.Encoder.(*structFieldEncoder).omitempty + binding.Encoder = &dereferenceEncoder{binding.Encoder} + binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty} + binding.Decoder = &dereferenceDecoder{ptrType.Elem(), binding.Decoder} + binding.Decoder = &structFieldDecoder{field, binding.Decoder} + embeddedBindings = append(embeddedBindings, binding) + } + continue + } + } + } + fieldNames := calcFieldNames(field.Name(), tagParts[0], tag) + fieldCacheKey := fmt.Sprintf("%s/%s", typ.String(), field.Name()) + decoder := fieldDecoders[fieldCacheKey] + if decoder == nil { + decoder = decoderOfType(ctx.append(field.Name()), field.Type()) + } + encoder := fieldEncoders[fieldCacheKey] + if encoder == nil { + encoder = encoderOfType(ctx.append(field.Name()), field.Type()) + } + binding := &Binding{ + Field: field, + FromNames: fieldNames, + ToNames: fieldNames, + Decoder: decoder, + Encoder: encoder, + } + binding.levels = []int{i} + bindings = append(bindings, binding) + } + return createStructDescriptor(ctx, typ, bindings, embeddedBindings) +} +func createStructDescriptor(ctx *ctx, typ reflect2.Type, bindings []*Binding, embeddedBindings []*Binding) *StructDescriptor { + structDescriptor := &StructDescriptor{ + Type: typ, + Fields: bindings, + } + for _, extension := range extensions { + extension.UpdateStructDescriptor(structDescriptor) + } + ctx.encoderExtension.UpdateStructDescriptor(structDescriptor) + ctx.decoderExtension.UpdateStructDescriptor(structDescriptor) + for _, extension := range ctx.extraExtensions { + extension.UpdateStructDescriptor(structDescriptor) + } + processTags(structDescriptor, ctx.frozenConfig) + // merge normal & embedded bindings & sort with original order + allBindings := sortableBindings(append(embeddedBindings, structDescriptor.Fields...)) + sort.Sort(allBindings) + structDescriptor.Fields = allBindings + return structDescriptor +} + +type sortableBindings []*Binding + +func (bindings sortableBindings) Len() int { + return len(bindings) +} + +func (bindings sortableBindings) Less(i, j int) bool { + left := bindings[i].levels + right := bindings[j].levels + k := 0 + for { + if left[k] < right[k] { + return true + } else if left[k] > right[k] { + return false + } + k++ + } +} + +func (bindings sortableBindings) Swap(i, j int) { + bindings[i], bindings[j] = bindings[j], bindings[i] +} + +func processTags(structDescriptor *StructDescriptor, cfg *frozenConfig) { + for _, binding := range structDescriptor.Fields { + shouldOmitEmpty := false + tagParts := strings.Split(binding.Field.Tag().Get(cfg.getTagKey()), ",") + for _, tagPart := range tagParts[1:] { + if tagPart == "omitempty" { + shouldOmitEmpty = true + } else if tagPart == "string" { + if binding.Field.Type().Kind() == reflect.String { + binding.Decoder = &stringModeStringDecoder{binding.Decoder, cfg} + binding.Encoder = &stringModeStringEncoder{binding.Encoder, cfg} + } else { + binding.Decoder = &stringModeNumberDecoder{binding.Decoder} + binding.Encoder = &stringModeNumberEncoder{binding.Encoder} + } + } + } + binding.Decoder = &structFieldDecoder{binding.Field, binding.Decoder} + binding.Encoder = &structFieldEncoder{binding.Field, binding.Encoder, shouldOmitEmpty} + } +} + +func calcFieldNames(originalFieldName string, tagProvidedFieldName string, wholeTag string) []string { + // ignore? + if wholeTag == "-" { + return []string{} + } + // rename? 
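+ // a non-empty name in the tag's first segment overrides the Go field name 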
+ var fieldNames []string + if tagProvidedFieldName == "" { + fieldNames = []string{originalFieldName} + } else { + fieldNames = []string{tagProvidedFieldName} + } + // private? + isNotExported := unicode.IsLower(rune(originalFieldName[0])) || originalFieldName[0] == '_' + if isNotExported { + fieldNames = []string{} + } + return fieldNames +} diff --git a/vendor/github.com/json-iterator/go/reflect_json_number.go b/vendor/github.com/json-iterator/go/reflect_json_number.go new file mode 100644 index 000000000..98d45c1ec --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_json_number.go @@ -0,0 +1,112 @@ +package jsoniter + +import ( + "encoding/json" + "github.com/modern-go/reflect2" + "strconv" + "unsafe" +) + +type Number string + +// String returns the literal text of the number. +func (n Number) String() string { return string(n) } + +// Float64 returns the number as a float64. +func (n Number) Float64() (float64, error) { + return strconv.ParseFloat(string(n), 64) +} + +// Int64 returns the number as an int64. +func (n Number) Int64() (int64, error) { + return strconv.ParseInt(string(n), 10, 64) +} + +func CastJsonNumber(val interface{}) (string, bool) { + switch typedVal := val.(type) { + case json.Number: + return string(typedVal), true + case Number: + return string(typedVal), true + } + return "", false +} + +var jsonNumberType = reflect2.TypeOfPtr((*json.Number)(nil)).Elem() +var jsoniterNumberType = reflect2.TypeOfPtr((*Number)(nil)).Elem() + +func createDecoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ.AssignableTo(jsonNumberType) { + return &jsonNumberCodec{} + } + if typ.AssignableTo(jsoniterNumberType) { + return &jsoniterNumberCodec{} + } + return nil +} + +func createEncoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ.AssignableTo(jsonNumberType) { + return &jsonNumberCodec{} + } + if typ.AssignableTo(jsoniterNumberType) { + return &jsoniterNumberCodec{} + } + return nil +} + +type jsonNumberCodec struct { +} + +func (codec *jsonNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + switch iter.WhatIsNext() { + case StringValue: + *((*json.Number)(ptr)) = json.Number(iter.ReadString()) + case NilValue: + iter.skipFourBytes('n', 'u', 'l', 'l') + *((*json.Number)(ptr)) = "" + default: + *((*json.Number)(ptr)) = json.Number([]byte(iter.readNumberAsString())) + } +} + +func (codec *jsonNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + number := *((*json.Number)(ptr)) + if len(number) == 0 { + stream.writeByte('0') + } else { + stream.WriteRaw(string(number)) + } +} + +func (codec *jsonNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*json.Number)(ptr))) == 0 +} + +type jsoniterNumberCodec struct { +} + +func (codec *jsoniterNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + switch iter.WhatIsNext() { + case StringValue: + *((*Number)(ptr)) = Number(iter.ReadString()) + case NilValue: + iter.skipFourBytes('n', 'u', 'l', 'l') + *((*Number)(ptr)) = "" + default: + *((*Number)(ptr)) = Number([]byte(iter.readNumberAsString())) + } +} + +func (codec *jsoniterNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + number := *((*Number)(ptr)) + if len(number) == 0 { + stream.writeByte('0') + } else { + stream.WriteRaw(string(number)) + } +} + +func (codec *jsoniterNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*Number)(ptr))) == 0 +} diff --git a/vendor/github.com/json-iterator/go/reflect_json_raw_message.go b/vendor/github.com/json-iterator/go/reflect_json_raw_message.go new 
file mode 100644 index 000000000..eba434f2f --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_json_raw_message.go @@ -0,0 +1,76 @@ +package jsoniter + +import ( + "encoding/json" + "github.com/modern-go/reflect2" + "unsafe" +) + +var jsonRawMessageType = reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem() +var jsoniterRawMessageType = reflect2.TypeOfPtr((*RawMessage)(nil)).Elem() + +func createEncoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ == jsonRawMessageType { + return &jsonRawMessageCodec{} + } + if typ == jsoniterRawMessageType { + return &jsoniterRawMessageCodec{} + } + return nil +} + +func createDecoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ == jsonRawMessageType { + return &jsonRawMessageCodec{} + } + if typ == jsoniterRawMessageType { + return &jsoniterRawMessageCodec{} + } + return nil +} + +type jsonRawMessageCodec struct { +} + +func (codec *jsonRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + *((*json.RawMessage)(ptr)) = nil + } else { + *((*json.RawMessage)(ptr)) = iter.SkipAndReturnBytes() + } +} + +func (codec *jsonRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + if *((*json.RawMessage)(ptr)) == nil { + stream.WriteNil() + } else { + stream.WriteRaw(string(*((*json.RawMessage)(ptr)))) + } +} + +func (codec *jsonRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*json.RawMessage)(ptr))) == 0 +} + +type jsoniterRawMessageCodec struct { +} + +func (codec *jsoniterRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + *((*RawMessage)(ptr)) = nil + } else { + *((*RawMessage)(ptr)) = iter.SkipAndReturnBytes() + } +} + +func (codec *jsoniterRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + if *((*RawMessage)(ptr)) == nil { + stream.WriteNil() + } else { + stream.WriteRaw(string(*((*RawMessage)(ptr)))) + } +} + +func (codec *jsoniterRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*RawMessage)(ptr))) == 0 +} diff --git a/vendor/github.com/json-iterator/go/reflect_map.go b/vendor/github.com/json-iterator/go/reflect_map.go new file mode 100644 index 000000000..582967130 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_map.go @@ -0,0 +1,346 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "reflect" + "sort" + "unsafe" +) + +func decoderOfMap(ctx *ctx, typ reflect2.Type) ValDecoder { + mapType := typ.(*reflect2.UnsafeMapType) + keyDecoder := decoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()) + elemDecoder := decoderOfType(ctx.append("[mapElem]"), mapType.Elem()) + return &mapDecoder{ + mapType: mapType, + keyType: mapType.Key(), + elemType: mapType.Elem(), + keyDecoder: keyDecoder, + elemDecoder: elemDecoder, + } +} + +func encoderOfMap(ctx *ctx, typ reflect2.Type) ValEncoder { + mapType := typ.(*reflect2.UnsafeMapType) + if ctx.sortMapKeys { + return &sortKeysMapEncoder{ + mapType: mapType, + keyEncoder: encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()), + elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()), + } + } + return &mapEncoder{ + mapType: mapType, + keyEncoder: encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()), + elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()), + } +} + +func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := ctx.decoderExtension.CreateMapKeyDecoder(typ) + if decoder != nil { + return decoder + } + for _, extension := range ctx.extraExtensions 
{ + decoder := extension.CreateMapKeyDecoder(typ) + if decoder != nil { + return decoder + } + } + + ptrType := reflect2.PtrTo(typ) + if ptrType.Implements(unmarshalerType) { + return &referenceDecoder{ + &unmarshalerDecoder{ + valType: ptrType, + }, + } + } + if typ.Implements(unmarshalerType) { + return &unmarshalerDecoder{ + valType: typ, + } + } + if ptrType.Implements(textUnmarshalerType) { + return &referenceDecoder{ + &textUnmarshalerDecoder{ + valType: ptrType, + }, + } + } + if typ.Implements(textUnmarshalerType) { + return &textUnmarshalerDecoder{ + valType: typ, + } + } + + switch typ.Kind() { + case reflect.String: + return decoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String)) + case reflect.Bool, + reflect.Uint8, reflect.Int8, + reflect.Uint16, reflect.Int16, + reflect.Uint32, reflect.Int32, + reflect.Uint64, reflect.Int64, + reflect.Uint, reflect.Int, + reflect.Float32, reflect.Float64, + reflect.Uintptr: + typ = reflect2.DefaultTypeOfKind(typ.Kind()) + return &numericMapKeyDecoder{decoderOfType(ctx, typ)} + default: + return &lazyErrorDecoder{err: fmt.Errorf("unsupported map key type: %v", typ)} + } +} + +func encoderOfMapKey(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := ctx.encoderExtension.CreateMapKeyEncoder(typ) + if encoder != nil { + return encoder + } + for _, extension := range ctx.extraExtensions { + encoder := extension.CreateMapKeyEncoder(typ) + if encoder != nil { + return encoder + } + } + + if typ == textMarshalerType { + return &directTextMarshalerEncoder{ + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + } + } + if typ.Implements(textMarshalerType) { + return &textMarshalerEncoder{ + valType: typ, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + } + } + + switch typ.Kind() { + case reflect.String: + return encoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String)) + case reflect.Bool, + reflect.Uint8, reflect.Int8, + reflect.Uint16, reflect.Int16, + reflect.Uint32, reflect.Int32, + reflect.Uint64, reflect.Int64, + reflect.Uint, reflect.Int, + reflect.Float32, reflect.Float64, + reflect.Uintptr: + typ = reflect2.DefaultTypeOfKind(typ.Kind()) + return &numericMapKeyEncoder{encoderOfType(ctx, typ)} + default: + if typ.Kind() == reflect.Interface { + return &dynamicMapKeyEncoder{ctx, typ} + } + return &lazyErrorEncoder{err: fmt.Errorf("unsupported map key type: %v", typ)} + } +} + +type mapDecoder struct { + mapType *reflect2.UnsafeMapType + keyType reflect2.Type + elemType reflect2.Type + keyDecoder ValDecoder + elemDecoder ValDecoder +} + +func (decoder *mapDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + mapType := decoder.mapType + c := iter.nextToken() + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + *(*unsafe.Pointer)(ptr) = nil + mapType.UnsafeSet(ptr, mapType.UnsafeNew()) + return + } + if mapType.UnsafeIsNil(ptr) { + mapType.UnsafeSet(ptr, mapType.UnsafeMakeMap(0)) + } + if c != '{' { + iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c})) + return + } + c = iter.nextToken() + if c == '}' { + return + } + iter.unreadByte() + key := decoder.keyType.UnsafeNew() + decoder.keyDecoder.Decode(key, iter) + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + return + } + elem := decoder.elemType.UnsafeNew() + decoder.elemDecoder.Decode(elem, iter) + decoder.mapType.UnsafeSetIndex(ptr, key, elem) + for c = iter.nextToken(); c == ','; c = iter.nextToken() { + key := decoder.keyType.UnsafeNew() + 
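+ // decode the remaining key/value pairs; like the first pair, key and element are freshly allocated and stored via UnsafeSetIndex 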
decoder.keyDecoder.Decode(key, iter) + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + return + } + elem := decoder.elemType.UnsafeNew() + decoder.elemDecoder.Decode(elem, iter) + decoder.mapType.UnsafeSetIndex(ptr, key, elem) + } + if c != '}' { + iter.ReportError("ReadMapCB", `expect }, but found `+string([]byte{c})) + } +} + +type numericMapKeyDecoder struct { + decoder ValDecoder +} + +func (decoder *numericMapKeyDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + if c != '"' { + iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c})) + return + } + decoder.decoder.Decode(ptr, iter) + c = iter.nextToken() + if c != '"' { + iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c})) + return + } +} + +type numericMapKeyEncoder struct { + encoder ValEncoder +} + +func (encoder *numericMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.writeByte('"') + encoder.encoder.Encode(ptr, stream) + stream.writeByte('"') +} + +func (encoder *numericMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type dynamicMapKeyEncoder struct { + ctx *ctx + valType reflect2.Type +} + +func (encoder *dynamicMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).Encode(reflect2.PtrOf(obj), stream) +} + +func (encoder *dynamicMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool { + obj := encoder.valType.UnsafeIndirect(ptr) + return encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).IsEmpty(reflect2.PtrOf(obj)) +} + +type mapEncoder struct { + mapType *reflect2.UnsafeMapType + keyEncoder ValEncoder + elemEncoder ValEncoder +} + +func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *(*unsafe.Pointer)(ptr) == nil { + stream.WriteNil() + return + } + stream.WriteObjectStart() + iter := encoder.mapType.UnsafeIterate(ptr) + for i := 0; iter.HasNext(); i++ { + if i != 0 { + stream.WriteMore() + } + key, elem := iter.UnsafeNext() + encoder.keyEncoder.Encode(key, stream) + if stream.indention > 0 { + stream.writeTwoBytes(byte(':'), byte(' ')) + } else { + stream.writeByte(':') + } + encoder.elemEncoder.Encode(elem, stream) + } + stream.WriteObjectEnd() +} + +func (encoder *mapEncoder) IsEmpty(ptr unsafe.Pointer) bool { + iter := encoder.mapType.UnsafeIterate(ptr) + return !iter.HasNext() +} + +type sortKeysMapEncoder struct { + mapType *reflect2.UnsafeMapType + keyEncoder ValEncoder + elemEncoder ValEncoder +} + +func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *(*unsafe.Pointer)(ptr) == nil { + stream.WriteNil() + return + } + stream.WriteObjectStart() + mapIter := encoder.mapType.UnsafeIterate(ptr) + subStream := stream.cfg.BorrowStream(nil) + subStream.Attachment = stream.Attachment + subIter := stream.cfg.BorrowIterator(nil) + keyValues := encodedKeyValues{} + for mapIter.HasNext() { + key, elem := mapIter.UnsafeNext() + subStreamIndex := subStream.Buffered() + encoder.keyEncoder.Encode(key, subStream) + if subStream.Error != nil && subStream.Error != io.EOF && stream.Error == nil { + stream.Error = subStream.Error + } + encodedKey := subStream.Buffer()[subStreamIndex:] + subIter.ResetBytes(encodedKey) + decodedKey := subIter.ReadString() + if stream.indention > 0 { + subStream.writeTwoBytes(byte(':'), byte(' ')) + } else { + subStream.writeByte(':') + } + 
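+ // the element is encoded into the same scratch stream; the complete key:value slice is captured below, and the pairs are sorted by decoded key before being flushed to the real stream 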
encoder.elemEncoder.Encode(elem, subStream) + keyValues = append(keyValues, encodedKV{ + key: decodedKey, + keyValue: subStream.Buffer()[subStreamIndex:], + }) + } + sort.Sort(keyValues) + for i, keyValue := range keyValues { + if i != 0 { + stream.WriteMore() + } + stream.Write(keyValue.keyValue) + } + if subStream.Error != nil && stream.Error == nil { + stream.Error = subStream.Error + } + stream.WriteObjectEnd() + stream.cfg.ReturnStream(subStream) + stream.cfg.ReturnIterator(subIter) +} + +func (encoder *sortKeysMapEncoder) IsEmpty(ptr unsafe.Pointer) bool { + iter := encoder.mapType.UnsafeIterate(ptr) + return !iter.HasNext() +} + +type encodedKeyValues []encodedKV + +type encodedKV struct { + key string + keyValue []byte +} + +func (sv encodedKeyValues) Len() int { return len(sv) } +func (sv encodedKeyValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } +func (sv encodedKeyValues) Less(i, j int) bool { return sv[i].key < sv[j].key } diff --git a/vendor/github.com/json-iterator/go/reflect_marshaler.go b/vendor/github.com/json-iterator/go/reflect_marshaler.go new file mode 100644 index 000000000..3e21f3756 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_marshaler.go @@ -0,0 +1,225 @@ +package jsoniter + +import ( + "encoding" + "encoding/json" + "unsafe" + + "github.com/modern-go/reflect2" +) + +var marshalerType = reflect2.TypeOfPtr((*json.Marshaler)(nil)).Elem() +var unmarshalerType = reflect2.TypeOfPtr((*json.Unmarshaler)(nil)).Elem() +var textMarshalerType = reflect2.TypeOfPtr((*encoding.TextMarshaler)(nil)).Elem() +var textUnmarshalerType = reflect2.TypeOfPtr((*encoding.TextUnmarshaler)(nil)).Elem() + +func createDecoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValDecoder { + ptrType := reflect2.PtrTo(typ) + if ptrType.Implements(unmarshalerType) { + return &referenceDecoder{ + &unmarshalerDecoder{ptrType}, + } + } + if ptrType.Implements(textUnmarshalerType) { + return &referenceDecoder{ + &textUnmarshalerDecoder{ptrType}, + } + } + return nil +} + +func createEncoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ == marshalerType { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &directMarshalerEncoder{ + checkIsEmpty: checkIsEmpty, + } + return encoder + } + if typ.Implements(marshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &marshalerEncoder{ + valType: typ, + checkIsEmpty: checkIsEmpty, + } + return encoder + } + ptrType := reflect2.PtrTo(typ) + if ctx.prefix != "" && ptrType.Implements(marshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, ptrType) + var encoder ValEncoder = &marshalerEncoder{ + valType: ptrType, + checkIsEmpty: checkIsEmpty, + } + return &referenceEncoder{encoder} + } + if typ == textMarshalerType { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &directTextMarshalerEncoder{ + checkIsEmpty: checkIsEmpty, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + } + return encoder + } + if typ.Implements(textMarshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &textMarshalerEncoder{ + valType: typ, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + checkIsEmpty: checkIsEmpty, + } + return encoder + } + // if prefix is empty, the type is the root type + if ctx.prefix != "" && ptrType.Implements(textMarshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, ptrType) + var encoder ValEncoder = &textMarshalerEncoder{ + valType: ptrType, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + 
checkIsEmpty: checkIsEmpty, + } + return &referenceEncoder{encoder} + } + return nil +} + +type marshalerEncoder struct { + checkIsEmpty checkIsEmpty + valType reflect2.Type +} + +func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + if encoder.valType.IsNullable() && reflect2.IsNil(obj) { + stream.WriteNil() + return + } + marshaler := obj.(json.Marshaler) + bytes, err := marshaler.MarshalJSON() + if err != nil { + stream.Error = err + } else { + // html escape was already done by jsoniter, + // but the extra '\n' should be trimmed + l := len(bytes) + if l > 0 && bytes[l-1] == '\n' { + bytes = bytes[:l-1] + } + stream.Write(bytes) + } +} + +func (encoder *marshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type directMarshalerEncoder struct { + checkIsEmpty checkIsEmpty +} + +func (encoder *directMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + marshaler := *(*json.Marshaler)(ptr) + if marshaler == nil { + stream.WriteNil() + return + } + bytes, err := marshaler.MarshalJSON() + if err != nil { + stream.Error = err + } else { + stream.Write(bytes) + } +} + +func (encoder *directMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type textMarshalerEncoder struct { + valType reflect2.Type + stringEncoder ValEncoder + checkIsEmpty checkIsEmpty +} + +func (encoder *textMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + if encoder.valType.IsNullable() && reflect2.IsNil(obj) { + stream.WriteNil() + return + } + marshaler := (obj).(encoding.TextMarshaler) + bytes, err := marshaler.MarshalText() + if err != nil { + stream.Error = err + } else { + str := string(bytes) + encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream) + } +} + +func (encoder *textMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type directTextMarshalerEncoder struct { + stringEncoder ValEncoder + checkIsEmpty checkIsEmpty +} + +func (encoder *directTextMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + marshaler := *(*encoding.TextMarshaler)(ptr) + if marshaler == nil { + stream.WriteNil() + return + } + bytes, err := marshaler.MarshalText() + if err != nil { + stream.Error = err + } else { + str := string(bytes) + encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream) + } +} + +func (encoder *directTextMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type unmarshalerDecoder struct { + valType reflect2.Type +} + +func (decoder *unmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + valType := decoder.valType + obj := valType.UnsafeIndirect(ptr) + unmarshaler := obj.(json.Unmarshaler) + iter.nextToken() + iter.unreadByte() // skip spaces + bytes := iter.SkipAndReturnBytes() + err := unmarshaler.UnmarshalJSON(bytes) + if err != nil { + iter.ReportError("unmarshalerDecoder", err.Error()) + } +} + +type textUnmarshalerDecoder struct { + valType reflect2.Type +} + +func (decoder *textUnmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + valType := decoder.valType + obj := valType.UnsafeIndirect(ptr) + if reflect2.IsNil(obj) { + ptrType := valType.(*reflect2.UnsafePtrType) + elemType := ptrType.Elem() + elem := elemType.UnsafeNew() + ptrType.UnsafeSet(ptr, unsafe.Pointer(&elem)) + obj = valType.UnsafeIndirect(ptr) + } + unmarshaler := 
(obj).(encoding.TextUnmarshaler) + str := iter.ReadString() + err := unmarshaler.UnmarshalText([]byte(str)) + if err != nil { + iter.ReportError("textUnmarshalerDecoder", err.Error()) + } +} diff --git a/vendor/github.com/json-iterator/go/reflect_native.go b/vendor/github.com/json-iterator/go/reflect_native.go new file mode 100644 index 000000000..f88722d14 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_native.go @@ -0,0 +1,453 @@ +package jsoniter + +import ( + "encoding/base64" + "reflect" + "strconv" + "unsafe" + + "github.com/modern-go/reflect2" +) + +const ptrSize = 32 << uintptr(^uintptr(0)>>63) + +func createEncoderOfNative(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 { + sliceDecoder := decoderOfSlice(ctx, typ) + return &base64Codec{sliceDecoder: sliceDecoder} + } + typeName := typ.String() + kind := typ.Kind() + switch kind { + case reflect.String: + if typeName != "string" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem()) + } + return &stringCodec{} + case reflect.Int: + if typeName != "int" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &int32Codec{} + } + return &int64Codec{} + case reflect.Int8: + if typeName != "int8" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem()) + } + return &int8Codec{} + case reflect.Int16: + if typeName != "int16" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem()) + } + return &int16Codec{} + case reflect.Int32: + if typeName != "int32" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem()) + } + return &int32Codec{} + case reflect.Int64: + if typeName != "int64" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem()) + } + return &int64Codec{} + case reflect.Uint: + if typeName != "uint" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint8: + if typeName != "uint8" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem()) + } + return &uint8Codec{} + case reflect.Uint16: + if typeName != "uint16" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem()) + } + return &uint16Codec{} + case reflect.Uint32: + if typeName != "uint32" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem()) + } + return &uint32Codec{} + case reflect.Uintptr: + if typeName != "uintptr" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem()) + } + if ptrSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint64: + if typeName != "uint64" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem()) + } + return &uint64Codec{} + case reflect.Float32: + if typeName != "float32" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem()) + } + return &float32Codec{} + case reflect.Float64: + if typeName != "float64" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem()) + } + return &float64Codec{} + case reflect.Bool: + if typeName != "bool" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem()) + } + return &boolCodec{} + } + return nil +} + +func createDecoderOfNative(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 { + sliceDecoder := decoderOfSlice(ctx, 
typ) + return &base64Codec{sliceDecoder: sliceDecoder} + } + typeName := typ.String() + switch typ.Kind() { + case reflect.String: + if typeName != "string" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem()) + } + return &stringCodec{} + case reflect.Int: + if typeName != "int" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &int32Codec{} + } + return &int64Codec{} + case reflect.Int8: + if typeName != "int8" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem()) + } + return &int8Codec{} + case reflect.Int16: + if typeName != "int16" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem()) + } + return &int16Codec{} + case reflect.Int32: + if typeName != "int32" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem()) + } + return &int32Codec{} + case reflect.Int64: + if typeName != "int64" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem()) + } + return &int64Codec{} + case reflect.Uint: + if typeName != "uint" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint8: + if typeName != "uint8" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem()) + } + return &uint8Codec{} + case reflect.Uint16: + if typeName != "uint16" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem()) + } + return &uint16Codec{} + case reflect.Uint32: + if typeName != "uint32" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem()) + } + return &uint32Codec{} + case reflect.Uintptr: + if typeName != "uintptr" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem()) + } + if ptrSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint64: + if typeName != "uint64" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem()) + } + return &uint64Codec{} + case reflect.Float32: + if typeName != "float32" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem()) + } + return &float32Codec{} + case reflect.Float64: + if typeName != "float64" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem()) + } + return &float64Codec{} + case reflect.Bool: + if typeName != "bool" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem()) + } + return &boolCodec{} + } + return nil +} + +type stringCodec struct { +} + +func (codec *stringCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*string)(ptr)) = iter.ReadString() +} + +func (codec *stringCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + str := *((*string)(ptr)) + stream.WriteString(str) +} + +func (codec *stringCodec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*string)(ptr)) == "" +} + +type int8Codec struct { +} + +func (codec *int8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int8)(ptr)) = iter.ReadInt8() + } +} + +func (codec *int8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt8(*((*int8)(ptr))) +} + +func (codec *int8Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int8)(ptr)) == 0 +} + +type int16Codec struct { +} + +func (codec *int16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int16)(ptr)) = iter.ReadInt16() + } +} + +func (codec *int16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt16(*((*int16)(ptr))) +} + +func 
(codec *int16Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int16)(ptr)) == 0 +} + +type int32Codec struct { +} + +func (codec *int32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int32)(ptr)) = iter.ReadInt32() + } +} + +func (codec *int32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt32(*((*int32)(ptr))) +} + +func (codec *int32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int32)(ptr)) == 0 +} + +type int64Codec struct { +} + +func (codec *int64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int64)(ptr)) = iter.ReadInt64() + } +} + +func (codec *int64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt64(*((*int64)(ptr))) +} + +func (codec *int64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int64)(ptr)) == 0 +} + +type uint8Codec struct { +} + +func (codec *uint8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint8)(ptr)) = iter.ReadUint8() + } +} + +func (codec *uint8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint8(*((*uint8)(ptr))) +} + +func (codec *uint8Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint8)(ptr)) == 0 +} + +type uint16Codec struct { +} + +func (codec *uint16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint16)(ptr)) = iter.ReadUint16() + } +} + +func (codec *uint16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint16(*((*uint16)(ptr))) +} + +func (codec *uint16Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint16)(ptr)) == 0 +} + +type uint32Codec struct { +} + +func (codec *uint32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint32)(ptr)) = iter.ReadUint32() + } +} + +func (codec *uint32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint32(*((*uint32)(ptr))) +} + +func (codec *uint32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint32)(ptr)) == 0 +} + +type uint64Codec struct { +} + +func (codec *uint64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint64)(ptr)) = iter.ReadUint64() + } +} + +func (codec *uint64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint64(*((*uint64)(ptr))) +} + +func (codec *uint64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint64)(ptr)) == 0 +} + +type float32Codec struct { +} + +func (codec *float32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*float32)(ptr)) = iter.ReadFloat32() + } +} + +func (codec *float32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat32(*((*float32)(ptr))) +} + +func (codec *float32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float32)(ptr)) == 0 +} + +type float64Codec struct { +} + +func (codec *float64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*float64)(ptr)) = iter.ReadFloat64() + } +} + +func (codec *float64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat64(*((*float64)(ptr))) +} + +func (codec *float64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float64)(ptr)) == 0 +} + +type boolCodec struct { +} + +func (codec *boolCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*bool)(ptr)) = iter.ReadBool() + } +} + +func (codec *boolCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteBool(*((*bool)(ptr))) +} + +func (codec *boolCodec) IsEmpty(ptr unsafe.Pointer) bool 
{ + return !(*((*bool)(ptr))) +} + +type base64Codec struct { + sliceType *reflect2.UnsafeSliceType + sliceDecoder ValDecoder +} + +func (codec *base64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + codec.sliceType.UnsafeSetNil(ptr) + return + } + switch iter.WhatIsNext() { + case StringValue: + src := iter.ReadString() + dst, err := base64.StdEncoding.DecodeString(src) + if err != nil { + iter.ReportError("decode base64", err.Error()) + } else { + codec.sliceType.UnsafeSet(ptr, unsafe.Pointer(&dst)) + } + case ArrayValue: + codec.sliceDecoder.Decode(ptr, iter) + default: + iter.ReportError("base64Codec", "invalid input") + } +} + +func (codec *base64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + if codec.sliceType.UnsafeIsNil(ptr) { + stream.WriteNil() + return + } + src := *((*[]byte)(ptr)) + encoding := base64.StdEncoding + stream.writeByte('"') + if len(src) != 0 { + size := encoding.EncodedLen(len(src)) + buf := make([]byte, size) + encoding.Encode(buf, src) + stream.buf = append(stream.buf, buf...) + } + stream.writeByte('"') +} + +func (codec *base64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*[]byte)(ptr))) == 0 +} diff --git a/vendor/github.com/json-iterator/go/reflect_optional.go b/vendor/github.com/json-iterator/go/reflect_optional.go new file mode 100644 index 000000000..fa71f4748 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_optional.go @@ -0,0 +1,129 @@ +package jsoniter + +import ( + "github.com/modern-go/reflect2" + "unsafe" +) + +func decoderOfOptional(ctx *ctx, typ reflect2.Type) ValDecoder { + ptrType := typ.(*reflect2.UnsafePtrType) + elemType := ptrType.Elem() + decoder := decoderOfType(ctx, elemType) + return &OptionalDecoder{elemType, decoder} +} + +func encoderOfOptional(ctx *ctx, typ reflect2.Type) ValEncoder { + ptrType := typ.(*reflect2.UnsafePtrType) + elemType := ptrType.Elem() + elemEncoder := encoderOfType(ctx, elemType) + encoder := &OptionalEncoder{elemEncoder} + return encoder +} + +type OptionalDecoder struct { + ValueType reflect2.Type + ValueDecoder ValDecoder +} + +func (decoder *OptionalDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + *((*unsafe.Pointer)(ptr)) = nil + } else { + if *((*unsafe.Pointer)(ptr)) == nil { + // pointer is nil; we have to allocate memory to hold the value + newPtr := decoder.ValueType.UnsafeNew() + decoder.ValueDecoder.Decode(newPtr, iter) + *((*unsafe.Pointer)(ptr)) = newPtr + } else { + // reuse the existing instance + decoder.ValueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) + } + } +} + +type dereferenceDecoder struct { + // only to dereference a pointer + valueType reflect2.Type + valueDecoder ValDecoder +} + +func (decoder *dereferenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if *((*unsafe.Pointer)(ptr)) == nil { + // pointer is nil; we have to allocate memory to hold the value + newPtr := decoder.valueType.UnsafeNew() + decoder.valueDecoder.Decode(newPtr, iter) + *((*unsafe.Pointer)(ptr)) = newPtr + } else { + // reuse the existing instance + decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) + } +} + +type OptionalEncoder struct { + ValueEncoder ValEncoder +} + +func (encoder *OptionalEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *((*unsafe.Pointer)(ptr)) == nil { + stream.WriteNil() + } else { + encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream) + } +} + +func (encoder *OptionalEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*unsafe.Pointer)(ptr)) == nil +} + +type 
dereferenceEncoder struct { + ValueEncoder ValEncoder +} + +func (encoder *dereferenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *((*unsafe.Pointer)(ptr)) == nil { + stream.WriteNil() + } else { + encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream) + } +} + +func (encoder *dereferenceEncoder) IsEmpty(ptr unsafe.Pointer) bool { + dePtr := *((*unsafe.Pointer)(ptr)) + if dePtr == nil { + return true + } + return encoder.ValueEncoder.IsEmpty(dePtr) +} + +func (encoder *dereferenceEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool { + deReferenced := *((*unsafe.Pointer)(ptr)) + if deReferenced == nil { + return true + } + isEmbeddedPtrNil, converted := encoder.ValueEncoder.(IsEmbeddedPtrNil) + if !converted { + return false + } + fieldPtr := unsafe.Pointer(deReferenced) + return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr) +} + +type referenceEncoder struct { + encoder ValEncoder +} + +func (encoder *referenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.encoder.Encode(unsafe.Pointer(&ptr), stream) +} + +func (encoder *referenceEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr)) +} + +type referenceDecoder struct { + decoder ValDecoder +} + +func (decoder *referenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.decoder.Decode(unsafe.Pointer(&ptr), iter) +} diff --git a/vendor/github.com/json-iterator/go/reflect_slice.go b/vendor/github.com/json-iterator/go/reflect_slice.go new file mode 100644 index 000000000..9441d79df --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_slice.go @@ -0,0 +1,99 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "unsafe" +) + +func decoderOfSlice(ctx *ctx, typ reflect2.Type) ValDecoder { + sliceType := typ.(*reflect2.UnsafeSliceType) + decoder := decoderOfType(ctx.append("[sliceElem]"), sliceType.Elem()) + return &sliceDecoder{sliceType, decoder} +} + +func encoderOfSlice(ctx *ctx, typ reflect2.Type) ValEncoder { + sliceType := typ.(*reflect2.UnsafeSliceType) + encoder := encoderOfType(ctx.append("[sliceElem]"), sliceType.Elem()) + return &sliceEncoder{sliceType, encoder} +} + +type sliceEncoder struct { + sliceType *reflect2.UnsafeSliceType + elemEncoder ValEncoder +} + +func (encoder *sliceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if encoder.sliceType.UnsafeIsNil(ptr) { + stream.WriteNil() + return + } + length := encoder.sliceType.UnsafeLengthOf(ptr) + if length == 0 { + stream.WriteEmptyArray() + return + } + stream.WriteArrayStart() + encoder.elemEncoder.Encode(encoder.sliceType.UnsafeGetIndex(ptr, 0), stream) + for i := 1; i < length; i++ { + stream.WriteMore() + elemPtr := encoder.sliceType.UnsafeGetIndex(ptr, i) + encoder.elemEncoder.Encode(elemPtr, stream) + } + stream.WriteArrayEnd() + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%v: %s", encoder.sliceType, stream.Error.Error()) + } +} + +func (encoder *sliceEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.sliceType.UnsafeLengthOf(ptr) == 0 +} + +type sliceDecoder struct { + sliceType *reflect2.UnsafeSliceType + elemDecoder ValDecoder +} + +func (decoder *sliceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.doDecode(ptr, iter) + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.sliceType, iter.Error.Error()) + } +} + +func (decoder *sliceDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + sliceType := 
decoder.sliceType + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + sliceType.UnsafeSetNil(ptr) + return + } + if c != '[' { + iter.ReportError("decode slice", "expect [ or n, but found "+string([]byte{c})) + return + } + c = iter.nextToken() + if c == ']' { + sliceType.UnsafeSet(ptr, sliceType.UnsafeMakeSlice(0, 0)) + return + } + iter.unreadByte() + sliceType.UnsafeGrow(ptr, 1) + elemPtr := sliceType.UnsafeGetIndex(ptr, 0) + decoder.elemDecoder.Decode(elemPtr, iter) + length := 1 + for c = iter.nextToken(); c == ','; c = iter.nextToken() { + idx := length + length += 1 + sliceType.UnsafeGrow(ptr, length) + elemPtr = sliceType.UnsafeGetIndex(ptr, idx) + decoder.elemDecoder.Decode(elemPtr, iter) + } + if c != ']' { + iter.ReportError("decode slice", "expect ], but found "+string([]byte{c})) + return + } +} diff --git a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go new file mode 100644 index 000000000..92ae912dc --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go @@ -0,0 +1,1097 @@ +package jsoniter + +import ( + "fmt" + "io" + "strings" + "unsafe" + + "github.com/modern-go/reflect2" +) + +func decoderOfStruct(ctx *ctx, typ reflect2.Type) ValDecoder { + bindings := map[string]*Binding{} + structDescriptor := describeStruct(ctx, typ) + for _, binding := range structDescriptor.Fields { + for _, fromName := range binding.FromNames { + old := bindings[fromName] + if old == nil { + bindings[fromName] = binding + continue + } + ignoreOld, ignoreNew := resolveConflictBinding(ctx.frozenConfig, old, binding) + if ignoreOld { + delete(bindings, fromName) + } + if !ignoreNew { + bindings[fromName] = binding + } + } + } + fields := map[string]*structFieldDecoder{} + for k, binding := range bindings { + fields[k] = binding.Decoder.(*structFieldDecoder) + } + + if !ctx.caseSensitive() { + for k, binding := range bindings { + if _, found := fields[strings.ToLower(k)]; !found { + fields[strings.ToLower(k)] = binding.Decoder.(*structFieldDecoder) + } + } + } + + return createStructDecoder(ctx, typ, fields) +} + +func createStructDecoder(ctx *ctx, typ reflect2.Type, fields map[string]*structFieldDecoder) ValDecoder { + if ctx.disallowUnknownFields { + return &generalStructDecoder{typ: typ, fields: fields, disallowUnknownFields: true} + } + knownHash := map[int64]struct{}{ + 0: {}, + } + + switch len(fields) { + case 0: + return &skipObjectDecoder{typ} + case 1: + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + return &oneFieldStructDecoder{typ, fieldHash, fieldDecoder} + } + case 2: + var fieldHash1 int64 + var fieldHash2 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldHash1 == 0 { + fieldHash1 = fieldHash + fieldDecoder1 = fieldDecoder + } else { + fieldHash2 = fieldHash + fieldDecoder2 = fieldDecoder + } + } + return &twoFieldsStructDecoder{typ, fieldHash1, fieldDecoder1, fieldHash2, fieldDecoder2} + case 3: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldDecoder1 *structFieldDecoder + 
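// The fieldNameN variables hold field-name hashes precomputed with calcHash;
+// pairing each hash with its decoder lets Decode dispatch on plain int64
+// comparisons instead of the map lookups generalStructDecoder falls back to.
+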
var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } + } + return &threeFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3} + case 4: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } + } + return &fourFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4} + case 5: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } + } + return &fiveFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5} + case 6: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } 
else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } + } + return &sixFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6} + case 7: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldName7 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } + } + return &sevenFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7} + case 8: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldName7 int64 + var fieldName8 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + var fieldDecoder8 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else if fieldName7 == 0 { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } else { + fieldName8 = fieldHash + fieldDecoder8 = 
fieldDecoder + } + } + return &eightFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7, + fieldName8, fieldDecoder8} + case 9: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldName7 int64 + var fieldName8 int64 + var fieldName9 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + var fieldDecoder8 *structFieldDecoder + var fieldDecoder9 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else if fieldName7 == 0 { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } else if fieldName8 == 0 { + fieldName8 = fieldHash + fieldDecoder8 = fieldDecoder + } else { + fieldName9 = fieldHash + fieldDecoder9 = fieldDecoder + } + } + return &nineFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7, + fieldName8, fieldDecoder8, + fieldName9, fieldDecoder9} + case 10: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldName7 int64 + var fieldName8 int64 + var fieldName9 int64 + var fieldName10 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + var fieldDecoder8 *structFieldDecoder + var fieldDecoder9 *structFieldDecoder + var fieldDecoder10 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = 
fieldDecoder + } else if fieldName7 == 0 { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } else if fieldName8 == 0 { + fieldName8 = fieldHash + fieldDecoder8 = fieldDecoder + } else if fieldName9 == 0 { + fieldName9 = fieldHash + fieldDecoder9 = fieldDecoder + } else { + fieldName10 = fieldHash + fieldDecoder10 = fieldDecoder + } + } + return &tenFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7, + fieldName8, fieldDecoder8, + fieldName9, fieldDecoder9, + fieldName10, fieldDecoder10} + } + return &generalStructDecoder{typ, fields, false} +} + +type generalStructDecoder struct { + typ reflect2.Type + fields map[string]*structFieldDecoder + disallowUnknownFields bool +} + +func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + var c byte + for c = ','; c == ','; c = iter.nextToken() { + decoder.decodeOneField(ptr, iter) + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + if c != '}' { + iter.ReportError("struct Decode", `expect }, but found `+string([]byte{c})) + } + iter.decrementDepth() +} + +func (decoder *generalStructDecoder) decodeOneField(ptr unsafe.Pointer, iter *Iterator) { + var field string + var fieldDecoder *structFieldDecoder + if iter.cfg.objectFieldMustBeSimpleString { + fieldBytes := iter.ReadStringAsSlice() + field = *(*string)(unsafe.Pointer(&fieldBytes)) + fieldDecoder = decoder.fields[field] + if fieldDecoder == nil && !iter.cfg.caseSensitive { + fieldDecoder = decoder.fields[strings.ToLower(field)] + } + } else { + field = iter.ReadString() + fieldDecoder = decoder.fields[field] + if fieldDecoder == nil && !iter.cfg.caseSensitive { + fieldDecoder = decoder.fields[strings.ToLower(field)] + } + } + if fieldDecoder == nil { + if decoder.disallowUnknownFields { + msg := "found unknown field: " + field + iter.ReportError("ReadObject", msg) + } + c := iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + iter.Skip() + return + } + c := iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + fieldDecoder.Decode(ptr, iter) +} + +type skipObjectDecoder struct { + typ reflect2.Type +} + +func (decoder *skipObjectDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + valueType := iter.WhatIsNext() + if valueType != ObjectValue && valueType != NilValue { + iter.ReportError("skipObjectDecoder", "expect object or null") + return + } + iter.Skip() +} + +type oneFieldStructDecoder struct { + typ reflect2.Type + fieldHash int64 + fieldDecoder *structFieldDecoder +} + +func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + if iter.readFieldHash() == decoder.fieldHash { + decoder.fieldDecoder.Decode(ptr, iter) + } else { + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type twoFieldsStructDecoder 
struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder +} + +func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type threeFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder +} + +func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type fourFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder +} + +func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type fiveFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder +} + +func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case 
decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type sixFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder +} + +func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type sevenFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder + fieldHash7 int64 + fieldDecoder7 *structFieldDecoder +} + +func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type eightFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder + fieldHash7 int64 + fieldDecoder7 *structFieldDecoder + fieldHash8 int64 + fieldDecoder8 *structFieldDecoder +} + +func (decoder 
*eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + case decoder.fieldHash8: + decoder.fieldDecoder8.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type nineFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder + fieldHash7 int64 + fieldDecoder7 *structFieldDecoder + fieldHash8 int64 + fieldDecoder8 *structFieldDecoder + fieldHash9 int64 + fieldDecoder9 *structFieldDecoder +} + +func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + case decoder.fieldHash8: + decoder.fieldDecoder8.Decode(ptr, iter) + case decoder.fieldHash9: + decoder.fieldDecoder9.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type tenFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder + fieldHash7 int64 + fieldDecoder7 *structFieldDecoder + fieldHash8 int64 + fieldDecoder8 *structFieldDecoder + fieldHash9 int64 + fieldDecoder9 *structFieldDecoder + fieldHash10 int64 + fieldDecoder10 *structFieldDecoder +} + +func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + 
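// readFieldHash consumes the next object key and returns its hash, so each
+// case below is a single int64 comparison against a precomputed field hash
+// rather than a lookup on the field name string.
+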
decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + case decoder.fieldHash8: + decoder.fieldDecoder8.Decode(ptr, iter) + case decoder.fieldHash9: + decoder.fieldDecoder9.Decode(ptr, iter) + case decoder.fieldHash10: + decoder.fieldDecoder10.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type structFieldDecoder struct { + field reflect2.StructField + fieldDecoder ValDecoder +} + +func (decoder *structFieldDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + fieldPtr := decoder.field.UnsafeGet(ptr) + decoder.fieldDecoder.Decode(fieldPtr, iter) + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%s: %s", decoder.field.Name(), iter.Error.Error()) + } +} + +type stringModeStringDecoder struct { + elemDecoder ValDecoder + cfg *frozenConfig +} + +func (decoder *stringModeStringDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.elemDecoder.Decode(ptr, iter) + str := *((*string)(ptr)) + tempIter := decoder.cfg.BorrowIterator([]byte(str)) + defer decoder.cfg.ReturnIterator(tempIter) + *((*string)(ptr)) = tempIter.ReadString() +} + +type stringModeNumberDecoder struct { + elemDecoder ValDecoder +} + +func (decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.WhatIsNext() == NilValue { + decoder.elemDecoder.Decode(ptr, iter) + return + } + + c := iter.nextToken() + if c != '"' { + iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) + return + } + decoder.elemDecoder.Decode(ptr, iter) + if iter.Error != nil { + return + } + c = iter.readByte() + if c != '"' { + iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) + return + } +} diff --git a/vendor/github.com/json-iterator/go/reflect_struct_encoder.go b/vendor/github.com/json-iterator/go/reflect_struct_encoder.go new file mode 100644 index 000000000..152e3ef5a --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_struct_encoder.go @@ -0,0 +1,211 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "reflect" + "unsafe" +) + +func encoderOfStruct(ctx *ctx, typ reflect2.Type) ValEncoder { + type bindingTo struct { + binding *Binding + toName string + ignored bool + } + orderedBindings := []*bindingTo{} + structDescriptor := describeStruct(ctx, typ) + for _, binding := range structDescriptor.Fields { + for _, toName := range binding.ToNames { + new := &bindingTo{ + binding: binding, + toName: toName, + } + for _, old := range orderedBindings { + if old.toName != toName { + continue + } + old.ignored, new.ignored = resolveConflictBinding(ctx.frozenConfig, old.binding, new.binding) + } + orderedBindings = append(orderedBindings, new) + } + } + if len(orderedBindings) == 0 { + return &emptyStructEncoder{} + } + finalOrderedFields := []structFieldTo{} + for _, bindingTo := range orderedBindings { + if !bindingTo.ignored { + finalOrderedFields = 
append(finalOrderedFields, structFieldTo{ + encoder: bindingTo.binding.Encoder.(*structFieldEncoder), + toName: bindingTo.toName, + }) + } + } + return &structEncoder{typ, finalOrderedFields} +} + +func createCheckIsEmpty(ctx *ctx, typ reflect2.Type) checkIsEmpty { + encoder := createEncoderOfNative(ctx, typ) + if encoder != nil { + return encoder + } + kind := typ.Kind() + switch kind { + case reflect.Interface: + return &dynamicEncoder{typ} + case reflect.Struct: + return &structEncoder{typ: typ} + case reflect.Array: + return &arrayEncoder{} + case reflect.Slice: + return &sliceEncoder{} + case reflect.Map: + return encoderOfMap(ctx, typ) + case reflect.Ptr: + return &OptionalEncoder{} + default: + return &lazyErrorEncoder{err: fmt.Errorf("unsupported type: %v", typ)} + } +} + +func resolveConflictBinding(cfg *frozenConfig, old, new *Binding) (ignoreOld, ignoreNew bool) { + newTagged := new.Field.Tag().Get(cfg.getTagKey()) != "" + oldTagged := old.Field.Tag().Get(cfg.getTagKey()) != "" + if newTagged { + if oldTagged { + if len(old.levels) > len(new.levels) { + return true, false + } else if len(new.levels) > len(old.levels) { + return false, true + } else { + return true, true + } + } else { + return true, false + } + } else { + if oldTagged { + return true, false + } + if len(old.levels) > len(new.levels) { + return true, false + } else if len(new.levels) > len(old.levels) { + return false, true + } else { + return true, true + } + } +} + +type structFieldEncoder struct { + field reflect2.StructField + fieldEncoder ValEncoder + omitempty bool +} + +func (encoder *structFieldEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + fieldPtr := encoder.field.UnsafeGet(ptr) + encoder.fieldEncoder.Encode(fieldPtr, stream) + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%s: %s", encoder.field.Name(), stream.Error.Error()) + } +} + +func (encoder *structFieldEncoder) IsEmpty(ptr unsafe.Pointer) bool { + fieldPtr := encoder.field.UnsafeGet(ptr) + return encoder.fieldEncoder.IsEmpty(fieldPtr) +} + +func (encoder *structFieldEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool { + isEmbeddedPtrNil, converted := encoder.fieldEncoder.(IsEmbeddedPtrNil) + if !converted { + return false + } + fieldPtr := encoder.field.UnsafeGet(ptr) + return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr) +} + +type IsEmbeddedPtrNil interface { + IsEmbeddedPtrNil(ptr unsafe.Pointer) bool +} + +type structEncoder struct { + typ reflect2.Type + fields []structFieldTo +} + +type structFieldTo struct { + encoder *structFieldEncoder + toName string +} + +func (encoder *structEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteObjectStart() + isNotFirst := false + for _, field := range encoder.fields { + if field.encoder.omitempty && field.encoder.IsEmpty(ptr) { + continue + } + if field.encoder.IsEmbeddedPtrNil(ptr) { + continue + } + if isNotFirst { + stream.WriteMore() + } + stream.WriteObjectField(field.toName) + field.encoder.Encode(ptr, stream) + isNotFirst = true + } + stream.WriteObjectEnd() + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%v.%s", encoder.typ, stream.Error.Error()) + } +} + +func (encoder *structEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type emptyStructEncoder struct { +} + +func (encoder *emptyStructEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteEmptyObject() +} + +func (encoder *emptyStructEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type 
stringModeNumberEncoder struct {
+	elemEncoder ValEncoder
+}
+
+func (encoder *stringModeNumberEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	stream.writeByte('"')
+	encoder.elemEncoder.Encode(ptr, stream)
+	stream.writeByte('"')
+}
+
+func (encoder *stringModeNumberEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	return encoder.elemEncoder.IsEmpty(ptr)
+}
+
+type stringModeStringEncoder struct {
+	elemEncoder ValEncoder
+	cfg         *frozenConfig
+}
+
+func (encoder *stringModeStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	tempStream := encoder.cfg.BorrowStream(nil)
+	tempStream.Attachment = stream.Attachment
+	defer encoder.cfg.ReturnStream(tempStream)
+	encoder.elemEncoder.Encode(ptr, tempStream)
+	stream.WriteString(string(tempStream.Buffer()))
+}
+
+func (encoder *stringModeStringEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	return encoder.elemEncoder.IsEmpty(ptr)
+}
diff --git a/vendor/github.com/json-iterator/go/stream.go b/vendor/github.com/json-iterator/go/stream.go
new file mode 100644
index 000000000..23d8a3ad6
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/stream.go
@@ -0,0 +1,210 @@
+package jsoniter
+
+import (
+	"io"
+)
+
+// Stream is an io.Writer-like object with JSON-specific write functions.
+// Errors are not returned as return values, but stored in the Error field
+// of the stream instance.
+type Stream struct {
+	cfg        *frozenConfig
+	out        io.Writer
+	buf        []byte
+	Error      error
+	indention  int
+	Attachment interface{} // open for customized encoder
+}
+
+// NewStream creates a new stream instance.
+// cfg can be jsoniter.ConfigDefault.
+// out can be nil to write to the internal buffer only.
+// bufSize is the initial size of the internal buffer in bytes.
+func NewStream(cfg API, out io.Writer, bufSize int) *Stream {
+	return &Stream{
+		cfg:       cfg.(*frozenConfig),
+		out:       out,
+		buf:       make([]byte, 0, bufSize),
+		Error:     nil,
+		indention: 0,
+	}
+}
+
+// Pool returns a pool that can provide more streams with the same configuration.
+func (stream *Stream) Pool() StreamPool {
+	return stream.cfg
+}
+
+// Reset reuses this stream instance by assigning a new writer.
+func (stream *Stream) Reset(out io.Writer) {
+	stream.out = out
+	stream.buf = stream.buf[:0]
+}
+
+// Available returns how many bytes are unused in the buffer.
+func (stream *Stream) Available() int {
+	return cap(stream.buf) - len(stream.buf)
+}
+
+// Buffered returns the number of bytes that have been written into the current buffer.
+func (stream *Stream) Buffered() int {
+	return len(stream.buf)
+}
+
+// Buffer returns the internal buffer; if the writer is nil, use this method
+// to take the result.
+func (stream *Stream) Buffer() []byte {
+	return stream.buf
+}
+
+// SetBuffer allows appending to the internal buffer directly.
+func (stream *Stream) SetBuffer(buf []byte) {
+	stream.buf = buf
+}
+
+// Write writes the contents of p into the buffer.
+// It returns the number of bytes written.
+// If nn < len(p), it also returns an error explaining
+// why the write is short.
+func (stream *Stream) Write(p []byte) (nn int, err error) {
+	stream.buf = append(stream.buf, p...)
+	if stream.out != nil {
+		nn, err = stream.out.Write(stream.buf)
+		stream.buf = stream.buf[nn:]
+		return
+	}
+	return len(p), nil
+}
+
+// writeByte writes a single byte.
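+//
+// writeByte and the multi-byte helpers below append straight to stream.buf;
+// nothing reaches the underlying io.Writer until Flush is called, and the
+// WriteNil/WriteTrue/WriteObjectStart primitives further down build all of
+// their output from these buffered appends.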
+func (stream *Stream) writeByte(c byte) {
+	stream.buf = append(stream.buf, c)
+}
+
+func (stream *Stream) writeTwoBytes(c1 byte, c2 byte) {
+	stream.buf = append(stream.buf, c1, c2)
+}
+
+func (stream *Stream) writeThreeBytes(c1 byte, c2 byte, c3 byte) {
+	stream.buf = append(stream.buf, c1, c2, c3)
+}
+
+func (stream *Stream) writeFourBytes(c1 byte, c2 byte, c3 byte, c4 byte) {
+	stream.buf = append(stream.buf, c1, c2, c3, c4)
+}
+
+func (stream *Stream) writeFiveBytes(c1 byte, c2 byte, c3 byte, c4 byte, c5 byte) {
+	stream.buf = append(stream.buf, c1, c2, c3, c4, c5)
+}
+
+// Flush writes any buffered data to the underlying io.Writer.
+func (stream *Stream) Flush() error {
+	if stream.out == nil {
+		return nil
+	}
+	if stream.Error != nil {
+		return stream.Error
+	}
+	_, err := stream.out.Write(stream.buf)
+	if err != nil {
+		if stream.Error == nil {
+			stream.Error = err
+		}
+		return err
+	}
+	stream.buf = stream.buf[:0]
+	return nil
+}
+
+// WriteRaw writes a string out without quotes, just like raw []byte.
+func (stream *Stream) WriteRaw(s string) {
+	stream.buf = append(stream.buf, s...)
+}
+
+// WriteNil writes null to the stream.
+func (stream *Stream) WriteNil() {
+	stream.writeFourBytes('n', 'u', 'l', 'l')
+}
+
+// WriteTrue writes true to the stream.
+func (stream *Stream) WriteTrue() {
+	stream.writeFourBytes('t', 'r', 'u', 'e')
+}
+
+// WriteFalse writes false to the stream.
+func (stream *Stream) WriteFalse() {
+	stream.writeFiveBytes('f', 'a', 'l', 's', 'e')
+}
+
+// WriteBool writes true or false into the stream.
+func (stream *Stream) WriteBool(val bool) {
+	if val {
+		stream.WriteTrue()
+	} else {
+		stream.WriteFalse()
+	}
+}
+
+// WriteObjectStart writes { with possible indention.
+func (stream *Stream) WriteObjectStart() {
+	stream.indention += stream.cfg.indentionStep
+	stream.writeByte('{')
+	stream.writeIndention(0)
+}
+
+// WriteObjectField writes "field": with possible indention.
+func (stream *Stream) WriteObjectField(field string) {
+	stream.WriteString(field)
+	if stream.indention > 0 {
+		stream.writeTwoBytes(':', ' ')
+	} else {
+		stream.writeByte(':')
+	}
+}
+
+// WriteObjectEnd writes } with possible indention.
+func (stream *Stream) WriteObjectEnd() {
+	stream.writeIndention(stream.cfg.indentionStep)
+	stream.indention -= stream.cfg.indentionStep
+	stream.writeByte('}')
+}
+
+// WriteEmptyObject writes {}.
+func (stream *Stream) WriteEmptyObject() {
+	stream.writeByte('{')
+	stream.writeByte('}')
+}
+
+// WriteMore writes , with possible indention.
+func (stream *Stream) WriteMore() {
+	stream.writeByte(',')
+	stream.writeIndention(0)
+}
+
+// WriteArrayStart writes [ with possible indention.
+func (stream *Stream) WriteArrayStart() {
+	stream.indention += stream.cfg.indentionStep
+	stream.writeByte('[')
+	stream.writeIndention(0)
+}
+
+// WriteEmptyArray writes [].
+func (stream *Stream) WriteEmptyArray() {
+	stream.writeTwoBytes('[', ']')
+}
+
+// WriteArrayEnd writes ] with possible indention.
+func (stream *Stream) WriteArrayEnd() {
+	stream.writeIndention(stream.cfg.indentionStep)
+	stream.indention -= stream.cfg.indentionStep
+	stream.writeByte(']')
+}
+
+func (stream *Stream) writeIndention(delta int) {
+	if stream.indention == 0 {
+		return
+	}
+	stream.writeByte('\n')
+	toWrite := stream.indention - delta
+	for i := 0; i < toWrite; i++ {
+		stream.buf = append(stream.buf, ' ')
+	}
+}
diff --git a/vendor/github.com/json-iterator/go/stream_float.go b/vendor/github.com/json-iterator/go/stream_float.go
new file mode 100644
index 000000000..826aa594a
--- /dev/null
+++ 
b/vendor/github.com/json-iterator/go/stream_float.go
@@ -0,0 +1,111 @@
+package jsoniter
+
+import (
+	"fmt"
+	"math"
+	"strconv"
+)
+
+var pow10 []uint64
+
+func init() {
+	pow10 = []uint64{1, 10, 100, 1000, 10000, 100000, 1000000}
+}
+
+// WriteFloat32 writes a float32 to the stream.
+func (stream *Stream) WriteFloat32(val float32) {
+	if math.IsInf(float64(val), 0) || math.IsNaN(float64(val)) {
+		stream.Error = fmt.Errorf("unsupported value: %f", val)
+		return
+	}
+	abs := math.Abs(float64(val))
+	fmt := byte('f')
+	// Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right.
+	if abs != 0 {
+		if float32(abs) < 1e-6 || float32(abs) >= 1e21 {
+			fmt = 'e'
+		}
+	}
+	stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 32)
+}
+
+// WriteFloat32Lossy writes a float32 to the stream with only 6 digits of
+// precision; lossy, but much faster.
+func (stream *Stream) WriteFloat32Lossy(val float32) {
+	if math.IsInf(float64(val), 0) || math.IsNaN(float64(val)) {
+		stream.Error = fmt.Errorf("unsupported value: %f", val)
+		return
+	}
+	if val < 0 {
+		stream.writeByte('-')
+		val = -val
+	}
+	if val > 0x4ffffff {
+		stream.WriteFloat32(val)
+		return
+	}
+	precision := 6
+	exp := uint64(1000000) // 6
+	lval := uint64(float64(val)*float64(exp) + 0.5)
+	stream.WriteUint64(lval / exp)
+	fval := lval % exp
+	if fval == 0 {
+		return
+	}
+	stream.writeByte('.')
+	for p := precision - 1; p > 0 && fval < pow10[p]; p-- {
+		stream.writeByte('0')
+	}
+	stream.WriteUint64(fval)
+	for stream.buf[len(stream.buf)-1] == '0' {
+		stream.buf = stream.buf[:len(stream.buf)-1]
+	}
+}
+
+// WriteFloat64 writes a float64 to the stream.
+func (stream *Stream) WriteFloat64(val float64) {
+	if math.IsInf(val, 0) || math.IsNaN(val) {
+		stream.Error = fmt.Errorf("unsupported value: %f", val)
+		return
+	}
+	abs := math.Abs(val)
+	fmt := byte('f')
+	// Note: unlike WriteFloat32 above, plain float64 comparisons already give the precise cutoffs here.
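+	// These are the same cutoffs encoding/json applies when choosing between
+	// 'f' and 'e' formatting for floating-point values.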
+	if abs != 0 {
+		if abs < 1e-6 || abs >= 1e21 {
+			fmt = 'e'
+		}
+	}
+	stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 64)
+}
+
+// WriteFloat64Lossy writes a float64 to the stream with only 6 digits of
+// precision; lossy, but much faster.
+func (stream *Stream) WriteFloat64Lossy(val float64) {
+	if math.IsInf(val, 0) || math.IsNaN(val) {
+		stream.Error = fmt.Errorf("unsupported value: %f", val)
+		return
+	}
+	if val < 0 {
+		stream.writeByte('-')
+		val = -val
+	}
+	if val > 0x4ffffff {
+		stream.WriteFloat64(val)
+		return
+	}
+	precision := 6
+	exp := uint64(1000000) // 6
+	lval := uint64(val*float64(exp) + 0.5)
+	stream.WriteUint64(lval / exp)
+	fval := lval % exp
+	if fval == 0 {
+		return
+	}
+	stream.writeByte('.')
+	for p := precision - 1; p > 0 && fval < pow10[p]; p-- {
+		stream.writeByte('0')
+	}
+	stream.WriteUint64(fval)
+	for stream.buf[len(stream.buf)-1] == '0' {
+		stream.buf = stream.buf[:len(stream.buf)-1]
+	}
+}
diff --git a/vendor/github.com/json-iterator/go/stream_int.go b/vendor/github.com/json-iterator/go/stream_int.go
new file mode 100644
index 000000000..d1059ee4c
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/stream_int.go
@@ -0,0 +1,190 @@
+package jsoniter
+
+var digits []uint32
+
+func init() {
+	digits = make([]uint32, 1000)
+	for i := uint32(0); i < 1000; i++ {
+		digits[i] = (((i / 100) + '0') << 16) + ((((i / 10) % 10) + '0') << 8) + i%10 + '0'
+		if i < 10 {
+			digits[i] += 2 << 24
+		} else if i < 100 {
+			digits[i] += 1 << 24
+		}
+	}
+}
+
+func writeFirstBuf(space []byte, v uint32) []byte {
+	start := v >> 24
+	if start == 0 {
+		space = append(space, byte(v>>16), byte(v>>8))
+	} else if start == 1 {
+		space = append(space, byte(v>>8))
+	}
+	space = append(space, byte(v))
+	return space
+}
+
+func writeBuf(buf []byte, v uint32) []byte {
+	return append(buf, byte(v>>16), byte(v>>8), byte(v))
+}
+
+// WriteUint8 writes a uint8 to the stream.
+func (stream *Stream) WriteUint8(val uint8) {
+	stream.buf = writeFirstBuf(stream.buf, digits[val])
+}
+
+// WriteInt8 writes an int8 to the stream.
+func (stream *Stream) WriteInt8(nval int8) {
+	var val uint8
+	if nval < 0 {
+		val = uint8(-nval)
+		stream.buf = append(stream.buf, '-')
+	} else {
+		val = uint8(nval)
+	}
+	stream.buf = writeFirstBuf(stream.buf, digits[val])
+}
+
+// WriteUint16 writes a uint16 to the stream.
+func (stream *Stream) WriteUint16(val uint16) {
+	q1 := val / 1000
+	if q1 == 0 {
+		stream.buf = writeFirstBuf(stream.buf, digits[val])
+		return
+	}
+	r1 := val - q1*1000
+	stream.buf = writeFirstBuf(stream.buf, digits[q1])
+	stream.buf = writeBuf(stream.buf, digits[r1])
+}
+
+// WriteInt16 writes an int16 to the stream.
+func (stream *Stream) WriteInt16(nval int16) {
+	var val uint16
+	if nval < 0 {
+		val = uint16(-nval)
+		stream.buf = append(stream.buf, '-')
+	} else {
+		val = uint16(nval)
+	}
+	stream.WriteUint16(val)
+}
+
+// WriteUint32 writes a uint32 to the stream.
+func (stream *Stream) WriteUint32(val uint32) {
+	q1 := val / 1000
+	if q1 == 0 {
+		stream.buf = writeFirstBuf(stream.buf, digits[val])
+		return
+	}
+	r1 := val - q1*1000
+	q2 := q1 / 1000
+	if q2 == 0 {
+		stream.buf = writeFirstBuf(stream.buf, digits[q1])
+		stream.buf = writeBuf(stream.buf, digits[r1])
+		return
+	}
+	r2 := q1 - q2*1000
+	q3 := q2 / 1000
+	if q3 == 0 {
+		stream.buf = writeFirstBuf(stream.buf, digits[q2])
+	} else {
+		r3 := q2 - q3*1000
+		stream.buf = append(stream.buf, byte(q3+'0'))
+		stream.buf = writeBuf(stream.buf, digits[r3])
+	}
+	stream.buf = writeBuf(stream.buf, digits[r2])
+	stream.buf = writeBuf(stream.buf, digits[r1])
+}
+
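+// The WriteUintN/WriteIntN helpers in this file split the value into base-1000
+// groups and render each group through the digits table built in init above,
+// which packs three ASCII digits plus a leading-zero count into one uint32, so
+// each group costs a single table lookup (writeFirstBuf trims leading zeros).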
+// WriteInt32 writes an int32 to the stream.
+func (stream *Stream) WriteInt32(nval int32) {
+	var val uint32
+	if nval < 0 {
+		val = uint32(-nval)
+		stream.buf = append(stream.buf, '-')
+	} else {
+		val = uint32(nval)
+	}
+	stream.WriteUint32(val)
+}
+
+// WriteUint64 writes a uint64 to the stream.
+func (stream *Stream) WriteUint64(val uint64) {
+	q1 := val / 1000
+	if q1 == 0 {
+		stream.buf = writeFirstBuf(stream.buf, digits[val])
+		return
+	}
+	r1 := val - q1*1000
+	q2 := q1 / 1000
+	if q2 == 0 {
+		stream.buf = writeFirstBuf(stream.buf, digits[q1])
+		stream.buf = writeBuf(stream.buf, digits[r1])
+		return
+	}
+	r2 := q1 - q2*1000
+	q3 := q2 / 1000
+	if q3 == 0 {
+		stream.buf = writeFirstBuf(stream.buf, digits[q2])
+		stream.buf = writeBuf(stream.buf, digits[r2])
+		stream.buf = writeBuf(stream.buf, digits[r1])
+		return
+	}
+	r3 := q2 - q3*1000
+	q4 := q3 / 1000
+	if q4 == 0 {
+		stream.buf = writeFirstBuf(stream.buf, digits[q3])
+		stream.buf = writeBuf(stream.buf, digits[r3])
+		stream.buf = writeBuf(stream.buf, digits[r2])
+		stream.buf = writeBuf(stream.buf, digits[r1])
+		return
+	}
+	r4 := q3 - q4*1000
+	q5 := q4 / 1000
+	if q5 == 0 {
+		stream.buf = writeFirstBuf(stream.buf, digits[q4])
+		stream.buf = writeBuf(stream.buf, digits[r4])
+		stream.buf = writeBuf(stream.buf, digits[r3])
+		stream.buf = writeBuf(stream.buf, digits[r2])
+		stream.buf = writeBuf(stream.buf, digits[r1])
+		return
+	}
+	r5 := q4 - q5*1000
+	q6 := q5 / 1000
+	if q6 == 0 {
+		stream.buf = writeFirstBuf(stream.buf, digits[q5])
+	} else {
+		stream.buf = writeFirstBuf(stream.buf, digits[q6])
+		r6 := q5 - q6*1000
+		stream.buf = writeBuf(stream.buf, digits[r6])
+	}
+	stream.buf = writeBuf(stream.buf, digits[r5])
+	stream.buf = writeBuf(stream.buf, digits[r4])
+	stream.buf = writeBuf(stream.buf, digits[r3])
+	stream.buf = writeBuf(stream.buf, digits[r2])
+	stream.buf = writeBuf(stream.buf, digits[r1])
+}
+
+// WriteInt64 writes an int64 to the stream.
+func (stream *Stream) WriteInt64(nval int64) {
+	var val uint64
+	if nval < 0 {
+		val = uint64(-nval)
+		stream.buf = append(stream.buf, '-')
+	} else {
+		val = uint64(nval)
+	}
+	stream.WriteUint64(val)
+}
+
+// WriteInt writes an int to the stream.
+func (stream *Stream) WriteInt(val int) {
+	stream.WriteInt64(int64(val))
+}
+
+// WriteUint writes a uint to the stream.
+func (stream *Stream) WriteUint(val uint) {
+	stream.WriteUint64(uint64(val))
+}
diff --git a/vendor/github.com/json-iterator/go/stream_str.go b/vendor/github.com/json-iterator/go/stream_str.go
new file mode 100644
index 000000000..54c2ba0b3
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/stream_str.go
@@ -0,0 +1,372 @@
+package jsoniter
+
+import (
+	"unicode/utf8"
+)
+
+// htmlSafeSet holds the value true if the ASCII character with the given
+// array position can be safely represented inside a JSON string, embedded
+// inside of HTML