diff --git a/Makefile b/Makefile index b3f8e106..9418c969 100644 --- a/Makefile +++ b/Makefile @@ -19,7 +19,7 @@ IMAGE ?= ghcr.io/hyperledger-labs/fabric-operator TAG ?= $(shell git rev-parse --short HEAD) ARCH ?= $(shell go env GOARCH) -OSS_GO_VER ?= 1.17.7 +OSS_GO_VER ?= 1.20.3 BUILD_DATE = $(shell date -u +"%Y-%m-%dT%H:%M:%SZ") OS = $(shell go env GOOS) diff --git a/api/v1beta1/ibporderer.go b/api/v1beta1/ibporderer.go index b61e3efa..ec498da3 100644 --- a/api/v1beta1/ibporderer.go +++ b/api/v1beta1/ibporderer.go @@ -22,6 +22,7 @@ import ( config "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer/config/v1" v2config "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer/config/v2" v24config "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer/config/v24" + v25config "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer/config/v25" "github.com/IBM-Blockchain/fabric-operator/pkg/util/image" "github.com/IBM-Blockchain/fabric-operator/version" corev1 "k8s.io/api/core/v1" @@ -88,7 +89,17 @@ func (o *IBPOrderer) GetConfigOverride() (interface{}, error) { switch version.GetMajorReleaseVersion(o.Spec.FabricVersion) { case version.V2: currentVer := version.String(o.Spec.FabricVersion) - if currentVer.EqualWithoutTag(version.V2_4_1) || currentVer.GreaterThan(version.V2_4_1) { + if currentVer.EqualWithoutTag(version.V2_5_1) || currentVer.GreaterThan(version.V2_5_1) { + if o.Spec.ConfigOverride == nil { + return &v25config.Orderer{}, nil + } + + configOverride, err := v25config.ReadFrom(&o.Spec.ConfigOverride.Raw) + if err != nil { + return nil, err + } + return configOverride, nil + } else if currentVer.EqualWithoutTag(version.V2_4_1) || currentVer.GreaterThan(version.V2_4_1) { if o.Spec.ConfigOverride == nil { return &v24config.Orderer{}, nil } diff --git a/api/v1beta1/ibppeer.go b/api/v1beta1/ibppeer.go index 6fe512de..350ae301 100644 --- a/api/v1beta1/ibppeer.go +++ b/api/v1beta1/ibppeer.go @@ -23,6 +23,7 @@ import ( config "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer/config/v1" v2config "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer/config/v2" + v25config "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer/config/v25" "github.com/IBM-Blockchain/fabric-operator/pkg/util/image" "github.com/IBM-Blockchain/fabric-operator/version" corev1 "k8s.io/api/core/v1" @@ -99,15 +100,33 @@ func (p *IBPPeer) UsingCCLauncherImage() bool { func (p *IBPPeer) EnrollerImage() string { return image.Format(p.Spec.Images.EnrollerImage, p.Spec.Images.EnrollerTag) } +func IsV25Peer(fabricVersion string) bool { + currentVer := version.String(fabricVersion) + if currentVer.EqualWithoutTag(version.V2_5_1) || currentVer.GreaterThan(version.V2_5_1) { + return true + } + return false +} func (s *IBPPeer) GetConfigOverride() (interface{}, error) { switch version.GetMajorReleaseVersion(s.Spec.FabricVersion) { case version.V2: + isv25Peer := IsV25Peer(s.Spec.FabricVersion) if s.Spec.ConfigOverride == nil { - return &v2config.Core{}, nil + if isv25Peer { + return &v25config.Core{}, nil + } else { + return &v2config.Core{}, nil + } } - configOverride, err := v2config.ReadFrom(&s.Spec.ConfigOverride.Raw) + var configOverride interface{} + var err error + if isv25Peer { + configOverride, err = v25config.ReadFrom(&s.Spec.ConfigOverride.Raw) + } else { + configOverride, err = v2config.ReadFrom(&s.Spec.ConfigOverride.Raw) + } if err != nil { return nil, err } diff --git a/controllers/common/common.go b/controllers/common/common.go 
index e7b366d7..52e9b703 100644 --- a/controllers/common/common.go +++ b/controllers/common/common.go @@ -39,9 +39,9 @@ type Client interface { List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error } -// 1. Only one existing instance (of the same type as 'instance') should have -// the name 'instance.GetName()'; if more than one is present, return error -// 2. If any instance of a different type share the same name, return error +// 1. Only one existing instance (of the same type as 'instance') should have +// the name 'instance.GetName()'; if more than one is present, return error +// 2. If any instance of a different type share the same name, return error func ValidateCRName(k8sclient Client, name, namespace, kind string) error { listOptions := &client.ListOptions{ Namespace: namespace, diff --git a/controllers/ibporderer/ibporderer_controller.go b/controllers/ibporderer/ibporderer_controller.go index 9f7ba80c..cdfa1be5 100644 --- a/controllers/ibporderer/ibporderer_controller.go +++ b/controllers/ibporderer/ibporderer_controller.go @@ -746,23 +746,41 @@ func (r *ReconcileIBPOrderer) UpdateFunc(e event.UpdateEvent) bool { oldVer := version.String(oldOrderer.Spec.FabricVersion) newVer := version.String(newOrderer.Spec.FabricVersion) - // check if this V1 -> V2.2.x/V2.4.x orderer migration + // check if this V1 -> V2.2.x/V2.4.x/V2.5.x orderer migration if (oldOrderer.Spec.FabricVersion == "" || version.GetMajorReleaseVersion(oldOrderer.Spec.FabricVersion) == version.V1) && version.GetMajorReleaseVersion(newOrderer.Spec.FabricVersion) == version.V2 { update.migrateToV2 = true - if newVer.EqualWithoutTag(version.V2_4_1) || newVer.GreaterThan(version.V2_4_1) { + if newVer.EqualWithoutTag(version.V2_5_1) || newVer.GreaterThan(version.V2_5_1) { + update.migrateToV25 = true + // Re-enrolling tls cert to include admin hostname in SAN (for orderers >=2.5.1) + update.tlscertReenrollNeeded = true + } else if newVer.EqualWithoutTag(version.V2_4_1) || newVer.GreaterThan(version.V2_4_1) { + update.migrateToV24 = true + // Re-enrolling tls cert to include admin hostname in SAN (for orderers >=2.4.1) + update.tlscertReenrollNeeded = true + } + } + + // check if this V2.2.x -> V2.4.x/V2.5.x orderer migration + if (version.GetMajorReleaseVersion(oldOrderer.Spec.FabricVersion) == version.V2) && + oldVer.LessThan(version.V2_4_1) { + if newVer.EqualWithoutTag(version.V2_5_1) || newVer.GreaterThan(version.V2_5_1) { + update.migrateToV25 = true + // Re-enrolling tls cert to include admin hostname in SAN (for orderers >=2.5.1) + update.tlscertReenrollNeeded = true + } else if newVer.EqualWithoutTag(version.V2_4_1) || newVer.GreaterThan(version.V2_4_1) { update.migrateToV24 = true // Re-enrolling tls cert to include admin hostname in SAN (for orderers >=2.4.1) update.tlscertReenrollNeeded = true } } - // check if this V2.2.x -> V2.4.x orderer migration + // check if this V2.4.x -> V2.5.x orderer migration if (version.GetMajorReleaseVersion(oldOrderer.Spec.FabricVersion) == version.V2) && - oldVer.LessThan(version.V2_4_1) && - (newVer.EqualWithoutTag(version.V2_4_1) || newVer.GreaterThan(version.V2_4_1)) { - update.migrateToV24 = true + oldVer.LessThan(version.V2_5_1) && + (newVer.EqualWithoutTag(version.V2_5_1) || newVer.GreaterThan(version.V2_5_1)) { + update.migrateToV25 = true // Re-enrolling tls cert to include admin hostname in SAN (for orderers >=2.4.1) update.tlscertReenrollNeeded = true } diff --git a/controllers/ibporderer/predicate.go b/controllers/ibporderer/predicate.go
index 6b87d20c..f8f1afd7 100644 --- a/controllers/ibporderer/predicate.go +++ b/controllers/ibporderer/predicate.go @@ -46,6 +46,7 @@ type Update struct { ecertCreated bool migrateToV2 bool migrateToV24 bool + migrateToV25 bool nodeOUUpdated bool imagesUpdated bool fabricVersionUpdated bool @@ -69,6 +70,7 @@ func (u *Update) Detected() bool { u.ecertEnroll || u.migrateToV2 || u.migrateToV24 || + u.migrateToV25 || u.nodeOUUpdated || u.imagesUpdated || u.fabricVersionUpdated @@ -186,6 +188,10 @@ func (u *Update) MigrateToV24() bool { return u.migrateToV24 } +func (u *Update) MigrateToV25() bool { + return u.migrateToV25 +} + func (u *Update) NodeOUUpdated() bool { return u.nodeOUUpdated } @@ -251,6 +257,9 @@ func (u *Update) GetUpdateStackWithTrues() string { if u.migrateToV24 { stack += "migrateToV24 " } + if u.migrateToV25 { + stack += "migrateToV25 " + } if u.nodeOUUpdated { stack += "nodeOUUpdated " } diff --git a/controllers/ibppeer/ibppeer_controller.go b/controllers/ibppeer/ibppeer_controller.go index bb11c4af..55abca96 100644 --- a/controllers/ibppeer/ibppeer_controller.go +++ b/controllers/ibppeer/ibppeer_controller.go @@ -48,7 +48,6 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" @@ -671,16 +670,29 @@ func (r *ReconcileIBPPeer) UpdateFunc(e event.UpdateEvent) bool { version.GetMajorReleaseVersion(oldPeer.Spec.FabricVersion) == version.V1) && version.GetMajorReleaseVersion(newPeer.Spec.FabricVersion) == version.V2 { update.migrateToV2 = true - if newVer.EqualWithoutTag(version.V2_4_1) || newVer.GreaterThan(version.V2_4_1) { + if newVer.EqualWithoutTag(version.V2_5_1) || newVer.GreaterThan(version.V2_5_1) { + update.migrateToV24 = true + update.migrateToV25 = true + } else if newVer.EqualWithoutTag(version.V2_4_1) || newVer.GreaterThan(version.V2_4_1) { update.migrateToV24 = true } } - // check if this V2.2.x -> V2.4.x peer migration + // check if this V2.2.x -> V2.4.x/V2.5.x peer migration if (version.GetMajorReleaseVersion(oldPeer.Spec.FabricVersion) == version.V2) && - oldVer.LessThan(version.V2_4_1) && - (newVer.EqualWithoutTag(version.V2_4_1) || newVer.GreaterThan(version.V2_4_1)) { + oldVer.LessThan(version.V2_4_1) { update.migrateToV24 = true + if newVer.EqualWithoutTag(version.V2_5_1) || newVer.GreaterThan(version.V2_5_1) { + update.migrateToV25 = true + } + } + + // check if this V2.4.x -> V2.5.x peer migration + if (version.GetMajorReleaseVersion(oldPeer.Spec.FabricVersion) == version.V2) && + oldVer.LessThan(version.V2_5_1) { + if newVer.EqualWithoutTag(version.V2_5_1) || newVer.GreaterThan(version.V2_5_1) { + update.migrateToV25 = true + } } if newPeer.Spec.Action.UpgradeDBs == true { @@ -775,7 +787,7 @@ func (r *ReconcileIBPPeer) DeleteFunc(e event.DeleteEvent) bool { // without proper controller references set and was not cleaned up on peer resource deletion. 
log.Info(fmt.Sprintf("Deleting %s-init-config config map, if found", peer.GetName())) if err := r.client.Delete(context.TODO(), &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ + ObjectMeta: v1.ObjectMeta{ Name: fmt.Sprintf("%s-init-config", peer.GetName()), Namespace: peer.GetNamespace(), }, diff --git a/controllers/ibppeer/predicate.go b/controllers/ibppeer/predicate.go index 2db6f869..1b742669 100644 --- a/controllers/ibppeer/predicate.go +++ b/controllers/ibppeer/predicate.go @@ -39,6 +39,7 @@ type Update struct { tlscertNewKeyReenroll bool migrateToV2 bool migrateToV24 bool + migrateToV25 bool mspUpdated bool ecertEnroll bool tlscertEnroll bool @@ -116,6 +117,10 @@ func (u *Update) MigrateToV24() bool { return u.migrateToV24 } +func (u *Update) MigrateToV25() bool { + return u.migrateToV25 +} + func (u *Update) UpgradeDBs() bool { return u.upgradedbs } @@ -195,6 +200,7 @@ func (u *Update) Needed() bool { u.tlscertNewKeyReenroll || u.migrateToV2 || u.migrateToV24 || + u.migrateToV25 || u.mspUpdated || u.ecertEnroll || u.upgradedbs || @@ -239,6 +245,9 @@ func (u *Update) GetUpdateStackWithTrues() string { if u.migrateToV24 { stack += "migrateToV24 " } + if u.migrateToV25 { + stack += "migrateToV25 " + } if u.mspUpdated { stack += "mspUpdated " } diff --git a/defaultconfig/orderer/v24/orderer.yaml b/defaultconfig/orderer/v24/orderer.yaml index 72cd86b9..82da297b 100644 --- a/defaultconfig/orderer/v24/orderer.yaml +++ b/defaultconfig/orderer/v24/orderer.yaml @@ -52,6 +52,14 @@ General: # ServerTimeout is the duration the server waits for a response from # a client before closing the connection. ServerTimeout: 20s + + # Since all nodes should be consistent it is recommended to keep + # the default value of 100MB for MaxRecvMsgSize & MaxSendMsgSize + # Max message size in bytes the GRPC server and client can receive + MaxRecvMsgSize: 104857600 + # Max message size in bytes the GRPC server and client can send + MaxSendMsgSize: 104857600 + # Cluster settings for ordering service nodes that communicate with other ordering service nodes # such as Raft based ordering service. Cluster: diff --git a/defaultconfig/orderer/v25/orderer.yaml b/defaultconfig/orderer/v25/orderer.yaml new file mode 100644 index 00000000..9177ed79 --- /dev/null +++ b/defaultconfig/orderer/v25/orderer.yaml @@ -0,0 +1,428 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +################################################################################ +# +# Orderer Configuration +# +# - This controls the type and configuration of the orderer. +# +################################################################################ +General: + # Listen address: The IP on which to bind to listen. + ListenAddress: 127.0.0.1 + + # Listen port: The port on which to bind to listen. + ListenPort: 7050 + + # TLS: TLS settings for the GRPC server. 
+ TLS: + Enabled: false + # PrivateKey governs the file location of the private key of the TLS certificate. + PrivateKey: tls/server.key + # Certificate governs the file location of the server TLS certificate. + Certificate: tls/server.crt + RootCAs: + - tls/ca.crt + ClientAuthRequired: false + ClientRootCAs: + # Keepalive settings for the GRPC server. + Keepalive: + # ServerMinInterval is the minimum permitted time between client pings. + # If clients send pings more frequently, the server will + # disconnect them. + ServerMinInterval: 60s + # ServerInterval is the time between pings to clients. + ServerInterval: 7200s + # ServerTimeout is the duration the server waits for a response from + # a client before closing the connection. + ServerTimeout: 20s + + # Since all nodes should be consistent it is recommended to keep + # the default value of 100MB for MaxRecvMsgSize & MaxSendMsgSize + # Max message size in bytes the GRPC server and client can receive + MaxRecvMsgSize: 104857600 + # Max message size in bytes the GRPC server and client can send + MaxSendMsgSize: 104857600 + + # Cluster settings for ordering service nodes that communicate with other ordering service nodes + # such as Raft based ordering service. + Cluster: + # SendBufferSize is the maximum number of messages in the egress buffer. + # Consensus messages are dropped if the buffer is full, and transaction + # messages are waiting for space to be freed. + SendBufferSize: 100 + # ClientCertificate governs the file location of the client TLS certificate + # used to establish mutual TLS connections with other ordering service nodes. + ClientCertificate: + # ClientPrivateKey governs the file location of the private key of the client TLS certificate. + ClientPrivateKey: + # The below 4 properties should be either set together, or be unset together. + # If they are set, then the orderer node uses a separate listener for intra-cluster + # communication. If they are unset, then the general orderer listener is used. + # This is useful if you want to use a different TLS server certificates on the + # client-facing and the intra-cluster listeners. + + # ListenPort defines the port on which the cluster listens to connections. + ListenPort: + # ListenAddress defines the IP on which to listen to intra-cluster communication. + ListenAddress: + # ServerCertificate defines the file location of the server TLS certificate used for intra-cluster + # communication. + ServerCertificate: + # ServerPrivateKey defines the file location of the private key of the TLS certificate. + ServerPrivateKey: + + # Bootstrap method: The method by which to obtain the bootstrap block + # system channel is specified. The option can be one of: + # "file" - path to a file containing the genesis block or config block of system channel + # "none" - allows an orderer to start without a system channel configuration + BootstrapMethod: file + + # Bootstrap file: The file containing the bootstrap block to use when + # initializing the orderer system channel and BootstrapMethod is set to + # "file". The bootstrap file can be the genesis block, and it can also be + # a config block for late bootstrap of some consensus methods like Raft. + # Generate a genesis block by updating $FABRIC_CFG_PATH/configtx.yaml and + # using configtxgen command with "-outputBlock" option. + # Defaults to file "genesisblock" (in $FABRIC_CFG_PATH directory) if not specified. + BootstrapFile: + + # LocalMSPDir is where to find the private crypto material needed by the + # orderer. 
It is set relative here as a default for dev environments but + # should be changed to the real location in production. + LocalMSPDir: msp + + # LocalMSPID is the identity to register the local MSP material with the MSP + # manager. IMPORTANT: The local MSP ID of an orderer needs to match the MSP + # ID of one of the organizations defined in the orderer system channel's + # /Channel/Orderer configuration. The sample organization defined in the + # sample configuration provided has an MSP ID of "SampleOrg". + LocalMSPID: SampleOrg + + # Enable an HTTP service for Go "pprof" profiling as documented at: + # https://golang.org/pkg/net/http/pprof + Profile: + Enabled: false + Address: 0.0.0.0:6060 + + # BCCSP configures the blockchain crypto service providers. + BCCSP: + # Default specifies the preferred blockchain crypto service provider + # to use. If the preferred provider is not available, the software + # based provider ("SW") will be used. + # Valid providers are: + # - SW: a software based crypto provider + # - PKCS11: a CA hardware security module crypto provider. + Default: SW + + # SW configures the software based blockchain crypto provider. + SW: + # TODO: The default Hash and Security level needs refactoring to be + # fully configurable. Changing these defaults requires coordination + # SHA2 is hardcoded in several places, not only BCCSP + Hash: SHA2 + Security: 256 + # Location of key store. If this is unset, a location will be + # chosen using: 'LocalMSPDir'/keystore + FileKeyStore: + KeyStore: + + # Settings for the PKCS#11 crypto provider (i.e. when DEFAULT: PKCS11) + # PKCS11: + # # Location of the PKCS11 module library + # Library: + # # Token Label + # Label: + # # User PIN + # Pin: + # Hash: + # Security: + # FileKeyStore: + # KeyStore: + + # Authentication contains configuration parameters related to authenticating + # client messages + Authentication: + # the acceptable difference between the current server time and the + # client's time as specified in a client request message + TimeWindow: 15m + + +################################################################################ +# +# SECTION: File Ledger +# +# - This section applies to the configuration of the file or json ledgers. +# +################################################################################ +FileLedger: + + # Location: The directory to store the blocks in. + # NOTE: If this is unset, a new temporary location will be chosen every time + # the orderer is restarted, using the prefix specified by Prefix. + Location: /var/hyperledger/production/orderer + +################################################################################ +# +# SECTION: Kafka +# +# - This section applies to the configuration of the Kafka-based orderer, and +# its interaction with the Kafka cluster. +# +################################################################################ +Kafka: + + # Retry: What do if a connection to the Kafka cluster cannot be established, + # or if a metadata request to the Kafka cluster needs to be repeated. + Retry: + # When a new channel is created, or when an existing channel is reloaded + # (in case of a just-restarted orderer), the orderer interacts with the + # Kafka cluster in the following ways: + # 1. It creates a Kafka producer (writer) for the Kafka partition that + # corresponds to the channel. + # 2. It uses that producer to post a no-op CONNECT message to that + # partition + # 3. It creates a Kafka consumer (reader) for that partition. 
+ # If any of these steps fail, they will be re-attempted every + # <ShortInterval> for a total of <ShortTotal>, and then every + # <LongInterval> for a total of <LongTotal> until they succeed. + # Note that the orderer will be unable to write to or read from a + # channel until all of the steps above have been completed successfully. + ShortInterval: 5s + ShortTotal: 10m + LongInterval: 5m + LongTotal: 12h + # Affects the socket timeouts when waiting for an initial connection, a + # response, or a transmission. See Config.Net for more info: + # https://godoc.org/github.com/Shopify/sarama#Config + NetworkTimeouts: + DialTimeout: 10s + ReadTimeout: 10s + WriteTimeout: 10s + # Affects the metadata requests when the Kafka cluster is in the middle + # of a leader election.See Config.Metadata for more info: + # https://godoc.org/github.com/Shopify/sarama#Config + Metadata: + RetryBackoff: 250ms + RetryMax: 3 + # What to do if posting a message to the Kafka cluster fails. See + # Config.Producer for more info: + # https://godoc.org/github.com/Shopify/sarama#Config + Producer: + RetryBackoff: 100ms + RetryMax: 3 + # What to do if reading from the Kafka cluster fails. See + # Config.Consumer for more info: + # https://godoc.org/github.com/Shopify/sarama#Config + Consumer: + RetryBackoff: 2s + # Settings to use when creating Kafka topics. Only applies when + # Kafka.Version is v0.10.1.0 or higher + Topic: + # The number of Kafka brokers across which to replicate the topic + ReplicationFactor: 3 + # Verbose: Enable logging for interactions with the Kafka cluster. + Verbose: false + + # TLS: TLS settings for the orderer's connection to the Kafka cluster. + TLS: + + # Enabled: Use TLS when connecting to the Kafka cluster. + Enabled: false + + # PrivateKey: PEM-encoded private key the orderer will use for + # authentication. + PrivateKey: + # As an alternative to specifying the PrivateKey here, uncomment the + # following "File" key and specify the file name from which to load the + # value of PrivateKey. + #File: path/to/PrivateKey + + # Certificate: PEM-encoded signed public key certificate the orderer will + # use for authentication. + Certificate: + # As an alternative to specifying the Certificate here, uncomment the + # following "File" key and specify the file name from which to load the + # value of Certificate. + #File: path/to/Certificate + + # RootCAs: PEM-encoded trusted root certificates used to validate + # certificates from the Kafka cluster. + RootCAs: + # As an alternative to specifying the RootCAs here, uncomment the + # following "File" key and specify the file name from which to load the + # value of RootCAs.
+ #File: path/to/RootCAs + + # SASLPlain: Settings for using SASL/PLAIN authentication with Kafka brokers + SASLPlain: + # Enabled: Use SASL/PLAIN to authenticate with Kafka brokers + Enabled: false + # User: Required when Enabled is set to true + User: + # Password: Required when Enabled is set to true + Password: + + # Kafka protocol version used to communicate with the Kafka cluster brokers + # (defaults to 0.10.2.0 if not specified) + Version: + +################################################################################ +# +# Debug Configuration +# +# - This controls the debugging options for the orderer +# +################################################################################ +Debug: + + # BroadcastTraceDir when set will cause each request to the Broadcast service + # for this orderer to be written to a file in this directory + BroadcastTraceDir: + + # DeliverTraceDir when set will cause each request to the Deliver service + # for this orderer to be written to a file in this directory + DeliverTraceDir: + +################################################################################ +# +# Operations Configuration +# +# - This configures the operations server endpoint for the orderer +# +################################################################################ +Operations: + # host and port for the operations server + ListenAddress: 127.0.0.1:8443 + + # TLS configuration for the operations endpoint + TLS: + # TLS enabled + Enabled: false + + # Certificate is the location of the PEM encoded TLS certificate + Certificate: + + # PrivateKey points to the location of the PEM-encoded key + PrivateKey: + + # Most operations service endpoints require client authentication when TLS + # is enabled. ClientAuthRequired requires client certificate authentication + # at the TLS layer to access all resources. + ClientAuthRequired: false + + # Paths to PEM encoded ca certificates to trust for client authentication + ClientRootCAs: [] + +################################################################################ +# +# Metrics Configuration +# +# - This configures metrics collection for the orderer +# +################################################################################ +Metrics: + # The metrics provider is one of statsd, prometheus, or disabled + Provider: prometheus + + # The statsd configuration + Statsd: + # network type: tcp or udp + Network: udp + + # the statsd server address + Address: 127.0.0.1:8125 + + # The interval at which locally cached counters and gauges are pushed + # to statsd; timings are pushed immediately + WriteInterval: 30s + + # The prefix is prepended to all emitted statsd metrics + Prefix: + +################################################################################ +# +# Admin Configuration +# +# - This configures the admin server endpoint for the orderer +# +################################################################################ +Admin: + # host and port for the admin server + ListenAddress: 127.0.0.1:9443 + + # TLS configuration for the admin endpoint + TLS: + # TLS enabled + Enabled: false + + # Certificate is the location of the PEM encoded TLS certificate + Certificate: + + # PrivateKey points to the location of the PEM-encoded key + PrivateKey: + + # Most admin service endpoints require client authentication when TLS + # is enabled. ClientAuthRequired requires client certificate authentication + # at the TLS layer to access all resources. + # + # NOTE: When TLS is enabled, the admin endpoint requires mutual TLS. 
The + # orderer will panic on startup if this value is set to false. + ClientAuthRequired: true + + # Paths to PEM encoded ca certificates to trust for client authentication + ClientRootCAs: [] + +################################################################################ +# +# Channel participation API Configuration +# +# - This provides the channel participation API configuration for the orderer. +# - Channel participation uses the ListenAddress and TLS settings of the Admin +# service. +# +################################################################################ +ChannelParticipation: + # Channel participation API is enabled. + # ibp updates this to enabled by default + Enabled: true + + # The maximum size of the request body when joining a channel. + MaxRequestBodySize: 1048576 + + +################################################################################ +# +# Consensus Configuration +# +# - This section contains config options for a consensus plugin. It is opaque +# to orderer, and completely up to consensus implementation to make use of. +# +################################################################################ +Consensus: + # The allowed key-value pairs here depend on consensus plugin. For etcd/raft, + # we use following options: + + # WALDir specifies the location at which Write Ahead Logs for etcd/raft are + # stored. Each channel will have its own subdir named after channel ID. + WALDir: /var/hyperledger/production/orderer/etcdraft/wal + + # SnapDir specifies the location at which snapshots for etcd/raft are + # stored. Each channel will have its own subdir named after channel ID. + SnapDir: /var/hyperledger/production/orderer/etcdraft/snapshot \ No newline at end of file diff --git a/defaultconfig/peer/v2/core.yaml b/defaultconfig/peer/v2/core.yaml index 74438633..45d3f32a 100644 --- a/defaultconfig/peer/v2/core.yaml +++ b/defaultconfig/peer/v2/core.yaml @@ -480,6 +480,13 @@ peer: # gatewayService limits concurrent requests to gateway service that handles the submission and evaluation of transactions. gatewayService: 500 + # Since all nodes should be consistent it is recommended to keep + # the default value of 100MB for MaxRecvMsgSize & MaxSendMsgSize + # Max message size in bytes GRPC server and client can receive + maxRecvMsgSize: 104857600 + # Max message size in bytes GRPC server and client can send + maxSendMsgSize: 104857600 + ############################################################################### # # VM section diff --git a/defaultconfig/peer/v25/core.yaml b/defaultconfig/peer/v25/core.yaml new file mode 100644 index 00000000..a62be343 --- /dev/null +++ b/defaultconfig/peer/v25/core.yaml @@ -0,0 +1,815 @@ +# +# Copyright contributors to the Hyperledger Fabric Operator project +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +############################################################################### +# +# Peer section +# +############################################################################### +peer: + + # The peer id provides a name for this peer instance and is used when + # naming docker resources. + id: jdoe + + # The networkId allows for logical separation of networks and is used when + # naming docker resources. + networkId: dev + + # The Address at local network interface this Peer will listen on. + # By default, it will listen on all network interfaces + listenAddress: 0.0.0.0:7051 + + # The endpoint this peer uses to listen for inbound chaincode connections. + # If this is commented-out, the listen address is selected to be + # the peer's address (see below) with port 7052 + # chaincodeListenAddress: 0.0.0.0:7052 + + # The endpoint the chaincode for this peer uses to connect to the peer. + # If this is not specified, the chaincodeListenAddress address is selected. + # And if chaincodeListenAddress is not specified, address is selected from + # peer address (see below). If specified peer address is invalid then it + # will fallback to the auto detected IP (local IP) regardless of the peer + # addressAutoDetect value. + # chaincodeAddress: 0.0.0.0:7052 + + # When used as peer config, this represents the endpoint to other peers + # in the same organization. For peers in other organization, see + # gossip.externalEndpoint for more info. + # When used as CLI config, this means the peer's endpoint to interact with + address: 0.0.0.0:7051 + + # Whether the Peer should programmatically determine its address + # This case is useful for docker containers. + # When set to true, will override peer address. + addressAutoDetect: false + + # Settings for the Peer's gateway server. + gateway: + # Whether the gateway is enabled for this Peer. + enabled: true + # endorsementTimeout is the duration the gateway waits for a response + # from other endorsing peers before returning a timeout error to the client. + endorsementTimeout: 30s + # broadcastTimeout is the duration the gateway waits for a response + # from ordering nodes before returning a timeout error to the client. + broadcastTimeout: 30s + # dialTimeout is the duration the gateway waits for a connection + # to other network nodes. + dialTimeout: 2m + + # Keepalive settings for peer server and clients + keepalive: + # Interval is the duration after which if the server does not see + # any activity from the client it pings the client to see if it's alive + interval: 7200s + # Timeout is the duration the server waits for a response + # from the client after sending a ping before closing the connection + timeout: 20s + # MinInterval is the minimum permitted time between client pings. + # If clients send pings more frequently, the peer server will + # disconnect them + minInterval: 60s + # Client keepalive settings for communicating with other peer nodes + client: + # Interval is the time between pings to peer nodes. This must + # greater than or equal to the minInterval specified by peer + # nodes + interval: 60s + # Timeout is the duration the client waits for a response from + # peer nodes before closing the connection + timeout: 20s + # DeliveryClient keepalive settings for communication with ordering + # nodes. + deliveryClient: + # Interval is the time between pings to ordering nodes. This must + # greater than or equal to the minInterval specified by ordering + # nodes. 
+ interval: 60s + # Timeout is the duration the client waits for a response from + # ordering nodes before closing the connection + timeout: 20s + + + # Gossip related configuration + gossip: + # Bootstrap set to initialize gossip with. + # This is a list of other peers that this peer reaches out to at startup. + # Important: The endpoints here have to be endpoints of peers in the same + # organization, because the peer would refuse connecting to these endpoints + # unless they are in the same organization as the peer. + bootstrap: + - 127.0.0.1:7051 + + # NOTE: orgLeader and useLeaderElection parameters are mutual exclusive. + # Setting both to true would result in the termination of the peer + # since this is undefined state. If the peers are configured with + # useLeaderElection=false, make sure there is at least 1 peer in the + # organization that its orgLeader is set to true. + + # Defines whenever peer will initialize dynamic algorithm for + # "leader" selection, where leader is the peer to establish + # connection with ordering service and use delivery protocol + # to pull ledger blocks from ordering service. + useLeaderElection: false + # Statically defines peer to be an organization "leader", + # where this means that current peer will maintain connection + # with ordering service and disseminate block across peers in + # its own organization. Multiple peers or all peers in an organization + # may be configured as org leaders, so that they all pull + # blocks directly from ordering service. + orgLeader: true + + # Interval for membershipTracker polling + membershipTrackerInterval: 5s + + # Overrides the endpoint that the peer publishes to peers + # in its organization. For peers in foreign organizations + # see 'externalEndpoint' + endpoint: + # Maximum count of blocks stored in memory + maxBlockCountToStore: 10 + # Max time between consecutive message pushes(unit: millisecond) + maxPropagationBurstLatency: 10ms + # Max number of messages stored until a push is triggered to remote peers + maxPropagationBurstSize: 10 + # Number of times a message is pushed to remote peers + propagateIterations: 1 + # Number of peers selected to push messages to + propagatePeerNum: 3 + # Determines frequency of pull phases(unit: second) + # Must be greater than digestWaitTime + responseWaitTime + pullInterval: 4s + # Number of peers to pull from + pullPeerNum: 3 + # Determines frequency of pulling state info messages from peers(unit: second) + requestStateInfoInterval: 4s + # Determines frequency of pushing state info messages to peers(unit: second) + publishStateInfoInterval: 4s + # Maximum time a stateInfo message is kept until expired + stateInfoRetentionInterval: + # Time from startup certificates are included in Alive messages(unit: second) + publishCertPeriod: 10s + # Should we skip verifying block messages or not (currently not in use) + skipBlockVerification: false + # Dial timeout(unit: second) + dialTimeout: 3s + # Connection timeout(unit: second) + connTimeout: 2s + # Buffer size of received messages + recvBuffSize: 20 + # Buffer size of sending messages + sendBuffSize: 200 + # Time to wait before pull engine processes incoming digests (unit: second) + # Should be slightly smaller than requestWaitTime + digestWaitTime: 1s + # Time to wait before pull engine removes incoming nonce (unit: milliseconds) + # Should be slightly bigger than digestWaitTime + requestWaitTime: 1500ms + # Time to wait before pull engine ends pull (unit: second) + responseWaitTime: 2s + # Alive check interval(unit: 
second) + aliveTimeInterval: 5s + # Alive expiration timeout(unit: second) + aliveExpirationTimeout: 25s + # Reconnect interval(unit: second) + reconnectInterval: 25s + # Max number of attempts to connect to a peer + maxConnectionAttempts: 120 + # Message expiration factor for alive messages + msgExpirationFactor: 20 + # This is an endpoint that is published to peers outside of the organization. + # If this isn't set, the peer will not be known to other organizations. + externalEndpoint: + # Leader election service configuration + election: + # Longest time peer waits for stable membership during leader election startup (unit: second) + startupGracePeriod: 15s + # Interval gossip membership samples to check its stability (unit: second) + membershipSampleInterval: 1s + # Time passes since last declaration message before peer decides to perform leader election (unit: second) + leaderAliveThreshold: 10s + # Time between peer sends propose message and declares itself as a leader (sends declaration message) (unit: second) + leaderElectionDuration: 5s + + pvtData: + # pullRetryThreshold determines the maximum duration of time private data corresponding for a given block + # would be attempted to be pulled from peers until the block would be committed without the private data + # ibp updates this from 60s to 5s + pullRetryThreshold: 5s + # As private data enters the transient store, it is associated with the peer's ledger's height at that time. + # transientstoreMaxBlockRetention defines the maximum difference between the current ledger's height upon commit, + # and the private data residing inside the transient store that is guaranteed not to be purged. + # Private data is purged from the transient store when blocks with sequences that are multiples + # of transientstoreMaxBlockRetention are committed. + transientstoreMaxBlockRetention: 1000 + # pushAckTimeout is the maximum time to wait for an acknowledgement from each peer + # at private data push at endorsement time. + pushAckTimeout: 3s + # Block to live pulling margin, used as a buffer + # to prevent peer from trying to pull private data + # from peers that is soon to be purged in next N blocks. + # This helps a newly joined peer catch up to current + # blockchain height quicker. + btlPullMargin: 10 + # the process of reconciliation is done in an endless loop, while in each iteration reconciler tries to + # pull from the other peers the most recent missing blocks with a maximum batch size limitation. + # reconcileBatchSize determines the maximum batch size of missing private data that will be reconciled in a + # single iteration. + reconcileBatchSize: 10 + # reconcileSleepInterval determines the time reconciler sleeps from end of an iteration until the beginning + # of the next reconciliation iteration. + reconcileSleepInterval: 1m + # reconciliationEnabled is a flag that indicates whether private data reconciliation is enable or not. + reconciliationEnabled: true + # skipPullingInvalidTransactionsDuringCommit is a flag that indicates whether pulling of invalid + # transaction's private data from other peers need to be skipped during the commit time and pulled + # only through reconciler. + skipPullingInvalidTransactionsDuringCommit: false + # implicitCollectionDisseminationPolicy specifies the dissemination policy for the peer's own implicit collection. + # When a peer endorses a proposal that writes to its own implicit collection, below values override the default values + # for disseminating private data. 
+ # Note that it is applicable to all channels the peer has joined. The implication is that requiredPeerCount has to + # be smaller than the number of peers in a channel that has the lowest numbers of peers from the organization. + implicitCollectionDisseminationPolicy: + # requiredPeerCount defines the minimum number of eligible peers to which the peer must successfully + # disseminate private data for its own implicit collection during endorsement. Default value is 0. + requiredPeerCount: 0 + # maxPeerCount defines the maximum number of eligible peers to which the peer will attempt to + # disseminate private data for its own implicit collection during endorsement. Default value is 1. + maxPeerCount: 1 + + # Gossip state transfer related configuration + state: + # indicates whenever state transfer is enabled or not + # default value is true, i.e. state transfer is active + # and takes care to sync up missing blocks allowing + # lagging peer to catch up to speed with rest network + enabled: false + # checkInterval interval to check whether peer is lagging behind enough to + # request blocks via state transfer from another peer. + checkInterval: 10s + # responseTimeout amount of time to wait for state transfer response from + # other peers + responseTimeout: 3s + # batchSize the number of blocks to request via state transfer from another peer + batchSize: 10 + # blockBufferSize reflects the size of the re-ordering buffer + # which captures blocks and takes care to deliver them in order + # down to the ledger layer. The actual buffer size is bounded between + # 0 and 2*blockBufferSize, each channel maintains its own buffer + blockBufferSize: 20 + # maxRetries maximum number of re-tries to ask + # for single state transfer request + maxRetries: 3 + + # TLS Settings + tls: + # Require server-side TLS + enabled: false + # Require client certificates / mutual TLS. + # Note that clients that are not configured to use a certificate will + # fail to connect to the peer. + clientAuthRequired: false + # X.509 certificate used for TLS server + cert: + file: tls/server.crt + # Private key used for TLS server (and client if clientAuthEnabled + # is set to true + key: + file: tls/server.key + # Trusted root certificate chain for tls.cert + rootcert: + file: tls/ca.crt + # Set of root certificate authorities used to verify client certificates + clientRootCAs: + files: + - tls/ca.crt + # Private key used for TLS when making client connections. If + # not set, peer.tls.key.file will be used instead + clientKey: + file: + # X.509 certificate used for TLS when making client connections. + # If not set, peer.tls.cert.file will be used instead + clientCert: + file: + + # Authentication contains configuration parameters related to authenticating + # client messages + authentication: + # the acceptable difference between the current server time and the + # client's time as specified in a client request message + timewindow: 15m + + # Path on the file system where peer will store data (eg ledger). This + # location must be access control protected to prevent unintended + # modification that might corrupt the peer operations. + fileSystemPath: /var/hyperledger/production + + # BCCSP (Blockchain crypto provider): Select which crypto implementation or + # library to use + BCCSP: + Default: SW + # Settings for the SW crypto provider (i.e. when DEFAULT: SW) + SW: + # TODO: The default Hash and Security level needs refactoring to be + # fully configurable. 
Changing these defaults requires coordination + # SHA2 is hardcoded in several places, not only BCCSP + Hash: SHA2 + Security: 256 + # Location of Key Store + FileKeyStore: + # If "", defaults to 'mspConfigPath'/keystore + KeyStore: + # Settings for the PKCS#11 crypto provider (i.e. when DEFAULT: PKCS11) + # PKCS11: + # # Location of the PKCS11 module library + # Library: + # # Token Label + # Label: + # # User PIN + # Pin: + # Hash: + # Security: + + # Path on the file system where peer will find MSP local configurations + mspConfigPath: msp + + # Identifier of the local MSP + # ----!!!!IMPORTANT!!!-!!!IMPORTANT!!!-!!!IMPORTANT!!!!---- + # Deployers need to change the value of the localMspId string. + # In particular, the name of the local MSP ID of a peer needs + # to match the name of one of the MSPs in each of the channel + # that this peer is a member of. Otherwise this peer's messages + # will not be identified as valid by other nodes. + localMspId: SampleOrg + + # CLI common client config options + client: + # connection timeout + connTimeout: 3s + + # Delivery service related config + deliveryclient: + # It sets the total time the delivery service may spend in reconnection + # attempts until its retry logic gives up and returns an error + reconnectTotalTimeThreshold: 3600s + + # It sets the delivery service <-> ordering service node connection timeout + connTimeout: 3s + + # It sets the delivery service maximal delay between consecutive retries + reConnectBackoffThreshold: 3600s + + # A list of orderer endpoint addresses which should be overridden + # when found in channel configurations. + addressOverrides: + # - from: + # to: + # caCertsFile: + # - from: + # to: + # caCertsFile: + + # Type for the local MSP - by default it's of type bccsp + localMspType: bccsp + + # Used with Go profiling tools only in none production environment. In + # production, it should be disabled (eg enabled: false) + profile: + enabled: false + listenAddress: 0.0.0.0:6060 + + # Handlers defines custom handlers that can filter and mutate + # objects passing within the peer, such as: + # Auth filter - reject or forward proposals from clients + # Decorators - append or mutate the chaincode input passed to the chaincode + # Endorsers - Custom signing over proposal response payload and its mutation + # Valid handler definition contains: + # - A name which is a factory method name defined in + # core/handlers/library/library.go for statically compiled handlers + # - library path to shared object binary for pluggable filters + # Auth filters and decorators are chained and executed in the order that + # they are defined. For example: + # authFilters: + # - + # name: FilterOne + # library: /opt/lib/filter.so + # - + # name: FilterTwo + # decorators: + # - + # name: DecoratorOne + # - + # name: DecoratorTwo + # library: /opt/lib/decorator.so + # Endorsers are configured as a map that its keys are the endorsement system chaincodes that are being overridden. + # Below is an example that overrides the default ESCC and uses an endorsement plugin that has the same functionality + # as the default ESCC. + # If the 'library' property is missing, the name is used as the constructor method in the builtin library similar + # to auth filters and decorators. 
+ # endorsers: + # escc: + # name: DefaultESCC + # library: /etc/hyperledger/fabric/plugin/escc.so + handlers: + authFilters: + - + name: DefaultAuth + - + name: ExpirationCheck # This filter checks identity x509 certificate expiration + decorators: + - + name: DefaultDecorator + endorsers: + escc: + name: DefaultEndorsement + library: + validators: + vscc: + name: DefaultValidation + library: + + # library: /etc/hyperledger/fabric/plugin/escc.so + # Number of goroutines that will execute transaction validation in parallel. + # By default, the peer chooses the number of CPUs on the machine. Set this + # variable to override that choice. + # NOTE: overriding this value might negatively influence the performance of + # the peer so please change this value only if you know what you're doing + validatorPoolSize: + + # The discovery service is used by clients to query information about peers, + # such as - which peers have joined a certain channel, what is the latest + # channel config, and most importantly - given a chaincode and a channel, + # what possible sets of peers satisfy the endorsement policy. + discovery: + enabled: true + # Whether the authentication cache is enabled or not. + authCacheEnabled: true + # The maximum size of the cache, after which a purge takes place + authCacheMaxSize: 1000 + # The proportion (0 to 1) of entries that remain in the cache after the cache is purged due to overpopulation + authCachePurgeRetentionRatio: 0.75 + # Whether to allow non-admins to perform non channel scoped queries. + # When this is false, it means that only peer admins can perform non channel scoped queries. + orgMembersAllowedAccess: false + + # Limits is used to configure some internal resource limits. + limits: + # Concurrency limits the number of concurrently running requests to a service on each peer. + # Currently this option is only applied to endorser service and deliver service. + # When the property is missing or the value is 0, the concurrency limit is disabled for the service. + concurrency: + # endorserService limits concurrent requests to endorser service that handles chaincode deployment, query and invocation, + # including both user chaincodes and system chaincodes. + endorserService: 2500 + # deliverService limits concurrent event listeners registered to deliver service for blocks and transaction events. + deliverService: 2500 + # gatewayService limits concurrent requests to gateway service that handles the submission and evaluation of transactions. + gatewayService: 500 + + # Since all nodes should be consistent it is recommended to keep + # the default value of 100MB for MaxRecvMsgSize & MaxSendMsgSize + # Max message size in bytes GRPC server and client can receive + maxRecvMsgSize: 104857600 + # Max message size in bytes GRPC server and client can send + maxSendMsgSize: 104857600 + +############################################################################### +# +# VM section +# +############################################################################### +vm: + + # Endpoint of the vm management system. For docker can be one of the following in general + # unix:///var/run/docker.sock + # http://localhost:2375 + # https://localhost:2376 + # + # For the chaincode as a service external builders, this attribute must be _removed_, not set as a nil value, + # for the peer to avoid a launch time detection of the docker daemon on the local host. + # + # ibp changes this. 
+ # endpoint: + + + # settings for docker vms + docker: + tls: + enabled: false + ca: + file: docker/ca.crt + cert: + file: docker/tls.crt + key: + file: docker/tls.key + + # Enables/disables the standard out/err from chaincode containers for + # debugging purposes + attachStdout: false + + # Parameters on creating docker container. + # Container may be efficiently created using ipam & dns-server for cluster + # NetworkMode - sets the networking mode for the container. Supported + # standard values are: `host`(default),`bridge`,`ipvlan`,`none`. + # Dns - a list of DNS servers for the container to use. + # Note: `Privileged` `Binds` `Links` and `PortBindings` properties of + # Docker Host Config are not supported and will not be used if set. + # LogConfig - sets the logging driver (Type) and related options + # (Config) for Docker. For more info, + # https://docs.docker.com/engine/admin/logging/overview/ + # Note: Set LogConfig using Environment Variables is not supported. + hostConfig: + NetworkMode: host + Dns: + # - 192.168.0.1 + # NEVER UNCOMMENT THIS + # LogConfig: + # Type: json-file + # Config: + # max-size: "50m" + # max-file: "5" + Memory: 2147483648 + +############################################################################### +# +# Chaincode section +# +############################################################################### +chaincode: + + # The id is used by the Chaincode stub to register the executing Chaincode + # ID with the Peer and is generally supplied through ENV variables + # the `path` form of ID is provided when installing the chaincode. + # The `name` is used for all other requests and can be any string. + id: + path: + name: + + # Generic builder environment, suitable for most chaincode types + builder: $(DOCKER_NS)/fabric-ccenv:$(TWO_DIGIT_VERSION) + + # Enables/disables force pulling of the base docker images (listed below) + # during user chaincode instantiation. + # Useful when using moving image tags (such as :latest) + pull: false + + golang: + # golang will never need more than baseos + runtime: $(DOCKER_NS)/fabric-baseos:$(TWO_DIGIT_VERSION) + + # whether or not golang chaincode should be linked dynamically + dynamicLink: false + + java: + # This is an image based on java:openjdk-8 with addition compiler + # tools added for java shim layer packaging. + # This image is packed with shim layer libraries that are necessary + # for Java chaincode runtime. + runtime: $(DOCKER_NS)/fabric-javaenv:$(TWO_DIGIT_VERSION) + + node: + # This is an image based on node:$(NODE_VER)-alpine + runtime: $(DOCKER_NS)/fabric-nodeenv:$(TWO_DIGIT_VERSION) + + # List of directories to treat as external builders and launchers for + # chaincode. The external builder detection processing will iterate over the + # builders in the order specified below. + # ibp updates this with ibp related values + externalBuilders: + - name: ibp-builder + path: /usr/local + environmentWhitelist: + - IBP_BUILDER_ENDPOINT + - IBP_BUILDER_SHARED_DIR + propagateEnvironment: + - IBP_BUILDER_ENDPOINT + - IBP_BUILDER_SHARED_DIR + - PEER_NAME + + # Default builder for chaincode-as-a-service, included in fabric + # opensource versions >= 2.4.2. This is a "no-op" builder and will not + # manage the lifecycle of pods, deployments, and services in k8s. The + # builder will only copy the chaincode package metadata, instructing the + # peer to connect to a remote CCaaS endpoint at a given service URL. 
+ - name: ccaas-builder + path: /opt/hyperledger/ccaas_builder + propagateEnvironment: + - CHAINCODE_AS_A_SERVICE_BUILDER_CONFIG + + # The maximum duration to wait for the chaincode build and install process + # to complete. + installTimeout: 300s + + # Timeout duration for starting up a container and waiting for Register + # to come through. + startuptimeout: 300s + + # Timeout duration for Invoke and Init calls to prevent runaway. + # This timeout is used by all chaincodes in all the channels, including + # system chaincodes. + # Note that during Invoke, if the image is not available (e.g. being + # cleaned up when in development environment), the peer will automatically + # build the image, which might take more time. In production environment, + # the chaincode image is unlikely to be deleted, so the timeout could be + # reduced accordingly. + # ibp updates this from 30s to 60s + executetimeout: 60s + + # There are 2 modes: "dev" and "net". + # In dev mode, user runs the chaincode after starting peer from + # command line on local machine. + # In net mode, peer will run chaincode in a docker container. + mode: net + + # keepalive in seconds. In situations where the communication goes through a + # proxy that does not support keep-alive, this parameter will maintain connection + # between peer and chaincode. + # A value <= 0 turns keepalive off + keepalive: 0 + + # enabled system chaincodes + system: + _lifecycle: enable + cscc: enable + lscc: enable + qscc: enable + + # Logging section for the chaincode container + logging: + # Default level for all loggers within the chaincode container + level: info + # Override default level for the 'shim' logger + shim: warning + # Format for the chaincode container logs + format: '%{color}%{time:2006-01-02 15:04:05.000 MST} [%{module}] %{shortfunc} -> %{level:.4s} %{id:03x}%{color:reset} %{message}' + +############################################################################### +# +# Ledger section - ledger configuration encompasses both the blockchain +# and the state +# +############################################################################### +ledger: + + blockchain: + + state: + # stateDatabase - options are "goleveldb", "CouchDB" + # goleveldb - default state database stored in goleveldb. + # CouchDB - store state database in CouchDB + stateDatabase: goleveldb + # Limit on the number of records to return per query + totalQueryLimit: 100000 + couchDBConfig: + # It is recommended to run CouchDB on the same server as the peer, and + # not map the CouchDB container port to a server port in docker-compose. + # Otherwise proper security must be provided on the connection between + # CouchDB client (on the peer) and server. + couchDBAddress: 127.0.0.1:5984 + # This username must have read and write authority on CouchDB + username: + # The password is recommended to pass as an environment variable + # during start up (eg CORE_LEDGER_STATE_COUCHDBCONFIG_PASSWORD). + # If it is stored here, the file must be access control protected + # to prevent unintended users from discovering the password. + password: + # Number of retries for CouchDB errors + maxRetries: 3 + # Number of retries for CouchDB errors during peer startup. + # The delay between retries doubles for each attempt. + # Default of 10 retries results in 11 attempts over 2 minutes. + maxRetriesOnStartup: 10 + # CouchDB request timeout (unit: duration, e.g. 
20s) + requestTimeout: 35s + # Limit on the number of records per each CouchDB query + # Note that chaincode queries are only bound by totalQueryLimit. + # Internally the chaincode may execute multiple CouchDB queries, + # each of size internalQueryLimit. + internalQueryLimit: 1000 + # Limit on the number of records per CouchDB bulk update batch + maxBatchUpdateSize: 1000 + # Warm indexes after every N blocks. + # This option warms any indexes that have been + # deployed to CouchDB after every N blocks. + # A value of 1 will warm indexes after every block commit, + # to ensure fast selector queries. + # Increasing the value may improve write efficiency of peer and CouchDB, + # but may degrade query response time. + warmIndexesAfterNBlocks: 1 + # Create the _global_changes system database + # This is optional. Creating the global changes database will require + # additional system resources to track changes and maintain the database + createGlobalChangesDB: false + # CacheSize denotes the maximum mega bytes (MB) to be allocated for the in-memory state + # cache. Note that CacheSize needs to be a multiple of 32 MB. If it is not a multiple + # of 32 MB, the peer would round the size to the next multiple of 32 MB. + # To disable the cache, 0 MB needs to be assigned to the cacheSize. + cacheSize: 64 + + history: + # enableHistoryDatabase - options are true or false + # Indicates if the history of key updates should be stored. + # All history 'index' will be stored in goleveldb, regardless if using + # CouchDB or alternate database for the state. + enableHistoryDatabase: true + + pvtdataStore: + # the maximum db batch size for converting + # the ineligible missing data entries to eligible missing data entries + collElgProcMaxDbBatchSize: 5000 + # the minimum duration (in milliseconds) between writing + # two consecutive db batches for converting the ineligible missing data entries to eligible missing data entries + collElgProcDbBatchesInterval: 1000 + # The missing data entries are classified into two categories: + # (1) prioritized + # (2) deprioritized + # Initially, all missing data are in the prioritized list. When the + # reconciler is unable to fetch the missing data from other peers, + # the unreconciled missing data would be moved to the deprioritized list. + # The reconciler would retry deprioritized missing data after every + # deprioritizedDataReconcilerInterval (unit: minutes). Note that the + # interval needs to be greater than the reconcileSleepInterval + deprioritizedDataReconcilerInterval: 60m + # The frequency to purge private data (in number of blocks). + # Private data is purged from the peer's private data store based on + # the collection property blockToLive or an explicit chaincode call to PurgePrivateData(). 
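The purge settings that follow also cover purges requested explicitly from chaincode. A minimal sketch of such a request, assuming the fabric-chaincode-go shim and an illustrative collection name:

package chaincode

import "github.com/hyperledger/fabric-chaincode-go/shim"

// purgeAsset asks the peer to remove a private data key, including its
// history, from the private data store; the peer applies these purges in
// batches on the schedule controlled by ledger.pvtdataStore.purgeInterval.
func purgeAsset(stub shim.ChaincodeStubInterface, key string) error {
	const collection = "collectionMarbles" // illustrative collection name
	return stub.PurgePrivateData(collection, key)
}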
+ purgeInterval: 100 + # Whether to log private data keys purged from private data store (INFO level) when explicitly purged via chaincode + purgedKeyAuditLogging: true + + snapshots: + # Path on the file system where peer will store ledger snapshots + rootDir: /var/hyperledger/production/snapshots + +############################################################################### +# +# Operations section +# +############################################################################### +operations: + # host and port for the operations server + listenAddress: 127.0.0.1:9443 + + # TLS configuration for the operations endpoint + tls: + # TLS enabled + enabled: false + + # path to PEM encoded server certificate for the operations server + cert: + file: + + # path to PEM encoded server key for the operations server + key: + file: + + # most operations service endpoints require client authentication when TLS + # is enabled. clientAuthRequired requires client certificate authentication + # at the TLS layer to access all resources. + clientAuthRequired: false + + # paths to PEM encoded ca certificates to trust for client authentication + clientRootCAs: + files: [] + +############################################################################### +# +# Metrics section +# +############################################################################### +metrics: + # metrics provider is one of statsd, prometheus, or disabled + # ibp updates this from disabled to prometheus + provider: prometheus + + # statsd configuration + statsd: + # network type: tcp or udp + network: udp + + # statsd server address + address: 127.0.0.1:8125 + + # the interval at which locally cached counters and gauges are pushed + # to statsd; timings are pushed immediately + writeInterval: 10s + + # prefix is prepended to all emitted statsd metrics + prefix: \ No newline at end of file diff --git a/integration/init/peer_test.go b/integration/init/peer_test.go index 51317a99..35a8717f 100644 --- a/integration/init/peer_test.go +++ b/integration/init/peer_test.go @@ -65,9 +65,10 @@ var _ = Describe("Peer init", func() { } config := &initializer.Config{ - OUFile: filepath.Join(defaultConfigs, "peer/ouconfig.yaml"), - CorePeerFile: filepath.Join(defaultConfigs, "peer/core.yaml"), - CorePeerV2File: filepath.Join(defaultConfigs, "peer/v2/core.yaml"), + OUFile: filepath.Join(defaultConfigs, "peer/ouconfig.yaml"), + CorePeerFile: filepath.Join(defaultConfigs, "peer/core.yaml"), + CorePeerV2File: filepath.Join(defaultConfigs, "peer/v2/core.yaml"), + CorePeerV25File: filepath.Join(defaultConfigs, "peer/v25/core.yaml"), } validator := &validator.Validator{ Client: client, diff --git a/integration/operator.go b/integration/operator.go index 58768923..1e2126c4 100644 --- a/integration/operator.go +++ b/integration/operator.go @@ -66,6 +66,7 @@ func GetOperatorConfig(configs, caFiles, peerFiles, ordererFiles, consoleFiles s PeerInitConfig: &peerinit.Config{ CorePeerFile: filepath.Join(configs, "peer/core.yaml"), CorePeerV2File: filepath.Join(configs, "peer/v2/core.yaml"), + CorePeerV25File: filepath.Join(configs, "peer/v25/core.yaml"), OUFile: filepath.Join(configs, "peer/ouconfig.yaml"), InterOUFile: filepath.Join(configs, "peer/ouconfig-inter.yaml"), DeploymentFile: filepath.Join(peerFiles, "deployment.yaml"), @@ -87,6 +88,7 @@ func GetOperatorConfig(configs, caFiles, peerFiles, ordererFiles, consoleFiles s OrdererInitConfig: &ordererinit.Config{ OrdererV2File: filepath.Join(configs, "orderer/v2/orderer.yaml"), OrdererV24File: 
filepath.Join(configs, "orderer/v24/orderer.yaml"), + OrdererV25File: filepath.Join(configs, "orderer/v25/orderer.yaml"), OrdererFile: filepath.Join(configs, "orderer/orderer.yaml"), ConfigTxFile: filepath.Join(configs, "orderer/configtx.yaml"), OUFile: filepath.Join(configs, "orderer/ouconfig.yaml"), diff --git a/main.go b/main.go index 5f9570b9..8b702e4a 100644 --- a/main.go +++ b/main.go @@ -137,6 +137,7 @@ func setDefaultPeerDefinitions(cfg *config.Config) { InterOUFile: filepath.Join(defaultConfigs, "peer/ouconfig-inter.yaml"), CorePeerFile: filepath.Join(defaultConfigs, "peer/core.yaml"), CorePeerV2File: filepath.Join(defaultConfigs, "peer/v2/core.yaml"), + CorePeerV25File: filepath.Join(defaultConfigs, "peer/v25/core.yaml"), DeploymentFile: filepath.Join(defaultPeerDef, "deployment.yaml"), PVCFile: filepath.Join(defaultPeerDef, "pvc.yaml"), CouchDBPVCFile: filepath.Join(defaultPeerDef, "couchdb-pvc.yaml"), @@ -159,6 +160,7 @@ func setDefaultOrdererDefinitions(cfg *config.Config) { cfg.OrdererInitConfig = &ordererinit.Config{ OrdererV2File: filepath.Join(defaultConfigs, "orderer/v2/orderer.yaml"), OrdererV24File: filepath.Join(defaultConfigs, "orderer/v24/orderer.yaml"), + OrdererV25File: filepath.Join(defaultConfigs, "orderer/v25/orderer.yaml"), OrdererFile: filepath.Join(defaultConfigs, "orderer/orderer.yaml"), ConfigTxFile: filepath.Join(defaultConfigs, "orderer/configtx.yaml"), OUFile: filepath.Join(defaultConfigs, "orderer/ouconfig.yaml"), diff --git a/pkg/apis/orderer/v24/orderer.go b/pkg/apis/orderer/v24/orderer.go index efdc991b..f7181bb4 100644 --- a/pkg/apis/orderer/v24/orderer.go +++ b/pkg/apis/orderer/v24/orderer.go @@ -49,6 +49,8 @@ type General struct { LocalMSPID string `json:"localMspId,omitempty"` BCCSP *commonapi.BCCSP `json:"BCCSP,omitempty"` Authentication v1.Authentication `json:"authentication,omitempty"` + MaxRecvMsgSize int `json:"maxRecvMsgSize,omitempty"` + MaxSendMsgSize int `json:"maxSendMsgSize,omitempty"` } // FileLedger contains configuration for the file-based ledger. diff --git a/pkg/apis/orderer/v25/orderer.go b/pkg/apis/orderer/v25/orderer.go new file mode 100644 index 00000000..c837fa08 --- /dev/null +++ b/pkg/apis/orderer/v25/orderer.go @@ -0,0 +1,35 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package v25 + +import ( + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/orderer/v1" + v2 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/orderer/v24" +) + +type Orderer struct { + General v2.General `json:"general,omitempty"` + FileLedger v2.FileLedger `json:"fileLedger,omitempty"` + Debug v1.Debug `json:"debug,omitempty"` + Consensus interface{} `json:"consensus,omitempty"` + Operations v1.Operations `json:"operations,omitempty"` + Metrics v1.Metrics `json:"metrics,omitempty"` + Admin v2.Admin `json:"admin,omitempty"` + ChannelParticipation v2.ChannelParticipation `json:"channelParticipation,omitempty"` +} diff --git a/pkg/apis/peer/v2/peer.go b/pkg/apis/peer/v2/peer.go index 5434f062..72df20a2 100644 --- a/pkg/apis/peer/v2/peer.go +++ b/pkg/apis/peer/v2/peer.go @@ -61,6 +61,8 @@ type Peer struct { ValidatorPoolSize int `json:"validatorPoolSize,omitempty"` Discovery v1.Discovery `json:"discovery,omitempty"` Limits Limits `json:"limits,omitempty"` + MaxRecvMsgSize int `json:"maxRecvMsgSize,omitempty"` + MaxSendMsgSize int `json:"maxSendMsgSize,omitempty"` } type Gossip struct { diff --git a/pkg/apis/peer/v25/peer.go b/pkg/apis/peer/v25/peer.go new file mode 100644 index 00000000..a26fd990 --- /dev/null +++ b/pkg/apis/peer/v25/peer.go @@ -0,0 +1,87 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package v25 + +import ( + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/peer/v1" + v2 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/peer/v2" +) + +type Core struct { + Peer Peer `json:"peer,omitempty"` + Chaincode v2.Chaincode `json:"chaincode,omitempty"` + Operations v1.Operations `json:"operations,omitempty"` + Metrics v1.Metrics `json:"metrics,omitempty"` + VM v1.VM `json:"vm,omitempty"` + Ledger Ledger `json:"ledger,omitempty"` + // Not Fabric - this is for deployment + MaxNameLength *int `json:"maxnamelength,omitempty"` +} + +type Peer struct { + ID string `json:"id,omitempty"` + NetworkID string `json:"networkId,omitempty"` + ListenAddress string `json:"listenAddress,omitempty"` + ChaincodeListenAddress string `json:"chaincodeListenAddress,omitempty"` + ChaincodeAddress string `json:"chaincodeAddress,omitempty"` + Address string `json:"address,omitempty"` + AddressAutoDetect *bool `json:"addressAutoDetect,omitempty"` + Gateway Gateway `json:"gateway,omitempty"` + Keepalive v2.KeepAlive `json:"keepalive,omitempty"` + Gossip v2.Gossip `json:"gossip,omitempty"` + TLS v1.TLS `json:"tls,omitempty"` + Authentication v1.Authentication `json:"authentication,omitempty"` + FileSystemPath string `json:"fileSystemPath,omitempty"` + BCCSP *common.BCCSP `json:"BCCSP,omitempty"` + MspConfigPath string `json:"mspConfigPath,omitempty"` + LocalMspId string `json:"localMspId,omitempty"` + Client v1.Client `json:"client,omitempty"` + DeliveryClient v1.DeliveryClient `json:"deliveryclient,omitempty"` + LocalMspType string `json:"localMspType,omitempty"` + Profile v1.Profile `json:"profile,omitempty"` + AdminService v1.AdminService `json:"adminService,omitempty"` + Handlers v1.HandlersConfig `json:"handlers,omitempty"` + ValidatorPoolSize int `json:"validatorPoolSize,omitempty"` + Discovery v1.Discovery `json:"discovery,omitempty"` + Limits v2.Limits `json:"limits,omitempty"` + MaxRecvMsgSize int `json:"maxRecvMsgSize,omitempty"` + MaxSendMsgSize int `json:"maxSendMsgSize,omitempty"` +} + +type Ledger struct { + State v2.LedgerState `json:"state,omitempty"` + History v1.LedgerHistory `json:"history,omitempty"` + PvtDataStore PvtDataStore `json:"pvtdataStore,omitempty"` +} + +type PvtDataStore struct { + CollElgProcMaxDbBatchSize int `json:"collElgProcMaxDbBatchSize,omitempty"` + CollElgProcDbBatchesInterval int `json:"collElgProcDbBatchesInterval,omitempty"` + DeprioritizedDataReconcilerInterval common.Duration `json:"deprioritizedDataReconcilerInterval,omitempty"` + PurgeInterval int `json:"purgeInterval,omitempty"` + PurgedKeyAuditLogging *bool `json:"purgedKeyAuditLogging,omitempty"` +} + +type Gateway struct { + Enabled *bool `json:"enabled,omitempty"` + EndorsementTimeout common.Duration `json:"endorsementTimeout,omitempty"` + DialTimeout common.Duration `json:"dialTimeout,omitempty"` + BroadcastTimeout common.Duration `json:"broadcastTimeout,omitempty"` +} diff --git a/pkg/initializer/orderer/config/v25/config_suite_test.go b/pkg/initializer/orderer/config/v25/config_suite_test.go new file mode 100644 index 00000000..b4f6fc6f --- /dev/null +++ b/pkg/initializer/orderer/config/v25/config_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v25_test + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestV25(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "V2 Suite") +} diff --git a/pkg/initializer/orderer/config/v25/config_test.go b/pkg/initializer/orderer/config/v25/config_test.go new file mode 100644 index 00000000..1429c248 --- /dev/null +++ b/pkg/initializer/orderer/config/v25/config_test.go @@ -0,0 +1,199 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v25_test + +import ( + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + commonapi "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + v1 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/orderer/v1" + v24 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/orderer/v24" + v25 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/orderer/v25" + config "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer/config/v25" +) + +var _ = Describe("V2 Orderer Configuration", func() { + Context("reading and writing orderer configuration file", func() { + BeforeEach(func() { + config := &config.Orderer{} + + err := config.WriteToFile("/tmp/orderer.yaml") + Expect(err).NotTo(HaveOccurred()) + }) + + It("creates orderer.yaml", func() { + Expect("/tmp/orderer.yaml").Should(BeAnExistingFile()) + }) + + It("read orderer.yaml", func() { + _, err := config.ReadOrdererFile("/tmp/orderer.yaml") + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("merges current configuration with overrides values", func() { + It("merges with defaults based on HSM proxy", func() { + orderer, err := config.ReadOrdererFile("../../../../../testdata/init/orderer/orderer.yaml") + Expect(err).NotTo(HaveOccurred()) + + newConfig := &config.Orderer{ + Orderer: v25.Orderer{ + General: v24.General{ + BCCSP: &commonapi.BCCSP{ + ProviderName: "PKCS11", + PKCS11: &commonapi.PKCS11Opts{ + Library: "library2", + Label: "label2", + Pin: "2222", + HashFamily: "SHA3", + SecLevel: 512, + FileKeyStore: &commonapi.FileKeyStoreOpts{ + KeyStorePath: "keystore3", + }, + }, + }, + }, + }, + } + + err = orderer.MergeWith(newConfig, true) + Expect(err).NotTo(HaveOccurred()) + Expect(orderer.General.BCCSP.PKCS11.Library).To(Equal("/usr/local/lib/libpkcs11-proxy.so")) + Expect(orderer.General.BCCSP.PKCS11.Label).To(Equal("label2")) + Expect(orderer.General.BCCSP.PKCS11.Pin).To(Equal("2222")) + Expect(orderer.General.BCCSP.PKCS11.HashFamily).To(Equal("SHA3")) + 
Expect(orderer.General.BCCSP.PKCS11.SecLevel).To(Equal(512)) + Expect(orderer.General.BCCSP.PKCS11.FileKeyStore.KeyStorePath).To(Equal("keystore3")) + }) + + It("correctly merges boolean fields", func() { + orderer, err := config.ReadOrdererFile("../../../../../testdata/init/orderer/orderer.yaml") + Expect(err).NotTo(HaveOccurred()) + + trueVal := true + orderer.General.Authentication.NoExpirationChecks = &trueVal + orderer.General.Profile.Enabled = &trueVal + Expect(*orderer.General.Authentication.NoExpirationChecks).To(Equal(true)) + Expect(*orderer.General.Profile.Enabled).To(Equal(true)) + + falseVal := false + newConfig := &config.Orderer{ + Orderer: v25.Orderer{ + General: v24.General{ + Authentication: v1.Authentication{ + NoExpirationChecks: &falseVal, + }, + }, + }, + } + + err = orderer.MergeWith(newConfig, false) + Expect(err).NotTo(HaveOccurred()) + + By("setting field from 'true' to 'false' if bool pointer set to 'false' in override config", func() { + Expect(*orderer.General.Authentication.NoExpirationChecks).To(Equal(false)) + }) + + By("persisting boolean fields set to 'true' when bool pointer not set to 'false' in override config", func() { + Expect(*orderer.General.Profile.Enabled).To(Equal(true)) + }) + + }) + }) + + It("reads in orderer.yaml and unmarshal it to peer config", func() { + orderer, err := config.ReadOrdererFile("../../../../../testdata/init/orderer/orderer.yaml") + Expect(err).NotTo(HaveOccurred()) + + // General + general := orderer.General + By("setting General.ListenAddress", func() { + Expect(general.ListenAddress).To(Equal("127.0.0.1")) + }) + + By("setting General.ListenPort", func() { + Expect(general.ListenPort).To(Equal(uint16(7050))) + }) + + By("setting General.TLS.Enabled", func() { + Expect(*general.TLS.Enabled).To(Equal(true)) + }) + + By("setting General.TLS.PrivateKey", func() { + Expect(general.TLS.PrivateKey).To(Equal("tls/server.key")) + }) + + By("setting General.TLS.Certificate", func() { + Expect(general.TLS.Certificate).To(Equal("tls/server.crt")) + }) + + By("setting General.TLS.RootCAs", func() { + Expect(general.TLS.RootCAs).To(Equal([]string{"tls/ca.crt"})) + }) + + By("setting General.TLS.ClientAuthRequired", func() { + Expect(*general.TLS.ClientAuthRequired).To(Equal(true)) + }) + + By("setting General.TLS.ClientRootCAs", func() { + Expect(general.TLS.ClientRootCAs).To(Equal([]string{"tls/client.crt"})) + }) + + By("setting General.BCCSP.ProviderName", func() { + Expect(general.BCCSP.ProviderName).To(Equal("SW")) + }) + + By("setting General.BCCSP.SW.HashFamily", func() { + Expect(general.BCCSP.SW.HashFamily).To(Equal("SHA2")) + }) + + By("setting General.BCCSP.SW.SecLevel", func() { + Expect(general.BCCSP.SW.SecLevel).To(Equal(256)) + }) + + By("setting General.BCCSP.SW.FileKeyStore.KeyStore", func() { + Expect(general.BCCSP.SW.FileKeyStore.KeyStorePath).To(Equal("msp/keystore")) + }) + + By("setting BCCSP.PKCS11.Library", func() { + Expect(general.BCCSP.PKCS11.Library).To(Equal("library1")) + }) + + By("setting BCCSP.PKCS11.Label", func() { + Expect(general.BCCSP.PKCS11.Label).To(Equal("label1")) + }) + + By("setting BCCSP.PKCS11.Pin", func() { + Expect(general.BCCSP.PKCS11.Pin).To(Equal("1234")) + }) + + By("setting BCCSP.PKCS11.HashFamily", func() { + Expect(general.BCCSP.PKCS11.HashFamily).To(Equal("SHA2")) + }) + + By("setting BCCSP.PKCS11.Security", func() { + Expect(general.BCCSP.PKCS11.SecLevel).To(Equal(256)) + }) + + By("setting BCCSP.PKCS11.FileKeystore.KeystorePath", func() { + 
Expect(general.BCCSP.PKCS11.FileKeyStore.KeyStorePath).To(Equal("keystore2")) + }) + }) +}) diff --git a/pkg/initializer/orderer/config/v25/io.go b/pkg/initializer/orderer/config/v25/io.go new file mode 100644 index 00000000..ed5024fa --- /dev/null +++ b/pkg/initializer/orderer/config/v25/io.go @@ -0,0 +1,61 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v25 + +import ( + "io/ioutil" + "path/filepath" + + "sigs.k8s.io/yaml" +) + +func ReadOrdererFile(path string) (*Orderer, error) { + config, err := ioutil.ReadFile(filepath.Clean(path)) + if err != nil { + return nil, err + } + + orderer := &Orderer{} + err = yaml.Unmarshal(config, orderer) + if err != nil { + return nil, err + } + + return orderer, nil +} + +func ReadOrdererFromBytes(config []byte) (*Orderer, error) { + orderer := &Orderer{} + err := yaml.Unmarshal(config, orderer) + if err != nil { + return nil, err + } + + return orderer, nil +} + +func ReadFrom(from *[]byte) (*Orderer, error) { + ordererConfig := &Orderer{} + err := yaml.Unmarshal(*from, ordererConfig) + if err != nil { + return nil, err + } + + return ordererConfig, nil +} diff --git a/pkg/initializer/orderer/config/v25/orderer.go b/pkg/initializer/orderer/config/v25/orderer.go new file mode 100644 index 00000000..3784f080 --- /dev/null +++ b/pkg/initializer/orderer/config/v25/orderer.go @@ -0,0 +1,140 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package v25 + +import ( + "encoding/json" + "io/ioutil" + "strings" + + commonapi "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + V25 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/orderer/v25" + "github.com/IBM-Blockchain/fabric-operator/pkg/util/merge" + "github.com/pkg/errors" + "sigs.k8s.io/yaml" +) + +type Orderer struct { + V25.Orderer `json:",inline"` +} + +func (o *Orderer) ToBytes() ([]byte, error) { + bytes, err := yaml.Marshal(o) + if err != nil { + return nil, err + } + + return bytes, nil +} + +func (o *Orderer) WriteToFile(path string) error { + bytes, err := yaml.Marshal(o) + if err != nil { + return err + } + + err = ioutil.WriteFile(path, bytes, 0600) + if err != nil { + return err + } + + return nil +} + +func (o *Orderer) MergeWith(newConfig interface{}, usingHSMProxy bool) error { + newOrderer := newConfig.(*Orderer) + + if newOrderer != nil { + err := merge.WithOverwrite(o, newConfig) + if err != nil { + return errors.Wrapf(err, "failed to merge orderer configuration overrides") + } + } + + if o.UsingPKCS11() { + o.SetPKCS11Defaults(usingHSMProxy) + } + + return nil +} + +func (o *Orderer) DeepCopyInto(into *Orderer) { + b, err := json.Marshal(o) + if err != nil { + return + } + + err = json.Unmarshal(b, into) + if err != nil { + return + } +} + +func (o *Orderer) DeepCopy() *Orderer { + if o == nil { + return nil + } + out := new(Orderer) + o.DeepCopyInto(out) + return out +} + +func (o *Orderer) UsingPKCS11() bool { + if o.General.BCCSP != nil { + if strings.ToLower(o.General.BCCSP.ProviderName) == "pkcs11" { + return true + } + } + return false +} + +func (o *Orderer) SetPKCS11Defaults(usingHSMProxy bool) { + if o.General.BCCSP.PKCS11 == nil { + o.General.BCCSP.PKCS11 = &commonapi.PKCS11Opts{} + } + + if usingHSMProxy { + o.General.BCCSP.PKCS11.Library = "/usr/local/lib/libpkcs11-proxy.so" + } + + if o.General.BCCSP.PKCS11.HashFamily == "" { + o.General.BCCSP.PKCS11.HashFamily = "SHA2" + } + + if o.General.BCCSP.PKCS11.SecLevel == 0 { + o.General.BCCSP.PKCS11.SecLevel = 256 + } +} + +func (o *Orderer) SetBCCSPLibrary(library string) { + if o.General.BCCSP.PKCS11 == nil { + o.General.BCCSP.PKCS11 = &commonapi.PKCS11Opts{} + } + + o.General.BCCSP.PKCS11.Library = library +} + +func (o *Orderer) SetDefaultKeyStore() { + // No-op + return +} + +func (o *Orderer) GetBCCSPSection() *commonapi.BCCSP { + return o.General.BCCSP +} diff --git a/pkg/initializer/orderer/initializer.go b/pkg/initializer/orderer/initializer.go index eb8d4f47..1407f30e 100644 --- a/pkg/initializer/orderer/initializer.go +++ b/pkg/initializer/orderer/initializer.go @@ -34,6 +34,7 @@ import ( ordererconfig "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer/config/v1" v2ordererconfig "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer/config/v2" v24ordererconfig "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer/config/v24" + v25ordererconfig "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer/config/v25" "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" "github.com/IBM-Blockchain/fabric-operator/pkg/util" @@ -54,6 +55,7 @@ type Config struct { OrdererFile string OrdererV2File string OrdererV24File string + OrdererV25File string OUFile string InterOUFile string DeploymentFile string @@ -430,7 +432,14 @@ func (i *Initializer) GetCoreConfigFromFile(instance *current.IBPOrderer, file s switch 
version.GetMajorReleaseVersion(instance.Spec.FabricVersion) { case version.V2: currentVer := version.String(instance.Spec.FabricVersion) - if currentVer.EqualWithoutTag(version.V2_4_1) || currentVer.GreaterThan(version.V2_4_1) { + if currentVer.EqualWithoutTag(version.V2_5_1) || currentVer.GreaterThan(version.V2_5_1) { + log.Info("v2.5.x Fabric Orderer requested") + v25config, err := v25ordererconfig.ReadOrdererFile(file) + if err != nil { + return nil, errors.Wrap(err, "failed to read v2.5.x default config file") + } + return v25config, nil + } else if currentVer.EqualWithoutTag(version.V2_4_1) || currentVer.GreaterThan(version.V2_4_1) { log.Info("v2.4.x Fabric Orderer requested") v24config, err := v24ordererconfig.ReadOrdererFile(file) if err != nil { @@ -465,7 +474,14 @@ func (i *Initializer) GetCoreConfigFromBytes(instance *current.IBPOrderer, bytes switch version.GetMajorReleaseVersion(instance.Spec.FabricVersion) { case version.V2: currentVer := version.String(instance.Spec.FabricVersion) - if currentVer.EqualWithoutTag(version.V2_4_1) || currentVer.GreaterThan(version.V2_4_1) { + if currentVer.EqualWithoutTag(version.V2_5_1) || currentVer.GreaterThan(version.V2_5_1) { + log.Info("v2.5.x Fabric Orderer requested") + v25config, err := v25ordererconfig.ReadOrdererFromBytes(bytes) + if err != nil { + return nil, err + } + return v25config, nil + } else if currentVer.EqualWithoutTag(version.V2_4_1) || currentVer.GreaterThan(version.V2_4_1) { log.Info("v2.4.x Fabric Orderer requested") v24config, err := v24ordererconfig.ReadOrdererFromBytes(bytes) if err != nil { diff --git a/pkg/initializer/peer/config/v25/config.go b/pkg/initializer/peer/config/v25/config.go new file mode 100644 index 00000000..7c2ea51b --- /dev/null +++ b/pkg/initializer/peer/config/v25/config.go @@ -0,0 +1,197 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package v25 + +import ( + "encoding/json" + "io/ioutil" + "path/filepath" + "strings" + + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + v25 "github.com/IBM-Blockchain/fabric-operator/pkg/apis/peer/v25" + "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer/config/commoncore" + v1config "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer/config/v1" + "github.com/IBM-Blockchain/fabric-operator/pkg/util/merge" + "github.com/pkg/errors" + "sigs.k8s.io/yaml" +) + +type Core struct { + v25.Core `json:",inline"` + addrOverrides []v1config.AddressOverride +} + +func (c *Core) ToBytes() ([]byte, error) { + bytes, err := yaml.Marshal(c) + if err != nil { + return nil, err + } + + return bytes, nil +} + +func (c *Core) WriteToFile(path string) error { + bytes, err := yaml.Marshal(c) + if err != nil { + return err + } + + err = ioutil.WriteFile(filepath.Clean(path), bytes, 0600) + if err != nil { + return err + } + + return nil +} + +func (c *Core) MergeWith(newConfig interface{}, usingHSMProxy bool) error { + newCore := newConfig.(*Core) + + if newCore != nil { + err := merge.WithOverwrite(c, newCore) + if err != nil { + return errors.Wrapf(err, "failed to merge peer configuration overrides") + } + } + + if c.UsingPKCS11() { + c.SetPKCS11Defaults(usingHSMProxy) + } + + dc := v1config.DeliveryClient{DeliveryClient: c.Peer.DeliveryClient} + addrOverrides, err := dc.HandleCAcertsFiles() + if err != nil { + return errors.Wrapf(err, "failed to convert base64 certs to filepath") + } + c.Peer.DeliveryClient = dc.DeliveryClient + c.addrOverrides = addrOverrides + + return nil +} + +func (c *Core) DeepCopyInto(into *Core) { + b, err := json.Marshal(c) + if err != nil { + return + } + + err = json.Unmarshal(b, into) + if err != nil { + return + } +} + +func (c *Core) DeepCopy() *Core { + if c == nil { + return nil + } + out := new(Core) + c.DeepCopyInto(out) + return out +} + +func (c *Core) UsingPKCS11() bool { + if c.Peer.BCCSP != nil { + if strings.ToLower(c.Peer.BCCSP.ProviderName) == "pkcs11" { + return true + } + } + return false +} + +func (c *Core) SetPKCS11Defaults(usingHSMProxy bool) { + if c.Peer.BCCSP.PKCS11 == nil { + c.Peer.BCCSP.PKCS11 = &common.PKCS11Opts{} + } + + if usingHSMProxy { + c.Peer.BCCSP.PKCS11.Library = "/usr/local/lib/libpkcs11-proxy.so" + } + + if c.Peer.BCCSP.PKCS11.HashFamily == "" { + c.Peer.BCCSP.PKCS11.HashFamily = "SHA2" + } + + if c.Peer.BCCSP.PKCS11.SecLevel == 0 { + c.Peer.BCCSP.PKCS11.SecLevel = 256 + } + + c.Peer.BCCSP.PKCS11.SoftVerify = true +} + +func (c *Core) SetDefaultKeyStore() { + // No-op + return +} + +func (c *Core) GetMaxNameLength() *int { + return c.MaxNameLength +} + +func (c *Core) GetAddressOverrides() []v1config.AddressOverride { + return c.addrOverrides +} + +func (c *Core) GetBCCSPSection() *common.BCCSP { + return c.Peer.BCCSP +} + +func (c *Core) SetBCCSPLibrary(library string) { + if c.Peer.BCCSP.PKCS11 == nil { + c.Peer.BCCSP.PKCS11 = &common.PKCS11Opts{} + } + + c.Peer.BCCSP.PKCS11.Library = library +} + +func ReadCoreFile(path string) (*Core, error) { + core, err := ioutil.ReadFile(filepath.Clean(path)) + if err != nil { + return nil, err + } + + return coreFromBytes(core) +} + +func ReadCoreFromBytes(core []byte) (*Core, error) { + return coreFromBytes(core) +} + +func ReadFrom(from *[]byte) (*Core, error) { + return coreFromBytes(*from) +} + +func coreFromBytes(coreBytes []byte) (*Core, error) { + coreConfig := &Core{} + err := yaml.Unmarshal(coreBytes, coreConfig) + if err != nil { + // Check if 
peer.gossip.bootstrap needs to be converted + updatedCore, err := commoncore.ConvertBootstrapToArray(coreBytes) + if err != nil { + return nil, errors.Wrap(err, "failed to convert peer.gossip.bootstrap to string array") + } + err = yaml.Unmarshal(updatedCore, coreConfig) + if err != nil { + return nil, err + } + } + + return coreConfig, nil +} diff --git a/pkg/initializer/peer/config/v25/config_test.go b/pkg/initializer/peer/config/v25/config_test.go new file mode 100644 index 00000000..6a68f525 --- /dev/null +++ b/pkg/initializer/peer/config/v25/config_test.go @@ -0,0 +1,130 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v25_test + +import ( + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + v2core "github.com/IBM-Blockchain/fabric-operator/pkg/apis/peer/v2" + v25core "github.com/IBM-Blockchain/fabric-operator/pkg/apis/peer/v25" + v25 "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer/config/v25" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("Peer configuration", func() { + It("merges current configuration with overrides values", func() { + core, err := v25.ReadCoreFile("../../../../../testdata/init/peer/core.yaml") + Expect(err).NotTo(HaveOccurred()) + Expect(core.Peer.ID).To(Equal("jdoe")) + + newConfig := &v25.Core{ + Core: v25core.Core{ + Peer: v25core.Peer{ + BCCSP: &common.BCCSP{ + ProviderName: "PKCS11", + PKCS11: &common.PKCS11Opts{ + Library: "library2", + Label: "label2", + Pin: "2222", + HashFamily: "SHA3", + SecLevel: 512, + FileKeyStore: &common.FileKeyStoreOpts{ + KeyStorePath: "keystore3", + }, + }, + }, + }, + }, + } + + Expect(core.Peer.Keepalive.MinInterval).To(Equal(common.MustParseDuration("60s"))) + + err = core.MergeWith(newConfig, true) + Expect(err).NotTo(HaveOccurred()) + + Expect(*core.Peer.BCCSP.PKCS11).To(Equal(common.PKCS11Opts{ + Library: "/usr/local/lib/libpkcs11-proxy.so", + Label: "label2", + Pin: "2222", + HashFamily: "SHA3", + SecLevel: 512, + SoftVerify: true, + FileKeyStore: &common.FileKeyStoreOpts{ + KeyStorePath: "keystore3", + }, + })) + }) + + Context("chaincode configuration", func() { + It("merges v25 current configuration with overrides values", func() { + core, err := v25.ReadCoreFile("../../../../../testdata/init/peer/core.yaml") + Expect(err).NotTo(HaveOccurred()) + Expect(core.Peer.ID).To(Equal("jdoe")) + + startupTimeout, err := common.ParseDuration("200s") + Expect(err).NotTo(HaveOccurred()) + executeTimeout, err := common.ParseDuration("20s") + Expect(err).NotTo(HaveOccurred()) + + newConfig := &v25.Core{ + Core: v25core.Core{ + Chaincode: v2core.Chaincode{ + StartupTimeout: startupTimeout, + ExecuteTimeout: executeTimeout, + ExternalBuilders: []v2core.ExternalBuilder{ + v2core.ExternalBuilder{ + Path: "/scripts", + Name: "go-builder", + EnvironmentWhiteList: []string{"ENV1=Value1"}, + PropogateEnvironment: 
[]string{"ENV1=Value1"}, + }, + }, + }, + }, + } + + err = core.MergeWith(newConfig, false) + Expect(err).NotTo(HaveOccurred()) + Expect(core.Chaincode.StartupTimeout).To(Equal(startupTimeout)) + Expect(core.Chaincode.ExecuteTimeout).To(Equal(executeTimeout)) + + Expect(core.Chaincode.ExternalBuilders[0]).To(Equal( + v2core.ExternalBuilder{ + Path: "/scripts", + Name: "go-builder", + EnvironmentWhiteList: []string{"ENV1=Value1"}, + PropogateEnvironment: []string{"ENV1=Value1"}, + }, + )) + }) + }) + + Context("read in core file", func() { + It("reads core and converts peer.gossip.bootstrap", func() { + core, err := v25.ReadCoreFile("../../../../../testdata/init/peer/core_bootstrap_test.yaml") + Expect(err).NotTo(HaveOccurred()) + Expect(core.Peer.Gossip.Bootstrap).To(Equal([]string{"127.0.0.1:7051"})) + }) + + It("returns error if invalid core (besides peer.gossip.boostrap field)", func() { + _, err := v25.ReadCoreFile("../../../../../testdata/init/peer/core_invalid.yaml") + Expect(err).NotTo(HaveOccurred()) + }) + }) +}) diff --git a/pkg/initializer/peer/config/v25/v2_suite_test.go b/pkg/initializer/peer/config/v25/v2_suite_test.go new file mode 100644 index 00000000..660f13ed --- /dev/null +++ b/pkg/initializer/peer/config/v25/v2_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v25_test + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +func TestV25(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "V25 Suite") +} diff --git a/pkg/initializer/peer/coreconfigmap.go b/pkg/initializer/peer/coreconfigmap.go index 63a5dda4..ed77b6c1 100644 --- a/pkg/initializer/peer/coreconfigmap.go +++ b/pkg/initializer/peer/coreconfigmap.go @@ -29,6 +29,7 @@ import ( "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/common/config" configv1 "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer/config/v1" configv2 "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer/config/v2" + configv25 "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer/config/v25" k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" "github.com/IBM-Blockchain/fabric-operator/pkg/util" "github.com/IBM-Blockchain/fabric-operator/version" @@ -173,11 +174,20 @@ func GetCoreFromConfigMap(client k8sclient.Client, instance *current.IBPPeer) (* func GetCoreConfigFromBytes(instance *current.IBPPeer, bytes []byte) (CoreConfig, error) { switch version.GetMajorReleaseVersion(instance.Spec.FabricVersion) { case version.V2: - v2config, err := configv2.ReadCoreFromBytes(bytes) - if err != nil { - return nil, err + peerversion := version.String(instance.Spec.FabricVersion) + if peerversion.EqualWithoutTag(version.V2_5_1) || peerversion.GreaterThan(version.V2_5_1) { + v25config, err := configv25.ReadCoreFromBytes(bytes) + if err != nil { + return nil, err + } + return v25config, nil + } else { + v2config, err := configv2.ReadCoreFromBytes(bytes) + if err != nil { + return nil, err + } + return v2config, nil } - return v2config, nil case version.V1: fallthrough default: @@ -196,11 +206,20 @@ func GetCoreConfigFromFile(instance *current.IBPPeer, file string) (CoreConfig, switch version.GetMajorReleaseVersion(instance.Spec.FabricVersion) { case version.V2: log.Info("v2 Fabric Peer requested") - v2config, err := configv2.ReadCoreFile(file) - if err != nil { - return nil, err + peerversion := version.String(instance.Spec.FabricVersion) + if peerversion.EqualWithoutTag(version.V2_5_1) || peerversion.GreaterThan(version.V2_5_1) { + v25config, err := configv25.ReadCoreFile(file) + if err != nil { + return nil, err + } + return v25config, nil + } else { + v2config, err := configv2.ReadCoreFile(file) + if err != nil { + return nil, err + } + return v2config, nil } - return v2config, nil case version.V1: fallthrough default: diff --git a/pkg/initializer/peer/coreconfigmap_test.go b/pkg/initializer/peer/coreconfigmap_test.go index 7c73ef8d..25d13ac7 100644 --- a/pkg/initializer/peer/coreconfigmap_test.go +++ b/pkg/initializer/peer/coreconfigmap_test.go @@ -46,10 +46,11 @@ var _ = Describe("core config map", func() { client = &mocks.Client{} coreCM = &initializer.CoreConfigMap{ Config: &initializer.Config{ - CorePeerFile: "../../../defaultconfig/peer/core.yaml", - CorePeerV2File: "../../../defaultconfig/peer/v2/core.yaml", - OUFile: "../../../defaultconfig/peer/ouconfig.yaml", - InterOUFile: "../../../defaultconfig/peer/ouconfig-inter.yaml", + CorePeerFile: "../../../defaultconfig/peer/core.yaml", + CorePeerV2File: "../../../defaultconfig/peer/v2/core.yaml", + CorePeerV25File: "../../../defaultconfig/peer/v25/core.yaml", + OUFile: "../../../defaultconfig/peer/ouconfig.yaml", + InterOUFile: "../../../defaultconfig/peer/ouconfig-inter.yaml", }, Client: client, GetLabels: func(o metav1.Object) map[string]string { return map[string]string{} }, diff --git a/pkg/initializer/peer/initializer.go 
b/pkg/initializer/peer/initializer.go index 1f401dbb..dbeb943e 100644 --- a/pkg/initializer/peer/initializer.go +++ b/pkg/initializer/peer/initializer.go @@ -47,6 +47,7 @@ type Config struct { InterOUFile string CorePeerFile string CorePeerV2File string + CorePeerV25File string DeploymentFile string PVCFile string CouchDBPVCFile string diff --git a/pkg/migrator/peer/fabric/migrator.go b/pkg/migrator/peer/fabric/migrator.go index 3d92eb4a..562e3808 100644 --- a/pkg/migrator/peer/fabric/migrator.go +++ b/pkg/migrator/peer/fabric/migrator.go @@ -66,7 +66,14 @@ func V2Migrate(instance metav1.Object, migrator Migrator, version string, timeou func V24Migrate(instance metav1.Object, migrator Migrator, version string, timeouts config.DBMigrationTimeouts) error { if err := migrator.UpdateConfig(instance, version); err != nil { - return errors.Wrap(err, "failed to update v2.4.1 configs") + return errors.Wrap(err, "failed to update v2.4.x configs") + } + return nil +} + +func V25Migrate(instance metav1.Object, migrator Migrator, version string, timeouts config.DBMigrationTimeouts) error { + if err := migrator.UpdateConfig(instance, version); err != nil { + return errors.Wrap(err, "failed to update v2.5.x configs") } return nil } diff --git a/pkg/migrator/peer/fabric/v25/mocks/configmapmanager.go b/pkg/migrator/peer/fabric/v25/mocks/configmapmanager.go new file mode 100644 index 00000000..3b210036 --- /dev/null +++ b/pkg/migrator/peer/fabric/v25/mocks/configmapmanager.go @@ -0,0 +1,195 @@ +// Code generated by counterfeiter. DO NOT EDIT. +package mocks + +import ( + "sync" + + "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + initializer "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer" + v25 "github.com/IBM-Blockchain/fabric-operator/pkg/migrator/peer/fabric/v25" + v1 "k8s.io/api/core/v1" +) + +type ConfigMapManager struct { + CreateOrUpdateStub func(*v1beta1.IBPPeer, initializer.CoreConfig) error + createOrUpdateMutex sync.RWMutex + createOrUpdateArgsForCall []struct { + arg1 *v1beta1.IBPPeer + arg2 initializer.CoreConfig + } + createOrUpdateReturns struct { + result1 error + } + createOrUpdateReturnsOnCall map[int]struct { + result1 error + } + GetCoreConfigStub func(*v1beta1.IBPPeer) (*v1.ConfigMap, error) + getCoreConfigMutex sync.RWMutex + getCoreConfigArgsForCall []struct { + arg1 *v1beta1.IBPPeer + } + getCoreConfigReturns struct { + result1 *v1.ConfigMap + result2 error + } + getCoreConfigReturnsOnCall map[int]struct { + result1 *v1.ConfigMap + result2 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *ConfigMapManager) CreateOrUpdate(arg1 *v1beta1.IBPPeer, arg2 initializer.CoreConfig) error { + fake.createOrUpdateMutex.Lock() + ret, specificReturn := fake.createOrUpdateReturnsOnCall[len(fake.createOrUpdateArgsForCall)] + fake.createOrUpdateArgsForCall = append(fake.createOrUpdateArgsForCall, struct { + arg1 *v1beta1.IBPPeer + arg2 initializer.CoreConfig + }{arg1, arg2}) + stub := fake.CreateOrUpdateStub + fakeReturns := fake.createOrUpdateReturns + fake.recordInvocation("CreateOrUpdate", []interface{}{arg1, arg2}) + fake.createOrUpdateMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ConfigMapManager) CreateOrUpdateCallCount() int { + fake.createOrUpdateMutex.RLock() + defer fake.createOrUpdateMutex.RUnlock() + return len(fake.createOrUpdateArgsForCall) +} + +func (fake *ConfigMapManager) CreateOrUpdateCalls(stub 
func(*v1beta1.IBPPeer, initializer.CoreConfig) error) { + fake.createOrUpdateMutex.Lock() + defer fake.createOrUpdateMutex.Unlock() + fake.CreateOrUpdateStub = stub +} + +func (fake *ConfigMapManager) CreateOrUpdateArgsForCall(i int) (*v1beta1.IBPPeer, initializer.CoreConfig) { + fake.createOrUpdateMutex.RLock() + defer fake.createOrUpdateMutex.RUnlock() + argsForCall := fake.createOrUpdateArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *ConfigMapManager) CreateOrUpdateReturns(result1 error) { + fake.createOrUpdateMutex.Lock() + defer fake.createOrUpdateMutex.Unlock() + fake.CreateOrUpdateStub = nil + fake.createOrUpdateReturns = struct { + result1 error + }{result1} +} + +func (fake *ConfigMapManager) CreateOrUpdateReturnsOnCall(i int, result1 error) { + fake.createOrUpdateMutex.Lock() + defer fake.createOrUpdateMutex.Unlock() + fake.CreateOrUpdateStub = nil + if fake.createOrUpdateReturnsOnCall == nil { + fake.createOrUpdateReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.createOrUpdateReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *ConfigMapManager) GetCoreConfig(arg1 *v1beta1.IBPPeer) (*v1.ConfigMap, error) { + fake.getCoreConfigMutex.Lock() + ret, specificReturn := fake.getCoreConfigReturnsOnCall[len(fake.getCoreConfigArgsForCall)] + fake.getCoreConfigArgsForCall = append(fake.getCoreConfigArgsForCall, struct { + arg1 *v1beta1.IBPPeer + }{arg1}) + stub := fake.GetCoreConfigStub + fakeReturns := fake.getCoreConfigReturns + fake.recordInvocation("GetCoreConfig", []interface{}{arg1}) + fake.getCoreConfigMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *ConfigMapManager) GetCoreConfigCallCount() int { + fake.getCoreConfigMutex.RLock() + defer fake.getCoreConfigMutex.RUnlock() + return len(fake.getCoreConfigArgsForCall) +} + +func (fake *ConfigMapManager) GetCoreConfigCalls(stub func(*v1beta1.IBPPeer) (*v1.ConfigMap, error)) { + fake.getCoreConfigMutex.Lock() + defer fake.getCoreConfigMutex.Unlock() + fake.GetCoreConfigStub = stub +} + +func (fake *ConfigMapManager) GetCoreConfigArgsForCall(i int) *v1beta1.IBPPeer { + fake.getCoreConfigMutex.RLock() + defer fake.getCoreConfigMutex.RUnlock() + argsForCall := fake.getCoreConfigArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *ConfigMapManager) GetCoreConfigReturns(result1 *v1.ConfigMap, result2 error) { + fake.getCoreConfigMutex.Lock() + defer fake.getCoreConfigMutex.Unlock() + fake.GetCoreConfigStub = nil + fake.getCoreConfigReturns = struct { + result1 *v1.ConfigMap + result2 error + }{result1, result2} +} + +func (fake *ConfigMapManager) GetCoreConfigReturnsOnCall(i int, result1 *v1.ConfigMap, result2 error) { + fake.getCoreConfigMutex.Lock() + defer fake.getCoreConfigMutex.Unlock() + fake.GetCoreConfigStub = nil + if fake.getCoreConfigReturnsOnCall == nil { + fake.getCoreConfigReturnsOnCall = make(map[int]struct { + result1 *v1.ConfigMap + result2 error + }) + } + fake.getCoreConfigReturnsOnCall[i] = struct { + result1 *v1.ConfigMap + result2 error + }{result1, result2} +} + +func (fake *ConfigMapManager) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.createOrUpdateMutex.RLock() + defer fake.createOrUpdateMutex.RUnlock() + fake.getCoreConfigMutex.RLock() + defer fake.getCoreConfigMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} 
+ for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *ConfigMapManager) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ v25.ConfigMapManager = new(ConfigMapManager) diff --git a/pkg/migrator/peer/fabric/v25/mocks/deploymentmanager.go b/pkg/migrator/peer/fabric/v25/mocks/deploymentmanager.go new file mode 100644 index 00000000..1e8bc136 --- /dev/null +++ b/pkg/migrator/peer/fabric/v25/mocks/deploymentmanager.go @@ -0,0 +1,338 @@ +// Code generated by counterfeiter. DO NOT EDIT. +package mocks + +import ( + "sync" + + v25 "github.com/IBM-Blockchain/fabric-operator/pkg/migrator/peer/fabric/v25" + v1a "k8s.io/api/apps/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type DeploymentManager struct { + DeleteStub func(v1.Object) error + deleteMutex sync.RWMutex + deleteArgsForCall []struct { + arg1 v1.Object + } + deleteReturns struct { + result1 error + } + deleteReturnsOnCall map[int]struct { + result1 error + } + DeploymentStatusStub func(v1.Object) (v1a.DeploymentStatus, error) + deploymentStatusMutex sync.RWMutex + deploymentStatusArgsForCall []struct { + arg1 v1.Object + } + deploymentStatusReturns struct { + result1 v1a.DeploymentStatus + result2 error + } + deploymentStatusReturnsOnCall map[int]struct { + result1 v1a.DeploymentStatus + result2 error + } + GetStub func(v1.Object) (client.Object, error) + getMutex sync.RWMutex + getArgsForCall []struct { + arg1 v1.Object + } + getReturns struct { + result1 client.Object + result2 error + } + getReturnsOnCall map[int]struct { + result1 client.Object + result2 error + } + GetSchemeStub func() *runtime.Scheme + getSchemeMutex sync.RWMutex + getSchemeArgsForCall []struct { + } + getSchemeReturns struct { + result1 *runtime.Scheme + } + getSchemeReturnsOnCall map[int]struct { + result1 *runtime.Scheme + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *DeploymentManager) Delete(arg1 v1.Object) error { + fake.deleteMutex.Lock() + ret, specificReturn := fake.deleteReturnsOnCall[len(fake.deleteArgsForCall)] + fake.deleteArgsForCall = append(fake.deleteArgsForCall, struct { + arg1 v1.Object + }{arg1}) + stub := fake.DeleteStub + fakeReturns := fake.deleteReturns + fake.recordInvocation("Delete", []interface{}{arg1}) + fake.deleteMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *DeploymentManager) DeleteCallCount() int { + fake.deleteMutex.RLock() + defer fake.deleteMutex.RUnlock() + return len(fake.deleteArgsForCall) +} + +func (fake *DeploymentManager) DeleteCalls(stub func(v1.Object) error) { + fake.deleteMutex.Lock() + defer fake.deleteMutex.Unlock() + fake.DeleteStub = stub +} + +func (fake *DeploymentManager) DeleteArgsForCall(i int) v1.Object { + fake.deleteMutex.RLock() + defer fake.deleteMutex.RUnlock() + argsForCall := fake.deleteArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *DeploymentManager) DeleteReturns(result1 error) { + fake.deleteMutex.Lock() + defer fake.deleteMutex.Unlock() + fake.DeleteStub = nil + 
fake.deleteReturns = struct { + result1 error + }{result1} +} + +func (fake *DeploymentManager) DeleteReturnsOnCall(i int, result1 error) { + fake.deleteMutex.Lock() + defer fake.deleteMutex.Unlock() + fake.DeleteStub = nil + if fake.deleteReturnsOnCall == nil { + fake.deleteReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.deleteReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *DeploymentManager) DeploymentStatus(arg1 v1.Object) (v1a.DeploymentStatus, error) { + fake.deploymentStatusMutex.Lock() + ret, specificReturn := fake.deploymentStatusReturnsOnCall[len(fake.deploymentStatusArgsForCall)] + fake.deploymentStatusArgsForCall = append(fake.deploymentStatusArgsForCall, struct { + arg1 v1.Object + }{arg1}) + stub := fake.DeploymentStatusStub + fakeReturns := fake.deploymentStatusReturns + fake.recordInvocation("DeploymentStatus", []interface{}{arg1}) + fake.deploymentStatusMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *DeploymentManager) DeploymentStatusCallCount() int { + fake.deploymentStatusMutex.RLock() + defer fake.deploymentStatusMutex.RUnlock() + return len(fake.deploymentStatusArgsForCall) +} + +func (fake *DeploymentManager) DeploymentStatusCalls(stub func(v1.Object) (v1a.DeploymentStatus, error)) { + fake.deploymentStatusMutex.Lock() + defer fake.deploymentStatusMutex.Unlock() + fake.DeploymentStatusStub = stub +} + +func (fake *DeploymentManager) DeploymentStatusArgsForCall(i int) v1.Object { + fake.deploymentStatusMutex.RLock() + defer fake.deploymentStatusMutex.RUnlock() + argsForCall := fake.deploymentStatusArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *DeploymentManager) DeploymentStatusReturns(result1 v1a.DeploymentStatus, result2 error) { + fake.deploymentStatusMutex.Lock() + defer fake.deploymentStatusMutex.Unlock() + fake.DeploymentStatusStub = nil + fake.deploymentStatusReturns = struct { + result1 v1a.DeploymentStatus + result2 error + }{result1, result2} +} + +func (fake *DeploymentManager) DeploymentStatusReturnsOnCall(i int, result1 v1a.DeploymentStatus, result2 error) { + fake.deploymentStatusMutex.Lock() + defer fake.deploymentStatusMutex.Unlock() + fake.DeploymentStatusStub = nil + if fake.deploymentStatusReturnsOnCall == nil { + fake.deploymentStatusReturnsOnCall = make(map[int]struct { + result1 v1a.DeploymentStatus + result2 error + }) + } + fake.deploymentStatusReturnsOnCall[i] = struct { + result1 v1a.DeploymentStatus + result2 error + }{result1, result2} +} + +func (fake *DeploymentManager) Get(arg1 v1.Object) (client.Object, error) { + fake.getMutex.Lock() + ret, specificReturn := fake.getReturnsOnCall[len(fake.getArgsForCall)] + fake.getArgsForCall = append(fake.getArgsForCall, struct { + arg1 v1.Object + }{arg1}) + stub := fake.GetStub + fakeReturns := fake.getReturns + fake.recordInvocation("Get", []interface{}{arg1}) + fake.getMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *DeploymentManager) GetCallCount() int { + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + return len(fake.getArgsForCall) +} + +func (fake *DeploymentManager) GetCalls(stub func(v1.Object) (client.Object, error)) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = stub +} + +func (fake *DeploymentManager) GetArgsForCall(i int) 
v1.Object { + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + argsForCall := fake.getArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *DeploymentManager) GetReturns(result1 client.Object, result2 error) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = nil + fake.getReturns = struct { + result1 client.Object + result2 error + }{result1, result2} +} + +func (fake *DeploymentManager) GetReturnsOnCall(i int, result1 client.Object, result2 error) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = nil + if fake.getReturnsOnCall == nil { + fake.getReturnsOnCall = make(map[int]struct { + result1 client.Object + result2 error + }) + } + fake.getReturnsOnCall[i] = struct { + result1 client.Object + result2 error + }{result1, result2} +} + +func (fake *DeploymentManager) GetScheme() *runtime.Scheme { + fake.getSchemeMutex.Lock() + ret, specificReturn := fake.getSchemeReturnsOnCall[len(fake.getSchemeArgsForCall)] + fake.getSchemeArgsForCall = append(fake.getSchemeArgsForCall, struct { + }{}) + stub := fake.GetSchemeStub + fakeReturns := fake.getSchemeReturns + fake.recordInvocation("GetScheme", []interface{}{}) + fake.getSchemeMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *DeploymentManager) GetSchemeCallCount() int { + fake.getSchemeMutex.RLock() + defer fake.getSchemeMutex.RUnlock() + return len(fake.getSchemeArgsForCall) +} + +func (fake *DeploymentManager) GetSchemeCalls(stub func() *runtime.Scheme) { + fake.getSchemeMutex.Lock() + defer fake.getSchemeMutex.Unlock() + fake.GetSchemeStub = stub +} + +func (fake *DeploymentManager) GetSchemeReturns(result1 *runtime.Scheme) { + fake.getSchemeMutex.Lock() + defer fake.getSchemeMutex.Unlock() + fake.GetSchemeStub = nil + fake.getSchemeReturns = struct { + result1 *runtime.Scheme + }{result1} +} + +func (fake *DeploymentManager) GetSchemeReturnsOnCall(i int, result1 *runtime.Scheme) { + fake.getSchemeMutex.Lock() + defer fake.getSchemeMutex.Unlock() + fake.GetSchemeStub = nil + if fake.getSchemeReturnsOnCall == nil { + fake.getSchemeReturnsOnCall = make(map[int]struct { + result1 *runtime.Scheme + }) + } + fake.getSchemeReturnsOnCall[i] = struct { + result1 *runtime.Scheme + }{result1} +} + +func (fake *DeploymentManager) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.deleteMutex.RLock() + defer fake.deleteMutex.RUnlock() + fake.deploymentStatusMutex.RLock() + defer fake.deploymentStatusMutex.RUnlock() + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + fake.getSchemeMutex.RLock() + defer fake.getSchemeMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *DeploymentManager) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ v25.DeploymentManager = new(DeploymentManager) diff --git a/pkg/migrator/peer/fabric/v25/peer.go b/pkg/migrator/peer/fabric/v25/peer.go new file mode 100644 index 00000000..427d7403 --- /dev/null +++ b/pkg/migrator/peer/fabric/v25/peer.go @@ -0,0 
+1,298 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v25 + +import ( + "context" + "fmt" + "reflect" + "strings" + + "github.com/pkg/errors" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + config "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + "github.com/IBM-Blockchain/fabric-operator/pkg/action" + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/deployer" + v2peer "github.com/IBM-Blockchain/fabric-operator/pkg/apis/peer/v2" + v25peer "github.com/IBM-Blockchain/fabric-operator/pkg/apis/peer/v25" + initializer "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer" + v25config "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer/config/v25" + k8sclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" + ver "github.com/IBM-Blockchain/fabric-operator/version" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + + "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/yaml" +) + +var log = logf.Log.WithName("peer_fabric_migrator") + +//go:generate counterfeiter -o mocks/configmapmanager.go -fake-name ConfigMapManager . ConfigMapManager +type ConfigMapManager interface { + GetCoreConfig(*current.IBPPeer) (*corev1.ConfigMap, error) + CreateOrUpdate(*current.IBPPeer, initializer.CoreConfig) error +} + +//go:generate counterfeiter -o mocks/deploymentmanager.go -fake-name DeploymentManager . 
DeploymentManager +type DeploymentManager interface { + Get(metav1.Object) (client.Object, error) + Delete(metav1.Object) error + DeploymentStatus(metav1.Object) (appsv1.DeploymentStatus, error) + GetScheme() *runtime.Scheme +} + +type Migrate struct { + DeploymentManager DeploymentManager + ConfigMapManager ConfigMapManager + Client k8sclient.Client +} + +func (m *Migrate) MigrationNeeded(instance metav1.Object) bool { + // Check for a DinD container; if no DinD container is found, this is + // already a v25 fabric IBP instance + obj, err := m.DeploymentManager.Get(instance) + if err != nil { + // If the deployment does not exist, this instance is not in a healthy + // state and migration should be avoided + return false + } + + // Assume the deployment has already been updated unless a DinD container is still present + deploymentUpdated := true + var configUpdated bool + + dep := obj.(*appsv1.Deployment) + for _, cont := range dep.Spec.Template.Spec.Containers { + if strings.ToLower(cont.Name) == "dind" { + // DinD container found, instance is not at v25 + deploymentUpdated = false + } + } + + cm, err := m.ConfigMapManager.GetCoreConfig(instance.(*current.IBPPeer)) + if err != nil { + // If the config map does not exist, this instance is not in a healthy + // state and migration should be avoided + return false + } + + v1corebytes := cm.BinaryData["core.yaml"] + + core := &v25config.Core{} + err = yaml.Unmarshal(v1corebytes, core) + if err != nil { + return false + } + + configUpdated = configHasBeenUpdated(core) + + return !deploymentUpdated || !configUpdated +} + +func (m *Migrate) UpgradeDBs(instance metav1.Object, timeouts config.DBMigrationTimeouts) error { + log.Info(fmt.Sprintf("Resetting Peer '%s'", instance.GetName())) + return action.UpgradeDBs(m.DeploymentManager, m.Client, instance.(*current.IBPPeer), timeouts) +} + +func (m *Migrate) UpdateConfig(instance metav1.Object, version string) error { + log.Info("Updating config to v25") + cm, err := m.ConfigMapManager.GetCoreConfig(instance.(*current.IBPPeer)) + if err != nil { + return errors.Wrap(err, "failed to get config map") + } + v1corebytes := cm.BinaryData["core.yaml"] + + core := &v25config.Core{} + err = yaml.Unmarshal(v1corebytes, core) + if err != nil { + return err + } + + // Resetting the VM endpoint: the VM and Ledger structs have been added to Peer, and the VM + // endpoint is not required for a v25 peer as there is no DinD container
+ core.VM.Endpoint = "" + + core.Chaincode.ExternalBuilders = []v2peer.ExternalBuilder{ + v2peer.ExternalBuilder{ + Name: "ibp-builder", + Path: "/usr/local", + EnvironmentWhiteList: []string{ + "IBP_BUILDER_ENDPOINT", + "IBP_BUILDER_SHARED_DIR", + }, + PropogateEnvironment: []string{ + "IBP_BUILDER_ENDPOINT", + "IBP_BUILDER_SHARED_DIR", + "PEER_NAME", + }, + }, + } + + core.Chaincode.InstallTimeout = common.MustParseDuration("300s") + if core.Chaincode.System == nil { + core.Chaincode.System = make(map[string]string) + } + core.Chaincode.System["_lifecycle"] = "enable" + + core.Peer.Limits.Concurrency.DeliverService = 2500 + core.Peer.Limits.Concurrency.EndorserService = 2500 + + core.Peer.Gossip.PvtData.ImplicitCollectionDisseminationPolicy.RequiredPeerCount = 0 + core.Peer.Gossip.PvtData.ImplicitCollectionDisseminationPolicy.MaxPeerCount = 1 + + currentVer := ver.String(version) + + trueVal := true + + if currentVer.EqualWithoutTag(ver.V2_5_1) || currentVer.GreaterThan(ver.V2_5_1) { + core.Peer.Gateway = v25peer.Gateway{ + Enabled: &trueVal, + EndorsementTimeout: common.MustParseDuration("30s"), + DialTimeout: common.MustParseDuration("120s"), + BroadcastTimeout: common.MustParseDuration("30s"), + } + core.Peer.Limits.Concurrency.GatewayService = 500 + core.Ledger.State.SnapShots = v2peer.SnapShots{ + RootDir: "/data/peer/ledgersData/snapshots/", + } + + core.Ledger.PvtDataStore = v25peer.PvtDataStore{ + CollElgProcMaxDbBatchSize: 500, + CollElgProcDbBatchesInterval: 1000, + DeprioritizedDataReconcilerInterval: common.MustParseDuration("3600s"), + PurgeInterval: 100, + PurgedKeyAuditLogging: &trueVal, + } + } + + core.Ledger.State.CouchdbConfig.CacheSize = 64 + core.Ledger.State.CouchdbConfig.MaxRetries = 10 + + err = m.ConfigMapManager.CreateOrUpdate(instance.(*current.IBPPeer), core) + if err != nil { + return err + } + + return nil +} + +// SetChaincodeLauncherResourceOnCR will update the peer's CR by adding chaincode launcher +// resources. The default resources are defined in the deployer's config map, which is part +// of the IBPConsole resource. The defaults for the chaincode launcher are extracted +// by reading the deployer's config map, and the CR is updated accordingly.
+func (m *Migrate) SetChaincodeLauncherResourceOnCR(instance metav1.Object) error { + log.Info("Setting chaincode launcher resource on CR") + cr := instance.(*current.IBPPeer) + + if cr.Spec.Resources != nil && cr.Spec.Resources.CCLauncher != nil { + // No need to proceed further if Chaincode launcher resources already set + return nil + } + + consoleList := &current.IBPConsoleList{} + if err := m.Client.List(context.TODO(), consoleList); err != nil { + return err + } + consoles := consoleList.Items + + // Default resources for the chaincode launcher container, used if no console deployer config is found + rr := &corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("0.1"), + corev1.ResourceMemory: resource.MustParse("100Mi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("2Gi"), + }, + } + + if len(consoles) > 0 { + log.Info("Setting chaincode launcher resource on CR based on deployer config from config map") + // Get config map associated with console + cm := &corev1.ConfigMap{} + nn := types.NamespacedName{ + Name: fmt.Sprintf("%s-deployer", consoles[0].GetName()), + Namespace: instance.GetNamespace(), + } + if err := m.Client.Get(context.TODO(), nn, cm); err != nil { + return err + } + + settingsBytes := []byte(cm.Data["settings.yaml"]) + settings := &deployer.Config{} + if err := yaml.Unmarshal(settingsBytes, settings); err != nil { + return err + } + + if settings.Defaults != nil && settings.Defaults.Resources != nil && + settings.Defaults.Resources.Peer != nil && settings.Defaults.Resources.Peer.CCLauncher != nil { + + rr = settings.Defaults.Resources.Peer.CCLauncher + } + } + + log.Info(fmt.Sprintf("Setting chaincode launcher resource on CR to %+v", rr)) + if cr.Spec.Resources == nil { + cr.Spec.Resources = &current.PeerResources{} + } + cr.Spec.Resources.CCLauncher = rr + if err := m.Client.Update(context.TODO(), cr); err != nil { + return err + } + + return nil +} + +// Updates required from v1.4 to v2.5.x: +// - External builders +// - Limits +// - Install timeout +// - Implicit collection dissemination policy +func configHasBeenUpdated(core *v25config.Core) bool { + if len(core.Chaincode.ExternalBuilders) == 0 { + return false + } + if core.Chaincode.ExternalBuilders[0].Name != "ibp-builder" { + return false + } + + // Check if install timeout was set + if reflect.DeepEqual(core.Chaincode.InstallTimeout, common.Duration{}) { + return false + } + + if core.Peer.Limits.Concurrency.DeliverService != 2500 { + return false + } + + if core.Peer.Limits.Concurrency.EndorserService != 2500 { + return false + } + + return true +} diff --git a/pkg/migrator/peer/fabric/v25/peer_test.go b/pkg/migrator/peer/fabric/v25/peer_test.go new file mode 100644 index 00000000..67a6a839 --- /dev/null +++ b/pkg/migrator/peer/fabric/v25/peer_test.go @@ -0,0 +1,367 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v25_test + +import ( + "context" + "strings" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.com/pkg/errors" + + current "github.com/IBM-Blockchain/fabric-operator/api/v1beta1" + controllermocks "github.com/IBM-Blockchain/fabric-operator/controllers/mocks" + config "github.com/IBM-Blockchain/fabric-operator/operatorconfig" + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/common" + "github.com/IBM-Blockchain/fabric-operator/pkg/apis/deployer" + v2peer "github.com/IBM-Blockchain/fabric-operator/pkg/apis/peer/v2" + v25config "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/peer/config/v25" + v25 "github.com/IBM-Blockchain/fabric-operator/pkg/migrator/peer/fabric/v25" + "github.com/IBM-Blockchain/fabric-operator/pkg/migrator/peer/fabric/v25/mocks" + + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + + k8sclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" +) + +var _ = Describe("V25 peer migrator", func() { + var ( + deploymentManager *mocks.DeploymentManager + configMapManager *mocks.ConfigMapManager + client *controllermocks.Client + migrator *v25.Migrate + instance *current.IBPPeer + ) + const FABRIC_V2 = "2.2.5-1" + BeforeEach(func() { + deploymentManager = &mocks.DeploymentManager{} + configMapManager = &mocks.ConfigMapManager{} + client = &controllermocks.Client{} + + instance = &current.IBPPeer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ibppeer", + }, + Spec: current.IBPPeerSpec{ + Images: &current.PeerImages{ + PeerImage: "peerimage", + PeerTag: "peertag", + }, + Resources: &current.PeerResources{}, + }, + } + + replicas := int32(1) + dep := &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Replicas: &replicas, + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + v1.Container{ + Name: "peer", + }, + v1.Container{ + Name: "dind", + }, + }, + }, + }, + }, + } + deploymentManager.GetReturns(dep, nil) + deploymentManager.DeploymentStatusReturns(appsv1.DeploymentStatus{}, nil) + deploymentManager.GetSchemeReturns(&runtime.Scheme{}) + + client.GetStub = func(ctx context.Context, types types.NamespacedName, obj k8sclient.Object) error { + switch obj.(type) { + case *batchv1.Job: + job := obj.(*batchv1.Job) + job.Status.Active = int32(1) + } + return nil + } + + configMapManager.GetCoreConfigReturns(&corev1.ConfigMap{ + BinaryData: map[string][]byte{ + "core.yaml": []byte{}, + }, + }, nil) + + migrator = &v25.Migrate{ + DeploymentManager: deploymentManager, + ConfigMapManager: configMapManager, + Client: client, + } + }) + + Context("migration needed", func() { + It("returns false if deployment not found", func() { + deploymentManager.GetReturns(nil, errors.New("not found")) + needed := migrator.MigrationNeeded(instance) + Expect(needed).To(Equal(false)) + }) + + It("returns true if config map not updated", func() { + dep := &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + v1.Container{ + Name: "peer", + }, + }, + }, + }, + }, + } + deploymentManager.GetReturns(dep, nil) + + needed := migrator.MigrationNeeded(instance) + Expect(needed).To(Equal(true)) + }) + + 
It("returns true if deployment has dind container", func() { + needed := migrator.MigrationNeeded(instance) + Expect(needed).To(Equal(true)) + }) + }) + + Context("upgrade dbs peer", func() { + BeforeEach(func() { + client.ListStub = func(ctx context.Context, obj k8sclient.ObjectList, opts ...k8sclient.ListOption) error { + if strings.Contains(opts[0].(*k8sclient.ListOptions).LabelSelector.String(), "app") { + pods := obj.(*corev1.PodList) + pods.Items = []corev1.Pod{} + } + if strings.Contains(opts[0].(*k8sclient.ListOptions).LabelSelector.String(), "job-name") { + pods := obj.(*corev1.PodList) + pods.Items = []corev1.Pod{ + corev1.Pod{ + Status: corev1.PodStatus{ + ContainerStatuses: []corev1.ContainerStatus{ + corev1.ContainerStatus{ + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{}, + }, + }, + }, + }, + }, + } + } + return nil + } + }) + + It("returns an error if unable to reset peer", func() { + deploymentManager.GetReturns(nil, errors.New("restore failed")) + err := migrator.UpgradeDBs(instance, config.DBMigrationTimeouts{ + JobStart: common.MustParseDuration("1s"), + JobCompletion: common.MustParseDuration("1s"), + }) + Expect(err).To(HaveOccurred()) + Expect(err).Should(MatchError(ContainSubstring("restore failed"))) + }) + + It("upgrade dbs", func() { + status := appsv1.DeploymentStatus{ + Replicas: int32(0), + } + deploymentManager.DeploymentStatusReturnsOnCall(0, status, nil) + + status.Replicas = 1 + deploymentManager.DeploymentStatusReturnsOnCall(1, status, nil) + + err := migrator.UpgradeDBs(instance, config.DBMigrationTimeouts{ + JobStart: common.MustParseDuration("1s"), + JobCompletion: common.MustParseDuration("1s"), + }) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("update config", func() { + It("returns an error if unable to get config map", func() { + configMapManager.GetCoreConfigReturns(nil, errors.New("get config map failed")) + err := migrator.UpdateConfig(instance, FABRIC_V2) + Expect(err).To(HaveOccurred()) + Expect(err).Should(MatchError(ContainSubstring("get config map failed"))) + }) + + It("returns an error if unable to update config map", func() { + configMapManager.CreateOrUpdateReturns(errors.New("update config map failed")) + err := migrator.UpdateConfig(instance, FABRIC_V2) + Expect(err).To(HaveOccurred()) + Expect(err).Should(MatchError(ContainSubstring("update config map failed"))) + }) + + It("sets relevant v25.x fields in config", func() { + err := migrator.UpdateConfig(instance, FABRIC_V2) + Expect(err).NotTo(HaveOccurred()) + + _, config := configMapManager.CreateOrUpdateArgsForCall(0) + core := config.(*v25config.Core) + + By("setting external builder", func() { + Expect(core.Chaincode.ExternalBuilders).To(ContainElement( + v2peer.ExternalBuilder{ + Name: "ibp-builder", + Path: "/usr/local", + EnvironmentWhiteList: []string{ + "IBP_BUILDER_ENDPOINT", + "IBP_BUILDER_SHARED_DIR", + }, + PropogateEnvironment: []string{ + "IBP_BUILDER_ENDPOINT", + "IBP_BUILDER_SHARED_DIR", + "PEER_NAME", + }, + }, + )) + }) + + By("setting install timeout", func() { + Expect(core.Chaincode.InstallTimeout).To(Equal(common.MustParseDuration("300s"))) + }) + + By("setting lifecycle chaincode", func() { + Expect(core.Chaincode.System["_lifecycle"]).To(Equal("enable")) + }) + + By("setting limits", func() { + Expect(core.Peer.Limits).To(Equal(v2peer.Limits{ + Concurrency: v2peer.Concurrency{ + DeliverService: 2500, + EndorserService: 2500, + }, + })) + }) + + By("setting implicit collection dissemination policy", func() { + 
Expect(core.Peer.Gossip.PvtData.ImplicitCollectionDisseminationPolicy).To(Equal(v2peer.ImplicitCollectionDisseminationPolicy{ + RequiredPeerCount: 0, + MaxPeerCount: 1, + })) + }) + + }) + + It("updates config map", func() { + err := migrator.UpdateConfig(instance, FABRIC_V2) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("set chaincode launcher resource on CR", func() { + BeforeEach(func() { + client.GetStub = func(ctx context.Context, nn types.NamespacedName, obj k8sclient.Object) error { + switch obj.(type) { + case *corev1.ConfigMap: + dep := &deployer.Config{ + Defaults: &deployer.Defaults{ + Resources: &deployer.Resources{ + Peer: &current.PeerResources{ + CCLauncher: &corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("3"), + corev1.ResourceMemory: resource.MustParse("3Gi"), + }, + }, + }, + }, + }, + } + + bytes, err := yaml.Marshal(dep) + Expect(err).NotTo(HaveOccurred()) + + cm := obj.(*corev1.ConfigMap) + cm.Data = map[string]string{ + "settings.yaml": string(bytes), + } + } + + return nil + } + + client.ListStub = func(ctx context.Context, obj k8sclient.ObjectList, opts ...k8sclient.ListOption) error { + switch obj.(type) { + case *current.IBPConsoleList: + list := obj.(*current.IBPConsoleList) + list.Items = []current.IBPConsole{current.IBPConsole{}} + } + + return nil + } + }) + + It("sets resources based on deployer config map", func() { + err := migrator.SetChaincodeLauncherResourceOnCR(instance) + Expect(err).NotTo(HaveOccurred()) + + _, cr, _ := client.UpdateArgsForCall(0) + Expect(cr).NotTo(BeNil()) + Expect(*cr.(*current.IBPPeer).Spec.Resources.CCLauncher).To(Equal(corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("3"), + corev1.ResourceMemory: resource.MustParse("3Gi"), + }}, + )) + }) + + It("sets default resources when deployer config map is not available", func() { + client.GetStub = nil + + err := migrator.SetChaincodeLauncherResourceOnCR(instance) + Expect(err).NotTo(HaveOccurred()) + + _, cr, _ := client.UpdateArgsForCall(0) + Expect(cr).NotTo(BeNil()) + Expect(*cr.(*current.IBPPeer).Spec.Resources.CCLauncher).To(Equal(corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("0.1"), + corev1.ResourceMemory: resource.MustParse("100Mi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("2Gi"), + }}, + )) + }) + }) +}) diff --git a/pkg/migrator/peer/fabric/v25/v25_suite_test.go b/pkg/migrator/peer/fabric/v25/v25_suite_test.go new file mode 100644 index 00000000..2e41b291 --- /dev/null +++ b/pkg/migrator/peer/fabric/v25/v25_suite_test.go @@ -0,0 +1,31 @@ +/* + * Copyright contributors to the Hyperledger Fabric Operator project + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v25_test + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestV2(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "V2 Suite") +} diff --git a/pkg/offering/base/orderer/mocks/update.go b/pkg/offering/base/orderer/mocks/update.go index 8b34c2cf..fcc264f8 100644 --- a/pkg/offering/base/orderer/mocks/update.go +++ b/pkg/offering/base/orderer/mocks/update.go @@ -159,6 +159,16 @@ type Update struct { migrateToV24ReturnsOnCall map[int]struct { result1 bool } + MigrateToV25Stub func() bool + migrateToV25Mutex sync.RWMutex + migrateToV25ArgsForCall []struct { + } + migrateToV25Returns struct { + result1 bool + } + migrateToV25ReturnsOnCall map[int]struct { + result1 bool + } NodeOUUpdatedStub func() bool nodeOUUpdatedMutex sync.RWMutex nodeOUUpdatedArgsForCall []struct { @@ -1038,6 +1048,59 @@ func (fake *Update) MigrateToV24ReturnsOnCall(i int, result1 bool) { }{result1} } +func (fake *Update) MigrateToV25() bool { + fake.migrateToV25Mutex.Lock() + ret, specificReturn := fake.migrateToV25ReturnsOnCall[len(fake.migrateToV25ArgsForCall)] + fake.migrateToV25ArgsForCall = append(fake.migrateToV25ArgsForCall, struct { + }{}) + stub := fake.MigrateToV25Stub + fakeReturns := fake.migrateToV25Returns + fake.recordInvocation("MigrateToV25", []interface{}{}) + fake.migrateToV25Mutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) MigrateToV25CallCount() int { + fake.migrateToV25Mutex.RLock() + defer fake.migrateToV25Mutex.RUnlock() + return len(fake.migrateToV25ArgsForCall) +} + +func (fake *Update) MigrateToV25Calls(stub func() bool) { + fake.migrateToV25Mutex.Lock() + defer fake.migrateToV25Mutex.Unlock() + fake.MigrateToV25Stub = stub +} + +func (fake *Update) MigrateToV25Returns(result1 bool) { + fake.migrateToV25Mutex.Lock() + defer fake.migrateToV25Mutex.Unlock() + fake.MigrateToV25Stub = nil + fake.migrateToV25Returns = struct { + result1 bool + }{result1} +} + +func (fake *Update) MigrateToV25ReturnsOnCall(i int, result1 bool) { + fake.migrateToV25Mutex.Lock() + defer fake.migrateToV25Mutex.Unlock() + fake.MigrateToV25Stub = nil + if fake.migrateToV25ReturnsOnCall == nil { + fake.migrateToV25ReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.migrateToV25ReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + func (fake *Update) NodeOUUpdated() bool { fake.nodeOUUpdatedMutex.Lock() ret, specificReturn := fake.nodeOUUpdatedReturnsOnCall[len(fake.nodeOUUpdatedArgsForCall)] @@ -1495,6 +1558,8 @@ func (fake *Update) Invocations() map[string][][]interface{} { defer fake.migrateToV2Mutex.RUnlock() fake.migrateToV24Mutex.RLock() defer fake.migrateToV24Mutex.RUnlock() + fake.migrateToV25Mutex.RLock() + defer fake.migrateToV25Mutex.RUnlock() fake.nodeOUUpdatedMutex.RLock() defer fake.nodeOUUpdatedMutex.RUnlock() fake.ordererTagUpdatedMutex.RLock() diff --git a/pkg/offering/base/orderer/node.go b/pkg/offering/base/orderer/node.go index aad23746..d6dfd160 100644 --- 
a/pkg/offering/base/orderer/node.go +++ b/pkg/offering/base/orderer/node.go @@ -38,6 +38,7 @@ import ( ordererconfig "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer/config/v1" v2ordererconfig "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer/config/v2" v24ordererconfig "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer/config/v24" + v25ordererconfig "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/orderer/config/v25" "github.com/IBM-Blockchain/fabric-operator/pkg/initializer/validator" controllerclient "github.com/IBM-Blockchain/fabric-operator/pkg/k8s/controllerclient" "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources" @@ -128,6 +129,7 @@ type Update interface { CryptoBackupNeeded() bool MigrateToV2() bool MigrateToV24() bool + MigrateToV25() bool NodeOUUpdated() bool ImagesUpdated() bool FabricVersionUpdated() bool @@ -511,7 +513,9 @@ func (n *Node) Initialize(instance *current.IBPOrderer, update Update) error { ordererConfig := n.Config.OrdererInitConfig.OrdererFile if version.GetMajorReleaseVersion(instance.Spec.FabricVersion) == version.V2 { currentVer := version.String(instance.Spec.FabricVersion) - if currentVer.EqualWithoutTag(version.V2_4_1) || currentVer.GreaterThan(version.V2_4_1) { + if currentVer.EqualWithoutTag(version.V2_5_1) || currentVer.GreaterThan(version.V2_5_1) { + ordererConfig = n.Config.OrdererInitConfig.OrdererV25File + } else if currentVer.EqualWithoutTag(version.V2_4_1) || currentVer.GreaterThan(version.V2_4_1) { ordererConfig = n.Config.OrdererInitConfig.OrdererV24File } else if currentVer.LessThan(version.V2_4_1) { ordererConfig = n.Config.OrdererInitConfig.OrdererV2File @@ -1076,7 +1080,7 @@ func (n *Node) GetEndpoints(instance *current.IBPOrderer) *current.OrdererEndpoi Grpcweb: "https://" + instance.Namespace + "-" + instance.Name + "-grpcweb." + instance.Spec.Domain + ":443", } currentVer := version.String(instance.Spec.FabricVersion) - if currentVer.EqualWithoutTag(version.V2_4_1) || currentVer.GreaterThan(version.V2_4_1) { + if currentVer.EqualWithoutTag(version.V2_4_1) || currentVer.EqualWithoutTag(version.V2_5_1) || currentVer.GreaterThan(version.V2_4_1) { endpoints.Admin = "https://" + instance.Namespace + "-" + instance.Name + "-admin." 
+ instance.Spec.Domain + ":443" } return endpoints @@ -1398,7 +1402,9 @@ func (n *Node) FabricOrdererMigrationV2_0(instance *current.IBPOrderer) error { ordererConfig := n.Config.OrdererInitConfig.OrdererFile if version.GetMajorReleaseVersion(instance.Spec.FabricVersion) == version.V2 { currentVer := version.String(instance.Spec.FabricVersion) - if currentVer.EqualWithoutTag(version.V2_4_1) || currentVer.GreaterThan(version.V2_4_1) { + if currentVer.EqualWithoutTag(version.V2_5_1) || currentVer.GreaterThan(version.V2_5_1) { + ordererConfig = n.Config.OrdererInitConfig.OrdererV25File + } else if currentVer.EqualWithoutTag(version.V2_4_1) || currentVer.GreaterThan(version.V2_4_1) { ordererConfig = n.Config.OrdererInitConfig.OrdererV24File } else { ordererConfig = n.Config.OrdererInitConfig.OrdererV2File @@ -1408,7 +1414,14 @@ func (n *Node) FabricOrdererMigrationV2_0(instance *current.IBPOrderer) error { switch version.GetMajorReleaseVersion(instance.Spec.FabricVersion) { case version.V2: currentVer := version.String(instance.Spec.FabricVersion) - if currentVer.EqualWithoutTag(version.V2_4_1) || currentVer.GreaterThan(version.V2_4_1) { + if currentVer.EqualWithoutTag(version.V2_5_1) || currentVer.GreaterThan(version.V2_5_1) { + log.Info("v2.5.x Fabric Orderer requested") + v25config, err := v25ordererconfig.ReadOrdererFile(ordererConfig) + if err != nil { + return errors.Wrap(err, "failed to read v2.5.x default config file") + } + initOrderer.Config = v25config + } else if currentVer.EqualWithoutTag(version.V2_4_1) || currentVer.GreaterThan(version.V2_4_1) { log.Info("v2.4.x Fabric Orderer requested") v24config, err := v24ordererconfig.ReadOrdererFile(ordererConfig) if err != nil { @@ -1509,6 +1522,99 @@ func (n *Node) FabricOrdererMigrationV2_4(instance *current.IBPOrderer) error { cm.Data["ORDERER_ADMIN_TLS_CERTIFICATE"] = "/certs/tls/signcerts/cert.pem" cm.Data["ORDERER_ADMIN_TLS_PRIVATEKEY"] = "/certs/tls/keystore/key.pem" cm.Data["ORDERER_ADMIN_TLS_CLIENTAUTHREQUIRED"] = "true" + // override the default value 127.0.0.1:9443 + cm.Data["ORDERER_ADMIN_LISTENADDRESS"] = "0.0.0.0:9443" + if intermediateExists { + // override intermediate cert paths for root and clientroot cas + cm.Data["ORDERER_ADMIN_TLS_ROOTCAS"] = intercertPath + cm.Data["ORDERER_ADMIN_TLS_CLIENTROOTCAS"] = intercertPath + } else { + cm.Data["ORDERER_ADMIN_TLS_ROOTCAS"] = "/certs/msp/tlscacerts/cacert-0.pem" + cm.Data["ORDERER_ADMIN_TLS_CLIENTROOTCAS"] = "/certs/msp/tlscacerts/cacert-0.pem" + } + } + + err = n.Client.Update(context.TODO(), cm, controllerclient.UpdateOption{Owner: instance, Scheme: n.Scheme}) + if err != nil { + return errors.Wrap(err, "failed to update env configmap") + } + + initOrderer.Config = ordererConfig + configOverride, err := instance.GetConfigOverride() + if err != nil { + return err + } + + err = initOrderer.OverrideConfig(configOverride.(OrdererConfig)) + if err != nil { + return err + } + + if instance.IsHSMEnabled() && !instance.UsingHSMProxy() { + log.Info(fmt.Sprintf("During orderer '%s' migration, detected using HSM sidecar, setting library path", instance.GetName())) + hsmConfig, err := commonconfig.ReadHSMConfig(n.Client, instance) + if err != nil { + return err + } + initOrderer.Config.SetBCCSPLibrary(filepath.Join("/hsm/lib", filepath.Base(hsmConfig.Library.FilePath))) + } + + err = n.Initializer.CreateOrUpdateConfigMap(instance, initOrderer.GetConfig()) + if err != nil { + return err + } + return nil +} + +func (n *Node) FabricOrdererMigrationV2_5(instance *current.IBPOrderer) error { 
+ log.Info(fmt.Sprintf("Orderer instance '%s' migrating to v2.5.x", instance.GetName())) + + initOrderer, err := n.Initializer.GetInitOrderer(instance, n.GetInitStoragePath(instance)) + if err != nil { + return err + } + + ordererConfig, err := v25ordererconfig.ReadOrdererFile(n.Config.OrdererInitConfig.OrdererV25File) + if err != nil { + return errors.Wrap(err, "failed to read v2.5.x default config file") + } + + // removed the field from the struct + // ordererConfig.FileLedger.Prefix = "" + + name := fmt.Sprintf("%s-env", instance.GetName()) + namespacedName := types.NamespacedName{ + Name: name, + Namespace: instance.Namespace, + } + + cm := &corev1.ConfigMap{} + err = n.Client.Get(context.TODO(), namespacedName, cm) + if err != nil { + return errors.Wrap(err, "failed to get env configmap") + } + + // Add configs for 2.5.x + trueVal := true + ordererConfig.Admin.TLs.Enabled = &trueVal + ordererConfig.Admin.TLs.ClientAuthRequired = &trueVal + + intermediateExists := util.IntermediateSecretExists(n.Client, instance.Namespace, fmt.Sprintf("ecert-%s-intercerts", instance.Name)) && + util.IntermediateSecretExists(n.Client, instance.Namespace, fmt.Sprintf("tls-%s-intercerts", instance.Name)) + intercertPath := "/certs/msp/tlsintermediatecerts/intercert-0.pem" + currentVer := version.String(instance.Spec.FabricVersion) + if currentVer.EqualWithoutTag(version.V2_5_1) || currentVer.GreaterThan(version.V2_5_1) { + // Enable Channel participation for 2.5.x orderers + cm.Data["ORDERER_CHANNELPARTICIPATION_ENABLED"] = "true" + + cm.Data["ORDERER_GENERAL_CLUSTER_SENDBUFFERSIZE"] = "100" + + cm.Data["ORDERER_ADMIN_TLS_ENABLED"] = "true" + cm.Data["ORDERER_ADMIN_TLS_CERTIFICATE"] = "/certs/tls/signcerts/cert.pem" + cm.Data["ORDERER_ADMIN_TLS_PRIVATEKEY"] = "/certs/tls/keystore/key.pem" + cm.Data["ORDERER_ADMIN_TLS_CLIENTAUTHREQUIRED"] = "true" + // override the default value 127.0.0.1:9443 + cm.Data["ORDERER_ADMIN_LISTENADDRESS"] = "0.0.0.0:9443" if intermediateExists { // override intermediate cert paths for root and clientroot cas cm.Data["ORDERER_ADMIN_TLS_ROOTCAS"] = intercertPath diff --git a/pkg/offering/base/peer/mocks/update.go b/pkg/offering/base/peer/mocks/update.go index 90eb970e..e5f35c32 100644 --- a/pkg/offering/base/peer/mocks/update.go +++ b/pkg/offering/base/peer/mocks/update.go @@ -159,6 +159,16 @@ type Update struct { migrateToV24ReturnsOnCall map[int]struct { result1 bool } + MigrateToV25Stub func() bool + migrateToV25Mutex sync.RWMutex + migrateToV25ArgsForCall []struct { + } + migrateToV25Returns struct { + result1 bool + } + migrateToV25ReturnsOnCall map[int]struct { + result1 bool + } NodeOUUpdatedStub func() bool nodeOUUpdatedMutex sync.RWMutex nodeOUUpdatedArgsForCall []struct { @@ -1053,6 +1063,59 @@ func (fake *Update) MigrateToV24ReturnsOnCall(i int, result1 bool) { }{result1} } +func (fake *Update) MigrateToV25() bool { + fake.migrateToV25Mutex.Lock() + ret, specificReturn := fake.migrateToV25ReturnsOnCall[len(fake.migrateToV25ArgsForCall)] + fake.migrateToV25ArgsForCall = append(fake.migrateToV25ArgsForCall, struct { + }{}) + stub := fake.MigrateToV25Stub + fakeReturns := fake.migrateToV25Returns + fake.recordInvocation("MigrateToV25", []interface{}{}) + fake.migrateToV25Mutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Update) MigrateToV25CallCount() int { + fake.migrateToV25Mutex.RLock() + defer fake.migrateToV25Mutex.RUnlock() + return len(fake.migrateToV25ArgsForCall) +} 
+ +func (fake *Update) MigrateToV25Calls(stub func() bool) { + fake.migrateToV25Mutex.Lock() + defer fake.migrateToV25Mutex.Unlock() + fake.MigrateToV25Stub = stub +} + +func (fake *Update) MigrateToV25Returns(result1 bool) { + fake.migrateToV25Mutex.Lock() + defer fake.migrateToV25Mutex.Unlock() + fake.MigrateToV25Stub = nil + fake.migrateToV25Returns = struct { + result1 bool + }{result1} +} + +func (fake *Update) MigrateToV25ReturnsOnCall(i int, result1 bool) { + fake.migrateToV25Mutex.Lock() + defer fake.migrateToV25Mutex.Unlock() + fake.MigrateToV25Stub = nil + if fake.migrateToV25ReturnsOnCall == nil { + fake.migrateToV25ReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.migrateToV25ReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + func (fake *Update) NodeOUUpdated() bool { fake.nodeOUUpdatedMutex.Lock() ret, specificReturn := fake.nodeOUUpdatedReturnsOnCall[len(fake.nodeOUUpdatedArgsForCall)] @@ -1595,6 +1658,8 @@ func (fake *Update) Invocations() map[string][][]interface{} { defer fake.migrateToV2Mutex.RUnlock() fake.migrateToV24Mutex.RLock() defer fake.migrateToV24Mutex.RUnlock() + fake.migrateToV25Mutex.RLock() + defer fake.migrateToV25Mutex.RUnlock() fake.nodeOUUpdatedMutex.RLock() defer fake.nodeOUUpdatedMutex.RUnlock() fake.peerTagUpdatedMutex.RLock() diff --git a/pkg/offering/base/peer/override/deployment.go b/pkg/offering/base/peer/override/deployment.go index c52e5f47..be4b83da 100644 --- a/pkg/offering/base/peer/override/deployment.go +++ b/pkg/offering/base/peer/override/deployment.go @@ -281,10 +281,10 @@ func (o *Override) CreateDeployment(instance *current.IBPPeer, k8sDep *appsv1.De return errors.Wrap(err, "failed during V2 peer deployment overrides") } peerVersion := version.String(instance.Spec.FabricVersion) - if peerVersion.EqualWithoutTag(version.V2_4_1) || peerVersion.GreaterThan(version.V2_4_1) { + if peerVersion.EqualWithoutTag(version.V2_4_1) || peerVersion.EqualWithoutTag(version.V2_5_1) || peerVersion.GreaterThan(version.V2_4_1) { err = o.V24Deployment(instance, deployment) if err != nil { - return errors.Wrap(err, "failed during V24 peer deployment overrides") + return errors.Wrap(err, "failed during V24/V25 peer deployment overrides") } } } else { @@ -636,10 +636,10 @@ func (o *Override) UpdateDeployment(instance *current.IBPPeer, k8sDep *appsv1.De return errors.Wrapf(err, "failed to update V2 fabric deployment for instance '%s'", instance.GetName()) } peerVersion := version.String(instance.Spec.FabricVersion) - if peerVersion.EqualWithoutTag(version.V2_4_1) || peerVersion.GreaterThan(version.V2_4_1) { + if peerVersion.EqualWithoutTag(version.V2_4_1) || peerVersion.EqualWithoutTag(version.V2_5_1) || peerVersion.GreaterThan(version.V2_4_1) { err := o.V24DeploymentUpdate(instance, deployment) if err != nil { - return errors.Wrapf(err, "failed to update V24 fabric deployment for instance '%s'", instance.GetName()) + return errors.Wrapf(err, "failed to update V24/V25 fabric deployment for instance '%s'", instance.GetName()) } } } diff --git a/pkg/offering/base/peer/peer.go b/pkg/offering/base/peer/peer.go index dbc8be50..b6806181 100644 --- a/pkg/offering/base/peer/peer.go +++ b/pkg/offering/base/peer/peer.go @@ -44,6 +44,7 @@ import ( resourcemanager "github.com/IBM-Blockchain/fabric-operator/pkg/manager/resources/manager" "github.com/IBM-Blockchain/fabric-operator/pkg/migrator/peer/fabric" v2 "github.com/IBM-Blockchain/fabric-operator/pkg/migrator/peer/fabric/v2" + v25 
"github.com/IBM-Blockchain/fabric-operator/pkg/migrator/peer/fabric/v25" "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common" "github.com/IBM-Blockchain/fabric-operator/pkg/offering/common/reconcilechecks" "github.com/IBM-Blockchain/fabric-operator/pkg/operatorerrors" @@ -153,6 +154,7 @@ type Update interface { TLScertNewKeyReenroll() bool MigrateToV2() bool MigrateToV24() bool + MigrateToV25() bool UpgradeDBs() bool MSPUpdated() bool EcertEnroll() bool @@ -396,7 +398,11 @@ func (p *Peer) Initialize(instance *current.IBPPeer, update Update) error { peerConfig := p.Config.PeerInitConfig.CorePeerFile if version.GetMajorReleaseVersion(instance.Spec.FabricVersion) == version.V2 { + peerversion := version.String(instance.Spec.FabricVersion) peerConfig = p.Config.PeerInitConfig.CorePeerV2File + if peerversion.EqualWithoutTag(version.V2_5_1) || peerversion.GreaterThan(version.V2_5_1) { + peerConfig = p.Config.PeerInitConfig.CorePeerV25File + } } if instance.UsingHSMProxy() { @@ -1226,6 +1232,22 @@ func (p *Peer) ReconcileFabricPeerMigrationV2_4(instance *current.IBPPeer) error return nil } +func (p *Peer) ReconcileFabricPeerMigrationV2_5(instance *current.IBPPeer) error { + log.Info("Migration to V2.5.x requested, checking if migration is needed") + + migrator := &v25.Migrate{ + DeploymentManager: p.DeploymentManager, + ConfigMapManager: &initializer.CoreConfigMap{Config: p.Config.PeerInitConfig, Scheme: p.Scheme, GetLabels: p.GetLabels, Client: p.Client}, + Client: p.Client, + } + + if err := fabric.V25Migrate(instance, migrator, instance.Spec.FabricVersion, p.Config.Operator.Peer.Timeouts.DBMigration); err != nil { + return err + } + + return nil +} + func (p *Peer) HandleMigrationJobs(listOpt k8sclient.ListOption, instance *current.IBPPeer) (bool, error) { status, job, err := p.CheckForRunningJobs(listOpt) if err != nil { diff --git a/pkg/offering/common/reconcilechecks/fabricversion.go b/pkg/offering/common/reconcilechecks/fabricversion.go index 8816e5bf..4d78d845 100644 --- a/pkg/offering/common/reconcilechecks/fabricversion.go +++ b/pkg/offering/common/reconcilechecks/fabricversion.go @@ -66,15 +66,17 @@ func FabricVersionHelper(instance Instance, versions *deployer.Versions, update return FabricVersion(instance, update, image, fv) } -//go:generate counterfeiter -o mocks/image.go -fake-name Image . Image // Image defines the contract with the image checks +// +//go:generate counterfeiter -o mocks/image.go -fake-name Image . Image type Image interface { UpdateRequired(images.Update) bool SetDefaults(images.Instance) error } -//go:generate counterfeiter -o mocks/version.go -fake-name Version . Version // Version defines the contract with the version checks +// +//go:generate counterfeiter -o mocks/version.go -fake-name Version . 
Version type Version interface { Normalize(images.FabricVersionInstance) string Validate(images.FabricVersionInstance) error diff --git a/pkg/offering/k8s/orderer/node.go b/pkg/offering/k8s/orderer/node.go index c4d6826a..ad6103a2 100644 --- a/pkg/offering/k8s/orderer/node.go +++ b/pkg/offering/k8s/orderer/node.go @@ -153,6 +153,12 @@ func (n *Node) Reconcile(instance *current.IBPOrderer, update baseorderer.Update } } + if update.MigrateToV25() { + if err := n.FabricOrdererMigrationV2_5(instance); err != nil { + return common.Result{}, operatorerrors.Wrap(err, operatorerrors.FabricOrdererMigrationFailed, "failed to migrate fabric orderer to version v2.5.x") + } + } + err = n.ReconcileManagers(instance, update, nil) if err != nil { return common.Result{}, errors.Wrap(err, "failed to reconcile managers") diff --git a/pkg/offering/k8s/orderer/orderer.go b/pkg/offering/k8s/orderer/orderer.go index 951f10cd..987b53eb 100644 --- a/pkg/offering/k8s/orderer/orderer.go +++ b/pkg/offering/k8s/orderer/orderer.go @@ -133,7 +133,7 @@ func (o *Orderer) ReconcileNode(instance *current.IBPOrderer, update baseorderer hostGrpc := fmt.Sprintf("%s-%s-grpcweb.%s", instance.Namespace, instance.Name, instance.Spec.Domain) hosts := []string{} currentVer := version.String(instance.Spec.FabricVersion) - if currentVer.EqualWithoutTag(version.V2_4_1) || currentVer.GreaterThan(version.V2_4_1) { + if currentVer.EqualWithoutTag(version.V2_4_1) || currentVer.EqualWithoutTag(version.V2_5_1) || currentVer.GreaterThan(version.V2_4_1) { hostAdmin := fmt.Sprintf("%s-%s-admin.%s", instance.Namespace, instance.Name, instance.Spec.Domain) hosts = append(hosts, hostAPI, hostOperations, hostGrpc, hostAdmin, "127.0.0.1") //TODO: need to Re-enroll when orderer migrated from 1.4.x/2.2.x to 2.4.1 diff --git a/pkg/offering/k8s/orderer/override/ingress.go b/pkg/offering/k8s/orderer/override/ingress.go index a790c249..14849bd4 100644 --- a/pkg/offering/k8s/orderer/override/ingress.go +++ b/pkg/offering/k8s/orderer/override/ingress.go @@ -139,7 +139,7 @@ func (o *Override) CommonIngress(instance *current.IBPOrderer, ingress *networki }, } currentVer := version.String(instance.Spec.FabricVersion) - if currentVer.EqualWithoutTag(version.V2_4_1) || currentVer.GreaterThan(version.V2_4_1) { + if currentVer.EqualWithoutTag(version.V2_4_1) || currentVer.EqualWithoutTag(version.V2_5_1) || currentVer.GreaterThan(version.V2_4_1) { adminhost := instance.Namespace + "-" + instance.Name + "-admin" + "." 
+ instance.Spec.Domain adminIngressRule := []networkingv1.IngressRule{ networkingv1.IngressRule{ diff --git a/pkg/offering/k8s/peer/peer.go b/pkg/offering/k8s/peer/peer.go index 40ae583b..576d0143 100644 --- a/pkg/offering/k8s/peer/peer.go +++ b/pkg/offering/k8s/peer/peer.go @@ -196,6 +196,12 @@ func (p *Peer) Reconcile(instance *current.IBPPeer, update basepeer.Update) (com } } + if update.MigrateToV25() { + if err := p.ReconcileFabricPeerMigrationV2_5(instance); err != nil { + return common.Result{}, operatorerrors.Wrap(err, operatorerrors.FabricPeerMigrationFailed, "failed to migrate fabric peer to version v2.5.x") + } + } + err = p.ReconcileManagers(instance, update) if err != nil { return common.Result{}, errors.Wrap(err, "failed to reconcile managers") diff --git a/pkg/offering/openshift/orderer/node.go b/pkg/offering/openshift/orderer/node.go index 3e6c9b9b..e328460b 100644 --- a/pkg/offering/openshift/orderer/node.go +++ b/pkg/offering/openshift/orderer/node.go @@ -153,6 +153,12 @@ func (n *Node) Reconcile(instance *current.IBPOrderer, update baseorderer.Update } } + if update.MigrateToV25() { + if err := n.FabricOrdererMigrationV2_5(instance); err != nil { + return common.Result{}, operatorerrors.Wrap(err, operatorerrors.FabricOrdererMigrationFailed, "failed to migrate fabric orderer to version v2.5.x") + } + } + err = n.ReconcileManagers(instance, update, nil) if err != nil { return common.Result{}, errors.Wrap(err, "failed to reconcile managers") @@ -248,7 +254,7 @@ func (n *Node) ReconcileManagers(instance *current.IBPOrderer, updated baseorder } currentVer := version.String(instance.Spec.FabricVersion) - if currentVer.EqualWithoutTag(version.V2_4_1) || currentVer.GreaterThan(version.V2_4_1) { + if currentVer.EqualWithoutTag(version.V2_4_1) || currentVer.EqualWithoutTag(version.V2_5_1) || currentVer.GreaterThan(version.V2_4_1) { err = n.AdminRouteManager.Reconcile(instance, update) if err != nil { return errors.Wrap(err, "failed Orderer Admin Route reconciliation") diff --git a/pkg/offering/openshift/orderer/orderer.go b/pkg/offering/openshift/orderer/orderer.go index 2492c939..6f5f2323 100644 --- a/pkg/offering/openshift/orderer/orderer.go +++ b/pkg/offering/openshift/orderer/orderer.go @@ -130,7 +130,7 @@ func (o *Orderer) ReconcileNode(instance *current.IBPOrderer, update baseorderer hostGrpc := fmt.Sprintf("%s-%s-grpcweb.%s", instance.Namespace, instance.Name, instance.Spec.Domain) hosts := []string{} currentVer := version.String(instance.Spec.FabricVersion) - if currentVer.EqualWithoutTag(version.V2_4_1) || currentVer.GreaterThan(version.V2_4_1) { + if currentVer.EqualWithoutTag(version.V2_4_1) || currentVer.EqualWithoutTag(version.V2_5_1) || currentVer.GreaterThan(version.V2_4_1) { hostAdmin := fmt.Sprintf("%s-%s-admin.%s", instance.Namespace, instance.Name, instance.Spec.Domain) hosts = append(hosts, hostAPI, hostOperations, hostGrpc, hostAdmin, "127.0.0.1") } else { diff --git a/pkg/offering/openshift/orderer/override/adminroute.go b/pkg/offering/openshift/orderer/override/adminroute.go index 5df784d6..815cdd03 100644 --- a/pkg/offering/openshift/orderer/override/adminroute.go +++ b/pkg/offering/openshift/orderer/override/adminroute.go @@ -33,7 +33,7 @@ import ( func (o *Override) AdminRoute(object v1.Object, route *routev1.Route, action resources.Action) error { instance := object.(*current.IBPOrderer) currentVer := version.String(instance.Spec.FabricVersion) - if !(currentVer.EqualWithoutTag(version.V2_4_1) || currentVer.GreaterThan(version.V2_4_1)) { + if 
!(currentVer.EqualWithoutTag(version.V2_4_1) || currentVer.EqualWithoutTag(version.V2_5_1) || currentVer.GreaterThan(version.V2_4_1)) { return nil } switch action { diff --git a/pkg/offering/openshift/peer/peer.go b/pkg/offering/openshift/peer/peer.go index db87625d..5ca801cf 100644 --- a/pkg/offering/openshift/peer/peer.go +++ b/pkg/offering/openshift/peer/peer.go @@ -217,6 +217,12 @@ func (p *Peer) Reconcile(instance *current.IBPPeer, update basepeer.Update) (com } } + if update.MigrateToV25() { + if err := p.ReconcileFabricPeerMigrationV2_5(instance); err != nil { + return common.Result{}, operatorerrors.Wrap(err, operatorerrors.FabricPeerMigrationFailed, "failed to migrate fabric peer to version v2.5.x") + } + } + err = p.ReconcileManagers(instance, update) if err != nil { return common.Result{}, errors.Wrap(err, "failed to reconcile managers") diff --git a/pkg/restart/staggerrestarts/staggerrestarts.go b/pkg/restart/staggerrestarts/staggerrestarts.go index fe5bafb7..9167e4a2 100644 --- a/pkg/restart/staggerrestarts/staggerrestarts.go +++ b/pkg/restart/staggerrestarts/staggerrestarts.go @@ -64,7 +64,8 @@ func New(client k8sclient.Client, timeout time.Duration) *StaggerRestartsService // Restart is called by the restart manager. // For CA/Peer/Orderer: adds component to the queue for restart. // For Console: restarts the component directly as there is only one ibpconsole -// instance per network. We bypass the queue logic for ibpconsoles. +// +// instance per network. We bypass the queue logic for ibpconsoles. func (s *StaggerRestartsService) Restart(instance Instance, reason string) error { switch instance.(type) { case *current.IBPConsole: diff --git a/version/fabricversion.go b/version/fabricversion.go index e58ae15b..50931289 100644 --- a/version/fabricversion.go +++ b/version/fabricversion.go @@ -44,6 +44,7 @@ const ( V2_2_5 = "2.2.5" V2_4_1 = "2.4.1" + V2_5_1 = "2.5.1" V1_4 = "V1.4"