From 3125f28ab8911f35140bebaabeb76ec2b0500615 Mon Sep 17 00:00:00 2001 From: Vimal Kumar Date: Fri, 10 Jan 2020 02:45:36 +0530 Subject: [PATCH 01/21] Generate IndexManagement LogStore retention policy * Updated dependencies from elasticsearch-operator * Updated clusterlogging_types.go for logstore retention policy spec * Updated cluster-logging crd for logstore retention policy * Added indexmanagement to generate indexmanagement spec * Updated logStore to create indexmanagement spec, compare change in indexmanagement spec * Added testcases --- Gopkg.lock | 8 +- Gopkg.toml | 2 +- manifests/4.4/cluster-loggings.crd.yaml | 15 + pkg/apis/logging/v1/clusterlogging_types.go | 11 + pkg/apis/logging/v1/zz_generated.deepcopy.go | 54 +++- .../logging/v1alpha1/zz_generated.deepcopy.go | 2 +- .../indexmanagement/index_management.go | 180 +++++++++++ .../indexmanagement_suite_test.go | 13 + .../indexmanagement/indexmanagement_test.go | 187 +++++++++++ pkg/k8shandler/logstore.go | 15 +- pkg/k8shandler/logstore_test.go | 37 +++ .../elasticsearch-operator/Gopkg.lock | 111 ++++++- .../elasticsearch-operator/Gopkg.toml | 7 + .../manifests/4.4/elasticsearches.crd.yaml | 48 +++ .../apis/logging/v1/elasticsearch_types.go | 2 + .../apis/logging/v1/index_management_types.go | 265 +++++++++++++++ .../apis/logging/v1/zz_generated.deepcopy.go | 303 ++++++++++++++++++ .../pkg/indexmanagement/validations.go | 124 +++++++ .../pkg/k8shandler/configuration_tmpl.go | 23 +- .../pkg/k8shandler/elasticsearch.go | 105 ++++++ .../pkg/k8shandler/index_management.go | 100 ++++++ .../pkg/k8shandler/reconciler.go | 15 +- .../pkg/k8shandler/util.go | 1 - .../pkg/logger/logger.go | 20 ++ .../pkg/types/elasticsearch/types.go | 62 ++++ .../elasticsearch-operator/pkg/utils/utils.go | 18 ++ .../test/helpers/elasticsearch.go | 44 +++ .../test/helpers/json.go | 41 +++ .../test/utils/utils.go | 3 +- 29 files changed, 1779 insertions(+), 37 deletions(-) create mode 100644 pkg/k8shandler/indexmanagement/index_management.go create mode 100644 pkg/k8shandler/indexmanagement/indexmanagement_suite_test.go create mode 100644 pkg/k8shandler/indexmanagement/indexmanagement_test.go create mode 100644 vendor/github.com/openshift/elasticsearch-operator/pkg/apis/logging/v1/index_management_types.go create mode 100644 vendor/github.com/openshift/elasticsearch-operator/pkg/indexmanagement/validations.go create mode 100644 vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/index_management.go create mode 100644 vendor/github.com/openshift/elasticsearch-operator/pkg/types/elasticsearch/types.go create mode 100644 vendor/github.com/openshift/elasticsearch-operator/test/helpers/elasticsearch.go create mode 100644 vendor/github.com/openshift/elasticsearch-operator/test/helpers/json.go diff --git a/Gopkg.lock b/Gopkg.lock index 64badf7fba..f4d3ae867d 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -520,19 +520,21 @@ revision = "d4a64ec2cbd86f11ea74dfdcf6520d5833d0c6cd" [[projects]] - branch = "master" - digest = "1:fc8a467806a4d95635174d4413ab0bf93bd08ea2089c8d65c008fbba2bcb7fc7" + branch = "feature-es6x" + digest = "1:bde78f3ff1aa8a9fd828b61aee76f53c36a2f4b990fdf9a35b7430fe66daed14" name = "github.com/openshift/elasticsearch-operator" packages = [ "pkg/apis", "pkg/apis/logging/v1", + "pkg/indexmanagement", "pkg/k8shandler", "pkg/logger", + "pkg/types/elasticsearch", "pkg/utils", "test/utils", ] pruneopts = "T" - revision = "a39c936a841a12f747b463ed0d92f8bf32836a97" + revision = "66a02fbeffe5ace05b977021bf28e7f878eb32e6" [[projects]] digest = 
"1:0087f38751ec1995bafa67afd0ded9519a46297ecf571165c695fb9ba943688d" diff --git a/Gopkg.toml b/Gopkg.toml index 59fb6762fc..1191c62a3e 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -64,7 +64,7 @@ required = [ [[constraint]] name = "github.com/openshift/elasticsearch-operator" - branch = "master" + branch = "feature-es6x" [[override]] name = "gopkg.in/fsnotify.v1" diff --git a/manifests/4.4/cluster-loggings.crd.yaml b/manifests/4.4/cluster-loggings.crd.yaml index 46bab8e387..183ce2fc11 100644 --- a/manifests/4.4/cluster-loggings.crd.yaml +++ b/manifests/4.4/cluster-loggings.crd.yaml @@ -120,6 +120,21 @@ spec: - "ZeroRedundancy" required: - nodeCount + retentionPolicy: + description: Retention policy defines the maximum age for an index after which it should be deleted + properties: + logs.app: + properties: + maxAge: + type: string + logs.infra: + properties: + maxAge: + type: string + logs.audit: + properties: + maxAge: + type: string required: - type collection: diff --git a/pkg/apis/logging/v1/clusterlogging_types.go b/pkg/apis/logging/v1/clusterlogging_types.go index fccb381c5c..a569acfeec 100644 --- a/pkg/apis/logging/v1/clusterlogging_types.go +++ b/pkg/apis/logging/v1/clusterlogging_types.go @@ -52,6 +52,17 @@ type ProxySpec struct { type LogStoreSpec struct { Type LogStoreType `json:"type"` ElasticsearchSpec `json:"elasticsearch,omitempty"` + RetentionPolicy *RetentionPoliciesSpec `json:"retentionPolicy,omitempty"` +} + +type RetentionPoliciesSpec struct { + App *RetentionPolicySpec `json:"logs.app,omitempty"` + Infra *RetentionPolicySpec `json:"logs.infra,omitempty"` + Audit *RetentionPolicySpec `json:"logs.audit,omitempty"` +} + +type RetentionPolicySpec struct { + MaxAge elasticsearch.TimeUnit `json:"maxAge"` } type ElasticsearchSpec struct { diff --git a/pkg/apis/logging/v1/zz_generated.deepcopy.go b/pkg/apis/logging/v1/zz_generated.deepcopy.go index 29091db5d2..5c3348667d 100644 --- a/pkg/apis/logging/v1/zz_generated.deepcopy.go +++ b/pkg/apis/logging/v1/zz_generated.deepcopy.go @@ -1,6 +1,6 @@ // +build !ignore_autogenerated -// Code generated by deepcopy-gen. DO NOT EDIT. +// Code generated by operator-sdk. DO NOT EDIT. package v1 @@ -712,6 +712,11 @@ func (in *LogCollectionStatus) DeepCopy() *LogCollectionStatus { func (in *LogStoreSpec) DeepCopyInto(out *LogStoreSpec) { *out = *in in.ElasticsearchSpec.DeepCopyInto(&out.ElasticsearchSpec) + if in.RetentionPolicy != nil { + in, out := &in.RetentionPolicy, &out.RetentionPolicy + *out = new(RetentionPoliciesSpec) + (*in).DeepCopyInto(*out) + } return } @@ -822,6 +827,53 @@ func (in *ProxySpec) DeepCopy() *ProxySpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetentionPoliciesSpec) DeepCopyInto(out *RetentionPoliciesSpec) { + *out = *in + if in.App != nil { + in, out := &in.App, &out.App + *out = new(RetentionPolicySpec) + **out = **in + } + if in.Infra != nil { + in, out := &in.Infra, &out.Infra + *out = new(RetentionPolicySpec) + **out = **in + } + if in.Audit != nil { + in, out := &in.Audit, &out.Audit + *out = new(RetentionPolicySpec) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionPoliciesSpec. 
+func (in *RetentionPoliciesSpec) DeepCopy() *RetentionPoliciesSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(RetentionPoliciesSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RetentionPolicySpec) DeepCopyInto(out *RetentionPolicySpec) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionPolicySpec.
+func (in *RetentionPolicySpec) DeepCopy() *RetentionPolicySpec {
+	if in == nil {
+		return nil
+	}
+	out := new(RetentionPolicySpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *VisualizationSpec) DeepCopyInto(out *VisualizationSpec) {
 	*out = *in
diff --git a/pkg/apis/logging/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/logging/v1alpha1/zz_generated.deepcopy.go
index 535db466e7..c234e7c8be 100644
--- a/pkg/apis/logging/v1alpha1/zz_generated.deepcopy.go
+++ b/pkg/apis/logging/v1alpha1/zz_generated.deepcopy.go
@@ -1,6 +1,6 @@
 // +build !ignore_autogenerated
 
-// Code generated by deepcopy-gen. DO NOT EDIT.
+// Code generated by operator-sdk. DO NOT EDIT.
 
 package v1alpha1
 
diff --git a/pkg/k8shandler/indexmanagement/index_management.go b/pkg/k8shandler/indexmanagement/index_management.go
new file mode 100644
index 0000000000..c3f21cdfe6
--- /dev/null
+++ b/pkg/k8shandler/indexmanagement/index_management.go
@@ -0,0 +1,180 @@
+package indexmanagement
+
+import (
+	"fmt"
+	"regexp"
+	"strconv"
+
+	logging "github.com/openshift/cluster-logging-operator/pkg/apis/logging/v1"
+	"github.com/openshift/cluster-logging-operator/pkg/logger"
+	esapi "github.com/openshift/elasticsearch-operator/pkg/apis/logging/v1"
+)
+
+const (
+	PolicyNameApp   = "app-policy"
+	PolicyNameInfra = "infra-policy"
+	PolicyNameAudit = "audit-policy"
+
+	MappingNameApp   = "app"
+	MappingNameInfra = "infra"
+	MappingNameAudit = "audit.infra"
+
+	PollInterval = "15m"
+
+	HotPhaseAgeAsPercentOfMaxAge = 5
+)
+
+var (
+	AliasesApp   = []string{"app", "logs.app"}
+	AliasesInfra = []string{"infra", "logs.infra"}
+	AliasesAudit = []string{"infra.audit", "logs.audit"}
+
+	agePattern = regexp.MustCompile("^(?P<age>\\d+)(?P<unit>[yMwdhHms])$")
+)
+
+func NewSpec(retentionPolicy *logging.RetentionPoliciesSpec) *esapi.IndexManagementSpec {
+
+	if retentionPolicy == nil {
+		return nil
+	}
+	if retentionPolicy.App == nil && retentionPolicy.Infra == nil && retentionPolicy.Audit == nil {
+		logger.Info("Retention policy not defined for any log source. Cannot create Index management spec.")
+		return nil
+	}
+
+	indexManagement := esapi.IndexManagementSpec{}
+	if retentionPolicy.App != nil {
+		hotPhaseAgeApp, err := getHotPhaseAge(retentionPolicy.App.MaxAge)
+		if err != nil {
+			logger.Errorf("Error occurred while getting hot phase age for App log source. err: %v", err)
+			return nil
+		}
+		appPolicySpec := newPolicySpec(PolicyNameApp, retentionPolicy.App.MaxAge, hotPhaseAgeApp)
+		indexManagement.Policies = append(indexManagement.Policies, appPolicySpec)
+		appMappingSpec := newMappingSpec(MappingNameApp, PolicyNameApp, AliasesApp)
+		indexManagement.Mappings = append(indexManagement.Mappings, appMappingSpec)
+	}
+	if retentionPolicy.Infra != nil {
+		hotPhaseAgeInfra, err := getHotPhaseAge(retentionPolicy.Infra.MaxAge)
+		if err != nil {
+			logger.Errorf("Error occurred while getting hot phase age for Infra log source. err: %v", err)
+			return nil
+		}
+		infraPolicySpec := newPolicySpec(PolicyNameInfra, retentionPolicy.Infra.MaxAge, hotPhaseAgeInfra)
+		indexManagement.Policies = append(indexManagement.Policies, infraPolicySpec)
+		infraMappingSpec := newMappingSpec(MappingNameInfra, PolicyNameInfra, AliasesInfra)
+		indexManagement.Mappings = append(indexManagement.Mappings, infraMappingSpec)
+	}
+	if retentionPolicy.Audit != nil {
+		hotPhaseAgeAudit, err := getHotPhaseAge(retentionPolicy.Audit.MaxAge)
+		if err != nil {
+			logger.Errorf("Error occurred while getting hot phase age for Audit log source. err: %v", err)
+			return nil
+		}
+		auditPolicySpec := newPolicySpec(PolicyNameAudit, retentionPolicy.Audit.MaxAge, hotPhaseAgeAudit)
+		indexManagement.Policies = append(indexManagement.Policies, auditPolicySpec)
+		auditMappingSpec := newMappingSpec(MappingNameAudit, PolicyNameAudit, AliasesAudit)
+		indexManagement.Mappings = append(indexManagement.Mappings, auditMappingSpec)
+	}
+	return &indexManagement
+}
+
+func newPolicySpec(name string, maxIndexAge esapi.TimeUnit, hotPhaseAge esapi.TimeUnit) esapi.IndexManagementPolicySpec {
+
+	policySpec := esapi.IndexManagementPolicySpec{
+		Name:         name,
+		PollInterval: PollInterval,
+		Phases: esapi.IndexManagementPhasesSpec{
+			Hot: &esapi.IndexManagementHotPhaseSpec{
+				Actions: esapi.IndexManagementActionsSpec{
+					Rollover: &esapi.IndexManagementActionSpec{
+						MaxAge: hotPhaseAge,
+					},
+				},
+			},
+			Delete: &esapi.IndexManagementDeletePhaseSpec{
+				MinAge: maxIndexAge,
+			},
+		},
+	}
+	return policySpec
+}
+
+func newMappingSpec(name string, policyRef string, aliases []string) esapi.IndexManagementPolicyMappingSpec {
+	mappingSpec := esapi.IndexManagementPolicyMappingSpec{
+		Name:      name,
+		PolicyRef: policyRef,
+		Aliases:   aliases,
+	}
+	return mappingSpec
+}
+
+func getHotPhaseAge(maxAge esapi.TimeUnit) (esapi.TimeUnit, error) {
+	var (
+		age         int
+		unit        byte
+		err         error
+		hotphaseAge int
+	)
+	age, unit, err = toAgeAndUnit(maxAge)
+	if err == nil {
+		hotphaseAge, unit, err = toHotPhaseAge(age, unit)
+		if err == nil {
+			return esapi.TimeUnit(fmt.Sprintf("%d%c", hotphaseAge, unit)), nil
+		}
+	}
+	return esapi.TimeUnit(""), err
+}
+
+func toAgeAndUnit(timeunit esapi.TimeUnit) (int, byte, error) {
+	strvalues := agePattern.FindStringSubmatch(string(timeunit))
+	if len(strvalues) != 3 {
+		return 0, 0, fmt.Errorf("age pattern mismatch")
+	}
+	age, _ := strconv.Atoi(strvalues[1])
+	unit := strvalues[2][0]
+	return age, unit, nil
+}
+
+func toHotPhaseAge(value int, unit byte) (int, byte, error) {
+	newval := value * HotPhaseAgeAsPercentOfMaxAge / 100
+
+	for newval == 0 {
+		value, newunit, err := convertToLowerUnits(value, unit)
+		if err != nil {
+			return 0, 0, err
+		}
+		newval = value * HotPhaseAgeAsPercentOfMaxAge / 100
+		unit = newunit
+	}
+
+	return newval, unit, nil
+}
+
+func convertToLowerUnits(value int, unit byte) (int, byte, error) {
+
+	switch unit {
+	case 's':
+		return 0, 0, fmt.Errorf("cannot convert \"%d%c\" to lower units", value, unit)
+	case 'm':
+		newval := value * 60
+		return newval, 's', nil
+	case 'h', 'H':
+		newval := value * 60
+		return newval, 'm', nil
+	case 'd':
+		newval := value * 24
+		return newval, 'h', nil
+	case 'w':
+		newval := value * 7
+		return newval, 'd', nil
+	case 'M':
+		newval := value * 30
+		return newval, 'd', nil
+	case 'y':
+		newval := value * 365
+		return newval, 'd', nil
+	}
+
+	return 0, 0, fmt.Errorf("unknown units")
+}
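The hot-phase rollover age above works out to HotPhaseAgeAsPercentOfMaxAge (5%) of the retention maxAge, stepping down through the unit table in convertToLowerUnits until the truncated integer result is non-zero. A minimal standalone sketch of that arithmetic (illustrative only; the helper name and inlined unit table are not part of the patch):

package main

import "fmt"

// hotPhaseAge mirrors the intent of getHotPhaseAge: take 5% of maxAge,
// converting to smaller units until the truncated integer result is non-zero.
func hotPhaseAge(age int, unit byte) string {
	lower := map[byte]struct {
		factor int
		unit   byte
	}{
		'y': {365, 'd'}, 'M': {30, 'd'}, 'w': {7, 'd'},
		'd': {24, 'h'}, 'h': {60, 'm'}, 'm': {60, 's'},
	}
	hot := age * 5 / 100
	for hot == 0 {
		conv, ok := lower[unit]
		if !ok {
			return "" // nothing below seconds; mirrors the conversion error
		}
		age, unit = age*conv.factor, conv.unit
		hot = age * 5 / 100
	}
	return fmt.Sprintf("%d%c", hot, unit)
}

func main() {
	fmt.Println(hotPhaseAge(12, 'h')) // 36m (5% of 720m)
	fmt.Println(hotPhaseAge(7, 'd'))  // 8h  (5% of 168h)
	fmt.Println(hotPhaseAge(10, 's')) // ""  (the "10s" failure the tests below exercise)
}

So a 7d retention policy yields an 8h rollover age, while sub-minute ages such as "10s" cannot be converted and cause NewSpec to return nil.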
err: %v", err) + return nil + } + infraPolicySpec := newPolicySpec(PolicyNameInfra, retentionPolicy.Infra.MaxAge, hotPhaseAgeInfra) + indexManagement.Policies = append(indexManagement.Policies, infraPolicySpec) + infraMappingSpec := newMappingSpec(MappingNameInfra, PolicyNameInfra, AliasesInfra) + indexManagement.Mappings = append(indexManagement.Mappings, infraMappingSpec) + } + if retentionPolicy.Audit != nil { + hotPhaseAgeAudit, err := getHotPhaseAge(retentionPolicy.Audit.MaxAge) + if err != nil { + logger.Errorf("Error occured while getting hot phase age for Audit log source. err: %v", err) + return nil + } + auditPolicySpec := newPolicySpec(PolicyNameAudit, retentionPolicy.Audit.MaxAge, hotPhaseAgeAudit) + indexManagement.Policies = append(indexManagement.Policies, auditPolicySpec) + auditMappingSpec := newMappingSpec(MappingNameAudit, PolicyNameAudit, AliasesAudit) + indexManagement.Mappings = append(indexManagement.Mappings, auditMappingSpec) + } + return &indexManagement +} + +func newPolicySpec(name string, maxIndexAge esapi.TimeUnit, hotPhaseAge esapi.TimeUnit) esapi.IndexManagementPolicySpec { + + policySpec := esapi.IndexManagementPolicySpec{ + Name: name, + PollInterval: PollInterval, + Phases: esapi.IndexManagementPhasesSpec{ + Hot: &esapi.IndexManagementHotPhaseSpec{ + Actions: esapi.IndexManagementActionsSpec{ + Rollover: &esapi.IndexManagementActionSpec{ + MaxAge: hotPhaseAge, + }, + }, + }, + Delete: &esapi.IndexManagementDeletePhaseSpec{ + MinAge: maxIndexAge, + }, + }, + } + return policySpec +} + +func newMappingSpec(name string, policyRef string, aliases []string) esapi.IndexManagementPolicyMappingSpec { + mappingSpec := esapi.IndexManagementPolicyMappingSpec{ + Name: name, + PolicyRef: policyRef, + Aliases: aliases, + } + return mappingSpec +} + +func getHotPhaseAge(maxAge esapi.TimeUnit) (esapi.TimeUnit, error) { + var ( + age int + unit byte + err error + hotphaseAge int + ) + age, unit, err = toAgeAndUnit(maxAge) + if err == nil { + hotphaseAge, unit, err = toHotPhaseAge(age, unit) + if err == nil { + return esapi.TimeUnit(fmt.Sprintf("%d%c", hotphaseAge, unit)), nil + } + } + return esapi.TimeUnit(""), err +} + +func toAgeAndUnit(timeunit esapi.TimeUnit) (int, byte, error) { + strvalues := agePattern.FindStringSubmatch(string(timeunit)) + if len(strvalues) != 3 { + return 0, 0, fmt.Errorf("age pattern mismatch") + } + age, _ := strconv.Atoi(strvalues[1]) + unit := strvalues[2][0] + return age, unit, nil +} + +func toHotPhaseAge(value int, unit byte) (int, byte, error) { + newval := value * HotPhaseAgeAsPercentOfMaxAge / 100 + + for newval == 0 { + value, newunit, err := convertToLowerUnits(value, unit) + if err != nil { + return 0, 0, err + } + newval = value * HotPhaseAgeAsPercentOfMaxAge / 100 + unit = newunit + } + + return newval, unit, nil +} + +func convertToLowerUnits(value int, unit byte) (int, byte, error) { + + switch unit { + case 's': + return 0, 0, fmt.Errorf("cannot convert \"%d%c\" to lower units", value, unit) + case 'm': + newval := value * 60 + return newval, 's', nil + case 'h', 'H': + newval := value * 60 + return newval, 'm', nil + case 'd': + newval := value * 24 + return newval, 'h', nil + case 'w': + newval := value * 7 + return newval, 'd', nil + case 'M': + newval := value * 30 + return newval, 'd', nil + case 'y': + newval := value * 365 + return newval, 'd', nil + } + + return 0, 0, fmt.Errorf("unknown units") +} diff --git a/pkg/k8shandler/indexmanagement/indexmanagement_suite_test.go 
new file mode 100644
index 0000000000..a57ff958eb
--- /dev/null
+++ b/pkg/k8shandler/indexmanagement/indexmanagement_suite_test.go
@@ -0,0 +1,13 @@
+package indexmanagement_test
+
+import (
+	"testing"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+)
+
+func TestIndexmanagement(t *testing.T) {
+	RegisterFailHandler(Fail)
+	RunSpecs(t, "Indexmanagement Suite")
+}
diff --git a/pkg/k8shandler/indexmanagement/indexmanagement_test.go b/pkg/k8shandler/indexmanagement/indexmanagement_test.go
new file mode 100644
index 0000000000..fea12015d9
--- /dev/null
+++ b/pkg/k8shandler/indexmanagement/indexmanagement_test.go
@@ -0,0 +1,187 @@
+package indexmanagement
+
+import (
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+
+	logging "github.com/openshift/cluster-logging-operator/pkg/apis/logging/v1"
+	esapi "github.com/openshift/elasticsearch-operator/pkg/apis/logging/v1"
+)
+
+var _ = Describe("Indexmanagement", func() {
+
+	var retentionPolicy *logging.RetentionPoliciesSpec
+
+	BeforeEach(func() {
+		retentionPolicy = &logging.RetentionPoliciesSpec{
+			App: &logging.RetentionPolicySpec{
+				MaxAge: esapi.TimeUnit("1h"),
+			},
+			Infra: &logging.RetentionPolicySpec{
+				MaxAge: esapi.TimeUnit("2h"),
+			},
+			Audit: &logging.RetentionPolicySpec{
+				MaxAge: esapi.TimeUnit("3h"),
+			},
+		}
+	})
+
+	Describe("IndexManagement Policy creation failure", func() {
+		Context("when retention policy is not defined", func() {
+			BeforeEach(func() {
+				retentionPolicy = nil
+			})
+			It("should not generate index management", func() {
+				spec := NewSpec(retentionPolicy)
+				Expect(spec).To(BeNil())
+			})
+		})
+		Context("retention policy App log source has low maxAge", func() {
+			BeforeEach(func() {
+				retentionPolicy.App.MaxAge = "10s"
+			})
+			It("should not generate index management", func() {
+				spec := NewSpec(retentionPolicy)
+				Expect(spec).To(BeNil())
+			})
+		})
+		Context("retention policy Infra log source has low maxAge", func() {
+			BeforeEach(func() {
+				retentionPolicy.Infra.MaxAge = "10s"
+			})
+			It("should not generate index management", func() {
+				spec := NewSpec(retentionPolicy)
+				Expect(spec).To(BeNil())
+			})
+		})
+		Context("retention policy Audit log source has low maxAge", func() {
+			BeforeEach(func() {
+				retentionPolicy.Audit.MaxAge = "10s"
+			})
+			It("should not generate index management", func() {
+				spec := NewSpec(retentionPolicy)
+				Expect(spec).To(BeNil())
+			})
+		})
+		Context("retention policy is not defined for any log source", func() {
+			BeforeEach(func() {
+				retentionPolicy.App = nil
+				retentionPolicy.Infra = nil
+				retentionPolicy.Audit = nil
+			})
+			It("should not generate index management", func() {
+				spec := NewSpec(retentionPolicy)
+				Expect(spec).To(BeNil())
+			})
+		})
+	})
+	Describe("IndexManagement Policy creation success", func() {
+		Context("Policy and Mapping generated", func() {
+			It("For All log source types", func() {
+				spec := NewSpec(retentionPolicy)
+				Expect(len(spec.Policies)).To(Equal(3))
+				Expect(len(spec.Mappings)).To(Equal(3))
+			})
+		})
+		Context("Hot Phase durations in created spec ", func() {
+			It("Must conform to the regex", func() {
+				spec := NewSpec(retentionPolicy)
+				Expect(agePattern.Match([]byte(spec.Policies[0].Phases.Hot.Actions.Rollover.MaxAge))).To(Equal(true))
+				Expect(agePattern.Match([]byte(spec.Policies[1].Phases.Hot.Actions.Rollover.MaxAge))).To(Equal(true))
+				Expect(agePattern.Match([]byte(spec.Policies[2].Phases.Hot.Actions.Rollover.MaxAge))).To(Equal(true))
+			})
+		})
+		Context("Delete Phase 
durations in created spec ", func() { + It("Must conform to the regex", func() { + spec := NewSpec(retentionPolicy) + Expect(agePattern.Match([]byte(spec.Policies[0].Phases.Delete.MinAge))).To(Equal(true)) + Expect(agePattern.Match([]byte(spec.Policies[1].Phases.Delete.MinAge))).To(Equal(true)) + Expect(agePattern.Match([]byte(spec.Policies[2].Phases.Delete.MinAge))).To(Equal(true)) + }) + }) + Context("Delete Phase durations in created spec", func() { + It("Must be same as set in retention policy", func() { + spec := NewSpec(retentionPolicy) + Expect(spec.Policies[0].Phases.Delete.MinAge).To(Equal(retentionPolicy.App.MaxAge)) + Expect(spec.Policies[1].Phases.Delete.MinAge).To(Equal(retentionPolicy.Infra.MaxAge)) + Expect(spec.Policies[2].Phases.Delete.MinAge).To(Equal(retentionPolicy.Audit.MaxAge)) + }) + }) + Context("Spec Mappings", func() { + It("Policy-ref should be same as Policy Name", func() { + spec := NewSpec(retentionPolicy) + Expect(spec.Mappings[0].PolicyRef).To(Equal(spec.Policies[0].Name)) + Expect(spec.Mappings[1].PolicyRef).To(Equal(spec.Policies[1].Name)) + Expect(spec.Mappings[2].PolicyRef).To(Equal(spec.Policies[2].Name)) + }) + }) + }) + Describe("Index Management Policy Partial creation", func() { + Context("Retention policy is defined only for App Log Source", func() { + BeforeEach(func() { + retentionPolicy.Infra = nil + retentionPolicy.Audit = nil + }) + It("should generate index management for App log source only", func() { + spec := NewSpec(retentionPolicy) + Expect(len(spec.Policies)).To(Equal(1)) + Expect(spec.Policies[0].Name).To(Equal(PolicyNameApp)) + Expect(len(spec.Mappings)).To(Equal(1)) + Expect(spec.Mappings[0].PolicyRef).To(Equal(PolicyNameApp)) + }) + }) + }) + Describe("TimeUnit tests", func() { + var ( + time int + unit byte + err error + ) + Context("converting to lower units", func() { + It("year to days", func() { + time, unit, err = convertToLowerUnits(1, 'y') + Expect(time).To(Equal(365), "time is incorrect") + Expect(unit).To(Equal(byte('d')), "unit is incorrect") + Expect(err).To(BeNil(), "error must be nil") + }) + It("month to days", func() { + time, unit, err = convertToLowerUnits(1, 'M') + Expect(time).To(Equal(30), "time is incorrect") + Expect(unit).To(Equal(byte('d')), "unit is incorrect") + Expect(err).To(BeNil(), "error must be nil") + }) + It("week to days", func() { + time, unit, err = convertToLowerUnits(1, 'w') + Expect(time).To(Equal(7), "time is incorrect") + Expect(unit).To(Equal(byte('d')), "unit is incorrect") + Expect(err).To(BeNil(), "error must be nil") + }) + It("day to hours", func() { + time, unit, err = convertToLowerUnits(1, 'd') + Expect(time).To(Equal(24), "time is incorrect") + Expect(unit).To(Equal(byte('h')), "unit is incorrect") + Expect(err).To(BeNil(), "error must be nil") + }) + It("hour to minutes", func() { + time, unit, err = convertToLowerUnits(1, 'h') + Expect(time).To(Equal(60), "time is incorrect") + Expect(unit).To(Equal(byte('m')), "unit is incorrect") + Expect(err).To(BeNil(), "error must be nil") + }) + It("minutes to seconds", func() { + time, unit, err = convertToLowerUnits(1, 'm') + Expect(time).To(Equal(60), "time is incorrect") + Expect(unit).To(Equal(byte('s')), "unit is incorrect") + Expect(err).To(BeNil(), "error must be nil") + }) + It("days to seconds", func() { + time, unit, err = convertToLowerUnits(5, 'd') + time, unit, err = convertToLowerUnits(time, unit) + time, unit, err = convertToLowerUnits(time, unit) + Expect(time).To(Equal(5*24*60*60), "time is incorrect") + 
Expect(unit).To(Equal(byte('s')), "unit is incorrect") + Expect(err).To(BeNil(), "error must be nil") + }) + }) + }) +}) diff --git a/pkg/k8shandler/logstore.go b/pkg/k8shandler/logstore.go index e514830678..fcc60bb0e4 100644 --- a/pkg/k8shandler/logstore.go +++ b/pkg/k8shandler/logstore.go @@ -4,6 +4,8 @@ import ( "fmt" "reflect" + "github.com/openshift/cluster-logging-operator/pkg/k8shandler/indexmanagement" + "github.com/openshift/cluster-logging-operator/pkg/logger" "github.com/openshift/cluster-logging-operator/pkg/utils" "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/api/errors" @@ -30,11 +32,11 @@ func (clusterRequest *ClusterLoggingRequest) CreateOrUpdateLogStore() (err error cluster := clusterRequest.cluster if err = clusterRequest.createOrUpdateElasticsearchSecret(); err != nil { - return + return nil } if err = clusterRequest.createOrUpdateElasticsearchCR(); err != nil { - return + return nil } elasticsearchStatus, err := clusterRequest.getElasticsearchStatus() @@ -224,6 +226,8 @@ func newElasticsearchCR(cluster *logging.ClusterLogging, elasticsearchName strin redundancyPolicy = elasticsearch.ZeroRedundancy } + indexManagementSpec := indexmanagement.NewSpec(logStoreSpec.RetentionPolicy) + cr := &elasticsearch.Elasticsearch{ ObjectMeta: metav1.ObjectMeta{ Name: elasticsearchName, @@ -243,6 +247,7 @@ func newElasticsearchCR(cluster *logging.ClusterLogging, elasticsearchName strin Nodes: esNodes, ManagementState: elasticsearch.ManagementStateManaged, RedundancyPolicy: redundancyPolicy, + IndexManagement: indexManagementSpec, }, } @@ -344,6 +349,12 @@ func isElasticsearchCRDifferent(current *elasticsearch.Elasticsearch, desired *e different = true } + if !reflect.DeepEqual(current.Spec.IndexManagement, desired.Spec.IndexManagement) { + logger.Infof("Elasticsearch IndexManagement change found, updating %v", current.Name) + current.Spec.IndexManagement = desired.Spec.IndexManagement + different = true + } + return current, different } diff --git a/pkg/k8shandler/logstore_test.go b/pkg/k8shandler/logstore_test.go index 69fa8d1e3c..d29d340569 100644 --- a/pkg/k8shandler/logstore_test.go +++ b/pkg/k8shandler/logstore_test.go @@ -5,6 +5,7 @@ import ( "testing" logging "github.com/openshift/cluster-logging-operator/pkg/apis/logging/v1" + "github.com/openshift/cluster-logging-operator/pkg/k8shandler/indexmanagement" "github.com/openshift/cluster-logging-operator/pkg/utils" elasticsearch "github.com/openshift/elasticsearch-operator/pkg/apis/logging/v1" esutils "github.com/openshift/elasticsearch-operator/test/utils" @@ -472,3 +473,39 @@ func TestGenUUIDPreservedWhenNodeCountChanges(t *testing.T) { t.Errorf("Expected that original GenUUID would be preserved as %v but was %v", dataUUID, diffCR.Spec.Nodes[0].GenUUID) } } + +func TestIndexManagementChanges(t *testing.T) { + cluster := &logging.ClusterLogging{ + Spec: logging.ClusterLoggingSpec{ + LogStore: &logging.LogStoreSpec{ + Type: "elasticsearch", + RetentionPolicy: &logging.RetentionPoliciesSpec{ + App: &logging.RetentionPolicySpec{ + MaxAge: elasticsearch.TimeUnit("12h"), + }, + }, + }, + }, + } + elasticsearchCR1 := newElasticsearchCR(cluster, "test-app-name") + cluster = &logging.ClusterLogging{ + Spec: logging.ClusterLoggingSpec{ + LogStore: &logging.LogStoreSpec{ + Type: "elasticsearch", + RetentionPolicy: &logging.RetentionPoliciesSpec{ + Audit: &logging.RetentionPolicySpec{ + MaxAge: elasticsearch.TimeUnit("12h"), + }, + }, + }, + }, + } + elasticsearchCR2 := newElasticsearchCR(cluster, "test-app-name") + diffCR, different 
:= isElasticsearchCRDifferent(elasticsearchCR1, elasticsearchCR2) + if !different { + t.Errorf("Expected that difference would be found due to retention policy change") + } + if !(diffCR.Spec.IndexManagement.Policies[0].Name == indexmanagement.PolicyNameAudit) { + t.Errorf("Expected that difference would be found due to retention policy change") + } +} diff --git a/vendor/github.com/openshift/elasticsearch-operator/Gopkg.lock b/vendor/github.com/openshift/elasticsearch-operator/Gopkg.lock index 8afbe4f59e..05ad6dbdb6 100644 --- a/vendor/github.com/openshift/elasticsearch-operator/Gopkg.lock +++ b/vendor/github.com/openshift/elasticsearch-operator/Gopkg.lock @@ -333,6 +333,20 @@ revision = "7087cb70de9f7a8bc0a10c375cb0d2280a8edf9c" version = "v0.5.1" +[[projects]] + digest = "1:76efa3b55850d9caa14f8c0b3a951797f9bc2ffc283526073dcad1b06b6e02d3" + name = "github.com/hpcloud/tail" + packages = [ + ".", + "ratelimiter", + "util", + "watch", + "winfile", + ] + pruneopts = "NT" + revision = "a30252cb686a21eb2d0b98132633053ec2f7f1e5" + version = "v1.0.0" + [[projects]] digest = "1:aaa38889f11896ee3644d77e17dc7764cc47f5f3d3b488268df2af2b52541c5f" name = "github.com/imdario/mergo" @@ -433,6 +447,54 @@ revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" version = "1.0.1" +[[projects]] + digest = "1:75ae86a4e70ad043bf0f60608be1e4f9930ded3a4f5dd88083638a2ad79fa242" + name = "github.com/onsi/ginkgo" + packages = [ + ".", + "config", + "internal/codelocation", + "internal/containernode", + "internal/failer", + "internal/leafnodes", + "internal/remote", + "internal/spec", + "internal/spec_iterator", + "internal/specrunner", + "internal/suite", + "internal/testingtproxy", + "internal/writer", + "reporters", + "reporters/stenographer", + "reporters/stenographer/support/go-colorable", + "reporters/stenographer/support/go-isatty", + "types", + ] + pruneopts = "NT" + revision = "388ac7e50a3abf0798010091d5094171f4aefc0b" + version = "v1.11.0" + +[[projects]] + digest = "1:20b1aeb7c32a78cc38de4912ef13a5a8cbefdd306c070e6570cfc2194ef1a78c" + name = "github.com/onsi/gomega" + packages = [ + ".", + "format", + "internal/assertion", + "internal/asyncassertion", + "internal/oraclematcher", + "internal/testingtsupport", + "matchers", + "matchers/support/goraph/bipartitegraph", + "matchers/support/goraph/edge", + "matchers/support/goraph/node", + "matchers/support/goraph/util", + "types", + ] + pruneopts = "NT" + revision = "22a9feb22fe99439870ed2e013401725a9d495df" + version = "v1.8.1" + [[projects]] digest = "1:0087f38751ec1995bafa67afd0ded9519a46297ecf571165c695fb9ba943688d" name = "github.com/operator-framework/operator-sdk" @@ -643,11 +705,14 @@ [[projects]] branch = "master" - digest = "1:63708a2bcd353bb6b8392bf265d125c6a11ba8ae284e3548ef75360f11118fdd" + digest = "1:9502fc6cda0a919c2790e1123ee4beeb437a5d5886d25b0d78f681e76613c4fa" name = "golang.org/x/net" packages = [ "context", "context/ctxhttp", + "html", + "html/atom", + "html/charset", "http/httpguts", "http2", "http2/hpack", @@ -656,7 +721,7 @@ "trace", ] pruneopts = "NT" - revision = "eb5bcb51f2a31c7d5141d810b70815c05d9c9146" + revision = "c0dbc17a35534bf2e581d7a942408dc936316da4" [[projects]] branch = "master" @@ -697,12 +762,24 @@ packages = [ "collate", "collate/build", + "encoding", + "encoding/charmap", + "encoding/htmlindex", + "encoding/internal", + "encoding/internal/identifier", + "encoding/japanese", + "encoding/korean", + "encoding/simplifiedchinese", + "encoding/traditionalchinese", + "encoding/unicode", "internal/colltab", "internal/gen", 
"internal/tag", "internal/triegen", "internal/ucd", + "internal/utf8internal", "language", + "runes", "secure/bidirule", "transform", "unicode/bidi", @@ -743,6 +820,17 @@ pruneopts = "NT" revision = "052fc3cfdbc2c9e9082b1d51f850b7974b5efb2a" +[[projects]] + branch = "master" + digest = "1:325a1b73817aa79fc85f10376ed41ea607f7dd7f6c6bfa46df6a28ef2857941f" + name = "golang.org/x/xerrors" + packages = [ + ".", + "internal", + ] + pruneopts = "NT" + revision = "9bdfabe68543c54f90421aeb9a60ef8061b5b544" + [[projects]] digest = "1:7f41328f018b3c6cc4952cb0acc044984b290c8bada1f6644340835132bfad1e" name = "google.golang.org/api" @@ -822,6 +910,15 @@ revision = "3507fb8e1a5ad030303c106fef3a47c9fdad16ad" version = "v1.19.1" +[[projects]] + digest = "1:1b91ae0dc69a41d4c2ed23ea5cffb721ea63f5037ca4b81e6d6771fbb8f45129" + name = "gopkg.in/fsnotify.v1" + packages = ["."] + pruneopts = "NT" + revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9" + source = "https://github.com/fsnotify/fsnotify.git" + version = "v1.4.7" + [[projects]] digest = "1:2d1fbdc6777e5408cabeb02bf336305e724b925ff4546ded0fa8715a7267922a" name = "gopkg.in/inf.v0" @@ -830,6 +927,14 @@ revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf" version = "v0.9.1" +[[projects]] + branch = "v1" + digest = "1:8fb1ccb16a6cfecbfdfeb84d8ea1cc7afa8f9ef16526bc2326f72d993e32cef1" + name = "gopkg.in/tomb.v1" + packages = ["."] + pruneopts = "NT" + revision = "dd632973f1e7218eb1089048e0798ec9ae7dceb8" + [[projects]] digest = "1:18108594151654e9e696b27b181b953f9a90b16bf14d253dd1b397b025a1487f" name = "gopkg.in/yaml.v2" @@ -1161,6 +1266,8 @@ "github.com/go-logr/logr", "github.com/go-openapi/spec", "github.com/inhies/go-bytesize", + "github.com/onsi/ginkgo", + "github.com/onsi/gomega", "github.com/operator-framework/operator-sdk/pkg/k8sutil", "github.com/operator-framework/operator-sdk/pkg/leader", "github.com/operator-framework/operator-sdk/pkg/log/zap", diff --git a/vendor/github.com/openshift/elasticsearch-operator/Gopkg.toml b/vendor/github.com/openshift/elasticsearch-operator/Gopkg.toml index 83c95b7371..fad54ab137 100644 --- a/vendor/github.com/openshift/elasticsearch-operator/Gopkg.toml +++ b/vendor/github.com/openshift/elasticsearch-operator/Gopkg.toml @@ -62,6 +62,9 @@ required = [ # The version rule is used for a specific release and the master branch for in between releases. 
 # branch = "master" #osdk_branch_annotation
 version = "=v0.8.2" #osdk_version_annotation
+[[override]]
+  name = "gopkg.in/fsnotify.v1"
+  source = "https://github.com/fsnotify/fsnotify.git"
 
 [prune]
   go-tests = true
@@ -74,3 +77,7 @@ required = [
 [[prune.project]]
     name = "k8s.io/gengo"
     non-go = false
+
+[[constraint]]
+  name = "github.com/onsi/ginkgo"
+  version = "1.10.3"
diff --git a/vendor/github.com/openshift/elasticsearch-operator/manifests/4.4/elasticsearches.crd.yaml b/vendor/github.com/openshift/elasticsearch-operator/manifests/4.4/elasticsearches.crd.yaml
index 1f2f7ffd40..c42276ae75 100644
--- a/vendor/github.com/openshift/elasticsearch-operator/manifests/4.4/elasticsearches.crd.yaml
+++ b/vendor/github.com/openshift/elasticsearch-operator/manifests/4.4/elasticsearches.crd.yaml
@@ -77,6 +77,54 @@ spec:
           size:
             description: The max storage capacity for the node
             type: string
+          indexManagement:
+            description: Management spec for indices
+            properties:
+              policies:
+                description: A list of policies for managing indices
+                type: array
+                items:
+                  properties:
+                    name:
+                      description: The unique name of the policy
+                      type: string
+                    pollInterval:
+                      description: How often to check an index meets the desired criteria (e.g. 1m)
+                      type: string
+                    phases:
+                      type: object
+                      properties:
+                        delete:
+                          type: object
+                          properties:
+                            minAge:
+                              description: The minimum age of an index before it should be deleted (e.g. 10d)
+                              type: string
+                        hot:
+                          type: object
+                          properties:
+                            actions:
+                              type: object
+                              properties:
+                                maxAge:
+                                  description: The maximum age of an index before it should be rolled over (e.g. 7d)
+                                  type: string
+              mappings:
+                description: Mappings of policies to indices
+                type: array
+                items:
+                  properties:
+                    name:
+                      description: The unique name of the policy mapping
+                      type: string
+                    policyRef:
+                      description: A reference to a defined policy
+                      type: string
+                    aliases:
+                      description: Aliases to apply to a template
+                      type: array
+                      items:
+                        type: string
           nodeSpec:
             description: Default specification applied to all Elasticsearch nodes
             properties:
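For orientation, here is what a spec conforming to this indexManagement schema looks like when built with the Go types introduced below in index_management_types.go (an illustrative sketch, not code from the patch; the values are examples):

package main

import (
	"fmt"

	esapi "github.com/openshift/elasticsearch-operator/pkg/apis/logging/v1"
)

func main() {
	// One policy that rolls the write index over every 8h and deletes indices
	// older than 7d, evaluated every 15m, mapped onto the "app" aliases.
	spec := esapi.IndexManagementSpec{
		Policies: []esapi.IndexManagementPolicySpec{{
			Name:         "app-policy",
			PollInterval: "15m",
			Phases: esapi.IndexManagementPhasesSpec{
				Hot: &esapi.IndexManagementHotPhaseSpec{
					Actions: esapi.IndexManagementActionsSpec{
						Rollover: &esapi.IndexManagementActionSpec{MaxAge: "8h"},
					},
				},
				Delete: &esapi.IndexManagementDeletePhaseSpec{MinAge: "7d"},
			},
		}},
		Mappings: []esapi.IndexManagementPolicyMappingSpec{{
			Name:      "app",
			PolicyRef: "app-policy",
			Aliases:   []string{"app", "logs.app"},
		}},
	}
	pm := spec.PolicyMap()
	fmt.Println(pm.HasPolicy("app-policy")) // true
}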
diff --git a/vendor/github.com/openshift/elasticsearch-operator/pkg/apis/logging/v1/elasticsearch_types.go b/vendor/github.com/openshift/elasticsearch-operator/pkg/apis/logging/v1/elasticsearch_types.go
index c7c0bb51e0..4dff5f3465 100644
--- a/vendor/github.com/openshift/elasticsearch-operator/pkg/apis/logging/v1/elasticsearch_types.go
+++ b/vendor/github.com/openshift/elasticsearch-operator/pkg/apis/logging/v1/elasticsearch_types.go
@@ -48,6 +48,7 @@ type ElasticsearchSpec struct {
 	RedundancyPolicy RedundancyPolicyType  `json:"redundancyPolicy"`
 	Nodes            []ElasticsearchNode   `json:"nodes"`
 	Spec             ElasticsearchNodeSpec `json:"nodeSpec"`
+	IndexManagement  *IndexManagementSpec  `json:"indexManagement"`
 }
 
 // ElasticsearchStatus defines the observed state of Elasticsearch
@@ -63,6 +64,7 @@ type ElasticsearchStatus struct {
 	ShardAllocationEnabled ShardAllocationState                  `json:"shardAllocationEnabled"`
 	Pods                   map[ElasticsearchNodeRole]PodStateMap `json:"pods"`
 	Conditions             []ClusterCondition                    `json:"conditions"`
+	IndexManagementStatus  *IndexManagementStatus                `json:"indexManagement,omitempty"`
 }
 
 type ClusterHealth struct {
diff --git a/vendor/github.com/openshift/elasticsearch-operator/pkg/apis/logging/v1/index_management_types.go b/vendor/github.com/openshift/elasticsearch-operator/pkg/apis/logging/v1/index_management_types.go
new file mode 100644
index 0000000000..9aee22d0f1
--- /dev/null
+++ b/vendor/github.com/openshift/elasticsearch-operator/pkg/apis/logging/v1/index_management_types.go
@@ -0,0 +1,265 @@
+package v1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// IndexManagementSpec specifies index management for an Elasticsearch cluster
+// +k8s:openapi-gen=true
+type IndexManagementSpec struct {
+	Policies []IndexManagementPolicySpec        `json:"policies"`
+	Mappings []IndexManagementPolicyMappingSpec `json:"mappings"`
+}
+
+//TimeUnit is a time unit like h,m,d
+type TimeUnit string
+
+//IndexManagementPolicySpec is a definition of an index management policy
+// +k8s:openapi-gen=true
+type IndexManagementPolicySpec struct {
+	Name         string                    `json:"name"`
+	PollInterval TimeUnit                  `json:"pollInterval"`
+	Phases       IndexManagementPhasesSpec `json:"phases"`
+}
+
+// +k8s:openapi-gen=true
+type IndexManagementPhasesSpec struct {
+	Hot    *IndexManagementHotPhaseSpec    `json:"hot,omitempty"`
+	Delete *IndexManagementDeletePhaseSpec `json:"delete,omitempty"`
+}
+
+// +k8s:openapi-gen=true
+type IndexManagementDeletePhaseSpec struct {
+	MinAge TimeUnit `json:"minAge"`
+}
+
+// +k8s:openapi-gen=true
+type IndexManagementHotPhaseSpec struct {
+	Actions IndexManagementActionsSpec `json:"actions"`
+}
+
+// +k8s:openapi-gen=true
+type IndexManagementActionsSpec struct {
+	Rollover *IndexManagementActionSpec `json:"rollover"`
+}
+
+// +k8s:openapi-gen=true
+type IndexManagementActionSpec struct {
+	MaxAge TimeUnit `json:"maxAge"`
+}
+
+//IndexManagementPolicyMappingSpec maps a management policy to an index
+// +k8s:openapi-gen=true
+type IndexManagementPolicyMappingSpec struct {
+	Name      string   `json:"name"`
+	PolicyRef string   `json:"policyRef"`
+	Aliases   []string `json:"aliases,omitempty"`
+}
+
+type PolicyMap map[string]IndexManagementPolicySpec
+
+func (spec *IndexManagementSpec) PolicyMap() PolicyMap {
+	policyMap := map[string]IndexManagementPolicySpec{}
+	for _, spec := range spec.Policies {
+		policyMap[spec.Name] = spec
+	}
+	return policyMap
+}
+
+func (policyMap *PolicyMap) HasPolicy(name string) bool {
+	_, found := map[string]IndexManagementPolicySpec(*policyMap)[name]
+	return found
+}
+
+// +k8s:openapi-gen=true
+type IndexManagementStatus struct {
+	State       IndexManagementState          `json:"state,omitempty"`
+	Reason      IndexManagementStatusReason   `json:"reason,omitempty"`
+	Message     string                        `json:"message,omitempty"`
+	LastUpdated metav1.Time                   `json:"lastUpdated,omitempty"`
+	Policies    []IndexManagementPolicyStatus `json:"policies,omitempty"`
+	Mappings    []IndexManagementMappingStatus `json:"mappings,omitempty"`
+}
+
+func NewIndexManagementStatus() *IndexManagementStatus {
+	return &IndexManagementStatus{
+		State:       IndexManagementStateAccepted,
+		Reason:      IndexManagementStatusReasonPassed,
+		LastUpdated: metav1.Now(),
+	}
+}
+
+//IndexManagementState of IndexManagement
+type IndexManagementState string
+
+const (
+	//IndexManagementStateAccepted when policies and mappings are well defined and pass validations
+	IndexManagementStateAccepted IndexManagementState = "Accepted"
+
+	//IndexManagementStateDegraded some policies and mappings have failed validations
+	IndexManagementStateDegraded IndexManagementState = "Degraded"
+
+	//IndexManagementStateDropped when IndexManagement is not defined or there are no valid policies and mappings
+	IndexManagementStateDropped IndexManagementState = "Dropped"
+)
+
+type IndexManagementStatusReason string
+
+const (
+	IndexManagementStatusReasonPassed           = "PassedValidation"
+	IndexManagementStatusReasonUndefined        = "Undefined"
+	IndexManagementStatusReasonValidationFailed = "OneOrMoreValidationsFailed"
+)
+
+type
IndexManagementMappingStatus struct { + //Name of the corresponding mapping for this status + Name string `json:"name,omitempty"` + + //State of the corresponding mapping for this status + State IndexManagementMappingState `json:"state,omitempty"` + + Reason IndexManagementMappingReason `json:"reason,omitempty"` + + Message string `json:"message,omitempty"` + + //Reasons for the state of the corresponding mapping for this status + Conditions []IndexManagementMappingCondition `json:"conditions,omitempty"` + + // LastUpdated represents the last time that the status was updated. + LastUpdated metav1.Time `json:"lastUpdated,omitempty"` +} + +func NewIndexManagementMappingStatus(name string) *IndexManagementMappingStatus { + return &IndexManagementMappingStatus{ + Name: name, + State: IndexManagementMappingStateAccepted, + Reason: IndexManagementMappingReasonConditionsMet, + LastUpdated: metav1.Now(), + } +} + +func (status *IndexManagementMappingStatus) AddPolicyMappingCondition(conditionType IndexManagementMappingConditionType, reason IndexManagementMappingConditionReason, message string) { + status.Conditions = append(status.Conditions, IndexManagementMappingCondition{ + Type: conditionType, + Reason: reason, + Status: corev1.ConditionFalse, + Message: message, + }) +} + +type IndexManagementMappingState string + +const ( + //IndexManagementMappingStateAccepted passes validations + IndexManagementMappingStateAccepted IndexManagementMappingState = "Accepted" + + //IndexManagementMappingStateDropped fails validations + IndexManagementMappingStateDropped IndexManagementMappingState = "Dropped" +) + +type IndexManagementMappingReason string + +const ( + IndexManagementMappingReasonConditionsMet IndexManagementMappingReason = "ConditionsMet" + IndexManagementMappingReasonConditionsNotMet IndexManagementMappingReason = "ConditionsNotMet" +) + +type IndexManagementMappingCondition struct { + Type IndexManagementMappingConditionType `json:"type,omitempty"` + Reason IndexManagementMappingConditionReason `json:"reason,omitempty"` + Status corev1.ConditionStatus `json:"status,omitempty"` + Message string `json:"message,omitempty"` +} + +type IndexManagementMappingConditionType string + +const ( + IndexManagementMappingConditionTypeName IndexManagementMappingConditionType = "Name" + IndexManagementMappingConditionTypePolicyRef IndexManagementMappingConditionType = "PolicyRef" +) + +type IndexManagementMappingConditionReason string + +const ( + IndexManagementMappingReasonMissing IndexManagementMappingConditionReason = "Missing" + IndexManagementMappingReasonNonUnique IndexManagementMappingConditionReason = "NonUnique" +) + +type IndexManagementPolicyStatus struct { + //Name of the corresponding policy for this status + Name string `json:"name,omitempty"` + + //State of the corresponding policy for this status + State IndexManagementPolicyState `json:"state,omitempty"` + + //Reasons for the state of the corresponding policy for this status + Reason IndexManagementPolicyReason `json:"reason,omitempty"` + + //Message about the corresponding policy + Message string `json:"message,omitempty"` + + //Reasons for the state of the corresponding policy for this status + Conditions []IndexManagementPolicyCondition `json:"conditions,omitempty"` + + // LastUpdated represents the last time that the status was updated. 
+ LastUpdated metav1.Time `json:"lastUpdated,omitempty"` +} + +func NewIndexManagementPolicyStatus(name string) *IndexManagementPolicyStatus { + return &IndexManagementPolicyStatus{ + Name: name, + State: IndexManagementPolicyStateAccepted, + Reason: IndexManagementPolicyReasonConditionsMet, + LastUpdated: metav1.Now(), + } +} + +func (status *IndexManagementPolicyStatus) AddPolicyCondition(conditionType IndexManagementPolicyConditionType, reason IndexManagementPolicyConditionReason, message string) { + status.Conditions = append(status.Conditions, IndexManagementPolicyCondition{ + Type: conditionType, + Reason: reason, + Status: corev1.ConditionFalse, + Message: message, + }) +} + +type IndexManagementPolicyState string + +const ( + //IndexManagementPolicyStateAccepted passes validations + IndexManagementPolicyStateAccepted IndexManagementPolicyState = "Accepted" + + //IndexManagementPolicyStateDropped fails validations + IndexManagementPolicyStateDropped IndexManagementPolicyState = "Dropped" +) + +type IndexManagementPolicyReason string + +const ( + IndexManagementPolicyReasonConditionsMet IndexManagementPolicyReason = "ConditionsMet" + IndexManagementPolicyReasonConditionsNotMet IndexManagementPolicyReason = "ConditionsNotMet" +) + +type IndexManagementPolicyCondition struct { + Type IndexManagementPolicyConditionType `json:"type,omitempty"` + Reason IndexManagementPolicyConditionReason `json:"reason,omitempty"` + Status corev1.ConditionStatus `json:"status,omitempty"` + Message string `json:"message,omitempty"` +} + +type IndexManagementPolicyConditionType string + +const ( + IndexManagementPolicyConditionTypeName IndexManagementPolicyConditionType = "Name" + IndexManagementPolicyConditionTypePollInterval IndexManagementPolicyConditionType = "PollInterval" + IndexManagementPolicyConditionTypeTimeUnit IndexManagementPolicyConditionType = "TimeUnit" +) + +type IndexManagementPolicyConditionReason string + +const ( + IndexManagementPolicyReasonMalformed IndexManagementPolicyConditionReason = "MalFormed" + IndexManagementPolicyReasonMissing IndexManagementPolicyConditionReason = "Missing" + IndexManagementPolicyReasonNonUnique IndexManagementPolicyConditionReason = "NonUnique" +) diff --git a/vendor/github.com/openshift/elasticsearch-operator/pkg/apis/logging/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/elasticsearch-operator/pkg/apis/logging/v1/zz_generated.deepcopy.go index a715e11241..f9b4f0c16d 100644 --- a/vendor/github.com/openshift/elasticsearch-operator/pkg/apis/logging/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/elasticsearch-operator/pkg/apis/logging/v1/zz_generated.deepcopy.go @@ -232,6 +232,11 @@ func (in *ElasticsearchSpec) DeepCopyInto(out *ElasticsearchSpec) { } } in.Spec.DeepCopyInto(&out.Spec) + if in.IndexManagement != nil { + in, out := &in.IndexManagement, &out.IndexManagement + *out = new(IndexManagementSpec) + (*in).DeepCopyInto(*out) + } return } @@ -288,6 +293,11 @@ func (in *ElasticsearchStatus) DeepCopyInto(out *ElasticsearchStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.IndexManagementStatus != nil { + in, out := &in.IndexManagementStatus, &out.IndexManagementStatus + *out = new(IndexManagementStatus) + (*in).DeepCopyInto(*out) + } return } @@ -327,6 +337,277 @@ func (in *ElasticsearchStorageSpec) DeepCopy() *ElasticsearchStorageSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IndexManagementActionSpec) DeepCopyInto(out *IndexManagementActionSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexManagementActionSpec. +func (in *IndexManagementActionSpec) DeepCopy() *IndexManagementActionSpec { + if in == nil { + return nil + } + out := new(IndexManagementActionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexManagementActionsSpec) DeepCopyInto(out *IndexManagementActionsSpec) { + *out = *in + if in.Rollover != nil { + in, out := &in.Rollover, &out.Rollover + *out = new(IndexManagementActionSpec) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexManagementActionsSpec. +func (in *IndexManagementActionsSpec) DeepCopy() *IndexManagementActionsSpec { + if in == nil { + return nil + } + out := new(IndexManagementActionsSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexManagementDeletePhaseSpec) DeepCopyInto(out *IndexManagementDeletePhaseSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexManagementDeletePhaseSpec. +func (in *IndexManagementDeletePhaseSpec) DeepCopy() *IndexManagementDeletePhaseSpec { + if in == nil { + return nil + } + out := new(IndexManagementDeletePhaseSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexManagementHotPhaseSpec) DeepCopyInto(out *IndexManagementHotPhaseSpec) { + *out = *in + in.Actions.DeepCopyInto(&out.Actions) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexManagementHotPhaseSpec. +func (in *IndexManagementHotPhaseSpec) DeepCopy() *IndexManagementHotPhaseSpec { + if in == nil { + return nil + } + out := new(IndexManagementHotPhaseSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexManagementMappingCondition) DeepCopyInto(out *IndexManagementMappingCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexManagementMappingCondition. +func (in *IndexManagementMappingCondition) DeepCopy() *IndexManagementMappingCondition { + if in == nil { + return nil + } + out := new(IndexManagementMappingCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexManagementMappingStatus) DeepCopyInto(out *IndexManagementMappingStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]IndexManagementMappingCondition, len(*in)) + copy(*out, *in) + } + in.LastUpdated.DeepCopyInto(&out.LastUpdated) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexManagementMappingStatus. 
+func (in *IndexManagementMappingStatus) DeepCopy() *IndexManagementMappingStatus { + if in == nil { + return nil + } + out := new(IndexManagementMappingStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexManagementPhasesSpec) DeepCopyInto(out *IndexManagementPhasesSpec) { + *out = *in + if in.Hot != nil { + in, out := &in.Hot, &out.Hot + *out = new(IndexManagementHotPhaseSpec) + (*in).DeepCopyInto(*out) + } + if in.Delete != nil { + in, out := &in.Delete, &out.Delete + *out = new(IndexManagementDeletePhaseSpec) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexManagementPhasesSpec. +func (in *IndexManagementPhasesSpec) DeepCopy() *IndexManagementPhasesSpec { + if in == nil { + return nil + } + out := new(IndexManagementPhasesSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexManagementPolicyCondition) DeepCopyInto(out *IndexManagementPolicyCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexManagementPolicyCondition. +func (in *IndexManagementPolicyCondition) DeepCopy() *IndexManagementPolicyCondition { + if in == nil { + return nil + } + out := new(IndexManagementPolicyCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexManagementPolicyMappingSpec) DeepCopyInto(out *IndexManagementPolicyMappingSpec) { + *out = *in + if in.Aliases != nil { + in, out := &in.Aliases, &out.Aliases + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexManagementPolicyMappingSpec. +func (in *IndexManagementPolicyMappingSpec) DeepCopy() *IndexManagementPolicyMappingSpec { + if in == nil { + return nil + } + out := new(IndexManagementPolicyMappingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexManagementPolicySpec) DeepCopyInto(out *IndexManagementPolicySpec) { + *out = *in + in.Phases.DeepCopyInto(&out.Phases) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexManagementPolicySpec. +func (in *IndexManagementPolicySpec) DeepCopy() *IndexManagementPolicySpec { + if in == nil { + return nil + } + out := new(IndexManagementPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexManagementPolicyStatus) DeepCopyInto(out *IndexManagementPolicyStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]IndexManagementPolicyCondition, len(*in)) + copy(*out, *in) + } + in.LastUpdated.DeepCopyInto(&out.LastUpdated) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexManagementPolicyStatus. 
+func (in *IndexManagementPolicyStatus) DeepCopy() *IndexManagementPolicyStatus { + if in == nil { + return nil + } + out := new(IndexManagementPolicyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexManagementSpec) DeepCopyInto(out *IndexManagementSpec) { + *out = *in + if in.Policies != nil { + in, out := &in.Policies, &out.Policies + *out = make([]IndexManagementPolicySpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Mappings != nil { + in, out := &in.Mappings, &out.Mappings + *out = make([]IndexManagementPolicyMappingSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexManagementSpec. +func (in *IndexManagementSpec) DeepCopy() *IndexManagementSpec { + if in == nil { + return nil + } + out := new(IndexManagementSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexManagementStatus) DeepCopyInto(out *IndexManagementStatus) { + *out = *in + in.LastUpdated.DeepCopyInto(&out.LastUpdated) + if in.Policies != nil { + in, out := &in.Policies, &out.Policies + *out = make([]IndexManagementPolicyStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Mappings != nil { + in, out := &in.Mappings, &out.Mappings + *out = make([]IndexManagementMappingStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexManagementStatus. +func (in *IndexManagementStatus) DeepCopy() *IndexManagementStatus { + if in == nil { + return nil + } + out := new(IndexManagementStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in PodStateMap) DeepCopyInto(out *PodStateMap) { { @@ -356,3 +637,25 @@ func (in PodStateMap) DeepCopy() PodStateMap { in.DeepCopyInto(out) return *out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in PolicyMap) DeepCopyInto(out *PolicyMap) { + { + in := &in + *out = make(PolicyMap, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyMap. +func (in PolicyMap) DeepCopy() PolicyMap { + if in == nil { + return nil + } + out := new(PolicyMap) + in.DeepCopyInto(out) + return *out +} diff --git a/vendor/github.com/openshift/elasticsearch-operator/pkg/indexmanagement/validations.go b/vendor/github.com/openshift/elasticsearch-operator/pkg/indexmanagement/validations.go new file mode 100644 index 0000000000..834fb82dbd --- /dev/null +++ b/vendor/github.com/openshift/elasticsearch-operator/pkg/indexmanagement/validations.go @@ -0,0 +1,124 @@ +package indexmanagement + +import ( + "fmt" + "regexp" + "strings" + + esapi "github.com/openshift/elasticsearch-operator/pkg/apis/logging/v1" +) + +var ( + reTimeUnit = regexp.MustCompile("^\\d+[yMwdhHms]$") +) + +const ( + pollIntervalFailMessage = "The pollInterval is missing or requires a valid time unit (e.g. 
3d)" + phaseTimeUnitFailMessage = "The %s phase '%s' is missing or requires a valid time unit (e.g. 3d)" + policyRefFailMessage = "A policy mapping must reference a defined IndexManagement policy" +) + +//VerifyAndNormalize validates the spec'd indexManagement and returns a spec which removes policies +//and mappings that are invalid +func VerifyAndNormalize(cluster *esapi.Elasticsearch) *esapi.IndexManagementSpec { + result := &esapi.IndexManagementSpec{} + status := esapi.NewIndexManagementStatus() + cluster.Status.IndexManagementStatus = status + if cluster.Spec.IndexManagement == nil || (len(cluster.Spec.IndexManagement.Mappings) == 0 && len(cluster.Spec.IndexManagement.Policies) == 0) { + status.State = esapi.IndexManagementStateDropped + status.Reason = esapi.IndexManagementStatusReasonUndefined + status.Message = "IndexManagement was not defined" + return nil + } + validatePolicies(cluster, result) + validateMappings(cluster, result) + if len(result.Mappings) != len(cluster.Spec.IndexManagement.Mappings) || len(result.Policies) != len(cluster.Spec.IndexManagement.Policies) { + status.State = esapi.IndexManagementStateDegraded + status.Reason = esapi.IndexManagementStatusReasonValidationFailed + } + if len(result.Mappings) == 0 && len(result.Policies) == 0 { + status.State = esapi.IndexManagementStateDropped + } + return result +} + +func validatePolicies(cluster *esapi.Elasticsearch, result *esapi.IndexManagementSpec) { + if cluster.Spec.IndexManagement == nil { + return + } + policyNames := map[string]interface{}{} + for n, policy := range cluster.Spec.IndexManagement.Policies { + status := esapi.NewIndexManagementPolicyStatus(policy.Name) + if strings.TrimSpace(policy.Name) == "" { + status.Name = fmt.Sprintf("policy[%d]", n) + status.AddPolicyCondition(esapi.IndexManagementPolicyConditionTypeName, esapi.IndexManagementPolicyReasonMissing, "") + } else { + if len(policyNames) > 0 { + if _, found := policyNames[policy.Name]; found { + status.Name = fmt.Sprintf("policy[%d]", n) + status.AddPolicyCondition(esapi.IndexManagementPolicyConditionTypeName, esapi.IndexManagementPolicyReasonNonUnique, "") + } + } + policyNames[policy.Name] = "" + } + if !isValidTimeUnit(policy.PollInterval) { + status.AddPolicyCondition(esapi.IndexManagementPolicyConditionTypePollInterval, esapi.IndexManagementPolicyReasonMalformed, pollIntervalFailMessage) + } + if policy.Phases.Hot != nil { + if policy.Phases.Hot.Actions.Rollover == nil || !isValidTimeUnit(policy.Phases.Hot.Actions.Rollover.MaxAge) { + message := fmt.Sprintf(phaseTimeUnitFailMessage, "hot", "maxAge") + status.AddPolicyCondition(esapi.IndexManagementPolicyConditionTypeTimeUnit, esapi.IndexManagementPolicyReasonMalformed, message) + } + } + if policy.Phases.Delete != nil { + if !isValidTimeUnit(policy.Phases.Delete.MinAge) { + message := fmt.Sprintf(phaseTimeUnitFailMessage, "delete", "minAge") + status.AddPolicyCondition(esapi.IndexManagementPolicyConditionTypeTimeUnit, esapi.IndexManagementPolicyReasonMalformed, message) + } + } + if len(status.Conditions) > 0 { + status.State = esapi.IndexManagementPolicyStateDropped + status.Reason = esapi.IndexManagementPolicyReasonConditionsNotMet + } else { + result.Policies = append(result.Policies, policy) + } + cluster.Status.IndexManagementStatus.Policies = append(cluster.Status.IndexManagementStatus.Policies, *status) + } +} + +func isValidTimeUnit(time esapi.TimeUnit) bool { + return reTimeUnit.MatchString(string(time)) +} + +func validateMappings(cluster *esapi.Elasticsearch, result 
*esapi.IndexManagementSpec) { + if cluster.Spec.IndexManagement == nil { + return + } + policies := cluster.Spec.IndexManagement.PolicyMap() + mappingNames := map[string]interface{}{} + for n, mapping := range cluster.Spec.IndexManagement.Mappings { + status := esapi.NewIndexManagementMappingStatus(mapping.Name) + if strings.TrimSpace(mapping.Name) == "" { + status.Name = fmt.Sprintf("mapping[%d]", n) + status.AddPolicyMappingCondition(esapi.IndexManagementMappingConditionTypeName, esapi.IndexManagementMappingReasonMissing, "") + } else { + if len(mappingNames) > 0 { + if _, found := mappingNames[mapping.Name]; found { + status.Name = fmt.Sprintf("mapping[%d]", n) + status.AddPolicyMappingCondition(esapi.IndexManagementMappingConditionTypeName, esapi.IndexManagementMappingReasonNonUnique, "") + } + } + mappingNames[mapping.Name] = "" + } + if !policies.HasPolicy(mapping.PolicyRef) { + status.AddPolicyMappingCondition(esapi.IndexManagementMappingConditionTypePolicyRef, esapi.IndexManagementMappingReasonMissing, policyRefFailMessage) + } + if len(status.Conditions) > 0 { + status.State = esapi.IndexManagementMappingStateDropped + status.Reason = esapi.IndexManagementMappingReasonConditionsNotMet + } else { + result.Mappings = append(result.Mappings, mapping) + } + cluster.Status.IndexManagementStatus.Mappings = append(cluster.Status.IndexManagementStatus.Mappings, *status) + } +} diff --git a/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/configuration_tmpl.go b/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/configuration_tmpl.go index dc679ba9f2..f1326f17c8 100644 --- a/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/configuration_tmpl.go +++ b/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/configuration_tmpl.go @@ -4,10 +4,6 @@ const esYmlTmpl = ` cluster: name: ${CLUSTER_NAME} -script: - inline: true - stored: true - node: name: ${DC_NAME} master: ${IS_MASTER} @@ -26,29 +22,14 @@ gateway: expected_nodes: {{.RecoverExpectedShards}} recover_after_time: ${RECOVER_AFTER_TIME} -io.fabric8.elasticsearch.kibana.mapping.app: /usr/share/elasticsearch/index_patterns/com.redhat.viaq-openshift.index-pattern.json -io.fabric8.elasticsearch.kibana.mapping.ops: /usr/share/elasticsearch/index_patterns/com.redhat.viaq-openshift.index-pattern.json -io.fabric8.elasticsearch.kibana.mapping.empty: /usr/share/elasticsearch/index_patterns/com.redhat.viaq-openshift.index-pattern.json - -openshift.config: - use_common_data_model: true - project_index_prefix: "project" - time_field_name: "@timestamp" - -openshift.searchguard: - keystore.path: /etc/elasticsearch/secret/admin.jks - truststore.path: /etc/elasticsearch/secret/searchguard.truststore - -openshift.kibana.index.mode: {{.KibanaIndexMode}} - path: data: /elasticsearch/persistent/${CLUSTER_NAME}/data logs: /elasticsearch/persistent/${CLUSTER_NAME}/logs -searchguard: +opendistro_security: authcz.admin_dn: - CN=system.admin,OU=OpenShift,O=Logging - config_index_name: ".searchguard" + config_index_name: ".security" ssl: transport: enabled: true diff --git a/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/elasticsearch.go b/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/elasticsearch.go index 21a26d8ada..7e89aa3215 100644 --- a/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/elasticsearch.go +++ b/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/elasticsearch.go @@ -8,6 +8,9 @@ import ( "github.com/inhies/go-bytesize" 
api "github.com/openshift/elasticsearch-operator/pkg/apis/logging/v1" + estypes "github.com/openshift/elasticsearch-operator/pkg/types/elasticsearch" + "github.com/openshift/elasticsearch-operator/pkg/utils" + "k8s.io/apimachinery/pkg/util/sets" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -329,3 +332,105 @@ func UpdateReplicaCount(clusterName, namespace string, client client.Client, rep return false, nil } + +func (req *ElasticsearchRequest) CreateIndex(name string, index *estypes.Index) error { + body, err := utils.ToJson(index) + if err != nil { + return err + } + payload := &esCurlStruct{ + Method: http.MethodPut, + URI: name, + RequestBody: body, + } + + req.FnCurlEsService(req.cluster.Name, req.cluster.Namespace, payload, req.client) + if payload.Error != nil { + return payload.Error + } + if payload.StatusCode != 200 && payload.StatusCode != 201 { + return fmt.Errorf("There was an error creating index %s. Error code: %v, %v", index.Name, payload.StatusCode != 200, payload.RequestBody) + } + return nil +} +func (req *ElasticsearchRequest) CreateIndexTemplate(name string, template *estypes.IndexTemplate) error { + body, err := utils.ToJson(template) + if err != nil { + return err + } + payload := &esCurlStruct{ + Method: http.MethodPut, + URI: fmt.Sprintf("_template/%s", name), + RequestBody: body, + } + + req.FnCurlEsService(req.cluster.Name, req.cluster.Namespace, payload, req.client) + if payload.Error != nil { + return payload.Error + } + if payload.StatusCode != 200 && payload.StatusCode != 201 { + return fmt.Errorf("There was an error creating index template %s. Error code: %v, %v", name, payload.StatusCode != 200, payload.RequestBody) + } + return nil +} + +func (req *ElasticsearchRequest) DeleteIndexTemplate(name string) error { + payload := &esCurlStruct{ + Method: http.MethodDelete, + URI: fmt.Sprintf("_template/%s", name), + } + + req.FnCurlEsService(req.cluster.Name, req.cluster.Namespace, payload, req.client) + if payload.Error != nil { + return payload.Error + } + if payload.StatusCode != 200 && payload.StatusCode != 404 { + return fmt.Errorf("There was an error deleting template %s. Error code: %v", name, payload.StatusCode) + } + return nil +} + +//ListTemplates returns a list of templates +func (req *ElasticsearchRequest) ListTemplates() (sets.String, error) { + payload := &esCurlStruct{ + Method: http.MethodGet, + URI: "_template", + } + + req.FnCurlEsService(req.cluster.Name, req.cluster.Namespace, payload, req.client) + if payload.Error != nil { + return nil, payload.Error + } + if payload.StatusCode != 200 { + return nil, fmt.Errorf("There was an error retrieving list of templates. Error code: %v, %v", payload.StatusCode != 200, payload.RequestBody) + } + response := sets.NewString() + for name := range payload.ResponseBody { + response.Insert(name) + } + return response, nil +} + +//ListIndicesForAlias returns a list of indices and the alias for the given pattern (e.g. foo-*, *-write) +func (req *ElasticsearchRequest) ListIndicesForAlias(aliasPattern string) ([]string, error) { + payload := &esCurlStruct{ + Method: http.MethodGet, + URI: fmt.Sprintf("_alias/%s", aliasPattern), + } + + req.FnCurlEsService(req.cluster.Name, req.cluster.Namespace, payload, req.client) + if payload.Error != nil { + return nil, payload.Error + } + if payload.StatusCode == 404 { + return []string{}, nil + } + if payload.StatusCode != 200 { + return nil, fmt.Errorf("There was an error retrieving list of indices aliased to %s. 
Error code: %v, %v", aliasPattern, payload.StatusCode != 200, payload.RequestBody) + } + response := []string{} + for index := range payload.ResponseBody { + response = append(response, index) + } + return response, nil +} diff --git a/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/index_management.go b/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/index_management.go new file mode 100644 index 0000000000..31a1b7b3d6 --- /dev/null +++ b/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/index_management.go @@ -0,0 +1,100 @@ +package k8shandler + +import ( + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/util/sets" + + logging "github.com/openshift/elasticsearch-operator/pkg/apis/logging/v1" + "github.com/openshift/elasticsearch-operator/pkg/indexmanagement" + "github.com/openshift/elasticsearch-operator/pkg/logger" + esapi "github.com/openshift/elasticsearch-operator/pkg/types/elasticsearch" +) + +const ( + //ocpTemplatePrefix is the prefix all operator generated templates + ocpTemplatePrefix = "ocp-gen" +) + +func (elasticsearchRequest *ElasticsearchRequest) CreateOrUpdateIndexManagement() error { + + logger.Debug("Reconciling IndexManagement") + cluster := elasticsearchRequest.cluster + if cluster.Spec.IndexManagement == nil { + logger.Debug("IndexManagement not specified - noop") + return nil + } + spec := indexmanagement.VerifyAndNormalize(cluster) + //TODO find crons with no matching mapping and remove them + elasticsearchRequest.cullIndexManagement(spec.Mappings) + for _, mapping := range spec.Mappings { + logger.Debugf("reconciling index manageme nt for mapping: %s", mapping.Name) + //create or update template + if err := elasticsearchRequest.createOrUpdateIndexTemplate(mapping); err != nil { + logger.Errorf("Error creating index template for mapping %s: %v", mapping.Name, err) + return err + } + //TODO: Can we have partial success? 
+		if err := elasticsearchRequest.initializeIndexIfNeeded(mapping); err != nil {
+			logger.Errorf("Error initializing index for mapping %s: %v", mapping.Name, err)
+			return err
+		}
+	}
+
+	return nil
+}
+func (elasticsearchRequest *ElasticsearchRequest) cullIndexManagement(mappings []logging.IndexManagementPolicyMappingSpec) {
+	mappingNames := sets.NewString()
+	for _, mapping := range mappings {
+		mappingNames.Insert(formatTemplateName(mapping.Name))
+	}
+	existing, err := elasticsearchRequest.ListTemplates()
+	if err != nil {
+		logger.Warnf("Unable to list existing templates in order to reconcile stale ones: %v", err)
+		return
+	}
+	difference := existing.Difference(mappingNames)
+
+	for _, template := range difference.List() {
+		if strings.HasPrefix(template, ocpTemplatePrefix) {
+			if err := elasticsearchRequest.DeleteIndexTemplate(template); err != nil {
+				logger.Warnf("Unable to delete stale template %q in order to reconcile: %v", template, err)
+			}
+		}
+	}
+}
+func (elasticsearchRequest *ElasticsearchRequest) initializeIndexIfNeeded(mapping logging.IndexManagementPolicyMappingSpec) error {
+	pattern := fmt.Sprintf("%s-write", mapping.Name)
+	indices, err := elasticsearchRequest.ListIndicesForAlias(pattern)
+	if err != nil {
+		return err
+	}
+	if len(indices) < 1 {
+		indexName := fmt.Sprintf("%s-000001", mapping.Name)
+		primaryShards := getDataCount(elasticsearchRequest.cluster)
+		replicas := int32(calculateReplicaCount(elasticsearchRequest.cluster))
+		index := esapi.NewIndex(indexName, primaryShards, replicas)
+		index.AddAlias(mapping.Name, false)
+		index.AddAlias(pattern, true)
+		for _, alias := range mapping.Aliases {
+			index.AddAlias(alias, false)
+		}
+		return elasticsearchRequest.CreateIndex(indexName, index)
+	}
+	return nil
+}
+
+func formatTemplateName(name string) string {
+	return fmt.Sprintf("%s-%s", ocpTemplatePrefix, name)
+}
+
+func (elasticsearchRequest *ElasticsearchRequest) createOrUpdateIndexTemplate(mapping logging.IndexManagementPolicyMappingSpec) error {
+	name := formatTemplateName(mapping.Name)
+	pattern := fmt.Sprintf("%s*", mapping.Name)
+	primaryShards := getDataCount(elasticsearchRequest.cluster)
+	replicas := int32(calculateReplicaCount(elasticsearchRequest.cluster))
+	aliases := append(mapping.Aliases, mapping.Name)
+	template := esapi.NewIndexTemplate(pattern, aliases, primaryShards, replicas)
+	return elasticsearchRequest.CreateIndexTemplate(name, template)
+}
diff --git a/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/reconciler.go b/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/reconciler.go
index 3502dd044a..7c97723ad3 100644
--- a/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/reconciler.go
+++ b/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/reconciler.go
@@ -8,15 +8,17 @@ import (
 )
 
 type ElasticsearchRequest struct {
-	client  client.Client
-	cluster *elasticsearch.Elasticsearch
+	client          client.Client
+	cluster         *elasticsearch.Elasticsearch
+	FnCurlEsService func(clusterName, namespace string, payload *esCurlStruct, client client.Client)
 }
 
 func Reconcile(requestCluster *elasticsearch.Elasticsearch, requestClient client.Client) error {
 
 	elasticsearchRequest := ElasticsearchRequest{
-		client:  requestClient,
-		cluster: requestCluster,
+		client:          requestClient,
+		cluster:         requestCluster,
+		FnCurlEsService: curlESService,
 	}
 
 	// Ensure existence of servicesaccount
@@ -54,5 +56,10 @@ func Reconcile(requestCluster *elasticsearch.Elasticsearch, requestClient client
 		return fmt.Errorf("Failed to reconcile PrometheusRules for Elasticsearch cluster: %v", err)
 	}
 
+	// Ensure index management is in place
+	if err := elasticsearchRequest.CreateOrUpdateIndexManagement(); err != nil {
+		return fmt.Errorf("Failed to reconcile IndexManagement for Elasticsearch cluster: %v", err)
+	}
+
 	return nil
 }
diff --git a/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/util.go b/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/util.go
index e467e130dc..46679c90d8 100644
--- a/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/util.go
+++ b/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/util.go
@@ -171,7 +171,6 @@ func getDataCount(dpl *api.Elasticsearch) int32 {
 			dataCount = dataCount + node.NodeCount
 		}
 	}
-
 	return dataCount
 }
diff --git a/vendor/github.com/openshift/elasticsearch-operator/pkg/logger/logger.go b/vendor/github.com/openshift/elasticsearch-operator/pkg/logger/logger.go
index 35e9e94b1f..0972c0d67a 100644
--- a/vendor/github.com/openshift/elasticsearch-operator/pkg/logger/logger.go
+++ b/vendor/github.com/openshift/elasticsearch-operator/pkg/logger/logger.go
@@ -9,11 +9,31 @@ import (
 
 var log logr.Logger
 
+//Debug logs messages at level 2
+func Debug(message string) {
+	logrus.Debug(message)
+}
+
 //Debugf logs messages at level 2
 func Debugf(format string, objects ...interface{}) {
 	logrus.Debugf(format, objects...)
 }
 
+//Warnf logs messages at level warn
+func Warnf(format string, objects ...interface{}) {
+	logrus.Warnf(format, objects...)
+}
+
+//Errorf logs messages at level error
+func Errorf(format string, objects ...interface{}) {
+	logrus.Errorf(format, objects...)
+}
+
+//Infof logs messages at level info
+func Infof(format string, objects ...interface{}) {
+	logrus.Infof(format, objects...)
+} + //IsDebugEnabled returns true if loglevel is 2 func IsDebugEnabled() bool { return logrus.GetLevel() == logrus.DebugLevel diff --git a/vendor/github.com/openshift/elasticsearch-operator/pkg/types/elasticsearch/types.go b/vendor/github.com/openshift/elasticsearch-operator/pkg/types/elasticsearch/types.go new file mode 100644 index 0000000000..f0744c7483 --- /dev/null +++ b/vendor/github.com/openshift/elasticsearch-operator/pkg/types/elasticsearch/types.go @@ -0,0 +1,62 @@ +package elasticsearch + +func NewIndexTemplate(pattern string, aliases []string, shards, replicas int32) *IndexTemplate { + template := IndexTemplate{ + Template: pattern, + Settings: IndexSettings{ + NumberOfShards: shards, + NumberOfReplicas: replicas, + }, + Aliases: map[string]IndexAlias{}, + } + for _, alias := range aliases { + template.Aliases[alias] = IndexAlias{} + } + return &template +} + +func NewIndex(name string, shards, replicas int32) *Index { + index := Index{ + Name: name, + Settings: IndexSettings{ + NumberOfShards: shards, + NumberOfReplicas: replicas, + }, + Aliases: map[string]IndexAlias{}, + } + return &index +} + +func (index *Index) AddAlias(name string, isWriteIndex bool) *Index { + alias := IndexAlias{} + if isWriteIndex { + alias.IsWriteIndex = true + } + index.Aliases[name] = alias + return index +} + +type Index struct { + //Name intentionally not serialized + Name string `json:"-"` + Settings IndexSettings `json:"settings,omitempty"` + Aliases map[string]IndexAlias `json:"aliases,omitempty"` +} + +type IndexTemplate struct { + Template string `json:"template,omitempty"` + Settings IndexSettings `json:"settings,omitempty"` + Aliases map[string]IndexAlias `json:"aliases,omitempty"` +} + +type Aliases struct { +} + +type IndexAlias struct { + IsWriteIndex bool `json:"is_write_index,omitempty"` +} + +type IndexSettings struct { + NumberOfShards int32 `json:"number_of_shards,omitempty"` + NumberOfReplicas int32 `json:"number_of_replicas,omitempty"` +} diff --git a/vendor/github.com/openshift/elasticsearch-operator/pkg/utils/utils.go b/vendor/github.com/openshift/elasticsearch-operator/pkg/utils/utils.go index 9403d5dc03..3bd6162321 100644 --- a/vendor/github.com/openshift/elasticsearch-operator/pkg/utils/utils.go +++ b/vendor/github.com/openshift/elasticsearch-operator/pkg/utils/utils.go @@ -3,10 +3,28 @@ package utils import ( "crypto/rand" "encoding/base64" + "encoding/json" "fmt" "os" ) +func GetInt64(value int64) *int64 { + i := value + return &i +} +func GetInt32(value int32) *int32 { + i := value + return &i +} + +func ToJson(obj interface{}) (string, error) { + bytes, err := json.Marshal(obj) + if err != nil { + return "", err + } + return string(bytes), nil +} + func LookupEnvWithDefault(envName, defaultValue string) string { if value, ok := os.LookupEnv(envName); ok { return value diff --git a/vendor/github.com/openshift/elasticsearch-operator/test/helpers/elasticsearch.go b/vendor/github.com/openshift/elasticsearch-operator/test/helpers/elasticsearch.go new file mode 100644 index 0000000000..1e2ed08f8a --- /dev/null +++ b/vendor/github.com/openshift/elasticsearch-operator/test/helpers/elasticsearch.go @@ -0,0 +1,44 @@ +package helpers + +import ( + . 
"github.com/onsi/ginkgo" + + "encoding/json" + "fmt" +) + +func NewFakeElasticsearchChatter(responses map[string]FakeElasticsearchResponse) *FakeElasticsearchChatter { + return &FakeElasticsearchChatter{ + Requests: map[string]string{}, + Responses: responses, + } +} + +type FakeElasticsearchChatter struct { + Requests map[string]string + Responses map[string]FakeElasticsearchResponse +} + +type FakeElasticsearchResponse struct { + Error error + StatusCode int + Body string +} + +func (chat *FakeElasticsearchChatter) GetRequest(key string) (string, bool) { + request, found := chat.Requests[key] + return request, found +} + +func (chat *FakeElasticsearchChatter) GetResponse(key string) (FakeElasticsearchResponse, bool) { + response, found := chat.Responses[key] + return response, found +} + +func (response *FakeElasticsearchResponse) BodyAsResponseBody() map[string]interface{} { + body := &map[string]interface{}{} + if err := json.Unmarshal([]byte(response.Body), body); err != nil { + Fail(fmt.Sprintf("Unable to convert to response body %q: %v", response.Body, err)) + } + return *body +} diff --git a/vendor/github.com/openshift/elasticsearch-operator/test/helpers/json.go b/vendor/github.com/openshift/elasticsearch-operator/test/helpers/json.go new file mode 100644 index 0000000000..24d9ad1c06 --- /dev/null +++ b/vendor/github.com/openshift/elasticsearch-operator/test/helpers/json.go @@ -0,0 +1,41 @@ +package helpers + +import ( + "encoding/json" + "fmt" + "strings" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func NormalizeJson(doc string) string { + doc = strings.TrimSpace(doc) + data := &map[string]interface{}{} + if err := json.Unmarshal([]byte(doc), data); err != nil { + Fail(fmt.Sprintf("Unable to normalize document '%s': %v", doc, err)) + } + response, err := json.MarshalIndent(data, "", "\t") + if err != nil { + Fail(fmt.Sprintf("Unable to normalize document '%s': %v", doc, err)) + } + return string(response) +} + +type JsonExpectation struct { + actual string +} + +func ExpectJson(doc string) *JsonExpectation { + return &JsonExpectation{actual: doc} +} + +func (exp *JsonExpectation) ToEqual(doc string) { + actual := NormalizeJson(exp.actual) + expected := NormalizeJson(doc) + if actual != expected { + fmt.Printf("Actual>:\n%s<\n", actual) + fmt.Printf("Expected>:\n%s\n<", expected) + Expect(actual).To(Equal(expected)) + } +} diff --git a/vendor/github.com/openshift/elasticsearch-operator/test/utils/utils.go b/vendor/github.com/openshift/elasticsearch-operator/test/utils/utils.go index 65c48391e2..2d10542b93 100644 --- a/vendor/github.com/openshift/elasticsearch-operator/test/utils/utils.go +++ b/vendor/github.com/openshift/elasticsearch-operator/test/utils/utils.go @@ -9,7 +9,7 @@ import ( "time" "github.com/sirupsen/logrus" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" @@ -19,6 +19,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" goctx "context" + api "github.com/openshift/elasticsearch-operator/pkg/apis/logging/v1" framework "github.com/operator-framework/operator-sdk/pkg/test" apierrors "k8s.io/apimachinery/pkg/api/errors" From ca039ca5b5d90387134773d9c66edada3c8d2850 Mon Sep 17 00:00:00 2001 From: Vimal Kumar Date: Tue, 21 Jan 2020 18:55:40 +0530 Subject: [PATCH 02/21] Update alm-exmpla to include retentionPolicy Updated 4.4 cluster service version alm-exmple to incude retentionPolicy --- .../4.4/cluster-logging.v4.4.0.clusterserviceversion.yaml 
| 5 +++++ 1 file changed, 5 insertions(+) diff --git a/manifests/4.4/cluster-logging.v4.4.0.clusterserviceversion.yaml b/manifests/4.4/cluster-logging.v4.4.0.clusterserviceversion.yaml index 28efdb5096..46a622cc1f 100644 --- a/manifests/4.4/cluster-logging.v4.4.0.clusterserviceversion.yaml +++ b/manifests/4.4/cluster-logging.v4.4.0.clusterserviceversion.yaml @@ -37,6 +37,11 @@ metadata: "storageClassName": "gp2", "size": "200G" } + }, + "retentionPolicy":{ + "logs.app":{ + "maxAge":"7d" + } } }, "visualization": { From dce28ca1cb83aa734a5b374777ee49d0820e07c6 Mon Sep 17 00:00:00 2001 From: Jeff Cantrill Date: Wed, 12 Feb 2020 09:57:10 -0500 Subject: [PATCH 03/21] Bug 1799024: Support legacy syslog forwarding --- files/fluentd/run.sh | 24 -- pkg/generators/forwarding/factory.go | 4 +- .../forwarding/fluentd/fluent_conf_test.go | 32 ++- .../forwarding/fluentd/generators.go | 9 +- .../forwarding/fluentd/output_conf_es_test.go | 2 +- .../fluentd/output_conf_forward_test.go | 2 +- .../forwarding/fluentd/source_test.go | 2 +- .../forwarding/fluentd/templates.go | 15 ++ pkg/k8shandler/certificates.go | 4 + pkg/k8shandler/fluentd.go | 20 ++ pkg/k8shandler/forwarding.go | 2 +- test/e2e/logforwarding/syslog/deleteme.go | 3 + .../syslog/syslog_cert_generation.sh | 189 +++++++++++++ .../sysloglegacy/forward_to_syslog_test.go | 90 +++++++ .../sysloglegacy/logforwarding_suite_test.go | 13 + test/helpers/syslog.go | 250 ++++++++++++++++++ 16 files changed, 628 insertions(+), 33 deletions(-) create mode 100644 test/e2e/logforwarding/syslog/deleteme.go create mode 100644 test/e2e/logforwarding/syslog/syslog_cert_generation.sh create mode 100644 test/e2e/logforwarding/sysloglegacy/forward_to_syslog_test.go create mode 100644 test/e2e/logforwarding/sysloglegacy/logforwarding_suite_test.go create mode 100644 test/helpers/syslog.go diff --git a/files/fluentd/run.sh b/files/fluentd/run.sh index 1746894362..952988b45c 100644 --- a/files/fluentd/run.sh +++ b/files/fluentd/run.sh @@ -145,30 +145,6 @@ if [ -d /var/lib/docker/containers ] ; then fi fi -if [[ "${USE_REMOTE_SYSLOG:-}" = "true" ]] ; then - # The symlink is a workaround for https://github.com/openshift/origin-aggregated-logging/issues/604 - found= - for file in /usr/share/gems/gems/fluent-plugin-remote-syslog-*/lib/fluentd/plugin/*.rb ; do - bname=$(basename $file) - if [ ! -e "/etc/fluent/plugin/$bname" -a -f "$file" ] ; then - ln -s $file /etc/fluent/plugin/ - found=true - fi - done - if [ -z "${found:-}" ] ; then - # not found in rpm location - look in alternate location - for file in /opt/app-root/src/gems/fluent-plugin-remote-syslog*/lib/fluentd/plugin/*.rb ; do - bname=$(basename $file) - if [ ! 
-e "/etc/fluent/plugin/$bname" -a -f "$file" ] ; then - ln -s $file /etc/fluent/plugin/ - fi - done - fi - if [[ $REMOTE_SYSLOG_HOST ]] ; then - ruby generate_syslog_config.rb - fi -fi - if [ "${AUDIT_CONTAINER_ENGINE:-}" = "true" ] ; then cp -f $CFG_DIR/input-pre-audit-log.conf $CFG_DIR/openshift cp -f $CFG_DIR/filter-pre-a-audit-exclude.conf $CFG_DIR/openshift diff --git a/pkg/generators/forwarding/factory.go b/pkg/generators/forwarding/factory.go index bccabdca1e..816e2e6da3 100644 --- a/pkg/generators/forwarding/factory.go +++ b/pkg/generators/forwarding/factory.go @@ -9,10 +9,10 @@ import ( ) //NewConfigGenerator create a config generator for a given collector type -func NewConfigGenerator(collector logging.LogCollectionType, includeLegacyForwardConfig bool) (ConfigGenerator, error) { +func NewConfigGenerator(collector logging.LogCollectionType, includeLegacyForwardConfig, includeLegacySyslogConfig bool) (ConfigGenerator, error) { switch collector { case logging.LogCollectionTypeFluentd: - return fluentd.NewConfigGenerator(includeLegacyForwardConfig) + return fluentd.NewConfigGenerator(includeLegacyForwardConfig, includeLegacySyslogConfig) } return nil, fmt.Errorf("Config generation not supported for collects of type %s", collector) } diff --git a/pkg/generators/forwarding/fluentd/fluent_conf_test.go b/pkg/generators/forwarding/fluentd/fluent_conf_test.go index 284bb7246d..1a8ec190bc 100644 --- a/pkg/generators/forwarding/fluentd/fluent_conf_test.go +++ b/pkg/generators/forwarding/fluentd/fluent_conf_test.go @@ -15,7 +15,7 @@ var _ = Describe("Generating fluentd config", func() { ) BeforeEach(func() { var err error - generator, err = NewConfigGenerator(true) + generator, err = NewConfigGenerator(true, true) Expect(err).To(BeNil()) Expect(generator).ToNot(BeNil()) forwarding = &logging.ForwardingSpec{ @@ -379,6 +379,10 @@ var _ = Describe("Generating fluentd config", func() { @type relabel @label @_LEGACY_SECUREFORWARD + + @type relabel + @label @_LEGACY_SYSLOG + @@ -427,6 +431,13 @@ var _ = Describe("Generating fluentd config", func() { @include /etc/fluent/configs.d/secure-forward/secure-forward.conf + `) }) @@ -793,6 +804,10 @@ var _ = Describe("Generating fluentd config", func() { @type relabel @label @_LEGACY_SECUREFORWARD + + @type relabel + @label @_LEGACY_SYSLOG + @@ -1211,6 +1234,13 @@ var _ = Describe("Generating fluentd config", func() { @include /etc/fluent/configs.d/secure-forward/secure-forward.conf + `) }) diff --git a/pkg/generators/forwarding/fluentd/generators.go b/pkg/generators/forwarding/fluentd/generators.go index ef7eeee80d..e79770b16b 100644 --- a/pkg/generators/forwarding/fluentd/generators.go +++ b/pkg/generators/forwarding/fluentd/generators.go @@ -17,10 +17,11 @@ import ( type ConfigGenerator struct { *generators.Generator includeLegacyForwardConfig bool + includeLegacySyslogConfig bool } //NewConfigGenerator creates an instance of FluentdConfigGenerator -func NewConfigGenerator(includeLegacyForwardConfig bool) (*ConfigGenerator, error) { +func NewConfigGenerator(includeLegacyForwardConfig, includeLegacySyslogConfig bool) (*ConfigGenerator, error) { engine, err := generators.New("OutputLabelConf", &template.FuncMap{ "labelName": labelName, @@ -30,7 +31,7 @@ func NewConfigGenerator(includeLegacyForwardConfig bool) (*ConfigGenerator, erro if err != nil { return nil, err } - return &ConfigGenerator{engine, includeLegacyForwardConfig}, nil + return &ConfigGenerator{engine, includeLegacyForwardConfig, includeLegacySyslogConfig}, nil } //Generate the fluent.conf 
file using the forwarding information @@ -61,6 +62,7 @@ func (engine *ConfigGenerator) Generate(forwarding *logforward.ForwardingSpec) ( data := struct { IncludeLegacySecureForward bool + IncludeLegacySyslog bool CollectInfraLogs bool CollectAppLogs bool CollectAuditLogs bool @@ -70,6 +72,7 @@ func (engine *ConfigGenerator) Generate(forwarding *logforward.ForwardingSpec) ( OutputLabels []string }{ engine.includeLegacyForwardConfig, + engine.includeLegacySyslogConfig, logTypes.Has(string(logforward.LogSourceTypeInfra)), logTypes.Has(string(logforward.LogSourceTypeApp)), logTypes.Has(string(logforward.LogSourceTypeAudit)), @@ -137,10 +140,12 @@ func (engine *ConfigGenerator) generateSourceToPipelineLabels(sourcesToPipelines for sourceType, pipelineNames := range sourcesToPipelines { data := struct { IncludeLegacySecureForward bool + IncludeLegacySyslog bool Source string PipelineNames []string }{ engine.includeLegacyForwardConfig, + engine.includeLegacySyslogConfig, string(sourceType), pipelineNames, } diff --git a/pkg/generators/forwarding/fluentd/output_conf_es_test.go b/pkg/generators/forwarding/fluentd/output_conf_es_test.go index df389cc042..ddd227fa97 100644 --- a/pkg/generators/forwarding/fluentd/output_conf_es_test.go +++ b/pkg/generators/forwarding/fluentd/output_conf_es_test.go @@ -16,7 +16,7 @@ var _ = Describe("Generating fluentd config blocks", func() { ) BeforeEach(func() { var err error - generator, err = NewConfigGenerator(false) + generator, err = NewConfigGenerator(false, false) Expect(err).To(BeNil()) }) diff --git a/pkg/generators/forwarding/fluentd/output_conf_forward_test.go b/pkg/generators/forwarding/fluentd/output_conf_forward_test.go index 29130d268f..afd9c2503e 100644 --- a/pkg/generators/forwarding/fluentd/output_conf_forward_test.go +++ b/pkg/generators/forwarding/fluentd/output_conf_forward_test.go @@ -16,7 +16,7 @@ var _ = Describe("Generating fluentd secure forward output store config blocks", generator *ConfigGenerator ) BeforeEach(func() { - generator, err = NewConfigGenerator(false) + generator, err = NewConfigGenerator(false, false) Expect(err).To(BeNil()) }) diff --git a/pkg/generators/forwarding/fluentd/source_test.go b/pkg/generators/forwarding/fluentd/source_test.go index 67242ca4c0..29f646acc6 100644 --- a/pkg/generators/forwarding/fluentd/source_test.go +++ b/pkg/generators/forwarding/fluentd/source_test.go @@ -18,7 +18,7 @@ var _ = Describe("generating source", func() { ) BeforeEach(func() { - generator, err = NewConfigGenerator(false) + generator, err = NewConfigGenerator(false, false) Expect(err).To(BeNil()) }) diff --git a/pkg/generators/forwarding/fluentd/templates.go b/pkg/generators/forwarding/fluentd/templates.go index 6f67ea6005..c3b3dd87ac 100644 --- a/pkg/generators/forwarding/fluentd/templates.go +++ b/pkg/generators/forwarding/fluentd/templates.go @@ -304,6 +304,15 @@ const fluentConfTemplate = `{{- define "fluentConf" }} {{- end}} +{{ if .IncludeLegacySyslog }} + +{{- end}} {{- end}}` @@ -425,6 +434,12 @@ const sourceToPipelineCopyTemplate = `{{- define "sourceToPipelineCopyTemplate" @type relabel @label @_LEGACY_SECUREFORWARD +{{- end }} +{{ if .IncludeLegacySyslog }} + + @type relabel + @label @_LEGACY_SYSLOG + {{- end }} diff --git a/pkg/k8shandler/certificates.go b/pkg/k8shandler/certificates.go index e9e1c91fe1..7d3f6842ed 100644 --- a/pkg/k8shandler/certificates.go +++ b/pkg/k8shandler/certificates.go @@ -143,6 +143,10 @@ func (clusterRequest *ClusterLoggingRequest) CreateOrUpdateCertificates() (err e func 
GenerateCertificates(namespace, rootDir, logStoreName, workDir string) (err error) { script := fmt.Sprintf("%s/scripts/cert_generation.sh", rootDir) + return RunCertificatesScript(namespace, logStoreName, workDir, script) +} + +func RunCertificatesScript(namespace, logStoreName, workDir, script string) (err error) { logger.Debugf("Running script '%s %s %s %s'", script, workDir, namespace, logStoreName) cmd := exec.Command(script, workDir, namespace, logStoreName) result, err := cmd.Output() diff --git a/pkg/k8shandler/fluentd.go b/pkg/k8shandler/fluentd.go index 5c75e8c5df..b03061ab5b 100644 --- a/pkg/k8shandler/fluentd.go +++ b/pkg/k8shandler/fluentd.go @@ -27,6 +27,7 @@ import ( const ( fluentdAlertsFile = "fluentd/fluentd_prometheus_alerts.yaml" fluentdName = "fluentd" + syslogName = "syslog" ) func (clusterRequest *ClusterLoggingRequest) removeFluentd() (err error) { @@ -176,6 +177,21 @@ func (clusterRequest *ClusterLoggingRequest) includeLegacyForwardConfig() bool { return found } +//includeLegacySyslogConfig to address Bug 1799024. To be removed for LogForwarding GA +func (clusterRequest *ClusterLoggingRequest) includeLegacySyslogConfig() bool { + config := &v1.ConfigMap{ + Data: map[string]string{}, + } + if err := clusterRequest.Get(syslogName, config); err != nil { + if errors.IsNotFound(err) { + return false + } + logger.Warnf("There was a non-critical error trying to fetch the configmap: %v", err) + } + _, found := config.Data["syslog.conf"] + return found +} + func (clusterRequest *ClusterLoggingRequest) createOrUpdateFluentdConfigMap(fluentConf string) error { logrus.Debug("createOrUpdateFluentdConfigMap...") fluentdConfigMap := NewConfigMap( @@ -285,6 +301,8 @@ func newFluentdPodSpec(cluster *logging.ClusterLogging, elasticsearchAppName str {Name: "config", ReadOnly: true, MountPath: "/etc/fluent/configs.d/user"}, {Name: "secureforwardconfig", ReadOnly: true, MountPath: "/etc/fluent/configs.d/secure-forward"}, {Name: "secureforwardcerts", ReadOnly: true, MountPath: "/etc/ocp-forward"}, + {Name: "syslogconfig", ReadOnly: true, MountPath: "/etc/fluent/configs.d/syslog"}, + {Name: "syslogcerts", ReadOnly: true, MountPath: "/etc/ocp-syslog"}, {Name: "entrypoint", ReadOnly: true, MountPath: "/opt/app-root/src/run.sh", SubPath: "run.sh"}, {Name: "certs", ReadOnly: true, MountPath: "/etc/fluent/keys"}, {Name: "localtime", ReadOnly: true, MountPath: "/etc/localtime"}, @@ -342,6 +360,8 @@ func newFluentdPodSpec(cluster *logging.ClusterLogging, elasticsearchAppName str {Name: "config", VolumeSource: v1.VolumeSource{ConfigMap: &v1.ConfigMapVolumeSource{LocalObjectReference: v1.LocalObjectReference{Name: "fluentd"}}}}, {Name: "secureforwardconfig", VolumeSource: v1.VolumeSource{ConfigMap: &v1.ConfigMapVolumeSource{LocalObjectReference: v1.LocalObjectReference{Name: "secure-forward"}, Optional: utils.GetBool(true)}}}, {Name: "secureforwardcerts", VolumeSource: v1.VolumeSource{Secret: &v1.SecretVolumeSource{SecretName: "secure-forward", Optional: utils.GetBool(true)}}}, + {Name: "syslogconfig", VolumeSource: v1.VolumeSource{ConfigMap: &v1.ConfigMapVolumeSource{LocalObjectReference: v1.LocalObjectReference{Name: syslogName}, Optional: utils.GetBool(true)}}}, + {Name: "syslogcerts", VolumeSource: v1.VolumeSource{Secret: &v1.SecretVolumeSource{SecretName: syslogName, Optional: utils.GetBool(true)}}}, {Name: "entrypoint", VolumeSource: v1.VolumeSource{ConfigMap: &v1.ConfigMapVolumeSource{LocalObjectReference: v1.LocalObjectReference{Name: "fluentd"}}}}, {Name: "certs", VolumeSource: 
v1.VolumeSource{Secret: &v1.SecretVolumeSource{SecretName: "fluentd"}}},
 		{Name: "localtime", VolumeSource: v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: "/etc/localtime"}}},
diff --git a/pkg/k8shandler/forwarding.go b/pkg/k8shandler/forwarding.go
index 89261b805d..9164ebe437 100644
--- a/pkg/k8shandler/forwarding.go
+++ b/pkg/k8shandler/forwarding.go
@@ -47,7 +47,7 @@ func (clusterRequest *ClusterLoggingRequest) generateCollectorConfig() (config s
 	}
 
 	clusterRequest.ForwardingSpec = clusterRequest.normalizeLogForwarding(clusterRequest.cluster.Namespace, clusterRequest.cluster)
-	generator, err := forwarding.NewConfigGenerator(clusterRequest.cluster.Spec.Collection.Logs.Type, clusterRequest.includeLegacyForwardConfig())
+	generator, err := forwarding.NewConfigGenerator(clusterRequest.cluster.Spec.Collection.Logs.Type, clusterRequest.includeLegacyForwardConfig(), clusterRequest.includeLegacySyslogConfig())
 	return generator.Generate(&clusterRequest.ForwardingSpec)
 }
diff --git a/test/e2e/logforwarding/syslog/deleteme.go b/test/e2e/logforwarding/syslog/deleteme.go
new file mode 100644
index 0000000000..337a6fc2fa
--- /dev/null
+++ b/test/e2e/logforwarding/syslog/deleteme.go
@@ -0,0 +1,3 @@
+package syslog
+
+// placeholder to make compiler happy
diff --git a/test/e2e/logforwarding/syslog/syslog_cert_generation.sh b/test/e2e/logforwarding/syslog/syslog_cert_generation.sh
new file mode 100644
index 0000000000..b44cf9b96f
--- /dev/null
+++ b/test/e2e/logforwarding/syslog/syslog_cert_generation.sh
@@ -0,0 +1,189 @@
+#!/bin/bash
+
+set -e
+
+WORKING_DIR=$1
+NAMESPACE=$2
+LOG_STORE=$3
+
+function generate_signing_ca() {
+  openssl req -x509 \
+    -new \
+    -newkey rsa:4096 \
+    -keyout ${WORKING_DIR}/ca-syslog.key \
+    -nodes \
+    -days 1825 \
+    -out ${WORKING_DIR}/ca-syslog.crt \
+    -subj "/CN=openshift-cluster-logging-signer"
+}
+
+function init_cert_files() {
+
+  if [ ! -f ${WORKING_DIR}/ca.db ]; then
+    touch ${WORKING_DIR}/ca.db
+  fi
+
+  if [ ! -f ${WORKING_DIR}/ca.serial.txt ]; then
+    echo 00 > ${WORKING_DIR}/ca.serial.txt
+  fi
+}
+
+function generate_cert_config() {
+  local subject=$1
+  cat <<EOF > "${WORKING_DIR}/syslog-server.conf"
+[ req ]
+default_bits = 4096
+prompt = no
+encrypt_key = yes
+default_md = sha512
+distinguished_name = dn
+[ dn ]
+CN = ${subject}
+OU = OpenShift
+O = Logging
+EOF
+}
+
+function generate_request() {
+  openssl req -new \
+    -out ${WORKING_DIR}/syslog-server.csr \
+    -newkey rsa:4096 \
+    -keyout ${WORKING_DIR}/syslog-server.key \
+    -config ${WORKING_DIR}/syslog-server.conf \
+    -days 712 \
+    -nodes
+}
+
+function sign_cert() {
+  openssl ca \
+    -in ${WORKING_DIR}/syslog-server.csr \
+    -notext \
+    -out ${WORKING_DIR}/syslog-server.crt \
+    -config ${WORKING_DIR}/signing.conf \
+    -extensions v3_req \
+    -batch \
+    -extensions server_ext
+}
+
+function generate_certs() {
+  local subject=$1
+
+  generate_cert_config $subject
+  generate_request
+  sign_cert
+}
+
+function create_signing_conf() {
+  cat <<EOF > "${WORKING_DIR}/signing.conf"
+# Simple Signing CA
+
+# The [default] section contains global constants that can be referred to from
+# the entire configuration file. It may also hold settings pertaining to more
+# than one openssl command.
+
+[ default ]
+dir = ${WORKING_DIR} # Top dir
+
+# The next part of the configuration file is used by the openssl req command.
+# It defines the CA's key pair, its DN, and the desired extensions for the CA
+# certificate.
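+#
+# Note: the CA key pair itself is created directly by generate_signing_ca
+# above, so the [ req ] section below is effectively reference material; the
+# openssl ca invocation in sign_cert is what consumes the [ ca ] chain and
+# the [ server_ext ] extensions defined further down.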
+ +[ req ] +default_bits = 4096 # RSA key size +encrypt_key = yes # Protect private key +default_md = sha512 # MD to use +utf8 = yes # Input is UTF-8 +string_mask = utf8only # Emit UTF-8 strings +prompt = no # Don't prompt for DN +distinguished_name = ca_dn # DN section +req_extensions = ca_reqext # Desired extensions + +[ ca_dn ] +0.domainComponent = "io" +1.domainComponent = "openshift" +organizationName = "OpenShift Origin" +organizationalUnitName = "Logging Signing CA" +commonName = "Logging Signing CA" + +[ ca_reqext ] +keyUsage = critical,keyCertSign,cRLSign +basicConstraints = critical,CA:true,pathlen:0 +subjectKeyIdentifier = hash + +# The remainder of the configuration file is used by the openssl ca command. +# The CA section defines the locations of CA assets, as well as the policies +# applying to the CA. + +[ ca ] +default_ca = signing_ca # The default CA section + +[ signing_ca ] +certificate = \$dir/ca-syslog.crt # The CA cert +private_key = \$dir/ca-syslog.key # CA private key +new_certs_dir = \$dir/ # Certificate archive +serial = \$dir/ca.serial.txt # Serial number file +crlnumber = \$dir/ca.crl.srl # CRL number file +database = \$dir/ca.db # Index file +unique_subject = no # Require unique subject +default_days = 730 # How long to certify for +default_md = sha512 # MD to use +policy = any_pol # Default naming policy +email_in_dn = no # Add email to cert DN +preserve = no # Keep passed DN ordering +name_opt = ca_default # Subject DN display options +cert_opt = ca_default # Certificate display options +copy_extensions = copy # Copy extensions from CSR +x509_extensions = client_ext # Default cert extensions +default_crl_days = 7 # How long before next CRL +crl_extensions = crl_ext # CRL extensions + +# Naming policies control which parts of a DN end up in the certificate and +# under what circumstances certification should be denied. + +[ match_pol ] +domainComponent = match # Must match 'simple.org' +organizationName = match # Must match 'Simple Inc' +organizationalUnitName = optional # Included if present +commonName = supplied # Must be present + +[ any_pol ] +domainComponent = optional +countryName = optional +stateOrProvinceName = optional +localityName = optional +organizationName = optional +organizationalUnitName = optional +commonName = optional +emailAddress = optional + +# Certificate extensions define what types of certificates the CA is able to +# create. + +[ client_ext ] +keyUsage = critical,digitalSignature,keyEncipherment +basicConstraints = CA:false +extendedKeyUsage = clientAuth +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid + +[ server_ext ] +keyUsage = critical,digitalSignature,keyEncipherment +basicConstraints = CA:false +extendedKeyUsage = serverAuth,clientAuth +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid + +# CRL extensions exist solely to point to the CA certificate that has issued +# the CRL. + +[ crl_ext ] +authorityKeyIdentifier = keyid +EOF +} + + +generate_signing_ca +init_cert_files +create_signing_conf + +generate_certs ${LOG_STORE}.${NAMESPACE}.svc diff --git a/test/e2e/logforwarding/sysloglegacy/forward_to_syslog_test.go b/test/e2e/logforwarding/sysloglegacy/forward_to_syslog_test.go new file mode 100644 index 0000000000..8c0ca47bef --- /dev/null +++ b/test/e2e/logforwarding/sysloglegacy/forward_to_syslog_test.go @@ -0,0 +1,90 @@ +package sysloglegacy + +import ( + "fmt" + "runtime" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + apps "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + + "github.com/openshift/cluster-logging-operator/pkg/k8shandler" + "github.com/openshift/cluster-logging-operator/pkg/logger" + "github.com/openshift/cluster-logging-operator/test/helpers" +) + +var _ = Describe("LogForwarding", func() { + _, filename, _, _ := runtime.Caller(0) + logger.Infof("Running %s", filename) + var ( + err error + syslogDeployment *apps.Deployment + e2e = helpers.NewE2ETestFramework() + ) + BeforeEach(func() { + if err := e2e.DeployLogGenerator(); err != nil { + logger.Errorf("unable to deploy log generator. E: %s", err.Error()) + } + }) + Describe("when ClusterLogging is configured with 'forwarding' to an external syslog server", func() { + + Context("with the legacy syslog plugin", func() { + + Context("and tcp receiver", func() { + + BeforeEach(func() { + if syslogDeployment, err = e2e.DeploySyslogReceiver(corev1.ProtocolTCP); err != nil { + Fail(fmt.Sprintf("Unable to deploy syslog receiver: %v", err)) + } + fmt.Sprintf("%s.%s.svc:24224", syslogDeployment.ObjectMeta.Name, syslogDeployment.Namespace) + const conf = ` + + @type syslog_buffered + @id syslogid + remote_syslog syslog-receiver.openshift-logging.svc + port 24224 + hostname ${hostname} + facility user + severity debug + + ` + //create configmap syslog/"syslog.conf" + fluentdConfigMap := k8shandler.NewConfigMap( + "syslog", + syslogDeployment.Namespace, + map[string]string{ + "syslog.conf": conf, + }, + ) + if _, err = e2e.KubeClient.Core().ConfigMaps(syslogDeployment.Namespace).Create(fluentdConfigMap); err != nil { + Fail(fmt.Sprintf("Unable to create legacy syslog.conf configmap: %v", err)) + } + + components := []helpers.LogComponentType{helpers.ComponentTypeCollector, helpers.ComponentTypeStore} + cr := helpers.NewClusterLogging(components...) + cr.ObjectMeta.Annotations[k8shandler.ForwardingAnnotation] = "disabled" + if err := e2e.CreateClusterLogging(cr); err != nil { + Fail(fmt.Sprintf("Unable to create an instance of cluster logging: %v", err)) + } + for _, component := range components { + if err := e2e.WaitFor(component); err != nil { + Fail(fmt.Sprintf("Failed waiting for component %s to be ready: %v", component, err)) + } + } + }) + + It("should send logs to the forward.Output logstore", func() { + Expect(e2e.LogStore.HasInfraStructureLogs(helpers.DefaultWaitForLogsTimeout)).To(BeTrue(), "Expected to find stored infrastructure logs") + }) + }) + + }) + + AfterEach(func() { + e2e.Cleanup() + }) + + }) + +}) diff --git a/test/e2e/logforwarding/sysloglegacy/logforwarding_suite_test.go b/test/e2e/logforwarding/sysloglegacy/logforwarding_suite_test.go new file mode 100644 index 0000000000..39d0c418ef --- /dev/null +++ b/test/e2e/logforwarding/sysloglegacy/logforwarding_suite_test.go @@ -0,0 +1,13 @@ +package sysloglegacy + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +func TestLogForwarding(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "LogForwarding Integration E2E Suite - Forward to legacy syslog") +} diff --git a/test/helpers/syslog.go b/test/helpers/syslog.go new file mode 100644 index 0000000000..37f804aadd --- /dev/null +++ b/test/helpers/syslog.go @@ -0,0 +1,250 @@ +package helpers + +import ( + "errors" + "fmt" + "strconv" + "strings" + "time" + + apps "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + + "github.com/openshift/cluster-logging-operator/pkg/k8shandler" + "github.com/openshift/cluster-logging-operator/pkg/logger" + "github.com/openshift/cluster-logging-operator/pkg/utils" +) + +type syslogReceiverLogStore struct { + deployment *apps.Deployment + tc *E2ETestFramework +} + +const ( + syslogReceiverName = "syslog-receiver" + tcpSyslogConf = ` +# Provides TCP syslog reception +# for parameters see http://www.rsyslog.com/doc/imtcp.html +module(load="imtcp") # needs to be done just once +input(type="imtcp" port="24224" ruleset="test") + +#### RULES #### +ruleset(name="test"){ + action(type="omfile" file="/var/log/infra.log") +} + ` + udpSyslogConf = ` +# Provides UDP syslog reception +# for parameters see http://www.rsyslog.com/doc/imudp.html +module(load="imudp") # needs to be done just once +input(type="imudp" port="24224" ruleset="test") + +#### RULES #### +ruleset(name="test"){ + action(type="omfile" file="/var/log/infra.log") +} + ` +) + +func (syslog *syslogReceiverLogStore) hasLogs(file string, timeToWait time.Duration) (bool, error) { + options := metav1.ListOptions{ + LabelSelector: "component=syslog-receiver", + } + pods, err := syslog.tc.KubeClient.CoreV1().Pods(OpenshiftLoggingNS).List(options) + if err != nil { + return false, err + } + if len(pods.Items) == 0 { + return false, errors.New("No pods found for syslog receiver") + } + logger.Debugf("Pod %s", pods.Items[0].Name) + cmd := fmt.Sprintf("ls %s | wc -l", file) + + err = wait.Poll(defaultRetryInterval, timeToWait, func() (done bool, err error) { + output, err := syslog.tc.PodExec(OpenshiftLoggingNS, pods.Items[0].Name, "syslog-receiver", []string{"bash", "-c", cmd}) + if err != nil { + return false, err + } + value, err := strconv.Atoi(strings.TrimSpace(output)) + if err != nil { + logger.Debugf("Error parsing output: %s", output) + return false, nil + } + return value > 0, nil + }) + if err == wait.ErrWaitTimeout { + return false, err + } + return true, err +} + +func (syslog *syslogReceiverLogStore) HasInfraStructureLogs(timeToWait time.Duration) (bool, error) { + return syslog.hasLogs("/var/log/infra.log", timeToWait) +} + +func (syslog *syslogReceiverLogStore) HasApplicationLogs(timeToWait time.Duration) (bool, error) { + return false, fmt.Errorf("Not implemented") +} + +func (syslog *syslogReceiverLogStore) HasAuditLogs(timeToWait time.Duration) (bool, error) { + return false, fmt.Errorf("Not implemented") +} + +func (tc *E2ETestFramework) createSyslogServiceAccount() (serviceAccount *corev1.ServiceAccount, err error) { + serviceAccount = k8shandler.NewServiceAccount("syslog-receiver", OpenshiftLoggingNS) + if serviceAccount, err = tc.KubeClient.Core().ServiceAccounts(OpenshiftLoggingNS).Create(serviceAccount); err != nil { + return nil, err + } + tc.AddCleanup(func() error { + return tc.KubeClient.Core().ServiceAccounts(OpenshiftLoggingNS).Delete(serviceAccount.Name, nil) + }) + return serviceAccount, nil +} + +func (tc 
*E2ETestFramework) createSyslogRbac(name string) (err error) { + saRole := k8shandler.NewRole( + name, + OpenshiftLoggingNS, + k8shandler.NewPolicyRules( + k8shandler.NewPolicyRule( + []string{"security.openshift.io"}, + []string{"securitycontextconstraints"}, + []string{"privileged"}, + []string{"use"}, + ), + ), + ) + if _, err = tc.KubeClient.Rbac().Roles(OpenshiftLoggingNS).Create(saRole); err != nil { + return err + } + tc.AddCleanup(func() error { + return tc.KubeClient.Rbac().Roles(OpenshiftLoggingNS).Delete(name, nil) + }) + subject := k8shandler.NewSubject( + "ServiceAccount", + name, + ) + subject.APIGroup = "" + roleBinding := k8shandler.NewRoleBinding( + name, + OpenshiftLoggingNS, + saRole.Name, + k8shandler.NewSubjects( + subject, + ), + ) + if _, err = tc.KubeClient.Rbac().RoleBindings(OpenshiftLoggingNS).Create(roleBinding); err != nil { + return err + } + tc.AddCleanup(func() error { + return tc.KubeClient.Rbac().RoleBindings(OpenshiftLoggingNS).Delete(name, nil) + }) + return nil +} + +func (tc *E2ETestFramework) DeploySyslogReceiver(protocol corev1.Protocol) (deployment *apps.Deployment, err error) { + logStore := &syslogReceiverLogStore{ + tc: tc, + } + serviceAccount, err := tc.createSyslogServiceAccount() + if err != nil { + return nil, err + } + if err := tc.createSyslogRbac(syslogReceiverName); err != nil { + return nil, err + } + container := corev1.Container{ + Name: syslogReceiverName, + Image: "quay.io/openshift/origin-logging-rsyslog:latest", + ImagePullPolicy: corev1.PullAlways, + Args: []string{"rsyslogd", "-n", "-f", "/rsyslog/etc/rsyslog.conf"}, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "config", + ReadOnly: true, + MountPath: "/rsyslog/etc", + }, + }, + SecurityContext: &corev1.SecurityContext{ + Privileged: utils.GetBool(true), + }, + } + podSpec := corev1.PodSpec{ + Containers: []corev1.Container{container}, + Volumes: []corev1.Volume{ + { + Name: "config", VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: container.Name, + }, + }, + }, + }, + }, + ServiceAccountName: serviceAccount.Name, + } + + var rsyslogConf string + switch { + case protocol == corev1.ProtocolUDP: + rsyslogConf = udpSyslogConf + + default: + rsyslogConf = tcpSyslogConf + } + + config := k8shandler.NewConfigMap(container.Name, OpenshiftLoggingNS, map[string]string{ + "rsyslog.conf": rsyslogConf, + }) + config, err = tc.KubeClient.Core().ConfigMaps(OpenshiftLoggingNS).Create(config) + if err != nil { + return nil, err + } + tc.AddCleanup(func() error { + return tc.KubeClient.Core().ConfigMaps(OpenshiftLoggingNS).Delete(config.Name, nil) + }) + + syslogDeployment := k8shandler.NewDeployment( + container.Name, + OpenshiftLoggingNS, + container.Name, + serviceAccount.Name, + podSpec, + ) + + syslogDeployment, err = tc.KubeClient.Apps().Deployments(OpenshiftLoggingNS).Create(syslogDeployment) + if err != nil { + return nil, err + } + service := k8shandler.NewService( + serviceAccount.Name, + OpenshiftLoggingNS, + serviceAccount.Name, + []corev1.ServicePort{ + { + Protocol: protocol, + Port: 24224, + }, + }, + ) + tc.AddCleanup(func() error { + var zerograce int64 + deleteopts := metav1.DeleteOptions{ + GracePeriodSeconds: &zerograce, + } + return tc.KubeClient.AppsV1().Deployments(OpenshiftLoggingNS).Delete(syslogDeployment.Name, &deleteopts) + }) + service, err = tc.KubeClient.Core().Services(OpenshiftLoggingNS).Create(service) + if err != nil { + return nil, err + } + tc.AddCleanup(func() 
error { + return tc.KubeClient.Core().Services(OpenshiftLoggingNS).Delete(service.Name, nil) + }) + logStore.deployment = syslogDeployment + tc.LogStore = logStore + return syslogDeployment, tc.waitForDeployment(OpenshiftLoggingNS, syslogDeployment.Name, defaultRetryInterval, defaultTimeout) +} From 9a353f9cdb5a7ab15c5f542a9e20e9230c22df36 Mon Sep 17 00:00:00 2001 From: Jeff Cantrill Date: Fri, 14 Feb 2020 15:36:28 -0500 Subject: [PATCH 04/21] Bug 1789076: Evaluate for ipv6 address --- files/fluentd/run.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/fluentd/run.sh b/files/fluentd/run.sh index 1746894362..379cb72085 100644 --- a/files/fluentd/run.sh +++ b/files/fluentd/run.sh @@ -40,7 +40,7 @@ if [ -z "${JOURNAL_SOURCE:-}" ] ; then fi IPADDR4=${NODE_IPV4:-$( /usr/sbin/ip -4 addr show dev eth0 | grep inet | sed -e "s/[ \t]*inet \([0-9.]*\).*/\1/" )} -IPADDR6="" # So as to omit "ipaddr6" field from logs. +IPADDR6=${NODE_IPV6:-$(/usr/sbin/ip -6 addr show dev eth0 | grep inet | sed -e "s/[ \t]*inet6 \([a-z0-9::]*\).*/\1/" )} export IPADDR4 IPADDR6 From ea0132a095bf31c3c7f4198e8a2c27e1d34df88b Mon Sep 17 00:00:00 2001 From: Jeff Cantrill Date: Tue, 18 Feb 2020 09:57:12 -0500 Subject: [PATCH 05/21] Bug 1804166: Update minKubeVersion for 4.4 --- manifests/4.4/cluster-logging.v4.4.0.clusterserviceversion.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/manifests/4.4/cluster-logging.v4.4.0.clusterserviceversion.yaml b/manifests/4.4/cluster-logging.v4.4.0.clusterserviceversion.yaml index f0fc22b70b..9f6ce7256e 100644 --- a/manifests/4.4/cluster-logging.v4.4.0.clusterserviceversion.yaml +++ b/manifests/4.4/cluster-logging.v4.4.0.clusterserviceversion.yaml @@ -98,7 +98,7 @@ spec: # The version value is substituted by the ART pipeline version: 4.4.0 displayName: Cluster Logging - minKubeVersion: 1.14.0 + minKubeVersion: 1.17.1 description: | # Cluster Logging The Cluster Logging Operator orchestrates and manages the aggregated logging stack as a cluster-wide service. 
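For reference, the UDP path of the receiver added above can be exercised the same way as the TCP case. A minimal sketch (not part of the patch series), assuming the same suite setup and a legacy "syslog" configmap adjusted for the UDP flavor of the fluentd remote-syslog plugin:

	if _, err := e2e.DeploySyslogReceiver(corev1.ProtocolUDP); err != nil {
		Fail(fmt.Sprintf("Unable to deploy syslog receiver: %v", err))
	}
	// The receiver's rsyslog instance (udpSyslogConf) writes everything it
	// receives to /var/log/infra.log, so the same assertion applies.
	Expect(e2e.LogStore.HasInfraStructureLogs(helpers.DefaultWaitForLogsTimeout)).To(BeTrue(), "Expected to find stored infrastructure logs")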
From 26ba72a64a942feaedb4cb11e8f28292ab5234c9 Mon Sep 17 00:00:00 2001
From: Jeff Cantrill
Date: Wed, 19 Feb 2020 15:26:23 -0500
Subject: [PATCH 06/21] remove explicit version dependency

---
 Makefile | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)

diff --git a/Makefile b/Makefile
index 1c5a99d69f..b3b47ed27c 100644
--- a/Makefile
+++ b/Makefile
@@ -16,8 +16,7 @@ TARGET=$(TARGET_DIR)/bin/$(APP_NAME)
 IMAGE_TAG?=quay.io/openshift/origin-$(APP_NAME):latest
 export IMAGE_TAG
 MAIN_PKG=cmd/manager/main.go
-export OCP_VERSION?=4.4
-IMAGE_CLUSTER_LOGGING_OPERATOR?=registry.svc.ci.openshift.org/origin/$(VERSION):cluster-logging-operator
+export OCP_VERSION?=$(shell basename $(shell find manifests/ -maxdepth 1 -not -name manifests -type d))
 export CSV_FILE=$(CURPATH)/manifests/$(OCP_VERSION)/cluster-logging.v$(OCP_VERSION).0.clusterserviceversion.yaml
 export NAMESPACE?=openshift-logging
 export EO_CSV_FILE=$(CURPATH)/vendor/github.com/openshift/elasticsearch-operator/manifests/$(OCP_VERSION)/elasticsearch-operator.v$(OCP_VERSION).0.clusterserviceversion.yaml
@@ -68,10 +67,10 @@ build: fmt
 	@GOPATH=$(BUILD_GOPATH) $(GOBUILD) $(LDFLAGS) -o $(TARGET) $(MAIN_PKG)
 
 run:
-	ELASTICSEARCH_IMAGE=quay.io/openshift/origin-logging-elasticsearch5:latest \
+	ELASTICSEARCH_IMAGE=quay.io/openshift/origin-logging-elasticsearch6:latest \
 	FLUENTD_IMAGE=$(FLUENTD_IMAGE) \
-	KIBANA_IMAGE=quay.io/openshift/origin-logging-kibana5:latest \
-	CURATOR_IMAGE=quay.io/openshift/origin-logging-curator5:latest \
+	KIBANA_IMAGE=quay.io/openshift/origin-logging-kibana6:latest \
+	CURATOR_IMAGE=quay.io/openshift/origin-logging-curator6:latest \
 	OAUTH_PROXY_IMAGE=quay.io/openshift/origin-oauth-proxy:latest \
 	PROMTAIL_IMAGE=quay.io/openshift/origin-promtail:latest \
 	OPERATOR_NAME=cluster-logging-operator \
@@ -106,10 +105,10 @@ deploy-image: image
 	hack/deploy-image.sh
 
 deploy: deploy-image deploy-elasticsearch-operator
-	IMAGE_CLUSTER_LOGGING_OPERATOR=$(IMAGE_CLUSTER_LOGGING_OPERATOR) hack/deploy.sh
+	hack/deploy.sh
 
 deploy-no-build: deploy-elasticsearch-operator
-	IMAGE_CLUSTER_LOGGING_OPERATOR=$(IMAGE_CLUSTER_LOGGING_OPERATOR) hack/deploy.sh
+	hack/deploy.sh
 
 deploy-elasticsearch-operator:
 	hack/deploy-eo.sh

From 572371d0599ac41a1c164bada90ecf5048011023 Mon Sep 17 00:00:00 2001
From: Eric Wolinetz
Date: Mon, 17 Feb 2020 12:12:00 -0600
Subject: [PATCH 07/21] Updating to require trustedcabundle cm and restrict
 proxy triggers to update events

---
 pkg/constants/constants.go                    |   1 +
 pkg/controller/add_controllers.go             |   9 +-
 .../proxyconfig/proxyconfig_controller.go     |  30 +---
 .../trustedcabundle_controller.go             |  95 ++++++++++
 pkg/k8shandler/collection.go                  |  49 +++---
 pkg/k8shandler/fluentd.go                     |  98 +++++++----
 pkg/k8shandler/reconciler.go                  |  37 +++-
 pkg/k8shandler/secret.go                      |  32 ++++
 pkg/k8shandler/trustedcabundle.go             |   9 +-
 pkg/k8shandler/visualization.go               | 162 +++++++++++-------
 10 files changed, 389 insertions(+), 133 deletions(-)
 create mode 100644 pkg/controller/trustedcabundle/trustedcabundle_controller.go

diff --git a/pkg/constants/constants.go b/pkg/constants/constants.go
index a2083dab6a..c75da1d704 100644
--- a/pkg/constants/constants.go
+++ b/pkg/constants/constants.go
@@ -10,6 +10,7 @@ const (
 	TrustedCABundleMountFile = "tls-ca-bundle.pem"
 	TrustedCABundleMountDir  = "/etc/pki/ca-trust/extracted/pem/"
 	TrustedCABundleHashName  = "logging.openshift.io/hash"
+	SecretHashPrefix         = "logging.openshift.io/"
 	FluentdTrustedCAName     = "fluentd-trusted-ca-bundle"
 	KibanaTrustedCAName      = "kibana-trusted-ca-bundle"
 	// internal elasticsearch FQDN to 
prevent to connect to the global proxy diff --git a/pkg/controller/add_controllers.go b/pkg/controller/add_controllers.go index 82fbe15f3f..0ab33c9eeb 100644 --- a/pkg/controller/add_controllers.go +++ b/pkg/controller/add_controllers.go @@ -6,9 +6,16 @@ import ( "github.com/openshift/cluster-logging-operator/pkg/controller/forwarding" "github.com/openshift/cluster-logging-operator/pkg/controller/kibanasecret" "github.com/openshift/cluster-logging-operator/pkg/controller/proxyconfig" + "github.com/openshift/cluster-logging-operator/pkg/controller/trustedcabundle" ) func init() { // AddToManagerFuncs is a list of functions to create controllers and add them to a manager. - AddToManagerFuncs = append(AddToManagerFuncs, clusterlogging.Add, forwarding.Add, collector.Add, proxyconfig.Add, kibanasecret.Add) + AddToManagerFuncs = append(AddToManagerFuncs, + clusterlogging.Add, + forwarding.Add, + collector.Add, + proxyconfig.Add, + kibanasecret.Add, + trustedcabundle.Add) } diff --git a/pkg/controller/proxyconfig/proxyconfig_controller.go b/pkg/controller/proxyconfig/proxyconfig_controller.go index 99defccb7a..739944c5d1 100644 --- a/pkg/controller/proxyconfig/proxyconfig_controller.go +++ b/pkg/controller/proxyconfig/proxyconfig_controller.go @@ -7,10 +7,7 @@ import ( configv1 "github.com/openshift/api/config/v1" "github.com/openshift/cluster-logging-operator/pkg/constants" "github.com/openshift/cluster-logging-operator/pkg/k8shandler" - "github.com/openshift/cluster-logging-operator/pkg/utils" - corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" @@ -51,19 +48,16 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error { return err } - // Watch for changes to the additional trust bundle configmap in "openshift-logging". + // Watch for updates only pred := predicate.Funcs{ - UpdateFunc: func(e event.UpdateEvent) bool { return handleConfigMap(e.MetaNew) }, - DeleteFunc: func(e event.DeleteEvent) bool { return handleConfigMap(e.Meta) }, - CreateFunc: func(e event.CreateEvent) bool { return handleConfigMap(e.Meta) }, - GenericFunc: func(e event.GenericEvent) bool { return handleConfigMap(e.Meta) }, - } - if err = c.Watch(&source.Kind{Type: &corev1.ConfigMap{}}, &handler.EnqueueRequestForObject{}, pred); err != nil { - return err + UpdateFunc: func(e event.UpdateEvent) bool { return true }, + DeleteFunc: func(e event.DeleteEvent) bool { return false }, + CreateFunc: func(e event.CreateEvent) bool { return true }, + GenericFunc: func(e event.GenericEvent) bool { return false }, } // Watch for changes to the proxy resource. - if err = c.Watch(&source.Kind{Type: &configv1.Proxy{}}, &handler.EnqueueRequestForObject{}); err != nil { + if err = c.Watch(&source.Kind{Type: &configv1.Proxy{}}, &handler.EnqueueRequestForObject{}, pred); err != nil { return err } @@ -80,14 +74,11 @@ type ReconcileProxyConfig struct { scheme *runtime.Scheme } -// Reconcile reads that state of the cluster for a cluster-scoped named "cluster" as well as -// trusted CA bundle configmap objects for the collector and the visualization resources. -// When the user configured and/or system certs are updated, the change is propagated to the -// configmap objects and this reconciler triggers to restart those pods. 
+// Reconcile reads the state of the cluster for a cluster-scoped resource named "cluster"
 func (r *ReconcileProxyConfig) Reconcile(request reconcile.Request) (reconcile.Result, error) {
 	proxyNamespacedName := types.NamespacedName{Name: constants.ProxyName}
 	proxyConfig := &configv1.Proxy{}
-	if request.NamespacedName == proxyNamespacedName || utils.ContainsString(constants.ReconcileForGlobalProxyList, request.Name) {
+	if request.NamespacedName == proxyNamespacedName {
 		if err := r.client.Get(context.TODO(), proxyNamespacedName, proxyConfig); err != nil {
 			if apierrors.IsNotFound(err) {
 				// Request object not found, could have been deleted after reconcile request.
@@ -108,8 +99,3 @@ func (r *ReconcileProxyConfig) Reconcile(request reconcile.Request) (reconcile.R
 	return reconcile.Result{}, nil
 }
-
-// handleConfigMap returns true if meta namespace is "openshift-logging".
-func handleConfigMap(meta metav1.Object) bool {
-	return meta.GetNamespace() == constants.OpenshiftNS && utils.ContainsString(constants.ReconcileForGlobalProxyList, meta.GetName())
-}
diff --git a/pkg/controller/trustedcabundle/trustedcabundle_controller.go b/pkg/controller/trustedcabundle/trustedcabundle_controller.go
new file mode 100644
index 0000000000..604baba9d6
--- /dev/null
+++ b/pkg/controller/trustedcabundle/trustedcabundle_controller.go
@@ -0,0 +1,95 @@
+package trustedcabundle
+
+import (
+	"time"
+
+	configv1 "github.com/openshift/api/config/v1"
+	"github.com/openshift/cluster-logging-operator/pkg/constants"
+	"github.com/openshift/cluster-logging-operator/pkg/k8shandler"
+	"github.com/openshift/cluster-logging-operator/pkg/utils"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller"
+	"sigs.k8s.io/controller-runtime/pkg/event"
+	"sigs.k8s.io/controller-runtime/pkg/handler"
+	"sigs.k8s.io/controller-runtime/pkg/manager"
+	"sigs.k8s.io/controller-runtime/pkg/predicate"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+	"sigs.k8s.io/controller-runtime/pkg/source"
+)
+
+var (
+	reconcilePeriod = 30 * time.Second
+	reconcileResult = reconcile.Result{RequeueAfter: reconcilePeriod}
+)
+
+// Add creates a new TrustedCABundle Controller and adds it to the Manager. The Manager will set fields on the Controller
+// and Start it when the Manager is Started.
+func Add(mgr manager.Manager) error {
+	return add(mgr, newReconciler(mgr))
+}
+
+// newReconciler returns a new reconcile.Reconciler
+func newReconciler(mgr manager.Manager) reconcile.Reconciler {
+	if err := configv1.Install(mgr.GetScheme()); err != nil {
+		return &ReconcileTrustedCABundle{}
+	}
+
+	return &ReconcileTrustedCABundle{client: mgr.GetClient(), scheme: mgr.GetScheme()}
+}
+
+// add adds a new Controller to mgr with r as the reconcile.Reconciler
+func add(mgr manager.Manager, r reconcile.Reconciler) error {
+	// Create a new controller
+	c, err := controller.New("trustedcabundle-controller", mgr, controller.Options{Reconciler: r})
+	if err != nil {
+		return err
+	}
+
+	// Watch for changes to the additional trust bundle configmap in "openshift-logging".
+	pred := predicate.Funcs{
+		UpdateFunc:  func(e event.UpdateEvent) bool { return handleConfigMap(e.MetaNew) },
+		DeleteFunc:  func(e event.DeleteEvent) bool { return false },
+		CreateFunc:  func(e event.CreateEvent) bool { return handleConfigMap(e.Meta) },
+		GenericFunc: func(e event.GenericEvent) bool { return false },
+	}
+	if err = c.Watch(&source.Kind{Type: &corev1.ConfigMap{}}, &handler.EnqueueRequestForObject{}, pred); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+var _ reconcile.Reconciler = &ReconcileTrustedCABundle{}
+
+// ReconcileTrustedCABundle reconciles the trusted CA bundle configmap objects
+type ReconcileTrustedCABundle struct {
+	// This client, initialized using mgr.Client() above, is a split client
+	// that reads objects from the cache and writes to the apiserver
+	client client.Client
+	scheme *runtime.Scheme
+}
+
+// Reconcile reads the state of the trusted CA bundle configmap objects for the
+// collector and the visualization resources.
+// When the user configured and/or system certs are updated, the pods are triggered to restart.
+func (r *ReconcileTrustedCABundle) Reconcile(request reconcile.Request) (reconcile.Result, error) {
+
+	// do one for fluentd and one for kibana separately...
+	if utils.ContainsString(constants.ReconcileForGlobalProxyList, request.Name) {
+
+		if err := k8shandler.ReconcileForTrustedCABundle(request.Name, r.client); err != nil {
+			// Failed to reconcile - requeuing.
+			return reconcileResult, err
+		}
+	}
+
+	return reconcile.Result{}, nil
+}
+
+// handleConfigMap returns true if meta namespace is "openshift-logging".
+func handleConfigMap(meta metav1.Object) bool {
+	return meta.GetNamespace() == constants.OpenshiftNS && utils.ContainsString(constants.ReconcileForGlobalProxyList, meta.GetName())
+}
diff --git a/pkg/k8shandler/collection.go b/pkg/k8shandler/collection.go
index 6ddc1ac748..b1a03a8457 100644
--- a/pkg/k8shandler/collection.go
+++ b/pkg/k8shandler/collection.go
@@ -88,26 +88,7 @@ func (clusterRequest *ClusterLoggingRequest) CreateOrUpdateCollection(proxyConfi
 		return
 	}

-	fluentdStatus, err := clusterRequest.getFluentdCollectorStatus()
-	if err != nil {
-		return fmt.Errorf("Failed to get status of Fluentd: %v", err)
-	}
-
-	printUpdateMessage := true
-	retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
-		if !compareFluentdCollectorStatus(fluentdStatus, cluster.Status.Collection.Logs.FluentdStatus) {
-			if printUpdateMessage {
-				logrus.Info("Updating status of Fluentd")
-				printUpdateMessage = false
-			}
-			cluster.Status.Collection.Logs.FluentdStatus = fluentdStatus
-			return clusterRequest.UpdateStatus(cluster)
-		}
-		return nil
-	})
-	if retryErr != nil {
-		return fmt.Errorf("Failed to update Cluster Logging Fluentd status: %v", retryErr)
-	}
+	clusterRequest.UpdateFluentdStatus()

 	if collectorServiceAccount != nil {

@@ -143,6 +124,34 @@
 	return nil
 }

+func (clusterRequest *ClusterLoggingRequest) UpdateFluentdStatus() (err error) {
+
+	cluster := clusterRequest.cluster
+
+	fluentdStatus, err := clusterRequest.getFluentdCollectorStatus()
+	if err != nil {
+		return fmt.Errorf("Failed to get status of Fluentd: %v", err)
+	}
+
+	printUpdateMessage := true
+	retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
+		if !compareFluentdCollectorStatus(fluentdStatus, cluster.Status.Collection.Logs.FluentdStatus) {
+			if printUpdateMessage {
+				logrus.Info("Updating status of Fluentd")
+				printUpdateMessage = false
+			}
+
cluster.Status.Collection.Logs.FluentdStatus = fluentdStatus + return clusterRequest.UpdateStatus(cluster) + } + return nil + }) + if retryErr != nil { + return fmt.Errorf("Failed to update Cluster Logging Fluentd status: %v", retryErr) + } + + return nil +} + func compareFluentdCollectorStatus(lhs, rhs logging.FluentdCollectorStatus) bool { if lhs.DaemonSet != rhs.DaemonSet { return false diff --git a/pkg/k8shandler/fluentd.go b/pkg/k8shandler/fluentd.go index b03061ab5b..2e6e38d983 100644 --- a/pkg/k8shandler/fluentd.go +++ b/pkg/k8shandler/fluentd.go @@ -380,7 +380,6 @@ func newFluentdPodSpec(cluster *logging.ClusterLogging, elasticsearchAppName str } if addTrustedCAVolume { - optional := true fluentdPodSpec.Volumes = append(fluentdPodSpec.Volumes, v1.Volume{ Name: constants.FluentdTrustedCAName, @@ -389,7 +388,6 @@ func newFluentdPodSpec(cluster *logging.ClusterLogging, elasticsearchAppName str LocalObjectReference: v1.LocalObjectReference{ Name: constants.FluentdTrustedCAName, }, - Optional: &optional, Items: []v1.KeyToPath{ { Key: constants.TrustedCABundleKey, @@ -413,20 +411,10 @@ func (clusterRequest *ClusterLoggingRequest) createOrUpdateFluentdDaemonset(pipe cluster := clusterRequest.cluster fluentdTrustBundle := &v1.ConfigMap{} - if proxyConfig != nil { - // Create or update cluster proxy trusted CA bundle. - err = clusterRequest.createOrUpdateTrustedCABundleConfigMap(constants.FluentdTrustedCAName) - if err != nil { - return - } - - // fluentd-trusted-ca-bundle - fluentdTrustBundleName := types.NamespacedName{Name: constants.FluentdTrustedCAName, Namespace: constants.OpenshiftNS} - if err := clusterRequest.client.Get(context.TODO(), fluentdTrustBundleName, fluentdTrustBundle); err != nil { - if !errors.IsNotFound(err) { - return err - } - } + // Create or update cluster proxy trusted CA bundle. + err = clusterRequest.createOrUpdateTrustedCABundleConfigMap(constants.FluentdTrustedCAName) + if err != nil { + return } fluentdPodSpec := newFluentdPodSpec(cluster, "elasticsearch", "elasticsearch", proxyConfig, fluentdTrustBundle, clusterRequest.ForwardingSpec) @@ -434,6 +422,13 @@ func (clusterRequest *ClusterLoggingRequest) createOrUpdateFluentdDaemonset(pipe fluentdDaemonset := NewDaemonSet("fluentd", cluster.Namespace, "fluentd", "fluentd", fluentdPodSpec) fluentdDaemonset.Spec.Template.Spec.Containers[0].Env = updateEnvVar(v1.EnvVar{Name: "FLUENT_CONF_HASH", Value: pipelineConfHash}, fluentdDaemonset.Spec.Template.Spec.Containers[0].Env) + annotations, err := clusterRequest.getFluentdAnnotations(fluentdDaemonset) + if err != nil { + return err + } + + fluentdDaemonset.Spec.Template.Annotations = annotations + uid := getServiceAccountLogCollectorUID() if len(uid) == 0 { // There's no uid for logcollector serviceaccount; setting ClusterLogging for the ownerReference. 
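// The rollout trigger introduced by this patch relies on stamping a
// deterministic hash of watched data (trusted CA bundle, secrets, generated
// config) into the pod template annotations, so any change produces a
// template diff and rolls the pods. Below is a minimal standalone sketch of
// that technique, assuming only the Go standard library; hashData and the
// sample bundle are illustrative names, not part of this patch.
package main

import (
	"crypto/md5"
	"fmt"
	"sort"
)

// hashData digests the map contents in sorted-key order; Go randomizes map
// iteration, so sorting keeps the hash stable across reconcile loops. MD5
// mirrors the utils.CalculateMD5Hash helper used by the patch.
func hashData(data map[string][]byte) string {
	keys := make([]string, 0, len(data))
	for k := range data {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	raw := []byte{}
	for _, k := range keys {
		raw = append(raw, data[k]...)
	}
	return fmt.Sprintf("%x", md5.Sum(raw))
}

func main() {
	bundle := map[string][]byte{
		"ca-bundle.crt": []byte("-----BEGIN CERTIFICATE-----..."),
	}

	// Mirrors the logging.openshift.io/hash annotation: a changed bundle
	// yields a new digest, hence a new pod template, hence a rollout.
	annotations := map[string]string{
		"logging.openshift.io/hash": hashData(bundle),
	}
	fmt.Println(annotations)
}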
@@ -450,7 +445,7 @@ func (clusterRequest *ClusterLoggingRequest) createOrUpdateFluentdDaemonset(pipe if clusterRequest.isManaged() { retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { - return clusterRequest.updateFluentdDaemonsetIfRequired(fluentdDaemonset, fluentdTrustBundle) + return clusterRequest.updateFluentdDaemonsetIfRequired(fluentdDaemonset) }) if retryErr != nil { return retryErr @@ -460,7 +455,7 @@ func (clusterRequest *ClusterLoggingRequest) createOrUpdateFluentdDaemonset(pipe return nil } -func (clusterRequest *ClusterLoggingRequest) updateFluentdDaemonsetIfRequired(desired *apps.DaemonSet, trustedCABundleCM *v1.ConfigMap) (err error) { +func (clusterRequest *ClusterLoggingRequest) updateFluentdDaemonsetIfRequired(desired *apps.DaemonSet) (err error) { logger.DebugObject("desired fluent update: %v", desired) current := &apps.DaemonSet{} @@ -477,18 +472,8 @@ func (clusterRequest *ClusterLoggingRequest) updateFluentdDaemonsetIfRequired(de desired, different := isDaemonsetDifferent(current, desired) // Check trustedCA certs have been updated or not by comparing the hash values in annotation. - newTrustedCAHashedValue, err := calcTrustedCAHashValue(trustedCABundleCM) - if err != nil { - return fmt.Errorf("unable to calculate trusted CA hash value. E: %s", err.Error()) - } - - trustedCAHashedValue, _ := current.Spec.Template.ObjectMeta.Annotations[constants.TrustedCABundleHashName] - if trustedCAHashedValue != newTrustedCAHashedValue { + if current.Spec.Template.ObjectMeta.Annotations[constants.TrustedCABundleHashName] != desired.Spec.Template.ObjectMeta.Annotations[constants.TrustedCABundleHashName] { different = true - if desired.Spec.Template.ObjectMeta.Annotations == nil { - desired.Spec.Template.ObjectMeta.Annotations = make(map[string]string) - } - desired.Spec.Template.ObjectMeta.Annotations[constants.TrustedCABundleHashName] = newTrustedCAHashedValue } if different { @@ -514,6 +499,61 @@ func (clusterRequest *ClusterLoggingRequest) updateFluentdDaemonsetIfRequired(de return nil } +func (clusterRequest *ClusterLoggingRequest) getFluentdAnnotations(daemonset *apps.DaemonSet) (map[string]string, error) { + + if daemonset.Spec.Template.ObjectMeta.Annotations == nil { + daemonset.Spec.Template.ObjectMeta.Annotations = make(map[string]string) + } + + annotations := daemonset.Spec.Template.ObjectMeta.Annotations + + fluentdTrustBundle := &v1.ConfigMap{} + fluentdTrustBundleName := types.NamespacedName{Name: constants.FluentdTrustedCAName, Namespace: constants.OpenshiftNS} + if err := clusterRequest.client.Get(context.TODO(), fluentdTrustBundleName, fluentdTrustBundle); err != nil { + if !errors.IsNotFound(err) { + return annotations, err + } + } + + if _, ok := fluentdTrustBundle.Data[constants.TrustedCABundleKey]; !ok { + return annotations, fmt.Errorf("%v does not yet contain expected key %v", fluentdTrustBundle.Name, constants.TrustedCABundleKey) + } + + trustedCAHashValue, err := calcTrustedCAHashValue(fluentdTrustBundle) + if err != nil { + return annotations, fmt.Errorf("unable to calculate trusted CA value. 
E: %s", err.Error()) + } + + if trustedCAHashValue == "" { + return annotations, fmt.Errorf("Did not receive hashvalue for trusted CA value") + } + + annotations[constants.TrustedCABundleHashName] = trustedCAHashValue + + return annotations, nil +} + +func (clusterRequest *ClusterLoggingRequest) RestartFluentd(proxyConfig *configv1.Proxy) (err error) { + + collectorConfig, err := clusterRequest.generateCollectorConfig() + if err != nil { + return err + } + + logger.Debugf("Generated collector config: %s", collectorConfig) + collectorConfHash, err := utils.CalculateMD5Hash(collectorConfig) + if err != nil { + logger.Errorf("unable to calculate MD5 hash. E: %s", err.Error()) + return + } + + if err = clusterRequest.createOrUpdateFluentdDaemonset(collectorConfHash, proxyConfig); err != nil { + return + } + + return clusterRequest.UpdateFluentdStatus() +} + //updateEnvar adds the value to the list or replaces it if it already existing func updateEnvVar(value v1.EnvVar, values []v1.EnvVar) []v1.EnvVar { found := false diff --git a/pkg/k8shandler/reconciler.go b/pkg/k8shandler/reconciler.go index 748835efc9..e63affea08 100644 --- a/pkg/k8shandler/reconciler.go +++ b/pkg/k8shandler/reconciler.go @@ -116,6 +116,39 @@ func ReconcileForGlobalProxy(proxyConfig *configv1.Proxy, requestClient client.C return nil } +func ReconcileForTrustedCABundle(requestName string, requestClient client.Client) (err error) { + clusterLoggingRequest := ClusterLoggingRequest{ + client: requestClient, + } + + clusterLogging := clusterLoggingRequest.getClusterLogging() + clusterLoggingRequest.cluster = clusterLogging + + if clusterLogging.Spec.ManagementState == logging.ManagementStateUnmanaged { + return nil + } + + forwarding := clusterLoggingRequest.getLogForwarding() + if forwarding != nil { + clusterLoggingRequest.ForwardingRequest = forwarding + clusterLoggingRequest.ForwardingSpec = forwarding.Spec + } + + proxyConfig := clusterLoggingRequest.getProxyConfig() + + // call for Kibana to restart itself + if requestName == constants.KibanaTrustedCAName { + return clusterLoggingRequest.RestartKibana(proxyConfig) + } + + // call for Fluentd to restart itself + if requestName == constants.FluentdTrustedCAName { + return clusterLoggingRequest.RestartFluentd(proxyConfig) + } + + return nil +} + func ReconcileForKibanaSecret(requestClient client.Client) (err error) { clusterLoggingRequest := ClusterLoggingRequest{ @@ -129,8 +162,10 @@ func ReconcileForKibanaSecret(requestClient client.Client) (err error) { return nil } + proxyConfig := clusterLoggingRequest.getProxyConfig() + // call for Kibana to restart itself (e.g. 
delete its pods) - return clusterLoggingRequest.RestartKibana() + return clusterLoggingRequest.RestartKibana(proxyConfig) } func (clusterRequest *ClusterLoggingRequest) getClusterLogging() *logging.ClusterLogging { diff --git a/pkg/k8shandler/secret.go b/pkg/k8shandler/secret.go index 7a160c98e2..64fc494b36 100644 --- a/pkg/k8shandler/secret.go +++ b/pkg/k8shandler/secret.go @@ -3,7 +3,9 @@ package k8shandler import ( "fmt" "reflect" + "sort" + "github.com/openshift/cluster-logging-operator/pkg/utils" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/client-go/util/retry" @@ -92,3 +94,33 @@ func (clusterRequest *ClusterLoggingRequest) RemoveSecret(secretName string) err return nil } + +func calcSecretHashValue(secret *core.Secret) (string, error) { + hashValue := "" + var err error + + if secret == nil { + return hashValue, nil + } + + hashKeys := []string{} + rawbytes := []byte{} + + // we just want the keys here to sort them for consistently calculated hashes + for key := range secret.Data { + hashKeys = append(hashKeys, key) + } + + sort.Strings(hashKeys) + + for _, key := range hashKeys { + rawbytes = append(rawbytes, secret.Data[key]...) + } + + hashValue, err = utils.CalculateMD5Hash(string(rawbytes)) + if err != nil { + return "", err + } + + return hashValue, nil +} diff --git a/pkg/k8shandler/trustedcabundle.go b/pkg/k8shandler/trustedcabundle.go index c78f5ad403..de9c30b7d0 100644 --- a/pkg/k8shandler/trustedcabundle.go +++ b/pkg/k8shandler/trustedcabundle.go @@ -1,6 +1,8 @@ package k8shandler import ( + "fmt" + "github.com/openshift/cluster-logging-operator/pkg/constants" "github.com/openshift/cluster-logging-operator/pkg/utils" "github.com/sirupsen/logrus" @@ -42,7 +44,7 @@ func hasTrustedCABundle(configMap *core.ConfigMap) bool { } func calcTrustedCAHashValue(configMap *core.ConfigMap) (string, error) { - hashValue := "0" + hashValue := "" var err error if configMap == nil { @@ -55,5 +57,10 @@ func calcTrustedCAHashValue(configMap *core.ConfigMap) (string, error) { return "", err } } + + if !ok { + return "", fmt.Errorf("Expected key %v does not exist in %v", constants.TrustedCABundleKey, configMap.Name) + } + return hashValue, nil } diff --git a/pkg/k8shandler/visualization.go b/pkg/k8shandler/visualization.go index 9b4d71185b..eb2f313e40 100644 --- a/pkg/k8shandler/visualization.go +++ b/pkg/k8shandler/visualization.go @@ -15,8 +15,8 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/util/retry" - consolev1 "github.com/openshift/api/console/v1" configv1 "github.com/openshift/api/config/v1" + consolev1 "github.com/openshift/api/console/v1" logging "github.com/openshift/cluster-logging-operator/pkg/apis/logging/v1" apps "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" @@ -24,8 +24,8 @@ import ( ) const ( - kibanaServiceAccountName = "kibana" - kibanaOAuthRedirectReference = "{\"kind\":\"OAuthRedirectReference\",\"apiVersion\":\"v1\",\"reference\":{\"kind\":\"Route\",\"name\":\"kibana\"}}" + kibanaServiceAccountName = "kibana" + kibanaOAuthRedirectReference = "{\"kind\":\"OAuthRedirectReference\",\"apiVersion\":\"v1\",\"reference\":{\"kind\":\"Route\",\"name\":\"kibana\"}}" // The following strings are turned into JavaScript RegExps. Online tool to test them: https://regex101.com/ nodesAndContainersNamespaceFilter = "^(openshift-.*|kube-.*|openshift$|kube$|default$)" appsNamespaceFilter = "^((?!" 
+ nodesAndContainersNamespaceFilter + ").)*$" // ^((?!^(openshift-.*|kube-.*|openshift$|kube$|default$)).)*$ @@ -79,28 +79,35 @@ func (clusterRequest *ClusterLoggingRequest) CreateOrUpdateVisualization(proxyCo return } - kibanaStatus, err := clusterRequest.getKibanaStatus() - cluster := clusterRequest.cluster + clusterRequest.UpdateKibanaStatus() + } - if err != nil { - return fmt.Errorf("Failed to get Kibana status for %q: %v", cluster.Name, err) - } + return nil +} - printUpdateMessage := true - retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { - if !compareKibanaStatus(kibanaStatus, cluster.Status.Visualization.KibanaStatus) { - if printUpdateMessage { - logrus.Infof("Updating status of Kibana") - printUpdateMessage = false - } - cluster.Status.Visualization.KibanaStatus = kibanaStatus - return clusterRequest.UpdateStatus(cluster) +func (clusterRequest *ClusterLoggingRequest) UpdateKibanaStatus() (err error) { + + kibanaStatus, err := clusterRequest.getKibanaStatus() + cluster := clusterRequest.cluster + + if err != nil { + return fmt.Errorf("Failed to get Kibana status for %q: %v", cluster.Name, err) + } + + printUpdateMessage := true + retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { + if !compareKibanaStatus(kibanaStatus, cluster.Status.Visualization.KibanaStatus) { + if printUpdateMessage { + logrus.Infof("Updating status of Kibana") + printUpdateMessage = false } - return nil - }) - if retryErr != nil { - return fmt.Errorf("Failed to update Kibana status for %q: %v", cluster.Name, retryErr) + cluster.Status.Visualization.KibanaStatus = kibanaStatus + return clusterRequest.UpdateStatus(cluster) } + return nil + }) + if retryErr != nil { + return fmt.Errorf("Failed to update Kibana status for %q: %v", cluster.Name, retryErr) } return nil @@ -157,23 +164,13 @@ func compareKibanaStatus(lhs, rhs []logging.KibanaStatus) bool { return true } -func (clusterRequest *ClusterLoggingRequest) RestartKibana() (err error) { +func (clusterRequest *ClusterLoggingRequest) RestartKibana(proxyConfig *configv1.Proxy) (err error) { - // get kibana pods - kibanaPods, err := clusterRequest.GetPodList( - map[string]string{ - "component": "kibana", - }) - - // delete kibana pods - for _, pod := range kibanaPods.Items { - err := clusterRequest.Delete(&pod) - if err != nil { - return err - } + if err = clusterRequest.createOrUpdateKibanaDeployment(proxyConfig); err != nil { + return } - return nil + return clusterRequest.UpdateKibanaStatus() } func (clusterRequest *ClusterLoggingRequest) removeKibana() (err error) { @@ -234,19 +231,9 @@ func (clusterRequest *ClusterLoggingRequest) createOrUpdateKibanaDeployment(prox kibanaTrustBundle := &v1.ConfigMap{} // Create cluster proxy trusted CA bundle. 
- if proxyConfig != nil { - err = clusterRequest.createOrUpdateTrustedCABundleConfigMap(constants.KibanaTrustedCAName) - if err != nil { - return - } - - // kibana-trusted-ca-bundle - kibanaTrustBundleName := types.NamespacedName{Name: constants.KibanaTrustedCAName, Namespace: constants.OpenshiftNS} - if err := clusterRequest.client.Get(context.TODO(), kibanaTrustBundleName, kibanaTrustBundle); err != nil { - if !errors.IsNotFound(err) { - return err - } - } + err = clusterRequest.createOrUpdateTrustedCABundleConfigMap(constants.KibanaTrustedCAName) + if err != nil { + return } kibanaPodSpec := newKibanaPodSpec(clusterRequest.cluster, "kibana", "elasticsearch.openshift-logging.svc.cluster.local", proxyConfig, kibanaTrustBundle) @@ -259,6 +246,14 @@ func (clusterRequest *ClusterLoggingRequest) createOrUpdateKibanaDeployment(prox ) kibanaDeployment.Spec.Replicas = &clusterRequest.cluster.Spec.Visualization.KibanaSpec.Replicas + // if we don't have the hash values we shouldn't start/create + annotations, err := clusterRequest.getKibanaAnnotations(kibanaDeployment) + if err != nil { + return err + } + + kibanaDeployment.Spec.Template.ObjectMeta.Annotations = annotations + utils.AddOwnerRefToObject(kibanaDeployment, utils.AsOwner(clusterRequest.cluster)) err = clusterRequest.Create(kibanaDeployment) @@ -283,17 +278,17 @@ func (clusterRequest *ClusterLoggingRequest) createOrUpdateKibanaDeployment(prox current, different := isDeploymentDifferent(current, kibanaDeployment) // Check trustedCA certs have been updated or not by comparing the hash values in annotation. - newTrustedCAHashedValue, err := calcTrustedCAHashValue(kibanaTrustBundle) - if err != nil { - return fmt.Errorf("unable to calculate trusted CA value. E: %s", err.Error()) - } - trustedCAHashedValue, _ := current.Spec.Template.ObjectMeta.Annotations[constants.TrustedCABundleHashName] - if trustedCAHashedValue != newTrustedCAHashedValue { + if current.Spec.Template.ObjectMeta.Annotations[constants.TrustedCABundleHashName] != kibanaDeployment.Spec.Template.ObjectMeta.Annotations[constants.TrustedCABundleHashName] { different = true - if kibanaDeployment.Spec.Template.ObjectMeta.Annotations == nil { - kibanaDeployment.Spec.Template.ObjectMeta.Annotations = make(map[string]string) + } + + // Check secret hash has been updated or not + for _, secretName := range []string{"kibana", "kibana-proxy"} { + + hashKey := fmt.Sprintf("%s%s", constants.SecretHashPrefix, secretName) + if kibanaDeployment.Spec.Template.ObjectMeta.Annotations[hashKey] != current.Spec.Template.ObjectMeta.Annotations[hashKey] { + different = true } - kibanaDeployment.Spec.Template.ObjectMeta.Annotations[constants.TrustedCABundleHashName] = newTrustedCAHashedValue } if different { @@ -307,6 +302,57 @@ func (clusterRequest *ClusterLoggingRequest) createOrUpdateKibanaDeployment(prox return nil } +func (clusterRequest *ClusterLoggingRequest) getKibanaAnnotations(deployment *apps.Deployment) (map[string]string, error) { + + if deployment.Spec.Template.ObjectMeta.Annotations == nil { + deployment.Spec.Template.ObjectMeta.Annotations = make(map[string]string) + } + + annotations := deployment.Spec.Template.ObjectMeta.Annotations + + kibanaTrustBundle := &v1.ConfigMap{} + kibanaTrustBundleName := types.NamespacedName{Name: constants.KibanaTrustedCAName, Namespace: constants.OpenshiftNS} + if err := clusterRequest.client.Get(context.TODO(), kibanaTrustBundleName, kibanaTrustBundle); err != nil { + if !errors.IsNotFound(err) { + return annotations, err + } + } + + if _, ok := 
kibanaTrustBundle.Data[constants.TrustedCABundleKey]; !ok { + return annotations, fmt.Errorf("%v does not yet contain expected key %v", kibanaTrustBundle.Name, constants.TrustedCABundleKey) + } + + trustedCAHashValue, err := calcTrustedCAHashValue(kibanaTrustBundle) + if err != nil { + return annotations, fmt.Errorf("unable to calculate trusted CA value. E: %s", err.Error()) + } + + if trustedCAHashValue == "" { + return annotations, fmt.Errorf("Did not receive hashvalue for trusted CA value") + } + + annotations[constants.TrustedCABundleHashName] = trustedCAHashValue + + // generate secret hash + for _, secretName := range []string{"kibana", "kibana-proxy"} { + + hashKey := fmt.Sprintf("%s%s", constants.SecretHashPrefix, secretName) + + secret, err := clusterRequest.GetSecret(secretName) + if err != nil { + return annotations, err + } + secretHashValue, err := calcSecretHashValue(secret) + if err != nil { + return annotations, err + } + + annotations[hashKey] = secretHashValue + } + + return annotations, nil +} + func isDeploymentDifferent(current *apps.Deployment, desired *apps.Deployment) (*apps.Deployment, bool) { different := false @@ -722,7 +768,6 @@ func newKibanaPodSpec(cluster *logging.ClusterLogging, kibanaName string, elasti ) if addTrustedCAVolume { - optional := true kibanaPodSpec.Volumes = append(kibanaPodSpec.Volumes, v1.Volume{ Name: constants.KibanaTrustedCAName, @@ -731,7 +776,6 @@ func newKibanaPodSpec(cluster *logging.ClusterLogging, kibanaName string, elasti LocalObjectReference: v1.LocalObjectReference{ Name: constants.KibanaTrustedCAName, }, - Optional: &optional, Items: []v1.KeyToPath{ { Key: constants.TrustedCABundleKey, From b0a2b8377bc13b6f515d2b1376a3955f402d3089 Mon Sep 17 00:00:00 2001 From: Jeff Cantrill Date: Fri, 21 Feb 2020 14:08:02 -0500 Subject: [PATCH 08/21] update references to es6 --- hack/testing/test-367-logforwarding.sh | 2 +- hack/testing/utils | 8 ++++---- .../4.4/cluster-logging.v4.4.0.clusterserviceversion.yaml | 4 ++-- manifests/4.4/image-references | 6 +++--- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/hack/testing/test-367-logforwarding.sh b/hack/testing/test-367-logforwarding.sh index 4a9fce2c63..02fb0c9aa6 100755 --- a/hack/testing/test-367-logforwarding.sh +++ b/hack/testing/test-367-logforwarding.sh @@ -58,7 +58,7 @@ for dir in $(ls -d $TEST_DIR); do if CLEANUP_CMD="$( cd $( dirname ${BASH_SOURCE[0]} ) >/dev/null 2>&1 && pwd )/../../test/e2e/logforwarding/cleanup.sh $artifact_dir $GENERATOR_NS" \ artifact_dir=$artifact_dir \ GENERATOR_NS=$GENERATOR_NS \ - ELASTICSEARCH_IMAGE=quay.io/openshift/origin-logging-elasticsearch5:latest \ + ELASTICSEARCH_IMAGE=quay.io/openshift/origin-logging-elasticsearch6:latest \ go test -count=1 -parallel=1 $dir | tee -a $artifact_dir/test.log ; then log::info "=======================================================" log::info "Logforwarding $dir passed" diff --git a/hack/testing/utils b/hack/testing/utils index 6af55c8ee4..a934cd938d 100644 --- a/hack/testing/utils +++ b/hack/testing/utils @@ -224,15 +224,15 @@ function deploy_clusterlogging_operator() { local es_img k_img c_img f_img op_img if [ -n "${IMAGE_FORMAT:-}" ] ; then IMAGE_CLUSTER_LOGGING_OPERATOR=$(sed -e "s,\${component},cluster-logging-operator," <(echo $IMAGE_FORMAT)) - es_img=${IMAGE_FORMAT/'${component}'/logging-elasticsearch5} - k_img=${IMAGE_FORMAT/'${component}'/logging-kibana5} + es_img=${IMAGE_FORMAT/'${component}'/logging-elasticsearch6} + k_img=${IMAGE_FORMAT/'${component}'/logging-kibana6} 
c_img=${IMAGE_FORMAT/'${component}'/logging-curator5} f_img=${IMAGE_FORMAT/'${component}'/logging-fluentd} op_img=${IMAGE_FORMAT/'${component}'/oauth-proxy} else IMAGE_CLUSTER_LOGGING_OPERATOR=${IMAGE_CLUSTER_LOGGING_OPERATOR:-registry.svc.ci.openshift.org/ocp/$version:cluster-logging-operator} - es_img=${IMAGE_ELASTICSEARCH_IMAGE:-registry.svc.ci.openshift.org/ocp/$version:logging-elasticsearch5} - k_img=${IMAGE_KIBANA_IMAGE:-registry.svc.ci.openshift.org/ocp/$version:logging-kibana5} + es_img=${IMAGE_ELASTICSEARCH_IMAGE:-registry.svc.ci.openshift.org/ocp/$version:logging-elasticsearch6} + k_img=${IMAGE_KIBANA_IMAGE:-registry.svc.ci.openshift.org/ocp/$version:logging-kibana6} c_img=${IMAGE_CURATOR_IMAGE:-registry.svc.ci.openshift.org/ocp/$version:logging-curator5} f_img=${IMAGE_FLUENTD_IMAGE:-registry.svc.ci.openshift.org/ocp/$version:logging-fluentd} op_img=${IMAGE_OAUTH_PROXY_IMAGE:-registry.svc.ci.openshift.org/ocp/$version:oauth-proxy} diff --git a/manifests/4.4/cluster-logging.v4.4.0.clusterserviceversion.yaml b/manifests/4.4/cluster-logging.v4.4.0.clusterserviceversion.yaml index 7ad8333807..d3f5232a24 100644 --- a/manifests/4.4/cluster-logging.v4.4.0.clusterserviceversion.yaml +++ b/manifests/4.4/cluster-logging.v4.4.0.clusterserviceversion.yaml @@ -306,11 +306,11 @@ spec: - name: OPERATOR_NAME value: "cluster-logging-operator" - name: ELASTICSEARCH_IMAGE - value: "quay.io/openshift/origin-logging-elasticsearch5:latest" + value: "quay.io/openshift/origin-logging-elasticsearch6:latest" - name: FLUENTD_IMAGE value: "quay.io/openshift/origin-logging-fluentd:latest" - name: KIBANA_IMAGE - value: "quay.io/openshift/origin-logging-kibana5:latest" + value: "quay.io/openshift/origin-logging-kibana6:latest" - name: CURATOR_IMAGE value: "quay.io/openshift/origin-logging-curator5:latest" - name: OAUTH_PROXY_IMAGE diff --git a/manifests/4.4/image-references b/manifests/4.4/image-references index afd38dc8c3..f930f791d4 100644 --- a/manifests/4.4/image-references +++ b/manifests/4.4/image-references @@ -9,11 +9,11 @@ spec: - name: logging-elasticsearch5 from: kind: DockerImage - name: quay.io/openshift/origin-logging-elasticsearch5:latest - - name: logging-kibana5 + name: quay.io/openshift/origin-logging-elasticsearch6:latest + - name: logging-kibana6 from: kind: DockerImage - name: quay.io/openshift/origin-logging-kibana5:latest + name: quay.io/openshift/origin-logging-kibana6:latest - name: logging-curator5 from: kind: DockerImage From e692b94be2cc2b9e96457137219b686d45ff3223 Mon Sep 17 00:00:00 2001 From: Jeff Cantrill Date: Mon, 24 Feb 2020 09:17:05 -0500 Subject: [PATCH 09/21] fix up es image for defaults --- hack/common | 2 +- hack/testing/test-367-logforwarding.sh | 2 +- hack/testing/utils | 18 +++++++++++++++--- .../indexmanagement/index_management.go | 2 +- 4 files changed, 18 insertions(+), 6 deletions(-) diff --git a/hack/common b/hack/common index d303497159..d86e0cb58a 100644 --- a/hack/common +++ b/hack/common @@ -11,7 +11,7 @@ source "$repo_dir/hack/lib/init.sh" source "$repo_dir/hack/testing/utils" source "$repo_dir/hack/testing/assertions" -VERSION=${VERSION:-4.4} +VERSION=${VERSION:-$(basename $(find ${repo_dir}/../manifests -type d | sort -r | head -n 1))} ELASTICSEARCH_OP_REPO=${ELASTICSEARCH_OP_REPO:-${repo_dir}/../elasticsearch-operator} ADMIN_USER=${ADMIN_USER:-kubeadmin} diff --git a/hack/testing/test-367-logforwarding.sh b/hack/testing/test-367-logforwarding.sh index 02fb0c9aa6..51259ec56f 100755 --- a/hack/testing/test-367-logforwarding.sh +++ 
b/hack/testing/test-367-logforwarding.sh @@ -58,7 +58,7 @@ for dir in $(ls -d $TEST_DIR); do if CLEANUP_CMD="$( cd $( dirname ${BASH_SOURCE[0]} ) >/dev/null 2>&1 && pwd )/../../test/e2e/logforwarding/cleanup.sh $artifact_dir $GENERATOR_NS" \ artifact_dir=$artifact_dir \ GENERATOR_NS=$GENERATOR_NS \ - ELASTICSEARCH_IMAGE=quay.io/openshift/origin-logging-elasticsearch6:latest \ + ELASTICSEARCH_IMAGE="$(format_elasticsearch_image)" \ go test -count=1 -parallel=1 $dir | tee -a $artifact_dir/test.log ; then log::info "=======================================================" log::info "Logforwarding $dir passed" diff --git a/hack/testing/utils b/hack/testing/utils index a934cd938d..9d043c91bf 100644 --- a/hack/testing/utils +++ b/hack/testing/utils @@ -224,19 +224,18 @@ function deploy_clusterlogging_operator() { local es_img k_img c_img f_img op_img if [ -n "${IMAGE_FORMAT:-}" ] ; then IMAGE_CLUSTER_LOGGING_OPERATOR=$(sed -e "s,\${component},cluster-logging-operator," <(echo $IMAGE_FORMAT)) - es_img=${IMAGE_FORMAT/'${component}'/logging-elasticsearch6} k_img=${IMAGE_FORMAT/'${component}'/logging-kibana6} c_img=${IMAGE_FORMAT/'${component}'/logging-curator5} f_img=${IMAGE_FORMAT/'${component}'/logging-fluentd} op_img=${IMAGE_FORMAT/'${component}'/oauth-proxy} else IMAGE_CLUSTER_LOGGING_OPERATOR=${IMAGE_CLUSTER_LOGGING_OPERATOR:-registry.svc.ci.openshift.org/ocp/$version:cluster-logging-operator} - es_img=${IMAGE_ELASTICSEARCH_IMAGE:-registry.svc.ci.openshift.org/ocp/$version:logging-elasticsearch6} k_img=${IMAGE_KIBANA_IMAGE:-registry.svc.ci.openshift.org/ocp/$version:logging-kibana6} c_img=${IMAGE_CURATOR_IMAGE:-registry.svc.ci.openshift.org/ocp/$version:logging-curator5} f_img=${IMAGE_FLUENTD_IMAGE:-registry.svc.ci.openshift.org/ocp/$version:logging-fluentd} op_img=${IMAGE_OAUTH_PROXY_IMAGE:-registry.svc.ci.openshift.org/ocp/$version:oauth-proxy} fi + es_img="$(format_elasticsearch_image)" sed -e "/name: ELASTICSEARCH_IMAGE/,/value:/s,value:.*\$,value: ${es_img}," \ -e "/name: KIBANA_IMAGE/,/value:/s,value:.*\$,value: ${k_img}," \ -e "/name: CURATOR_IMAGE/,/value:/s,value:.*\$,value: ${c_img}," \ @@ -248,15 +247,28 @@ function deploy_clusterlogging_operator() { function deploy_elasticsearch_operator() { local manifest=${repo_dir}/vendor/github.com/openshift/elasticsearch-operator/manifests - local version=$(basename $(find $manifest -type d | sort -r | head -n 1)) if [ -n "${IMAGE_FORMAT:-}" ] ; then IMAGE_ELASTICSEARCH_OPERATOR=$(sed -e "s,\${component},elasticsearch-operator," <(echo $IMAGE_FORMAT)) else + local version=$(basename $(find $manifest -type d | sort -r | head -n 1)) IMAGE_ELASTICSEARCH_OPERATOR=${IMAGE_ELASTICSEARCH_OPERATOR:-registry.svc.ci.openshift.org/ocp/$version:elasticsearch-operator} fi + ELASTICSEARCH_IMAGE="$(format_elasticsearch_image)" GLOBAL=true deploy_operator "openshift-operators-redhat" "elasticsearch-operator" $manifest $IMAGE_ELASTICSEARCH_OPERATOR $((2 * $minute)) } +function format_elasticsearch_image() { + if [ -n "${IMAGE_FORMAT:-}" ] ; then + local es_img=${IMAGE_FORMAT/'${component}'/logging-elasticsearch6} + echo ${es_img} + else + local manifest=${repo_dir}/vendor/github.com/openshift/elasticsearch-operator/manifests + local version=$(basename $(find $manifest -type d | sort -r | head -n 1)) + ELASTICSEARCH_IMAGE=${ELASTICSEARCH_IMAGE:-registry.svc.ci.openshift.org/ocp/$version:logging-elasticsearch6} + echo $ELASTICSEARCH_IMAGE + fi +} + function deploy_operator() { local namespace=$1 local operatorName=$2 diff --git 
a/pkg/k8shandler/indexmanagement/index_management.go b/pkg/k8shandler/indexmanagement/index_management.go
index c3f21cdfe6..21935bc3d0 100644
--- a/pkg/k8shandler/indexmanagement/index_management.go
+++ b/pkg/k8shandler/indexmanagement/index_management.go
@@ -25,7 +25,7 @@ const (
 )

 var (
-	AliasesApp   = []string{"app, logs.app"}
+	AliasesApp   = []string{"app", "logs.app"}
 	AliasesInfra = []string{"infra", "logs.infra"}
 	AliasesAudit = []string{"infra.audit", "logs.audit"}

From 1ebefa9d54c479033136e74e96cdc54376b2a807 Mon Sep 17 00:00:00 2001
From: Vimal Kumar
Date: Tue, 25 Feb 2020 21:25:22 +0530
Subject: [PATCH 10/21] Updated log source type names to use `-` instead of `.`

OpenDistro security plugin parses role index patterns wrongly if it contains `.`, so using `-` instead.
Updated `logs.app` to `logs-app`
Updated `logs.infra` to `logs-infra`
Updated `logs.audit` to `logs-audit`

---
 .../4.4/cluster-logging.v4.4.0.clusterserviceversion.yaml | 4 ++--
 manifests/4.4/logforwardings.crd.yaml                     | 6 +++---
 pkg/apis/logging/v1/zz_generated.deepcopy.go              | 2 +-
 pkg/apis/logging/v1alpha1/forwarding_types.go             | 6 +++---
 pkg/apis/logging/v1alpha1/zz_generated.deepcopy.go        | 2 +-
 pkg/generators/forwarding/fluentd/fluent_conf_test.go     | 6 +++---
 pkg/generators/forwarding/fluentd/source_test.go          | 6 +++---
 pkg/generators/forwarding/fluentd/templates.go            | 2 +-
 pkg/k8shandler/consoleexternalloglink.go                  | 4 ++--
 pkg/k8shandler/visualization.go                           | 6 +++---
 10 files changed, 22 insertions(+), 22 deletions(-)

diff --git a/manifests/4.4/cluster-logging.v4.4.0.clusterserviceversion.yaml b/manifests/4.4/cluster-logging.v4.4.0.clusterserviceversion.yaml
index 9f6ce7256e..b73bd5f987 100644
--- a/manifests/4.4/cluster-logging.v4.4.0.clusterserviceversion.yaml
+++ b/manifests/4.4/cluster-logging.v4.4.0.clusterserviceversion.yaml
@@ -82,12 +82,12 @@ metadata:
             "pipelines": [
               {
                 "name": "clo-default-app-pipeline",
-                "inputSource": "logs.app",
+                "inputSource": "logs-app",
                 "outputRefs": ["clo-managaged-output-es"]
               },
               {
                 "name": "clo-default-infra-pipeline",
-                "inputSource": "logs.app",
+                "inputSource": "logs-app",
                 "outputRefs": ["clo-managaged-output-es"]
               }
             ]
diff --git a/manifests/4.4/logforwardings.crd.yaml b/manifests/4.4/logforwardings.crd.yaml
index 442c2bb23b..b4876cd4a1 100644
--- a/manifests/4.4/logforwardings.crd.yaml
+++ b/manifests/4.4/logforwardings.crd.yaml
@@ -64,9 +64,9 @@ spec:
                       description: The log source type
                       type: string
                       enum:
-                      - logs.app
-                      - logs.infra
-                      - logs.audit
+                      - logs-app
+                      - logs-infra
+                      - logs-audit
                     required:
                     - name
                     - source
diff --git a/pkg/apis/logging/v1/zz_generated.deepcopy.go b/pkg/apis/logging/v1/zz_generated.deepcopy.go
index 29091db5d2..89f89e3123 100644
--- a/pkg/apis/logging/v1/zz_generated.deepcopy.go
+++ b/pkg/apis/logging/v1/zz_generated.deepcopy.go
@@ -1,6 +1,6 @@
 // +build !ignore_autogenerated

-// Code generated by deepcopy-gen. DO NOT EDIT.
+// Code generated by operator-sdk. DO NOT EDIT.
package v1 diff --git a/pkg/apis/logging/v1alpha1/forwarding_types.go b/pkg/apis/logging/v1alpha1/forwarding_types.go index 76fc9e0531..21f8d46197 100644 --- a/pkg/apis/logging/v1alpha1/forwarding_types.go +++ b/pkg/apis/logging/v1alpha1/forwarding_types.go @@ -12,14 +12,14 @@ const ( LogForwardingKind string = "LogForwarding" //LogSourceTypeApp are container logs from non-infra structure containers - LogSourceTypeApp LogSourceType = "logs.app" + LogSourceTypeApp LogSourceType = "logs-app" //LogSourceTypeInfra are logs from infra structure containers or node logs - LogSourceTypeInfra LogSourceType = "logs.infra" + LogSourceTypeInfra LogSourceType = "logs-infra" //LogSourceTypeAudit are audit logs from the nodes and the k8s and // openshift apiservers - LogSourceTypeAudit LogSourceType = "logs.audit" + LogSourceTypeAudit LogSourceType = "logs-audit" ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/pkg/apis/logging/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/logging/v1alpha1/zz_generated.deepcopy.go index 535db466e7..c234e7c8be 100644 --- a/pkg/apis/logging/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/logging/v1alpha1/zz_generated.deepcopy.go @@ -1,6 +1,6 @@ // +build !ignore_autogenerated -// Code generated by deepcopy-gen. DO NOT EDIT. +// Code generated by operator-sdk. DO NOT EDIT. package v1alpha1 diff --git a/pkg/generators/forwarding/fluentd/fluent_conf_test.go b/pkg/generators/forwarding/fluentd/fluent_conf_test.go index 1a8ec190bc..9c7f3eed6c 100644 --- a/pkg/generators/forwarding/fluentd/fluent_conf_test.go +++ b/pkg/generators/forwarding/fluentd/fluent_conf_test.go @@ -73,7 +73,7 @@ var _ = Describe("Generating fluentd config", func() { } }) - It("should exclude source to pipeline labels when there are no pipelines for a given sourceType (e.g. only logs.app)", func() { + It("should exclude source to pipeline labels when there are no pipelines for a given sourceType (e.g. only logs-app)", func() { forwarding = &logging.ForwardingSpec{ Outputs: []logging.OutputSpec{ { @@ -367,7 +367,7 @@ var _ = Describe("Generating fluentd config", func() { - # Relabel specific sources (e.g. logs.apps) to multiple pipelines + # Relabel specific sources (e.g. logs-apps) to multiple pipelines - # Relabel specific sources (e.g. logs.apps) to multiple pipelines + # Relabel specific sources (e.g. logs-apps) to multiple pipelines -# Relabel specific sources (e.g. logs.apps) to multiple pipelines +# Relabel specific sources (e.g. logs-apps) to multiple pipelines {{- range .SourceToPipelineLabels }} {{ . 
}}
{{- end}}
diff --git a/pkg/k8shandler/consoleexternalloglink.go b/pkg/k8shandler/consoleexternalloglink.go
index 7fcdec5a76..71486b3463 100644
--- a/pkg/k8shandler/consoleexternalloglink.go
+++ b/pkg/k8shandler/consoleexternalloglink.go
@@ -26,8 +26,8 @@ func NewConsoleExternalLogLink(resourceName, namespace, consoleText, hrefTemplat
 			},
 		},
 		Spec: consolev1.ConsoleExternalLogLinkSpec{
-			Text:         consoleText,
-			HrefTemplate: hrefTemplate,
+			Text:            consoleText,
+			HrefTemplate:    hrefTemplate,
 			NamespaceFilter: namespaceFilter,
 		},
 	}
diff --git a/pkg/k8shandler/visualization.go b/pkg/k8shandler/visualization.go
index 9b4d71185b..4b5c226c46 100644
--- a/pkg/k8shandler/visualization.go
+++ b/pkg/k8shandler/visualization.go
@@ -15,8 +15,8 @@ import (
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/client-go/util/retry"

-	consolev1 "github.com/openshift/api/console/v1"
 	configv1 "github.com/openshift/api/config/v1"
+	consolev1 "github.com/openshift/api/console/v1"
 	logging "github.com/openshift/cluster-logging-operator/pkg/apis/logging/v1"
 	apps "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
@@ -24,8 +24,8 @@ )

 const (
-	kibanaServiceAccountName = "kibana"
-	kibanaOAuthRedirectReference = "{\"kind\":\"OAuthRedirectReference\",\"apiVersion\":\"v1\",\"reference\":{\"kind\":\"Route\",\"name\":\"kibana\"}}"
+	kibanaServiceAccountName     = "kibana"
+	kibanaOAuthRedirectReference = "{\"kind\":\"OAuthRedirectReference\",\"apiVersion\":\"v1\",\"reference\":{\"kind\":\"Route\",\"name\":\"kibana\"}}"
 	// The following strings are turned into JavaScript RegExps. Online tool to test them: https://regex101.com/
 	nodesAndContainersNamespaceFilter = "^(openshift-.*|kube-.*|openshift$|kube$|default$)"
 	appsNamespaceFilter               = "^((?!" + nodesAndContainersNamespaceFilter + ").)*$" // ^((?!^(openshift-.*|kube-.*|openshift$|kube$|default$)).)*$

From 83266bf00c13f73ddc01a5a2171968c43c0e528c Mon Sep 17 00:00:00 2001
From: Periklis Tsirakidis
Date: Wed, 19 Feb 2020 15:22:21 +0100
Subject: [PATCH 11/21] Bug 1803196: Move shared config map to openshift-config-managed NS

---
 Makefile                                      |   1 +
 hack/testing/assertions                      |  21 ++--
 .../test-010-deploy-via-olm-minimal.sh       |  25 +++--
 hack/testing/test-020-olm-upgrade.sh         |   8 +-
 hack/testing/test-367-logforwarding.sh       |   2 +
 .../test-999-fluentd-prometheus-metrics.sh   |   3 +
 hack/testing/utils                           |  22 ++++
 manifests/4.4/0110_clusterrolebindings.yaml  |   2 +-
 manifests/4.4/0200_roles.yaml                |  14 +++
 manifests/4.4/0210_rolebindings.yaml         |  13 +++
 pkg/k8shandler/configmap.go                  |  13 ++-
 pkg/k8shandler/consoleexternalloglink.go     |   4 ++--
 pkg/k8shandler/visualization.go              |  78 +++++++-------
 pkg/k8shandler/visualization_test.go         | 102 ++++++++++++++++++
 14 files changed, 247 insertions(+), 61 deletions(-)
 create mode 100644 manifests/4.4/0200_roles.yaml
 create mode 100644 manifests/4.4/0210_rolebindings.yaml

diff --git a/Makefile b/Makefile
index b3b47ed27c..d0b9a47747 100644
--- a/Makefile
+++ b/Makefile
@@ -19,6 +19,7 @@ MAIN_PKG=cmd/manager/main.go
 export OCP_VERSION?=$(shell basename $(shell find manifests/ -maxdepth 1 -not -name manifests -type d))
 export CSV_FILE=$(CURPATH)/manifests/$(OCP_VERSION)/cluster-logging.v$(OCP_VERSION).0.clusterserviceversion.yaml
 export NAMESPACE?=openshift-logging
+export MANAGED_CONFIG_NAMESPACE?=openshift-config-managed
 export EO_CSV_FILE=$(CURPATH)/vendor/github.com/openshift/elasticsearch-operator/manifests/$(OCP_VERSION)/elasticsearch-operator.v$(OCP_VERSION).0.clusterserviceversion.yaml

 FLUENTD_IMAGE?=quay.io/openshift/origin-logging-fluentd:latest
diff --git
a/hack/testing/assertions b/hack/testing/assertions index a6bae58f10..212c7fb55d 100644 --- a/hack/testing/assertions +++ b/hack/testing/assertions @@ -1,16 +1,21 @@ #!/bin/bash source "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )/utils" assert_resources_exist(){ - # verify deployments -- kibana, curator - try_until_success "oc -n $NAMESPACE get deployment kibana" ${TIMEOUT_MIN} + # verify deployments -- kibana, curator + try_until_success "oc -n $NAMESPACE get deployment kibana" "${TIMEOUT_MIN}" - # verify cron - try_until_success "oc -n $NAMESPACE get cronjob curator" ${TIMEOUT_MIN} + # verify cron + try_until_success "oc -n $NAMESPACE get cronjob curator" "${TIMEOUT_MIN}" - # verify DS - try_until_success "oc -n $NAMESPACE get ds fluentd" ${TIMEOUT_MIN} + # verify DS + try_until_success "oc -n $NAMESPACE get ds fluentd" "${TIMEOUT_MIN}" - # verify ER - try_until_success "oc -n $NAMESPACE get elasticsearch elasticsearch" ${TIMEOUT_MIN} + # verify ER + try_until_success "oc -n $NAMESPACE get elasticsearch elasticsearch" "${TIMEOUT_MIN}" } + +assert_kibana_shared_config_exist() { + # verify kibana shared config map + try_until_success "oc -n $MANAGED_CONFIG_NAMESPACE get configmap logging-shared-config" "${TIMEOUT_MIN}" +} diff --git a/hack/testing/test-010-deploy-via-olm-minimal.sh b/hack/testing/test-010-deploy-via-olm-minimal.sh index a7e729eafa..6d8b9a0c39 100755 --- a/hack/testing/test-010-deploy-via-olm-minimal.sh +++ b/hack/testing/test-010-deploy-via-olm-minimal.sh @@ -8,8 +8,9 @@ if [ "${DEBUG:-}" = "true" ]; then set -x fi -source "$(dirname "${BASH_SOURCE[0]}" )/../lib/init.sh" -source "$(dirname $0)/assertions" +source "$(dirname "${BASH_SOURCE[0]}")/../lib/init.sh" +source "$(dirname "${BASH_SOURCE[0]}")/assertions" +source "$(dirname "${BASH_SOURCE[0]}")/utils" os::test::junit::declare_suite_start "${BASH_SOURCE[0]}" @@ -29,6 +30,8 @@ cleanup(){ oc delete ns ${NAMESPACE} --wait=true --ignore-not-found oc delete crd elasticsearches.logging.openshift.io --wait=false --ignore-not-found os::cmd::try_until_failure "oc get project ${NAMESPACE}" "$((1 * $minute))" + + cleanup_olm_catalog_unsupported_resources os::cleanup::all "${return_code}" @@ -49,6 +52,8 @@ oc create ns ${NAMESPACE} || : os::cmd::expect_success "oc create -f ${repo_dir}/vendor/github.com/openshift/elasticsearch-operator/manifests/${version}/elasticsearches.crd.yaml" +# Create static cluster roles and rolebindings +deploy_olm_catalog_unsupported_resources os::log::info "Deploying operator from ${manifest}" NAMESPACE=${NAMESPACE} \ @@ -66,10 +71,15 @@ fi TIMEOUT_MIN=$((2 * $minute)) -##verify metrics rbac -# extra resources not support for ConfigMap based catelogs for now. -#os::cmd::expect_success "oc get clusterrole clusterlogging-collector-metrics" -#os::cmd::expect_success "oc get clusterrolebinding clusterlogging-collector-metrics" +# verify metrics rbac +# extra resources not support for ConfigMap based catalogs for now. +os::cmd::expect_success "oc get clusterrole clusterlogging-collector-metrics" +os::cmd::expect_success "oc get clusterrolebinding clusterlogging-collector-metrics" + +# verify shared config rbac +# extra resources not support for ConfigMap based catalogs for now. 
+os::cmd::expect_success "oc -n ${MANAGED_CONFIG_NAMESPACE} get role clusterlogging-shared-config" +os::cmd::expect_success "oc -n ${MANAGED_CONFIG_NAMESPACE} get rolebinding clusterlogging-shared-config" # wait for operator to be ready os::cmd::try_until_text "oc -n $NAMESPACE get deployment cluster-logging-operator -o jsonpath={.status.availableReplicas} --ignore-not-found" "1" ${TIMEOUT_MIN} @@ -79,3 +89,6 @@ os::cmd::expect_success "oc -n $NAMESPACE create -f ${repo_dir}/hack/cr.yaml" # assert deployment assert_resources_exist + +# assert kibana shared config +assert_kibana_shared_config_exist diff --git a/hack/testing/test-020-olm-upgrade.sh b/hack/testing/test-020-olm-upgrade.sh index dfa87f363b..e5f521d47b 100755 --- a/hack/testing/test-020-olm-upgrade.sh +++ b/hack/testing/test-020-olm-upgrade.sh @@ -48,7 +48,6 @@ cleanup(){ oc -n openshift-operator-lifecycle-manager logs --since=$runtime deployment/olm-operator > $ARTIFACT_DIR/olm-operator.logs 2>&1 ||: oc describe -n ${NAMESPACE} deployment/cluster-logging-operator > $ARTIFACT_DIR/cluster-logging-operator.describe.after_update 2>&1 ||: - for item in "crd/elasticsearches.logging.openshift.io" "crd/clusterloggings.logging.openshift.io" "ns/openshift-logging" "ns/openshift-operators-redhat"; do oc delete $item --wait=true --ignore-not-found --force --grace-period=0 done @@ -56,6 +55,8 @@ cleanup(){ try_until_text "oc get ${item} --ignore-not-found" "" "$((1 * $minute))" done + cleanup_olm_catalog_unsupported_resources + exit ${return_code} } trap cleanup exit @@ -92,6 +93,7 @@ assert_resources_exist oc describe -n ${NAMESPACE} deployment/cluster-logging-operator > $ARTIFACT_DIR/cluster-logging-operator.describe.before_update 2>&1 deploy_config_map_catalog_source $NAMESPACE ${repo_dir}/manifests "${IMAGE_CLUSTER_LOGGING_OPERATOR}" +deploy_olm_catalog_unsupported_resources # patch subscription payload="{\"op\":\"replace\",\"path\":\"/spec/source\",\"value\":\"cluster-logging\"}" @@ -105,4 +107,8 @@ try_until_text "oc -n openshift-logging get deployment cluster-logging-operator # verify operator is ready try_until_text "oc -n openshift-logging get deployment cluster-logging-operator -o jsonpath={.status.updatedReplicas} --ignore-not-found" "1" ${TIMEOUT_MIN} +# assert deployment assert_resources_exist + +# assert kibana shared config +assert_kibana_shared_config_exist diff --git a/hack/testing/test-367-logforwarding.sh b/hack/testing/test-367-logforwarding.sh index 51259ec56f..84f021da88 100755 --- a/hack/testing/test-367-logforwarding.sh +++ b/hack/testing/test-367-logforwarding.sh @@ -73,5 +73,7 @@ for dir in $(ls -d $TEST_DIR); do oc delete $ns --ignore-not-found --force --grace-period=0||: try_until_failure "oc get $ns" "$((1 * $minute))" done + + cleanup_olm_catalog_unsupported_resources done exit $failed diff --git a/hack/testing/test-999-fluentd-prometheus-metrics.sh b/hack/testing/test-999-fluentd-prometheus-metrics.sh index 7d72d266a9..53c5b420bc 100755 --- a/hack/testing/test-999-fluentd-prometheus-metrics.sh +++ b/hack/testing/test-999-fluentd-prometheus-metrics.sh @@ -27,6 +27,9 @@ cleanup() { try_until_failure "oc get ${item}" "$((1 * $minute))" done fi + + cleanup_olm_catalog_unsupported_resources + exit $return_code } trap "cleanup" EXIT diff --git a/hack/testing/utils b/hack/testing/utils index 9d043c91bf..2ff35f857a 100644 --- a/hack/testing/utils +++ b/hack/testing/utils @@ -104,6 +104,26 @@ wait_for_deployment_to_be_ready(){ try_until_text "oc -n $namespace get deployment $name -o jsonpath={.status.availableReplicas} 
--ignore-not-found" "1" $timeout } +deploy_olm_catalog_unsupported_resources(){ + local manifest=${repo_dir}/manifests + local version=$(basename $(find $manifest -type d | sort -r | head -n 1)) + + # Create static cluster roles and rolebindings + try_until_success "oc create -f ${manifest}/$version/0100_clusterroles.yaml" "$(( 30 * second ))" + try_until_success "oc create -f ${manifest}/$version/0110_clusterrolebindings.yaml" "$(( 30 * second ))" + + # Create static cluster roles and rolebindings + try_until_success "oc create -f ${manifest}/$version/0200_roles.yaml" "$(( 30 * second ))" + try_until_success "oc create -f ${manifest}/$version/0210_rolebindings.yaml" "$(( 30 * second ))" +} + +cleanup_olm_catalog_unsupported_resources(){ + oc delete clusterrolebinding clusterlogging-collector-metrics --wait=false --ignore-not-found + oc delete clusterrole clusterlogging-collector-metrics --wait=false --ignore-not-found + oc -n "${MANAGED_CONFIG_NAMESPACE}" delete role clusterlogging-shared-config + oc -n "${MANAGED_CONFIG_NAMESPACE}" delete rolebinding clusterlogging-shared-config +} + deploy_marketplace_operator(){ local ns=$1 local name=$2 @@ -242,6 +262,8 @@ function deploy_clusterlogging_operator() { -e "/name: FLUENTD_IMAGE/,/value:/s,value:.*\$,value: ${f_img}," \ -e "/name: OAUTH_PROXY_IMAGE/,/value:/s,value:.*\$,value: ${op_img}," \ -i $csv + + deploy_olm_catalog_unsupported_resources deploy_operator "openshift-logging" "cluster-logging-operator" $manifest $IMAGE_CLUSTER_LOGGING_OPERATOR $((2 * $minute)) } diff --git a/manifests/4.4/0110_clusterrolebindings.yaml b/manifests/4.4/0110_clusterrolebindings.yaml index 45e2d79140..2ebc336a3a 100644 --- a/manifests/4.4/0110_clusterrolebindings.yaml +++ b/manifests/4.4/0110_clusterrolebindings.yaml @@ -9,4 +9,4 @@ roleRef: subjects: - kind: ServiceAccount name: prometheus-k8s - namespace: openshift-monitoring + namespace: openshift-monitoring diff --git a/manifests/4.4/0200_roles.yaml b/manifests/4.4/0200_roles.yaml new file mode 100644 index 0000000000..89f36d4c7e --- /dev/null +++ b/manifests/4.4/0200_roles.yaml @@ -0,0 +1,14 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: clusterlogging-shared-config + namespace: openshift-config-managed +rules: +- apiGroups: [""] + resources: + - configmaps + verbs: + - get + - create + - update + - delete diff --git a/manifests/4.4/0210_rolebindings.yaml b/manifests/4.4/0210_rolebindings.yaml new file mode 100644 index 0000000000..baa0848c11 --- /dev/null +++ b/manifests/4.4/0210_rolebindings.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: clusterlogging-shared-config + namespace: openshift-config-managed +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: clusterlogging-shared-config +subjects: +- kind: ServiceAccount + name: cluster-logging-operator + namespace: openshift-logging diff --git a/pkg/k8shandler/configmap.go b/pkg/k8shandler/configmap.go index 9b1bf4f8c5..5a756f047d 100644 --- a/pkg/k8shandler/configmap.go +++ b/pkg/k8shandler/configmap.go @@ -88,12 +88,21 @@ func (clusterRequest *ClusterLoggingRequest) createOrUpdateConfigMap(configMap * return nil } -//RemoveConfigMap with a given name and namespace +//RemoveConfigMap with a given name and the cluster request namespace func (clusterRequest *ClusterLoggingRequest) RemoveConfigMap(configmapName string) error { + return clusterRequest.removeConfigMapFromNs(configmapName, clusterRequest.cluster.Namespace) +} + +//RemoveSharedConfigMap with a 
+//RemoveSharedConfigMap with a given name and a shared cluster namespace
+func (clusterRequest *ClusterLoggingRequest) RemoveSharedConfigMap(configMapName, namespace string) error {
+	return clusterRequest.removeConfigMapFromNs(configMapName, namespace)
+}
+
+func (clusterRequest *ClusterLoggingRequest) removeConfigMapFromNs(configmapName, namespace string) error {
 	configMap := NewConfigMap(
 		configmapName,
-		clusterRequest.cluster.Namespace,
+		namespace,
 		map[string]string{},
 	)
 
diff --git a/pkg/k8shandler/consoleexternalloglink.go b/pkg/k8shandler/consoleexternalloglink.go
index 7fcdec5a76..71486b3463 100644
--- a/pkg/k8shandler/consoleexternalloglink.go
+++ b/pkg/k8shandler/consoleexternalloglink.go
@@ -26,8 +26,8 @@ func NewConsoleExternalLogLink(resourceName, namespace, consoleText, hrefTemplat
 			},
 		},
 		Spec: consolev1.ConsoleExternalLogLinkSpec{
-			Text: consoleText,
-			HrefTemplate: hrefTemplate,
+			Text:            consoleText,
+			HrefTemplate:    hrefTemplate,
 			NamespaceFilter: namespaceFilter,
 		},
 	}
diff --git a/pkg/k8shandler/visualization.go b/pkg/k8shandler/visualization.go
index eb2f313e40..16aa2d9f74 100644
--- a/pkg/k8shandler/visualization.go
+++ b/pkg/k8shandler/visualization.go
@@ -29,6 +29,12 @@ const (
 	// The following strings are turned into JavaScript RegExps. Online tool to test them: https://regex101.com/
 	nodesAndContainersNamespaceFilter = "^(openshift-.*|kube-.*|openshift$|kube$|default$)"
 	appsNamespaceFilter               = "^((?!" + nodesAndContainersNamespaceFilter + ").)*$" // ^((?!^(openshift-.*|kube-.*|openshift$|kube$|default$)).)*$
+
+	loggingSharedConfigMapNamePre44x     = "sharing-config"
+	loggingSharedConfigRolePre44x        = "sharing-config-reader"
+	loggingSharedConfigRoleBindingPre44x = "openshift-logging-sharing-config-reader-binding"
+	loggingSharedConfigMapName           = "logging-shared-config"
+	loggingSharedConfigNs                = "openshift-config-managed"
 )
 
 var (
@@ -201,7 +207,11 @@ func (clusterRequest *ClusterLoggingRequest) removeKibana() (err error) {
 		return
 	}
 
-	if err = clusterRequest.RemoveConfigMap("sharing-config"); err != nil {
+	if err = clusterRequest.RemoveConfigMap(loggingSharedConfigMapNamePre44x); err != nil {
+		return
+	}
+
+	if err = clusterRequest.RemoveSharedConfigMap(loggingSharedConfigMapName, loggingSharedConfigNs); err != nil {
 		return
 	}
 
@@ -443,56 +453,42 @@ func (clusterRequest *ClusterLoggingRequest) createOrUpdateKibanaRoute() error {
 		}
 	}
 
+	if err := clusterRequest.createOrUpdateKibanaSharedConfigMap(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (clusterRequest *ClusterLoggingRequest) createOrUpdateKibanaSharedConfigMap() error {
+	cluster := clusterRequest.cluster
+
 	kibanaURL, err := clusterRequest.GetRouteURL("kibana")
 	if err != nil {
 		return err
 	}
 
-	sharedConfig := createSharedConfig(cluster.Namespace, kibanaURL, kibanaURL)
+	sharedConfig := createSharedConfig(loggingSharedConfigNs, kibanaURL, kibanaURL)
 	utils.AddOwnerRefToObject(sharedConfig, utils.AsOwner(cluster))
 
-	err = clusterRequest.Create(sharedConfig)
+	err = clusterRequest.CreateOrUpdateConfigMap(sharedConfig)
 	if err != nil && !errors.IsAlreadyExists(err) {
 		return fmt.Errorf("Failure creating Kibana route shared config: %v", err)
 	}
 
-	sharedRole := NewRole(
-		"sharing-config-reader",
-		cluster.Namespace,
-		NewPolicyRules(
-			NewPolicyRule(
-				[]string{""},
-				[]string{"configmaps"},
-				[]string{"sharing-config"},
-				[]string{"get"},
-			),
-		),
-	)
-
-	utils.AddOwnerRefToObject(sharedRole, utils.AsOwner(clusterRequest.cluster))
-
-	err = clusterRequest.Create(sharedRole)
-	if err != nil && !errors.IsAlreadyExists(err) {
-		return fmt.Errorf("Failure creating Kibana route shared config role for %q: %v", cluster.Name, err)
fmt.Errorf("Failure creating Kibana route shared config role for %q: %v", cluster.Name, err) + oldSharedConfig := NewConfigMap(loggingSharedConfigMapNamePre44x, cluster.GetNamespace(), map[string]string{}) + if err = clusterRequest.Delete(oldSharedConfig); err != nil && !errors.IsNotFound(err) { + return fmt.Errorf("Failure delete old Kibana route shared config for %q: %v", cluster.Name, err) } - sharedRoleBinding := NewRoleBinding( - "openshift-logging-sharing-config-reader-binding", - cluster.Namespace, - "sharing-config-reader", - NewSubjects( - NewSubject( - "Group", - "system:authenticated", - ), - ), - ) - - utils.AddOwnerRefToObject(sharedRoleBinding, utils.AsOwner(clusterRequest.cluster)) + oldSharedRole := NewRole(loggingSharedConfigRolePre44x, cluster.GetNamespace(), nil) + if err = clusterRequest.Delete(oldSharedRole); err != nil && !errors.IsNotFound(err) { + return fmt.Errorf("Failure deleting old Kibana shared config role for %q: %v", cluster.Name, err) + } - err = clusterRequest.Create(sharedRoleBinding) - if err != nil && !errors.IsAlreadyExists(err) { - return fmt.Errorf("Failure creating Kibana route shared config role binding for %q: %v", cluster.Name, err) + oldSharedRoleBinding := NewRoleBinding(loggingSharedConfigRoleBindingPre44x, cluster.GetNamespace(), loggingSharedConfigRolePre44x, nil) + if err = clusterRequest.Delete(oldSharedRoleBinding); err != nil && !errors.IsNotFound(err) { + return fmt.Errorf("Failure deleting old Kibana shared config role binding for %q: %v", cluster.Name, err) } return nil @@ -811,13 +807,13 @@ func newKibanaPodSpec(cluster *logging.ClusterLogging, kibanaName string, elasti return kibanaPodSpec } -func createSharedConfig(namespace, kibanaAppURL, kibanaInfraURL string) *v1.ConfigMap { +func createSharedConfig(namespace, kibanaAppPublicURL, kibanaInfraAppPublicURL string) *v1.ConfigMap { return NewConfigMap( - "sharing-config", + loggingSharedConfigMapName, namespace, map[string]string{ - "kibanaAppURL": kibanaAppURL, - "kibanaInfraURL": kibanaInfraURL, + "kibanaAppPublicURL": kibanaAppPublicURL, + "kibanaInfraAppPublicURL": kibanaInfraAppPublicURL, }, ) } diff --git a/pkg/k8shandler/visualization_test.go b/pkg/k8shandler/visualization_test.go index 468b8dfd3f..c81b168f26 100644 --- a/pkg/k8shandler/visualization_test.go +++ b/pkg/k8shandler/visualization_test.go @@ -1,6 +1,7 @@ package k8shandler import ( + "context" "fmt" "reflect" "strings" @@ -10,10 +11,17 @@ import ( logging "github.com/openshift/cluster-logging-operator/pkg/apis/logging/v1" "github.com/openshift/cluster-logging-operator/pkg/constants" "github.com/openshift/cluster-logging-operator/pkg/utils" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + routev1 "github.com/openshift/api/route/v1" v1 "k8s.io/api/core/v1" + rbac "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" ) func TestNewKibanaPodSpecSetsProxyToUseServiceAccountAsOAuthClient(t *testing.T) { @@ -243,6 +251,100 @@ func TestNewKibanaPodSpecWhenProxyConfigExists(t *testing.T) { checkKibanaProxyVolumesAndVolumeMounts(t, podSpec, constants.KibanaTrustedCAName) } +func TestNewLoggingSharedConfigMapExists(t *testing.T) { + _ = routev1.AddToScheme(scheme.Scheme) + cluster := &logging.ClusterLogging{ + ObjectMeta: metav1.ObjectMeta{ + Name: "instance", + Namespace: "openshift-logging", + }, + } + + testCases := 
+	testCases := []struct {
+		name    string
+		objs    []runtime.Object
+		wantCm  *v1.ConfigMap
+		wantErr error
+	}{
+		{
+			name: "new route creation",
+			wantCm: NewConfigMap(
+				loggingSharedConfigMapName,
+				loggingSharedConfigNs,
+				map[string]string{
+					"kibanaAppPublicURL":      "https://",
+					"kibanaInfraAppPublicURL": "https://",
+				},
+			),
+		},
+		{
+			name: "update route with shared configmap, role and rolebinding migration",
+			objs: []runtime.Object{
+				runtime.Object(NewConfigMap(loggingSharedConfigMapNamePre44x, cluster.GetNamespace(), map[string]string{})),
+				runtime.Object(NewRole(loggingSharedConfigRolePre44x, cluster.GetNamespace(), []rbac.PolicyRule{})),
+				runtime.Object(NewRoleBinding(loggingSharedConfigRoleBindingPre44x, cluster.GetNamespace(), loggingSharedConfigRolePre44x, []rbac.Subject{})),
+			},
+			wantCm: NewConfigMap(
+				loggingSharedConfigMapName,
+				loggingSharedConfigNs,
+				map[string]string{
+					"kibanaAppPublicURL":      "https://",
+					"kibanaInfraAppPublicURL": "https://",
+				},
+			),
+		},
+	}
+	for _, tc := range testCases {
+		tc := tc
+		t.Run(tc.name, func(t *testing.T) {
+			t.Parallel()
+
+			client := fake.NewFakeClient(tc.objs...)
+			clusterRequest := &ClusterLoggingRequest{
+				client:  client,
+				cluster: cluster,
+			}
+
+			if gotErr := clusterRequest.createOrUpdateKibanaRoute(); gotErr != tc.wantErr {
+				t.Errorf("got: %v, want: %v", gotErr, tc.wantErr)
+			}
+
+			// Check new shared config map exists in openshift config shared namespace
+			key := types.NamespacedName{Namespace: loggingSharedConfigNs, Name: loggingSharedConfigMapName}
+			gotCm := &v1.ConfigMap{}
+			utils.AddOwnerRefToObject(tc.wantCm, utils.AsOwner(clusterRequest.cluster))
+
+			if err := client.Get(context.TODO(), key, gotCm); err != nil {
+				t.Errorf("Expected configmap, got: %v", err)
+			}
+			if ok := reflect.DeepEqual(gotCm, tc.wantCm); !ok {
+				t.Errorf("got: %v, want: %v", gotCm, tc.wantCm)
+			}
+
+			// Check old shared config map is deleted
+			key = types.NamespacedName{Namespace: cluster.GetNamespace(), Name: loggingSharedConfigMapNamePre44x}
+			gotCmPre44x := &v1.ConfigMap{}
+			if err := client.Get(context.TODO(), key, gotCmPre44x); !errors.IsNotFound(err) {
+				t.Errorf("Expected deleted shared config pre 4.4.x, got: %v", err)
+			}
+
+			// Check old role to access the shared config map is deleted
+			key = types.NamespacedName{Namespace: cluster.GetNamespace(), Name: loggingSharedConfigRolePre44x}
+			gotRolePre44x := &rbac.Role{}
+			if err := client.Get(context.TODO(), key, gotRolePre44x); !errors.IsNotFound(err) {
+				t.Errorf("Expected deleted role for shared config map pre 4.4.x, got: %v", err)
+			}
+
+			// Check old rolebinding for group system:authenticated is deleted
+			key = types.NamespacedName{Namespace: cluster.GetNamespace(), Name: loggingSharedConfigRoleBindingPre44x}
+			gotRoleBindingPre44x := &rbac.RoleBinding{}
+			if err := client.Get(context.TODO(), key, gotRoleBindingPre44x); !errors.IsNotFound(err) {
+				t.Errorf("Expected deleted rolebinding for shared config map pre 4.4.x, got: %v", err)
+			}
+		})
+	}
+}
+
 func checkKibanaProxyEnvVar(t *testing.T, podSpec v1.PodSpec, name string, value string) {
 	env := podSpec.Containers[1].Env
 	found := false

From 850d4bf234eb1e8dcf45409640592be0502b57cd Mon Sep 17 00:00:00 2001
From: Jeff Cantrill
Date: Tue, 25 Feb 2020 15:07:15 -0500
Subject: [PATCH 12/21] LOG-599: Update fluent config to use static index

---
 .../forwarding/fluentd/fluent_conf_test.go    | 44 +++++++++++--------
 .../forwarding/fluentd/output_conf_es_test.go |  8 ++--
 .../forwarding/fluentd/templates.go           | 12 +++--
 test/helpers/elasticsearch.go                 |  6 +--
 4 
files changed, 41 insertions(+), 29 deletions(-) diff --git a/pkg/generators/forwarding/fluentd/fluent_conf_test.go b/pkg/generators/forwarding/fluentd/fluent_conf_test.go index 1a8ec190bc..6a2d870bdf 100644 --- a/pkg/generators/forwarding/fluentd/fluent_conf_test.go +++ b/pkg/generators/forwarding/fluentd/fluent_conf_test.go @@ -293,6 +293,7 @@ var _ = Describe("Generating fluentd config", func() { @type viaq_data_model + elasticsearch_index_prefix_field 'viaq_index_name' default_keep_fields CEE,time,@timestamp,aushape,ci_job,collectd,docker,fedora-ci,file,foreman,geoip,hostname,ipaddr4,ipaddr6,kubernetes,level,message,namespace_name,namespace_uuid,offset,openstack,ovirt,pid,pipeline_metadata,rsyslog,service,systemd,tags,testcase,tlog,viaq_msg_id extra_keep_fields "#{ENV['CDM_EXTRA_KEEP_FIELDS'] || ''}" keep_empty_fields "#{ENV['CDM_KEEP_EMPTY_FIELDS'] || 'message'}" @@ -336,17 +337,20 @@ var _ = Describe("Generating fluentd config", func() { enabled "#{ENV['ENABLE_ES_INDEX_NAME'] || 'true'}" tag "journal.system** system.var.log** **_default_** **_kube-*_** **_openshift-*_** **_openshift_**" - name_type operations_full - - + name_type static + static_index_name infra-write + + enabled "#{ENV['ENABLE_ES_INDEX_NAME'] || 'true'}" tag "linux-audit.log** k8s-audit.log** openshift-audit.log**" - name_type audit_full - - + name_type static + static_index_name audit-infra-write + + enabled "#{ENV['ENABLE_ES_INDEX_NAME'] || 'true'}" tag "**" - name_type project_full + name_type static + static_index_name app-write @@ -710,6 +714,7 @@ var _ = Describe("Generating fluentd config", func() { @type viaq_data_model + elasticsearch_index_prefix_field 'viaq_index_name' default_keep_fields CEE,time,@timestamp,aushape,ci_job,collectd,docker,fedora-ci,file,foreman,geoip,hostname,ipaddr4,ipaddr6,kubernetes,level,message,namespace_name,namespace_uuid,offset,openstack,ovirt,pid,pipeline_metadata,rsyslog,service,systemd,tags,testcase,tlog,viaq_msg_id extra_keep_fields "#{ENV['CDM_EXTRA_KEEP_FIELDS'] || ''}" keep_empty_fields "#{ENV['CDM_KEEP_EMPTY_FIELDS'] || 'message'}" @@ -753,17 +758,20 @@ var _ = Describe("Generating fluentd config", func() { enabled "#{ENV['ENABLE_ES_INDEX_NAME'] || 'true'}" tag "journal.system** system.var.log** **_default_** **_kube-*_** **_openshift-*_** **_openshift_**" - name_type operations_full + name_type static + static_index_name infra-write enabled "#{ENV['ENABLE_ES_INDEX_NAME'] || 'true'}" tag "linux-audit.log** k8s-audit.log** openshift-audit.log**" - name_type audit_full + name_type static + static_index_name audit-infra-write enabled "#{ENV['ENABLE_ES_INDEX_NAME'] || 'true'}" tag "**" - name_type project_full + name_type static + static_index_name app-write @@ -898,7 +906,7 @@ var _ = Describe("Generating fluentd config", func() { client_key '/var/run/ocp-collector/secrets/my-infra-secret/tls.key' client_cert '/var/run/ocp-collector/secrets/my-infra-secret/tls.crt' ca_file '/var/run/ocp-collector/secrets/my-infra-secret/ca-bundle.crt' - type_name com.redhat.viaq.common + type_name _doc write_operation create reload_connections "#{ENV['ES_RELOAD_CONNECTIONS'] || 'true'}" # https://github.com/uken/fluent-plugin-elasticsearch#reload-after @@ -940,7 +948,7 @@ var _ = Describe("Generating fluentd config", func() { client_key '/var/run/ocp-collector/secrets/my-infra-secret/tls.key' client_cert '/var/run/ocp-collector/secrets/my-infra-secret/tls.crt' ca_file '/var/run/ocp-collector/secrets/my-infra-secret/ca-bundle.crt' - type_name com.redhat.viaq.common + type_name _doc retry_tag 
retry_infra_es write_operation create reload_connections "#{ENV['ES_RELOAD_CONNECTIONS'] || 'true'}" @@ -985,7 +993,7 @@ var _ = Describe("Generating fluentd config", func() { client_key '/var/run/ocp-collector/secrets/my-es-secret/tls.key' client_cert '/var/run/ocp-collector/secrets/my-es-secret/tls.crt' ca_file '/var/run/ocp-collector/secrets/my-es-secret/ca-bundle.crt' - type_name com.redhat.viaq.common + type_name _doc write_operation create reload_connections "#{ENV['ES_RELOAD_CONNECTIONS'] || 'true'}" # https://github.com/uken/fluent-plugin-elasticsearch#reload-after @@ -1027,7 +1035,7 @@ var _ = Describe("Generating fluentd config", func() { client_key '/var/run/ocp-collector/secrets/my-es-secret/tls.key' client_cert '/var/run/ocp-collector/secrets/my-es-secret/tls.crt' ca_file '/var/run/ocp-collector/secrets/my-es-secret/ca-bundle.crt' - type_name com.redhat.viaq.common + type_name _doc retry_tag retry_apps_es_1 write_operation create reload_connections "#{ENV['ES_RELOAD_CONNECTIONS'] || 'true'}" @@ -1072,7 +1080,7 @@ var _ = Describe("Generating fluentd config", func() { client_key '/var/run/ocp-collector/secrets/my-other-secret/tls.key' client_cert '/var/run/ocp-collector/secrets/my-other-secret/tls.crt' ca_file '/var/run/ocp-collector/secrets/my-other-secret/ca-bundle.crt' - type_name com.redhat.viaq.common + type_name _doc write_operation create reload_connections "#{ENV['ES_RELOAD_CONNECTIONS'] || 'true'}" # https://github.com/uken/fluent-plugin-elasticsearch#reload-after @@ -1114,7 +1122,7 @@ var _ = Describe("Generating fluentd config", func() { client_key '/var/run/ocp-collector/secrets/my-other-secret/tls.key' client_cert '/var/run/ocp-collector/secrets/my-other-secret/tls.crt' ca_file '/var/run/ocp-collector/secrets/my-other-secret/ca-bundle.crt' - type_name com.redhat.viaq.common + type_name _doc retry_tag retry_apps_es_2 write_operation create reload_connections "#{ENV['ES_RELOAD_CONNECTIONS'] || 'true'}" @@ -1159,7 +1167,7 @@ var _ = Describe("Generating fluentd config", func() { client_key '/var/run/ocp-collector/secrets/my-audit-secret/tls.key' client_cert '/var/run/ocp-collector/secrets/my-audit-secret/tls.crt' ca_file '/var/run/ocp-collector/secrets/my-audit-secret/ca-bundle.crt' - type_name com.redhat.viaq.common + type_name _doc write_operation create reload_connections "#{ENV['ES_RELOAD_CONNECTIONS'] || 'true'}" # https://github.com/uken/fluent-plugin-elasticsearch#reload-after @@ -1201,7 +1209,7 @@ var _ = Describe("Generating fluentd config", func() { client_key '/var/run/ocp-collector/secrets/my-audit-secret/tls.key' client_cert '/var/run/ocp-collector/secrets/my-audit-secret/tls.crt' ca_file '/var/run/ocp-collector/secrets/my-audit-secret/ca-bundle.crt' - type_name com.redhat.viaq.common + type_name _doc retry_tag retry_audit_es write_operation create reload_connections "#{ENV['ES_RELOAD_CONNECTIONS'] || 'true'}" diff --git a/pkg/generators/forwarding/fluentd/output_conf_es_test.go b/pkg/generators/forwarding/fluentd/output_conf_es_test.go index ddd227fa97..441e4368c8 100644 --- a/pkg/generators/forwarding/fluentd/output_conf_es_test.go +++ b/pkg/generators/forwarding/fluentd/output_conf_es_test.go @@ -82,7 +82,7 @@ var _ = Describe("Generating fluentd config blocks", func() { client_key '/var/run/ocp-collector/secrets/my-es-secret/tls.key' client_cert '/var/run/ocp-collector/secrets/my-es-secret/tls.crt' ca_file '/var/run/ocp-collector/secrets/my-es-secret/ca-bundle.crt' - type_name com.redhat.viaq.common + type_name _doc write_operation create 
reload_connections "#{ENV['ES_RELOAD_CONNECTIONS'] || 'true'}" # https://github.com/uken/fluent-plugin-elasticsearch#reload-after @@ -124,7 +124,7 @@ var _ = Describe("Generating fluentd config blocks", func() { client_key '/var/run/ocp-collector/secrets/my-es-secret/tls.key' client_cert '/var/run/ocp-collector/secrets/my-es-secret/tls.crt' ca_file '/var/run/ocp-collector/secrets/my-es-secret/ca-bundle.crt' - type_name com.redhat.viaq.common + type_name _doc retry_tag retry_oncluster_elasticsearch write_operation create reload_connections "#{ENV['ES_RELOAD_CONNECTIONS'] || 'true'}" @@ -182,7 +182,7 @@ var _ = Describe("Generating fluentd config blocks", func() { user fluentd password changeme - type_name com.redhat.viaq.common + type_name _doc write_operation create reload_connections "#{ENV['ES_RELOAD_CONNECTIONS'] || 'true'}" # https://github.com/uken/fluent-plugin-elasticsearch#reload-after @@ -220,7 +220,7 @@ var _ = Describe("Generating fluentd config blocks", func() { user fluentd password changeme - type_name com.redhat.viaq.common + type_name _doc retry_tag retry_other_elasticsearch write_operation create reload_connections "#{ENV['ES_RELOAD_CONNECTIONS'] || 'true'}" diff --git a/pkg/generators/forwarding/fluentd/templates.go b/pkg/generators/forwarding/fluentd/templates.go index c3b3dd87ac..38f44ee71e 100644 --- a/pkg/generators/forwarding/fluentd/templates.go +++ b/pkg/generators/forwarding/fluentd/templates.go @@ -193,6 +193,7 @@ const fluentConfTemplate = `{{- define "fluentConf" }} @type viaq_data_model + elasticsearch_index_prefix_field 'viaq_index_name' default_keep_fields CEE,time,@timestamp,aushape,ci_job,collectd,docker,fedora-ci,file,foreman,geoip,hostname,ipaddr4,ipaddr6,kubernetes,level,message,namespace_name,namespace_uuid,offset,openstack,ovirt,pid,pipeline_metadata,rsyslog,service,systemd,tags,testcase,tlog,viaq_msg_id extra_keep_fields "#{ENV['CDM_EXTRA_KEEP_FIELDS'] || ''}" keep_empty_fields "#{ENV['CDM_KEEP_EMPTY_FIELDS'] || 'message'}" @@ -236,17 +237,20 @@ const fluentConfTemplate = `{{- define "fluentConf" }} enabled "#{ENV['ENABLE_ES_INDEX_NAME'] || 'true'}" tag "journal.system** system.var.log** **_default_** **_kube-*_** **_openshift-*_** **_openshift_**" - name_type operations_full + name_type static + static_index_name infra-write enabled "#{ENV['ENABLE_ES_INDEX_NAME'] || 'true'}" tag "linux-audit.log** k8s-audit.log** openshift-audit.log**" - name_type audit_full + name_type static + static_index_name audit-infra-write enabled "#{ENV['ENABLE_ES_INDEX_NAME'] || 'true'}" tag "**" - name_type project_full + name_type static + static_index_name app-write @@ -542,7 +546,7 @@ const storeElasticsearchTemplate = `{{- define "storeElasticsearch" }} client_cert '{{ .SecretPath "tls.crt"}}' ca_file '{{ .SecretPath "ca-bundle.crt"}}' {{ end -}} - type_name com.redhat.viaq.common + type_name _doc {{if .Hints.Has "include_retry_tag" -}} retry_tag {{.RetryTag}} {{end -}} diff --git a/test/helpers/elasticsearch.go b/test/helpers/elasticsearch.go index 226b958948..1b3406a7e2 100644 --- a/test/helpers/elasticsearch.go +++ b/test/helpers/elasticsearch.go @@ -21,9 +21,9 @@ import ( ) const ( - InfraIndexPrefix = ".operations." - ProjectIndexPrefix = "project." - AuditIndexPrefix = ".audit." 
+ InfraIndexPrefix = "infra-" + ProjectIndexPrefix = "app-" + AuditIndexPrefix = "audit-infra-" elasticsearchesLoggingURI = "apis/logging.openshift.io/v1/namespaces/openshift-logging/elasticsearches" ) From fa70a533779d6fb67ff914b8662c617937417d91 Mon Sep 17 00:00:00 2001 From: Periklis Tsirakidis Date: Wed, 26 Feb 2020 08:31:52 +0100 Subject: [PATCH 13/21] Fix make targets for local development environment --- hack/common | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hack/common b/hack/common index d86e0cb58a..1564041cdc 100644 --- a/hack/common +++ b/hack/common @@ -11,7 +11,7 @@ source "$repo_dir/hack/lib/init.sh" source "$repo_dir/hack/testing/utils" source "$repo_dir/hack/testing/assertions" -VERSION=${VERSION:-$(basename $(find ${repo_dir}/../manifests -type d | sort -r | head -n 1))} +VERSION=${VERSION:-$(basename $(find ${repo_dir}/manifests -type d | sort -r | head -n 1))} ELASTICSEARCH_OP_REPO=${ELASTICSEARCH_OP_REPO:-${repo_dir}/../elasticsearch-operator} ADMIN_USER=${ADMIN_USER:-kubeadmin} From acf58a46b17d1b5aa2a5966a0093c673df6bef55 Mon Sep 17 00:00:00 2001 From: Periklis Tsirakidis Date: Wed, 26 Feb 2020 08:34:14 +0100 Subject: [PATCH 14/21] Drop xtrace verbosity --- hack/build-image.sh | 3 --- hack/common | 4 ---- hack/deploy.sh | 2 +- hack/testing/test-010-deploy-via-olm-minimal.sh | 3 --- hack/testing/test-020-olm-upgrade.sh | 3 --- hack/testing/test-367-logforwarding.sh | 3 --- hack/testing/test-999-fluentd-prometheus-metrics.sh | 6 ++---- 7 files changed, 3 insertions(+), 21 deletions(-) diff --git a/hack/build-image.sh b/hack/build-image.sh index 8785bb52e2..906ca71c4c 100755 --- a/hack/build-image.sh +++ b/hack/build-image.sh @@ -1,8 +1,5 @@ #!/bin/bash -if [ "${DEBUG:-}" = "true" ]; then - set -x -fi set -euo pipefail source "$(dirname $0)/common" diff --git a/hack/common b/hack/common index d86e0cb58a..66d1d14443 100644 --- a/hack/common +++ b/hack/common @@ -1,8 +1,4 @@ #!/bin/bash -if [ -n "${DEBUG:-}" ]; then - set -x -fi - alias oc=${OC:-oc} repo_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )/.." diff --git a/hack/deploy.sh b/hack/deploy.sh index aa67b7375c..78608e0157 100755 --- a/hack/deploy.sh +++ b/hack/deploy.sh @@ -1,6 +1,6 @@ #!/bin/bash -set -euxo pipefail +set -euo pipefail source "$(dirname $0)/common" diff --git a/hack/testing/test-010-deploy-via-olm-minimal.sh b/hack/testing/test-010-deploy-via-olm-minimal.sh index 6d8b9a0c39..8a2b2255ea 100755 --- a/hack/testing/test-010-deploy-via-olm-minimal.sh +++ b/hack/testing/test-010-deploy-via-olm-minimal.sh @@ -4,9 +4,6 @@ # that begets the operands that make up logging. set -e -if [ "${DEBUG:-}" = "true" ]; then - set -x -fi source "$(dirname "${BASH_SOURCE[0]}")/../lib/init.sh" source "$(dirname "${BASH_SOURCE[0]}")/assertions" diff --git a/hack/testing/test-020-olm-upgrade.sh b/hack/testing/test-020-olm-upgrade.sh index e5f521d47b..cb417ff0d1 100755 --- a/hack/testing/test-020-olm-upgrade.sh +++ b/hack/testing/test-020-olm-upgrade.sh @@ -4,9 +4,6 @@ # that begets the operands that make up logging. set -e -if [ "${DEBUG:-}" = "true" ]; then - set -x -fi repo_dir="$( cd "$(dirname "$0")/../.." 
; pwd -P )" source "$repo_dir/hack/testing/utils" diff --git a/hack/testing/test-367-logforwarding.sh b/hack/testing/test-367-logforwarding.sh index 84f021da88..aef65f2d5e 100755 --- a/hack/testing/test-367-logforwarding.sh +++ b/hack/testing/test-367-logforwarding.sh @@ -2,9 +2,6 @@ # Jira LOG-367 - Log forwarding set -e -if [ -n "${DEBUG:-}" ]; then - set -x -fi source "$(dirname $0)/../common" diff --git a/hack/testing/test-999-fluentd-prometheus-metrics.sh b/hack/testing/test-999-fluentd-prometheus-metrics.sh index 53c5b420bc..66a23ab607 100755 --- a/hack/testing/test-999-fluentd-prometheus-metrics.sh +++ b/hack/testing/test-999-fluentd-prometheus-metrics.sh @@ -1,8 +1,6 @@ - #!/bin/bash -x +#!/bin/bash + set -e -if [ -n "${DEBUG:-}" ]; then - set -x -fi repo_dir=${repo_dir:-$(dirname $0)/../..} source "$repo_dir/hack/testing/utils" From b2d9c25803592f53376eaf92510cbb561df0b7e1 Mon Sep 17 00:00:00 2001 From: Arik Hadas Date: Wed, 19 Feb 2020 18:57:45 +0200 Subject: [PATCH 15/21] add e2e test for legacy syslog over udp Signed-off-by: Arik Hadas --- .../sysloglegacy/forward_to_syslog_test.go | 50 +++++++++++++++---- test/helpers/syslog.go | 18 +++++++ 2 files changed, 59 insertions(+), 9 deletions(-) diff --git a/test/e2e/logforwarding/sysloglegacy/forward_to_syslog_test.go b/test/e2e/logforwarding/sysloglegacy/forward_to_syslog_test.go index 8c0ca47bef..5d84cc01a1 100644 --- a/test/e2e/logforwarding/sysloglegacy/forward_to_syslog_test.go +++ b/test/e2e/logforwarding/sysloglegacy/forward_to_syslog_test.go @@ -37,7 +37,6 @@ var _ = Describe("LogForwarding", func() { if syslogDeployment, err = e2e.DeploySyslogReceiver(corev1.ProtocolTCP); err != nil { Fail(fmt.Sprintf("Unable to deploy syslog receiver: %v", err)) } - fmt.Sprintf("%s.%s.svc:24224", syslogDeployment.ObjectMeta.Name, syslogDeployment.Namespace) const conf = ` @type syslog_buffered @@ -50,14 +49,47 @@ var _ = Describe("LogForwarding", func() { ` //create configmap syslog/"syslog.conf" - fluentdConfigMap := k8shandler.NewConfigMap( - "syslog", - syslogDeployment.Namespace, - map[string]string{ - "syslog.conf": conf, - }, - ) - if _, err = e2e.KubeClient.Core().ConfigMaps(syslogDeployment.Namespace).Create(fluentdConfigMap); err != nil { + if err = e2e.CreateLegacySyslogConfigMap(syslogDeployment.Namespace, conf); err != nil { + Fail(fmt.Sprintf("Unable to create legacy syslog.conf configmap: %v", err)) + } + + components := []helpers.LogComponentType{helpers.ComponentTypeCollector, helpers.ComponentTypeStore} + cr := helpers.NewClusterLogging(components...) 
+ cr.ObjectMeta.Annotations[k8shandler.ForwardingAnnotation] = "disabled" + if err := e2e.CreateClusterLogging(cr); err != nil { + Fail(fmt.Sprintf("Unable to create an instance of cluster logging: %v", err)) + } + for _, component := range components { + if err := e2e.WaitFor(component); err != nil { + Fail(fmt.Sprintf("Failed waiting for component %s to be ready: %v", component, err)) + } + } + }) + + It("should send logs to the forward.Output logstore", func() { + Expect(e2e.LogStore.HasInfraStructureLogs(helpers.DefaultWaitForLogsTimeout)).To(BeTrue(), "Expected to find stored infrastructure logs") + }) + }) + + Context("and udp receiver", func() { + + BeforeEach(func() { + if syslogDeployment, err = e2e.DeploySyslogReceiver(corev1.ProtocolUDP); err != nil { + Fail(fmt.Sprintf("Unable to deploy syslog receiver: %v", err)) + } + const conf = ` + + @type syslog + @id syslogid + remote_syslog syslog-receiver.openshift-logging.svc + port 24224 + hostname ${hostname} + facility user + severity debug + + ` + //create configmap syslog/"syslog.conf" + if err = e2e.CreateLegacySyslogConfigMap(syslogDeployment.Namespace, conf); err != nil { Fail(fmt.Sprintf("Unable to create legacy syslog.conf configmap: %v", err)) } diff --git a/test/helpers/syslog.go b/test/helpers/syslog.go index 37f804aadd..7d7474b7d4 100644 --- a/test/helpers/syslog.go +++ b/test/helpers/syslog.go @@ -103,6 +103,24 @@ func (tc *E2ETestFramework) createSyslogServiceAccount() (serviceAccount *corev1 return serviceAccount, nil } +func (tc *E2ETestFramework) CreateLegacySyslogConfigMap(namespace, conf string) (err error) { + fluentdConfigMap := k8shandler.NewConfigMap( + "syslog", + namespace, + map[string]string{ + "syslog.conf": conf, + }, + ) + + if fluentdConfigMap, err = tc.KubeClient.Core().ConfigMaps(namespace).Create(fluentdConfigMap); err != nil { + return err + } + tc.AddCleanup(func() error { + return tc.KubeClient.Core().ConfigMaps(namespace).Delete(fluentdConfigMap.Name, nil) + }) + return nil +} + func (tc *E2ETestFramework) createSyslogRbac(name string) (err error) { saRole := k8shandler.NewRole( name, From 3f4fab3c0208e913402c931a3a5a2b434fd873d8 Mon Sep 17 00:00:00 2001 From: Igor Karpukhin Date: Wed, 26 Feb 2020 09:47:28 +0100 Subject: [PATCH 16/21] Added E2E-local target --- Makefile | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/Makefile b/Makefile index d0b9a47747..51439d3983 100644 --- a/Makefile +++ b/Makefile @@ -123,6 +123,10 @@ test-unit: fmt test-e2e: hack/test-e2e.sh +test-e2e-local: deploy-image + IMAGE_CLUSTER_LOGGING_OPERATOR=image-registry.openshift-image-registry.svc:5000/openshift/origin-cluster-logging-operator:latest \ + hack/test-e2e.sh + test-sec: go get -u github.com/securego/gosec/cmd/gosec gosec -severity medium --confidence medium -quiet ./... 
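The syslog e2e helpers in the patch above rely on the framework's create-then-register-cleanup pattern: `CreateLegacySyslogConfigMap` creates the configmap and immediately queues its deletion via `tc.AddCleanup`, so every test leaves the cluster as it found it. A minimal, self-contained Go sketch of that pattern follows; the `framework` type and function names are illustrative stand-ins, not the real `E2ETestFramework` API, and the reverse-order teardown is an assumption of the sketch.

    package main

    import "fmt"

    // framework mimics the e2e framework's cleanup bookkeeping.
    type framework struct {
    	cleanups []func() error
    }

    // AddCleanup registers a teardown step, as tc.AddCleanup does above.
    func (f *framework) AddCleanup(fn func() error) {
    	f.cleanups = append(f.cleanups, fn)
    }

    // createConfigMap stands in for CreateLegacySyslogConfigMap: create the
    // resource, then immediately register its deletion.
    func (f *framework) createConfigMap(namespace, name string) error {
    	fmt.Printf("create configmap %s/%s\n", namespace, name)
    	f.AddCleanup(func() error {
    		// namespace and name are captured by the closure at creation time
    		fmt.Printf("delete configmap %s/%s\n", namespace, name)
    		return nil
    	})
    	return nil
    }

    func main() {
    	f := &framework{}
    	_ = f.createConfigMap("openshift-logging", "syslog")
    	// teardown: run registered cleanups, newest first
    	for i := len(f.cleanups) - 1; i >= 0; i-- {
    		_ = f.cleanups[i]()
    	}
    }

Registering the cleanup at creation time, rather than in a central teardown list, keeps the delete call next to the create call and works even when a test fails partway through its setup.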
From 7a41edf38dc4430f8a33e74be68775bae5f85d44 Mon Sep 17 00:00:00 2001 From: Periklis Tsirakidis Date: Wed, 26 Feb 2020 14:08:35 +0100 Subject: [PATCH 17/21] Update elasticsearch-operator to point to master branch --- Gopkg.lock | 8 +- Gopkg.toml | 2 +- .../elasticsearch-operator/Dockerfile | 3 +- .../openshift/elasticsearch-operator/Makefile | 42 ++- .../openshift/elasticsearch-operator/OWNERS | 8 +- .../elasticsearch-operator/README.md | 2 +- .../hack/build-image.sh | 5 +- .../hack/cert_generation.sh | 268 +++++++++++++ .../elasticsearch-operator/hack/common | 2 +- .../elasticsearch-operator/hack/cr.yaml | 20 +- .../hack/deploy-image.sh | 5 +- .../hack/deploy-setup.sh | 5 +- .../elasticsearch-operator/hack/deploy.sh | 5 +- .../hack/test-e2e-wip.sh | 90 ----- .../elasticsearch-operator/hack/test-e2e.sh | 80 ++-- .../hack/testing/test-001-operator-sdk-e2e.sh | 84 +++++ .../elasticsearch-operator/hack/testing/utils | 33 +- .../elasticsearch-operator/hack/undeploy.sh | 5 +- .../manifests/05-deployment.yaml | 4 +- ...perator.v4.5.0.clusterserviceversion.yaml} | 20 +- .../{4.4 => 4.5}/elasticsearches.crd.yaml | 0 .../manifests/{4.4 => 4.5}/image-references | 4 +- .../elasticsearch-operator.package.yaml | 6 +- .../apis/logging/v1/elasticsearch_types.go | 15 + .../pkg/indexmanagement/converters.go | 62 ++++ .../pkg/indexmanagement/reconcile.go | 351 ++++++++++++++++++ .../pkg/indexmanagement/scripts.go | 84 +++++ .../pkg/indexmanagement/validations.go | 2 +- .../pkg/k8shandler/cluster.go | 16 + .../pkg/k8shandler/common.go | 8 +- .../pkg/k8shandler/configmaps.go | 2 +- .../pkg/k8shandler/defaults.go | 11 +- .../pkg/k8shandler/elasticsearch.go | 93 ++++- .../pkg/k8shandler/index_management.go | 33 +- .../pkg/k8shandler/util.go | 28 +- .../pkg/logger/logger.go | 16 + .../pkg/types/elasticsearch/types.go | 4 +- .../pkg/types/k8s/configmap.go | 22 ++ .../pkg/utils/comparators/envvars.go | 88 +++++ .../pkg/utils/comparators/maps.go | 10 + .../pkg/utils/comparators/resources.go | 26 ++ .../pkg/utils/comparators/tolerations.go | 50 +++ .../elasticsearch-operator/pkg/utils/utils.go | 25 ++ .../elasticsearch-operator/test/files/ca.crt | 30 -- .../test/files/elasticsearch.crt | 40 -- .../test/files/elasticsearch.key | 52 --- .../test/files/logging-es.crt | 35 -- .../test/files/logging-es.key | 52 --- .../test/files/system.admin.crt | 31 -- .../test/files/system.admin.key | 52 --- .../test/helpers/runtime/client.go | 64 ++++ 51 files changed, 1481 insertions(+), 522 deletions(-) create mode 100755 vendor/github.com/openshift/elasticsearch-operator/hack/cert_generation.sh delete mode 100644 vendor/github.com/openshift/elasticsearch-operator/hack/test-e2e-wip.sh create mode 100755 vendor/github.com/openshift/elasticsearch-operator/hack/testing/test-001-operator-sdk-e2e.sh rename vendor/github.com/openshift/elasticsearch-operator/manifests/{4.4/elasticsearch-operator.v4.4.0.clusterserviceversion.yaml => 4.5/elasticsearch-operator.v4.5.0.clusterserviceversion.yaml} (96%) rename vendor/github.com/openshift/elasticsearch-operator/manifests/{4.4 => 4.5}/elasticsearches.crd.yaml (100%) rename vendor/github.com/openshift/elasticsearch-operator/manifests/{4.4 => 4.5}/image-references (78%) create mode 100644 vendor/github.com/openshift/elasticsearch-operator/pkg/indexmanagement/converters.go create mode 100644 vendor/github.com/openshift/elasticsearch-operator/pkg/indexmanagement/reconcile.go create mode 100644 vendor/github.com/openshift/elasticsearch-operator/pkg/indexmanagement/scripts.go create mode 100644 
vendor/github.com/openshift/elasticsearch-operator/pkg/types/k8s/configmap.go create mode 100644 vendor/github.com/openshift/elasticsearch-operator/pkg/utils/comparators/envvars.go create mode 100644 vendor/github.com/openshift/elasticsearch-operator/pkg/utils/comparators/maps.go create mode 100644 vendor/github.com/openshift/elasticsearch-operator/pkg/utils/comparators/resources.go create mode 100644 vendor/github.com/openshift/elasticsearch-operator/pkg/utils/comparators/tolerations.go delete mode 100644 vendor/github.com/openshift/elasticsearch-operator/test/files/ca.crt delete mode 100644 vendor/github.com/openshift/elasticsearch-operator/test/files/elasticsearch.crt delete mode 100644 vendor/github.com/openshift/elasticsearch-operator/test/files/elasticsearch.key delete mode 100644 vendor/github.com/openshift/elasticsearch-operator/test/files/logging-es.crt delete mode 100644 vendor/github.com/openshift/elasticsearch-operator/test/files/logging-es.key delete mode 100644 vendor/github.com/openshift/elasticsearch-operator/test/files/system.admin.crt delete mode 100644 vendor/github.com/openshift/elasticsearch-operator/test/files/system.admin.key create mode 100644 vendor/github.com/openshift/elasticsearch-operator/test/helpers/runtime/client.go diff --git a/Gopkg.lock b/Gopkg.lock index f4d3ae867d..0f6e7ed5f3 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -520,8 +520,8 @@ revision = "d4a64ec2cbd86f11ea74dfdcf6520d5833d0c6cd" [[projects]] - branch = "feature-es6x" - digest = "1:bde78f3ff1aa8a9fd828b61aee76f53c36a2f4b990fdf9a35b7430fe66daed14" + branch = "master" + digest = "1:57bd1e85edbf2ed57caa6cc779e011da4a04f71e318e605abcd15c7361a7590e" name = "github.com/openshift/elasticsearch-operator" packages = [ "pkg/apis", @@ -530,11 +530,13 @@ "pkg/k8shandler", "pkg/logger", "pkg/types/elasticsearch", + "pkg/types/k8s", "pkg/utils", + "pkg/utils/comparators", "test/utils", ] pruneopts = "T" - revision = "66a02fbeffe5ace05b977021bf28e7f878eb32e6" + revision = "e09a9f723a7cc4b83f43db4d6727837f3b436d4c" [[projects]] digest = "1:0087f38751ec1995bafa67afd0ded9519a46297ecf571165c695fb9ba943688d" diff --git a/Gopkg.toml b/Gopkg.toml index 1191c62a3e..59fb6762fc 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -64,7 +64,7 @@ required = [ [[constraint]] name = "github.com/openshift/elasticsearch-operator" - branch = "feature-es6x" + branch = "master" [[override]] name = "gopkg.in/fsnotify.v1" diff --git a/vendor/github.com/openshift/elasticsearch-operator/Dockerfile b/vendor/github.com/openshift/elasticsearch-operator/Dockerfile index e8fc645910..90c27bfbd1 100644 --- a/vendor/github.com/openshift/elasticsearch-operator/Dockerfile +++ b/vendor/github.com/openshift/elasticsearch-operator/Dockerfile @@ -4,14 +4,13 @@ COPY . . 
RUN make FROM registry.svc.ci.openshift.org/openshift/origin-v4.0:base -ARG CSV=4.4 ENV ALERTS_FILE_PATH="/etc/elasticsearch-operator/files/prometheus_alerts.yml" ENV RULES_FILE_PATH="/etc/elasticsearch-operator/files/prometheus_rules.yml" COPY --from=builder /go/src/github.com/openshift/elasticsearch-operator/_output/bin/elasticsearch-operator /usr/bin/ COPY --from=builder /go/src/github.com/openshift/elasticsearch-operator/files/ /etc/elasticsearch-operator/files/ -COPY --from=builder /go/src/github.com/openshift/elasticsearch-operator/manifests/$CSV /manifests/$CSV +COPY --from=builder /go/src/github.com/openshift/elasticsearch-operator/manifests/4.* /manifests/ COPY --from=builder /go/src/github.com/openshift/elasticsearch-operator/manifests/elasticsearch-operator.package.yaml /manifests/ WORKDIR /usr/bin diff --git a/vendor/github.com/openshift/elasticsearch-operator/Makefile b/vendor/github.com/openshift/elasticsearch-operator/Makefile index f8b7a6651d..f1cadf61de 100644 --- a/vendor/github.com/openshift/elasticsearch-operator/Makefile +++ b/vendor/github.com/openshift/elasticsearch-operator/Makefile @@ -18,6 +18,10 @@ KUBECONFIG?=$(HOME)/.kube/config MAIN_PKG=cmd/manager/main.go RUN_LOG?=elasticsearch-operator.log RUN_PID?=elasticsearch-operator.pid +LOGGING_IMAGE_STREAM?=feature-es6x +OPERATOR_NAMESPACE=openshift-operators-redhat +DEPLOYMENT_NAMESPACE=openshift-logging +REPLICAS?=0 # go source files, ignore vendor directory SRC = $(shell find . -type f -name '*.go' -not -path "./vendor/*") @@ -69,8 +73,8 @@ image: imagebuilder then hack/build-image.sh $(IMAGE_TAG) $(IMAGE_BUILDER) $(IMAGE_BUILDER_OPTS) ; \ fi -test-e2e: - hack/test-e2e.sh +test-e2e: gen-example-certs + LOGGING_IMAGE_STREAM=$(LOGGING_IMAGE_STREAM) REMOTE_CLUSTER=true hack/test-e2e.sh test-unit: @go test -v ./pkg/... ./cmd/... @@ -99,25 +103,51 @@ deploy-image: image hack/deploy-image.sh .PHONY: deploy-image -deploy-example: deploy - @oc create -n openshift-logging -f hack/cr.yaml +deploy-example: deploy deploy-example-secret + @oc create -n $(DEPLOYMENT_NAMESPACE) -f hack/cr.yaml .PHONY: deploy-example +deploy-example-secret: gen-example-certs + @oc -n $(DEPLOYMENT_NAMESPACE) delete secret elasticsearch ||: && \ + oc -n $(DEPLOYMENT_NAMESPACE) create secret generic elasticsearch \ + --from-file=admin-key=/tmp/example-secrets/system.admin.key \ + --from-file=admin-cert=/tmp/example-secrets/system.admin.crt \ + --from-file=admin-ca=/tmp/example-secrets/ca.crt \ + --from-file=/tmp/example-secrets/elasticsearch.crt \ + --from-file=/tmp/example-secrets/logging-es.key \ + --from-file=/tmp/example-secrets/logging-es.crt \ + --from-file=/tmp/example-secrets/elasticsearch.key +.PHONY: deploy-example-secret + +gen-example-certs: + @rm -rf /tmp/example-secrets ||: \ + mkdir /tmp/example-secrets && \ + hack/cert_generation.sh /tmp/example-secrets $(DEPLOYMENT_NAMESPACE) elasticsearch +.PHONY: gen-example-certs + run: deploy deploy-example @ALERTS_FILE_PATH=files/prometheus_alerts.yml \ RULES_FILE_PATH=files/prometheus_rules.yml \ - OPERATOR_NAME=elasticsearch-operator WATCH_NAMESPACE=openshift-logging \ + OPERATOR_NAME=elasticsearch-operator WATCH_NAMESPACE=$(DEPLOYMENT_NAMESPACE) \ KUBERNETES_CONFIG=/etc/origin/master/admin.kubeconfig \ go run ${MAIN_PKG} > $(RUN_LOG) 2>&1 & echo $$! 
> $(RUN_PID)
 
 run-local:
 	@ALERTS_FILE_PATH=files/prometheus_alerts.yml \
 	RULES_FILE_PATH=files/prometheus_rules.yml \
-	OPERATOR_NAME=elasticsearch-operator WATCH_NAMESPACE=openshift-logging \
+	OPERATOR_NAME=elasticsearch-operator WATCH_NAMESPACE=$(DEPLOYMENT_NAMESPACE) \
 	KUBERNETES_CONFIG=$(KUBECONFIG) \
 	go run ${MAIN_PKG} LOG_LEVEL=debug
 .PHONY: run-local
 
+scale-cvo:
+	@oc -n openshift-cluster-version scale deployment/cluster-version-operator --replicas=$(REPLICAS)
+.PHONY: scale-cvo
+
+scale-olm:
+	@oc -n openshift-operator-lifecycle-manager scale deployment/olm-operator --replicas=$(REPLICAS)
+.PHONY: scale-olm
+
 undeploy:
 	hack/undeploy.sh
 .PHONY: undeploy
diff --git a/vendor/github.com/openshift/elasticsearch-operator/OWNERS b/vendor/github.com/openshift/elasticsearch-operator/OWNERS
index 884c5affd7..df163efe78 100644
--- a/vendor/github.com/openshift/elasticsearch-operator/OWNERS
+++ b/vendor/github.com/openshift/elasticsearch-operator/OWNERS
@@ -2,16 +2,16 @@ approvers:
   - jcantrill
   - ewolinetz
-  - richm
   - lukas-vlcek
-  - nhosoi
   - alanconway
   - igor-karpukhin
+  - vimalk78
+  - syedriko
 reviewers:
   - ewolinetz
   - jcantrill
   - lukas-vlcek
-  - nhosoi
-  - richm
   - alanconway
   - igor-karpukhin
+  - vimalk78
+  - syedriko
diff --git a/vendor/github.com/openshift/elasticsearch-operator/README.md b/vendor/github.com/openshift/elasticsearch-operator/README.md
index c485c46c41..99c5fe41cc 100644
--- a/vendor/github.com/openshift/elasticsearch-operator/README.md
+++ b/vendor/github.com/openshift/elasticsearch-operator/README.md
@@ -119,7 +119,7 @@ file `$(RUN_PID)` (default `elasticsearch-operator.pid`) e.g. `kill $(cat elasti
 
 ### Image customization
 
-The operator is designed to work with `quay.io/openshift/origin-logging-elasticsearch5` image. To use
+The operator is designed to work with `quay.io/openshift/origin-logging-elasticsearch6` image. To use
 a different image, edit `manifests/image-references` before deployment, or edit
 the elasticsearch cr after deployment e.g. `oc edit elasticsearch elasticsearch`.
 
diff --git a/vendor/github.com/openshift/elasticsearch-operator/hack/build-image.sh b/vendor/github.com/openshift/elasticsearch-operator/hack/build-image.sh
index f2040686d5..d8af2f180e 100755
--- a/vendor/github.com/openshift/elasticsearch-operator/hack/build-image.sh
+++ b/vendor/github.com/openshift/elasticsearch-operator/hack/build-image.sh
@@ -1,6 +1,9 @@
 #!/bin/bash
 
-set -euxo pipefail
+if [ "${DEBUG:-}" = "true" ]; then
+  set -x
+fi
+set -euo pipefail
 
 source "$(dirname $0)/common"
 
diff --git a/vendor/github.com/openshift/elasticsearch-operator/hack/cert_generation.sh b/vendor/github.com/openshift/elasticsearch-operator/hack/cert_generation.sh
new file mode 100755
index 0000000000..71db1d2ead
--- /dev/null
+++ b/vendor/github.com/openshift/elasticsearch-operator/hack/cert_generation.sh
@@ -0,0 +1,268 @@
+#!/bin/bash
+
+if [ "${DEBUG:-}" = "true" ]; then
+  set -x
+fi
+set -e
+
+WORKING_DIR=$1
+NAMESPACE=$2
+CA_PATH=${CA_PATH:-$WORKING_DIR/ca.crt}
+LOG_STORE=$3
+REGENERATE_NEEDED=0
+
+function init_cert_files() {
+
+  if [ ! -f ${WORKING_DIR}/ca.db ]; then
+    touch ${WORKING_DIR}/ca.db
+  fi
+
+  if [ ! -f ${WORKING_DIR}/ca.serial.txt ]; then
+    echo 00 > ${WORKING_DIR}/ca.serial.txt
+  fi
+}
+
+function generate_signing_ca() {
+  if [ ! -f ${WORKING_DIR}/ca.crt ] || [ ! -f ${WORKING_DIR}/ca.key ] || ! 
openssl x509 -checkend 0 -noout -in ${WORKING_DIR}/ca.crt; then + openssl req -x509 \ + -new \ + -newkey rsa:4096 \ + -keyout ${WORKING_DIR}/ca.key \ + -nodes \ + -days 1825 \ + -out ${WORKING_DIR}/ca.crt \ + -subj "/CN=openshift-cluster-logging-signer" + + REGENERATE_NEEDED=1 + fi +} + +function create_signing_conf() { + cat < "${WORKING_DIR}/signing.conf" +# Simple Signing CA + +# The [default] section contains global constants that can be referred to from +# the entire configuration file. It may also hold settings pertaining to more +# than one openssl command. + +[ default ] +dir = ${WORKING_DIR} # Top dir + +# The next part of the configuration file is used by the openssl req command. +# It defines the CA's key pair, its DN, and the desired extensions for the CA +# certificate. + +[ req ] +default_bits = 4096 # RSA key size +encrypt_key = yes # Protect private key +default_md = sha512 # MD to use +utf8 = yes # Input is UTF-8 +string_mask = utf8only # Emit UTF-8 strings +prompt = no # Don't prompt for DN +distinguished_name = ca_dn # DN section +req_extensions = ca_reqext # Desired extensions + +[ ca_dn ] +0.domainComponent = "io" +1.domainComponent = "openshift" +organizationName = "OpenShift Origin" +organizationalUnitName = "Logging Signing CA" +commonName = "Logging Signing CA" + +[ ca_reqext ] +keyUsage = critical,keyCertSign,cRLSign +basicConstraints = critical,CA:true,pathlen:0 +subjectKeyIdentifier = hash + +# The remainder of the configuration file is used by the openssl ca command. +# The CA section defines the locations of CA assets, as well as the policies +# applying to the CA. + +[ ca ] +default_ca = signing_ca # The default CA section + +[ signing_ca ] +certificate = \$dir/ca.crt # The CA cert +private_key = \$dir/ca.key # CA private key +new_certs_dir = \$dir/ # Certificate archive +serial = \$dir/ca.serial.txt # Serial number file +crlnumber = \$dir/ca.crl.srl # CRL number file +database = \$dir/ca.db # Index file +unique_subject = no # Require unique subject +default_days = 730 # How long to certify for +default_md = sha512 # MD to use +policy = any_pol # Default naming policy +email_in_dn = no # Add email to cert DN +preserve = no # Keep passed DN ordering +name_opt = ca_default # Subject DN display options +cert_opt = ca_default # Certificate display options +copy_extensions = copy # Copy extensions from CSR +x509_extensions = client_ext # Default cert extensions +default_crl_days = 7 # How long before next CRL +crl_extensions = crl_ext # CRL extensions + +# Naming policies control which parts of a DN end up in the certificate and +# under what circumstances certification should be denied. + +[ match_pol ] +domainComponent = match # Must match 'simple.org' +organizationName = match # Must match 'Simple Inc' +organizationalUnitName = optional # Included if present +commonName = supplied # Must be present + +[ any_pol ] +domainComponent = optional +countryName = optional +stateOrProvinceName = optional +localityName = optional +organizationName = optional +organizationalUnitName = optional +commonName = optional +emailAddress = optional + +# Certificate extensions define what types of certificates the CA is able to +# create. 
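+# client_ext is the default issuing profile (x509_extensions above); sign_cert
+# overrides it on the command line with server_ext so component certs can both
+# serve TLS and authenticate as clients.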
+ +[ client_ext ] +keyUsage = critical,digitalSignature,keyEncipherment +basicConstraints = CA:false +extendedKeyUsage = clientAuth +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid + +[ server_ext ] +keyUsage = critical,digitalSignature,keyEncipherment +basicConstraints = CA:false +extendedKeyUsage = serverAuth,clientAuth +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid + +# CRL extensions exist solely to point to the CA certificate that has issued +# the CRL. + +[ crl_ext ] +authorityKeyIdentifier = keyid +EOF +} + +function sign_cert() { + local component=$1 + + openssl ca \ + -in ${WORKING_DIR}/${component}.csr \ + -notext \ + -out ${WORKING_DIR}/${component}.crt \ + -config ${WORKING_DIR}/signing.conf \ + -extensions v3_req \ + -batch \ + -extensions server_ext +} + +function generate_cert_config() { + local component=$1 + local extensions=${2:-} + + if [ "$extensions" != "" ]; then + cat < "${WORKING_DIR}/${component}.conf" +[ req ] +default_bits = 4096 +prompt = no +encrypt_key = yes +default_md = sha512 +distinguished_name = dn +req_extensions = req_ext +[ dn ] +CN = ${component} +OU = OpenShift +O = Logging +[ req_ext ] +subjectAltName = ${extensions} +EOF + else + cat < "${WORKING_DIR}/${component}.conf" +[ req ] +default_bits = 4096 +prompt = no +encrypt_key = yes +default_md = sha512 +distinguished_name = dn +[ dn ] +CN = ${component} +OU = OpenShift +O = Logging +EOF + fi +} + +function generate_request() { + local component=$1 + + openssl req -new \ + -out ${WORKING_DIR}/${component}.csr \ + -newkey rsa:4096 \ + -keyout ${WORKING_DIR}/${component}.key \ + -config ${WORKING_DIR}/${component}.conf \ + -days 712 \ + -nodes +} + +function generate_certs() { + local component=$1 + local extensions=${2:-} + + if [ $REGENERATE_NEEDED = 1 ] || [ ! -f ${WORKING_DIR}/${component}.crt ] || ! openssl x509 -checkend 0 -noout -in ${WORKING_DIR}/${component}.crt; then + generate_cert_config $component $extensions + generate_request $component + sign_cert $component + fi +} + +function generate_extensions() { + local add_oid=$1 + local add_localhost=$2 + shift + shift + local cert_names=$@ + + extension_names="" + extension_index=1 + local use_comma=0 + + if [ "$add_localhost" == "true" ]; then + extension_names="IP.1:127.0.0.1,IP.2:0:0:0:0:0:0:0:1,DNS.1:localhost" + extension_index=2 + use_comma=1 + fi + + for name in ${cert_names//,/}; do + if [ $use_comma = 1 ]; then + extension_names="${extension_names},DNS.${extension_index}:${name}" + else + extension_names="DNS.${extension_index}:${name}" + use_comma=1 + fi + extension_index=$(( extension_index + 1 )) + done + + if [ "$add_oid" == "true" ]; then + extension_names="${extension_names},RID.1:1.2.3.4.5.5" + fi + + echo "$extension_names" +} + +if [ ! 
-d $WORKING_DIR ]; then + mkdir -p $WORKING_DIR +fi + +generate_signing_ca +init_cert_files +create_signing_conf + +generate_certs 'system.logging.kibana' +generate_certs 'system.admin' + +# TODO: get es SAN DNS, IP values from es service names +generate_certs 'kibana-internal' "$(generate_extensions false false kibana)" +generate_certs 'elasticsearch' "$(generate_extensions true true $LOG_STORE{,-cluster}{,.${NAMESPACE}.svc}{,.cluster.local})" +generate_certs 'logging-es' "$(generate_extensions false true $LOG_STORE{,.${NAMESPACE}.svc}{,.cluster.local})" diff --git a/vendor/github.com/openshift/elasticsearch-operator/hack/common b/vendor/github.com/openshift/elasticsearch-operator/hack/common index 63114dbfe3..550c681162 100644 --- a/vendor/github.com/openshift/elasticsearch-operator/hack/common +++ b/vendor/github.com/openshift/elasticsearch-operator/hack/common @@ -10,7 +10,7 @@ repo_dir="$(dirname $0)/.." source "$repo_dir/hack/lib/init.sh" source "$repo_dir/hack/testing/utils" -VERSION=${VERSION:-4.4} +VERSION=${VERSION:-$(basename $(find ${repo_dir}/manifests -type d | sort -r | head -n 1))} IMAGE_TAG=${IMAGE_TAG:-"quay.io/openshift/origin-elasticsearch-operator:latest"} diff --git a/vendor/github.com/openshift/elasticsearch-operator/hack/cr.yaml b/vendor/github.com/openshift/elasticsearch-operator/hack/cr.yaml index b0e679e616..52e748919f 100644 --- a/vendor/github.com/openshift/elasticsearch-operator/hack/cr.yaml +++ b/vendor/github.com/openshift/elasticsearch-operator/hack/cr.yaml @@ -6,10 +6,9 @@ metadata: spec: managementState: "Managed" nodeSpec: - image: quay.io/openshift/origin-logging-elasticsearch5:latest + image: quay.io/openshift/origin-logging-elasticsearch6:latest resources: limits: - cpu: 500m memory: 1Gi requests: cpu: 500m @@ -22,3 +21,20 @@ spec: - master storage: {} redundancyPolicy: ZeroRedundancy + indexManagement: + policies: + - name: infra-policy + pollInterval: 1m + phases: + hot: + actions: + rollover: + maxAge: 2m + delete: + minAge: 5m + mappings: + - name: infra + policyRef: infra-policy + aliases: + - infra + - logs.infra diff --git a/vendor/github.com/openshift/elasticsearch-operator/hack/deploy-image.sh b/vendor/github.com/openshift/elasticsearch-operator/hack/deploy-image.sh index 993312ce1e..24fe62ea88 100755 --- a/vendor/github.com/openshift/elasticsearch-operator/hack/deploy-image.sh +++ b/vendor/github.com/openshift/elasticsearch-operator/hack/deploy-image.sh @@ -1,6 +1,9 @@ #!/bin/bash -set -euxo pipefail +if [ "${DEBUG:-}" = "true" ]; then + set -x +fi +set -euo pipefail if [ "${REMOTE_REGISTRY:-true}" = false ] ; then exit 0 diff --git a/vendor/github.com/openshift/elasticsearch-operator/hack/deploy-setup.sh b/vendor/github.com/openshift/elasticsearch-operator/hack/deploy-setup.sh index 1a420467a2..d223972705 100755 --- a/vendor/github.com/openshift/elasticsearch-operator/hack/deploy-setup.sh +++ b/vendor/github.com/openshift/elasticsearch-operator/hack/deploy-setup.sh @@ -3,7 +3,10 @@ # to deploy an Elasticsearch cluster. 
It assumes it is capable of login as a # user who has the cluster-admin role -set -euxo pipefail +if [ "${DEBUG:-}" = "true" ]; then + set -x +fi +set -euo pipefail source "$(dirname $0)/common" diff --git a/vendor/github.com/openshift/elasticsearch-operator/hack/deploy.sh b/vendor/github.com/openshift/elasticsearch-operator/hack/deploy.sh index 4ab8acab19..bdc30e897a 100755 --- a/vendor/github.com/openshift/elasticsearch-operator/hack/deploy.sh +++ b/vendor/github.com/openshift/elasticsearch-operator/hack/deploy.sh @@ -1,8 +1,11 @@ #!/bin/bash +if [ "${DEBUG:-}" = "true" ]; then + set -x +fi set -euo pipefail source "$(dirname $0)/common" IMAGE_ELASTICSEARCH_OPERATOR=image-registry.openshift-image-registry.svc:5000/openshift/origin-elasticsearch-operator:latest \ -deploy_elasticsearch_operator \ No newline at end of file +deploy_elasticsearch_operator diff --git a/vendor/github.com/openshift/elasticsearch-operator/hack/test-e2e-wip.sh b/vendor/github.com/openshift/elasticsearch-operator/hack/test-e2e-wip.sh deleted file mode 100644 index 18ec961b49..0000000000 --- a/vendor/github.com/openshift/elasticsearch-operator/hack/test-e2e-wip.sh +++ /dev/null @@ -1,90 +0,0 @@ -#!/bin/bash -set -euo pipefail - -if [ -n "${DEBUG:-}" ]; then - set -x -fi - -IMAGE_ELASTICSEARCH_OPERATOR=${IMAGE_ELASTICSEARCH_OPERATOR:-quay.io/openshift/origin-elasticsearch-operator:latest} - -if [ -n "${IMAGE_FORMAT:-}" ] ; then - IMAGE_ELASTICSEARCH_OPERATOR=$(sed -e "s,\${component},elasticsearch-operator," <(echo $IMAGE_FORMAT)) -fi - -KUBECONFIG=${KUBECONFIG:-$HOME/.kube/config} - -TEST_NAMESPACE="${TEST_NAMESPACE:-e2e-test-${RANDOM}}" - -if oc get project ${TEST_NAMESPACE} > /dev/null 2>&1 ; then - echo using existing project ${TEST_NAMESPACE} -else - oc create namespace ${TEST_NAMESPACE} -fi - -oc create -n ${TEST_NAMESPACE} -f \ -https://raw.githubusercontent.com/coreos/prometheus-operator/master/example/prometheus-operator-crd/prometheusrule.crd.yaml || : -oc create -n ${TEST_NAMESPACE} -f \ -https://raw.githubusercontent.com/coreos/prometheus-operator/master/example/prometheus-operator-crd/servicemonitor.crd.yaml || : - -if [ "${REMOTE_CLUSTER:-true}" = false ] ; then - sudo sysctl -w vm.max_map_count=262144 ||: - - manifest=$(mktemp) - files="01-service-account.yaml 02-role.yaml 03-role-bindings.yaml 05-deployment.yaml" - pushd manifests; - for f in ${files}; do - cat ${f} >> ${manifest}; - done; - popd - # update the manifest with the image built by ci - sed -i "s,quay.io/openshift/origin-elasticsearch-operator:latest,${IMAGE_ELASTICSEARCH_OPERATOR}," ${manifest} - sed -i "s/namespace: openshift-logging/namespace: ${TEST_NAMESPACE}/g" ${manifest} - - TEST_NAMESPACE=${TEST_NAMESPACE} go test ./test/e2e/... 
\ - -root=$(pwd) \ - -kubeconfig=${KUBECONFIG} \ - -globalMan manifests/04-crd.yaml \ - -namespacedMan ${manifest} \ - -v \ - -parallel=1 \ - -singleNamespace \ - -timeout 900s -else - - if [ -n "${OPENSHIFT_BUILD_NAMESPACE:-}" -a -n "${IMAGE_FORMAT:-}" ] ; then - imageprefix=$( echo "$IMAGE_FORMAT" | sed -e 's,/stable:.*$,/,' ) - testimage=${imageprefix}pipeline:src - testroot=$( pwd ) - - # create test secret with kubeconfig for pod - oc create secret -n ${TEST_NAMESPACE} generic test-secret --from-file=config=${KUBECONFIG} - - testpod=$(mktemp) - cat test/files/e2e-test-pod.yaml > ${testpod} - sed -i "s,\${TEST_NAMESPACE},${TEST_NAMESPACE}," ${testpod} - sed -i "s,\${IMAGE_ELASTICSEARCH_OPERATOR},${IMAGE_ELASTICSEARCH_OPERATOR}," ${testpod} - sed -i "s,\${IMAGE_E2E_TEST},${testimage}," ${testpod} - - oc create \ - -n ${TEST_NAMESPACE} \ - -f ${testpod} - - echo $KUBECONFIG - oc project || : - oc config current-context || : - - # wait for the pod to be ready first... - oc wait pod/elasticsearch-operator-e2e-test -n ${TEST_NAMESPACE} --for=condition=PodScheduled -o yaml --timeout=300s - - oc wait pod/elasticsearch-operator-e2e-test -n ${TEST_NAMESPACE} --for=condition=Ready --timeout=300s || oc logs elasticsearch-operator-e2e-test -n ${TEST_NAMESPACE} - oc get pods -n ${TEST_NAMESPACE} - - #oc logs -f elasticsearch-operator-e2e-test -n ${TEST_NAMESPACE} - else - echo "Failed to run e2e test" - fi - - # make sure that pod completely successfully? -fi - -oc delete namespace ${TEST_NAMESPACE} diff --git a/vendor/github.com/openshift/elasticsearch-operator/hack/test-e2e.sh b/vendor/github.com/openshift/elasticsearch-operator/hack/test-e2e.sh index 588f8eb980..b9b6f9b0a6 100755 --- a/vendor/github.com/openshift/elasticsearch-operator/hack/test-e2e.sh +++ b/vendor/github.com/openshift/elasticsearch-operator/hack/test-e2e.sh @@ -1,57 +1,33 @@ #!/bin/bash -set -euo pipefail - -if [ -n "${DEBUG:-}" ]; then - set -x -fi - -IMAGE_ELASTICSEARCH_OPERATOR=${IMAGE_ELASTICSEARCH_OPERATOR:-quay.io/openshift/origin-elasticsearch-operator:latest} -if [ -n "${IMAGE_FORMAT:-}" ] ; then - IMAGE_ELASTICSEARCH_OPERATOR=$(sed -e "s,\${component},elasticsearch-operator," <(echo $IMAGE_FORMAT)) +if [ "${DEBUG:-}" = "true" ]; then + set -x fi +set -euo pipefail -KUBECONFIG=${KUBECONFIG:-$HOME/.kube/config} - -repo_dir="$(dirname $0)/.." 
- -manifest=$(mktemp) -files="01-service-account.yaml 02-role.yaml 03-role-bindings.yaml 05-deployment.yaml" -pushd manifests; - for f in ${files}; do - cat ${f} >> ${manifest}; - done; -popd -# update the manifest with the image built by ci -sed -i "s,quay.io/openshift/origin-elasticsearch-operator:latest,${IMAGE_ELASTICSEARCH_OPERATOR}," ${manifest} - -if [ "${REMOTE_CLUSTER:-false}" = false ] ; then - sudo sysctl -w vm.max_map_count=262144 ||: -fi - -TEST_NAMESPACE="${TEST_NAMESPACE:-e2e-test-${RANDOM}}" - -if oc get project ${TEST_NAMESPACE} > /dev/null 2>&1 ; then - echo using existing project ${TEST_NAMESPACE} -else - oc create namespace ${TEST_NAMESPACE} +current_dir=$(dirname "${BASH_SOURCE[0]}" ) +source "${current_dir}/lib/init.sh" +source "${current_dir}/lib/util/logs.sh" + +for test in $( find "${current_dir}/testing" -type f -name 'test-*.sh' | sort); do + os::log::info "===============================================================" + os::log::info "running e2e $test " + os::log::info "===============================================================" + if "${test}" ; then + os::log::info "===========================================================" + os::log::info "e2e $test succeeded at $( date )" + os::log::info "===========================================================" + else + + os::log::error "============= FAILED FAILED ============= " + os::log::error "e2e $test failed at $( date )" + os::log::error "============= FAILED FAILED ============= " + failed="true" + fi +done + +get_logging_pod_logs + +if [[ -n "${failed:-}" ]]; then + exit 1 fi - -sed -i "s/namespace: openshift-logging/namespace: ${TEST_NAMESPACE}/g" ${manifest} - -oc create -n ${TEST_NAMESPACE} -f \ -https://raw.githubusercontent.com/coreos/prometheus-operator/master/example/prometheus-operator-crd/prometheusrule.crd.yaml || : -oc create -n ${TEST_NAMESPACE} -f \ -https://raw.githubusercontent.com/coreos/prometheus-operator/master/example/prometheus-operator-crd/servicemonitor.crd.yaml || : - -TEST_NAMESPACE=${TEST_NAMESPACE} go test ./test/e2e/... \ - -root=$(pwd) \ - -kubeconfig=${KUBECONFIG} \ - -globalMan manifests/04-crd.yaml \ - -namespacedMan ${manifest} \ - -v \ - -parallel=1 \ - -singleNamespace \ - -timeout 1200s - -oc delete namespace ${TEST_NAMESPACE} diff --git a/vendor/github.com/openshift/elasticsearch-operator/hack/testing/test-001-operator-sdk-e2e.sh b/vendor/github.com/openshift/elasticsearch-operator/hack/testing/test-001-operator-sdk-e2e.sh new file mode 100755 index 0000000000..c219b8048d --- /dev/null +++ b/vendor/github.com/openshift/elasticsearch-operator/hack/testing/test-001-operator-sdk-e2e.sh @@ -0,0 +1,84 @@ +#!/bin/bash +set -euo pipefail + +if [ -n "${DEBUG:-}" ]; then + set -x +fi + +IMAGE_ELASTICSEARCH_OPERATOR=${IMAGE_ELASTICSEARCH_OPERATOR:-quay.io/openshift/origin-elasticsearch-operator:latest} + +if [ -n "${IMAGE_FORMAT:-}" ] ; then + IMAGE_ELASTICSEARCH_OPERATOR=$(sed -e "s,\${component},elasticsearch-operator," <(echo $IMAGE_FORMAT)) +fi +LOGGING_IMAGE_STREAM=${LOGGING_IMAGE_STREAM:-stable} +ELASTICSEARCH_IMAGE=${ELASTICSEARCH_IMAGE:-registry.svc.ci.openshift.org/ocp/$LOGGING_IMAGE_STREAM:logging-elasticsearch6} + +KUBECONFIG=${KUBECONFIG:-$HOME/.kube/config} + +repo_dir="$(dirname $0)/../.." +source "${repo_dir}/hack/lib/log/output.sh" +source "${repo_dir}/hack/testing/utils" +ARTIFACT_DIR=${ARTIFACT_DIR:-"$repo_dir/_output/$(basename ${BASH_SOURCE[0]})"} +test_artifact_dir=$ARTIFACT_DIR/test-001-operator-sdk +if [ ! 
-d $test_artifact_dir ] ; then + mkdir -p $test_artifact_dir +fi + +manifest=$(mktemp) +files="01-service-account.yaml 02-role.yaml 03-role-bindings.yaml 05-deployment.yaml" +pushd manifests; + for f in ${files}; do + cat ${f} >> ${manifest}; + done; +popd +# update the manifest with the image built by ci +sed -i "s,quay.io/openshift/origin-elasticsearch-operator:latest,${IMAGE_ELASTICSEARCH_OPERATOR}," ${manifest} +sed -i "s,quay.io/openshift/origin-logging-elasticsearch6:latest,${ELASTICSEARCH_IMAGE}," ${manifest} + +if [ "${REMOTE_CLUSTER:-false}" = false ] ; then + sudo sysctl -w vm.max_map_count=262144 ||: +fi + +TEST_NAMESPACE="${TEST_NAMESPACE:-e2e-test-${RANDOM}}" + +start_seconds=$(date +%s) +cleanup(){ + local return_code="$?" + set +e + os::log::info "Running cleanup" + end_seconds=$(date +%s) + runtime="$(($end_seconds - $start_seconds))s" + + if [ "${SKIP_CLEANUP:-false}" == "false" ] ; then + get_all_logging_pod_logs ${TEST_NAMESPACE} $test_artifact_dir + for item in "ns/${TEST_NAMESPACE}" "clusterrole/elasticsearch-operator" "clusterrolebinding/elasticsearch-operator-rolebinding"; do + oc delete $item --wait=true --ignore-not-found --force --grace-period=0 + done + fi + + exit ${return_code} +} +trap cleanup exit + +if oc get project ${TEST_NAMESPACE} > /dev/null 2>&1 ; then + echo using existing project ${TEST_NAMESPACE} +else + oc create namespace ${TEST_NAMESPACE} +fi + +sed -i "s/namespace: openshift-logging/namespace: ${TEST_NAMESPACE}/g" ${manifest} + +oc create -n ${TEST_NAMESPACE} -f \ +https://raw.githubusercontent.com/coreos/prometheus-operator/master/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml ||: +oc create -n ${TEST_NAMESPACE} -f \ +https://raw.githubusercontent.com/coreos/prometheus-operator/master/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml ||: + +TEST_NAMESPACE=${TEST_NAMESPACE} go test ./test/e2e/... 
\ + -root=$(pwd) \ + -kubeconfig=${KUBECONFIG} \ + -globalMan manifests/04-crd.yaml \ + -namespacedMan ${manifest} \ + -v \ + -parallel=1 \ + -singleNamespace \ + -timeout 1200s diff --git a/vendor/github.com/openshift/elasticsearch-operator/hack/testing/utils b/vendor/github.com/openshift/elasticsearch-operator/hack/testing/utils index 72604670a8..1a5f8570af 100644 --- a/vendor/github.com/openshift/elasticsearch-operator/hack/testing/utils +++ b/vendor/github.com/openshift/elasticsearch-operator/hack/testing/utils @@ -13,11 +13,11 @@ try_until_text() { local expire=$(($now + $timeout)) while [ $now -lt $expire ]; do if [[ "$($cmd)" == "${expected}" ]] ; then - exit 0 + return 0 fi now=$(date +%s) done - exit 1 + return 1 } wait_for_deployment_to_be_ready(){ @@ -27,6 +27,27 @@ wait_for_deployment_to_be_ready(){ try_until_text "oc -n $namespace get deployment $name -o jsonpath={.status.availableReplicas} --ignore-not-found" "1" $timeout } +get_all_logging_pod_logs() { + set +e + local LOGGING_NS=$1 + local outdir=${2:-$ARTIFACT_DIR} + local p + local container + for p in $(oc get pods -n ${LOGGING_NS} -o jsonpath='{.items[*].metadata.name}') ; do + oc -n ${LOGGING_NS} describe pod $p > $outdir/$p.describe 2>&1 || : + oc -n ${LOGGING_NS} get pod $p -o yaml > $outdir/$p.yaml 2>&1 || : + for container in $(oc -n ${LOGGING_NS} get po $p -o jsonpath='{.spec.containers[*].name}') ; do + oc logs -n ${LOGGING_NS} -c $container $p > $outdir/$p.$container.log 2>&1 + case "$container" in + elasticsearch*) oc exec -n ${LOGGING_NS} -c elasticsearch $p -- logs > $outdir/$p.$container.exec.log 2>&1 ;; + *) continue ;; + esac + done + done + set -e +} + + deploy_marketplace_operator(){ local namespace=$1 local name=$2 @@ -140,13 +161,15 @@ EOF } function deploy_elasticsearch_operator() { + OPERAND_IMAGES="" if [ -n "${IMAGE_FORMAT:-}" ] ; then IMAGE_ELASTICSEARCH_OPERATOR=$(sed -e "s,\${component},elasticsearch-operator," <(echo $IMAGE_FORMAT)) + OPERAND_IMAGES+="ELASTICSEARCH_IMAGE=registry.svc.ci.openshift.org/ocp/${LOGGING_IMAGE_STREAM:-stable}:logging-elasticsearch6" else IMAGE_ELASTICSEARCH_OPERATOR=${IMAGE_ELASTICSEARCH_OPERATOR:-registry.svc.ci.openshift.org/origin/$VERSION:elasticsearch-operator} fi manifest=${repo_dir}/manifests - GLOBAL=true deploy_operator "openshift-operators-redhat" "elasticsearch-operator" $manifest $IMAGE_ELASTICSEARCH_OPERATOR $((2 * $minute)) + OPERAND_IMAGES=$OPERAND_IMAGES GLOBAL=true deploy_operator "openshift-operators-redhat" "elasticsearch-operator" $manifest $IMAGE_ELASTICSEARCH_OPERATOR $((2 * $minute)) } function deploy_operator() { @@ -156,7 +179,6 @@ function deploy_operator() { local operatorImage=$4 local timeout=$5 - local version=$(basename $(find $manifest -type d | sort -r | head -n 1)) KUBECONFIG=${KUBECONFIG:-$HOME/.kube/config} @@ -165,6 +187,7 @@ function deploy_operator() { oc create ns ${namespace} || : os::log::info "Deploying operator from ${manifest}" + OPERAND_IMAGES=${OPERAND_IMAGES:-""} \ GLOBAL=${GLOBAL:-false} \ VERSION=${version} \ OPERATOR_IMAGE=${operatorImage} \ @@ -175,7 +198,7 @@ function deploy_operator() { if [ "$?" 
!= "0" ] ; then os::log::error "Error deploying operator via OLM using manifest: $manifest" - exit 1 + return 1 fi wait_for_deployment_to_be_ready $namespace $operatorName $timeout diff --git a/vendor/github.com/openshift/elasticsearch-operator/hack/undeploy.sh b/vendor/github.com/openshift/elasticsearch-operator/hack/undeploy.sh index c7533d6e94..3352c31910 100755 --- a/vendor/github.com/openshift/elasticsearch-operator/hack/undeploy.sh +++ b/vendor/github.com/openshift/elasticsearch-operator/hack/undeploy.sh @@ -1,6 +1,9 @@ #!/bin/bash -set -euxo pipefail +if [ "${DEBUG:-}" = "true" ]; then + set -x +fi +set -euo pipefail oc delete ns $NAMESPACE --force --grace-period=1 ||: oc delete -n openshift is origin-elasticsearch-operator || : diff --git a/vendor/github.com/openshift/elasticsearch-operator/manifests/05-deployment.yaml b/vendor/github.com/openshift/elasticsearch-operator/manifests/05-deployment.yaml index ae4df29724..8dd7f240b1 100644 --- a/vendor/github.com/openshift/elasticsearch-operator/manifests/05-deployment.yaml +++ b/vendor/github.com/openshift/elasticsearch-operator/manifests/05-deployment.yaml @@ -17,7 +17,7 @@ spec: containers: - name: elasticsearch-operator image: quay.io/openshift/origin-elasticsearch-operator:latest - imagePullPolicy: IfNotPresent + imagePullPolicy: Always command: - elasticsearch-operator ports: @@ -30,6 +30,8 @@ spec: value: "elasticsearch-operator" - name: PROXY_IMAGE value: "quay.io/openshift/origin-oauth-proxy:v4.0.0" + - name: ELASTICSEARCH_IMAGE + value: "quay.io/openshift/origin-logging-elasticsearch6:latest" - name: POD_NAME valueFrom: fieldRef: diff --git a/vendor/github.com/openshift/elasticsearch-operator/manifests/4.4/elasticsearch-operator.v4.4.0.clusterserviceversion.yaml b/vendor/github.com/openshift/elasticsearch-operator/manifests/4.5/elasticsearch-operator.v4.5.0.clusterserviceversion.yaml similarity index 96% rename from vendor/github.com/openshift/elasticsearch-operator/manifests/4.4/elasticsearch-operator.v4.4.0.clusterserviceversion.yaml rename to vendor/github.com/openshift/elasticsearch-operator/manifests/4.5/elasticsearch-operator.v4.5.0.clusterserviceversion.yaml index 157363855c..48e860f9b5 100644 --- a/vendor/github.com/openshift/elasticsearch-operator/manifests/4.4/elasticsearch-operator.v4.4.0.clusterserviceversion.yaml +++ b/vendor/github.com/openshift/elasticsearch-operator/manifests/4.5/elasticsearch-operator.v4.5.0.clusterserviceversion.yaml @@ -3,9 +3,11 @@ apiVersion: operators.coreos.com/v1alpha1 kind: ClusterServiceVersion metadata: - name: elasticsearch-operator.v4.4.0 + name: elasticsearch-operator.v4.5.0 namespace: placeholder annotations: + "operatorframework.io/suggested-namespace": openshift-operators-redhat + "operatorframework.io/cluster-monitoring": "true" categories: "OpenShift Optional, Logging & Tracing" capabilities: "Seamless Upgrades" certified: "false" @@ -22,7 +24,7 @@ metadata: containerImage: quay.io/openshift/origin-elasticsearch-operator:latest createdAt: 2019-02-20T08:00:00Z support: AOS Cluster Logging, Jaeger - olm.skipRange: ">=4.2.0 <4.4.0" + olm.skipRange: ">=4.1.0 <4.5.0" alm-examples: |- [ { @@ -34,7 +36,7 @@ metadata: "spec": { "managementState": "Managed", "nodeSpec": { - "image": "quay.io/openshift/origin-logging-elasticsearch5:latest", + "image": "quay.io/openshift/origin-logging-elasticsearch6:latest", "resources": { "limits": { "memory": "1Gi" @@ -55,9 +57,9 @@ metadata: } ] spec: - version: 4.4.0 + version: 4.5.0 displayName: Elasticsearch Operator - minKubeVersion: 1.14.0 + 
minKubeVersion: 1.17.1 description: | The Elasticsearch Operator for OKD provides a means for configuring and managing an Elasticsearch cluster for use in tracing and cluster logging. @@ -127,6 +129,12 @@ spec: - statefulsets verbs: - "*" + - apiGroups: + - batch + resources: + - cronjobs + verbs: + - "*" - apiGroups: - monitoring.coreos.com resources: @@ -194,7 +202,7 @@ spec: - name: PROXY_IMAGE value: "quay.io/openshift/origin-oauth-proxy:latest" - name: ELASTICSEARCH_IMAGE - value: "quay.io/openshift/origin-logging-elasticsearch5:latest" + value: "quay.io/openshift/origin-logging-elasticsearch6:latest" customresourcedefinitions: owned: - name: elasticsearches.logging.openshift.io diff --git a/vendor/github.com/openshift/elasticsearch-operator/manifests/4.4/elasticsearches.crd.yaml b/vendor/github.com/openshift/elasticsearch-operator/manifests/4.5/elasticsearches.crd.yaml similarity index 100% rename from vendor/github.com/openshift/elasticsearch-operator/manifests/4.4/elasticsearches.crd.yaml rename to vendor/github.com/openshift/elasticsearch-operator/manifests/4.5/elasticsearches.crd.yaml diff --git a/vendor/github.com/openshift/elasticsearch-operator/manifests/4.4/image-references b/vendor/github.com/openshift/elasticsearch-operator/manifests/4.5/image-references similarity index 78% rename from vendor/github.com/openshift/elasticsearch-operator/manifests/4.4/image-references rename to vendor/github.com/openshift/elasticsearch-operator/manifests/4.5/image-references index caa47ea916..cdfb10d2e5 100644 --- a/vendor/github.com/openshift/elasticsearch-operator/manifests/4.4/image-references +++ b/vendor/github.com/openshift/elasticsearch-operator/manifests/4.5/image-references @@ -7,10 +7,10 @@ spec: from: kind: DockerImage name: quay.io/openshift/origin-elasticsearch-operator:latest - - name: logging-elasticsearch5 + - name: logging-elasticsearch6 from: kind: DockerImage - name: quay.io/openshift/origin-logging-elasticsearch5:latest + name: quay.io/openshift/origin-logging-elasticsearch6:latest - name: oauth-proxy from: kind: DockerImage diff --git a/vendor/github.com/openshift/elasticsearch-operator/manifests/elasticsearch-operator.package.yaml b/vendor/github.com/openshift/elasticsearch-operator/manifests/elasticsearch-operator.package.yaml index 21dee35e07..c5f616f098 100644 --- a/vendor/github.com/openshift/elasticsearch-operator/manifests/elasticsearch-operator.package.yaml +++ b/vendor/github.com/openshift/elasticsearch-operator/manifests/elasticsearch-operator.package.yaml @@ -1,5 +1,5 @@ -#! package-manifest: ./deploy/chart/catalog_resources/rh-operators/elasticsearch-operator.v4.4.0.clusterserviceversion.yaml +#! 
package-manifest: ./deploy/chart/catalog_resources/rh-operators/elasticsearch-operator.v4.5.0.clusterserviceversion.yaml packageName: elasticsearch-operator channels: -- name: "4.4" - currentCSV: elasticsearch-operator.v4.4.0 +- name: "4.5" + currentCSV: elasticsearch-operator.v4.5.0 diff --git a/vendor/github.com/openshift/elasticsearch-operator/pkg/apis/logging/v1/elasticsearch_types.go b/vendor/github.com/openshift/elasticsearch-operator/pkg/apis/logging/v1/elasticsearch_types.go index 4dff5f3465..70fbb71918 100644 --- a/vendor/github.com/openshift/elasticsearch-operator/pkg/apis/logging/v1/elasticsearch_types.go +++ b/vendor/github.com/openshift/elasticsearch-operator/pkg/apis/logging/v1/elasticsearch_types.go @@ -24,6 +24,21 @@ type Elasticsearch struct { Status ElasticsearchStatus `json:"status,omitempty"` } +//AddOwnerRefTo appends the Elasticsearch object as an OwnerReference to the passed object +func (es *Elasticsearch) AddOwnerRefTo(o metav1.Object) { + trueVar := true + ref := metav1.OwnerReference{ + APIVersion: SchemeGroupVersion.String(), + Kind: "Elasticsearch", + Name: es.Name, + UID: es.UID, + Controller: &trueVar, + } + if (metav1.OwnerReference{}) != ref { + o.SetOwnerReferences(append(o.GetOwnerReferences(), ref)) + } +} + // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ElasticsearchList contains a list of Elasticsearch diff --git a/vendor/github.com/openshift/elasticsearch-operator/pkg/indexmanagement/converters.go b/vendor/github.com/openshift/elasticsearch-operator/pkg/indexmanagement/converters.go new file mode 100644 index 0000000000..333454c723 --- /dev/null +++ b/vendor/github.com/openshift/elasticsearch-operator/pkg/indexmanagement/converters.go @@ -0,0 +1,62 @@ +package indexmanagement + +import ( + "fmt" + "strconv" + + apis "github.com/openshift/elasticsearch-operator/pkg/apis/logging/v1" + "github.com/openshift/elasticsearch-operator/pkg/logger" +) + +func calculateConditions(policy apis.IndexManagementPolicySpec, primaryShards int32) rolloverConditions { + // 40GB = 40960 1M messages + maxDoc := 40960 * primaryShards + maxSize := defaultShardSize * primaryShards + maxAge := "" + if policy.Phases.Hot != nil && policy.Phases.Hot.Actions.Rollover != nil { + maxAge = string(policy.Phases.Hot.Actions.Rollover.MaxAge) + } + return rolloverConditions{ + MaxSize: fmt.Sprintf("%dgb", maxSize), + MaxDocs: maxDoc, + MaxAge: maxAge, + } +} + +func calculateMillisForTimeUnit(timeunit apis.TimeUnit) (uint64, error) { + match := reTimeUnit.FindStringSubmatch(string(timeunit)) + if match == nil { + return 0, fmt.Errorf("Unable to convert timeunit to millis for invalid timeunit %q", timeunit) + } + number, err := strconv.ParseUint(match[1], 10, 0) + if err != nil { + logger.Infof("unable to parse %v", err) + return 0, err + } + switch match[2] { + case "w": + return number * millisPerWeek, nil + case "d": + return number * millisPerDay, nil + case "h", "H": + return number * millisPerHour, nil + case "m": + return number * millisPerMinute, nil + case "s": + return number * millisPerSecond, nil + } + return 0, fmt.Errorf("conversion to millis for time unit %q is unsupported", match[2]) +} + +func crontabScheduleFor(timeunit apis.TimeUnit) (string, error) { + match := reTimeUnit.FindStringSubmatch(string(timeunit)) + if match == nil { + return "", fmt.Errorf("Unable to create crontab schedule for invalid timeunit %q", timeunit) + } + switch match[2] { + case "m": + return fmt.Sprintf("*/%s * * * *", match[1]), nil + } + + return "", 
fmt.Errorf("crontab schedule for time unit %q is unsupported", match[2]) +} diff --git a/vendor/github.com/openshift/elasticsearch-operator/pkg/indexmanagement/reconcile.go b/vendor/github.com/openshift/elasticsearch-operator/pkg/indexmanagement/reconcile.go new file mode 100644 index 0000000000..5b18ed057d --- /dev/null +++ b/vendor/github.com/openshift/elasticsearch-operator/pkg/indexmanagement/reconcile.go @@ -0,0 +1,351 @@ +package indexmanagement + +import ( + "context" + "encoding/base64" + "fmt" + "reflect" + "strconv" + + batchv1 "k8s.io/api/batch/v1" + batch "k8s.io/api/batch/v1beta1" + core "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/util/retry" + "sigs.k8s.io/controller-runtime/pkg/client" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + apis "github.com/openshift/elasticsearch-operator/pkg/apis/logging/v1" + "github.com/openshift/elasticsearch-operator/pkg/logger" + k8s "github.com/openshift/elasticsearch-operator/pkg/types/k8s" + "github.com/openshift/elasticsearch-operator/pkg/utils" + "github.com/openshift/elasticsearch-operator/pkg/utils/comparators" +) + +const ( + indexManagmentNamePrefix = "ocp-index-mgm" + indexManagementConfigmap = "indexmanagement-scripts" + defaultShardSize = int32(40) +) + +var ( + defaultCPURequest = resource.MustParse("100m") + defaultMemoryRequest = resource.MustParse("32Mi") + jobHistoryLimitFailed = utils.GetInt32(2) + jobHistoryLimitSuccess = utils.GetInt32(1) + + millisPerSecond = uint64(1000) + millisPerMinute = uint64(60 * millisPerSecond) + millisPerHour = uint64(millisPerMinute * 60) + millisPerDay = uint64(millisPerHour * 24) + millisPerWeek = uint64(millisPerDay * 7) + + //fullExecMode 0777 + fullExecMode = utils.GetInt32(int32(511)) + + imLabels = map[string]string{ + "provider": "openshift", + "component": "indexManagement", + "logging-infra": "indexManagement", + } +) + +type rolloverConditions struct { + MaxAge string `json:"max_age,omitempty"` + MaxDocs int32 `json:"max_docs,omitempty"` + MaxSize string `json:"max_size,omitempty"` +} + +func RemoveCronJobsForMappings(apiclient client.Client, cluster *apis.Elasticsearch, mappings []apis.IndexManagementPolicyMappingSpec, policies apis.PolicyMap) error { + expected := sets.NewString() + for _, mapping := range mappings { + policy := policies[mapping.PolicyRef] + if policy.Phases.Hot != nil { + expected.Insert(fmt.Sprintf("%s-rollover-%s", indexManagmentNamePrefix, mapping.Name)) + } + if policy.Phases.Delete != nil { + expected.Insert(fmt.Sprintf("%s-delete-%s", indexManagmentNamePrefix, mapping.Name)) + } + } + logger.Debugf("Expecting to have cronjobs in %s: %v", cluster.Namespace, expected.List()) + selector := labels.NewSelector() + for k, v := range imLabels { + req, _ := labels.NewRequirement(k, selection.Equals, []string{v}) + selector.Add(*req) + } + cronList := &batch.CronJobList{} + if err := apiclient.List(context.TODO(), &client.ListOptions{Namespace: cluster.Namespace, LabelSelector: selector}, cronList); err != nil { + return err + } + existing := sets.NewString() + for _, cron := range cronList.Items { + existing.Insert(cron.Name) + } + difference := existing.Difference(expected) + logger.Debugf("Removing cronjobs in %s: %v", cluster.Namespace, difference.List()) + for _, name := range difference.List() { + cronjob := 
&batch.CronJob{ + TypeMeta: metav1.TypeMeta{ + Kind: "CronJob", + APIVersion: batch.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: cluster.Namespace, + }, + } + err := apiclient.Delete(context.TODO(), cronjob) + if err != nil && !errors.IsNotFound(err) { + logger.Errorf("Failure culling %s/%s cronjob %v", cluster.Namespace, name, err) + } + } + return nil +} + +func ReconcileCurationConfigmap(apiclient client.Client, cluster *apis.Elasticsearch) error { + data := scriptMap + desired := k8s.NewConfigMap(indexManagementConfigmap, cluster.Namespace, imLabels, data) + cluster.AddOwnerRefTo(desired) + err := apiclient.Create(context.TODO(), desired) + if err != nil { + if !errors.IsAlreadyExists(err) { + return fmt.Errorf("Error creating configmap for cluster %s: %v", cluster.Name, err) + } + err = retry.RetryOnConflict(retry.DefaultRetry, func() error { + current := &v1.ConfigMap{} + retryError := apiclient.Get(context.TODO(), types.NamespacedName{Name: desired.Name, Namespace: desired.Namespace}, current) + if retryError != nil { + return fmt.Errorf("Unable to get configmap %s/%s during reconciliation: %v", desired.Namespace, desired.Name, retryError) + } + if !reflect.DeepEqual(desired.Data, current.Data) { + logger.Debugf("Updating configmap %s/%s", current.Namespace, current.Name) + current.Data = desired.Data + return apiclient.Update(context.TODO(), current) + } + return nil + }) + } + return err +} + +func ReconcileRolloverCronjob(apiclient client.Client, cluster *apis.Elasticsearch, policy apis.IndexManagementPolicySpec, mapping apis.IndexManagementPolicyMappingSpec, primaryShards int32) error { + if policy.Phases.Hot == nil { + logger.Infof("Skipping rollover cronjob for policymapping %q; hot phase not defined", mapping.Name) + return nil + } + schedule, err := crontabScheduleFor(policy.PollInterval) + if err != nil { + return err + } + conditions := calculateConditions(policy, primaryShards) + name := fmt.Sprintf("%s-rollover-%s", indexManagmentNamePrefix, mapping.Name) + payload, err := utils.ToJson(map[string]rolloverConditions{"conditions": conditions}) + if err != nil { + return fmt.Errorf("There was an error serializing the rollover conditions to JSON: %v", err) + } + envvars := []core.EnvVar{ + {Name: "PAYLOAD", Value: base64.StdEncoding.EncodeToString([]byte(payload))}, + {Name: "POLICY_MAPPING", Value: mapping.Name}, + } + fnContainerHandler := func(container *core.Container) { + container.Command = []string{"bash"} + container.Args = []string{ + "-c", + "/tmp/scripts/rollover", + } + } + desired := newCronJob(cluster.Name, cluster.Spec.Spec.Image, cluster.Namespace, name, schedule, cluster.Spec.Spec.NodeSelector, cluster.Spec.Spec.Tolerations, envvars, fnContainerHandler) + + cluster.AddOwnerRefTo(desired) + return reconcileCronJob(apiclient, cluster, desired, areCronJobsSame) +} + +func ReconcileCurationCronjob(apiclient client.Client, cluster *apis.Elasticsearch, policy apis.IndexManagementPolicySpec, mapping apis.IndexManagementPolicyMappingSpec, primaryShards int32) error { + if policy.Phases.Delete == nil { + logger.Infof("Skipping curation cronjob for policymapping %q; delete phase not defined", mapping.Name) + return nil + } + schedule, err := crontabScheduleFor(policy.PollInterval) + if err != nil { + return err + } + minAgeMillis, err := calculateMillisForTimeUnit(policy.Phases.Delete.MinAge) + if err != nil { + return err + } + name := fmt.Sprintf("%s-delete-%s", indexManagmentNamePrefix, mapping.Name) + envvars := 
[]core.EnvVar{
+		{Name: "ALIAS", Value: mapping.Name},
+		{Name: "MIN_AGE", Value: strconv.FormatUint(minAgeMillis, 10)},
+	}
+	fnContainerHandler := func(container *core.Container) {
+		container.Command = []string{"bash"}
+		container.Args = []string{
+			"-c",
+			"/tmp/scripts/delete",
+		}
+	}
+	desired := newCronJob(cluster.Name, cluster.Spec.Spec.Image, cluster.Namespace, name, schedule, cluster.Spec.Spec.NodeSelector, cluster.Spec.Spec.Tolerations, envvars, fnContainerHandler)
+
+	cluster.AddOwnerRefTo(desired)
+	return reconcileCronJob(apiclient, cluster, desired, areCronJobsSame)
+}
+
+func reconcileCronJob(apiclient client.Client, cluster *apis.Elasticsearch, desired *batch.CronJob, fnAreCronJobsSame func(lhs, rhs *batch.CronJob) bool) error {
+	err := apiclient.Create(context.TODO(), desired)
+	if err != nil {
+		if !errors.IsAlreadyExists(err) {
+			return fmt.Errorf("Error creating cronjob for cluster %s: %v", cluster.Name, err)
+		}
+		err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
+			current := &batch.CronJob{}
+			retryError := apiclient.Get(context.TODO(), types.NamespacedName{Name: desired.Name, Namespace: desired.Namespace}, current)
+			if retryError != nil {
+				return fmt.Errorf("Unable to get cronjob %s/%s during reconciliation: %v", desired.Namespace, desired.Name, retryError)
+			}
+			if !fnAreCronJobsSame(current, desired) {
+				current.Spec = desired.Spec
+				return apiclient.Update(context.TODO(), current)
+			}
+			return nil
+		})
+	}
+	return err
+}
+
+func areCronJobsSame(lhs, rhs *batch.CronJob) bool {
+	logger.Debugf("Evaluating cronjob '%s/%s' ...", lhs.Namespace, lhs.Name)
+	if len(lhs.Spec.JobTemplate.Spec.Template.Spec.Containers) != len(rhs.Spec.JobTemplate.Spec.Template.Spec.Containers) {
+		logger.Debugf("Container lengths are different between current and desired for %s/%s", lhs.Namespace, lhs.Name)
+		return false
+	}
+	if !comparators.AreStringMapsSame(lhs.Spec.JobTemplate.Spec.Template.Spec.NodeSelector, rhs.Spec.JobTemplate.Spec.Template.Spec.NodeSelector) {
+		logger.Debugf("NodeSelector is different between current and desired for %s/%s", lhs.Namespace, lhs.Name)
+		return false
+	}
+
+	if !comparators.AreTolerationsSame(lhs.Spec.JobTemplate.Spec.Template.Spec.Tolerations, rhs.Spec.JobTemplate.Spec.Template.Spec.Tolerations) {
+		logger.Debugf("Tolerations are different between current and desired for %s/%s", lhs.Namespace, lhs.Name)
+		return false
+	}
+	if lhs.Spec.Schedule != rhs.Spec.Schedule {
+		logger.Debugf("Schedule is different between current and desired for %s/%s", lhs.Namespace, lhs.Name)
+		lhs.Spec.Schedule = rhs.Spec.Schedule
+		return false
+	}
+	if lhs.Spec.Suspend != nil && rhs.Spec.Suspend != nil && *lhs.Spec.Suspend != *rhs.Spec.Suspend {
+		logger.Debugf("Suspend is different between current and desired for %s/%s", lhs.Namespace, lhs.Name)
+		return false
+	}
+
+	for i, container := range lhs.Spec.JobTemplate.Spec.Template.Spec.Containers {
+		logger.Debugf("Evaluating cronjob container %q ...", container.Name)
+		other := rhs.Spec.JobTemplate.Spec.Template.Spec.Containers[i]
+		if container.Name != other.Name {
+			logger.Debugf("Container name is different between current and desired for %s/%s", lhs.Namespace, lhs.Name)
+			return false
+		}
+		if container.Image != other.Image {
+			logger.Debugf("Container image is different between current and desired for %s/%s: %q != %q", lhs.Namespace, lhs.Name, container.Image, other.Image)
+			return false
+		}
+
+		if !reflect.DeepEqual(container.Command, other.Command) {
+			logger.Debugf("Container command is 
different between current and desired for %s/%s", lhs.Namespace, lhs.Name) + return false + } + if !reflect.DeepEqual(container.Args, other.Args) { + logger.Debugf("Container command args is different between current and desired for %s/%s", lhs.Namespace, lhs.Name) + return false + } + + if !comparators.AreResourceRequementsSame(container.Resources, other.Resources) { + logger.Debugf("Container resources are different between current and desired for %s/%s", lhs.Namespace, lhs.Name) + return false + } + + if !comparators.EnvValueEqual(container.Env, other.Env) { + logger.Debugf("Container EnvVars are different between current and desired for %s/%s", lhs.Namespace, lhs.Name) + return false + } + + } + logger.Debug("The current and desired cronjobs are the same") + return true +} + +func newCronJob(clusterName, image, namespace, name, schedule string, nodeSelector map[string]string, tolerations []core.Toleration, envvars []core.EnvVar, fnContainerHander func(*core.Container)) *batch.CronJob { + container := core.Container{ + Name: "indexmanagement", + Image: image, + ImagePullPolicy: core.PullIfNotPresent, + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceMemory: defaultMemoryRequest, + v1.ResourceCPU: defaultCPURequest, + }, + }, + Env: []core.EnvVar{ + {Name: "ES_SERVICE", Value: fmt.Sprintf("https://%s:9200", clusterName)}, + }, + } + container.Env = append(container.Env, envvars...) + fnContainerHander(&container) + + container.VolumeMounts = []v1.VolumeMount{ + {Name: "certs", ReadOnly: true, MountPath: "/etc/indexmanagement/keys"}, + {Name: "scripts", ReadOnly: false, MountPath: "/tmp/scripts"}, + } + podSpec := core.PodSpec{ + ServiceAccountName: clusterName, + Containers: []v1.Container{container}, + Volumes: []v1.Volume{ + {Name: "certs", VolumeSource: v1.VolumeSource{Secret: &v1.SecretVolumeSource{SecretName: clusterName}}}, + {Name: "scripts", VolumeSource: v1.VolumeSource{ConfigMap: &v1.ConfigMapVolumeSource{LocalObjectReference: v1.LocalObjectReference{Name: indexManagementConfigmap}, DefaultMode: fullExecMode}}}, + }, + NodeSelector: utils.EnsureLinuxNodeSelector(nodeSelector), + Tolerations: tolerations, + RestartPolicy: v1.RestartPolicyNever, + TerminationGracePeriodSeconds: utils.GetInt64(300), + } + + cronJob := &batch.CronJob{ + TypeMeta: metav1.TypeMeta{ + Kind: "CronJob", + APIVersion: batch.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: imLabels, + }, + Spec: batch.CronJobSpec{ + SuccessfulJobsHistoryLimit: jobHistoryLimitSuccess, + FailedJobsHistoryLimit: jobHistoryLimitFailed, + Schedule: schedule, + JobTemplate: batch.JobTemplateSpec{ + Spec: batchv1.JobSpec{ + BackoffLimit: utils.GetInt32(0), + Parallelism: utils.GetInt32(1), + Template: v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: imLabels, + }, + Spec: podSpec, + }, + }, + }, + }, + } + + return cronJob +} diff --git a/vendor/github.com/openshift/elasticsearch-operator/pkg/indexmanagement/scripts.go b/vendor/github.com/openshift/elasticsearch-operator/pkg/indexmanagement/scripts.go new file mode 100644 index 0000000000..7267c67f85 --- /dev/null +++ b/vendor/github.com/openshift/elasticsearch-operator/pkg/indexmanagement/scripts.go @@ -0,0 +1,84 @@ +package indexmanagement + +const rolloverScript = ` +set -euox pipefail +decoded=$(echo $PAYLOAD | base64 -d) +code=$(curl "$ES_SERVICE/$POLICY_MAPPING-write/_rollover?pretty" \ + -w "%{response_code}" \ + -sv \ + 
--cacert /etc/indexmanagement/keys/admin-ca \
+  --cert /etc/indexmanagement/keys/admin-cert \
+  --key /etc/indexmanagement/keys/admin-key \
+  -HContent-Type:application/json \
+  -XPOST \
+  -o /tmp/response.txt \
+  -d $decoded)
+if [ "$code" == "200" ] ; then
+  exit 0
+fi
+cat /tmp/response.txt
+exit 1
+`
+const deleteScript = `
+set -euox pipefail
+
+indices=$(curl -s $ES_SERVICE/$ALIAS/_settings/index.creation_date \
+  --cacert /etc/indexmanagement/keys/admin-ca \
+  --cert /etc/indexmanagement/keys/admin-cert \
+  --key /etc/indexmanagement/keys/admin-key \
+  -HContent-Type:application/json)
+
+CMD=$(cat <<END
+import json,sys
+r=json.load(sys.stdin)
+indices=[index for index in r]
+indices.sort(reverse=True)
+if len(indices) > 0:
+    print indices[0]
+END
+)
+writeIndex=$(echo "${indices}" | python -c "$CMD")
+
+
+nowInMillis=$(date +%s%3N)
+minAgeFromEpoc=$(($nowInMillis - $MIN_AGE))
+CMD=$(cat <<END
+import json,sys
+r=json.load(sys.stdin)
+indices=[index for index in r if int(r[index]['settings']['index']['creation_date']) < $minAgeFromEpoc and index != "$writeIndex"]
+print ','.join(indices)
+END
+)
+indices=$(echo "${indices}" | python -c "$CMD")
+
+if [ "${indices}" != "" ] ; then
+  curl -s $ES_SERVICE/${indices} \
+    --cacert /etc/indexmanagement/keys/admin-ca \
+    --cert /etc/indexmanagement/keys/admin-cert \
+    --key /etc/indexmanagement/keys/admin-key \
+    -XDELETE
+fi
+`
+
+var scriptMap = map[string]string{
+	"rollover": rolloverScript,
+	"delete":   deleteScript,
+}
+	reTimeUnit = regexp.MustCompile("^(?P<number>\\d+)(?P<unit>[yMwdhHms])$")
 )
 
 const (
diff --git a/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/cluster.go b/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/cluster.go
index 2246e707c6..d079e2f454 100644
--- a/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/cluster.go
+++ b/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/cluster.go
@@ -17,6 +17,8 @@ import (
 var wrongConfig bool
 var nodes map[string][]NodeTypeInterface
 
+var aliasNeededMap map[string]bool
+
 func FlushNodes(clusterName, namespace string) {
 	nodes[nodeMapKey(clusterName, namespace)] = []NodeTypeInterface{}
 }
@@ -136,6 +138,20 @@ func (elasticsearchRequest *ElasticsearchRequest) CreateOrUpdateElasticsearchClu
 			elasticsearchRequest.cluster.Namespace,
 			elasticsearchRequest.client,
 			int32(calculateReplicaCount(elasticsearchRequest.cluster)))
+
+		if aliasNeededMap == nil {
+			aliasNeededMap = make(map[string]bool)
+		}
+
+		if val, ok := aliasNeededMap[nodeMapKey(elasticsearchRequest.cluster.Name, elasticsearchRequest.cluster.Namespace)]; !ok || val {
+			// add alias to old indices if they exist and don't have one
+			// this should be removed after one release...
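+			// a false return from AddAliasForOldIndices leaves the map entry
+			// unset (or true), so the alias migration is retried on the next
+			// reconcile rather than being silently skipped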
+ successful := elasticsearchRequest.AddAliasForOldIndices() + + if successful { + aliasNeededMap[nodeMapKey(elasticsearchRequest.cluster.Name, elasticsearchRequest.cluster.Namespace)] = false + } + } } } } diff --git a/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/common.go b/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/common.go index 0190bdead4..4ff68cf6f4 100644 --- a/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/common.go +++ b/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/common.go @@ -8,15 +8,16 @@ import ( "github.com/sirupsen/logrus" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/openshift/elasticsearch-operator/pkg/utils" "sigs.k8s.io/controller-runtime/pkg/client" api "github.com/openshift/elasticsearch-operator/pkg/apis/logging/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // addOwnerRefToObject appends the desired OwnerReference to the object +// deprecated in favor of Elasticsearch#AddOwnerRefTo func addOwnerRefToObject(o metav1.Object, r metav1.OwnerReference) { if (metav1.OwnerReference{}) != r { o.SetOwnerReferences(append(o.GetOwnerReferences(), r)) @@ -247,9 +248,10 @@ func newProxyContainer(imageName, clusterName string) (v1.Container, error) { }, }, Args: []string{ + "--http-address=:4180", "--https-address=:60000", "--provider=openshift", - "--upstream=https://127.0.0.1:9200", + "--upstream=https://localhost:9200", "--tls-cert=/etc/proxy/secrets/tls.crt", "--tls-key=/etc/proxy/secrets/tls.key", "--upstream-ca=/etc/proxy/elasticsearch/admin-ca", @@ -364,7 +366,7 @@ func newPodTemplateSpec(nodeName, clusterName, namespace string, node api.Elasti selectors := mergeSelectors(node.NodeSelector, commonSpec.NodeSelector) // We want to make sure the pod ends up allocated on linux node. Thus we make sure the // linux node selectors is always present. 
See LOG-411 - selectors = ensureLinuxNodeSelector(selectors) + selectors = utils.EnsureLinuxNodeSelector(selectors) tolerations := appendTolerations(node.Tolerations, commonSpec.Tolerations) tolerations = appendTolerations(tolerations, []v1.Toleration{ diff --git a/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/configmaps.go b/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/configmaps.go index 67b5bf6024..ba40d2dad5 100644 --- a/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/configmaps.go +++ b/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/configmaps.go @@ -60,7 +60,7 @@ func (elasticsearchRequest *ElasticsearchRequest) CreateOrUpdateConfigMaps() (er dpl.Labels, kibanaIndexMode, esUnicastHost(dpl.Name, dpl.Namespace), - rootLogger(), + rootLogger(elasticsearchRequest.cluster), strconv.Itoa(masterNodeCount/2+1), strconv.Itoa(dataNodeCount), strconv.Itoa(dataNodeCount), diff --git a/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/defaults.go b/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/defaults.go index 2529e5bafe..741490a387 100644 --- a/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/defaults.go +++ b/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/defaults.go @@ -9,13 +9,13 @@ import ( const ( modeUnique = "unique" modeSharedOps = "shared_ops" - defaultMode = modeSharedOps + defaultMode = modeSharedOps defaultMasterCPURequest = "100m" defaultCPURequest = "100m" defaultMemoryLimit = "4Gi" defaultMemoryRequest = "1Gi" - elasticsearchDefaultImage = "quay.io/openshift/origin-logging-elasticsearch5" + elasticsearchDefaultImage = "quay.io/openshift/origin-logging-elasticsearch6" maxMasterCount = 3 @@ -24,6 +24,8 @@ const ( heapDumpLocation = "/elasticsearch/persistent/heapdump.hprof" k8sTokenFile = "/var/run/secrets/kubernetes.io/serviceaccount/token" + + logAppenderAnnotation = "elasticsearch.openshift.io/develLogAppender" ) func kibanaIndexMode(mode string) (string, error) { @@ -40,7 +42,10 @@ func esUnicastHost(clusterName, namespace string) string { return fmt.Sprintf("%v-cluster.%v.svc", clusterName, namespace) } -func rootLogger() string { +func rootLogger(cluster *api.Elasticsearch) string { + if value, ok := cluster.GetAnnotations()[log4jConfig]; ok { + return value + } return "rolling" } diff --git a/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/elasticsearch.go b/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/elasticsearch.go index 7e89aa3215..3e686e1317 100644 --- a/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/elasticsearch.go +++ b/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/elasticsearch.go @@ -8,6 +8,7 @@ import ( "github.com/inhies/go-bytesize" api "github.com/openshift/elasticsearch-operator/pkg/apis/logging/v1" + "github.com/openshift/elasticsearch-operator/pkg/logger" estypes "github.com/openshift/elasticsearch-operator/pkg/types/elasticsearch" "github.com/openshift/elasticsearch-operator/pkg/utils" "k8s.io/apimachinery/pkg/util/sets" @@ -343,13 +344,13 @@ func (req *ElasticsearchRequest) CreateIndex(name string, index *estypes.Index) URI: name, RequestBody: body, } - + logger.DebugObject("CreateIndex with payload: %s", index) req.FnCurlEsService(req.cluster.Name, req.cluster.Namespace, payload, req.client) if payload.Error != nil { return payload.Error } if payload.StatusCode != 200 && payload.StatusCode != 201 { - return fmt.Errorf("There was 
an error creating index %s. Error code: %v, %v", index.Name, payload.StatusCode != 200, payload.RequestBody)
+		return fmt.Errorf("There was an error creating index %s. Error code: %v, %v", index.Name, payload.StatusCode, payload.ResponseBody)
 	}
 	return nil
 }
@@ -364,12 +365,13 @@
 		RequestBody: body,
 	}
 
+	logger.DebugObject("CreateIndexTemplate with payload: %s", template)
 	req.FnCurlEsService(req.cluster.Name, req.cluster.Namespace, payload, req.client)
 	if payload.Error != nil {
 		return payload.Error
 	}
 	if payload.StatusCode != 200 && payload.StatusCode != 201 {
-		return fmt.Errorf("There was an error creating index template %s. Error code: %v, %v", name, payload.StatusCode != 200, payload.RequestBody)
+		return fmt.Errorf("There was an error creating index template %s. Error code: %v, %v", name, payload.StatusCode, payload.ResponseBody)
 	}
 	return nil
 }
@@ -402,7 +404,7 @@ func (req *ElasticsearchRequest) ListTemplates() (sets.String, error) {
 		return nil, payload.Error
 	}
 	if payload.StatusCode != 200 {
-		return nil, fmt.Errorf("There was an error retrieving list of templates. Error code: %v, %v", payload.StatusCode != 200, payload.RequestBody)
+		return nil, fmt.Errorf("There was an error retrieving list of templates. Error code: %v, %v", payload.StatusCode, payload.ResponseBody)
 	}
 	response := sets.NewString()
 	for name := range payload.ResponseBody {
@@ -426,7 +428,7 @@ func (req *ElasticsearchRequest) ListIndicesForAlias(aliasPattern string) ([]str
 		return []string{}, nil
 	}
 	if payload.StatusCode != 200 {
-		return nil, fmt.Errorf("There was an error retrieving list of indices aliased to %s. Error code: %v, %v", aliasPattern, payload.StatusCode != 200, payload.RequestBody)
+		return nil, fmt.Errorf("There was an error retrieving list of indices aliased to %s. Error code: %v, %v", aliasPattern, payload.StatusCode, payload.ResponseBody)
 	}
 	response := []string{}
 	for index := range payload.ResponseBody {
@@ -434,3 +436,84 @@ func (req *ElasticsearchRequest) ListIndicesForAlias(aliasPattern string) ([]str
 	}
 	return response, nil
 }
+
+func (req *ElasticsearchRequest) AddAliasForOldIndices() bool {
+	// get .operations.*/_alias
+	// get project.*/_alias
+	/*
+		{
+			"project.test.107d38b1-413b-11ea-a2cd-0a3ee645943a.2020.01.27" : {
+				"aliases" : {
+					"test" : { }
+				}
+			},
+			"project.test2.8fe8b95e-4147-11ea-91e1-062a8c33f2ae.2020.01.27" : {
+				"aliases" : { }
+			}
+		}
+	*/
+
+	successful := true
+
+	payload := &esCurlStruct{
+		Method: http.MethodGet,
+		URI:    "project.*,.operations.*/_alias",
+	}
+
+	req.FnCurlEsService(req.cluster.Name, req.cluster.Namespace, payload, req.client)
+
+	// alias name choice based on https://github.com/openshift/enhancements/blob/master/enhancements/cluster-logging/cluster-logging-es-rollover-data-design.md#data-model
+	for index := range payload.ResponseBody {
+		// iterate over each index, if they have no aliases that match the new format
+		// then PUT the alias
+
+		indexAlias := ""
+		if strings.HasPrefix(index, "project.") {
+			// it is a container log index
+			indexAlias = "app"
+		} else {
+			// it is an operations index
+			indexAlias = "infra"
+		}
+
+		if payload.ResponseBody[index] != nil {
+			indexBody := payload.ResponseBody[index].(map[string]interface{})
+			if indexBody["aliases"] != nil {
+				aliasBody := indexBody["aliases"].(map[string]interface{})
+
+				found := false
+				for alias := range aliasBody {
+					if alias == indexAlias {
+						found = true
+						break
+					}
+				}
+
+				if !found {
+					// PUT <index>/_alias/<alias>
+					putPayload := &esCurlStruct{
+						Method: http.MethodPut,
+						URI:    fmt.Sprintf("%s/_alias/%s", index, indexAlias),
+					}
+
+					req.FnCurlEsService(req.cluster.Name, req.cluster.Namespace, putPayload, req.client)
+
+					// check the response here -- if any failed then we want to return "false"
+					// but want to continue trying to process as many as we can now.
+					if putPayload.Error != nil || !parseBool("acknowledged", putPayload.ResponseBody) {
+						successful = false
+					}
+				}
+			} else {
+				// if for some reason we received a response without an "aliases" field
+				// we want to retry -- es may not be in a good state?
+				successful = false
+			}
+		} else {
+			// if for some reason we received a response without an index field
+			// we want to retry -- es may not be in a good state?
+ successful = false + } + } + + return successful +} diff --git a/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/index_management.go b/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/index_management.go index 31a1b7b3d6..82217c1f81 100644 --- a/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/index_management.go +++ b/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/index_management.go @@ -26,10 +26,10 @@ func (elasticsearchRequest *ElasticsearchRequest) CreateOrUpdateIndexManagement( return nil } spec := indexmanagement.VerifyAndNormalize(cluster) - //TODO find crons with no matching mapping and remove them - elasticsearchRequest.cullIndexManagement(spec.Mappings) + policies := spec.PolicyMap() + elasticsearchRequest.cullIndexManagement(spec.Mappings, policies) for _, mapping := range spec.Mappings { - logger.Debugf("reconciling index manageme nt for mapping: %s", mapping.Name) + logger.Debugf("reconciling index management for mapping: %s", mapping.Name) //create or update template if err := elasticsearchRequest.createOrUpdateIndexTemplate(mapping); err != nil { logger.Errorf("Error creating index template for mapping %s: %v", mapping.Name, err) @@ -41,10 +41,29 @@ func (elasticsearchRequest *ElasticsearchRequest) CreateOrUpdateIndexManagement( return err } } + if err := indexmanagement.ReconcileCurationConfigmap(elasticsearchRequest.client, elasticsearchRequest.cluster); err != nil { + return err + } + primaryShards := getDataCount(elasticsearchRequest.cluster) + for _, mapping := range spec.Mappings { + policy := policies[mapping.PolicyRef] + if err := indexmanagement.ReconcileRolloverCronjob(elasticsearchRequest.client, elasticsearchRequest.cluster, policy, mapping, primaryShards); err != nil { + logger.Errorf("There was an error reconciling the rollover cronjob for policy %q: %v", policy.Name, err) + return err + } + if err := indexmanagement.ReconcileCurationCronjob(elasticsearchRequest.client, elasticsearchRequest.cluster, policy, mapping, primaryShards); err != nil { + logger.Errorf("There was an error reconciling the curation cronjob for policy %q: %v", policy.Name, err) + return err + } + } return nil } -func (elasticsearchRequest *ElasticsearchRequest) cullIndexManagement(mappings []logging.IndexManagementPolicyMappingSpec) { + +func (elasticsearchRequest *ElasticsearchRequest) cullIndexManagement(mappings []logging.IndexManagementPolicyMappingSpec, policies logging.PolicyMap) { + if err := indexmanagement.RemoveCronJobsForMappings(elasticsearchRequest.client, elasticsearchRequest.cluster, mappings, policies); err != nil { + logger.Errorf("Unable to cull cronjobs: %v", err) + } mappingNames := sets.NewString() for _, mapping := range mappings { mappingNames.Insert(formatTemplateName(mapping.Name)) @@ -65,7 +84,7 @@ func (elasticsearchRequest *ElasticsearchRequest) cullIndexManagement(mappings [ } } func (elasticsearchRequest *ElasticsearchRequest) initializeIndexIfNeeded(mapping logging.IndexManagementPolicyMappingSpec) error { - pattern := fmt.Sprintf("%s-write", mapping.Name) + pattern := formatWriteAlias(mapping) indices, err := elasticsearchRequest.ListIndicesForAlias(pattern) if err != nil { return err @@ -89,6 +108,10 @@ func formatTemplateName(name string) string { return fmt.Sprintf("%s-%s", ocpTemplatePrefix, name) } +func formatWriteAlias(mapping logging.IndexManagementPolicyMappingSpec) string { + return fmt.Sprintf("%s-write", mapping.Name) +} + func (elasticsearchRequest *ElasticsearchRequest) 
createOrUpdateIndexTemplate(mapping logging.IndexManagementPolicyMappingSpec) error { name := formatTemplateName(mapping.Name) pattern := fmt.Sprintf("%s*", mapping.Name) diff --git a/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/util.go b/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/util.go index 46679c90d8..54be1bc30c 100644 --- a/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/util.go +++ b/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/util.go @@ -5,38 +5,14 @@ import ( "fmt" "strings" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "sigs.k8s.io/controller-runtime/pkg/client" api "github.com/openshift/elasticsearch-operator/pkg/apis/logging/v1" - "github.com/sirupsen/logrus" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - OsNodeLabel = "kubernetes.io/os" - LinuxValue = "linux" ) -// ensureLinuxNodeSelector takes given selector map and returns a selector map with linux node selector added into it. -// If there is already a node type selector and is different from "linux" then it is overridden and warning is logged. -// See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#interlude-built-in-node-labels -func ensureLinuxNodeSelector(selectors map[string]string) map[string]string { - if selectors == nil { - return map[string]string{OsNodeLabel: LinuxValue} - } - if os, ok := selectors[OsNodeLabel]; ok { - if os == LinuxValue { - return selectors - } - // Selector is provided but is not "linux" - logrus.Warnf("Overriding node selector value: %s=%s to %s", OsNodeLabel, os, LinuxValue) - } - selectors[OsNodeLabel] = LinuxValue - return selectors -} - func selectorForES(nodeRole string, clusterName string) map[string]string { return map[string]string{ diff --git a/vendor/github.com/openshift/elasticsearch-operator/pkg/logger/logger.go b/vendor/github.com/openshift/elasticsearch-operator/pkg/logger/logger.go index 0972c0d67a..f98ebba006 100644 --- a/vendor/github.com/openshift/elasticsearch-operator/pkg/logger/logger.go +++ b/vendor/github.com/openshift/elasticsearch-operator/pkg/logger/logger.go @@ -1,6 +1,7 @@ package logger import ( + "encoding/json" "os" "github.com/go-logr/logr" @@ -29,6 +30,11 @@ func Errorf(format string, objects ...interface{}) { logrus.Errorf(format, objects...) } +//Error logs messages at level error +func Error(message string) { + logrus.Error(message) +} + //Infof logs messages at level info func Infof(format string, objects ...interface{}) { logrus.Infof(format, objects...) 
@@ -39,6 +45,16 @@ func IsDebugEnabled() bool { return logrus.GetLevel() == logrus.DebugLevel } +//DebugObject pretty prints the given object +func DebugObject(sprintfMessage string, object interface{}) { + if IsDebugEnabled() && object != nil { + pretty, err := json.MarshalIndent(object, "", " ") + if err != nil { + logrus.Debugf("Error marshalling object %v for debug log: %v", object, err) + } + logrus.Debugf(sprintfMessage, string(pretty)) + } +} func init() { level := os.Getenv("LOG_LEVEL") parsed, err := logrus.ParseLevel(level) diff --git a/vendor/github.com/openshift/elasticsearch-operator/pkg/types/elasticsearch/types.go b/vendor/github.com/openshift/elasticsearch-operator/pkg/types/elasticsearch/types.go index f0744c7483..8310a5f306 100644 --- a/vendor/github.com/openshift/elasticsearch-operator/pkg/types/elasticsearch/types.go +++ b/vendor/github.com/openshift/elasticsearch-operator/pkg/types/elasticsearch/types.go @@ -57,6 +57,6 @@ type IndexAlias struct { } type IndexSettings struct { - NumberOfShards int32 `json:"number_of_shards,omitempty"` - NumberOfReplicas int32 `json:"number_of_replicas,omitempty"` + NumberOfShards int32 `json:"number_of_shards"` + NumberOfReplicas int32 `json:"number_of_replicas"` } diff --git a/vendor/github.com/openshift/elasticsearch-operator/pkg/types/k8s/configmap.go b/vendor/github.com/openshift/elasticsearch-operator/pkg/types/k8s/configmap.go new file mode 100644 index 0000000000..e366ecbe88 --- /dev/null +++ b/vendor/github.com/openshift/elasticsearch-operator/pkg/types/k8s/configmap.go @@ -0,0 +1,22 @@ +package factory + +import ( + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func NewConfigMap(name, namespace string, labels map[string]string, data map[string]string) *v1.ConfigMap { + + return &v1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + Kind: "ConfigMap", + APIVersion: v1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: labels, + }, + Data: data, + } +} diff --git a/vendor/github.com/openshift/elasticsearch-operator/pkg/utils/comparators/envvars.go b/vendor/github.com/openshift/elasticsearch-operator/pkg/utils/comparators/envvars.go new file mode 100644 index 0000000000..30796fd310 --- /dev/null +++ b/vendor/github.com/openshift/elasticsearch-operator/pkg/utils/comparators/envvars.go @@ -0,0 +1,88 @@ +package comparators + +import ( + "reflect" + + v1 "k8s.io/api/core/v1" +) + +/** +EnvValueEqual - check if 2 EnvValues are equal or not +Notes: +- reflect.DeepEqual does not return expected results if the to-be-compared value is a pointer. +- needs to adjust with k8s.io/api/core/v#/types.go when the types are updated. 
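+- two vars with the same Name compare equal only when Value and (recursively) ValueFrom also match.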
+**/ +func EnvValueEqual(env1, env2 []v1.EnvVar) bool { + var found bool + if len(env1) != len(env2) { + return false + } + for _, elem1 := range env1 { + found = false + for _, elem2 := range env2 { + if elem1.Name == elem2.Name { + if elem1.Value != elem2.Value { + return false + } + if (elem1.ValueFrom != nil && elem2.ValueFrom == nil) || + (elem1.ValueFrom == nil && elem2.ValueFrom != nil) { + return false + } + if elem1.ValueFrom != nil { + found = EnvVarSourceEqual(*elem1.ValueFrom, *elem2.ValueFrom) + } else { + found = true + } + break + } + } + if !found { + return false + } + } + return true +} + +func EnvVarSourceEqual(esource1, esource2 v1.EnvVarSource) bool { + if (esource1.FieldRef != nil && esource2.FieldRef == nil) || + (esource1.FieldRef == nil && esource2.FieldRef != nil) || + (esource1.ResourceFieldRef != nil && esource2.ResourceFieldRef == nil) || + (esource1.ResourceFieldRef == nil && esource2.ResourceFieldRef != nil) || + (esource1.ConfigMapKeyRef != nil && esource2.ConfigMapKeyRef == nil) || + (esource1.ConfigMapKeyRef == nil && esource2.ConfigMapKeyRef != nil) || + (esource1.SecretKeyRef != nil && esource2.SecretKeyRef == nil) || + (esource1.SecretKeyRef == nil && esource2.SecretKeyRef != nil) { + return false + } + var rval bool + if esource1.FieldRef != nil { + if rval = reflect.DeepEqual(*esource1.FieldRef, *esource2.FieldRef); !rval { + return rval + } + } + if esource1.ResourceFieldRef != nil { + if rval = EnvVarResourceFieldSelectorEqual(*esource1.ResourceFieldRef, *esource2.ResourceFieldRef); !rval { + return rval + } + } + if esource1.ConfigMapKeyRef != nil { + if rval = reflect.DeepEqual(*esource1.ConfigMapKeyRef, *esource2.ConfigMapKeyRef); !rval { + return rval + } + } + if esource1.SecretKeyRef != nil { + if rval = reflect.DeepEqual(*esource1.SecretKeyRef, *esource2.SecretKeyRef); !rval { + return rval + } + } + return true +} + +func EnvVarResourceFieldSelectorEqual(resource1, resource2 v1.ResourceFieldSelector) bool { + if (resource1.ContainerName == resource2.ContainerName) && + (resource1.Resource == resource2.Resource) && + (resource1.Divisor.Cmp(resource2.Divisor) == 0) { + return true + } + return false +} diff --git a/vendor/github.com/openshift/elasticsearch-operator/pkg/utils/comparators/maps.go b/vendor/github.com/openshift/elasticsearch-operator/pkg/utils/comparators/maps.go new file mode 100644 index 0000000000..edaadb22bf --- /dev/null +++ b/vendor/github.com/openshift/elasticsearch-operator/pkg/utils/comparators/maps.go @@ -0,0 +1,10 @@ +package comparators + +import ( + "reflect" +) + +//AreStringMapsSame compares two maps which are string key/value +func AreStringMapsSame(lhs, rhs map[string]string) bool { + return reflect.DeepEqual(lhs, rhs) +} diff --git a/vendor/github.com/openshift/elasticsearch-operator/pkg/utils/comparators/resources.go b/vendor/github.com/openshift/elasticsearch-operator/pkg/utils/comparators/resources.go new file mode 100644 index 0000000000..e5287ac4d4 --- /dev/null +++ b/vendor/github.com/openshift/elasticsearch-operator/pkg/utils/comparators/resources.go @@ -0,0 +1,26 @@ +package comparators + +import ( + v1 "k8s.io/api/core/v1" +) + +func AreResourceRequementsSame(lhs, rhs v1.ResourceRequirements) bool { + + if rhs.Limits.Cpu().Cmp(*lhs.Limits.Cpu()) != 0 { + return false + } + // Check memory limits + if rhs.Limits.Memory().Cmp(*lhs.Limits.Memory()) != 0 { + return false + } + // Check CPU requests + if rhs.Requests.Cpu().Cmp(*lhs.Requests.Cpu()) != 0 { + return false + } + // Check memory requests + if 
rhs.Requests.Memory().Cmp(*lhs.Requests.Memory()) != 0 { + return false + } + + return true +} diff --git a/vendor/github.com/openshift/elasticsearch-operator/pkg/utils/comparators/tolerations.go b/vendor/github.com/openshift/elasticsearch-operator/pkg/utils/comparators/tolerations.go new file mode 100644 index 0000000000..73a5234f87 --- /dev/null +++ b/vendor/github.com/openshift/elasticsearch-operator/pkg/utils/comparators/tolerations.go @@ -0,0 +1,50 @@ +package comparators + +import ( + v1 "k8s.io/api/core/v1" +) + +//AreTolerationsSame compares two lists of tolerations for equality +func AreTolerationsSame(lhs, rhs []v1.Toleration) bool { + if len(lhs) != len(rhs) { + return false + } + + for _, lhsToleration := range lhs { + if !containsToleration(lhsToleration, rhs) { + return false + } + } + + return true +} + +func containsToleration(toleration v1.Toleration, tolerations []v1.Toleration) bool { + for _, t := range tolerations { + if isTolerationSame(t, toleration) { + return true + } + } + + return false +} + +func isTolerationSame(lhs, rhs v1.Toleration) bool { + + tolerationSecondsBool := false + // check that both are either null or not null + if (lhs.TolerationSeconds == nil) == (rhs.TolerationSeconds == nil) { + if lhs.TolerationSeconds != nil { + // only compare values (attempt to dereference) if pointers aren't nil + tolerationSecondsBool = (*lhs.TolerationSeconds == *rhs.TolerationSeconds) + } else { + tolerationSecondsBool = true + } + } + + return (lhs.Key == rhs.Key) && + (lhs.Operator == rhs.Operator) && + (lhs.Value == rhs.Value) && + (lhs.Effect == rhs.Effect) && + tolerationSecondsBool +} diff --git a/vendor/github.com/openshift/elasticsearch-operator/pkg/utils/utils.go b/vendor/github.com/openshift/elasticsearch-operator/pkg/utils/utils.go index 3bd6162321..7247f5f2f2 100644 --- a/vendor/github.com/openshift/elasticsearch-operator/pkg/utils/utils.go +++ b/vendor/github.com/openshift/elasticsearch-operator/pkg/utils/utils.go @@ -6,8 +6,33 @@ import ( "encoding/json" "fmt" "os" + + "github.com/sirupsen/logrus" +) + +const ( + OsNodeLabel = "kubernetes.io/os" + LinuxValue = "linux" ) +// EnsureLinuxNodeSelector takes given selector map and returns a selector map with linux node selector added into it. +// If there is already a node type selector and is different from "linux" then it is overridden and warning is logged. 
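+// In this patch it is shared by newPodTemplateSpec and the index-management cronjob builder.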
+// See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#interlude-built-in-node-labels +func EnsureLinuxNodeSelector(selectors map[string]string) map[string]string { + if selectors == nil { + return map[string]string{OsNodeLabel: LinuxValue} + } + if os, ok := selectors[OsNodeLabel]; ok { + if os == LinuxValue { + return selectors + } + // Selector is provided but is not "linux" + logrus.Warnf("Overriding node selector value: %s=%s to %s", OsNodeLabel, os, LinuxValue) + } + selectors[OsNodeLabel] = LinuxValue + return selectors +} + func GetInt64(value int64) *int64 { i := value return &i diff --git a/vendor/github.com/openshift/elasticsearch-operator/test/files/ca.crt b/vendor/github.com/openshift/elasticsearch-operator/test/files/ca.crt deleted file mode 100644 index 045c6f3fcc..0000000000 --- a/vendor/github.com/openshift/elasticsearch-operator/test/files/ca.crt +++ /dev/null @@ -1,30 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIFLDCCAxSgAwIBAgIJANr0Zw8HvJE2MA0GCSqGSIb3DQEBCwUAMCsxKTAnBgNV -BAMMIG9wZW5zaGlmdC1jbHVzdGVyLWxvZ2dpbmctc2lnbmVyMB4XDTE5MDMwODIy -NDU1MFoXDTI0MDMwNjIyNDU1MFowKzEpMCcGA1UEAwwgb3BlbnNoaWZ0LWNsdXN0 -ZXItbG9nZ2luZy1zaWduZXIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC -AQCzVpeu4xdWE4/fu1DNw+nUMC1MbwPpBadcrFH2x2vXfDoQ2tVf0RGNTyWK/loV -2RIIb8uxJv8JmZ4zdsS+nW5b9BvnSqv/rD4Z5Ei4SFPWbsAqCk9gXWMUNDBQHLmd -40AxWhVdFkk2YEbY28KpVeaxRyFiJ0aKBOwhSQoGrd6rRs3NI833ZSZVgtVKl9zU -2pj/htxVOOGdZs3tmQTyNaJaRZQeyu7P6K9tzI736ZDTd8GtvIC3Li+pFCnOmCRS -egLxUvGD0XNxmTrWC9JTHkOMY03d933LY6xFlbs5+qtUfXkJ33pW0hIlFe0ZuBC+ -HJg4l4/EJnxuvod/ONxT991Qfs6LDF1ZJPKlbG1JhUetsrWkRh2tceZ/2T5uMPUZ -qnxCEm2zHpEyzD7rylNKy8t6TbPXog3nbbQQ2kyYL6mP0PRJMijxoo1jQxiv56Qg -1takfgEZCKVCtKOuHESLKv2iDpooiTvmRtBT155DL/zAqdtjkU8VOolw1kwmD9es -xtUCw8a4utJnf+ammlOojM1ks90TURNJ5ZC0j2YMYPxeNn5p0SA3xeI2ifoAZOgX -9P8pS4gdf7CfrPf4A86uUwFQqAQURtDGBGDSTr6mpzRayj+itoFMF5nb0uQG704x -56pXHLZL4xQFRnYPfdP3KSR5Kkn9X0OBKDVNmua7d3UpowIDAQABo1MwUTAdBgNV -HQ4EFgQUUs7ay1V2V+BH6EaYr/UkxxPymkcwHwYDVR0jBBgwFoAUUs7ay1V2V+BH -6EaYr/UkxxPymkcwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEA -pTnNCxvoevCkPtxmXhwFhl7DbSd6LMawS69uoxplxAcjj+BQ5CDsh4RaLUhD0NTd -yr0dvuQzLv2s/tzysNlcoug3FjTOS650KYhqjuIQTxv4g7RN/MGEprpvOw7JufXf -zkfzzGJ7jTdBVAh48RNTzDUzsBrTV4mmDNsbBNzeLm1hODyafVl0pvtbBQo6cH6a -QbSHvV855795uSP5Jca1FFcozlm4TSv4nXGFwoELSUwPsuqsWE5Ob7C/fEBT4Tpy -XcCq0an67En1ak99O+DP7a/9F8QFfBKGfsCj9L+t3ITgDVUvxHGAI7g8FMeo/cew -PRAeptXh1BQGSJww25p5fOQOj81f/9hWPUvdcwO9X/XoYZC1IU1RbkhLWBxuLT6j -KuNTCGIjrUlcOfxupzd0ln1TcYebwjMPZQVAFEakXyZIDNkE6SuVWckPcLivIo+F -7qiUCCSa7bLhZR5oYoZqDKc6n0lJUx/HHE3x8aX5LFYmjskEQwcIO/74+JHeOwqp -j+WE3SqIWmEEJN6pihQdXjt83KQZcAYhTSyTPegxXWGfMKcDpySvnKaOc3RmZciy -JrBC16Yl/Fjw1PqM5+j6dTqO67CKDz9nHRqBWhdwicRp7o9q1+wc1vPF20kDivTJ -HYk3ZEdecd1YK6eGO7IhWY1H0qGZZx8+i6jT/ovXutU= ------END CERTIFICATE----- diff --git a/vendor/github.com/openshift/elasticsearch-operator/test/files/elasticsearch.crt b/vendor/github.com/openshift/elasticsearch-operator/test/files/elasticsearch.crt deleted file mode 100644 index 84f4b88c7e..0000000000 --- a/vendor/github.com/openshift/elasticsearch-operator/test/files/elasticsearch.crt +++ /dev/null @@ -1,40 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIG9TCCBN2gAwIBAgIBBjANBgkqhkiG9w0BAQ0FADArMSkwJwYDVQQDDCBvcGVu -c2hpZnQtY2x1c3Rlci1sb2dnaW5nLXNpZ25lcjAeFw0xOTAzMDgyMjQ1NTNaFw0y -MTAzMDcyMjQ1NTNaMD4xEDAOBgNVBAoMB0xvZ2dpbmcxEjAQBgNVBAsMCU9wZW5T -aGlmdDEWMBQGA1UEAwwNZWxhc3RpY3NlYXJjaDCCAiIwDQYJKoZIhvcNAQEBBQAD -ggIPADCCAgoCggIBALjD3jvlx4rujhgvQHCk0T3Znj3hGw9c6Tlr3hltpmFs+JXh 
-z9smk6no/MrVmPuFP8UBLDibQCNZ0DlF2fH6yReD6phYkSw82qTNPdcgCnMJnch1 -3PQJTzx3SaGw+M5whblenXE4HW0fW+bVQloQBw52uvJs9tTkw0v10JlSntnUUnc0 -m2stUii4Omt1XA9ZIrbHa6b8sIYNFQXPnC7MVAFAyg+XN42o8UdOxFbHY+P1Gf4R -nmK2aMDq6PjHNktbaG0WJfoI/BQ/oQtV8zH3/Np8FlKXVB+EjVZTnYYgLNBQZ6tN -92Jw2F2ngIqHUbgC+buIKdYgQE/97cYwjtdQgAQzGRTHfBjDkvgLjx7YCVV9AUDR -tBX5BORD73bNJMPJKxliLUMgXAL5SrFRXn3oNUsjs90BhTT1uoer2iOnfBCcreI+ -KKFbDF/Bss3xv4ierUfRHA6nmv2M8YA9WPsiTYiIekla6o+iTSGleR615s6MD1La -B6QPNr+oyOYuLdnuoLZVhnr4LyVb93k5JjQ0gxeFE/ahM06o2RlYhQLNBcTO1Zrb -/jx8EtGZYzYHBm3vKWOGCCROKAxmyqUxi5cU8kM3KITvBh9TsN212rXZze+1XG3p -YiGGK1Bs7MADl54umA7jYEkvvJaJpXIzKdn6OUBYktlxgBGLM72heinSbkl1AgMB -AAGjggIPMIICCzAOBgNVHQ8BAf8EBAMCBaAwCQYDVR0TBAIwADAdBgNVHSUEFjAU -BggrBgEFBQcDAQYIKwYBBQUHAwIwHQYDVR0OBBYEFLGvE/vSnSEFNITtXAhYqFiP -9sYkMB8GA1UdIwQYMBaAFFLO2stVdlfgR+hGmK/1JMcT8ppHMIIBjQYDVR0RBIIB -hDCCAYCHBH8AAAGCCWxvY2FsaG9zdIIVZXhhbXBsZS1lbGFzdGljc2VhcmNogiNl -eGFtcGxlLWVsYXN0aWNzZWFyY2guY2x1c3Rlci5sb2NhbIIrZXhhbXBsZS1lbGFz -dGljc2VhcmNoLm9wZW5zaGlmdC1sb2dnaW5nLnN2Y4I5ZXhhbXBsZS1lbGFzdGlj -c2VhcmNoLm9wZW5zaGlmdC1sb2dnaW5nLnN2Yy5jbHVzdGVyLmxvY2Fsgh1leGFt -cGxlLWVsYXN0aWNzZWFyY2gtY2x1c3RlcoIrZXhhbXBsZS1lbGFzdGljc2VhcmNo -LWNsdXN0ZXIuY2x1c3Rlci5sb2NhbIIzZXhhbXBsZS1lbGFzdGljc2VhcmNoLWNs -dXN0ZXIub3BlbnNoaWZ0LWxvZ2dpbmcuc3ZjgkFleGFtcGxlLWVsYXN0aWNzZWFy -Y2gtY2x1c3Rlci5vcGVuc2hpZnQtbG9nZ2luZy5zdmMuY2x1c3Rlci5sb2NhbIgF -KgMEBQUwDQYJKoZIhvcNAQENBQADggIBAHrfw7A2DVxYtq+OW8ScGx3fEAkfuIy/ -tLc9Rr6Fot0jETz7sB01BnoYtD1NWV6h61Fox39BplYJFmumrjEdPxD3/X3Vf6YS -se1aHcmPdIKklUPBOFPrxnsPDZ1TrPQXiR9RIlmMQHIjsa4tiBYm7CvyPY7V21h/ -0owIEMVgqoM3TzZ5FzafbORaGw3MOqU81KBLP2LZA5mIQYA6v1R8kNvEk9Gp/ICk -OxBdLQYyT9y/U9hw8jGlOZl8U+ztSshYOj3+8/qrh/kUGhAYpFdnDDXgPRelp3vk -I+fPqXgQOQLUjemj1/aKFo8cFqNCi4sniP7A3vlPD1iF3GuEl7pkK8CifkJFsko8 -jNaMIPpmbitsrHhIhLahKE6jukijiloVUeB2hro99dW5IB7aDf9ruebk+MzyMjWn -McCZ0AtNynvOI/DkESzRTodQz6W3p2B20Uskx5QlrMeKNetH9B0nCrgCCllJ7E2d -NqVyamJL7TgkFTSZBCPLsFW0FfdK6wc225OPfDEIMbfUC4ii9jhkt7rlWcqdHcV1 -Vk+R1sPLfDs7ydUsTircZlVX3T1ITGq7cZkmAsGDD1Q9t9HCxebuLxV5l0ubCsvc -uDcl5cwT5dssQ5B7EmLMLiyJvw6SzVU8oHbXJ4/ilH4nWUMWNTh1QZpWCGhf9eaa -V7B/mXvB0B+W ------END CERTIFICATE----- diff --git a/vendor/github.com/openshift/elasticsearch-operator/test/files/elasticsearch.key b/vendor/github.com/openshift/elasticsearch-operator/test/files/elasticsearch.key deleted file mode 100644 index 44ac6481bb..0000000000 --- a/vendor/github.com/openshift/elasticsearch-operator/test/files/elasticsearch.key +++ /dev/null @@ -1,52 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQC4w9475ceK7o4Y -L0BwpNE92Z494RsPXOk5a94ZbaZhbPiV4c/bJpOp6PzK1Zj7hT/FASw4m0AjWdA5 -Rdnx+skXg+qYWJEsPNqkzT3XIApzCZ3Iddz0CU88d0mhsPjOcIW5Xp1xOB1tH1vm -1UJaEAcOdrrybPbU5MNL9dCZUp7Z1FJ3NJtrLVIouDprdVwPWSK2x2um/LCGDRUF -z5wuzFQBQMoPlzeNqPFHTsRWx2Pj9Rn+EZ5itmjA6uj4xzZLW2htFiX6CPwUP6EL -VfMx9/zafBZSl1QfhI1WU52GICzQUGerTfdicNhdp4CKh1G4Avm7iCnWIEBP/e3G -MI7XUIAEMxkUx3wYw5L4C48e2AlVfQFA0bQV+QTkQ+92zSTDySsZYi1DIFwC+Uqx -UV596DVLI7PdAYU09bqHq9ojp3wQnK3iPiihWwxfwbLN8b+Inq1H0RwOp5r9jPGA -PVj7Ik2IiHpJWuqPok0hpXketebOjA9S2gekDza/qMjmLi3Z7qC2VYZ6+C8lW/d5 -OSY0NIMXhRP2oTNOqNkZWIUCzQXEztWa2/48fBLRmWM2BwZt7yljhggkTigMZsql -MYuXFPJDNyiE7wYfU7Ddtdq12c3vtVxt6WIhhitQbOzAA5eeLpgO42BJL7yWiaVy -MynZ+jlAWJLZcYARizO9oXop0m5JdQIDAQABAoICAEJj9y30sg/lCl/8Up5nStx7 -ntXDVlLd4twEO4lNkjR90JEi0+p+YR7znipQOClgxvpGIpqwhoptUlnSFK9TmwB9 -IUXexUNtFm+TZD4xwC937B9E4sE5uyQSBP45th2P5y0lp1Mwg7pbQ02dobr0+WQk -G6bSqprzHI3l5S/CtVu2ZG97EsGfRl9lW3CTeiH1uHiPDcqyD8gLWVt+LzPNrf3J 
-RxH1Fzwq0Y/66Kf5+5XE07Msp4n224s+nboO8x5+2PYhuEGxCstH1dlZ6AozfsMP -RYIl6E9u9M5pOzgfxZqQ2b9/Q3426Dg24QRl/WIuh4fwNIBBlHpp1w+ZESqjPWwo -LfOCJn+rQoHIJpyvCsxY4aSgQ8eFNpvgo9yvqIKGAOVH1FYf2NdXskDNWOgAsfVp -Zs9nGzJ9+DdUc1Bg1RmO4YDOz6wAjjaYbYF46bixhzKtCHnpuIjYUFmSVDrAMLVD -we5isQ3Jx21YT61AD4wqKcSLO0T2bCzUsXD0AKGss09juGByRvNnJY4Rk87DglV9 -LR+BlXdLpuT8Ur8aZwydDRXAKNiQUAaOuMltuv0L/xr2oKVIpuR88foA4Wk4VZd9 -lsF5YpPSJDIgzQ5RD1qXW1z6iox30Wr9yTqikwLoS9roo3/0uFB8U6WtCFadxREQ -jZq97/vZG3wCwEF3ALuhAoIBAQDp6TkcDS2uIWcbScJjf9/kmgOn12pN4FDrTadQ -BunSbx6Sn4SCPFQJOjrFPhyO5BhAI1THelRlxxKuctsWQaQMRJskbS8TulcJX1Pw -nfmT9WgZLqRmx3HqDsceFn64cxewviG8C7/mwGRWdlHnydH5VR2oz/XMBB9v7iAC -t0qyBqdfrguYJ7ymvrIhX72G/PV4PhabOctHFRaeipO8Q8N8P01dISeEZN/5lY5Q -5Cp8zPekYmbfqVs7jT9/Han2t/RIUxD5JCaA+SCmFbJkO2taW2KM54CWgY7L2wMA -MiZvJTfa4YgybUjk5WMT1SmR6FaQnH0CXpf9MiNJYT2Mfc2dAoIBAQDKNowPgtuK -enVJXW5lY3NFvZ0tP9Osd7zDB2VfKj4JQ8QDZsMKR3rWm+NyLkI4BRcQ9ZGhiczG -fmU1eAQGPOjGmi28NEdvB5lZ3gcl9bLhdAw+TTMXhdKoZ+0Wt6FnG9ty9Yp8L++m -y3apQuZgX4k/E3579RKBwjIcllANVVdmYQdI/TAcJXtGQGgiOTxA/4owM+8awp3A -5/nG/mLgpC+cKjycPcVpg36DtYUdTjTNwF6OemBwMyBm+Hyi4SKumJVG4sTqN7V7 -CxgSvwiAi3Gd+wHiAEw5yxmB+dmr1a7vskqeXlSJ23thrM58uRYMuIw61tqV6RFb -DO3c8sZBMo+5AoIBAEH+VOh/J1YrgmWGh9t+pnJeqY1fD1TtZqccf4nqiWmfhCal -7sK3tpXr4czoWzJNVDI0RaUJ9GnKopCpQvqihmAXsxWx4EhWmFvCk3Idf96orDf+ -ms+Mka0RgkgQ3Ku5fQOWgPoG7ptxyF1EgJM+s4j+5KFOGvD2cNAphMp/YAmeKvap -qgfBnk4FG0ijNFuzXqYQDly5D4r8fic4vbmt/Kc/TNprkjSeKTrHYSGdXgdb5Lpy -MMgFhgHlNfbtLZi4CammFfHUqzBUGUwCbxQsV3tksQdEAVl1MA5/ufcCLynIfFu6 -qaxgfOJnfW6JA3Nw84tVO0fB3GvJC3+WecD7dVkCggEAdYdssptBFOSGBOUsA/vg -hNlnYGRnmCdj7AsBbWV09xeO9tckZ3YSPGgQs02VqU+0D2Rbh4M7JOdT1dbyp9mB -BeWYzXmpRywogmYcy4BZvtYfQ5rFJlfej4kP8RM72V0EmHWETxfhd5VsW4aJtdvx -PlsLOerAHfEMBTeMQaOnj9a3UEHwU3upgbRvkeyoS7L603dr8qbI39U0hOdX+u8S -yPZ3kewJMIF/5/d57gWFhUzY6IDSSoQ8wfyNHHI0ITyIDtJA100XyvMww7yKqYPi -voqkloHLzA9yvOvGCyzAZ4q9+fwjsJiDxsqUnUNKQIyG3LACDf2P29bQ7Ymhl0VW -6QKCAQEAjJPpgVZAnJ3dtTRBl/APAT41SiZMABt4HyT1LR05HKDkTm8IA4DSycUc -EpMMayjIFK62W6N952Uk8q9G/pvnJ/6jfvwnln2L3KuXJpyzb/SRjWB07cM8S/Ub -nxdB1pJ6PXaX47YfQzBrcEy0o9+ALlZsIoDYVSGFi7eFHe2c0iVTw6q3SKDjmV5/ -mc6j3d/0ITuIjknx4917VFOXg82Ub6LlwOLMuqnRDUg+1AIWfjqWdqVH2du77jIg -60Qej/jSGFvr58Vo+iwXrW23TEkkTuUq3l6oOhOzCzRontF9s8DCRthLWB7f8cv2 -xKBAD3AaNWNH3GEQS9rRQ6cjuvpc+A== ------END PRIVATE KEY----- diff --git a/vendor/github.com/openshift/elasticsearch-operator/test/files/logging-es.crt b/vendor/github.com/openshift/elasticsearch-operator/test/files/logging-es.crt deleted file mode 100644 index 41f2d4fa87..0000000000 --- a/vendor/github.com/openshift/elasticsearch-operator/test/files/logging-es.crt +++ /dev/null @@ -1,35 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIGJDCCBAygAwIBAgIBBzANBgkqhkiG9w0BAQ0FADArMSkwJwYDVQQDDCBvcGVu -c2hpZnQtY2x1c3Rlci1sb2dnaW5nLXNpZ25lcjAeFw0xOTAzMDgyMjQ1NTNaFw0y -MTAzMDcyMjQ1NTNaMDsxEDAOBgNVBAoMB0xvZ2dpbmcxEjAQBgNVBAsMCU9wZW5T -aGlmdDETMBEGA1UEAwwKbG9nZ2luZy1lczCCAiIwDQYJKoZIhvcNAQEBBQADggIP -ADCCAgoCggIBALAQOpMbLbAtWkSY0xY0y7yJGLkjf5B5YIDlJRT7VqiYgiMLJ/dZ -RM6TIM+lXFUY09kKaLrDgLwf5BkT5nWQ4umDqCdvkyg3qse3AbQKGQFZp+J2nLKq -2bxhJfQtUwA//5NJfQPKJRbR5qACeAgwz2Mi6OQ/cpI+oL7UF5AfD2UnhXNIzKS7 -APbgkQGkhnRZQnUkL0rCy2RbpbWey3iVRk1o+TV3mFGm5xLZM+rIl9WCsmaBnyRc -GUwUHnj97tObX6VhWBT5tSorXl7hsR2hwSGsoOCmARZNNqf3UGYMnuEGdTxNTQn/ -f61qnVZ6V2QK0sj+eNV4VSn9KJfRBZdp94uF5hr9WhJqcFiV8f2Q8r8VvByWV/0F -x+sOv52ySXZBBAL0t4vcHnkjk+Hk1WfETz5JdN9TNBG4o+1cw5sEQD+wUrv4036O -ire1TFkxG2+ESvZlhiEI5JeLTzNDWVHE9gHFvZJalQpaYg9bgrORrm+Sd5ZTHx3T -wP1vfkkkB1NjMGjqj89DtE0D9aI728Qk4XvnHoX76RM8eVI8yS3NMSEpHnsyQDaR 
-SF5jYzlG32//BeiN+2n3+yzndxF577eZ3IaKoGpmpdNHhjJp2qOdg8dn6ikQ2I22 -MYnbC4tvYbQbNTJni5WtCfHYPwXdobf7BebZ79Rb6d2HZLLf1eIxzVDLAgMBAAGj -ggFBMIIBPTAOBgNVHQ8BAf8EBAMCBaAwCQYDVR0TBAIwADAdBgNVHSUEFjAUBggr -BgEFBQcDAQYIKwYBBQUHAwIwHQYDVR0OBBYEFOmoJ6cfU96XoeRzgwERiqTVIaqL -MB8GA1UdIwQYMBaAFFLO2stVdlfgR+hGmK/1JMcT8ppHMIHABgNVHREEgbgwgbWH -BH8AAAGCCWxvY2FsaG9zdIIVZXhhbXBsZS1lbGFzdGljc2VhcmNogiNleGFtcGxl -LWVsYXN0aWNzZWFyY2guY2x1c3Rlci5sb2NhbIIrZXhhbXBsZS1lbGFzdGljc2Vh -cmNoLm9wZW5zaGlmdC1sb2dnaW5nLnN2Y4I5ZXhhbXBsZS1lbGFzdGljc2VhcmNo -Lm9wZW5zaGlmdC1sb2dnaW5nLnN2Yy5jbHVzdGVyLmxvY2FsMA0GCSqGSIb3DQEB -DQUAA4ICAQBY8/71A7nl6sm0zzCwbzOmWDLpK4fxmDZyUckKab8PZHukBiJQ7/M4 -Yp3eV++uRoztl9aLgSxNQ0klzPixVFoIfq9EQ70ZF4tgDcVyYVSUsQV9+VTzi/0S -MRnFQ/kBdn0VVfp3WmA2osvQ9H2OFq/GHxfhn7N/YNmBpmY2LcNyO5c10PfOCsA5 -xa+3pZMdsGmsv1BVYJGjUcsbJLYavvTDLGO+l3zAnlTXfSVPKsaeCQePIAnbPj1Q -r2f10KsLZH+BnOabTNFN5gz3eslVw11A9lK4DxcBPVDTFp2NT6F22lQMB9AD9AAZ -FJx77OYvtlbinr39/p4awJ7XQPJ9ZS4GkU7vEdznRZE0FOskhNfHu17yedjIBkKZ -Gf1GYo6kcK6BlSEsDtZOBGGJUVfu+kjLYi4375wzd2LejwcBFhZqZ6uDXH4d1L10 -rBq1FfKNmaQ6ULnwget6sBWdFqZxBBr6guwziB9ekpgiXTeqs3OSFLRFVDxoxXQo -mpAXoPwqaLc4TUtJOmYQtE7HBPbpS9OOGyX2V+oXFvszZHQ5NRrGO7M3rUAa/SlX -h/AktX7kLD1FNJLYO2KLBCGtlmj4SCuVhuGEHFlh1HVntoRajmQxhI0wB5wC8WUf -Dll9Hhb6bBVa2QIcz3HSHtR234SPwZ8Pwg9uUNjsQcQCpI41fPLnIA== ------END CERTIFICATE----- diff --git a/vendor/github.com/openshift/elasticsearch-operator/test/files/logging-es.key b/vendor/github.com/openshift/elasticsearch-operator/test/files/logging-es.key deleted file mode 100644 index 04933813d8..0000000000 --- a/vendor/github.com/openshift/elasticsearch-operator/test/files/logging-es.key +++ /dev/null @@ -1,52 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIJRAIBADANBgkqhkiG9w0BAQEFAASCCS4wggkqAgEAAoICAQCwEDqTGy2wLVpE -mNMWNMu8iRi5I3+QeWCA5SUU+1aomIIjCyf3WUTOkyDPpVxVGNPZCmi6w4C8H+QZ -E+Z1kOLpg6gnb5MoN6rHtwG0ChkBWafidpyyqtm8YSX0LVMAP/+TSX0DyiUW0eag -AngIMM9jIujkP3KSPqC+1BeQHw9lJ4VzSMykuwD24JEBpIZ0WUJ1JC9KwstkW6W1 -nst4lUZNaPk1d5hRpucS2TPqyJfVgrJmgZ8kXBlMFB54/e7Tm1+lYVgU+bUqK15e -4bEdocEhrKDgpgEWTTan91BmDJ7hBnU8TU0J/3+tap1WeldkCtLI/njVeFUp/SiX -0QWXafeLheYa/VoSanBYlfH9kPK/Fbwcllf9BcfrDr+dskl2QQQC9LeL3B55I5Ph -5NVnxE8+SXTfUzQRuKPtXMObBEA/sFK7+NN+joq3tUxZMRtvhEr2ZYYhCOSXi08z -Q1lRxPYBxb2SWpUKWmIPW4Kzka5vkneWUx8d08D9b35JJAdTYzBo6o/PQ7RNA/Wi -O9vEJOF75x6F++kTPHlSPMktzTEhKR57MkA2kUheY2M5Rt9v/wXojftp9/ss53cR -ee+3mdyGiqBqZqXTR4YyadqjnYPHZ+opENiNtjGJ2wuLb2G0GzUyZ4uVrQnx2D8F -3aG3+wXm2e/UW+ndh2Sy39XiMc1QywIDAQABAoICABLwi/yLLe0H0/ARKJP49fnA -HcK8yNo31GPJQqXxK69TSJyQdKotFjPKq/rqZPZah1+PuRhuM4kJPTZdZ6s5/M71 -9L7ZR9FJu7tkOnCtrWbxyBeaftzReD7FjJmpzoX2XH3xp9sigb7ksZSA73yiE40s -kjULhj2pMw2ULzy3uqY9DCQMrscBvubP3Yl5s2UMvRNw4w19yvduzHKvNGAyXq+x -HS3dQat28uCaMPWOwpabSqmwQ/hbV/VRLL4DQw/MZQubXyhSfGSe6bX3PWBcqDQB -KAVClldZ0wlal3GC5gx6712fSyxKKefz0h0daA1BBM/OYB1GppKvwxAh0iIpqhFQ -gpGeHGr26Hf79Ucrs9UfVefcjGiXyOwtPdpSpbWx/3Lu2FHaeqNfkpsVZki+ptI3 -dzgRyTFw3jSHJE3Jb0oNP+UomOpYU9cNVGtz4D8lS6+wVkWT6+KmPr6uTR0hHANF -96m3O7pluH9K1FL/10GB42QiEsIx2XvE5IB1HQVMDTbuX7o6goWwm5FXbmj0R6Av -UNrGbNq91GDNpGPEZvLl4yEz4s0tOxjmZjLUViLEkzc8B/qiHrvhffu1FyYBpuoT -8R58AdtVouNkOZ91EYHwmZ+/iF7QigqzAwTovLcFL0+aYTUXMfZ12eIKNMmq66oa -/+QqEwJyj/96bY6aWjYBAoIBAQDhNHJg274eCoa8JWz0KqyvqlPUTXr5nQdYW6FF -dgD5Zdk+gkD1anAxW9Ay4rZkydrMYteLz0pF+tL7TgHvEaSfAqUxzAo59Mxitefi -up4bPPuQ0u681kv2vl4TK7FGpOjORMg9bwoK6+I0GUvjL3eo2rjR0dMt1C65AoLZ -7uXOQMKSG0CXBDDZdP/9QmhG/rKHEeRXWAiR5wcdm78Tl0Z2XW5xd3DZVYW5b8K3 -dlwgfdnL1CYc8e05dipwVQwe4MdFbkCNfiKDzNwtmtjPIG4AIMyKiNSgMpHPJdXf -sgru5vGyHz23wncg7nJuHblFDL55jjz7G4ANNRS9m6OjMhhLAoIBAQDII4dTJE8Y 
-DmunNPUWrlU+O+9zO+xvUSnvG+LX3228yqXQ0NP8i5azZmElTE5HpLsQFQZczuk3 -RFKdj9qr+7cCIubmeQEaesfEVhG42ycNAngt4kKO0SAAShAxkMeOnaEjOuzeRrIK -p3ONGXO7MifgI5ULsOwRyJT2hzM9VSB9GMq+wj5FY6EsII3NBiLCVx0TnGyCFWSf -a1gMUxLBDiah+hfBSuBMPLNH3luGTCIT6Sm6s5+Thjwc5825r8N9a06+lHsleHjv -XOvyIsEMqh8DAAjkbY8TfuM82y59QNhh4+cHdYNyAlqM48n5G7YY6VxsgnKhsH9o -ha0BlOoZN1mBAoIBAQCZfwLaq8vYc/pDsQqjHZcYIHMEyE6iZfpEqiewzW56joXV -Cji2TXbs3ZR5qncBGWgtWM1reL06F2zIZvIAfbkDvGzWFSl/OTA5s5y6t9Hd5OHe -YzbftPyP0E8Up/ormWkodk85OD7TFNXYBsnnrknT6EiCko5qtS2nYmPHoQI3Y4J6 -zWJnzC5zeUCl4SZsBVuvnm1RbypgL+R0tNw2wSwAr0wAwJVFPPppWxiPrXe7mavi -NtZHcknrmXPxnrYlMbYAx9Xt2uQxi2cjGmDeRE9VfQWNAxhRnEYvt60fzB2Rmg1x -B8QsRLqn0n5iOZY2zyngdidwS90qo1xgo/2T1SgjAoIBAQCPV/sFjnzj0vhB0wkz -THubTlwrIaEu/WfWbRek288CJ/ZpQBiEygmOxmYPy75JS+/7DcP31u5seg/d7/mD -so6aBhtwuPwUVhocQjUBPF2U0M74thRLq/aKnoFIrtRvDfEqhXq/nMzKGrMFPVSJ -n9u1imam7/m0pwAiLiWB6SWS053q0L6+/iOislI9pQZiQPh/YkrJRL9D+yMd+KX3 -Bcafvrsi4xkRIWyareTJpF/H+Pi9UweZJsyJO3E6bGvMuX21vC0TYlzju572VvQD -uRjrEixMRzfxk7D3lliOt1IDkrKWwhD6KDtoq4GuXK2o3AMpaKjnq70lRJDsEBc3 -JlCBAoIBAQDd4E1cVyT0J5B3dX5KTWlhzUCqAetrpcKp1VNQ59D+GU8sPL8Cqhw7 -wqzd0KIn0ztK5m7pNParlhy6RTN818TjTMVc9vJUWXUX5kHwwFQWmJ9deaNC05e3 -+7yv6LLfNvourE1UYIkBCxtTGzFNVsr+ekU5RpfAOOoBx6lgygOGnMfrB8o/skd9 -QztK28LWeKB4Dc8HT9Q7EWiNKKZhpNYxiZdsmopg2xLGnUYSfm2iw2SkD7RoLUAX -PX9o36p0npgnQk/8MK2g4ovbcRJv2GIDU7bZiI4n2U1dEPDj7I64ivn1MGP4bDKh -QrzsamhY4r/rtblZdYABajuM87+gzslt ------END PRIVATE KEY----- diff --git a/vendor/github.com/openshift/elasticsearch-operator/test/files/system.admin.crt b/vendor/github.com/openshift/elasticsearch-operator/test/files/system.admin.crt deleted file mode 100644 index c41220de48..0000000000 --- a/vendor/github.com/openshift/elasticsearch-operator/test/files/system.admin.crt +++ /dev/null @@ -1,31 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIFXzCCA0egAwIBAgIBBDANBgkqhkiG9w0BAQ0FADArMSkwJwYDVQQDDCBvcGVu -c2hpZnQtY2x1c3Rlci1sb2dnaW5nLXNpZ25lcjAeFw0xOTAzMDgyMjQ1NTJaFw0y -MTAzMDcyMjQ1NTJaMD0xEDAOBgNVBAoMB0xvZ2dpbmcxEjAQBgNVBAsMCU9wZW5T -aGlmdDEVMBMGA1UEAwwMc3lzdGVtLmFkbWluMIICIjANBgkqhkiG9w0BAQEFAAOC -Ag8AMIICCgKCAgEAsdS2urpvLCQ0jwcJC7sxhx9vOgFGWmVUCc3q5UDUXrJ4pi2x -ZkvaM/X7cy7/Q/uEthAWBmpTB3kQg/w5Fg2FGS8cwBU3G6lVGqseCxH9RVNAd7Lm -h5Ot1yRUB3V6vh8X9c9c49IHBI66MscefYMN/tPYMPp3ebDKDPaMswqp/eButhUi -3Dq6fwZlZWvf7MsAcl5rUzP8B5EGd4E8hRKxUB19F1k+1nz3gP7Pg4KUljNwjZYj -/BISb5E84HyL1newW5AYyAfhIjbkwDlYRUQQo+skHAkxQxbZhezv4CcUO4i8Q9IX -NTf0Yy7XR6G9CNMFPjFfdwIUf/2GfPtn2b5GgqqVz06yytrPr3k92RArkrbgpHLK -ssoZTTz9GzT1QCOywOBE2f6mcdkquzY69PN4HmkYmM3c736cFRO9Nrl54HQJ9L0F -hVSN0XuwfSuqbkU2/2iMcWf01ZkQJQTNKvEEENn/rfrbsgC3GATrasEXIKffWPkf -fuYWqDKtKZjAN0wiBoUniNUxGxqK3wM0LWj1mpxdDFlqzVMAD9EQa/V1dYGGU+tU -OQbtsj2riVA/T9R5gSRMwbLlQmd7zMmTvD4PzASwE1mK7d2DXT7vJFGug9nulLXH -rdjaUhFNYBWkjhWW4AjqaJ4tMEruN7UMjljupLyWSMmSlpU+Lo4yQFlzyZMCAwEA -AaN8MHowDgYDVR0PAQH/BAQDAgWgMAkGA1UdEwQCMAAwHQYDVR0lBBYwFAYIKwYB -BQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBQ+IZwmNLTNKTk4VWDC6Tmwfe/UvTAf -BgNVHSMEGDAWgBRSztrLVXZX4EfoRpiv9STHE/KaRzANBgkqhkiG9w0BAQ0FAAOC -AgEAWvRn6X6EFpiRGEYD87BQ/5fn/u8oiBKh/lmhkOhbC/UBVn0jaBqOkTDUdc2x -Xlfqiowb2RuySTXjgRHKBZGyTDc/bCptP8vRl0Nd5XpeuIdbsuPrW0oapZ/mWdUP -L5WPIX/uByGcEuldKa7mjep58F+4wLFQeOfZ63MSumGs7dKKKXHKVb2CXMWfeKa2 -UIyYQM44AT6xq70sEjzTUutbwjwdFOuf/TXNnjLQXFxL42tS35Xd5b4uIUeMjUHM -Wj/HfcBkQPsPRqRuoiqu3cWaO/+gRiOdTiqC/E8bw0C+pkrc1OeGfjeLbbEzVNOZ -KogJFLcaJwqL6D8jqyHCSMEJrzJ26+rmZ2kmjfFtaOBwFD//IIA+NyMx4hsfAeHx -KuWk5E+EGHEFwj2VupW7c0mRzNt8kwEwmSmc2Sz+CZDrFzTmsFXhLHagi8YepdQ7 -Q+THUhATBpURZxx2mdKKbZqcUO262aFtnsNnGulLxw7lE+3Fq/MD4A/v1j+2Y1C/ 
-6kQfdw7AXrWkENqTzrXTIqURJDzPQPbJbckhdMke9vSSqJ/ZyubEl1Jr8MIhQUYM -Qcl8d5UCx3TYm6fKTfWD4a/u2ctopS3w6cSnD4YW9jcZ/BXNfanfsSHnYMmO45lJ -tgi5JpGppFODDtELjwJtClSknajTp/Kt6y/BfW/WvN9xaIE= ------END CERTIFICATE----- diff --git a/vendor/github.com/openshift/elasticsearch-operator/test/files/system.admin.key b/vendor/github.com/openshift/elasticsearch-operator/test/files/system.admin.key deleted file mode 100644 index a6b1f8bea2..0000000000 --- a/vendor/github.com/openshift/elasticsearch-operator/test/files/system.admin.key +++ /dev/null @@ -1,52 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIJQQIBADANBgkqhkiG9w0BAQEFAASCCSswggknAgEAAoICAQCx1La6um8sJDSP -BwkLuzGHH286AUZaZVQJzerlQNResnimLbFmS9oz9ftzLv9D+4S2EBYGalMHeRCD -/DkWDYUZLxzAFTcbqVUaqx4LEf1FU0B3suaHk63XJFQHdXq+Hxf1z1zj0gcEjroy -xx59gw3+09gw+nd5sMoM9oyzCqn94G62FSLcOrp/BmVla9/sywByXmtTM/wHkQZ3 -gTyFErFQHX0XWT7WfPeA/s+DgpSWM3CNliP8EhJvkTzgfIvWd7BbkBjIB+EiNuTA -OVhFRBCj6yQcCTFDFtmF7O/gJxQ7iLxD0hc1N/RjLtdHob0I0wU+MV93AhR//YZ8 -+2fZvkaCqpXPTrLK2s+veT3ZECuStuCkcsqyyhlNPP0bNPVAI7LA4ETZ/qZx2Sq7 -Njr083geaRiYzdzvfpwVE702uXngdAn0vQWFVI3Re7B9K6puRTb/aIxxZ/TVmRAl -BM0q8QQQ2f+t+tuyALcYBOtqwRcgp99Y+R9+5haoMq0pmMA3TCIGhSeI1TEbGorf -AzQtaPWanF0MWWrNUwAP0RBr9XV1gYZT61Q5Bu2yPauJUD9P1HmBJEzBsuVCZ3vM -yZO8Pg/MBLATWYrt3YNdPu8kUa6D2e6Utcet2NpSEU1gFaSOFZbgCOponi0wSu43 -tQyOWO6kvJZIyZKWlT4ujjJAWXPJkwIDAQABAoICAFQnm8nGDHJRN+YvqCI7Ffch -8xr6G3cP2LNDFVQkV9vwjZPmr7r/TmWklLgvl3Fuh6E4/5NNobk5m406QTGkeEYw -u2RTJd8bRUD3laIbg0XZXfrHWLz0MCJN+M4G1G1AfbA/z2+optWLTaZWAKHY8TiU -vAyBmySlexijXHYmJ8gS+5GDcxnRWQxf1IAoirGeZ7m34QZg5XYXNX48VI2NCQgm -zFnOTCRowx/ydfWASBzEfxEh6imRy9OsYajCh9KYlYbfLDsNL4dnft1QxesRiOZp -ko8J8pwTJiFwvRvQooyB6sYVmBIBRs/hDNPDQJf6dNK3vrus6lKfgZOHzd+HYgoN -6wjnQ8CoFeOwKjNKiUZXzAjmljPjRQ+GC7U2CAay/8wK1EfJsfjcEFi6dzuJxvW7 -+G3858Lhpey2JHSJl2Pvu7B39q6CFVhZzhkzdxk83wV1NHMJq+qvQ2IYlaTRWQga -kkHx5XfdsCZeR74vgjXSB6CoTQO4PCCDgdIJf9IOzPHELlC4bxbT6JhCe1DH7Dfs -AaMk/2epel12409NMYkjlJYFc8aBOEFHlWra083AOQMzVLABYq4sHZkRfLj2un4e -VXRjgBAbr9ycT56CiEm/TxpRRsXUBCWlSoyH3XK0FHl6A3vfoRWtM2Lj86K3wKoy -V6l5x3m8JwUDk/rQ9sNRAoIBAQDc7BOI/KcGz4ktJAWELV5/uyxB6iOXJW3YM0n0 -tyVdXCTgtT5VC8uadwguY36UHGvEhTKqwwsGx4GD2zogtdYwGEuFlc1LnKxTNbky -YdO/yUWBdhQFiXKbx3MpesDCf4768paT0RboxiDBSFbXjpyVia2wz4QQIZW5dfVZ -Fzu1x+x9dukW9H/CUxW7CSMOZXuJc/RWytYuj42+B9vBF0HWU0Q4pISc3Hlgw4AI -qJEvRQsgU+HUQewmeACY8kJrqy/sNtPMDbRbBDinLusbZaKI+t11uplOJvJRscrh -Iyv1m1gcMxN0r8VcWfiR5lfoivu/lzBTCFIQNCEAiR+tqE+1AoIBAQDOERZnB2E5 -qA+p1oqfOI4L+RrcvWiStixU3SN/0uQn1Z5WXGSfb84H0TGcyIFokOBUybq3bwNP -qHqRPkwvRqTkk5OPXl1Hyq0lpyHtoEJEHkAur63H235/5mhLWhijHp7MOm+9OLdt -6xacUhlRFsKawBhF+Iy6pzpDLhQcpXhWllIVjnoDvWWmcNa+fUJ9Um4EOLBpwADX -l2UO6pk20iO2T+OFPVl58T2eZhfFsXNytxDdMbgmMo2mh0/8e0me/U7YPe03T2ZZ -cJip8QhpQcM5Z1RC1QH2iRkczC+TBIo+sgm6dLAmEJCzkX0qdVESk1qlc79SIXEL -8SG41tIEFjEnAoIBAGYiFd3sv+McKxTdZFd5CgkPJSTL8+w6d4/OFlC2IuYIZVwl -Lk3vCA+/G98mTCx4/zF5SrU7OmvF7BBjV3or1nuhn7iASsq8AcbuPVIXe+dcS2/Q -gO0WYA+4o3r3rwm8IwPNBuQLdAXlItt4b/1zhtxzLANjzHAQtsFo0SaA4S/m32QZ -hyT/n9jcxF0VmklbjUM8gidl2qLn5uWFiIi6Ecvd7too5M4H921OtHBMTeKGjuBB -J1QTrfMS42PSC/buy1bu+feKFmlFiFNyE2s1D3E4WQ3GWH2S78/o3Bw9QGNpj9Bp -pIL7h8hsJ8h7rwRozH4EjWi1ngqzrtYqjqSf/UkCggEAdYYXUcFKuPhZnpJmUvKN -SNTUdipsNqhBVlEcyuz7BKD3geeF18yLI7gyZGmSLJHb719x5uYgbFD5PlbaWmge -6OIl2TGHX8d+wqe0WPL6eCEMl9PH9+D+H3HH75m9zWJMTMvTKIbtTnoyVgsYtz+U -029VKKrdkCKJvwDLpZ7VqARpYjP08KcXDzrxroh/4Duf1TaDnnxLvqlwkHZJ1ZM5 -nOOpLjdUDZEBhJJTYoOXBUjVDGIr1VUlpmErxCKIVW3AhuGipdXZ2I638swt8OH5 -toZw7wDO8s00DlGBnIaNVb5yf+3G/J6b+F20lf98smxp2UzPQ8cg0x++DM4vzvbd -JwKCAQAoPDj4AVyYq22aOqldzdNrBKR1BRYS//o36zBxiOWpK0lU053dKd8/+aqY 
tvvrzT5Esugbdp0vVN2Y3wg1RLBHLpq7hEc2IE/Qk60mkfTgT2Pxr61Vp/99jU4A
-LsiRH+42U3/WszgmJE3cjArFujxQVRYPcZyHvthcsOgQej3sIv3IOwfNNvgFAlaz
-CnGrL+YH7sdKLxMDj64UCtxR+O4ktbThu7W6XpermPgJcOtH/fYMaYEKLXuFdP3e
-iaX/RplC9JzarbBdA+Sp5pwy+jO9umjX11lkgJyD8zxhI7p2222Ntuv6G+m9Bbjz
-DTZvgTswB+AEWfMHRQtVhVkuysA3
-----END PRIVATE KEY-----
diff --git a/vendor/github.com/openshift/elasticsearch-operator/test/helpers/runtime/client.go b/vendor/github.com/openshift/elasticsearch-operator/test/helpers/runtime/client.go
new file mode 100644
index 0000000000..5ea5a4d07a
--- /dev/null
+++ b/vendor/github.com/openshift/elasticsearch-operator/test/helpers/runtime/client.go
@@ -0,0 +1,64 @@
+package runtime
+
+import (
+	"context"
+
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// FakeClient wraps a controller-runtime client for tests: it records the objects
+// passed to Update and can short-circuit calls with a canned Error.
+type FakeClient struct {
+	Error   error
+	Client  client.Client
+	updated []runtime.Object
+}
+
+// NewAlreadyExistsException returns a StatusError resembling the one the API
+// server returns when a resource being created already exists.
+func NewAlreadyExistsException() *errors.StatusError {
+	return errors.NewAlreadyExists(schema.GroupResource{}, "existingname")
+}
+
+// NewFakeClient wraps the given client; a non-nil err is returned verbatim by
+// Create, Delete, and List.
+func NewFakeClient(client client.Client, err error) *FakeClient {
+	return &FakeClient{
+		Error:   err,
+		Client:  client,
+		updated: []runtime.Object{},
+	}
+}
+
+// WasUpdated reports whether an object with the given name was passed to Update.
+func (fw *FakeClient) WasUpdated(name string) bool {
+	for _, o := range fw.updated {
+		listkey, _ := client.ObjectKeyFromObject(o)
+		if listkey.Name == name {
+			return true
+		}
+	}
+	return false
+}
+
+func (fw *FakeClient) Create(ctx context.Context, obj runtime.Object) error {
+	if fw.Error != nil {
+		return fw.Error
+	}
+	return fw.Client.Create(ctx, obj)
+}
+
+func (fw *FakeClient) Delete(ctx context.Context, obj runtime.Object, opts ...client.DeleteOptionFunc) error {
+	return fw.Error
+}
+
+func (fw *FakeClient) Update(ctx context.Context, obj runtime.Object) error {
+	fw.updated = append(fw.updated, obj)
+	return fw.Client.Update(ctx, obj)
+}
+
+func (fw *FakeClient) Get(ctx context.Context, key client.ObjectKey, obj runtime.Object) error {
+	return fw.Client.Get(ctx, key, obj)
+}
+
+func (fw *FakeClient) List(ctx context.Context, opts *client.ListOptions, list runtime.Object) error {
+	return fw.Error
+}
+
+func (fw *FakeClient) Status() client.StatusWriter {
+	return fw
+}
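A minimal usage sketch for the FakeClient above (hypothetical test code; `underlying` stands in for any client.Client and `obj` for a runtime.Object named "my-object"):

	fake := runtime.NewFakeClient(underlying, nil)
	_ = fake.Update(context.TODO(), obj)       // recorded in fake.updated, then delegated to underlying
	updated := fake.WasUpdated("my-object")    // true once an Update for that name has been seen

	failing := runtime.NewFakeClient(underlying, runtime.NewAlreadyExistsException())
	err := failing.Create(context.TODO(), obj) // returns the canned AlreadyExists error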
From 124d7e49fa372c3e03304a2ae1eb88c4f9236237 Mon Sep 17 00:00:00 2001
From: Jeff Cantrill
Date: Tue, 25 Feb 2020 17:16:59 -0500
Subject: [PATCH 18/21] bump manifest to 4.5

---
 Dockerfile                                             |  3 +--
 hack/testing/test-010-deploy-via-olm-minimal.sh        |  4 +++-
 hack/testing/test-020-olm-upgrade.sh                   |  1 +
 hack/testing/test-367-logforwarding.sh                 |  1 +
 hack/testing/test-999-fluentd-prometheus-metrics.sh    |  2 +-
 hack/testing/utils                                     |  8 ++++----
 manifests/{4.4 => 4.5}/0100_clusterroles.yaml          |  0
 manifests/{4.4 => 4.5}/0110_clusterrolebindings.yaml   |  0
 manifests/{4.4 => 4.5}/0200_roles.yaml                 |  0
 manifests/{4.4 => 4.5}/0210_rolebindings.yaml          |  0
 .../cluster-logging.v4.5.0.clusterserviceversion.yaml} |  6 +++---
 manifests/{4.4 => 4.5}/cluster-loggings.crd.yaml       |  0
 manifests/{4.4 => 4.5}/collectors.crd.yaml             |  0
 manifests/{4.4 => 4.5}/image-references                |  0
 manifests/{4.4 => 4.5}/logforwardings.crd.yaml         |  0
 manifests/cluster-logging.package.yaml                 |  6 +++---
 16 files changed, 17 insertions(+), 14 deletions(-)
 rename manifests/{4.4 => 4.5}/0100_clusterroles.yaml (100%)
 rename manifests/{4.4 => 4.5}/0110_clusterrolebindings.yaml (100%)
 rename manifests/{4.4 => 4.5}/0200_roles.yaml (100%)
 rename manifests/{4.4 => 4.5}/0210_rolebindings.yaml (100%)
 rename manifests/{4.4/cluster-logging.v4.4.0.clusterserviceversion.yaml => 4.5/cluster-logging.v4.5.0.clusterserviceversion.yaml} (99%)
 rename manifests/{4.4 => 4.5}/cluster-loggings.crd.yaml (100%)
 rename manifests/{4.4 => 4.5}/collectors.crd.yaml (100%)
 rename manifests/{4.4 => 4.5}/image-references (100%)
 rename manifests/{4.4 => 4.5}/logforwardings.crd.yaml (100%)

diff --git a/Dockerfile b/Dockerfile
index 588bee70be..c647e348df 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -4,7 +4,6 @@ COPY . .
 RUN make
 
 FROM centos:centos7
-ARG CSV=4.4
 RUN INSTALL_PKGS=" \
       openssl \
       " && \
@@ -17,7 +16,7 @@ COPY --from=builder _output/bin/cluster-logging-operator /usr/bin/
 COPY scripts/* /usr/bin/scripts/
 RUN mkdir -p /usr/share/logging/
 COPY files/ /usr/share/logging/
-COPY manifests/$CSV /manifests/$CSV
+COPY manifests/4* /manifests/
 COPY manifests/cluster-logging.package.yaml /manifests/
 # this is required because the operator invokes a script as `bash scripts/cert_generation.sh`
 WORKDIR /usr/bin
diff --git a/hack/testing/test-010-deploy-via-olm-minimal.sh b/hack/testing/test-010-deploy-via-olm-minimal.sh
index 6d8b9a0c39..3f6c6ca996 100755
--- a/hack/testing/test-010-deploy-via-olm-minimal.sh
+++ b/hack/testing/test-010-deploy-via-olm-minimal.sh
@@ -35,6 +35,7 @@ cleanup(){
   os::cleanup::all "${return_code}"
 
+  set -e
   exit ${return_code}
 }
 trap cleanup exit
@@ -50,7 +51,8 @@ KUBECONFIG=${KUBECONFIG:-$HOME/.kube/config}
 
 oc create ns ${NAMESPACE} || :
 
-os::cmd::expect_success "oc create -f ${repo_dir}/vendor/github.com/openshift/elasticsearch-operator/manifests/${version}/elasticsearches.crd.yaml"
+eo_version=$(basename $(find ${repo_dir}/vendor/github.com/openshift/elasticsearch-operator/manifests -type d | sort -r | head -n 1))
+os::cmd::expect_success "oc create -f ${repo_dir}/vendor/github.com/openshift/elasticsearch-operator/manifests/${eo_version}/elasticsearches.crd.yaml"
 
 # Create static cluster roles and rolebindings
 deploy_olm_catalog_unsupported_resources
diff --git a/hack/testing/test-020-olm-upgrade.sh b/hack/testing/test-020-olm-upgrade.sh
index e5f521d47b..e05afc39e7 100755
--- a/hack/testing/test-020-olm-upgrade.sh
+++ b/hack/testing/test-020-olm-upgrade.sh
@@ -57,6 +57,7 @@ cleanup(){
 
   cleanup_olm_catalog_unsupported_resources
 
+  set -e
   exit ${return_code}
 }
 trap cleanup exit
diff --git a/hack/testing/test-367-logforwarding.sh b/hack/testing/test-367-logforwarding.sh
index 84f021da88..438e7ce797 100755
--- a/hack/testing/test-367-logforwarding.sh
+++ b/hack/testing/test-367-logforwarding.sh
@@ -32,6 +32,7 @@ cleanup(){
     done
   fi
 
+  set -e
   exit ${return_code}
 }
 trap cleanup exit
diff --git a/hack/testing/test-999-fluentd-prometheus-metrics.sh b/hack/testing/test-999-fluentd-prometheus-metrics.sh
index 53c5b420bc..c4716aee4d 100755
--- a/hack/testing/test-999-fluentd-prometheus-metrics.sh
+++ b/hack/testing/test-999-fluentd-prometheus-metrics.sh
@@ -29,7 +29,7 @@ cleanup() {
   fi
   cleanup_olm_catalog_unsupported_resources
-
+  set -e
   exit $return_code
 }
 trap "cleanup" EXIT
diff --git a/hack/testing/utils b/hack/testing/utils
index 2ff35f857a..434bce5ef0 100644
--- a/hack/testing/utils
+++ b/hack/testing/utils
@@ -109,12 +109,12 @@ deploy_olm_catalog_unsupported_resources(){
   local version=$(basename $(find $manifest -type d | sort -r | head -n 1))
 
   # Create static cluster roles and rolebindings
-  try_until_success "oc create -f ${manifest}/$version/0100_clusterroles.yaml" "$(( 30 * second ))"
-  try_until_success "oc create -f ${manifest}/$version/0110_clusterrolebindings.yaml" "$(( 30 * second ))"
+  oc
create -f ${manifest}/$version/0100_clusterroles.yaml ||: + oc create -f ${manifest}/$version/0110_clusterrolebindings.yaml ||: # Create static cluster roles and rolebindings - try_until_success "oc create -f ${manifest}/$version/0200_roles.yaml" "$(( 30 * second ))" - try_until_success "oc create -f ${manifest}/$version/0210_rolebindings.yaml" "$(( 30 * second ))" + oc create -f ${manifest}/$version/0200_roles.yaml ||: + oc create -f ${manifest}/$version/0210_rolebindings.yaml ||: } cleanup_olm_catalog_unsupported_resources(){ diff --git a/manifests/4.4/0100_clusterroles.yaml b/manifests/4.5/0100_clusterroles.yaml similarity index 100% rename from manifests/4.4/0100_clusterroles.yaml rename to manifests/4.5/0100_clusterroles.yaml diff --git a/manifests/4.4/0110_clusterrolebindings.yaml b/manifests/4.5/0110_clusterrolebindings.yaml similarity index 100% rename from manifests/4.4/0110_clusterrolebindings.yaml rename to manifests/4.5/0110_clusterrolebindings.yaml diff --git a/manifests/4.4/0200_roles.yaml b/manifests/4.5/0200_roles.yaml similarity index 100% rename from manifests/4.4/0200_roles.yaml rename to manifests/4.5/0200_roles.yaml diff --git a/manifests/4.4/0210_rolebindings.yaml b/manifests/4.5/0210_rolebindings.yaml similarity index 100% rename from manifests/4.4/0210_rolebindings.yaml rename to manifests/4.5/0210_rolebindings.yaml diff --git a/manifests/4.4/cluster-logging.v4.4.0.clusterserviceversion.yaml b/manifests/4.5/cluster-logging.v4.5.0.clusterserviceversion.yaml similarity index 99% rename from manifests/4.4/cluster-logging.v4.4.0.clusterserviceversion.yaml rename to manifests/4.5/cluster-logging.v4.5.0.clusterserviceversion.yaml index 68fbb5f971..1b7f8dd42d 100644 --- a/manifests/4.4/cluster-logging.v4.4.0.clusterserviceversion.yaml +++ b/manifests/4.5/cluster-logging.v4.5.0.clusterserviceversion.yaml @@ -4,7 +4,7 @@ apiVersion: operators.coreos.com/v1alpha1 kind: ClusterServiceVersion metadata: # The version value is substituted by the ART pipeline - name: clusterlogging.v4.4.0 + name: clusterlogging.v4.5.0 namespace: placeholder annotations: "operatorframework.io/suggested-namespace": openshift-logging @@ -18,7 +18,7 @@ metadata: createdAt: 2018-08-01T08:00:00Z support: AOS Logging # The version value is substituted by the ART pipeline - olm.skipRange: ">=4.1.0 <4.4.0" + olm.skipRange: ">=4.1.0 <4.5.0" alm-examples: |- [ { @@ -101,7 +101,7 @@ metadata: ] spec: # The version value is substituted by the ART pipeline - version: 4.4.0 + version: 4.5.0 displayName: Cluster Logging minKubeVersion: 1.17.1 description: | diff --git a/manifests/4.4/cluster-loggings.crd.yaml b/manifests/4.5/cluster-loggings.crd.yaml similarity index 100% rename from manifests/4.4/cluster-loggings.crd.yaml rename to manifests/4.5/cluster-loggings.crd.yaml diff --git a/manifests/4.4/collectors.crd.yaml b/manifests/4.5/collectors.crd.yaml similarity index 100% rename from manifests/4.4/collectors.crd.yaml rename to manifests/4.5/collectors.crd.yaml diff --git a/manifests/4.4/image-references b/manifests/4.5/image-references similarity index 100% rename from manifests/4.4/image-references rename to manifests/4.5/image-references diff --git a/manifests/4.4/logforwardings.crd.yaml b/manifests/4.5/logforwardings.crd.yaml similarity index 100% rename from manifests/4.4/logforwardings.crd.yaml rename to manifests/4.5/logforwardings.crd.yaml diff --git a/manifests/cluster-logging.package.yaml b/manifests/cluster-logging.package.yaml index 9053d0de97..ff3814128c 100644 --- 
a/manifests/cluster-logging.package.yaml +++ b/manifests/cluster-logging.package.yaml @@ -1,5 +1,5 @@ -#! package-manifest: ./deploy/chart/catalog_resources/rh-operators/clusterlogging.v4.4.0.clusterserviceversion.yaml +#! package-manifest: ./deploy/chart/catalog_resources/rh-operators/clusterlogging.v4.5.0.clusterserviceversion.yaml packageName: cluster-logging channels: -- name: "4.4" - currentCSV: clusterlogging.v4.4.0 +- name: "4.5" + currentCSV: clusterlogging.v4.5.0 From 181b8b4471307c492b41e8003e1554df9135140b Mon Sep 17 00:00:00 2001 From: Alan Conway Date: Thu, 27 Feb 2020 17:03:18 -0500 Subject: [PATCH 19/21] Improved code generation, upgraded operator-sdk binary. - Define the operator-sdk binary version in Makefile, to 0.15.2 (was 0.8.2) - Automatically download and use a released binary for operator-sdk - Added `operator-sdk generate crds` to Makefile - Auto-run code and CRD generators if API source or SDK binary changes. Minor cleanup: - Makefile: replace $(CURPATH) with $(CURDIR) - same thing, CURDIR is a make built-in. - Removed some unused code and commented-out lines. - Checked in updated files with minor format fixes made by `make fmt` - Removed '@' to echo some long-running commands, to see what's happening. Signed-off-by: Alan Conway --- .zz_generate_timestamp | 0 Makefile | 72 +- ...ging.openshift.io_clusterloggings_crd.yaml | 795 ++++++++++++++++++ .../logging.openshift.io_collectors_crd.yaml | 107 +++ ...gging.openshift.io_logforwardings_crd.yaml | 188 +++++ pkg/apis/logging/v1/clusterlogging_types.go | 60 +- pkg/apis/logging/v1/doc.go | 1 + pkg/apis/logging/v1/zz_generated.deepcopy.go | 68 +- pkg/apis/logging/v1/zz_generated.defaults.go | 16 - pkg/apis/logging/v1/zz_generated.openapi.go | 87 -- .../logging/v1alpha1/zz_generated.deepcopy.go | 2 +- .../logging/v1alpha1/zz_generated.defaults.go | 16 - pkg/k8shandler/status.go | 6 +- 13 files changed, 1217 insertions(+), 201 deletions(-) create mode 100644 .zz_generate_timestamp create mode 100644 deploy/crds/logging.openshift.io_clusterloggings_crd.yaml create mode 100644 deploy/crds/logging.openshift.io_collectors_crd.yaml create mode 100644 deploy/crds/logging.openshift.io_logforwardings_crd.yaml delete mode 100644 pkg/apis/logging/v1/zz_generated.defaults.go delete mode 100644 pkg/apis/logging/v1/zz_generated.openapi.go delete mode 100644 pkg/apis/logging/v1alpha1/zz_generated.defaults.go diff --git a/.zz_generate_timestamp b/.zz_generate_timestamp new file mode 100644 index 0000000000..e69de29bb2 diff --git a/Makefile b/Makefile index 51439d3983..ec65dbe499 100644 --- a/Makefile +++ b/Makefile @@ -1,9 +1,8 @@ -CURPATH=$(PWD) -TARGET_DIR=$(CURPATH)/_output +TARGET_DIR=$(CURDIR)/_output KUBECONFIG?=$(HOME)/.kube/config GOBUILD=go build -BUILD_GOPATH=$(TARGET_DIR):$(TARGET_DIR)/vendor:$(CURPATH)/cmd +BUILD_GOPATH=$(TARGET_DIR):$(TARGET_DIR)/vendor:$(CURDIR)/cmd IMAGE_BUILDER_OPTS= IMAGE_BUILDER?=imagebuilder @@ -17,10 +16,10 @@ IMAGE_TAG?=quay.io/openshift/origin-$(APP_NAME):latest export IMAGE_TAG MAIN_PKG=cmd/manager/main.go export OCP_VERSION?=$(shell basename $(shell find manifests/ -maxdepth 1 -not -name manifests -type d)) -export CSV_FILE=$(CURPATH)/manifests/$(OCP_VERSION)/cluster-logging.v$(OCP_VERSION).0.clusterserviceversion.yaml +export CSV_FILE=$(CURDIR)/manifests/$(OCP_VERSION)/cluster-logging.v$(OCP_VERSION).0.clusterserviceversion.yaml export NAMESPACE?=openshift-logging export MANAGED_CONFIG_NAMESPACE?=openshift-config-managed -export 
EO_CSV_FILE=$(CURPATH)/vendor/github.com/openshift/elasticsearch-operator/manifests/$(OCP_VERSION)/elasticsearch-operator.v$(OCP_VERSION).0.clusterserviceversion.yaml +export EO_CSV_FILE=$(CURDIR)/vendor/github.com/openshift/elasticsearch-operator/manifests/$(OCP_VERSION)/elasticsearch-operator.v$(OCP_VERSION).0.clusterserviceversion.yaml FLUENTD_IMAGE?=quay.io/openshift/origin-logging-fluentd:latest @@ -29,43 +28,30 @@ TEST_PKGS=$(shell go list ./test) TEST_OPTIONS?= -OC?=oc +# go source files, excluding generated code. +SRC = $(shell find cmd pkg version -type f -name '*.go' -not -name zz_generated*) -# These will be provided to the target -#VERSION := 1.0.0 -#BUILD := `git rev-parse HEAD` - -# Use linker flags to provide version/build settings to the target -#LDFLAGS=-ldflags "-X=main.Version=$(VERSION) -X=main.Build=$(BUILD)" - -# go source files, ignore vendor directory -SRC = $(shell find . -type f -name '*.go' -not -path "./vendor/*") - -#.PHONY: all build clean install uninstall fmt simplify check run -.PHONY: all operator-sdk imagebuilder build clean fmt simplify gendeepcopy deploy-setup deploy-image deploy deploy-example test-unit test-e2e test-sec undeploy run +.PHONY: all imagebuilder build clean fmt simplify generate deploy-setup deploy-image deploy deploy-example test-unit test-e2e test-sec undeploy run all: build #check install -operator-sdk: - @if ! type -p operator-sdk ; \ - then if [ ! -d $(GOPATH)/src/github.com/operator-framework/operator-sdk ] ; \ - then git clone https://github.com/operator-framework/operator-sdk --branch master $(GOPATH)/src/github.com/operator-framework/operator-sdk ; \ - fi ; \ - cd $(GOPATH)/src/github.com/operator-framework/operator-sdk ; \ - make dep ; \ - make install || sudo make install || cd commands/operator-sdk && sudo go install ; \ - fi +# Download a known released version of operator-sdk. +OPERATOR_SDK_RELEASE?=v0.15.2 +OPERATOR_SDK=./operator-sdk-$(OPERATOR_SDK_RELEASE) +$(OPERATOR_SDK): + curl -f -L -o $@ https://github.com/operator-framework/operator-sdk/releases/download/${OPERATOR_SDK_RELEASE}/operator-sdk-${OPERATOR_SDK_RELEASE}-$(shell uname -i)-linux-gnu + chmod +x $(OPERATOR_SDK) imagebuilder: @if [ $${USE_IMAGE_STREAM:-false} = false ] && ! 
type -p imagebuilder ; \ then go get -u github.com/openshift/imagebuilder/cmd/imagebuilder ; \ fi -build: fmt +build: generate fmt @mkdir -p $(TARGET_DIR)/src/$(APP_REPO) - @cp -ru $(CURPATH)/pkg $(TARGET_DIR)/src/$(APP_REPO) - @cp -ru $(CURPATH)/vendor/* $(TARGET_DIR)/src - @GOPATH=$(BUILD_GOPATH) $(GOBUILD) $(LDFLAGS) -o $(TARGET) $(MAIN_PKG) + @cp -ru $(CURDIR)/pkg $(TARGET_DIR)/src/$(APP_REPO) + @cp -ru $(CURDIR)/vendor/* $(TARGET_DIR)/src + GOPATH=$(BUILD_GOPATH) $(GOBUILD) $(LDFLAGS) -o $(TARGET) $(MAIN_PKG) run: ELASTICSEARCH_IMAGE=quay.io/openshift/origin-logging-elasticsearch6:latest \ @@ -78,7 +64,7 @@ run: WATCH_NAMESPACE=openshift-logging \ KUBERNETES_CONFIG=$(KUBECONFIG) \ WORKING_DIR=$(TARGET_DIR)/ocp-clo \ - LOGGING_SHARE_DIR=$(CURPATH)/files \ + LOGGING_SHARE_DIR=$(CURDIR)/files \ go run ${MAIN_PKG} clean: @@ -91,16 +77,26 @@ image: imagebuilder fi lint: - @golangci-lint run -c golangci.yaml + golangci-lint run -c golangci.yaml fmt: - @gofmt -l -w cmd/ pkg/ version/ + gofmt -l -w cmd/ pkg/ version/ simplify: - @gofmt -s -l -w $(SRC) + gofmt -s -l -w $(SRC) + +GEN_TIMESTAMP=.zz_generate_timestamp +generate: $(GEN_TIMESTAMP) +$(GEN_TIMESTAMP): $(SRC) $(OPERATOR_SDK) + $(OPERATOR_SDK) generate k8s + $(OPERATOR_SDK) generate crds + @touch $@ -gendeepcopy: operator-sdk - @operator-sdk generate k8s +# spotless does make clean and removes generated code. Don't commit without re-generating. +spotless: clean + @find pkg -name 'zz_generated*' -delete -print + @rm -vrf deploy/crds/*.yaml + @rm -vf $(GEN_TIMESTAMP) deploy-image: image hack/deploy-image.sh @@ -118,7 +114,7 @@ deploy-example: deploy oc create -n $(NAMESPACE) -f hack/cr.yaml test-unit: fmt - @LOGGING_SHARE_DIR=$(CURPATH)/files go test $(TEST_OPTIONS) $(PKGS) + @LOGGING_SHARE_DIR=$(CURDIR)/files go test $(TEST_OPTIONS) $(PKGS) test-e2e: hack/test-e2e.sh diff --git a/deploy/crds/logging.openshift.io_clusterloggings_crd.yaml b/deploy/crds/logging.openshift.io_clusterloggings_crd.yaml new file mode 100644 index 0000000000..b6a86b007a --- /dev/null +++ b/deploy/crds/logging.openshift.io_clusterloggings_crd.yaml @@ -0,0 +1,795 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: clusterloggings.logging.openshift.io +spec: + group: logging.openshift.io + names: + kind: ClusterLogging + listKind: ClusterLoggingList + plural: clusterloggings + singular: clusterlogging + scope: Namespaced + validation: + openAPIV3Schema: + description: ClusterLogging is the Schema for the clusterloggings API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ClusterLoggingSpec defines the desired state of ClusterLogging + properties: + collection: + description: This is the struct that will contain information pertinent + to Log and event collection + properties: + logs: + properties: + fluentd: + properties: + nodeSelector: + additionalProperties: + type: string + type: object + resources: + description: ResourceRequirements describes the compute + resource requirements. + properties: + limits: + additionalProperties: + type: string + description: 'Limits describes the maximum amount of + compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + type: string + description: 'Requests describes the minimum amount + of compute resources required. If Requests is omitted + for a container, it defaults to Limits if that is + explicitly specified, otherwise to an implementation-defined + value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + tolerations: + items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple + using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to + match. Empty means match all taint effects. When + specified, allowed values are NoSchedule, PreferNoSchedule + and NoExecute. + type: string + key: + description: Key is the taint key that the toleration + applies to. Empty means match all taint keys. If + the key is empty, operator must be Exists; this + combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship + to the value. Valid operators are Exists and Equal. + Defaults to Equal. Exists is equivalent to wildcard + for value, so that a pod can tolerate all taints + of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period + of time the toleration (which must be of effect + NoExecute, otherwise this field is ignored) tolerates + the taint. By default, it is not set, which means + tolerate the taint forever (do not evict). Zero + and negative values will be treated as 0 (evict + immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration + matches to. If the operator is Exists, the value + should be empty, otherwise just a regular string. + type: string + type: object + type: array + required: + - resources + type: object + type: + type: string + required: + - type + type: object + type: object + curation: + description: This is the struct that will contain information pertinent + to Log curation (Curator) + properties: + curator: + properties: + nodeSelector: + additionalProperties: + type: string + type: object + resources: + description: ResourceRequirements describes the compute resource + requirements. + properties: + limits: + additionalProperties: + type: string + description: 'Limits describes the maximum amount of compute + resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + type: string + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. More info: + https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + schedule: + type: string + tolerations: + items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple using + the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. + Empty means match all taint effects. When specified, + allowed values are NoSchedule, PreferNoSchedule and + NoExecute. + type: string + key: + description: Key is the taint key that the toleration + applies to. Empty means match all taint keys. If the + key is empty, operator must be Exists; this combination + means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship + to the value. Valid operators are Exists and Equal. + Defaults to Equal. Exists is equivalent to wildcard + for value, so that a pod can tolerate all taints of + a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of + time the toleration (which must be of effect NoExecute, + otherwise this field is ignored) tolerates the taint. + By default, it is not set, which means tolerate the + taint forever (do not evict). Zero and negative values + will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches + to. If the operator is Exists, the value should be empty, + otherwise just a regular string. + type: string + type: object + type: array + required: + - resources + - schedule + type: object + type: + type: string + required: + - type + type: object + logStore: + description: This is the struct that will contain information pertinent + to Log storage (Elasticsearch) + properties: + elasticsearch: + properties: + nodeCount: + format: int32 + type: integer + nodeSelector: + additionalProperties: + type: string + type: object + redundancyPolicy: + description: RedundancyPolicyType controls number of elasticsearch + replica shards FullRedundancy - each index is fully replicated + on every Data node in the cluster MultipleRedundancy - each + index is spread over half of the Data nodes SingleRedundancy + - one replica shard ZeroRedundancy - no replica shards + type: string + resources: + description: ResourceRequirements describes the compute resource + requirements. + properties: + limits: + additionalProperties: + type: string + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + type: string + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. 
More info: + https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + storage: + properties: + size: + type: string + storageClassName: + type: string + type: object + tolerations: + items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple using + the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. + Empty means match all taint effects. When specified, + allowed values are NoSchedule, PreferNoSchedule and + NoExecute. + type: string + key: + description: Key is the taint key that the toleration + applies to. Empty means match all taint keys. If the + key is empty, operator must be Exists; this combination + means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship + to the value. Valid operators are Exists and Equal. + Defaults to Equal. Exists is equivalent to wildcard + for value, so that a pod can tolerate all taints of + a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of + time the toleration (which must be of effect NoExecute, + otherwise this field is ignored) tolerates the taint. + By default, it is not set, which means tolerate the + taint forever (do not evict). Zero and negative values + will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches + to. If the operator is Exists, the value should be empty, + otherwise just a regular string. + type: string + type: object + type: array + required: + - nodeCount + - redundancyPolicy + - resources + - storage + type: object + retentionPolicy: + properties: + logs.app: + properties: + maxAge: + description: TimeUnit is a time unit like h,m,d + type: string + required: + - maxAge + type: object + logs.audit: + properties: + maxAge: + description: TimeUnit is a time unit like h,m,d + type: string + required: + - maxAge + type: object + logs.infra: + properties: + maxAge: + description: TimeUnit is a time unit like h,m,d + type: string + required: + - maxAge + type: object + type: object + type: + type: string + required: + - type + type: object + managementState: + description: 'Important: Run "operator-sdk generate k8s" to regenerate + code after modifying this file Add custom validation using kubebuilder + tags: https://book.kubebuilder.io/beyond_basics/generating_crd.html' + type: string + visualization: + description: This is the struct that will contain information pertinent + to Log visualization (Kibana) + properties: + kibana: + properties: + nodeSelector: + additionalProperties: + type: string + type: object + proxy: + properties: + resources: + description: ResourceRequirements describes the compute + resource requirements. + properties: + limits: + additionalProperties: + type: string + description: 'Limits describes the maximum amount of + compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + type: string + description: 'Requests describes the minimum amount + of compute resources required. If Requests is omitted + for a container, it defaults to Limits if that is + explicitly specified, otherwise to an implementation-defined + value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + required: + - resources + type: object + replicas: + format: int32 + type: integer + resources: + description: ResourceRequirements describes the compute resource + requirements. + properties: + limits: + additionalProperties: + type: string + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + type: string + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. More info: + https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + tolerations: + items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple using + the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. + Empty means match all taint effects. When specified, + allowed values are NoSchedule, PreferNoSchedule and + NoExecute. + type: string + key: + description: Key is the taint key that the toleration + applies to. Empty means match all taint keys. If the + key is empty, operator must be Exists; this combination + means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship + to the value. Valid operators are Exists and Equal. + Defaults to Equal. Exists is equivalent to wildcard + for value, so that a pod can tolerate all taints of + a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of + time the toleration (which must be of effect NoExecute, + otherwise this field is ignored) tolerates the taint. + By default, it is not set, which means tolerate the + taint forever (do not evict). Zero and negative values + will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches + to. If the operator is Exists, the value should be empty, + otherwise just a regular string. + type: string + type: object + type: array + required: + - replicas + - resources + type: object + type: + type: string + required: + - type + type: object + required: + - managementState + type: object + status: + description: ClusterLoggingStatus defines the observed state of ClusterLogging + properties: + clusterConditions: + items: + description: 'ConditionStatus contains details for the current condition + of this elasticsearch cluster. Status: the status of the condition. + LastTransitionTime: Last time the condition transitioned from one + status to another. Reason: Unique, one-word, CamelCase reason for + the condition''s last transition. Message: Human-readable message + indicating details about last transition.' 
+ properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + description: ClusterConditionType is a valid value for ClusterCondition.Type + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + collection: + properties: + logs: + properties: + fluentdStatus: + properties: + clusterCondition: + additionalProperties: + description: '`operator-sdk generate crds` does not allow + map-of-slice, must use a named type.' + items: + description: 'ConditionStatus contains details for the + current condition of this elasticsearch cluster. Status: + the status of the condition. LastTransitionTime: Last + time the condition transitioned from one status to + another. Reason: Unique, one-word, CamelCase reason + for the condition''s last transition. Message: Human-readable + message indicating details about last transition.' + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + description: ClusterConditionType is a valid value + for ClusterCondition.Type + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + type: object + daemonSet: + type: string + nodes: + additionalProperties: + type: string + type: object + pods: + additionalProperties: + items: + type: string + type: array + type: object + required: + - daemonSet + - nodes + - pods + type: object + type: object + type: object + curation: + properties: + curatorStatus: + items: + properties: + clusterCondition: + additionalProperties: + description: '`operator-sdk generate crds` does not allow + map-of-slice, must use a named type.' + items: + description: 'ConditionStatus contains details for the + current condition of this elasticsearch cluster. Status: + the status of the condition. LastTransitionTime: Last + time the condition transitioned from one status to another. + Reason: Unique, one-word, CamelCase reason for the condition''s + last transition. Message: Human-readable message indicating + details about last transition.' 
+ properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + description: ClusterConditionType is a valid value + for ClusterCondition.Type + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + type: object + cronJobs: + type: string + schedules: + type: string + suspended: + type: boolean + required: + - cronJobs + - schedules + - suspended + type: object + type: array + type: object + logStore: + properties: + elasticsearchStatus: + items: + properties: + cluster: + properties: + activePrimaryShards: + format: int32 + type: integer + activeShards: + format: int32 + type: integer + initializingShards: + format: int32 + type: integer + numDataNodes: + format: int32 + type: integer + numNodes: + format: int32 + type: integer + pendingTasks: + format: int32 + type: integer + relocatingShards: + format: int32 + type: integer + status: + type: string + unassignedShards: + format: int32 + type: integer + required: + - activePrimaryShards + - activeShards + - initializingShards + - numDataNodes + - numNodes + - pendingTasks + - relocatingShards + - status + - unassignedShards + type: object + clusterConditions: + items: + description: 'ClusterCondition contains details for the + current condition of this elasticsearch cluster. Status: + the status of the condition. LastTransitionTime: Last + time the condition transitioned from one status to another. + Reason: Unique, one-word, CamelCase reason for the condition''s + last transition. Message: Human-readable message indicating + details about last transition.' + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + description: ClusterConditionType is a valid value for + ClusterCondition.Type + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + clusterHealth: + type: string + clusterName: + type: string + deployments: + items: + type: string + type: array + nodeConditions: + additionalProperties: + items: + description: 'ClusterCondition contains details for the + current condition of this elasticsearch cluster. Status: + the status of the condition. LastTransitionTime: Last + time the condition transitioned from one status to another. + Reason: Unique, one-word, CamelCase reason for the condition''s + last transition. Message: Human-readable message indicating + details about last transition.' 
+ properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + description: ClusterConditionType is a valid value + for ClusterCondition.Type + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + type: object + nodeCount: + format: int32 + type: integer + pods: + additionalProperties: + additionalProperties: + items: + type: string + type: array + type: object + type: object + replicaSets: + items: + type: string + type: array + shardAllocationEnabled: + type: string + statefulSets: + items: + type: string + type: array + required: + - cluster + - clusterName + - nodeCount + - pods + - shardAllocationEnabled + type: object + type: array + type: object + visualization: + description: 'Important: Run "operator-sdk generate k8s" to regenerate + code after modifying this file Add custom validation using kubebuilder + tags: https://book.kubebuilder.io/beyond_basics/generating_crd.html' + properties: + kibanaStatus: + items: + properties: + clusterCondition: + additionalProperties: + description: '`operator-sdk generate crds` does not allow + map-of-slice, must use a named type.' + items: + description: 'ConditionStatus contains details for the + current condition of this elasticsearch cluster. Status: + the status of the condition. LastTransitionTime: Last + time the condition transitioned from one status to another. + Reason: Unique, one-word, CamelCase reason for the condition''s + last transition. Message: Human-readable message indicating + details about last transition.' + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + description: ClusterConditionType is a valid value + for ClusterCondition.Type + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + type: object + deployment: + type: string + pods: + additionalProperties: + items: + type: string + type: array + type: object + replicaSets: + items: + type: string + type: array + replicas: + format: int32 + type: integer + required: + - deployment + - pods + - replicaSets + - replicas + type: object + type: array + type: object + required: + - collection + - curation + - logStore + - visualization + type: object + type: object + version: v1 + versions: + - name: v1 + served: true + storage: true diff --git a/deploy/crds/logging.openshift.io_collectors_crd.yaml b/deploy/crds/logging.openshift.io_collectors_crd.yaml new file mode 100644 index 0000000000..4be02cf1d5 --- /dev/null +++ b/deploy/crds/logging.openshift.io_collectors_crd.yaml @@ -0,0 +1,107 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: collectors.logging.openshift.io +spec: + group: logging.openshift.io + names: + kind: Collector + listKind: CollectorList + plural: collectors + singular: collector + scope: Namespaced + validation: + openAPIV3Schema: + description: Collector is an instance of a collector + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. 
Servers may infer this from the endpoint the client
+            submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+          type: string
+        metadata:
+          type: object
+        spec:
+          description: CollectorSpec is the specification for deployable collectors
+          properties:
+            nodeSelector:
+              additionalProperties:
+                type: string
+              type: object
+            promtail:
+              properties:
+                endpoint:
+                  type: string
+              type: object
+            resources:
+              description: ResourceRequirements describes the compute resource requirements.
+              properties:
+                limits:
+                  additionalProperties:
+                    type: string
+                  description: 'Limits describes the maximum amount of compute resources
+                    allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+                  type: object
+                requests:
+                  additionalProperties:
+                    type: string
+                  description: 'Requests describes the minimum amount of compute resources
+                    required. If Requests is omitted for a container, it defaults
+                    to Limits if that is explicitly specified, otherwise to an implementation-defined
+                    value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+                  type: object
+              type: object
+            spec:
+              description: CollectorType is a kind of collector
+              type: string
+            tolerations:
+              items:
+                description: The pod this Toleration is attached to tolerates any
+                  taint that matches the triple <key,value,effect> using the matching
+                  operator <operator>.
+                properties:
+                  effect:
+                    description: Effect indicates the taint effect to match. Empty
+                      means match all taint effects. When specified, allowed values
+                      are NoSchedule, PreferNoSchedule and NoExecute.
+                    type: string
+                  key:
+                    description: Key is the taint key that the toleration applies
+                      to. Empty means match all taint keys. If the key is empty, operator
+                      must be Exists; this combination means to match all values and
+                      all keys.
+                    type: string
+                  operator:
+                    description: Operator represents a key's relationship to the value.
+                      Valid operators are Exists and Equal. Defaults to Equal. Exists
+                      is equivalent to wildcard for value, so that a pod can tolerate
+                      all taints of a particular category.
+                    type: string
+                  tolerationSeconds:
+                    description: TolerationSeconds represents the period of time the
+                      toleration (which must be of effect NoExecute, otherwise this
+                      field is ignored) tolerates the taint. By default, it is not
+                      set, which means tolerate the taint forever (do not evict).
+                      Zero and negative values will be treated as 0 (evict immediately)
+                      by the system.
+                    format: int64
+                    type: integer
+                  value:
+                    description: Value is the taint value the toleration matches to.
+                      If the operator is Exists, the value should be empty, otherwise
+                      just a regular string.
+                    type: string
+                type: object
+              type: array
+          type: object
+      type: object
+  version: v1alpha1
+  versions:
+  - name: v1alpha1
+    served: true
+    storage: true
diff --git a/deploy/crds/logging.openshift.io_logforwardings_crd.yaml b/deploy/crds/logging.openshift.io_logforwardings_crd.yaml
new file mode 100644
index 0000000000..696d4f25cc
--- /dev/null
+++ b/deploy/crds/logging.openshift.io_logforwardings_crd.yaml
@@ -0,0 +1,188 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: logforwardings.logging.openshift.io
+spec:
+  group: logging.openshift.io
+  names:
+    kind: LogForwarding
+    listKind: LogForwardingList
+    plural: logforwardings
+    singular: logforwarding
+  scope: Namespaced
+  validation:
+    openAPIV3Schema:
+      description: LogForwarding is the Schema for the logforwardings API
+      properties:
+        apiVersion:
+          description: 'APIVersion defines the versioned schema of this representation
+            of an object. Servers should convert recognized schemas to the latest
+            internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+          type: string
+        kind:
+          description: 'Kind is a string value representing the REST resource this
+            object represents. Servers may infer this from the endpoint the client
+            submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+          type: string
+        metadata:
+          type: object
+        spec:
+          description: ForwardingSpec specifies log forwarding pipelines from defined
+            sources to destination outputs
+          properties:
+            disableDefaultForwarding:
+              type: boolean
+            outputs:
+              items:
+                description: OutputSpec specifies destination config for log message
+                  endpoints
+                properties:
+                  endpoint:
+                    type: string
+                  name:
+                    type: string
+                  secret:
+                    description: OutputSecretSpec specifies secrets for pipelines
+                    properties:
+                      name:
+                        description: Name is the name of the secret to use with this
+                          output
+                        type: string
+                    required:
+                    - name
+                    type: object
+                  type:
+                    description: OutputType defines the type of endpoint that will
+                      receive messages
+                    type: string
+                type: object
+              type: array
+            pipelines:
+              items:
+                description: PipelineSpec is the spec for routing sources to named
+                  targets
+                properties:
+                  inputSource:
+                    description: LogSourceType is an explicitly defined log source
+                    type: string
+                  name:
+                    type: string
+                  outputRefs:
+                    description: OutputRefs is a list of the names of outputs defined
+                      by forwarding.outputs
+                    items:
+                      type: string
+                    type: array
+                type: object
+              type: array
+          type: object
+        status:
+          description: ForwardingStatus is the status of spec'd forwarding
+          properties:
+            lastUpdated:
+              description: LastUpdated represents the last time that the status was
+                updated.
+              format: date-time
+              type: string
+            message:
+              description: Message is a human-readable message about the status of
+                forwarding.
+              type: string
+            outputs:
+              description: Outputs is the status of the outputs
+              items:
+                description: OutputStatus of a given output
+                properties:
+                  conditions:
+                    description: Reasons for the state of the corresponding output
+                      for this status
+                    items:
+                      properties:
+                        message:
+                          type: string
+                        reason:
+                          description: OutputConditionReason provides a reason for
+                            the given state
+                          type: string
+                        status:
+                          type: string
+                        type:
+                          type: string
+                      type: object
+                    type: array
+                  lastUpdated:
+                    description: LastUpdated represents the last time that the status
+                      was updated.
+                    format: date-time
+                    type: string
+                  message:
+                    description: Message about the corresponding output
+                    type: string
+                  name:
+                    description: Name of the corresponding output for this status
+                    type: string
+                  reason:
+                    description: Reasons for the state of the corresponding output
+                      for this status
+                    type: string
+                  state:
+                    description: State of the corresponding output for this status
+                    type: string
+                type: object
+              type: array
+            pipelines:
+              description: Pipelines is the status of the pipelines
+              items:
+                description: PipelineStatus is the status of a given pipeline
+                properties:
+                  conditions:
+                    description: Reasons for the state of the corresponding pipeline
+                      for this status
+                    items:
+                      properties:
+                        message:
+                          type: string
+                        reason:
+                          type: string
+                        status:
+                          type: string
+                        type:
+                          type: string
+                      type: object
+                    type: array
+                  lastUpdated:
+                    description: LastUpdated represents the last time that the status
+                      was updated.
+                    format: date-time
+                    type: string
+                  message:
+                    type: string
+                  name:
+                    description: Name of the corresponding pipeline for this status
+                    type: string
+                  reason:
+                    type: string
+                  state:
+                    description: State of the corresponding pipeline for this status
+                    type: string
+                type: object
+              type: array
+            reason:
+              description: Reason is a one-word CamelCase reason for the condition's
+                last transition.
+              type: string
+            sources:
+              description: LogSources lists the configured log sources
+              items:
+                description: LogSourceType is an explicitly defined log source
+                type: string
+              type: array
+            state:
+              description: State is the current state of LogForwarding instance
+              type: string
+          type: object
+      type: object
+  version: v1alpha1
+  versions:
+  - name: v1alpha1
+    served: true
+    storage: true
diff --git a/pkg/apis/logging/v1/clusterlogging_types.go b/pkg/apis/logging/v1/clusterlogging_types.go
index c29dee4f04..d5262e9376 100644
--- a/pkg/apis/logging/v1/clusterlogging_types.go
+++ b/pkg/apis/logging/v1/clusterlogging_types.go
@@ -123,11 +123,11 @@ type VisualizationStatus struct {
 }
 
 type KibanaStatus struct {
-	Replicas    int32                         `json:"replicas"`
-	Deployment  string                        `json:"deployment"`
-	ReplicaSets []string                      `json:"replicaSets"`
-	Pods        PodStateMap                   `json:"pods"`
-	Conditions  map[string][]ClusterCondition `json:"clusterCondition,omitempty"`
+	Replicas    int32                        `json:"replicas"`
+	Deployment  string                       `json:"deployment"`
+	ReplicaSets []string                     `json:"replicaSets"`
+	Pods        PodStateMap                  `json:"pods"`
+	Conditions  map[string]ClusterConditions `json:"clusterCondition,omitempty"`
 }
 
 type LogStoreStatus struct {
@@ -135,17 +135,17 @@ type LogStoreStatus struct {
 }
 
 type ElasticsearchStatus struct {
-	ClusterName            string                                      `json:"clusterName"`
-	NodeCount              int32                                       `json:"nodeCount"`
-	ReplicaSets            []string                                    `json:"replicaSets,omitempty"`
-	Deployments            []string                                    `json:"deployments,omitempty"`
-	StatefulSets           []string                                    `json:"statefulSets,omitempty"`
-	ClusterHealth          string                                      `json:"clusterHealth,omitempty"`
-	Cluster                elasticsearch.ClusterHealth                 `json:"cluster"`
-	Pods                   map[ElasticsearchRoleType]PodStateMap       `json:"pods"`
-	ShardAllocationEnabled elasticsearch.ShardAllocationState          `json:"shardAllocationEnabled"`
-	ClusterConditions      []elasticsearch.ClusterCondition            `json:"clusterConditions,omitempty"`
-	NodeConditions         map[string][]elasticsearch.ClusterCondition `json:"nodeConditions,omitempty"`
+	ClusterName            string                                     `json:"clusterName"`
+	NodeCount              int32                                      `json:"nodeCount"`
+	ReplicaSets            []string                                   `json:"replicaSets,omitempty"`
+	Deployments            []string                                   `json:"deployments,omitempty"`
+	StatefulSets           []string                                   `json:"statefulSets,omitempty"`
+
ClusterHealth string `json:"clusterHealth,omitempty"` + Cluster elasticsearch.ClusterHealth `json:"cluster"` + Pods map[ElasticsearchRoleType]PodStateMap `json:"pods"` + ShardAllocationEnabled elasticsearch.ShardAllocationState `json:"shardAllocationEnabled"` + ClusterConditions ElasticsearchClusterConditions `json:"clusterConditions,omitempty"` + NodeConditions map[string]ElasticsearchClusterConditions `json:"nodeConditions,omitempty"` } type CollectionStatus struct { @@ -160,17 +160,17 @@ type EventCollectionStatus struct { } type FluentdCollectorStatus struct { - DaemonSet string `json:"daemonSet"` - Nodes map[string]string `json:"nodes"` - Pods PodStateMap `json:"pods"` - Conditions map[string][]ClusterCondition `json:"clusterCondition,omitempty"` + DaemonSet string `json:"daemonSet"` + Nodes map[string]string `json:"nodes"` + Pods PodStateMap `json:"pods"` + Conditions map[string]ClusterConditions `json:"clusterCondition,omitempty"` } type FluentdNormalizerStatus struct { - Replicas int32 `json:"replicas"` - ReplicaSets []string `json:"replicaSets"` - Pods PodStateMap `json:"pods"` - Conditions map[string][]ClusterCondition `json:"clusterCondition,omitempty"` + Replicas int32 `json:"replicas"` + ReplicaSets []string `json:"replicaSets"` + Pods PodStateMap `json:"pods"` + Conditions map[string]ClusterConditions `json:"clusterCondition,omitempty"` } type NormalizerStatus struct { @@ -182,10 +182,10 @@ type CurationStatus struct { } type CuratorStatus struct { - CronJob string `json:"cronJobs"` - Schedule string `json:"schedules"` - Suspended bool `json:"suspended"` - Conditions map[string][]ClusterCondition `json:"clusterCondition,omitempty"` + CronJob string `json:"cronJobs"` + Schedule string `json:"schedules"` + Suspended bool `json:"suspended"` + Conditions map[string]ClusterConditions `json:"clusterCondition,omitempty"` } type PodStateMap map[PodStateType][]string @@ -268,6 +268,10 @@ const ( NodeStorage ClusterConditionType = "NodeStorage" ) +// `operator-sdk generate crds` does not allow map-of-slice, must use a named type. +type ClusterConditions []ClusterCondition +type ElasticsearchClusterConditions []elasticsearch.ClusterCondition + // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ClusterLoggingList contains a list of ClusterLogging diff --git a/pkg/apis/logging/v1/doc.go b/pkg/apis/logging/v1/doc.go index eab97cdeca..16422dccc2 100644 --- a/pkg/apis/logging/v1/doc.go +++ b/pkg/apis/logging/v1/doc.go @@ -1,4 +1,5 @@ // Package v1 contains API Schema definitions for the logging v1 API group +// // +k8s:deepcopy-gen=package,register // +groupName=logging.openshift.io package v1 diff --git a/pkg/apis/logging/v1/zz_generated.deepcopy.go b/pkg/apis/logging/v1/zz_generated.deepcopy.go index 5c3348667d..7d135be1c6 100644 --- a/pkg/apis/logging/v1/zz_generated.deepcopy.go +++ b/pkg/apis/logging/v1/zz_generated.deepcopy.go @@ -1,6 +1,6 @@ // +build !ignore_autogenerated -// Code generated by operator-sdk. DO NOT EDIT. +// Code generated by operator-sdk-v0.15.2. DO NOT EDIT. package v1 @@ -27,6 +27,28 @@ func (in *ClusterCondition) DeepCopy() *ClusterCondition { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in ClusterConditions) DeepCopyInto(out *ClusterConditions) { + { + in := &in + *out = make(ClusterConditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConditions. +func (in ClusterConditions) DeepCopy() ClusterConditions { + if in == nil { + return nil + } + out := new(ClusterConditions) + in.DeepCopyInto(out) + return *out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterLogging) DeepCopyInto(out *ClusterLogging) { *out = *in @@ -265,14 +287,14 @@ func (in *CuratorStatus) DeepCopyInto(out *CuratorStatus) { *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(map[string][]ClusterCondition, len(*in)) + *out = make(map[string]ClusterConditions, len(*in)) for key, val := range *in { var outVal []ClusterCondition if val == nil { (*out)[key] = nil } else { in, out := &val, &outVal - *out = make([]ClusterCondition, len(*in)) + *out = make(ClusterConditions, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -293,6 +315,28 @@ func (in *CuratorStatus) DeepCopy() *CuratorStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in ElasticsearchClusterConditions) DeepCopyInto(out *ElasticsearchClusterConditions) { + { + in := &in + *out = make(ElasticsearchClusterConditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchClusterConditions. +func (in ElasticsearchClusterConditions) DeepCopy() ElasticsearchClusterConditions { + if in == nil { + return nil + } + out := new(ElasticsearchClusterConditions) + in.DeepCopyInto(out) + return *out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ElasticsearchSpec) DeepCopyInto(out *ElasticsearchSpec) { *out = *in @@ -375,21 +419,21 @@ func (in *ElasticsearchStatus) DeepCopyInto(out *ElasticsearchStatus) { } if in.ClusterConditions != nil { in, out := &in.ClusterConditions, &out.ClusterConditions - *out = make([]loggingv1.ClusterCondition, len(*in)) + *out = make(ElasticsearchClusterConditions, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.NodeConditions != nil { in, out := &in.NodeConditions, &out.NodeConditions - *out = make(map[string][]loggingv1.ClusterCondition, len(*in)) + *out = make(map[string]ElasticsearchClusterConditions, len(*in)) for key, val := range *in { var outVal []loggingv1.ClusterCondition if val == nil { (*out)[key] = nil } else { in, out := &val, &outVal - *out = make([]loggingv1.ClusterCondition, len(*in)) + *out = make(ElasticsearchClusterConditions, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -469,14 +513,14 @@ func (in *FluentdCollectorStatus) DeepCopyInto(out *FluentdCollectorStatus) { } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(map[string][]ClusterCondition, len(*in)) + *out = make(map[string]ClusterConditions, len(*in)) for key, val := range *in { var outVal []ClusterCondition if val == nil { (*out)[key] = nil } else { in, out := &val, &outVal - *out = make([]ClusterCondition, len(*in)) + *out = make(ClusterConditions, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -522,14 +566,14 @@ func (in *FluentdNormalizerStatus) DeepCopyInto(out *FluentdNormalizerStatus) { } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(map[string][]ClusterCondition, len(*in)) + *out = make(map[string]ClusterConditions, len(*in)) for key, val := range *in { var outVal []ClusterCondition if val == nil { (*out)[key] = nil } else { in, out := &val, &outVal - *out = make([]ClusterCondition, len(*in)) + *out = make(ClusterConditions, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -646,14 +690,14 @@ func (in *KibanaStatus) DeepCopyInto(out *KibanaStatus) { } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(map[string][]ClusterCondition, len(*in)) + *out = make(map[string]ClusterConditions, len(*in)) for key, val := range *in { var outVal []ClusterCondition if val == nil { (*out)[key] = nil } else { in, out := &val, &outVal - *out = make([]ClusterCondition, len(*in)) + *out = make(ClusterConditions, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } diff --git a/pkg/apis/logging/v1/zz_generated.defaults.go b/pkg/apis/logging/v1/zz_generated.defaults.go deleted file mode 100644 index 0b0c84e4a0..0000000000 --- a/pkg/apis/logging/v1/zz_generated.defaults.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build !ignore_autogenerated - -// Code generated by defaulter-gen. DO NOT EDIT. - -package v1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// RegisterDefaults adds defaulters functions to the given scheme. -// Public to allow building arbitrary schemes. -// All generated defaulters are covering - they call all nested defaulters. -func RegisterDefaults(scheme *runtime.Scheme) error { - return nil -} diff --git a/pkg/apis/logging/v1/zz_generated.openapi.go b/pkg/apis/logging/v1/zz_generated.openapi.go deleted file mode 100644 index e7512311ef..0000000000 --- a/pkg/apis/logging/v1/zz_generated.openapi.go +++ /dev/null @@ -1,87 +0,0 @@ -// +build !ignore_autogenerated - -// Code generated by openapi-gen. 
DO NOT EDIT. - -// This file was autogenerated by openapi-gen. Do not edit it manually! - -package v1 - -import ( - spec "github.com/go-openapi/spec" - common "k8s.io/kube-openapi/pkg/common" -) - -func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition { - return map[string]common.OpenAPIDefinition{ - "github.com/openshift/cluster-logging-operator/pkg/apis/logging/v1.ClusterLogging": schema_pkg_apis_logging_v1_ClusterLogging(ref), - "github.com/openshift/cluster-logging-operator/pkg/apis/logging/v1.ClusterLoggingSpec": schema_pkg_apis_logging_v1_ClusterLoggingSpec(ref), - "github.com/openshift/cluster-logging-operator/pkg/apis/logging/v1.ClusterLoggingStatus": schema_pkg_apis_logging_v1_ClusterLoggingStatus(ref), - } -} - -func schema_pkg_apis_logging_v1_ClusterLogging(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ClusterLogging is the Schema for the clusterloggings API", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/openshift/cluster-logging-operator/pkg/apis/logging/v1.ClusterLoggingSpec"), - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/openshift/cluster-logging-operator/pkg/apis/logging/v1.ClusterLoggingStatus"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/openshift/cluster-logging-operator/pkg/apis/logging/v1.ClusterLoggingSpec", "github.com/openshift/cluster-logging-operator/pkg/apis/logging/v1.ClusterLoggingStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_pkg_apis_logging_v1_ClusterLoggingSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ClusterLoggingSpec defines the desired state of ClusterLogging", - Properties: map[string]spec.Schema{}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_pkg_apis_logging_v1_ClusterLoggingStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ClusterLoggingStatus defines the observed state of ClusterLogging", - Properties: map[string]spec.Schema{}, - }, - }, - Dependencies: []string{}, - } -} diff --git a/pkg/apis/logging/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/logging/v1alpha1/zz_generated.deepcopy.go index c234e7c8be..f5e8c420d2 100644 --- a/pkg/apis/logging/v1alpha1/zz_generated.deepcopy.go +++ 
b/pkg/apis/logging/v1alpha1/zz_generated.deepcopy.go
@@ -1,6 +1,6 @@
 // +build !ignore_autogenerated
 
-// Code generated by operator-sdk. DO NOT EDIT.
+// Code generated by operator-sdk-v0.15.2. DO NOT EDIT.
 
 package v1alpha1
 
diff --git a/pkg/apis/logging/v1alpha1/zz_generated.defaults.go b/pkg/apis/logging/v1alpha1/zz_generated.defaults.go
deleted file mode 100644
index 7985166a60..0000000000
--- a/pkg/apis/logging/v1alpha1/zz_generated.defaults.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build !ignore_autogenerated
-
-// Code generated by defaulter-gen. DO NOT EDIT.
-
-package v1alpha1
-
-import (
-	runtime "k8s.io/apimachinery/pkg/runtime"
-)
-
-// RegisterDefaults adds defaulters functions to the given scheme.
-// Public to allow building arbitrary schemes.
-// All generated defaulters are covering - they call all nested defaulters.
-func RegisterDefaults(scheme *runtime.Scheme) error {
-	return nil
-}
diff --git a/pkg/k8shandler/status.go b/pkg/k8shandler/status.go
index 095c9883fe..da773a121b 100644
--- a/pkg/k8shandler/status.go
+++ b/pkg/k8shandler/status.go
@@ -142,7 +142,7 @@ func (clusterRequest *ClusterLoggingRequest) getElasticsearchStatus() ([]logging
 
 	if len(esList.Items) != 0 {
 		for _, cluster := range esList.Items {
-			nodeConditions := make(map[string][]elasticsearch.ClusterCondition)
+			nodeConditions := make(map[string]logging.ElasticsearchClusterConditions)
 
 			nodeStatus := logging.ElasticsearchStatus{
 				ClusterName: cluster.Name,
@@ -235,10 +235,10 @@ func isPodReady(pod v1.Pod) bool {
 	return true
 }
 
-func (clusterRequest *ClusterLoggingRequest) getPodConditions(component string) (map[string][]logging.ClusterCondition, error) {
+func (clusterRequest *ClusterLoggingRequest) getPodConditions(component string) (map[string]logging.ClusterConditions, error) {
 	// Get all pods based on status.Nodes[] and check their conditions
 	// get pod with label 'node-name=node.getName()'
-	podConditions := make(map[string][]logging.ClusterCondition)
+	podConditions := make(map[string]logging.ClusterConditions)
 
 	nodePodList := &core.PodList{
 		TypeMeta: metav1.TypeMeta{

From e452904e549365a5a9906d34eedcbd3307550e86 Mon Sep 17 00:00:00 2001
From: Periklis Tsirakidis
Date: Fri, 28 Feb 2020 17:48:52 +0100
Subject: [PATCH 20/21] Revert "Bug 1803196: Move shared config map to openshift-config-managed NS"

This reverts commit 83266bf00c13f73ddc01a5a2171968c43c0e528c.
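
For context, a minimal sketch (not part of this patch, and the function wrapper is hypothetical) of the behavior the revert restores; NewConfigMap, NewRole, NewRoleBinding, NewPolicyRules, NewPolicyRule, NewSubjects and NewSubject are the existing pkg/k8shandler constructors used in the visualization.go hunks below:

// Sketch only: after this revert the operator again publishes the Kibana
// URLs in a "sharing-config" ConfigMap inside its own namespace, guarded by
// a namespace-scoped Role/RoleBinding that lets any authenticated user read
// just that one ConfigMap.
func restoredSharingConfigSketch(cluster *logging.ClusterLogging, kibanaURL string) {
	// Kibana URLs live in the operator's namespace again,
	// not in openshift-config-managed.
	sharedConfig := NewConfigMap("sharing-config", cluster.Namespace,
		map[string]string{
			"kibanaAppURL":   kibanaURL,
			"kibanaInfraURL": kibanaURL,
		})

	// Read access scoped to exactly this ConfigMap.
	sharedRole := NewRole("sharing-config-reader", cluster.Namespace,
		NewPolicyRules(NewPolicyRule(
			[]string{""},               // core API group
			[]string{"configmaps"},     // resource
			[]string{"sharing-config"}, // resourceNames
			[]string{"get"},            // verbs
		)))

	sharedRoleBinding := NewRoleBinding("openshift-logging-sharing-config-reader-binding",
		cluster.Namespace, "sharing-config-reader",
		NewSubjects(NewSubject("Group", "system:authenticated")))

	_, _, _ = sharedConfig, sharedRole, sharedRoleBinding // created via clusterRequest.Create(...)
}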
--- Makefile | 1 - hack/testing/assertions | 21 ++-- .../test-010-deploy-via-olm-minimal.sh | 25 ++--- hack/testing/test-020-olm-upgrade.sh | 5 - hack/testing/test-367-logforwarding.sh | 2 - hack/testing/utils | 8 -- manifests/4.5/0110_clusterrolebindings.yaml | 2 +- manifests/4.5/0200_roles.yaml | 14 --- manifests/4.5/0210_rolebindings.yaml | 13 --- pkg/k8shandler/configmap.go | 13 +-- pkg/k8shandler/consoleexternalloglink.go | 4 +- pkg/k8shandler/visualization.go | 78 +++++++------- pkg/k8shandler/visualization_test.go | 102 ------------------ 13 files changed, 60 insertions(+), 228 deletions(-) delete mode 100644 manifests/4.5/0200_roles.yaml delete mode 100644 manifests/4.5/0210_rolebindings.yaml diff --git a/Makefile b/Makefile index ec65dbe499..33b0ce0578 100644 --- a/Makefile +++ b/Makefile @@ -18,7 +18,6 @@ MAIN_PKG=cmd/manager/main.go export OCP_VERSION?=$(shell basename $(shell find manifests/ -maxdepth 1 -not -name manifests -type d)) export CSV_FILE=$(CURDIR)/manifests/$(OCP_VERSION)/cluster-logging.v$(OCP_VERSION).0.clusterserviceversion.yaml export NAMESPACE?=openshift-logging -export MANAGED_CONFIG_NAMESPACE?=openshift-config-managed export EO_CSV_FILE=$(CURDIR)/vendor/github.com/openshift/elasticsearch-operator/manifests/$(OCP_VERSION)/elasticsearch-operator.v$(OCP_VERSION).0.clusterserviceversion.yaml FLUENTD_IMAGE?=quay.io/openshift/origin-logging-fluentd:latest diff --git a/hack/testing/assertions b/hack/testing/assertions index 212c7fb55d..a6bae58f10 100644 --- a/hack/testing/assertions +++ b/hack/testing/assertions @@ -1,21 +1,16 @@ #!/bin/bash source "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )/utils" assert_resources_exist(){ - # verify deployments -- kibana, curator - try_until_success "oc -n $NAMESPACE get deployment kibana" "${TIMEOUT_MIN}" + # verify deployments -- kibana, curator + try_until_success "oc -n $NAMESPACE get deployment kibana" ${TIMEOUT_MIN} - # verify cron - try_until_success "oc -n $NAMESPACE get cronjob curator" "${TIMEOUT_MIN}" + # verify cron + try_until_success "oc -n $NAMESPACE get cronjob curator" ${TIMEOUT_MIN} - # verify DS - try_until_success "oc -n $NAMESPACE get ds fluentd" "${TIMEOUT_MIN}" + # verify DS + try_until_success "oc -n $NAMESPACE get ds fluentd" ${TIMEOUT_MIN} - # verify ER - try_until_success "oc -n $NAMESPACE get elasticsearch elasticsearch" "${TIMEOUT_MIN}" + # verify ER + try_until_success "oc -n $NAMESPACE get elasticsearch elasticsearch" ${TIMEOUT_MIN} } - -assert_kibana_shared_config_exist() { - # verify kibana shared config map - try_until_success "oc -n $MANAGED_CONFIG_NAMESPACE get configmap logging-shared-config" "${TIMEOUT_MIN}" -} diff --git a/hack/testing/test-010-deploy-via-olm-minimal.sh b/hack/testing/test-010-deploy-via-olm-minimal.sh index d7d3bf0fc5..c5720a237f 100755 --- a/hack/testing/test-010-deploy-via-olm-minimal.sh +++ b/hack/testing/test-010-deploy-via-olm-minimal.sh @@ -5,9 +5,8 @@ set -e -source "$(dirname "${BASH_SOURCE[0]}")/../lib/init.sh" -source "$(dirname "${BASH_SOURCE[0]}")/assertions" -source "$(dirname "${BASH_SOURCE[0]}")/utils" +source "$(dirname "${BASH_SOURCE[0]}" )/../lib/init.sh" +source "$(dirname $0)/assertions" os::test::junit::declare_suite_start "${BASH_SOURCE[0]}" @@ -27,8 +26,6 @@ cleanup(){ oc delete ns ${NAMESPACE} --wait=true --ignore-not-found oc delete crd elasticsearches.logging.openshift.io --wait=false --ignore-not-found os::cmd::try_until_failure "oc get project ${NAMESPACE}" "$((1 * $minute))" - - cleanup_olm_catalog_unsupported_resources 
os::cleanup::all "${return_code}"
 
@@ -51,8 +48,6 @@ oc create ns ${NAMESPACE} || :
 eo_version=$(basename $(find ${repo_dir}/vendor/github.com/openshift/elasticsearch-operator/manifests -type d | sort -r | head -n 1))
 os::cmd::expect_success "oc create -f ${repo_dir}/vendor/github.com/openshift/elasticsearch-operator/manifests/${eo_version}/elasticsearches.crd.yaml"
-# Create static cluster roles and rolebindings
-deploy_olm_catalog_unsupported_resources
 
 os::log::info "Deploying operator from ${manifest}"
 NAMESPACE=${NAMESPACE} \
@@ -70,15 +65,10 @@ fi
 
 TIMEOUT_MIN=$((2 * $minute))
 
-# verify metrics rbac
-# extra resources not support for ConfigMap based catalogs for now.
-os::cmd::expect_success "oc get clusterrole clusterlogging-collector-metrics"
-os::cmd::expect_success "oc get clusterrolebinding clusterlogging-collector-metrics"
-
-# verify shared config rbac
-# extra resources not support for ConfigMap based catalogs for now.
-os::cmd::expect_success "oc -n ${MANAGED_CONFIG_NAMESPACE} get role clusterlogging-shared-config"
-os::cmd::expect_success "oc -n ${MANAGED_CONFIG_NAMESPACE} get rolebinding clusterlogging-shared-config"
+## verify metrics rbac
+# extra resources not supported for ConfigMap based catalogs for now.
+#os::cmd::expect_success "oc get clusterrole clusterlogging-collector-metrics"
+#os::cmd::expect_success "oc get clusterrolebinding clusterlogging-collector-metrics"
 
 # wait for operator to be ready
 os::cmd::try_until_text "oc -n $NAMESPACE get deployment cluster-logging-operator -o jsonpath={.status.availableReplicas} --ignore-not-found" "1" ${TIMEOUT_MIN}
@@ -88,6 +78,3 @@ os::cmd::expect_success "oc -n $NAMESPACE create -f ${repo_dir}/hack/cr.yaml"
 
 # assert deployment
 assert_resources_exist
-
-# assert kibana shared config
-assert_kibana_shared_config_exist
diff --git a/hack/testing/test-020-olm-upgrade.sh b/hack/testing/test-020-olm-upgrade.sh
index 1f983415cb..f7cfe3fe2c 100755
--- a/hack/testing/test-020-olm-upgrade.sh
+++ b/hack/testing/test-020-olm-upgrade.sh
@@ -91,7 +91,6 @@ assert_resources_exist
 oc describe -n ${NAMESPACE} deployment/cluster-logging-operator > $ARTIFACT_DIR/cluster-logging-operator.describe.before_update 2>&1
 
 deploy_config_map_catalog_source $NAMESPACE ${repo_dir}/manifests "${IMAGE_CLUSTER_LOGGING_OPERATOR}"
-deploy_olm_catalog_unsupported_resources
 
 # patch subscription
 payload="{\"op\":\"replace\",\"path\":\"/spec/source\",\"value\":\"cluster-logging\"}"
@@ -105,8 +104,4 @@ try_until_text "oc -n openshift-logging get deployment cluster-logging-operator
 # verify operator is ready
 try_until_text "oc -n openshift-logging get deployment cluster-logging-operator -o jsonpath={.status.updatedReplicas} --ignore-not-found" "1" ${TIMEOUT_MIN}
 
-# assert deployment
 assert_resources_exist
-
-# assert kibana shared config
-assert_kibana_shared_config_exist
diff --git a/hack/testing/test-367-logforwarding.sh b/hack/testing/test-367-logforwarding.sh
index 5a450c52f8..769fe5ac4a 100755
--- a/hack/testing/test-367-logforwarding.sh
+++ b/hack/testing/test-367-logforwarding.sh
@@ -71,7 +71,5 @@ for dir in $(ls -d $TEST_DIR); do
     oc delete $ns --ignore-not-found --force --grace-period=0||:
     try_until_failure "oc get $ns" "$((1 * $minute))"
   done
-
-  cleanup_olm_catalog_unsupported_resources
 done
 exit $failed
diff --git a/hack/testing/utils b/hack/testing/utils
index 434bce5ef0..6f763d7dfb 100644
--- a/hack/testing/utils
+++ b/hack/testing/utils
@@ -111,17 +111,11 @@ deploy_olm_catalog_unsupported_resources(){
 
   # Create static cluster roles and rolebindings
   oc create
-f ${manifest}/$version/0100_clusterroles.yaml ||: oc create -f ${manifest}/$version/0110_clusterrolebindings.yaml ||: - - # Create static cluster roles and rolebindings - oc create -f ${manifest}/$version/0200_roles.yaml ||: - oc create -f ${manifest}/$version/0210_rolebindings.yaml ||: } cleanup_olm_catalog_unsupported_resources(){ oc delete clusterrolebinding clusterlogging-collector-metrics --wait=false --ignore-not-found oc delete clusterrole clusterlogging-collector-metrics --wait=false --ignore-not-found - oc -n "${MANAGED_CONFIG_NAMESPACE}" delete role clusterlogging-shared-config - oc -n "${MANAGED_CONFIG_NAMESPACE}" delete rolebinding clusterlogging-shared-config } deploy_marketplace_operator(){ @@ -262,8 +256,6 @@ function deploy_clusterlogging_operator() { -e "/name: FLUENTD_IMAGE/,/value:/s,value:.*\$,value: ${f_img}," \ -e "/name: OAUTH_PROXY_IMAGE/,/value:/s,value:.*\$,value: ${op_img}," \ -i $csv - - deploy_olm_catalog_unsupported_resources deploy_operator "openshift-logging" "cluster-logging-operator" $manifest $IMAGE_CLUSTER_LOGGING_OPERATOR $((2 * $minute)) } diff --git a/manifests/4.5/0110_clusterrolebindings.yaml b/manifests/4.5/0110_clusterrolebindings.yaml index 2ebc336a3a..45e2d79140 100644 --- a/manifests/4.5/0110_clusterrolebindings.yaml +++ b/manifests/4.5/0110_clusterrolebindings.yaml @@ -9,4 +9,4 @@ roleRef: subjects: - kind: ServiceAccount name: prometheus-k8s - namespace: openshift-monitoring + namespace: openshift-monitoring diff --git a/manifests/4.5/0200_roles.yaml b/manifests/4.5/0200_roles.yaml deleted file mode 100644 index 89f36d4c7e..0000000000 --- a/manifests/4.5/0200_roles.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: clusterlogging-shared-config - namespace: openshift-config-managed -rules: -- apiGroups: [""] - resources: - - configmaps - verbs: - - get - - create - - update - - delete diff --git a/manifests/4.5/0210_rolebindings.yaml b/manifests/4.5/0210_rolebindings.yaml deleted file mode 100644 index baa0848c11..0000000000 --- a/manifests/4.5/0210_rolebindings.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: clusterlogging-shared-config - namespace: openshift-config-managed -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: clusterlogging-shared-config -subjects: -- kind: ServiceAccount - name: cluster-logging-operator - namespace: openshift-logging diff --git a/pkg/k8shandler/configmap.go b/pkg/k8shandler/configmap.go index 5a756f047d..9b1bf4f8c5 100644 --- a/pkg/k8shandler/configmap.go +++ b/pkg/k8shandler/configmap.go @@ -88,21 +88,12 @@ func (clusterRequest *ClusterLoggingRequest) createOrUpdateConfigMap(configMap * return nil } -//RemoveConfigMap with a given name and the cluster request namespace +//RemoveConfigMap with a given name and namespace func (clusterRequest *ClusterLoggingRequest) RemoveConfigMap(configmapName string) error { - return clusterRequest.removeConfigMapFromNs(configmapName, clusterRequest.cluster.Namespace) -} - -//RemoveSharedConfigMap with a given a name and a shared cluster namespace -func (clusterRequest *ClusterLoggingRequest) RemoveSharedConfigMap(configMapName, namespace string) error { - return clusterRequest.removeConfigMapFromNs(configMapName, namespace) -} - -func (clusterRequest *ClusterLoggingRequest) removeConfigMapFromNs(configmapName, namespace string) error { configMap := NewConfigMap( configmapName, - namespace, + clusterRequest.cluster.Namespace, 
map[string]string{}, ) diff --git a/pkg/k8shandler/consoleexternalloglink.go b/pkg/k8shandler/consoleexternalloglink.go index 71486b3463..7fcdec5a76 100644 --- a/pkg/k8shandler/consoleexternalloglink.go +++ b/pkg/k8shandler/consoleexternalloglink.go @@ -26,8 +26,8 @@ func NewConsoleExternalLogLink(resourceName, namespace, consoleText, hrefTemplat }, }, Spec: consolev1.ConsoleExternalLogLinkSpec{ - Text: consoleText, - HrefTemplate: hrefTemplate, + Text: consoleText, + HrefTemplate: hrefTemplate, NamespaceFilter: namespaceFilter, }, } diff --git a/pkg/k8shandler/visualization.go b/pkg/k8shandler/visualization.go index 16aa2d9f74..eb2f313e40 100644 --- a/pkg/k8shandler/visualization.go +++ b/pkg/k8shandler/visualization.go @@ -29,12 +29,6 @@ const ( // The following strings are turned into JavaScript RegExps. Online tool to test them: https://regex101.com/ nodesAndContainersNamespaceFilter = "^(openshift-.*|kube-.*|openshift$|kube$|default$)" appsNamespaceFilter = "^((?!" + nodesAndContainersNamespaceFilter + ").)*$" // ^((?!^(openshift-.*|kube-.*|openshift$|kube$|default$)).)*$ - - loggingSharedConfigMapNamePre44x = "sharing-config" - loggingSharedConfigRolePre44x = "sharing-config-reader" - loggingSharedConfigRoleBindingPre44x = "openshift-logging-sharing-config-reader-binding" - loggingSharedConfigMapName = "logging-shared-config" - loggingSharedConfigNs = "openshift-config-managed" ) var ( @@ -207,11 +201,7 @@ func (clusterRequest *ClusterLoggingRequest) removeKibana() (err error) { return } - if err = clusterRequest.RemoveConfigMap(loggingSharedConfigMapNamePre44x); err != nil { - return - } - - if err = clusterRequest.RemoveSharedConfigMap(loggingSharedConfigMapName, loggingSharedConfigNs); err != nil { + if err = clusterRequest.RemoveConfigMap("sharing-config"); err != nil { return } @@ -453,42 +443,56 @@ func (clusterRequest *ClusterLoggingRequest) createOrUpdateKibanaRoute() error { } } - if err := clusterRequest.createOrUpdateKibanaSharedConfigMap(); err != nil { - return err - } - - return nil -} - -func (clusterRequest *ClusterLoggingRequest) createOrUpdateKibanaSharedConfigMap() error { - cluster := clusterRequest.cluster - kibanaURL, err := clusterRequest.GetRouteURL("kibana") if err != nil { return err } - sharedConfig := createSharedConfig(loggingSharedConfigNs, kibanaURL, kibanaURL) + sharedConfig := createSharedConfig(cluster.Namespace, kibanaURL, kibanaURL) utils.AddOwnerRefToObject(sharedConfig, utils.AsOwner(cluster)) - err = clusterRequest.CreateOrUpdateConfigMap(sharedConfig) + err = clusterRequest.Create(sharedConfig) if err != nil && !errors.IsAlreadyExists(err) { return fmt.Errorf("Failure creating Kibana route shared config: %v", err) } - oldSharedConfig := NewConfigMap(loggingSharedConfigMapNamePre44x, cluster.GetNamespace(), map[string]string{}) - if err = clusterRequest.Delete(oldSharedConfig); err != nil && !errors.IsNotFound(err) { - return fmt.Errorf("Failure delete old Kibana route shared config for %q: %v", cluster.Name, err) - } + sharedRole := NewRole( + "sharing-config-reader", + cluster.Namespace, + NewPolicyRules( + NewPolicyRule( + []string{""}, + []string{"configmaps"}, + []string{"sharing-config"}, + []string{"get"}, + ), + ), + ) + + utils.AddOwnerRefToObject(sharedRole, utils.AsOwner(clusterRequest.cluster)) - oldSharedRole := NewRole(loggingSharedConfigRolePre44x, cluster.GetNamespace(), nil) - if err = clusterRequest.Delete(oldSharedRole); err != nil && !errors.IsNotFound(err) { - return fmt.Errorf("Failure deleting old Kibana shared config role 
for %q: %v", cluster.Name, err) + err = clusterRequest.Create(sharedRole) + if err != nil && !errors.IsAlreadyExists(err) { + return fmt.Errorf("Failure creating Kibana route shared config role for %q: %v", cluster.Name, err) } - oldSharedRoleBinding := NewRoleBinding(loggingSharedConfigRoleBindingPre44x, cluster.GetNamespace(), loggingSharedConfigRolePre44x, nil) - if err = clusterRequest.Delete(oldSharedRoleBinding); err != nil && !errors.IsNotFound(err) { - return fmt.Errorf("Failure deleting old Kibana shared config role binding for %q: %v", cluster.Name, err) + sharedRoleBinding := NewRoleBinding( + "openshift-logging-sharing-config-reader-binding", + cluster.Namespace, + "sharing-config-reader", + NewSubjects( + NewSubject( + "Group", + "system:authenticated", + ), + ), + ) + + utils.AddOwnerRefToObject(sharedRoleBinding, utils.AsOwner(clusterRequest.cluster)) + + err = clusterRequest.Create(sharedRoleBinding) + if err != nil && !errors.IsAlreadyExists(err) { + return fmt.Errorf("Failure creating Kibana route shared config role binding for %q: %v", cluster.Name, err) } return nil @@ -807,13 +811,13 @@ func newKibanaPodSpec(cluster *logging.ClusterLogging, kibanaName string, elasti return kibanaPodSpec } -func createSharedConfig(namespace, kibanaAppPublicURL, kibanaInfraAppPublicURL string) *v1.ConfigMap { +func createSharedConfig(namespace, kibanaAppURL, kibanaInfraURL string) *v1.ConfigMap { return NewConfigMap( - loggingSharedConfigMapName, + "sharing-config", namespace, map[string]string{ - "kibanaAppPublicURL": kibanaAppPublicURL, - "kibanaInfraAppPublicURL": kibanaInfraAppPublicURL, + "kibanaAppURL": kibanaAppURL, + "kibanaInfraURL": kibanaInfraURL, }, ) } diff --git a/pkg/k8shandler/visualization_test.go b/pkg/k8shandler/visualization_test.go index c81b168f26..468b8dfd3f 100644 --- a/pkg/k8shandler/visualization_test.go +++ b/pkg/k8shandler/visualization_test.go @@ -1,7 +1,6 @@ package k8shandler import ( - "context" "fmt" "reflect" "strings" @@ -11,17 +10,10 @@ import ( logging "github.com/openshift/cluster-logging-operator/pkg/apis/logging/v1" "github.com/openshift/cluster-logging-operator/pkg/constants" "github.com/openshift/cluster-logging-operator/pkg/utils" - "sigs.k8s.io/controller-runtime/pkg/client/fake" - routev1 "github.com/openshift/api/route/v1" v1 "k8s.io/api/core/v1" - rbac "k8s.io/api/rbac/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/kubernetes/scheme" ) func TestNewKibanaPodSpecSetsProxyToUseServiceAccountAsOAuthClient(t *testing.T) { @@ -251,100 +243,6 @@ func TestNewKibanaPodSpecWhenProxyConfigExists(t *testing.T) { checkKibanaProxyVolumesAndVolumeMounts(t, podSpec, constants.KibanaTrustedCAName) } -func TestNewLoggingSharedConfigMapExists(t *testing.T) { - _ = routev1.AddToScheme(scheme.Scheme) - cluster := &logging.ClusterLogging{ - ObjectMeta: metav1.ObjectMeta{ - Name: "instance", - Namespace: "openshift-logging", - }, - } - - testCases := []struct { - name string - objs []runtime.Object - wantCm *v1.ConfigMap - wantErr error - }{ - { - name: "new route creation", - wantCm: NewConfigMap( - loggingSharedConfigMapName, - loggingSharedConfigNs, - map[string]string{ - "kibanaAppPublicURL": "https://", - "kibanaInfraAppPublicURL": "https://", - }, - ), - }, - { - name: "update route with shared configmap, role and rolebinding migration", - objs: []runtime.Object{ - 
runtime.Object(NewConfigMap(loggingSharedConfigMapNamePre44x, cluster.GetNamespace(), map[string]string{})), - runtime.Object(NewRole(loggingSharedConfigRolePre44x, cluster.GetNamespace(), []rbac.PolicyRule{})), - runtime.Object(NewRoleBinding(loggingSharedConfigRoleBindingPre44x, cluster.GetNamespace(), loggingSharedConfigRolePre44x, []rbac.Subject{})), - }, - wantCm: NewConfigMap( - loggingSharedConfigMapName, - loggingSharedConfigNs, - map[string]string{ - "kibanaAppPublicURL": "https://", - "kibanaInfraAppPublicURL": "https://", - }, - ), - }, - } - for _, tc := range testCases { - tc := tc - t.Run(tc.name, func(t *testing.T) { - t.Parallel() - - client := fake.NewFakeClient(tc.objs...) - clusterRequest := &ClusterLoggingRequest{ - client: client, - cluster: cluster, - } - - if gotErr := clusterRequest.createOrUpdateKibanaRoute(); gotErr != tc.wantErr { - t.Errorf("got: %v, want: %v", gotErr, tc.wantErr) - } - - // Check new shared config map existings in openshift config shared namespace - key := types.NamespacedName{Namespace: loggingSharedConfigNs, Name: loggingSharedConfigMapName} - gotCm := &v1.ConfigMap{} - utils.AddOwnerRefToObject(tc.wantCm, utils.AsOwner(clusterRequest.cluster)) - - if err := client.Get(context.TODO(), key, gotCm); err != nil { - t.Errorf("Expected configmap got: %v", err) - } - if ok := reflect.DeepEqual(gotCm, tc.wantCm); !ok { - t.Errorf("got: %v, want: %v", gotCm, tc.wantCm) - } - - // Check old shared config map is deleted - key = types.NamespacedName{Namespace: cluster.GetNamespace(), Name: loggingSharedConfigMapNamePre44x} - gotCmPre44x := &v1.ConfigMap{} - if err := client.Get(context.TODO(), key, gotCmPre44x); !errors.IsNotFound(err) { - t.Errorf("Expected deleted shared config pre 4.4.x, got: %v", err) - } - - // Check old role to access the shared config map is deleted - key = types.NamespacedName{Namespace: cluster.GetNamespace(), Name: loggingSharedConfigRolePre44x} - gotRolePre44x := &rbac.Role{} - if err := client.Get(context.TODO(), key, gotRolePre44x); !errors.IsNotFound(err) { - t.Errorf("Expected deleted role for shared config map pre 4.4.x, got: %v", err) - } - - // Check old rolebinding for group system:autheticated is deleted - key = types.NamespacedName{Namespace: cluster.GetNamespace(), Name: loggingSharedConfigRoleBindingPre44x} - gotRoleBindingPre44x := &rbac.RoleBinding{} - if err := client.Get(context.TODO(), key, gotRoleBindingPre44x); !errors.IsNotFound(err) { - t.Errorf("Expected deleted rolebinding for shared config map pre 4.4.x, got: %v", err) - } - }) - } -} - func checkKibanaProxyEnvVar(t *testing.T, podSpec v1.PodSpec, name string, value string) { env := podSpec.Containers[1].Env found := false From 6be369ef37862032b799118029f638c0fdcf11b8 Mon Sep 17 00:00:00 2001 From: Arik Hadas Date: Wed, 22 Jan 2020 16:39:33 +0200 Subject: [PATCH 21/21] forwarding to an external syslog server - based on the existing remote-syslog plugin Signed-off-by: Arik Hadas --- manifests/4.5/logforwardings.crd.yaml | 1 + pkg/apis/logging/v1alpha1/forwarding_types.go | 3 + pkg/generators/factory.go | 8 +- .../forwarding/fluentd/fluent_conf.go | 22 ++- .../forwarding/fluentd/generators.go | 3 + .../fluentd/output_conf_syslog_test.go | 108 +++++++++++++ .../forwarding/fluentd/syslog_conf.go | 8 + .../forwarding/fluentd/templates.go | 23 +++ pkg/k8shandler/forwarding.go | 2 +- test/e2e/logforwarding/syslog/deleteme.go | 3 - .../syslog/forward_to_syslog_test.go | 144 ++++++++++++++++++ .../syslog/logforwarding_suite_test.go | 13 ++ 12 files changed, 325 
insertions(+), 13 deletions(-)
 create mode 100644 pkg/generators/forwarding/fluentd/output_conf_syslog_test.go
 create mode 100644 pkg/generators/forwarding/fluentd/syslog_conf.go
 delete mode 100644 test/e2e/logforwarding/syslog/deleteme.go
 create mode 100644 test/e2e/logforwarding/syslog/forward_to_syslog_test.go
 create mode 100644 test/e2e/logforwarding/syslog/logforwarding_suite_test.go

diff --git a/manifests/4.5/logforwardings.crd.yaml b/manifests/4.5/logforwardings.crd.yaml
index b4876cd4a1..da3b6a8b43 100644
--- a/manifests/4.5/logforwardings.crd.yaml
+++ b/manifests/4.5/logforwardings.crd.yaml
@@ -29,6 +29,7 @@ spec:
             enum:
             - elasticsearch
             - forward
+            - syslog
           name:
             description: The name of the output
             type: string
diff --git a/pkg/apis/logging/v1alpha1/forwarding_types.go b/pkg/apis/logging/v1alpha1/forwarding_types.go
index 21f8d46197..3cbe9e73c9 100644
--- a/pkg/apis/logging/v1alpha1/forwarding_types.go
+++ b/pkg/apis/logging/v1alpha1/forwarding_types.go
@@ -74,6 +74,9 @@ const (
 
 	//OutputTypeForward configures the pipeline to send messages via Fluent's secure forward
 	OutputTypeForward OutputType = "forward"
+
+	//OutputTypeSyslog configures the pipeline to send messages to an external syslog server through docebo/fluent-plugin-remote-syslog
+	OutputTypeSyslog OutputType = "syslog"
 )
 
 //LogForwardingReason The reason for the current state
diff --git a/pkg/generators/factory.go b/pkg/generators/factory.go
index c5efd21f6c..7cb93edf58 100644
--- a/pkg/generators/factory.go
+++ b/pkg/generators/factory.go
@@ -13,13 +13,7 @@ type Generator struct {
 
 //New creates an instance of a template engine for a set of templates
 func New(name string, addFunctions *template.FuncMap, templates ...string) (*Generator, error) {
-	allFunctions := funcMap
-	if addFunctions != nil {
-		for name, f := range *addFunctions {
-			allFunctions[name] = f
-		}
-	}
-	tmpl := template.New(name).Funcs(funcMap)
+	tmpl := template.New(name).Funcs(*addFunctions).Funcs(funcMap)
 	var err error
 	for i, s := range templates {
 		tmpl, err = tmpl.Parse(s)
diff --git a/pkg/generators/forwarding/fluentd/fluent_conf.go b/pkg/generators/forwarding/fluentd/fluent_conf.go
index 1404383233..c4ccb6e0c1 100644
--- a/pkg/generators/forwarding/fluentd/fluent_conf.go
+++ b/pkg/generators/forwarding/fluentd/fluent_conf.go
@@ -11,6 +11,7 @@ import (
 )
 
 var replacer = strings.NewReplacer(" ", "_", "-", "_", ".", "_")
+var protocolSeparator = "://"
 
 type outputLabelConf struct {
 	Name          string
@@ -44,17 +45,34 @@ func (conf *outputLabelConf) Template() *template.Template {
 	return conf.TemplateContext
 }
 func (conf *outputLabelConf) Host() string {
-	return strings.Split(conf.Target.Endpoint, ":")[0]
+	endpoint := stripProtocol(conf.Target.Endpoint)
+	return strings.Split(endpoint, ":")[0]
 }
 
 func (conf *outputLabelConf) Port() string {
-	parts := strings.Split(conf.Target.Endpoint, ":")
+	endpoint := stripProtocol(conf.Target.Endpoint)
+	parts := strings.Split(endpoint, ":")
 	if len(parts) == 1 {
 		return "9200"
 	}
 	return parts[1]
 }
 
+func (conf *outputLabelConf) Protocol() string {
+	endpoint := conf.Target.Endpoint
+	if index := strings.Index(endpoint, protocolSeparator); index != -1 {
+		return endpoint[:index]
+	}
+	return ""
+}
+
+func stripProtocol(endpoint string) string {
+	if index := strings.Index(endpoint, protocolSeparator); index != -1 {
+		endpoint = endpoint[index+len(protocolSeparator):]
+	}
+	return endpoint
+}
+
 func (conf *outputLabelConf) BufferPath() string {
 	return fmt.Sprintf("/var/lib/fluentd/%s", conf.StoreID())
 }
diff --git
a/pkg/generators/forwarding/fluentd/generators.go b/pkg/generators/forwarding/fluentd/generators.go index e79770b16b..fc16603757 100644 --- a/pkg/generators/forwarding/fluentd/generators.go +++ b/pkg/generators/forwarding/fluentd/generators.go @@ -202,6 +202,9 @@ func (engine *ConfigGenerator) generateOutputLabelBlocks(outputs []logforward.Ou case logforward.OutputTypeForward: storeTemplateName = "forward" outputTemplateName = "outputLabelConfNoCopy" + case logforward.OutputTypeSyslog: + storeTemplateName = "storeSyslog" + outputTemplateName = "outputLabelConfNoRetry" default: logger.Warnf("Pipeline targets include an unrecognized type: %q", output.Type) continue diff --git a/pkg/generators/forwarding/fluentd/output_conf_syslog_test.go b/pkg/generators/forwarding/fluentd/output_conf_syslog_test.go new file mode 100644 index 0000000000..16754f31a6 --- /dev/null +++ b/pkg/generators/forwarding/fluentd/output_conf_syslog_test.go @@ -0,0 +1,108 @@ +package fluentd + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + logging "github.com/openshift/cluster-logging-operator/pkg/apis/logging/v1alpha1" + test "github.com/openshift/cluster-logging-operator/test" +) + +var _ = Describe("Generating external syslog server output store config blocks", func() { + + var ( + err error + outputs []logging.OutputSpec + generator *ConfigGenerator + ) + BeforeEach(func() { + generator, err = NewConfigGenerator(false, false) + Expect(err).To(BeNil()) + }) + + Context("based on syslog plugin", func() { + tcpConf := `` + + udpConf := `` + + Context("for protocol-less endpoint", func() { + BeforeEach(func() { + outputs = []logging.OutputSpec{ + { + Type: logging.OutputTypeSyslog, + Name: "syslog-receiver", + Endpoint: "sl.svc.messaging.cluster.local:9654", + }, + } + }) + It("should produce well formed output label config", func() { + results, err := generator.generateOutputLabelBlocks(outputs) + Expect(err).To(BeNil()) + Expect(len(results)).To(Equal(1)) + test.Expect(results[0]).ToEqual(tcpConf) + }) + }) + + Context("for tcp endpoint", func() { + BeforeEach(func() { + outputs = []logging.OutputSpec{ + { + Type: logging.OutputTypeSyslog, + Name: "syslog-receiver", + Endpoint: "tcp://sl.svc.messaging.cluster.local:9654", + }, + } + }) + It("should produce well formed output label config", func() { + results, err := generator.generateOutputLabelBlocks(outputs) + Expect(err).To(BeNil()) + Expect(len(results)).To(Equal(1)) + test.Expect(results[0]).ToEqual(tcpConf) + }) + }) + + Context("for udp endpoint", func() { + BeforeEach(func() { + outputs = []logging.OutputSpec{ + { + Type: logging.OutputTypeSyslog, + Name: "syslog-receiver", + Endpoint: "udp://sl.svc.messaging.cluster.local:9654", + }, + } + }) + It("should produce well formed output label config", func() { + results, err := generator.generateOutputLabelBlocks(outputs) + Expect(err).To(BeNil()) + Expect(len(results)).To(Equal(1)) + test.Expect(results[0]).ToEqual(udpConf) + }) + }) + }) +}) diff --git a/pkg/generators/forwarding/fluentd/syslog_conf.go b/pkg/generators/forwarding/fluentd/syslog_conf.go new file mode 100644 index 0000000000..e5c41c797c --- /dev/null +++ b/pkg/generators/forwarding/fluentd/syslog_conf.go @@ -0,0 +1,8 @@ +package fluentd + +func (conf *outputLabelConf) SyslogPlugin() string { + if protocol := conf.Protocol(); protocol == "udp" { + return "syslog" + } + return "syslog_buffered" +} diff --git a/pkg/generators/forwarding/fluentd/templates.go b/pkg/generators/forwarding/fluentd/templates.go index 
6eaa6842dc..a5a22f8bc1 100644
--- a/pkg/generators/forwarding/fluentd/templates.go
+++ b/pkg/generators/forwarding/fluentd/templates.go
@@ -12,8 +12,10 @@ var templateRegistry = []string{
 	sourceToPipelineCopyTemplate,
 	outputLabelConfTemplate,
 	outputLabelConfNocopyTemplate,
+	outputLabelConfNoretryTemplate,
 	storeElasticsearchTemplate,
 	forwardTemplate,
+	storeSyslogTemplate,
 }
 
 const fluentConfTemplate = `{{- define "fluentConf" }}
@@ -484,6 +486,15 @@ const outputLabelConfNocopyTemplate = `{{- define "outputLabelConfNoCopy" }}
 
 {{- end}}`
 
+const outputLabelConfNoretryTemplate = `{{- define "outputLabelConfNoRetry" }}
+
+{{- end}}`
+
 const forwardTemplate = `{{- define "forward" }}
 # https://docs.fluentd.org/v1.0/articles/in_forward
 @type forward
@@ -573,3 +584,15 @@ const storeElasticsearchTemplate = `{{- define "storeElasticsearch" }}
 
 {{- end}}`
+
+const storeSyslogTemplate = `{{- define "storeSyslog" }}
+<store>
+	@type {{.SyslogPlugin}}
+	@id {{.StoreID}}
+	remote_syslog {{.Host}}
+	port {{.Port}}
+	hostname ${hostname}
+	facility user
+	severity debug
+</store>
+{{- end}}`
diff --git a/pkg/k8shandler/forwarding.go b/pkg/k8shandler/forwarding.go
index 9164ebe437..c4db656d51 100644
--- a/pkg/k8shandler/forwarding.go
+++ b/pkg/k8shandler/forwarding.go
@@ -26,7 +26,7 @@ const (
 )
 
 var (
-	outputTypes = sets.NewString(string(logforward.OutputTypeElasticsearch), string(logforward.OutputTypeForward))
+	outputTypes = sets.NewString(string(logforward.OutputTypeElasticsearch), string(logforward.OutputTypeForward), string(logforward.OutputTypeSyslog))
 	sourceTypes = sets.NewString(string(logforward.LogSourceTypeApp), string(logforward.LogSourceTypeInfra), string(logforward.LogSourceTypeAudit))
 )
 
diff --git a/test/e2e/logforwarding/syslog/deleteme.go b/test/e2e/logforwarding/syslog/deleteme.go
deleted file mode 100644
index 337a6fc2fa..0000000000
--- a/test/e2e/logforwarding/syslog/deleteme.go
+++ /dev/null
@@ -1,3 +0,0 @@
-package syslog
-
-// placeholder to make compiler happy
diff --git a/test/e2e/logforwarding/syslog/forward_to_syslog_test.go b/test/e2e/logforwarding/syslog/forward_to_syslog_test.go
new file mode 100644
index 0000000000..38603f6016
--- /dev/null
+++ b/test/e2e/logforwarding/syslog/forward_to_syslog_test.go
@@ -0,0 +1,144 @@
+package fluent
+
+import (
+	"fmt"
+	"runtime"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+	apps "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+
+	logforward "github.com/openshift/cluster-logging-operator/pkg/apis/logging/v1alpha1"
+	"github.com/openshift/cluster-logging-operator/pkg/logger"
+	"github.com/openshift/cluster-logging-operator/test/helpers"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+var _ = Describe("LogForwarding", func() {
+	_, filename, _, _ := runtime.Caller(0)
+	logger.Infof("Running %s", filename)
+	var (
+		err              error
+		syslogDeployment *apps.Deployment
+		e2e              = helpers.NewE2ETestFramework()
+	)
+	BeforeEach(func() {
+		if err := e2e.DeployLogGenerator(); err != nil {
+			logger.Errorf("unable to deploy log generator.
E: %s", err.Error()) + } + }) + Describe("when ClusterLogging is configured with 'forwarding' to an external syslog server", func() { + + Context("with the syslog plugin", func() { + + Context("and tcp receiver", func() { + + BeforeEach(func() { + if syslogDeployment, err = e2e.DeploySyslogReceiver(corev1.ProtocolTCP); err != nil { + Fail(fmt.Sprintf("Unable to deploy syslog receiver: %v", err)) + } + cr := helpers.NewClusterLogging(helpers.ComponentTypeCollector) + if err := e2e.CreateClusterLogging(cr); err != nil { + Fail(fmt.Sprintf("Unable to create an instance of cluster logging: %v", err)) + } + forwarding := &logforward.LogForwarding{ + TypeMeta: metav1.TypeMeta{ + Kind: logforward.LogForwardingKind, + APIVersion: logforward.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "instance", + }, + Spec: logforward.ForwardingSpec{ + Outputs: []logforward.OutputSpec{ + logforward.OutputSpec{ + Name: syslogDeployment.ObjectMeta.Name, + Type: logforward.OutputTypeSyslog, + Endpoint: fmt.Sprintf("%s.%s.svc:24224", syslogDeployment.ObjectMeta.Name, syslogDeployment.Namespace), + }, + }, + Pipelines: []logforward.PipelineSpec{ + logforward.PipelineSpec{ + Name: "test-infra", + OutputRefs: []string{syslogDeployment.ObjectMeta.Name}, + SourceType: logforward.LogSourceTypeInfra, + }, + }, + }, + } + if err := e2e.CreateLogForwarding(forwarding); err != nil { + Fail(fmt.Sprintf("Unable to create an instance of logforwarding: %v", err)) + } + components := []helpers.LogComponentType{helpers.ComponentTypeCollector} + for _, component := range components { + if err := e2e.WaitFor(component); err != nil { + Fail(fmt.Sprintf("Failed waiting for component %s to be ready: %v", component, err)) + } + } + }) + + It("should send logs to the forward.Output logstore", func() { + Expect(e2e.LogStore.HasInfraStructureLogs(helpers.DefaultWaitForLogsTimeout)).To(BeTrue(), "Expected to find stored infrastructure logs") + }) + }) + + Context("and udp receiver", func() { + + BeforeEach(func() { + if syslogDeployment, err = e2e.DeploySyslogReceiver(corev1.ProtocolUDP); err != nil { + Fail(fmt.Sprintf("Unable to deploy syslog receiver: %v", err)) + } + cr := helpers.NewClusterLogging(helpers.ComponentTypeCollector) + if err := e2e.CreateClusterLogging(cr); err != nil { + Fail(fmt.Sprintf("Unable to create an instance of cluster logging: %v", err)) + } + forwarding := &logforward.LogForwarding{ + TypeMeta: metav1.TypeMeta{ + Kind: logforward.LogForwardingKind, + APIVersion: logforward.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "instance", + }, + Spec: logforward.ForwardingSpec{ + Outputs: []logforward.OutputSpec{ + logforward.OutputSpec{ + Name: syslogDeployment.ObjectMeta.Name, + Type: logforward.OutputTypeSyslog, + Endpoint: fmt.Sprintf("udp://%s.%s.svc:24224", syslogDeployment.ObjectMeta.Name, syslogDeployment.Namespace), + }, + }, + Pipelines: []logforward.PipelineSpec{ + logforward.PipelineSpec{ + Name: "test-infra", + OutputRefs: []string{syslogDeployment.ObjectMeta.Name}, + SourceType: logforward.LogSourceTypeInfra, + }, + }, + }, + } + if err := e2e.CreateLogForwarding(forwarding); err != nil { + Fail(fmt.Sprintf("Unable to create an instance of logforwarding: %v", err)) + } + components := []helpers.LogComponentType{helpers.ComponentTypeCollector} + for _, component := range components { + if err := e2e.WaitFor(component); err != nil { + Fail(fmt.Sprintf("Failed waiting for component %s to be ready: %v", component, err)) + } + } + }) + + It("should send 
logs to the forward.Output logstore", func() { + Expect(e2e.LogStore.HasInfraStructureLogs(helpers.DefaultWaitForLogsTimeout)).To(BeTrue(), "Expected to find stored infrastructure logs") + }) + }) + }) + + AfterEach(func() { + e2e.Cleanup() + }) + + }) + +}) diff --git a/test/e2e/logforwarding/syslog/logforwarding_suite_test.go b/test/e2e/logforwarding/syslog/logforwarding_suite_test.go new file mode 100644 index 0000000000..7d044173c3 --- /dev/null +++ b/test/e2e/logforwarding/syslog/logforwarding_suite_test.go @@ -0,0 +1,13 @@ +package fluent + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestLogForwarding(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "LogForwarding Integration E2E Suite - Forward to syslog") +}
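
Reviewer note, not part of the patch: the endpoint handling introduced above in fluent_conf.go and syslog_conf.go is small enough to exercise standalone. Below is a minimal sketch, assuming the semantics shown in those hunks: an optional tcp:// or udp:// scheme is stripped before the host:port split, the port defaults to "9200" when absent, and a udp scheme selects the unbuffered "syslog" fluentd plugin instead of the default "syslog_buffered". The helper names (protocolOf, hostPort) are illustrative, not the patch's method names.

package main

import (
	"fmt"
	"strings"
)

const protocolSeparator = "://"

// protocolOf returns the scheme of an endpoint such as "udp://host:port",
// or "" when no scheme is present (mirrors outputLabelConf.Protocol).
func protocolOf(endpoint string) string {
	if i := strings.Index(endpoint, protocolSeparator); i != -1 {
		return endpoint[:i]
	}
	return ""
}

// stripProtocol drops a leading scheme, if any (mirrors the patch's stripProtocol).
func stripProtocol(endpoint string) string {
	if i := strings.Index(endpoint, protocolSeparator); i != -1 {
		return endpoint[i+len(protocolSeparator):]
	}
	return endpoint
}

// hostPort splits "host[:port]", defaulting the port to "9200" just as the
// Port() method in fluent_conf.go does.
func hostPort(endpoint string) (string, string) {
	parts := strings.Split(stripProtocol(endpoint), ":")
	if len(parts) == 1 {
		return parts[0], "9200"
	}
	return parts[0], parts[1]
}

func main() {
	for _, ep := range []string{
		"sl.svc.messaging.cluster.local:9654",
		"tcp://sl.svc.messaging.cluster.local:9654",
		"udp://sl.svc.messaging.cluster.local:9654",
	} {
		// TCP-backed "syslog_buffered" is the default; udp switches plugins.
		plugin := "syslog_buffered"
		if protocolOf(ep) == "udp" {
			plugin = "syslog"
		}
		host, port := hostPort(ep)
		fmt.Printf("%-45s -> plugin=%-15s host=%s port=%s\n", ep, plugin, host, port)
	}
}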