From 02279f6d948f37e11f710eb9d9da952dc8782653 Mon Sep 17 00:00:00 2001 From: dry923 Date: Fri, 26 Mar 2021 13:51:22 -0400 Subject: [PATCH] Allow passing a custom elasticsearch backend url to use for log forwarding --- workloads/logging/README.md | 5 + workloads/logging/deploy_logging_stack.sh | 31 +++--- workloads/logging/env.sh | 1 + .../logging/files/logging-stack_custom_es.yml | 100 ++++++++++++++++++ 4 files changed, 125 insertions(+), 12 deletions(-) create mode 100644 workloads/logging/files/logging-stack_custom_es.yml diff --git a/workloads/logging/README.md b/workloads/logging/README.md index a9290928..4dc79bdd 100644 --- a/workloads/logging/README.md +++ b/workloads/logging/README.md @@ -17,6 +17,11 @@ Ensure to have `KUBECONFIG` set to the proper path to your desired cluster. Default: `4.6` Update channel for the Elasticsearch and Cluster logging operators. +### CUSTOM_ES_URL +Default: "" +The external Elasticsearch URL to direct logs to +NOTE: If set, the internal Elasticsearch will not be configured + ### ES_NODE_COUNT Default: 3 Number of Elasticsearch nodes. 
diff --git a/workloads/logging/deploy_logging_stack.sh b/workloads/logging/deploy_logging_stack.sh index 3ef91c9c..d9499e53 100755 --- a/workloads/logging/deploy_logging_stack.sh +++ b/workloads/logging/deploy_logging_stack.sh @@ -7,7 +7,7 @@ source env.sh # Logging format log() { - echo ${bold}$(date -u): ${@}${normal} + echo -e "\033[1m$(date "+%d-%m-%YT%H:%M:%S") ${@}\033[0m" } # Check if oc client is installed @@ -28,8 +28,13 @@ fi function install() { # create cluster logging and elasticsearch resources - log "Creating cluster logging and elastisearch resources" - envsubst < ./files/logging-stack.yml | oc create -f - + if [[ $CUSTOM_ES_URL != "" ]]; then + log "Creating cluster logging with custom elasticsearch backend" + envsubst < ./files/logging-stack_custom_es.yml | oc create -f - + else + log "Creating cluster logging and elastisearch resources" + envsubst < ./files/logging-stack.yml | oc create -f - + fi } wait_time=0 @@ -71,13 +76,15 @@ while [[ $( oc get daemonset.apps/fluentd -n openshift-logging -o=jsonpath='{.st exit 1 fi done -log "Logging stack including Elasticsearch, Fluend and Kibana are up" +log "Logging stack is up" -# Expose the elasticsearch service -echo "Exposing the elasticsearch service by creating a route" -oc extract secret/elasticsearch --to=/tmp/ --keys=admin-ca --confirm -n openshift-logging -cp files/elasticsearch-route.yml /tmp/elasticsearch-route.yml -cat /tmp/admin-ca | sed -e "s/^/ /" >> /tmp/elasticsearch-route.yml -oc create -f /tmp/elasticsearch-route.yml -n openshift-logging -routeES=`oc get route elasticsearch -n openshift-logging -o jsonpath={.spec.host}` -echo "Elasticsearch is exposed at $routeES, bearer token is needed to access it" +if [[ $CUSTOM_ES_URL == "" ]]; then + # Expose the elasticsearch service + log "Exposing the elasticsearch service by creating a route" + oc extract secret/elasticsearch --to=/tmp/ --keys=admin-ca --confirm -n openshift-logging + cp files/elasticsearch-route.yml 
/tmp/elasticsearch-route.yml + cat /tmp/admin-ca | sed -e "s/^/ /" >> /tmp/elasticsearch-route.yml + oc create -f /tmp/elasticsearch-route.yml -n openshift-logging + routeES=`oc get route elasticsearch -n openshift-logging -o jsonpath={.spec.host}` + log "Elasticsearch is exposed at $routeES, bearer token is needed to access it" +fi diff --git a/workloads/logging/env.sh b/workloads/logging/env.sh index a6eee6c8..c4a328e7 100755 --- a/workloads/logging/env.sh +++ b/workloads/logging/env.sh @@ -2,6 +2,7 @@ # Vars and respective defaults export CHANNEL=${CHANNEL:=4.6} +export CUSTOM_ES_URL=${CUSTOM_ES_URL:=""} export ES_NODE_COUNT=${ES_NODE_COUNT:=3} export ES_STORAGE_CLASS=${ES_STORAGE_CLASS:=gp2} export ES_STORAGE_SIZE=${ES_STORAGE_SIZE:=100G} diff --git a/workloads/logging/files/logging-stack_custom_es.yml b/workloads/logging/files/logging-stack_custom_es.yml new file mode 100644 index 00000000..c107f9af --- /dev/null +++ b/workloads/logging/files/logging-stack_custom_es.yml @@ -0,0 +1,100 @@ +--- + +apiVersion: v1 +kind: Namespace +metadata: + name: openshift-operators-redhat + annotations: + openshift.io/node-selector: "" + labels: + openshift.io/cluster-monitoring: "true" + +--- +apiVersion: v1 +kind: Namespace +metadata: + name: openshift-logging + annotations: + openshift.io/node-selector: "" + labels: + openshift.io/cluster-monitoring: "true" + +--- +apiVersion: operators.coreos.com/v1 +kind: OperatorGroup +metadata: + name: openshift-operators-redhat + namespace: openshift-operators-redhat +spec: {} + +--- +apiVersion: operators.coreos.com/v1 +kind: OperatorGroup +metadata: + name: cluster-logging + namespace: openshift-logging +spec: + targetNamespaces: + - openshift-logging + +--- +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: cluster-logging + namespace: openshift-logging +spec: + channel: "$CHANNEL" + name: cluster-logging + source: redhat-operators + sourceNamespace: openshift-marketplace + +--- +apiVersion: 
logging.openshift.io/v1 +kind: "ClusterLogging" +metadata: + name: "instance" + namespace: "openshift-logging" +spec: + managementState: "Managed" + logStore: + type: "elasticsearch" + retentionPolicy: + application: + maxAge: 30d + infra: + maxAge: 30d + audit: + maxAge: 30d + curation: + type: "curator" + curator: + schedule: "30 3 * * *" + collection: + logs: + type: "fluentd" + fluentd: + resources: + limits: + memory: $FLUENTD_MEMORY_LIMITS + requests: + cpu: $FLUENTD_CPU_REQUESTS + memory: $FLUENTD_MEMORY_REQUESTS + +--- +apiVersion: logging.openshift.io/v1 +kind: ClusterLogForwarder +metadata: + namespace: openshift-logging + name: instance + labels: {} +spec: + outputs: + - name: elasticsearch-external + type: "elasticsearch" + url: $CUSTOM_ES_URL + pipelines: + - name: forward-logs + inputRefs: $FORWARD_LOGS + outputRefs: + - elasticsearch-external