Commit

Allow passing a custom elasticsearch backend url to use for log forwarding
dry923 authored and rsevilla87 committed Apr 7, 2021
1 parent 2089a11 commit 02279f6
Showing 4 changed files with 125 additions and 12 deletions.
5 changes: 5 additions & 0 deletions workloads/logging/README.md
@@ -17,6 +17,11 @@ Ensure to have `KUBECONFIG` set to the proper path to your desired cluster.
Default: `4.6`
Update channel for the Elasticsearch and Cluster logging operators.

+### CUSTOM_ES_URL
+Default: ""
+The external Elasticsearch url to direct logs to
+NOTE: If set, internal ElasticSearch will not be configured
+
### ES_NODE_COUNT
Default: 3
Number of Elasticsearch nodes.
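For context on the new `CUSTOM_ES_URL` option documented above, a minimal usage sketch (the endpoint URL and the direct script invocation are illustrative assumptions, not part of this commit):

```bash
# Forward logs to an external Elasticsearch instead of deploying the internal one.
# The endpoint below is hypothetical; any reachable Elasticsearch URL would do.
export CUSTOM_ES_URL="https://external-es.example.com:9200"
cd workloads/logging
./deploy_logging_stack.sh
```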
31 changes: 19 additions & 12 deletions workloads/logging/deploy_logging_stack.sh
@@ -7,7 +7,7 @@ source env.sh

# Logging format
log() {
-  echo ${bold}$(date -u): ${@}${normal}
+  echo -e "\033[1m$(date "+%d-%m-%YT%H:%M:%S") ${@}\033[0m"
}

# Check if oc client is installed
@@ -28,8 +28,13 @@ fi

function install() {
# create cluster logging and elasticsearch resources
log "Creating cluster logging and elastisearch resources"
envsubst < ./files/logging-stack.yml | oc create -f -
if [[ $CUSTOM_ES_URL != "" ]]; then
log "Creating cluster logging with custom elasticsearch backend"
envsubst < ./files/logging-stack_custom_es.yml | oc create -f -
else
log "Creating cluster logging and elastisearch resources"
envsubst < ./files/logging-stack.yml | oc create -f -
fi
}

wait_time=0
@@ -71,13 +76,15 @@ while [[ $( oc get daemonset.apps/fluentd -n openshift-logging -o=jsonpath='{.st
exit 1
fi
done
log "Logging stack including Elasticsearch, Fluend and Kibana are up"
log "Logging stack is up"

-# Expose the elasticsearch service
-echo "Exposing the elasticsearch service by creating a route"
-oc extract secret/elasticsearch --to=/tmp/ --keys=admin-ca --confirm -n openshift-logging
-cp files/elasticsearch-route.yml /tmp/elasticsearch-route.yml
-cat /tmp/admin-ca | sed -e "s/^/ /" >> /tmp/elasticsearch-route.yml
-oc create -f /tmp/elasticsearch-route.yml -n openshift-logging
-routeES=`oc get route elasticsearch -n openshift-logging -o jsonpath={.spec.host}`
-echo "Elasticsearch is exposed at $routeES, bearer token is needed to access it"
+if [[ $CUSTOM_ES_URL == "" ]]; then
+  # Expose the elasticsearch service
+  log "Exposing the elasticsearch service by creating a route"
+  oc extract secret/elasticsearch --to=/tmp/ --keys=admin-ca --confirm -n openshift-logging
+  cp files/elasticsearch-route.yml /tmp/elasticsearch-route.yml
+  cat /tmp/admin-ca | sed -e "s/^/ /" >> /tmp/elasticsearch-route.yml
+  oc create -f /tmp/elasticsearch-route.yml -n openshift-logging
+  routeES=`oc get route elasticsearch -n openshift-logging -o jsonpath={.spec.host}`
+  log "Elasticsearch is exposed at $routeES, bearer token is needed to access it"
+fi
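Since the route-exposure block above is skipped when `CUSTOM_ES_URL` is set, a quick sanity check after the script finishes might look like the sketch below (resource names come from the manifests in this commit; the checks themselves are not part of the change):

```bash
# With CUSTOM_ES_URL set, the ClusterLogForwarder should exist and point at the
# external backend, while no internal Elasticsearch route should have been created.
oc get clusterlogforwarder instance -n openshift-logging -o yaml
oc get route elasticsearch -n openshift-logging || echo "no internal ES route (expected)"
```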
1 change: 1 addition & 0 deletions workloads/logging/env.sh
@@ -2,6 +2,7 @@

# Vars and respective defaults
export CHANNEL=${CHANNEL:=4.6}
+export CUSTOM_ES_URL=${CUSTOM_ES_URL:=""}
export ES_NODE_COUNT=${ES_NODE_COUNT:=3}
export ES_STORAGE_CLASS=${ES_STORAGE_CLASS:=gp2}
export ES_STORAGE_SIZE=${ES_STORAGE_SIZE:=100G}
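The env.sh defaults rely on bash's `${VAR:=default}` expansion; a small sketch of how that behaves (variable names reused from the file above, override values hypothetical):

```bash
# ${VAR:=value} assigns value only when VAR is unset or empty,
# so anything exported before sourcing env.sh wins.
unset CUSTOM_ES_URL ES_NODE_COUNT
export ES_NODE_COUNT=${ES_NODE_COUNT:=3}        # -> 3 (default applied)
export CUSTOM_ES_URL=${CUSTOM_ES_URL:=""}       # -> "" (internal ES path)

export CHANNEL="4.7"                            # hypothetical caller override
export CHANNEL=${CHANNEL:=4.6}                  # -> 4.7 (default skipped)
echo "$ES_NODE_COUNT '$CUSTOM_ES_URL' $CHANNEL"
```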
100 changes: 100 additions & 0 deletions workloads/logging/files/logging-stack_custom_es.yml
@@ -0,0 +1,100 @@
---

apiVersion: v1
kind: Namespace
metadata:
  name: openshift-operators-redhat
  annotations:
    openshift.io/node-selector: ""
  labels:
    openshift.io/cluster-monitoring: "true"

---
apiVersion: v1
kind: Namespace
metadata:
  name: openshift-logging
  annotations:
    openshift.io/node-selector: ""
  labels:
    openshift.io/cluster-monitoring: "true"

---
apiVersion: operators.coreos.com/v1
kind: OperatorGroup
metadata:
  name: openshift-operators-redhat
  namespace: openshift-operators-redhat
spec: {}

---
apiVersion: operators.coreos.com/v1
kind: OperatorGroup
metadata:
  name: cluster-logging
  namespace: openshift-logging
spec:
  targetNamespaces:
    - openshift-logging

---
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: cluster-logging
  namespace: openshift-logging
spec:
  channel: "$CHANNEL"
  name: cluster-logging
  source: redhat-operators
  sourceNamespace: openshift-marketplace

---
apiVersion: logging.openshift.io/v1
kind: "ClusterLogging"
metadata:
  name: "instance"
  namespace: "openshift-logging"
spec:
  managementState: "Managed"
  logStore:
    type: "elasticsearch"
    retentionPolicy:
      application:
        maxAge: 30d
      infra:
        maxAge: 30d
      audit:
        maxAge: 30d
  curation:
    type: "curator"
    curator:
      schedule: "30 3 * * *"
  collection:
    logs:
      type: "fluentd"
      fluentd:
        resources:
          limits:
            memory: $FLUENTD_MEMORY_LIMITS
          requests:
            cpu: $FLUENTD_CPU_REQUESTS
            memory: $FLUENTD_MEMORY_REQUESTS

---
apiVersion: logging.openshift.io/v1
kind: ClusterLogForwarder
metadata:
  namespace: openshift-logging
  name: instance
  labels: {}
spec:
  outputs:
    - name: elasticsearch-external
      type: "elasticsearch"
      url: $CUSTOM_ES_URL
  pipelines:
    - name: forward-logs
      inputRefs: $FORWARD_LOGS
      outputRefs:
        - elasticsearch-external

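Because the new manifest leaves `$CUSTOM_ES_URL`, `$FORWARD_LOGS`, and the fluentd resource variables to `envsubst`, it can be rendered locally before applying it. A sketch with hypothetical values (assuming `FORWARD_LOGS` expands to a list accepted by `inputRefs`):

```bash
# Hypothetical values for a dry-run render of the new manifest.
export CHANNEL="4.6"
export CUSTOM_ES_URL="https://external-es.example.com:9200"
export FORWARD_LOGS='["application", "infrastructure"]'
export FLUENTD_MEMORY_LIMITS="1Gi"
export FLUENTD_CPU_REQUESTS="500m"
export FLUENTD_MEMORY_REQUESTS="1Gi"

# Print the substituted YAML instead of piping it to `oc create -f -`.
envsubst < workloads/logging/files/logging-stack_custom_es.yml
```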