From f8777f812394155b15196e5472c5cc856ec137f1 Mon Sep 17 00:00:00 2001
From: mck
Date: Tue, 22 Nov 2022 11:35:19 +0100
Subject: [PATCH] Add tests for parallel node collection, with parameterised
 tests per configuration file

---
 ds-collector-tests/cluster-dse-k8s.make       |  31 ++--
 .../cluster-one-node-vanilla-ssh-docker.make  |  41 +++--
 ds-collector-tests/cluster-vanilla-k8s.make   |  14 +-
 .../cluster-vanilla-ssh-docker.make           |  41 +++--
 .../test-collector-docker-parallel.conf       | 133 ++++++++++++++++
 .../test-collector-dse-k8s-parallel.conf.in   | 147 ++++++++++++++++++
 ...conf.in => test-collector-dse-k8s.conf.in} |   0
 .../test-collector-k8s-parallel.conf.in       | 133 ++++++++++++++++
 .../test-collector-ssh-parallel.conf          | 133 ++++++++++++++++
 9 files changed, 633 insertions(+), 40 deletions(-)
 create mode 100644 ds-collector-tests/test-collector-docker-parallel.conf
 create mode 100644 ds-collector-tests/test-collector-dse-k8s-parallel.conf.in
 rename ds-collector-tests/{test-collector-k8s-dse.conf.in => test-collector-dse-k8s.conf.in} (100%)
 create mode 100644 ds-collector-tests/test-collector-k8s-parallel.conf.in
 create mode 100644 ds-collector-tests/test-collector-ssh-parallel.conf

diff --git a/ds-collector-tests/cluster-dse-k8s.make b/ds-collector-tests/cluster-dse-k8s.make
index daf0a67..1ed16af 100755
--- a/ds-collector-tests/cluster-dse-k8s.make
+++ b/ds-collector-tests/cluster-dse-k8s.make
@@ -1,16 +1,25 @@
+# the test target will execute once for every test-collector-dse-k8s*.conf.in configuration file found
+CONFIGURATIONS := $(shell ls test-collector-dse-k8s*.conf.in)
+TESTS := $(addprefix test_,${CONFIGURATIONS})
 
-all: setup test teardown
+all: setup ${TESTS} teardown
 
-
-test:
+${TESTS}: test_%:
 	# ds-collector over k8s
-	cp test-collector-k8s-dse.conf.in /tmp/datastax/test-collector-k8s-dse.conf
-	echo "" >> /tmp/datastax/test-collector-k8s-dse.conf
-	echo "cqlshUsername=$$(kubectl -n cass-operator get secret cluster2-superuser -o yaml | grep " username" | awk -F" " '{print $$2}' | base64 -d && echo "")" >> /tmp/datastax/test-collector-k8s-dse.conf
-	echo "cqlshPassword=$$(kubectl -n cass-operator get secret cluster2-superuser -o yaml | grep " password" | awk -F" " '{print $$2}' | base64 -d && echo "")" >> /tmp/datastax/test-collector-k8s-dse.conf
-	./collector/ds-collector -T -f /tmp/datastax/test-collector-k8s-dse.conf -n cluster2-dc1-default-sts-0
-	./collector/ds-collector -T -p -f /tmp/datastax/test-collector-k8s-dse.conf -n cluster2-dc1-default-sts-0
-	./collector/ds-collector -X -f /tmp/datastax/test-collector-k8s-dse.conf -n cluster2-dc1-default-sts-0
+	@echo "\n Testing $* \n"
+	cp $* /tmp/datastax/test-collector-dse-k8s.conf
+	@echo "" >> /tmp/datastax/test-collector-dse-k8s.conf
+	@echo "git_branch=$$(git rev-parse --abbrev-ref HEAD)" >> /tmp/datastax/test-collector-dse-k8s.conf
+	@echo "git_sha=$$(git rev-parse HEAD)" >> /tmp/datastax/test-collector-dse-k8s.conf
+	echo "" >> /tmp/datastax/test-collector-dse-k8s.conf
+	@echo "git_branch=$$(git rev-parse --abbrev-ref HEAD)" >> /tmp/datastax/test-collector-dse-k8s.conf
+	@echo "git_sha=$$(git rev-parse HEAD)" >> /tmp/datastax/test-collector-dse-k8s.conf
+	echo "" >> /tmp/datastax/test-collector-dse-k8s.conf
+	echo "cqlshUsername=$$(kubectl -n cass-operator get secret cluster2-superuser -o yaml | grep " username" | awk -F" " '{print $$2}' | base64 -d && echo "")" >> /tmp/datastax/test-collector-dse-k8s.conf
+	echo "cqlshPassword=$$(kubectl -n cass-operator get secret cluster2-superuser -o yaml | grep " password" | awk -F" " '{print $$2}' | base64 -d && echo "")" >> /tmp/datastax/test-collector-dse-k8s.conf
+	./collector/ds-collector -T -f /tmp/datastax/test-collector-dse-k8s.conf -n cluster2-dc1-default-sts-0
+	./collector/ds-collector -T -p -f /tmp/datastax/test-collector-dse-k8s.conf -n cluster2-dc1-default-sts-0
+	./collector/ds-collector -X -f /tmp/datastax/test-collector-dse-k8s.conf -n cluster2-dc1-default-sts-0
 	if ! ls /tmp/datastax/ | grep -q ".tar.gz" ; then echo "Failed to generate artefacts in the K8s cluster "; ls -l /tmp/datastax/ ; exit 1 ; fi
@@ -42,8 +51,6 @@ setup:
 	kubectl -n cass-operator apply -f k8s-manifests/example-cassdc-minimal-dse.yaml
 	while (! kubectl -n cass-operator get pod | grep -q "cluster2-dc1-default-sts-0") || kubectl -n cass-operator get pod | grep -q "0/2" || kubectl -n cass-operator get pod | grep -q "1/2" ; do kubectl -n cass-operator get pod ; echo "waiting 60s…" ; sleep 60 ; done
-	@echo "git_branch=$$(git rev-parse --abbrev-ref HEAD)" >> test-collector-k8s-dse.conf
-	@echo "git_sha=$$(git rev-parse HEAD)" >> test-collector-k8s-dse.conf
 
 teardown:
 	kubectl delete cassdcs --all-namespaces --all
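The parameterisation above works through a static pattern rule: the shell glob fills CONFIGURATIONS with whatever matching files exist on disk, addprefix turns each file name into a test_<file> target, and inside the recipe $* expands back to the configuration file name. A minimal sketch of the same mechanism, outside this patch and with hypothetical file names:

    # every foo.conf.in found becomes its own target test_foo.conf.in
    CONFIGURATIONS := $(shell ls *.conf.in)
    TESTS := $(addprefix test_,${CONFIGURATIONS})

    all: ${TESTS}

    ${TESTS}: test_%:
    	@echo "running against configuration $*"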
diff --git a/ds-collector-tests/cluster-one-node-vanilla-ssh-docker.make b/ds-collector-tests/cluster-one-node-vanilla-ssh-docker.make
index 543dadc..4f47cea 100755
--- a/ds-collector-tests/cluster-one-node-vanilla-ssh-docker.make
+++ b/ds-collector-tests/cluster-one-node-vanilla-ssh-docker.make
@@ -1,24 +1,41 @@
+# the test targets will execute once for every test-collector-ssh*.conf and test-collector-docker*.conf configuration file found
+CONFIGURATIONS_SSH := $(shell ls test-collector-ssh*.conf)
+CONFIGURATIONS_DOCKER := $(shell ls test-collector-docker*.conf)
+TESTS_SSH := $(addprefix test_ssh_,${CONFIGURATIONS_SSH})
+TESTS_DOCKER := $(addprefix test_docker_,${CONFIGURATIONS_DOCKER})
 
-all: setup test teardown
+all: setup ${TESTS_SSH} ${TESTS_DOCKER} teardown
 
-
-test:
+${TESTS_SSH}: test_ssh_%:
 	# ds-collector over SSH
-	docker exec -t ds-collector-tests_bastion_1 /collector/ds-collector -v -T -f /ds-collector-tests/test-collector-ssh.conf -n ds-collector-tests_cassandra-00_1
-	docker exec -t ds-collector-tests_bastion_1 /collector/ds-collector -T -p -f /ds-collector-tests/test-collector-ssh.conf -n ds-collector-tests_cassandra-00_1
-	docker exec -t ds-collector-tests_bastion_1 /collector/ds-collector -X -f /ds-collector-tests/test-collector-ssh.conf -n ds-collector-tests_cassandra-00_1
+	@echo "\n Testing SSH $* \n"
+	docker exec -t ds-collector-tests_bastion_1 sh -c 'echo "" >> /ds-collector-tests/$*'
+	docker exec -t ds-collector-tests_bastion_1 sh -c 'echo "git_branch=$$(git rev-parse --abbrev-ref HEAD)" >> /ds-collector-tests/$*'
+	docker exec -t ds-collector-tests_bastion_1 sh -c 'echo "git_sha=$$(git rev-parse HEAD)" >> /ds-collector-tests/$*'
+	docker exec -t ds-collector-tests_bastion_1 sh -c 'echo "" >> /ds-collector-tests/$*'
+	docker exec -t ds-collector-tests_bastion_1 /collector/ds-collector -T -f /ds-collector-tests/$* -n ds-collector-tests_cassandra-00_1
+	docker exec -t ds-collector-tests_bastion_1 /collector/ds-collector -T -p -f /ds-collector-tests/$* -n ds-collector-tests_cassandra-00_1
+	docker exec -t ds-collector-tests_bastion_1 /collector/ds-collector -X -f /ds-collector-tests/$* -n ds-collector-tests_cassandra-00_1
 	# test archives exist
 	if ! ( docker exec ds-collector-tests_bastion_1 ls /tmp/datastax/ ) | grep -q ".tar.gz" ; then echo "Failed to generate artefacts in the SSH cluster" ; ( docker exec ds-collector-tests_bastion_1 ls /tmp/datastax/ ) ; exit 1 ; fi
 	# ds-collector over SSH with verbose mode
-	docker exec -t ds-collector-tests_bastion_1 /collector/ds-collector -v -T -f /ds-collector-tests/test-collector-ssh.conf -n ds-collector-tests_cassandra-00_1
-	docker exec -t ds-collector-tests_bastion_1 /collector/ds-collector -v -T -p -f /ds-collector-tests/test-collector-ssh.conf -n ds-collector-tests_cassandra-00_1
-	docker exec -t ds-collector-tests_bastion_1 /collector/ds-collector -v -X -f /ds-collector-tests/test-collector-ssh.conf -n ds-collector-tests_cassandra-00_1
+	@echo "\n Testing SSH verbose $* \n"
+	docker exec -t ds-collector-tests_bastion_1 /collector/ds-collector -v -T -f /ds-collector-tests/$* -n ds-collector-tests_cassandra-00_1
+	docker exec -t ds-collector-tests_bastion_1 /collector/ds-collector -v -T -p -f /ds-collector-tests/$* -n ds-collector-tests_cassandra-00_1
+	docker exec -t ds-collector-tests_bastion_1 /collector/ds-collector -v -X -f /ds-collector-tests/$* -n ds-collector-tests_cassandra-00_1
 	# test archives exist
 	if ! ( docker exec ds-collector-tests_bastion_1 ls /tmp/datastax/ ) | grep -q ".tar.gz" ; then echo "Failed to generate artefacts in the SSH cluster" ; ( docker exec ds-collector-tests_bastion_1 ls /tmp/datastax/ ) ; exit 1 ; fi
+
+${TESTS_DOCKER}: test_docker_%:
 	# ds-collector over docker
-	./collector/ds-collector -T -f test-collector-docker.conf -n ds-collector-tests_cassandra-00_1
-	./collector/ds-collector -T -p -f test-collector-docker.conf -n ds-collector-tests_cassandra-00_1
-	./collector/ds-collector -X -f test-collector-docker.conf -n ds-collector-tests_cassandra-00_1
+	@echo "\n Testing Docker $* \n"
+	@echo "" >> $*
+	@echo "git_branch=$$(git rev-parse --abbrev-ref HEAD)" >> $*
+	@echo "git_sha=$$(git rev-parse HEAD)" >> $*
+	echo "" >> $*
+	./collector/ds-collector -T -f $* -n ds-collector-tests_cassandra-00_1
+	./collector/ds-collector -T -p -f $* -n ds-collector-tests_cassandra-00_1
+	./collector/ds-collector -X -f $* -n ds-collector-tests_cassandra-00_1
 	# test archives exist
 	if ! ls /tmp/datastax/ | grep -q ".tar.gz" ; then echo "Failed to generate artefacts in the docker cluster " ; ls -l /tmp/datastax/ ; exit 1 ; fi
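Each generated test first stamps the configuration file it is about to use with the current git branch and commit, presumably so the produced artefacts can be traced back to the collector build under test. Evaluated by hand, the two appended values are simply (output shown is illustrative; the real branch and sha will differ):

    $ git rev-parse --abbrev-ref HEAD
    main
    $ git rev-parse HEAD
    f8777f812394155b15196e5472c5cc856ec137f1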
diff --git a/ds-collector-tests/cluster-vanilla-k8s.make b/ds-collector-tests/cluster-vanilla-k8s.make
index 162fe66..44d0550 100755
--- a/ds-collector-tests/cluster-vanilla-k8s.make
+++ b/ds-collector-tests/cluster-vanilla-k8s.make
@@ -1,10 +1,16 @@
+# the test target will execute once for every test-collector-k8s*.conf.in configuration file found
+CONFIGURATIONS := $(shell ls test-collector-k8s*.conf.in)
+TESTS := $(addprefix test_,${CONFIGURATIONS})
 
-all: setup test teardown
+all: setup ${TESTS} teardown
 
-
-test:
+${TESTS}: test_%:
 	# ds-collector over k8s
-	cp test-collector-k8s.conf.in /tmp/datastax/test-collector-k8s.conf
+	@echo "\n Testing $* \n"
+	cp $* /tmp/datastax/test-collector-k8s.conf
+	@echo "" >> /tmp/datastax/test-collector-k8s.conf
+	@echo "git_branch=$$(git rev-parse --abbrev-ref HEAD)" >> /tmp/datastax/test-collector-k8s.conf
+	@echo "git_sha=$$(git rev-parse HEAD)" >> /tmp/datastax/test-collector-k8s.conf
 	echo "" >> /tmp/datastax/test-collector-k8s.conf
 	echo "cqlshUsername=$$(kubectl -n cass-operator get secret cluster1-superuser -o yaml | grep " username" | awk -F" " '{print $$2}' | base64 -d && echo "")" >> /tmp/datastax/test-collector-k8s.conf
 	echo "cqlshPassword=$$(kubectl -n cass-operator get secret cluster1-superuser -o yaml | grep " password" | awk -F" " '{print $$2}' | base64 -d && echo "")" >> /tmp/datastax/test-collector-k8s.conf
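The unchanged cqlshUsername/cqlshPassword lines show how the superuser credentials are obtained: they are read out of the cass-operator secret and base64-decoded at test time. The same pipeline, run on its own with the secret and namespace names used above (the doubled $$ is only needed inside the makefile):

    # print the username stored in the cluster1-superuser secret
    kubectl -n cass-operator get secret cluster1-superuser -o yaml \
        | grep " username" | awk -F" " '{print $2}' | base64 -d && echo ""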
diff --git a/ds-collector-tests/cluster-vanilla-ssh-docker.make b/ds-collector-tests/cluster-vanilla-ssh-docker.make
index f675b24..ac8bad3 100755
--- a/ds-collector-tests/cluster-vanilla-ssh-docker.make
+++ b/ds-collector-tests/cluster-vanilla-ssh-docker.make
@@ -1,24 +1,41 @@
+# the test targets will execute once for every test-collector-ssh*.conf and test-collector-docker*.conf configuration file found
+CONFIGURATIONS_SSH := $(shell ls test-collector-ssh*.conf)
+CONFIGURATIONS_DOCKER := $(shell ls test-collector-docker*.conf)
+TESTS_SSH := $(addprefix test_ssh_,${CONFIGURATIONS_SSH})
+TESTS_DOCKER := $(addprefix test_docker_,${CONFIGURATIONS_DOCKER})
 
-all: setup test teardown
+all: setup ${TESTS_SSH} ${TESTS_DOCKER} teardown
 
-
-test:
+${TESTS_SSH}: test_ssh_%:
 	# ds-collector over SSH
-	docker exec -t ds-collector-tests_bastion_1 /collector/ds-collector -T -f /ds-collector-tests/test-collector-ssh.conf -n ds-collector-tests_cassandra-00_1
-	docker exec -t ds-collector-tests_bastion_1 /collector/ds-collector -T -p -f /ds-collector-tests/test-collector-ssh.conf -n ds-collector-tests_cassandra-00_1
-	docker exec -t ds-collector-tests_bastion_1 /collector/ds-collector -X -f /ds-collector-tests/test-collector-ssh.conf -n ds-collector-tests_cassandra-00_1
+	@echo "\n Testing SSH $* \n"
+	docker exec -t ds-collector-tests_bastion_1 sh -c 'echo "" >> /ds-collector-tests/$*'
+	docker exec -t ds-collector-tests_bastion_1 sh -c 'echo "git_branch=$$(git rev-parse --abbrev-ref HEAD)" >> /ds-collector-tests/$*'
+	docker exec -t ds-collector-tests_bastion_1 sh -c 'echo "git_sha=$$(git rev-parse HEAD)" >> /ds-collector-tests/$*'
+	docker exec -t ds-collector-tests_bastion_1 sh -c 'echo "" >> /ds-collector-tests/$*'
+	docker exec -t ds-collector-tests_bastion_1 /collector/ds-collector -T -f /ds-collector-tests/$* -n ds-collector-tests_cassandra-00_1
+	docker exec -t ds-collector-tests_bastion_1 /collector/ds-collector -T -p -f /ds-collector-tests/$* -n ds-collector-tests_cassandra-00_1
+	docker exec -t ds-collector-tests_bastion_1 /collector/ds-collector -X -f /ds-collector-tests/$* -n ds-collector-tests_cassandra-00_1
 	# test archives exist
 	if ! ( docker exec ds-collector-tests_bastion_1 ls /tmp/datastax/ ) | grep -q ".tar.gz" ; then echo "Failed to generate artefacts in the SSH cluster" ; ( docker exec ds-collector-tests_bastion_1 ls /tmp/datastax/ ) ; exit 1 ; fi
 	# ds-collector over SSH with verbose mode
-	docker exec -t ds-collector-tests_bastion_1 /collector/ds-collector -v -T -f /ds-collector-tests/test-collector-ssh.conf -n ds-collector-tests_cassandra-00_1
-	docker exec -t ds-collector-tests_bastion_1 /collector/ds-collector -v -T -p -f /ds-collector-tests/test-collector-ssh.conf -n ds-collector-tests_cassandra-00_1
-	docker exec -t ds-collector-tests_bastion_1 /collector/ds-collector -v -X -f /ds-collector-tests/test-collector-ssh.conf -n ds-collector-tests_cassandra-00_1
+	@echo "\n Testing SSH verbose $* \n"
+	docker exec -t ds-collector-tests_bastion_1 /collector/ds-collector -v -T -f /ds-collector-tests/$* -n ds-collector-tests_cassandra-00_1
+	docker exec -t ds-collector-tests_bastion_1 /collector/ds-collector -v -T -p -f /ds-collector-tests/$* -n ds-collector-tests_cassandra-00_1
+	docker exec -t ds-collector-tests_bastion_1 /collector/ds-collector -v -X -f /ds-collector-tests/$* -n ds-collector-tests_cassandra-00_1
 	# test archives exist
 	if ! ( docker exec ds-collector-tests_bastion_1 ls /tmp/datastax/ ) | grep -q ".tar.gz" ; then echo "Failed to generate artefacts in the SSH cluster" ; ( docker exec ds-collector-tests_bastion_1 ls /tmp/datastax/ ) ; exit 1 ; fi
+
+${TESTS_DOCKER}: test_docker_%:
 	# ds-collector over docker
-	./collector/ds-collector -T -f test-collector-docker.conf -n ds-collector-tests_cassandra-00_1
-	./collector/ds-collector -T -p -f test-collector-docker.conf -n ds-collector-tests_cassandra-00_1
-	./collector/ds-collector -X -f test-collector-docker.conf -n ds-collector-tests_cassandra-00_1
+	@echo "\n Testing Docker $* \n"
+	@echo "" >> $*
+	@echo "git_branch=$$(git rev-parse --abbrev-ref HEAD)" >> $*
+	@echo "git_sha=$$(git rev-parse HEAD)" >> $*
+	echo "" >> $*
+	./collector/ds-collector -T -f $* -n ds-collector-tests_cassandra-00_1
+	./collector/ds-collector -T -p -f $* -n ds-collector-tests_cassandra-00_1
+	./collector/ds-collector -X -f $* -n ds-collector-tests_cassandra-00_1
 	# test archives exist
 	if ! ls /tmp/datastax/ | grep -q ".tar.gz" ; then echo "Failed to generate artefacts in the docker cluster " ; ls -l /tmp/datastax/ ; exit 1 ; fi
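With the glob-driven targets in place, the default all target still runs setup, one test per discovered configuration file, and teardown, while a single configuration can be exercised by naming its generated target. Illustrative invocations, assuming the makefiles are passed to make with -f as their non-standard .make names suggest:

    # full suite: setup, every test_ssh_*/test_docker_* target, teardown
    make -f cluster-vanilla-ssh-docker.make all

    # just one parameterised test
    make -f cluster-vanilla-ssh-docker.make test_ssh_test-collector-ssh-parallel.conf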
diff --git a/ds-collector-tests/test-collector-docker-parallel.conf b/ds-collector-tests/test-collector-docker-parallel.conf
new file mode 100644
index 0000000..5ebb5a0
--- /dev/null
+++ b/ds-collector-tests/test-collector-docker-parallel.conf
@@ -0,0 +1,133 @@
+#
+# tlp_collector configuration file
+#
+#
+
+# base cassandra log and configuration directories
+# update if you aren't using the default directory
+#
+#logHome="/var/log/cassandra"
+#configHome="/etc/cassandra"
+basedir="/tmp/datastax-ds-collector-docker-test/"
+
+# When the Cassandra nodes are inside docker containers
+# Use docker commands to connect to nodes instead of ssh
+# hostFile and hostName settings then need to use docker container IDs
+#
+# It is typical to also `skipSudo=true` (see below) on docker containers.
+#
+use_docker="true"
+
+# When the Cassandra nodes are inside kubernetes pods
+# Use kubectl commands to connect to nodes instead of ssh
+# hostFile and hostName settings then need to use k8s pod names
+#
+# It is typical to also `skipSudo=true` (see below)
+#
+#use_k8s="true"
+#k8s_namespace="default"
+
+# base ssh options, do not override defaults
+# without completing a connection test
+#
+#sshOptionHostkeyCheck="false"
+#sshOptionAgentForwarding="true"
+#sshOptionVerbose="false"
+#sshOptionConnectTimeout="true"
+#sshOptionConnectAttempts="true"
+
+# if an identity file to authenticate to the target nodes
+# is needed, specify it in sshIdentity
+#
+#sshIdentity=""
+
+# if using password authentication for ssh and scp, specify
+# it in sshPassword (requires sshpass command)
+#
+#sshPassword="root"
+
+# add additional binary path if needed
+#
+addPath="/opt/java/openjdk/bin"
+
+# specify additional ssh options as needed for your environment
+# complete a connection test before adding extra options
+#
+#sshArgs=""
+#scpArgs=""
+
+# if you want to specify a file containing a list of hosts
+# use the qualified path to the list in hostFile
+#
+#hostFile=""
+
+# if you only have a single target node you want to collect from
+# you can enter the connection details here
+# setting a hostFile above will take precedence
+#
+#hostName=""
+
+# if you want nodes to be collected in parallel, specify
+# all – for collecting all nodes at the same time, fastest and dangerous
+# rack – not yet supported
+# none – every node sequentially, the default behaviour
+#
+parallel_mode="all"
+
+# the user name to use when connecting to the target nodes,
+# the user should have sudo access.
+# commenting userName will set the default to root
+#
+#userName="ubuntu"
+
+# the provided issue id to reference the artifacts
+# a generic issueId will be generated if not provided here
+#
+issueId="TEST_DS_COLLECTOR-001"
+
+# the JMX port to connect to when gathering metrics via JMX
+# this will be the value that JMX_PORT is set to in the cassandra-env.sh file
+#
+#jmxPort="7199"
+#jmxUsername=""
+#jmxPassword=""
+
+# skip iostat and vmstat collection
+#
+#skipStat="true"
+
+# skip any calls that require sudo
+#
+#skipSudo="true"
+
+# skip pushing artifacts to s3
+#
+skipS3="true"
+
+# uploads are encrypted by default
+#
+encrypt_uploads="false"
+
+# skip deleting artifact from central host after successful push to s3
+# this will keep a local copy of the artifact on the central host
+#
+keepArtifact="true"
+
+# s3 key
+# default key provided, only use to override
+#
+#keyId=""
+#keySecret=""
+
+# s3 auth
+# default is to use s3Auth to upload
+#
+
+# cqlsh authentication
+# Used to extract the schema
+#cqlshUsername=""
+#cqlshPassword=""
+
+# cqlsh SSL encryption
+#
+#cqlshSSL="false"
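Because the docker targets glob for test-collector-docker*.conf, a further variation only needs a new file dropped alongside this one; no makefile change is required. A hypothetical example, reusing this config but switching back to sequential collection:

    # picked up automatically by CONFIGURATIONS_DOCKER := $(shell ls test-collector-docker*.conf)
    cp test-collector-docker-parallel.conf test-collector-docker-sequential.conf
    sed -i 's/^parallel_mode="all"/parallel_mode="none"/' test-collector-docker-sequential.conf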
diff --git a/ds-collector-tests/test-collector-dse-k8s-parallel.conf.in b/ds-collector-tests/test-collector-dse-k8s-parallel.conf.in
new file mode 100644
index 0000000..0d96bdd
--- /dev/null
+++ b/ds-collector-tests/test-collector-dse-k8s-parallel.conf.in
@@ -0,0 +1,147 @@
+#
+# tlp_collector configuration file
+#
+#
+
+# base cassandra log and configuration directories
+# update if you aren't using the default directory
+#
+#logHome="/var/log/cassandra"
+#configHome="/etc/cassandra"
+basedir="/tmp/datastax-ds-collector-k8s-test/"
+
+# When the Cassandra nodes are inside docker containers
+# Use docker commands to connect to nodes instead of ssh
+# hostFile and hostName settings then need to use docker container IDs
+#
+# It is typical to also `skipSudo=true` (see below) on docker containers.
+#
+#use_docker="true"
+
+# When the Cassandra nodes are inside kubernetes pods
+# Use kubectl commands to connect to nodes instead of ssh
+# hostFile and hostName settings then need to use k8s pod names
+#
+# It is typical to also `skipSudo=true` (see below)
+#
+use_k8s="true"
+k8s_namespace="cass-operator"
+
+# base ssh options, do not override defaults
+# without completing a connection test
+#
+#sshOptionHostkeyCheck="false"
+#sshOptionAgentForwarding="true"
+#sshOptionVerbose="false"
+#sshOptionConnectTimeout="true"
+#sshOptionConnectAttempts="true"
+
+# if an identity file to authenticate to the target nodes
+# is needed, specify it in sshIdentity
+#
+#sshIdentity=""
+
+# if using password authentication for ssh and scp, specify
+# it in sshPassword (requires sshpass command)
+#
+#sshPassword="root"
+
+# add additional binary path if needed
+#
+addPath="/opt/java/openjdk/bin"
+
+# specify additional ssh options as needed for your environment
+# complete a connection test before adding extra options
+#
+#sshArgs=""
+#scpArgs=""
+
+# if you want to specify a file containing a list of hosts
+# use the qualified path to the list in hostFile
+#
+#hostFile=""
+
+# if you only have a single target node you want to collect from
+# you can enter the connection details here
+# setting a hostFile above will take precedence
+#
+#hostName=""
+
+# if you want nodes to be collected in parallel, specify
+# all – for collecting all nodes at the same time, fastest and dangerous
+# rack – not yet supported
+# none – every node sequentially, the default behaviour
+#
+parallel_mode="all"
+
+# the user name to use when connecting to the target nodes,
+# the user should have sudo access.
+# commenting userName will set the default to root
+#
+#userName="ubuntu"
+
+# the provided issue id to reference the artifacts
+# a generic issueId will be generated if not provided here
+#
+issueId="TEST_DS_COLLECTOR-001"
+
+# the JMX port to connect to when gathering metrics via JMX
+# this will be the value that JMX_PORT is set to in the cassandra-env.sh file
+#
+#jmxPort="7199"
+#jmxUsername=""
+#jmxPassword=""
+
+# skip iostat and vmstat collection
+#
+#skipStat="true"
+
+# skip any calls that require sudo
+#
+skipSudo="true"
+
+# skip pushing artifacts to s3
+#
+skipS3="true"
+
+# uploads are encrypted by default
+#
+encrypt_uploads="false"
+
+# skip deleting artifact from central host after successful push to s3
+# this will keep a local copy of the artifact on the central host
+#
+keepArtifact="true"
+
+# s3 key
+# default key provided, only use to override
+#
+#keyId=""
+#keySecret=""
+
+# s3 auth
+# default is to use s3Auth to upload
+#
+
+# cqlsh authentication
+# Used to extract the schema
+cqlsh_host="localhost"
+#cqlshUsername=""
+#cqlshPassword=""
+
+# cqlsh SSL encryption
+#
+#cqlshSSL="false"
+
+#
+# DSE Options
+#
+# When enabling dse mode, make sure configHome and logHome are correct for the dse installation
+#
+is_dse="true"
+
+# Set to DSE tarball install root directory
+dse_root_dir="/opt/dse"
+
+# dsetool_options - options to pass to dsetool
+#dt_opts=""
diff --git a/ds-collector-tests/test-collector-k8s-dse.conf.in b/ds-collector-tests/test-collector-dse-k8s.conf.in
similarity index 100%
rename from ds-collector-tests/test-collector-k8s-dse.conf.in
rename to ds-collector-tests/test-collector-dse-k8s.conf.in
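The rename from test-collector-k8s-dse.conf.in to test-collector-dse-k8s.conf.in presumably exists so that the pre-existing DSE configuration also matches the new test-collector-dse-k8s*.conf.in glob and keeps being tested alongside the parallel variant added above:

    $ ls test-collector-dse-k8s*.conf.in
    test-collector-dse-k8s-parallel.conf.in
    test-collector-dse-k8s.conf.in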
diff --git a/ds-collector-tests/test-collector-k8s-parallel.conf.in b/ds-collector-tests/test-collector-k8s-parallel.conf.in
new file mode 100644
index 0000000..1963663
--- /dev/null
+++ b/ds-collector-tests/test-collector-k8s-parallel.conf.in
@@ -0,0 +1,133 @@
+#
+# tlp_collector configuration file
+#
+#
+
+# base cassandra log and configuration directories
+# update if you aren't using the default directory
+#
+#logHome="/var/log/cassandra"
+#configHome="/etc/cassandra"
+basedir="/tmp/datastax-ds-collector-k8s-test/"
+
+# When the Cassandra nodes are inside docker containers
+# Use docker commands to connect to nodes instead of ssh
+# hostFile and hostName settings then need to use docker container IDs
+#
+# It is typical to also `skipSudo=true` (see below) on docker containers.
+#
+#use_docker="true"
+
+# When the Cassandra nodes are inside kubernetes pods
+# Use kubectl commands to connect to nodes instead of ssh
+# hostFile and hostName settings then need to use k8s pod names
+#
+# It is typical to also `skipSudo=true` (see below)
+#
+use_k8s="true"
+k8s_namespace="cass-operator"
+
+# base ssh options, do not override defaults
+# without completing a connection test
+#
+#sshOptionHostkeyCheck="false"
+#sshOptionAgentForwarding="true"
+#sshOptionVerbose="false"
+#sshOptionConnectTimeout="true"
+#sshOptionConnectAttempts="true"
+
+# if an identity file to authenticate to the target nodes
+# is needed, specify it in sshIdentity
+#
+#sshIdentity=""
+
+# if using password authentication for ssh and scp, specify
+# it in sshPassword (requires sshpass command)
+#
+#sshPassword="root"
+
+# add additional binary path if needed
+#
+addPath="/opt/java/openjdk/bin"
+
+# specify additional ssh options as needed for your environment
+# complete a connection test before adding extra options
+#
+#sshArgs=""
+#scpArgs=""
+
+# if you want to specify a file containing a list of hosts
+# use the qualified path to the list in hostFile
+#
+#hostFile=""
+
+# if you only have a single target node you want to collect from
+# you can enter the connection details here
+# setting a hostFile above will take precedence
+#
+#hostName=""
+
+# if you want nodes to be collected in parallel, specify
+# all – for collecting all nodes at the same time, fastest and dangerous
+# rack – not yet supported
+# none – every node sequentially, the default behaviour
+#
+parallel_mode="all"
+
+# the user name to use when connecting to the target nodes,
+# the user should have sudo access.
+# commenting userName will set the default to root
+#
+#userName="ubuntu"
+
+# the provided issue id to reference the artifacts
+# a generic issueId will be generated if not provided here
+#
+issueId="TEST_DS_COLLECTOR-001"
+
+# the JMX port to connect to when gathering metrics via JMX
+# this will be the value that JMX_PORT is set to in the cassandra-env.sh file
+#
+#jmxPort="7199"
+#jmxUsername=""
+#jmxPassword=""
+
+# skip iostat and vmstat collection
+#
+#skipStat="true"
+
+# skip any calls that require sudo
+#
+skipSudo="true"
+
+# skip pushing artifacts to s3
+#
+skipS3="true"
+
+# uploads are encrypted by default
+#
+encrypt_uploads="false"
+
+# skip deleting artifact from central host after successful push to s3
+# this will keep a local copy of the artifact on the central host
+#
+keepArtifact="true"
+
+# s3 key
+# default key provided, only use to override
+#
+#keyId=""
+#keySecret=""
+
+# s3 auth
+# default is to use s3Auth to upload
+#
+
+# cqlsh authentication
+# Used to extract the schema
+#cqlshUsername=""
+#cqlshPassword=""
+
+# cqlsh SSL encryption
+#
+#cqlshSSL="false"
diff --git a/ds-collector-tests/test-collector-ssh-parallel.conf b/ds-collector-tests/test-collector-ssh-parallel.conf
new file mode 100644
index 0000000..c8ed7dc
--- /dev/null
+++ b/ds-collector-tests/test-collector-ssh-parallel.conf
@@ -0,0 +1,133 @@
+#
+# tlp_collector configuration file
+#
+#
+
+# base cassandra log and configuration directories
+# update if you aren't using the default directory
+#
+#logHome="/var/log/cassandra"
+#configHome="/etc/cassandra"
+
+# When the Cassandra nodes are inside docker containers
+# Use docker commands to connect to nodes instead of ssh
+# hostFile and hostName settings then need to use docker container IDs
+#
+# It is typical to also `skipSudo=true` (see below) on docker containers.
+#
+#use_docker="true"
+
+# When the Cassandra nodes are inside kubernetes pods
+# Use kubectl commands to connect to nodes instead of ssh
+# hostFile and hostName settings then need to use k8s pod names
+#
+# It is typical to also `skipSudo=true` (see below)
+#
+#use_k8s="true"
+#k8s_namespace="default"
+
+# base ssh options, do not override defaults
+# without completing a connection test
+#
+#sshOptionHostkeyCheck="false"
+#sshOptionAgentForwarding="true"
+#sshOptionVerbose="false"
+#sshOptionConnectTimeout="true"
+#sshOptionConnectAttempts="true"
+
+# if an identity file to authenticate to the target nodes
+# is needed, specify it in sshIdentity
+#
+#sshIdentity=""
+
+# if using password authentication for ssh and scp, specify
+# it in sshPassword (requires sshpass command)
+#
+sshPassword="root"
+
+# add additional binary path if needed
+#
+addPath="/opt/java/openjdk/bin"
+
+# specify additional ssh options as needed for your environment
+# complete a connection test before adding extra options
+#
+sshArgs="-tt"
+#scpArgs="-tt"
+
+# if you want to specify a file containing a list of hosts
+# use the qualified path to the list in hostFile
+#
+#hostFile=""
+
+# if you only have a single target node you want to collect from
+# you can enter the connection details here
+# setting a hostFile above will take precedence
+#
+#hostName=""
+
+# if you want nodes to be collected in parallel, specify
+# all – for collecting all nodes at the same time, fastest and dangerous
+# rack – not yet supported
+# none – every node sequentially, the default behaviour
+#
+parallel_mode="all"
+
+
+# the user name to use when connecting to the target nodes,
+# the user should have sudo access.
+# commenting userName will set the default to root
+#
+#userName="ubuntu"
+
+# the provided issue id to reference the artifacts
+# a generic issueId will be generated if not provided here
+#
+issueId="TEST_DS_COLLECTOR-000"
+
+# the JMX port to connect to when gathering metrics via JMX
+# this will be the value that JMX_PORT is set to in the cassandra-env.sh file
+#
+#jmxPort="7199"
+#jmxUsername=""
+#jmxPassword=""
+
+# skip iostat and vmstat collection
+#
+#skipStat="true"
+
+# skip any calls that require sudo
+#
+#skipSudo="true"
+
+# skip pushing artifacts to s3
+#
+skipS3="true"
+
+# uploads are encrypted by default
+#
+encrypt_uploads="false"
+
+# skip deleting artifact from central host after successful push to s3
+# this will keep a local copy of the artifact on the central host
+#
+keepArtifact="true"
+
+# s3 key
+# default key provided, only use to override
+#
+#keyId=""
+#keySecret=""
+
+# s3 auth
+# default is to use s3Auth to upload
+#
+
+# cqlsh authentication
+# Used to extract the schema
+#cqlshUsername=""
+#cqlshPassword=""
+
+# cqlsh SSL encryption
+#
+#cqlshSSL="false"
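Taken together, each cluster-*.make file now discovers its matching configuration files and repeats the same collector checks once per file, so the existing sequential configs and these new parallel_mode="all" configs run in the same suite. A hypothetical end-to-end run of the DSE-on-k8s variant:

    # discovers test-collector-dse-k8s.conf.in and test-collector-dse-k8s-parallel.conf.in,
    # then runs setup, a test_<config> target for each, and teardown
    make -f cluster-dse-k8s.make all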