Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add simplified package checker #12

Open
wants to merge 19 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
19 commits
Select commit Hold shift + click to select a range
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions cvp_checks/fixtures/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,8 +6,10 @@
def local_salt_client():
return utils.init_salt_client()


nodes = utils.calculate_groups()


# Session-scoped parametrized fixture: runs a dependent test once per node
# group computed above by utils.calculate_groups(); each param is one
# group's node list, with the group name used as the pytest test ID.
@pytest.fixture(scope='session', params=nodes.values(), ids=nodes.keys())
def nodes_in_group(request):
    return request.param
55 changes: 51 additions & 4 deletions cvp_checks/global_config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -6,10 +6,12 @@
# Can be found on cfg* node using
# "salt-call pillar.get _param:salt_master_host"
# and "salt-call pillar.get _param:salt_master_port"
# or "salt-call pillar.get _param:jenkins_salt_api_url"
# SALT_USERNAME by default: salt
# It can be verified with "salt-call shadow.info salt"
# SALT_PASSWORD you can find on cfg* node using
# "grep -r salt_api_password /srv/salt/reclass/classes"
# "salt-call pillar.get _param:salt_api_password"
# or "grep -r salt_api_password /srv/salt/reclass/classes"
SALT_URL: <salt_url>
SALT_USERNAME: <salt_usr>
SALT_PASSWORD: <salt_pwd>
Expand All @@ -32,17 +34,62 @@ skipped_groups: [""]
# Groups can be defined using pillars.
# Uncomment this section to enable this.
# Otherwise groups will be discovered automatically
#groups: {cmp: 'nova:compute'}
# Tips:
# 1) you don't need to separate kvm and kvm_glusterfs nodes
# 2) Use I@pillar or mask like ctl* for targeting nodes

groups: {
cmp: 'I@nova:compute',
ctl: 'I@keystone:server',
msg: 'I@rabbitmq:server',
dbs: 'I@galera:*',
prx: 'I@nginx:server',
mon: 'I@prometheus:server and not I@influxdb:server',
log: 'I@kibana:server',
mtr: 'I@influxdb:server',
kvm: 'I@salt:control',
cid: 'I@docker:host and not I@prometheus:server and not I@kubernetes:*',
ntw: 'I@opencontrail:database',
ceph_mon: 'I@ceph:mon',
ceph_osd: 'I@ceph:osd',
k8-ctl: 'I@etcd:server',
k8-cmp: 'I@kubernetes:* and not I@etcd:*',
cfg: 'I@salt:master',
gtw: 'I@neutron:gateway'
}

simple_packages: {'mitaka': '{"galera": "25.3.10-1~u14.04+mcp1", "telegraf": "1.2", "zookeeper": "3.5", "kibana": "4.6.4", "kafka": "0.10.2.0", "ceph": "10.2.9", "glusterfs": "3.7", "mysql": "5.6.23", "ironic": "7.0", "glance": "12.0.0", "qemu": "2.5", "cassandra": "3.0.13", "git": "1:2.7.4-0ubuntu1", "redis": "3.2", "nova": "13.1.4", "grafana": "4.3.2", "keystone": "9.3.0", "etcd": "3.2.2", "neutron": "8.4.0", "designate": "4.0", "mongodb": "3.4", "influxdb": "1.3.3-1", "libvirt": "1.3.1", "heka": "0.10.1", "heat": "6.1.2", "haproxy": "1.6.3-1~u14.04+mcp1", "ceilometer": "6.1.5", "postgresql": "9.6.2", "nginx": "1.10.0-0ubuntu0.16.0", "opencontrail": "3.1.1", "prometheus": "1.6.3", "rabbitmq": "3.6.6-1~u14.04+mcp1", "elasticsearch": "2.4.5", "horizon": "9.1.2", "cinder": "8.1.1", "virtlet": "0.9.3", "reclass": "1.4.1", "salt": "2016.3"}',
# https://docs.mirantis.com/mcp/q3-17/mcp-release-notes/components-versions.html
'ocata':'{"galera": "25.3.14-1", "telegraf": "1.5", "zookeeper": "3.4.5", "kibana": "5.6.5", "kafka": "2.9.2-0.8.2.0-0contrail0", "ceph": "12.2.2-1enial", "glusterfs": "3.8.15", "mysql": "5.6.35", "ironic": "7.0", "glance": "14.0", "qemu": "2.5", "cassandra": "2.1.20", "git": "2.7.4", "redis": "2.8.4-2", "nova": "15.1.", "grafana": "4.5.2", "keystone": "11.0", "etcd": "2.2.5", "neutron": "10.0", "designate": "4.0", "mongodb": "3.4", "influxdb": "1.4.2", "libvirt": "1.3.1", "heka": "0.10.1", "heat": "8.0", "haproxy": "1.6.3", "ceilometer": "8.1", "postgresql": "9.6", "nginx": "1.10.3", "opencontrail": "3.2", "prometheus": "2.0.0", "rabbitmq": "3.6.5", "elasticsearch": "5.6.5", "horizon": "11.0", "cinder": "10.0", "virtlet": "0.9.3", "reclass": "1.4.1", "salt": "2016.3.8"}',
# https://docs.mirantis.com/mcp/q4-17/mcp-release-notes/components-versions.html
'pike': '{"galera": "25.3.14-1", "fluentd": "1.0.2", "telegraf": "1.5.3", "aodh": "5", "zookeeper": "3.4.5", "kibana": "5.6.5", "kafka": "2.9.2-0.8.2.0-0contrail0", "ceph": "12.2.4-1enial", "glusterfs": "3.8.15", "mysql": "5.6.35", "ironic": "9.1", "glance": "15.0", "grafana": "5.0.3", "qemu": "1:2.11+dfsg-1.1~u16.04+mcp1", "barbican": "5.0", "gnocchi": "4.0.4", "cassandra": "2.1.20", "git": "2.7.4", "redis": "2.8.4-2", "nova": "16.1", "keystone": "12.0", "etcd": "3.3", "neutron": "11.0", "designate": "5.0.1", "haproxy": "1.6.3", "manila": "5.0.1", "mongodb": "3.4", "influxdb": "1.4.2", "libvirt": "4.0.0-1.7~u16.04+mcp1", "heka": "0.10.1", "heat": "9.0", "tls": "1.2", "ceilometer": "9.0.5", "postgresql": "9.6", "nginx": "1.10.3", "opencontrail": "3.2", "prometheus": "2.2.1", "rabbitmq": "rabbitmq-server", "jenkins-master": "2.100", "elasticsearch": "5.6.5", "panko": "3.1", "horizon": "12.0", "cinder": "11.1", "virtlet": "1.0.0", "reclass": "1.4.1", "salt": "2016.3.8"}'
# https://docs.mirantis.com/mcp/q1-18/mcp-release-notes/components-versions.html
}

exclude_packages: [ 'python-neutron-lib', 'python-glance-store',
'python-neutronclient', 'python-glanceclient',
'python-pymysql', 'python-designateclient',
'python-cinderclient', 'python-mysqldb',
'python-heatclient', 'glance-store-common',
'python-novaclient', 'python-ironic-lib',
'python-ironicclient', 'python-elasticsearch-curator',
'python-elasticsearch', 'prometheus-relay',
'influxdb-relay', 'postgresql-client-common',
'python-horizon-neutron-lbaasv2-panel', 'ipxe-qemu',
'libvirt-exporter', 'libdbd-mysql', 'elasticsearch-curator',
'postgresql-common', 'python-gitdb', 'python-github',
'python-aodhclient', 'python-psycopg2',
'libdbd-mysql-perl', 'libdbd-mysql:amd64', 'python-influxdb']

# mtu test setting
# this test may skip groups (see example)
test_mtu:
{ #"skipped_groups": ["dbs"]
"skipped_ifaces": ["bonding_masters", "lo", "veth", "tap", "cali"]}
"skipped_ifaces": ["bonding_masters", "lo", "veth", "tap", "cali", "qv", "qb"]}
# mask for interfaces to skip

# ntp test setting
# this test may skip specific node (use fqdn)
test_ntp_sync:
{ #"skipped_nodes": [""],
"time_deviation": 5}
"time_deviation": 1}
94 changes: 94 additions & 0 deletions cvp_checks/tests/ceph/test_ceph_pg_count.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,94 @@
import pytest
import math

def __next_power_of2(total_pg):
    """Return the smallest power of two >= total_pg.

    Returns total_pg itself when it is already a power of two, and 1 for
    any input < 1.  The previous shift-and-count loop hung forever on
    negative input (right-shifting a negative int never reaches 0).
    """
    if total_pg <= 0:
        # Matches the old behavior for 0 (1 << 0 == 1) and makes
        # negative input terminate instead of looping forever.
        return 1
    if total_pg & (total_pg - 1) == 0:
        # Already a power of two.
        return total_pg
    return 1 << total_pg.bit_length()


def test_ceph_pg_count(local_salt_client):
    """
    Validate placement group counts for every Ceph pool.

    Formula to calculate PG num:
        Total PGs =
            (Total_number_of_OSD * 100) / max_replication_count / pool count
    pg_num and pgp_num should be equal for each pool, and both should be
    at least the formula result rounded up to the next power of two.
    """

    ceph_monitors = local_salt_client.cmd(
        'ceph:mon',
        'test.ping',
        expr_form='pillar')

    if not ceph_monitors:
        pytest.skip("Ceph is not found on this environment")

    # Any single monitor is enough -- every mon sees the same cluster map.
    # NOTE: dict.keys() indexing is Python 2 only.
    monitor = ceph_monitors.keys()[0]

    pools = local_salt_client.cmd(
        monitor, 'cmd.run',
        ["rados lspools"],
        expr_form='glob').get(monitor).split('\n')

    # Count every OSD, whether currently up or down.
    total_osds = int(local_salt_client.cmd(
        monitor,
        'cmd.run',
        ['ceph osd tree | grep osd | grep "up\|down" | wc -l'],
        expr_form='glob').get(monitor))

    # "ceph osd dump" size lines: awk picks $3 (pool name) and $6 (size).
    raw_pool_replications = local_salt_client.cmd(
        monitor,
        'cmd.run',
        ["ceph osd dump | grep size | awk '{print $3, $6}'"],
        expr_form='glob').get(monitor).split('\n')

    pool_replications = {}
    for replication in raw_pool_replications:
        pool_name, size = replication.split()
        pool_replications[pool_name] = int(size)

    # Size the PGs for the most-replicated pool.
    max_replication_value = max(pool_replications.values())

    total_pg = (total_osds * 100) / max_replication_value / len(pools)
    correct_pg_num = __next_power_of2(total_pg)

    pools_pg_num = {}
    pools_pgp_num = {}
    for pool in pools:
        pools_pg_num[pool] = int(local_salt_client.cmd(
            monitor,
            'cmd.run',
            ["ceph osd pool get {} pg_num".format(pool)],
            expr_form='glob').get(monitor).split()[1])
        pools_pgp_num[pool] = int(local_salt_client.cmd(
            monitor,
            'cmd.run',
            ["ceph osd pool get {} pgp_num".format(pool)],
            expr_form='glob').get(monitor).split()[1])

    wrong_pg_num_pools = []
    pg_pgp_not_equal_pools = []
    for pool in pools:
        if pools_pg_num[pool] != pools_pgp_num[pool]:
            pg_pgp_not_equal_pools.append(pool)
        if pools_pg_num[pool] < correct_pg_num:
            wrong_pg_num_pools.append(pool)

    assert not pg_pgp_not_equal_pools, \
        "For pools {} PG and PGP are not equal " \
        "but should be".format(pg_pgp_not_equal_pools)
    assert not wrong_pg_num_pools, "For pools {} " \
        "PG number lower than Correct PG number, " \
        "but should be equal or higher".format(wrong_pg_num_pools)
49 changes: 49 additions & 0 deletions cvp_checks/tests/ceph/test_ceph_replicas.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
import pytest


def test_ceph_replicas(local_salt_client):
"""
Test aimed to check number of replicas
for most of deployments if there is no
special requirement for that.
"""

ceph_monitors = local_salt_client.cmd(
'ceph:mon',
'test.ping',
expr_form='pillar')

if not ceph_monitors:
pytest.skip("Ceph is not found on this environment")

monitor = ceph_monitors.keys()[0]

raw_pool_replicas = local_salt_client.cmd(
monitor,
'cmd.run',
["ceph osd dump | grep size | " \
"awk '{print $3, $5, $6, $7, $8}'"],
expr_form='glob').get(
ceph_monitors.keys()[0]).split('\n')

pools_replicas = {}
for pool in raw_pool_replicas:
pool_name = pool.split(" ", 1)[0]
pool_replicas = {}
raw_replicas = pool.split(" ", 1)[1].split()
for elem in raw_replicas:
pool_replicas[raw_replicas[0]] = int(raw_replicas[1])
pool_replicas[raw_replicas[2]] = int(raw_replicas[3])
pools_replicas[pool_name] = pool_replicas

error = []
for pool, replicas in pools_replicas.items():
for replica, value in replicas.items():
if replica == 'min_size' and value < 2:
error.append(pool + " " + replica + " "
+ str(value) + " must be 2")
if replica == 'size' and value < 3:
error.append(pool + " " + replica + " "
+ str(value) + " must be 3")

assert not error, "Wrong pool replicas found\n{}".format(error)
55 changes: 55 additions & 0 deletions cvp_checks/tests/ceph/test_ceph_tell_bench.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,55 @@
import pytest
import json
import math


def test_ceph_tell_bench(local_salt_client):
"""
Test checks that each OSD MB per second speed
is not lower than 10 MB comparing with AVG.
Bench command by default writes 1Gb on each OSD
with the default values of 4M
and gives the "bytes_per_sec" speed for each OSD.

"""
ceph_monitors = local_salt_client.cmd(
'ceph:mon',
'test.ping',
expr_form='pillar')

if not ceph_monitors:
pytest.skip("Ceph is not found on this environment")

cmd_result = local_salt_client.cmd(
ceph_monitors.keys()[0],
'cmd.run', ["ceph tell osd.* bench -f json"],
expr_form='glob').get(
ceph_monitors.keys()[0]).split('\n')

cmd_result = filter(None, cmd_result)

osd_pool = {}
for osd in cmd_result:
osd_ = osd.split(" ")
osd_pool[osd_[0]] = osd_[1]

mbps_sum = 0
osd_count = 0
for osd in osd_pool:
osd_count += 1
mbps_sum += json.loads(
osd_pool[osd])['bytes_per_sec'] / 1000000

mbps_avg = mbps_sum / osd_count
result = {}
for osd in osd_pool:
mbps = json.loads(
osd_pool[osd])['bytes_per_sec'] / 1000000
if math.fabs(mbps_avg - mbps) > 10:
result[osd] = osd_pool[osd]

assert len(result) == 0, \
"Performance of {} OSD lower " \
"than AVG performance, " \
"please check Ceph for possible problems".format(
json.dumps(result, indent=4))
9 changes: 7 additions & 2 deletions cvp_checks/tests/test_cinder_services.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,14 @@
import pytest


def test_cinder_services(local_salt_client):
service_down = local_salt_client.cmd(
'keystone:server',
'cinder:controller',
'cmd.run',
['. /root/keystonerc; cinder service-list | grep "down\|disabled"'],
expr_form='pillar')
if not service_down:
pytest.skip("Cinder is not found on this environment")
cinder_volume = local_salt_client.cmd(
'keystone:server',
'cmd.run',
Expand All @@ -12,4 +17,4 @@ def test_cinder_services(local_salt_client):
assert service_down[service_down.keys()[0]] == '', \
'''Some cinder services are in wrong state'''
assert cinder_volume[cinder_volume.keys()[0]] == '1', \
'''Some nova services are in wrong state'''
'''There are more than 1 host/backend for cinder'''
2 changes: 1 addition & 1 deletion cvp_checks/tests/test_k8s.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ def test_k8s_get_nodes_status(local_salt_client):
if 'STATUS' in line or 'proto' in line:
continue
else:
if 'Ready' not in line:
if 'Ready' != line.split()[1]:
errors.append(line)
break
assert not errors, 'k8s is not healthy: {}'.format(json.dumps(
Expand Down
Loading