# tox-shrink_osd.ini (from a fork of ceph/ceph-ansible)
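#
# Tox scenarios for the shrink-osd functional tests. The envlist below expands
# to four environments:
#   centos-container-shrink_osd_single      centos-container-shrink_osd_multiple
#   centos-non_container-shrink_osd_single  centos-non_container-shrink_osd_multiple
# Run one with, for example:
#   tox -e centos-container-shrink_osd_single
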
[tox]
envlist = {centos}-{container,non_container}-{shrink_osd_single,shrink_osd_multiple}
skipsdist = True
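
# [shrink-osd-single]: run the shrink-osd.yml playbook once per OSD, removing
# the eight OSDs (ids 0-7) one at a time; ireallymeanit=yes pre-answers the
# playbook's confirmation prompt.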
[shrink-osd-single]
commands=
  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
    ireallymeanit=yes \
    osd_to_kill=0 \
  "
  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
    ireallymeanit=yes \
    osd_to_kill=1 \
  "
  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
    ireallymeanit=yes \
    osd_to_kill=2 \
  "
  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
    ireallymeanit=yes \
    osd_to_kill=3 \
  "
  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
    ireallymeanit=yes \
    osd_to_kill=4 \
  "
  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
    ireallymeanit=yes \
    osd_to_kill=5 \
  "
  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
    ireallymeanit=yes \
    osd_to_kill=6 \
  "
  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
    ireallymeanit=yes \
    osd_to_kill=7 \
  "
[shrink-osd-multiple]
commands=
  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
    ireallymeanit=yes \
    osd_to_kill=0,1,2,3,4,5,6,7 \
  "
[testenv]
whitelist_externals =
  vagrant
  bash
passenv=*
sitepackages=False
setenv=
  ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey
  ANSIBLE_CONFIG = {toxinidir}/ansible.cfg
  ANSIBLE_CALLBACK_WHITELIST = profile_tasks
  ANSIBLE_KEEP_REMOTE_FILES = 1
  ANSIBLE_CACHE_PLUGIN = memory
  ANSIBLE_GATHERING = implicit
  # only available for ansible >= 2.5
  ANSIBLE_STDOUT_CALLBACK = yaml
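  # Factor-conditional settings: a "container:" / "non_container:" prefix means
  # the line applies only when the active env name contains that factor.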
  non_container: DEV_SETUP = True
  # Set the vagrant box image to use
  centos-non_container: CEPH_ANSIBLE_VAGRANT_BOX = centos/8
  centos-container: CEPH_ANSIBLE_VAGRANT_BOX = centos/8
  INVENTORY = {env:_INVENTORY:hosts}
  container: CONTAINER_DIR = /container
  container: PLAYBOOK = site-container.yml.sample
  container: PURGE_PLAYBOOK = purge-container-cluster.yml
  non_container: PLAYBOOK = site.yml.sample
  CEPH_DOCKER_IMAGE_TAG = latest-master
  CEPH_DOCKER_IMAGE_TAG_BIS = latest-bis-master
  UPDATE_CEPH_DOCKER_IMAGE_TAG = latest-master
  CEPH_STABLE_RELEASE = pacific

deps= -r{toxinidir}/tests/requirements.txt

changedir=
  shrink_osd_single: {toxinidir}/tests/functional/shrink_osd{env:CONTAINER_DIR:}
  shrink_osd_multiple: {toxinidir}/tests/functional/shrink_osd{env:CONTAINER_DIR:}
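
# Scenario flow: provision the Vagrant VMs, deploy a Ceph cluster, verify it,
# shrink the OSDs (single or multiple variant), redeploy and verify them again,
# then destroy the VMs.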
commands=
  ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup={env:DEV_SETUP:False} change_dir={changedir} ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}" --tags "vagrant_setup"
  bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox}
  bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
  # configure lvm
  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml
  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml
  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
    delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
    ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
    ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
    ceph_docker_registry_auth=True \
    ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
    ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
  "
  # test cluster state using ceph-ansible tests
  py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
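  # (--reruns/--reruns-delay come from pytest-rerunfailures, -n from pytest-xdist,
  # and --sudo/--connection/--ansible-inventory/--ssh-config from testinfra,
  # presumably installed via tests/requirements.txt)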
  shrink_osd_single: {[shrink-osd-single]commands}
  shrink_osd_multiple: {[shrink-osd-multiple]commands}
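  # The scenario-prefixed lines above run only in the matching env and splice in
  # the command list from [shrink-osd-single] or [shrink-osd-multiple].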
  # re-create the lvm layout, then redeploy the OSDs
  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml
  ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --limit osds --extra-vars "\
    delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
    ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
    ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
    ceph_docker_registry_auth=True \
    ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
    ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
  "
  # retest to ensure the OSDs were redeployed correctly
  py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
  vagrant destroy --force