diff --git a/README.rst b/README.rst
index 98ad365..6c980c6 100644
--- a/README.rst
+++ b/README.rst
@@ -607,12 +607,16 @@ Wait for status of selected machine's:
         machines:
          - kvm01
          - kvm02
-        timeout: 1200 # in seconds
+        timeout: {{ region.timeout.ready }}
+        attempts: {{ region.timeout.attempts }}
         req_status: "Ready"
     - require:
       - cmd: maas_login_admin
     ...
 
+The timeout setting is taken from the reclass pillar data.
+If the pillar data is not defined, it will use the default value.
+
 If module run w/\o any extra paremeters, ``wait_for_machines_ready`` will wait for defined in salt
 machines. In this case, it is usefull to skip some machines:
 
@@ -627,7 +631,8 @@ machines. In this case, it is usefull to skip some machines:
     module.run:
     - name: maas.wait_for_machine_status
     - kwargs:
-        timeout: 1200 # in seconds
+        timeout: {{ region.timeout.deployed }}
+        attempts: {{ region.timeout.attempts }}
         req_status: "Deployed"
         ignore_machines:
          - kvm01 # in case it's broken or whatever
diff --git a/_modules/maas.py b/_modules/maas.py
index c02f104..28e46c5 100644
--- a/_modules/maas.py
+++ b/_modules/maas.py
@@ -921,6 +921,7 @@ def wait_for_machine_status(cls, **kwargs):
         req_status: string; Polling status
         machines: list; machine names
         ignore_machines: list; machine names
+        attempts: max number of automatic hard retries
         :ret: True
                  Exception - if something fail/timeout reached
         """
@@ -929,6 +930,8 @@ def wait_for_machine_status(cls, **kwargs):
         req_status = kwargs.get("req_status", "Ready")
         to_discover = kwargs.get("machines", None)
         ignore_machines = kwargs.get("ignore_machines", None)
+        attempts = kwargs.get("attempts", 0)
+        failed_attempts = {}
         if not to_discover:
             try:
                 to_discover = __salt__['config.get']('maas')['region'][
@@ -943,11 +946,45 @@ def wait_for_machine_status(cls, **kwargs):
         while len(total) <= len(to_discover):
             for m in to_discover:
                 for discovered in MachinesStatus.execute()['machines']:
-                    if m == discovered['hostname'] and \
-                       discovered['status'].lower() == req_status.lower():
-                        if m in total:
+                    if m == discovered['hostname'] and m in total:
+                        req_status_list = req_status.lower().split('|')
+                        if discovered['status'].lower() in req_status_list:
                             total.remove(m)
-
+                        elif attempts > 0 and (m not in failed_attempts or
+                                               failed_attempts[m] < attempts):
+                            status = discovered['status']
+                            sid = discovered['system_id']
+                            cls._maas = _create_maas_client()
+                            if status in ['Failed commissioning', 'New']:
+                                LOG.info('Machine {0} deleted'.format(sid))
+                                cls._maas.delete(u'api/2.0/machines/{0}/'
+                                                 .format(sid))
+                                Machine().process()
+                            elif status in ['Failed testing']:
+                                data = {}
+                                LOG.info('Machine {0} overriden'.format(sid))
+                                action = 'override_failed_testing'
+                                cls._maas.post(u'api/2.0/machines/{0}/'
+                                               .format(sid), action, **data)
+                            elif status in ['Failed deployment', 'Allocated']:
+                                data = {}
+                                LOG.info('Machine {0} mark broken'.format(sid))
+                                cls._maas.post(u'api/2.0/machines/{0}/'
+                                               .format(sid), 'mark_broken', **data)
+                                LOG.info('Machine {0} mark fixed'.format(sid))
+                                cls._maas.post(u'api/2.0/machines/{0}/'
+                                               .format(sid), 'mark_fixed', **data)
+                                if m in failed_attempts and failed_attempts[m]:
+                                    LOG.info('Machine {0} fio test'.format(sid))
+                                    data['testing_scripts'] = 'fio'
+                                cls._maas.post(u'api/2.0/machines/{0}/'
+                                               .format(sid), 'commission', **data)
+                                DeployMachines().process()
+                            else:
+                                continue
+                            if m not in failed_attempts:
+                                failed_attempts[m] = 0
+                            failed_attempts[m] = failed_attempts[m] + 1
             if len(total) <= 0:
                 LOG.debug(
                     "Machines:{} are:{}".format(to_discover, req_status))
@@ -959,7 +996,9 @@ def wait_for_machine_status(cls, **kwargs):
                 "Waiting status:{} "
                 "for machines:{}"
                 "\nsleep for:{}s "
-                "Timeout:{}s".format(req_status, total, poll_time, timeout))
+                "Timeout:{}s ({}s left)"
+                .format(req_status, total, poll_time,
+                        timeout - (time.time() - started_at)))
             time.sleep(poll_time)
 
 
diff --git a/maas/machines/wait_for_deployed.sls b/maas/machines/wait_for_deployed.sls
index ebeedac..a646fdb 100644
--- a/maas/machines/wait_for_deployed.sls
+++ b/maas/machines/wait_for_deployed.sls
@@ -9,5 +9,7 @@ wait_for_machines_deployed:
   - name: maas.wait_for_machine_status
   - kwargs:
       req_status: "Deployed"
+      timeout: {{ region.timeout.deployed }}
+      attempts: {{ region.timeout.attempts }}
   - require:
     - cmd: maas_login_admin
diff --git a/maas/machines/wait_for_ready.sls b/maas/machines/wait_for_ready.sls
index c5d3c28..d8a2963 100644
--- a/maas/machines/wait_for_ready.sls
+++ b/maas/machines/wait_for_ready.sls
@@ -7,5 +7,8 @@ maas_login_admin:
 wait_for_machines_ready:
   module.run:
   - name: maas.wait_for_machine_status
+  - kwargs:
+      timeout: {{ region.timeout.ready }}
+      attempts: {{ region.timeout.attempts }}
   - require:
     - cmd: maas_login_admin
diff --git a/maas/machines/wait_for_ready_or_deployed.sls b/maas/machines/wait_for_ready_or_deployed.sls
new file mode 100644
index 0000000..db3dcc4
--- /dev/null
+++ b/maas/machines/wait_for_ready_or_deployed.sls
@@ -0,0 +1,15 @@
+{%- from "maas/map.jinja" import region with context %}
+
+maas_login_admin:
+  cmd.run:
+  - name: "maas-region apikey --username {{ region.admin.username }} > /var/lib/maas/.maas_credentials"
+
+wait_for_machines_ready_or_deployed:
+  module.run:
+  - name: maas.wait_for_machine_status
+  - kwargs:
+      req_status: "Ready|Deployed"
+      timeout: {{ region.timeout.ready }}
+      attempts: {{ region.timeout.attempts }}
+  - require:
+    - cmd: maas_login_admin
diff --git a/maas/map.jinja b/maas/map.jinja
index 0671435..1e6ac07 100644
--- a/maas/map.jinja
+++ b/maas/map.jinja
@@ -22,6 +22,10 @@ Debian:
     bind:
       host: 0.0.0.0
       port: 80
+    timeout:
+      ready: 1200
+      deployed: 7200
+      attempts: 0
 {%- endload %}
 
 {%- set region = salt['grains.filter_by'](region_defaults, merge=salt['pillar.get']('maas:region', {})) %}
diff --git a/tests/pillar/maas_region.sls b/tests/pillar/maas_region.sls
index a150389..554a6fa 100644
--- a/tests/pillar/maas_region.sls
+++ b/tests/pillar/maas_region.sls
@@ -23,3 +23,7 @@ maas:
       password: password
      username: maas
     salt_master_ip: 127.0.0.1
+    timeout:
+      deployed: 900
+      ready: 900
+      attempts: 2
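For reference, reclass pillar data overriding the new ``maas:region:timeout`` defaults from ``maas/map.jinja`` might look like the sketch below; the key names follow this patch and ``tests/pillar/maas_region.sls``, while the values are only illustrative::

  maas:
    region:
      timeout:
        ready: 1200      # seconds to wait for the "Ready" status
        deployed: 7200   # seconds to wait for the "Deployed" status
        attempts: 2      # automatic hard retries before the run fails

With ``attempts`` left at its default of 0, ``wait_for_machine_status`` only polls and never deletes, overrides, or marks machines broken/fixed.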