From 9ed13f388ee5fe64796692f5199f99e69b02f83d Mon Sep 17 00:00:00 2001
From: Tim Heyer
Date: Wed, 14 Jun 2017 15:30:43 +0000
Subject: [PATCH] Implement wait-for-deployment feature with timeout and unit test

- Wait for all charts to deploy before exiting
- Add --wait flag and custom --timeout flag
---
 armada/cli/apply.py                     | 10 ++-
 armada/handlers/armada.py               | 47 +++++++++++++-
 armada/handlers/k8s.py                  | 10 +++
 armada/tests/__init__.py                |  0
 armada/tests/unit/__init__.py           |  0
 armada/tests/unit/handlers/__init__.py  |  0
 armada/tests/unit/handlers/test_wait.py | 86 +++++++++++++++++++++++++
 tox.ini                                 |  6 ++
 8 files changed, 156 insertions(+), 3 deletions(-)
 create mode 100644 armada/tests/__init__.py
 create mode 100644 armada/tests/unit/__init__.py
 create mode 100644 armada/tests/unit/handlers/__init__.py
 create mode 100644 armada/tests/unit/handlers/test_wait.py

diff --git a/armada/cli/apply.py b/armada/cli/apply.py
index bcf4dc1e..d3106495 100644
--- a/armada/cli/apply.py
+++ b/armada/cli/apply.py
@@ -27,7 +27,9 @@ def applyCharts(args):
                     args.disable_update_post,
                     args.enable_chart_cleanup,
                     args.skip_pre_flight,
-                    args.dry_run)
+                    args.dry_run,
+                    args.wait,
+                    args.timeout)
     armada.sync()
 
 class ApplyChartsCommand(cmd.Command):
@@ -47,6 +49,12 @@ class ApplyChartsCommand(cmd.Command):
                             default=False, help='Disable post upgrade actions')
         parser.add_argument('--enable-chart-cleanup', action='store',
                             default=False, help='Enable Chart Clean Up')
+        parser.add_argument('--wait', action='store_true',
+                            default=False, help='Wait until all charts '
+                            'have been deployed')
+        parser.add_argument('--timeout', action='store',
+                            default=3600, help='Time in seconds to wait'
+                            ' for charts to deploy')
         return parser
 
     def take_action(self, parsed_args):
diff --git a/armada/handlers/armada.py b/armada/handlers/armada.py
index fe3346f7..ca2ae082 100644
--- a/armada/handlers/armada.py
+++ b/armada/handlers/armada.py
@@ -14,10 +14,11 @@
 import difflib
 import yaml
 
+from threading import Event, Timer
+from time import sleep
 from oslo_config import cfg
 from oslo_log import log as logging
-
 from supermutes.dot import dotify
 
 from chartbuilder import ChartBuilder
@@ -34,6 +35,7 @@
 logging.register_options(CONF)
 logging.setup(CONF, DOMAIN)
 LOG = logging.getLogger(__name__)
+DEFAULT_TIMEOUT = 3600
 
 class Armada(object):
     '''
@@ -46,7 +48,9 @@ class Armada(object):
                  disable_update_post=False,
                  enable_chart_cleanup=False,
                  skip_pre_flight=False,
-                 dry_run=False):
+                 dry_run=False,
+                 wait=False,
+                 timeout=DEFAULT_TIMEOUT):
         '''
         Initialize the Armada Engine and establish a connection to Tiller
         '''
@@ -56,6 +60,8 @@
         self.enable_chart_cleanup = enable_chart_cleanup
         self.skip_pre_flight = skip_pre_flight
         self.dry_run = dry_run
+        self.wait = wait
+        self.timeout = float(timeout)
         self.config = yaml.load(config)
         self.tiller = Tiller()
 
@@ -181,6 +187,11 @@ class Armada(object):
 
             chartbuilder.source_cleanup()
 
+        # if requested, wait for chart deployment
+        if self.wait:
+            LOG.info("Waiting for chart deployment")
+            self.wait_for_deployment()
+
         if self.enable_chart_cleanup:
             self.tiller.chart_cleanup(prefix,
                                       self.config['armada']['charts'])
@@ -211,3 +222,35 @@
             LOG.debug(line)
 
         return (len(chart_diff) > 0) or (len(values_diff) > 0)
+
+    def wait_for_deployment(self):
+        FAIL_STATUS = 'Failed'
+        RUN_STATUS = 'Running'
+        SUCCESS_STATUS = 'Succeeded'
+
+        pods = self.tiller.k8s.get_all_pods().items
+        timeout_event = Event()
+        timer = Timer(self.timeout, timeout_event.set)
+        timer.start()
+
+        try:
+            while pods and not timeout_event.is_set():
+                # poll pod status once per second
+                sleep(1)
+                for pod in list(pods):
+                    if pod.status.phase == FAIL_STATUS:
+                        raise RuntimeError('Deploy failed {}'
+                                           .format(pod.metadata.name))
+                    elif pod.status.phase in (RUN_STATUS,
+                                              SUCCESS_STATUS):
+                        # pod reached a healthy state; stop tracking it
+                        pods.remove(pod)
+        finally:
+            # stop the timer whether we exit cleanly or on error
+            timer.cancel()
+
+        if timeout_event.is_set():
+            # report every pod that never reached Running or Succeeded
+            raise RuntimeError('Deploy timeout {}'
+                               .format([pod.metadata.name
+                                        for pod in pods]))
diff --git a/armada/handlers/k8s.py b/armada/handlers/k8s.py
index 80c3d042..13fea1d9 100644
--- a/armada/handlers/k8s.py
+++ b/armada/handlers/k8s.py
@@ -58,3 +58,13 @@ class K8s(object):
         '''
 
         return self.client.list_namespaced_pod(namespace)
+
+    def get_all_pods(self, label_selector=''):
+        '''
+        :params - label_selector - filters pods by label
+
+        Returns a list of pods from all namespaces
+        '''
+
+        return self.client \
+            .list_pod_for_all_namespaces(label_selector=label_selector)
diff --git a/armada/tests/__init__.py b/armada/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/armada/tests/unit/__init__.py b/armada/tests/unit/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/armada/tests/unit/handlers/__init__.py b/armada/tests/unit/handlers/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/armada/tests/unit/handlers/test_wait.py b/armada/tests/unit/handlers/test_wait.py
new file mode 100644
index 00000000..0b2e826e
--- /dev/null
+++ b/armada/tests/unit/handlers/test_wait.py
@@ -0,0 +1,86 @@
+from armada.handlers.armada import Armada
+
+import mock
+import unittest
+from threading import Thread
+from time import sleep
+
+POD_NAME_COUNTER = 1
+
+class PodGenerator(object):
+
+    def gen_pod(self, phase, message=None):
+        global POD_NAME_COUNTER
+        pod = mock.Mock()
+        pod.status.phase = phase
+        pod.metadata.name = 'pod_instance_{}'.format(POD_NAME_COUNTER)
+        POD_NAME_COUNTER += 1
+        if message:
+            pod.status.message = message
+        return pod
+
+
+class WaitTestCase(unittest.TestCase):
+
+    @mock.patch('armada.handlers.armada.lint')
+    @mock.patch('armada.handlers.armada.Tiller')
+    def test_wait(self, mock_tiller, mock_lint):
+        TIMEOUT = 5
+        # instantiate Armada object
+        armada = Armada("../../examples/openstack-helm.yaml",
+                        wait=True,
+                        timeout=TIMEOUT)
+        armada.tiller = mock_tiller
+
+        # TIMEOUT TEST
+        timeout_pod = PodGenerator().gen_pod('Unknown')
+        pods = mock.Mock()
+        pods.items = [timeout_pod]
+        mock_tiller.k8s.get_all_pods.return_value = pods
+
+        with self.assertRaises(RuntimeError):
+            armada.wait_for_deployment()
+        mock_tiller.k8s.get_all_pods.assert_called_with()
+
+        # FAILED_STATUS TEST
+        failed_pod = PodGenerator().gen_pod('Failed')
+        pods = mock.Mock()
+        pods.items = [failed_pod]
+        mock_tiller.k8s.get_all_pods.return_value = pods
+
+        with self.assertRaises(RuntimeError):
+            armada.wait_for_deployment()
+        mock_tiller.k8s.get_all_pods.assert_called_with()
+
+        # SUCCESS_STATUS TEST
+        success_pod = PodGenerator().gen_pod('Succeeded')
+        pods = mock.Mock()
+        pods.items = [success_pod]
+        mock_tiller.k8s.get_all_pods.return_value = pods
+
+        try:
+            armada.wait_for_deployment()
+        except RuntimeError as e:
+            self.fail('Expected success but got {}'.format(e))
+        mock_tiller.k8s.get_all_pods.assert_called_with()
+
+        # SIMULATE_DEPLOYMENT TEST
+        simulation_pod = PodGenerator().gen_pod('Pending')
+        pods = mock.Mock()
+        pods.items = [simulation_pod]
+        mock_tiller.k8s.get_all_pods.return_value = pods
+
+        method_call = Thread(target=armada.wait_for_deployment)
+        method_call.start()
+
+        # let the method spin for a bit, then change pod status
+        sleep(TIMEOUT / 4.0)
+        simulation_pod.status.phase = 'Running'
+
+        try:
+            # ensure the method_call thread ends after status change
+            method_call.join(5.0)
+            self.assertFalse(method_call.is_alive())
+        except RuntimeError as e:
+            self.fail('Expected success but got {}'.format(e))
+        mock_tiller.k8s.get_all_pods.assert_called_with()
diff --git a/tox.ini b/tox.ini
index cdb56e5d..3003512a 100644
--- a/tox.ini
+++ b/tox.ini
@@ -18,6 +18,12 @@ commands = python setup.py build_sphinx
 [testenv:genconfig]
 commands = oslo-config-generator --config-file=etc/armada/config-generator.conf
 
+[testenv:lint]
+commands = flake8 .
+
+[testenv:testing]
+commands = nosetests -w armada
+
 [flake8]
 ignore=E302,H306
 exclude= libgit2-0.24.0, .git, .idea, .tox, *.egg-info, *.eggs, bin, dist, hapi
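
For reference, a minimal usage sketch of the options this patch adds when driving the engine directly from Python; the manifest path and the 1800-second timeout below are illustrative placeholders, not values taken from the patch:

    # Illustrative only: exercises the wait/timeout options added above.
    from armada.handlers.armada import Armada

    with open('examples/openstack-helm.yaml') as config:  # example manifest
        armada = Armada(config.read(),
                        wait=True,      # block until pods are Running/Succeeded
                        timeout=1800)   # give up after 30 minutes (seconds)
        armada.sync()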