diff --git a/armada/api/__init__.py b/armada/api/__init__.py index 3d2c1cec..ac27ccf9 100644 --- a/armada/api/__init__.py +++ b/armada/api/__init__.py @@ -72,11 +72,8 @@ class BaseResource(object): try: return json.loads(raw_body.decode()) except json.JSONDecodeError as jex: - self.error( - req.context, - "Invalid JSON in request: %s" % str(jex)) - raise Exception( - "%s: Invalid JSON in body: %s" % (req.path, jex)) + self.error(req.context, "Invalid JSON in request: %s" % str(jex)) + raise Exception("%s: Invalid JSON in body: %s" % (req.path, jex)) def return_error(self, resp, status_code, message="", retry=False): resp.body = json.dumps({ @@ -112,6 +109,7 @@ class BaseResource(object): class ArmadaRequestContext(object): + def __init__(self): self.log_level = 'ERROR' self.user = None # Username diff --git a/armada/api/controller/armada.py b/armada/api/controller/armada.py index c43f5e6f..18e20570 100644 --- a/armada/api/controller/armada.py +++ b/armada/api/controller/armada.py @@ -58,23 +58,22 @@ class Apply(api.BaseResource): documents.extend(list(yaml.safe_load_all(d.decode()))) if req_body.get('overrides', None): - overrides = Override(documents, - overrides=req_body.get('overrides')) + overrides = Override( + documents, overrides=req_body.get('overrides')) documents = overrides.update_manifests() else: - self.error(req.context, "Unknown content-type %s" - % req.content_type) + self.error(req.context, + "Unknown content-type %s" % req.content_type) # TODO(fmontei): Use falcon. instead. return self.return_error( resp, falcon.HTTP_415, message="Request must be in application/x-yaml" - "or application/json") + "or application/json") try: armada = Armada( documents, - disable_update_pre=req.get_param_as_bool( - 'disable_update_pre'), + disable_update_pre=req.get_param_as_bool('disable_update_pre'), disable_update_post=req.get_param_as_bool( 'disable_update_post'), enable_chart_cleanup=req.get_param_as_bool( @@ -83,20 +82,17 @@ class Apply(api.BaseResource): force_wait=req.get_param_as_bool('wait'), timeout=req.get_param_as_int('timeout') or 0, tiller_host=req.get_param('tiller_host'), - tiller_port=req.get_param_as_int( - 'tiller_port') or CONF.tiller_port, + tiller_port=req.get_param_as_int('tiller_port') or + CONF.tiller_port, tiller_namespace=req.get_param( 'tiller_namespace', default=CONF.tiller_namespace), - target_manifest=req.get_param('target_manifest') - ) + target_manifest=req.get_param('target_manifest')) msg = armada.sync() - resp.body = json.dumps( - { - 'message': msg, - } - ) + resp.body = json.dumps({ + 'message': msg, + }) resp.content_type = 'application/json' resp.status = falcon.HTTP_200 @@ -106,5 +102,4 @@ class Apply(api.BaseResource): self.logger.exception('Caught unexpected exception') err_message = 'Failed to apply manifest: {}'.format(e) self.error(req.context, err_message) - self.return_error( - resp, falcon.HTTP_500, message=err_message) + self.return_error(resp, falcon.HTTP_500, message=err_message) diff --git a/armada/api/controller/rollback.py b/armada/api/controller/rollback.py index 56a797c6..8553c653 100644 --- a/armada/api/controller/rollback.py +++ b/armada/api/controller/rollback.py @@ -35,8 +35,8 @@ class Rollback(api.BaseResource): tiller = Tiller( tiller_host=req.get_param('tiller_host'), - tiller_port=req.get_param_as_int( - 'tiller_port') or CONF.tiller_port, + tiller_port=req.get_param_as_int('tiller_port') or + CONF.tiller_port, tiller_namespace=req.get_param( 'tiller_namespace', default=CONF.tiller_namespace), dry_run=dry_run) @@ -49,12 +49,10 
@@ class Rollback(api.BaseResource): force=req.get_param_as_bool('force'), recreate_pods=req.get_param_as_bool('recreate_pods')) - resp.body = json.dumps( - { - 'message': ('(dry run) ' if dry_run else '') + - 'Rollback of {} complete.'.format(release), - } - ) + resp.body = json.dumps({ + 'message': ('(dry run) ' if dry_run else '') + + 'Rollback of {} complete.'.format(release), + }) resp.content_type = 'application/json' resp.status = falcon.HTTP_200 @@ -62,5 +60,4 @@ class Rollback(api.BaseResource): self.logger.exception('Caught unexpected exception') err_message = 'Failed to rollback release: {}'.format(e) self.error(req.context, err_message) - self.return_error( - resp, falcon.HTTP_500, message=err_message) + self.return_error(resp, falcon.HTTP_500, message=err_message) diff --git a/armada/api/controller/test.py b/armada/api/controller/test.py index 58e5570f..996e5a70 100644 --- a/armada/api/controller/test.py +++ b/armada/api/controller/test.py @@ -41,8 +41,8 @@ class TestReleasesReleaseNameController(api.BaseResource): try: tiller = Tiller( tiller_host=req.get_param('tiller_host'), - tiller_port=req.get_param_as_int( - 'tiller_port') or CONF.tiller_port, + tiller_port=req.get_param_as_int('tiller_port') or + CONF.tiller_port, tiller_namespace=req.get_param( 'tiller_namespace', default=CONF.tiller_namespace)) success = test_release_for_success(tiller, release) @@ -110,8 +110,7 @@ class TestReleasesManifestController(api.BaseResource): def _validate_documents(self, req, resp, documents): result, details = validate.validate_armada_documents(documents) - return self._format_validation_response(req, resp, result, - details) + return self._format_validation_response(req, resp, result, details) @policy.enforce('armada:tests_manifest') def on_post(self, req, resp): @@ -122,8 +121,8 @@ class TestReleasesManifestController(api.BaseResource): try: tiller = Tiller( tiller_host=req.get_param('tiller_host'), - tiller_port=req.get_param_as_int( - 'tiller_port') or CONF.tiller_port, + tiller_port=req.get_param_as_int('tiller_port') or + CONF.tiller_port, tiller_namespace=req.get_param( 'tiller_namespace', default=CONF.tiller_namespace)) # TODO(fmontei): Provide more sensible exception(s) here. 
@@ -147,23 +146,16 @@ class TestReleasesManifestController(api.BaseResource): armada_obj = Manifest( documents, target_manifest=target_manifest).get_manifest() - prefix = armada_obj.get(const.KEYWORD_ARMADA).get( - const.KEYWORD_PREFIX) + prefix = armada_obj.get(const.KEYWORD_ARMADA).get(const.KEYWORD_PREFIX) known_releases = [release[0] for release in tiller.list_charts()] - message = { - 'tests': { - 'passed': [], - 'skipped': [], - 'failed': [] - } - } + message = {'tests': {'passed': [], 'skipped': [], 'failed': []}} for group in armada_obj.get(const.KEYWORD_ARMADA).get( const.KEYWORD_GROUPS): for ch in group.get(const.KEYWORD_CHARTS): - release_name = release_prefixer( - prefix, ch.get('chart').get('release')) + release_name = release_prefixer(prefix, + ch.get('chart').get('release')) if release_name in known_releases: self.logger.info('RUNNING: %s tests', release_name) @@ -175,8 +167,8 @@ class TestReleasesManifestController(api.BaseResource): self.logger.info("FAILED: %s", release_name) message['test']['failed'].append(release_name) else: - self.logger.info( - 'Release %s not found - SKIPPING', release_name) + self.logger.info('Release %s not found - SKIPPING', + release_name) message['test']['skipped'].append(release_name) resp.status = falcon.HTTP_200 diff --git a/armada/api/controller/tiller.py b/armada/api/controller/tiller.py index 366a8a95..b61645d4 100644 --- a/armada/api/controller/tiller.py +++ b/armada/api/controller/tiller.py @@ -27,6 +27,7 @@ LOG = logging.getLogger(__name__) class Status(api.BaseResource): + @policy.enforce('tiller:get_status') def on_get(self, req, resp): ''' @@ -35,8 +36,8 @@ class Status(api.BaseResource): try: tiller = Tiller( tiller_host=req.get_param('tiller_host'), - tiller_port=req.get_param_as_int( - 'tiller_port') or CONF.tiller_port, + tiller_port=req.get_param_as_int('tiller_port') or + CONF.tiller_port, tiller_namespace=req.get_param( 'tiller_namespace', default=CONF.tiller_namespace)) @@ -58,11 +59,11 @@ class Status(api.BaseResource): except Exception as e: err_message = 'Failed to get Tiller Status: {}'.format(e) self.error(req.context, err_message) - self.return_error( - resp, falcon.HTTP_500, message=err_message) + self.return_error(resp, falcon.HTTP_500, message=err_message) class Release(api.BaseResource): + @policy.enforce('tiller:get_release') def on_get(self, req, resp): '''Controller for listing Tiller releases. 
@@ -70,14 +71,15 @@ class Release(api.BaseResource): try: tiller = Tiller( tiller_host=req.get_param('tiller_host'), - tiller_port=req.get_param_as_int( - 'tiller_port') or CONF.tiller_port, + tiller_port=req.get_param_as_int('tiller_port') or + CONF.tiller_port, tiller_namespace=req.get_param( 'tiller_namespace', default=CONF.tiller_namespace)) - LOG.debug('Tiller (Release) at: %s:%s, namespace=%s, ' - 'timeout=%s', tiller.tiller_host, tiller.tiller_port, - tiller.tiller_namespace, tiller.timeout) + LOG.debug( + 'Tiller (Release) at: %s:%s, namespace=%s, ' + 'timeout=%s', tiller.tiller_host, tiller.tiller_port, + tiller.tiller_namespace, tiller.timeout) releases = {} for release in tiller.list_releases(): @@ -91,5 +93,4 @@ class Release(api.BaseResource): except Exception as e: err_message = 'Unable to find Tiller Releases: {}'.format(e) self.error(req.context, err_message) - self.return_error( - resp, falcon.HTTP_500, message=err_message) + self.return_error(resp, falcon.HTTP_500, message=err_message) diff --git a/armada/api/controller/validation.py b/armada/api/controller/validation.py index 9fccf391..c72ac522 100644 --- a/armada/api/controller/validation.py +++ b/armada/api/controller/validation.py @@ -47,8 +47,8 @@ class Validate(api.BaseResource): manifest = self.req_yaml(req) documents = list(manifest) - self.logger.debug("Validating set of %d documents." - % len(documents)) + self.logger.debug( + "Validating set of %d documents." % len(documents)) result, details = validate_armada_documents(documents) @@ -81,5 +81,4 @@ class Validate(api.BaseResource): except Exception as ex: err_message = 'Failed to validate Armada Manifest' self.logger.error(err_message, exc_info=ex) - self.return_error( - resp, falcon.HTTP_400, message=err_message) + self.return_error(resp, falcon.HTTP_400, message=err_message) diff --git a/armada/api/middleware.py b/armada/api/middleware.py index c3b5596f..426f23b7 100644 --- a/armada/api/middleware.py +++ b/armada/api/middleware.py @@ -93,6 +93,7 @@ class ContextMiddleware(object): class LoggingMiddleware(object): + def __init__(self): self.logger = logging.getLogger(__name__) @@ -121,8 +122,8 @@ class LoggingMiddleware(object): 'external_ctx': ctx.external_marker, } resp.append_header('X-Armada-Req', ctx.request_id) - self.logger.info("%s %s - %s" % (req.method, req.uri, resp.status), - extra=extra) + self.logger.info( + "%s %s - %s" % (req.method, req.uri, resp.status), extra=extra) self.logger.debug("Response body:%s", resp.body) def _log_headers(self, headers): diff --git a/armada/cli/apply.py b/armada/cli/apply.py index 9b740432..414d3eb5 100644 --- a/armada/cli/apply.py +++ b/armada/cli/apply.py @@ -65,72 +65,71 @@ file: SHORT_DESC = "Command installs manifest charts." -@apply.command(name='apply', - help=DESC, - short_help=SHORT_DESC) -@click.argument('locations', - nargs=-1) -@click.option('--api', - help="Contacts service endpoint.", - is_flag=True) -@click.option('--disable-update-post', - help="Disable post-update Tiller operations.", - is_flag=True) -@click.option('--disable-update-pre', - help="Disable pre-update Tiller operations.", - is_flag=True) -@click.option('--dry-run', - help="Run charts without installing them.", - is_flag=True) -@click.option('--enable-chart-cleanup', - help="Clean up unmanaged charts.", - is_flag=True) -@click.option('--use-doc-ref', - help="Use armada manifest file reference.", - is_flag=True) -@click.option('--set', - help=("Use to override Armada Manifest values. 
Accepts " - "overrides that adhere to the format " - "::= to specify a primitive or " - "::=,..., to specify " - "a list of values."), - multiple=True, - type=str, - default=[]) -@click.option('--tiller-host', - help="Tiller host IP.", - default=None) -@click.option('--tiller-port', - help="Tiller host port.", - type=int, - default=CONF.tiller_port) -@click.option('--tiller-namespace', '-tn', - help="Tiller namespace.", - type=str, - default=CONF.tiller_namespace) -@click.option('--timeout', - help="Specifies time to wait for each chart to fully " - "finish deploying.", - type=int, - default=0) -@click.option('--values', '-f', - help=("Use to override multiple Armada Manifest values by " - "reading overrides from a values.yaml-type file."), - multiple=True, - type=str, - default=[]) -@click.option('--wait', - help=("Force Tiller to wait until all charts are deployed, " - "rather than using each chart's specified wait policy. " - "This is equivalent to sequenced chartgroups."), - is_flag=True) -@click.option('--target-manifest', - help=("The target manifest to run. Required for specifying " - "which manifest to run when multiple are available."), - default=None) -@click.option('--debug', - help="Enable debug logging.", - is_flag=True) +@apply.command(name='apply', help=DESC, short_help=SHORT_DESC) +@click.argument('locations', nargs=-1) +@click.option('--api', help="Contacts service endpoint.", is_flag=True) +@click.option( + '--disable-update-post', + help="Disable post-update Tiller operations.", + is_flag=True) +@click.option( + '--disable-update-pre', + help="Disable pre-update Tiller operations.", + is_flag=True) +@click.option( + '--dry-run', help="Run charts without installing them.", is_flag=True) +@click.option( + '--enable-chart-cleanup', help="Clean up unmanaged charts.", is_flag=True) +@click.option( + '--use-doc-ref', help="Use armada manifest file reference.", is_flag=True) +@click.option( + '--set', + help=("Use to override Armada Manifest values. Accepts " + "overrides that adhere to the format " + "::= to specify a primitive or " + "::=,..., to specify " + "a list of values."), + multiple=True, + type=str, + default=[]) +@click.option('--tiller-host', help="Tiller host IP.", default=None) +@click.option( + '--tiller-port', + help="Tiller host port.", + type=int, + default=CONF.tiller_port) +@click.option( + '--tiller-namespace', + '-tn', + help="Tiller namespace.", + type=str, + default=CONF.tiller_namespace) +@click.option( + '--timeout', + help="Specifies time to wait for each chart to fully " + "finish deploying.", + type=int, + default=0) +@click.option( + '--values', + '-f', + help=("Use to override multiple Armada Manifest values by " + "reading overrides from a values.yaml-type file."), + multiple=True, + type=str, + default=[]) +@click.option( + '--wait', + help=("Force Tiller to wait until all charts are deployed, " + "rather than using each chart's specified wait policy. " + "This is equivalent to sequenced chartgroups."), + is_flag=True) +@click.option( + '--target-manifest', + help=("The target manifest to run. 
Required for specifying " + "which manifest to run when multiple are available."), + default=None) +@click.option('--debug', help="Enable debug logging.", is_flag=True) @click.pass_context def apply_create(ctx, locations, api, disable_update_post, disable_update_pre, dry_run, enable_chart_cleanup, use_doc_ref, set, tiller_host, @@ -144,23 +143,11 @@ def apply_create(ctx, locations, api, disable_update_post, disable_update_pre, class ApplyManifest(CliAction): - def __init__(self, - ctx, - locations, - api, - disable_update_post, - disable_update_pre, - dry_run, - enable_chart_cleanup, - use_doc_ref, - set, - tiller_host, - tiller_port, - tiller_namespace, - timeout, - values, - wait, - target_manifest): + + def __init__(self, ctx, locations, api, disable_update_post, + disable_update_pre, dry_run, enable_chart_cleanup, + use_doc_ref, set, tiller_host, tiller_port, tiller_namespace, + timeout, values, wait, target_manifest): super(ApplyManifest, self).__init__() self.ctx = ctx # Filename can also be a URL reference diff --git a/armada/cli/delete.py b/armada/cli/delete.py index 6d4c59b4..b1572d0e 100644 --- a/armada/cli/delete.py +++ b/armada/cli/delete.py @@ -56,27 +56,16 @@ To delete releases by the name: SHORT_DESC = "Command deletes releases." -@delete.command(name='delete', - help=DESC, - short_help=SHORT_DESC) -@click.option('--manifest', - help="Armada Manifest file.", - type=str) -@click.option('--releases', - help="Comma-separated list of release names.", - type=str) -@click.option('--no-purge', - help="Deletes release without purge option.", - is_flag=True) -@click.option('--tiller-host', - help="Tiller host IP.") -@click.option('--tiller-port', - help="Tiller host port.", - type=int, - default=44134) -@click.option('--debug', - help="Enable debug logging.", - is_flag=True) +@delete.command(name='delete', help=DESC, short_help=SHORT_DESC) +@click.option('--manifest', help="Armada Manifest file.", type=str) +@click.option( + '--releases', help="Comma-separated list of release names.", type=str) +@click.option( + '--no-purge', help="Deletes release without purge option.", is_flag=True) +@click.option('--tiller-host', help="Tiller host IP.") +@click.option( + '--tiller-port', help="Tiller host port.", type=int, default=44134) +@click.option('--debug', help="Enable debug logging.", is_flag=True) @click.pass_context def delete_charts(ctx, manifest, releases, no_purge, tiller_host, tiller_port, debug): @@ -86,6 +75,7 @@ def delete_charts(ctx, manifest, releases, no_purge, tiller_host, tiller_port, class DeleteChartManifest(CliAction): + def __init__(self, ctx, manifest, releases, no_purge, tiller_host, tiller_port): @@ -103,8 +93,10 @@ class DeleteChartManifest(CliAction): known_release_names = [release[0] for release in tiller.list_charts()] if self.releases: - target_releases = [r.strip() for r in self.releases.split(',') - if r.strip() in known_release_names] + target_releases = [ + r.strip() for r in self.releases.split(',') + if r.strip() in known_release_names + ] if not target_releases: self.logger.info("There's no release to delete.") return @@ -131,14 +123,16 @@ class DeleteChartManifest(CliAction): const.KEYWORD_GROUPS): for ch in group.get(const.KEYWORD_CHARTS): release_name = release_prefixer( - prefix, ch.get('chart').get('release')) + prefix, + ch.get('chart').get('release')) if release_name in known_release_names: target_releases.append(release_name) except yaml.YAMLError as e: mark = e.problem_mark - self.logger.info("While parsing the manifest file, %s. 
" - "Error position: (%s:%s)", e.problem, - mark.line + 1, mark.column + 1) + self.logger.info( + "While parsing the manifest file, %s. " + "Error position: (%s:%s)", e.problem, mark.line + 1, + mark.column + 1) if not target_releases: self.logger.info("There's no release to delete.") diff --git a/armada/cli/rollback.py b/armada/cli/rollback.py index 02395b70..01453593 100644 --- a/armada/cli/rollback.py +++ b/armada/cli/rollback.py @@ -41,48 +41,46 @@ To rollback a release, run: SHORT_DESC = "Command performs a release rollback." -@rollback.command(name='rollback', - help=DESC, - short_help=SHORT_DESC) -@click.option('--release', - help="Release to rollback.", - type=str) -@click.option('--version', - help="Version of release to rollback to. 0 represents the " - "previous release", - type=int, - default=0) -@click.option('--dry-run', - help="Perform a dry-run rollback.", - is_flag=True) -@click.option('--tiller-host', - help="Tiller host IP.", - default=None) -@click.option('--tiller-port', - help="Tiller host port.", - type=int, - default=CONF.tiller_port) -@click.option('--tiller-namespace', '-tn', - help="Tiller namespace.", - type=str, - default=CONF.tiller_namespace) -@click.option('--timeout', - help="Specifies time to wait for rollback to complete.", - type=int, - default=0) -@click.option('--wait', - help=("Wait until rollback is complete before returning."), - is_flag=True) -@click.option('--force', - help=("Force resource update through delete/recreate if" - " needed."), - is_flag=True) -@click.option('--recreate-pods', - help=("Restarts pods for the resource if applicable."), - is_flag=True) -@click.option('--debug', - help="Enable debug logging.", - is_flag=True) +@rollback.command(name='rollback', help=DESC, short_help=SHORT_DESC) +@click.option('--release', help="Release to rollback.", type=str) +@click.option( + '--version', + help="Version of release to rollback to. 
0 represents the " + "previous release", + type=int, + default=0) +@click.option('--dry-run', help="Perform a dry-run rollback.", is_flag=True) +@click.option('--tiller-host', help="Tiller host IP.", default=None) +@click.option( + '--tiller-port', + help="Tiller host port.", + type=int, + default=CONF.tiller_port) +@click.option( + '--tiller-namespace', + '-tn', + help="Tiller namespace.", + type=str, + default=CONF.tiller_namespace) +@click.option( + '--timeout', + help="Specifies time to wait for rollback to complete.", + type=int, + default=0) +@click.option( + '--wait', + help=("Wait until rollback is complete before returning."), + is_flag=True) +@click.option( + '--force', + help=("Force resource update through delete/recreate if" + " needed."), + is_flag=True) +@click.option( + '--recreate-pods', + help=("Restarts pods for the resource if applicable."), + is_flag=True) +@click.option('--debug', help="Enable debug logging.", is_flag=True) @click.pass_context def rollback_charts(ctx, release, version, dry_run, tiller_host, tiller_port, tiller_namespace, timeout, wait, force, recreate_pods, @@ -94,17 +92,9 @@ def rollback_charts(ctx, release, version, dry_run, tiller_host, tiller_port, class Rollback(CliAction): - def __init__(self, - ctx, - release, - version, - dry_run, - tiller_host, - tiller_port, - tiller_namespace, - timeout, - wait, - force, + + def __init__(self, ctx, release, version, dry_run, tiller_host, + tiller_port, tiller_namespace, timeout, wait, force, recreate_pods): super(Rollback, self).__init__() self.ctx = ctx @@ -121,8 +111,10 @@ class Rollback(CliAction): def invoke(self): tiller = Tiller( - tiller_host=self.tiller_host, tiller_port=self.tiller_port, - tiller_namespace=self.tiller_namespace, dry_run=self.dry_run) + tiller_host=self.tiller_host, + tiller_port=self.tiller_port, + tiller_namespace=self.tiller_namespace, + dry_run=self.dry_run) response = tiller.rollback_release( self.release, @@ -135,5 +127,7 @@ class Rollback(CliAction): self.output(response) def output(self, response): - self.logger.info(('(dry run) ' if self.dry_run else '') + - 'Rollback of %s complete.', self.release) + self.logger.info( + ('(dry run) ' + if self.dry_run else '') + 'Rollback of %s complete.', + self.release) diff --git a/armada/cli/test.py b/armada/cli/test.py index cf926849..ad37abac 100644 --- a/armada/cli/test.py +++ b/armada/cli/test.py @@ -54,43 +54,37 @@ To test release: SHORT_DESC = "Command tests releases." -@test.command(name='test', - help=DESC, - short_help=SHORT_DESC) -@click.option('--file', - help="Armada manifest.", - type=str) -@click.option('--release', - help="Helm release.", - type=str) -@click.option('--tiller-host', - help="Tiller host IP.", - default=None) -@click.option('--tiller-port', - help="Tiller host port.", - type=int, - default=CONF.tiller_port) -@click.option('--tiller-namespace', '-tn', - help="Tiller Namespace.", - type=str, - default=CONF.tiller_namespace) -@click.option('--target-manifest', - help=("The target manifest to run. 
Required for specifying " - "which manifest to run when multiple are available."), - default=None) -@click.option('--debug', - help="Enable debug logging.", - is_flag=True) +@test.command(name='test', help=DESC, short_help=SHORT_DESC) +@click.option('--file', help="Armada manifest.", type=str) +@click.option('--release', help="Helm release.", type=str) +@click.option('--tiller-host', help="Tiller host IP.", default=None) +@click.option( + '--tiller-port', + help="Tiller host port.", + type=int, + default=CONF.tiller_port) +@click.option( + '--tiller-namespace', + '-tn', + help="Tiller Namespace.", + type=str, + default=CONF.tiller_namespace) +@click.option( + '--target-manifest', + help=("The target manifest to run. Required for specifying " + "which manifest to run when multiple are available."), + default=None) +@click.option('--debug', help="Enable debug logging.", is_flag=True) @click.pass_context def test_charts(ctx, file, release, tiller_host, tiller_port, tiller_namespace, target_manifest, debug): CONF.debug = debug - TestChartManifest( - ctx, file, release, tiller_host, tiller_port, tiller_namespace, - target_manifest).safe_invoke() + TestChartManifest(ctx, file, release, tiller_host, tiller_port, + tiller_namespace, target_manifest).safe_invoke() class TestChartManifest(CliAction): + def __init__(self, ctx, file, release, tiller_host, tiller_port, tiller_namespace, target_manifest): @@ -125,8 +119,8 @@ class TestChartManifest(CliAction): 'tiller_port': self.tiller_port, 'tiller_namespace': self.tiller_namespace } - resp = client.get_test_release(release=self.release, - query=query) + resp = client.get_test_release( + release=self.release, query=query) self.logger.info(resp.get('result')) self.logger.info(resp.get('message')) @@ -144,7 +138,8 @@ class TestChartManifest(CliAction): const.KEYWORD_GROUPS): for ch in group.get(const.KEYWORD_CHARTS): release_name = release_prefixer( - prefix, ch.get('chart').get('release')) + prefix, + ch.get('chart').get('release')) if release_name in known_release_names: self.logger.info('RUNNING: %s tests', release_name) @@ -156,9 +151,8 @@ class TestChartManifest(CliAction): self.logger.info("FAILED: %s", release_name) else: - self.logger.info( - 'Release %s not found - SKIPPING', - release_name) + self.logger.info('Release %s not found - SKIPPING', + release_name) else: client = self.ctx.obj.get('CLIENT') query = { @@ -168,8 +162,8 @@ class TestChartManifest(CliAction): } with open(self.filename, 'r') as f: - resp = client.get_test_manifest(manifest=f.read(), - query=query) + resp = client.get_test_manifest( + manifest=f.read(), query=query) for test in resp.get('tests'): self.logger.info('Test State: %s', test) for item in test.get('tests').get(test): diff --git a/armada/cli/tiller.py b/armada/cli/tiller.py index 9feecc24..1f8b8274 100644 --- a/armada/cli/tiller.py +++ b/armada/cli/tiller.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. - import click from oslo_config import cfg @@ -47,29 +46,22 @@ To obtain Tiller service status/information: SHORT_DESC = "Command gets Tiller information." 
-@tiller.command(name='tiller', - help=DESC, - short_help=SHORT_DESC) -@click.option('--tiller-host', - help="Tiller host IP.", - default=None) -@click.option('--tiller-port', - help="Tiller host port.", - type=int, - default=CONF.tiller_port) -@click.option('--tiller-namespace', '-tn', - help="Tiller namespace.", - type=str, - default=CONF.tiller_namespace) -@click.option('--releases', - help="List of deployed releases.", - is_flag=True) -@click.option('--status', - help="Status of Armada services.", - is_flag=True) -@click.option('--debug', - help="Enable debug logging.", - is_flag=True) +@tiller.command(name='tiller', help=DESC, short_help=SHORT_DESC) +@click.option('--tiller-host', help="Tiller host IP.", default=None) +@click.option( + '--tiller-port', + help="Tiller host port.", + type=int, + default=CONF.tiller_port) +@click.option( + '--tiller-namespace', + '-tn', + help="Tiller namespace.", + type=str, + default=CONF.tiller_namespace) +@click.option('--releases', help="List of deployed releases.", is_flag=True) +@click.option('--status', help="Status of Armada services.", is_flag=True) +@click.option('--debug', help="Enable debug logging.", is_flag=True) @click.pass_context def tiller_service(ctx, tiller_host, tiller_port, tiller_namespace, releases, status, debug): @@ -93,7 +85,8 @@ class TillerServices(CliAction): def invoke(self): tiller = Tiller( - tiller_host=self.tiller_host, tiller_port=self.tiller_port, + tiller_host=self.tiller_host, + tiller_port=self.tiller_port, tiller_namespace=self.tiller_namespace) if self.status: @@ -117,9 +110,8 @@ class TillerServices(CliAction): if self.releases: if not self.ctx.obj.get('api', False): for release in tiller.list_releases(): - self.logger.info( - "Release %s in namespace: %s", - release.name, release.namespace) + self.logger.info("Release %s in namespace: %s", + release.name, release.namespace) else: client = self.ctx.obj.get('CLIENT') query = { @@ -130,6 +122,5 @@ class TillerServices(CliAction): resp = client.get_releases(query=query) for namespace in resp.get('releases'): for release in resp.get('releases').get(namespace): - self.logger.info( - 'Release %s in namespace: %s', release, - namespace) + self.logger.info('Release %s in namespace: %s', + release, namespace) diff --git a/armada/cli/validate.py b/armada/cli/validate.py index 2133c069..d5470456 100644 --- a/armada/cli/validate.py +++ b/armada/cli/validate.py @@ -42,14 +42,9 @@ The validate argument must be a relative path to Armada manifest SHORT_DESC = "Command validates Armada Manifest." -@validate.command(name='validate', - help=DESC, - short_help=SHORT_DESC) -@click.argument('locations', - nargs=-1) -@click.option('--debug', - help="Enable debug logging.", - is_flag=True) +@validate.command(name='validate', help=DESC, short_help=SHORT_DESC) +@click.argument('locations', nargs=-1) +@click.option('--debug', help="Enable debug logging.", is_flag=True) @click.pass_context def validate_manifest(ctx, locations, debug): CONF.debug = debug @@ -57,6 +52,7 @@ def validate_manifest(ctx, locations, debug): class ValidateManifest(CliAction): + def __init__(self, ctx, locations): super(ValidateManifest, self).__init__() self.ctx = ctx @@ -87,10 +83,8 @@ class ValidateManifest(CliAction): 'validation: %s', self.locations) else: if len(self.locations) > 1: - self.logger.error( - "Cannot specify multiple locations " - "when using validate API." 
- ) + self.logger.error("Cannot specify multiple locations " + "when using validate API.") return client = self.ctx.obj.get('CLIENT') diff --git a/armada/common/client.py b/armada/common/client.py index 01945f05..7d85fc13 100644 --- a/armada/common/client.py +++ b/armada/common/client.py @@ -27,6 +27,7 @@ API_VERSION = 'v{}/{}' class ArmadaClient(object): + def __init__(self, session): self.session = session @@ -61,9 +62,7 @@ class ArmadaClient(object): resp = self.session.post( endpoint, data=req_body, - headers={ - 'content-type': 'application/json' - }, + headers={'content-type': 'application/json'}, timeout=timeout) self._check_response(resp) @@ -107,9 +106,7 @@ class ArmadaClient(object): endpoint, body=manifest, query=query, - headers={ - 'content-type': 'application/x-yaml' - }, + headers={'content-type': 'application/x-yaml'}, timeout=timeout) elif manifest_ref: req_body = { @@ -120,9 +117,7 @@ class ArmadaClient(object): endpoint, data=req_body, query=query, - headers={ - 'content-type': 'application/json' - }, + headers={'content-type': 'application/json'}, timeout=timeout) self._check_response(resp) @@ -150,8 +145,8 @@ class ArmadaClient(object): def post_test_manifest(self, manifest=None, query=None, timeout=None): endpoint = self._set_endpoint('1.0', 'tests') - resp = self.session.post(endpoint, body=manifest, query=query, - timeout=timeout) + resp = self.session.post( + endpoint, body=manifest, query=query, timeout=timeout) self._check_response(resp) @@ -165,5 +160,5 @@ class ArmadaClient(object): elif resp.status_code == 403: raise err.ClientForbiddenError("Forbidden access to %s" % resp.url) elif not resp.ok: - raise err.ClientError("Error - received %d: %s" % - (resp.status_code, resp.text)) + raise err.ClientError( + "Error - received %d: %s" % (resp.status_code, resp.text)) diff --git a/armada/common/i18n.py b/armada/common/i18n.py index fd7caac6..b917eb15 100644 --- a/armada/common/i18n.py +++ b/armada/common/i18n.py @@ -12,7 +12,6 @@ import oslo_i18n - _translators = oslo_i18n.TranslatorFactory(domain='armada') # The primary translation function using the well-known name "_" diff --git a/armada/common/policies/__init__.py b/armada/common/policies/__init__.py index a455a121..d7eaf834 100644 --- a/armada/common/policies/__init__.py +++ b/armada/common/policies/__init__.py @@ -18,8 +18,5 @@ from armada.common.policies import tiller def list_rules(): - return itertools.chain( - base.list_rules(), - service.list_rules(), - tiller.list_rules() - ) + return itertools.chain(base.list_rules(), service.list_rules(), + tiller.list_rules()) diff --git a/armada/common/policies/base.py b/armada/common/policies/base.py index 98913320..3b035367 100644 --- a/armada/common/policies/base.py +++ b/armada/common/policies/base.py @@ -19,14 +19,12 @@ RULE_ADMIN_OR_TARGET_PROJECT = ( 'rule:admin_required or project_id:%(target.project.id)s') RULE_SERVICE_OR_ADMIN = 'rule:service_or_admin' - rules = [ - policy.RuleDefault(name='admin_required', - check_str='role:admin'), - policy.RuleDefault(name='service_or_admin', - check_str='rule:admin_required or rule:service_role'), - policy.RuleDefault(name='service_role', - check_str='role:service'), + policy.RuleDefault(name='admin_required', check_str='role:admin'), + policy.RuleDefault( + name='service_or_admin', + check_str='rule:admin_required or rule:service_role'), + policy.RuleDefault(name='service_role', check_str='role:service'), ] diff --git a/armada/common/policies/service.py b/armada/common/policies/service.py index f5b54633..00cdf753 
100644 --- a/armada/common/policies/service.py +++ b/armada/common/policies/service.py @@ -14,34 +14,47 @@ from oslo_policy import policy from armada.common.policies import base - armada_policies = [ policy.DocumentedRuleDefault( name=base.ARMADA % 'create_endpoints', check_str=base.RULE_ADMIN_REQUIRED, description='Install manifest charts', - operations=[{'path': '/api/v1.0/apply/', 'method': 'POST'}]), + operations=[{ + 'path': '/api/v1.0/apply/', + 'method': 'POST' + }]), policy.DocumentedRuleDefault( name=base.ARMADA % 'validate_manifest', check_str=base.RULE_ADMIN_REQUIRED, description='Validate manifest', - operations=[{'path': '/api/v1.0/validatedesign/', 'method': 'POST'}]), + operations=[{ + 'path': '/api/v1.0/validatedesign/', + 'method': 'POST' + }]), policy.DocumentedRuleDefault( name=base.ARMADA % 'test_release', check_str=base.RULE_ADMIN_REQUIRED, description='Test release', - operations=[{'path': '/api/v1.0/test/{release}', 'method': 'GET'}]), + operations=[{ + 'path': '/api/v1.0/test/{release}', + 'method': 'GET' + }]), policy.DocumentedRuleDefault( name=base.ARMADA % 'test_manifest', check_str=base.RULE_ADMIN_REQUIRED, description='Test manifest', - operations=[{'path': '/api/v1.0/tests/', 'method': 'POST'}]), + operations=[{ + 'path': '/api/v1.0/tests/', + 'method': 'POST' + }]), policy.DocumentedRuleDefault( name=base.ARMADA % 'rollback_release', check_str=base.RULE_ADMIN_REQUIRED, description='Rollback release', - operations=[{'path': '/api/v1.0/rollback/{release}', 'method': 'POST'}] - ), + operations=[{ + 'path': '/api/v1.0/rollback/{release}', + 'method': 'POST' + }]), ] diff --git a/armada/common/policies/tiller.py b/armada/common/policies/tiller.py index 1eb4a10b..b1d9d4c1 100644 --- a/armada/common/policies/tiller.py +++ b/armada/common/policies/tiller.py @@ -14,19 +14,23 @@ from oslo_policy import policy from armada.common.policies import base - tiller_policies = [ policy.DocumentedRuleDefault( name=base.TILLER % 'get_status', check_str=base.RULE_ADMIN_REQUIRED, description='Get Tiller status', - operations=[{'path': '/api/v1.0/status/', 'method': 'GET'}]), - + operations=[{ + 'path': '/api/v1.0/status/', + 'method': 'GET' + }]), policy.DocumentedRuleDefault( name=base.TILLER % 'get_release', check_str=base.RULE_ADMIN_REQUIRED, description='Get Tiller release', - operations=[{'path': '/api/v1.0/releases/', 'method': 'GET'}]), + operations=[{ + 'path': '/api/v1.0/releases/', + 'method': 'GET' + }]), ] diff --git a/armada/common/policy.py b/armada/common/policy.py index 4e095688..552fd427 100644 --- a/armada/common/policy.py +++ b/armada/common/policy.py @@ -18,7 +18,6 @@ from oslo_policy import policy from armada.common import policies from armada.exceptions import base_exception as exc - CONF = cfg.CONF _ENFORCER = None @@ -46,14 +45,18 @@ def _enforce_policy(action, target, credentials, do_raise=True): def enforce(rule): + def decorator(func): + @functools.wraps(func) def handler(*args, **kwargs): setup_policy() context = args[1].context _enforce_policy(rule, {}, context, do_raise=True) return func(*args, **kwargs) + return handler + return decorator diff --git a/armada/common/session.py b/armada/common/session.py index d1db8daa..ce84bbbf 100644 --- a/armada/common/session.py +++ b/armada/common/session.py @@ -35,8 +35,13 @@ class ArmadaSession(object): read timeout to use """ - def __init__(self, host, port=None, scheme='http', token=None, - marker=None, timeout=None): + def __init__(self, + host, + port=None, + scheme='http', + token=None, + marker=None, + 
timeout=None): self._session = requests.Session() self._session.headers.update({ @@ -48,11 +53,10 @@ class ArmadaSession(object): if port: self.port = port - self.base_url = "{}://{}:{}/api/".format( - self.scheme, self.host, self.port) + self.base_url = "{}://{}:{}/api/".format(self.scheme, self.host, + self.port) else: - self.base_url = "{}://{}/api/".format( - self.scheme, self.host) + self.base_url = "{}://{}/api/".format(self.scheme, self.host) self.default_timeout = ArmadaSession._calc_timeout_tuple((20, 3600), timeout) @@ -75,15 +79,21 @@ class ArmadaSession(object): api_url = '{}{}'.format(self.base_url, endpoint) req_timeout = self._timeout(timeout) - self.logger.debug("Sending armada_client session GET %s with " - "params=[%s], headers=[%s], timeout=[%s]", - api_url, query, headers, req_timeout) + self.logger.debug( + "Sending armada_client session GET %s with " + "params=[%s], headers=[%s], timeout=[%s]", api_url, query, headers, + req_timeout) resp = self._session.get( api_url, params=query, headers=headers, timeout=req_timeout) return resp - def post(self, endpoint, query=None, body=None, data=None, headers=None, + def post(self, + endpoint, + query=None, + body=None, + data=None, + headers=None, timeout=None): """ Send a POST request to armada. If both body and data are specified, @@ -101,23 +111,26 @@ class ArmadaSession(object): api_url = '{}{}'.format(self.base_url, endpoint) req_timeout = self._timeout(timeout) - self.logger.debug("Sending armada_client session POST %s with " - "params=[%s], headers=[%s], timeout=[%s]", - api_url, query, headers, req_timeout) + self.logger.debug( + "Sending armada_client session POST %s with " + "params=[%s], headers=[%s], timeout=[%s]", api_url, query, headers, + req_timeout) if body is not None: self.logger.debug("Sending POST with explicit body: \n%s" % body) - resp = self._session.post(api_url, - params=query, - data=body, - headers=headers, - timeout=req_timeout) + resp = self._session.post( + api_url, + params=query, + data=body, + headers=headers, + timeout=req_timeout) else: self.logger.debug("Sending POST with JSON body: \n%s" % str(data)) - resp = self._session.post(api_url, - params=query, - json=data, - headers=headers, - timeout=req_timeout) + resp = self._session.post( + api_url, + params=query, + json=data, + headers=headers, + timeout=req_timeout) return resp @@ -145,7 +158,7 @@ class ArmadaSession(object): try: if isinstance(timeout, tuple): if all(isinstance(v, int) - for v in timeout) and len(timeout) == 2: + for v in timeout) and len(timeout) == 2: connect_timeout, read_timeout = timeout else: raise ValueError("Tuple non-integer or wrong length") @@ -154,8 +167,8 @@ class ArmadaSession(object): elif timeout is not None: raise ValueError("Non integer timeout value") except ValueError: - LOG.warn("Timeout value must be a tuple of integers or a single" - " integer. Proceeding with values of (%s, %s)", - connect_timeout, - read_timeout) + LOG.warn( + "Timeout value must be a tuple of integers or a single" + " integer. Proceeding with values of (%s, %s)", + connect_timeout, read_timeout) return (connect_timeout, read_timeout) diff --git a/armada/conf/__init__.py b/armada/conf/__init__.py index dc55175d..8fbcbdd5 100644 --- a/armada/conf/__init__.py +++ b/armada/conf/__init__.py @@ -54,10 +54,7 @@ def set_default_for_default_log_levels(): This function needs to be called before CONF(). 
""" - extra_log_level_defaults = [ - 'kubernetes.client.rest=INFO' - ] + extra_log_level_defaults = ['kubernetes.client.rest=INFO'] - log.set_defaults( - default_log_levels=log.get_default_log_levels() + - extra_log_level_defaults) + log.set_defaults(default_log_levels=log.get_default_log_levels() + + extra_log_level_defaults) diff --git a/armada/conf/default.py b/armada/conf/default.py index a60a90e7..6418bb3b 100644 --- a/armada/conf/default.py +++ b/armada/conf/default.py @@ -17,45 +17,37 @@ from oslo_config import cfg from armada.conf import utils - default_options = [ - cfg.ListOpt( 'armada_apply_roles', default=['admin'], help=utils.fmt('IDs of approved API access roles.')), - cfg.StrOpt( 'auth_url', default='http://0.0.0.0/v3', help=utils.fmt('The default Keystone authentication url.')), - cfg.StrOpt( 'certs', default=None, help=utils.fmt(""" Absolute path to the certificate file to use for chart registries """)), - cfg.StrOpt( 'kubernetes_config_path', default='/home/user/.kube/', help=utils.fmt('Path to Kubernetes configurations.')), - cfg.BoolOpt( 'middleware', default=True, help=utils.fmt(""" Enables or disables Keystone authentication middleware. """)), - cfg.StrOpt( 'project_domain_name', default='default', help=utils.fmt(""" The Keystone project domain name used for authentication. """)), - cfg.StrOpt( 'project_name', default='admin', @@ -69,22 +61,18 @@ The Keystone project domain name used for authentication. help=utils.fmt("""Optional path to an SSH private key used for authenticating against a Git source repository. The path must be an absolute path to the private key that includes the name of the key itself.""")), - cfg.StrOpt( 'tiller_pod_labels', default='app=helm,name=tiller', help=utils.fmt('Labels for the Tiller pod.')), - cfg.StrOpt( 'tiller_namespace', default='kube-system', help=utils.fmt('Namespace for the Tiller pod.')), - cfg.IntOpt( 'tiller_port', default=44134, help=utils.fmt('Port for the Tiller pod.')), - cfg.ListOpt( 'tiller_release_roles', default=['admin'], @@ -99,11 +87,11 @@ def register_opts(conf): def list_opts(): return { - 'DEFAULT': default_options, - 'keystone_authtoken': ( - ks_loading.get_session_conf_options() + - ks_loading.get_auth_common_conf_options() + - ks_loading.get_auth_plugin_conf_options('password') + - ks_loading.get_auth_plugin_conf_options('v3password') - ) + 'DEFAULT': + default_options, + 'keystone_authtoken': + (ks_loading.get_session_conf_options() + + ks_loading.get_auth_common_conf_options() + + ks_loading.get_auth_plugin_conf_options('password') + + ks_loading.get_auth_plugin_conf_options('v3password')) } diff --git a/armada/conf/opts.py b/armada/conf/opts.py index 8329510c..21f98d23 100644 --- a/armada/conf/opts.py +++ b/armada/conf/opts.py @@ -33,7 +33,6 @@ import importlib import os import pkgutil - LIST_OPTS_FUNC_NAME = 'list_opts' IGNORED_MODULES = ('opts', 'constants', 'utils') @@ -71,9 +70,8 @@ def _import_modules(module_names): if not hasattr(module, LIST_OPTS_FUNC_NAME): raise Exception( "The module '%s' should have a '%s' function which " - "returns the config options." % ( - full_module_path, - LIST_OPTS_FUNC_NAME)) + "returns the config options." 
% (full_module_path, + LIST_OPTS_FUNC_NAME)) else: imported_modules.append(module) return imported_modules diff --git a/armada/errors.py b/armada/errors.py index 5fba2e3d..eb99d616 100644 --- a/armada/errors.py +++ b/armada/errors.py @@ -19,7 +19,6 @@ import falcon from oslo_config import cfg from oslo_log import log as logging - LOG = logging.getLogger(__name__) CONF = cfg.CONF @@ -97,8 +96,10 @@ def format_error_resp(req, # message list as well. In both cases, if the error flag is not # set, set it appropriately. if error_list is None: - error_list = [{'message': 'An error occurred, but was not specified', - 'error': True}] + error_list = [{ + 'message': 'An error occurred, but was not specified', + 'error': True + }] else: for error_item in error_list: if 'error' not in error_item: @@ -145,9 +146,11 @@ def default_error_serializer(req, resp, exception): message=exception.description, reason=exception.title, error_type=exception.__class__.__name__, - error_list=[{'message': exception.description, 'error': True}], - info_list=None - ) + error_list=[{ + 'message': exception.description, + 'error': True + }], + info_list=None) def default_exception_handler(ex, req, resp, params): @@ -168,8 +171,7 @@ def default_exception_handler(ex, req, resp, params): falcon.HTTP_500, error_type=ex.__class__.__name__, message="Unhandled Exception raised: %s" % str(ex), - retry=True - ) + retry=True) class AppError(Exception): @@ -180,13 +182,15 @@ class AppError(Exception): title = 'Internal Server Error' status = falcon.HTTP_500 - def __init__(self, - title=None, - description=None, - error_list=None, - info_list=None, - status=None, - retry=False,): + def __init__( + self, + title=None, + description=None, + error_list=None, + info_list=None, + status=None, + retry=False, + ): """ :param description: The internal error description :param error_list: The list of errors diff --git a/armada/exceptions/__init__.py b/armada/exceptions/__init__.py index 203dc6b2..d2843537 100644 --- a/armada/exceptions/__init__.py +++ b/armada/exceptions/__init__.py @@ -14,5 +14,4 @@ from armada.exceptions.manifest_exceptions import ManifestException - __all__ = ['ManifestException'] diff --git a/armada/exceptions/armada_exceptions.py b/armada/exceptions/armada_exceptions.py index 51db8359..9343fd0c 100644 --- a/armada/exceptions/armada_exceptions.py +++ b/armada/exceptions/armada_exceptions.py @@ -37,6 +37,6 @@ class ProtectedReleaseException(ArmadaException): def __init__(self, reason): self._message = ( - 'Armada encountered protected release %s in FAILED status' % reason - ) + 'Armada encountered protected release %s in FAILED status' % + reason) super(ProtectedReleaseException, self).__init__(self._message) diff --git a/armada/exceptions/base_exception.py b/armada/exceptions/base_exception.py index c57348ab..341c4199 100644 --- a/armada/exceptions/base_exception.py +++ b/armada/exceptions/base_exception.py @@ -43,9 +43,8 @@ class ArmadaAPIException(falcon.HTTPError): def __init__(self, message=None, **kwargs): self.message = message or self.message - super(ArmadaAPIException, self).__init__( - self.status, self.title, self.message, **kwargs - ) + super(ArmadaAPIException, self).__init__(self.status, self.title, + self.message, **kwargs) class ActionForbidden(ArmadaAPIException): diff --git a/armada/exceptions/chartbuilder_exceptions.py b/armada/exceptions/chartbuilder_exceptions.py index 4a87ae3e..deff92c6 100644 --- a/armada/exceptions/chartbuilder_exceptions.py +++ b/armada/exceptions/chartbuilder_exceptions.py @@ -48,9 
+48,10 @@ class HelmChartBuildException(ChartBuilderException): def __init__(self, chart_name, details): self._chart_name = chart_name self._message = ('Failed to build Helm chart for {chart_name}. ' - 'Details: {details}'.format( - **{'chart_name': chart_name, - 'details': details})) + 'Details: {details}'.format(**{ + 'chart_name': chart_name, + 'details': details + })) super(HelmChartBuildException, self).__init__(self._message) diff --git a/armada/exceptions/override_exceptions.py b/armada/exceptions/override_exceptions.py index a437522a..e60108db 100644 --- a/armada/exceptions/override_exceptions.py +++ b/armada/exceptions/override_exceptions.py @@ -59,7 +59,7 @@ class InvalidOverrideValueException(OverrideException): def __init__(self, override_command): self._message = '{} is not a valid override statement.'.format( - override_command) + override_command) super(InvalidOverrideValueException, self).__init__(self._message) diff --git a/armada/exceptions/source_exceptions.py b/armada/exceptions/source_exceptions.py index 10f4fb58..9aaac422 100644 --- a/armada/exceptions/source_exceptions.py +++ b/armada/exceptions/source_exceptions.py @@ -69,8 +69,8 @@ class GitSSHException(SourceException): def __init__(self, ssh_key_path): self._ssh_key_path = ssh_key_path - self._message = ( - 'Failed to find specified SSH key: {}.'.format(self._ssh_key_path)) + self._message = ('Failed to find specified SSH key: {}.'.format( + self._ssh_key_path)) super(GitSSHException, self).__init__(self._message) diff --git a/armada/exceptions/tiller_exceptions.py b/armada/exceptions/tiller_exceptions.py index 6c620a1a..b6e67004 100644 --- a/armada/exceptions/tiller_exceptions.py +++ b/armada/exceptions/tiller_exceptions.py @@ -52,8 +52,7 @@ class PostUpdateJobDeleteException(TillerException): def __init__(self, name, namespace): - message = 'Failed to delete k8s job {} in {}'.format( - name, namespace) + message = 'Failed to delete k8s job {} in {}'.format(name, namespace) super(PostUpdateJobDeleteException, self).__init__(message) @@ -68,8 +67,7 @@ class PostUpdateJobCreateException(TillerException): def __init__(self, name, namespace): - message = 'Failed to create k8s job {} in {}'.format( - name, namespace) + message = 'Failed to create k8s job {} in {}'.format(name, namespace) super(PostUpdateJobCreateException, self).__init__(message) @@ -84,8 +82,7 @@ class PreUpdateJobDeleteException(TillerException): def __init__(self, name, namespace): - message = 'Failed to delete k8s job {} in {}'.format( - name, namespace) + message = 'Failed to delete k8s job {} in {}'.format(name, namespace) super(PreUpdateJobDeleteException, self).__init__(message) @@ -95,8 +92,7 @@ class PreUpdateJobCreateException(TillerException): def __init__(self, name, namespace): - message = 'Failed to create k8s job {} in {}'.format( - name, namespace) + message = 'Failed to create k8s job {} in {}'.format(name, namespace) super(PreUpdateJobCreateException, self).__init__(message) @@ -152,8 +148,7 @@ class GetReleaseStatusException(TillerException): ''' def __init__(self, release, version): - message = 'Failed to get {} status {} version'.format( - release, version) + message = 'Failed to get {} status {} version'.format(release, version) super(GetReleaseStatusException, self).__init__(message) diff --git a/armada/handlers/armada.py b/armada/handlers/armada.py index 622d2d45..030c38e4 100644 --- a/armada/handlers/armada.py +++ b/armada/handlers/armada.py @@ -33,7 +33,6 @@ from armada.utils.release import release_prefixer from 
armada.utils import source from armada.utils import validate - LOG = logging.getLogger(__name__) CONF = cfg.CONF @@ -97,8 +96,10 @@ class Armada(object): # TODO: Use dependency injection i.e. pass in a Tiller instead of # creating it here. self.tiller = Tiller( - tiller_host=tiller_host, tiller_port=tiller_port, - tiller_namespace=tiller_namespace, dry_run=dry_run) + tiller_host=tiller_host, + tiller_port=tiller_port, + tiller_namespace=tiller_namespace, + dry_run=dry_run) self.documents = Override( documents, overrides=set_ovr, values=values).update_manifests() self.k8s_wait_attempts = k8s_wait_attempts @@ -166,8 +167,7 @@ class Armada(object): LOG.info('Downloading tarball from: %s', location) if not CONF.certs: - LOG.warn( - 'Disabling server validation certs to extract charts') + LOG.warn('Disabling server validation certs to extract charts') tarball_dir = source.get_tarball(location, verify=False) else: tarball_dir = source.get_tarball(location, verify=CONF.cert) @@ -189,9 +189,10 @@ class Armada(object): logstr += ' auth method: {}'.format(auth_method) LOG.info(logstr) - repo_dir = source.git_clone(*repo_branch, - proxy_server=proxy_server, - auth_method=auth_method) + repo_dir = source.git_clone( + *repo_branch, + proxy_server=proxy_server, + auth_method=auth_method) repos[repo_branch] = repo_dir chart['source_dir'] = (repo_dir, subpath) @@ -216,8 +217,8 @@ class Armada(object): else: # tiller.list_charts() only looks at DEPLOYED/FAILED so # this should be unreachable - LOG.debug('Ignoring release %s in status %s.', - release[0], release[4]) + LOG.debug('Ignoring release %s in status %s.', release[0], + release[4]) return deployed_releases, failed_releases @@ -251,9 +252,10 @@ class Armada(object): cg_desc = chartgroup.get('description', '') cg_sequenced = chartgroup.get('sequenced', False) cg_test_all_charts = chartgroup.get('test_charts', False) - LOG.info('Processing ChartGroup: %s (%s), sequenced=%s, ' - 'test_charts=%s', cg_name, cg_desc, cg_sequenced, - cg_test_all_charts) + LOG.info( + 'Processing ChartGroup: %s (%s), sequenced=%s, ' + 'test_charts=%s', cg_name, cg_desc, cg_sequenced, + cg_test_all_charts) ns_label_set = set() tests_to_run = [] @@ -281,16 +283,17 @@ class Armada(object): release_name) if protected: if p_continue: - LOG.warn('Release %s is `protected`, ' - 'continue_processing=True. Operator must ' - 'handle FAILED release manually.', - release_name) + LOG.warn( + 'Release %s is `protected`, ' + 'continue_processing=True. 
Operator must ' + 'handle FAILED release manually.', + release_name) msg['protected'].append(release_name) continue else: - LOG.error('Release %s is `protected`, ' - 'continue_processing=False.', - release_name) + LOG.error( + 'Release %s is `protected`, ' + 'continue_processing=False.', release_name) raise armada_exceptions.ProtectedReleaseException( release_name) else: @@ -310,9 +313,9 @@ class Armada(object): wait_labels = wait_values.get('labels', {}) # Determine wait logic - this_chart_should_wait = ( - cg_sequenced or self.force_wait or - wait_timeout > 0 or len(wait_labels) > 0) + this_chart_should_wait = (cg_sequenced or self.force_wait or + wait_timeout > 0 or + len(wait_labels) > 0) if this_chart_should_wait and wait_timeout <= 0: LOG.warn('No Chart timeout specified, using default: %ss', @@ -366,9 +369,10 @@ class Armada(object): # TODO(alanmeadows) account for .files differences # once we support those LOG.info('Checking upgrade chart diffs.') - upgrade_diff = self.show_diff( - chart, apply_chart, apply_values, - chartbuilder.dump(), values, msg) + upgrade_diff = self.show_diff(chart, apply_chart, + apply_values, + chartbuilder.dump(), values, + msg) if not upgrade_diff: LOG.info("There are no updates found in this chart") @@ -395,9 +399,8 @@ class Armada(object): recreate_pods=recreate_pods) if this_chart_should_wait: - self._wait_until_ready( - release_name, wait_labels, namespace, timer - ) + self._wait_until_ready(release_name, wait_labels, + namespace, timer) # Track namespace+labels touched by upgrade ns_label_set.add((namespace, tuple(wait_labels.items()))) @@ -423,9 +426,8 @@ class Armada(object): timeout=timer) if this_chart_should_wait: - self._wait_until_ready( - release_name, wait_labels, namespace, timer - ) + self._wait_until_ready(release_name, wait_labels, + namespace, timer) # Track namespace+labels touched by install ns_label_set.add((namespace, tuple(wait_labels.items()))) @@ -438,8 +440,9 @@ class Armada(object): timer = int(round(deadline - time.time())) if test_this_chart: if cg_sequenced: - LOG.info('Running sequenced test, timeout remaining: ' - '%ss.', timer) + LOG.info( + 'Running sequenced test, timeout remaining: ' + '%ss.', timer) if timer <= 0: reason = ('Timeout expired before testing ' 'sequenced release %s' % release_name) @@ -466,9 +469,10 @@ class Armada(object): for (ns, labels) in ns_label_set: labels_dict = dict(labels) timer = int(round(deadline - time.time())) - LOG.info('Final ChartGroup wait for healthy namespace (%s), ' - 'labels=(%s), timeout remaining: %ss.', - ns, labels_dict, timer) + LOG.info( + 'Final ChartGroup wait for healthy namespace (%s), ' + 'labels=(%s), timeout remaining: %ss.', ns, labels_dict, + timer) if timer <= 0: reason = ('Timeout expired waiting on namespace: %s, ' 'labels: (%s)' % (ns, labels_dict)) @@ -476,9 +480,10 @@ class Armada(object): raise armada_exceptions.ArmadaTimeoutException(reason) self._wait_until_ready( - release_name=None, wait_labels=labels_dict, - namespace=ns, timeout=timer - ) + release_name=None, + wait_labels=labels_dict, + namespace=ns, + timeout=timer) # After entire ChartGroup is healthy, run any pending tests for (test, test_timer) in tests_to_run: @@ -489,8 +494,7 @@ class Armada(object): if self.enable_chart_cleanup: self._chart_cleanup( prefix, - self.manifest[const.KEYWORD_ARMADA][const.KEYWORD_GROUPS], - msg) + self.manifest[const.KEYWORD_ARMADA][const.KEYWORD_GROUPS], msg) LOG.info('Done applying manifest.') return msg @@ -513,9 +517,10 @@ class Armada(object): def 
_wait_until_ready(self, release_name, wait_labels, namespace, timeout): if self.dry_run: - LOG.info('Skipping wait during `dry-run`, would have waited on ' - 'namespace=%s, labels=(%s) for %ss.', - namespace, wait_labels, timeout) + LOG.info( + 'Skipping wait during `dry-run`, would have waited on ' + 'namespace=%s, labels=(%s) for %ss.', namespace, wait_labels, + timeout) return self.tiller.k8s.wait_until_ready( @@ -524,13 +529,13 @@ class Armada(object): namespace=namespace, k8s_wait_attempts=self.k8s_wait_attempts, k8s_wait_attempt_sleep=self.k8s_wait_attempt_sleep, - timeout=timeout - ) + timeout=timeout) def _test_chart(self, release_name, timeout): if self.dry_run: - LOG.info('Skipping test during `dry-run`, would have tested ' - 'release=%s with timeout %ss.', release_name, timeout) + LOG.info( + 'Skipping test during `dry-run`, would have tested ' + 'release=%s with timeout %ss.', release_name, timeout) return True success = test_release_for_success( @@ -591,8 +596,9 @@ class Armada(object): valid_releases = [] for gchart in charts: for chart in gchart.get(const.KEYWORD_CHARTS, []): - valid_releases.append(release_prefixer( - prefix, chart.get('chart', {}).get('release'))) + valid_releases.append( + release_prefixer(prefix, + chart.get('chart', {}).get('release'))) actual_releases = [x.name for x in self.tiller.list_releases()] release_diff = list(set(actual_releases) - set(valid_releases)) diff --git a/armada/handlers/chartbuilder.py b/armada/handlers/chartbuilder.py index c9913791..ea085069 100644 --- a/armada/handlers/chartbuilder.py +++ b/armada/handlers/chartbuilder.py @@ -63,20 +63,16 @@ class ChartBuilder(object): property from the chart, or else "" if the property isn't a 2-tuple. ''' source_dir = self.chart.get('source_dir') - return ( - os.path.join(*source_dir) - if (source_dir and - isinstance(source_dir, (list, tuple)) and - len(source_dir) == 2) - else "" - ) + return (os.path.join(*source_dir) + if (source_dir and isinstance(source_dir, (list, tuple)) and + len(source_dir) == 2) else "") def get_ignored_files(self): '''Load files to ignore from .helmignore if present.''' try: ignored_files = [] - if os.path.exists(os.path.join(self.source_directory, - '.helmignore')): + if os.path.exists( + os.path.join(self.source_directory, '.helmignore')): with open(os.path.join(self.source_directory, '.helmignore')) as f: ignored_files = f.readlines() @@ -149,8 +145,9 @@ class ChartBuilder(object): with open(abspath, 'r') as f: file_contents = f.read().encode(encoding) except OSError as e: - LOG.debug('Failed to open and read file %s in the helm ' - 'chart directory.', abspath) + LOG.debug( + 'Failed to open and read file %s in the helm ' + 'chart directory.', abspath) raise chartbuilder_exceptions.FilesLoadException( file=abspath, details=e) except UnicodeError as e: @@ -162,22 +159,24 @@ class ChartBuilder(object): break if len(unicode_errors) == 2: - LOG.debug('Failed to read file %s in the helm chart directory.' - ' Ensure that it is encoded using utf-8.', abspath) + LOG.debug( + 'Failed to read file %s in the helm chart directory.' 
+ ' Ensure that it is encoded using utf-8.', abspath) raise chartbuilder_exceptions.FilesLoadException( - file=abspath, clazz=unicode_errors[0].__class__.__name__, + file=abspath, + clazz=unicode_errors[0].__class__.__name__, details='\n'.join(e for e in unicode_errors)) non_template_files.append( - Any(type_url=relpath, - value=file_contents)) + Any(type_url=relpath, value=file_contents)) for root, dirs, files in os.walk(self.source_directory): relfolder = os.path.split(root)[-1] rel_folder_path = os.path.relpath(root, self.source_directory) - if not any(root.startswith(os.path.join(self.source_directory, x)) - for x in ['templates', 'charts']): + if not any( + root.startswith(os.path.join(self.source_directory, x)) + for x in ['templates', 'charts']): for file in files: if (file not in files_to_ignore and file not in non_template_files): @@ -211,8 +210,9 @@ class ChartBuilder(object): templates = [] if not os.path.exists( os.path.join(self.source_directory, 'templates')): - LOG.warn("Chart %s has no templates directory. " - "No templates will be deployed", chart_name) + LOG.warn( + "Chart %s has no templates directory. " + "No templates will be deployed", chart_name) for root, _, files in os.walk( os.path.join(self.source_directory, 'templates'), topdown=True): diff --git a/armada/handlers/document.py b/armada/handlers/document.py index 4b28d78e..6e98f142 100644 --- a/armada/handlers/document.py +++ b/armada/handlers/document.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + """Module for resolving design references.""" import urllib.parse @@ -66,8 +67,7 @@ class ReferenceResolver(object): except ValueError: raise InvalidPathException( "Cannot resolve design reference %s: unable " - "to parse as valid URI." - % l) + "to parse as valid URI." % l) return data @@ -90,8 +90,8 @@ class ReferenceResolver(object): response = requests.get(design_uri.geturl(), timeout=30) if response.status_code >= 400: raise InvalidPathException( - "Error received for HTTP reference: %d" - % response.status_code) + "Error received for HTTP reference: %d" % + response.status_code) return response.content diff --git a/armada/handlers/k8s.py b/armada/handlers/k8s.py index dc5d7741..455c553c 100644 --- a/armada/handlers/k8s.py +++ b/armada/handlers/k8s.py @@ -49,7 +49,9 @@ class K8s(object): self.batch_v1beta1_api = client.BatchV1beta1Api() self.extension_api = client.ExtensionsV1beta1Api() - def delete_job_action(self, name, namespace="default", + def delete_job_action(self, + name, + namespace="default", propagation_policy='Foreground', timeout=DEFAULT_K8S_TIMEOUT): ''' @@ -59,16 +61,13 @@ class K8s(object): to the delete. Default 'Foreground' means that child pods to the job will be deleted before the job is marked as deleted. 
''' - self._delete_job_action( - self.batch_api.list_namespaced_job, - self.batch_api.delete_namespaced_job, - "job", - name, - namespace, - propagation_policy, - timeout) + self._delete_job_action(self.batch_api.list_namespaced_job, + self.batch_api.delete_namespaced_job, "job", + name, namespace, propagation_policy, timeout) - def delete_cron_job_action(self, name, namespace="default", + def delete_cron_job_action(self, + name, + namespace="default", propagation_policy='Foreground', timeout=DEFAULT_K8S_TIMEOUT): ''' @@ -80,15 +79,15 @@ class K8s(object): ''' self._delete_job_action( self.batch_v1beta1_api.list_namespaced_cron_job, - self.batch_v1beta1_api.delete_namespaced_cron_job, - "cron job", - name, - namespace, - propagation_policy, - timeout) + self.batch_v1beta1_api.delete_namespaced_cron_job, "cron job", + name, namespace, propagation_policy, timeout) - def _delete_job_action(self, list_func, delete_func, job_type_description, - name, namespace="default", + def _delete_job_action(self, + list_func, + delete_func, + job_type_description, + name, + namespace="default", propagation_policy='Foreground', timeout=DEFAULT_K8S_TIMEOUT): try: @@ -100,12 +99,13 @@ class K8s(object): w = watch.Watch() issue_delete = True found_events = False - for event in w.stream(list_func, - namespace=namespace, - timeout_seconds=timeout): + for event in w.stream( + list_func, namespace=namespace, timeout_seconds=timeout): if issue_delete: delete_func( - name=name, namespace=namespace, body=body, + name=name, + namespace=namespace, + body=body, propagation_policy=propagation_policy) issue_delete = False @@ -125,15 +125,14 @@ class K8s(object): job_type_description, name, namespace) err_msg = ('Reached timeout while waiting to delete %s: ' - 'name=%s, namespace=%s' % - (job_type_description, name, namespace)) + 'name=%s, namespace=%s' % (job_type_description, name, + namespace)) LOG.error(err_msg) raise exceptions.KubernetesWatchTimeoutException(err_msg) except ApiException as e: - LOG.exception( - "Exception when deleting %s: name=%s, namespace=%s", - job_type_description, name, namespace) + LOG.exception("Exception when deleting %s: name=%s, namespace=%s", + job_type_description, name, namespace) raise e def get_namespace_job(self, namespace="default", label_selector=''): @@ -282,8 +281,9 @@ class K8s(object): w = watch.Watch() found_events = False - for event in w.stream(self.client.list_pod_for_all_namespaces, - timeout_seconds=timeout): + for event in w.stream( + self.client.list_pod_for_all_namespaces, + timeout_seconds=timeout): pod_name = event['object'].metadata.name if release in pod_name: @@ -322,13 +322,13 @@ class K8s(object): label_selector = label_selectors(labels) if labels else '' wait_attempts = (k8s_wait_attempts if k8s_wait_attempts >= 1 else 1) - sleep_time = (k8s_wait_attempt_sleep if k8s_wait_attempt_sleep >= 1 - else 1) + sleep_time = (k8s_wait_attempt_sleep + if k8s_wait_attempt_sleep >= 1 else 1) - LOG.debug("Wait on namespace=(%s) labels=(%s) for %s sec " - "(k8s wait %s times, sleep %ss)", - namespace, label_selector, timeout, - wait_attempts, sleep_time) + LOG.debug( + "Wait on namespace=(%s) labels=(%s) for %s sec " + "(k8s wait %s times, sleep %ss)", namespace, label_selector, + timeout, wait_attempts, sleep_time) if not namespace: # This shouldn't be reachable @@ -349,14 +349,16 @@ class K8s(object): if deadline_remaining <= 0: return False timed_out, modified_pods, unready_pods, found_events = ( - self._wait_one_time(namespace=namespace, - label_selector=label_selector, - 
timeout=deadline_remaining)) + self._wait_one_time( + namespace=namespace, + label_selector=label_selector, + timeout=deadline_remaining)) if not found_events: - LOG.warn('Saw no install/update events for release=%s, ' - 'namespace=%s, labels=(%s)', - release, namespace, label_selector) + LOG.warn( + 'Saw no install/update events for release=%s, ' + 'namespace=%s, labels=(%s)', release, namespace, + label_selector) if timed_out: LOG.info('Timed out waiting for pods: %s', @@ -380,8 +382,9 @@ class K8s(object): return True def _wait_one_time(self, namespace, label_selector, timeout=100): - LOG.debug('Starting to wait: namespace=%s, label_selector=(%s), ' - 'timeout=%s', namespace, label_selector, timeout) + LOG.debug( + 'Starting to wait: namespace=%s, label_selector=(%s), ' + 'timeout=%s', namespace, label_selector, timeout) ready_pods = {} modified_pods = set() w = watch.Watch() @@ -420,15 +423,15 @@ class K8s(object): pod_ready = True if (pod_phase == 'Succeeded' or - (pod_phase == 'Running' and - self._get_pod_condition(status.conditions, - 'Ready') == 'True')): + (pod_phase == 'Running' and self._get_pod_condition( + status.conditions, 'Ready') == 'True')): LOG.debug('Pod %s is ready!', pod_name) else: pod_ready = False - LOG.debug('Pod %s not ready: conditions:\n%s\n' - 'container_statuses:\n%s', pod_name, - status.conditions, status.container_statuses) + LOG.debug( + 'Pod %s not ready: conditions:\n%s\n' + 'container_statuses:\n%s', pod_name, status.conditions, + status.container_statuses) ready_pods[pod_name] = pod_ready @@ -440,8 +443,8 @@ class K8s(object): ready_pods.pop(pod_name) elif event_type == 'ERROR': - LOG.error('Pod %s: Got error event %s', - pod_name, event['object'].to_dict()) + LOG.error('Pod %s: Got error event %s', pod_name, + event['object'].to_dict()) raise exceptions.KubernetesErrorEventException( 'Got error event for pod: %s' % event['object']) @@ -449,8 +452,8 @@ class K8s(object): LOG.error('Unrecognized event type (%s) for pod: %s', event_type, event['object']) raise exceptions.KubernetesUnknownStreamingEventTypeException( - 'Got unknown event type (%s) for pod: %s' - % (event_type, event['object'])) + 'Got unknown event type (%s) for pod: %s' % + (event_type, event['object'])) if all(ready_pods.values()): return (False, modified_pods, [], found_events) @@ -468,7 +471,8 @@ class K8s(object): def _check_timeout(self, timeout): if timeout <= 0: - LOG.warn('Kubernetes timeout is invalid or unspecified, ' - 'using default %ss.', DEFAULT_K8S_TIMEOUT) + LOG.warn( + 'Kubernetes timeout is invalid or unspecified, ' + 'using default %ss.', DEFAULT_K8S_TIMEOUT) timeout = DEFAULT_K8S_TIMEOUT return timeout diff --git a/armada/handlers/manifest.py b/armada/handlers/manifest.py index b923d280..68314832 100644 --- a/armada/handlers/manifest.py +++ b/armada/handlers/manifest.py @@ -188,8 +188,8 @@ class Manifest(object): """ try: chart = None - for iter, chart in enumerate(chart_group.get('data', {}).get( - 'chart_group', [])): + for iter, chart in enumerate( + chart_group.get('data', {}).get('chart_group', [])): if isinstance(chart, dict): continue chart_dep = self.find_chart_document(chart) @@ -214,8 +214,8 @@ class Manifest(object): """ try: group = None - for iter, group in enumerate(self.manifest.get('data', {}).get( - 'chart_groups', [])): + for iter, group in enumerate( + self.manifest.get('data', {}).get('chart_groups', [])): if isinstance(group, dict): continue chart_grp = self.find_chart_group_document(group) @@ -244,6 +244,4 @@ class Manifest(object): 
self.build_chart_groups() self.build_armada_manifest() - return { - 'armada': self.manifest.get('data', {}) - } + return {'armada': self.manifest.get('data', {})} diff --git a/armada/handlers/override.py b/armada/handlers/override.py index e2c12fbf..c4280603 100644 --- a/armada/handlers/override.py +++ b/armada/handlers/override.py @@ -22,6 +22,7 @@ from armada.utils import validate class Override(object): + def __init__(self, documents, overrides=None, values=None): self.documents = documents self.overrides = overrides @@ -61,8 +62,8 @@ class Override(object): def find_manifest_document(self, doc_path): for doc in self.documents: if doc.get('schema') == self.find_document_type( - doc_path[0]) and doc.get('metadata', {}).get( - 'name') == doc_path[1]: + doc_path[0]) and doc.get('metadata', + {}).get('name') == doc_path[1]: return doc raise override_exceptions.UnknownDocumentOverrideException( @@ -118,24 +119,24 @@ class Override(object): def update_chart_document(self, ovr): for doc in self.documents: if doc.get('schema') == const.DOCUMENT_CHART and doc.get( - 'metadata', {}).get('name') == ovr.get('metadata', {}).get( - 'name'): + 'metadata', {}).get('name') == ovr.get('metadata', + {}).get('name'): self.update(doc.get('data', {}), ovr.get('data', {})) return def update_chart_group_document(self, ovr): for doc in self.documents: if doc.get('schema') == const.DOCUMENT_GROUP and doc.get( - 'metadata', {}).get('name') == ovr.get('metadata', {}).get( - 'name'): + 'metadata', {}).get('name') == ovr.get('metadata', + {}).get('name'): self.update(doc.get('data', {}), ovr.get('data', {})) return def update_armada_manifest(self, ovr): for doc in self.documents: if doc.get('schema') == const.DOCUMENT_MANIFEST and doc.get( - 'metadata', {}).get('name') == ovr.get('metadata', {}).get( - 'name'): + 'metadata', {}).get('name') == ovr.get('metadata', + {}).get('name'): self.update(doc.get('data', {}), ovr.get('data', {})) return diff --git a/armada/handlers/test.py b/armada/handlers/test.py index 763059c4..c9347359 100644 --- a/armada/handlers/test.py +++ b/armada/handlers/test.py @@ -20,10 +20,10 @@ TESTRUN_STATUS_FAILURE = 2 TESTRUN_STATUS_RUNNING = 3 -def test_release_for_success(tiller, release, +def test_release_for_success(tiller, + release, timeout=const.DEFAULT_TILLER_TIMEOUT): test_suite_run = tiller.test_release(release, timeout) results = getattr(test_suite_run, 'results', []) - failed_results = [r for r in results if - r.status != TESTRUN_STATUS_SUCCESS] + failed_results = [r for r in results if r.status != TESTRUN_STATUS_SUCCESS] return len(failed_results) == 0 diff --git a/armada/handlers/tiller.py b/armada/handlers/tiller.py index 0f1dd50e..ed2a7fdc 100644 --- a/armada/handlers/tiller.py +++ b/armada/handlers/tiller.py @@ -51,6 +51,7 @@ LOG = logging.getLogger(__name__) class CommonEqualityMixin(object): + def __eq__(self, other): return (isinstance(other, self.__class__) and self.__dict__ == other.__dict__) @@ -76,8 +77,11 @@ class Tiller(object): service over gRPC ''' - def __init__(self, tiller_host=None, tiller_port=None, - tiller_namespace=None, dry_run=False): + def __init__(self, + tiller_host=None, + tiller_port=None, + tiller_namespace=None, + dry_run=False): self.tiller_host = tiller_host self.tiller_port = tiller_port or CONF.tiller_port self.tiller_namespace = tiller_namespace or CONF.tiller_namespace @@ -113,18 +117,16 @@ class Tiller(object): tiller_ip = self._get_tiller_ip() tiller_port = self._get_tiller_port() try: - LOG.debug('Tiller getting gRPC insecure channel at 
%s:%s ' - 'with options: [grpc.max_send_message_length=%s, ' - 'grpc.max_receive_message_length=%s]', - tiller_ip, tiller_port, - MAX_MESSAGE_LENGTH, MAX_MESSAGE_LENGTH) + LOG.debug( + 'Tiller getting gRPC insecure channel at %s:%s ' + 'with options: [grpc.max_send_message_length=%s, ' + 'grpc.max_receive_message_length=%s]', tiller_ip, tiller_port, + MAX_MESSAGE_LENGTH, MAX_MESSAGE_LENGTH) return grpc.insecure_channel( '%s:%s' % (tiller_ip, tiller_port), - options=[ - ('grpc.max_send_message_length', MAX_MESSAGE_LENGTH), - ('grpc.max_receive_message_length', MAX_MESSAGE_LENGTH) - ] - ) + options=[('grpc.max_send_message_length', MAX_MESSAGE_LENGTH), + ('grpc.max_receive_message_length', + MAX_MESSAGE_LENGTH)]) except Exception: raise ex.ChannelException() @@ -194,15 +196,15 @@ class Tiller(object): # iterate through all the pages when collecting this list. # NOTE(MarshM): `Helm List` defaults to returning Deployed and Failed, # but this might not be a desireable ListReleasesRequest default. - req = ListReleasesRequest(limit=RELEASE_LIMIT, - status_codes=[const.STATUS_DEPLOYED, - const.STATUS_FAILED], - sort_by='LAST_RELEASED', - sort_order='DESC') + req = ListReleasesRequest( + limit=RELEASE_LIMIT, + status_codes=[const.STATUS_DEPLOYED, const.STATUS_FAILED], + sort_by='LAST_RELEASED', + sort_order='DESC') LOG.debug('Tiller ListReleases() with timeout=%s', self.timeout) - release_list = stub.ListReleases(req, self.timeout, - metadata=self.metadata) + release_list = stub.ListReleases( + req, self.timeout, metadata=self.metadata) for y in release_list: # TODO(MarshM) this log is too noisy, fix later @@ -251,8 +253,8 @@ class Tiller(object): labels = action.get('labels') self.rolling_upgrade_pod_deployment( - name, release_name, namespace, labels, - action_type, chart, disable_hooks, values, timeout) + name, release_name, namespace, labels, action_type, chart, + disable_hooks, values, timeout) except Exception: LOG.warn("Pre: Could not update anything, please check yaml") raise ex.PreUpdateJobDeleteException(name, namespace) @@ -263,8 +265,8 @@ class Tiller(object): action_type = action.get('type') labels = action.get('labels', None) - self.delete_resources(release_name, name, action_type, - labels, namespace, timeout) + self.delete_resources(release_name, name, action_type, labels, + namespace, timeout) except Exception: LOG.warn("PRE: Could not delete anything, please check yaml") raise ex.PreUpdateJobDeleteException(name, namespace) @@ -307,13 +309,10 @@ class Tiller(object): charts = [] for latest_release in self.list_releases(): try: - release = ( - latest_release.name, - latest_release.version, - latest_release.chart, - latest_release.config.raw, - latest_release.info.status.Code.Name( - latest_release.info.status.code)) + release = (latest_release.name, latest_release.version, + latest_release.chart, latest_release.config.raw, + latest_release.info.status.Code.Name( + latest_release.info.status.code)) charts.append(release) LOG.debug('Found release %s, version %s, status: %s', release[0], release[1], release[4]) @@ -323,7 +322,10 @@ class Tiller(object): continue return charts - def update_release(self, chart, release, namespace, + def update_release(self, + chart, + release, + namespace, pre_actions=None, post_actions=None, disable_hooks=False, @@ -337,10 +339,10 @@ class Tiller(object): ''' timeout = self._check_timeout(wait, timeout) - LOG.info('Helm update release%s: wait=%s, timeout=%s, force=%s, ' - 'recreate_pods=%s', - (' (dry run)' if self.dry_run else ''), - wait, timeout, 
force, recreate_pods) + LOG.info( + 'Helm update release%s: wait=%s, timeout=%s, force=%s, ' + 'recreate_pods=%s', (' (dry run)' if self.dry_run else ''), wait, + timeout, force, recreate_pods) if values is None: values = Config(raw='') @@ -366,7 +368,8 @@ class Tiller(object): recreate=recreate_pods) update_msg = stub.UpdateRelease( - release_request, timeout + GRPC_EPSILON, + release_request, + timeout + GRPC_EPSILON, metadata=self.metadata) except Exception: @@ -377,16 +380,17 @@ class Tiller(object): self._post_update_actions(post_actions, namespace) tiller_result = TillerResult( - update_msg.release.name, - update_msg.release.namespace, + update_msg.release.name, update_msg.release.namespace, update_msg.release.info.status.Code.Name( update_msg.release.info.status.code), - update_msg.release.info.Description, - update_msg.release.version) + update_msg.release.info.Description, update_msg.release.version) return tiller_result - def install_release(self, chart, release, namespace, + def install_release(self, + chart, + release, + namespace, values=None, wait=False, timeout=None): @@ -396,8 +400,7 @@ class Tiller(object): timeout = self._check_timeout(wait, timeout) LOG.info('Helm install release%s: wait=%s, timeout=%s', - (' (dry run)' if self.dry_run else ''), - wait, timeout) + (' (dry run)' if self.dry_run else ''), wait, timeout) if values is None: values = Config(raw='') @@ -417,12 +420,12 @@ class Tiller(object): timeout=timeout) install_msg = stub.InstallRelease( - release_request, timeout + GRPC_EPSILON, + release_request, + timeout + GRPC_EPSILON, metadata=self.metadata) tiller_result = TillerResult( - install_msg.release.name, - install_msg.release.namespace, + install_msg.release.name, install_msg.release.namespace, install_msg.release.info.status.Code.Name( install_msg.release.info.status.code), install_msg.release.info.Description, @@ -434,7 +437,9 @@ class Tiller(object): status = self.get_release_status(release) raise ex.ReleaseException(release, status, 'Install') - def test_release(self, release, timeout=const.DEFAULT_TILLER_TIMEOUT, + def test_release(self, + release, + timeout=const.DEFAULT_TILLER_TIMEOUT, cleanup=True): ''' :param release - name of release to test @@ -455,8 +460,7 @@ class Tiller(object): # 1. Remove this timeout # 2. 
Add `k8s_timeout=const.DEFAULT_K8S_TIMEOUT` arg and use release_request = TestReleaseRequest( - name=release, timeout=timeout, - cleanup=cleanup) + name=release, timeout=timeout, cleanup=cleanup) test_message_stream = stub.RunReleaseTest( release_request, timeout, metadata=self.metadata) @@ -550,16 +554,18 @@ class Tiller(object): # Helm client calls ReleaseContent in Delete dry-run scenario if self.dry_run: content = self.get_release_content(release) - LOG.info('Skipping delete during `dry-run`, would have deleted ' - 'release=%s from namespace=%s.', - content.release.name, content.release.namespace) + LOG.info( + 'Skipping delete during `dry-run`, would have deleted ' + 'release=%s from namespace=%s.', content.release.name, + content.release.namespace) return # build release uninstall request try: stub = ReleaseServiceStub(self.channel) - LOG.info("Uninstall %s release with disable_hooks=%s, " - "purge=%s flags", release, disable_hooks, purge) + LOG.info( + "Uninstall %s release with disable_hooks=%s, " + "purge=%s flags", release, disable_hooks, purge) release_request = UninstallReleaseRequest( name=release, disable_hooks=disable_hooks, purge=purge) @@ -571,8 +577,13 @@ class Tiller(object): status = self.get_release_status(release) raise ex.ReleaseException(release, status, 'Delete') - def delete_resources(self, release_name, resource_name, resource_type, - resource_labels, namespace, wait=False, + def delete_resources(self, + release_name, + resource_name, + resource_type, + resource_labels, + namespace, + wait=False, timeout=const.DEFAULT_TILLER_TIMEOUT): ''' :params release_name - release name the specified resource is under @@ -588,8 +599,9 @@ class Tiller(object): label_selector = '' if resource_labels is not None: label_selector = label_selectors(resource_labels) - LOG.debug("Deleting resources in namespace %s matching " - "selectors (%s).", namespace, label_selector) + LOG.debug( + "Deleting resources in namespace %s matching " + "selectors (%s).", namespace, label_selector) handled = False if resource_type == 'job': @@ -598,19 +610,20 @@ class Tiller(object): jb_name = jb.metadata.name if self.dry_run: - LOG.info('Skipping delete job during `dry-run`, would ' - 'have deleted job %s in namespace=%s.', - jb_name, namespace) + LOG.info( + 'Skipping delete job during `dry-run`, would ' + 'have deleted job %s in namespace=%s.', jb_name, + namespace) continue - LOG.info("Deleting job %s in namespace: %s", - jb_name, namespace) + LOG.info("Deleting job %s in namespace: %s", jb_name, + namespace) self.k8s.delete_job_action(jb_name, namespace, timeout=timeout) handled = True if resource_type == 'cronjob' or resource_type == 'job': - get_jobs = self.k8s.get_namespace_cron_job( - namespace, label_selector) + get_jobs = self.k8s.get_namespace_cron_job(namespace, + label_selector) for jb in get_jobs.items: jb_name = jb.metadata.name @@ -621,42 +634,50 @@ class Tiller(object): "deprecated, use `type: cronjob` instead") if self.dry_run: - LOG.info('Skipping delete cronjob during `dry-run`, would ' - 'have deleted cronjob %s in namespace=%s.', - jb_name, namespace) + LOG.info( + 'Skipping delete cronjob during `dry-run`, would ' + 'have deleted cronjob %s in namespace=%s.', jb_name, + namespace) continue - LOG.info("Deleting cronjob %s in namespace: %s", - jb_name, namespace) + LOG.info("Deleting cronjob %s in namespace: %s", jb_name, + namespace) self.k8s.delete_cron_job_action(jb_name, namespace) handled = True if resource_type == 'pod': - release_pods = self.k8s.get_namespace_pod( - 
                namespace, label_selector)
+            release_pods = self.k8s.get_namespace_pod(namespace,
+                                                      label_selector)
             for pod in release_pods.items:
                 pod_name = pod.metadata.name
                 if self.dry_run:
-                    LOG.info('Skipping delete pod during `dry-run`, would '
-                             'have deleted pod %s in namespace=%s.',
-                             pod_name, namespace)
+                    LOG.info(
+                        'Skipping delete pod during `dry-run`, would '
+                        'have deleted pod %s in namespace=%s.', pod_name,
+                        namespace)
                     continue
-                LOG.info("Deleting pod %s in namespace: %s",
-                         pod_name, namespace)
+                LOG.info("Deleting pod %s in namespace: %s", pod_name,
+                         namespace)
                 self.k8s.delete_namespace_pod(pod_name, namespace)
                 if wait:
                     self.k8s.wait_for_pod_redeployment(pod_name, namespace)
             handled = True
         if not handled:
-            LOG.error("Unable to execute name: %s type: %s ",
-                      resource_name, resource_type)
+            LOG.error("Unable to execute name: %s type: %s ", resource_name,
+                      resource_type)
-    def rolling_upgrade_pod_deployment(self, name, release_name, namespace,
-                                       resource_labels, action_type, chart,
-                                       disable_hooks, values,
+    def rolling_upgrade_pod_deployment(self,
+                                       name,
+                                       release_name,
+                                       namespace,
+                                       resource_labels,
+                                       action_type,
+                                       chart,
+                                       disable_hooks,
+                                       values,
                                        timeout=const.DEFAULT_TILLER_TIMEOUT):
         '''
         update statefullsets (daemon, stateful)
@@ -695,8 +716,13 @@ class Tiller(object):
                 # delete pods
                 self.delete_resources(
-                    release_name, name, 'pod', resource_labels, namespace,
-                    wait=True, timeout=timeout)
+                    release_name,
+                    name,
+                    'pod',
+                    resource_labels,
+                    namespace,
+                    wait=True,
+                    timeout=timeout)
         else:
             LOG.error("Unable to exectue name: % type: %s", name, action_type)
@@ -714,10 +740,10 @@ class Tiller(object):
         timeout = self._check_timeout(wait, timeout)
-        LOG.debug('Helm rollback%s of release=%s, version=%s, '
-                  'wait=%s, timeout=%s',
-                  (' (dry run)' if self.dry_run else ''),
-                  release_name, version, wait, timeout)
+        LOG.debug(
+            'Helm rollback%s of release=%s, version=%s, '
+            'wait=%s, timeout=%s', (' (dry run)' if self.dry_run else ''),
+            release_name, version, wait, timeout)
         try:
             stub = ReleaseServiceStub(self.channel)
             rollback_request = RollbackReleaseRequest(
@@ -742,7 +768,8 @@ class Tiller(object):
     def _check_timeout(self, wait, timeout):
         if timeout is None or timeout <= 0:
             if wait:
-                LOG.warn('Tiller timeout is invalid or unspecified, '
-                         'using default %ss.', self.timeout)
+                LOG.warn(
+                    'Tiller timeout is invalid or unspecified, '
+                    'using default %ss.', self.timeout)
                 timeout = self.timeout
         return timeout
diff --git a/armada/shell.py b/armada/shell.py
index 36a5d6d6..68ee061a 100644
--- a/armada/shell.py
+++ b/armada/shell.py
@@ -31,20 +31,15 @@ CONF = cfg.CONF
 @click.group()
-@click.option('--debug',
-              help="Enable debug logging",
-              is_flag=True)
-@click.option('--api/--no-api',
-              help="Execute service endpoints. (requires url option)",
-              default=False)
-@click.option('--url',
-              help="Armada Service Endpoint",
-              envvar='HOST',
-              default=None)
-@click.option('--token',
-              help="Keystone Service Token",
-              envvar='TOKEN',
-              default=None)
+@click.option('--debug', help="Enable debug logging", is_flag=True)
+@click.option(
+    '--api/--no-api',
+    help="Execute service endpoints. 
(requires url option)", + default=False) +@click.option( + '--url', help="Armada Service Endpoint", envvar='HOST', default=None) +@click.option( + '--token', help="Keystone Service Token", envvar='TOKEN', default=None) @click.pass_context def main(ctx, debug, api, url, token): """ @@ -83,8 +78,7 @@ def main(ctx, debug, api, url, token): ArmadaSession( host=parsed_url.netloc, scheme=parsed_url.scheme, - token=token) - ) + token=token)) if debug: CONF.debug = debug diff --git a/armada/tests/test_utils.py b/armada/tests/test_utils.py index c3c86fe9..be370d29 100644 --- a/armada/tests/test_utils.py +++ b/armada/tests/test_utils.py @@ -113,6 +113,7 @@ class AttrDict(dict): """Allows defining objects with attributes without defining a class """ + def __init__(self, *args, **kwargs): super(AttrDict, self).__init__(*args, **kwargs) self.__dict__ = self diff --git a/armada/tests/unit/api/base.py b/armada/tests/unit/api/base.py index df4b9356..83919a6f 100644 --- a/armada/tests/unit/api/base.py +++ b/armada/tests/unit/api/base.py @@ -35,8 +35,8 @@ class BaseControllerTest(test_base.ArmadaTestCase): sample_conf_dir = os.path.join(current_dir, os.pardir, os.pardir, os.pardir, os.pardir, 'etc', 'armada') sample_conf_files = ['api-paste.ini', 'armada.conf.sample'] - with mock.patch.object( - armada.conf, '_get_config_files') as mock_get_config_files: + with mock.patch.object(armada.conf, + '_get_config_files') as mock_get_config_files: mock_get_config_files.return_value = [ os.path.join(sample_conf_dir, x) for x in sample_conf_files ] diff --git a/armada/tests/unit/api/test_armada_controller.py b/armada/tests/unit/api/test_armada_controller.py index ca44dac6..2839637e 100644 --- a/armada/tests/unit/api/test_armada_controller.py +++ b/armada/tests/unit/api/test_armada_controller.py @@ -34,14 +34,16 @@ class ArmadaControllerTest(base.BaseControllerTest): rules = {'armada:create_endpoints': '@'} self.policy.set_rules(rules) - options = {'debug': 'true', - 'disable_update_pre': 'false', - 'disable_update_post': 'false', - 'enable_chart_cleanup': 'false', - 'skip_pre_flight': 'false', - 'dry_run': 'false', - 'wait': 'false', - 'timeout': '100'} + options = { + 'debug': 'true', + 'disable_update_pre': 'false', + 'disable_update_post': 'false', + 'enable_chart_cleanup': 'false', + 'skip_pre_flight': 'false', + 'dry_run': 'false', + 'wait': 'false', + 'timeout': '100' + } expected_armada_options = { 'disable_update_pre': False, @@ -67,18 +69,18 @@ class ArmadaControllerTest(base.BaseControllerTest): mock_armada.return_value.sync.return_value = \ {'diff': [], 'install': [], 'upgrade': []} - result = self.app.simulate_post(path='/api/v1.0/apply', - body=body, - headers={ - 'Content-Type': 'application/json' - }, - params=options) + result = self.app.simulate_post( + path='/api/v1.0/apply', + body=body, + headers={'Content-Type': 'application/json'}, + params=options) self.assertEqual(result.json, expected) self.assertEqual('application/json', result.headers['content-type']) mock_resolver.resolve_reference.assert_called_with([payload_url]) - mock_armada.assert_called_with([{'foo': 'bar'}], - **expected_armada_options) + mock_armada.assert_called_with([{ + 'foo': 'bar' + }], **expected_armada_options) mock_armada.return_value.sync.assert_called() def test_armada_apply_no_href(self): @@ -86,23 +88,24 @@ class ArmadaControllerTest(base.BaseControllerTest): rules = {'armada:create_endpoints': '@'} self.policy.set_rules(rules) - options = {'debug': 'true', - 'disable_update_pre': 'false', - 'disable_update_post': 
'false', - 'enable_chart_cleanup': 'false', - 'skip_pre_flight': 'false', - 'dry_run': 'false', - 'wait': 'false', - 'timeout': '100'} + options = { + 'debug': 'true', + 'disable_update_pre': 'false', + 'disable_update_post': 'false', + 'enable_chart_cleanup': 'false', + 'skip_pre_flight': 'false', + 'dry_run': 'false', + 'wait': 'false', + 'timeout': '100' + } payload = {'hrefs': []} body = json.dumps(payload) - result = self.app.simulate_post(path='/api/v1.0/apply', - body=body, - headers={ - 'Content-Type': 'application/json' - }, - params=options) + result = self.app.simulate_post( + path='/api/v1.0/apply', + body=body, + headers={'Content-Type': 'application/json'}, + params=options) self.assertEqual(result.status_code, 400) diff --git a/armada/tests/unit/api/test_rollback_controller.py b/armada/tests/unit/api/test_rollback_controller.py index 59144746..1f9be131 100644 --- a/armada/tests/unit/api/test_rollback_controller.py +++ b/armada/tests/unit/api/test_rollback_controller.py @@ -64,12 +64,7 @@ class RollbackReleaseControllerTest(base.BaseControllerTest): dry_run=False) rollback_release.assert_called_once_with( - release, - 2, - wait=True, - timeout=123, - force=True, - recreate_pods=True) + release, 2, wait=True, timeout=123, force=True, recreate_pods=True) self.assertEqual(200, resp.status_code) self.assertEqual('Rollback of test-release complete.', diff --git a/armada/tests/unit/api/test_test_controller.py b/armada/tests/unit/api/test_test_controller.py index 1e244828..679cb337 100644 --- a/armada/tests/unit/api/test_test_controller.py +++ b/armada/tests/unit/api/test_test_controller.py @@ -43,13 +43,10 @@ class TestReleasesManifestControllerTest(base.BaseControllerTest): self.assertEqual(200, resp.status_code) result = json.loads(resp.text) - expected = { - "tests": {"passed": [], "skipped": [], "failed": []} - } + expected = {"tests": {"passed": [], "skipped": [], "failed": []}} self.assertEqual(expected, result) - mock_manifest.assert_called_once_with( - documents, target_manifest=None) + mock_manifest.assert_called_once_with(documents, target_manifest=None) self.assertTrue(mock_tiller.called) @@ -57,8 +54,8 @@ class TestReleasesReleaseNameControllerTest(base.BaseControllerTest): @mock.patch.object(test, 'test_release_for_success') @mock.patch.object(test, 'Tiller') - def test_test_controller_test_pass( - self, mock_tiller, mock_test_release_for_success): + def test_test_controller_test_pass(self, mock_tiller, + mock_test_release_for_success): rules = {'armada:test_release': '@'} self.policy.set_rules(rules) @@ -71,8 +68,8 @@ class TestReleasesReleaseNameControllerTest(base.BaseControllerTest): @mock.patch.object(test, 'test_release_for_success') @mock.patch.object(test, 'Tiller') - def test_test_controller_test_fail( - self, mock_tiller, mock_test_release_for_success): + def test_test_controller_test_fail(self, mock_tiller, + mock_test_release_for_success): rules = {'armada:test_release': '@'} self.policy.set_rules(rules) @@ -103,8 +100,7 @@ class TestReleasesManifestControllerNegativeTest(base.BaseControllerTest): @mock.patch.object(test, 'Manifest') @mock.patch.object(test, 'Tiller') - def test_test_controller_validation_failure_returns_400( - self, *_): + def test_test_controller_validation_failure_returns_400(self, *_): rules = {'armada:tests_manifest': '@'} self.policy.set_rules(rules) @@ -123,19 +119,22 @@ class TestReleasesManifestControllerNegativeTest(base.BaseControllerTest): resp_body = json.loads(resp.text) self.assertEqual(400, resp_body['code']) 
self.assertEqual(1, resp_body['details']['errorCount']) - self.assertIn( - {'message': ( - 'An error occurred while generating the manifest: Could not ' - 'find dependency chart helm-toolkit in armada/Chart/v1.'), - 'error': True, - 'kind': 'ValidationMessage', - 'level': 'Error', - 'name': 'ARM001', - 'documents': []}, - resp_body['details']['messageList']) + self.assertIn({ + 'message': + ('An error occurred while generating the manifest: Could not ' + 'find dependency chart helm-toolkit in armada/Chart/v1.'), + 'error': + True, + 'kind': + 'ValidationMessage', + 'level': + 'Error', + 'name': + 'ARM001', + 'documents': [] + }, resp_body['details']['messageList']) self.assertEqual(('Failed to validate documents or generate Armada ' - 'Manifest from documents.'), - resp_body['message']) + 'Manifest from documents.'), resp_body['message']) @mock.patch('armada.utils.validate.Manifest') @mock.patch.object(test, 'Tiller') @@ -158,18 +157,21 @@ class TestReleasesManifestControllerNegativeTest(base.BaseControllerTest): resp_body = json.loads(resp.text) self.assertEqual(400, resp_body['code']) self.assertEqual(1, resp_body['details']['errorCount']) - self.assertEqual( - [{'message': ( - 'An error occurred while generating the manifest: foo.'), - 'error': True, - 'kind': 'ValidationMessage', - 'level': 'Error', - 'name': 'ARM001', - 'documents': []}], - resp_body['details']['messageList']) + self.assertEqual([{ + 'message': + ('An error occurred while generating the manifest: foo.'), + 'error': + True, + 'kind': + 'ValidationMessage', + 'level': + 'Error', + 'name': + 'ARM001', + 'documents': [] + }], resp_body['details']['messageList']) self.assertEqual(('Failed to validate documents or generate Armada ' - 'Manifest from documents.'), - resp_body['message']) + 'Manifest from documents.'), resp_body['message']) @test_utils.attr(type=['negative']) diff --git a/armada/tests/unit/api/test_tiller_controller.py b/armada/tests/unit/api/test_tiller_controller.py index bb5ee46e..ed185ca7 100644 --- a/armada/tests/unit/api/test_tiller_controller.py +++ b/armada/tests/unit/api/test_tiller_controller.py @@ -37,13 +37,17 @@ class TillerControllerTest(base.BaseControllerTest): result = self.app.simulate_get('/api/v1.0/status') expected = { - 'tiller': {'version': 'fake_version', 'state': 'fake_status'} + 'tiller': { + 'version': 'fake_version', + 'state': 'fake_status' + } } self.assertEqual(expected, result.json) self.assertEqual('application/json', result.headers['content-type']) mock_tiller.assert_called_once_with( - tiller_host=None, tiller_port=44134, + tiller_host=None, + tiller_port=44134, tiller_namespace='kube-system') @mock.patch.object(tiller_controller, 'Tiller') @@ -55,20 +59,27 @@ class TillerControllerTest(base.BaseControllerTest): mock_tiller.return_value.tiller_status.return_value = 'fake_status' mock_tiller.return_value.tiller_version.return_value = 'fake_version' - result = self.app.simulate_get('/api/v1.0/status', - params_csv=False, - params={'tiller_host': 'fake_host', - 'tiller_port': '98765', - 'tiller_namespace': 'fake_ns'}) + result = self.app.simulate_get( + '/api/v1.0/status', + params_csv=False, + params={ + 'tiller_host': 'fake_host', + 'tiller_port': '98765', + 'tiller_namespace': 'fake_ns' + }) expected = { - 'tiller': {'version': 'fake_version', 'state': 'fake_status'} + 'tiller': { + 'version': 'fake_version', + 'state': 'fake_status' + } } self.assertEqual(expected, result.json) self.assertEqual('application/json', result.headers['content-type']) - 
mock_tiller.assert_called_once_with(tiller_host='fake_host', - tiller_port=98765, - tiller_namespace='fake_ns') + mock_tiller.assert_called_once_with( + tiller_host='fake_host', + tiller_port=98765, + tiller_namespace='fake_ns') @mock.patch.object(tiller_controller, 'Tiller') def test_tiller_releases(self, mock_tiller): @@ -82,17 +93,22 @@ class TillerControllerTest(base.BaseControllerTest): return fake_release mock_tiller.return_value.list_releases.return_value = [ - _get_fake_release('foo', 'bar'), _get_fake_release('baz', 'qux') + _get_fake_release('foo', 'bar'), + _get_fake_release('baz', 'qux') ] result = self.app.simulate_get('/api/v1.0/releases') expected = { - 'releases': {'bar_namespace': ['foo'], 'qux_namespace': ['baz']} + 'releases': { + 'bar_namespace': ['foo'], + 'qux_namespace': ['baz'] + } } self.assertEqual(expected, result.json) mock_tiller.assert_called_once_with( - tiller_host=None, tiller_port=44134, + tiller_host=None, + tiller_port=44134, tiller_namespace='kube-system') mock_tiller.return_value.list_releases.assert_called_once_with() @@ -108,22 +124,30 @@ class TillerControllerTest(base.BaseControllerTest): return fake_release mock_tiller.return_value.list_releases.return_value = [ - _get_fake_release('foo', 'bar'), _get_fake_release('baz', 'qux') + _get_fake_release('foo', 'bar'), + _get_fake_release('baz', 'qux') ] - result = self.app.simulate_get('/api/v1.0/releases', - params_csv=False, - params={'tiller_host': 'fake_host', - 'tiller_port': '98765', - 'tiller_namespace': 'fake_ns'}) + result = self.app.simulate_get( + '/api/v1.0/releases', + params_csv=False, + params={ + 'tiller_host': 'fake_host', + 'tiller_port': '98765', + 'tiller_namespace': 'fake_ns' + }) expected = { - 'releases': {'bar_namespace': ['foo'], 'qux_namespace': ['baz']} + 'releases': { + 'bar_namespace': ['foo'], + 'qux_namespace': ['baz'] + } } self.assertEqual(expected, result.json) - mock_tiller.assert_called_once_with(tiller_host='fake_host', - tiller_port=98765, - tiller_namespace='fake_ns') + mock_tiller.assert_called_once_with( + tiller_host='fake_host', + tiller_port=98765, + tiller_namespace='fake_ns') mock_tiller.return_value.list_releases.assert_called_once_with() diff --git a/armada/tests/unit/api/test_versions_controller.py b/armada/tests/unit/api/test_versions_controller.py index 7f70fe94..e2b3b77f 100644 --- a/armada/tests/unit/api/test_versions_controller.py +++ b/armada/tests/unit/api/test_versions_controller.py @@ -22,10 +22,5 @@ class VersionsControllerTest(base.BaseControllerTest): Validate that /api/v1.0/health returns 204. 
""" result = self.app.simulate_get('/versions') - expected = { - 'v1.0': { - 'path': '/api/v1.0', - 'status': 'stable' - } - } + expected = {'v1.0': {'path': '/api/v1.0', 'status': 'stable'}} self.assertDictEqual(expected, result.json) diff --git a/armada/tests/unit/common/test_policy.py b/armada/tests/unit/common/test_policy.py index 36124602..30514592 100644 --- a/armada/tests/unit/common/test_policy.py +++ b/armada/tests/unit/common/test_policy.py @@ -20,7 +20,6 @@ from armada import conf as cfg from armada.exceptions import base_exception as exc from armada.tests.unit import fixtures - CONF = cfg.CONF @@ -47,9 +46,8 @@ class PolicyTestCase(testtools.TestCase): action = "example:nope" mock_ctx.to_policy_view.return_value = self.credentials - self.assertRaises( - exc.ActionForbidden, policy._enforce_policy, action, - self.target, mock_ctx) + self.assertRaises(exc.ActionForbidden, policy._enforce_policy, action, + self.target, mock_ctx) @mock.patch('armada.api.ArmadaRequestContext') def test_enforce_good_action(self, mock_ctx): @@ -63,5 +61,5 @@ class PolicyTestCase(testtools.TestCase): action = "example:disallowed" mock_ctx.to_policy_view.return_value = self.credentials - self.assertRaises(exc.ActionForbidden, policy._enforce_policy, - action, self.target, mock_ctx) + self.assertRaises(exc.ActionForbidden, policy._enforce_policy, action, + self.target, mock_ctx) diff --git a/armada/tests/unit/fake_policy.py b/armada/tests/unit/fake_policy.py index 679b7a68..e3bf0025 100644 --- a/armada/tests/unit/fake_policy.py +++ b/armada/tests/unit/fake_policy.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. - policy_data = """ "admin_required": "role:admin" "armada:create_endpoints": "rule:admin_required" diff --git a/armada/tests/unit/fixtures.py b/armada/tests/unit/fixtures.py index c2ed4bae..cd938bd7 100644 --- a/armada/tests/unit/fixtures.py +++ b/armada/tests/unit/fixtures.py @@ -82,8 +82,7 @@ class RealPolicyFixture(fixtures.Fixture): def _setUp(self): super(RealPolicyFixture, self)._setUp() self.policy_dir = self.useFixture(fixtures.TempDir()) - self.policy_file = os.path.join(self.policy_dir.path, - 'policy.yaml') + self.policy_file = os.path.join(self.policy_dir.path, 'policy.yaml') # Load the fake_policy data and add the missing default rules. policy_rules = yaml.safe_load(fake_policy.policy_data) self.add_missing_default_rules(policy_rules) @@ -92,8 +91,10 @@ class RealPolicyFixture(fixtures.Fixture): policy_opts.set_defaults(CONF) self.useFixture( - ConfPatcher(policy_dirs=[], policy_file=self.policy_file, - group='oslo_policy')) + ConfPatcher( + policy_dirs=[], + policy_file=self.policy_file, + group='oslo_policy')) armada.common.policy.reset_policy() armada.common.policy.setup_policy() @@ -106,18 +107,16 @@ class RealPolicyFixture(fixtures.Fixture): """Validate that the expected and actual policies are equivalent. Otherwise an ``AssertionError`` is raised. """ - if not (set(self.expected_policy_actions) == - set(self.actual_policy_actions)): + if not (set(self.expected_policy_actions) == set( + self.actual_policy_actions)): error_msg = ( 'The expected policy actions passed to ' '`self.policy.set_rules` do not match the policy actions ' 'that were actually enforced by Armada. Set of expected ' 'policies %s should be equal to set of actual policies: %s. ' 'There is either a bug with the test or with policy ' - 'enforcement in the controller.' 
% ( - self.expected_policy_actions, - self.actual_policy_actions) - ) + 'enforcement in the controller.' % + (self.expected_policy_actions, self.actual_policy_actions)) raise AssertionError(error_msg) def _install_policy_verification_hook(self): @@ -152,8 +151,7 @@ class RealPolicyFixture(fixtures.Fixture): self.expected_policy_actions = [] _do_enforce_rbac = armada.common.policy._enforce_policy - def enforce_policy_and_remember_actual_rules( - action, *a, **k): + def enforce_policy_and_remember_actual_rules(action, *a, **k): self.actual_policy_actions.append(action) _do_enforce_rbac(action, *a, **k) diff --git a/armada/tests/unit/handlers/test_armada.py b/armada/tests/unit/handlers/test_armada.py index 21f2ead0..bb61a4e6 100644 --- a/armada/tests/unit/handlers/test_armada.py +++ b/armada/tests/unit/handlers/test_armada.py @@ -131,10 +131,10 @@ data: timeout: 10 """ -CHART_SOURCES = [('git://github.com/dummy/armada', 'chart_1'), - ('/tmp/dummy/armada', 'chart_2'), - ('/tmp/dummy/armada', 'chart_3'), - ('/tmp/dummy/armada', 'chart_4')] +CHART_SOURCES = [('git://github.com/dummy/armada', + 'chart_1'), ('/tmp/dummy/armada', 'chart_2'), + ('/tmp/dummy/armada', 'chart_3'), ('/tmp/dummy/armada', + 'chart_4')] class ArmadaHandlerTestCase(base.ArmadaTestCase): @@ -144,110 +144,106 @@ class ArmadaHandlerTestCase(base.ArmadaTestCase): expected_config = { 'armada': { - 'release_prefix': 'armada', - 'chart_groups': [ - { - 'chart_group': [ - { - 'chart': { - 'dependencies': [], - 'chart_name': 'test_chart_1', - 'namespace': 'test', - 'release': 'test_chart_1', - 'source': { - 'location': ( - 'git://github.com/dummy/armada'), - 'reference': 'master', - 'subpath': 'chart_1', - 'type': 'git' - }, - 'source_dir': CHART_SOURCES[0], - 'values': {}, - 'wait': { - 'timeout': 10 - } - } + 'release_prefix': + 'armada', + 'chart_groups': [{ + 'chart_group': [{ + 'chart': { + 'dependencies': [], + 'chart_name': 'test_chart_1', + 'namespace': 'test', + 'release': 'test_chart_1', + 'source': { + 'location': ('git://github.com/dummy/armada'), + 'reference': 'master', + 'subpath': 'chart_1', + 'type': 'git' }, - { - 'chart': { - 'dependencies': [], - 'chart_name': 'test_chart_2', - 'namespace': 'test', - 'protected': { - 'continue_processing': True - }, - 'release': 'test_chart_2', - 'source': { - 'location': '/tmp/dummy/armada', - 'subpath': 'chart_2', - 'type': 'local' - }, - 'source_dir': CHART_SOURCES[1], - 'values': {}, - 'wait': { - 'timeout': 10 - }, - 'upgrade': { - 'no_hooks': False, - 'options': { - 'force': True, - 'recreate_pods': True - } - } - } + 'source_dir': CHART_SOURCES[0], + 'values': {}, + 'wait': { + 'timeout': 10 + } + } + }, { + 'chart': { + 'dependencies': [], + 'chart_name': 'test_chart_2', + 'namespace': 'test', + 'protected': { + 'continue_processing': True }, - { - 'chart': { - 'dependencies': [], - 'chart_name': 'test_chart_3', - 'namespace': 'test', - 'protected': { - 'continue_processing': False - }, - 'release': 'test_chart_3', - 'source': { - 'location': '/tmp/dummy/armada', - 'subpath': 'chart_3', - 'type': 'local' - }, - 'source_dir': CHART_SOURCES[2], - 'values': {}, - 'wait': { - 'timeout': 10 - }, - 'upgrade': { - 'no_hooks': False - } - } + 'release': 'test_chart_2', + 'source': { + 'location': '/tmp/dummy/armada', + 'subpath': 'chart_2', + 'type': 'local' }, - { - 'chart': { - 'dependencies': [], - 'chart_name': 'test_chart_4', - 'namespace': 'test', - 'release': 'test_chart_4', - 'source': { - 'location': '/tmp/dummy/armada', - 'subpath': 'chart_4', - 'type': 'local' - }, - 
'source_dir': CHART_SOURCES[3], - 'values': {}, - 'wait': { - 'timeout': 10 - }, - 'upgrade': { - 'no_hooks': False - }, - 'test': True + 'source_dir': CHART_SOURCES[1], + 'values': {}, + 'wait': { + 'timeout': 10 + }, + 'upgrade': { + 'no_hooks': False, + 'options': { + 'force': True, + 'recreate_pods': True } } - ], - 'description': 'this is a test', - 'name': 'example-group', - 'sequenced': False - } - ] + } + }, { + 'chart': { + 'dependencies': [], + 'chart_name': 'test_chart_3', + 'namespace': 'test', + 'protected': { + 'continue_processing': False + }, + 'release': 'test_chart_3', + 'source': { + 'location': '/tmp/dummy/armada', + 'subpath': 'chart_3', + 'type': 'local' + }, + 'source_dir': CHART_SOURCES[2], + 'values': {}, + 'wait': { + 'timeout': 10 + }, + 'upgrade': { + 'no_hooks': False + } + } + }, { + 'chart': { + 'dependencies': [], + 'chart_name': 'test_chart_4', + 'namespace': 'test', + 'release': 'test_chart_4', + 'source': { + 'location': '/tmp/dummy/armada', + 'subpath': 'chart_4', + 'type': 'local' + }, + 'source_dir': CHART_SOURCES[3], + 'values': {}, + 'wait': { + 'timeout': 10 + }, + 'upgrade': { + 'no_hooks': False + }, + 'test': True + } + }], + 'description': + 'this is a test', + 'name': + 'example-group', + 'sequenced': + False + }] } } @@ -270,12 +266,15 @@ class ArmadaHandlerTestCase(base.ArmadaTestCase): self._test_pre_flight_ops(armada_obj) - mock_tiller.assert_called_once_with(tiller_host=None, - tiller_namespace='kube-system', - tiller_port=44134, - dry_run=False) + mock_tiller.assert_called_once_with( + tiller_host=None, + tiller_namespace='kube-system', + tiller_port=44134, + dry_run=False) mock_source.git_clone.assert_called_once_with( - 'git://github.com/dummy/armada', 'master', auth_method=None, + 'git://github.com/dummy/armada', + 'master', + auth_method=None, proxy_server=None) @mock.patch.object(armada, 'source') @@ -300,7 +299,9 @@ class ArmadaHandlerTestCase(base.ArmadaTestCase): mock_source.source_cleanup.assert_called_with( CHART_SOURCES[counter][0]) - def _test_sync(self, known_releases, test_success=True, + def _test_sync(self, + known_releases, + test_success=True, test_failure_to_run=False): """Test install functionality from the sync() method.""" @@ -323,14 +324,13 @@ class ArmadaHandlerTestCase(base.ArmadaTestCase): m_tiller.list_charts.return_value = known_releases if test_failure_to_run: + def fail(tiller, release, timeout=None): - status = AttrDict(**{ - 'info': AttrDict(**{ - 'Description': 'Failed' - }) - }) + status = AttrDict( + **{'info': AttrDict(**{'Description': 'Failed'})}) raise tiller_exceptions.ReleaseException( release, status, 'Test') + mock_test_release_for_success.side_effect = fail else: mock_test_release_for_success.return_value = test_success @@ -359,15 +359,13 @@ class ArmadaHandlerTestCase(base.ArmadaTestCase): expected_install_release_calls.append( mock.call( mock_chartbuilder().get_helm_chart(), - "{}-{}".format(armada_obj.manifest['armada'][ - 'release_prefix'], - chart['release']), + "{}-{}".format( + armada_obj.manifest['armada'][ + 'release_prefix'], chart['release']), chart['namespace'], values=yaml.safe_dump(chart['values']), wait=this_chart_should_wait, - timeout=chart['wait']['timeout'] - ) - ) + timeout=chart['wait']['timeout'])) else: target_release = None for known_release in known_releases: @@ -391,9 +389,7 @@ class ArmadaHandlerTestCase(base.ArmadaTestCase): chart['namespace'], values=yaml.safe_dump(chart['values']), wait=this_chart_should_wait, - timeout=chart['wait']['timeout'] - ) - ) + 
timeout=chart['wait']['timeout'])) else: p_continue = protected.get( 'continue_processing', False) @@ -412,9 +408,8 @@ class ArmadaHandlerTestCase(base.ArmadaTestCase): mock.call( mock_chartbuilder().get_helm_chart(), "{}-{}".format( - armada_obj.manifest['armada'][ - 'release_prefix'], - chart['release']), + armada_obj.manifest['armada'] + ['release_prefix'], chart['release']), chart['namespace'], pre_actions={}, post_actions={}, @@ -423,21 +418,13 @@ class ArmadaHandlerTestCase(base.ArmadaTestCase): recreate_pods=recreate_pods, values=yaml.safe_dump(chart['values']), wait=this_chart_should_wait, - timeout=chart['wait']['timeout'] - ) - ) + timeout=chart['wait']['timeout'])) test_this_chart = chart.get( - 'test', - chart_group.get('test_charts', False)) + 'test', chart_group.get('test_charts', False)) if test_this_chart: expected_test_release_for_success_calls.append( - mock.call( - m_tiller, - release_name, - timeout=mock.ANY - ) - ) + mock.call(m_tiller, release_name, timeout=mock.ANY)) # Verify that at least 1 release is either installed or updated. self.assertTrue( @@ -445,26 +432,30 @@ class ArmadaHandlerTestCase(base.ArmadaTestCase): len(expected_update_release_calls) >= 1) # Verify that the expected number of non-deployed releases are # installed with expected arguments. - self.assertEqual(len(expected_install_release_calls), - m_tiller.install_release.call_count) + self.assertEqual( + len(expected_install_release_calls), + m_tiller.install_release.call_count) m_tiller.install_release.assert_has_calls( expected_install_release_calls) # Verify that the expected number of deployed releases are # updated with expected arguments. - self.assertEqual(len(expected_update_release_calls), - m_tiller.update_release.call_count) + self.assertEqual( + len(expected_update_release_calls), + m_tiller.update_release.call_count) m_tiller.update_release.assert_has_calls( expected_update_release_calls) # Verify that the expected number of deployed releases are # uninstalled with expected arguments. - self.assertEqual(len(expected_uninstall_release_calls), - m_tiller.uninstall_release.call_count) + self.assertEqual( + len(expected_uninstall_release_calls), + m_tiller.uninstall_release.call_count) m_tiller.uninstall_release.assert_has_calls( expected_uninstall_release_calls) # Verify that the expected number of deployed releases are # tested with expected arguments. 
- self.assertEqual(len(expected_test_release_for_success_calls), - mock_test_release_for_success.call_count) + self.assertEqual( + len(expected_test_release_for_success_calls), + mock_test_release_for_success.call_count) mock_test_release_for_success.assert_has_calls( expected_test_release_for_success_calls) @@ -473,8 +464,9 @@ class ArmadaHandlerTestCase(base.ArmadaTestCase): def _get_chart_by_name(self, name): name = name.split('armada-')[-1] yaml_documents = list(yaml.safe_load_all(TEST_YAML)) - return [c for c in yaml_documents - if c['data'].get('chart_name') == name][0] + return [ + c for c in yaml_documents if c['data'].get('chart_name') == name + ][0] def test_armada_sync_with_no_deployed_releases(self): known_releases = [] @@ -483,75 +475,71 @@ class ArmadaHandlerTestCase(base.ArmadaTestCase): def test_armada_sync_with_one_deployed_release(self): c1 = 'armada-test_chart_1' - known_releases = [ - [c1, None, self._get_chart_by_name(c1), None, - const.STATUS_DEPLOYED] - ] + known_releases = [[ + c1, None, + self._get_chart_by_name(c1), None, const.STATUS_DEPLOYED + ]] self._test_sync(known_releases) def test_armada_sync_with_both_deployed_releases(self): c1 = 'armada-test_chart_1' c2 = 'armada-test_chart_2' - known_releases = [ - [c1, None, self._get_chart_by_name(c1), None, - const.STATUS_DEPLOYED], - [c2, None, self._get_chart_by_name(c2), None, - const.STATUS_DEPLOYED] - ] + known_releases = [[ + c1, None, + self._get_chart_by_name(c1), None, const.STATUS_DEPLOYED + ], [ + c2, None, + self._get_chart_by_name(c2), None, const.STATUS_DEPLOYED + ]] self._test_sync(known_releases) def test_armada_sync_with_unprotected_releases(self): c1 = 'armada-test_chart_1' - known_releases = [ - [c1, None, self._get_chart_by_name(c1), None, - const.STATUS_FAILED] - ] + known_releases = [[ + c1, None, + self._get_chart_by_name(c1), None, const.STATUS_FAILED + ]] self._test_sync(known_releases) def test_armada_sync_with_protected_releases_continue(self): c1 = 'armada-test_chart_1' c2 = 'armada-test_chart_2' - known_releases = [ - [c2, None, self._get_chart_by_name(c2), None, - const.STATUS_FAILED], - [c1, None, self._get_chart_by_name(c1), None, - const.STATUS_FAILED] - ] + known_releases = [[ + c2, None, + self._get_chart_by_name(c2), None, const.STATUS_FAILED + ], [c1, None, + self._get_chart_by_name(c1), None, const.STATUS_FAILED]] self._test_sync(known_releases) def test_armada_sync_with_protected_releases_halt(self): c3 = 'armada-test_chart_3' - known_releases = [ - [c3, None, self._get_chart_by_name(c3), None, - const.STATUS_FAILED] - ] + known_releases = [[ + c3, None, + self._get_chart_by_name(c3), None, const.STATUS_FAILED + ]] def _test_method(): self._test_sync(known_releases) - self.assertRaises( - ProtectedReleaseException, - _test_method) + self.assertRaises(ProtectedReleaseException, _test_method) def test_armada_sync_test_failure(self): + def _test_method(): self._test_sync([], test_success=False) - self.assertRaises( - tiller_exceptions.TestFailedException, - _test_method) + self.assertRaises(tiller_exceptions.TestFailedException, _test_method) def test_armada_sync_test_failure_to_run(self): + def _test_method(): self._test_sync([], test_failure_to_run=True) - self.assertRaises( - tiller_exceptions.ReleaseException, - _test_method) + self.assertRaises(tiller_exceptions.ReleaseException, _test_method) @mock.patch.object(armada.Armada, 'post_flight_ops') @mock.patch.object(armada.Armada, 'pre_flight_ops') diff --git a/armada/tests/unit/handlers/test_chartbuilder.py 
b/armada/tests/unit/handlers/test_chartbuilder.py index b6ee5aee..0d99d867 100644 --- a/armada/tests/unit/handlers/test_chartbuilder.py +++ b/armada/tests/unit/handlers/test_chartbuilder.py @@ -145,9 +145,8 @@ class ChartBuilderTestCase(BaseChartBuilderTestCase): test_chart = {'source_dir': (chart_dir.path, '')} chartbuilder = ChartBuilder(test_chart) - self.assertRaises( - chartbuilder_exceptions.MetadataLoadException, - chartbuilder.get_metadata) + self.assertRaises(chartbuilder_exceptions.MetadataLoadException, + chartbuilder.get_metadata) def test_get_files(self): """Validates that ``get_files()`` ignores 'Chart.yaml', 'values.yaml' @@ -172,11 +171,11 @@ class ChartBuilderTestCase(BaseChartBuilderTestCase): test_chart = {'source_dir': (chart_dir.path, '')} chartbuilder = ChartBuilder(test_chart) - expected_files = ('[type_url: "%s"\n, type_url: "%s"\n]' % ('./bar', - './foo')) + expected_files = ( + '[type_url: "%s"\n, type_url: "%s"\n]' % ('./bar', './foo')) # Validate that only 'foo' and 'bar' are returned. - actual_files = sorted(chartbuilder.get_files(), - key=lambda x: x.type_url) + actual_files = sorted( + chartbuilder.get_files(), key=lambda x: x.type_url) self.assertEqual(expected_files, repr(actual_files).strip()) def test_get_files_with_unicode_characters(self): @@ -205,8 +204,7 @@ class ChartBuilderTestCase(BaseChartBuilderTestCase): chartbuilder = ChartBuilder(test_chart) helm_chart = chartbuilder.get_helm_chart() - expected = inspect.cleandoc( - """ + expected = inspect.cleandoc(""" metadata { name: "hello-world-chart" version: "0.1.0" @@ -214,8 +212,7 @@ class ChartBuilderTestCase(BaseChartBuilderTestCase): } values { } - """ - ).strip() + """).strip() self.assertIsInstance(helm_chart, Chart) self.assertTrue(hasattr(helm_chart, 'metadata')) @@ -256,8 +253,8 @@ class ChartBuilderTestCase(BaseChartBuilderTestCase): # Also create a nested directory and verify that files from it are also # added. - nested_dir = self._make_temporary_subdirectory( - chart_dir.path, 'nested') + nested_dir = self._make_temporary_subdirectory(chart_dir.path, + 'nested') self._write_temporary_file_contents(nested_dir, 'nested0', "random") ch = yaml.safe_load(self.chart_stream)['chart'] @@ -303,20 +300,18 @@ class ChartBuilderTestCase(BaseChartBuilderTestCase): self._write_temporary_file_contents(chart_dir.path, file, "") file_to_ignore = 'file_to_ignore' # Files to ignore within templates/ subdirectory. - self._write_temporary_file_contents( - templates_subdir, file_to_ignore, "") + self._write_temporary_file_contents(templates_subdir, file_to_ignore, + "") # Files to ignore within charts/ subdirectory. - self._write_temporary_file_contents( - charts_subdir, file_to_ignore, "") + self._write_temporary_file_contents(charts_subdir, file_to_ignore, "") # Files to ignore within templates/bin subdirectory. - self._write_temporary_file_contents( - templates_nested_subdir, file_to_ignore, "") + self._write_temporary_file_contents(templates_nested_subdir, + file_to_ignore, "") # Files to ignore within charts/extra subdirectory. - self._write_temporary_file_contents( - charts_nested_subdir, file_to_ignore, "") + self._write_temporary_file_contents(charts_nested_subdir, + file_to_ignore, "") # Files to **include** within charts/ subdirectory. 
-        self._write_temporary_file_contents(
-            charts_subdir, '.prov', "xyzzy")
+        self._write_temporary_file_contents(charts_subdir, '.prov', "xyzzy")
         ch = yaml.safe_load(self.chart_stream)['chart']
         ch['source_dir'] = (chart_dir.path, '')
diff --git a/armada/tests/unit/handlers/test_manifest.py b/armada/tests/unit/handlers/test_manifest.py
index 71eb524c..dba5243b 100644
--- a/armada/tests/unit/handlers/test_manifest.py
+++ b/armada/tests/unit/handlers/test_manifest.py
@@ -28,8 +28,8 @@ class ManifestTestCase(testtools.TestCase):
     def setUp(self):
         super(ManifestTestCase, self).setUp()
-        examples_dir = os.path.join(
-            os.getcwd(), 'armada', 'tests', 'unit', 'resources')
+        examples_dir = os.path.join(os.getcwd(), 'armada', 'tests', 'unit',
+                                    'resources')
         with open(os.path.join(examples_dir, 'keystone-manifest.yaml')) as f:
             self.documents = list(yaml.safe_load_all(f.read()))
@@ -139,8 +139,7 @@ class ManifestTestCase(testtools.TestCase):
         keystone_infra_services_chart_group = armada_manifest. \
             find_chart_group_document('keystone-infra-services')
-        self.assertEqual(chart_groups[0],
-                         keystone_infra_services_chart_group)
+        self.assertEqual(chart_groups[0], keystone_infra_services_chart_group)
         openstack_keystone_chart_group = armada_manifest. \
             find_chart_group_document('openstack-keystone')
@@ -224,8 +223,8 @@ class ManifestTestCase(testtools.TestCase):
         keystone_chart = armada_manifest.find_chart_document('keystone')
         keystone_chart_with_deps = armada_manifest.build_chart_deps(
             keystone_chart)
-        keystone_dependencies = keystone_chart_with_deps[
-            'data']['dependencies']
+        keystone_dependencies = keystone_chart_with_deps['data'][
+            'dependencies']
         self.assertEqual(openstack_keystone_chart_group_deps_dep_added[0],
                          keystone_dependencies[0])
@@ -243,15 +242,14 @@ class ManifestTestCase(testtools.TestCase):
         mariadb_chart = armada_manifest.find_chart_document('mariadb')
         mariadb_chart_with_deps = armada_manifest.build_chart_deps(
             mariadb_chart)
-        mariadb_dependencies = mariadb_chart_with_deps[
-            'data']['dependencies']
+        mariadb_dependencies = mariadb_chart_with_deps['data']['dependencies']
         # building memcached chart dependencies
         memcached_chart = armada_manifest.find_chart_document('memcached')
         memcached_chart_with_deps = armada_manifest.build_chart_deps(
             memcached_chart)
-        memcached_dependencies = memcached_chart_with_deps[
-            'data']['dependencies']
+        memcached_dependencies = memcached_chart_with_deps['data'][
+            'dependencies']
         self.assertEqual(keystone_infra_services_dep_added[0],
                          mariadb_dependencies[0])
@@ -275,8 +273,9 @@ class ManifestTestCase(testtools.TestCase):
         # helm-toolkit dependency, the basis for comparison of d
         # ependencies in other charts
-        expected_helm_toolkit_dependency = {'chart': helm_toolkit_chart.get(
-            'data')}
+        expected_helm_toolkit_dependency = {
+            'chart': helm_toolkit_chart.get('data')
+        }
         # keystone chart dependencies
         keystone_chart = armada_manifest.find_chart_document('keystone')
@@ -288,8 +287,8 @@ class ManifestTestCase(testtools.TestCase):
         self.assertIn('data', keystone_chart_with_deps)
         self.assertIn('dependencies', keystone_chart_with_deps['data'])
-        keystone_dependencies = keystone_chart_with_deps[
-            'data']['dependencies']
+        keystone_dependencies = keystone_chart_with_deps['data'][
+            'dependencies']
         self.assertIsInstance(keystone_dependencies, list)
         self.assertEqual(1, len(keystone_dependencies))
@@ -306,8 +305,7 @@ class ManifestTestCase(testtools.TestCase):
         self.assertIn('data', mariadb_chart_with_deps)
         self.assertIn('dependencies', mariadb_chart_with_deps['data'])
-        mariadb_dependencies = mariadb_chart_with_deps[
-            'data']['dependencies']
+        mariadb_dependencies = mariadb_chart_with_deps['data']['dependencies']
         self.assertIsInstance(mariadb_dependencies, list)
         self.assertEqual(1, len(mariadb_dependencies))
@@ -325,8 +323,8 @@ class ManifestTestCase(testtools.TestCase):
         self.assertIn('data', memcached_chart_with_deps)
         self.assertIn('dependencies', memcached_chart_with_deps['data'])
-        memcached_dependencies = memcached_chart_with_deps[
-            'data']['dependencies']
+        memcached_dependencies = memcached_chart_with_deps['data'][
+            'dependencies']
         self.assertIsInstance(memcached_dependencies, list)
         self.assertEqual(1, len(memcached_dependencies))
@@ -338,8 +336,8 @@ class ManifestNegativeTestCase(testtools.TestCase):
     def setUp(self):
         super(ManifestNegativeTestCase, self).setUp()
-        examples_dir = os.path.join(
-            os.getcwd(), 'armada', 'tests', 'unit', 'resources')
+        examples_dir = os.path.join(os.getcwd(), 'armada', 'tests', 'unit',
+                                    'resources')
         with open(os.path.join(examples_dir, 'keystone-manifest.yaml')) as f:
             self.documents = list(yaml.safe_load_all(f.read()))
@@ -350,9 +348,8 @@ class ManifestNegativeTestCase(testtools.TestCase):
         documents.append(documents[-1])  # Copy the last manifest.
         error_re = r'Multiple manifests are not supported.*'
-        self.assertRaisesRegexp(
-            exceptions.ManifestException, error_re, manifest.Manifest,
-            documents)
+        self.assertRaisesRegexp(exceptions.ManifestException, error_re,
+                                manifest.Manifest, documents)
     def test_get_documents_multi_target_manifests_raises_value_error(self):
         # Validates that finding multiple manifests with `target_manifest`
@@ -362,26 +359,27 @@ class ManifestNegativeTestCase(testtools.TestCase):
         error_re = r'Multiple manifests are not supported.*'
         self.assertRaisesRegexp(
-            exceptions.ManifestException, error_re, manifest.Manifest,
-            documents, target_manifest='armada-manifest')
+            exceptions.ManifestException,
+            error_re,
+            manifest.Manifest,
+            documents,
+            target_manifest='armada-manifest')
     def test_get_documents_missing_manifest(self):
         # Validates exceptions.ManifestException is thrown if no manifest is
         # found. Manifest is last document in sample YAML.
         error_re = ('Documents must be a list of documents with at least one '
                     'of each of the following schemas: .*')
-        self.assertRaisesRegexp(
-            exceptions.ManifestException, error_re, manifest.Manifest,
-            self.documents[:-1])
+        self.assertRaisesRegexp(exceptions.ManifestException, error_re,
+                                manifest.Manifest, self.documents[:-1])
     def test_get_documents_missing_charts(self):
         # Validates exceptions.ManifestException is thrown if no chart is
         # found. Charts are first 4 documents in sample YAML.
         error_re = ('Documents must be a list of documents with at least one '
                     'of each of the following schemas: .*')
-        self.assertRaisesRegexp(
-            exceptions.ManifestException, error_re, manifest.Manifest,
-            self.documents[4:])
+        self.assertRaisesRegexp(exceptions.ManifestException, error_re,
+                                manifest.Manifest, self.documents[4:])
     def test_get_documents_missing_chart_groups(self):
         # Validates exceptions.ManifestException is thrown if no chart is
@@ -389,21 +387,20 @@ class ManifestNegativeTestCase(testtools.TestCase):
         documents = self.documents[:4] + [self.documents[-1]]
         error_re = ('Documents must be a list of documents with at least one '
                     'of each of the following schemas: .*')
-        self.assertRaisesRegexp(
-            exceptions.ManifestException, error_re, manifest.Manifest,
-            documents)
+        self.assertRaisesRegexp(exceptions.ManifestException, error_re,
+                                manifest.Manifest, documents)
     def test_find_chart_document_negative(self):
         armada_manifest = manifest.Manifest(self.documents)
-        error_re = r'Could not find a %s named "%s"' % (
-            const.DOCUMENT_CHART, 'invalid')
+        error_re = r'Could not find a %s named "%s"' % (const.DOCUMENT_CHART,
+                                                        'invalid')
         self.assertRaisesRegexp(exceptions.ManifestException, error_re,
                                 armada_manifest.find_chart_document, 'invalid')
     def test_find_group_document_negative(self):
         armada_manifest = manifest.Manifest(self.documents)
-        error_re = r'Could not find a %s named "%s"' % (
-            const.DOCUMENT_GROUP, 'invalid')
+        error_re = r'Could not find a %s named "%s"' % (const.DOCUMENT_GROUP,
+                                                        'invalid')
         self.assertRaisesRegexp(exceptions.ManifestException, error_re,
                                 armada_manifest.find_chart_group_document,
                                 'invalid')
diff --git a/armada/tests/unit/handlers/test_override.py b/armada/tests/unit/handlers/test_override.py
index 807c313f..f45fa45d 100644
--- a/armada/tests/unit/handlers/test_override.py
+++ b/armada/tests/unit/handlers/test_override.py
@@ -25,6 +25,7 @@ from armada import const
 class OverrideTestCase(testtools.TestCase):
+
     def setUp(self):
         super(OverrideTestCase, self).setUp()
         self.basepath = os.path.join(os.path.dirname(__file__))
@@ -64,8 +65,7 @@ class OverrideTestCase(testtools.TestCase):
             documents_copy = copy.deepcopy(original_documents)
             values_documents = list(yaml.safe_load_all(g.read()))
-        override = ('manifest:simple-armada:release_prefix='
-                    'overridden',)
+        override = ('manifest:simple-armada:release_prefix=' 'overridden', )
         # Case 1: Checking if primitive gets updated.
         ovr = Override(original_documents, override, [values_yaml])
@@ -75,15 +75,14 @@ class OverrideTestCase(testtools.TestCase):
         self.assertNotEqual(original_documents, documents_copy)
         # since overrides done, these documents aren't same anymore
         self.assertNotEqual(original_documents, values_documents)
-        target_doc = [x
-                      for x
-                      in ovr.documents
-                      if x.get('metadata').get('name') == 'simple-armada'][0]
-        self.assertEqual('overridden',
-                         target_doc['data']['release_prefix'])
+        target_doc = [
+            x for x in ovr.documents
+            if x.get('metadata').get('name') == 'simple-armada'
+        ][0]
+        self.assertEqual('overridden', target_doc['data']['release_prefix'])
         override = ('manifest:simple-armada:chart_groups='
-                    'blog-group3,blog-group4',)
+                    'blog-group3,blog-group4', )
         # Case 2: Checking if list gets updated.
         ovr = Override(original_documents, override, [values_yaml])
@@ -103,8 +102,7 @@ class OverrideTestCase(testtools.TestCase):
             original_documents = list(yaml.safe_load_all(f.read()))
         original_documents[-1]['data']['test'] = {'foo': 'bar'}
-        override = ('manifest:simple-armada:test='
-                    '{"foo": "bar"}',)
+        override = ('manifest:simple-armada:test=' '{"foo": "bar"}', )
         ovr = Override(original_documents, override, [])
         self.assertRaises(json.decoder.JSONDecodeError, ovr.update_manifests)
@@ -283,15 +281,15 @@ class OverrideTestCase(testtools.TestCase):
             documents = list(yaml.safe_load_all(f.read()))
             doc_path = ['manifest', 'simple-armada']
             override = ('manifest:simple-armada:chart_groups=\
-                blog-group3,blog-group4',)
+                blog-group3,blog-group4', )
             ovr = Override(documents, override)
             ovr.update_manifests()
             ovr_doc = ovr.find_manifest_document(doc_path)
             target_docs = list(yaml.load_all(e.read()))
-            expected_doc = [x
-                            for x
-                            in target_docs
-                            if x.get('schema') == 'armada/Manifest/v1'][0]
+            expected_doc = [
+                x for x in target_docs
+                if x.get('schema') == 'armada/Manifest/v1'
+            ][0]
             self.assertEqual(expected_doc.get('data'), ovr_doc.get('data'))
     def test_find_manifest_document_valid(self):
@@ -316,6 +314,7 @@ class OverrideTestCase(testtools.TestCase):
 class OverrideNegativeTestCase(testtools.TestCase):
+
     def setUp(self):
         super(OverrideNegativeTestCase, self).setUp()
         self.basepath = os.path.join(os.path.dirname(__file__))
diff --git a/armada/tests/unit/handlers/test_test.py b/armada/tests/unit/handlers/test_test.py
index 6160f423..93afda5e 100644
--- a/armada/tests/unit/handlers/test_test.py
+++ b/armada/tests/unit/handlers/test_test.py
@@ -30,9 +30,8 @@ class TestHandlerTestCase(base.ArmadaTestCase):
         release = 'release'
         tiller_obj.test_release = mock.Mock()
-        tiller_obj.test_release.return_value = AttrDict(**{
-            'results': results
-        })
+        tiller_obj.test_release.return_value = AttrDict(
+            **{'results': results})
         success = test.test_release_for_success(tiller_obj, release)
         self.assertEqual(expected_success, success)
@@ -44,37 +43,22 @@ def test_unknown(self):
         self._test_test_release_for_success(False, [
-            AttrDict(**{
-                'status': test.TESTRUN_STATUS_SUCCESS
-            }),
-            AttrDict(**{
-                'status': test.TESTRUN_STATUS_UNKNOWN
-            })
+            AttrDict(**{'status': test.TESTRUN_STATUS_SUCCESS}),
+            AttrDict(**{'status': test.TESTRUN_STATUS_UNKNOWN})
         ])
     def test_success(self):
-        self._test_test_release_for_success(True, [
-            AttrDict(**{
-                'status': test.TESTRUN_STATUS_SUCCESS
-            })
-        ])
+        self._test_test_release_for_success(
+            True, [AttrDict(**{'status': test.TESTRUN_STATUS_SUCCESS})])
     def test_failure(self):
         self._test_test_release_for_success(False, [
-            AttrDict(**{
-                'status': test.TESTRUN_STATUS_SUCCESS
-            }),
-            AttrDict(**{
-                'status': test.TESTRUN_STATUS_FAILURE
-            })
+            AttrDict(**{'status': test.TESTRUN_STATUS_SUCCESS}),
+            AttrDict(**{'status': test.TESTRUN_STATUS_FAILURE})
         ])
     def test_running(self):
         self._test_test_release_for_success(False, [
-            AttrDict(**{
-                'status': test.TESTRUN_STATUS_SUCCESS
-            }),
-            AttrDict(**{
-                'status': test.TESTRUN_STATUS_RUNNING
-            })
+            AttrDict(**{'status': test.TESTRUN_STATUS_SUCCESS}),
+            AttrDict(**{'status': test.TESTRUN_STATUS_RUNNING})
         ])
diff --git a/armada/tests/unit/handlers/test_tiller.py b/armada/tests/unit/handlers/test_tiller.py
index d3ca5b59..6b7da5e1 100644
--- a/armada/tests/unit/handlers/test_tiller.py
+++ b/armada/tests/unit/handlers/test_tiller.py
@@ -48,8 +48,12 @@ class TillerTestCase(base.ArmadaTestCase):
         timeout = 3600
         tiller_obj.install_release(
-            chart, name, namespace, values=initial_values,
-            wait=wait, timeout=timeout)
+            chart,
+            name,
+            namespace,
+            values=initial_values,
+            wait=wait,
+            timeout=timeout)
         mock_stub.assert_called_with(tiller_obj.channel)
         release_request = mock_install_request(
@@ -58,12 +62,9 @@ class TillerTestCase(base.ArmadaTestCase):
             release=name,
             namespace=namespace,
             wait=wait,
-            timeout=timeout
-        )
-        (mock_stub(tiller_obj.channel).InstallRelease
-         .assert_called_with(release_request,
-                             timeout + 60,
-                             metadata=tiller_obj.metadata))
+            timeout=timeout)
+        (mock_stub(tiller_obj.channel).InstallRelease.assert_called_with(
+            release_request, timeout + 60, metadata=tiller_obj.metadata))
     @mock.patch('armada.handlers.tiller.K8s', autospec=True)
     @mock.patch.object(tiller.Tiller, '_get_tiller_ip', autospec=True)
@@ -83,10 +84,9 @@ class TillerTestCase(base.ArmadaTestCase):
         mock_grpc.insecure_channel.assert_called_once_with(
             '%s:%s' % (str(mock.sentinel.ip), str(mock.sentinel.port)),
             options=[('grpc.max_send_message_length',
-                      tiller.MAX_MESSAGE_LENGTH),
+                      tiller.MAX_MESSAGE_LENGTH),
                      ('grpc.max_receive_message_length',
-                      tiller.MAX_MESSAGE_LENGTH)]
-        )
+                      tiller.MAX_MESSAGE_LENGTH)])
     @mock.patch('armada.handlers.tiller.K8s', autospec=True)
     @mock.patch('armada.handlers.tiller.grpc', autospec=True)
@@ -131,8 +131,7 @@ class TillerTestCase(base.ArmadaTestCase):
     def test_get_tiller_namespace(self, mock_grpc, _, mock_ip):
         # verifies namespace set via instantiation
         tiller_obj = tiller.Tiller(None, None, 'test_namespace2')
-        self.assertEqual('test_namespace2',
-                         tiller_obj._get_tiller_namespace())
+        self.assertEqual('test_namespace2', tiller_obj._get_tiller_namespace())
     @mock.patch.object(tiller.Tiller, '_get_tiller_ip', autospec=True)
     @mock.patch('armada.handlers.tiller.K8s', autospec=True)
@@ -179,18 +178,19 @@ class TillerTestCase(base.ArmadaTestCase):
         tiller_obj = tiller.Tiller('host', '8080', None)
         self.assertEqual(['foo', 'bar'], tiller_obj.list_releases())
-        mock_release_service_stub.assert_called_once_with(
-            tiller_obj.channel)
+        mock_release_service_stub.assert_called_once_with(tiller_obj.channel)
         list_releases_stub = mock_release_service_stub.return_value. \
             ListReleases
         list_releases_stub.assert_called_once_with(
-            mock_list_releases_request.return_value, tiller_obj.timeout,
+            mock_list_releases_request.return_value,
+            tiller_obj.timeout,
             metadata=tiller_obj.metadata)
         mock_list_releases_request.assert_called_once_with(
             limit=tiller.RELEASE_LIMIT,
-            status_codes=[tiller.const.STATUS_DEPLOYED,
-                          tiller.const.STATUS_FAILED],
+            status_codes=[
+                tiller.const.STATUS_DEPLOYED, tiller.const.STATUS_FAILED
+            ],
             sort_by='LAST_RELEASED',
             sort_order='DESC')
@@ -199,8 +199,7 @@ class TillerTestCase(base.ArmadaTestCase):
     @mock.patch.object(tiller, 'GetReleaseContentRequest')
     @mock.patch.object(tiller, 'ReleaseServiceStub')
     def test_get_release_content(self, mock_release_service_stub,
-                                 mock_release_content_request,
-                                 mock_grpc, _):
+                                 mock_release_content_request, mock_grpc, _):
         mock_release_service_stub.return_value.GetReleaseContent\
             .return_value = {}
@@ -210,7 +209,8 @@ class TillerTestCase(base.ArmadaTestCase):
         get_release_content_stub = mock_release_service_stub. \
             return_value.GetReleaseContent
         get_release_content_stub.assert_called_once_with(
-            mock_release_content_request.return_value, tiller_obj.timeout,
+            mock_release_content_request.return_value,
+            tiller_obj.timeout,
             metadata=tiller_obj.metadata)
     @mock.patch('armada.handlers.tiller.K8s')
@@ -218,8 +218,7 @@ class TillerTestCase(base.ArmadaTestCase):
     @mock.patch.object(tiller, 'GetVersionRequest')
     @mock.patch.object(tiller, 'ReleaseServiceStub')
     def test_tiller_version(self, mock_release_service_stub,
-                            mock_version_request,
-                            mock_grpc, _):
+                            mock_version_request, mock_grpc, _):
         mock_version = mock.Mock()
         mock_version.Version.sem_ver = mock.sentinel.sem_ver
@@ -230,12 +229,12 @@ class TillerTestCase(base.ArmadaTestCase):
         self.assertEqual(mock.sentinel.sem_ver, tiller_obj.tiller_version())
-        mock_release_service_stub.assert_called_once_with(
-            tiller_obj.channel)
+        mock_release_service_stub.assert_called_once_with(tiller_obj.channel)
         get_version_stub = mock_release_service_stub.return_value.GetVersion
         get_version_stub.assert_called_once_with(
-            mock_version_request.return_value, tiller_obj.timeout,
+            mock_version_request.return_value,
+            tiller_obj.timeout,
             metadata=tiller_obj.metadata)
     @mock.patch('armada.handlers.tiller.K8s')
@@ -252,12 +251,12 @@ class TillerTestCase(base.ArmadaTestCase):
         tiller_obj = tiller.Tiller('host', '8080', None)
         self.assertEqual({}, tiller_obj.get_release_status('release'))
-        mock_release_service_stub.assert_called_once_with(
-            tiller_obj.channel)
+        mock_release_service_stub.assert_called_once_with(tiller_obj.channel)
         get_release_status_stub = mock_release_service_stub.return_value. \
             GetReleaseStatus
         get_release_status_stub.assert_called_once_with(
-            mock_rel_status_request.return_value, tiller_obj.timeout,
+            mock_rel_status_request.return_value,
+            tiller_obj.timeout,
             metadata=tiller_obj.metadata)
     @mock.patch('armada.handlers.tiller.K8s')
@@ -265,8 +264,7 @@ class TillerTestCase(base.ArmadaTestCase):
     @mock.patch.object(tiller, 'UninstallReleaseRequest')
    @mock.patch.object(tiller, 'ReleaseServiceStub')
     def test_uninstall_release(self, mock_release_service_stub,
-                               mock_uninstall_release_request,
-                               mock_grpc, _):
+                               mock_uninstall_release_request, mock_grpc, _):
         mock_release_service_stub.return_value.UninstallRelease\
             .return_value = {}
@@ -274,13 +272,13 @@ class TillerTestCase(base.ArmadaTestCase):
         self.assertEqual({}, tiller_obj.uninstall_release('release'))
-        mock_release_service_stub.assert_called_once_with(
-            tiller_obj.channel)
+        mock_release_service_stub.assert_called_once_with(tiller_obj.channel)
         uninstall_release_stub = mock_release_service_stub.return_value. \
             UninstallRelease
         uninstall_release_stub.assert_called_once_with(
-            mock_uninstall_release_request.return_value, tiller_obj.timeout,
+            mock_uninstall_release_request.return_value,
+            tiller_obj.timeout,
             metadata=tiller_obj.metadata)
     @mock.patch('armada.handlers.tiller.K8s')
@@ -303,9 +301,14 @@ class TillerTestCase(base.ArmadaTestCase):
         recreate_pods = True
         force = True
-        self.assertIsNone(tiller_obj.rollback_release(
-            release, version, wait=wait, timeout=timeout, force=force,
-            recreate_pods=recreate_pods))
+        self.assertIsNone(
+            tiller_obj.rollback_release(
+                release,
+                version,
+                wait=wait,
+                timeout=timeout,
+                force=force,
+                recreate_pods=recreate_pods))
         mock_rollback_release_request.assert_called_once_with(
             name=release,
@@ -316,14 +319,13 @@ class TillerTestCase(base.ArmadaTestCase):
             force=force,
             recreate=recreate_pods)
-        mock_release_service_stub.assert_called_once_with(
-            tiller_obj.channel)
+        mock_release_service_stub.assert_called_once_with(tiller_obj.channel)
         rollback_release_stub = mock_release_service_stub.return_value. \
             RollbackRelease
         rollback_release_stub.assert_called_once_with(
-            mock_rollback_release_request.return_value, timeout +
-            tiller.GRPC_EPSILON,
+            mock_rollback_release_request.return_value,
+            timeout + tiller.GRPC_EPSILON,
             metadata=tiller_obj.metadata)
     @mock.patch('armada.handlers.tiller.K8s')
@@ -332,8 +334,7 @@ class TillerTestCase(base.ArmadaTestCase):
     @mock.patch.object(tiller, 'UpdateReleaseRequest')
     @mock.patch.object(tiller, 'ReleaseServiceStub')
     def test_update_release(self, mock_release_service_stub,
-                            mock_update_release_request, mock_config,
-                            _, __):
+                            mock_update_release_request, mock_config, _, __):
         release = 'release'
         chart = {}
         namespace = 'namespace'
@@ -377,7 +378,9 @@ class TillerTestCase(base.ArmadaTestCase):
         recreate_pods = True
         result = tiller_obj.update_release(
-            chart, release, namespace,
+            chart,
+            release,
+            namespace,
             pre_actions=pre_actions,
             post_actions=post_actions,
             disable_hooks=disable_hooks,
@@ -404,22 +407,17 @@ class TillerTestCase(base.ArmadaTestCase):
             force=force,
             recreate=recreate_pods)
-        mock_release_service_stub.assert_called_once_with(
-            tiller_obj.channel)
+        mock_release_service_stub.assert_called_once_with(tiller_obj.channel)
         update_release_stub = mock_release_service_stub.return_value. \
             UpdateRelease
         update_release_stub.assert_called_once_with(
-            mock_update_release_request.return_value, timeout +
-            tiller.GRPC_EPSILON,
+            mock_update_release_request.return_value,
+            timeout + tiller.GRPC_EPSILON,
             metadata=tiller_obj.metadata)
-        expected_result = tiller.TillerResult(
-            release,
-            namespace,
-            status,
-            description,
-            version)
+        expected_result = tiller.TillerResult(release, namespace, status,
+                                              description, version)
         self.assertEqual(expected_result, result)
@@ -430,9 +428,8 @@ class TillerTestCase(base.ArmadaTestCase):
         @mock.patch('armada.handlers.tiller.Config')
         @mock.patch.object(tiller, 'TestReleaseRequest')
         @mock.patch.object(tiller, 'ReleaseServiceStub')
-        def do_test(self, mock_release_service_stub,
-                    mock_test_release_request, mock_config,
-                    _, __):
+        def do_test(self, mock_release_service_stub, mock_test_release_request,
+                    mock_config, _, __):
             tiller_obj = tiller.Tiller('host', '8080', None)
             release = 'release'
             test_suite_run = {}
@@ -441,14 +438,18 @@ class TillerTestCase(base.ArmadaTestCase):
                 .return_value = grpc_response_mock
             tiller_obj.get_release_status = mock.Mock()
-            tiller_obj.get_release_status.return_value = AttrDict(**{
-                'info': AttrDict(**{
-                    'status': AttrDict(**{
-                        'last_test_suite_run': test_suite_run
-                    }),
-                    'Description': 'Failed'
+            tiller_obj.get_release_status.return_value = AttrDict(
+                **{
+                    'info':
+                    AttrDict(
+                        **{
+                            'status':
+                            AttrDict(**
+                                     {'last_test_suite_run': test_suite_run}),
+                            'Description':
+                            'Failed'
+                        })
                 })
-            })
             result = tiller_obj.test_release(release)
@@ -489,7 +490,9 @@ class TillerTestCase(base.ArmadaTestCase):
         ])
     def test_test_release_failure_to_run(self):
+
         class Iterator:
+
             def __iter__(self):
                 return self
diff --git a/armada/tests/unit/utils/test_source.py b/armada/tests/unit/utils/test_source.py
index 3813dcb0..9885fd65 100644
--- a/armada/tests/unit/utils/test_source.py
+++ b/armada/tests/unit/utils/test_source.py
@@ -39,23 +39,23 @@ class GitTestCase(base.ArmadaTestCase):
                 as git_file:
             self.assertIn(expected_ref, git_file.read())
-    @testtools.skipUnless(
-        base.is_connected(), 'git clone requires network connectivity.')
+    @testtools.skipUnless(base.is_connected(),
+                          'git clone requires network connectivity.')
     def test_git_clone_good_url(self):
         url = 'https://github.com/openstack/airship-armada'
         git_dir = source.git_clone(url)
         self._validate_git_clone(git_dir)
-    @testtools.skipUnless(
-        base.is_connected(), 'git clone requires network connectivity.')
+    @testtools.skipUnless(base.is_connected(),
+                          'git clone requires network connectivity.')
     def test_git_clone_commit(self):
         url = 'https://github.com/openstack/airship-armada'
         commit = 'cba78d1d03e4910f6ab1691bae633c5bddce893d'
         git_dir = source.git_clone(url, commit)
         self._validate_git_clone(git_dir)
-    @testtools.skipUnless(
-        base.is_connected(), 'git clone requires network connectivity.')
+    @testtools.skipUnless(base.is_connected(),
+                          'git clone requires network connectivity.')
     def test_git_clone_ref(self):
         ref = 'refs/changes/54/457754/73'
         git_dir = source.git_clone(
@@ -63,29 +63,29 @@ class GitTestCase(base.ArmadaTestCase):
         self._validate_git_clone(git_dir, ref)
     @test_utils.attr(type=['negative'])
-    @testtools.skipUnless(
-        base.is_connected(), 'git clone requires network connectivity.')
+    @testtools.skipUnless(base.is_connected(),
+                          'git clone requires network connectivity.')
     def test_git_clone_empty_url(self):
         url = ''
         # error_re = '%s is not a valid git repository.' % url
-        self.assertRaises(source_exceptions.GitException,
-                          source.git_clone, url)
+        self.assertRaises(source_exceptions.GitException, source.git_clone,
+                          url)
     @test_utils.attr(type=['negative'])
-    @testtools.skipUnless(
-        base.is_connected(), 'git clone requires network connectivity.')
+    @testtools.skipUnless(base.is_connected(),
+                          'git clone requires network connectivity.')
     def test_git_clone_bad_url(self):
         url = 'https://github.com/dummy/armada'
-        self.assertRaises(source_exceptions.GitException,
-                          source.git_clone, url)
+        self.assertRaises(source_exceptions.GitException, source.git_clone,
+                          url)
     # TODO need to design a positive proxy test,
     # difficult to achieve behind a corporate proxy
     @test_utils.attr(type=['negative'])
-    @testtools.skipUnless(
-        base.is_connected(), 'git clone requires network connectivity.')
+    @testtools.skipUnless(base.is_connected(),
+                          'git clone requires network connectivity.')
     def test_git_clone_fake_proxy(self):
         url = 'https://github.com/openstack/airship-armada'
         proxy_url = test_utils.rand_name(
@@ -94,7 +94,8 @@ class GitTestCase(base.ArmadaTestCase):
         self.assertRaises(
             source_exceptions.GitProxyException,
-            source.git_clone, url,
+            source.git_clone,
+            url,
             proxy_server=proxy_url)
     @mock.patch('armada.utils.source.tempfile')
@@ -146,8 +147,8 @@ class GitTestCase(base.ArmadaTestCase):
         mock_tarfile.open.assert_not_called()
         mock_tarfile.extractall.assert_not_called()
-    @testtools.skipUnless(
-        base.is_connected(), 'git clone requires network connectivity.')
+    @testtools.skipUnless(base.is_connected(),
+                          'git clone requires network connectivity.')
     @mock.patch.object(source, 'LOG')
     def test_source_cleanup(self, mock_log):
         url = 'https://github.com/openstack/airship-armada'
@@ -190,28 +191,34 @@ class GitTestCase(base.ArmadaTestCase):
             ('Could not delete the path %s. Is it a git repository?', path),
             actual_call)
-    @testtools.skipUnless(
-        base.is_connected(), 'git clone requires network connectivity.')
+    @testtools.skipUnless(base.is_connected(),
+                          'git clone requires network connectivity.')
     @test_utils.attr(type=['negative'])
     @mock.patch.object(source, 'os')
     def test_git_clone_ssh_auth_method_fails_auth(self, mock_os):
         mock_os.path.exists.return_value = True
         fake_user = test_utils.rand_name('fake_user')
-        url = ('ssh://%s@review.openstack.org:29418/openstack/airship-armada'
-               % fake_user)
+        url = ('ssh://%s@review.openstack.org:29418/openstack/airship-armada' %
+               fake_user)
         self.assertRaises(
-            source_exceptions.GitAuthException, source.git_clone, url,
-            ref='refs/changes/17/388517/5', auth_method='SSH')
+            source_exceptions.GitAuthException,
+            source.git_clone,
+            url,
+            ref='refs/changes/17/388517/5',
+            auth_method='SSH')
-    @testtools.skipUnless(
-        base.is_connected(), 'git clone requires network connectivity.')
+    @testtools.skipUnless(base.is_connected(),
+                          'git clone requires network connectivity.')
     @test_utils.attr(type=['negative'])
     @mock.patch.object(source, 'os')
     def test_git_clone_ssh_auth_method_missing_ssh_key(self, mock_os):
         mock_os.path.exists.return_value = False
         fake_user = test_utils.rand_name('fake_user')
-        url = ('ssh://%s@review.openstack.org:29418/openstack/airship-armada'
-               % fake_user)
+        url = ('ssh://%s@review.openstack.org:29418/openstack/airship-armada' %
+               fake_user)
         self.assertRaises(
-            source_exceptions.GitSSHException, source.git_clone, url,
-            ref='refs/changes/17/388517/5', auth_method='SSH')
+            source_exceptions.GitSSHException,
+            source.git_clone,
+            url,
+            ref='refs/changes/17/388517/5',
+            auth_method='SSH')
diff --git a/armada/tests/unit/utils/test_validate.py b/armada/tests/unit/utils/test_validate.py
index 5ecdb408..0572399d 100644
--- a/armada/tests/unit/utils/test_validate.py
+++ b/armada/tests/unit/utils/test_validate.py
@@ -20,7 +20,6 @@ import testtools
 from armada.tests.unit import base
 from armada.utils import validate
-
 template_chart = """
 schema: armada/Chart/v1
 metadata:
@@ -114,9 +113,7 @@ class ValidateTestCase(BaseValidateTest):
     def test_validate_load_schemas(self):
         expected_schemas = [
-            'armada/Chart/v1',
-            'armada/ChartGroup/v1',
-            'armada/Manifest/v1'
+            'armada/Chart/v1', 'armada/ChartGroup/v1', 'armada/Manifest/v1'
         ]
         for expected_schema in expected_schemas:
             self.assertIn(expected_schema, validate.SCHEMAS)
@@ -232,9 +229,8 @@ class ValidateNegativeTestCase(BaseValidateTest):
         import, and importing the schemas again in manually results in
         duplicates.
""" - with self.assertRaisesRegexp( - RuntimeError, - 'Duplicate schema specified for: .*'): + with self.assertRaisesRegexp(RuntimeError, + 'Duplicate schema specified for: .*'): validate._load_schemas() def test_validate_no_dictionary_expect_type_error(self): @@ -251,13 +247,13 @@ class ValidateNegativeTestCase(BaseValidateTest): documents = list(yaml.safe_load_all(f.read())) mariadb_document = [ - d for d in documents if d['metadata']['name'] == 'mariadb'][0] + d for d in documents if d['metadata']['name'] == 'mariadb' + ][0] del mariadb_document['data']['release'] _, error_messages = validate.validate_armada_documents(documents) expected_error = self._build_error_message( - 'armada/Chart/v1', 'mariadb', - "'release' is a required property") + 'armada/Chart/v1', 'mariadb', "'release' is a required property") self.assertEqual(1, len(error_messages)) self.assertEqual(expected_error, error_messages[0]['message']) diff --git a/armada/utils/release.py b/armada/utils/release.py index 55fb40df..27b74003 100644 --- a/armada/utils/release.py +++ b/armada/utils/release.py @@ -26,5 +26,4 @@ def label_selectors(labels): :return: string of k8s labels """ - return ",".join( - ["%s=%s" % (k, v) for k, v in labels.items()]) + return ",".join(["%s=%s" % (k, v) for k, v in labels.items()]) diff --git a/armada/utils/source.py b/armada/utils/source.py index 5a1b4929..513502de 100644 --- a/armada/utils/source.py +++ b/armada/utils/source.py @@ -61,28 +61,31 @@ def git_clone(repo_url, ref='master', proxy_server=None, auth_method=None): ssh_cmd = None if auth_method and auth_method.lower() == 'ssh': - LOG.debug('Attempting to clone the repo at %s using reference %s ' - 'with SSH authentication.', repo_url, ref) + LOG.debug( + 'Attempting to clone the repo at %s using reference %s ' + 'with SSH authentication.', repo_url, ref) if not os.path.exists(CONF.ssh_key_path): LOG.error('SSH auth method was specified for cloning repo but ' 'the SSH key under CONF.ssh_key_path was not found.') raise source_exceptions.GitSSHException(CONF.ssh_key_path) - ssh_cmd = ( - 'ssh -i {} -o ConnectionAttempts=20 -o ConnectTimeout=10' - .format(os.path.expanduser(CONF.ssh_key_path)) - ) + ssh_cmd = ('ssh -i {} -o ConnectionAttempts=20 -o ConnectTimeout=10' + .format(os.path.expanduser(CONF.ssh_key_path))) env_vars.update({'GIT_SSH_COMMAND': ssh_cmd}) else: - LOG.debug('Attempting to clone the repo at %s using reference %s ' - 'with no authentication.', repo_url, ref) + LOG.debug( + 'Attempting to clone the repo at %s using reference %s ' + 'with no authentication.', repo_url, ref) try: if proxy_server: LOG.debug('Cloning [%s] with proxy [%s]', repo_url, proxy_server) - repo = Repo.clone_from(repo_url, temp_dir, env=env_vars, - config='http.proxy=%s' % proxy_server) + repo = Repo.clone_from( + repo_url, + temp_dir, + env=env_vars, + config='http.proxy=%s' % proxy_server) else: LOG.debug('Cloning [%s]', repo_url) repo = Repo.clone_from(repo_url, temp_dir, env=env_vars) diff --git a/armada/utils/validate.py b/armada/utils/validate.py index 0dd602a9..44ab2c6d 100644 --- a/armada/utils/validate.py +++ b/armada/utils/validate.py @@ -72,10 +72,8 @@ def _validate_armada_manifest(manifest): try: armada_object = manifest.get_manifest().get('armada') except ManifestException as me: - vmsg = ValidationMessage(message=str(me), - error=True, - name='ARM001', - level='Error') + vmsg = ValidationMessage( + message=str(me), error=True, name='ARM001', level='Error') LOG.error('ValidationMessage: %s', vmsg.get_output_json()) 
         details.append(vmsg.get_output())
         return False, details
@@ -85,10 +83,8 @@ def _validate_armada_manifest(manifest):
     if not isinstance(groups, list):
         message = '{} entry is of wrong type: {} (expected: {})'.format(
             KEYWORD_GROUPS, type(groups), 'list')
-        vmsg = ValidationMessage(message=message,
-                                 error=True,
-                                 name='ARM101',
-                                 level='Error')
+        vmsg = ValidationMessage(
+            message=message, error=True, name='ARM101', level='Error')
         LOG.info('ValidationMessage: %s', vmsg.get_output_json())
         details.append(vmsg.get_output())
@@ -98,10 +94,8 @@ def _validate_armada_manifest(manifest):
         if KEYWORD_RELEASE not in chart_obj:
             message = 'Could not find {} keyword in {}'.format(
                 KEYWORD_RELEASE, chart_obj.get('release'))
-            vmsg = ValidationMessage(message=message,
-                                     error=True,
-                                     name='ARM102',
-                                     level='Error')
+            vmsg = ValidationMessage(
+                message=message, error=True, name='ARM102', level='Error')
             LOG.info('ValidationMessage: %s', vmsg.get_output_json())
             details.append(vmsg.get_output())
@@ -147,8 +141,8 @@ def validate_armada_document(document):
     """
     if not isinstance(document, dict):
-        raise TypeError('The provided input "%s" must be a dictionary.'
-                        % document)
+        raise TypeError(
+            'The provided input "%s" must be a dictionary.' % document)
     schema = document.get('schema', '')
     document_name = document.get('metadata', {}).get('name', None)
@@ -161,34 +155,36 @@ def validate_armada_document(document):
             for error in validator.iter_errors(document.get('data')):
                 error_message = "Invalid document [%s] %s: %s." % \
                     (schema, document_name, error.message)
-                vmsg = ValidationMessage(message=error_message,
-                                         error=True,
-                                         name='ARM100',
-                                         level='Error',
-                                         schema=schema,
-                                         doc_name=document_name)
+                vmsg = ValidationMessage(
+                    message=error_message,
+                    error=True,
+                    name='ARM100',
+                    level='Error',
+                    schema=schema,
+                    doc_name=document_name)
                 LOG.info('ValidationMessage: %s', vmsg.get_output_json())
                 details.append(vmsg.get_output())
         except jsonschema.SchemaError as e:
             error_message = ('The built-in Armada JSON schema %s is invalid. '
                              'Details: %s.' % (e.schema, e.message))
-            vmsg = ValidationMessage(message=error_message,
-                                     error=True,
-                                     name='ARM000',
-                                     level='Error',
-                                     diagnostic='Armada is misconfigured.')
+            vmsg = ValidationMessage(
+                message=error_message,
+                error=True,
+                name='ARM000',
+                level='Error',
+                diagnostic='Armada is misconfigured.')
             LOG.error('ValidationMessage: %s', vmsg.get_output_json())
             details.append(vmsg.get_output())
     else:
-        vmsg = ValidationMessage(message='Unsupported document type.',
-                                 error=False,
-                                 name='ARM002',
-                                 level='Warning',
-                                 schema=schema,
-                                 doc_name=document_name,
-                                 diagnostic='Please ensure document is one of '
-                                            'the following schema types: %s' %
-                                            list(SCHEMAS.keys()))
+        vmsg = ValidationMessage(
+            message='Unsupported document type.',
+            error=False,
+            name='ARM002',
+            level='Warning',
+            schema=schema,
+            doc_name=document_name,
+            diagnostic='Please ensure document is one of '
+            'the following schema types: %s' % list(SCHEMAS.keys()))
         LOG.info('Unsupported document type, ignoring %s.', schema)
         LOG.debug('ValidationMessage: %s', vmsg.get_output_json())
         # Validation API doesn't care about this type of message, don't send
diff --git a/setup.cfg b/setup.cfg
index bf3ddea7..869a80ff 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -52,3 +52,9 @@ universal = 1
 [nosetests]
 verbosity=3
 with-doctest=1
+
+[yapf]
+based_on_style = pep8
+blank_line_before_nested_class_or_def = true
+blank_line_before_module_docstring = true
+split_before_logical_operator = false
diff --git a/setup.py b/setup.py
index c53ce4f7..7d9b6948 100755
--- a/setup.py
+++ b/setup.py
@@ -5,6 +5,4 @@ try:
 except ImportError:
     pass
-setuptools.setup(
-    setup_requires=['pbr>=2.0.0'],
-    pbr=True)
+setuptools.setup(setup_requires=['pbr>=2.0.0'], pbr=True)
diff --git a/test-requirements.txt b/test-requirements.txt
index 4f986afb..0f49ac07 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -13,3 +13,4 @@ os-testr>=1.0.0 # Apache-2.0
 flake8>=3.3.0
 mock
 responses>=0.8.1
+yapf
diff --git a/tox.ini b/tox.ini
index 80880a48..64fa2eec 100644
--- a/tox.ini
+++ b/tox.ini
@@ -45,15 +45,17 @@ commands =
 basepython = python3
 deps = -r{toxinidir}/doc/requirements.txt
 commands =
-  rm -rf releasenotes/build
-  sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html
+    rm -rf releasenotes/build
+    sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html
 [testenv:pep8]
 basepython = python3
 deps =
+    yapf
     .[bandit]
     {[testenv]deps}
 commands =
+    yapf -dr {toxinidir}/armada {toxinidir}/setup.py
     flake8 {posargs}
     # Run security linter as part of the pep8 gate instead of a separate zuul job.
     bandit -r armada -x armada/tests -n 5
@@ -68,13 +70,18 @@ basepython = python3
 setenv = {[testenv]setenv}
          PYTHON=coverage run --source armada --parallel-mode
 commands =
-  coverage erase
-  find . -type f -name "*.pyc" -delete
-  stestr run {posargs}
-  coverage combine
-  coverage html -d cover
-  coverage xml -o cover/coverage.xml
-  coverage report
+    coverage erase
+    find . -type f -name "*.pyc" -delete
+    stestr run {posargs}
+    coverage combine
+    coverage html -d cover
+    coverage xml -o cover/coverage.xml
+    coverage report
+
+[testenv:yapf]
+basepython = python3
+commands =
+    yapf -ir {toxinidir}/armada {toxinidir}/setup.py
 [flake8]
 filename = *.py
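
Note on the new formatting tooling: the [yapf] knobs added to setup.cfg correspond to the reformatting visible throughout this change (for example, split_before_logical_operator = false is why wrapped expressions keep operators such as "or" at the end of the line), and the tox additions wire yapf into the gate. A rough sketch of how a contributor might exercise these targets locally, assuming a standard tox installation; the commands simply mirror the pep8 and yapf testenvs added in tox.ini above:

    # Check formatting without modifying files; yapf -d prints a diff and
    # exits non-zero when files deviate from the configured style, which
    # fails the pep8 environment (which also runs flake8 and bandit).
    tox -e pep8

    # Rewrite armada/ and setup.py in place to match the [yapf] style.
    tox -e yapf

Equivalently, yapf can be invoked directly from the repository root with "yapf -dr armada setup.py" (check) or "yapf -ir armada setup.py" (fix in place); it picks up the style from the [yapf] section of setup.cfg.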