Adding yapf config, plus formatted code.

- Adding a yapf diff check to the pep8 target
- Adding a yapf tox target to perform the actual formatting (see the sketch below)

** The rest of this PS contains formatted code only, no other changes

Change-Id: Idfef60f53565add2d0cf65bb8e5b91072cf0aded
Marshall Margenau 2018-06-22 13:14:32 -05:00 committed by Sean Eagan
parent 4b6a947577
commit f235512d57
66 changed files with 1234 additions and 1270 deletions
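
The tox and yapf wiring referenced in the commit message is not part of this excerpt. As a rough sketch only (target names, flags, and the `armada` package path are assumptions, not taken from this change), the two targets usually look something like this in tox.ini:

    [testenv:pep8]
    commands =
        flake8 {posargs}
        yapf --diff --recursive armada

    [testenv:yapf]
    commands =
        yapf --in-place --recursive armada

with a .style.yapf at the repository root (for example `based_on_style = pep8` plus any project-specific knobs) telling yapf how to format. Under that assumed layout, `tox -e pep8` surfaces any formatting drift as a diff, and `tox -e yapf` rewrites the files in place.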

View File

@@ -72,11 +72,8 @@ class BaseResource(object):
         try:
             return json.loads(raw_body.decode())
         except json.JSONDecodeError as jex:
-            self.error(
-                req.context,
-                "Invalid JSON in request: %s" % str(jex))
-            raise Exception(
-                "%s: Invalid JSON in body: %s" % (req.path, jex))
+            self.error(req.context, "Invalid JSON in request: %s" % str(jex))
+            raise Exception("%s: Invalid JSON in body: %s" % (req.path, jex))

     def return_error(self, resp, status_code, message="", retry=False):
         resp.body = json.dumps({
@@ -112,6 +109,7 @@ class BaseResource(object):

 class ArmadaRequestContext(object):
+
     def __init__(self):
         self.log_level = 'ERROR'
         self.user = None  # Username

View File

@@ -58,23 +58,22 @@ class Apply(api.BaseResource):
                 documents.extend(list(yaml.safe_load_all(d.decode())))

             if req_body.get('overrides', None):
-                overrides = Override(documents,
-                                     overrides=req_body.get('overrides'))
+                overrides = Override(
+                    documents, overrides=req_body.get('overrides'))
                 documents = overrides.update_manifests()
         else:
-            self.error(req.context, "Unknown content-type %s"
-                       % req.content_type)
+            self.error(req.context,
+                       "Unknown content-type %s" % req.content_type)
             # TODO(fmontei): Use falcon.<Relevant API Exception Class> instead.
             return self.return_error(
                 resp,
                 falcon.HTTP_415,
                 message="Request must be in application/x-yaml"
                 "or application/json")

         try:
             armada = Armada(
                 documents,
-                disable_update_pre=req.get_param_as_bool(
-                    'disable_update_pre'),
+                disable_update_pre=req.get_param_as_bool('disable_update_pre'),
                 disable_update_post=req.get_param_as_bool(
                     'disable_update_post'),
                 enable_chart_cleanup=req.get_param_as_bool(
@@ -83,20 +82,17 @@ class Apply(api.BaseResource):
                 force_wait=req.get_param_as_bool('wait'),
                 timeout=req.get_param_as_int('timeout') or 0,
                 tiller_host=req.get_param('tiller_host'),
-                tiller_port=req.get_param_as_int(
-                    'tiller_port') or CONF.tiller_port,
+                tiller_port=req.get_param_as_int('tiller_port') or
+                CONF.tiller_port,
                 tiller_namespace=req.get_param(
                     'tiller_namespace', default=CONF.tiller_namespace),
-                target_manifest=req.get_param('target_manifest')
-            )
+                target_manifest=req.get_param('target_manifest'))

             msg = armada.sync()
-            resp.body = json.dumps(
-                {
-                    'message': msg,
-                }
-            )
+            resp.body = json.dumps({
+                'message': msg,
+            })

             resp.content_type = 'application/json'
             resp.status = falcon.HTTP_200
@@ -106,5 +102,4 @@ class Apply(api.BaseResource):
             self.logger.exception('Caught unexpected exception')
             err_message = 'Failed to apply manifest: {}'.format(e)
             self.error(req.context, err_message)
-            self.return_error(
-                resp, falcon.HTTP_500, message=err_message)
+            self.return_error(resp, falcon.HTTP_500, message=err_message)

View File

@@ -35,8 +35,8 @@ class Rollback(api.BaseResource):
             tiller = Tiller(
                 tiller_host=req.get_param('tiller_host'),
-                tiller_port=req.get_param_as_int(
-                    'tiller_port') or CONF.tiller_port,
+                tiller_port=req.get_param_as_int('tiller_port') or
+                CONF.tiller_port,
                 tiller_namespace=req.get_param(
                     'tiller_namespace', default=CONF.tiller_namespace),
                 dry_run=dry_run)
@@ -49,12 +49,10 @@ class Rollback(api.BaseResource):
                 force=req.get_param_as_bool('force'),
                 recreate_pods=req.get_param_as_bool('recreate_pods'))

-            resp.body = json.dumps(
-                {
-                    'message': ('(dry run) ' if dry_run else '') +
-                    'Rollback of {} complete.'.format(release),
-                }
-            )
+            resp.body = json.dumps({
+                'message': ('(dry run) ' if dry_run else '') +
+                'Rollback of {} complete.'.format(release),
+            })

             resp.content_type = 'application/json'
             resp.status = falcon.HTTP_200
@@ -62,5 +60,4 @@ class Rollback(api.BaseResource):
             self.logger.exception('Caught unexpected exception')
             err_message = 'Failed to rollback release: {}'.format(e)
             self.error(req.context, err_message)
-            self.return_error(
-                resp, falcon.HTTP_500, message=err_message)
+            self.return_error(resp, falcon.HTTP_500, message=err_message)

View File

@@ -41,8 +41,8 @@ class TestReleasesReleaseNameController(api.BaseResource):
         try:
             tiller = Tiller(
                 tiller_host=req.get_param('tiller_host'),
-                tiller_port=req.get_param_as_int(
-                    'tiller_port') or CONF.tiller_port,
+                tiller_port=req.get_param_as_int('tiller_port') or
+                CONF.tiller_port,
                 tiller_namespace=req.get_param(
                     'tiller_namespace', default=CONF.tiller_namespace))
             success = test_release_for_success(tiller, release)
@@ -110,8 +110,7 @@ class TestReleasesManifestController(api.BaseResource):
     def _validate_documents(self, req, resp, documents):
         result, details = validate.validate_armada_documents(documents)

-        return self._format_validation_response(req, resp, result,
-                                                details)
+        return self._format_validation_response(req, resp, result, details)

     @policy.enforce('armada:tests_manifest')
     def on_post(self, req, resp):
@@ -122,8 +121,8 @@ class TestReleasesManifestController(api.BaseResource):
         try:
             tiller = Tiller(
                 tiller_host=req.get_param('tiller_host'),
-                tiller_port=req.get_param_as_int(
-                    'tiller_port') or CONF.tiller_port,
+                tiller_port=req.get_param_as_int('tiller_port') or
+                CONF.tiller_port,
                 tiller_namespace=req.get_param(
                     'tiller_namespace', default=CONF.tiller_namespace))
             # TODO(fmontei): Provide more sensible exception(s) here.
@@ -147,23 +146,16 @@ class TestReleasesManifestController(api.BaseResource):
         armada_obj = Manifest(
             documents, target_manifest=target_manifest).get_manifest()
-        prefix = armada_obj.get(const.KEYWORD_ARMADA).get(
-            const.KEYWORD_PREFIX)
+        prefix = armada_obj.get(const.KEYWORD_ARMADA).get(const.KEYWORD_PREFIX)
         known_releases = [release[0] for release in tiller.list_charts()]

-        message = {
-            'tests': {
-                'passed': [],
-                'skipped': [],
-                'failed': []
-            }
-        }
+        message = {'tests': {'passed': [], 'skipped': [], 'failed': []}}

         for group in armada_obj.get(const.KEYWORD_ARMADA).get(
                 const.KEYWORD_GROUPS):
             for ch in group.get(const.KEYWORD_CHARTS):
-                release_name = release_prefixer(
-                    prefix, ch.get('chart').get('release'))
+                release_name = release_prefixer(prefix,
+                                                ch.get('chart').get('release'))
                 if release_name in known_releases:
                     self.logger.info('RUNNING: %s tests', release_name)
@@ -175,8 +167,8 @@ class TestReleasesManifestController(api.BaseResource):
                     self.logger.info("FAILED: %s", release_name)
                     message['test']['failed'].append(release_name)
                 else:
-                    self.logger.info(
-                        'Release %s not found - SKIPPING', release_name)
+                    self.logger.info('Release %s not found - SKIPPING',
+                                     release_name)
                     message['test']['skipped'].append(release_name)

         resp.status = falcon.HTTP_200

View File

@@ -27,6 +27,7 @@ LOG = logging.getLogger(__name__)

 class Status(api.BaseResource):
+
     @policy.enforce('tiller:get_status')
     def on_get(self, req, resp):
         '''
@@ -35,8 +36,8 @@ class Status(api.BaseResource):
         try:
             tiller = Tiller(
                 tiller_host=req.get_param('tiller_host'),
-                tiller_port=req.get_param_as_int(
-                    'tiller_port') or CONF.tiller_port,
+                tiller_port=req.get_param_as_int('tiller_port') or
+                CONF.tiller_port,
                 tiller_namespace=req.get_param(
                     'tiller_namespace', default=CONF.tiller_namespace))
@@ -58,11 +59,11 @@ class Status(api.BaseResource):
         except Exception as e:
             err_message = 'Failed to get Tiller Status: {}'.format(e)
             self.error(req.context, err_message)
-            self.return_error(
-                resp, falcon.HTTP_500, message=err_message)
+            self.return_error(resp, falcon.HTTP_500, message=err_message)


 class Release(api.BaseResource):
+
     @policy.enforce('tiller:get_release')
     def on_get(self, req, resp):
         '''Controller for listing Tiller releases.
@@ -70,14 +71,15 @@ class Release(api.BaseResource):
         try:
             tiller = Tiller(
                 tiller_host=req.get_param('tiller_host'),
-                tiller_port=req.get_param_as_int(
-                    'tiller_port') or CONF.tiller_port,
+                tiller_port=req.get_param_as_int('tiller_port') or
+                CONF.tiller_port,
                 tiller_namespace=req.get_param(
                     'tiller_namespace', default=CONF.tiller_namespace))

-            LOG.debug('Tiller (Release) at: %s:%s, namespace=%s, '
-                      'timeout=%s', tiller.tiller_host, tiller.tiller_port,
-                      tiller.tiller_namespace, tiller.timeout)
+            LOG.debug(
+                'Tiller (Release) at: %s:%s, namespace=%s, '
+                'timeout=%s', tiller.tiller_host, tiller.tiller_port,
+                tiller.tiller_namespace, tiller.timeout)

             releases = {}
             for release in tiller.list_releases():
@@ -91,5 +93,4 @@ class Release(api.BaseResource):
         except Exception as e:
             err_message = 'Unable to find Tiller Releases: {}'.format(e)
             self.error(req.context, err_message)
-            self.return_error(
-                resp, falcon.HTTP_500, message=err_message)
+            self.return_error(resp, falcon.HTTP_500, message=err_message)

View File

@@ -47,8 +47,8 @@ class Validate(api.BaseResource):
             manifest = self.req_yaml(req)
             documents = list(manifest)

-            self.logger.debug("Validating set of %d documents."
-                              % len(documents))
+            self.logger.debug(
+                "Validating set of %d documents." % len(documents))

             result, details = validate_armada_documents(documents)
@@ -81,5 +81,4 @@ class Validate(api.BaseResource):
         except Exception as ex:
             err_message = 'Failed to validate Armada Manifest'
             self.logger.error(err_message, exc_info=ex)
-            self.return_error(
-                resp, falcon.HTTP_400, message=err_message)
+            self.return_error(resp, falcon.HTTP_400, message=err_message)

View File

@@ -93,6 +93,7 @@ class ContextMiddleware(object):

 class LoggingMiddleware(object):
+
     def __init__(self):
         self.logger = logging.getLogger(__name__)
@@ -121,8 +122,8 @@ class LoggingMiddleware(object):
             'external_ctx': ctx.external_marker,
         }
         resp.append_header('X-Armada-Req', ctx.request_id)
-        self.logger.info("%s %s - %s" % (req.method, req.uri, resp.status),
-                         extra=extra)
+        self.logger.info(
+            "%s %s - %s" % (req.method, req.uri, resp.status), extra=extra)
         self.logger.debug("Response body:%s", resp.body)

     def _log_headers(self, headers):

View File

@@ -65,72 +65,71 @@ file:
 SHORT_DESC = "Command installs manifest charts."

-@apply.command(name='apply',
-               help=DESC,
-               short_help=SHORT_DESC)
-@click.argument('locations',
-                nargs=-1)
-@click.option('--api',
-              help="Contacts service endpoint.",
-              is_flag=True)
-@click.option('--disable-update-post',
-              help="Disable post-update Tiller operations.",
-              is_flag=True)
-@click.option('--disable-update-pre',
-              help="Disable pre-update Tiller operations.",
-              is_flag=True)
-@click.option('--dry-run',
-              help="Run charts without installing them.",
-              is_flag=True)
-@click.option('--enable-chart-cleanup',
-              help="Clean up unmanaged charts.",
-              is_flag=True)
-@click.option('--use-doc-ref',
-              help="Use armada manifest file reference.",
-              is_flag=True)
-@click.option('--set',
-              help=("Use to override Armada Manifest values. Accepts "
-                    "overrides that adhere to the format "
-                    "<path>:<to>:<property>=<value> to specify a primitive or "
-                    "<path>:<to>:<property>=<value1>,...,<valueN> to specify "
-                    "a list of values."),
-              multiple=True,
-              type=str,
-              default=[])
-@click.option('--tiller-host',
-              help="Tiller host IP.",
-              default=None)
-@click.option('--tiller-port',
-              help="Tiller host port.",
-              type=int,
-              default=CONF.tiller_port)
-@click.option('--tiller-namespace', '-tn',
-              help="Tiller namespace.",
-              type=str,
-              default=CONF.tiller_namespace)
-@click.option('--timeout',
-              help="Specifies time to wait for each chart to fully "
-                   "finish deploying.",
-              type=int,
-              default=0)
-@click.option('--values', '-f',
-              help=("Use to override multiple Armada Manifest values by "
-                    "reading overrides from a values.yaml-type file."),
-              multiple=True,
-              type=str,
-              default=[])
-@click.option('--wait',
-              help=("Force Tiller to wait until all charts are deployed, "
-                    "rather than using each chart's specified wait policy. "
-                    "This is equivalent to sequenced chartgroups."),
-              is_flag=True)
-@click.option('--target-manifest',
-              help=("The target manifest to run. Required for specifying "
-                    "which manifest to run when multiple are available."),
-              default=None)
-@click.option('--debug',
-              help="Enable debug logging.",
-              is_flag=True)
+@apply.command(name='apply', help=DESC, short_help=SHORT_DESC)
+@click.argument('locations', nargs=-1)
+@click.option('--api', help="Contacts service endpoint.", is_flag=True)
+@click.option(
+    '--disable-update-post',
+    help="Disable post-update Tiller operations.",
+    is_flag=True)
+@click.option(
+    '--disable-update-pre',
+    help="Disable pre-update Tiller operations.",
+    is_flag=True)
+@click.option(
+    '--dry-run', help="Run charts without installing them.", is_flag=True)
+@click.option(
+    '--enable-chart-cleanup', help="Clean up unmanaged charts.", is_flag=True)
+@click.option(
+    '--use-doc-ref', help="Use armada manifest file reference.", is_flag=True)
+@click.option(
+    '--set',
+    help=("Use to override Armada Manifest values. Accepts "
+          "overrides that adhere to the format "
+          "<path>:<to>:<property>=<value> to specify a primitive or "
+          "<path>:<to>:<property>=<value1>,...,<valueN> to specify "
+          "a list of values."),
+    multiple=True,
+    type=str,
+    default=[])
+@click.option('--tiller-host', help="Tiller host IP.", default=None)
+@click.option(
+    '--tiller-port',
+    help="Tiller host port.",
+    type=int,
+    default=CONF.tiller_port)
+@click.option(
+    '--tiller-namespace',
+    '-tn',
+    help="Tiller namespace.",
+    type=str,
+    default=CONF.tiller_namespace)
+@click.option(
+    '--timeout',
+    help="Specifies time to wait for each chart to fully "
+    "finish deploying.",
+    type=int,
+    default=0)
+@click.option(
+    '--values',
+    '-f',
+    help=("Use to override multiple Armada Manifest values by "
+          "reading overrides from a values.yaml-type file."),
+    multiple=True,
+    type=str,
+    default=[])
+@click.option(
+    '--wait',
+    help=("Force Tiller to wait until all charts are deployed, "
+          "rather than using each chart's specified wait policy. "
+          "This is equivalent to sequenced chartgroups."),
+    is_flag=True)
+@click.option(
+    '--target-manifest',
+    help=("The target manifest to run. Required for specifying "
+          "which manifest to run when multiple are available."),
+    default=None)
+@click.option('--debug', help="Enable debug logging.", is_flag=True)
 @click.pass_context
 def apply_create(ctx, locations, api, disable_update_post, disable_update_pre,
                  dry_run, enable_chart_cleanup, use_doc_ref, set, tiller_host,
@@ -144,23 +143,11 @@ def apply_create(ctx, locations, api, disable_update_post, disable_update_pre,

 class ApplyManifest(CliAction):
-    def __init__(self,
-                 ctx,
-                 locations,
-                 api,
-                 disable_update_post,
-                 disable_update_pre,
-                 dry_run,
-                 enable_chart_cleanup,
-                 use_doc_ref,
-                 set,
-                 tiller_host,
-                 tiller_port,
-                 tiller_namespace,
-                 timeout,
-                 values,
-                 wait,
-                 target_manifest):
+
+    def __init__(self, ctx, locations, api, disable_update_post,
+                 disable_update_pre, dry_run, enable_chart_cleanup,
+                 use_doc_ref, set, tiller_host, tiller_port, tiller_namespace,
+                 timeout, values, wait, target_manifest):
         super(ApplyManifest, self).__init__()
         self.ctx = ctx
         # Filename can also be a URL reference

View File

@@ -56,27 +56,16 @@ To delete releases by the name:
 SHORT_DESC = "Command deletes releases."

-@delete.command(name='delete',
-                help=DESC,
-                short_help=SHORT_DESC)
-@click.option('--manifest',
-              help="Armada Manifest file.",
-              type=str)
-@click.option('--releases',
-              help="Comma-separated list of release names.",
-              type=str)
-@click.option('--no-purge',
-              help="Deletes release without purge option.",
-              is_flag=True)
-@click.option('--tiller-host',
-              help="Tiller host IP.")
-@click.option('--tiller-port',
-              help="Tiller host port.",
-              type=int,
-              default=44134)
-@click.option('--debug',
-              help="Enable debug logging.",
-              is_flag=True)
+@delete.command(name='delete', help=DESC, short_help=SHORT_DESC)
+@click.option('--manifest', help="Armada Manifest file.", type=str)
+@click.option(
+    '--releases', help="Comma-separated list of release names.", type=str)
+@click.option(
+    '--no-purge', help="Deletes release without purge option.", is_flag=True)
+@click.option('--tiller-host', help="Tiller host IP.")
+@click.option(
+    '--tiller-port', help="Tiller host port.", type=int, default=44134)
+@click.option('--debug', help="Enable debug logging.", is_flag=True)
 @click.pass_context
 def delete_charts(ctx, manifest, releases, no_purge, tiller_host, tiller_port,
                   debug):
@@ -86,6 +75,7 @@ def delete_charts(ctx, manifest, releases, no_purge, tiller_host, tiller_port,

 class DeleteChartManifest(CliAction):
+
     def __init__(self, ctx, manifest, releases, no_purge, tiller_host,
                  tiller_port):
@@ -103,8 +93,10 @@ class DeleteChartManifest(CliAction):
         known_release_names = [release[0] for release in tiller.list_charts()]

         if self.releases:
-            target_releases = [r.strip() for r in self.releases.split(',')
-                               if r.strip() in known_release_names]
+            target_releases = [
+                r.strip() for r in self.releases.split(',')
+                if r.strip() in known_release_names
+            ]
             if not target_releases:
                 self.logger.info("There's no release to delete.")
                 return
@@ -131,14 +123,16 @@ class DeleteChartManifest(CliAction):
                             const.KEYWORD_GROUPS):
                         for ch in group.get(const.KEYWORD_CHARTS):
                             release_name = release_prefixer(
-                                prefix, ch.get('chart').get('release'))
+                                prefix,
+                                ch.get('chart').get('release'))
                             if release_name in known_release_names:
                                 target_releases.append(release_name)
             except yaml.YAMLError as e:
                 mark = e.problem_mark
-                self.logger.info("While parsing the manifest file, %s. "
-                                 "Error position: (%s:%s)", e.problem,
-                                 mark.line + 1, mark.column + 1)
+                self.logger.info(
+                    "While parsing the manifest file, %s. "
+                    "Error position: (%s:%s)", e.problem, mark.line + 1,
+                    mark.column + 1)

             if not target_releases:
                 self.logger.info("There's no release to delete.")

View File

@@ -41,48 +41,46 @@ To rollback a release, run:
 SHORT_DESC = "Command performs a release rollback."

-@rollback.command(name='rollback',
-                  help=DESC,
-                  short_help=SHORT_DESC)
-@click.option('--release',
-              help="Release to rollback.",
-              type=str)
-@click.option('--version',
-              help="Version of release to rollback to. 0 represents the "
-                   "previous release",
-              type=int,
-              default=0)
-@click.option('--dry-run',
-              help="Perform a dry-run rollback.",
-              is_flag=True)
-@click.option('--tiller-host',
-              help="Tiller host IP.",
-              default=None)
-@click.option('--tiller-port',
-              help="Tiller host port.",
-              type=int,
-              default=CONF.tiller_port)
-@click.option('--tiller-namespace', '-tn',
-              help="Tiller namespace.",
-              type=str,
-              default=CONF.tiller_namespace)
-@click.option('--timeout',
-              help="Specifies time to wait for rollback to complete.",
-              type=int,
-              default=0)
-@click.option('--wait',
-              help=("Wait until rollback is complete before returning."),
-              is_flag=True)
-@click.option('--force',
-              help=("Force resource update through delete/recreate if"
-                    " needed."),
-              is_flag=True)
-@click.option('--recreate-pods',
-              help=("Restarts pods for the resource if applicable."),
-              is_flag=True)
-@click.option('--debug',
-              help="Enable debug logging.",
-              is_flag=True)
+@rollback.command(name='rollback', help=DESC, short_help=SHORT_DESC)
+@click.option('--release', help="Release to rollback.", type=str)
+@click.option(
+    '--version',
+    help="Version of release to rollback to. 0 represents the "
+    "previous release",
+    type=int,
+    default=0)
+@click.option('--dry-run', help="Perform a dry-run rollback.", is_flag=True)
+@click.option('--tiller-host', help="Tiller host IP.", default=None)
+@click.option(
+    '--tiller-port',
+    help="Tiller host port.",
+    type=int,
+    default=CONF.tiller_port)
+@click.option(
+    '--tiller-namespace',
+    '-tn',
+    help="Tiller namespace.",
+    type=str,
+    default=CONF.tiller_namespace)
+@click.option(
+    '--timeout',
+    help="Specifies time to wait for rollback to complete.",
+    type=int,
+    default=0)
+@click.option(
+    '--wait',
+    help=("Wait until rollback is complete before returning."),
+    is_flag=True)
+@click.option(
+    '--force',
+    help=("Force resource update through delete/recreate if"
+          " needed."),
+    is_flag=True)
+@click.option(
+    '--recreate-pods',
+    help=("Restarts pods for the resource if applicable."),
+    is_flag=True)
+@click.option('--debug', help="Enable debug logging.", is_flag=True)
 @click.pass_context
 def rollback_charts(ctx, release, version, dry_run, tiller_host, tiller_port,
                     tiller_namespace, timeout, wait, force, recreate_pods,
@@ -94,17 +92,9 @@ def rollback_charts(ctx, release, version, dry_run, tiller_host, tiller_port,

 class Rollback(CliAction):
-    def __init__(self,
-                 ctx,
-                 release,
-                 version,
-                 dry_run,
-                 tiller_host,
-                 tiller_port,
-                 tiller_namespace,
-                 timeout,
-                 wait,
-                 force,
-                 recreate_pods):
+
+    def __init__(self, ctx, release, version, dry_run, tiller_host,
+                 tiller_port, tiller_namespace, timeout, wait, force,
+                 recreate_pods):
         super(Rollback, self).__init__()
         self.ctx = ctx
@@ -121,8 +111,10 @@ class Rollback(CliAction):
     def invoke(self):
         tiller = Tiller(
-            tiller_host=self.tiller_host, tiller_port=self.tiller_port,
-            tiller_namespace=self.tiller_namespace, dry_run=self.dry_run)
+            tiller_host=self.tiller_host,
+            tiller_port=self.tiller_port,
+            tiller_namespace=self.tiller_namespace,
+            dry_run=self.dry_run)

         response = tiller.rollback_release(
             self.release,
@@ -135,5 +127,7 @@ class Rollback(CliAction):
         self.output(response)

     def output(self, response):
-        self.logger.info(('(dry run) ' if self.dry_run else '') +
-                         'Rollback of %s complete.', self.release)
+        self.logger.info(
+            ('(dry run) '
+             if self.dry_run else '') + 'Rollback of %s complete.',
+            self.release)

View File

@@ -54,43 +54,37 @@ To test release:
 SHORT_DESC = "Command tests releases."

-@test.command(name='test',
-              help=DESC,
-              short_help=SHORT_DESC)
-@click.option('--file',
-              help="Armada manifest.",
-              type=str)
-@click.option('--release',
-              help="Helm release.",
-              type=str)
-@click.option('--tiller-host',
-              help="Tiller host IP.",
-              default=None)
-@click.option('--tiller-port',
-              help="Tiller host port.",
-              type=int,
-              default=CONF.tiller_port)
-@click.option('--tiller-namespace', '-tn',
-              help="Tiller Namespace.",
-              type=str,
-              default=CONF.tiller_namespace)
-@click.option('--target-manifest',
-              help=("The target manifest to run. Required for specifying "
-                    "which manifest to run when multiple are available."),
-              default=None)
-@click.option('--debug',
-              help="Enable debug logging.",
-              is_flag=True)
+@test.command(name='test', help=DESC, short_help=SHORT_DESC)
+@click.option('--file', help="Armada manifest.", type=str)
+@click.option('--release', help="Helm release.", type=str)
+@click.option('--tiller-host', help="Tiller host IP.", default=None)
+@click.option(
+    '--tiller-port',
+    help="Tiller host port.",
+    type=int,
+    default=CONF.tiller_port)
+@click.option(
+    '--tiller-namespace',
+    '-tn',
+    help="Tiller Namespace.",
+    type=str,
+    default=CONF.tiller_namespace)
+@click.option(
+    '--target-manifest',
+    help=("The target manifest to run. Required for specifying "
+          "which manifest to run when multiple are available."),
+    default=None)
+@click.option('--debug', help="Enable debug logging.", is_flag=True)
 @click.pass_context
 def test_charts(ctx, file, release, tiller_host, tiller_port, tiller_namespace,
                 target_manifest, debug):
     CONF.debug = debug
-    TestChartManifest(
-        ctx, file, release, tiller_host, tiller_port, tiller_namespace,
-        target_manifest).safe_invoke()
+    TestChartManifest(ctx, file, release, tiller_host, tiller_port,
+                      tiller_namespace, target_manifest).safe_invoke()


 class TestChartManifest(CliAction):
     def __init__(self, ctx, file, release, tiller_host, tiller_port,
                  tiller_namespace, target_manifest):
@@ -125,8 +119,8 @@ class TestChartManifest(CliAction):
                 'tiller_port': self.tiller_port,
                 'tiller_namespace': self.tiller_namespace
             }
-            resp = client.get_test_release(release=self.release,
-                                           query=query)
+            resp = client.get_test_release(
+                release=self.release, query=query)

             self.logger.info(resp.get('result'))
             self.logger.info(resp.get('message'))
@@ -144,7 +138,8 @@ class TestChartManifest(CliAction):
                         const.KEYWORD_GROUPS):
                     for ch in group.get(const.KEYWORD_CHARTS):
                         release_name = release_prefixer(
-                            prefix, ch.get('chart').get('release'))
+                            prefix,
+                            ch.get('chart').get('release'))
                         if release_name in known_release_names:
                             self.logger.info('RUNNING: %s tests', release_name)
@@ -156,9 +151,8 @@ class TestChartManifest(CliAction):
                             self.logger.info("FAILED: %s", release_name)
                         else:
-                            self.logger.info(
-                                'Release %s not found - SKIPPING',
-                                release_name)
+                            self.logger.info('Release %s not found - SKIPPING',
+                                             release_name)
         else:
             client = self.ctx.obj.get('CLIENT')
             query = {
@@ -168,8 +162,8 @@ class TestChartManifest(CliAction):
             }

             with open(self.filename, 'r') as f:
-                resp = client.get_test_manifest(manifest=f.read(),
-                                                query=query)
+                resp = client.get_test_manifest(
+                    manifest=f.read(), query=query)
             for test in resp.get('tests'):
                 self.logger.info('Test State: %s', test)
                 for item in test.get('tests').get(test):

View File

@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-
 import click
 from oslo_config import cfg
@@ -47,29 +46,22 @@ To obtain Tiller service status/information:
 SHORT_DESC = "Command gets Tiller information."

-@tiller.command(name='tiller',
-                help=DESC,
-                short_help=SHORT_DESC)
-@click.option('--tiller-host',
-              help="Tiller host IP.",
-              default=None)
-@click.option('--tiller-port',
-              help="Tiller host port.",
-              type=int,
-              default=CONF.tiller_port)
-@click.option('--tiller-namespace', '-tn',
-              help="Tiller namespace.",
-              type=str,
-              default=CONF.tiller_namespace)
-@click.option('--releases',
-              help="List of deployed releases.",
-              is_flag=True)
-@click.option('--status',
-              help="Status of Armada services.",
-              is_flag=True)
-@click.option('--debug',
-              help="Enable debug logging.",
-              is_flag=True)
+@tiller.command(name='tiller', help=DESC, short_help=SHORT_DESC)
+@click.option('--tiller-host', help="Tiller host IP.", default=None)
+@click.option(
+    '--tiller-port',
+    help="Tiller host port.",
+    type=int,
+    default=CONF.tiller_port)
+@click.option(
+    '--tiller-namespace',
+    '-tn',
+    help="Tiller namespace.",
+    type=str,
+    default=CONF.tiller_namespace)
+@click.option('--releases', help="List of deployed releases.", is_flag=True)
+@click.option('--status', help="Status of Armada services.", is_flag=True)
+@click.option('--debug', help="Enable debug logging.", is_flag=True)
 @click.pass_context
 def tiller_service(ctx, tiller_host, tiller_port, tiller_namespace, releases,
                    status, debug):
@@ -93,7 +85,8 @@ class TillerServices(CliAction):
     def invoke(self):
         tiller = Tiller(
-            tiller_host=self.tiller_host, tiller_port=self.tiller_port,
+            tiller_host=self.tiller_host,
+            tiller_port=self.tiller_port,
             tiller_namespace=self.tiller_namespace)

         if self.status:
@@ -117,9 +110,8 @@ class TillerServices(CliAction):
         if self.releases:
             if not self.ctx.obj.get('api', False):
                 for release in tiller.list_releases():
-                    self.logger.info(
-                        "Release %s in namespace: %s",
-                        release.name, release.namespace)
+                    self.logger.info("Release %s in namespace: %s",
+                                     release.name, release.namespace)
             else:
                 client = self.ctx.obj.get('CLIENT')
                 query = {
@@ -130,6 +122,5 @@ class TillerServices(CliAction):
                 resp = client.get_releases(query=query)
                 for namespace in resp.get('releases'):
                     for release in resp.get('releases').get(namespace):
-                        self.logger.info(
-                            'Release %s in namespace: %s', release,
-                            namespace)
+                        self.logger.info('Release %s in namespace: %s',
+                                         release, namespace)

View File

@@ -42,14 +42,9 @@ The validate argument must be a relative path to Armada manifest
 SHORT_DESC = "Command validates Armada Manifest."

-@validate.command(name='validate',
-                  help=DESC,
-                  short_help=SHORT_DESC)
-@click.argument('locations',
-                nargs=-1)
-@click.option('--debug',
-              help="Enable debug logging.",
-              is_flag=True)
+@validate.command(name='validate', help=DESC, short_help=SHORT_DESC)
+@click.argument('locations', nargs=-1)
+@click.option('--debug', help="Enable debug logging.", is_flag=True)
 @click.pass_context
 def validate_manifest(ctx, locations, debug):
     CONF.debug = debug
@@ -57,6 +52,7 @@ def validate_manifest(ctx, locations, debug):

 class ValidateManifest(CliAction):
+
     def __init__(self, ctx, locations):
         super(ValidateManifest, self).__init__()
         self.ctx = ctx
@@ -87,10 +83,8 @@ class ValidateManifest(CliAction):
                         'validation: %s', self.locations)
         else:
             if len(self.locations) > 1:
-                self.logger.error(
-                    "Cannot specify multiple locations "
-                    "when using validate API."
-                )
+                self.logger.error("Cannot specify multiple locations "
+                                  "when using validate API.")
                 return

             client = self.ctx.obj.get('CLIENT')

View File

@@ -27,6 +27,7 @@ API_VERSION = 'v{}/{}'

 class ArmadaClient(object):
+
     def __init__(self, session):
         self.session = session
@@ -61,9 +62,7 @@ class ArmadaClient(object):
         resp = self.session.post(
             endpoint,
             data=req_body,
-            headers={
-                'content-type': 'application/json'
-            },
+            headers={'content-type': 'application/json'},
             timeout=timeout)

         self._check_response(resp)
@@ -107,9 +106,7 @@ class ArmadaClient(object):
                 endpoint,
                 body=manifest,
                 query=query,
-                headers={
-                    'content-type': 'application/x-yaml'
-                },
+                headers={'content-type': 'application/x-yaml'},
                 timeout=timeout)
         elif manifest_ref:
             req_body = {
@@ -120,9 +117,7 @@ class ArmadaClient(object):
                 endpoint,
                 data=req_body,
                 query=query,
-                headers={
-                    'content-type': 'application/json'
-                },
+                headers={'content-type': 'application/json'},
                 timeout=timeout)

         self._check_response(resp)
@@ -150,8 +145,8 @@ class ArmadaClient(object):
     def post_test_manifest(self, manifest=None, query=None, timeout=None):
         endpoint = self._set_endpoint('1.0', 'tests')

-        resp = self.session.post(endpoint, body=manifest, query=query,
-                                 timeout=timeout)
+        resp = self.session.post(
+            endpoint, body=manifest, query=query, timeout=timeout)

         self._check_response(resp)
@@ -165,5 +160,5 @@ class ArmadaClient(object):
         elif resp.status_code == 403:
             raise err.ClientForbiddenError("Forbidden access to %s" % resp.url)
         elif not resp.ok:
-            raise err.ClientError("Error - received %d: %s" %
-                                  (resp.status_code, resp.text))
+            raise err.ClientError(
+                "Error - received %d: %s" % (resp.status_code, resp.text))

View File

@@ -12,7 +12,6 @@

 import oslo_i18n

-
 _translators = oslo_i18n.TranslatorFactory(domain='armada')

 # The primary translation function using the well-known name "_"

View File

@@ -18,8 +18,5 @@ from armada.common.policies import tiller

 def list_rules():
-    return itertools.chain(
-        base.list_rules(),
-        service.list_rules(),
-        tiller.list_rules()
-    )
+    return itertools.chain(base.list_rules(), service.list_rules(),
+                           tiller.list_rules())

View File

@@ -19,14 +19,12 @@ RULE_ADMIN_OR_TARGET_PROJECT = (
     'rule:admin_required or project_id:%(target.project.id)s')
 RULE_SERVICE_OR_ADMIN = 'rule:service_or_admin'

 rules = [
-    policy.RuleDefault(name='admin_required',
-                       check_str='role:admin'),
-    policy.RuleDefault(name='service_or_admin',
-                       check_str='rule:admin_required or rule:service_role'),
-    policy.RuleDefault(name='service_role',
-                       check_str='role:service'),
+    policy.RuleDefault(name='admin_required', check_str='role:admin'),
+    policy.RuleDefault(
+        name='service_or_admin',
+        check_str='rule:admin_required or rule:service_role'),
+    policy.RuleDefault(name='service_role', check_str='role:service'),
 ]

View File

@@ -14,34 +14,47 @@ from oslo_policy import policy
 from armada.common.policies import base

 armada_policies = [
     policy.DocumentedRuleDefault(
         name=base.ARMADA % 'create_endpoints',
         check_str=base.RULE_ADMIN_REQUIRED,
         description='Install manifest charts',
-        operations=[{'path': '/api/v1.0/apply/', 'method': 'POST'}]),
+        operations=[{
+            'path': '/api/v1.0/apply/',
+            'method': 'POST'
+        }]),
     policy.DocumentedRuleDefault(
         name=base.ARMADA % 'validate_manifest',
         check_str=base.RULE_ADMIN_REQUIRED,
         description='Validate manifest',
-        operations=[{'path': '/api/v1.0/validatedesign/', 'method': 'POST'}]),
+        operations=[{
+            'path': '/api/v1.0/validatedesign/',
+            'method': 'POST'
+        }]),
     policy.DocumentedRuleDefault(
         name=base.ARMADA % 'test_release',
         check_str=base.RULE_ADMIN_REQUIRED,
         description='Test release',
-        operations=[{'path': '/api/v1.0/test/{release}', 'method': 'GET'}]),
+        operations=[{
+            'path': '/api/v1.0/test/{release}',
+            'method': 'GET'
+        }]),
     policy.DocumentedRuleDefault(
         name=base.ARMADA % 'test_manifest',
         check_str=base.RULE_ADMIN_REQUIRED,
         description='Test manifest',
-        operations=[{'path': '/api/v1.0/tests/', 'method': 'POST'}]),
+        operations=[{
+            'path': '/api/v1.0/tests/',
+            'method': 'POST'
+        }]),
     policy.DocumentedRuleDefault(
         name=base.ARMADA % 'rollback_release',
         check_str=base.RULE_ADMIN_REQUIRED,
         description='Rollback release',
-        operations=[{'path': '/api/v1.0/rollback/{release}', 'method': 'POST'}]
-    ),
+        operations=[{
+            'path': '/api/v1.0/rollback/{release}',
+            'method': 'POST'
+        }]),
 ]

View File

@@ -14,19 +14,23 @@ from oslo_policy import policy
 from armada.common.policies import base

 tiller_policies = [
     policy.DocumentedRuleDefault(
         name=base.TILLER % 'get_status',
         check_str=base.RULE_ADMIN_REQUIRED,
         description='Get Tiller status',
-        operations=[{'path': '/api/v1.0/status/', 'method': 'GET'}]),
+        operations=[{
+            'path': '/api/v1.0/status/',
+            'method': 'GET'
+        }]),
     policy.DocumentedRuleDefault(
         name=base.TILLER % 'get_release',
         check_str=base.RULE_ADMIN_REQUIRED,
         description='Get Tiller release',
-        operations=[{'path': '/api/v1.0/releases/', 'method': 'GET'}]),
+        operations=[{
+            'path': '/api/v1.0/releases/',
+            'method': 'GET'
+        }]),
 ]

View File

@@ -18,7 +18,6 @@ from oslo_policy import policy
 from armada.common import policies
 from armada.exceptions import base_exception as exc

-
 CONF = cfg.CONF
 _ENFORCER = None
@@ -46,14 +45,18 @@ def _enforce_policy(action, target, credentials, do_raise=True):

 def enforce(rule):
+
     def decorator(func):
+
         @functools.wraps(func)
         def handler(*args, **kwargs):
             setup_policy()
             context = args[1].context
             _enforce_policy(rule, {}, context, do_raise=True)
             return func(*args, **kwargs)
+
         return handler
+
     return decorator

View File

@@ -35,8 +35,13 @@ class ArmadaSession(object):
                  read timeout to use
     """

-    def __init__(self, host, port=None, scheme='http', token=None,
-                 marker=None, timeout=None):
+    def __init__(self,
+                 host,
+                 port=None,
+                 scheme='http',
+                 token=None,
+                 marker=None,
+                 timeout=None):
         self._session = requests.Session()
         self._session.headers.update({
@@ -48,11 +53,10 @@ class ArmadaSession(object):
         if port:
             self.port = port
-            self.base_url = "{}://{}:{}/api/".format(
-                self.scheme, self.host, self.port)
+            self.base_url = "{}://{}:{}/api/".format(self.scheme, self.host,
+                                                     self.port)
         else:
-            self.base_url = "{}://{}/api/".format(
-                self.scheme, self.host)
+            self.base_url = "{}://{}/api/".format(self.scheme, self.host)

         self.default_timeout = ArmadaSession._calc_timeout_tuple((20, 3600),
                                                                  timeout)
@@ -75,15 +79,21 @@ class ArmadaSession(object):
         api_url = '{}{}'.format(self.base_url, endpoint)
         req_timeout = self._timeout(timeout)

-        self.logger.debug("Sending armada_client session GET %s with "
-                          "params=[%s], headers=[%s], timeout=[%s]",
-                          api_url, query, headers, req_timeout)
+        self.logger.debug(
+            "Sending armada_client session GET %s with "
+            "params=[%s], headers=[%s], timeout=[%s]", api_url, query, headers,
+            req_timeout)

         resp = self._session.get(
             api_url, params=query, headers=headers, timeout=req_timeout)

         return resp

-    def post(self, endpoint, query=None, body=None, data=None, headers=None,
+    def post(self,
+             endpoint,
+             query=None,
+             body=None,
+             data=None,
+             headers=None,
              timeout=None):
         """
         Send a POST request to armada. If both body and data are specified,
@@ -101,23 +111,26 @@ class ArmadaSession(object):
         api_url = '{}{}'.format(self.base_url, endpoint)
         req_timeout = self._timeout(timeout)

-        self.logger.debug("Sending armada_client session POST %s with "
-                          "params=[%s], headers=[%s], timeout=[%s]",
-                          api_url, query, headers, req_timeout)
+        self.logger.debug(
+            "Sending armada_client session POST %s with "
+            "params=[%s], headers=[%s], timeout=[%s]", api_url, query, headers,
+            req_timeout)

         if body is not None:
             self.logger.debug("Sending POST with explicit body: \n%s" % body)
-            resp = self._session.post(api_url,
-                                      params=query,
-                                      data=body,
-                                      headers=headers,
-                                      timeout=req_timeout)
+            resp = self._session.post(
+                api_url,
+                params=query,
+                data=body,
+                headers=headers,
+                timeout=req_timeout)
         else:
             self.logger.debug("Sending POST with JSON body: \n%s" % str(data))
-            resp = self._session.post(api_url,
-                                      params=query,
-                                      json=data,
-                                      headers=headers,
-                                      timeout=req_timeout)
+            resp = self._session.post(
+                api_url,
+                params=query,
+                json=data,
+                headers=headers,
+                timeout=req_timeout)

         return resp
@@ -145,7 +158,7 @@ class ArmadaSession(object):
         try:
             if isinstance(timeout, tuple):
                 if all(isinstance(v, int)
                        for v in timeout) and len(timeout) == 2:
                     connect_timeout, read_timeout = timeout
                 else:
                     raise ValueError("Tuple non-integer or wrong length")
@@ -154,8 +167,8 @@ class ArmadaSession(object):
             elif timeout is not None:
                 raise ValueError("Non integer timeout value")
         except ValueError:
-            LOG.warn("Timeout value must be a tuple of integers or a single"
-                     " integer. Proceeding with values of (%s, %s)",
-                     connect_timeout,
-                     read_timeout)
+            LOG.warn(
+                "Timeout value must be a tuple of integers or a single"
+                " integer. Proceeding with values of (%s, %s)",
+                connect_timeout, read_timeout)
         return (connect_timeout, read_timeout)

View File

@@ -54,10 +54,7 @@ def set_default_for_default_log_levels():
     This function needs to be called before CONF().
     """
-    extra_log_level_defaults = [
-        'kubernetes.client.rest=INFO'
-    ]
+    extra_log_level_defaults = ['kubernetes.client.rest=INFO']

-    log.set_defaults(
-        default_log_levels=log.get_default_log_levels() +
-        extra_log_level_defaults)
+    log.set_defaults(default_log_levels=log.get_default_log_levels() +
+                     extra_log_level_defaults)

View File

@@ -17,45 +17,37 @@ from oslo_config import cfg
 from armada.conf import utils

 default_options = [
     cfg.ListOpt(
         'armada_apply_roles',
         default=['admin'],
         help=utils.fmt('IDs of approved API access roles.')),
-
     cfg.StrOpt(
         'auth_url',
         default='http://0.0.0.0/v3',
         help=utils.fmt('The default Keystone authentication url.')),
-
     cfg.StrOpt(
         'certs',
         default=None,
         help=utils.fmt("""
Absolute path to the certificate file to use for chart registries
""")),
-
     cfg.StrOpt(
         'kubernetes_config_path',
         default='/home/user/.kube/',
         help=utils.fmt('Path to Kubernetes configurations.')),
-
     cfg.BoolOpt(
         'middleware',
         default=True,
         help=utils.fmt("""
Enables or disables Keystone authentication middleware.
""")),
-
     cfg.StrOpt(
         'project_domain_name',
         default='default',
         help=utils.fmt("""
The Keystone project domain name used for authentication.
""")),
-
     cfg.StrOpt(
         'project_name',
         default='admin',
@@ -69,22 +61,18 @@ The Keystone project domain name used for authentication.
         help=utils.fmt("""Optional path to an SSH private key used for
authenticating against a Git source repository. The path must be an absolute
path to the private key that includes the name of the key itself.""")),
-
     cfg.StrOpt(
         'tiller_pod_labels',
         default='app=helm,name=tiller',
         help=utils.fmt('Labels for the Tiller pod.')),
-
     cfg.StrOpt(
         'tiller_namespace',
         default='kube-system',
         help=utils.fmt('Namespace for the Tiller pod.')),
-
     cfg.IntOpt(
         'tiller_port',
         default=44134,
         help=utils.fmt('Port for the Tiller pod.')),
-
     cfg.ListOpt(
         'tiller_release_roles',
         default=['admin'],
@@ -99,11 +87,11 @@ def register_opts(conf):
 def list_opts():
     return {
-        'DEFAULT': default_options,
-        'keystone_authtoken': (
-            ks_loading.get_session_conf_options() +
-            ks_loading.get_auth_common_conf_options() +
-            ks_loading.get_auth_plugin_conf_options('password') +
-            ks_loading.get_auth_plugin_conf_options('v3password')
-        )
+        'DEFAULT':
+        default_options,
+        'keystone_authtoken':
+        (ks_loading.get_session_conf_options() +
+         ks_loading.get_auth_common_conf_options() +
+         ks_loading.get_auth_plugin_conf_options('password') +
+         ks_loading.get_auth_plugin_conf_options('v3password'))
     }

View File

@@ -33,7 +33,6 @@ import importlib
 import os
 import pkgutil

-
 LIST_OPTS_FUNC_NAME = 'list_opts'
 IGNORED_MODULES = ('opts', 'constants', 'utils')
@@ -71,9 +70,8 @@ def _import_modules(module_names):
         if not hasattr(module, LIST_OPTS_FUNC_NAME):
             raise Exception(
                 "The module '%s' should have a '%s' function which "
-                "returns the config options." % (
-                    full_module_path,
-                    LIST_OPTS_FUNC_NAME))
+                "returns the config options." % (full_module_path,
+                                                 LIST_OPTS_FUNC_NAME))
         else:
             imported_modules.append(module)
     return imported_modules

View File

@@ -19,7 +19,6 @@ import falcon
 from oslo_config import cfg
 from oslo_log import log as logging

-
 LOG = logging.getLogger(__name__)
 CONF = cfg.CONF
@@ -97,8 +96,10 @@ def format_error_resp(req,
     # message list as well. In both cases, if the error flag is not
     # set, set it appropriately.
     if error_list is None:
-        error_list = [{'message': 'An error occurred, but was not specified',
-                       'error': True}]
+        error_list = [{
+            'message': 'An error occurred, but was not specified',
+            'error': True
+        }]
     else:
         for error_item in error_list:
             if 'error' not in error_item:
@@ -145,9 +146,11 @@ def default_error_serializer(req, resp, exception):
         message=exception.description,
         reason=exception.title,
         error_type=exception.__class__.__name__,
-        error_list=[{'message': exception.description, 'error': True}],
-        info_list=None
-    )
+        error_list=[{
+            'message': exception.description,
+            'error': True
+        }],
+        info_list=None)


 def default_exception_handler(ex, req, resp, params):
@@ -168,8 +171,7 @@ def default_exception_handler(ex, req, resp, params):
         falcon.HTTP_500,
         error_type=ex.__class__.__name__,
         message="Unhandled Exception raised: %s" % str(ex),
-        retry=True
-    )
+        retry=True)


 class AppError(Exception):
@@ -180,13 +182,15 @@ class AppError(Exception):
     title = 'Internal Server Error'
     status = falcon.HTTP_500

-    def __init__(self,
-                 title=None,
-                 description=None,
-                 error_list=None,
-                 info_list=None,
-                 status=None,
-                 retry=False,):
+    def __init__(
+            self,
+            title=None,
+            description=None,
+            error_list=None,
+            info_list=None,
+            status=None,
+            retry=False,
+    ):
         """
         :param description: The internal error description
         :param error_list: The list of errors

View File

@@ -14,5 +14,4 @@

 from armada.exceptions.manifest_exceptions import ManifestException

-
 __all__ = ['ManifestException']

View File

@@ -37,6 +37,6 @@ class ProtectedReleaseException(ArmadaException):
     def __init__(self, reason):
         self._message = (
-            'Armada encountered protected release %s in FAILED status' % reason
-        )
+            'Armada encountered protected release %s in FAILED status' %
+            reason)
         super(ProtectedReleaseException, self).__init__(self._message)

View File

@@ -43,9 +43,8 @@ class ArmadaAPIException(falcon.HTTPError):
     def __init__(self, message=None, **kwargs):
         self.message = message or self.message
-        super(ArmadaAPIException, self).__init__(
-            self.status, self.title, self.message, **kwargs
-        )
+        super(ArmadaAPIException, self).__init__(self.status, self.title,
+                                                 self.message, **kwargs)


 class ActionForbidden(ArmadaAPIException):

View File

@@ -48,9 +48,10 @@ class HelmChartBuildException(ChartBuilderException):
     def __init__(self, chart_name, details):
         self._chart_name = chart_name
         self._message = ('Failed to build Helm chart for {chart_name}. '
-                         'Details: {details}'.format(
-                             **{'chart_name': chart_name,
-                                'details': details}))
+                         'Details: {details}'.format(**{
+                             'chart_name': chart_name,
+                             'details': details
+                         }))

         super(HelmChartBuildException, self).__init__(self._message)

View File

@@ -59,7 +59,7 @@ class InvalidOverrideValueException(OverrideException):
     def __init__(self, override_command):
         self._message = '{} is not a valid override statement.'.format(
             override_command)
         super(InvalidOverrideValueException, self).__init__(self._message)

View File

@@ -69,8 +69,8 @@ class GitSSHException(SourceException):
     def __init__(self, ssh_key_path):
         self._ssh_key_path = ssh_key_path

-        self._message = (
-            'Failed to find specified SSH key: {}.'.format(self._ssh_key_path))
+        self._message = ('Failed to find specified SSH key: {}.'.format(
+            self._ssh_key_path))

         super(GitSSHException, self).__init__(self._message)

View File

@ -52,8 +52,7 @@ class PostUpdateJobDeleteException(TillerException):
    def __init__(self, name, namespace):
        message = 'Failed to delete k8s job {} in {}'.format(name, namespace)

        super(PostUpdateJobDeleteException, self).__init__(message)

@ -68,8 +67,7 @@ class PostUpdateJobCreateException(TillerException):
    def __init__(self, name, namespace):
        message = 'Failed to create k8s job {} in {}'.format(name, namespace)

        super(PostUpdateJobCreateException, self).__init__(message)

@ -84,8 +82,7 @@ class PreUpdateJobDeleteException(TillerException):
    def __init__(self, name, namespace):
        message = 'Failed to delete k8s job {} in {}'.format(name, namespace)

        super(PreUpdateJobDeleteException, self).__init__(message)

@ -95,8 +92,7 @@ class PreUpdateJobCreateException(TillerException):
    def __init__(self, name, namespace):
        message = 'Failed to create k8s job {} in {}'.format(name, namespace)

        super(PreUpdateJobCreateException, self).__init__(message)

@ -152,8 +148,7 @@ class GetReleaseStatusException(TillerException):
    '''

    def __init__(self, release, version):
        message = 'Failed to get {} status {} version'.format(release, version)

        super(GetReleaseStatusException, self).__init__(message)

@ -33,7 +33,6 @@ from armada.utils.release import release_prefixer
from armada.utils import source
from armada.utils import validate

LOG = logging.getLogger(__name__)
CONF = cfg.CONF

@ -97,8 +96,10 @@ class Armada(object):
        # TODO: Use dependency injection i.e. pass in a Tiller instead of
        # creating it here.
        self.tiller = Tiller(
            tiller_host=tiller_host,
            tiller_port=tiller_port,
            tiller_namespace=tiller_namespace,
            dry_run=dry_run)
        self.documents = Override(
            documents, overrides=set_ovr, values=values).update_manifests()
        self.k8s_wait_attempts = k8s_wait_attempts

@ -166,8 +167,7 @@ class Armada(object):
            LOG.info('Downloading tarball from: %s', location)

            if not CONF.certs:
                LOG.warn('Disabling server validation certs to extract charts')
                tarball_dir = source.get_tarball(location, verify=False)
            else:
                tarball_dir = source.get_tarball(location, verify=CONF.cert)
@ -189,9 +189,10 @@ class Armada(object):
                    logstr += ' auth method: {}'.format(auth_method)
                    LOG.info(logstr)

                    repo_dir = source.git_clone(
                        *repo_branch,
                        proxy_server=proxy_server,
                        auth_method=auth_method)
                    repos[repo_branch] = repo_dir

                    chart['source_dir'] = (repo_dir, subpath)

@ -216,8 +217,8 @@ class Armada(object):
            else:
                # tiller.list_charts() only looks at DEPLOYED/FAILED so
                # this should be unreachable
                LOG.debug('Ignoring release %s in status %s.', release[0],
                          release[4])

        return deployed_releases, failed_releases

@ -251,9 +252,10 @@ class Armada(object):
            cg_desc = chartgroup.get('description', '<missing description>')
            cg_sequenced = chartgroup.get('sequenced', False)
            cg_test_all_charts = chartgroup.get('test_charts', False)
            LOG.info(
                'Processing ChartGroup: %s (%s), sequenced=%s, '
                'test_charts=%s', cg_name, cg_desc, cg_sequenced,
                cg_test_all_charts)

            ns_label_set = set()
            tests_to_run = []
@ -281,16 +283,17 @@ class Armada(object):
                        release_name)
                    if protected:
                        if p_continue:
                            LOG.warn(
                                'Release %s is `protected`, '
                                'continue_processing=True. Operator must '
                                'handle FAILED release manually.',
                                release_name)
                            msg['protected'].append(release_name)
                            continue
                        else:
                            LOG.error(
                                'Release %s is `protected`, '
                                'continue_processing=False.', release_name)
                            raise armada_exceptions.ProtectedReleaseException(
                                release_name)
                else:

@ -310,9 +313,9 @@ class Armada(object):
                wait_labels = wait_values.get('labels', {})

                # Determine wait logic
                this_chart_should_wait = (cg_sequenced or self.force_wait or
                                          wait_timeout > 0 or
                                          len(wait_labels) > 0)

                if this_chart_should_wait and wait_timeout <= 0:
                    LOG.warn('No Chart timeout specified, using default: %ss',
@ -366,9 +369,10 @@ class Armada(object):
                    # TODO(alanmeadows) account for .files differences
                    # once we support those
                    LOG.info('Checking upgrade chart diffs.')
                    upgrade_diff = self.show_diff(chart, apply_chart,
                                                  apply_values,
                                                  chartbuilder.dump(), values,
                                                  msg)

                    if not upgrade_diff:
                        LOG.info("There are no updates found in this chart")

@ -395,9 +399,8 @@ class Armada(object):
                        recreate_pods=recreate_pods)

                    if this_chart_should_wait:
                        self._wait_until_ready(release_name, wait_labels,
                                               namespace, timer)

                    # Track namespace+labels touched by upgrade
                    ns_label_set.add((namespace, tuple(wait_labels.items())))

@ -423,9 +426,8 @@ class Armada(object):
                        timeout=timer)

                    if this_chart_should_wait:
                        self._wait_until_ready(release_name, wait_labels,
                                               namespace, timer)

                    # Track namespace+labels touched by install
                    ns_label_set.add((namespace, tuple(wait_labels.items())))
@ -438,8 +440,9 @@ class Armada(object):
                timer = int(round(deadline - time.time()))
                if test_this_chart:
                    if cg_sequenced:
                        LOG.info(
                            'Running sequenced test, timeout remaining: '
                            '%ss.', timer)
                        if timer <= 0:
                            reason = ('Timeout expired before testing '
                                      'sequenced release %s' % release_name)

@ -466,9 +469,10 @@ class Armada(object):
            for (ns, labels) in ns_label_set:
                labels_dict = dict(labels)
                timer = int(round(deadline - time.time()))
                LOG.info(
                    'Final ChartGroup wait for healthy namespace (%s), '
                    'labels=(%s), timeout remaining: %ss.', ns, labels_dict,
                    timer)
                if timer <= 0:
                    reason = ('Timeout expired waiting on namespace: %s, '
                              'labels: (%s)' % (ns, labels_dict))

@ -476,9 +480,10 @@ class Armada(object):
                    raise armada_exceptions.ArmadaTimeoutException(reason)

                self._wait_until_ready(
                    release_name=None,
                    wait_labels=labels_dict,
                    namespace=ns,
                    timeout=timer)

            # After entire ChartGroup is healthy, run any pending tests
            for (test, test_timer) in tests_to_run:
@ -489,8 +494,7 @@ class Armada(object):
        if self.enable_chart_cleanup:
            self._chart_cleanup(
                prefix,
                self.manifest[const.KEYWORD_ARMADA][const.KEYWORD_GROUPS], msg)

        LOG.info('Done applying manifest.')
        return msg

@ -513,9 +517,10 @@ class Armada(object):
    def _wait_until_ready(self, release_name, wait_labels, namespace, timeout):
        if self.dry_run:
            LOG.info(
                'Skipping wait during `dry-run`, would have waited on '
                'namespace=%s, labels=(%s) for %ss.', namespace, wait_labels,
                timeout)
            return

        self.tiller.k8s.wait_until_ready(
@ -524,13 +529,13 @@ class Armada(object):
            namespace=namespace,
            k8s_wait_attempts=self.k8s_wait_attempts,
            k8s_wait_attempt_sleep=self.k8s_wait_attempt_sleep,
            timeout=timeout)

    def _test_chart(self, release_name, timeout):
        if self.dry_run:
            LOG.info(
                'Skipping test during `dry-run`, would have tested '
                'release=%s with timeout %ss.', release_name, timeout)
            return True

        success = test_release_for_success(
@ -591,8 +596,9 @@ class Armada(object):
        valid_releases = []
        for gchart in charts:
            for chart in gchart.get(const.KEYWORD_CHARTS, []):
                valid_releases.append(
                    release_prefixer(prefix,
                                     chart.get('chart', {}).get('release')))

        actual_releases = [x.name for x in self.tiller.list_releases()]
        release_diff = list(set(actual_releases) - set(valid_releases))

@ -63,20 +63,16 @@ class ChartBuilder(object):
        property from the chart, or else "" if the property isn't a 2-tuple.
        '''
        source_dir = self.chart.get('source_dir')
        return (os.path.join(*source_dir)
                if (source_dir and isinstance(source_dir, (list, tuple)) and
                    len(source_dir) == 2) else "")

    def get_ignored_files(self):
        '''Load files to ignore from .helmignore if present.'''
        try:
            ignored_files = []
            if os.path.exists(
                    os.path.join(self.source_directory, '.helmignore')):
                with open(os.path.join(self.source_directory,
                                       '.helmignore')) as f:
                    ignored_files = f.readlines()
@ -149,8 +145,9 @@ class ChartBuilder(object):
                with open(abspath, 'r') as f:
                    file_contents = f.read().encode(encoding)
            except OSError as e:
                LOG.debug(
                    'Failed to open and read file %s in the helm '
                    'chart directory.', abspath)
                raise chartbuilder_exceptions.FilesLoadException(
                    file=abspath, details=e)
            except UnicodeError as e:

@ -162,22 +159,24 @@ class ChartBuilder(object):
                    break

            if len(unicode_errors) == 2:
                LOG.debug(
                    'Failed to read file %s in the helm chart directory.'
                    ' Ensure that it is encoded using utf-8.', abspath)
                raise chartbuilder_exceptions.FilesLoadException(
                    file=abspath,
                    clazz=unicode_errors[0].__class__.__name__,
                    details='\n'.join(e for e in unicode_errors))

            non_template_files.append(
                Any(type_url=relpath, value=file_contents))

        for root, dirs, files in os.walk(self.source_directory):
            relfolder = os.path.split(root)[-1]
            rel_folder_path = os.path.relpath(root, self.source_directory)

            if not any(
                    root.startswith(os.path.join(self.source_directory, x))
                    for x in ['templates', 'charts']):
                for file in files:
                    if (file not in files_to_ignore and
                            file not in non_template_files):
@ -211,8 +210,9 @@ class ChartBuilder(object):
        templates = []
        if not os.path.exists(
                os.path.join(self.source_directory, 'templates')):
            LOG.warn(
                "Chart %s has no templates directory. "
                "No templates will be deployed", chart_name)
        for root, _, files in os.walk(
                os.path.join(self.source_directory, 'templates'),
                topdown=True):

@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Module for resolving design references."""

import urllib.parse

@ -66,8 +67,7 @@ class ReferenceResolver(object):
            except ValueError:
                raise InvalidPathException(
                    "Cannot resolve design reference %s: unable "
                    "to parse as valid URI." % l)

        return data

@ -90,8 +90,8 @@ class ReferenceResolver(object):
        response = requests.get(design_uri.geturl(), timeout=30)
        if response.status_code >= 400:
            raise InvalidPathException(
                "Error received for HTTP reference: %d" %
                response.status_code)

        return response.content
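
An editorial usage sketch (not part of this change set): one way the HTTP-reference resolution above might be exercised. The module paths and the example URL are assumptions for illustration only.

# Hypothetical caller of ReferenceResolver.resolve_reference(); the import
# paths and URL below are illustrative assumptions, not taken from the diff.
from armada.exceptions.source_exceptions import InvalidPathException
from armada.handlers.document import ReferenceResolver

try:
    # Returns one raw payload (bytes) per resolved design reference.
    payloads = ReferenceResolver.resolve_reference(
        ['https://example.com/site/armada-manifest.yaml'])
except InvalidPathException as e:
    # Raised for unparseable URIs or HTTP responses >= 400, per the code above.
    print('Could not resolve design reference: %s' % e)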

@ -49,7 +49,9 @@ class K8s(object):
        self.batch_v1beta1_api = client.BatchV1beta1Api()
        self.extension_api = client.ExtensionsV1beta1Api()

    def delete_job_action(self,
                          name,
                          namespace="default",
                          propagation_policy='Foreground',
                          timeout=DEFAULT_K8S_TIMEOUT):
        '''

@ -59,16 +61,13 @@ class K8s(object):
        to the delete. Default 'Foreground' means that child pods to the
        job will be deleted before the job is marked as deleted.
        '''
        self._delete_job_action(self.batch_api.list_namespaced_job,
                                self.batch_api.delete_namespaced_job, "job",
                                name, namespace, propagation_policy, timeout)

    def delete_cron_job_action(self,
                               name,
                               namespace="default",
                               propagation_policy='Foreground',
                               timeout=DEFAULT_K8S_TIMEOUT):
        '''
@ -80,15 +79,15 @@ class K8s(object):
        '''
        self._delete_job_action(
            self.batch_v1beta1_api.list_namespaced_cron_job,
            self.batch_v1beta1_api.delete_namespaced_cron_job, "cron job",
            name, namespace, propagation_policy, timeout)

    def _delete_job_action(self,
                           list_func,
                           delete_func,
                           job_type_description,
                           name,
                           namespace="default",
                           propagation_policy='Foreground',
                           timeout=DEFAULT_K8S_TIMEOUT):
        try:

@ -100,12 +99,13 @@ class K8s(object):
            w = watch.Watch()
            issue_delete = True
            found_events = False
            for event in w.stream(
                    list_func, namespace=namespace, timeout_seconds=timeout):
                if issue_delete:
                    delete_func(
                        name=name,
                        namespace=namespace,
                        body=body,
                        propagation_policy=propagation_policy)
                    issue_delete = False
@ -125,15 +125,14 @@ class K8s(object):
                          job_type_description, name, namespace)

            err_msg = ('Reached timeout while waiting to delete %s: '
                       'name=%s, namespace=%s' % (job_type_description, name,
                                                  namespace))
            LOG.error(err_msg)
            raise exceptions.KubernetesWatchTimeoutException(err_msg)

        except ApiException as e:
            LOG.exception("Exception when deleting %s: name=%s, namespace=%s",
                          job_type_description, name, namespace)
            raise e

    def get_namespace_job(self, namespace="default", label_selector=''):

@ -282,8 +281,9 @@ class K8s(object):
        w = watch.Watch()
        found_events = False
        for event in w.stream(
                self.client.list_pod_for_all_namespaces,
                timeout_seconds=timeout):
            pod_name = event['object'].metadata.name

            if release in pod_name:
@ -322,13 +322,13 @@ class K8s(object):
        label_selector = label_selectors(labels) if labels else ''

        wait_attempts = (k8s_wait_attempts if k8s_wait_attempts >= 1 else 1)
        sleep_time = (k8s_wait_attempt_sleep
                      if k8s_wait_attempt_sleep >= 1 else 1)

        LOG.debug(
            "Wait on namespace=(%s) labels=(%s) for %s sec "
            "(k8s wait %s times, sleep %ss)", namespace, label_selector,
            timeout, wait_attempts, sleep_time)

        if not namespace:
            # This shouldn't be reachable

@ -349,14 +349,16 @@ class K8s(object):
            if deadline_remaining <= 0:
                return False

            timed_out, modified_pods, unready_pods, found_events = (
                self._wait_one_time(
                    namespace=namespace,
                    label_selector=label_selector,
                    timeout=deadline_remaining))

            if not found_events:
                LOG.warn(
                    'Saw no install/update events for release=%s, '
                    'namespace=%s, labels=(%s)', release, namespace,
                    label_selector)

            if timed_out:
                LOG.info('Timed out waiting for pods: %s',
@ -380,8 +382,9 @@ class K8s(object):
        return True

    def _wait_one_time(self, namespace, label_selector, timeout=100):
        LOG.debug(
            'Starting to wait: namespace=%s, label_selector=(%s), '
            'timeout=%s', namespace, label_selector, timeout)
        ready_pods = {}
        modified_pods = set()
        w = watch.Watch()

@ -420,15 +423,15 @@ class K8s(object):
                    pod_ready = True
                    if (pod_phase == 'Succeeded' or
                        (pod_phase == 'Running' and self._get_pod_condition(
                            status.conditions, 'Ready') == 'True')):
                        LOG.debug('Pod %s is ready!', pod_name)
                    else:
                        pod_ready = False
                        LOG.debug(
                            'Pod %s not ready: conditions:\n%s\n'
                            'container_statuses:\n%s', pod_name, status.conditions,
                            status.container_statuses)

                    ready_pods[pod_name] = pod_ready
@ -440,8 +443,8 @@ class K8s(object):
                    ready_pods.pop(pod_name)

            elif event_type == 'ERROR':
                LOG.error('Pod %s: Got error event %s', pod_name,
                          event['object'].to_dict())
                raise exceptions.KubernetesErrorEventException(
                    'Got error event for pod: %s' % event['object'])

@ -449,8 +452,8 @@ class K8s(object):
                LOG.error('Unrecognized event type (%s) for pod: %s',
                          event_type, event['object'])
                raise exceptions.KubernetesUnknownStreamingEventTypeException(
                    'Got unknown event type (%s) for pod: %s' %
                    (event_type, event['object']))

        if all(ready_pods.values()):
            return (False, modified_pods, [], found_events)

@ -468,7 +471,8 @@ class K8s(object):
    def _check_timeout(self, timeout):
        if timeout <= 0:
            LOG.warn(
                'Kubernetes timeout is invalid or unspecified, '
                'using default %ss.', DEFAULT_K8S_TIMEOUT)
            timeout = DEFAULT_K8S_TIMEOUT
        return timeout

@ -188,8 +188,8 @@ class Manifest(object):
""" """
try: try:
chart = None chart = None
for iter, chart in enumerate(chart_group.get('data', {}).get( for iter, chart in enumerate(
'chart_group', [])): chart_group.get('data', {}).get('chart_group', [])):
if isinstance(chart, dict): if isinstance(chart, dict):
continue continue
chart_dep = self.find_chart_document(chart) chart_dep = self.find_chart_document(chart)
@ -214,8 +214,8 @@ class Manifest(object):
""" """
try: try:
group = None group = None
for iter, group in enumerate(self.manifest.get('data', {}).get( for iter, group in enumerate(
'chart_groups', [])): self.manifest.get('data', {}).get('chart_groups', [])):
if isinstance(group, dict): if isinstance(group, dict):
continue continue
chart_grp = self.find_chart_group_document(group) chart_grp = self.find_chart_group_document(group)
@ -244,6 +244,4 @@ class Manifest(object):
self.build_chart_groups() self.build_chart_groups()
self.build_armada_manifest() self.build_armada_manifest()
return { return {'armada': self.manifest.get('data', {})}
'armada': self.manifest.get('data', {})
}

@ -22,6 +22,7 @@ from armada.utils import validate
class Override(object):

    def __init__(self, documents, overrides=None, values=None):
        self.documents = documents
        self.overrides = overrides

@ -61,8 +62,8 @@ class Override(object):
    def find_manifest_document(self, doc_path):
        for doc in self.documents:
            if doc.get('schema') == self.find_document_type(
                    doc_path[0]) and doc.get('metadata',
                                             {}).get('name') == doc_path[1]:
                return doc

        raise override_exceptions.UnknownDocumentOverrideException(

@ -118,24 +119,24 @@ class Override(object):
    def update_chart_document(self, ovr):
        for doc in self.documents:
            if doc.get('schema') == const.DOCUMENT_CHART and doc.get(
                    'metadata', {}).get('name') == ovr.get('metadata',
                                                           {}).get('name'):
                self.update(doc.get('data', {}), ovr.get('data', {}))
                return

    def update_chart_group_document(self, ovr):
        for doc in self.documents:
            if doc.get('schema') == const.DOCUMENT_GROUP and doc.get(
                    'metadata', {}).get('name') == ovr.get('metadata',
                                                           {}).get('name'):
                self.update(doc.get('data', {}), ovr.get('data', {}))
                return

    def update_armada_manifest(self, ovr):
        for doc in self.documents:
            if doc.get('schema') == const.DOCUMENT_MANIFEST and doc.get(
                    'metadata', {}).get('name') == ovr.get('metadata',
                                                           {}).get('name'):
                self.update(doc.get('data', {}), ovr.get('data', {}))
                return

@ -20,10 +20,10 @@ TESTRUN_STATUS_FAILURE = 2
TESTRUN_STATUS_RUNNING = 3


def test_release_for_success(tiller,
                             release,
                             timeout=const.DEFAULT_TILLER_TIMEOUT):
    test_suite_run = tiller.test_release(release, timeout)
    results = getattr(test_suite_run, 'results', [])
    failed_results = [r for r in results if r.status != TESTRUN_STATUS_SUCCESS]
    return len(failed_results) == 0
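
A minimal editorial sketch (not part of this change set) of how the helper above might be called; the module paths and the Tiller constructor arguments are assumptions for illustration.

# Hypothetical wrapper around test_release_for_success(); module paths and
# Tiller arguments below are illustrative assumptions.
from armada.handlers.test import test_release_for_success
from armada.handlers.tiller import Tiller

def run_release_test(release_name, timeout=300):
    """Return True only when every Helm test result reports success."""
    tiller = Tiller(tiller_host=None, tiller_port=44134)
    return test_release_for_success(tiller, release_name, timeout=timeout)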

@ -51,6 +51,7 @@ LOG = logging.getLogger(__name__)
class CommonEqualityMixin(object):

    def __eq__(self, other):
        return (isinstance(other, self.__class__) and
                self.__dict__ == other.__dict__)

@ -76,8 +77,11 @@ class Tiller(object):
    service over gRPC
    '''

    def __init__(self,
                 tiller_host=None,
                 tiller_port=None,
                 tiller_namespace=None,
                 dry_run=False):
        self.tiller_host = tiller_host
        self.tiller_port = tiller_port or CONF.tiller_port
        self.tiller_namespace = tiller_namespace or CONF.tiller_namespace
@ -113,18 +117,16 @@ class Tiller(object):
        tiller_ip = self._get_tiller_ip()
        tiller_port = self._get_tiller_port()
        try:
            LOG.debug(
                'Tiller getting gRPC insecure channel at %s:%s '
                'with options: [grpc.max_send_message_length=%s, '
                'grpc.max_receive_message_length=%s]', tiller_ip, tiller_port,
                MAX_MESSAGE_LENGTH, MAX_MESSAGE_LENGTH)
            return grpc.insecure_channel(
                '%s:%s' % (tiller_ip, tiller_port),
                options=[('grpc.max_send_message_length', MAX_MESSAGE_LENGTH),
                         ('grpc.max_receive_message_length',
                          MAX_MESSAGE_LENGTH)])
        except Exception:
            raise ex.ChannelException()
@ -194,15 +196,15 @@ class Tiller(object):
        # iterate through all the pages when collecting this list.
        # NOTE(MarshM): `Helm List` defaults to returning Deployed and Failed,
        # but this might not be a desireable ListReleasesRequest default.
        req = ListReleasesRequest(
            limit=RELEASE_LIMIT,
            status_codes=[const.STATUS_DEPLOYED, const.STATUS_FAILED],
            sort_by='LAST_RELEASED',
            sort_order='DESC')

        LOG.debug('Tiller ListReleases() with timeout=%s', self.timeout)
        release_list = stub.ListReleases(
            req, self.timeout, metadata=self.metadata)

        for y in release_list:
            # TODO(MarshM) this log is too noisy, fix later
@ -251,8 +253,8 @@ class Tiller(object):
                    labels = action.get('labels')

                    self.rolling_upgrade_pod_deployment(
                        name, release_name, namespace, labels, action_type, chart,
                        disable_hooks, values, timeout)
        except Exception:
            LOG.warn("Pre: Could not update anything, please check yaml")
            raise ex.PreUpdateJobDeleteException(name, namespace)

@ -263,8 +265,8 @@ class Tiller(object):
                    action_type = action.get('type')
                    labels = action.get('labels', None)

                    self.delete_resources(release_name, name, action_type, labels,
                                          namespace, timeout)
        except Exception:
            LOG.warn("PRE: Could not delete anything, please check yaml")
            raise ex.PreUpdateJobDeleteException(name, namespace)
@ -307,13 +309,10 @@ class Tiller(object):
        charts = []
        for latest_release in self.list_releases():
            try:
                release = (latest_release.name, latest_release.version,
                           latest_release.chart, latest_release.config.raw,
                           latest_release.info.status.Code.Name(
                               latest_release.info.status.code))
                charts.append(release)
                LOG.debug('Found release %s, version %s, status: %s',
                          release[0], release[1], release[4])

@ -323,7 +322,10 @@ class Tiller(object):
                continue
        return charts
    def update_release(self,
                       chart,
                       release,
                       namespace,
                       pre_actions=None,
                       post_actions=None,
                       disable_hooks=False,

@ -337,10 +339,10 @@ class Tiller(object):
        '''
        timeout = self._check_timeout(wait, timeout)

        LOG.info(
            'Helm update release%s: wait=%s, timeout=%s, force=%s, '
            'recreate_pods=%s', (' (dry run)' if self.dry_run else ''), wait,
            timeout, force, recreate_pods)

        if values is None:
            values = Config(raw='')

@ -366,7 +368,8 @@ class Tiller(object):
                recreate=recreate_pods)

            update_msg = stub.UpdateRelease(
                release_request,
                timeout + GRPC_EPSILON,
                metadata=self.metadata)

        except Exception:
@ -377,16 +380,17 @@ class Tiller(object):
            self._post_update_actions(post_actions, namespace)

            tiller_result = TillerResult(
                update_msg.release.name, update_msg.release.namespace,
                update_msg.release.info.status.Code.Name(
                    update_msg.release.info.status.code),
                update_msg.release.info.Description, update_msg.release.version)

            return tiller_result
    def install_release(self,
                        chart,
                        release,
                        namespace,
                        values=None,
                        wait=False,
                        timeout=None):

@ -396,8 +400,7 @@ class Tiller(object):
        timeout = self._check_timeout(wait, timeout)

        LOG.info('Helm install release%s: wait=%s, timeout=%s',
                 (' (dry run)' if self.dry_run else ''), wait, timeout)

        if values is None:
            values = Config(raw='')
@ -417,12 +420,12 @@ class Tiller(object):
                timeout=timeout)

            install_msg = stub.InstallRelease(
                release_request,
                timeout + GRPC_EPSILON,
                metadata=self.metadata)

            tiller_result = TillerResult(
                install_msg.release.name, install_msg.release.namespace,
                install_msg.release.info.status.Code.Name(
                    install_msg.release.info.status.code),
                install_msg.release.info.Description,
@ -434,7 +437,9 @@ class Tiller(object):
            status = self.get_release_status(release)
            raise ex.ReleaseException(release, status, 'Install')

    def test_release(self,
                     release,
                     timeout=const.DEFAULT_TILLER_TIMEOUT,
                     cleanup=True):
        '''
        :param release - name of release to test

@ -455,8 +460,7 @@ class Tiller(object):
            # 1. Remove this timeout
            # 2. Add `k8s_timeout=const.DEFAULT_K8S_TIMEOUT` arg and use
            release_request = TestReleaseRequest(
                name=release, timeout=timeout, cleanup=cleanup)

            test_message_stream = stub.RunReleaseTest(
                release_request, timeout, metadata=self.metadata)
@ -550,16 +554,18 @@ class Tiller(object):
        # Helm client calls ReleaseContent in Delete dry-run scenario
        if self.dry_run:
            content = self.get_release_content(release)
            LOG.info(
                'Skipping delete during `dry-run`, would have deleted '
                'release=%s from namespace=%s.', content.release.name,
                content.release.namespace)
            return

        # build release uninstall request
        try:
            stub = ReleaseServiceStub(self.channel)
            LOG.info(
                "Uninstall %s release with disable_hooks=%s, "
                "purge=%s flags", release, disable_hooks, purge)

            release_request = UninstallReleaseRequest(
                name=release, disable_hooks=disable_hooks, purge=purge)
@ -571,8 +577,13 @@ class Tiller(object):
            status = self.get_release_status(release)
            raise ex.ReleaseException(release, status, 'Delete')

    def delete_resources(self,
                         release_name,
                         resource_name,
                         resource_type,
                         resource_labels,
                         namespace,
                         wait=False,
                         timeout=const.DEFAULT_TILLER_TIMEOUT):
        '''
        :params release_name - release name the specified resource is under

@ -588,8 +599,9 @@ class Tiller(object):
        label_selector = ''

        if resource_labels is not None:
            label_selector = label_selectors(resource_labels)
        LOG.debug(
            "Deleting resources in namespace %s matching "
            "selectors (%s).", namespace, label_selector)

        handled = False
        if resource_type == 'job':
@ -598,19 +610,20 @@ class Tiller(object):
                jb_name = jb.metadata.name

                if self.dry_run:
                    LOG.info(
                        'Skipping delete job during `dry-run`, would '
                        'have deleted job %s in namespace=%s.', jb_name,
                        namespace)
                    continue

                LOG.info("Deleting job %s in namespace: %s", jb_name,
                         namespace)
                self.k8s.delete_job_action(jb_name, namespace, timeout=timeout)
                handled = True

        if resource_type == 'cronjob' or resource_type == 'job':
            get_jobs = self.k8s.get_namespace_cron_job(namespace,
                                                       label_selector)
            for jb in get_jobs.items:
                jb_name = jb.metadata.name

@ -621,42 +634,50 @@ class Tiller(object):
                        "deprecated, use `type: cronjob` instead")

                if self.dry_run:
                    LOG.info(
                        'Skipping delete cronjob during `dry-run`, would '
                        'have deleted cronjob %s in namespace=%s.', jb_name,
                        namespace)
                    continue

                LOG.info("Deleting cronjob %s in namespace: %s", jb_name,
                         namespace)
                self.k8s.delete_cron_job_action(jb_name, namespace)
                handled = True

        if resource_type == 'pod':
            release_pods = self.k8s.get_namespace_pod(namespace,
                                                      label_selector)
            for pod in release_pods.items:
                pod_name = pod.metadata.name

                if self.dry_run:
                    LOG.info(
                        'Skipping delete pod during `dry-run`, would '
                        'have deleted pod %s in namespace=%s.', pod_name,
                        namespace)
                    continue

                LOG.info("Deleting pod %s in namespace: %s", pod_name,
                         namespace)
                self.k8s.delete_namespace_pod(pod_name, namespace)
                if wait:
                    self.k8s.wait_for_pod_redeployment(pod_name, namespace)
                handled = True

        if not handled:
            LOG.error("Unable to execute name: %s type: %s ", resource_name,
                      resource_type)
    def rolling_upgrade_pod_deployment(self,
                                       name,
                                       release_name,
                                       namespace,
                                       resource_labels,
                                       action_type,
                                       chart,
                                       disable_hooks,
                                       values,
                                       timeout=const.DEFAULT_TILLER_TIMEOUT):
        '''
        update statefullsets (daemon, stateful)

@ -695,8 +716,13 @@ class Tiller(object):
            # delete pods
            self.delete_resources(
                release_name,
                name,
                'pod',
                resource_labels,
                namespace,
                wait=True,
                timeout=timeout)

        else:
            LOG.error("Unable to exectue name: % type: %s", name, action_type)
@ -714,10 +740,10 @@ class Tiller(object):
        timeout = self._check_timeout(wait, timeout)

        LOG.debug(
            'Helm rollback%s of release=%s, version=%s, '
            'wait=%s, timeout=%s', (' (dry run)' if self.dry_run else ''),
            release_name, version, wait, timeout)
        try:
            stub = ReleaseServiceStub(self.channel)
            rollback_request = RollbackReleaseRequest(

@ -742,7 +768,8 @@ class Tiller(object):
    def _check_timeout(self, wait, timeout):
        if timeout is None or timeout <= 0:
            if wait:
                LOG.warn(
                    'Tiller timeout is invalid or unspecified, '
                    'using default %ss.', self.timeout)
            timeout = self.timeout
        return timeout

@ -31,20 +31,15 @@ CONF = cfg.CONF
@click.group()
@click.option('--debug', help="Enable debug logging", is_flag=True)
@click.option(
    '--api/--no-api',
    help="Execute service endpoints. (requires url option)",
    default=False)
@click.option(
    '--url', help="Armada Service Endpoint", envvar='HOST', default=None)
@click.option(
    '--token', help="Keystone Service Token", envvar='TOKEN', default=None)
@click.pass_context
def main(ctx, debug, api, url, token):
    """

@ -83,8 +78,7 @@ def main(ctx, debug, api, url, token):
            ArmadaSession(
                host=parsed_url.netloc,
                scheme=parsed_url.scheme,
                token=token))

    if debug:
        CONF.debug = debug

@ -113,6 +113,7 @@ class AttrDict(dict):
"""Allows defining objects with attributes without defining a class """Allows defining objects with attributes without defining a class
""" """
def __init__(self, *args, **kwargs): def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs) super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self self.__dict__ = self
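
A short editorial sketch (not part of this change set) of what the AttrDict helper above provides: because the instance __dict__ is aliased to the dict itself, keys double as attributes.

# Usage sketch for the AttrDict test helper defined above.
options = AttrDict(wait=True, timeout=900)
assert options.wait is True         # attribute access
assert options['timeout'] == 900    # regular dict access
options.dry_run = False             # new attributes show up as keys too
assert options['dry_run'] is False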

@ -35,8 +35,8 @@ class BaseControllerTest(test_base.ArmadaTestCase):
        sample_conf_dir = os.path.join(current_dir, os.pardir, os.pardir,
                                       os.pardir, os.pardir, 'etc', 'armada')
        sample_conf_files = ['api-paste.ini', 'armada.conf.sample']
        with mock.patch.object(armada.conf,
                               '_get_config_files') as mock_get_config_files:
            mock_get_config_files.return_value = [
                os.path.join(sample_conf_dir, x) for x in sample_conf_files
            ]

@ -34,14 +34,16 @@ class ArmadaControllerTest(base.BaseControllerTest):
        rules = {'armada:create_endpoints': '@'}
        self.policy.set_rules(rules)

        options = {
            'debug': 'true',
            'disable_update_pre': 'false',
            'disable_update_post': 'false',
            'enable_chart_cleanup': 'false',
            'skip_pre_flight': 'false',
            'dry_run': 'false',
            'wait': 'false',
            'timeout': '100'
        }

        expected_armada_options = {
            'disable_update_pre': False,
@ -67,18 +69,18 @@ class ArmadaControllerTest(base.BaseControllerTest):
        mock_armada.return_value.sync.return_value = \
            {'diff': [], 'install': [], 'upgrade': []}

        result = self.app.simulate_post(
            path='/api/v1.0/apply',
            body=body,
            headers={'Content-Type': 'application/json'},
            params=options)

        self.assertEqual(result.json, expected)
        self.assertEqual('application/json', result.headers['content-type'])

        mock_resolver.resolve_reference.assert_called_with([payload_url])
        mock_armada.assert_called_with([{
            'foo': 'bar'
        }], **expected_armada_options)
        mock_armada.return_value.sync.assert_called()

    def test_armada_apply_no_href(self):
@ -86,23 +88,24 @@ class ArmadaControllerTest(base.BaseControllerTest):
        rules = {'armada:create_endpoints': '@'}
        self.policy.set_rules(rules)

        options = {
            'debug': 'true',
            'disable_update_pre': 'false',
            'disable_update_post': 'false',
            'enable_chart_cleanup': 'false',
            'skip_pre_flight': 'false',
            'dry_run': 'false',
            'wait': 'false',
            'timeout': '100'
        }

        payload = {'hrefs': []}
        body = json.dumps(payload)

        result = self.app.simulate_post(
            path='/api/v1.0/apply',
            body=body,
            headers={'Content-Type': 'application/json'},
            params=options)

        self.assertEqual(result.status_code, 400)

@ -64,12 +64,7 @@ class RollbackReleaseControllerTest(base.BaseControllerTest):
            dry_run=False)

        rollback_release.assert_called_once_with(
            release, 2, wait=True, timeout=123, force=True, recreate_pods=True)

        self.assertEqual(200, resp.status_code)
        self.assertEqual('Rollback of test-release complete.',

@ -43,13 +43,10 @@ class TestReleasesManifestControllerTest(base.BaseControllerTest):
        self.assertEqual(200, resp.status_code)

        result = json.loads(resp.text)
        expected = {"tests": {"passed": [], "skipped": [], "failed": []}}
        self.assertEqual(expected, result)

        mock_manifest.assert_called_once_with(documents, target_manifest=None)
        self.assertTrue(mock_tiller.called)
@ -57,8 +54,8 @@ class TestReleasesReleaseNameControllerTest(base.BaseControllerTest):
    @mock.patch.object(test, 'test_release_for_success')
    @mock.patch.object(test, 'Tiller')
    def test_test_controller_test_pass(self, mock_tiller,
                                       mock_test_release_for_success):
        rules = {'armada:test_release': '@'}
        self.policy.set_rules(rules)

@ -71,8 +68,8 @@ class TestReleasesReleaseNameControllerTest(base.BaseControllerTest):
    @mock.patch.object(test, 'test_release_for_success')
    @mock.patch.object(test, 'Tiller')
    def test_test_controller_test_fail(self, mock_tiller,
                                       mock_test_release_for_success):
        rules = {'armada:test_release': '@'}
        self.policy.set_rules(rules)

@ -103,8 +100,7 @@ class TestReleasesManifestControllerNegativeTest(base.BaseControllerTest):
    @mock.patch.object(test, 'Manifest')
    @mock.patch.object(test, 'Tiller')
    def test_test_controller_validation_failure_returns_400(self, *_):
        rules = {'armada:tests_manifest': '@'}
        self.policy.set_rules(rules)
@ -123,19 +119,22 @@ class TestReleasesManifestControllerNegativeTest(base.BaseControllerTest):
        resp_body = json.loads(resp.text)
        self.assertEqual(400, resp_body['code'])
        self.assertEqual(1, resp_body['details']['errorCount'])
        self.assertIn({
            'message':
            ('An error occurred while generating the manifest: Could not '
             'find dependency chart helm-toolkit in armada/Chart/v1.'),
            'error':
            True,
            'kind':
            'ValidationMessage',
            'level':
            'Error',
            'name':
            'ARM001',
            'documents': []
        }, resp_body['details']['messageList'])
        self.assertEqual(('Failed to validate documents or generate Armada '
                          'Manifest from documents.'), resp_body['message'])

    @mock.patch('armada.utils.validate.Manifest')
    @mock.patch.object(test, 'Tiller')
@ -158,18 +157,21 @@ class TestReleasesManifestControllerNegativeTest(base.BaseControllerTest):
        resp_body = json.loads(resp.text)
        self.assertEqual(400, resp_body['code'])
        self.assertEqual(1, resp_body['details']['errorCount'])
        self.assertEqual([{
            'message':
            ('An error occurred while generating the manifest: foo.'),
            'error':
            True,
            'kind':
            'ValidationMessage',
            'level':
            'Error',
            'name':
            'ARM001',
            'documents': []
        }], resp_body['details']['messageList'])
        self.assertEqual(('Failed to validate documents or generate Armada '
                          'Manifest from documents.'), resp_body['message'])

    @test_utils.attr(type=['negative'])
@@ -37,13 +37,17 @@ class TillerControllerTest(base.BaseControllerTest):
        result = self.app.simulate_get('/api/v1.0/status')
        expected = {
            'tiller': {
                'version': 'fake_version',
                'state': 'fake_status'
            }
        }
        self.assertEqual(expected, result.json)
        self.assertEqual('application/json', result.headers['content-type'])
        mock_tiller.assert_called_once_with(
            tiller_host=None,
            tiller_port=44134,
            tiller_namespace='kube-system')

    @mock.patch.object(tiller_controller, 'Tiller')

@@ -55,20 +59,27 @@ class TillerControllerTest(base.BaseControllerTest):
        mock_tiller.return_value.tiller_status.return_value = 'fake_status'
        mock_tiller.return_value.tiller_version.return_value = 'fake_version'

        result = self.app.simulate_get(
            '/api/v1.0/status',
            params_csv=False,
            params={
                'tiller_host': 'fake_host',
                'tiller_port': '98765',
                'tiller_namespace': 'fake_ns'
            })
        expected = {
            'tiller': {
                'version': 'fake_version',
                'state': 'fake_status'
            }
        }
        self.assertEqual(expected, result.json)
        self.assertEqual('application/json', result.headers['content-type'])
        mock_tiller.assert_called_once_with(
            tiller_host='fake_host',
            tiller_port=98765,
            tiller_namespace='fake_ns')

    @mock.patch.object(tiller_controller, 'Tiller')
    def test_tiller_releases(self, mock_tiller):

@@ -82,17 +93,22 @@ class TillerControllerTest(base.BaseControllerTest):
            return fake_release

        mock_tiller.return_value.list_releases.return_value = [
            _get_fake_release('foo', 'bar'),
            _get_fake_release('baz', 'qux')
        ]
        result = self.app.simulate_get('/api/v1.0/releases')
        expected = {
            'releases': {
                'bar_namespace': ['foo'],
                'qux_namespace': ['baz']
            }
        }
        self.assertEqual(expected, result.json)
        mock_tiller.assert_called_once_with(
            tiller_host=None,
            tiller_port=44134,
            tiller_namespace='kube-system')
        mock_tiller.return_value.list_releases.assert_called_once_with()

@@ -108,22 +124,30 @@ class TillerControllerTest(base.BaseControllerTest):
            return fake_release

        mock_tiller.return_value.list_releases.return_value = [
            _get_fake_release('foo', 'bar'),
            _get_fake_release('baz', 'qux')
        ]
        result = self.app.simulate_get(
            '/api/v1.0/releases',
            params_csv=False,
            params={
                'tiller_host': 'fake_host',
                'tiller_port': '98765',
                'tiller_namespace': 'fake_ns'
            })
        expected = {
            'releases': {
                'bar_namespace': ['foo'],
                'qux_namespace': ['baz']
            }
        }
        self.assertEqual(expected, result.json)
        mock_tiller.assert_called_once_with(
            tiller_host='fake_host',
            tiller_port=98765,
            tiller_namespace='fake_ns')
        mock_tiller.return_value.list_releases.assert_called_once_with()
@@ -22,10 +22,5 @@ class VersionsControllerTest(base.BaseControllerTest):
        Validate that /api/v1.0/health returns 204.
        """
        result = self.app.simulate_get('/versions')
        expected = {'v1.0': {'path': '/api/v1.0', 'status': 'stable'}}
        self.assertDictEqual(expected, result.json)
@@ -20,7 +20,6 @@ from armada import conf as cfg
from armada.exceptions import base_exception as exc
from armada.tests.unit import fixtures

CONF = cfg.CONF

@@ -47,9 +46,8 @@ class PolicyTestCase(testtools.TestCase):
        action = "example:nope"
        mock_ctx.to_policy_view.return_value = self.credentials
        self.assertRaises(exc.ActionForbidden, policy._enforce_policy, action,
                          self.target, mock_ctx)

    @mock.patch('armada.api.ArmadaRequestContext')
    def test_enforce_good_action(self, mock_ctx):

@@ -63,5 +61,5 @@ class PolicyTestCase(testtools.TestCase):
        action = "example:disallowed"
        mock_ctx.to_policy_view.return_value = self.credentials
        self.assertRaises(exc.ActionForbidden, policy._enforce_policy, action,
                          self.target, mock_ctx)
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.

policy_data = """
"admin_required": "role:admin"
"armada:create_endpoints": "rule:admin_required"
@@ -82,8 +82,7 @@ class RealPolicyFixture(fixtures.Fixture):
    def _setUp(self):
        super(RealPolicyFixture, self)._setUp()
        self.policy_dir = self.useFixture(fixtures.TempDir())
        self.policy_file = os.path.join(self.policy_dir.path, 'policy.yaml')
        # Load the fake_policy data and add the missing default rules.
        policy_rules = yaml.safe_load(fake_policy.policy_data)
        self.add_missing_default_rules(policy_rules)

@@ -92,8 +91,10 @@ class RealPolicyFixture(fixtures.Fixture):
        policy_opts.set_defaults(CONF)
        self.useFixture(
            ConfPatcher(
                policy_dirs=[],
                policy_file=self.policy_file,
                group='oslo_policy'))
        armada.common.policy.reset_policy()
        armada.common.policy.setup_policy()

@@ -106,18 +107,16 @@ class RealPolicyFixture(fixtures.Fixture):
        """Validate that the expected and actual policies are equivalent.

        Otherwise an ``AssertionError`` is raised.
        """
        if not (set(self.expected_policy_actions) == set(
                self.actual_policy_actions)):
            error_msg = (
                'The expected policy actions passed to '
                '`self.policy.set_rules` do not match the policy actions '
                'that were actually enforced by Armada. Set of expected '
                'policies %s should be equal to set of actual policies: %s. '
                'There is either a bug with the test or with policy '
                'enforcement in the controller.' %
                (self.expected_policy_actions, self.actual_policy_actions))
            raise AssertionError(error_msg)

    def _install_policy_verification_hook(self):

@@ -152,8 +151,7 @@ class RealPolicyFixture(fixtures.Fixture):
        self.expected_policy_actions = []
        _do_enforce_rbac = armada.common.policy._enforce_policy

        def enforce_policy_and_remember_actual_rules(action, *a, **k):
            self.actual_policy_actions.append(action)
            _do_enforce_rbac(action, *a, **k)
@@ -131,10 +131,10 @@ data:
        timeout: 10
"""

CHART_SOURCES = [('git://github.com/dummy/armada',
                  'chart_1'), ('/tmp/dummy/armada', 'chart_2'),
                 ('/tmp/dummy/armada', 'chart_3'), ('/tmp/dummy/armada',
                                                    'chart_4')]


class ArmadaHandlerTestCase(base.ArmadaTestCase):
@@ -144,110 +144,106 @@ class ArmadaHandlerTestCase(base.ArmadaTestCase):
        expected_config = {
            'armada': {
                'release_prefix':
                    'armada',
                'chart_groups': [{
                    'chart_group': [{
                        'chart': {
                            'dependencies': [],
                            'chart_name': 'test_chart_1',
                            'namespace': 'test',
                            'release': 'test_chart_1',
                            'source': {
                                'location': ('git://github.com/dummy/armada'),
                                'reference': 'master',
                                'subpath': 'chart_1',
                                'type': 'git'
                            },
                            'source_dir': CHART_SOURCES[0],
                            'values': {},
                            'wait': {
                                'timeout': 10
                            }
                        }
                    }, {
                        'chart': {
                            'dependencies': [],
                            'chart_name': 'test_chart_2',
                            'namespace': 'test',
                            'protected': {
                                'continue_processing': True
                            },
                            'release': 'test_chart_2',
                            'source': {
                                'location': '/tmp/dummy/armada',
                                'subpath': 'chart_2',
                                'type': 'local'
                            },
                            'source_dir': CHART_SOURCES[1],
                            'values': {},
                            'wait': {
                                'timeout': 10
                            },
                            'upgrade': {
                                'no_hooks': False,
                                'options': {
                                    'force': True,
                                    'recreate_pods': True
                                }
                            }
                        }
                    }, {
                        'chart': {
                            'dependencies': [],
                            'chart_name': 'test_chart_3',
                            'namespace': 'test',
                            'protected': {
                                'continue_processing': False
                            },
                            'release': 'test_chart_3',
                            'source': {
                                'location': '/tmp/dummy/armada',
                                'subpath': 'chart_3',
                                'type': 'local'
                            },
                            'source_dir': CHART_SOURCES[2],
                            'values': {},
                            'wait': {
                                'timeout': 10
                            },
                            'upgrade': {
                                'no_hooks': False
                            }
                        }
                    }, {
                        'chart': {
                            'dependencies': [],
                            'chart_name': 'test_chart_4',
                            'namespace': 'test',
                            'release': 'test_chart_4',
                            'source': {
                                'location': '/tmp/dummy/armada',
                                'subpath': 'chart_4',
                                'type': 'local'
                            },
                            'source_dir': CHART_SOURCES[3],
                            'values': {},
                            'wait': {
                                'timeout': 10
                            },
                            'upgrade': {
                                'no_hooks': False
                            },
                            'test': True
                        }
                    }],
                    'description':
                        'this is a test',
                    'name':
                        'example-group',
                    'sequenced':
                        False
                }]
            }
        }
@@ -270,12 +266,15 @@ class ArmadaHandlerTestCase(base.ArmadaTestCase):
        self._test_pre_flight_ops(armada_obj)

        mock_tiller.assert_called_once_with(
            tiller_host=None,
            tiller_namespace='kube-system',
            tiller_port=44134,
            dry_run=False)

        mock_source.git_clone.assert_called_once_with(
            'git://github.com/dummy/armada',
            'master',
            auth_method=None,
            proxy_server=None)

    @mock.patch.object(armada, 'source')

@@ -300,7 +299,9 @@ class ArmadaHandlerTestCase(base.ArmadaTestCase):
            mock_source.source_cleanup.assert_called_with(
                CHART_SOURCES[counter][0])

    def _test_sync(self,
                   known_releases,
                   test_success=True,
                   test_failure_to_run=False):
        """Test install functionality from the sync() method."""

@@ -323,14 +324,13 @@ class ArmadaHandlerTestCase(base.ArmadaTestCase):
            m_tiller.list_charts.return_value = known_releases

            if test_failure_to_run:

                def fail(tiller, release, timeout=None):
                    status = AttrDict(
                        **{'info': AttrDict(**{'Description': 'Failed'})})
                    raise tiller_exceptions.ReleaseException(
                        release, status, 'Test')

                mock_test_release_for_success.side_effect = fail
            else:
                mock_test_release_for_success.return_value = test_success

@@ -359,15 +359,13 @@ class ArmadaHandlerTestCase(base.ArmadaTestCase):
                    expected_install_release_calls.append(
                        mock.call(
                            mock_chartbuilder().get_helm_chart(),
                            "{}-{}".format(
                                armada_obj.manifest['armada'][
                                    'release_prefix'], chart['release']),
                            chart['namespace'],
                            values=yaml.safe_dump(chart['values']),
                            wait=this_chart_should_wait,
                            timeout=chart['wait']['timeout']))
                else:
                    target_release = None
                    for known_release in known_releases:

@@ -391,9 +389,7 @@ class ArmadaHandlerTestCase(base.ArmadaTestCase):
                                chart['namespace'],
                                values=yaml.safe_dump(chart['values']),
                                wait=this_chart_should_wait,
                                timeout=chart['wait']['timeout']))
                        else:
                            p_continue = protected.get(
                                'continue_processing', False)

@@ -412,9 +408,8 @@ class ArmadaHandlerTestCase(base.ArmadaTestCase):
                            mock.call(
                                mock_chartbuilder().get_helm_chart(),
                                "{}-{}".format(
                                    armada_obj.manifest['armada']
                                    ['release_prefix'], chart['release']),
                                chart['namespace'],
                                pre_actions={},
                                post_actions={},

@@ -423,21 +418,13 @@ class ArmadaHandlerTestCase(base.ArmadaTestCase):
                                recreate_pods=recreate_pods,
                                values=yaml.safe_dump(chart['values']),
                                wait=this_chart_should_wait,
                                timeout=chart['wait']['timeout']))

                test_this_chart = chart.get(
                    'test', chart_group.get('test_charts', False))

                if test_this_chart:
                    expected_test_release_for_success_calls.append(
                        mock.call(m_tiller, release_name, timeout=mock.ANY))

        # Verify that at least 1 release is either installed or updated.
        self.assertTrue(

@@ -445,26 +432,30 @@ class ArmadaHandlerTestCase(base.ArmadaTestCase):
            len(expected_update_release_calls) >= 1)
        # Verify that the expected number of non-deployed releases are
        # installed with expected arguments.
        self.assertEqual(
            len(expected_install_release_calls),
            m_tiller.install_release.call_count)
        m_tiller.install_release.assert_has_calls(
            expected_install_release_calls)
        # Verify that the expected number of deployed releases are
        # updated with expected arguments.
        self.assertEqual(
            len(expected_update_release_calls),
            m_tiller.update_release.call_count)
        m_tiller.update_release.assert_has_calls(
            expected_update_release_calls)
        # Verify that the expected number of deployed releases are
        # uninstalled with expected arguments.
        self.assertEqual(
            len(expected_uninstall_release_calls),
            m_tiller.uninstall_release.call_count)
        m_tiller.uninstall_release.assert_has_calls(
            expected_uninstall_release_calls)
        # Verify that the expected number of deployed releases are
        # tested with expected arguments.
        self.assertEqual(
            len(expected_test_release_for_success_calls),
            mock_test_release_for_success.call_count)
        mock_test_release_for_success.assert_has_calls(
            expected_test_release_for_success_calls)

@@ -473,8 +464,9 @@ class ArmadaHandlerTestCase(base.ArmadaTestCase):
    def _get_chart_by_name(self, name):
        name = name.split('armada-')[-1]
        yaml_documents = list(yaml.safe_load_all(TEST_YAML))
        return [
            c for c in yaml_documents if c['data'].get('chart_name') == name
        ][0]

    def test_armada_sync_with_no_deployed_releases(self):
        known_releases = []

@@ -483,75 +475,71 @@ class ArmadaHandlerTestCase(base.ArmadaTestCase):
    def test_armada_sync_with_one_deployed_release(self):
        c1 = 'armada-test_chart_1'
        known_releases = [[
            c1, None,
            self._get_chart_by_name(c1), None, const.STATUS_DEPLOYED
        ]]
        self._test_sync(known_releases)

    def test_armada_sync_with_both_deployed_releases(self):
        c1 = 'armada-test_chart_1'
        c2 = 'armada-test_chart_2'
        known_releases = [[
            c1, None,
            self._get_chart_by_name(c1), None, const.STATUS_DEPLOYED
        ], [
            c2, None,
            self._get_chart_by_name(c2), None, const.STATUS_DEPLOYED
        ]]
        self._test_sync(known_releases)

    def test_armada_sync_with_unprotected_releases(self):
        c1 = 'armada-test_chart_1'
        known_releases = [[
            c1, None,
            self._get_chart_by_name(c1), None, const.STATUS_FAILED
        ]]
        self._test_sync(known_releases)

    def test_armada_sync_with_protected_releases_continue(self):
        c1 = 'armada-test_chart_1'
        c2 = 'armada-test_chart_2'
        known_releases = [[
            c2, None,
            self._get_chart_by_name(c2), None, const.STATUS_FAILED
        ], [c1, None,
            self._get_chart_by_name(c1), None, const.STATUS_FAILED]]
        self._test_sync(known_releases)

    def test_armada_sync_with_protected_releases_halt(self):
        c3 = 'armada-test_chart_3'
        known_releases = [[
            c3, None,
            self._get_chart_by_name(c3), None, const.STATUS_FAILED
        ]]

        def _test_method():
            self._test_sync(known_releases)

        self.assertRaises(ProtectedReleaseException, _test_method)

    def test_armada_sync_test_failure(self):
        def _test_method():
            self._test_sync([], test_success=False)

        self.assertRaises(tiller_exceptions.TestFailedException, _test_method)

    def test_armada_sync_test_failure_to_run(self):
        def _test_method():
            self._test_sync([], test_failure_to_run=True)

        self.assertRaises(tiller_exceptions.ReleaseException, _test_method)

    @mock.patch.object(armada.Armada, 'post_flight_ops')
    @mock.patch.object(armada.Armada, 'pre_flight_ops')
@@ -145,9 +145,8 @@ class ChartBuilderTestCase(BaseChartBuilderTestCase):
        test_chart = {'source_dir': (chart_dir.path, '')}
        chartbuilder = ChartBuilder(test_chart)
        self.assertRaises(chartbuilder_exceptions.MetadataLoadException,
                          chartbuilder.get_metadata)

    def test_get_files(self):
        """Validates that ``get_files()`` ignores 'Chart.yaml', 'values.yaml'

@@ -172,11 +171,11 @@ class ChartBuilderTestCase(BaseChartBuilderTestCase):
        test_chart = {'source_dir': (chart_dir.path, '')}
        chartbuilder = ChartBuilder(test_chart)

        expected_files = (
            '[type_url: "%s"\n, type_url: "%s"\n]' % ('./bar', './foo'))

        # Validate that only 'foo' and 'bar' are returned.
        actual_files = sorted(
            chartbuilder.get_files(), key=lambda x: x.type_url)
        self.assertEqual(expected_files, repr(actual_files).strip())

    def test_get_files_with_unicode_characters(self):

@@ -205,8 +204,7 @@ class ChartBuilderTestCase(BaseChartBuilderTestCase):
        chartbuilder = ChartBuilder(test_chart)
        helm_chart = chartbuilder.get_helm_chart()
        expected = inspect.cleandoc("""
            metadata {
              name: "hello-world-chart"
              version: "0.1.0"

@@ -214,8 +212,7 @@ class ChartBuilderTestCase(BaseChartBuilderTestCase):
            }
            values {
            }
        """).strip()

        self.assertIsInstance(helm_chart, Chart)
        self.assertTrue(hasattr(helm_chart, 'metadata'))

@@ -256,8 +253,8 @@ class ChartBuilderTestCase(BaseChartBuilderTestCase):
        # Also create a nested directory and verify that files from it are also
        # added.
        nested_dir = self._make_temporary_subdirectory(chart_dir.path,
                                                       'nested')
        self._write_temporary_file_contents(nested_dir, 'nested0', "random")

        ch = yaml.safe_load(self.chart_stream)['chart']

@@ -303,20 +300,18 @@ class ChartBuilderTestCase(BaseChartBuilderTestCase):
            self._write_temporary_file_contents(chart_dir.path, file, "")

        file_to_ignore = 'file_to_ignore'
        # Files to ignore within templates/ subdirectory.
        self._write_temporary_file_contents(templates_subdir, file_to_ignore,
                                            "")
        # Files to ignore within charts/ subdirectory.
        self._write_temporary_file_contents(charts_subdir, file_to_ignore, "")
        # Files to ignore within templates/bin subdirectory.
        self._write_temporary_file_contents(templates_nested_subdir,
                                            file_to_ignore, "")
        # Files to ignore within charts/extra subdirectory.
        self._write_temporary_file_contents(charts_nested_subdir,
                                            file_to_ignore, "")
        # Files to **include** within charts/ subdirectory.
        self._write_temporary_file_contents(charts_subdir, '.prov', "xyzzy")

        ch = yaml.safe_load(self.chart_stream)['chart']
        ch['source_dir'] = (chart_dir.path, '')
@@ -28,8 +28,8 @@ class ManifestTestCase(testtools.TestCase):
    def setUp(self):
        super(ManifestTestCase, self).setUp()
        examples_dir = os.path.join(os.getcwd(), 'armada', 'tests', 'unit',
                                    'resources')
        with open(os.path.join(examples_dir, 'keystone-manifest.yaml')) as f:
            self.documents = list(yaml.safe_load_all(f.read()))

@@ -139,8 +139,7 @@ class ManifestTestCase(testtools.TestCase):
        keystone_infra_services_chart_group = armada_manifest. \
            find_chart_group_document('keystone-infra-services')
        self.assertEqual(chart_groups[0], keystone_infra_services_chart_group)

        openstack_keystone_chart_group = armada_manifest. \
            find_chart_group_document('openstack-keystone')

@@ -224,8 +223,8 @@ class ManifestTestCase(testtools.TestCase):
        keystone_chart = armada_manifest.find_chart_document('keystone')
        keystone_chart_with_deps = armada_manifest.build_chart_deps(
            keystone_chart)
        keystone_dependencies = keystone_chart_with_deps['data'][
            'dependencies']

        self.assertEqual(openstack_keystone_chart_group_deps_dep_added[0],
                         keystone_dependencies[0])

@@ -243,15 +242,14 @@ class ManifestTestCase(testtools.TestCase):
        mariadb_chart = armada_manifest.find_chart_document('mariadb')
        mariadb_chart_with_deps = armada_manifest.build_chart_deps(
            mariadb_chart)
        mariadb_dependencies = mariadb_chart_with_deps['data']['dependencies']

        # building memcached chart dependencies
        memcached_chart = armada_manifest.find_chart_document('memcached')
        memcached_chart_with_deps = armada_manifest.build_chart_deps(
            memcached_chart)
        memcached_dependencies = memcached_chart_with_deps['data'][
            'dependencies']

        self.assertEqual(keystone_infra_services_dep_added[0],
                         mariadb_dependencies[0])

@@ -275,8 +273,9 @@ class ManifestTestCase(testtools.TestCase):
        # helm-toolkit dependency, the basis for comparison of d
        # ependencies in other charts
        expected_helm_toolkit_dependency = {
            'chart': helm_toolkit_chart.get('data')
        }

        # keystone chart dependencies
        keystone_chart = armada_manifest.find_chart_document('keystone')

@@ -288,8 +287,8 @@ class ManifestTestCase(testtools.TestCase):
        self.assertIn('data', keystone_chart_with_deps)
        self.assertIn('dependencies', keystone_chart_with_deps['data'])

        keystone_dependencies = keystone_chart_with_deps['data'][
            'dependencies']
        self.assertIsInstance(keystone_dependencies, list)
        self.assertEqual(1, len(keystone_dependencies))

@@ -306,8 +305,7 @@ class ManifestTestCase(testtools.TestCase):
        self.assertIn('data', mariadb_chart_with_deps)
        self.assertIn('dependencies', mariadb_chart_with_deps['data'])

        mariadb_dependencies = mariadb_chart_with_deps['data']['dependencies']
        self.assertIsInstance(mariadb_dependencies, list)
        self.assertEqual(1, len(mariadb_dependencies))

@@ -325,8 +323,8 @@ class ManifestTestCase(testtools.TestCase):
        self.assertIn('data', memcached_chart_with_deps)
        self.assertIn('dependencies', memcached_chart_with_deps['data'])

        memcached_dependencies = memcached_chart_with_deps['data'][
            'dependencies']
        self.assertIsInstance(memcached_dependencies, list)
        self.assertEqual(1, len(memcached_dependencies))

@@ -338,8 +336,8 @@ class ManifestNegativeTestCase(testtools.TestCase):
    def setUp(self):
        super(ManifestNegativeTestCase, self).setUp()
        examples_dir = os.path.join(os.getcwd(), 'armada', 'tests', 'unit',
                                    'resources')
        with open(os.path.join(examples_dir, 'keystone-manifest.yaml')) as f:
            self.documents = list(yaml.safe_load_all(f.read()))

@@ -350,9 +348,8 @@ class ManifestNegativeTestCase(testtools.TestCase):
        documents.append(documents[-1])  # Copy the last manifest.

        error_re = r'Multiple manifests are not supported.*'
        self.assertRaisesRegexp(exceptions.ManifestException, error_re,
                                manifest.Manifest, documents)

    def test_get_documents_multi_target_manifests_raises_value_error(self):
        # Validates that finding multiple manifests with `target_manifest`

@@ -362,26 +359,27 @@ class ManifestNegativeTestCase(testtools.TestCase):
        error_re = r'Multiple manifests are not supported.*'
        self.assertRaisesRegexp(
            exceptions.ManifestException,
            error_re,
            manifest.Manifest,
            documents,
            target_manifest='armada-manifest')

    def test_get_documents_missing_manifest(self):
        # Validates exceptions.ManifestException is thrown if no manifest is
        # found. Manifest is last document in sample YAML.
        error_re = ('Documents must be a list of documents with at least one '
                    'of each of the following schemas: .*')
        self.assertRaisesRegexp(exceptions.ManifestException, error_re,
                                manifest.Manifest, self.documents[:-1])

    def test_get_documents_missing_charts(self):
        # Validates exceptions.ManifestException is thrown if no chart is
        # found. Charts are first 4 documents in sample YAML.
        error_re = ('Documents must be a list of documents with at least one '
                    'of each of the following schemas: .*')
        self.assertRaisesRegexp(exceptions.ManifestException, error_re,
                                manifest.Manifest, self.documents[4:])

    def test_get_documents_missing_chart_groups(self):
        # Validates exceptions.ManifestException is thrown if no chart is

@@ -389,21 +387,20 @@ class ManifestNegativeTestCase(testtools.TestCase):
        documents = self.documents[:4] + [self.documents[-1]]
        error_re = ('Documents must be a list of documents with at least one '
                    'of each of the following schemas: .*')
        self.assertRaisesRegexp(exceptions.ManifestException, error_re,
                                manifest.Manifest, documents)

    def test_find_chart_document_negative(self):
        armada_manifest = manifest.Manifest(self.documents)
        error_re = r'Could not find a %s named "%s"' % (const.DOCUMENT_CHART,
                                                        'invalid')
        self.assertRaisesRegexp(exceptions.ManifestException, error_re,
                                armada_manifest.find_chart_document, 'invalid')

    def test_find_group_document_negative(self):
        armada_manifest = manifest.Manifest(self.documents)
        error_re = r'Could not find a %s named "%s"' % (const.DOCUMENT_GROUP,
                                                        'invalid')
        self.assertRaisesRegexp(exceptions.ManifestException, error_re,
                                armada_manifest.find_chart_group_document,
                                'invalid')
@@ -25,6 +25,7 @@ from armada import const


class OverrideTestCase(testtools.TestCase):

    def setUp(self):
        super(OverrideTestCase, self).setUp()
        self.basepath = os.path.join(os.path.dirname(__file__))

@@ -64,8 +65,7 @@ class OverrideTestCase(testtools.TestCase):
            documents_copy = copy.deepcopy(original_documents)
            values_documents = list(yaml.safe_load_all(g.read()))

        override = ('manifest:simple-armada:release_prefix=' 'overridden', )

        # Case 1: Checking if primitive gets updated.
        ovr = Override(original_documents, override, [values_yaml])

@@ -75,15 +75,14 @@ class OverrideTestCase(testtools.TestCase):
        self.assertNotEqual(original_documents, documents_copy)
        # since overrides done, these documents aren't same anymore
        self.assertNotEqual(original_documents, values_documents)
        target_doc = [
            x for x in ovr.documents
            if x.get('metadata').get('name') == 'simple-armada'
        ][0]
        self.assertEqual('overridden', target_doc['data']['release_prefix'])

        override = ('manifest:simple-armada:chart_groups='
                    'blog-group3,blog-group4', )

        # Case 2: Checking if list gets updated.
        ovr = Override(original_documents, override, [values_yaml])

@@ -103,8 +102,7 @@ class OverrideTestCase(testtools.TestCase):
            original_documents = list(yaml.safe_load_all(f.read()))

        original_documents[-1]['data']['test'] = {'foo': 'bar'}
        override = ('manifest:simple-armada:test=' '{"foo": "bar"}', )
        ovr = Override(original_documents, override, [])
        self.assertRaises(json.decoder.JSONDecodeError, ovr.update_manifests)

@@ -283,15 +281,15 @@ class OverrideTestCase(testtools.TestCase):
            documents = list(yaml.safe_load_all(f.read()))

        doc_path = ['manifest', 'simple-armada']
        override = ('manifest:simple-armada:chart_groups=\
                    blog-group3,blog-group4', )
        ovr = Override(documents, override)
        ovr.update_manifests()
        ovr_doc = ovr.find_manifest_document(doc_path)
        target_docs = list(yaml.load_all(e.read()))
        expected_doc = [
            x for x in target_docs
            if x.get('schema') == 'armada/Manifest/v1'
        ][0]
        self.assertEqual(expected_doc.get('data'), ovr_doc.get('data'))

    def test_find_manifest_document_valid(self):

@@ -316,6 +314,7 @@ class OverrideTestCase(testtools.TestCase):


class OverrideNegativeTestCase(testtools.TestCase):

    def setUp(self):
        super(OverrideNegativeTestCase, self).setUp()
        self.basepath = os.path.join(os.path.dirname(__file__))
@@ -30,9 +30,8 @@ class TestHandlerTestCase(base.ArmadaTestCase):
        release = 'release'

        tiller_obj.test_release = mock.Mock()
        tiller_obj.test_release.return_value = AttrDict(
            **{'results': results})

        success = test.test_release_for_success(tiller_obj, release)
        self.assertEqual(expected_success, success)

@@ -44,37 +43,22 @@ class TestHandlerTestCase(base.ArmadaTestCase):
    def test_unknown(self):
        self._test_test_release_for_success(False, [
            AttrDict(**{'status': test.TESTRUN_STATUS_SUCCESS}),
            AttrDict(**{'status': test.TESTRUN_STATUS_UNKNOWN})
        ])

    def test_success(self):
        self._test_test_release_for_success(
            True, [AttrDict(**{'status': test.TESTRUN_STATUS_SUCCESS})])

    def test_failure(self):
        self._test_test_release_for_success(False, [
            AttrDict(**{'status': test.TESTRUN_STATUS_SUCCESS}),
            AttrDict(**{'status': test.TESTRUN_STATUS_FAILURE})
        ])

    def test_running(self):
        self._test_test_release_for_success(False, [
            AttrDict(**{'status': test.TESTRUN_STATUS_SUCCESS}),
            AttrDict(**{'status': test.TESTRUN_STATUS_RUNNING})
        ])
@@ -48,8 +48,12 @@ class TillerTestCase(base.ArmadaTestCase):
        timeout = 3600

        tiller_obj.install_release(
            chart,
            name,
            namespace,
            values=initial_values,
            wait=wait,
            timeout=timeout)

        mock_stub.assert_called_with(tiller_obj.channel)
        release_request = mock_install_request(

@@ -58,12 +62,9 @@ class TillerTestCase(base.ArmadaTestCase):
            release=name,
            namespace=namespace,
            wait=wait,
            timeout=timeout)
        (mock_stub(tiller_obj.channel).InstallRelease.assert_called_with(
            release_request, timeout + 60, metadata=tiller_obj.metadata))

    @mock.patch('armada.handlers.tiller.K8s', autospec=True)
    @mock.patch.object(tiller.Tiller, '_get_tiller_ip', autospec=True)

@@ -83,10 +84,9 @@ class TillerTestCase(base.ArmadaTestCase):
        mock_grpc.insecure_channel.assert_called_once_with(
            '%s:%s' % (str(mock.sentinel.ip), str(mock.sentinel.port)),
            options=[('grpc.max_send_message_length',
                      tiller.MAX_MESSAGE_LENGTH),
                     ('grpc.max_receive_message_length',
                      tiller.MAX_MESSAGE_LENGTH)])

    @mock.patch('armada.handlers.tiller.K8s', autospec=True)
    @mock.patch('armada.handlers.tiller.grpc', autospec=True)

@@ -131,8 +131,7 @@ class TillerTestCase(base.ArmadaTestCase):
    def test_get_tiller_namespace(self, mock_grpc, _, mock_ip):
        # verifies namespace set via instantiation
        tiller_obj = tiller.Tiller(None, None, 'test_namespace2')
        self.assertEqual('test_namespace2', tiller_obj._get_tiller_namespace())

    @mock.patch.object(tiller.Tiller, '_get_tiller_ip', autospec=True)
    @mock.patch('armada.handlers.tiller.K8s', autospec=True)

@@ -179,18 +178,19 @@ class TillerTestCase(base.ArmadaTestCase):
        tiller_obj = tiller.Tiller('host', '8080', None)
        self.assertEqual(['foo', 'bar'], tiller_obj.list_releases())

        mock_release_service_stub.assert_called_once_with(tiller_obj.channel)

        list_releases_stub = mock_release_service_stub.return_value. \
            ListReleases
        list_releases_stub.assert_called_once_with(
            mock_list_releases_request.return_value,
            tiller_obj.timeout,
            metadata=tiller_obj.metadata)

        mock_list_releases_request.assert_called_once_with(
            limit=tiller.RELEASE_LIMIT,
            status_codes=[
                tiller.const.STATUS_DEPLOYED, tiller.const.STATUS_FAILED
            ],
            sort_by='LAST_RELEASED',
            sort_order='DESC')
@@ -199,8 +199,7 @@ class TillerTestCase(base.ArmadaTestCase):
    @mock.patch.object(tiller, 'GetReleaseContentRequest')
    @mock.patch.object(tiller, 'ReleaseServiceStub')
    def test_get_release_content(self, mock_release_service_stub,
                                 mock_release_content_request, mock_grpc, _):
        mock_release_service_stub.return_value.GetReleaseContent\
            .return_value = {}

@@ -210,7 +209,8 @@ class TillerTestCase(base.ArmadaTestCase):
        get_release_content_stub = mock_release_service_stub. \
            return_value.GetReleaseContent
        get_release_content_stub.assert_called_once_with(
            mock_release_content_request.return_value,
            tiller_obj.timeout,
            metadata=tiller_obj.metadata)

    @mock.patch('armada.handlers.tiller.K8s')

@@ -218,8 +218,7 @@ class TillerTestCase(base.ArmadaTestCase):
    @mock.patch.object(tiller, 'GetVersionRequest')
    @mock.patch.object(tiller, 'ReleaseServiceStub')
    def test_tiller_version(self, mock_release_service_stub,
                            mock_version_request, mock_grpc, _):

        mock_version = mock.Mock()
        mock_version.Version.sem_ver = mock.sentinel.sem_ver

@@ -230,12 +229,12 @@ class TillerTestCase(base.ArmadaTestCase):
        self.assertEqual(mock.sentinel.sem_ver, tiller_obj.tiller_version())

        mock_release_service_stub.assert_called_once_with(tiller_obj.channel)

        get_version_stub = mock_release_service_stub.return_value.GetVersion
        get_version_stub.assert_called_once_with(
            mock_version_request.return_value,
            tiller_obj.timeout,
            metadata=tiller_obj.metadata)

    @mock.patch('armada.handlers.tiller.K8s')

@@ -252,12 +251,12 @@ class TillerTestCase(base.ArmadaTestCase):
        tiller_obj = tiller.Tiller('host', '8080', None)
        self.assertEqual({}, tiller_obj.get_release_status('release'))

        mock_release_service_stub.assert_called_once_with(tiller_obj.channel)

        get_release_status_stub = mock_release_service_stub.return_value. \
            GetReleaseStatus
        get_release_status_stub.assert_called_once_with(
            mock_rel_status_request.return_value,
            tiller_obj.timeout,
            metadata=tiller_obj.metadata)

    @mock.patch('armada.handlers.tiller.K8s')

@@ -265,8 +264,7 @@ class TillerTestCase(base.ArmadaTestCase):
    @mock.patch.object(tiller, 'UninstallReleaseRequest')
    @mock.patch.object(tiller, 'ReleaseServiceStub')
    def test_uninstall_release(self, mock_release_service_stub,
                               mock_uninstall_release_request, mock_grpc, _):
        mock_release_service_stub.return_value.UninstallRelease\
            .return_value = {}

@@ -274,13 +272,13 @@ class TillerTestCase(base.ArmadaTestCase):
        self.assertEqual({}, tiller_obj.uninstall_release('release'))

        mock_release_service_stub.assert_called_once_with(tiller_obj.channel)

        uninstall_release_stub = mock_release_service_stub.return_value. \
            UninstallRelease
        uninstall_release_stub.assert_called_once_with(
            mock_uninstall_release_request.return_value,
            tiller_obj.timeout,
            metadata=tiller_obj.metadata)

    @mock.patch('armada.handlers.tiller.K8s')
@ -303,9 +301,14 @@ class TillerTestCase(base.ArmadaTestCase):
recreate_pods = True recreate_pods = True
force = True force = True
self.assertIsNone(tiller_obj.rollback_release( self.assertIsNone(
release, version, wait=wait, timeout=timeout, force=force, tiller_obj.rollback_release(
recreate_pods=recreate_pods)) release,
version,
wait=wait,
timeout=timeout,
force=force,
recreate_pods=recreate_pods))
mock_rollback_release_request.assert_called_once_with( mock_rollback_release_request.assert_called_once_with(
name=release, name=release,
@ -316,14 +319,13 @@ class TillerTestCase(base.ArmadaTestCase):
force=force, force=force,
recreate=recreate_pods) recreate=recreate_pods)
mock_release_service_stub.assert_called_once_with( mock_release_service_stub.assert_called_once_with(tiller_obj.channel)
tiller_obj.channel)
rollback_release_stub = mock_release_service_stub.return_value. \ rollback_release_stub = mock_release_service_stub.return_value. \
RollbackRelease RollbackRelease
rollback_release_stub.assert_called_once_with( rollback_release_stub.assert_called_once_with(
mock_rollback_release_request.return_value, timeout + mock_rollback_release_request.return_value,
tiller.GRPC_EPSILON, timeout + tiller.GRPC_EPSILON,
metadata=tiller_obj.metadata) metadata=tiller_obj.metadata)
     @mock.patch('armada.handlers.tiller.K8s')
@@ -332,8 +334,7 @@ class TillerTestCase(base.ArmadaTestCase):
     @mock.patch.object(tiller, 'UpdateReleaseRequest')
     @mock.patch.object(tiller, 'ReleaseServiceStub')
     def test_update_release(self, mock_release_service_stub,
-                            mock_update_release_request, mock_config,
-                            _, __):
+                            mock_update_release_request, mock_config, _, __):
         release = 'release'
         chart = {}
         namespace = 'namespace'
@@ -377,7 +378,9 @@ class TillerTestCase(base.ArmadaTestCase):
         recreate_pods = True
         result = tiller_obj.update_release(
-            chart, release, namespace,
+            chart,
+            release,
+            namespace,
             pre_actions=pre_actions,
             post_actions=post_actions,
             disable_hooks=disable_hooks,
@@ -404,22 +407,17 @@ class TillerTestCase(base.ArmadaTestCase):
             force=force,
             recreate=recreate_pods)
-        mock_release_service_stub.assert_called_once_with(
-            tiller_obj.channel)
+        mock_release_service_stub.assert_called_once_with(tiller_obj.channel)
         update_release_stub = mock_release_service_stub.return_value. \
             UpdateRelease
         update_release_stub.assert_called_once_with(
-            mock_update_release_request.return_value, timeout +
-            tiller.GRPC_EPSILON,
+            mock_update_release_request.return_value,
+            timeout + tiller.GRPC_EPSILON,
             metadata=tiller_obj.metadata)
-        expected_result = tiller.TillerResult(
-            release,
-            namespace,
-            status,
-            description,
-            version)
+        expected_result = tiller.TillerResult(release, namespace, status,
+                                              description, version)
         self.assertEqual(expected_result, result)
@@ -430,9 +428,8 @@ class TillerTestCase(base.ArmadaTestCase):
         @mock.patch('armada.handlers.tiller.Config')
         @mock.patch.object(tiller, 'TestReleaseRequest')
         @mock.patch.object(tiller, 'ReleaseServiceStub')
-        def do_test(self, mock_release_service_stub,
-                    mock_test_release_request, mock_config,
-                    _, __):
+        def do_test(self, mock_release_service_stub, mock_test_release_request,
+                    mock_config, _, __):
             tiller_obj = tiller.Tiller('host', '8080', None)
             release = 'release'
             test_suite_run = {}
@@ -441,14 +438,18 @@ class TillerTestCase(base.ArmadaTestCase):
                 .return_value = grpc_response_mock
             tiller_obj.get_release_status = mock.Mock()
-            tiller_obj.get_release_status.return_value = AttrDict(**{
-                'info': AttrDict(**{
-                    'status': AttrDict(**{
-                        'last_test_suite_run': test_suite_run
-                    }),
-                    'Description': 'Failed'
-                })
-            })
+            tiller_obj.get_release_status.return_value = AttrDict(
+                **{
+                    'info':
+                    AttrDict(
+                        **{
+                            'status':
+                            AttrDict(**
+                                     {'last_test_suite_run': test_suite_run}),
+                            'Description':
+                            'Failed'
+                        })
+                })
             result = tiller_obj.test_release(release)
@@ -489,7 +490,9 @@ class TillerTestCase(base.ArmadaTestCase):
         ])
     def test_test_release_failure_to_run(self):
+
         class Iterator:
+
             def __iter__(self):
                 return self


@@ -39,23 +39,23 @@ class GitTestCase(base.ArmadaTestCase):
                 as git_file:
             self.assertIn(expected_ref, git_file.read())
-    @testtools.skipUnless(
-        base.is_connected(), 'git clone requires network connectivity.')
+    @testtools.skipUnless(base.is_connected(),
+                          'git clone requires network connectivity.')
     def test_git_clone_good_url(self):
         url = 'https://github.com/openstack/airship-armada'
         git_dir = source.git_clone(url)
         self._validate_git_clone(git_dir)
-    @testtools.skipUnless(
-        base.is_connected(), 'git clone requires network connectivity.')
+    @testtools.skipUnless(base.is_connected(),
+                          'git clone requires network connectivity.')
     def test_git_clone_commit(self):
         url = 'https://github.com/openstack/airship-armada'
         commit = 'cba78d1d03e4910f6ab1691bae633c5bddce893d'
         git_dir = source.git_clone(url, commit)
         self._validate_git_clone(git_dir)
-    @testtools.skipUnless(
-        base.is_connected(), 'git clone requires network connectivity.')
+    @testtools.skipUnless(base.is_connected(),
+                          'git clone requires network connectivity.')
     def test_git_clone_ref(self):
         ref = 'refs/changes/54/457754/73'
         git_dir = source.git_clone(
@@ -63,29 +63,29 @@ class GitTestCase(base.ArmadaTestCase):
         self._validate_git_clone(git_dir, ref)
     @test_utils.attr(type=['negative'])
-    @testtools.skipUnless(
-        base.is_connected(), 'git clone requires network connectivity.')
+    @testtools.skipUnless(base.is_connected(),
+                          'git clone requires network connectivity.')
     def test_git_clone_empty_url(self):
         url = ''
         # error_re = '%s is not a valid git repository.' % url
-        self.assertRaises(source_exceptions.GitException,
-                          source.git_clone, url)
+        self.assertRaises(source_exceptions.GitException, source.git_clone,
+                          url)
     @test_utils.attr(type=['negative'])
-    @testtools.skipUnless(
-        base.is_connected(), 'git clone requires network connectivity.')
+    @testtools.skipUnless(base.is_connected(),
+                          'git clone requires network connectivity.')
     def test_git_clone_bad_url(self):
         url = 'https://github.com/dummy/armada'
-        self.assertRaises(source_exceptions.GitException,
-                          source.git_clone, url)
+        self.assertRaises(source_exceptions.GitException, source.git_clone,
+                          url)
     # TODO need to design a positive proxy test,
     # difficult to achieve behind a corporate proxy
     @test_utils.attr(type=['negative'])
-    @testtools.skipUnless(
-        base.is_connected(), 'git clone requires network connectivity.')
+    @testtools.skipUnless(base.is_connected(),
+                          'git clone requires network connectivity.')
     def test_git_clone_fake_proxy(self):
         url = 'https://github.com/openstack/airship-armada'
         proxy_url = test_utils.rand_name(
@@ -94,7 +94,8 @@ class GitTestCase(base.ArmadaTestCase):
         self.assertRaises(
             source_exceptions.GitProxyException,
-            source.git_clone, url,
+            source.git_clone,
+            url,
             proxy_server=proxy_url)
     @mock.patch('armada.utils.source.tempfile')
@@ -146,8 +147,8 @@ class GitTestCase(base.ArmadaTestCase):
         mock_tarfile.open.assert_not_called()
         mock_tarfile.extractall.assert_not_called()
-    @testtools.skipUnless(
-        base.is_connected(), 'git clone requires network connectivity.')
+    @testtools.skipUnless(base.is_connected(),
+                          'git clone requires network connectivity.')
     @mock.patch.object(source, 'LOG')
     def test_source_cleanup(self, mock_log):
         url = 'https://github.com/openstack/airship-armada'
@@ -190,28 +191,34 @@ class GitTestCase(base.ArmadaTestCase):
             ('Could not delete the path %s. Is it a git repository?', path),
             actual_call)
-    @testtools.skipUnless(
-        base.is_connected(), 'git clone requires network connectivity.')
+    @testtools.skipUnless(base.is_connected(),
+                          'git clone requires network connectivity.')
     @test_utils.attr(type=['negative'])
     @mock.patch.object(source, 'os')
     def test_git_clone_ssh_auth_method_fails_auth(self, mock_os):
         mock_os.path.exists.return_value = True
         fake_user = test_utils.rand_name('fake_user')
-        url = ('ssh://%s@review.openstack.org:29418/openstack/airship-armada'
-               % fake_user)
+        url = ('ssh://%s@review.openstack.org:29418/openstack/airship-armada' %
+               fake_user)
         self.assertRaises(
-            source_exceptions.GitAuthException, source.git_clone, url,
-            ref='refs/changes/17/388517/5', auth_method='SSH')
+            source_exceptions.GitAuthException,
+            source.git_clone,
+            url,
+            ref='refs/changes/17/388517/5',
+            auth_method='SSH')
-    @testtools.skipUnless(
-        base.is_connected(), 'git clone requires network connectivity.')
+    @testtools.skipUnless(base.is_connected(),
+                          'git clone requires network connectivity.')
     @test_utils.attr(type=['negative'])
     @mock.patch.object(source, 'os')
     def test_git_clone_ssh_auth_method_missing_ssh_key(self, mock_os):
         mock_os.path.exists.return_value = False
         fake_user = test_utils.rand_name('fake_user')
-        url = ('ssh://%s@review.openstack.org:29418/openstack/airship-armada'
-               % fake_user)
+        url = ('ssh://%s@review.openstack.org:29418/openstack/airship-armada' %
+               fake_user)
         self.assertRaises(
-            source_exceptions.GitSSHException, source.git_clone, url,
-            ref='refs/changes/17/388517/5', auth_method='SSH')
+            source_exceptions.GitSSHException,
+            source.git_clone,
+            url,
+            ref='refs/changes/17/388517/5',
+            auth_method='SSH')


@@ -20,7 +20,6 @@ import testtools
 from armada.tests.unit import base
 from armada.utils import validate
 template_chart = """
 schema: armada/Chart/v1
 metadata:
@@ -114,9 +113,7 @@ class ValidateTestCase(BaseValidateTest):
     def test_validate_load_schemas(self):
         expected_schemas = [
-            'armada/Chart/v1',
-            'armada/ChartGroup/v1',
-            'armada/Manifest/v1'
+            'armada/Chart/v1', 'armada/ChartGroup/v1', 'armada/Manifest/v1'
         ]
         for expected_schema in expected_schemas:
             self.assertIn(expected_schema, validate.SCHEMAS)
@@ -232,9 +229,8 @@ class ValidateNegativeTestCase(BaseValidateTest):
         import, and importing the schemas again in manually results in
         duplicates.
         """
-        with self.assertRaisesRegexp(
-                RuntimeError,
-                'Duplicate schema specified for: .*'):
+        with self.assertRaisesRegexp(RuntimeError,
+                                     'Duplicate schema specified for: .*'):
             validate._load_schemas()
     def test_validate_no_dictionary_expect_type_error(self):
@@ -251,13 +247,13 @@ class ValidateNegativeTestCase(BaseValidateTest):
             documents = list(yaml.safe_load_all(f.read()))
         mariadb_document = [
-            d for d in documents if d['metadata']['name'] == 'mariadb'][0]
+            d for d in documents if d['metadata']['name'] == 'mariadb'
+        ][0]
         del mariadb_document['data']['release']
         _, error_messages = validate.validate_armada_documents(documents)
         expected_error = self._build_error_message(
-            'armada/Chart/v1', 'mariadb',
-            "'release' is a required property")
+            'armada/Chart/v1', 'mariadb', "'release' is a required property")
         self.assertEqual(1, len(error_messages))
         self.assertEqual(expected_error, error_messages[0]['message'])


@@ -26,5 +26,4 @@ def label_selectors(labels):
     :return: string of k8s labels
     """
-    return ",".join(
-        ["%s=%s" % (k, v) for k, v in labels.items()])
+    return ",".join(["%s=%s" % (k, v) for k, v in labels.items()])


@@ -61,28 +61,31 @@ def git_clone(repo_url, ref='master', proxy_server=None, auth_method=None):
     ssh_cmd = None
     if auth_method and auth_method.lower() == 'ssh':
-        LOG.debug('Attempting to clone the repo at %s using reference %s '
-                  'with SSH authentication.', repo_url, ref)
+        LOG.debug(
+            'Attempting to clone the repo at %s using reference %s '
+            'with SSH authentication.', repo_url, ref)
         if not os.path.exists(CONF.ssh_key_path):
             LOG.error('SSH auth method was specified for cloning repo but '
                       'the SSH key under CONF.ssh_key_path was not found.')
             raise source_exceptions.GitSSHException(CONF.ssh_key_path)
-        ssh_cmd = (
-            'ssh -i {} -o ConnectionAttempts=20 -o ConnectTimeout=10'
-            .format(os.path.expanduser(CONF.ssh_key_path))
-        )
+        ssh_cmd = ('ssh -i {} -o ConnectionAttempts=20 -o ConnectTimeout=10'
                   .format(os.path.expanduser(CONF.ssh_key_path)))
         env_vars.update({'GIT_SSH_COMMAND': ssh_cmd})
     else:
-        LOG.debug('Attempting to clone the repo at %s using reference %s '
-                  'with no authentication.', repo_url, ref)
+        LOG.debug(
+            'Attempting to clone the repo at %s using reference %s '
+            'with no authentication.', repo_url, ref)
     try:
         if proxy_server:
             LOG.debug('Cloning [%s] with proxy [%s]', repo_url, proxy_server)
-            repo = Repo.clone_from(repo_url, temp_dir, env=env_vars,
-                                   config='http.proxy=%s' % proxy_server)
+            repo = Repo.clone_from(
+                repo_url,
+                temp_dir,
+                env=env_vars,
+                config='http.proxy=%s' % proxy_server)
         else:
             LOG.debug('Cloning [%s]', repo_url)
             repo = Repo.clone_from(repo_url, temp_dir, env=env_vars)
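For context on the reflowed ssh_cmd above: the string is handed to git through the GIT_SSH_COMMAND environment variable placed in env_vars and passed to Repo.clone_from, so the clone effectively runs ssh with the ConnectionAttempts and ConnectTimeout options shown, using the key from CONF.ssh_key_path (expanded with os.path.expanduser) rather than any hard-coded path.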


@@ -72,10 +72,8 @@ def _validate_armada_manifest(manifest):
     try:
         armada_object = manifest.get_manifest().get('armada')
     except ManifestException as me:
-        vmsg = ValidationMessage(message=str(me),
-                                 error=True,
-                                 name='ARM001',
-                                 level='Error')
+        vmsg = ValidationMessage(
+            message=str(me), error=True, name='ARM001', level='Error')
         LOG.error('ValidationMessage: %s', vmsg.get_output_json())
         details.append(vmsg.get_output())
         return False, details
@@ -85,10 +83,8 @@ def _validate_armada_manifest(manifest):
     if not isinstance(groups, list):
         message = '{} entry is of wrong type: {} (expected: {})'.format(
             KEYWORD_GROUPS, type(groups), 'list')
-        vmsg = ValidationMessage(message=message,
-                                 error=True,
-                                 name='ARM101',
-                                 level='Error')
+        vmsg = ValidationMessage(
+            message=message, error=True, name='ARM101', level='Error')
         LOG.info('ValidationMessage: %s', vmsg.get_output_json())
         details.append(vmsg.get_output())
@@ -98,10 +94,8 @@ def _validate_armada_manifest(manifest):
             if KEYWORD_RELEASE not in chart_obj:
                 message = 'Could not find {} keyword in {}'.format(
                     KEYWORD_RELEASE, chart_obj.get('release'))
-                vmsg = ValidationMessage(message=message,
-                                         error=True,
-                                         name='ARM102',
-                                         level='Error')
+                vmsg = ValidationMessage(
+                    message=message, error=True, name='ARM102', level='Error')
                 LOG.info('ValidationMessage: %s', vmsg.get_output_json())
                 details.append(vmsg.get_output())
@@ -147,8 +141,8 @@ def validate_armada_document(document):
     """
     if not isinstance(document, dict):
-        raise TypeError('The provided input "%s" must be a dictionary.'
-                        % document)
+        raise TypeError(
+            'The provided input "%s" must be a dictionary.' % document)
     schema = document.get('schema', '<missing>')
     document_name = document.get('metadata', {}).get('name', None)
@@ -161,34 +155,36 @@ def validate_armada_document(document):
             for error in validator.iter_errors(document.get('data')):
                 error_message = "Invalid document [%s] %s: %s." % \
                     (schema, document_name, error.message)
-                vmsg = ValidationMessage(message=error_message,
-                                         error=True,
-                                         name='ARM100',
-                                         level='Error',
-                                         schema=schema,
-                                         doc_name=document_name)
+                vmsg = ValidationMessage(
+                    message=error_message,
+                    error=True,
+                    name='ARM100',
+                    level='Error',
+                    schema=schema,
+                    doc_name=document_name)
                 LOG.info('ValidationMessage: %s', vmsg.get_output_json())
                 details.append(vmsg.get_output())
         except jsonschema.SchemaError as e:
             error_message = ('The built-in Armada JSON schema %s is invalid. '
                              'Details: %s.' % (e.schema, e.message))
-            vmsg = ValidationMessage(message=error_message,
-                                     error=True,
-                                     name='ARM000',
-                                     level='Error',
-                                     diagnostic='Armada is misconfigured.')
+            vmsg = ValidationMessage(
+                message=error_message,
+                error=True,
+                name='ARM000',
+                level='Error',
+                diagnostic='Armada is misconfigured.')
             LOG.error('ValidationMessage: %s', vmsg.get_output_json())
             details.append(vmsg.get_output())
     else:
-        vmsg = ValidationMessage(message='Unsupported document type.',
-                                 error=False,
-                                 name='ARM002',
-                                 level='Warning',
-                                 schema=schema,
-                                 doc_name=document_name,
-                                 diagnostic='Please ensure document is one of '
-                                 'the following schema types: %s' %
-                                 list(SCHEMAS.keys()))
+        vmsg = ValidationMessage(
+            message='Unsupported document type.',
+            error=False,
+            name='ARM002',
+            level='Warning',
+            schema=schema,
+            doc_name=document_name,
+            diagnostic='Please ensure document is one of '
+            'the following schema types: %s' % list(SCHEMAS.keys()))
         LOG.info('Unsupported document type, ignoring %s.', schema)
         LOG.debug('ValidationMessage: %s', vmsg.get_output_json())
         # Validation API doesn't care about this type of message, don't send


@@ -52,3 +52,9 @@ universal = 1
 [nosetests]
 verbosity=3
 with-doctest=1
+
+[yapf]
+based_on_style = pep8
+blank_line_before_nested_class_or_def = true
+blank_line_before_module_docstring = true
+split_before_logical_operator = false
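yapf reads this [yapf] section of setup.cfg automatically when run from the repository root, so the tox targets below need no explicit --style argument. As a rough, illustrative sketch of what split_before_logical_operator = false changes (invented condition names, not code from this repo), wrapped boolean expressions keep the operator at the end of the line:

    # split_before_logical_operator = true would break before the operator:
    if (first_condition_is_satisfied
            and second_condition_is_satisfied):
        do_something()

    # with split_before_logical_operator = false, the break lands after it:
    if (first_condition_is_satisfied and
            second_condition_is_satisfied):
        do_something()

blank_line_before_nested_class_or_def = true appears to be what inserts the extra blank lines before nested definitions, for example around the Iterator helper in the tiller tests above.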


@@ -5,6 +5,4 @@ try:
 except ImportError:
     pass

-setuptools.setup(
-    setup_requires=['pbr>=2.0.0'],
-    pbr=True)
+setuptools.setup(setup_requires=['pbr>=2.0.0'], pbr=True)


@@ -13,3 +13,4 @@ os-testr>=1.0.0 # Apache-2.0
 flake8>=3.3.0
 mock
 responses>=0.8.1
+yapf

tox.ini

@@ -45,15 +45,17 @@ commands =
 basepython = python3
 deps = -r{toxinidir}/doc/requirements.txt
 commands =
     rm -rf releasenotes/build
     sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html
 [testenv:pep8]
 basepython = python3
 deps =
+    yapf
     .[bandit]
     {[testenv]deps}
 commands =
+    yapf -dr {toxinidir}/armada {toxinidir}/setup.py
     flake8 {posargs}
     # Run security linter as part of the pep8 gate instead of a separate zuul job.
     bandit -r armada -x armada/tests -n 5
@@ -68,13 +70,18 @@ basepython = python3
 setenv = {[testenv]setenv}
          PYTHON=coverage run --source armada --parallel-mode
 commands =
     coverage erase
     find . -type f -name "*.pyc" -delete
     stestr run {posargs}
     coverage combine
     coverage html -d cover
     coverage xml -o cover/coverage.xml
     coverage report
+
+[testenv:yapf]
+basepython = python3
+commands =
+    yapf -ir {toxinidir}/armada {toxinidir}/setup.py
 [flake8]
 filename = *.py
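With these targets in place, tox -e pep8 runs yapf -dr over armada/ and setup.py ahead of flake8 and bandit, so formatting drift surfaces as a diff in the lint gate, while the new tox -e yapf target uses -ir to rewrite the same files in place. A reasonable local workflow, assuming a standard tox setup, is to run tox -e yapf to reformat and then tox -e pep8 to confirm the tree is clean.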