Standardize Armada code with YAPF

Recently merged documentation updates [0] call for standardizing the
Python codebase across Airship projects.  This change applies that
standard to the Armada project.

[0] https://review.opendev.org/#/c/671291/

Change-Id: I4fe916d6e330618ea3a1fccfa4bdfdfabb9ffcb2
HUGHES, ALEXANDER (ah8742) 2019-07-24 14:27:20 -05:00 committed by Alexander Hughes
parent d404e3c034
commit b787c418e3
77 changed files with 1298 additions and 1182 deletions

.style.yapf Normal file
View File

@@ -0,0 +1,10 @@
[style]
based_on_style = pep8
spaces_before_comment = 2
column_limit = 79
blank_line_before_nested_class_or_def = false
blank_line_before_module_docstring = true
split_before_logical_operator = true
split_before_first_argument = true
allow_split_before_dict_value = false
split_before_arithmetic_operator = true
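
As a side note (not part of the commit), the effect of this style file can be
checked programmatically through YAPF's FormatCode API. A minimal sketch,
assuming yapf is installed and the .style.yapf above sits in the current
working directory:

# Minimal sketch (not part of this change): run YAPF with the style file above.
# Assumes `pip install yapf` and that .style.yapf is in the working directory.
from yapf.yapflib.yapf_api import FormatCode

source = "def f( a,b ):\n    return {'type':'error','message':a,'retry':b}\n"

formatted, changed = FormatCode(source, style_config='.style.yapf')
print(changed)    # True when YAPF had to rewrite the input
print(formatted)  # result wrapped at column_limit = 79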

View File

@@ -163,6 +163,11 @@ test-coverage: check-tox
 test-bandit: check-tox
 	tox -e bandit
 
+# Perform auto formatting
+.PHONY: format
+format:
+	tox -e fmt
+
 # style checks
 .PHONY: lint
 lint: test-pep8 helm_lint
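
The new format target simply defers to a fmt tox environment that is not shown
in these hunks; presumably it runs YAPF in place over the source tree. A rough,
hypothetical equivalent invoked from Python (the real definition lives in
tox.ini, and the armada/ target directory is an assumption):

# Rough, hypothetical equivalent of `make format` / `tox -e fmt`; the actual
# fmt environment is defined in tox.ini, which is not part of these hunks.
import subprocess

subprocess.run(
    ["yapf", "--in-place", "--recursive", "--style", ".style.yapf", "armada"],
    check=True)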

View File

@@ -15,13 +15,12 @@
 import json
 import logging as log
 import uuid
-import yaml
-from oslo_utils import excutils
 import falcon
 from oslo_config import cfg
 from oslo_log import log as logging
+from oslo_utils import excutils
+import yaml
 
 from armada.handlers.tiller import Tiller
@@ -31,7 +30,6 @@ HEALTH_PATH = 'health'
 
 class BaseResource(object):
-
     def __init__(self):
         self.logger = logging.getLogger(__name__)
@@ -80,11 +78,12 @@ class BaseResource(object):
             raise Exception("%s: Invalid JSON in body: %s" % (req.path, jex))
 
     def return_error(self, resp, status_code, message="", retry=False):
-        resp.body = json.dumps({
-            'type': 'error',
-            'message': message,
-            'retry': retry
-        })
+        resp.body = json.dumps(
+            {
+                'type': 'error',
+                'message': message,
+                'retry': retry
+            })
         resp.status = status_code
 
     def log_error(self, ctx, level, msg):
@@ -122,7 +121,6 @@ class BaseResource(object):
 
 class ArmadaRequestContext(object):
-
     def __init__(self):
         self.log_level = 'ERROR'
         self.user = None  # Username
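
The json.dumps hunk above is a good illustration of split_before_first_argument
= true combined with allow_split_before_dict_value = false: the call now breaks
right after the opening parenthesis, and each 'key': value pair stays on a
single line. A small sketch reproducing the idea (assumes yapf is installed;
exact output can differ between yapf releases):

# Sketch of the dict-splitting behaviour shown in the hunk above.
# Assumes yapf is installed; exact output may vary between yapf releases.
from yapf.yapflib.yapf_api import FormatCode

source = (
    "def return_error(resp, status_code, message='', retry=False):\n"
    "    resp.body = json.dumps({'type': 'error', 'message': message,"
    " 'retry': retry, 'status_code': status_code})\n"
)

formatted, _ = FormatCode(source, style_config='.style.yapf')
print(formatted)  # the long dict argument is wrapped onto its own block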

View File

@@ -13,9 +13,9 @@
 # limitations under the License.
 
 import json
-import yaml
 
 import falcon
+import yaml
 
 from armada import api
 from armada.common import policy
@@ -60,8 +60,8 @@ class Apply(api.BaseResource):
                     documents, overrides=req_body.get('overrides'))
                 documents = overrides.update_manifests()
             else:
-                self.error(req.context,
-                           "Unknown content-type %s" % req.content_type)
+                self.error(
+                    req.context, "Unknown content-type %s" % req.content_type)
                 # TODO(fmontei): Use falcon.<Relevant API Exception Class> instead.
                 return self.return_error(
                     resp,

View File

@@ -13,10 +13,10 @@
 # limitations under the License.
 
 import json
-import yaml
 
 import falcon
 from oslo_config import cfg
+import yaml
 
 from armada import api
 from armada.common import policy
@@ -165,8 +165,8 @@ class TestReleasesManifestController(api.BaseResource):
                     else:
                         message['test']['failed'].append(release_name)
                 else:
-                    self.logger.info('Release %s not found - SKIPPING',
-                                     release_name)
+                    self.logger.info(
+                        'Release %s not found - SKIPPING', release_name)
                     message['test']['skipped'].append(release_name)
 
         resp.status = falcon.HTTP_200

View File

@@ -26,7 +26,6 @@ LOG = logging.getLogger(__name__)
 
 class Status(api.BaseResource):
-
     @policy.enforce('tiller:get_status')
     def on_get(self, req, resp):
         '''
@@ -45,9 +44,10 @@ class Status(api.BaseResource):
             self.return_error(resp, falcon.HTTP_500, message=err_message)
 
     def handle(self, tiller):
-        LOG.debug('Tiller (Status) at: %s:%s, namespace=%s, '
-                  'timeout=%s', tiller.tiller_host, tiller.tiller_port,
-                  tiller.tiller_namespace, tiller.timeout)
+        LOG.debug(
+            'Tiller (Status) at: %s:%s, namespace=%s, '
+            'timeout=%s', tiller.tiller_host, tiller.tiller_port,
+            tiller.tiller_namespace, tiller.timeout)
 
         message = {
             'tiller': {
@@ -59,7 +59,6 @@ class Status(api.BaseResource):
 
 class Release(api.BaseResource):
-
     @policy.enforce('tiller:get_release')
     def on_get(self, req, resp):
         '''Controller for listing Tiller releases.
@@ -79,9 +78,10 @@ class Release(api.BaseResource):
             self.return_error(resp, falcon.HTTP_500, message=err_message)
 
     def handle(self, tiller):
-        LOG.debug('Tiller (Release) at: %s:%s, namespace=%s, '
-                  'timeout=%s', tiller.tiller_host, tiller.tiller_port,
-                  tiller.tiller_namespace, tiller.timeout)
+        LOG.debug(
+            'Tiller (Release) at: %s:%s, namespace=%s, '
+            'timeout=%s', tiller.tiller_host, tiller.tiller_port,
+            tiller.tiller_namespace, tiller.timeout)
 
         releases = {}
         for release in tiller.list_releases():

View File

@@ -13,6 +13,7 @@
 # limitations under the License.
 
 import json
+
 import falcon
 import yaml
@@ -33,8 +34,9 @@ class Validate(api.BaseResource):
         self.logger.debug("Validating manifest based on reference.")
         json_body = self.req_json(req)
         if json_body.get('href', None):
-            self.logger.debug("Validating manifest from reference %s."
-                              % json_body.get('href'))
+            self.logger.debug(
+                "Validating manifest from reference %s."
+                % json_body.get('href'))
             data = ReferenceResolver.resolve_reference(
                 json_body.get('href'))
             documents = list()

View File

@@ -13,19 +13,17 @@
 # limitations under the License.
 
 import re
-from armada.api import HEALTH_PATH
 from uuid import UUID
 
 from oslo_config import cfg
 from oslo_log import log as logging
 
+from armada.api import HEALTH_PATH
 
 CONF = cfg.CONF
 
 
 class AuthMiddleware(object):
-
     def __init__(self):
         self.logger = logging.getLogger(__name__)
@@ -76,7 +74,6 @@ class AuthMiddleware(object):
 
 class ContextMiddleware(object):
-
     def process_request(self, req, resp):
         ctx = req.context
@@ -103,7 +100,6 @@ class ContextMiddleware(object):
 
 class LoggingMiddleware(object):
-
     def __init__(self):
         self.logger = logging.getLogger(__name__)

View File

@@ -79,8 +79,8 @@ def create(enable_middleware=CONF.middleware):
 
     # Error handlers (FILO handling)
     api.add_error_handler(Exception, exceptions.default_exception_handler)
-    api.add_error_handler(exceptions.ArmadaAPIException,
-                          exceptions.ArmadaAPIException.handle)
+    api.add_error_handler(
+        exceptions.ArmadaAPIException, exceptions.ArmadaAPIException.handle)
     # Built-in error serializer
     api.set_error_serializer(exceptions.default_error_serializer)

View File

@@ -28,7 +28,6 @@ LOG = logging.getLogger(__name__)
 
 class CliAction(object):
-
     def __init__(self):
         self.logger = LOG
         logging.set_defaults(default_log_levels=CONF.default_log_levels)

View File

@@ -12,10 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import yaml
 import click
 from oslo_config import cfg
+import yaml
 
 from armada.cli import CliAction
 from armada.exceptions.source_exceptions import InvalidPathException
@@ -86,11 +85,12 @@ SHORT_DESC = "Command installs manifest charts."
     '--use-doc-ref', help="Use armada manifest file reference.", is_flag=True)
 @click.option(
     '--set',
-    help=("Use to override Armada Manifest values. Accepts "
-          "overrides that adhere to the format "
-          "<path>:<to>:<property>=<value> to specify a primitive or "
-          "<path>:<to>:<property>=<value1>,...,<valueN> to specify "
-          "a list of values."),
+    help=(
+        "Use to override Armada Manifest values. Accepts "
+        "overrides that adhere to the format "
+        "<path>:<to>:<property>=<value> to specify a primitive or "
+        "<path>:<to>:<property>=<value1>,...,<valueN> to specify "
+        "a list of values."),
     multiple=True,
     type=str,
     default=[])
@@ -111,42 +111,47 @@ SHORT_DESC = "Command installs manifest charts."
 @click.option(
     '--values',
     '-f',
-    help=("Use to override multiple Armada Manifest values by "
-          "reading overrides from a values.yaml-type file."),
+    help=(
+        "Use to override multiple Armada Manifest values by "
+        "reading overrides from a values.yaml-type file."),
     multiple=True,
     type=str,
     default=[])
 @click.option(
     '--wait',
-    help=("Force Tiller to wait until all charts are deployed, "
-          "rather than using each chart's specified wait policy. "
-          "This is equivalent to sequenced chartgroups."),
+    help=(
+        "Force Tiller to wait until all charts are deployed, "
+        "rather than using each chart's specified wait policy. "
+        "This is equivalent to sequenced chartgroups."),
     is_flag=True)
 @click.option(
     '--target-manifest',
-    help=("The target manifest to run. Required for specifying "
-          "which manifest to run when multiple are available."),
+    help=(
+        "The target manifest to run. Required for specifying "
+        "which manifest to run when multiple are available."),
     default=None)
 @click.option('--bearer-token', help="User Bearer token", default=None)
 @click.option('--debug', help="Enable debug logging.", is_flag=True)
 @click.pass_context
-def apply_create(ctx, locations, api, disable_update_post, disable_update_pre,
-                 dry_run, enable_chart_cleanup, use_doc_ref, set, tiller_host,
-                 tiller_port, tiller_namespace, timeout, values, wait,
-                 target_manifest, bearer_token, debug):
+def apply_create(
+        ctx, locations, api, disable_update_post, disable_update_pre, dry_run,
+        enable_chart_cleanup, use_doc_ref, set, tiller_host, tiller_port,
+        tiller_namespace, timeout, values, wait, target_manifest, bearer_token,
+        debug):
     CONF.debug = debug
-    ApplyManifest(ctx, locations, api, disable_update_post, disable_update_pre,
-                  dry_run, enable_chart_cleanup, use_doc_ref, set, tiller_host,
-                  tiller_port, tiller_namespace, timeout, values, wait,
-                  target_manifest, bearer_token).safe_invoke()
+    ApplyManifest(
+        ctx, locations, api, disable_update_post, disable_update_pre, dry_run,
+        enable_chart_cleanup, use_doc_ref, set, tiller_host, tiller_port,
+        tiller_namespace, timeout, values, wait, target_manifest,
+        bearer_token).safe_invoke()
 
 
 class ApplyManifest(CliAction):
-
-    def __init__(self, ctx, locations, api, disable_update_post,
-                 disable_update_pre, dry_run, enable_chart_cleanup,
-                 use_doc_ref, set, tiller_host, tiller_port, tiller_namespace,
-                 timeout, values, wait, target_manifest, bearer_token):
+    def __init__(
+            self, ctx, locations, api, disable_update_post, disable_update_pre,
+            dry_run, enable_chart_cleanup, use_doc_ref, set, tiller_host,
+            tiller_port, tiller_namespace, timeout, values, wait,
+            target_manifest, bearer_token):
         super(ApplyManifest, self).__init__()
         self.ctx = ctx
         # Filename can also be a URL reference
@@ -199,12 +204,11 @@ class ApplyManifest(CliAction):
             return
 
         if not self.ctx.obj.get('api', False):
-            with Tiller(
-                    tiller_host=self.tiller_host,
-                    tiller_port=self.tiller_port,
-                    tiller_namespace=self.tiller_namespace,
-                    bearer_token=self.bearer_token,
-                    dry_run=self.dry_run) as tiller:
+            with Tiller(tiller_host=self.tiller_host,
+                        tiller_port=self.tiller_port,
+                        tiller_namespace=self.tiller_namespace,
+                        bearer_token=self.bearer_token,
+                        dry_run=self.dry_run) as tiller:
                 resp = self.handle(documents, tiller)
                 self.output(resp)
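
The def apply_create(...) and ApplyManifest(...) rewrites above come from
split_before_first_argument = true: when a signature or call no longer fits
within 79 columns, YAPF breaks immediately after the opening parenthesis and
indents the whole argument list rather than aligning continuation lines with
the parenthesis. A small sketch of the same idea (assumes yapf is installed;
argument names are copied from the hunk above and trimmed for brevity):

# Sketch of the signature splitting applied throughout the CLI modules.
# Assumes yapf is installed; names are copied from the hunk above, trimmed.
from yapf.yapflib.yapf_api import FormatCode

source = (
    "def apply_create(ctx, locations, api, disable_update_post,"
    " disable_update_pre, dry_run, enable_chart_cleanup, use_doc_ref,"
    " tiller_host, tiller_port, tiller_namespace, timeout, values, wait):\n"
    "    pass\n"
)

formatted, _ = FormatCode(source, style_config='.style.yapf')
print(formatted)  # arguments move onto indented lines after the '('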

View File

@@ -12,10 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import yaml
 import click
 from oslo_config import cfg
+import yaml
 
 from armada.cli import CliAction
 from armada import const
@@ -70,17 +69,19 @@ SHORT_DESC = "Command deletes releases."
 @click.option('--bearer-token', help="User Bearer token.", default=None)
 @click.option('--debug', help="Enable debug logging.", is_flag=True)
 @click.pass_context
-def delete_charts(ctx, manifest, releases, no_purge, tiller_host, tiller_port,
-                  bearer_token, debug):
+def delete_charts(
+        ctx, manifest, releases, no_purge, tiller_host, tiller_port,
+        bearer_token, debug):
     CONF.debug = debug
-    DeleteChartManifest(ctx, manifest, releases, no_purge, tiller_host,
-                        tiller_port, bearer_token).safe_invoke()
+    DeleteChartManifest(
+        ctx, manifest, releases, no_purge, tiller_host, tiller_port,
+        bearer_token).safe_invoke()
 
 
 class DeleteChartManifest(CliAction):
-
-    def __init__(self, ctx, manifest, releases, no_purge, tiller_host,
-                 tiller_port, bearer_token):
+    def __init__(
+            self, ctx, manifest, releases, no_purge, tiller_host, tiller_port,
+            bearer_token):
         super(DeleteChartManifest, self).__init__()
         self.ctx = ctx
@@ -92,10 +93,8 @@ class DeleteChartManifest(CliAction):
         self.bearer_token = bearer_token
 
     def invoke(self):
-        with Tiller(
-                tiller_host=self.tiller_host,
-                tiller_port=self.tiller_port,
-                bearer_token=self.bearer_token) as tiller:
+        with Tiller(tiller_host=self.tiller_host, tiller_port=self.tiller_port,
+                    bearer_token=self.bearer_token) as tiller:
             self.handle(tiller)
 
     @lock_and_thread()

View File

@@ -81,20 +81,22 @@ SHORT_DESC = "Command performs a release rollback."
 @click.option('--bearer-token', help=("User bearer token."), default=None)
 @click.option('--debug', help="Enable debug logging.", is_flag=True)
 @click.pass_context
-def rollback_charts(ctx, release, version, dry_run, tiller_host, tiller_port,
-                    tiller_namespace, timeout, wait, force, recreate_pods,
-                    bearer_token, debug):
+def rollback_charts(
+        ctx, release, version, dry_run, tiller_host, tiller_port,
+        tiller_namespace, timeout, wait, force, recreate_pods, bearer_token,
+        debug):
     CONF.debug = debug
-    Rollback(ctx, release, version, dry_run, tiller_host, tiller_port,
-             tiller_namespace, timeout, wait, force, recreate_pods,
-             bearer_token).safe_invoke()
+    Rollback(
+        ctx, release, version, dry_run, tiller_host, tiller_port,
+        tiller_namespace, timeout, wait, force, recreate_pods,
+        bearer_token).safe_invoke()
 
 
 class Rollback(CliAction):
-
-    def __init__(self, ctx, release, version, dry_run, tiller_host,
-                 tiller_port, tiller_namespace, timeout, wait, force,
-                 recreate_pods, bearer_token):
+    def __init__(
+            self, ctx, release, version, dry_run, tiller_host, tiller_port,
+            tiller_namespace, timeout, wait, force, recreate_pods,
+            bearer_token):
         super(Rollback, self).__init__()
         self.ctx = ctx
         self.release = release
@@ -110,12 +112,10 @@ class Rollback(CliAction):
         self.bearer_token = bearer_token
 
     def invoke(self):
-        with Tiller(
-                tiller_host=self.tiller_host,
-                tiller_port=self.tiller_port,
-                tiller_namespace=self.tiller_namespace,
-                bearer_token=self.bearer_token,
-                dry_run=self.dry_run) as tiller:
+        with Tiller(tiller_host=self.tiller_host, tiller_port=self.tiller_port,
+                    tiller_namespace=self.tiller_namespace,
+                    bearer_token=self.bearer_token,
+                    dry_run=self.dry_run) as tiller:
             response = self.handle(tiller)
@@ -132,5 +132,6 @@ class Rollback(CliAction):
             recreate_pods=self.recreate_pods)
 
     def output(self, response):
-        self.logger.info(('(dry run) ' if self.dry_run else '') +
-                         'Rollback of %s complete.', self.release)
+        self.logger.info(
+            ('(dry run) ' if self.dry_run else '')
+            + 'Rollback of %s complete.', self.release)

View File

@@ -12,10 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import yaml
 import click
 from oslo_config import cfg
+import yaml
 
 from armada.cli import CliAction
 from armada import const
@@ -69,8 +68,9 @@ SHORT_DESC = "Command tests releases."
     default=None)
 @click.option(
     '--target-manifest',
-    help=("The target manifest to run. Required for specifying "
-          "which manifest to run when multiple are available."),
+    help=(
+        "The target manifest to run. Required for specifying "
+        "which manifest to run when multiple are available."),
     default=None)
 @click.option(
     '--cleanup',
@@ -79,24 +79,26 @@ SHORT_DESC = "Command tests releases."
     default=None)
 @click.option(
     '--enable-all',
-    help=("Run all tests for all releases regardless of any disabled chart "
-          "tests."),
+    help=(
+        "Run all tests for all releases regardless of any disabled chart "
+        "tests."),
     is_flag=True,
     default=False)
 @click.option('--debug', help="Enable debug logging.", is_flag=True)
 @click.pass_context
-def test_charts(ctx, file, release, tiller_host, tiller_port, tiller_namespace,
-                target_manifest, cleanup, enable_all, debug):
+def test_charts(
+        ctx, file, release, tiller_host, tiller_port, tiller_namespace,
+        target_manifest, cleanup, enable_all, debug):
     CONF.debug = debug
-    TestChartManifest(ctx, file, release, tiller_host, tiller_port,
-                      tiller_namespace, target_manifest, cleanup,
-                      enable_all).safe_invoke()
+    TestChartManifest(
+        ctx, file, release, tiller_host, tiller_port, tiller_namespace,
+        target_manifest, cleanup, enable_all).safe_invoke()
 
 
 class TestChartManifest(CliAction):
-
-    def __init__(self, ctx, file, release, tiller_host, tiller_port,
-                 tiller_namespace, target_manifest, cleanup, enable_all):
+    def __init__(
+            self, ctx, file, release, tiller_host, tiller_port,
+            tiller_namespace, target_manifest, cleanup, enable_all):
         super(TestChartManifest, self).__init__()
         self.ctx = ctx
@@ -110,10 +112,8 @@ class TestChartManifest(CliAction):
         self.enable_all = enable_all
 
     def invoke(self):
-        with Tiller(
-                tiller_host=self.tiller_host,
-                tiller_port=self.tiller_port,
-                tiller_namespace=self.tiller_namespace) as tiller:
+        with Tiller(tiller_host=self.tiller_host, tiller_port=self.tiller_port,
+                    tiller_namespace=self.tiller_namespace) as tiller:
             self.handle(tiller)
@@ -123,10 +123,8 @@ class TestChartManifest(CliAction):
         if self.release:
             if not self.ctx.obj.get('api', False):
-                test_handler = Test({},
-                                    self.release,
-                                    tiller,
-                                    cleanup=self.cleanup)
+                test_handler = Test(
+                    {}, self.release, tiller, cleanup=self.cleanup)
                 test_handler.test_release_for_success()
             else:
                 client = self.ctx.obj.get('CLIENT')
@@ -168,8 +166,9 @@ class TestChartManifest(CliAction):
                     if test_handler.test_enabled:
                         test_handler.test_release_for_success()
                 else:
-                    self.logger.info('Release %s not found - SKIPPING',
-                                     release_name)
+                    self.logger.info(
+                        'Release %s not found - SKIPPING',
+                        release_name)
             else:
                 client = self.ctx.obj.get('CLIENT')
                 query = {

View File

@@ -61,17 +61,19 @@ SHORT_DESC = "Command gets Tiller information."
 @click.option('--bearer-token', help="User bearer token.", default=None)
 @click.option('--debug', help="Enable debug logging.", is_flag=True)
 @click.pass_context
-def tiller_service(ctx, tiller_host, tiller_port, tiller_namespace, releases,
-                   status, bearer_token, debug):
+def tiller_service(
+        ctx, tiller_host, tiller_port, tiller_namespace, releases, status,
+        bearer_token, debug):
     CONF.debug = debug
-    TillerServices(ctx, tiller_host, tiller_port, tiller_namespace, releases,
-                   status, bearer_token).safe_invoke()
+    TillerServices(
+        ctx, tiller_host, tiller_port, tiller_namespace, releases, status,
+        bearer_token).safe_invoke()
 
 
 class TillerServices(CliAction):
-
-    def __init__(self, ctx, tiller_host, tiller_port, tiller_namespace,
-                 releases, status, bearer_token):
+    def __init__(
+            self, ctx, tiller_host, tiller_port, tiller_namespace, releases,
+            status, bearer_token):
         super(TillerServices, self).__init__()
         self.ctx = ctx
         self.tiller_host = tiller_host
@@ -83,11 +85,9 @@ class TillerServices(CliAction):
 
     def invoke(self):
-        with Tiller(
-                tiller_host=self.tiller_host,
-                tiller_port=self.tiller_port,
-                tiller_namespace=self.tiller_namespace,
-                bearer_token=self.bearer_token) as tiller:
+        with Tiller(tiller_host=self.tiller_host, tiller_port=self.tiller_port,
+                    tiller_namespace=self.tiller_namespace,
+                    bearer_token=self.bearer_token) as tiller:
             self.handle(tiller)
@@ -113,8 +113,9 @@ class TillerServices(CliAction):
         if self.releases:
             if not self.ctx.obj.get('api', False):
                 for release in tiller.list_releases():
-                    self.logger.info("Release %s in namespace: %s",
-                                     release.name, release.namespace)
+                    self.logger.info(
+                        "Release %s in namespace: %s", release.name,
+                        release.namespace)
             else:
                 client = self.ctx.obj.get('CLIENT')
                 query = {
@@ -125,5 +126,5 @@ class TillerServices(CliAction):
                 resp = client.get_releases(query=query)
                 for namespace in resp.get('releases'):
                     for release in resp.get('releases').get(namespace):
-                        self.logger.info('Release %s in namespace: %s',
-                                         release, namespace)
+                        self.logger.info(
+                            'Release %s in namespace: %s', release, namespace)

View File

@@ -52,7 +52,6 @@ def validate_manifest(ctx, locations, debug):
 
 class ValidateManifest(CliAction):
-
     def __init__(self, ctx, locations):
         super(ValidateManifest, self).__init__()
         self.ctx = ctx
@@ -71,20 +70,22 @@ class ValidateManifest(CliAction):
                 if not documents:
                     self.logger.warn('No documents to validate.')
                 elif valid:
-                    self.logger.info('Successfully validated: %s',
-                                     self.locations)
+                    self.logger.info(
+                        'Successfully validated: %s', self.locations)
                 else:
                     self.logger.info('Validation failed: %s', self.locations)
                     for m in details:
                         self.logger.info('Validation details: %s', str(m))
             except Exception:
-                raise Exception('Exception raised during '
-                                'validation: %s', self.locations)
+                raise Exception(
+                    'Exception raised during '
+                    'validation: %s', self.locations)
         else:
             if len(self.locations) > 1:
-                self.logger.error("Cannot specify multiple locations "
-                                  "when using validate API.")
+                self.logger.error(
+                    "Cannot specify multiple locations "
+                    "when using validate API.")
                 return
 
             client = self.ctx.obj.get('CLIENT')

View File

@@ -12,10 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import yaml
 from oslo_config import cfg
 from oslo_log import log as logging
+import yaml
 
 from armada.exceptions import api_exceptions as err
 from armada.handlers.armada import Override
@@ -27,7 +26,6 @@ API_VERSION = 'v{}/{}'
 
 class ArmadaClient(object):
-
     def __init__(self, session):
         self.session = session
@@ -69,13 +67,14 @@ class ArmadaClient(object):
         return resp.json()
 
-    def post_apply(self,
-                   manifest=None,
-                   manifest_ref=None,
-                   values=None,
-                   set=None,
-                   query=None,
-                   timeout=None):
+    def post_apply(
+            self,
+            manifest=None,
+            manifest_ref=None,
+            values=None,
+            set=None,
+            query=None,
+            timeout=None):
         """Call the Armada API to apply a Manifest.
 
         If ``manifest`` is not None, then the request body will be a fully

View File

@@ -18,5 +18,5 @@ from armada.common.policies import tiller
 
 def list_rules():
-    return itertools.chain(base.list_rules(), service.list_rules(),
-                           tiller.list_rules())
+    return itertools.chain(
+        base.list_rules(), service.list_rules(), tiller.list_rules())

View File

@@ -51,10 +51,12 @@ armada_policies = [
         name=base.ARMADA % 'rollback_release',
         check_str=base.RULE_ADMIN_REQUIRED,
         description='Rollback release',
-        operations=[{
-            'path': '/api/v1.0/rollback/{release}',
-            'method': 'POST'
-        }]),
+        operations=[
+            {
+                'path': '/api/v1.0/rollback/{release}',
+                'method': 'POST'
+            }
+        ]),
 ]

View File

@@ -50,11 +50,11 @@ def _enforce_policy(action, target, credentials, do_raise=True):
     # to enforce anything not found in ``armada.common.policies`` will error
     # out with a 'Policy not registered' message and 403 status code.
     try:
-        _ENFORCER.authorize(action, target, credentials.to_policy_view(),
-                            **extras)
+        _ENFORCER.authorize(
+            action, target, credentials.to_policy_view(), **extras)
     except policy.PolicyNotRegistered:
-        LOG.exception('Policy not registered for %(action)s',
-                      {'action': action})
+        LOG.exception(
+            'Policy not registered for %(action)s', {'action': action})
         raise exc.ActionForbidden()
     except Exception:
         with excutils.save_and_reraise_exception():
@@ -69,9 +69,7 @@ def _enforce_policy(action, target, credentials, do_raise=True):
 # NOTE(felipemonteiro): This naming is OK. It's just kept around for legacy
 # reasons. What's important is that authorize is used above.
 def enforce(rule):
-
     def decorator(func):
-
         @functools.wraps(func)
         def handler(*args, **kwargs):
             setup_policy()

View File

@@ -12,10 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import requests
 from oslo_config import cfg
 from oslo_log import log as logging
+import requests
 
 LOG = logging.getLogger(__name__)
 CONF = cfg.CONF
@@ -35,33 +34,35 @@ class ArmadaSession(object):
                     read timeout to use
     """
 
-    def __init__(self,
-                 host,
-                 port=None,
-                 scheme='http',
-                 token=None,
-                 marker=None,
-                 end_user=None,
-                 timeout=None):
+    def __init__(
+            self,
+            host,
+            port=None,
+            scheme='http',
+            token=None,
+            marker=None,
+            end_user=None,
+            timeout=None):
         self._session = requests.Session()
-        self._session.headers.update({
-            'X-Auth-Token': token,
-            'X-Context-Marker': marker,
-            'X-End-User': end_user,
-        })
+        self._session.headers.update(
+            {
+                'X-Auth-Token': token,
+                'X-Context-Marker': marker,
+                'X-End-User': end_user,
+            })
         self.host = host
         self.scheme = scheme
 
         if port:
             self.port = port
-            self.base_url = "{}://{}:{}/api/".format(self.scheme, self.host,
-                                                     self.port)
+            self.base_url = "{}://{}:{}/api/".format(
+                self.scheme, self.host, self.port)
         else:
             self.base_url = "{}://{}/api/".format(self.scheme, self.host)
 
-        self.default_timeout = ArmadaSession._calc_timeout_tuple((20, 3600),
-                                                                 timeout)
+        self.default_timeout = ArmadaSession._calc_timeout_tuple(
+            (20, 3600), timeout)
         self.token = token
         self.marker = marker
         self.end_user = end_user
@@ -91,13 +92,14 @@ class ArmadaSession(object):
         return resp
 
-    def post(self,
-             endpoint,
-             query=None,
-             body=None,
-             data=None,
-             headers=None,
-             timeout=None):
+    def post(
+            self,
+            endpoint,
+            query=None,
+            body=None,
+            data=None,
+            headers=None,
+            timeout=None):
         """
         Send a POST request to armada. If both body and data are specified,
         body will will be used.

View File

@@ -76,8 +76,8 @@ def set_default_for_default_log_levels():
     extra_log_level_defaults = ['kubernetes.client.rest=INFO']
 
     log.set_defaults(
-        default_log_levels=log.get_default_log_levels() +
-        extra_log_level_defaults, )
+        default_log_levels=log.get_default_log_levels()
+        + extra_log_level_defaults, )
 
 
 class ChartDeployAwareLogger(logging.Logger):
@@ -93,5 +93,5 @@ class ChartDeployAwareLogger(logging.Logger):
         else:
             prefix = ''
         prefixed = '{}{}'.format(prefix, msg)
-        return super(ChartDeployAwareLogger, self)._log(
-            level, prefixed, *args, **kwargs)
+        return super(ChartDeployAwareLogger,
+                     self)._log(level, prefixed, *args, **kwargs)

View File

@ -29,7 +29,8 @@ default_options = [
cfg.StrOpt( cfg.StrOpt(
'certs', 'certs',
default=None, default=None,
help=utils.fmt(""" help=utils.fmt(
"""
Absolute path to the certificate file to use for chart registries Absolute path to the certificate file to use for chart registries
""")), """)),
cfg.StrOpt( cfg.StrOpt(
@ -39,13 +40,15 @@ Absolute path to the certificate file to use for chart registries
cfg.BoolOpt( cfg.BoolOpt(
'middleware', 'middleware',
default=True, default=True,
help=utils.fmt(""" help=utils.fmt(
"""
Enables or disables Keystone authentication middleware. Enables or disables Keystone authentication middleware.
""")), """)),
cfg.StrOpt( cfg.StrOpt(
'project_domain_name', 'project_domain_name',
default='default', default='default',
help=utils.fmt(""" help=utils.fmt(
"""
The Keystone project domain name used for authentication. The Keystone project domain name used for authentication.
""")), """)),
cfg.StrOpt( cfg.StrOpt(
@ -58,7 +61,8 @@ The Keystone project domain name used for authentication.
cfg.StrOpt( cfg.StrOpt(
'ssh_key_path', 'ssh_key_path',
default='/home/user/.ssh/', default='/home/user/.ssh/',
help=utils.fmt("""Optional path to an SSH private key used for help=utils.fmt(
"""Optional path to an SSH private key used for
authenticating against a Git source repository. The path must be an absolute authenticating against a Git source repository. The path must be an absolute
path to the private key that includes the name of the key itself.""")), path to the private key that includes the name of the key itself.""")),
cfg.StrOpt( cfg.StrOpt(
@ -85,25 +89,29 @@ path to the private key that includes the name of the key itself.""")),
'lock_acquire_timeout', 'lock_acquire_timeout',
default=60, default=60,
min=0, min=0,
help=utils.fmt("""Time in seconds of how long armada will attempt to help=utils.fmt(
"""Time in seconds of how long armada will attempt to
acquire a lock before an exception is raised""")), acquire a lock before an exception is raised""")),
cfg.IntOpt( cfg.IntOpt(
'lock_acquire_delay', 'lock_acquire_delay',
default=5, default=5,
min=0, min=0,
help=utils.fmt("""Time in seconds of how long to wait between attempts help=utils.fmt(
"""Time in seconds of how long to wait between attempts
to acquire a lock""")), to acquire a lock""")),
cfg.IntOpt( cfg.IntOpt(
'lock_update_interval', 'lock_update_interval',
default=60, default=60,
min=0, min=0,
help=utils.fmt("""Time in seconds of how often armada will update the help=utils.fmt(
"""Time in seconds of how often armada will update the
lock while it is continuing to do work""")), lock while it is continuing to do work""")),
cfg.IntOpt( cfg.IntOpt(
'lock_expiration', 'lock_expiration',
default=600, default=600,
min=0, min=0,
help=utils.fmt("""Time in seconds of how much time needs to pass since help=utils.fmt(
"""Time in seconds of how much time needs to pass since
the last update of an existing lock before armada forcibly removes it the last update of an existing lock before armada forcibly removes it
and tries to acquire its own lock""")), and tries to acquire its own lock""")),
] ]
@ -116,11 +124,10 @@ def register_opts(conf):
def list_opts(): def list_opts():
return { return {
'DEFAULT': 'DEFAULT': default_options,
default_options, 'keystone_authtoken': (
'keystone_authtoken': ks_loading.get_session_conf_options()
(ks_loading.get_session_conf_options() + + ks_loading.get_auth_common_conf_options()
ks_loading.get_auth_common_conf_options() + + ks_loading.get_auth_plugin_conf_options('password')
ks_loading.get_auth_plugin_conf_options('password') + + ks_loading.get_auth_plugin_conf_options('v3password'))
ks_loading.get_auth_plugin_conf_options('v3password'))
} }

View File

@ -70,8 +70,8 @@ def _import_modules(module_names):
if not hasattr(module, LIST_OPTS_FUNC_NAME): if not hasattr(module, LIST_OPTS_FUNC_NAME):
raise Exception( raise Exception(
"The module '%s' should have a '%s' function which " "The module '%s' should have a '%s' function which "
"returns the config options." % (full_module_path, "returns the config options." %
LIST_OPTS_FUNC_NAME)) (full_module_path, LIST_OPTS_FUNC_NAME))
else: else:
imported_modules.append(module) imported_modules.append(module)
return imported_modules return imported_modules

View File

@ -51,8 +51,8 @@ class InvalidValuesYamlException(ArmadaException):
def __init__(self, chart_description): def __init__(self, chart_description):
self._message = ( self._message = (
'Armada encountered invalid values.yaml in helm chart: %s' % 'Armada encountered invalid values.yaml in helm chart: %s'
chart_description) % chart_description)
super(InvalidValuesYamlException, self).__init__(self._message) super(InvalidValuesYamlException, self).__init__(self._message)
@ -64,8 +64,8 @@ class InvalidOverrideValuesYamlException(ArmadaException):
def __init__(self, chart_description): def __init__(self, chart_description):
self._message = ( self._message = (
'Armada encountered invalid values.yaml in helm chart: %s' % 'Armada encountered invalid values.yaml in helm chart: %s'
chart_description) % chart_description)
super(InvalidValuesYamlException, self).__init__(self._message) super(InvalidValuesYamlException, self).__init__(self._message)

View File

@ -54,15 +54,16 @@ def get_version_from_request(req):
# Standard error handler # Standard error handler
def format_error_resp(req, def format_error_resp(
resp, req,
status_code, resp,
message="", status_code,
reason="", message="",
error_type=None, reason="",
retry=False, error_type=None,
error_list=None, retry=False,
info_list=None): error_list=None,
info_list=None):
""" """
Write a error message body and throw a Falcon exception to trigger Write a error message body and throw a Falcon exception to trigger
an HTTP status an HTTP status
@ -97,10 +98,12 @@ def format_error_resp(req,
# message list as well. In both cases, if the error flag is not # message list as well. In both cases, if the error flag is not
# set, set it appropriately. # set, set it appropriately.
if error_list is None: if error_list is None:
error_list = [{ error_list = [
'message': 'An error occurred, but was not specified', {
'error': True 'message': 'An error occurred, but was not specified',
}] 'error': True
}
]
else: else:
for error_item in error_list: for error_item in error_list:
if 'error' not in error_item: if 'error' not in error_item:
@ -216,8 +219,9 @@ class ArmadaAPIException(falcon.HTTPError):
self.error_list = massage_error_list(error_list, description) self.error_list = massage_error_list(error_list, description)
self.info_list = info_list self.info_list = info_list
self.retry = retry self.retry = retry
super().__init__(self.status, self.title, super().__init__(
self._gen_ex_message(self.title, self.description)) self.status, self.title,
self._gen_ex_message(self.title, self.description))
@staticmethod @staticmethod
def _gen_ex_message(title, description): def _gen_ex_message(title, description):

View File

@ -47,11 +47,13 @@ class HelmChartBuildException(ChartBuilderException):
def __init__(self, chart_name, details): def __init__(self, chart_name, details):
self._chart_name = chart_name self._chart_name = chart_name
self._message = ('Failed to build Helm chart for {chart_name}. ' self._message = (
'Details: {details}'.format(**{ 'Failed to build Helm chart for {chart_name}. '
'chart_name': chart_name, 'Details: {details}'.format(
'details': details **{
})) 'chart_name': chart_name,
'details': details
}))
super(HelmChartBuildException, self).__init__(self._message) super(HelmChartBuildException, self).__init__(self._message)
@ -65,8 +67,9 @@ class FilesLoadException(ChartBuilderException):
* Ensure that the file can be encoded to utf-8 or else it cannot be parsed. * Ensure that the file can be encoded to utf-8 or else it cannot be parsed.
''' '''
message = ('A %(clazz)s exception occurred while trying to read ' message = (
'file: %(file)s. Details:\n%(details)s') 'A %(clazz)s exception occurred while trying to read '
'file: %(file)s. Details:\n%(details)s')
class IgnoredFilesLoadException(ChartBuilderException): class IgnoredFilesLoadException(ChartBuilderException):

View File

@ -31,8 +31,9 @@ class GitException(SourceException):
def __init__(self, location): def __init__(self, location):
self._location = location self._location = location
self._message = ('Git exception occurred, [', self._location, self._message = (
'] may not be a valid git repository.') 'Git exception occurred, [', self._location,
'] may not be a valid git repository.')
super(GitException, self).__init__(self._message) super(GitException, self).__init__(self._message)
@ -44,10 +45,11 @@ class GitAuthException(SourceException):
self._repo_url = repo_url self._repo_url = repo_url
self._ssh_key_path = ssh_key_path self._ssh_key_path = ssh_key_path
self._message = ('Failed to authenticate for repo {} with ssh-key at ' self._message = (
'path {}. Verify the repo exists and the correct ssh ' 'Failed to authenticate for repo {} with ssh-key at '
'key path was supplied in the Armada config ' 'path {}. Verify the repo exists and the correct ssh '
'file.').format(self._repo_url, self._ssh_key_path) 'key path was supplied in the Armada config '
'file.').format(self._repo_url, self._ssh_key_path)
super(GitAuthException, self).__init__(self._message) super(GitAuthException, self).__init__(self._message)
@ -69,8 +71,8 @@ class GitSSHException(SourceException):
def __init__(self, ssh_key_path): def __init__(self, ssh_key_path):
self._ssh_key_path = ssh_key_path self._ssh_key_path = ssh_key_path
self._message = ('Failed to find specified SSH key: {}.'.format( self._message = (
self._ssh_key_path)) 'Failed to find specified SSH key: {}.'.format(self._ssh_key_path))
super(GitSSHException, self).__init__(self._message) super(GitSSHException, self).__init__(self._message)

View File

@ -212,5 +212,6 @@ class TillerListReleasesPagingException(TillerException):
*Coming Soon* *Coming Soon*
''' '''
message = ('Failed to page through tiller releases, possibly due to ' message = (
'releases being added between pages') 'Failed to page through tiller releases, possibly due to '
'releases being added between pages')

View File

@ -29,8 +29,9 @@ class InvalidManifestException(ValidateException):
*Coming Soon* *Coming Soon*
''' '''
message = ('Armada manifest(s) failed validation. Details: ' message = (
'%(error_messages)s.') 'Armada manifest(s) failed validation. Details: '
'%(error_messages)s.')
class InvalidChartNameException(ValidateException): class InvalidChartNameException(ValidateException):
@ -59,5 +60,6 @@ class InvalidArmadaObjectException(ValidateException):
*Coming Soon* *Coming Soon*
''' '''
message = ('An Armada object failed internal validation. Details: ' message = (
'%(details)s.') 'An Armada object failed internal validation. Details: '
'%(details)s.')

View File

@ -13,6 +13,7 @@
# limitations under the License. # limitations under the License.
from concurrent.futures import ThreadPoolExecutor, as_completed from concurrent.futures import ThreadPoolExecutor, as_completed
from oslo_config import cfg from oslo_config import cfg
from oslo_log import log as logging from oslo_log import log as logging
@ -39,20 +40,21 @@ class Armada(object):
workflows workflows
''' '''
def __init__(self, def __init__(
documents, self,
tiller, documents,
disable_update_pre=False, tiller,
disable_update_post=False, disable_update_pre=False,
enable_chart_cleanup=False, disable_update_post=False,
dry_run=False, enable_chart_cleanup=False,
set_ovr=None, dry_run=False,
force_wait=False, set_ovr=None,
timeout=None, force_wait=False,
values=None, timeout=None,
target_manifest=None, values=None,
k8s_wait_attempts=1, target_manifest=None,
k8s_wait_attempt_sleep=1): k8s_wait_attempts=1,
k8s_wait_attempt_sleep=1):
''' '''
Initialize the Armada engine and establish a connection to Tiller. Initialize the Armada engine and establish a connection to Tiller.
@ -106,8 +108,8 @@ class Armada(object):
# Clone the chart sources # Clone the chart sources
manifest_data = self.manifest.get(const.KEYWORD_DATA, {}) manifest_data = self.manifest.get(const.KEYWORD_DATA, {})
for group in manifest_data.get(const.KEYWORD_GROUPS, []): for group in manifest_data.get(const.KEYWORD_GROUPS, []):
for ch in group.get(const.KEYWORD_DATA).get( for ch in group.get(const.KEYWORD_DATA).get(const.KEYWORD_CHARTS,
const.KEYWORD_CHARTS, []): []):
self.get_chart(ch) self.get_chart(ch)
def get_chart(self, ch): def get_chart(self, ch):
@ -193,12 +195,12 @@ class Armada(object):
chartgroup = cg.get(const.KEYWORD_DATA) chartgroup = cg.get(const.KEYWORD_DATA)
cg_name = cg.get('metadata').get('name') cg_name = cg.get('metadata').get('name')
cg_desc = chartgroup.get('description', '<missing description>') cg_desc = chartgroup.get('description', '<missing description>')
cg_sequenced = chartgroup.get('sequenced', cg_sequenced = chartgroup.get(
False) or self.force_wait 'sequenced', False) or self.force_wait
LOG.info('Processing ChartGroup: %s (%s), sequenced=%s%s', cg_name, LOG.info(
cg_desc, cg_sequenced, 'Processing ChartGroup: %s (%s), sequenced=%s%s', cg_name,
' (forced)' if self.force_wait else '') cg_desc, cg_sequenced, ' (forced)' if self.force_wait else '')
# TODO: Remove when v1 doc support is removed. # TODO: Remove when v1 doc support is removed.
cg_test_all_charts = chartgroup.get('test_charts') cg_test_all_charts = chartgroup.get('test_charts')
@ -208,8 +210,8 @@ class Armada(object):
def deploy_chart(chart): def deploy_chart(chart):
set_current_chart(chart) set_current_chart(chart)
try: try:
return self.chart_deploy.execute(chart, cg_test_all_charts, return self.chart_deploy.execute(
prefix, known_releases) chart, cg_test_all_charts, prefix, known_releases)
finally: finally:
set_current_chart(None) set_current_chart(None)
@ -284,15 +286,16 @@ class Armada(object):
for gchart in charts: for gchart in charts:
for chart in gchart.get(const.KEYWORD_CHARTS, []): for chart in gchart.get(const.KEYWORD_CHARTS, []):
valid_releases.append( valid_releases.append(
release_prefixer(prefix, release_prefixer(
chart.get('chart', {}).get('release'))) prefix,
chart.get('chart', {}).get('release')))
actual_releases = [x.name for x in self.tiller.list_releases()] actual_releases = [x.name for x in self.tiller.list_releases()]
release_diff = list(set(actual_releases) - set(valid_releases)) release_diff = list(set(actual_releases) - set(valid_releases))
for release in release_diff: for release in release_diff:
if release.startswith(prefix): if release.startswith(prefix):
LOG.info('Purging release %s as part of chart cleanup.', LOG.info(
release) 'Purging release %s as part of chart cleanup.', release)
self.tiller.uninstall_release(release) self.tiller.uninstall_release(release)
msg['purge'].append(release) msg['purge'].append(release)

View File

@ -16,7 +16,6 @@ from armada import const
class ChartDelete(object): class ChartDelete(object):
def __init__(self, chart, release_name, tiller, purge=True): def __init__(self, chart, release_name, tiller, purge=True):
"""Initialize a chart delete handler. """Initialize a chart delete handler.
@ -39,8 +38,8 @@ class ChartDelete(object):
# TODO(seaneagan): Consider allowing this to be a percentage of the # TODO(seaneagan): Consider allowing this to be a percentage of the
# chart's `wait.timeout` so that the timeouts can scale together, and # chart's `wait.timeout` so that the timeouts can scale together, and
# likely default to some reasonable value, e.g. "50%". # likely default to some reasonable value, e.g. "50%".
self.timeout = self.delete_config.get('timeout', self.timeout = self.delete_config.get(
const.DEFAULT_DELETE_TIMEOUT) 'timeout', const.DEFAULT_DELETE_TIMEOUT)
def get_timeout(self): def get_timeout(self):
return self.timeout return self.timeout

View File

@ -12,8 +12,9 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from oslo_log import log as logging
import time import time
from oslo_log import log as logging
import yaml import yaml
from armada import const from armada import const
@ -31,9 +32,9 @@ LOG = logging.getLogger(__name__)
class ChartDeploy(object): class ChartDeploy(object):
def __init__(
def __init__(self, disable_update_pre, disable_update_post, dry_run, self, disable_update_pre, disable_update_post, dry_run,
k8s_wait_attempts, k8s_wait_attempt_sleep, timeout, tiller): k8s_wait_attempts, k8s_wait_attempt_sleep, timeout, tiller):
self.disable_update_pre = disable_update_pre self.disable_update_pre = disable_update_pre
self.disable_update_post = disable_update_post self.disable_update_post = disable_update_post
self.dry_run = dry_run self.dry_run = dry_run
@ -84,8 +85,9 @@ class ChartDeploy(object):
if status == const.STATUS_DEPLOYED: if status == const.STATUS_DEPLOYED:
# indicate to the end user what path we are taking # indicate to the end user what path we are taking
LOG.info("Existing release %s found in namespace %s", release_name, LOG.info(
namespace) "Existing release %s found in namespace %s", release_name,
namespace)
# extract the installed chart and installed values from the # extract the installed chart and installed values from the
# latest release so we can compare to the intended state # latest release so we can compare to the intended state
@ -114,8 +116,9 @@ class ChartDeploy(object):
pre_actions = upgrade_pre pre_actions = upgrade_pre
if not self.disable_update_post and upgrade_post: if not self.disable_update_post and upgrade_post:
LOG.warning('Post upgrade actions are ignored by Armada' LOG.warning(
'and will not affect deployment.') 'Post upgrade actions are ignored by Armada'
'and will not affect deployment.')
post_actions = upgrade_post post_actions = upgrade_post
try: try:
@ -158,8 +161,9 @@ class ChartDeploy(object):
force=force, force=force,
recreate_pods=recreate_pods) recreate_pods=recreate_pods)
LOG.info('Upgrade completed with results from Tiller: %s', LOG.info(
tiller_result.__dict__) 'Upgrade completed with results from Tiller: %s',
tiller_result.__dict__)
result['upgrade'] = release_name result['upgrade'] = release_name
else: else:
# Check for release with status other than DEPLOYED # Check for release with status other than DEPLOYED
@ -213,10 +217,11 @@ class ChartDeploy(object):
release_name, status) release_name, status)
else: else:
# Purge the release # Purge the release
LOG.info('Purging release %s with status %s', release_name, LOG.info(
status) 'Purging release %s with status %s', release_name,
chart_delete = ChartDelete(chart, release_name, status)
self.tiller) chart_delete = ChartDelete(
chart, release_name, self.tiller)
chart_delete.delete() chart_delete.delete()
result['purge'] = release_name result['purge'] = release_name
@ -233,8 +238,9 @@ class ChartDeploy(object):
wait=native_wait_enabled, wait=native_wait_enabled,
timeout=timer) timeout=timer)
LOG.info('Install completed with results from Tiller: %s', LOG.info(
tiller_result.__dict__) 'Install completed with results from Tiller: %s',
tiller_result.__dict__)
result['install'] = release_name result['install'] = release_name
# Wait # Wait
@ -251,8 +257,8 @@ class ChartDeploy(object):
self.tiller, self.tiller,
cg_test_charts=cg_test_all_charts) cg_test_charts=cg_test_all_charts)
run_test = test_handler.test_enabled and (just_deployed or run_test = test_handler.test_enabled and (
not last_test_passed) just_deployed or not last_test_passed)
if run_test: if run_test:
self._test_chart(release_name, test_handler) self._test_chart(release_name, test_handler)
@ -279,6 +285,7 @@ class ChartDeploy(object):
for release in known_releases: for release in known_releases:
if release.name == release_name: if release.name == release_name:
return release return release
LOG.info("known: %s, release_name: %s", LOG.info(
list(map(lambda r: r.name, known_releases)), release_name) "known: %s, release_name: %s",
list(map(lambda r: r.name, known_releases)), release_name)
return None return None

View File

@ -13,16 +13,15 @@
# limitations under the License. # limitations under the License.
import os import os
import yaml
from google.protobuf.any_pb2 import Any from google.protobuf.any_pb2 import Any
from hapi.chart.chart_pb2 import Chart from hapi.chart.chart_pb2 import Chart
from hapi.chart.config_pb2 import Config from hapi.chart.config_pb2 import Config
from hapi.chart.metadata_pb2 import Metadata from hapi.chart.metadata_pb2 import Metadata
from hapi.chart.template_pb2 import Template from hapi.chart.template_pb2 import Template
from oslo_config import cfg from oslo_config import cfg
from oslo_log import log as logging from oslo_log import log as logging
import yaml
from armada.exceptions import chartbuilder_exceptions from armada.exceptions import chartbuilder_exceptions
from armada import const from armada import const
@ -65,16 +64,17 @@ class ChartBuilder(object):
property from the chart, or else "" if the property isn't a 2-tuple. property from the chart, or else "" if the property isn't a 2-tuple.
''' '''
source_dir = self.chart_data.get('source_dir') source_dir = self.chart_data.get('source_dir')
return (os.path.join(*source_dir) if return (
(source_dir and isinstance(source_dir, (list, tuple)) and os.path.join(*source_dir) if (
len(source_dir) == 2) else "") source_dir and isinstance(source_dir, (list, tuple))
and len(source_dir) == 2) else "")
def get_ignored_files(self): def get_ignored_files(self):
'''Load files to ignore from .helmignore if present.''' '''Load files to ignore from .helmignore if present.'''
try: try:
ignored_files = [] ignored_files = []
if os.path.exists( if os.path.exists(os.path.join(self.source_directory,
os.path.join(self.source_directory, '.helmignore')): '.helmignore')):
with open(os.path.join(self.source_directory, with open(os.path.join(self.source_directory,
'.helmignore')) as f: '.helmignore')) as f:
ignored_files = f.readlines() ignored_files = f.readlines()
@ -90,8 +90,8 @@ class ChartBuilder(object):
False otherwise. False otherwise.
''' '''
for ignored_file in self.ignored_files: for ignored_file in self.ignored_files:
if (ignored_file.startswith('*') and if (ignored_file.startswith('*')
filename.endswith(ignored_file.strip('*'))): and filename.endswith(ignored_file.strip('*'))):
return True return True
elif ignored_file == filename: elif ignored_file == filename:
return True return True
@ -153,8 +153,9 @@ class ChartBuilder(object):
raise chartbuilder_exceptions.FilesLoadException( raise chartbuilder_exceptions.FilesLoadException(
file=abspath, details=e) file=abspath, details=e)
except UnicodeError as e: except UnicodeError as e:
LOG.debug('Attempting to read %s using encoding %s.', LOG.debug(
abspath, encoding) 'Attempting to read %s using encoding %s.', abspath,
encoding)
msg = "(encoding=%s) %s" % (encoding, str(e)) msg = "(encoding=%s) %s" % (encoding, str(e))
unicode_errors.append(msg) unicode_errors.append(msg)
else: else:
@ -176,12 +177,11 @@ class ChartBuilder(object):
relfolder = os.path.split(root)[-1] relfolder = os.path.split(root)[-1]
rel_folder_path = os.path.relpath(root, self.source_directory) rel_folder_path = os.path.relpath(root, self.source_directory)
if not any( if not any(root.startswith(os.path.join(self.source_directory, x))
root.startswith(os.path.join(self.source_directory, x)) for x in ['templates', 'charts']):
for x in ['templates', 'charts']):
for file in files: for file in files:
if (file not in files_to_ignore and if (file not in files_to_ignore
file not in non_template_files): and file not in non_template_files):
_append_file_to_result(root, rel_folder_path, file) _append_file_to_result(root, rel_folder_path, file)
elif relfolder == 'charts' and '.prov' in files: elif relfolder == 'charts' and '.prov' in files:
_append_file_to_result(root, rel_folder_path, '.prov') _append_file_to_result(root, rel_folder_path, '.prov')
@ -196,8 +196,9 @@ class ChartBuilder(object):
with open(os.path.join(self.source_directory, 'values.yaml')) as f: with open(os.path.join(self.source_directory, 'values.yaml')) as f:
raw_values = f.read() raw_values = f.read()
else: else:
LOG.warn("No values.yaml in %s, using empty values", LOG.warn(
self.source_directory) "No values.yaml in %s, using empty values",
self.source_directory)
raw_values = '' raw_values = ''
return Config(raw=raw_values) return Config(raw=raw_values)
@ -210,14 +211,13 @@ class ChartBuilder(object):
''' '''
chart_name = self.chart['metadata']['name'] chart_name = self.chart['metadata']['name']
templates = [] templates = []
if not os.path.exists( if not os.path.exists(os.path.join(self.source_directory,
os.path.join(self.source_directory, 'templates')): 'templates')):
LOG.warn( LOG.warn(
"Chart %s has no templates directory. " "Chart %s has no templates directory. "
"No templates will be deployed", chart_name) "No templates will be deployed", chart_name)
for root, _, files in os.walk( for root, _, files in os.walk(os.path.join(self.source_directory,
os.path.join(self.source_directory, 'templates'), 'templates'), topdown=True):
topdown=True):
for tpl_file in files: for tpl_file in files:
tname = os.path.relpath( tname = os.path.relpath(
os.path.join(root, tpl_file), os.path.join(root, tpl_file),
@ -247,8 +247,9 @@ class ChartBuilder(object):
chart_release = self.chart_data.get('release', None) chart_release = self.chart_data.get('release', None)
for dep_chart in chart_dependencies: for dep_chart in chart_dependencies:
dep_chart_name = dep_chart['metadata']['name'] dep_chart_name = dep_chart['metadata']['name']
LOG.info("Building dependency chart %s for release %s.", LOG.info(
dep_chart_name, chart_release) "Building dependency chart %s for release %s.", dep_chart_name,
chart_release)
try: try:
dependencies.append(ChartBuilder(dep_chart).get_helm_chart()) dependencies.append(ChartBuilder(dep_chart).get_helm_chart())
except Exception: except Exception:
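The first hunk in this file rewraps the expression that picks the chart source path, which is hard to follow in this split view. A standalone restatement of that logic, as a minimal sketch for readers (the function name and the example path are illustrative, not part of the commit):

# Illustrative sketch only, not part of the commit diff above.
import os

def get_source_path(chart_data):
    # Join the (base, subpath) pair when present and well formed, else "".
    source_dir = chart_data.get('source_dir')
    if (source_dir and isinstance(source_dir, (list, tuple))
            and len(source_dir) == 2):
        return os.path.join(*source_dir)
    return ""

# e.g. get_source_path({'source_dir': ('/tmp/armada', 'charts/mychart')})
# returns '/tmp/armada/charts/mychart'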


@ -16,8 +16,8 @@
import urllib.parse import urllib.parse
import re import re
import requests
import requests
from oslo_log import log as logging from oslo_log import log as logging
from armada.exceptions.source_exceptions import InvalidPathException from armada.exceptions.source_exceptions import InvalidPathException
@ -58,8 +58,8 @@ class ReferenceResolver(object):
if handler is None: if handler is None:
raise InvalidPathException( raise InvalidPathException(
"Invalid reference scheme %s: no handler." % "Invalid reference scheme %s: no handler."
design_uri.scheme) % design_uri.scheme)
else: else:
# Have to do a little magic to call the classmethod # Have to do a little magic to call the classmethod
# as a pointer # as a pointer
@ -90,8 +90,8 @@ class ReferenceResolver(object):
response = requests.get(design_uri.geturl(), timeout=30) response = requests.get(design_uri.geturl(), timeout=30)
if response.status_code >= 400: if response.status_code >= 400:
raise InvalidPathException( raise InvalidPathException(
"Error received for HTTP reference: %d" % "Error received for HTTP reference: %d"
response.status_code) % response.status_code)
return response.content return response.content
@ -122,8 +122,9 @@ class ReferenceResolver(object):
ks_sess = ks_utils.get_keystone_session() ks_sess = ks_utils.get_keystone_session()
(new_scheme, foo) = re.subn(r'^[^+]+\+', '', design_uri.scheme) (new_scheme, foo) = re.subn(r'^[^+]+\+', '', design_uri.scheme)
url = urllib.parse.urlunparse( url = urllib.parse.urlunparse(
(new_scheme, design_uri.netloc, design_uri.path, design_uri.params, (
design_uri.query, design_uri.fragment)) new_scheme, design_uri.netloc, design_uri.path,
design_uri.params, design_uri.query, design_uri.fragment))
LOG.debug("Calling Keystone session for url %s" % str(url)) LOG.debug("Calling Keystone session for url %s" % str(url))
resp = ks_sess.get(url) resp = ks_sess.get(url)
if resp.status_code >= 400: if resp.status_code >= 400:


@ -60,11 +60,12 @@ class K8s(object):
self.extension_api = client.ExtensionsV1beta1Api(api_client) self.extension_api = client.ExtensionsV1beta1Api(api_client)
self.apps_v1_api = client.AppsV1Api(api_client) self.apps_v1_api = client.AppsV1Api(api_client)
def delete_job_action(self, def delete_job_action(
name, self,
namespace="default", name,
propagation_policy='Foreground', namespace="default",
timeout=DEFAULT_K8S_TIMEOUT): propagation_policy='Foreground',
timeout=DEFAULT_K8S_TIMEOUT):
''' '''
Delete a job from a namespace (see _delete_item_action). Delete a job from a namespace (see _delete_item_action).
@ -74,15 +75,17 @@ class K8s(object):
to the delete. to the delete.
:param timeout: The timeout to wait for the delete to complete :param timeout: The timeout to wait for the delete to complete
''' '''
self._delete_item_action(self.batch_api.list_namespaced_job, self._delete_item_action(
self.batch_api.delete_namespaced_job, "job", self.batch_api.list_namespaced_job,
name, namespace, propagation_policy, timeout) self.batch_api.delete_namespaced_job, "job", name, namespace,
propagation_policy, timeout)
def delete_cron_job_action(self, def delete_cron_job_action(
name, self,
namespace="default", name,
propagation_policy='Foreground', namespace="default",
timeout=DEFAULT_K8S_TIMEOUT): propagation_policy='Foreground',
timeout=DEFAULT_K8S_TIMEOUT):
''' '''
Delete a cron job from a namespace (see _delete_item_action). Delete a cron job from a namespace (see _delete_item_action).
@ -97,11 +100,12 @@ class K8s(object):
self.batch_v1beta1_api.delete_namespaced_cron_job, "cron job", self.batch_v1beta1_api.delete_namespaced_cron_job, "cron job",
name, namespace, propagation_policy, timeout) name, namespace, propagation_policy, timeout)
def delete_pod_action(self, def delete_pod_action(
name, self,
namespace="default", name,
propagation_policy='Foreground', namespace="default",
timeout=DEFAULT_K8S_TIMEOUT): propagation_policy='Foreground',
timeout=DEFAULT_K8S_TIMEOUT):
''' '''
Delete a pod from a namespace (see _delete_item_action). Delete a pod from a namespace (see _delete_item_action).
@ -111,18 +115,19 @@ class K8s(object):
to the delete. to the delete.
:param timeout: The timeout to wait for the delete to complete :param timeout: The timeout to wait for the delete to complete
''' '''
self._delete_item_action(self.client.list_namespaced_pod, self._delete_item_action(
self.client.delete_namespaced_pod, "pod", self.client.list_namespaced_pod, self.client.delete_namespaced_pod,
name, namespace, propagation_policy, timeout) "pod", name, namespace, propagation_policy, timeout)
def _delete_item_action(self, def _delete_item_action(
list_func, self,
delete_func, list_func,
object_type_description, delete_func,
name, object_type_description,
namespace="default", name,
propagation_policy='Foreground', namespace="default",
timeout=DEFAULT_K8S_TIMEOUT): propagation_policy='Foreground',
timeout=DEFAULT_K8S_TIMEOUT):
''' '''
This function takes the action to delete an object (job, cronjob, pod) This function takes the action to delete an object (job, cronjob, pod)
from kubernetes. It will wait for the object to be fully deleted before from kubernetes. It will wait for the object to be fully deleted before
@ -145,14 +150,15 @@ class K8s(object):
try: try:
timeout = self._check_timeout(timeout) timeout = self._check_timeout(timeout)
LOG.debug('Watching to delete %s %s, Wait timeout=%s', LOG.debug(
object_type_description, name, timeout) 'Watching to delete %s %s, Wait timeout=%s',
object_type_description, name, timeout)
body = client.V1DeleteOptions() body = client.V1DeleteOptions()
w = watch.Watch() w = watch.Watch()
issue_delete = True issue_delete = True
found_events = False found_events = False
for event in w.stream( for event in w.stream(list_func, namespace=namespace,
list_func, namespace=namespace, timeout_seconds=timeout): timeout_seconds=timeout):
if issue_delete: if issue_delete:
delete_func( delete_func(
name=name, name=name,
@ -168,23 +174,27 @@ class K8s(object):
if item_name == name: if item_name == name:
found_events = True found_events = True
if event_type == 'DELETED': if event_type == 'DELETED':
LOG.info('Successfully deleted %s %s', LOG.info(
object_type_description, item_name) 'Successfully deleted %s %s',
object_type_description, item_name)
return return
if not found_events: if not found_events:
LOG.warn('Saw no delete events for %s %s in namespace=%s', LOG.warn(
object_type_description, name, namespace) 'Saw no delete events for %s %s in namespace=%s',
object_type_description, name, namespace)
err_msg = ('Reached timeout while waiting to delete %s: ' err_msg = (
'name=%s, namespace=%s' % (object_type_description, 'Reached timeout while waiting to delete %s: '
name, namespace)) 'name=%s, namespace=%s' %
(object_type_description, name, namespace))
LOG.error(err_msg) LOG.error(err_msg)
raise exceptions.KubernetesWatchTimeoutException(err_msg) raise exceptions.KubernetesWatchTimeoutException(err_msg)
except ApiException as e: except ApiException as e:
LOG.exception("Exception when deleting %s: name=%s, namespace=%s", LOG.exception(
object_type_description, name, namespace) "Exception when deleting %s: name=%s, namespace=%s",
object_type_description, name, namespace)
raise e raise e
def get_namespace_job(self, namespace="default", **kwargs): def get_namespace_job(self, namespace="default", **kwargs):
@ -196,8 +206,9 @@ class K8s(object):
try: try:
return self.batch_api.list_namespaced_job(namespace, **kwargs) return self.batch_api.list_namespaced_job(namespace, **kwargs)
except ApiException as e: except ApiException as e:
LOG.error("Exception getting jobs: namespace=%s, label=%s: %s", LOG.error(
namespace, kwargs.get('label_selector', ''), e) "Exception getting jobs: namespace=%s, label=%s: %s",
namespace, kwargs.get('label_selector', ''), e)
def get_namespace_cron_job(self, namespace="default", **kwargs): def get_namespace_cron_job(self, namespace="default", **kwargs):
''' '''
@ -276,8 +287,8 @@ class K8s(object):
base_pod_pattern = re.compile('^(.+)-[a-zA-Z0-9]+$') base_pod_pattern = re.compile('^(.+)-[a-zA-Z0-9]+$')
if not base_pod_pattern.match(old_pod_name): if not base_pod_pattern.match(old_pod_name):
LOG.error('Could not identify new pod after purging %s', LOG.error(
old_pod_name) 'Could not identify new pod after purging %s', old_pod_name)
return return
pod_base_name = base_pod_pattern.match(old_pod_name).group(1) pod_base_name = base_pod_pattern.match(old_pod_name).group(1)
@ -297,13 +308,13 @@ class K8s(object):
new_pod_name = event_name new_pod_name = event_name
elif new_pod_name: elif new_pod_name:
for condition in pod_conditions: for condition in pod_conditions:
if (condition.type == 'Ready' and if (condition.type == 'Ready'
condition.status == 'True'): and condition.status == 'True'):
LOG.info('New pod %s deployed', new_pod_name) LOG.info('New pod %s deployed', new_pod_name)
w.stop() w.stop()
def wait_get_completed_podphase(self, release, def wait_get_completed_podphase(
timeout=DEFAULT_K8S_TIMEOUT): self, release, timeout=DEFAULT_K8S_TIMEOUT):
''' '''
:param release: part of namespace :param release: part of namespace
:param timeout: time before disconnecting stream :param timeout: time before disconnecting stream
@ -312,9 +323,8 @@ class K8s(object):
w = watch.Watch() w = watch.Watch()
found_events = False found_events = False
for event in w.stream( for event in w.stream(self.client.list_pod_for_all_namespaces,
self.client.list_pod_for_all_namespaces, timeout_seconds=timeout):
timeout_seconds=timeout):
resource_name = event['object'].metadata.name resource_name = event['object'].metadata.name
if release in resource_name: if release in resource_name:
@ -362,8 +372,8 @@ class K8s(object):
return self.custom_objects.create_namespaced_custom_object( return self.custom_objects.create_namespaced_custom_object(
group, version, namespace, plural, body) group, version, namespace, plural, body)
def delete_custom_resource(self, group, version, namespace, plural, name, def delete_custom_resource(
body): self, group, version, namespace, plural, name, body):
"""Deletes a custom resource """Deletes a custom resource
:param group: the custom resource's group :param group: the custom resource's group
@ -394,8 +404,8 @@ class K8s(object):
return self.custom_objects.get_namespaced_custom_object( return self.custom_objects.get_namespaced_custom_object(
group, version, namespace, plural, name) group, version, namespace, plural, name)
def replace_custom_resource(self, group, version, namespace, plural, name, def replace_custom_resource(
body): self, group, version, namespace, plural, name, body):
"""Replaces a custom resource """Replaces a custom resource
:param group: the custom resource's group :param group: the custom resource's group


@ -11,10 +11,12 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import functools import functools
import time import time
from datetime import datetime, timedelta from datetime import datetime, timedelta
from concurrent.futures import ThreadPoolExecutor from concurrent.futures import ThreadPoolExecutor
from kubernetes import client from kubernetes import client
from kubernetes.client.rest import ApiException from kubernetes.client.rest import ApiException
from oslo_config import cfg from oslo_config import cfg
@ -49,7 +51,6 @@ def lock_and_thread(lock_name="lock"):
""" """
def lock_decorator(func): def lock_decorator(func):
@functools.wraps(func) @functools.wraps(func)
def func_wrapper(*args, **kwargs): def func_wrapper(*args, **kwargs):
bearer_token = None bearer_token = None
@ -62,10 +63,11 @@ def lock_and_thread(lock_name="lock"):
# we did not find a Tiller object to extract a bearer token from # we did not find a Tiller object to extract a bearer token from
# log this to assist with potential debugging in the future # log this to assist with potential debugging in the future
if not found_tiller: if not found_tiller:
LOG.info("no Tiller object found in parameters of function " LOG.info(
"decorated by lock_and_thread, this might create " "no Tiller object found in parameters of function "
"authentication issues in Kubernetes clusters with " "decorated by lock_and_thread, this might create "
"external auth backend") "authentication issues in Kubernetes clusters with "
"external auth backend")
with Lock(lock_name, bearer_token=bearer_token) as lock: with Lock(lock_name, bearer_token=bearer_token) as lock:
pool = ThreadPoolExecutor(1) pool = ThreadPoolExecutor(1)
@ -84,7 +86,6 @@ def lock_and_thread(lock_name="lock"):
class Lock: class Lock:
def __init__(self, lock_name, bearer_token=None, additional_data=None): def __init__(self, lock_name, bearer_token=None, additional_data=None):
"""Creates a lock with the specified name and data. When a lock with """Creates a lock with the specified name and data. When a lock with
that name already exists then this will continuously attempt to acquire that name already exists then this will continuously attempt to acquire
@ -137,8 +138,9 @@ class Lock:
return True return True
except ApiException as err: except ApiException as err:
if err.status == 404: if err.status == 404:
LOG.info("Lock Custom Resource Definition not found, " LOG.info(
"creating now") "Lock Custom Resource Definition not found, "
"creating now")
self.lock_config.create_definition() self.lock_config.create_definition()
continue continue
elif err.status == 409: elif err.status == 409:
@ -157,8 +159,9 @@ class Lock:
# of the lock exceeds the expire time in order to avoid # of the lock exceeds the expire time in order to avoid
# removing another thread's lock while it is still working # removing another thread's lock while it is still working
if self.lock_age() > timedelta(seconds=self.expire_time): if self.lock_age() > timedelta(seconds=self.expire_time):
LOG.info("Lock has exceeded expiry time, removing so" LOG.info(
"processing can continue") "Lock has exceeded expiry time, removing so"
"processing can continue")
self.release_lock() self.release_lock()
continue continue
LOG.debug("Sleeping before attempting to acquire lock again") LOG.debug("Sleeping before attempting to acquire lock again")
@ -183,7 +186,6 @@ class Lock:
class LockConfig: class LockConfig:
def __init__(self, name, bearer_token=None, additional_data=None): def __init__(self, name, bearer_token=None, additional_data=None):
self.name = name self.name = name
data = additional_data or dict() data = additional_data or dict()
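For readers following the locking changes above: `lock_and_thread` acquires the CRD-backed Lock (passing a bearer token along when a Tiller object is found in the call arguments) and runs the wrapped call in a single worker thread while holding the lock. A minimal usage sketch, where the import path, the lock name and the handler function are assumptions for illustration only:

# Illustrative sketch only: import path, lock name and handler are assumptions.
from armada.handlers.lock import lock_and_thread

@lock_and_thread("update")
def apply_documents(tiller, documents):
    # Body runs while holding the "update" lock, inside the decorator's
    # single-thread ThreadPoolExecutor; `tiller` supplies the bearer token.
    return len(documents)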


@ -23,7 +23,6 @@ LOG = logging.getLogger(__name__)
class Manifest(object): class Manifest(object):
def __init__(self, documents, target_manifest=None): def __init__(self, documents, target_manifest=None):
"""Instantiates a Manifest object. """Instantiates a Manifest object.
@ -54,9 +53,10 @@ class Manifest(object):
target_manifest) target_manifest)
if len(manifests) > 1: if len(manifests) > 1:
error = ('Multiple manifests are not supported. Ensure that the ' error = (
'`target_manifest` option is set to specify the target ' 'Multiple manifests are not supported. Ensure that the '
'manifest') '`target_manifest` option is set to specify the target '
'manifest')
LOG.error(error) LOG.error(error)
raise exceptions.ManifestException(details=error) raise exceptions.ManifestException(details=error)
else: else:
@ -64,9 +64,10 @@ class Manifest(object):
if not all([self.charts, self.groups, self.manifest]): if not all([self.charts, self.groups, self.manifest]):
expected_schemas = [schema.TYPE_CHART, schema.TYPE_CHARTGROUP] expected_schemas = [schema.TYPE_CHART, schema.TYPE_CHARTGROUP]
error = ('Documents must include at least one of each of {} ' error = (
'and only one {}').format(expected_schemas, 'Documents must include at least one of each of {} '
schema.TYPE_MANIFEST) 'and only one {}').format(
expected_schemas, schema.TYPE_MANIFEST)
LOG.error(error) LOG.error(error)
raise exceptions.ManifestException(details=error) raise exceptions.ManifestException(details=error)
@ -147,8 +148,8 @@ class Manifest(object):
under ``chart['data']['dependencies']`` could not be found. under ``chart['data']['dependencies']`` could not be found.
""" """
try: try:
chart_dependencies = chart.get(const.KEYWORD_DATA, {}).get( chart_dependencies = chart.get(const.KEYWORD_DATA,
'dependencies', []) {}).get('dependencies', [])
for iter, dep in enumerate(chart_dependencies): for iter, dep in enumerate(chart_dependencies):
if isinstance(dep, dict): if isinstance(dep, dict):
continue continue
@ -175,9 +176,8 @@ class Manifest(object):
""" """
try: try:
chart = None chart = None
for iter, chart in enumerate( for iter, chart in enumerate(chart_group.get(
chart_group.get(const.KEYWORD_DATA).get( const.KEYWORD_DATA).get(const.KEYWORD_CHARTS, [])):
const.KEYWORD_CHARTS, [])):
if isinstance(chart, dict): if isinstance(chart, dict):
continue continue
chart_object = self.find_chart_document(chart) chart_object = self.find_chart_document(chart)
@ -201,9 +201,8 @@ class Manifest(object):
:raises ManifestException: If a chart group's data listed :raises ManifestException: If a chart group's data listed
under ``chart_group[const.KEYWORD_DATA]`` could not be found. under ``chart_group[const.KEYWORD_DATA]`` could not be found.
""" """
for iter, group in enumerate( for iter, group in enumerate(self.manifest.get(
self.manifest.get(const.KEYWORD_DATA, {}).get( const.KEYWORD_DATA, {}).get(const.KEYWORD_GROUPS, [])):
const.KEYWORD_GROUPS, [])):
if isinstance(group, dict): if isinstance(group, dict):
continue continue
chart_grp = self.find_chart_group_document(group) chart_grp = self.find_chart_group_document(group)


@ -14,6 +14,7 @@
import collections import collections
import json import json
import yaml import yaml
from armada.exceptions import override_exceptions from armada.exceptions import override_exceptions
@ -23,7 +24,6 @@ from armada.utils import validate
class Override(object): class Override(object):
def __init__(self, documents, overrides=None, values=None): def __init__(self, documents, overrides=None, values=None):
self.documents = documents self.documents = documents
self.overrides = overrides self.overrides = overrides


@ -60,10 +60,10 @@ class ReleaseDiff(object):
:rtype: dict :rtype: dict
''' '''
old_input = self.make_release_input(self.old_chart, self.old_values, old_input = self.make_release_input(
'previously deployed') self.old_chart, self.old_values, 'previously deployed')
new_input = self.make_release_input(self.new_chart, self.new_values, new_input = self.make_release_input(
'currently being deployed') self.new_chart, self.new_values, 'currently being deployed')
return DeepDiff(old_input, new_input, view='tree') return DeepDiff(old_input, new_input, view='tree')


@ -13,8 +13,9 @@
# limitations under the License. # limitations under the License.
import os import os
import pkg_resources
import re import re
import pkg_resources
import yaml import yaml
# Types # Types
@ -34,7 +35,6 @@ _SCHEMAS = {}
class SchemaInfo(object): class SchemaInfo(object):
def __init__(self, type, version, data): def __init__(self, type, version, data):
self.type = type self.type = type
self.version = version self.version = version


@ -15,7 +15,6 @@
from oslo_log import log as logging from oslo_log import log as logging
from armada import const from armada import const
from armada.handlers.wait import get_wait_labels from armada.handlers.wait import get_wait_labels
from armada.utils.release import label_selectors from armada.utils.release import label_selectors
from armada.utils.helm import get_test_suite_run_success, is_test_pod from armada.utils.helm import get_test_suite_run_success, is_test_pod
@ -24,14 +23,14 @@ LOG = logging.getLogger(__name__)
class Test(object): class Test(object):
def __init__(
def __init__(self, self,
chart, chart,
release_name, release_name,
tiller, tiller,
cg_test_charts=None, cg_test_charts=None,
cleanup=None, cleanup=None,
enable_all=False): enable_all=False):
"""Initialize a test handler to run Helm tests corresponding to a """Initialize a test handler to run Helm tests corresponding to a
release. release.
@ -62,16 +61,18 @@ class Test(object):
# TODO: Remove when v1 doc support is removed. # TODO: Remove when v1 doc support is removed.
if cg_test_charts is not None: if cg_test_charts is not None:
LOG.warn('Chart group key `test_charts` is deprecated and will be ' LOG.warn(
'removed. Use `test.enabled` instead.') 'Chart group key `test_charts` is deprecated and will be '
'removed. Use `test.enabled` instead.')
self.test_enabled = cg_test_charts self.test_enabled = cg_test_charts
else: else:
self.test_enabled = True self.test_enabled = True
# TODO: Remove when v1 doc support is removed. # TODO: Remove when v1 doc support is removed.
if (type(test_values) == bool): if (type(test_values) == bool):
LOG.warn('Boolean value for chart `test` key is deprecated and ' LOG.warn(
'will be removed. Use `test.enabled` instead.') 'Boolean value for chart `test` key is deprecated and '
'will be removed. Use `test.enabled` instead.')
self.test_enabled = test_values self.test_enabled = test_values
@ -107,14 +108,16 @@ class Test(object):
:return: Helm test suite run result :return: Helm test suite run result
""" """
LOG.info('RUNNING: %s tests with timeout=%ds', self.release_name, LOG.info(
self.timeout) 'RUNNING: %s tests with timeout=%ds', self.release_name,
self.timeout)
try: try:
self.delete_test_pods() self.delete_test_pods()
except Exception: except Exception:
LOG.exception("Exception when deleting test pods for release: %s", LOG.exception(
self.release_name) "Exception when deleting test pods for release: %s",
self.release_name)
test_suite_run = self.tiller.test_release( test_suite_run = self.tiller.test_release(
self.release_name, timeout=self.timeout, cleanup=self.cleanup) self.release_name, timeout=self.timeout, cleanup=self.cleanup)


@ -13,8 +13,6 @@
# limitations under the License. # limitations under the License.
import grpc import grpc
import yaml
from hapi.chart.config_pb2 import Config from hapi.chart.config_pb2 import Config
from hapi.services.tiller_pb2 import GetReleaseContentRequest from hapi.services.tiller_pb2 import GetReleaseContentRequest
from hapi.services.tiller_pb2 import GetReleaseStatusRequest from hapi.services.tiller_pb2 import GetReleaseStatusRequest
@ -28,6 +26,7 @@ from hapi.services.tiller_pb2 import UninstallReleaseRequest
from hapi.services.tiller_pb2 import UpdateReleaseRequest from hapi.services.tiller_pb2 import UpdateReleaseRequest
from oslo_config import cfg from oslo_config import cfg
from oslo_log import log as logging from oslo_log import log as logging
import yaml
from armada import const from armada import const
from armada.conf import get_current_chart from armada.conf import get_current_chart
@ -52,10 +51,10 @@ LOG = logging.getLogger(__name__)
class CommonEqualityMixin(object): class CommonEqualityMixin(object):
def __eq__(self, other): def __eq__(self, other):
return (isinstance(other, self.__class__) and return (
self.__dict__ == other.__dict__) isinstance(other, self.__class__)
and self.__dict__ == other.__dict__)
def __ne__(self, other): def __ne__(self, other):
return not self.__eq__(other) return not self.__eq__(other)
@ -78,12 +77,13 @@ class Tiller(object):
service over gRPC service over gRPC
''' '''
def __init__(self, def __init__(
tiller_host=None, self,
tiller_port=None, tiller_host=None,
tiller_namespace=None, tiller_port=None,
bearer_token=None, tiller_namespace=None,
dry_run=None): bearer_token=None,
dry_run=None):
self.tiller_host = tiller_host or CONF.tiller_host self.tiller_host = tiller_host or CONF.tiller_host
self.tiller_port = tiller_port or CONF.tiller_port self.tiller_port = tiller_port or CONF.tiller_port
self.tiller_namespace = tiller_namespace or CONF.tiller_namespace self.tiller_namespace = tiller_namespace or CONF.tiller_namespace
@ -101,9 +101,10 @@ class Tiller(object):
# be fed at runtime as an override # be fed at runtime as an override
self.timeout = const.DEFAULT_TILLER_TIMEOUT self.timeout = const.DEFAULT_TILLER_TIMEOUT
LOG.debug('Armada is using Tiller at: %s:%s, namespace=%s, timeout=%s', LOG.debug(
self.tiller_host, self.tiller_port, self.tiller_namespace, 'Armada is using Tiller at: %s:%s, namespace=%s, timeout=%s',
self.timeout) self.tiller_host, self.tiller_port, self.tiller_namespace,
self.timeout)
@property @property
def metadata(self): def metadata(self):
@ -126,9 +127,10 @@ class Tiller(object):
MAX_MESSAGE_LENGTH, MAX_MESSAGE_LENGTH) MAX_MESSAGE_LENGTH, MAX_MESSAGE_LENGTH)
return grpc.insecure_channel( return grpc.insecure_channel(
'%s:%s' % (tiller_ip, tiller_port), '%s:%s' % (tiller_ip, tiller_port),
options=[('grpc.max_send_message_length', MAX_MESSAGE_LENGTH), options=[
('grpc.max_receive_message_length', ('grpc.max_send_message_length', MAX_MESSAGE_LENGTH),
MAX_MESSAGE_LENGTH)]) ('grpc.max_receive_message_length', MAX_MESSAGE_LENGTH)
])
except Exception: except Exception:
LOG.exception('Failed to initialize grpc channel to tiller.') LOG.exception('Failed to initialize grpc channel to tiller.')
raise ex.ChannelException() raise ex.ChannelException()
@ -208,8 +210,9 @@ class Tiller(object):
limit=LIST_RELEASES_PAGE_SIZE, limit=LIST_RELEASES_PAGE_SIZE,
status_codes=const.STATUS_ALL) status_codes=const.STATUS_ALL)
LOG.debug('Tiller ListReleases() with timeout=%s, request=%s', LOG.debug(
self.timeout, req) 'Tiller ListReleases() with timeout=%s, request=%s',
self.timeout, req)
response = stub.ListReleases( response = stub.ListReleases(
req, self.timeout, metadata=self.metadata) req, self.timeout, metadata=self.metadata)
@ -248,8 +251,9 @@ class Tiller(object):
try: try:
releases = get_results() releases = get_results()
except ex.TillerListReleasesPagingException: except ex.TillerListReleasesPagingException:
LOG.warning('List releases paging failed on attempt %s/%s', LOG.warning(
attempt, LIST_RELEASES_ATTEMPTS) 'List releases paging failed on attempt %s/%s', attempt,
LIST_RELEASES_ATTEMPTS)
if attempt == LIST_RELEASES_ATTEMPTS: if attempt == LIST_RELEASES_ATTEMPTS:
raise raise
else: else:
@ -267,14 +271,16 @@ class Tiller(object):
latest_releases = [] latest_releases = []
for r in releases: for r in releases:
if latest_versions[r.name] == r.version: if latest_versions[r.name] == r.version:
LOG.debug('Found release %s, version %s, status: %s', LOG.debug(
r.name, r.version, get_release_status(r)) 'Found release %s, version %s, status: %s', r.name,
r.version, get_release_status(r))
latest_releases.append(r) latest_releases.append(r)
return latest_releases return latest_releases
def get_chart_templates(self, template_name, name, release_name, namespace, def get_chart_templates(
chart, disable_hooks, values): self, template_name, name, release_name, namespace, chart,
disable_hooks, values):
# returns some info # returns some info
LOG.info("Template( %s ) : %s ", template_name, name) LOG.info("Template( %s ) : %s ", template_name, name)
@ -291,15 +297,16 @@ class Tiller(object):
templates = stub.InstallRelease( templates = stub.InstallRelease(
release_request, self.timeout, metadata=self.metadata) release_request, self.timeout, metadata=self.metadata)
for template in yaml.load_all( for template in yaml.load_all(getattr(templates.release, 'manifest',
getattr(templates.release, 'manifest', [])): [])):
if template_name == template.get('metadata', None).get( if template_name == template.get('metadata', None).get('name',
'name', None): None):
LOG.info(template_name) LOG.info(template_name)
return template return template
def _pre_update_actions(self, actions, release_name, namespace, chart, def _pre_update_actions(
disable_hooks, values, timeout): self, actions, release_name, namespace, chart, disable_hooks,
values, timeout):
''' '''
:param actions: array of items actions :param actions: array of items actions
:param namespace: name of pod for actions :param namespace: name of pod for actions
@ -353,29 +360,32 @@ class Tiller(object):
charts = [] charts = []
for latest_release in self.list_releases(): for latest_release in self.list_releases():
try: try:
release = (latest_release.name, latest_release.version, release = (
latest_release.chart, latest_release.config.raw, latest_release.name, latest_release.version,
latest_release.info.status.Code.Name( latest_release.chart, latest_release.config.raw,
latest_release.info.status.code)) latest_release.info.status.Code.Name(
latest_release.info.status.code))
charts.append(release) charts.append(release)
except (AttributeError, IndexError) as e: except (AttributeError, IndexError) as e:
LOG.debug('%s while getting releases: %s, ex=%s', LOG.debug(
e.__class__.__name__, latest_release, e) '%s while getting releases: %s, ex=%s',
e.__class__.__name__, latest_release, e)
continue continue
return charts return charts
def update_release(self, def update_release(
chart, self,
release, chart,
namespace, release,
pre_actions=None, namespace,
post_actions=None, pre_actions=None,
disable_hooks=False, post_actions=None,
values=None, disable_hooks=False,
wait=False, values=None,
timeout=None, wait=False,
force=False, timeout=None,
recreate_pods=False): force=False,
recreate_pods=False):
''' '''
Update a Helm Release Update a Helm Release
''' '''
@ -391,8 +401,9 @@ class Tiller(object):
else: else:
values = Config(raw=values) values = Config(raw=values)
self._pre_update_actions(pre_actions, release, namespace, chart, self._pre_update_actions(
disable_hooks, values, timeout) pre_actions, release, namespace, chart, disable_hooks, values,
timeout)
update_msg = None update_msg = None
# build release install request # build release install request
@ -427,20 +438,17 @@ class Tiller(object):
return tiller_result return tiller_result
def install_release(self, def install_release(
chart, self, chart, release, namespace, values=None, wait=False,
release, timeout=None):
namespace,
values=None,
wait=False,
timeout=None):
''' '''
Create a Helm Release Create a Helm Release
''' '''
timeout = self._check_timeout(wait, timeout) timeout = self._check_timeout(wait, timeout)
LOG.info('Helm install release%s: wait=%s, timeout=%s', LOG.info(
(' (dry run)' if self.dry_run else ''), wait, timeout) 'Helm install release%s: wait=%s, timeout=%s',
(' (dry run)' if self.dry_run else ''), wait, timeout)
if values is None: if values is None:
values = Config(raw='') values = Config(raw='')
@ -477,10 +485,9 @@ class Tiller(object):
status = self.get_release_status(release) status = self.get_release_status(release)
raise ex.ReleaseException(release, status, 'Install') raise ex.ReleaseException(release, status, 'Install')
def test_release(self, def test_release(
release, self, release, timeout=const.DEFAULT_TILLER_TIMEOUT,
timeout=const.DEFAULT_TILLER_TIMEOUT, cleanup=False):
cleanup=False):
''' '''
:param release: name of release to test :param release: name of release to test
:param timeout: runtime before exiting :param timeout: runtime before exiting
@ -527,8 +534,9 @@ class Tiller(object):
:param version: version of release status :param version: version of release status
''' '''
LOG.debug('Helm getting release status for release=%s, version=%s', LOG.debug(
release, version) 'Helm getting release status for release=%s, version=%s', release,
version)
try: try:
stub = ReleaseServiceStub(self.channel) stub = ReleaseServiceStub(self.channel)
status_request = GetReleaseStatusRequest( status_request = GetReleaseStatusRequest(
@ -549,8 +557,9 @@ class Tiller(object):
:param version: version of release status :param version: version of release status
''' '''
LOG.debug('Helm getting release content for release=%s, version=%s', LOG.debug(
release, version) 'Helm getting release content for release=%s, version=%s', release,
version)
try: try:
stub = ReleaseServiceStub(self.channel) stub = ReleaseServiceStub(self.channel)
status_request = GetReleaseContentRequest( status_request = GetReleaseContentRequest(
@ -585,11 +594,8 @@ class Tiller(object):
LOG.exception('Failed to get Tiller version.') LOG.exception('Failed to get Tiller version.')
raise ex.TillerVersionException() raise ex.TillerVersionException()
def uninstall_release(self, def uninstall_release(
release, self, release, disable_hooks=False, purge=True, timeout=None):
disable_hooks=False,
purge=True,
timeout=None):
''' '''
:param: release - Helm chart release name :param: release - Helm chart release name
:param: purge - deep delete of chart :param: purge - deep delete of chart
@ -628,12 +634,13 @@ class Tiller(object):
status = self.get_release_status(release) status = self.get_release_status(release)
raise ex.ReleaseException(release, status, 'Delete') raise ex.ReleaseException(release, status, 'Delete')
def delete_resources(self, def delete_resources(
resource_type, self,
resource_labels, resource_type,
namespace, resource_labels,
wait=False, namespace,
timeout=const.DEFAULT_TILLER_TIMEOUT): wait=False,
timeout=const.DEFAULT_TILLER_TIMEOUT):
''' '''
Delete resources matching provided resource type, labels, and Delete resources matching provided resource type, labels, and
namespace. namespace.
@ -665,8 +672,8 @@ class Tiller(object):
namespace) namespace)
continue continue
LOG.info("Deleting job %s in namespace: %s", jb_name, LOG.info(
namespace) "Deleting job %s in namespace: %s", jb_name, namespace)
self.k8s.delete_job_action(jb_name, namespace, timeout=timeout) self.k8s.delete_job_action(jb_name, namespace, timeout=timeout)
handled = True handled = True
@ -684,8 +691,9 @@ class Tiller(object):
# TODO: Remove when v1 doc support is removed. # TODO: Remove when v1 doc support is removed.
if implied_cronjob: if implied_cronjob:
LOG.warn("Deleting cronjobs via `type: job` is " LOG.warn(
"deprecated, use `type: cronjob` instead") "Deleting cronjobs via `type: job` is "
"deprecated, use `type: cronjob` instead")
if self.dry_run: if self.dry_run:
LOG.info( LOG.info(
@ -694,8 +702,8 @@ class Tiller(object):
namespace) namespace)
continue continue
LOG.info("Deleting cronjob %s in namespace: %s", jb_name, LOG.info(
namespace) "Deleting cronjob %s in namespace: %s", jb_name, namespace)
self.k8s.delete_cron_job_action(jb_name, namespace) self.k8s.delete_cron_job_action(jb_name, namespace)
handled = True handled = True
@ -712,27 +720,29 @@ class Tiller(object):
namespace) namespace)
continue continue
LOG.info("Deleting pod %s in namespace: %s", pod_name, LOG.info(
namespace) "Deleting pod %s in namespace: %s", pod_name, namespace)
self.k8s.delete_pod_action(pod_name, namespace) self.k8s.delete_pod_action(pod_name, namespace)
if wait: if wait:
self.k8s.wait_for_pod_redeployment(pod_name, namespace) self.k8s.wait_for_pod_redeployment(pod_name, namespace)
handled = True handled = True
if not handled: if not handled:
LOG.error('No resources found with labels=%s type=%s namespace=%s', LOG.error(
resource_labels, resource_type, namespace) 'No resources found with labels=%s type=%s namespace=%s',
resource_labels, resource_type, namespace)
def rolling_upgrade_pod_deployment(self, def rolling_upgrade_pod_deployment(
name, self,
release_name, name,
namespace, release_name,
resource_labels, namespace,
action_type, resource_labels,
chart, action_type,
disable_hooks, chart,
values, disable_hooks,
timeout=const.DEFAULT_TILLER_TIMEOUT): values,
timeout=const.DEFAULT_TILLER_TIMEOUT):
''' '''
update statefulsets (daemon, stateful) update statefulsets (daemon, stateful)
''' '''
@ -753,8 +763,9 @@ class Tiller(object):
ds_name = ds.metadata.name ds_name = ds.metadata.name
ds_labels = ds.metadata.labels ds_labels = ds.metadata.labels
if ds_name == name: if ds_name == name:
LOG.info("Deleting %s : %s in %s", action_type, ds_name, LOG.info(
namespace) "Deleting %s : %s in %s", action_type, ds_name,
namespace)
self.k8s.delete_daemon_action(ds_name, namespace) self.k8s.delete_daemon_action(ds_name, namespace)
# update the daemonset yaml # update the daemonset yaml
@ -779,13 +790,14 @@ class Tiller(object):
else: else:
LOG.error("Unable to exectue name: % type: %s", name, action_type) LOG.error("Unable to exectue name: % type: %s", name, action_type)
def rollback_release(self, def rollback_release(
release_name, self,
version, release_name,
wait=False, version,
timeout=None, wait=False,
force=False, timeout=None,
recreate_pods=False): force=False,
recreate_pods=False):
''' '''
Rollback a helm release. Rollback a helm release.
''' '''


@ -43,9 +43,9 @@ def get_wait_labels(chart):
# TODO: Validate this object up front in armada validate flow. # TODO: Validate this object up front in armada validate flow.
class ChartWait(): class ChartWait():
def __init__(
def __init__(self, k8s, release_name, chart, namespace, k8s_wait_attempts, self, k8s, release_name, chart, namespace, k8s_wait_attempts,
k8s_wait_attempt_sleep, timeout): k8s_wait_attempt_sleep, timeout):
self.k8s = k8s self.k8s = k8s
self.release_name = release_name self.release_name = release_name
self.chart = chart self.chart = chart
@ -65,12 +65,14 @@ class ChartWait():
else: else:
# TODO: Remove when v1 doc support is removed. # TODO: Remove when v1 doc support is removed.
if schema_info.version < 2: if schema_info.version < 2:
resources_list = [{ resources_list = [
'type': 'job', {
'required': False 'type': 'job',
}, { 'required': False
'type': 'pod' }, {
}] 'type': 'pod'
}
]
else: else:
resources_list = self.get_resources_list(resources) resources_list = self.get_resources_list(resources)
@ -96,15 +98,17 @@ class ChartWait():
# TODO: Remove when v1 doc support is removed. # TODO: Remove when v1 doc support is removed.
deprecated_timeout = self.chart_data.get('timeout') deprecated_timeout = self.chart_data.get('timeout')
if deprecated_timeout is not None: if deprecated_timeout is not None:
LOG.warn('The `timeout` key is deprecated and support ' LOG.warn(
'for this will be removed soon. Use ' 'The `timeout` key is deprecated and support '
'`wait.timeout` instead.') 'for this will be removed soon. Use '
'`wait.timeout` instead.')
if wait_timeout is None: if wait_timeout is None:
wait_timeout = deprecated_timeout wait_timeout = deprecated_timeout
if wait_timeout is None: if wait_timeout is None:
LOG.info('No Chart timeout specified, using default: %ss', LOG.info(
const.DEFAULT_CHART_TIMEOUT) 'No Chart timeout specified, using default: %ss',
const.DEFAULT_CHART_TIMEOUT)
wait_timeout = const.DEFAULT_CHART_TIMEOUT wait_timeout = const.DEFAULT_CHART_TIMEOUT
self.timeout = wait_timeout self.timeout = wait_timeout
@ -206,13 +210,9 @@ class ChartWait():
class ResourceWait(ABC): class ResourceWait(ABC):
def __init__(
def __init__(self, self, resource_type, chart_wait, labels, get_resources,
resource_type, required=True):
chart_wait,
labels,
get_resources,
required=True):
self.resource_type = resource_type self.resource_type = resource_type
self.chart_wait = chart_wait self.chart_wait = chart_wait
self.label_selector = label_selectors(labels) self.label_selector = label_selectors(labels)
@ -241,8 +241,9 @@ class ResourceWait(ABC):
exclude_reason = self.get_exclude_reason(resource) exclude_reason = self.get_exclude_reason(resource)
if exclude_reason: if exclude_reason:
LOG.debug('Excluding %s %s from wait: %s', self.resource_type, LOG.debug(
resource.metadata.name, exclude_reason) 'Excluding %s %s from wait: %s', self.resource_type,
resource.metadata.name, exclude_reason)
return not exclude_reason return not exclude_reason
@ -276,8 +277,9 @@ class ResourceWait(ABC):
self.chart_wait.namespace, self.label_selector, self.required, self.chart_wait.namespace, self.label_selector, self.required,
min_ready_msg, timeout) min_ready_msg, timeout)
if not self.label_selector: if not self.label_selector:
LOG.warn('"label_selector" not specified, waiting with no labels ' LOG.warn(
'may cause unintended consequences.') '"label_selector" not specified, waiting with no labels '
'may cause unintended consequences.')
# Track the overall deadline for timing out during waits # Track the overall deadline for timing out during waits
deadline = time.time() + timeout deadline = time.time() + timeout
@ -319,10 +321,11 @@ class ResourceWait(ABC):
deadline_remaining = int(round(deadline - time.time())) deadline_remaining = int(round(deadline - time.time()))
if deadline_remaining <= 0: if deadline_remaining <= 0:
error = ("Timed out waiting for resource type={}, namespace={}, " error = (
"labels={}".format(self.resource_type, "Timed out waiting for resource type={}, namespace={}, "
self.chart_wait.namespace, "labels={}".format(
self.label_selector)) self.resource_type, self.chart_wait.namespace,
self.label_selector))
LOG.error(error) LOG.error(error)
raise k8s_exceptions.KubernetesWatchTimeoutException(error) raise k8s_exceptions.KubernetesWatchTimeoutException(error)
@ -339,12 +342,14 @@ class ResourceWait(ABC):
'`wait.resources` need to exclude `type: {}`?'.format( '`wait.resources` need to exclude `type: {}`?'.format(
self.resource_type)) self.resource_type))
else: else:
details = ('These {}s were not ready={}'.format( details = (
self.resource_type, sorted(unready))) 'These {}s were not ready={}'.format(
self.resource_type, sorted(unready)))
error = ( error = (
'Timed out waiting for {}s (namespace={}, labels=({})). {}'. 'Timed out waiting for {}s (namespace={}, labels=({})). {}'.
format(self.resource_type, self.chart_wait.namespace, format(
self.label_selector, details)) self.resource_type, self.chart_wait.namespace,
self.label_selector, details))
LOG.error(error) LOG.error(error)
raise k8s_exceptions.KubernetesWatchTimeoutException(error) raise k8s_exceptions.KubernetesWatchTimeoutException(error)
@ -399,10 +404,12 @@ class ResourceWait(ABC):
if not self.include_resource(resource): if not self.include_resource(resource):
continue continue
msg = ('Watch event: type=%s, name=%s, namespace=%s,' msg = (
'resource_version=%s') 'Watch event: type=%s, name=%s, namespace=%s,'
LOG.debug(msg, event_type, resource_name, 'resource_version=%s')
self.chart_wait.namespace, resource_version) LOG.debug(
msg, event_type, resource_name, self.chart_wait.namespace,
resource_version)
if event_type in {'ADDED', 'MODIFIED'}: if event_type in {'ADDED', 'MODIFIED'}:
found_resources = True found_resources = True
@ -417,25 +424,29 @@ class ResourceWait(ABC):
ready.pop(resource_name) ready.pop(resource_name)
elif event_type == 'ERROR': elif event_type == 'ERROR':
LOG.error('Resource %s: Got error event %s', resource_name, LOG.error(
event['object'].to_dict()) 'Resource %s: Got error event %s', resource_name,
event['object'].to_dict())
raise k8s_exceptions.KubernetesErrorEventException( raise k8s_exceptions.KubernetesErrorEventException(
'Got error event for resource: %s' % event['object']) 'Got error event for resource: %s' % event['object'])
else: else:
LOG.error('Unrecognized event type (%s) for resource: %s', LOG.error(
event_type, event['object']) 'Unrecognized event type (%s) for resource: %s',
raise (k8s_exceptions. event_type, event['object'])
KubernetesUnknownStreamingEventTypeException( raise (
'Got unknown event type (%s) for resource: %s' % k8s_exceptions.
(event_type, event['object']))) KubernetesUnknownStreamingEventTypeException(
'Got unknown event type (%s) for resource: %s' %
(event_type, event['object'])))
if all(ready.values()): if all(ready.values()):
return (False, modified, [], found_resources) return (False, modified, [], found_resources)
return (True, modified, return (
[name for name, is_ready in ready.items() if not is_ready], True, modified,
found_resources) [name for name, is_ready in ready.items()
if not is_ready], found_resources)
def _get_resource_condition(self, resource_conditions, condition_type): def _get_resource_condition(self, resource_conditions, condition_type):
for pc in resource_conditions: for pc in resource_conditions:
@ -444,7 +455,6 @@ class ResourceWait(ABC):
class PodWait(ResourceWait): class PodWait(ResourceWait):
def __init__(self, resource_type, chart_wait, labels, **kwargs): def __init__(self, resource_type, chart_wait, labels, **kwargs):
super(PodWait, self).__init__( super(PodWait, self).__init__(
resource_type, chart_wait, labels, resource_type, chart_wait, labels,
@ -494,7 +504,6 @@ class PodWait(ResourceWait):
class JobWait(ResourceWait): class JobWait(ResourceWait):
def __init__(self, resource_type, chart_wait, labels, **kwargs): def __init__(self, resource_type, chart_wait, labels, **kwargs):
super(JobWait, self).__init__( super(JobWait, self).__init__(
resource_type, chart_wait, labels, resource_type, chart_wait, labels,
@ -533,8 +542,8 @@ def has_owner(resource, kind=None):
return False return False
CountOrPercent = collections.namedtuple('CountOrPercent', CountOrPercent = collections.namedtuple(
'number is_percent source') 'CountOrPercent', 'number is_percent source')
# Controller logic (Deployment, DaemonSet, StatefulSet) is adapted from # Controller logic (Deployment, DaemonSet, StatefulSet) is adapted from
# `kubectl rollout status`: # `kubectl rollout status`:
@ -542,16 +551,16 @@ CountOrPercent = collections.namedtuple('CountOrPercent',
class ControllerWait(ResourceWait): class ControllerWait(ResourceWait):
def __init__(
def __init__(self, self,
resource_type, resource_type,
chart_wait, chart_wait,
labels, labels,
get_resources, get_resources,
min_ready="100%", min_ready="100%",
**kwargs): **kwargs):
super(ControllerWait, self).__init__(resource_type, chart_wait, labels, super(ControllerWait, self).__init__(
get_resources, **kwargs) resource_type, chart_wait, labels, get_resources, **kwargs)
if isinstance(min_ready, str): if isinstance(min_ready, str):
match = re.match('(.*)%$', min_ready) match = re.match('(.*)%$', min_ready)
@ -578,7 +587,6 @@ class ControllerWait(ResourceWait):
class DeploymentWait(ControllerWait): class DeploymentWait(ControllerWait):
def __init__(self, resource_type, chart_wait, labels, **kwargs): def __init__(self, resource_type, chart_wait, labels, **kwargs):
super(DeploymentWait, self).__init__( super(DeploymentWait, self).__init__(
resource_type, chart_wait, labels, resource_type, chart_wait, labels,
@ -596,8 +604,8 @@ class DeploymentWait(ControllerWait):
# TODO: Don't fail for lack of progress if `min_ready` is met. # TODO: Don't fail for lack of progress if `min_ready` is met.
# TODO: Consider continuing after `min_ready` is met, so long as # TODO: Consider continuing after `min_ready` is met, so long as
# progress is being made. # progress is being made.
cond = self._get_resource_condition(status.conditions, cond = self._get_resource_condition(
'Progressing') status.conditions, 'Progressing')
if cond and (cond.reason or '') == 'ProgressDeadlineExceeded': if cond and (cond.reason or '') == 'ProgressDeadlineExceeded':
msg = "deployment {} exceeded its progress deadline" msg = "deployment {} exceeded its progress deadline"
return (msg.format(name), False) return (msg.format(name), False)
@ -606,21 +614,26 @@ class DeploymentWait(ControllerWait):
updated_replicas = status.updated_replicas or 0 updated_replicas = status.updated_replicas or 0
available_replicas = status.available_replicas or 0 available_replicas = status.available_replicas or 0
if updated_replicas < replicas: if updated_replicas < replicas:
msg = ("Waiting for deployment {} rollout to finish: {} out " msg = (
"of {} new replicas have been updated...") "Waiting for deployment {} rollout to finish: {} out "
"of {} new replicas have been updated...")
return (msg.format(name, updated_replicas, replicas), False) return (msg.format(name, updated_replicas, replicas), False)
if replicas > updated_replicas: if replicas > updated_replicas:
msg = ("Waiting for deployment {} rollout to finish: {} old " msg = (
"replicas are pending termination...") "Waiting for deployment {} rollout to finish: {} old "
"replicas are pending termination...")
pending = replicas - updated_replicas pending = replicas - updated_replicas
return (msg.format(name, pending), False) return (msg.format(name, pending), False)
if not self._is_min_ready(available_replicas, updated_replicas): if not self._is_min_ready(available_replicas, updated_replicas):
msg = ("Waiting for deployment {} rollout to finish: {} of {} " msg = (
"updated replicas are available, with min_ready={}") "Waiting for deployment {} rollout to finish: {} of {} "
return (msg.format(name, available_replicas, updated_replicas, "updated replicas are available, with min_ready={}")
self.min_ready.source), False) return (
msg.format(
name, available_replicas, updated_replicas,
self.min_ready.source), False)
msg = "deployment {} successfully rolled out\n" msg = "deployment {} successfully rolled out\n"
return (msg.format(name), True) return (msg.format(name), True)
@ -629,13 +642,13 @@ class DeploymentWait(ControllerWait):
class DaemonSetWait(ControllerWait): class DaemonSetWait(ControllerWait):
def __init__(
def __init__(self, self,
resource_type, resource_type,
chart_wait, chart_wait,
labels, labels,
allow_async_updates=False, allow_async_updates=False,
**kwargs): **kwargs):
super(DaemonSetWait, self).__init__( super(DaemonSetWait, self).__init__(
resource_type, chart_wait, labels, resource_type, chart_wait, labels,
chart_wait.k8s.apps_v1_api.list_namespaced_daemon_set, **kwargs) chart_wait.k8s.apps_v1_api.list_namespaced_daemon_set, **kwargs)
@ -667,18 +680,23 @@ class DaemonSetWait(ControllerWait):
number_available = status.number_available or 0 number_available = status.number_available or 0
if (updated_number_scheduled < desired_number_scheduled): if (updated_number_scheduled < desired_number_scheduled):
msg = ("Waiting for daemon set {} rollout to finish: {} out " msg = (
"of {} new pods have been updated...") "Waiting for daemon set {} rollout to finish: {} out "
return (msg.format(name, updated_number_scheduled, "of {} new pods have been updated...")
desired_number_scheduled), False) return (
msg.format(
name, updated_number_scheduled,
desired_number_scheduled), False)
if not self._is_min_ready(number_available, if not self._is_min_ready(number_available,
desired_number_scheduled): desired_number_scheduled):
msg = ("Waiting for daemon set {} rollout to finish: {} of {} " msg = (
"updated pods are available, with min_ready={}") "Waiting for daemon set {} rollout to finish: {} of {} "
return (msg.format(name, number_available, "updated pods are available, with min_ready={}")
desired_number_scheduled, return (
self.min_ready.source), False) msg.format(
name, number_available, desired_number_scheduled,
self.min_ready.source), False)
msg = "daemon set {} successfully rolled out" msg = "daemon set {} successfully rolled out"
return (msg.format(name), True) return (msg.format(name), True)
@ -688,13 +706,13 @@ class DaemonSetWait(ControllerWait):
class StatefulSetWait(ControllerWait): class StatefulSetWait(ControllerWait):
def __init__(
def __init__(self, self,
resource_type, resource_type,
chart_wait, chart_wait,
labels, labels,
allow_async_updates=False, allow_async_updates=False,
**kwargs): **kwargs):
super(StatefulSetWait, self).__init__( super(StatefulSetWait, self).__init__(
resource_type, chart_wait, labels, resource_type, chart_wait, labels,
chart_wait.k8s.apps_v1_api.list_namespaced_stateful_set, **kwargs) chart_wait.k8s.apps_v1_api.list_namespaced_stateful_set, **kwargs)
@ -724,9 +742,9 @@ class StatefulSetWait(ControllerWait):
raise armada_exceptions.WaitException( raise armada_exceptions.WaitException(
msg.format(ASYNC_UPDATE_NOT_ALLOWED_MSG, strategy)) msg.format(ASYNC_UPDATE_NOT_ALLOWED_MSG, strategy))
if (is_rolling and replicas and if (is_rolling and replicas
spec.update_strategy.rolling_update and and spec.update_strategy.rolling_update
spec.update_strategy.rolling_update.partition): and spec.update_strategy.rolling_update.partition):
msg = "{}: partitioned rollout" msg = "{}: partitioned rollout"
raise armada_exceptions.WaitException( raise armada_exceptions.WaitException(
@ -737,17 +755,21 @@ class StatefulSetWait(ControllerWait):
return (msg, False) return (msg, False)
if replicas and not self._is_min_ready(ready_replicas, replicas): if replicas and not self._is_min_ready(ready_replicas, replicas):
msg = ("Waiting for statefulset {} rollout to finish: {} of {} " msg = (
"pods are ready, with min_ready={}") "Waiting for statefulset {} rollout to finish: {} of {} "
return (msg.format(name, ready_replicas, replicas, "pods are ready, with min_ready={}")
self.min_ready.source), False) return (
msg.format(
name, ready_replicas, replicas,
self.min_ready.source), False)
update_revision = status.update_revision or 0 update_revision = status.update_revision or 0
current_revision = status.current_revision or 0 current_revision = status.current_revision or 0
if update_revision != current_revision: if update_revision != current_revision:
msg = ("waiting for statefulset rolling update to complete {} " msg = (
"pods at revision {}...") "waiting for statefulset rolling update to complete {} "
"pods at revision {}...")
return (msg.format(updated_replicas, update_revision), False) return (msg.format(updated_replicas, update_revision), False)
msg = "statefulset rolling update complete {} pods at revision {}..." msg = "statefulset rolling update complete {} pods at revision {}..."


@ -15,14 +15,14 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
import mock
import random import random
import string import string
import testtools
import threading import threading
import uuid import uuid
import mock
import testtools
_mock_thread_safe = False _mock_thread_safe = False
_mock_call_lock = threading.RLock() _mock_call_lock = threading.RLock()


@ -32,8 +32,9 @@ class BaseControllerTest(test_base.ArmadaTestCase):
# the sample configuration files to avoid oslo.conf errors when # the sample configuration files to avoid oslo.conf errors when
# creating the server below. # creating the server below.
        current_dir = os.path.dirname(os.path.realpath(__file__))
-        sample_conf_dir = os.path.join(current_dir, os.pardir, os.pardir,
-                                       os.pardir, os.pardir, 'etc', 'armada')
+        sample_conf_dir = os.path.join(
+            current_dir, os.pardir, os.pardir, os.pardir, os.pardir, 'etc',
+            'armada')
sample_conf_files = ['api-paste.ini', 'armada.conf.sample'] sample_conf_files = ['api-paste.ini', 'armada.conf.sample']
with mock.patch.object(armada.conf, with mock.patch.object(armada.conf,
'_get_config_files') as mock_get_config_files: '_get_config_files') as mock_get_config_files:
@ -20,7 +20,6 @@ from armada.tests.unit.api import base as test_base
class TestApi(test_base.BaseControllerTest): class TestApi(test_base.BaseControllerTest):
def test_init_application(self): def test_init_application(self):
server = importlib.import_module('armada.api.server') server = importlib.import_module('armada.api.server')
api = server.create() api = server.create()
@ -13,8 +13,8 @@
# limitations under the License. # limitations under the License.
import json import json
import mock
import mock
from oslo_config import cfg from oslo_config import cfg
from armada import api from armada import api
@ -26,15 +26,14 @@ from armada.tests.unit.api import base
CONF = cfg.CONF CONF = cfg.CONF
-@mock.patch.object(armada_api.Apply, 'handle',
-                   armada_api.Apply.handle.__wrapped__)
+@mock.patch.object(
+    armada_api.Apply, 'handle', armada_api.Apply.handle.__wrapped__)
class ArmadaControllerTest(base.BaseControllerTest): class ArmadaControllerTest(base.BaseControllerTest):
@mock.patch.object(api, 'Tiller') @mock.patch.object(api, 'Tiller')
@mock.patch.object(armada_api, 'Armada') @mock.patch.object(armada_api, 'Armada')
@mock.patch.object(armada_api, 'ReferenceResolver') @mock.patch.object(armada_api, 'ReferenceResolver')
-    def test_armada_apply_resource(self, mock_resolver, mock_armada,
-                                   mock_tiller):
+    def test_armada_apply_resource(
+            self, mock_resolver, mock_armada, mock_tiller):
"""Tests the POST /api/v1.0/apply endpoint.""" """Tests the POST /api/v1.0/apply endpoint."""
rules = {'armada:create_endpoints': '@'} rules = {'armada:create_endpoints': '@'}
self.policy.set_rules(rules) self.policy.set_rules(rules)
@ -84,9 +83,10 @@ class ArmadaControllerTest(base.BaseControllerTest):
self.assertEqual('application/json', result.headers['content-type']) self.assertEqual('application/json', result.headers['content-type'])
mock_resolver.resolve_reference.assert_called_with([payload_url]) mock_resolver.resolve_reference.assert_called_with([payload_url])
-        mock_armada.assert_called_with([{
-            'foo': 'bar'
-        }], **expected_armada_options)
+        mock_armada.assert_called_with(
+            [{
+                'foo': 'bar'
+            }], **expected_armada_options)
mock_armada.return_value.sync.assert_called() mock_armada.return_value.sync.assert_called()
mock_tiller.assert_called_with(dry_run=False) mock_tiller.assert_called_with(dry_run=False)
@ -119,7 +119,6 @@ class ArmadaControllerTest(base.BaseControllerTest):
class ArmadaControllerNegativeTest(base.BaseControllerTest): class ArmadaControllerNegativeTest(base.BaseControllerTest):
@test_utils.attr(type=['negative']) @test_utils.attr(type=['negative'])
def test_armada_apply_raises_415_given_unsupported_media_type(self): def test_armada_apply_raises_415_given_unsupported_media_type(self):
"""Tests the POST /api/v1.0/apply endpoint returns 415 given """Tests the POST /api/v1.0/apply endpoint returns 415 given
@ -133,7 +132,6 @@ class ArmadaControllerNegativeTest(base.BaseControllerTest):
class ArmadaControllerNegativeRbacTest(base.BaseControllerTest): class ArmadaControllerNegativeRbacTest(base.BaseControllerTest):
@test_utils.attr(type=['negative']) @test_utils.attr(type=['negative'])
def test_armada_apply_resource_insufficient_permissions(self): def test_armada_apply_resource_insufficient_permissions(self):
"""Tests the POST /api/v1.0/apply endpoint returns 403 following failed """Tests the POST /api/v1.0/apply endpoint returns 403 following failed
@ -18,7 +18,6 @@ from armada.tests.unit.api import base
class HealthControllerTest(base.BaseControllerTest): class HealthControllerTest(base.BaseControllerTest):
def test_get_health_status(self): def test_get_health_status(self):
""" """
Validate that /api/v1.0/health returns 204. Validate that /api/v1.0/health returns 204.
@ -23,10 +23,9 @@ from armada.tests.unit.api import base
from armada.api.controller import rollback from armada.api.controller import rollback
-@mock.patch.object(rollback.Rollback, 'handle',
-                   rollback.Rollback.handle.__wrapped__)
+@mock.patch.object(
+    rollback.Rollback, 'handle', rollback.Rollback.handle.__wrapped__)
class RollbackReleaseControllerTest(base.BaseControllerTest): class RollbackReleaseControllerTest(base.BaseControllerTest):
@mock.patch.object(api, 'Tiller') @mock.patch.object(api, 'Tiller')
def test_rollback_controller_pass(self, mock_tiller): def test_rollback_controller_pass(self, mock_tiller):
rules = {'armada:rollback_release': '@'} rules = {'armada:rollback_release': '@'}
@ -62,14 +61,14 @@ class RollbackReleaseControllerTest(base.BaseControllerTest):
release, 2, wait=True, timeout=123, force=True, recreate_pods=True) release, 2, wait=True, timeout=123, force=True, recreate_pods=True)
self.assertEqual(200, resp.status_code) self.assertEqual(200, resp.status_code)
-        self.assertEqual('Rollback of test-release complete.',
-                         json.loads(resp.text)['message'])
+        self.assertEqual(
+            'Rollback of test-release complete.',
+            json.loads(resp.text)['message'])
m_tiller.__exit__.assert_called() m_tiller.__exit__.assert_called()
@test_utils.attr(type=['negative']) @test_utils.attr(type=['negative'])
class RollbackReleaseControllerNegativeTest(base.BaseControllerTest): class RollbackReleaseControllerNegativeTest(base.BaseControllerTest):
@mock.patch.object(api, 'Tiller') @mock.patch.object(api, 'Tiller')
def test_rollback_controller_tiller_exc_return_500(self, mock_tiller): def test_rollback_controller_tiller_exc_return_500(self, mock_tiller):
rules = {'armada:rollback_release': '@'} rules = {'armada:rollback_release': '@'}
@ -83,7 +82,6 @@ class RollbackReleaseControllerNegativeTest(base.BaseControllerTest):
@test_utils.attr(type=['negative']) @test_utils.attr(type=['negative'])
class RollbackReleaseControllerNegativeRbacTest(base.BaseControllerTest): class RollbackReleaseControllerNegativeRbacTest(base.BaseControllerTest):
def test_rollback_release_insufficient_permissions(self): def test_rollback_release_insufficient_permissions(self):
"""Tests the GET /api/v1.0/rollback/{release} endpoint returns 403 """Tests the GET /api/v1.0/rollback/{release} endpoint returns 403
following failed authorization. following failed authorization.
@ -14,9 +14,9 @@
import json import json
import os import os
import yaml
import mock import mock
import yaml
from armada import api from armada import api
from armada.api.controller import test from armada.api.controller import test
@ -26,10 +26,10 @@ from armada.tests import test_utils
from armada.tests.unit.api import base from armada.tests.unit.api import base
-@mock.patch.object(test.TestReleasesManifestController, 'handle',
-                   test.TestReleasesManifestController.handle.__wrapped__)
+@mock.patch.object(
+    test.TestReleasesManifestController, 'handle',
+    test.TestReleasesManifestController.handle.__wrapped__)
class TestReleasesManifestControllerTest(base.BaseControllerTest): class TestReleasesManifestControllerTest(base.BaseControllerTest):
@mock.patch.object(test, 'Manifest') @mock.patch.object(test, 'Manifest')
@mock.patch.object(api, 'Tiller') @mock.patch.object(api, 'Tiller')
def test_test_controller_with_manifest(self, mock_tiller, mock_manifest): def test_test_controller_with_manifest(self, mock_tiller, mock_manifest):
@ -38,8 +38,8 @@ class TestReleasesManifestControllerTest(base.BaseControllerTest):
# TODO: Don't use example charts in tests. # TODO: Don't use example charts in tests.
# TODO: Test cleanup arg is taken from url, then manifest. # TODO: Test cleanup arg is taken from url, then manifest.
-        manifest_path = os.path.join(os.getcwd(), 'examples',
-                                     'keystone-manifest.yaml')
+        manifest_path = os.path.join(
+            os.getcwd(), 'examples', 'keystone-manifest.yaml')
with open(manifest_path, 'r') as f: with open(manifest_path, 'r') as f:
payload = f.read() payload = f.read()
documents = list(yaml.safe_load_all(payload)) documents = list(yaml.safe_load_all(payload))
@ -59,14 +59,14 @@ class TestReleasesManifestControllerTest(base.BaseControllerTest):
m_tiller.__exit__.assert_called() m_tiller.__exit__.assert_called()
-@mock.patch.object(test.TestReleasesReleaseNameController, 'handle',
-                   test.TestReleasesReleaseNameController.handle.__wrapped__)
+@mock.patch.object(
+    test.TestReleasesReleaseNameController, 'handle',
+    test.TestReleasesReleaseNameController.handle.__wrapped__)
class TestReleasesReleaseNameControllerTest(base.BaseControllerTest):
    @mock.patch.object(test.Test, 'test_release_for_success')
    @mock.patch.object(api, 'Tiller')
-    def test_test_controller_test_pass(self, mock_tiller,
-                                       mock_test_release_for_success):
+    def test_test_controller_test_pass(
+            self, mock_tiller, mock_test_release_for_success):
rules = {'armada:test_release': '@'} rules = {'armada:test_release': '@'}
self.policy.set_rules(rules) self.policy.set_rules(rules)
@ -79,14 +79,15 @@ class TestReleasesReleaseNameControllerTest(base.BaseControllerTest):
resp = self.app.simulate_get('/api/v1.0/test/{}'.format(release)) resp = self.app.simulate_get('/api/v1.0/test/{}'.format(release))
mock_test_release_for_success.assert_called_once() mock_test_release_for_success.assert_called_once()
self.assertEqual(200, resp.status_code) self.assertEqual(200, resp.status_code)
-        self.assertEqual('MESSAGE: Test Pass',
-                         json.loads(resp.text)['message'])
+        self.assertEqual(
+            'MESSAGE: Test Pass',
+            json.loads(resp.text)['message'])
m_tiller.__exit__.assert_called() m_tiller.__exit__.assert_called()
@mock.patch.object(test.Test, 'test_release_for_success') @mock.patch.object(test.Test, 'test_release_for_success')
@mock.patch.object(api, 'Tiller') @mock.patch.object(api, 'Tiller')
-    def test_test_controller_test_fail(self, mock_tiller,
-                                       mock_test_release_for_success):
+    def test_test_controller_test_fail(
+            self, mock_tiller, mock_test_release_for_success):
rules = {'armada:test_release': '@'} rules = {'armada:test_release': '@'}
self.policy.set_rules(rules) self.policy.set_rules(rules)
@ -97,14 +98,15 @@ class TestReleasesReleaseNameControllerTest(base.BaseControllerTest):
release = 'fake-release' release = 'fake-release'
resp = self.app.simulate_get('/api/v1.0/test/{}'.format(release)) resp = self.app.simulate_get('/api/v1.0/test/{}'.format(release))
self.assertEqual(200, resp.status_code) self.assertEqual(200, resp.status_code)
-        self.assertEqual('MESSAGE: Test Fail',
-                         json.loads(resp.text)['message'])
+        self.assertEqual(
+            'MESSAGE: Test Fail',
+            json.loads(resp.text)['message'])
m_tiller.__exit__.assert_called() m_tiller.__exit__.assert_called()
@mock.patch.object(test.Test, 'test_release_for_success') @mock.patch.object(test.Test, 'test_release_for_success')
@mock.patch.object(api, 'Tiller') @mock.patch.object(api, 'Tiller')
-    def test_test_controller_cleanup(self, mock_tiller,
-                                     mock_test_release_for_success):
+    def test_test_controller_cleanup(
+            self, mock_tiller, mock_test_release_for_success):
rules = {'armada:test_release': '@'} rules = {'armada:test_release': '@'}
self.policy.set_rules(rules) self.policy.set_rules(rules)
@ -117,16 +119,17 @@ class TestReleasesReleaseNameControllerTest(base.BaseControllerTest):
'/api/v1.0/test/{}'.format(release), query_string='cleanup=true') '/api/v1.0/test/{}'.format(release), query_string='cleanup=true')
mock_test_release_for_success.assert_called_once() mock_test_release_for_success.assert_called_once()
self.assertEqual(200, resp.status_code) self.assertEqual(200, resp.status_code)
-        self.assertEqual('MESSAGE: Test Pass',
-                         json.loads(resp.text)['message'])
+        self.assertEqual(
+            'MESSAGE: Test Pass',
+            json.loads(resp.text)['message'])
m_tiller.__exit__.assert_called() m_tiller.__exit__.assert_called()
@test_utils.attr(type=['negative']) @test_utils.attr(type=['negative'])
-@mock.patch.object(test.TestReleasesManifestController, 'handle',
-                   test.TestReleasesManifestController.handle.__wrapped__)
+@mock.patch.object(
+    test.TestReleasesManifestController, 'handle',
+    test.TestReleasesManifestController.handle.__wrapped__)
class TestReleasesManifestControllerNegativeTest(base.BaseControllerTest): class TestReleasesManifestControllerNegativeTest(base.BaseControllerTest):
@mock.patch.object(test, 'Manifest') @mock.patch.object(test, 'Manifest')
@mock.patch.object(api, 'Tiller') @mock.patch.object(api, 'Tiller')
@mock.patch.object(test.Test, 'test_release_for_success') @mock.patch.object(test.Test, 'test_release_for_success')
@ -148,8 +151,8 @@ class TestReleasesManifestControllerNegativeTest(base.BaseControllerTest):
rules = {'armada:test_manifest': '@'} rules = {'armada:test_manifest': '@'}
self.policy.set_rules(rules) self.policy.set_rules(rules)
-        manifest_path = os.path.join(os.getcwd(), 'examples',
-                                     'keystone-manifest.yaml')
+        manifest_path = os.path.join(
+            os.getcwd(), 'examples', 'keystone-manifest.yaml')
with open(manifest_path, 'r') as f: with open(manifest_path, 'r') as f:
payload = f.read() payload = f.read()
@ -166,22 +169,22 @@ class TestReleasesManifestControllerNegativeTest(base.BaseControllerTest):
resp_body = json.loads(resp.text) resp_body = json.loads(resp.text)
self.assertEqual(400, resp_body['code']) self.assertEqual(400, resp_body['code'])
self.assertEqual(1, resp_body['details']['errorCount']) self.assertEqual(1, resp_body['details']['errorCount'])
-        self.assertIn({
-            'message':
-            ('An error occurred while building chart group: '
-             'Could not build ChartGroup named "keystone-infra-services".'),
-            'error':
-            True,
-            'kind':
-            'ValidationMessage',
-            'level':
-            'Error',
-            'name':
-            'ARM001',
-            'documents': []
-        }, resp_body['details']['messageList'])
-        self.assertEqual(('Failed to validate documents or generate Armada '
-                          'Manifest from documents.'), resp_body['message'])
+        self.assertIn(
+            {
+                'message': (
+                    'An error occurred while building chart group: '
+                    'Could not build ChartGroup named '
+                    '"keystone-infra-services".'),
+                'error': True,
+                'kind': 'ValidationMessage',
+                'level': 'Error',
+                'name': 'ARM001',
+                'documents': []
+            }, resp_body['details']['messageList'])
+        self.assertEqual(
+            (
+                'Failed to validate documents or generate Armada '
+                'Manifest from documents.'), resp_body['message'])
m_tiller.__exit__.assert_called() m_tiller.__exit__.assert_called()
@mock.patch('armada.utils.validate.Manifest') @mock.patch('armada.utils.validate.Manifest')
@ -194,8 +197,8 @@ class TestReleasesManifestControllerNegativeTest(base.BaseControllerTest):
mock_manifest.return_value.get_manifest.side_effect = ( mock_manifest.return_value.get_manifest.side_effect = (
manifest_exceptions.ManifestException(details='foo')) manifest_exceptions.ManifestException(details='foo'))
-        manifest_path = os.path.join(os.getcwd(), 'examples',
-                                     'keystone-manifest.yaml')
+        manifest_path = os.path.join(
+            os.getcwd(), 'examples', 'keystone-manifest.yaml')
with open(manifest_path, 'r') as f: with open(manifest_path, 'r') as f:
payload = f.read() payload = f.read()
@ -208,27 +211,28 @@ class TestReleasesManifestControllerNegativeTest(base.BaseControllerTest):
resp_body = json.loads(resp.text) resp_body = json.loads(resp.text)
self.assertEqual(400, resp_body['code']) self.assertEqual(400, resp_body['code'])
self.assertEqual(1, resp_body['details']['errorCount']) self.assertEqual(1, resp_body['details']['errorCount'])
-        self.assertEqual([{
-            'message':
-            ('An error occurred while generating the manifest: foo.'),
-            'error':
-            True,
-            'kind':
-            'ValidationMessage',
-            'level':
-            'Error',
-            'name':
-            'ARM001',
-            'documents': []
-        }], resp_body['details']['messageList'])
-        self.assertEqual(('Failed to validate documents or generate Armada '
-                          'Manifest from documents.'), resp_body['message'])
+        self.assertEqual(
+            [
+                {
+                    'message': (
+                        'An error occurred while generating the manifest: foo.'
+                    ),
+                    'error': True,
+                    'kind': 'ValidationMessage',
+                    'level': 'Error',
+                    'name': 'ARM001',
+                    'documents': []
+                }
+            ], resp_body['details']['messageList'])
+        self.assertEqual(
+            (
+                'Failed to validate documents or generate Armada '
+                'Manifest from documents.'), resp_body['message'])
m_tiller.__exit__.assert_called() m_tiller.__exit__.assert_called()
@test_utils.attr(type=['negative']) @test_utils.attr(type=['negative'])
class TestReleasesReleaseNameControllerNegativeTest(base.BaseControllerTest): class TestReleasesReleaseNameControllerNegativeTest(base.BaseControllerTest):
@mock.patch.object(api, 'Tiller') @mock.patch.object(api, 'Tiller')
@mock.patch.object(test.Test, 'test_release_for_success') @mock.patch.object(test.Test, 'test_release_for_success')
def test_test_controller_tiller_exc_returns_500( def test_test_controller_tiller_exc_returns_500(
@ -243,9 +247,8 @@ class TestReleasesReleaseNameControllerNegativeTest(base.BaseControllerTest):
self.assertEqual(500, resp.status_code) self.assertEqual(500, resp.status_code)
-class TestReleasesReleaseNameControllerNegativeRbacTest(
-        base.BaseControllerTest):
+class TestReleasesReleaseNameControllerNegativeRbacTest(base.BaseControllerTest
+                                                        ):
@test_utils.attr(type=['negative']) @test_utils.attr(type=['negative'])
def test_test_release_insufficient_permissions(self): def test_test_release_insufficient_permissions(self):
"""Tests the GET /api/v1.0/test/{release} endpoint returns 403 """Tests the GET /api/v1.0/test/{release} endpoint returns 403
@ -258,7 +261,6 @@ class TestReleasesReleaseNameControllerNegativeRbacTest(
class TestReleasesManifestControllerNegativeRbacTest(base.BaseControllerTest): class TestReleasesManifestControllerNegativeRbacTest(base.BaseControllerTest):
@test_utils.attr(type=['negative']) @test_utils.attr(type=['negative'])
def test_test_manifest_insufficient_permissions(self): def test_test_manifest_insufficient_permissions(self):
"""Tests the POST /api/v1.0/tests endpoint returns 403 following failed """Tests the POST /api/v1.0/tests endpoint returns 403 following failed
@ -13,7 +13,6 @@
# limitations under the License. # limitations under the License.
import mock import mock
from oslo_config import cfg from oslo_config import cfg
from armada import api from armada import api
@ -25,7 +24,6 @@ CONF = cfg.CONF
class TillerControllerTest(base.BaseControllerTest): class TillerControllerTest(base.BaseControllerTest):
@mock.patch.object(api, 'Tiller') @mock.patch.object(api, 'Tiller')
def test_get_tiller_status(self, mock_tiller): def test_get_tiller_status(self, mock_tiller):
"""Tests GET /api/v1.0/status endpoint.""" """Tests GET /api/v1.0/status endpoint."""
@ -140,7 +138,6 @@ class TillerControllerTest(base.BaseControllerTest):
class TillerControllerNegativeRbacTest(base.BaseControllerTest): class TillerControllerNegativeRbacTest(base.BaseControllerTest):
@test_utils.attr(type=['negative']) @test_utils.attr(type=['negative'])
def test_list_tiller_releases_insufficient_permissions(self): def test_list_tiller_releases_insufficient_permissions(self):
"""Tests the GET /api/v1.0/releases endpoint returns 403 following """Tests the GET /api/v1.0/releases endpoint returns 403 following
@ -18,7 +18,6 @@ from armada.tests.unit.api import base
class ValidationControllerNegativeRbacTest(base.BaseControllerTest): class ValidationControllerNegativeRbacTest(base.BaseControllerTest):
@test_utils.attr(type=['negative']) @test_utils.attr(type=['negative'])
def test_validate_manifest_insufficient_permissions(self): def test_validate_manifest_insufficient_permissions(self):
"""Tests the POST /api/v1.0/validate endpoint returns 403 following """Tests the POST /api/v1.0/validate endpoint returns 403 following
@ -16,7 +16,6 @@ from armada.tests.unit.api import base
class VersionsControllerTest(base.BaseControllerTest): class VersionsControllerTest(base.BaseControllerTest):
def test_list_versions(self): def test_list_versions(self):
""" """
Validate that /api/v1.0/health returns 204. Validate that /api/v1.0/health returns 204.
@ -42,7 +42,6 @@ def is_connected():
class ArmadaTestCase(testtools.TestCase): class ArmadaTestCase(testtools.TestCase):
def setUp(self): def setUp(self):
super(ArmadaTestCase, self).setUp() super(ArmadaTestCase, self).setUp()
self.useFixture(fixtures.FakeLogger('armada')) self.useFixture(fixtures.FakeLogger('armada'))
@ -10,10 +10,9 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
import testtools
import mock import mock
from oslo_policy import policy as common_policy from oslo_policy import policy as common_policy
import testtools
from armada.common import policy from armada.common import policy
from armada import conf as cfg from armada import conf as cfg
@ -24,7 +23,6 @@ CONF = cfg.CONF
class PolicyTestCase(testtools.TestCase): class PolicyTestCase(testtools.TestCase):
def setUp(self): def setUp(self):
super(PolicyTestCase, self).setUp() super(PolicyTestCase, self).setUp()
self.rules = { self.rules = {
@ -48,8 +46,9 @@ class PolicyTestCase(testtools.TestCase):
action = "example:nope" action = "example:nope"
mock_ctx.to_policy_view.return_value = self.credentials mock_ctx.to_policy_view.return_value = self.credentials
-        self.assertRaises(exc.ActionForbidden, policy._enforce_policy, action,
-                          self.target, mock_ctx)
+        self.assertRaises(
+            exc.ActionForbidden, policy._enforce_policy, action, self.target,
+            mock_ctx)
mock_log.exception.assert_called_once_with( mock_log.exception.assert_called_once_with(
'Policy not registered for %(action)s', {'action': 'example:nope'}) 'Policy not registered for %(action)s', {'action': 'example:nope'})
@ -67,5 +66,6 @@ class PolicyTestCase(testtools.TestCase):
action = "armada:create_endpoints" action = "armada:create_endpoints"
mock_ctx.to_policy_view.return_value = self.credentials mock_ctx.to_policy_view.return_value = self.credentials
-        self.assertRaises(exc.ActionForbidden, policy._enforce_policy, action,
-                          self.target, mock_ctx)
+        self.assertRaises(
+            exc.ActionForbidden, policy._enforce_policy, action, self.target,
+            mock_ctx)
@ -12,15 +12,13 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import testtools
import responses import responses
import testtools
from armada.common.session import ArmadaSession from armada.common.session import ArmadaSession
class SessionTestCase(testtools.TestCase): class SessionTestCase(testtools.TestCase):
def test_create_session(self): def test_create_session(self):
"""Tests setting up an Armada session""" """Tests setting up an Armada session"""
sess = ArmadaSession("testarmada") sess = ArmadaSession("testarmada")
@ -19,13 +19,13 @@
from __future__ import absolute_import from __future__ import absolute_import
import os import os
import yaml
import fixtures import fixtures
import mock import mock
from oslo_config import cfg from oslo_config import cfg
from oslo_policy import opts as policy_opts from oslo_policy import opts as policy_opts
from oslo_policy import policy as oslo_policy from oslo_policy import policy as oslo_policy
import yaml
from armada.common import policies from armada.common import policies
import armada.common.policy import armada.common.policy
@ -145,15 +145,15 @@ data:
enabled: true enabled: true
""" """
-CHART_SOURCES = [('git://opendev.org/dummy/armada.git', 'chart_1'),
-                 ('/tmp/dummy/armada', 'chart_2'),
-                 ('/tmp/dummy/armada', 'chart_3'),
-                 ('/tmp/dummy/armada', 'chart_4')]
+CHART_SOURCES = [
+    ('git://opendev.org/dummy/armada.git', 'chart_1'),
+    ('/tmp/dummy/armada', 'chart_2'), ('/tmp/dummy/armada', 'chart_3'),
+    ('/tmp/dummy/armada', 'chart_4')
+]
# TODO(seaneagan): Add unit tests with dependencies, including transitive. # TODO(seaneagan): Add unit tests with dependencies, including transitive.
class ArmadaHandlerTestCase(base.ArmadaTestCase): class ArmadaHandlerTestCase(base.ArmadaTestCase):
def _test_pre_flight_ops(self, armada_obj): def _test_pre_flight_ops(self, armada_obj):
armada_obj.pre_flight_ops() armada_obj.pre_flight_ops()
@ -343,8 +343,8 @@ class ArmadaHandlerTestCase(base.ArmadaTestCase):
armada_obj.post_flight_ops() armada_obj.post_flight_ops()
for group in armada_obj.manifest['data']['chart_groups']: for group in armada_obj.manifest['data']['chart_groups']:
for counter, chart in enumerate( for counter, chart in enumerate(group.get(const.KEYWORD_DATA).get(
group.get(const.KEYWORD_DATA).get(const.KEYWORD_CHARTS)): const.KEYWORD_CHARTS)):
if chart.get( if chart.get(
const.KEYWORD_DATA).get('source').get('type') == 'git': const.KEYWORD_DATA).get('source').get('type') == 'git':
mock_source.source_cleanup.assert_called_with( mock_source.source_cleanup.assert_called_with(
@ -355,20 +355,22 @@ class ArmadaHandlerTestCase(base.ArmadaTestCase):
# run sync tests for unsequenced as well by moving them to separate test # run sync tests for unsequenced as well by moving them to separate test
# class with two separate subclasses which set chart group `sequenced` # class with two separate subclasses which set chart group `sequenced`
# field, one to true, one to false. # field, one to true, one to false.
-    def _test_sync(self,
-                   known_releases,
-                   test_success=True,
-                   test_failure_to_run=False,
-                   expected_last_test_result=None,
-                   diff={'some_key': {'some diff'}}):
+    def _test_sync(
+            self,
+            known_releases,
+            test_success=True,
+            test_failure_to_run=False,
+            expected_last_test_result=None,
+            diff={'some_key': {'some diff'}}):
"""Test install functionality from the sync() method.""" """Test install functionality from the sync() method."""
@mock.patch.object(armada.Armada, 'post_flight_ops') @mock.patch.object(armada.Armada, 'post_flight_ops')
@mock.patch.object(armada.Armada, 'pre_flight_ops') @mock.patch.object(armada.Armada, 'pre_flight_ops')
@mock.patch('armada.handlers.chart_deploy.ChartBuilder') @mock.patch('armada.handlers.chart_deploy.ChartBuilder')
@mock.patch('armada.handlers.chart_deploy.Test') @mock.patch('armada.handlers.chart_deploy.Test')
-        def _do_test(mock_test, mock_chartbuilder, mock_pre_flight,
-                     mock_post_flight):
+        def _do_test(
+                mock_test, mock_chartbuilder, mock_pre_flight,
+                mock_post_flight):
# Instantiate Armada object. # Instantiate Armada object.
yaml_documents = list(yaml.safe_load_all(TEST_YAML)) yaml_documents = list(yaml.safe_load_all(TEST_YAML))
@ -417,8 +419,8 @@ class ArmadaHandlerTestCase(base.ArmadaTestCase):
release_name = release_prefixer(prefix, release) release_name = release_prefixer(prefix, release)
# Simplified check because the actual code uses logical-or's # Simplified check because the actual code uses logical-or's
# multiple conditions, so this is enough. # multiple conditions, so this is enough.
native_wait_enabled = (chart['wait'].get('native', {}).get( native_wait_enabled = (
'enabled', True)) chart['wait'].get('native', {}).get('enabled', True))
if release_name not in [x.name for x in known_releases]: if release_name not in [x.name for x in known_releases]:
expected_install_release_calls.append( expected_install_release_calls.append(
@ -503,8 +505,8 @@ class ArmadaHandlerTestCase(base.ArmadaTestCase):
any_order = not chart_group['sequenced'] any_order = not chart_group['sequenced']
# Verify that at least 1 release is either installed or updated. # Verify that at least 1 release is either installed or updated.
self.assertTrue( self.assertTrue(
len(expected_install_release_calls) >= 1 or len(expected_install_release_calls) >= 1
len(expected_update_release_calls) >= 1) or len(expected_update_release_calls) >= 1)
# Verify that the expected number of non-deployed releases are # Verify that the expected number of non-deployed releases are
# installed with expected arguments. # installed with expected arguments.
self.assertEqual( self.assertEqual(
@ -549,8 +551,8 @@ class ArmadaHandlerTestCase(base.ArmadaTestCase):
chart = self._get_chart_by_name(name) chart = self._get_chart_by_name(name)
def get_test_result(success): def get_test_result(success):
status = (TESTRUN_STATUS_SUCCESS status = (
if success else TESTRUN_STATUS_FAILURE) TESTRUN_STATUS_SUCCESS if success else TESTRUN_STATUS_FAILURE)
return mock.Mock(status=status) return mock.Mock(status=status)
last_test_suite_run = None last_test_suite_run = None
@ -658,14 +660,12 @@ class ArmadaHandlerTestCase(base.ArmadaTestCase):
self.assertRaises(ChartDeployException, _test_method) self.assertRaises(ChartDeployException, _test_method)
def test_armada_sync_test_failure(self): def test_armada_sync_test_failure(self):
def _test_method(): def _test_method():
self._test_sync([], test_success=False) self._test_sync([], test_success=False)
self.assertRaises(ChartDeployException, _test_method) self.assertRaises(ChartDeployException, _test_method)
def test_armada_sync_test_failure_to_run(self): def test_armada_sync_test_failure_to_run(self):
def _test_method(): def _test_method():
self._test_sync([], test_failure_to_run=True) self._test_sync([], test_failure_to_run=True)
@ -673,15 +673,16 @@ class ArmadaHandlerTestCase(base.ArmadaTestCase):
class ArmadaNegativeHandlerTestCase(base.ArmadaTestCase): class ArmadaNegativeHandlerTestCase(base.ArmadaTestCase):
@mock.patch.object(armada, 'source') @mock.patch.object(armada, 'source')
def test_armada_get_manifest_exception(self, mock_source): def test_armada_get_manifest_exception(self, mock_source):
"""Test armada handling with invalid manifest.""" """Test armada handling with invalid manifest."""
yaml_documents = list(yaml.safe_load_all(TEST_YAML)) yaml_documents = list(yaml.safe_load_all(TEST_YAML))
-        error_re = ('.*Documents must include at least one of each of .* and '
-                    'only one .*')
-        self.assertRaisesRegexp(ManifestException, error_re, armada.Armada,
-                                yaml_documents[:1], mock.MagicMock())
+        error_re = (
+            '.*Documents must include at least one of each of .* and '
+            'only one .*')
+        self.assertRaisesRegexp(
+            ManifestException, error_re, armada.Armada, yaml_documents[:1],
+            mock.MagicMock())
@mock.patch.object(armada, 'source') @mock.patch.object(armada, 'source')
def test_armada_override_exception(self, mock_source): def test_armada_override_exception(self, mock_source):
@ -15,13 +15,13 @@
import inspect import inspect
import os import os
import shutil import shutil
import yaml
import fixtures import fixtures
from hapi.chart.chart_pb2 import Chart from hapi.chart.chart_pb2 import Chart
from hapi.chart.metadata_pb2 import Metadata from hapi.chart.metadata_pb2 import Metadata
import mock import mock
import testtools import testtools
import yaml
from armada import const from armada import const
from armada.handlers.chartbuilder import ChartBuilder from armada.handlers.chartbuilder import ChartBuilder
@ -137,14 +137,13 @@ class BaseChartBuilderTestCase(testtools.TestCase):
class ChartBuilderTestCase(BaseChartBuilderTestCase): class ChartBuilderTestCase(BaseChartBuilderTestCase):
def test_source_clone(self): def test_source_clone(self):
# Create a temporary directory with Chart.yaml that contains data # Create a temporary directory with Chart.yaml that contains data
# from ``self.chart_yaml``. # from ``self.chart_yaml``.
chart_dir = self.useFixture(fixtures.TempDir()) chart_dir = self.useFixture(fixtures.TempDir())
self.addCleanup(shutil.rmtree, chart_dir.path) self.addCleanup(shutil.rmtree, chart_dir.path)
self._write_temporary_file_contents(chart_dir.path, 'Chart.yaml', self._write_temporary_file_contents(
self.chart_yaml) chart_dir.path, 'Chart.yaml', self.chart_yaml)
chartbuilder = ChartBuilder(self._get_test_chart(chart_dir)) chartbuilder = ChartBuilder(self._get_test_chart(chart_dir))
@ -158,8 +157,9 @@ class ChartBuilderTestCase(BaseChartBuilderTestCase):
chartbuilder = ChartBuilder(self._get_test_chart(chart_dir)) chartbuilder = ChartBuilder(self._get_test_chart(chart_dir))
self.assertRaises(chartbuilder_exceptions.MetadataLoadException, self.assertRaises(
chartbuilder.get_metadata) chartbuilder_exceptions.MetadataLoadException,
chartbuilder.get_metadata)
def test_get_files(self): def test_get_files(self):
"""Validates that ``get_files()`` ignores 'Chart.yaml', 'values.yaml' """Validates that ``get_files()`` ignores 'Chart.yaml', 'values.yaml'
@ -206,8 +206,8 @@ class ChartBuilderTestCase(BaseChartBuilderTestCase):
# that that logic has already been performed. # that that logic has already been performed.
chart_dir = self.useFixture(fixtures.TempDir()) chart_dir = self.useFixture(fixtures.TempDir())
self.addCleanup(shutil.rmtree, chart_dir.path) self.addCleanup(shutil.rmtree, chart_dir.path)
self._write_temporary_file_contents(chart_dir.path, 'Chart.yaml', self._write_temporary_file_contents(
self.chart_yaml) chart_dir.path, 'Chart.yaml', self.chart_yaml)
ch = yaml.safe_load(self.chart_stream) ch = yaml.safe_load(self.chart_stream)
ch['data']['source_dir'] = (chart_dir.path, '') ch['data']['source_dir'] = (chart_dir.path, '')
@ -215,7 +215,8 @@ class ChartBuilderTestCase(BaseChartBuilderTestCase):
chartbuilder = ChartBuilder(test_chart) chartbuilder = ChartBuilder(test_chart)
helm_chart = chartbuilder.get_helm_chart() helm_chart = chartbuilder.get_helm_chart()
expected = inspect.cleandoc(""" expected = inspect.cleandoc(
"""
metadata { metadata {
name: "hello-world-chart" name: "hello-world-chart"
version: "0.1.0" version: "0.1.0"
@ -234,10 +235,10 @@ class ChartBuilderTestCase(BaseChartBuilderTestCase):
chart_dir = self.useFixture(fixtures.TempDir()) chart_dir = self.useFixture(fixtures.TempDir())
self.addCleanup(shutil.rmtree, chart_dir.path) self.addCleanup(shutil.rmtree, chart_dir.path)
self._write_temporary_file_contents(chart_dir.path, 'Chart.yaml', self._write_temporary_file_contents(
self.chart_yaml) chart_dir.path, 'Chart.yaml', self.chart_yaml)
self._write_temporary_file_contents(chart_dir.path, 'values.yaml', self._write_temporary_file_contents(
self.chart_value) chart_dir.path, 'values.yaml', self.chart_value)
ch = yaml.safe_load(self.chart_stream) ch = yaml.safe_load(self.chart_stream)
ch['data']['source_dir'] = (chart_dir.path, '') ch['data']['source_dir'] = (chart_dir.path, '')
@ -257,15 +258,15 @@ class ChartBuilderTestCase(BaseChartBuilderTestCase):
chart_dir = self.useFixture(fixtures.TempDir()) chart_dir = self.useFixture(fixtures.TempDir())
self.addCleanup(shutil.rmtree, chart_dir.path) self.addCleanup(shutil.rmtree, chart_dir.path)
# Chart.yaml is mandatory for `ChartBuilder.get_metadata`. # Chart.yaml is mandatory for `ChartBuilder.get_metadata`.
self._write_temporary_file_contents(chart_dir.path, 'Chart.yaml', self._write_temporary_file_contents(
self.chart_yaml) chart_dir.path, 'Chart.yaml', self.chart_yaml)
self._write_temporary_file_contents(chart_dir.path, 'foo', "foobar") self._write_temporary_file_contents(chart_dir.path, 'foo', "foobar")
self._write_temporary_file_contents(chart_dir.path, 'bar', "bazqux") self._write_temporary_file_contents(chart_dir.path, 'bar', "bazqux")
# Also create a nested directory and verify that files from it are also # Also create a nested directory and verify that files from it are also
# added. # added.
nested_dir = self._make_temporary_subdirectory(chart_dir.path, nested_dir = self._make_temporary_subdirectory(
'nested') chart_dir.path, 'nested')
self._write_temporary_file_contents(nested_dir, 'nested0', "random") self._write_temporary_file_contents(nested_dir, 'nested0', "random")
ch = yaml.safe_load(self.chart_stream) ch = yaml.safe_load(self.chart_stream)
@ -275,10 +276,11 @@ class ChartBuilderTestCase(BaseChartBuilderTestCase):
chartbuilder = ChartBuilder(test_chart) chartbuilder = ChartBuilder(test_chart)
helm_chart = chartbuilder.get_helm_chart() helm_chart = chartbuilder.get_helm_chart()
expected_files = ('[type_url: "%s"\nvalue: "bazqux"\n, ' expected_files = (
'type_url: "%s"\nvalue: "foobar"\n, ' '[type_url: "%s"\nvalue: "bazqux"\n, '
'type_url: "%s"\nvalue: "random"\n]' % 'type_url: "%s"\nvalue: "foobar"\n, '
('./bar', './foo', 'nested/nested0')) 'type_url: "%s"\nvalue: "random"\n]' %
('./bar', './foo', 'nested/nested0'))
self.assertIsInstance(helm_chart, Chart) self.assertIsInstance(helm_chart, Chart)
self.assertTrue(hasattr(helm_chart, 'metadata')) self.assertTrue(hasattr(helm_chart, 'metadata'))
@ -300,8 +302,8 @@ class ChartBuilderTestCase(BaseChartBuilderTestCase):
charts_nested_subdir = self._make_temporary_subdirectory( charts_nested_subdir = self._make_temporary_subdirectory(
charts_subdir, 'extra') charts_subdir, 'extra')
self._write_temporary_file_contents(chart_dir.path, 'Chart.yaml', self._write_temporary_file_contents(
self.chart_yaml) chart_dir.path, 'Chart.yaml', self.chart_yaml)
self._write_temporary_file_contents(chart_dir.path, 'foo', "foobar") self._write_temporary_file_contents(chart_dir.path, 'foo', "foobar")
self._write_temporary_file_contents(chart_dir.path, 'bar', "bazqux") self._write_temporary_file_contents(chart_dir.path, 'bar', "bazqux")
@ -311,16 +313,16 @@ class ChartBuilderTestCase(BaseChartBuilderTestCase):
self._write_temporary_file_contents(chart_dir.path, file, "") self._write_temporary_file_contents(chart_dir.path, file, "")
file_to_ignore = 'file_to_ignore' file_to_ignore = 'file_to_ignore'
# Files to ignore within templates/ subdirectory. # Files to ignore within templates/ subdirectory.
self._write_temporary_file_contents(templates_subdir, file_to_ignore, self._write_temporary_file_contents(
"") templates_subdir, file_to_ignore, "")
# Files to ignore within charts/ subdirectory. # Files to ignore within charts/ subdirectory.
self._write_temporary_file_contents(charts_subdir, file_to_ignore, "") self._write_temporary_file_contents(charts_subdir, file_to_ignore, "")
# Files to ignore within templates/bin subdirectory. # Files to ignore within templates/bin subdirectory.
self._write_temporary_file_contents(templates_nested_subdir, self._write_temporary_file_contents(
file_to_ignore, "") templates_nested_subdir, file_to_ignore, "")
# Files to ignore within charts/extra subdirectory. # Files to ignore within charts/extra subdirectory.
self._write_temporary_file_contents(charts_nested_subdir, self._write_temporary_file_contents(
file_to_ignore, "") charts_nested_subdir, file_to_ignore, "")
# Files to **include** within charts/ subdirectory. # Files to **include** within charts/ subdirectory.
self._write_temporary_file_contents(charts_subdir, '.prov', "xyzzy") self._write_temporary_file_contents(charts_subdir, '.prov', "xyzzy")
@ -331,10 +333,11 @@ class ChartBuilderTestCase(BaseChartBuilderTestCase):
chartbuilder = ChartBuilder(test_chart) chartbuilder = ChartBuilder(test_chart)
helm_chart = chartbuilder.get_helm_chart() helm_chart = chartbuilder.get_helm_chart()
expected_files = ('[type_url: "%s"\nvalue: "bazqux"\n, ' expected_files = (
'type_url: "%s"\nvalue: "foobar"\n, ' '[type_url: "%s"\nvalue: "bazqux"\n, '
'type_url: "%s"\nvalue: "xyzzy"\n]' % 'type_url: "%s"\nvalue: "foobar"\n, '
('./bar', './foo', 'charts/.prov')) 'type_url: "%s"\nvalue: "xyzzy"\n]' %
('./bar', './foo', 'charts/.prov'))
# Validate that only relevant files are included, that the ignored # Validate that only relevant files are included, that the ignored
# files are present. # files are present.
@ -349,16 +352,16 @@ class ChartBuilderTestCase(BaseChartBuilderTestCase):
# Main chart directory and files. # Main chart directory and files.
chart_dir = self.useFixture(fixtures.TempDir()) chart_dir = self.useFixture(fixtures.TempDir())
self.addCleanup(shutil.rmtree, chart_dir.path) self.addCleanup(shutil.rmtree, chart_dir.path)
self._write_temporary_file_contents(chart_dir.path, 'Chart.yaml', self._write_temporary_file_contents(
self.chart_yaml) chart_dir.path, 'Chart.yaml', self.chart_yaml)
ch = yaml.safe_load(self.chart_stream) ch = yaml.safe_load(self.chart_stream)
ch['data']['source_dir'] = (chart_dir.path, '') ch['data']['source_dir'] = (chart_dir.path, '')
# Dependency chart directory and files. # Dependency chart directory and files.
dep_chart_dir = self.useFixture(fixtures.TempDir()) dep_chart_dir = self.useFixture(fixtures.TempDir())
self.addCleanup(shutil.rmtree, dep_chart_dir.path) self.addCleanup(shutil.rmtree, dep_chart_dir.path)
self._write_temporary_file_contents(dep_chart_dir.path, 'Chart.yaml', self._write_temporary_file_contents(
self.dependency_chart_yaml) dep_chart_dir.path, 'Chart.yaml', self.dependency_chart_yaml)
dep_ch = yaml.safe_load(self.dependency_chart_stream) dep_ch = yaml.safe_load(self.dependency_chart_stream)
dep_ch['data']['source_dir'] = (dep_chart_dir.path, '') dep_ch['data']['source_dir'] = (dep_chart_dir.path, '')
@ -369,7 +372,8 @@ class ChartBuilderTestCase(BaseChartBuilderTestCase):
chartbuilder = ChartBuilder(main_chart) chartbuilder = ChartBuilder(main_chart)
helm_chart = chartbuilder.get_helm_chart() helm_chart = chartbuilder.get_helm_chart()
expected_dependency = inspect.cleandoc(""" expected_dependency = inspect.cleandoc(
"""
metadata { metadata {
name: "dependency-chart" name: "dependency-chart"
version: "0.1.0" version: "0.1.0"
@ -379,7 +383,8 @@ class ChartBuilderTestCase(BaseChartBuilderTestCase):
} }
""").strip() """).strip()
expected = inspect.cleandoc(""" expected = inspect.cleandoc(
"""
metadata { metadata {
name: "hello-world-chart" name: "hello-world-chart"
version: "0.1.0" version: "0.1.0"
@ -418,8 +423,8 @@ class ChartBuilderTestCase(BaseChartBuilderTestCase):
# Validate base case. # Validate base case.
chart_dir = self.useFixture(fixtures.TempDir()) chart_dir = self.useFixture(fixtures.TempDir())
self.addCleanup(shutil.rmtree, chart_dir.path) self.addCleanup(shutil.rmtree, chart_dir.path)
self._write_temporary_file_contents(chart_dir.path, 'Chart.yaml', self._write_temporary_file_contents(
self.chart_yaml) chart_dir.path, 'Chart.yaml', self.chart_yaml)
ch = yaml.safe_load(self.chart_stream) ch = yaml.safe_load(self.chart_stream)
ch['data']['source_dir'] = (chart_dir.path, '') ch['data']['source_dir'] = (chart_dir.path, '')
@ -432,8 +437,8 @@ class ChartBuilderTestCase(BaseChartBuilderTestCase):
# Validate recursive case (with dependencies). # Validate recursive case (with dependencies).
dep_chart_dir = self.useFixture(fixtures.TempDir()) dep_chart_dir = self.useFixture(fixtures.TempDir())
self.addCleanup(shutil.rmtree, dep_chart_dir.path) self.addCleanup(shutil.rmtree, dep_chart_dir.path)
self._write_temporary_file_contents(dep_chart_dir.path, 'Chart.yaml', self._write_temporary_file_contents(
self.dependency_chart_yaml) dep_chart_dir.path, 'Chart.yaml', self.dependency_chart_yaml)
dep_ch = yaml.safe_load(self.dependency_chart_stream) dep_ch = yaml.safe_load(self.dependency_chart_stream)
dep_ch['data']['source_dir'] = (dep_chart_dir.path, '') dep_ch['data']['source_dir'] = (dep_chart_dir.path, '')
@ -441,7 +446,8 @@ class ChartBuilderTestCase(BaseChartBuilderTestCase):
test_chart['data']['dependencies'] = [dependency_chart] test_chart['data']['dependencies'] = [dependency_chart]
chartbuilder = ChartBuilder(test_chart) chartbuilder = ChartBuilder(test_chart)
re = inspect.cleandoc(""" re = inspect.cleandoc(
"""
hello-world-chart.*A sample Helm chart for Kubernetes.* hello-world-chart.*A sample Helm chart for Kubernetes.*
dependency-chart.*Another sample Helm chart for Kubernetes.* dependency-chart.*Another sample Helm chart for Kubernetes.*
""").replace('\n', '').strip() """).replace('\n', '').strip()
@ -449,7 +455,6 @@ class ChartBuilderTestCase(BaseChartBuilderTestCase):
class ChartBuilderNegativeTestCase(BaseChartBuilderTestCase): class ChartBuilderNegativeTestCase(BaseChartBuilderTestCase):
def setUp(self): def setUp(self):
super(ChartBuilderNegativeTestCase, self).setUp() super(ChartBuilderNegativeTestCase, self).setUp()
# Create an exception for testing since instantiating one manually # Create an exception for testing since instantiating one manually
@ -471,13 +476,15 @@ class ChartBuilderNegativeTestCase(BaseChartBuilderTestCase):
chartbuilder = ChartBuilder(self._get_test_chart(chart_dir)) chartbuilder = ChartBuilder(self._get_test_chart(chart_dir))
# Confirm it failed for both encodings. # Confirm it failed for both encodings.
-        error_re = (r'.*A str exception occurred while trying to read file:'
-                    r'.*Details:\n.*\(encoding=utf-8\).*\n\(encoding=latin1\)')
+        error_re = (
+            r'.*A str exception occurred while trying to read file:'
+            r'.*Details:\n.*\(encoding=utf-8\).*\n\(encoding=latin1\)')
        with mock.patch("builtins.open", mock.mock_open(read_data="")) \
                as mock_file:
            mock_file.return_value.read.side_effect = self.exc_to_raise
-            self.assertRaisesRegexp(chartbuilder_exceptions.FilesLoadException,
-                                    error_re, chartbuilder.get_files)
+            self.assertRaisesRegexp(
+                chartbuilder_exceptions.FilesLoadException, error_re,
+                chartbuilder.get_files)
def test_get_files_fails_once_to_read_binary_file_passes(self): def test_get_files_fails_once_to_read_binary_file_passes(self):
chart_dir = self.useFixture(fixtures.TempDir()) chart_dir = self.useFixture(fixtures.TempDir())
@ -11,20 +11,20 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import copy import copy
from datetime import datetime from datetime import datetime
from kubernetes.client.rest import ApiException
import mock import mock
import testtools import testtools
from kubernetes.client.rest import ApiException
from armada.handlers import lock from armada.handlers import lock
@mock.patch('armada.handlers.lock.K8s') @mock.patch('armada.handlers.lock.K8s')
@mock.patch.object(lock.time, 'sleep', lambda x: True) @mock.patch.object(lock.time, 'sleep', lambda x: True)
class LockTestCase(testtools.TestCase): class LockTestCase(testtools.TestCase):
def __init__(self, *args, **kwargs): def __init__(self, *args, **kwargs):
super(LockTestCase, self).__init__(*args, **kwargs) super(LockTestCase, self).__init__(*args, **kwargs)
self.resp = None self.resp = None
@ -14,9 +14,9 @@
import copy import copy
import os import os
import yaml
import testtools import testtools
import yaml
from armada import exceptions from armada import exceptions
from armada.handlers import manifest from armada.handlers import manifest
@ -25,11 +25,10 @@ from armada.utils import validate
class ManifestTestCase(testtools.TestCase): class ManifestTestCase(testtools.TestCase):
def setUp(self): def setUp(self):
super(ManifestTestCase, self).setUp() super(ManifestTestCase, self).setUp()
examples_dir = os.path.join(os.getcwd(), 'armada', 'tests', 'unit', examples_dir = os.path.join(
'resources') os.getcwd(), 'armada', 'tests', 'unit', 'resources')
with open(os.path.join(examples_dir, 'keystone-manifest.yaml')) as f: with open(os.path.join(examples_dir, 'keystone-manifest.yaml')) as f:
self.documents = list(yaml.safe_load_all(f.read())) self.documents = list(yaml.safe_load_all(f.read()))
@ -43,10 +42,10 @@ class ManifestTestCase(testtools.TestCase):
self.assertEqual(5, len(armada_manifest.charts)) self.assertEqual(5, len(armada_manifest.charts))
self.assertEqual(3, len(armada_manifest.groups)) self.assertEqual(3, len(armada_manifest.groups))
self.assertEqual([self.documents[x] for x in range(5)], self.assertEqual(
armada_manifest.charts) [self.documents[x] for x in range(5)], armada_manifest.charts)
self.assertEqual([self.documents[x] for x in range(5, 8)], self.assertEqual(
armada_manifest.groups) [self.documents[x] for x in range(5, 8)], armada_manifest.groups)
self.assertEqual(self.documents[-1], armada_manifest.manifest) self.assertEqual(self.documents[-1], armada_manifest.manifest)
def test_get_documents_with_target_manifest(self): def test_get_documents_with_target_manifest(self):
@ -62,13 +61,13 @@ class ManifestTestCase(testtools.TestCase):
self.assertEqual(5, len(armada_manifest.charts)) self.assertEqual(5, len(armada_manifest.charts))
self.assertEqual(3, len(armada_manifest.groups)) self.assertEqual(3, len(armada_manifest.groups))
self.assertEqual([self.documents[x] for x in range(5)], self.assertEqual(
armada_manifest.charts) [self.documents[x] for x in range(5)], armada_manifest.charts)
self.assertEqual([self.documents[x] for x in range(5, 8)], self.assertEqual(
armada_manifest.groups) [self.documents[x] for x in range(5, 8)], armada_manifest.groups)
self.assertEqual(self.documents[-1], armada_manifest.manifest) self.assertEqual(self.documents[-1], armada_manifest.manifest)
self.assertEqual('armada-manifest', self.assertEqual(
self.documents[-1]['metadata']['name']) 'armada-manifest', self.documents[-1]['metadata']['name'])
def test_get_documents_with_multi_manifest_and_target_manifest(self): def test_get_documents_with_multi_manifest_and_target_manifest(self):
# Validate that specifying `target_manifest` flag returns the correct # Validate that specifying `target_manifest` flag returns the correct
@ -90,29 +89,30 @@ class ManifestTestCase(testtools.TestCase):
self.assertEqual(5, len(armada_manifest.charts)) self.assertEqual(5, len(armada_manifest.charts))
self.assertEqual(3, len(armada_manifest.groups)) self.assertEqual(3, len(armada_manifest.groups))
self.assertEqual([self.documents[x] for x in range(5)], self.assertEqual(
armada_manifest.charts) [self.documents[x] for x in range(5)], armada_manifest.charts)
self.assertEqual([self.documents[x] for x in range(5, 8)], self.assertEqual(
armada_manifest.groups) [self.documents[x] for x in range(5, 8)], armada_manifest.groups)
self.assertEqual(armada_manifest.manifest, self.documents[-1]) self.assertEqual(armada_manifest.manifest, self.documents[-1])
self.assertEqual('armada-manifest', self.assertEqual(
armada_manifest.manifest['metadata']['name']) 'armada-manifest', armada_manifest.manifest['metadata']['name'])
# Specify the alternative manifest and verify it works. # Specify the alternative manifest and verify it works.
armada_manifest = manifest.Manifest( armada_manifest = manifest.Manifest(
documents, target_manifest='alt-armada-manifest') documents, target_manifest='alt-armada-manifest')
self.assertIsNotNone(armada_manifest.manifest) self.assertIsNotNone(armada_manifest.manifest)
self.assertEqual(other_manifest, armada_manifest.manifest) self.assertEqual(other_manifest, armada_manifest.manifest)
self.assertEqual('alt-armada-manifest', self.assertEqual(
armada_manifest.manifest['metadata']['name']) 'alt-armada-manifest',
armada_manifest.manifest['metadata']['name'])
def test_get_manifest(self): def test_get_manifest(self):
armada_manifest = manifest.Manifest( armada_manifest = manifest.Manifest(
self.documents, target_manifest='armada-manifest') self.documents, target_manifest='armada-manifest')
obtained_manifest = armada_manifest.get_manifest() obtained_manifest = armada_manifest.get_manifest()
self.assertIsInstance(obtained_manifest, dict) self.assertIsInstance(obtained_manifest, dict)
self.assertEqual(obtained_manifest['data'], self.assertEqual(
armada_manifest.manifest['data']) obtained_manifest['data'], armada_manifest.manifest['data'])
def test_find_documents(self): def test_find_documents(self):
armada_manifest = manifest.Manifest(self.documents) armada_manifest = manifest.Manifest(self.documents)
@ -195,15 +195,17 @@ class ManifestTestCase(testtools.TestCase):
keystone_infra_services_chart_group = armada_manifest. \ keystone_infra_services_chart_group = armada_manifest. \
find_chart_group_document('keystone-infra-services') find_chart_group_document('keystone-infra-services')
self.assertEqual(keystone_infra_services_chart_group, self.assertEqual(
built_armada_manifest['data']['chart_groups'][0]) keystone_infra_services_chart_group,
built_armada_manifest['data']['chart_groups'][0])
# the first chart group in the Armada manifest # the first chart group in the Armada manifest
openstack_keystone_chart_group = armada_manifest. \ openstack_keystone_chart_group = armada_manifest. \
find_chart_group_document('openstack-keystone') find_chart_group_document('openstack-keystone')
self.assertEqual(openstack_keystone_chart_group, self.assertEqual(
built_armada_manifest['data']['chart_groups'][1]) openstack_keystone_chart_group,
built_armada_manifest['data']['chart_groups'][1])
def test_verify_build_chart_group_deps(self): def test_verify_build_chart_group_deps(self):
armada_manifest = manifest.Manifest(self.documents) armada_manifest = manifest.Manifest(self.documents)
@ -223,8 +225,9 @@ class ManifestTestCase(testtools.TestCase):
keystone_dependencies = keystone_chart_with_deps['data'][ keystone_dependencies = keystone_chart_with_deps['data'][
'dependencies'] 'dependencies']
self.assertEqual(openstack_keystone_chart_group_deps_dep_added[0], self.assertEqual(
keystone_dependencies[0]) openstack_keystone_chart_group_deps_dep_added[0],
keystone_dependencies[0])
# building the deps for openstack-keystone chart group # building the deps for openstack-keystone chart group
chart_group = armada_manifest.find_chart_group_document( chart_group = armada_manifest.find_chart_group_document(
@ -248,10 +251,10 @@ class ManifestTestCase(testtools.TestCase):
memcached_dependencies = memcached_chart_with_deps['data'][ memcached_dependencies = memcached_chart_with_deps['data'][
'dependencies'] 'dependencies']
self.assertEqual(keystone_infra_services_dep_added[0], self.assertEqual(
mariadb_dependencies[0]) keystone_infra_services_dep_added[0], mariadb_dependencies[0])
self.assertEqual(keystone_infra_services_dep_added[0], self.assertEqual(
memcached_dependencies[0]) keystone_infra_services_dep_added[0], memcached_dependencies[0])
def test_verify_build_chart_deps(self): def test_verify_build_chart_deps(self):
armada_manifest = manifest.Manifest(self.documents) armada_manifest = manifest.Manifest(self.documents)
@ -265,8 +268,8 @@ class ManifestTestCase(testtools.TestCase):
# since not dependent on other charts, the original and modified # since not dependent on other charts, the original and modified
# dependencies are the same # dependencies are the same
self.assertEqual(helm_toolkit_original_dependency, self.assertEqual(
helm_toolkit_chart_with_deps) helm_toolkit_original_dependency, helm_toolkit_chart_with_deps)
# helm-toolkit dependency, the basis for comparison of d # helm-toolkit dependency, the basis for comparison of d
# ependencies in other charts # ependencies in other charts
@ -287,8 +290,8 @@ class ManifestTestCase(testtools.TestCase):
self.assertIsInstance(keystone_dependencies, list) self.assertIsInstance(keystone_dependencies, list)
self.assertEqual(1, len(keystone_dependencies)) self.assertEqual(1, len(keystone_dependencies))
self.assertEqual(expected_helm_toolkit_dependency, self.assertEqual(
keystone_dependencies[0]) expected_helm_toolkit_dependency, keystone_dependencies[0])
# mariadb chart dependencies # mariadb chart dependencies
mariadb_chart = armada_manifest.find_chart_document('mariadb') mariadb_chart = armada_manifest.find_chart_document('mariadb')
@ -304,8 +307,8 @@ class ManifestTestCase(testtools.TestCase):
self.assertIsInstance(mariadb_dependencies, list) self.assertIsInstance(mariadb_dependencies, list)
self.assertEqual(1, len(mariadb_dependencies)) self.assertEqual(1, len(mariadb_dependencies))
self.assertEqual(expected_helm_toolkit_dependency, self.assertEqual(
mariadb_dependencies[0]) expected_helm_toolkit_dependency, mariadb_dependencies[0])
# memcached chart dependencies # memcached chart dependencies
memcached_chart = armada_manifest.find_chart_document('memcached') memcached_chart = armada_manifest.find_chart_document('memcached')
@ -313,8 +316,8 @@ class ManifestTestCase(testtools.TestCase):
memcached_chart_with_deps = armada_manifest.build_chart_deps( memcached_chart_with_deps = armada_manifest.build_chart_deps(
memcached_chart) memcached_chart)
self.assertNotEqual(original_memcached_chart, self.assertNotEqual(
memcached_chart_with_deps) original_memcached_chart, memcached_chart_with_deps)
self.assertIn('data', memcached_chart_with_deps) self.assertIn('data', memcached_chart_with_deps)
self.assertIn('dependencies', memcached_chart_with_deps['data']) self.assertIn('dependencies', memcached_chart_with_deps['data'])
@ -323,16 +326,15 @@ class ManifestTestCase(testtools.TestCase):
self.assertIsInstance(memcached_dependencies, list) self.assertIsInstance(memcached_dependencies, list)
self.assertEqual(1, len(memcached_dependencies)) self.assertEqual(1, len(memcached_dependencies))
self.assertEqual(expected_helm_toolkit_dependency, self.assertEqual(
memcached_dependencies[0]) expected_helm_toolkit_dependency, memcached_dependencies[0])
class ManifestNegativeTestCase(testtools.TestCase): class ManifestNegativeTestCase(testtools.TestCase):
def setUp(self): def setUp(self):
super(ManifestNegativeTestCase, self).setUp() super(ManifestNegativeTestCase, self).setUp()
-        examples_dir = os.path.join(os.getcwd(), 'armada', 'tests', 'unit',
-                                    'resources')
+        examples_dir = os.path.join(
+            os.getcwd(), 'armada', 'tests', 'unit', 'resources')
with open(os.path.join(examples_dir, 'keystone-manifest.yaml')) as f: with open(os.path.join(examples_dir, 'keystone-manifest.yaml')) as f:
self.documents = list(yaml.safe_load_all(f.read())) self.documents = list(yaml.safe_load_all(f.read()))
@ -343,8 +345,9 @@ class ManifestNegativeTestCase(testtools.TestCase):
documents.append(documents[-1]) # Copy the last manifest. documents.append(documents[-1]) # Copy the last manifest.
error_re = r'Multiple manifests are not supported.*' error_re = r'Multiple manifests are not supported.*'
-        self.assertRaisesRegexp(exceptions.ManifestException, error_re,
-                                manifest.Manifest, documents)
+        self.assertRaisesRegexp(
+            exceptions.ManifestException, error_re, manifest.Manifest,
+            documents)
def test_get_documents_multi_target_manifests_raises_value_error(self): def test_get_documents_multi_target_manifests_raises_value_error(self):
# Validates that finding multiple manifests with `target_manifest` # Validates that finding multiple manifests with `target_manifest`
@ -361,10 +364,12 @@ class ManifestNegativeTestCase(testtools.TestCase):
target_manifest='armada-manifest') target_manifest='armada-manifest')
def _assert_missing_documents_raises(self, documents): def _assert_missing_documents_raises(self, documents):
-        error_re = ('.*Documents must include at least one of each of .* and '
-                    'only one .*')
-        self.assertRaisesRegexp(exceptions.ManifestException, error_re,
-                                manifest.Manifest, documents)
+        error_re = (
+            '.*Documents must include at least one of each of .* and '
+            'only one .*')
+        self.assertRaisesRegexp(
+            exceptions.ManifestException, error_re, manifest.Manifest,
+            documents)
def test_get_documents_missing_manifest(self): def test_get_documents_missing_manifest(self):
# Validates exceptions.ManifestException is thrown if no manifest is # Validates exceptions.ManifestException is thrown if no manifest is
@ -384,18 +389,19 @@ class ManifestNegativeTestCase(testtools.TestCase):
def test_find_chart_document_negative(self): def test_find_chart_document_negative(self):
armada_manifest = manifest.Manifest(self.documents) armada_manifest = manifest.Manifest(self.documents)
-        error_re = r'.*Could not find %s named "%s"' % (schema.TYPE_CHART,
-                                                        'invalid')
-        self.assertRaisesRegexp(exceptions.BuildChartException, error_re,
-                                armada_manifest.find_chart_document, 'invalid')
+        error_re = r'.*Could not find %s named "%s"' % (
+            schema.TYPE_CHART, 'invalid')
+        self.assertRaisesRegexp(
+            exceptions.BuildChartException, error_re,
+            armada_manifest.find_chart_document, 'invalid')

     def test_find_group_document_negative(self):
         armada_manifest = manifest.Manifest(self.documents)
-        error_re = r'.*Could not find %s named "%s"' % (schema.TYPE_CHARTGROUP,
-                                                        'invalid')
-        self.assertRaisesRegexp(exceptions.BuildChartGroupException, error_re,
-                                armada_manifest.find_chart_group_document,
-                                'invalid')
+        error_re = r'.*Could not find %s named "%s"' % (
+            schema.TYPE_CHARTGROUP, 'invalid')
+        self.assertRaisesRegexp(
+            exceptions.BuildChartGroupException, error_re,
+            armada_manifest.find_chart_group_document, 'invalid')
def test_build_chart_deps_with_missing_dependency_fails(self): def test_build_chart_deps_with_missing_dependency_fails(self):
"""Validate that attempting to build a chart that points to """Validate that attempting to build a chart that points to


@ -15,9 +15,9 @@
 import copy
 import json
 import os
-import yaml
 import testtools
+import yaml
from armada.handlers.override import Override from armada.handlers.override import Override
from armada.handlers import schema from armada.handlers import schema
@ -25,7 +25,6 @@ from armada.exceptions import override_exceptions
class OverrideTestCase(testtools.TestCase): class OverrideTestCase(testtools.TestCase):
def setUp(self): def setUp(self):
super(OverrideTestCase, self).setUp() super(OverrideTestCase, self).setUp()
self.basepath = os.path.join(os.path.dirname(__file__)) self.basepath = os.path.join(os.path.dirname(__file__))
@ -80,8 +79,9 @@ class OverrideTestCase(testtools.TestCase):
][0] ][0]
self.assertEqual('overridden', target_doc['data']['release_prefix']) self.assertEqual('overridden', target_doc['data']['release_prefix'])
-        override = ('manifest:simple-armada:chart_groups='
-                    'blog-group3,blog-group4', )
+        override = (
+            'manifest:simple-armada:chart_groups='
+            'blog-group3,blog-group4', )
# Case 2: Checking if list gets updated. # Case 2: Checking if list gets updated.
ovr = Override(original_documents, override, [values_yaml]) ovr = Override(original_documents, override, [values_yaml])
@ -93,8 +93,9 @@ class OverrideTestCase(testtools.TestCase):
with open(comparison_yaml) as c: with open(comparison_yaml) as c:
comparison_documents = list(yaml.safe_load_all(c.read())) comparison_documents = list(yaml.safe_load_all(c.read()))
# verifying that the override is correct # verifying that the override is correct
self.assertEqual(original_documents[2]['data']['chart_groups'], self.assertEqual(
comparison_documents[0]['data']['chart_groups']) original_documents[2]['data']['chart_groups'],
comparison_documents[0]['data']['chart_groups'])
def test_update_manifests_invalid_override_format(self): def test_update_manifests_invalid_override_format(self):
with open(self.base_manifest) as f: with open(self.base_manifest) as f:
@ -141,8 +142,9 @@ class OverrideTestCase(testtools.TestCase):
ovr.update_document(documents_modified[0]) ovr.update_document(documents_modified[0])
# after the update, both documents are equal # after the update, both documents are equal
self.assertEqual(ovr.documents[0]['data']['chart_name'], self.assertEqual(
documents_modified[0]['data']['chart_name']) ovr.documents[0]['data']['chart_name'],
documents_modified[0]['data']['chart_name'])
self.assertEqual(ovr.documents[0], documents_modified[0]) self.assertEqual(ovr.documents[0], documents_modified[0])
# Case 2: Checking if dictionaries get updated # Case 2: Checking if dictionaries get updated
@ -151,8 +153,9 @@ class OverrideTestCase(testtools.TestCase):
ovr.update_document(documents_modified[0]) ovr.update_document(documents_modified[0])
# after the update, both documents are equal # after the update, both documents are equal
self.assertEqual(ovr.documents[0]['data']['values'], self.assertEqual(
documents_modified[0]['data']['values']) ovr.documents[0]['data']['values'],
documents_modified[0]['data']['values'])
self.assertEqual(ovr.documents[0], documents_modified[0]) self.assertEqual(ovr.documents[0], documents_modified[0])
# Case 3: Checking if lists get updated # Case 3: Checking if lists get updated
@ -161,10 +164,11 @@ class OverrideTestCase(testtools.TestCase):
ovr.update_document(documents_modified[0]) ovr.update_document(documents_modified[0])
# after the update, both documents are equal # after the update, both documents are equal
-        self.assertEqual(['foo', 'bar'],
-                         ovr.documents[0]['data']['dependencies'])
-        self.assertEqual(documents_modified[0]['data']['dependencies'],
-                         ovr.documents[0]['data']['dependencies'])
+        self.assertEqual(
+            ['foo', 'bar'], ovr.documents[0]['data']['dependencies'])
+        self.assertEqual(
+            documents_modified[0]['data']['dependencies'],
+            ovr.documents[0]['data']['dependencies'])
self.assertEqual(ovr.documents[0], documents_modified[0]) self.assertEqual(ovr.documents[0], documents_modified[0])
def test_update_chart_document_keys_not_removed_with_override(self): def test_update_chart_document_keys_not_removed_with_override(self):
@ -198,8 +202,9 @@ class OverrideTestCase(testtools.TestCase):
ovr.update_document(documents_modified[1]) ovr.update_document(documents_modified[1])
# after the update, both documents are equal # after the update, both documents are equal
self.assertEqual(ovr.documents[1]['data']['sequenced'], self.assertEqual(
documents_modified[1]['data']['sequenced']) ovr.documents[1]['data']['sequenced'],
documents_modified[1]['data']['sequenced'])
self.assertEqual(ovr.documents[1], documents_modified[1]) self.assertEqual(ovr.documents[1], documents_modified[1])
def test_update_chart_group_document_keys_not_removed_with_override(self): def test_update_chart_group_document_keys_not_removed_with_override(self):
@ -233,8 +238,9 @@ class OverrideTestCase(testtools.TestCase):
ovr.update_document(documents_modified[2]) ovr.update_document(documents_modified[2])
# after the update, both documents are equal # after the update, both documents are equal
self.assertEqual(ovr.documents[2]['data']['release_prefix'], self.assertEqual(
documents_modified[2]['data']['release_prefix']) ovr.documents[2]['data']['release_prefix'],
documents_modified[2]['data']['release_prefix'])
self.assertEqual(ovr.documents[2], documents_modified[2]) self.assertEqual(ovr.documents[2], documents_modified[2])
def test_update_armada_manifest_keys_not_removed_with_override(self): def test_update_armada_manifest_keys_not_removed_with_override(self):
@ -278,7 +284,8 @@ class OverrideTestCase(testtools.TestCase):
with open(self.base_manifest) as f, open(expected) as e: with open(self.base_manifest) as f, open(expected) as e:
documents = list(yaml.safe_load_all(f.read())) documents = list(yaml.safe_load_all(f.read()))
doc_path = ['manifest', 'simple-armada'] doc_path = ['manifest', 'simple-armada']
override = ('manifest:simple-armada:chart_groups=\ override = (
'manifest:simple-armada:chart_groups=\
blog-group3,blog-group4', ) blog-group3,blog-group4', )
ovr = Override(documents, override) ovr = Override(documents, override)
ovr.update_manifests() ovr.update_manifests()
@ -312,7 +319,6 @@ class OverrideTestCase(testtools.TestCase):
class OverrideNegativeTestCase(testtools.TestCase): class OverrideNegativeTestCase(testtools.TestCase):
def setUp(self): def setUp(self):
super(OverrideNegativeTestCase, self).setUp() super(OverrideNegativeTestCase, self).setUp()
self.basepath = os.path.join(os.path.dirname(__file__)) self.basepath = os.path.join(os.path.dirname(__file__))
@ -342,8 +348,9 @@ class OverrideNegativeTestCase(testtools.TestCase):
override = ('manifest:simple-armada:name=' 'overridden', ) override = ('manifest:simple-armada:name=' 'overridden', )
ovr = Override(original_documents, override) ovr = Override(original_documents, override)
self.assertRaises(override_exceptions.InvalidOverrideValueException, self.assertRaises(
ovr.update_manifests) override_exceptions.InvalidOverrideValueException,
ovr.update_manifests)
def test_load_yaml_file_invalid(self): def test_load_yaml_file_invalid(self):
missing_yaml = "{}/templates/non_existing_yaml.yaml". \ missing_yaml = "{}/templates/non_existing_yaml.yaml". \
@ -351,15 +358,16 @@ class OverrideNegativeTestCase(testtools.TestCase):
with open(self.base_manifest) as f: with open(self.base_manifest) as f:
documents = list(yaml.safe_load_all(f.read())) documents = list(yaml.safe_load_all(f.read()))
ovr = Override(documents) ovr = Override(documents)
self.assertRaises(override_exceptions.InvalidOverrideFileException, self.assertRaises(
ovr._load_yaml_file, missing_yaml) override_exceptions.InvalidOverrideFileException,
ovr._load_yaml_file, missing_yaml)
def test_find_document_type_invalid(self): def test_find_document_type_invalid(self):
with open(self.base_manifest) as f: with open(self.base_manifest) as f:
documents = list(yaml.safe_load_all(f.read())) documents = list(yaml.safe_load_all(f.read()))
ovr = Override(documents) ovr = Override(documents)
self.assertRaises(ValueError, ovr.find_document_type, self.assertRaises(
'non_existing_document') ValueError, ovr.find_document_type, 'non_existing_document')
def test_convert_array_to_dict_invalid(self): def test_convert_array_to_dict_invalid(self):
data_path = ['a', 'b', 'c'] data_path = ['a', 'b', 'c']


@ -12,20 +12,19 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
-from armada.handlers.release_diff import ReleaseDiff
-from armada.tests.unit import base
 from google.protobuf.any_pb2 import Any
 from hapi.chart.chart_pb2 import Chart
 from hapi.chart.config_pb2 import Config
 from hapi.chart.metadata_pb2 import Metadata
 from hapi.chart.template_pb2 import Template
+from armada.handlers.release_diff import ReleaseDiff
+from armada.tests.unit import base
# Tests for diffs which can occur in both top-level or dependency charts, # Tests for diffs which can occur in both top-level or dependency charts,
# and thus are inherited by both of those test classes. # and thus are inherited by both of those test classes.
class _BaseReleaseDiffTestCase(): class _BaseReleaseDiffTestCase():
def setUp(self): def setUp(self):
super(base.ArmadaTestCase, self).setUp() super(base.ArmadaTestCase, self).setUp()
self.old_chart = self.make_chart() self.old_chart = self.make_chart()
@ -67,8 +66,9 @@ class _BaseReleaseDiffTestCase():
new_chart = self.make_chart() new_chart = self.make_chart()
chart_to_update = self.get_chart_to_update(new_chart) chart_to_update = self.get_chart_to_update(new_chart)
update_chart(chart_to_update) update_chart(chart_to_update)
-        diff = ReleaseDiff(self.old_chart, self.old_values, new_chart,
-                           self.old_values).get_diff()
+        diff = ReleaseDiff(
+            self.old_chart, self.old_values, new_chart,
+            self.old_values).get_diff()
self.assertTrue(diff) self.assertTrue(diff)
def get_chart_to_update(self, chart): def get_chart_to_update(self, chart):
@ -78,89 +78,82 @@ class _BaseReleaseDiffTestCase():
new_chart = self.make_chart() new_chart = self.make_chart()
chart_to_update = self.get_chart_to_update(new_chart) chart_to_update = self.get_chart_to_update(new_chart)
chart_to_update.metadata.description = 'new chart description' chart_to_update.metadata.description = 'new chart description'
diff = ReleaseDiff(self.old_chart, self.old_values, new_chart, diff = ReleaseDiff(
self.old_values).get_diff() self.old_chart, self.old_values, new_chart,
self.old_values).get_diff()
self.assertFalse(diff) self.assertFalse(diff)
def test_metadata_name_diff(self): def test_metadata_name_diff(self):
def update_chart(chart): def update_chart(chart):
chart.metadata.name = 'new_chart_name' chart.metadata.name = 'new_chart_name'
self._test_chart_diff(update_chart) self._test_chart_diff(update_chart)
def test_default_values_diff(self): def test_default_values_diff(self):
def update_chart(chart): def update_chart(chart):
chart.values.raw = '{param: d2}' chart.values.raw = '{param: d2}'
self._test_chart_diff(update_chart) self._test_chart_diff(update_chart)
def test_template_name_diff(self): def test_template_name_diff(self):
def update_chart(chart): def update_chart(chart):
chart.templates[0].name = 'new_template_name' chart.templates[0].name = 'new_template_name'
self._test_chart_diff(update_chart) self._test_chart_diff(update_chart)
def test_template_data_diff(self): def test_template_data_diff(self):
def update_chart(chart): def update_chart(chart):
chart.templates[0].data = 'new template content'.encode() chart.templates[0].data = 'new template content'.encode()
self._test_chart_diff(update_chart) self._test_chart_diff(update_chart)
def test_add_template_diff(self): def test_add_template_diff(self):
def update_chart(chart): def update_chart(chart):
-            chart.templates.extend([
-                Template(
-                    name='new_template_name',
-                    data='new template content'.encode())
-            ])
+            chart.templates.extend(
+                [
+                    Template(
+                        name='new_template_name',
+                        data='new template content'.encode())
+                ])
self._test_chart_diff(update_chart) self._test_chart_diff(update_chart)
def test_remove_template_diff(self): def test_remove_template_diff(self):
def update_chart(chart): def update_chart(chart):
del chart.templates[0] del chart.templates[0]
self._test_chart_diff(update_chart) self._test_chart_diff(update_chart)
def test_file_type_url_diff(self): def test_file_type_url_diff(self):
def update_chart(chart): def update_chart(chart):
chart.files[0].type_url = './new_file_name.ext' chart.files[0].type_url = './new_file_name.ext'
self._test_chart_diff(update_chart) self._test_chart_diff(update_chart)
def test_file_value_diff(self): def test_file_value_diff(self):
def update_chart(chart): def update_chart(chart):
chart.files[0].value = 'new file content'.encode() chart.files[0].value = 'new file content'.encode()
self._test_chart_diff(update_chart) self._test_chart_diff(update_chart)
def test_add_file_diff(self): def test_add_file_diff(self):
def update_chart(chart): def update_chart(chart):
chart.files.extend([ chart.files.extend(
Any(type_url='./new_file_name.ext', [
value='new file content'.encode()) Any(
]) type_url='./new_file_name.ext',
value='new file content'.encode())
])
self._test_chart_diff(update_chart) self._test_chart_diff(update_chart)
def test_remove_file_diff(self): def test_remove_file_diff(self):
def update_chart(chart): def update_chart(chart):
del chart.files[0] del chart.files[0]
self._test_chart_diff(update_chart) self._test_chart_diff(update_chart)
def test_add_dependency_diff(self): def test_add_dependency_diff(self):
def update_chart(chart): def update_chart(chart):
dep = self._make_chart() dep = self._make_chart()
dep.metadata.name = 'dep2' dep.metadata.name = 'dep2'
@ -169,7 +162,6 @@ class _BaseReleaseDiffTestCase():
self._test_chart_diff(update_chart) self._test_chart_diff(update_chart)
def test_remove_dependency_diff(self): def test_remove_dependency_diff(self):
def update_chart(chart): def update_chart(chart):
del chart.dependencies[0] del chart.dependencies[0]
@ -178,26 +170,26 @@ class _BaseReleaseDiffTestCase():
# Test diffs (or absence of) in top-level chart / values. # Test diffs (or absence of) in top-level chart / values.
class ReleaseDiffTestCase(_BaseReleaseDiffTestCase, base.ArmadaTestCase): class ReleaseDiffTestCase(_BaseReleaseDiffTestCase, base.ArmadaTestCase):
def get_chart_to_update(self, chart): def get_chart_to_update(self, chart):
return chart return chart
def test_same_input_no_diff(self): def test_same_input_no_diff(self):
diff = ReleaseDiff(self.old_chart, self.old_values, self.make_chart(), diff = ReleaseDiff(
self.make_values()).get_diff() self.old_chart, self.old_values, self.make_chart(),
self.make_values()).get_diff()
self.assertFalse(diff) self.assertFalse(diff)
def test_override_values_diff(self): def test_override_values_diff(self):
new_values = {'param': 'o2'} new_values = {'param': 'o2'}
diff = ReleaseDiff(self.old_chart, self.old_values, self.old_chart, diff = ReleaseDiff(
new_values).get_diff() self.old_chart, self.old_values, self.old_chart,
new_values).get_diff()
self.assertTrue(diff) self.assertTrue(diff)
# Test diffs in dependencies. # Test diffs in dependencies.
class DependencyReleaseDiffTestCase(_BaseReleaseDiffTestCase, class DependencyReleaseDiffTestCase(_BaseReleaseDiffTestCase,
base.ArmadaTestCase): base.ArmadaTestCase):
def get_chart_to_update(self, chart): def get_chart_to_update(self, chart):
return chart.dependencies[0] return chart.dependencies[0]
@ -205,6 +197,5 @@ class DependencyReleaseDiffTestCase(_BaseReleaseDiffTestCase,
# Test diffs in transitive dependencies. # Test diffs in transitive dependencies.
class TransitiveDependencyReleaseDiffTestCase(_BaseReleaseDiffTestCase, class TransitiveDependencyReleaseDiffTestCase(_BaseReleaseDiffTestCase,
base.ArmadaTestCase): base.ArmadaTestCase):
def get_chart_to_update(self, chart): def get_chart_to_update(self, chart):
return chart.dependencies[0].dependencies[0] return chart.dependencies[0].dependencies[0]


@ -15,7 +15,6 @@
import mock import mock
from armada import const from armada import const
from armada.handlers import test from armada.handlers import test
from armada.handlers import tiller from armada.handlers import tiller
from armada.tests.unit import base from armada.tests.unit import base
@ -24,9 +23,7 @@ from armada.utils import helm
class TestHandlerTestCase(base.ArmadaTestCase): class TestHandlerTestCase(base.ArmadaTestCase):
def _test_test_release_for_success(self, expected_success, results): def _test_test_release_for_success(self, expected_success, results):
@mock.patch('armada.handlers.tiller.K8s') @mock.patch('armada.handlers.tiller.K8s')
def do_test(_): def do_test(_):
tiller_obj = tiller.Tiller('host', '8080', None) tiller_obj = tiller.Tiller('host', '8080', None)
@ -47,26 +44,29 @@ class TestHandlerTestCase(base.ArmadaTestCase):
self._test_test_release_for_success(True, []) self._test_test_release_for_success(True, [])
def test_unknown(self): def test_unknown(self):
self._test_test_release_for_success(False, [ self._test_test_release_for_success(
AttrDict(**{'status': helm.TESTRUN_STATUS_SUCCESS}), False, [
AttrDict(**{'status': helm.TESTRUN_STATUS_UNKNOWN}) AttrDict(**{'status': helm.TESTRUN_STATUS_SUCCESS}),
]) AttrDict(**{'status': helm.TESTRUN_STATUS_UNKNOWN})
])
def test_success(self): def test_success(self):
self._test_test_release_for_success( self._test_test_release_for_success(
True, [AttrDict(**{'status': helm.TESTRUN_STATUS_SUCCESS})]) True, [AttrDict(**{'status': helm.TESTRUN_STATUS_SUCCESS})])
def test_failure(self): def test_failure(self):
self._test_test_release_for_success(False, [ self._test_test_release_for_success(
AttrDict(**{'status': helm.TESTRUN_STATUS_SUCCESS}), False, [
AttrDict(**{'status': helm.TESTRUN_STATUS_FAILURE}) AttrDict(**{'status': helm.TESTRUN_STATUS_SUCCESS}),
]) AttrDict(**{'status': helm.TESTRUN_STATUS_FAILURE})
])
def test_running(self): def test_running(self):
self._test_test_release_for_success(False, [ self._test_test_release_for_success(
AttrDict(**{'status': helm.TESTRUN_STATUS_SUCCESS}), False, [
AttrDict(**{'status': helm.TESTRUN_STATUS_RUNNING}) AttrDict(**{'status': helm.TESTRUN_STATUS_SUCCESS}),
]) AttrDict(**{'status': helm.TESTRUN_STATUS_RUNNING})
])
def test_cg_disabled(self): def test_cg_disabled(self):
"""Test that tests are disabled when a chart group disables all """Test that tests are disabled when a chart group disables all


@ -23,15 +23,15 @@ from armada.tests.test_utils import AttrDict
class TillerTestCase(base.ArmadaTestCase): class TillerTestCase(base.ArmadaTestCase):
@mock.patch.object(tiller.Tiller, '_get_tiller_ip') @mock.patch.object(tiller.Tiller, '_get_tiller_ip')
@mock.patch('armada.handlers.tiller.K8s') @mock.patch('armada.handlers.tiller.K8s')
@mock.patch('armada.handlers.tiller.grpc') @mock.patch('armada.handlers.tiller.grpc')
@mock.patch('armada.handlers.tiller.Config') @mock.patch('armada.handlers.tiller.Config')
@mock.patch('armada.handlers.tiller.InstallReleaseRequest') @mock.patch('armada.handlers.tiller.InstallReleaseRequest')
@mock.patch('armada.handlers.tiller.ReleaseServiceStub') @mock.patch('armada.handlers.tiller.ReleaseServiceStub')
-    def test_install_release(self, mock_stub, mock_install_request,
-                             mock_config, mock_grpc, mock_k8s, mock_ip):
+    def test_install_release(
+            self, mock_stub, mock_install_request, mock_config, mock_grpc,
+            mock_k8s, mock_ip):
# instantiate Tiller object # instantiate Tiller object
mock_grpc.insecure_channel.return_value = mock.Mock() mock_grpc.insecure_channel.return_value = mock.Mock()
mock_ip.return_value = '0.0.0.0' mock_ip.return_value = '0.0.0.0'
@ -63,8 +63,9 @@ class TillerTestCase(base.ArmadaTestCase):
namespace=namespace, namespace=namespace,
wait=wait, wait=wait,
timeout=timeout) timeout=timeout)
(mock_stub(tiller_obj.channel).InstallRelease.assert_called_with( (
release_request, timeout + 60, metadata=tiller_obj.metadata)) mock_stub(tiller_obj.channel).InstallRelease.assert_called_with(
release_request, timeout + 60, metadata=tiller_obj.metadata))
@mock.patch('armada.handlers.tiller.K8s', autospec=True) @mock.patch('armada.handlers.tiller.K8s', autospec=True)
@mock.patch.object(tiller.Tiller, '_get_tiller_ip', autospec=True) @mock.patch.object(tiller.Tiller, '_get_tiller_ip', autospec=True)
@ -85,10 +86,10 @@ class TillerTestCase(base.ArmadaTestCase):
mock_grpc.insecure_channel.assert_called_once_with( mock_grpc.insecure_channel.assert_called_once_with(
'%s:%s' % (str(mock.sentinel.ip), str(mock.sentinel.port)), '%s:%s' % (str(mock.sentinel.ip), str(mock.sentinel.port)),
options=[('grpc.max_send_message_length', options=[
tiller.MAX_MESSAGE_LENGTH), ('grpc.max_send_message_length', tiller.MAX_MESSAGE_LENGTH),
('grpc.max_receive_message_length', ('grpc.max_receive_message_length', tiller.MAX_MESSAGE_LENGTH)
tiller.MAX_MESSAGE_LENGTH)]) ])
@mock.patch('armada.handlers.tiller.K8s', autospec=True) @mock.patch('armada.handlers.tiller.K8s', autospec=True)
@mock.patch('armada.handlers.tiller.grpc', autospec=True) @mock.patch('armada.handlers.tiller.grpc', autospec=True)
@ -100,8 +101,8 @@ class TillerTestCase(base.ArmadaTestCase):
@mock.patch.object(tiller.Tiller, '_get_tiller_pod', autospec=True) @mock.patch.object(tiller.Tiller, '_get_tiller_pod', autospec=True)
@mock.patch('armada.handlers.tiller.K8s', autospec=True) @mock.patch('armada.handlers.tiller.K8s', autospec=True)
@mock.patch('armada.handlers.tiller.grpc', autospec=True) @mock.patch('armada.handlers.tiller.grpc', autospec=True)
def test_get_tiller_ip_with_mocked_pod(self, mock_grpc, mock_k8s, def test_get_tiller_ip_with_mocked_pod(
mock_pod): self, mock_grpc, mock_k8s, mock_pod):
status = mock.Mock(pod_ip='1.1.1.1') status = mock.Mock(pod_ip='1.1.1.1')
mock_pod.return_value.status = status mock_pod.return_value.status = status
tiller_obj = tiller.Tiller() tiller_obj = tiller.Tiller()
@ -110,14 +111,14 @@ class TillerTestCase(base.ArmadaTestCase):
@mock.patch.object(tiller.Tiller, '_get_tiller_ip', autospec=True) @mock.patch.object(tiller.Tiller, '_get_tiller_ip', autospec=True)
@mock.patch('armada.handlers.tiller.K8s', autospec=True) @mock.patch('armada.handlers.tiller.K8s', autospec=True)
@mock.patch('armada.handlers.tiller.grpc', autospec=True) @mock.patch('armada.handlers.tiller.grpc', autospec=True)
def test_get_tiller_pod_throws_exception(self, mock_grpc, mock_k8s, def test_get_tiller_pod_throws_exception(
mock_ip): self, mock_grpc, mock_k8s, mock_ip):
mock_k8s.get_namespace_pod.return_value.items = [] mock_k8s.get_namespace_pod.return_value.items = []
tiller_obj = tiller.Tiller() tiller_obj = tiller.Tiller()
mock_grpc.insecure_channel.side_effect = ex.ChannelException() mock_grpc.insecure_channel.side_effect = ex.ChannelException()
self.assertRaises(ex.TillerPodNotRunningException, self.assertRaises(
tiller_obj._get_tiller_pod) ex.TillerPodNotRunningException, tiller_obj._get_tiller_pod)
@mock.patch.object(tiller.Tiller, '_get_tiller_ip', autospec=True) @mock.patch.object(tiller.Tiller, '_get_tiller_ip', autospec=True)
@mock.patch('armada.handlers.tiller.K8s', autospec=True) @mock.patch('armada.handlers.tiller.K8s', autospec=True)
@ -241,22 +242,25 @@ class TillerTestCase(base.ArmadaTestCase):
@mock.patch('armada.handlers.tiller.grpc') @mock.patch('armada.handlers.tiller.grpc')
@mock.patch.object(tiller, 'ListReleasesRequest') @mock.patch.object(tiller, 'ListReleasesRequest')
@mock.patch.object(tiller, 'ReleaseServiceStub') @mock.patch.object(tiller, 'ReleaseServiceStub')
def test_list_releases_paged(self, mock_stub, mock_list_releases_request, def test_list_releases_paged(
mock_grpc, _): self, mock_stub, mock_list_releases_request, mock_grpc, _):
page_count = 3 page_count = 3
release_count = tiller.LIST_RELEASES_PAGE_SIZE * page_count release_count = tiller.LIST_RELEASES_PAGE_SIZE * page_count
releases = [mock.Mock() for i in range(release_count)] releases = [mock.Mock() for i in range(release_count)]
for i, release in enumerate(releases): for i, release in enumerate(releases):
release.name = mock.PropertyMock(return_value=str(i)) release.name = mock.PropertyMock(return_value=str(i))
-        pages = [[
-            mock.Mock(
-                count=release_count,
-                total=release_count + 5,
-                next='' if i == page_count - 1 else str(
-                    (tiller.LIST_RELEASES_PAGE_SIZE * (i + 1))),
-                releases=releases[tiller.LIST_RELEASES_PAGE_SIZE *
-                                  i:tiller.LIST_RELEASES_PAGE_SIZE * (i + 1)])
-        ] for i in range(page_count)]
+        pages = [
+            [
+                mock.Mock(
+                    count=release_count,
+                    total=release_count + 5,
+                    next='' if i == page_count - 1 else str(
+                        (tiller.LIST_RELEASES_PAGE_SIZE * (i + 1))),
+                    releases=releases[tiller.LIST_RELEASES_PAGE_SIZE
+                                      * i:tiller.LIST_RELEASES_PAGE_SIZE
+                                      * (i + 1)])
+            ] for i in range(page_count)
+        ]
mock_stub.return_value.ListReleases.side_effect = pages mock_stub.return_value.ListReleases.side_effect = pages
mock_list_releases_side_effect = [ mock_list_releases_side_effect = [
@ -280,8 +284,8 @@ class TillerTestCase(base.ArmadaTestCase):
list_release_request_calls = [ list_release_request_calls = [
mock.call( mock.call(
offset='' offset='' if i == 0 else str(
if i == 0 else str(tiller.LIST_RELEASES_PAGE_SIZE * i), tiller.LIST_RELEASES_PAGE_SIZE * i),
limit=tiller.LIST_RELEASES_PAGE_SIZE, limit=tiller.LIST_RELEASES_PAGE_SIZE,
status_codes=tiller.const.STATUS_ALL) status_codes=tiller.const.STATUS_ALL)
for i in range(page_count) for i in range(page_count)
@ -292,8 +296,9 @@ class TillerTestCase(base.ArmadaTestCase):
@mock.patch('armada.handlers.tiller.grpc') @mock.patch('armada.handlers.tiller.grpc')
@mock.patch.object(tiller, 'GetReleaseContentRequest') @mock.patch.object(tiller, 'GetReleaseContentRequest')
@mock.patch.object(tiller, 'ReleaseServiceStub') @mock.patch.object(tiller, 'ReleaseServiceStub')
def test_get_release_content(self, mock_release_service_stub, def test_get_release_content(
mock_release_content_request, mock_grpc, _): self, mock_release_service_stub, mock_release_content_request,
mock_grpc, _):
mock_release_service_stub.return_value.GetReleaseContent\ mock_release_service_stub.return_value.GetReleaseContent\
.return_value = {} .return_value = {}
@ -311,8 +316,9 @@ class TillerTestCase(base.ArmadaTestCase):
@mock.patch('armada.handlers.tiller.grpc') @mock.patch('armada.handlers.tiller.grpc')
@mock.patch.object(tiller, 'GetVersionRequest') @mock.patch.object(tiller, 'GetVersionRequest')
@mock.patch.object(tiller, 'ReleaseServiceStub') @mock.patch.object(tiller, 'ReleaseServiceStub')
def test_tiller_version(self, mock_release_service_stub, def test_tiller_version(
mock_version_request, mock_grpc, _): self, mock_release_service_stub, mock_version_request, mock_grpc,
_):
mock_version = mock.Mock() mock_version = mock.Mock()
mock_version.Version.sem_ver = mock.sentinel.sem_ver mock_version.Version.sem_ver = mock.sentinel.sem_ver
@ -336,9 +342,9 @@ class TillerTestCase(base.ArmadaTestCase):
@mock.patch.object(tiller, 'GetVersionRequest') @mock.patch.object(tiller, 'GetVersionRequest')
@mock.patch.object(tiller, 'GetReleaseStatusRequest') @mock.patch.object(tiller, 'GetReleaseStatusRequest')
@mock.patch.object(tiller, 'ReleaseServiceStub') @mock.patch.object(tiller, 'ReleaseServiceStub')
def test_get_release_status(self, mock_release_service_stub, def test_get_release_status(
mock_rel_status_request, mock_version_request, self, mock_release_service_stub, mock_rel_status_request,
mock_grpc, _): mock_version_request, mock_grpc, _):
mock_release_service_stub.return_value.GetReleaseStatus. \ mock_release_service_stub.return_value.GetReleaseStatus. \
return_value = {} return_value = {}
@ -357,8 +363,9 @@ class TillerTestCase(base.ArmadaTestCase):
@mock.patch('armada.handlers.tiller.grpc') @mock.patch('armada.handlers.tiller.grpc')
@mock.patch.object(tiller, 'UninstallReleaseRequest') @mock.patch.object(tiller, 'UninstallReleaseRequest')
@mock.patch.object(tiller, 'ReleaseServiceStub') @mock.patch.object(tiller, 'ReleaseServiceStub')
def test_uninstall_release(self, mock_release_service_stub, def test_uninstall_release(
mock_uninstall_release_request, mock_grpc, _): self, mock_release_service_stub, mock_uninstall_release_request,
mock_grpc, _):
mock_release_service_stub.return_value.UninstallRelease\ mock_release_service_stub.return_value.UninstallRelease\
.return_value = {} .return_value = {}
@ -379,8 +386,9 @@ class TillerTestCase(base.ArmadaTestCase):
@mock.patch('armada.handlers.tiller.grpc') @mock.patch('armada.handlers.tiller.grpc')
@mock.patch.object(tiller, 'RollbackReleaseRequest') @mock.patch.object(tiller, 'RollbackReleaseRequest')
@mock.patch.object(tiller, 'ReleaseServiceStub') @mock.patch.object(tiller, 'ReleaseServiceStub')
def test_rollback_release(self, mock_release_service_stub, def test_rollback_release(
mock_rollback_release_request, _, __): self, mock_release_service_stub, mock_rollback_release_request, _,
__):
mock_release_service_stub.return_value.RollbackRelease\ mock_release_service_stub.return_value.RollbackRelease\
.return_value = {} .return_value = {}
@ -427,8 +435,9 @@ class TillerTestCase(base.ArmadaTestCase):
@mock.patch('armada.handlers.tiller.Config') @mock.patch('armada.handlers.tiller.Config')
@mock.patch.object(tiller, 'UpdateReleaseRequest') @mock.patch.object(tiller, 'UpdateReleaseRequest')
@mock.patch.object(tiller, 'ReleaseServiceStub') @mock.patch.object(tiller, 'ReleaseServiceStub')
def test_update_release(self, mock_release_service_stub, def test_update_release(
mock_update_release_request, mock_config, _, __): self, mock_release_service_stub, mock_update_release_request,
mock_config, _, __):
release = 'release' release = 'release'
chart = {} chart = {}
namespace = 'namespace' namespace = 'namespace'
@ -507,20 +516,20 @@ class TillerTestCase(base.ArmadaTestCase):
timeout + tiller.GRPC_EPSILON, timeout + tiller.GRPC_EPSILON,
metadata=tiller_obj.metadata) metadata=tiller_obj.metadata)
expected_result = tiller.TillerResult(release, namespace, status, expected_result = tiller.TillerResult(
description, version) release, namespace, status, description, version)
self.assertEqual(expected_result, result) self.assertEqual(expected_result, result)
def _test_test_release(self, grpc_response_mock): def _test_test_release(self, grpc_response_mock):
@mock.patch('armada.handlers.tiller.K8s') @mock.patch('armada.handlers.tiller.K8s')
@mock.patch('armada.handlers.tiller.grpc') @mock.patch('armada.handlers.tiller.grpc')
@mock.patch('armada.handlers.tiller.Config') @mock.patch('armada.handlers.tiller.Config')
@mock.patch.object(tiller, 'TestReleaseRequest') @mock.patch.object(tiller, 'TestReleaseRequest')
@mock.patch.object(tiller, 'ReleaseServiceStub') @mock.patch.object(tiller, 'ReleaseServiceStub')
def do_test(self, mock_release_service_stub, mock_test_release_request, def do_test(
mock_config, _, __): self, mock_release_service_stub, mock_test_release_request,
mock_config, _, __):
tiller_obj = tiller.Tiller('host', '8080', None) tiller_obj = tiller.Tiller('host', '8080', None)
release = 'release' release = 'release'
test_suite_run = {} test_suite_run = {}
@ -531,14 +540,11 @@ class TillerTestCase(base.ArmadaTestCase):
tiller_obj.get_release_status = mock.Mock() tiller_obj.get_release_status = mock.Mock()
tiller_obj.get_release_status.return_value = AttrDict( tiller_obj.get_release_status.return_value = AttrDict(
**{ **{
'info': 'info': AttrDict(
AttrDict(
**{ **{
'status': 'status': AttrDict(
AttrDict(
**{'last_test_suite_run': test_suite_run}), **{'last_test_suite_run': test_suite_run}),
'Description': 'Description': 'Failed'
'Failed'
}) })
}) })
@ -549,41 +555,47 @@ class TillerTestCase(base.ArmadaTestCase):
do_test(self) do_test(self)
def test_test_release_no_tests(self): def test_test_release_no_tests(self):
self._test_test_release([ self._test_test_release(
AttrDict(**{ [
'msg': 'No Tests Found', AttrDict(
'status': helm.TESTRUN_STATUS_UNKNOWN **{
}) 'msg': 'No Tests Found',
]) 'status': helm.TESTRUN_STATUS_UNKNOWN
})
])
def test_test_release_success(self): def test_test_release_success(self):
self._test_test_release([ self._test_test_release(
AttrDict(**{ [
'msg': 'RUNNING: ...', AttrDict(
'status': helm.TESTRUN_STATUS_RUNNING **{
}), 'msg': 'RUNNING: ...',
AttrDict(**{ 'status': helm.TESTRUN_STATUS_RUNNING
'msg': 'SUCCESS: ...', }),
'status': helm.TESTRUN_STATUS_SUCCESS AttrDict(
}) **{
]) 'msg': 'SUCCESS: ...',
'status': helm.TESTRUN_STATUS_SUCCESS
})
])
def test_test_release_failure(self): def test_test_release_failure(self):
self._test_test_release([ self._test_test_release(
AttrDict(**{ [
'msg': 'RUNNING: ...', AttrDict(
'status': helm.TESTRUN_STATUS_RUNNING **{
}), 'msg': 'RUNNING: ...',
AttrDict(**{ 'status': helm.TESTRUN_STATUS_RUNNING
'msg': 'FAILURE: ...', }),
'status': helm.TESTRUN_STATUS_FAILURE AttrDict(
}) **{
]) 'msg': 'FAILURE: ...',
'status': helm.TESTRUN_STATUS_FAILURE
})
])
def test_test_release_failure_to_run(self): def test_test_release_failure_to_run(self):
class Iterator: class Iterator:
def __iter__(self): def __iter__(self):
return self return self


@ -23,7 +23,6 @@ test_chart = {'wait': {'timeout': 10, 'native': {'enabled': False}}}
class ChartWaitTestCase(base.ArmadaTestCase): class ChartWaitTestCase(base.ArmadaTestCase):
def get_unit(self, chart_data, timeout=None, version=2): def get_unit(self, chart_data, timeout=None, version=2):
chart = { chart = {
'schema': 'armada/Chart/v{}'.format(str(version)), 'schema': 'armada/Chart/v{}'.format(str(version)),
@ -118,60 +117,66 @@ class ChartWaitTestCase(base.ArmadaTestCase):
self.assertIsInstance(unit.waits[4], wait.StatefulSetWait) self.assertIsInstance(unit.waits[4], wait.StatefulSetWait)
def test_waits_init_min_ready_fails_if_not_controller(self): def test_waits_init_min_ready_fails_if_not_controller(self):
def create_pod_wait_min_ready(): def create_pod_wait_min_ready():
self.get_unit({ self.get_unit(
'wait': { {
'resources': [{ 'wait': {
'type': 'pod', 'resources': [
'labels': { {
'foo': 'bar' 'type': 'pod',
}, 'labels': {
'min_ready': 5 'foo': 'bar'
}] },
} 'min_ready': 5
}) }
]
}
})
self.assertRaises(manifest_exceptions.ManifestException, self.assertRaises(
create_pod_wait_min_ready) manifest_exceptions.ManifestException, create_pod_wait_min_ready)
def create_job_wait_min_ready(): def create_job_wait_min_ready():
self.get_unit({ self.get_unit(
'wait': { {
'resources': [{ 'wait': {
'type': 'job', 'resources': [
'labels': { {
'foo': 'bar' 'type': 'job',
}, 'labels': {
'min_ready': 5 'foo': 'bar'
}] },
} 'min_ready': 5
}) }
]
}
})
self.assertRaises(manifest_exceptions.ManifestException, self.assertRaises(
create_job_wait_min_ready) manifest_exceptions.ManifestException, create_job_wait_min_ready)
def test_waits_init_invalid_type(self): def test_waits_init_invalid_type(self):
def create_with_invalid_type(): def create_with_invalid_type():
self.get_unit({ self.get_unit(
'wait': { {
'resources': [{ 'wait': {
'type': 'invalid', 'resources': [
'labels': { {
'foo': 'bar' 'type': 'invalid',
}, 'labels': {
'min_ready': 5 'foo': 'bar'
}] },
} 'min_ready': 5
}) }
]
}
})
self.assertRaises(manifest_exceptions.ManifestException, self.assertRaises(
create_with_invalid_type) manifest_exceptions.ManifestException, create_with_invalid_type)
@mock.patch.object(wait.ChartWait, 'get_resource_wait') @mock.patch.object(wait.ChartWait, 'get_resource_wait')
def test_wait(self, get_resource_wait): def test_wait(self, get_resource_wait):
def return_mock(*args, **kwargs): def return_mock(*args, **kwargs):
return mock.MagicMock() return mock.MagicMock()
@ -194,7 +199,6 @@ class ChartWaitTestCase(base.ArmadaTestCase):
class PodWaitTestCase(base.ArmadaTestCase): class PodWaitTestCase(base.ArmadaTestCase):
def get_unit(self, labels, version=2): def get_unit(self, labels, version=2):
return wait.PodWait( return wait.PodWait(
resource_type='pod', resource_type='pod',
@ -202,7 +206,6 @@ class PodWaitTestCase(base.ArmadaTestCase):
labels=labels) labels=labels)
def test_include_resource(self): def test_include_resource(self):
def mock_resource(annotations={}, owner_references=None): def mock_resource(annotations={}, owner_references=None):
resource = mock.Mock() resource = mock.Mock()
resource.metadata.annotations = annotations resource.metadata.annotations = annotations
@ -219,10 +222,11 @@ class PodWaitTestCase(base.ArmadaTestCase):
] ]
job_pods = [ job_pods = [
mock_resource(owner_references=[mock.Mock(kind='Job')]), mock_resource(owner_references=[mock.Mock(kind='Job')]),
mock_resource(owner_references=[ mock_resource(
mock.Mock(kind='NotAJob'), owner_references=[
mock.Mock(kind='Job') mock.Mock(kind='NotAJob'),
]) mock.Mock(kind='Job')
])
] ]
included_pods = [ included_pods = [
mock_resource(), mock_resource(),
@ -248,13 +252,11 @@ class PodWaitTestCase(base.ArmadaTestCase):
class JobWaitTestCase(base.ArmadaTestCase): class JobWaitTestCase(base.ArmadaTestCase):
def get_unit(self, labels): def get_unit(self, labels):
return wait.JobWait( return wait.JobWait(
resource_type='job', chart_wait=mock.MagicMock(), labels=labels) resource_type='job', chart_wait=mock.MagicMock(), labels=labels)
def test_include_resource(self): def test_include_resource(self):
def mock_resource(annotations={}, owner_references=None): def mock_resource(annotations={}, owner_references=None):
resource = mock.Mock() resource = mock.Mock()
resource.metadata.annotations = annotations resource.metadata.annotations = annotations
@ -263,10 +265,11 @@ class JobWaitTestCase(base.ArmadaTestCase):
cronjob_jobs = [ cronjob_jobs = [
mock_resource(owner_references=[mock.Mock(kind='CronJob')]), mock_resource(owner_references=[mock.Mock(kind='CronJob')]),
mock_resource(owner_references=[ mock_resource(
mock.Mock(kind='NotACronJob'), owner_references=[
mock.Mock(kind='CronJob') mock.Mock(kind='NotACronJob'),
]) mock.Mock(kind='CronJob')
])
] ]
included_jobs = [ included_jobs = [
mock_resource(), mock_resource(),


@ -18,7 +18,6 @@ from armada.utils import schema
class SchemaTestCase(unittest.TestCase): class SchemaTestCase(unittest.TestCase):
def test_validate_load_schemas(self): def test_validate_load_schemas(self):
expected_schemas = [ expected_schemas = [
'armada/Chart/v1', 'armada/ChartGroup/v1', 'armada/Manifest/v1' 'armada/Chart/v1', 'armada/ChartGroup/v1', 'armada/Manifest/v1'


@ -18,7 +18,6 @@ from armada.utils import release as rel
class ReleaseTestCase(unittest.TestCase): class ReleaseTestCase(unittest.TestCase):
def test_release_prefix_pass(self): def test_release_prefix_pass(self):
expected = 'armada-test' expected = 'armada-test'
prefix, release = ('armada', 'test') prefix, release = ('armada', 'test')


@ -25,7 +25,6 @@ from armada.utils import source
class GitTestCase(base.ArmadaTestCase): class GitTestCase(base.ArmadaTestCase):
def _validate_git_clone(self, repo_dir, expected_ref=None): def _validate_git_clone(self, repo_dir, expected_ref=None):
self.assertTrue(os.path.isdir(repo_dir)) self.assertTrue(os.path.isdir(repo_dir))
self.addCleanup(shutil.rmtree, repo_dir) self.addCleanup(shutil.rmtree, repo_dir)
@ -38,23 +37,23 @@ class GitTestCase(base.ArmadaTestCase):
as git_file: as git_file:
self.assertIn(expected_ref, git_file.read()) self.assertIn(expected_ref, git_file.read())
-    @testtools.skipUnless(base.is_connected(),
-                          'git clone requires network connectivity.')
+    @testtools.skipUnless(
+        base.is_connected(), 'git clone requires network connectivity.')
def test_git_clone_good_url(self): def test_git_clone_good_url(self):
url = 'https://opendev.org/airship/armada.git' url = 'https://opendev.org/airship/armada.git'
git_dir = source.git_clone(url) git_dir = source.git_clone(url)
self._validate_git_clone(git_dir) self._validate_git_clone(git_dir)
@testtools.skipUnless(base.is_connected(), @testtools.skipUnless(
'git clone requires network connectivity.') base.is_connected(), 'git clone requires network connectivity.')
def test_git_clone_commit(self): def test_git_clone_commit(self):
url = 'https://opendev.org/airship/armada.git' url = 'https://opendev.org/airship/armada.git'
commit = 'cba78d1d03e4910f6ab1691bae633c5bddce893d' commit = 'cba78d1d03e4910f6ab1691bae633c5bddce893d'
git_dir = source.git_clone(url, commit) git_dir = source.git_clone(url, commit)
self._validate_git_clone(git_dir, commit) self._validate_git_clone(git_dir, commit)
@testtools.skipUnless(base.is_connected(), @testtools.skipUnless(
'git clone requires network connectivity.') base.is_connected(), 'git clone requires network connectivity.')
def test_git_clone_ref(self): def test_git_clone_ref(self):
ref = 'refs/changes/54/457754/73' ref = 'refs/changes/54/457754/73'
git_dir = source.git_clone( git_dir = source.git_clone(
@ -62,29 +61,29 @@ class GitTestCase(base.ArmadaTestCase):
self._validate_git_clone(git_dir, ref) self._validate_git_clone(git_dir, ref)
@test_utils.attr(type=['negative']) @test_utils.attr(type=['negative'])
@testtools.skipUnless(base.is_connected(), @testtools.skipUnless(
'git clone requires network connectivity.') base.is_connected(), 'git clone requires network connectivity.')
def test_git_clone_empty_url(self): def test_git_clone_empty_url(self):
url = '' url = ''
# error_re = '%s is not a valid git repository.' % url # error_re = '%s is not a valid git repository.' % url
self.assertRaises(source_exceptions.GitException, source.git_clone, self.assertRaises(
url) source_exceptions.GitException, source.git_clone, url)
@test_utils.attr(type=['negative']) @test_utils.attr(type=['negative'])
@testtools.skipUnless(base.is_connected(), @testtools.skipUnless(
'git clone requires network connectivity.') base.is_connected(), 'git clone requires network connectivity.')
def test_git_clone_bad_url(self): def test_git_clone_bad_url(self):
url = 'https://opendev.org/dummy/armada' url = 'https://opendev.org/dummy/armada'
self.assertRaises(source_exceptions.GitException, source.git_clone, self.assertRaises(
url) source_exceptions.GitException, source.git_clone, url)
# TODO need to design a positive proxy test, # TODO need to design a positive proxy test,
# difficult to achieve behind a corporate proxy # difficult to achieve behind a corporate proxy
@test_utils.attr(type=['negative']) @test_utils.attr(type=['negative'])
@testtools.skipUnless(base.is_connected(), @testtools.skipUnless(
'git clone requires network connectivity.') base.is_connected(), 'git clone requires network connectivity.')
def test_git_clone_fake_proxy(self): def test_git_clone_fake_proxy(self):
url = 'https://opendev.org/airship/armada.git' url = 'https://opendev.org/airship/armada.git'
proxy_url = test_utils.rand_name( proxy_url = test_utils.rand_name(
@ -140,14 +139,15 @@ class GitTestCase(base.ArmadaTestCase):
mock_path.exists.return_value = False mock_path.exists.return_value = False
path = '/tmp/armada' path = '/tmp/armada'
self.assertRaises(source_exceptions.InvalidPathException, self.assertRaises(
source.extract_tarball, path) source_exceptions.InvalidPathException, source.extract_tarball,
path)
mock_tarfile.open.assert_not_called() mock_tarfile.open.assert_not_called()
mock_tarfile.extractall.assert_not_called() mock_tarfile.extractall.assert_not_called()
@testtools.skipUnless(base.is_connected(), @testtools.skipUnless(
'git clone requires network connectivity.') base.is_connected(), 'git clone requires network connectivity.')
@mock.patch.object(source, 'LOG') @mock.patch.object(source, 'LOG')
def test_source_cleanup(self, mock_log): def test_source_cleanup(self, mock_log):
url = 'https://opendev.org/airship/armada.git' url = 'https://opendev.org/airship/armada.git'
@ -159,8 +159,8 @@ class GitTestCase(base.ArmadaTestCase):
@mock.patch.object(source, 'LOG') @mock.patch.object(source, 'LOG')
@mock.patch('armada.utils.source.shutil') @mock.patch('armada.utils.source.shutil')
@mock.patch('armada.utils.source.os.path') @mock.patch('armada.utils.source.os.path')
def test_source_cleanup_missing_git_path(self, mock_path, mock_shutil, def test_source_cleanup_missing_git_path(
mock_log): self, mock_path, mock_shutil, mock_log):
# Verify that passing in a missing path does nothing but log a warning. # Verify that passing in a missing path does nothing but log a warning.
mock_path.exists.return_value = False mock_path.exists.return_value = False
path = 'armada' path = 'armada'
@ -169,11 +169,11 @@ class GitTestCase(base.ArmadaTestCase):
mock_shutil.rmtree.assert_not_called() mock_shutil.rmtree.assert_not_called()
self.assertTrue(mock_log.warning.called) self.assertTrue(mock_log.warning.called)
actual_call = mock_log.warning.mock_calls[0][1] actual_call = mock_log.warning.mock_calls[0][1]
self.assertEqual(('Could not find the chart path %s to delete.', path), self.assertEqual(
actual_call) ('Could not find the chart path %s to delete.', path), actual_call)
@testtools.skipUnless(base.is_connected(), @testtools.skipUnless(
'git clone requires network connectivity.') base.is_connected(), 'git clone requires network connectivity.')
@test_utils.attr(type=['negative']) @test_utils.attr(type=['negative'])
@mock.patch.object(source, 'os') @mock.patch.object(source, 'os')
def test_git_clone_ssh_auth_method_fails_auth(self, mock_os): def test_git_clone_ssh_auth_method_fails_auth(self, mock_os):
@ -187,8 +187,8 @@ class GitTestCase(base.ArmadaTestCase):
ref='refs/changes/17/388517/5', ref='refs/changes/17/388517/5',
auth_method='SSH') auth_method='SSH')
@testtools.skipUnless(base.is_connected(), @testtools.skipUnless(
'git clone requires network connectivity.') base.is_connected(), 'git clone requires network connectivity.')
@test_utils.attr(type=['negative']) @test_utils.attr(type=['negative'])
@mock.patch.object(source, 'os') @mock.patch.object(source, 'os')
def test_git_clone_ssh_auth_method_missing_ssh_key(self, mock_os): def test_git_clone_ssh_auth_method_missing_ssh_key(self, mock_os):


@ -13,9 +13,9 @@
# limitations under the License. # limitations under the License.
 import os
-import yaml
 import testtools
+import yaml
from armada.tests.unit import base from armada.tests.unit import base
from armada.utils import validate from armada.utils import validate
@ -67,7 +67,6 @@ data:
class BaseValidateTest(base.ArmadaTestCase): class BaseValidateTest(base.ArmadaTestCase):
def setUp(self): def setUp(self):
super(BaseValidateTest, self).setUp() super(BaseValidateTest, self).setUp()
self.basepath = os.path.join(os.path.dirname(__file__), os.pardir) self.basepath = os.path.join(os.path.dirname(__file__), os.pardir)
@ -110,7 +109,6 @@ class ValidateOwnExamplesTestCase(BaseValidateTest):
class ValidateTestCase(BaseValidateTest): class ValidateTestCase(BaseValidateTest):
def test_validate_armada_yaml_passes(self): def test_validate_armada_yaml_passes(self):
template = '{}/resources/valid_armada_document.yaml'.format( template = '{}/resources/valid_armada_document.yaml'.format(
self.basepath) self.basepath)
@ -215,12 +213,11 @@ data:
class ValidateNegativeTestCase(BaseValidateTest): class ValidateNegativeTestCase(BaseValidateTest):
def test_validate_no_dictionary_expect_type_error(self): def test_validate_no_dictionary_expect_type_error(self):
expected_error = 'The provided input "invalid" must be a dictionary.' expected_error = 'The provided input "invalid" must be a dictionary.'
self.assertRaisesRegexp(TypeError, expected_error, self.assertRaisesRegexp(
validate.validate_armada_documents, TypeError, expected_error, validate.validate_armada_documents,
['invalid']) ['invalid'])
def test_validate_invalid_chart_armada_manifest(self): def test_validate_invalid_chart_armada_manifest(self):
template = '{}/resources/valid_armada_document.yaml'.format( template = '{}/resources/valid_armada_document.yaml'.format(


@ -12,10 +12,10 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
-from armada.utils.helm import get_test_suite_run_success
 import time
+from armada.utils.helm import get_test_suite_run_success
def release_prefixer(prefix, release): def release_prefixer(prefix, release):
''' '''


@ -65,8 +65,9 @@ def git_clone(repo_url, ref='master', proxy_server=None, auth_method=None):
'with SSH authentication.', repo_url, ref) 'with SSH authentication.', repo_url, ref)
if not os.path.exists(CONF.ssh_key_path): if not os.path.exists(CONF.ssh_key_path):
-            LOG.error('SSH auth method was specified for cloning repo but '
-                      'the SSH key under CONF.ssh_key_path was not found.')
+            LOG.error(
+                'SSH auth method was specified for cloning repo but '
+                'the SSH key under CONF.ssh_key_path was not found.')
raise source_exceptions.GitSSHException(CONF.ssh_key_path) raise source_exceptions.GitSSHException(CONF.ssh_key_path)
ssh_cmd = ( ssh_cmd = (
@ -99,8 +100,8 @@ def git_clone(repo_url, ref='master', proxy_server=None, auth_method=None):
except git_exc.GitCommandError as e: except git_exc.GitCommandError as e:
LOG.exception('Encountered GitCommandError during clone.') LOG.exception('Encountered GitCommandError during clone.')
if ssh_cmd and ssh_cmd in e.stderr: if ssh_cmd and ssh_cmd in e.stderr:
-            raise source_exceptions.GitAuthException(repo_url,
-                                                     CONF.ssh_key_path)
+            raise source_exceptions.GitAuthException(
+                repo_url, CONF.ssh_key_path)
elif 'Could not resolve proxy' in e.stderr: elif 'Could not resolve proxy' in e.stderr:
raise source_exceptions.GitProxyException(proxy_server) raise source_exceptions.GitProxyException(proxy_server)
else: else:
@ -165,7 +166,7 @@ def source_cleanup(chart_path):
try: try:
shutil.rmtree(chart_path) shutil.rmtree(chart_path)
except OSError as e: except OSError as e:
-        LOG.warning('Could not delete the path %s. Details: %s',
-                    chart_path, e)
+        LOG.warning(
+            'Could not delete the path %s. Details: %s', chart_path, e)
else: else:
LOG.warning('Could not find the chart path %s to delete.', chart_path) LOG.warning('Could not find the chart path %s to delete.', chart_path)


@ -12,10 +12,10 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
-import jsonschema
-import requests
 import traceback
+import jsonschema
+import requests
from oslo_log import log as logging from oslo_log import log as logging
from armada import const from armada import const
@ -124,8 +124,9 @@ def validate_armada_document(document):
LOG.info('ValidationMessage: %s', vmsg.get_output_json()) LOG.info('ValidationMessage: %s', vmsg.get_output_json())
details.append(vmsg.get_output()) details.append(vmsg.get_output())
except jsonschema.SchemaError as e: except jsonschema.SchemaError as e:
-        error_message = ('The built-in Armada JSON schema %s is invalid. '
-                         'Details: %s.' % (e.schema, e.message))
+        error_message = (
+            'The built-in Armada JSON schema %s is invalid. '
+            'Details: %s.' % (e.schema, e.message))
vmsg = ValidationMessage( vmsg = ValidationMessage(
message=error_message, message=error_message,
error=True, error=True,


@ -32,14 +32,15 @@ class ValidationMessage(object):
or details for resolution. or details for resolution.
""" """
-    def __init__(self,
-                 message='Document validation error.',
-                 error=True,
-                 name='Armada error',
-                 level='Error',
-                 schema=None,
-                 doc_name=None,
-                 diagnostic=None):
+    def __init__(
+            self,
+            message='Document validation error.',
+            error=True,
+            name='Armada error',
+            level='Error',
+            schema=None,
+            doc_name=None,
+            diagnostic=None):
# TODO(MarshM) should validate error and level inputs # TODO(MarshM) should validate error and level inputs


@ -13,6 +13,7 @@ os-testr>=1.0.0 # Apache-2.0
 flake8>=3.3.0
 mock
 responses>=0.8.1
-yapf==0.26.0
+yapf==0.27.0
+flake8-import-order==0.18.1
 grpcio-tools==1.16.0
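The hunk above bumps yapf to 0.27.0 and adds the flake8-import-order plugin to the test requirements. As a rough, illustrative sketch (the module names are only examples drawn from this codebase, and importing the armada ones assumes the package and its dependencies are installed), the pep8 import-order style it checks groups imports as standard library, then third-party, then the local armada package, each group alphabetized -- the same ordering applied by the reshuffled import hunks earlier in this change:

    # Illustrative layout only: stdlib first, then third-party, then local
    # "armada" imports, each group alphabetized and separated by a blank line.
    import json
    import os

    import testtools
    import yaml

    from armada.handlers import manifest
    from armada.tests.unit import base

Violations surface through the same pep8 tox environment that now also runs yapf -dr over the tree.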

18
tox.ini

@ -72,7 +72,7 @@ commands =
     # Whitespace linter (for chart files)
     bash {toxinidir}/tools/whitespace-linter.sh
     yapf -dr {toxinidir}/armada {toxinidir}/setup.py
-    flake8 {posargs}
+    flake8 {toxinidir}/armada {toxinidir}/setup.py
# Run security linter as part of the pep8 gate instead of a separate zuul job. # Run security linter as part of the pep8 gate instead of a separate zuul job.
bandit -r armada -n 5 -x armada/tests/* bandit -r armada -n 5 -x armada/tests/*
@ -94,7 +94,7 @@ commands =
coverage xml -o cover/coverage.xml coverage xml -o cover/coverage.xml
coverage report coverage report
-[testenv:yapf]
+[testenv:fmt]
 basepython = python3
 deps = {[testenv]deps}
 commands =
@ -102,8 +102,14 @@ commands =
 [flake8]
 filename = *.py
-# These are ignored intentionally:
-# W504 - line break after binary operator, we cannot have both
-# W503 and W504 enabled
-ignore = W504
+show-source = true
+# [H106] Don't put vim configuration in source files.
+# [H201] No 'except:' at least use 'except Exception:'
+# [H904] Delay string interpolations at logging calls.
+enable-extensions = H106,H201,H904
+# [W503] line break before binary operator
+ignore = W503
 exclude = .git,.tox,dist,*lib/python*,*egg,build,releasenotes,doc/*,hapi,venv
+max-complexity = 24
+application-import-names = armada
+import-order-style = pep8
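Switching the ignore list from W504 to W503 follows from the formatting style itself: with yapf configured to split before logical and arithmetic operators, wrapped expressions now break before the operator, which is exactly the pattern pycodestyle reports as W503. A minimal sketch of the resulting style (the function and its arguments are hypothetical, used only to show the wrapping):

    def _is_ready(status, actual_ready, min_ready):
        # The continuation line starts with the operator ("and"), which
        # pycodestyle flags as W503; W504 (operator at end of line) no
        # longer occurs, so only W503 needs to be ignored.
        return (status == 'Running'
                and actual_ready >= min_ready)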