Standardize Armada code with YAPF

From the recently merged document updates in [0], there is a desire to
standardize the Airship project Python codebase. This is the effort to
do so for the Armada project.

[0] https://review.opendev.org/#/c/671291/

Change-Id: I4fe916d6e330618ea3a1fccfa4bdfdfabb9ffcb2
HUGHES, ALEXANDER (ah8742) 2019-07-24 14:27:20 -05:00 committed by Alexander Hughes
parent d404e3c034
commit b787c418e3
77 changed files with 1298 additions and 1182 deletions

.style.yapf (new file, 10 lines)
View File

@ -0,0 +1,10 @@
[style]
based_on_style = pep8
spaces_before_comment = 2
column_limit = 79
blank_line_before_nested_class_or_def = false
blank_line_before_module_docstring = true
split_before_logical_operator = true
split_before_first_argument = true
allow_split_before_dict_value = false
split_before_arithmetic_operator = true
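
A quick way to preview the effect of these settings is yapf's Python API (a minimal sketch, assuming yapf is installed and run from the repository root so the style file above is found; the sample statement is illustrative, not taken from the Armada tree):

    # Apply the repository's .style.yapf to a snippet and print the result.
    from yapf.yapflib.yapf_api import FormatCode

    sample = (
        "x = some_function(argument_one, argument_two, argument_three, "
        "argument_four, argument_five)\n"
    )
    formatted, changed = FormatCode(sample, style_config='.style.yapf')
    print(formatted)
    # The call is rewrapped to the 79-column limit, splitting before the
    # first argument as configured above.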

View File

@ -163,6 +163,11 @@ test-coverage: check-tox
test-bandit: check-tox
tox -e bandit
# Perform auto formatting
.PHONY: format
format:
tox -e fmt
# style checks
.PHONY: lint
lint: test-pep8 helm_lint
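
Locally the new target is invoked as "make format". A rough equivalent, assuming the tox "fmt" environment simply wraps yapf with the style file added above (the tox.ini wiring is not shown in this diff), is:

    tox -e fmt
    # or run yapf directly against the package from the repository root:
    yapf --in-place --recursive --style .style.yapf armada/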

View File

@ -15,13 +15,12 @@
import json
import logging as log
import uuid
import yaml
from oslo_utils import excutils
import falcon
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
import yaml
from armada.handlers.tiller import Tiller
@ -31,7 +30,6 @@ HEALTH_PATH = 'health'
class BaseResource(object):
def __init__(self):
self.logger = logging.getLogger(__name__)
@ -80,11 +78,12 @@ class BaseResource(object):
raise Exception("%s: Invalid JSON in body: %s" % (req.path, jex))
def return_error(self, resp, status_code, message="", retry=False):
resp.body = json.dumps({
'type': 'error',
'message': message,
'retry': retry
})
resp.body = json.dumps(
{
'type': 'error',
'message': message,
'retry': retry
})
resp.status = status_code
def log_error(self, ctx, level, msg):
@ -122,7 +121,6 @@ class BaseResource(object):
class ArmadaRequestContext(object):
def __init__(self):
self.log_level = 'ERROR'
self.user = None # Username

View File

@ -13,9 +13,9 @@
# limitations under the License.
import json
import yaml
import falcon
import yaml
from armada import api
from armada.common import policy
@ -60,8 +60,8 @@ class Apply(api.BaseResource):
documents, overrides=req_body.get('overrides'))
documents = overrides.update_manifests()
else:
self.error(req.context,
"Unknown content-type %s" % req.content_type)
self.error(
req.context, "Unknown content-type %s" % req.content_type)
# TODO(fmontei): Use falcon.<Relevant API Exception Class> instead.
return self.return_error(
resp,

View File

@ -13,10 +13,10 @@
# limitations under the License.
import json
import yaml
import falcon
from oslo_config import cfg
import yaml
from armada import api
from armada.common import policy
@ -165,8 +165,8 @@ class TestReleasesManifestController(api.BaseResource):
else:
message['test']['failed'].append(release_name)
else:
self.logger.info('Release %s not found - SKIPPING',
release_name)
self.logger.info(
'Release %s not found - SKIPPING', release_name)
message['test']['skipped'].append(release_name)
resp.status = falcon.HTTP_200

View File

@ -26,7 +26,6 @@ LOG = logging.getLogger(__name__)
class Status(api.BaseResource):
@policy.enforce('tiller:get_status')
def on_get(self, req, resp):
'''
@ -45,9 +44,10 @@ class Status(api.BaseResource):
self.return_error(resp, falcon.HTTP_500, message=err_message)
def handle(self, tiller):
LOG.debug('Tiller (Status) at: %s:%s, namespace=%s, '
'timeout=%s', tiller.tiller_host, tiller.tiller_port,
tiller.tiller_namespace, tiller.timeout)
LOG.debug(
'Tiller (Status) at: %s:%s, namespace=%s, '
'timeout=%s', tiller.tiller_host, tiller.tiller_port,
tiller.tiller_namespace, tiller.timeout)
message = {
'tiller': {
@ -59,7 +59,6 @@ class Status(api.BaseResource):
class Release(api.BaseResource):
@policy.enforce('tiller:get_release')
def on_get(self, req, resp):
'''Controller for listing Tiller releases.
@ -79,9 +78,10 @@ class Release(api.BaseResource):
self.return_error(resp, falcon.HTTP_500, message=err_message)
def handle(self, tiller):
LOG.debug('Tiller (Release) at: %s:%s, namespace=%s, '
'timeout=%s', tiller.tiller_host, tiller.tiller_port,
tiller.tiller_namespace, tiller.timeout)
LOG.debug(
'Tiller (Release) at: %s:%s, namespace=%s, '
'timeout=%s', tiller.tiller_host, tiller.tiller_port,
tiller.tiller_namespace, tiller.timeout)
releases = {}
for release in tiller.list_releases():

View File

@ -13,6 +13,7 @@
# limitations under the License.
import json
import falcon
import yaml
@ -33,8 +34,9 @@ class Validate(api.BaseResource):
self.logger.debug("Validating manifest based on reference.")
json_body = self.req_json(req)
if json_body.get('href', None):
self.logger.debug("Validating manifest from reference %s."
% json_body.get('href'))
self.logger.debug(
"Validating manifest from reference %s."
% json_body.get('href'))
data = ReferenceResolver.resolve_reference(
json_body.get('href'))
documents = list()

View File

@ -13,19 +13,17 @@
# limitations under the License.
import re
from armada.api import HEALTH_PATH
from uuid import UUID
from oslo_config import cfg
from oslo_log import log as logging
from armada.api import HEALTH_PATH
CONF = cfg.CONF
class AuthMiddleware(object):
def __init__(self):
self.logger = logging.getLogger(__name__)
@ -76,7 +74,6 @@ class AuthMiddleware(object):
class ContextMiddleware(object):
def process_request(self, req, resp):
ctx = req.context
@ -103,7 +100,6 @@ class ContextMiddleware(object):
class LoggingMiddleware(object):
def __init__(self):
self.logger = logging.getLogger(__name__)

View File

@ -79,8 +79,8 @@ def create(enable_middleware=CONF.middleware):
# Error handlers (FILO handling)
api.add_error_handler(Exception, exceptions.default_exception_handler)
api.add_error_handler(exceptions.ArmadaAPIException,
exceptions.ArmadaAPIException.handle)
api.add_error_handler(
exceptions.ArmadaAPIException, exceptions.ArmadaAPIException.handle)
# Built-in error serializer
api.set_error_serializer(exceptions.default_error_serializer)

View File

@ -28,7 +28,6 @@ LOG = logging.getLogger(__name__)
class CliAction(object):
def __init__(self):
self.logger = LOG
logging.set_defaults(default_log_levels=CONF.default_log_levels)

View File

@ -12,10 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
import click
from oslo_config import cfg
import yaml
from armada.cli import CliAction
from armada.exceptions.source_exceptions import InvalidPathException
@ -86,11 +85,12 @@ SHORT_DESC = "Command installs manifest charts."
'--use-doc-ref', help="Use armada manifest file reference.", is_flag=True)
@click.option(
'--set',
help=("Use to override Armada Manifest values. Accepts "
"overrides that adhere to the format "
"<path>:<to>:<property>=<value> to specify a primitive or "
"<path>:<to>:<property>=<value1>,...,<valueN> to specify "
"a list of values."),
help=(
"Use to override Armada Manifest values. Accepts "
"overrides that adhere to the format "
"<path>:<to>:<property>=<value> to specify a primitive or "
"<path>:<to>:<property>=<value1>,...,<valueN> to specify "
"a list of values."),
multiple=True,
type=str,
default=[])
@ -111,42 +111,47 @@ SHORT_DESC = "Command installs manifest charts."
@click.option(
'--values',
'-f',
help=("Use to override multiple Armada Manifest values by "
"reading overrides from a values.yaml-type file."),
help=(
"Use to override multiple Armada Manifest values by "
"reading overrides from a values.yaml-type file."),
multiple=True,
type=str,
default=[])
@click.option(
'--wait',
help=("Force Tiller to wait until all charts are deployed, "
"rather than using each chart's specified wait policy. "
"This is equivalent to sequenced chartgroups."),
help=(
"Force Tiller to wait until all charts are deployed, "
"rather than using each chart's specified wait policy. "
"This is equivalent to sequenced chartgroups."),
is_flag=True)
@click.option(
'--target-manifest',
help=("The target manifest to run. Required for specifying "
"which manifest to run when multiple are available."),
help=(
"The target manifest to run. Required for specifying "
"which manifest to run when multiple are available."),
default=None)
@click.option('--bearer-token', help="User Bearer token", default=None)
@click.option('--debug', help="Enable debug logging.", is_flag=True)
@click.pass_context
def apply_create(ctx, locations, api, disable_update_post, disable_update_pre,
dry_run, enable_chart_cleanup, use_doc_ref, set, tiller_host,
tiller_port, tiller_namespace, timeout, values, wait,
target_manifest, bearer_token, debug):
def apply_create(
ctx, locations, api, disable_update_post, disable_update_pre, dry_run,
enable_chart_cleanup, use_doc_ref, set, tiller_host, tiller_port,
tiller_namespace, timeout, values, wait, target_manifest, bearer_token,
debug):
CONF.debug = debug
ApplyManifest(ctx, locations, api, disable_update_post, disable_update_pre,
dry_run, enable_chart_cleanup, use_doc_ref, set, tiller_host,
tiller_port, tiller_namespace, timeout, values, wait,
target_manifest, bearer_token).safe_invoke()
ApplyManifest(
ctx, locations, api, disable_update_post, disable_update_pre, dry_run,
enable_chart_cleanup, use_doc_ref, set, tiller_host, tiller_port,
tiller_namespace, timeout, values, wait, target_manifest,
bearer_token).safe_invoke()
class ApplyManifest(CliAction):
def __init__(self, ctx, locations, api, disable_update_post,
disable_update_pre, dry_run, enable_chart_cleanup,
use_doc_ref, set, tiller_host, tiller_port, tiller_namespace,
timeout, values, wait, target_manifest, bearer_token):
def __init__(
self, ctx, locations, api, disable_update_post, disable_update_pre,
dry_run, enable_chart_cleanup, use_doc_ref, set, tiller_host,
tiller_port, tiller_namespace, timeout, values, wait,
target_manifest, bearer_token):
super(ApplyManifest, self).__init__()
self.ctx = ctx
# Filename can also be a URL reference
@ -199,12 +204,11 @@ class ApplyManifest(CliAction):
return
if not self.ctx.obj.get('api', False):
with Tiller(
tiller_host=self.tiller_host,
tiller_port=self.tiller_port,
tiller_namespace=self.tiller_namespace,
bearer_token=self.bearer_token,
dry_run=self.dry_run) as tiller:
with Tiller(tiller_host=self.tiller_host,
tiller_port=self.tiller_port,
tiller_namespace=self.tiller_namespace,
bearer_token=self.bearer_token,
dry_run=self.dry_run) as tiller:
resp = self.handle(documents, tiller)
self.output(resp)

View File

@ -12,10 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
import click
from oslo_config import cfg
import yaml
from armada.cli import CliAction
from armada import const
@ -70,17 +69,19 @@ SHORT_DESC = "Command deletes releases."
@click.option('--bearer-token', help="User Bearer token.", default=None)
@click.option('--debug', help="Enable debug logging.", is_flag=True)
@click.pass_context
def delete_charts(ctx, manifest, releases, no_purge, tiller_host, tiller_port,
bearer_token, debug):
def delete_charts(
ctx, manifest, releases, no_purge, tiller_host, tiller_port,
bearer_token, debug):
CONF.debug = debug
DeleteChartManifest(ctx, manifest, releases, no_purge, tiller_host,
tiller_port, bearer_token).safe_invoke()
DeleteChartManifest(
ctx, manifest, releases, no_purge, tiller_host, tiller_port,
bearer_token).safe_invoke()
class DeleteChartManifest(CliAction):
def __init__(self, ctx, manifest, releases, no_purge, tiller_host,
tiller_port, bearer_token):
def __init__(
self, ctx, manifest, releases, no_purge, tiller_host, tiller_port,
bearer_token):
super(DeleteChartManifest, self).__init__()
self.ctx = ctx
@ -92,10 +93,8 @@ class DeleteChartManifest(CliAction):
self.bearer_token = bearer_token
def invoke(self):
with Tiller(
tiller_host=self.tiller_host,
tiller_port=self.tiller_port,
bearer_token=self.bearer_token) as tiller:
with Tiller(tiller_host=self.tiller_host, tiller_port=self.tiller_port,
bearer_token=self.bearer_token) as tiller:
self.handle(tiller)
@lock_and_thread()

View File

@ -81,20 +81,22 @@ SHORT_DESC = "Command performs a release rollback."
@click.option('--bearer-token', help=("User bearer token."), default=None)
@click.option('--debug', help="Enable debug logging.", is_flag=True)
@click.pass_context
def rollback_charts(ctx, release, version, dry_run, tiller_host, tiller_port,
tiller_namespace, timeout, wait, force, recreate_pods,
bearer_token, debug):
def rollback_charts(
ctx, release, version, dry_run, tiller_host, tiller_port,
tiller_namespace, timeout, wait, force, recreate_pods, bearer_token,
debug):
CONF.debug = debug
Rollback(ctx, release, version, dry_run, tiller_host, tiller_port,
tiller_namespace, timeout, wait, force, recreate_pods,
bearer_token).safe_invoke()
Rollback(
ctx, release, version, dry_run, tiller_host, tiller_port,
tiller_namespace, timeout, wait, force, recreate_pods,
bearer_token).safe_invoke()
class Rollback(CliAction):
def __init__(self, ctx, release, version, dry_run, tiller_host,
tiller_port, tiller_namespace, timeout, wait, force,
recreate_pods, bearer_token):
def __init__(
self, ctx, release, version, dry_run, tiller_host, tiller_port,
tiller_namespace, timeout, wait, force, recreate_pods,
bearer_token):
super(Rollback, self).__init__()
self.ctx = ctx
self.release = release
@ -110,12 +112,10 @@ class Rollback(CliAction):
self.bearer_token = bearer_token
def invoke(self):
with Tiller(
tiller_host=self.tiller_host,
tiller_port=self.tiller_port,
tiller_namespace=self.tiller_namespace,
bearer_token=self.bearer_token,
dry_run=self.dry_run) as tiller:
with Tiller(tiller_host=self.tiller_host, tiller_port=self.tiller_port,
tiller_namespace=self.tiller_namespace,
bearer_token=self.bearer_token,
dry_run=self.dry_run) as tiller:
response = self.handle(tiller)
@ -132,5 +132,6 @@ class Rollback(CliAction):
recreate_pods=self.recreate_pods)
def output(self, response):
self.logger.info(('(dry run) ' if self.dry_run else '') +
'Rollback of %s complete.', self.release)
self.logger.info(
('(dry run) ' if self.dry_run else '')
+ 'Rollback of %s complete.', self.release)

View File

@ -12,10 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
import click
from oslo_config import cfg
import yaml
from armada.cli import CliAction
from armada import const
@ -69,8 +68,9 @@ SHORT_DESC = "Command tests releases."
default=None)
@click.option(
'--target-manifest',
help=("The target manifest to run. Required for specifying "
"which manifest to run when multiple are available."),
help=(
"The target manifest to run. Required for specifying "
"which manifest to run when multiple are available."),
default=None)
@click.option(
'--cleanup',
@ -79,24 +79,26 @@ SHORT_DESC = "Command tests releases."
default=None)
@click.option(
'--enable-all',
help=("Run all tests for all releases regardless of any disabled chart "
"tests."),
help=(
"Run all tests for all releases regardless of any disabled chart "
"tests."),
is_flag=True,
default=False)
@click.option('--debug', help="Enable debug logging.", is_flag=True)
@click.pass_context
def test_charts(ctx, file, release, tiller_host, tiller_port, tiller_namespace,
target_manifest, cleanup, enable_all, debug):
def test_charts(
ctx, file, release, tiller_host, tiller_port, tiller_namespace,
target_manifest, cleanup, enable_all, debug):
CONF.debug = debug
TestChartManifest(ctx, file, release, tiller_host, tiller_port,
tiller_namespace, target_manifest, cleanup,
enable_all).safe_invoke()
TestChartManifest(
ctx, file, release, tiller_host, tiller_port, tiller_namespace,
target_manifest, cleanup, enable_all).safe_invoke()
class TestChartManifest(CliAction):
def __init__(self, ctx, file, release, tiller_host, tiller_port,
tiller_namespace, target_manifest, cleanup, enable_all):
def __init__(
self, ctx, file, release, tiller_host, tiller_port,
tiller_namespace, target_manifest, cleanup, enable_all):
super(TestChartManifest, self).__init__()
self.ctx = ctx
@ -110,10 +112,8 @@ class TestChartManifest(CliAction):
self.enable_all = enable_all
def invoke(self):
with Tiller(
tiller_host=self.tiller_host,
tiller_port=self.tiller_port,
tiller_namespace=self.tiller_namespace) as tiller:
with Tiller(tiller_host=self.tiller_host, tiller_port=self.tiller_port,
tiller_namespace=self.tiller_namespace) as tiller:
self.handle(tiller)
@ -123,10 +123,8 @@ class TestChartManifest(CliAction):
if self.release:
if not self.ctx.obj.get('api', False):
test_handler = Test({},
self.release,
tiller,
cleanup=self.cleanup)
test_handler = Test(
{}, self.release, tiller, cleanup=self.cleanup)
test_handler.test_release_for_success()
else:
client = self.ctx.obj.get('CLIENT')
@ -168,8 +166,9 @@ class TestChartManifest(CliAction):
if test_handler.test_enabled:
test_handler.test_release_for_success()
else:
self.logger.info('Release %s not found - SKIPPING',
release_name)
self.logger.info(
'Release %s not found - SKIPPING',
release_name)
else:
client = self.ctx.obj.get('CLIENT')
query = {

View File

@ -61,17 +61,19 @@ SHORT_DESC = "Command gets Tiller information."
@click.option('--bearer-token', help="User bearer token.", default=None)
@click.option('--debug', help="Enable debug logging.", is_flag=True)
@click.pass_context
def tiller_service(ctx, tiller_host, tiller_port, tiller_namespace, releases,
status, bearer_token, debug):
def tiller_service(
ctx, tiller_host, tiller_port, tiller_namespace, releases, status,
bearer_token, debug):
CONF.debug = debug
TillerServices(ctx, tiller_host, tiller_port, tiller_namespace, releases,
status, bearer_token).safe_invoke()
TillerServices(
ctx, tiller_host, tiller_port, tiller_namespace, releases, status,
bearer_token).safe_invoke()
class TillerServices(CliAction):
def __init__(self, ctx, tiller_host, tiller_port, tiller_namespace,
releases, status, bearer_token):
def __init__(
self, ctx, tiller_host, tiller_port, tiller_namespace, releases,
status, bearer_token):
super(TillerServices, self).__init__()
self.ctx = ctx
self.tiller_host = tiller_host
@ -83,11 +85,9 @@ class TillerServices(CliAction):
def invoke(self):
with Tiller(
tiller_host=self.tiller_host,
tiller_port=self.tiller_port,
tiller_namespace=self.tiller_namespace,
bearer_token=self.bearer_token) as tiller:
with Tiller(tiller_host=self.tiller_host, tiller_port=self.tiller_port,
tiller_namespace=self.tiller_namespace,
bearer_token=self.bearer_token) as tiller:
self.handle(tiller)
@ -113,8 +113,9 @@ class TillerServices(CliAction):
if self.releases:
if not self.ctx.obj.get('api', False):
for release in tiller.list_releases():
self.logger.info("Release %s in namespace: %s",
release.name, release.namespace)
self.logger.info(
"Release %s in namespace: %s", release.name,
release.namespace)
else:
client = self.ctx.obj.get('CLIENT')
query = {
@ -125,5 +126,5 @@ class TillerServices(CliAction):
resp = client.get_releases(query=query)
for namespace in resp.get('releases'):
for release in resp.get('releases').get(namespace):
self.logger.info('Release %s in namespace: %s',
release, namespace)
self.logger.info(
'Release %s in namespace: %s', release, namespace)

View File

@ -52,7 +52,6 @@ def validate_manifest(ctx, locations, debug):
class ValidateManifest(CliAction):
def __init__(self, ctx, locations):
super(ValidateManifest, self).__init__()
self.ctx = ctx
@ -71,20 +70,22 @@ class ValidateManifest(CliAction):
if not documents:
self.logger.warn('No documents to validate.')
elif valid:
self.logger.info('Successfully validated: %s',
self.locations)
self.logger.info(
'Successfully validated: %s', self.locations)
else:
self.logger.info('Validation failed: %s', self.locations)
for m in details:
self.logger.info('Validation details: %s', str(m))
except Exception:
raise Exception('Exception raised during '
'validation: %s', self.locations)
raise Exception(
'Exception raised during '
'validation: %s', self.locations)
else:
if len(self.locations) > 1:
self.logger.error("Cannot specify multiple locations "
"when using validate API.")
self.logger.error(
"Cannot specify multiple locations "
"when using validate API.")
return
client = self.ctx.obj.get('CLIENT')

View File

@ -12,10 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
from oslo_config import cfg
from oslo_log import log as logging
import yaml
from armada.exceptions import api_exceptions as err
from armada.handlers.armada import Override
@ -27,7 +26,6 @@ API_VERSION = 'v{}/{}'
class ArmadaClient(object):
def __init__(self, session):
self.session = session
@ -69,13 +67,14 @@ class ArmadaClient(object):
return resp.json()
def post_apply(self,
manifest=None,
manifest_ref=None,
values=None,
set=None,
query=None,
timeout=None):
def post_apply(
self,
manifest=None,
manifest_ref=None,
values=None,
set=None,
query=None,
timeout=None):
"""Call the Armada API to apply a Manifest.
If ``manifest`` is not None, then the request body will be a fully

View File

@ -18,5 +18,5 @@ from armada.common.policies import tiller
def list_rules():
return itertools.chain(base.list_rules(), service.list_rules(),
tiller.list_rules())
return itertools.chain(
base.list_rules(), service.list_rules(), tiller.list_rules())

View File

@ -51,10 +51,12 @@ armada_policies = [
name=base.ARMADA % 'rollback_release',
check_str=base.RULE_ADMIN_REQUIRED,
description='Rollback release',
operations=[{
'path': '/api/v1.0/rollback/{release}',
'method': 'POST'
}]),
operations=[
{
'path': '/api/v1.0/rollback/{release}',
'method': 'POST'
}
]),
]

View File

@ -50,11 +50,11 @@ def _enforce_policy(action, target, credentials, do_raise=True):
# to enforce anything not found in ``armada.common.policies`` will error
# out with a 'Policy not registered' message and 403 status code.
try:
_ENFORCER.authorize(action, target, credentials.to_policy_view(),
**extras)
_ENFORCER.authorize(
action, target, credentials.to_policy_view(), **extras)
except policy.PolicyNotRegistered:
LOG.exception('Policy not registered for %(action)s',
{'action': action})
LOG.exception(
'Policy not registered for %(action)s', {'action': action})
raise exc.ActionForbidden()
except Exception:
with excutils.save_and_reraise_exception():
@ -69,9 +69,7 @@ def _enforce_policy(action, target, credentials, do_raise=True):
# NOTE(felipemonteiro): This naming is OK. It's just kept around for legacy
# reasons. What's important is that authorize is used above.
def enforce(rule):
def decorator(func):
@functools.wraps(func)
def handler(*args, **kwargs):
setup_policy()

View File

@ -12,10 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
from oslo_config import cfg
from oslo_log import log as logging
import requests
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
@ -35,33 +34,35 @@ class ArmadaSession(object):
read timeout to use
"""
def __init__(self,
host,
port=None,
scheme='http',
token=None,
marker=None,
end_user=None,
timeout=None):
def __init__(
self,
host,
port=None,
scheme='http',
token=None,
marker=None,
end_user=None,
timeout=None):
self._session = requests.Session()
self._session.headers.update({
'X-Auth-Token': token,
'X-Context-Marker': marker,
'X-End-User': end_user,
})
self._session.headers.update(
{
'X-Auth-Token': token,
'X-Context-Marker': marker,
'X-End-User': end_user,
})
self.host = host
self.scheme = scheme
if port:
self.port = port
self.base_url = "{}://{}:{}/api/".format(self.scheme, self.host,
self.port)
self.base_url = "{}://{}:{}/api/".format(
self.scheme, self.host, self.port)
else:
self.base_url = "{}://{}/api/".format(self.scheme, self.host)
self.default_timeout = ArmadaSession._calc_timeout_tuple((20, 3600),
timeout)
self.default_timeout = ArmadaSession._calc_timeout_tuple(
(20, 3600), timeout)
self.token = token
self.marker = marker
self.end_user = end_user
@ -91,13 +92,14 @@ class ArmadaSession(object):
return resp
def post(self,
endpoint,
query=None,
body=None,
data=None,
headers=None,
timeout=None):
def post(
self,
endpoint,
query=None,
body=None,
data=None,
headers=None,
timeout=None):
"""
Send a POST request to armada. If both body and data are specified,
body will be used.

View File

@ -76,8 +76,8 @@ def set_default_for_default_log_levels():
extra_log_level_defaults = ['kubernetes.client.rest=INFO']
log.set_defaults(
default_log_levels=log.get_default_log_levels() +
extra_log_level_defaults, )
default_log_levels=log.get_default_log_levels()
+ extra_log_level_defaults, )
class ChartDeployAwareLogger(logging.Logger):
@ -93,5 +93,5 @@ class ChartDeployAwareLogger(logging.Logger):
else:
prefix = ''
prefixed = '{}{}'.format(prefix, msg)
return super(ChartDeployAwareLogger, self)._log(
level, prefixed, *args, **kwargs)
return super(ChartDeployAwareLogger,
self)._log(level, prefixed, *args, **kwargs)

View File

@ -29,7 +29,8 @@ default_options = [
cfg.StrOpt(
'certs',
default=None,
help=utils.fmt("""
help=utils.fmt(
"""
Absolute path to the certificate file to use for chart registries
""")),
cfg.StrOpt(
@ -39,13 +40,15 @@ Absolute path to the certificate file to use for chart registries
cfg.BoolOpt(
'middleware',
default=True,
help=utils.fmt("""
help=utils.fmt(
"""
Enables or disables Keystone authentication middleware.
""")),
cfg.StrOpt(
'project_domain_name',
default='default',
help=utils.fmt("""
help=utils.fmt(
"""
The Keystone project domain name used for authentication.
""")),
cfg.StrOpt(
@ -58,7 +61,8 @@ The Keystone project domain name used for authentication.
cfg.StrOpt(
'ssh_key_path',
default='/home/user/.ssh/',
help=utils.fmt("""Optional path to an SSH private key used for
help=utils.fmt(
"""Optional path to an SSH private key used for
authenticating against a Git source repository. The path must be an absolute
path to the private key that includes the name of the key itself.""")),
cfg.StrOpt(
@ -85,25 +89,29 @@ path to the private key that includes the name of the key itself.""")),
'lock_acquire_timeout',
default=60,
min=0,
help=utils.fmt("""Time in seconds of how long armada will attempt to
help=utils.fmt(
"""Time in seconds of how long armada will attempt to
acquire a lock before an exception is raised""")),
cfg.IntOpt(
'lock_acquire_delay',
default=5,
min=0,
help=utils.fmt("""Time in seconds of how long to wait between attempts
help=utils.fmt(
"""Time in seconds of how long to wait between attempts
to acquire a lock""")),
cfg.IntOpt(
'lock_update_interval',
default=60,
min=0,
help=utils.fmt("""Time in seconds of how often armada will update the
help=utils.fmt(
"""Time in seconds of how often armada will update the
lock while it is continuing to do work""")),
cfg.IntOpt(
'lock_expiration',
default=600,
min=0,
help=utils.fmt("""Time in seconds of how much time needs to pass since
help=utils.fmt(
"""Time in seconds of how much time needs to pass since
the last update of an existing lock before armada forcibly removes it
and tries to acquire its own lock""")),
]
@ -116,11 +124,10 @@ def register_opts(conf):
def list_opts():
return {
'DEFAULT':
default_options,
'keystone_authtoken':
(ks_loading.get_session_conf_options() +
ks_loading.get_auth_common_conf_options() +
ks_loading.get_auth_plugin_conf_options('password') +
ks_loading.get_auth_plugin_conf_options('v3password'))
'DEFAULT': default_options,
'keystone_authtoken': (
ks_loading.get_session_conf_options()
+ ks_loading.get_auth_common_conf_options()
+ ks_loading.get_auth_plugin_conf_options('password')
+ ks_loading.get_auth_plugin_conf_options('v3password'))
}

View File

@ -70,8 +70,8 @@ def _import_modules(module_names):
if not hasattr(module, LIST_OPTS_FUNC_NAME):
raise Exception(
"The module '%s' should have a '%s' function which "
"returns the config options." % (full_module_path,
LIST_OPTS_FUNC_NAME))
"returns the config options." %
(full_module_path, LIST_OPTS_FUNC_NAME))
else:
imported_modules.append(module)
return imported_modules

View File

@ -51,8 +51,8 @@ class InvalidValuesYamlException(ArmadaException):
def __init__(self, chart_description):
self._message = (
'Armada encountered invalid values.yaml in helm chart: %s' %
chart_description)
'Armada encountered invalid values.yaml in helm chart: %s'
% chart_description)
super(InvalidValuesYamlException, self).__init__(self._message)
@ -64,8 +64,8 @@ class InvalidOverrideValuesYamlException(ArmadaException):
def __init__(self, chart_description):
self._message = (
'Armada encountered invalid values.yaml in helm chart: %s' %
chart_description)
'Armada encountered invalid values.yaml in helm chart: %s'
% chart_description)
super(InvalidValuesYamlException, self).__init__(self._message)

View File

@ -54,15 +54,16 @@ def get_version_from_request(req):
# Standard error handler
def format_error_resp(req,
resp,
status_code,
message="",
reason="",
error_type=None,
retry=False,
error_list=None,
info_list=None):
def format_error_resp(
req,
resp,
status_code,
message="",
reason="",
error_type=None,
retry=False,
error_list=None,
info_list=None):
"""
Write an error message body and throw a Falcon exception to trigger
an HTTP status
@ -97,10 +98,12 @@ def format_error_resp(req,
# message list as well. In both cases, if the error flag is not
# set, set it appropriately.
if error_list is None:
error_list = [{
'message': 'An error occurred, but was not specified',
'error': True
}]
error_list = [
{
'message': 'An error occurred, but was not specified',
'error': True
}
]
else:
for error_item in error_list:
if 'error' not in error_item:
@ -216,8 +219,9 @@ class ArmadaAPIException(falcon.HTTPError):
self.error_list = massage_error_list(error_list, description)
self.info_list = info_list
self.retry = retry
super().__init__(self.status, self.title,
self._gen_ex_message(self.title, self.description))
super().__init__(
self.status, self.title,
self._gen_ex_message(self.title, self.description))
@staticmethod
def _gen_ex_message(title, description):

View File

@ -47,11 +47,13 @@ class HelmChartBuildException(ChartBuilderException):
def __init__(self, chart_name, details):
self._chart_name = chart_name
self._message = ('Failed to build Helm chart for {chart_name}. '
'Details: {details}'.format(**{
'chart_name': chart_name,
'details': details
}))
self._message = (
'Failed to build Helm chart for {chart_name}. '
'Details: {details}'.format(
**{
'chart_name': chart_name,
'details': details
}))
super(HelmChartBuildException, self).__init__(self._message)
@ -65,8 +67,9 @@ class FilesLoadException(ChartBuilderException):
* Ensure that the file can be encoded to utf-8 or else it cannot be parsed.
'''
message = ('A %(clazz)s exception occurred while trying to read '
'file: %(file)s. Details:\n%(details)s')
message = (
'A %(clazz)s exception occurred while trying to read '
'file: %(file)s. Details:\n%(details)s')
class IgnoredFilesLoadException(ChartBuilderException):

View File

@ -31,8 +31,9 @@ class GitException(SourceException):
def __init__(self, location):
self._location = location
self._message = ('Git exception occurred, [', self._location,
'] may not be a valid git repository.')
self._message = (
'Git exception occurred, [', self._location,
'] may not be a valid git repository.')
super(GitException, self).__init__(self._message)
@ -44,10 +45,11 @@ class GitAuthException(SourceException):
self._repo_url = repo_url
self._ssh_key_path = ssh_key_path
self._message = ('Failed to authenticate for repo {} with ssh-key at '
'path {}. Verify the repo exists and the correct ssh '
'key path was supplied in the Armada config '
'file.').format(self._repo_url, self._ssh_key_path)
self._message = (
'Failed to authenticate for repo {} with ssh-key at '
'path {}. Verify the repo exists and the correct ssh '
'key path was supplied in the Armada config '
'file.').format(self._repo_url, self._ssh_key_path)
super(GitAuthException, self).__init__(self._message)
@ -69,8 +71,8 @@ class GitSSHException(SourceException):
def __init__(self, ssh_key_path):
self._ssh_key_path = ssh_key_path
self._message = ('Failed to find specified SSH key: {}.'.format(
self._ssh_key_path))
self._message = (
'Failed to find specified SSH key: {}.'.format(self._ssh_key_path))
super(GitSSHException, self).__init__(self._message)

View File

@ -212,5 +212,6 @@ class TillerListReleasesPagingException(TillerException):
*Coming Soon*
'''
message = ('Failed to page through tiller releases, possibly due to '
'releases being added between pages')
message = (
'Failed to page through tiller releases, possibly due to '
'releases being added between pages')

View File

@ -29,8 +29,9 @@ class InvalidManifestException(ValidateException):
*Coming Soon*
'''
message = ('Armada manifest(s) failed validation. Details: '
'%(error_messages)s.')
message = (
'Armada manifest(s) failed validation. Details: '
'%(error_messages)s.')
class InvalidChartNameException(ValidateException):
@ -59,5 +60,6 @@ class InvalidArmadaObjectException(ValidateException):
*Coming Soon*
'''
message = ('An Armada object failed internal validation. Details: '
'%(details)s.')
message = (
'An Armada object failed internal validation. Details: '
'%(details)s.')

View File

@ -13,6 +13,7 @@
# limitations under the License.
from concurrent.futures import ThreadPoolExecutor, as_completed
from oslo_config import cfg
from oslo_log import log as logging
@ -39,20 +40,21 @@ class Armada(object):
workflows
'''
def __init__(self,
documents,
tiller,
disable_update_pre=False,
disable_update_post=False,
enable_chart_cleanup=False,
dry_run=False,
set_ovr=None,
force_wait=False,
timeout=None,
values=None,
target_manifest=None,
k8s_wait_attempts=1,
k8s_wait_attempt_sleep=1):
def __init__(
self,
documents,
tiller,
disable_update_pre=False,
disable_update_post=False,
enable_chart_cleanup=False,
dry_run=False,
set_ovr=None,
force_wait=False,
timeout=None,
values=None,
target_manifest=None,
k8s_wait_attempts=1,
k8s_wait_attempt_sleep=1):
'''
Initialize the Armada engine and establish a connection to Tiller.
@ -106,8 +108,8 @@ class Armada(object):
# Clone the chart sources
manifest_data = self.manifest.get(const.KEYWORD_DATA, {})
for group in manifest_data.get(const.KEYWORD_GROUPS, []):
for ch in group.get(const.KEYWORD_DATA).get(
const.KEYWORD_CHARTS, []):
for ch in group.get(const.KEYWORD_DATA).get(const.KEYWORD_CHARTS,
[]):
self.get_chart(ch)
def get_chart(self, ch):
@ -193,12 +195,12 @@ class Armada(object):
chartgroup = cg.get(const.KEYWORD_DATA)
cg_name = cg.get('metadata').get('name')
cg_desc = chartgroup.get('description', '<missing description>')
cg_sequenced = chartgroup.get('sequenced',
False) or self.force_wait
cg_sequenced = chartgroup.get(
'sequenced', False) or self.force_wait
LOG.info('Processing ChartGroup: %s (%s), sequenced=%s%s', cg_name,
cg_desc, cg_sequenced,
' (forced)' if self.force_wait else '')
LOG.info(
'Processing ChartGroup: %s (%s), sequenced=%s%s', cg_name,
cg_desc, cg_sequenced, ' (forced)' if self.force_wait else '')
# TODO: Remove when v1 doc support is removed.
cg_test_all_charts = chartgroup.get('test_charts')
@ -208,8 +210,8 @@ class Armada(object):
def deploy_chart(chart):
set_current_chart(chart)
try:
return self.chart_deploy.execute(chart, cg_test_all_charts,
prefix, known_releases)
return self.chart_deploy.execute(
chart, cg_test_all_charts, prefix, known_releases)
finally:
set_current_chart(None)
@ -284,15 +286,16 @@ class Armada(object):
for gchart in charts:
for chart in gchart.get(const.KEYWORD_CHARTS, []):
valid_releases.append(
release_prefixer(prefix,
chart.get('chart', {}).get('release')))
release_prefixer(
prefix,
chart.get('chart', {}).get('release')))
actual_releases = [x.name for x in self.tiller.list_releases()]
release_diff = list(set(actual_releases) - set(valid_releases))
for release in release_diff:
if release.startswith(prefix):
LOG.info('Purging release %s as part of chart cleanup.',
release)
LOG.info(
'Purging release %s as part of chart cleanup.', release)
self.tiller.uninstall_release(release)
msg['purge'].append(release)

View File

@ -16,7 +16,6 @@ from armada import const
class ChartDelete(object):
def __init__(self, chart, release_name, tiller, purge=True):
"""Initialize a chart delete handler.
@ -39,8 +38,8 @@ class ChartDelete(object):
# TODO(seaneagan): Consider allowing this to be a percentage of the
# chart's `wait.timeout` so that the timeouts can scale together, and
# likely default to some reasonable value, e.g. "50%".
self.timeout = self.delete_config.get('timeout',
const.DEFAULT_DELETE_TIMEOUT)
self.timeout = self.delete_config.get(
'timeout', const.DEFAULT_DELETE_TIMEOUT)
def get_timeout(self):
return self.timeout

View File

@ -12,8 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log as logging
import time
from oslo_log import log as logging
import yaml
from armada import const
@ -31,9 +32,9 @@ LOG = logging.getLogger(__name__)
class ChartDeploy(object):
def __init__(self, disable_update_pre, disable_update_post, dry_run,
k8s_wait_attempts, k8s_wait_attempt_sleep, timeout, tiller):
def __init__(
self, disable_update_pre, disable_update_post, dry_run,
k8s_wait_attempts, k8s_wait_attempt_sleep, timeout, tiller):
self.disable_update_pre = disable_update_pre
self.disable_update_post = disable_update_post
self.dry_run = dry_run
@ -84,8 +85,9 @@ class ChartDeploy(object):
if status == const.STATUS_DEPLOYED:
# indicate to the end user what path we are taking
LOG.info("Existing release %s found in namespace %s", release_name,
namespace)
LOG.info(
"Existing release %s found in namespace %s", release_name,
namespace)
# extract the installed chart and installed values from the
# latest release so we can compare to the intended state
@ -114,8 +116,9 @@ class ChartDeploy(object):
pre_actions = upgrade_pre
if not self.disable_update_post and upgrade_post:
LOG.warning('Post upgrade actions are ignored by Armada'
'and will not affect deployment.')
LOG.warning(
'Post upgrade actions are ignored by Armada'
'and will not affect deployment.')
post_actions = upgrade_post
try:
@ -158,8 +161,9 @@ class ChartDeploy(object):
force=force,
recreate_pods=recreate_pods)
LOG.info('Upgrade completed with results from Tiller: %s',
tiller_result.__dict__)
LOG.info(
'Upgrade completed with results from Tiller: %s',
tiller_result.__dict__)
result['upgrade'] = release_name
else:
# Check for release with status other than DEPLOYED
@ -213,10 +217,11 @@ class ChartDeploy(object):
release_name, status)
else:
# Purge the release
LOG.info('Purging release %s with status %s', release_name,
status)
chart_delete = ChartDelete(chart, release_name,
self.tiller)
LOG.info(
'Purging release %s with status %s', release_name,
status)
chart_delete = ChartDelete(
chart, release_name, self.tiller)
chart_delete.delete()
result['purge'] = release_name
@ -233,8 +238,9 @@ class ChartDeploy(object):
wait=native_wait_enabled,
timeout=timer)
LOG.info('Install completed with results from Tiller: %s',
tiller_result.__dict__)
LOG.info(
'Install completed with results from Tiller: %s',
tiller_result.__dict__)
result['install'] = release_name
# Wait
@ -251,8 +257,8 @@ class ChartDeploy(object):
self.tiller,
cg_test_charts=cg_test_all_charts)
run_test = test_handler.test_enabled and (just_deployed or
not last_test_passed)
run_test = test_handler.test_enabled and (
just_deployed or not last_test_passed)
if run_test:
self._test_chart(release_name, test_handler)
@ -279,6 +285,7 @@ class ChartDeploy(object):
for release in known_releases:
if release.name == release_name:
return release
LOG.info("known: %s, release_name: %s",
list(map(lambda r: r.name, known_releases)), release_name)
LOG.info(
"known: %s, release_name: %s",
list(map(lambda r: r.name, known_releases)), release_name)
return None

View File

@ -13,16 +13,15 @@
# limitations under the License.
import os
import yaml
from google.protobuf.any_pb2 import Any
from hapi.chart.chart_pb2 import Chart
from hapi.chart.config_pb2 import Config
from hapi.chart.metadata_pb2 import Metadata
from hapi.chart.template_pb2 import Template
from oslo_config import cfg
from oslo_log import log as logging
import yaml
from armada.exceptions import chartbuilder_exceptions
from armada import const
@ -65,16 +64,17 @@ class ChartBuilder(object):
property from the chart, or else "" if the property isn't a 2-tuple.
'''
source_dir = self.chart_data.get('source_dir')
return (os.path.join(*source_dir) if
(source_dir and isinstance(source_dir, (list, tuple)) and
len(source_dir) == 2) else "")
return (
os.path.join(*source_dir) if (
source_dir and isinstance(source_dir, (list, tuple))
and len(source_dir) == 2) else "")
def get_ignored_files(self):
'''Load files to ignore from .helmignore if present.'''
try:
ignored_files = []
if os.path.exists(
os.path.join(self.source_directory, '.helmignore')):
if os.path.exists(os.path.join(self.source_directory,
'.helmignore')):
with open(os.path.join(self.source_directory,
'.helmignore')) as f:
ignored_files = f.readlines()
@ -90,8 +90,8 @@ class ChartBuilder(object):
False otherwise.
'''
for ignored_file in self.ignored_files:
if (ignored_file.startswith('*') and
filename.endswith(ignored_file.strip('*'))):
if (ignored_file.startswith('*')
and filename.endswith(ignored_file.strip('*'))):
return True
elif ignored_file == filename:
return True
@ -153,8 +153,9 @@ class ChartBuilder(object):
raise chartbuilder_exceptions.FilesLoadException(
file=abspath, details=e)
except UnicodeError as e:
LOG.debug('Attempting to read %s using encoding %s.',
abspath, encoding)
LOG.debug(
'Attempting to read %s using encoding %s.', abspath,
encoding)
msg = "(encoding=%s) %s" % (encoding, str(e))
unicode_errors.append(msg)
else:
@ -176,12 +177,11 @@ class ChartBuilder(object):
relfolder = os.path.split(root)[-1]
rel_folder_path = os.path.relpath(root, self.source_directory)
if not any(
root.startswith(os.path.join(self.source_directory, x))
for x in ['templates', 'charts']):
if not any(root.startswith(os.path.join(self.source_directory, x))
for x in ['templates', 'charts']):
for file in files:
if (file not in files_to_ignore and
file not in non_template_files):
if (file not in files_to_ignore
and file not in non_template_files):
_append_file_to_result(root, rel_folder_path, file)
elif relfolder == 'charts' and '.prov' in files:
_append_file_to_result(root, rel_folder_path, '.prov')
@ -196,8 +196,9 @@ class ChartBuilder(object):
with open(os.path.join(self.source_directory, 'values.yaml')) as f:
raw_values = f.read()
else:
LOG.warn("No values.yaml in %s, using empty values",
self.source_directory)
LOG.warn(
"No values.yaml in %s, using empty values",
self.source_directory)
raw_values = ''
return Config(raw=raw_values)
@ -210,14 +211,13 @@ class ChartBuilder(object):
'''
chart_name = self.chart['metadata']['name']
templates = []
if not os.path.exists(
os.path.join(self.source_directory, 'templates')):
if not os.path.exists(os.path.join(self.source_directory,
'templates')):
LOG.warn(
"Chart %s has no templates directory. "
"No templates will be deployed", chart_name)
for root, _, files in os.walk(
os.path.join(self.source_directory, 'templates'),
topdown=True):
for root, _, files in os.walk(os.path.join(self.source_directory,
'templates'), topdown=True):
for tpl_file in files:
tname = os.path.relpath(
os.path.join(root, tpl_file),
@ -247,8 +247,9 @@ class ChartBuilder(object):
chart_release = self.chart_data.get('release', None)
for dep_chart in chart_dependencies:
dep_chart_name = dep_chart['metadata']['name']
LOG.info("Building dependency chart %s for release %s.",
dep_chart_name, chart_release)
LOG.info(
"Building dependency chart %s for release %s.", dep_chart_name,
chart_release)
try:
dependencies.append(ChartBuilder(dep_chart).get_helm_chart())
except Exception:

View File

@ -16,8 +16,8 @@
import urllib.parse
import re
import requests
import requests
from oslo_log import log as logging
from armada.exceptions.source_exceptions import InvalidPathException
@ -58,8 +58,8 @@ class ReferenceResolver(object):
if handler is None:
raise InvalidPathException(
"Invalid reference scheme %s: no handler." %
design_uri.scheme)
"Invalid reference scheme %s: no handler."
% design_uri.scheme)
else:
# Have to do a little magic to call the classmethod
# as a pointer
@ -90,8 +90,8 @@ class ReferenceResolver(object):
response = requests.get(design_uri.geturl(), timeout=30)
if response.status_code >= 400:
raise InvalidPathException(
"Error received for HTTP reference: %d" %
response.status_code)
"Error received for HTTP reference: %d"
% response.status_code)
return response.content
@ -122,8 +122,9 @@ class ReferenceResolver(object):
ks_sess = ks_utils.get_keystone_session()
(new_scheme, foo) = re.subn(r'^[^+]+\+', '', design_uri.scheme)
url = urllib.parse.urlunparse(
(new_scheme, design_uri.netloc, design_uri.path, design_uri.params,
design_uri.query, design_uri.fragment))
(
new_scheme, design_uri.netloc, design_uri.path,
design_uri.params, design_uri.query, design_uri.fragment))
LOG.debug("Calling Keystone session for url %s" % str(url))
resp = ks_sess.get(url)
if resp.status_code >= 400:

View File

@ -60,11 +60,12 @@ class K8s(object):
self.extension_api = client.ExtensionsV1beta1Api(api_client)
self.apps_v1_api = client.AppsV1Api(api_client)
def delete_job_action(self,
name,
namespace="default",
propagation_policy='Foreground',
timeout=DEFAULT_K8S_TIMEOUT):
def delete_job_action(
self,
name,
namespace="default",
propagation_policy='Foreground',
timeout=DEFAULT_K8S_TIMEOUT):
'''
Delete a job from a namespace (see _delete_item_action).
@ -74,15 +75,17 @@ class K8s(object):
to the delete.
:param timeout: The timeout to wait for the delete to complete
'''
self._delete_item_action(self.batch_api.list_namespaced_job,
self.batch_api.delete_namespaced_job, "job",
name, namespace, propagation_policy, timeout)
self._delete_item_action(
self.batch_api.list_namespaced_job,
self.batch_api.delete_namespaced_job, "job", name, namespace,
propagation_policy, timeout)
def delete_cron_job_action(self,
name,
namespace="default",
propagation_policy='Foreground',
timeout=DEFAULT_K8S_TIMEOUT):
def delete_cron_job_action(
self,
name,
namespace="default",
propagation_policy='Foreground',
timeout=DEFAULT_K8S_TIMEOUT):
'''
Delete a cron job from a namespace (see _delete_item_action).
@ -97,11 +100,12 @@ class K8s(object):
self.batch_v1beta1_api.delete_namespaced_cron_job, "cron job",
name, namespace, propagation_policy, timeout)
def delete_pod_action(self,
name,
namespace="default",
propagation_policy='Foreground',
timeout=DEFAULT_K8S_TIMEOUT):
def delete_pod_action(
self,
name,
namespace="default",
propagation_policy='Foreground',
timeout=DEFAULT_K8S_TIMEOUT):
'''
Delete a pod from a namespace (see _delete_item_action).
@ -111,18 +115,19 @@ class K8s(object):
to the delete.
:param timeout: The timeout to wait for the delete to complete
'''
self._delete_item_action(self.client.list_namespaced_pod,
self.client.delete_namespaced_pod, "pod",
name, namespace, propagation_policy, timeout)
self._delete_item_action(
self.client.list_namespaced_pod, self.client.delete_namespaced_pod,
"pod", name, namespace, propagation_policy, timeout)
def _delete_item_action(self,
list_func,
delete_func,
object_type_description,
name,
namespace="default",
propagation_policy='Foreground',
timeout=DEFAULT_K8S_TIMEOUT):
def _delete_item_action(
self,
list_func,
delete_func,
object_type_description,
name,
namespace="default",
propagation_policy='Foreground',
timeout=DEFAULT_K8S_TIMEOUT):
'''
This function takes the action to delete an object (job, cronjob, pod)
from kubernetes. It will wait for the object to be fully deleted before
@ -145,14 +150,15 @@ class K8s(object):
try:
timeout = self._check_timeout(timeout)
LOG.debug('Watching to delete %s %s, Wait timeout=%s',
object_type_description, name, timeout)
LOG.debug(
'Watching to delete %s %s, Wait timeout=%s',
object_type_description, name, timeout)
body = client.V1DeleteOptions()
w = watch.Watch()
issue_delete = True
found_events = False
for event in w.stream(
list_func, namespace=namespace, timeout_seconds=timeout):
for event in w.stream(list_func, namespace=namespace,
timeout_seconds=timeout):
if issue_delete:
delete_func(
name=name,
@ -168,23 +174,27 @@ class K8s(object):
if item_name == name:
found_events = True
if event_type == 'DELETED':
LOG.info('Successfully deleted %s %s',
object_type_description, item_name)
LOG.info(
'Successfully deleted %s %s',
object_type_description, item_name)
return
if not found_events:
LOG.warn('Saw no delete events for %s %s in namespace=%s',
object_type_description, name, namespace)
LOG.warn(
'Saw no delete events for %s %s in namespace=%s',
object_type_description, name, namespace)
err_msg = ('Reached timeout while waiting to delete %s: '
'name=%s, namespace=%s' % (object_type_description,
name, namespace))
err_msg = (
'Reached timeout while waiting to delete %s: '
'name=%s, namespace=%s' %
(object_type_description, name, namespace))
LOG.error(err_msg)
raise exceptions.KubernetesWatchTimeoutException(err_msg)
except ApiException as e:
LOG.exception("Exception when deleting %s: name=%s, namespace=%s",
object_type_description, name, namespace)
LOG.exception(
"Exception when deleting %s: name=%s, namespace=%s",
object_type_description, name, namespace)
raise e
def get_namespace_job(self, namespace="default", **kwargs):
@ -196,8 +206,9 @@ class K8s(object):
try:
return self.batch_api.list_namespaced_job(namespace, **kwargs)
except ApiException as e:
LOG.error("Exception getting jobs: namespace=%s, label=%s: %s",
namespace, kwargs.get('label_selector', ''), e)
LOG.error(
"Exception getting jobs: namespace=%s, label=%s: %s",
namespace, kwargs.get('label_selector', ''), e)
def get_namespace_cron_job(self, namespace="default", **kwargs):
'''
@ -276,8 +287,8 @@ class K8s(object):
base_pod_pattern = re.compile('^(.+)-[a-zA-Z0-9]+$')
if not base_pod_pattern.match(old_pod_name):
LOG.error('Could not identify new pod after purging %s',
old_pod_name)
LOG.error(
'Could not identify new pod after purging %s', old_pod_name)
return
pod_base_name = base_pod_pattern.match(old_pod_name).group(1)
@ -297,13 +308,13 @@ class K8s(object):
new_pod_name = event_name
elif new_pod_name:
for condition in pod_conditions:
if (condition.type == 'Ready' and
condition.status == 'True'):
if (condition.type == 'Ready'
and condition.status == 'True'):
LOG.info('New pod %s deployed', new_pod_name)
w.stop()
def wait_get_completed_podphase(self, release,
timeout=DEFAULT_K8S_TIMEOUT):
def wait_get_completed_podphase(
self, release, timeout=DEFAULT_K8S_TIMEOUT):
'''
:param release: part of namespace
:param timeout: time before disconnecting stream
@ -312,9 +323,8 @@ class K8s(object):
w = watch.Watch()
found_events = False
for event in w.stream(
self.client.list_pod_for_all_namespaces,
timeout_seconds=timeout):
for event in w.stream(self.client.list_pod_for_all_namespaces,
timeout_seconds=timeout):
resource_name = event['object'].metadata.name
if release in resource_name:
@ -362,8 +372,8 @@ class K8s(object):
return self.custom_objects.create_namespaced_custom_object(
group, version, namespace, plural, body)
def delete_custom_resource(self, group, version, namespace, plural, name,
body):
def delete_custom_resource(
self, group, version, namespace, plural, name, body):
"""Deletes a custom resource
:param group: the custom resource's group
@ -394,8 +404,8 @@ class K8s(object):
return self.custom_objects.get_namespaced_custom_object(
group, version, namespace, plural, name)
def replace_custom_resource(self, group, version, namespace, plural, name,
body):
def replace_custom_resource(
self, group, version, namespace, plural, name, body):
"""Replaces a custom resource
:param group: the custom resource's group

View File

@ -11,10 +11,12 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import time
from datetime import datetime, timedelta
from concurrent.futures import ThreadPoolExecutor
from kubernetes import client
from kubernetes.client.rest import ApiException
from oslo_config import cfg
@ -49,7 +51,6 @@ def lock_and_thread(lock_name="lock"):
"""
def lock_decorator(func):
@functools.wraps(func)
def func_wrapper(*args, **kwargs):
bearer_token = None
@ -62,10 +63,11 @@ def lock_and_thread(lock_name="lock"):
# we did not find a Tiller object to extract a bearer token from
# log this to assist with potential debugging in the future
if not found_tiller:
LOG.info("no Tiller object found in parameters of function "
"decorated by lock_and_thread, this might create "
"authentication issues in Kubernetes clusters with "
"external auth backend")
LOG.info(
"no Tiller object found in parameters of function "
"decorated by lock_and_thread, this might create "
"authentication issues in Kubernetes clusters with "
"external auth backend")
with Lock(lock_name, bearer_token=bearer_token) as lock:
pool = ThreadPoolExecutor(1)
@ -84,7 +86,6 @@ def lock_and_thread(lock_name="lock"):
class Lock:
def __init__(self, lock_name, bearer_token=None, additional_data=None):
"""Creates a lock with the specified name and data. When a lock with
that name already exists then this will continuously attempt to acquire
@ -137,8 +138,9 @@ class Lock:
return True
except ApiException as err:
if err.status == 404:
LOG.info("Lock Custom Resource Definition not found, "
"creating now")
LOG.info(
"Lock Custom Resource Definition not found, "
"creating now")
self.lock_config.create_definition()
continue
elif err.status == 409:
@ -157,8 +159,9 @@ class Lock:
# of the lock exceeds the expire time in order to avoid
# removing another thread's lock while it is still working
if self.lock_age() > timedelta(seconds=self.expire_time):
LOG.info("Lock has exceeded expiry time, removing so"
"processing can continue")
LOG.info(
"Lock has exceeded expiry time, removing so"
"processing can continue")
self.release_lock()
continue
LOG.debug("Sleeping before attempting to acquire lock again")
@ -183,7 +186,6 @@ class Lock:
class LockConfig:
def __init__(self, name, bearer_token=None, additional_data=None):
self.name = name
data = additional_data or dict()

View File

@ -23,7 +23,6 @@ LOG = logging.getLogger(__name__)
class Manifest(object):
def __init__(self, documents, target_manifest=None):
"""Instantiates a Manifest object.
@ -54,9 +53,10 @@ class Manifest(object):
target_manifest)
if len(manifests) > 1:
error = ('Multiple manifests are not supported. Ensure that the '
'`target_manifest` option is set to specify the target '
'manifest')
error = (
'Multiple manifests are not supported. Ensure that the '
'`target_manifest` option is set to specify the target '
'manifest')
LOG.error(error)
raise exceptions.ManifestException(details=error)
else:
@ -64,9 +64,10 @@ class Manifest(object):
if not all([self.charts, self.groups, self.manifest]):
expected_schemas = [schema.TYPE_CHART, schema.TYPE_CHARTGROUP]
error = ('Documents must include at least one of each of {} '
'and only one {}').format(expected_schemas,
schema.TYPE_MANIFEST)
error = (
'Documents must include at least one of each of {} '
'and only one {}').format(
expected_schemas, schema.TYPE_MANIFEST)
LOG.error(error)
raise exceptions.ManifestException(details=error)
@ -147,8 +148,8 @@ class Manifest(object):
under ``chart['data']['dependencies']`` could not be found.
"""
try:
chart_dependencies = chart.get(const.KEYWORD_DATA, {}).get(
'dependencies', [])
chart_dependencies = chart.get(const.KEYWORD_DATA,
{}).get('dependencies', [])
for iter, dep in enumerate(chart_dependencies):
if isinstance(dep, dict):
continue
@ -175,9 +176,8 @@ class Manifest(object):
"""
try:
chart = None
for iter, chart in enumerate(
chart_group.get(const.KEYWORD_DATA).get(
const.KEYWORD_CHARTS, [])):
for iter, chart in enumerate(chart_group.get(
const.KEYWORD_DATA).get(const.KEYWORD_CHARTS, [])):
if isinstance(chart, dict):
continue
chart_object = self.find_chart_document(chart)
@ -201,9 +201,8 @@ class Manifest(object):
:raises ManifestException: If a chart group's data listed
under ``chart_group[const.KEYWORD_DATA]`` could not be found.
"""
for iter, group in enumerate(
self.manifest.get(const.KEYWORD_DATA, {}).get(
const.KEYWORD_GROUPS, [])):
for iter, group in enumerate(self.manifest.get(
const.KEYWORD_DATA, {}).get(const.KEYWORD_GROUPS, [])):
if isinstance(group, dict):
continue
chart_grp = self.find_chart_group_document(group)

View File

@ -14,6 +14,7 @@
import collections
import json
import yaml
from armada.exceptions import override_exceptions
@ -23,7 +24,6 @@ from armada.utils import validate
class Override(object):
def __init__(self, documents, overrides=None, values=None):
self.documents = documents
self.overrides = overrides

View File

@ -60,10 +60,10 @@ class ReleaseDiff(object):
:rtype: dict
'''
old_input = self.make_release_input(self.old_chart, self.old_values,
'previously deployed')
new_input = self.make_release_input(self.new_chart, self.new_values,
'currently being deployed')
old_input = self.make_release_input(
self.old_chart, self.old_values, 'previously deployed')
new_input = self.make_release_input(
self.new_chart, self.new_values, 'currently being deployed')
return DeepDiff(old_input, new_input, view='tree')


@ -13,8 +13,9 @@
# limitations under the License.
import os
import pkg_resources
import re
import pkg_resources
import yaml
# Types
@ -34,7 +35,6 @@ _SCHEMAS = {}
class SchemaInfo(object):
def __init__(self, type, version, data):
self.type = type
self.version = version


@ -15,7 +15,6 @@
from oslo_log import log as logging
from armada import const
from armada.handlers.wait import get_wait_labels
from armada.utils.release import label_selectors
from armada.utils.helm import get_test_suite_run_success, is_test_pod
@ -24,14 +23,14 @@ LOG = logging.getLogger(__name__)
class Test(object):
def __init__(self,
chart,
release_name,
tiller,
cg_test_charts=None,
cleanup=None,
enable_all=False):
def __init__(
self,
chart,
release_name,
tiller,
cg_test_charts=None,
cleanup=None,
enable_all=False):
"""Initialize a test handler to run Helm tests corresponding to a
release.
@ -62,16 +61,18 @@ class Test(object):
# TODO: Remove when v1 doc support is removed.
if cg_test_charts is not None:
LOG.warn('Chart group key `test_charts` is deprecated and will be '
'removed. Use `test.enabled` instead.')
LOG.warn(
'Chart group key `test_charts` is deprecated and will be '
'removed. Use `test.enabled` instead.')
self.test_enabled = cg_test_charts
else:
self.test_enabled = True
# TODO: Remove when v1 doc support is removed.
if (type(test_values) == bool):
LOG.warn('Boolean value for chart `test` key is deprecated and '
'will be removed. Use `test.enabled` instead.')
LOG.warn(
'Boolean value for chart `test` key is deprecated and '
'will be removed. Use `test.enabled` instead.')
self.test_enabled = test_values
@ -107,14 +108,16 @@ class Test(object):
:return: Helm test suite run result
"""
LOG.info('RUNNING: %s tests with timeout=%ds', self.release_name,
self.timeout)
LOG.info(
'RUNNING: %s tests with timeout=%ds', self.release_name,
self.timeout)
try:
self.delete_test_pods()
except Exception:
LOG.exception("Exception when deleting test pods for release: %s",
self.release_name)
LOG.exception(
"Exception when deleting test pods for release: %s",
self.release_name)
test_suite_run = self.tiller.test_release(
self.release_name, timeout=self.timeout, cleanup=self.cleanup)


@ -13,8 +13,6 @@
# limitations under the License.
import grpc
import yaml
from hapi.chart.config_pb2 import Config
from hapi.services.tiller_pb2 import GetReleaseContentRequest
from hapi.services.tiller_pb2 import GetReleaseStatusRequest
@ -28,6 +26,7 @@ from hapi.services.tiller_pb2 import UninstallReleaseRequest
from hapi.services.tiller_pb2 import UpdateReleaseRequest
from oslo_config import cfg
from oslo_log import log as logging
import yaml
from armada import const
from armada.conf import get_current_chart
@ -52,10 +51,10 @@ LOG = logging.getLogger(__name__)
class CommonEqualityMixin(object):
def __eq__(self, other):
return (isinstance(other, self.__class__) and
self.__dict__ == other.__dict__)
return (
isinstance(other, self.__class__)
and self.__dict__ == other.__dict__)
def __ne__(self, other):
return not self.__eq__(other)
@ -78,12 +77,13 @@ class Tiller(object):
service over gRPC
'''
def __init__(self,
tiller_host=None,
tiller_port=None,
tiller_namespace=None,
bearer_token=None,
dry_run=None):
def __init__(
self,
tiller_host=None,
tiller_port=None,
tiller_namespace=None,
bearer_token=None,
dry_run=None):
self.tiller_host = tiller_host or CONF.tiller_host
self.tiller_port = tiller_port or CONF.tiller_port
self.tiller_namespace = tiller_namespace or CONF.tiller_namespace
@ -101,9 +101,10 @@ class Tiller(object):
# be fed at runtime as an override
self.timeout = const.DEFAULT_TILLER_TIMEOUT
LOG.debug('Armada is using Tiller at: %s:%s, namespace=%s, timeout=%s',
self.tiller_host, self.tiller_port, self.tiller_namespace,
self.timeout)
LOG.debug(
'Armada is using Tiller at: %s:%s, namespace=%s, timeout=%s',
self.tiller_host, self.tiller_port, self.tiller_namespace,
self.timeout)
@property
def metadata(self):
@ -126,9 +127,10 @@ class Tiller(object):
MAX_MESSAGE_LENGTH, MAX_MESSAGE_LENGTH)
return grpc.insecure_channel(
'%s:%s' % (tiller_ip, tiller_port),
options=[('grpc.max_send_message_length', MAX_MESSAGE_LENGTH),
('grpc.max_receive_message_length',
MAX_MESSAGE_LENGTH)])
options=[
('grpc.max_send_message_length', MAX_MESSAGE_LENGTH),
('grpc.max_receive_message_length', MAX_MESSAGE_LENGTH)
])
except Exception:
LOG.exception('Failed to initialize grpc channel to tiller.')
raise ex.ChannelException()
@ -208,8 +210,9 @@ class Tiller(object):
limit=LIST_RELEASES_PAGE_SIZE,
status_codes=const.STATUS_ALL)
LOG.debug('Tiller ListReleases() with timeout=%s, request=%s',
self.timeout, req)
LOG.debug(
'Tiller ListReleases() with timeout=%s, request=%s',
self.timeout, req)
response = stub.ListReleases(
req, self.timeout, metadata=self.metadata)
@ -248,8 +251,9 @@ class Tiller(object):
try:
releases = get_results()
except ex.TillerListReleasesPagingException:
LOG.warning('List releases paging failed on attempt %s/%s',
attempt, LIST_RELEASES_ATTEMPTS)
LOG.warning(
'List releases paging failed on attempt %s/%s', attempt,
LIST_RELEASES_ATTEMPTS)
if attempt == LIST_RELEASES_ATTEMPTS:
raise
else:
@ -267,14 +271,16 @@ class Tiller(object):
latest_releases = []
for r in releases:
if latest_versions[r.name] == r.version:
LOG.debug('Found release %s, version %s, status: %s',
r.name, r.version, get_release_status(r))
LOG.debug(
'Found release %s, version %s, status: %s', r.name,
r.version, get_release_status(r))
latest_releases.append(r)
return latest_releases
def get_chart_templates(self, template_name, name, release_name, namespace,
chart, disable_hooks, values):
def get_chart_templates(
self, template_name, name, release_name, namespace, chart,
disable_hooks, values):
# returns some info
LOG.info("Template( %s ) : %s ", template_name, name)
@ -291,15 +297,16 @@ class Tiller(object):
templates = stub.InstallRelease(
release_request, self.timeout, metadata=self.metadata)
for template in yaml.load_all(
getattr(templates.release, 'manifest', [])):
if template_name == template.get('metadata', None).get(
'name', None):
for template in yaml.load_all(getattr(templates.release, 'manifest',
[])):
if template_name == template.get('metadata', None).get('name',
None):
LOG.info(template_name)
return template
def _pre_update_actions(self, actions, release_name, namespace, chart,
disable_hooks, values, timeout):
def _pre_update_actions(
self, actions, release_name, namespace, chart, disable_hooks,
values, timeout):
'''
:param actions: array of items actions
:param namespace: name of pod for actions
@ -353,29 +360,32 @@ class Tiller(object):
charts = []
for latest_release in self.list_releases():
try:
release = (latest_release.name, latest_release.version,
latest_release.chart, latest_release.config.raw,
latest_release.info.status.Code.Name(
latest_release.info.status.code))
release = (
latest_release.name, latest_release.version,
latest_release.chart, latest_release.config.raw,
latest_release.info.status.Code.Name(
latest_release.info.status.code))
charts.append(release)
except (AttributeError, IndexError) as e:
LOG.debug('%s while getting releases: %s, ex=%s',
e.__class__.__name__, latest_release, e)
LOG.debug(
'%s while getting releases: %s, ex=%s',
e.__class__.__name__, latest_release, e)
continue
return charts
def update_release(self,
chart,
release,
namespace,
pre_actions=None,
post_actions=None,
disable_hooks=False,
values=None,
wait=False,
timeout=None,
force=False,
recreate_pods=False):
def update_release(
self,
chart,
release,
namespace,
pre_actions=None,
post_actions=None,
disable_hooks=False,
values=None,
wait=False,
timeout=None,
force=False,
recreate_pods=False):
'''
Update a Helm Release
'''
@ -391,8 +401,9 @@ class Tiller(object):
else:
values = Config(raw=values)
self._pre_update_actions(pre_actions, release, namespace, chart,
disable_hooks, values, timeout)
self._pre_update_actions(
pre_actions, release, namespace, chart, disable_hooks, values,
timeout)
update_msg = None
# build release install request
@ -427,20 +438,17 @@ class Tiller(object):
return tiller_result
def install_release(self,
chart,
release,
namespace,
values=None,
wait=False,
timeout=None):
def install_release(
self, chart, release, namespace, values=None, wait=False,
timeout=None):
'''
Create a Helm Release
'''
timeout = self._check_timeout(wait, timeout)
LOG.info('Helm install release%s: wait=%s, timeout=%s',
(' (dry run)' if self.dry_run else ''), wait, timeout)
LOG.info(
'Helm install release%s: wait=%s, timeout=%s',
(' (dry run)' if self.dry_run else ''), wait, timeout)
if values is None:
values = Config(raw='')
@ -477,10 +485,9 @@ class Tiller(object):
status = self.get_release_status(release)
raise ex.ReleaseException(release, status, 'Install')
def test_release(self,
release,
timeout=const.DEFAULT_TILLER_TIMEOUT,
cleanup=False):
def test_release(
self, release, timeout=const.DEFAULT_TILLER_TIMEOUT,
cleanup=False):
'''
:param release: name of release to test
:param timeout: runtime before exiting
@ -527,8 +534,9 @@ class Tiller(object):
:param version: version of release status
'''
LOG.debug('Helm getting release status for release=%s, version=%s',
release, version)
LOG.debug(
'Helm getting release status for release=%s, version=%s', release,
version)
try:
stub = ReleaseServiceStub(self.channel)
status_request = GetReleaseStatusRequest(
@ -549,8 +557,9 @@ class Tiller(object):
:param version: version of release status
'''
LOG.debug('Helm getting release content for release=%s, version=%s',
release, version)
LOG.debug(
'Helm getting release content for release=%s, version=%s', release,
version)
try:
stub = ReleaseServiceStub(self.channel)
status_request = GetReleaseContentRequest(
@ -585,11 +594,8 @@ class Tiller(object):
LOG.exception('Failed to get Tiller version.')
raise ex.TillerVersionException()
def uninstall_release(self,
release,
disable_hooks=False,
purge=True,
timeout=None):
def uninstall_release(
self, release, disable_hooks=False, purge=True, timeout=None):
'''
:param: release - Helm chart release name
:param: purge - deep delete of chart
@ -628,12 +634,13 @@ class Tiller(object):
status = self.get_release_status(release)
raise ex.ReleaseException(release, status, 'Delete')
def delete_resources(self,
resource_type,
resource_labels,
namespace,
wait=False,
timeout=const.DEFAULT_TILLER_TIMEOUT):
def delete_resources(
self,
resource_type,
resource_labels,
namespace,
wait=False,
timeout=const.DEFAULT_TILLER_TIMEOUT):
'''
Delete resources matching provided resource type, labels, and
namespace.
@ -665,8 +672,8 @@ class Tiller(object):
namespace)
continue
LOG.info("Deleting job %s in namespace: %s", jb_name,
namespace)
LOG.info(
"Deleting job %s in namespace: %s", jb_name, namespace)
self.k8s.delete_job_action(jb_name, namespace, timeout=timeout)
handled = True
@ -684,8 +691,9 @@ class Tiller(object):
# TODO: Remove when v1 doc support is removed.
if implied_cronjob:
LOG.warn("Deleting cronjobs via `type: job` is "
"deprecated, use `type: cronjob` instead")
LOG.warn(
"Deleting cronjobs via `type: job` is "
"deprecated, use `type: cronjob` instead")
if self.dry_run:
LOG.info(
@ -694,8 +702,8 @@ class Tiller(object):
namespace)
continue
LOG.info("Deleting cronjob %s in namespace: %s", jb_name,
namespace)
LOG.info(
"Deleting cronjob %s in namespace: %s", jb_name, namespace)
self.k8s.delete_cron_job_action(jb_name, namespace)
handled = True
@ -712,27 +720,29 @@ class Tiller(object):
namespace)
continue
LOG.info("Deleting pod %s in namespace: %s", pod_name,
namespace)
LOG.info(
"Deleting pod %s in namespace: %s", pod_name, namespace)
self.k8s.delete_pod_action(pod_name, namespace)
if wait:
self.k8s.wait_for_pod_redeployment(pod_name, namespace)
handled = True
if not handled:
LOG.error('No resources found with labels=%s type=%s namespace=%s',
resource_labels, resource_type, namespace)
LOG.error(
'No resources found with labels=%s type=%s namespace=%s',
resource_labels, resource_type, namespace)
def rolling_upgrade_pod_deployment(self,
name,
release_name,
namespace,
resource_labels,
action_type,
chart,
disable_hooks,
values,
timeout=const.DEFAULT_TILLER_TIMEOUT):
def rolling_upgrade_pod_deployment(
self,
name,
release_name,
namespace,
resource_labels,
action_type,
chart,
disable_hooks,
values,
timeout=const.DEFAULT_TILLER_TIMEOUT):
'''
update statefulsets (daemon, stateful)
'''
@ -753,8 +763,9 @@ class Tiller(object):
ds_name = ds.metadata.name
ds_labels = ds.metadata.labels
if ds_name == name:
LOG.info("Deleting %s : %s in %s", action_type, ds_name,
namespace)
LOG.info(
"Deleting %s : %s in %s", action_type, ds_name,
namespace)
self.k8s.delete_daemon_action(ds_name, namespace)
# update the daemonset yaml
@ -779,13 +790,14 @@ class Tiller(object):
else:
LOG.error("Unable to exectue name: % type: %s", name, action_type)
def rollback_release(self,
release_name,
version,
wait=False,
timeout=None,
force=False,
recreate_pods=False):
def rollback_release(
self,
release_name,
version,
wait=False,
timeout=None,
force=False,
recreate_pods=False):
'''
Rollback a helm release.
'''


@ -43,9 +43,9 @@ def get_wait_labels(chart):
# TODO: Validate this object up front in armada validate flow.
class ChartWait():
def __init__(self, k8s, release_name, chart, namespace, k8s_wait_attempts,
k8s_wait_attempt_sleep, timeout):
def __init__(
self, k8s, release_name, chart, namespace, k8s_wait_attempts,
k8s_wait_attempt_sleep, timeout):
self.k8s = k8s
self.release_name = release_name
self.chart = chart
@ -65,12 +65,14 @@ class ChartWait():
else:
# TODO: Remove when v1 doc support is removed.
if schema_info.version < 2:
resources_list = [{
'type': 'job',
'required': False
}, {
'type': 'pod'
}]
resources_list = [
{
'type': 'job',
'required': False
}, {
'type': 'pod'
}
]
else:
resources_list = self.get_resources_list(resources)
@ -96,15 +98,17 @@ class ChartWait():
# TODO: Remove when v1 doc support is removed.
deprecated_timeout = self.chart_data.get('timeout')
if deprecated_timeout is not None:
LOG.warn('The `timeout` key is deprecated and support '
'for this will be removed soon. Use '
'`wait.timeout` instead.')
LOG.warn(
'The `timeout` key is deprecated and support '
'for this will be removed soon. Use '
'`wait.timeout` instead.')
if wait_timeout is None:
wait_timeout = deprecated_timeout
if wait_timeout is None:
LOG.info('No Chart timeout specified, using default: %ss',
const.DEFAULT_CHART_TIMEOUT)
LOG.info(
'No Chart timeout specified, using default: %ss',
const.DEFAULT_CHART_TIMEOUT)
wait_timeout = const.DEFAULT_CHART_TIMEOUT
self.timeout = wait_timeout
@ -206,13 +210,9 @@ class ChartWait():
class ResourceWait(ABC):
def __init__(self,
resource_type,
chart_wait,
labels,
get_resources,
required=True):
def __init__(
self, resource_type, chart_wait, labels, get_resources,
required=True):
self.resource_type = resource_type
self.chart_wait = chart_wait
self.label_selector = label_selectors(labels)
@ -241,8 +241,9 @@ class ResourceWait(ABC):
exclude_reason = self.get_exclude_reason(resource)
if exclude_reason:
LOG.debug('Excluding %s %s from wait: %s', self.resource_type,
resource.metadata.name, exclude_reason)
LOG.debug(
'Excluding %s %s from wait: %s', self.resource_type,
resource.metadata.name, exclude_reason)
return not exclude_reason
@ -276,8 +277,9 @@ class ResourceWait(ABC):
self.chart_wait.namespace, self.label_selector, self.required,
min_ready_msg, timeout)
if not self.label_selector:
LOG.warn('"label_selector" not specified, waiting with no labels '
'may cause unintended consequences.')
LOG.warn(
'"label_selector" not specified, waiting with no labels '
'may cause unintended consequences.')
# Track the overall deadline for timing out during waits
deadline = time.time() + timeout
@ -319,10 +321,11 @@ class ResourceWait(ABC):
deadline_remaining = int(round(deadline - time.time()))
if deadline_remaining <= 0:
error = ("Timed out waiting for resource type={}, namespace={}, "
"labels={}".format(self.resource_type,
self.chart_wait.namespace,
self.label_selector))
error = (
"Timed out waiting for resource type={}, namespace={}, "
"labels={}".format(
self.resource_type, self.chart_wait.namespace,
self.label_selector))
LOG.error(error)
raise k8s_exceptions.KubernetesWatchTimeoutException(error)
@ -339,12 +342,14 @@ class ResourceWait(ABC):
'`wait.resources` need to exclude `type: {}`?'.format(
self.resource_type))
else:
details = ('These {}s were not ready={}'.format(
self.resource_type, sorted(unready)))
details = (
'These {}s were not ready={}'.format(
self.resource_type, sorted(unready)))
error = (
'Timed out waiting for {}s (namespace={}, labels=({})). {}'.
format(self.resource_type, self.chart_wait.namespace,
self.label_selector, details))
format(
self.resource_type, self.chart_wait.namespace,
self.label_selector, details))
LOG.error(error)
raise k8s_exceptions.KubernetesWatchTimeoutException(error)
@ -399,10 +404,12 @@ class ResourceWait(ABC):
if not self.include_resource(resource):
continue
msg = ('Watch event: type=%s, name=%s, namespace=%s, '
'resource_version=%s')
LOG.debug(msg, event_type, resource_name,
self.chart_wait.namespace, resource_version)
msg = (
'Watch event: type=%s, name=%s, namespace=%s, '
'resource_version=%s')
LOG.debug(
msg, event_type, resource_name, self.chart_wait.namespace,
resource_version)
if event_type in {'ADDED', 'MODIFIED'}:
found_resources = True
@ -417,25 +424,29 @@ class ResourceWait(ABC):
ready.pop(resource_name)
elif event_type == 'ERROR':
LOG.error('Resource %s: Got error event %s', resource_name,
event['object'].to_dict())
LOG.error(
'Resource %s: Got error event %s', resource_name,
event['object'].to_dict())
raise k8s_exceptions.KubernetesErrorEventException(
'Got error event for resource: %s' % event['object'])
else:
LOG.error('Unrecognized event type (%s) for resource: %s',
event_type, event['object'])
raise (k8s_exceptions.
KubernetesUnknownStreamingEventTypeException(
'Got unknown event type (%s) for resource: %s' %
(event_type, event['object'])))
LOG.error(
'Unrecognized event type (%s) for resource: %s',
event_type, event['object'])
raise (
k8s_exceptions.
KubernetesUnknownStreamingEventTypeException(
'Got unknown event type (%s) for resource: %s' %
(event_type, event['object'])))
if all(ready.values()):
return (False, modified, [], found_resources)
return (True, modified,
[name for name, is_ready in ready.items() if not is_ready],
found_resources)
return (
True, modified,
[name for name, is_ready in ready.items()
if not is_ready], found_resources)
def _get_resource_condition(self, resource_conditions, condition_type):
for pc in resource_conditions:
@ -444,7 +455,6 @@ class ResourceWait(ABC):
class PodWait(ResourceWait):
def __init__(self, resource_type, chart_wait, labels, **kwargs):
super(PodWait, self).__init__(
resource_type, chart_wait, labels,
@ -494,7 +504,6 @@ class PodWait(ResourceWait):
class JobWait(ResourceWait):
def __init__(self, resource_type, chart_wait, labels, **kwargs):
super(JobWait, self).__init__(
resource_type, chart_wait, labels,
@ -533,8 +542,8 @@ def has_owner(resource, kind=None):
return False
CountOrPercent = collections.namedtuple('CountOrPercent',
'number is_percent source')
CountOrPercent = collections.namedtuple(
'CountOrPercent', 'number is_percent source')
# Controller logic (Deployment, DaemonSet, StatefulSet) is adapted from
# `kubectl rollout status`:
@ -542,16 +551,16 @@ CountOrPercent = collections.namedtuple('CountOrPercent',
class ControllerWait(ResourceWait):
def __init__(self,
resource_type,
chart_wait,
labels,
get_resources,
min_ready="100%",
**kwargs):
super(ControllerWait, self).__init__(resource_type, chart_wait, labels,
get_resources, **kwargs)
def __init__(
self,
resource_type,
chart_wait,
labels,
get_resources,
min_ready="100%",
**kwargs):
super(ControllerWait, self).__init__(
resource_type, chart_wait, labels, get_resources, **kwargs)
if isinstance(min_ready, str):
match = re.match('(.*)%$', min_ready)
@ -578,7 +587,6 @@ class ControllerWait(ResourceWait):
class DeploymentWait(ControllerWait):
def __init__(self, resource_type, chart_wait, labels, **kwargs):
super(DeploymentWait, self).__init__(
resource_type, chart_wait, labels,
@ -596,8 +604,8 @@ class DeploymentWait(ControllerWait):
# TODO: Don't fail for lack of progress if `min_ready` is met.
# TODO: Consider continuing after `min_ready` is met, so long as
# progress is being made.
cond = self._get_resource_condition(status.conditions,
'Progressing')
cond = self._get_resource_condition(
status.conditions, 'Progressing')
if cond and (cond.reason or '') == 'ProgressDeadlineExceeded':
msg = "deployment {} exceeded its progress deadline"
return (msg.format(name), False)
@ -606,21 +614,26 @@ class DeploymentWait(ControllerWait):
updated_replicas = status.updated_replicas or 0
available_replicas = status.available_replicas or 0
if updated_replicas < replicas:
msg = ("Waiting for deployment {} rollout to finish: {} out "
"of {} new replicas have been updated...")
msg = (
"Waiting for deployment {} rollout to finish: {} out "
"of {} new replicas have been updated...")
return (msg.format(name, updated_replicas, replicas), False)
if replicas > updated_replicas:
msg = ("Waiting for deployment {} rollout to finish: {} old "
"replicas are pending termination...")
msg = (
"Waiting for deployment {} rollout to finish: {} old "
"replicas are pending termination...")
pending = replicas - updated_replicas
return (msg.format(name, pending), False)
if not self._is_min_ready(available_replicas, updated_replicas):
msg = ("Waiting for deployment {} rollout to finish: {} of {} "
"updated replicas are available, with min_ready={}")
return (msg.format(name, available_replicas, updated_replicas,
self.min_ready.source), False)
msg = (
"Waiting for deployment {} rollout to finish: {} of {} "
"updated replicas are available, with min_ready={}")
return (
msg.format(
name, available_replicas, updated_replicas,
self.min_ready.source), False)
msg = "deployment {} successfully rolled out\n"
return (msg.format(name), True)
@ -629,13 +642,13 @@ class DeploymentWait(ControllerWait):
class DaemonSetWait(ControllerWait):
def __init__(self,
resource_type,
chart_wait,
labels,
allow_async_updates=False,
**kwargs):
def __init__(
self,
resource_type,
chart_wait,
labels,
allow_async_updates=False,
**kwargs):
super(DaemonSetWait, self).__init__(
resource_type, chart_wait, labels,
chart_wait.k8s.apps_v1_api.list_namespaced_daemon_set, **kwargs)
@ -667,18 +680,23 @@ class DaemonSetWait(ControllerWait):
number_available = status.number_available or 0
if (updated_number_scheduled < desired_number_scheduled):
msg = ("Waiting for daemon set {} rollout to finish: {} out "
"of {} new pods have been updated...")
return (msg.format(name, updated_number_scheduled,
desired_number_scheduled), False)
msg = (
"Waiting for daemon set {} rollout to finish: {} out "
"of {} new pods have been updated...")
return (
msg.format(
name, updated_number_scheduled,
desired_number_scheduled), False)
if not self._is_min_ready(number_available,
desired_number_scheduled):
msg = ("Waiting for daemon set {} rollout to finish: {} of {} "
"updated pods are available, with min_ready={}")
return (msg.format(name, number_available,
desired_number_scheduled,
self.min_ready.source), False)
msg = (
"Waiting for daemon set {} rollout to finish: {} of {} "
"updated pods are available, with min_ready={}")
return (
msg.format(
name, number_available, desired_number_scheduled,
self.min_ready.source), False)
msg = "daemon set {} successfully rolled out"
return (msg.format(name), True)
@ -688,13 +706,13 @@ class DaemonSetWait(ControllerWait):
class StatefulSetWait(ControllerWait):
def __init__(self,
resource_type,
chart_wait,
labels,
allow_async_updates=False,
**kwargs):
def __init__(
self,
resource_type,
chart_wait,
labels,
allow_async_updates=False,
**kwargs):
super(StatefulSetWait, self).__init__(
resource_type, chart_wait, labels,
chart_wait.k8s.apps_v1_api.list_namespaced_stateful_set, **kwargs)
@ -724,9 +742,9 @@ class StatefulSetWait(ControllerWait):
raise armada_exceptions.WaitException(
msg.format(ASYNC_UPDATE_NOT_ALLOWED_MSG, strategy))
if (is_rolling and replicas and
spec.update_strategy.rolling_update and
spec.update_strategy.rolling_update.partition):
if (is_rolling and replicas
and spec.update_strategy.rolling_update
and spec.update_strategy.rolling_update.partition):
msg = "{}: partitioned rollout"
raise armada_exceptions.WaitException(
@ -737,17 +755,21 @@ class StatefulSetWait(ControllerWait):
return (msg, False)
if replicas and not self._is_min_ready(ready_replicas, replicas):
msg = ("Waiting for statefulset {} rollout to finish: {} of {} "
"pods are ready, with min_ready={}")
return (msg.format(name, ready_replicas, replicas,
self.min_ready.source), False)
msg = (
"Waiting for statefulset {} rollout to finish: {} of {} "
"pods are ready, with min_ready={}")
return (
msg.format(
name, ready_replicas, replicas,
self.min_ready.source), False)
update_revision = status.update_revision or 0
current_revision = status.current_revision or 0
if update_revision != current_revision:
msg = ("waiting for statefulset rolling update to complete {} "
"pods at revision {}...")
msg = (
"waiting for statefulset rolling update to complete {} "
"pods at revision {}...")
return (msg.format(updated_replicas, update_revision), False)
msg = "statefulset rolling update complete {} pods at revision {}..."


@ -15,14 +15,14 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import random
import string
import testtools
import threading
import uuid
import mock
import testtools
_mock_thread_safe = False
_mock_call_lock = threading.RLock()


@ -32,8 +32,9 @@ class BaseControllerTest(test_base.ArmadaTestCase):
# the sample configuration files to avoid oslo.conf errors when
# creating the server below.
current_dir = os.path.dirname(os.path.realpath(__file__))
sample_conf_dir = os.path.join(current_dir, os.pardir, os.pardir,
os.pardir, os.pardir, 'etc', 'armada')
sample_conf_dir = os.path.join(
current_dir, os.pardir, os.pardir, os.pardir, os.pardir, 'etc',
'armada')
sample_conf_files = ['api-paste.ini', 'armada.conf.sample']
with mock.patch.object(armada.conf,
'_get_config_files') as mock_get_config_files:


@ -20,7 +20,6 @@ from armada.tests.unit.api import base as test_base
class TestApi(test_base.BaseControllerTest):
def test_init_application(self):
server = importlib.import_module('armada.api.server')
api = server.create()


@ -13,8 +13,8 @@
# limitations under the License.
import json
import mock
import mock
from oslo_config import cfg
from armada import api
@ -26,15 +26,14 @@ from armada.tests.unit.api import base
CONF = cfg.CONF
@mock.patch.object(armada_api.Apply, 'handle',
armada_api.Apply.handle.__wrapped__)
@mock.patch.object(
armada_api.Apply, 'handle', armada_api.Apply.handle.__wrapped__)
class ArmadaControllerTest(base.BaseControllerTest):
@mock.patch.object(api, 'Tiller')
@mock.patch.object(armada_api, 'Armada')
@mock.patch.object(armada_api, 'ReferenceResolver')
def test_armada_apply_resource(self, mock_resolver, mock_armada,
mock_tiller):
def test_armada_apply_resource(
self, mock_resolver, mock_armada, mock_tiller):
"""Tests the POST /api/v1.0/apply endpoint."""
rules = {'armada:create_endpoints': '@'}
self.policy.set_rules(rules)
@ -84,9 +83,10 @@ class ArmadaControllerTest(base.BaseControllerTest):
self.assertEqual('application/json', result.headers['content-type'])
mock_resolver.resolve_reference.assert_called_with([payload_url])
mock_armada.assert_called_with([{
'foo': 'bar'
}], **expected_armada_options)
mock_armada.assert_called_with(
[{
'foo': 'bar'
}], **expected_armada_options)
mock_armada.return_value.sync.assert_called()
mock_tiller.assert_called_with(dry_run=False)
@ -119,7 +119,6 @@ class ArmadaControllerTest(base.BaseControllerTest):
class ArmadaControllerNegativeTest(base.BaseControllerTest):
@test_utils.attr(type=['negative'])
def test_armada_apply_raises_415_given_unsupported_media_type(self):
"""Tests the POST /api/v1.0/apply endpoint returns 415 given
@ -133,7 +132,6 @@ class ArmadaControllerNegativeTest(base.BaseControllerTest):
class ArmadaControllerNegativeRbacTest(base.BaseControllerTest):
@test_utils.attr(type=['negative'])
def test_armada_apply_resource_insufficient_permissions(self):
"""Tests the POST /api/v1.0/apply endpoint returns 403 following failed


@ -18,7 +18,6 @@ from armada.tests.unit.api import base
class HealthControllerTest(base.BaseControllerTest):
def test_get_health_status(self):
"""
Validate that /api/v1.0/health returns 204.


@ -23,10 +23,9 @@ from armada.tests.unit.api import base
from armada.api.controller import rollback
@mock.patch.object(rollback.Rollback, 'handle',
rollback.Rollback.handle.__wrapped__)
@mock.patch.object(
rollback.Rollback, 'handle', rollback.Rollback.handle.__wrapped__)
class RollbackReleaseControllerTest(base.BaseControllerTest):
@mock.patch.object(api, 'Tiller')
def test_rollback_controller_pass(self, mock_tiller):
rules = {'armada:rollback_release': '@'}
@ -62,14 +61,14 @@ class RollbackReleaseControllerTest(base.BaseControllerTest):
release, 2, wait=True, timeout=123, force=True, recreate_pods=True)
self.assertEqual(200, resp.status_code)
self.assertEqual('Rollback of test-release complete.',
json.loads(resp.text)['message'])
self.assertEqual(
'Rollback of test-release complete.',
json.loads(resp.text)['message'])
m_tiller.__exit__.assert_called()
@test_utils.attr(type=['negative'])
class RollbackReleaseControllerNegativeTest(base.BaseControllerTest):
@mock.patch.object(api, 'Tiller')
def test_rollback_controller_tiller_exc_return_500(self, mock_tiller):
rules = {'armada:rollback_release': '@'}
@ -83,7 +82,6 @@ class RollbackReleaseControllerNegativeTest(base.BaseControllerTest):
@test_utils.attr(type=['negative'])
class RollbackReleaseControllerNegativeRbacTest(base.BaseControllerTest):
def test_rollback_release_insufficient_permissions(self):
"""Tests the GET /api/v1.0/rollback/{release} endpoint returns 403
following failed authorization.


@ -14,9 +14,9 @@
import json
import os
import yaml
import mock
import yaml
from armada import api
from armada.api.controller import test
@ -26,10 +26,10 @@ from armada.tests import test_utils
from armada.tests.unit.api import base
@mock.patch.object(test.TestReleasesManifestController, 'handle',
test.TestReleasesManifestController.handle.__wrapped__)
@mock.patch.object(
test.TestReleasesManifestController, 'handle',
test.TestReleasesManifestController.handle.__wrapped__)
class TestReleasesManifestControllerTest(base.BaseControllerTest):
@mock.patch.object(test, 'Manifest')
@mock.patch.object(api, 'Tiller')
def test_test_controller_with_manifest(self, mock_tiller, mock_manifest):
@ -38,8 +38,8 @@ class TestReleasesManifestControllerTest(base.BaseControllerTest):
# TODO: Don't use example charts in tests.
# TODO: Test cleanup arg is taken from url, then manifest.
manifest_path = os.path.join(os.getcwd(), 'examples',
'keystone-manifest.yaml')
manifest_path = os.path.join(
os.getcwd(), 'examples', 'keystone-manifest.yaml')
with open(manifest_path, 'r') as f:
payload = f.read()
documents = list(yaml.safe_load_all(payload))
@ -59,14 +59,14 @@ class TestReleasesManifestControllerTest(base.BaseControllerTest):
m_tiller.__exit__.assert_called()
@mock.patch.object(test.TestReleasesReleaseNameController, 'handle',
test.TestReleasesReleaseNameController.handle.__wrapped__)
@mock.patch.object(
test.TestReleasesReleaseNameController, 'handle',
test.TestReleasesReleaseNameController.handle.__wrapped__)
class TestReleasesReleaseNameControllerTest(base.BaseControllerTest):
@mock.patch.object(test.Test, 'test_release_for_success')
@mock.patch.object(api, 'Tiller')
def test_test_controller_test_pass(self, mock_tiller,
mock_test_release_for_success):
def test_test_controller_test_pass(
self, mock_tiller, mock_test_release_for_success):
rules = {'armada:test_release': '@'}
self.policy.set_rules(rules)
@ -79,14 +79,15 @@ class TestReleasesReleaseNameControllerTest(base.BaseControllerTest):
resp = self.app.simulate_get('/api/v1.0/test/{}'.format(release))
mock_test_release_for_success.assert_called_once()
self.assertEqual(200, resp.status_code)
self.assertEqual('MESSAGE: Test Pass',
json.loads(resp.text)['message'])
self.assertEqual(
'MESSAGE: Test Pass',
json.loads(resp.text)['message'])
m_tiller.__exit__.assert_called()
@mock.patch.object(test.Test, 'test_release_for_success')
@mock.patch.object(api, 'Tiller')
def test_test_controller_test_fail(self, mock_tiller,
mock_test_release_for_success):
def test_test_controller_test_fail(
self, mock_tiller, mock_test_release_for_success):
rules = {'armada:test_release': '@'}
self.policy.set_rules(rules)
@ -97,14 +98,15 @@ class TestReleasesReleaseNameControllerTest(base.BaseControllerTest):
release = 'fake-release'
resp = self.app.simulate_get('/api/v1.0/test/{}'.format(release))
self.assertEqual(200, resp.status_code)
self.assertEqual('MESSAGE: Test Fail',
json.loads(resp.text)['message'])
self.assertEqual(
'MESSAGE: Test Fail',
json.loads(resp.text)['message'])
m_tiller.__exit__.assert_called()
@mock.patch.object(test.Test, 'test_release_for_success')
@mock.patch.object(api, 'Tiller')
def test_test_controller_cleanup(self, mock_tiller,
mock_test_release_for_success):
def test_test_controller_cleanup(
self, mock_tiller, mock_test_release_for_success):
rules = {'armada:test_release': '@'}
self.policy.set_rules(rules)
@ -117,16 +119,17 @@ class TestReleasesReleaseNameControllerTest(base.BaseControllerTest):
'/api/v1.0/test/{}'.format(release), query_string='cleanup=true')
mock_test_release_for_success.assert_called_once()
self.assertEqual(200, resp.status_code)
self.assertEqual('MESSAGE: Test Pass',
json.loads(resp.text)['message'])
self.assertEqual(
'MESSAGE: Test Pass',
json.loads(resp.text)['message'])
m_tiller.__exit__.assert_called()
@test_utils.attr(type=['negative'])
@mock.patch.object(test.TestReleasesManifestController, 'handle',
test.TestReleasesManifestController.handle.__wrapped__)
@mock.patch.object(
test.TestReleasesManifestController, 'handle',
test.TestReleasesManifestController.handle.__wrapped__)
class TestReleasesManifestControllerNegativeTest(base.BaseControllerTest):
@mock.patch.object(test, 'Manifest')
@mock.patch.object(api, 'Tiller')
@mock.patch.object(test.Test, 'test_release_for_success')
@ -148,8 +151,8 @@ class TestReleasesManifestControllerNegativeTest(base.BaseControllerTest):
rules = {'armada:test_manifest': '@'}
self.policy.set_rules(rules)
manifest_path = os.path.join(os.getcwd(), 'examples',
'keystone-manifest.yaml')
manifest_path = os.path.join(
os.getcwd(), 'examples', 'keystone-manifest.yaml')
with open(manifest_path, 'r') as f:
payload = f.read()
@ -166,22 +169,22 @@ class TestReleasesManifestControllerNegativeTest(base.BaseControllerTest):
resp_body = json.loads(resp.text)
self.assertEqual(400, resp_body['code'])
self.assertEqual(1, resp_body['details']['errorCount'])
self.assertIn({
'message':
('An error occurred while building chart group: '
'Could not build ChartGroup named "keystone-infra-services".'),
'error':
True,
'kind':
'ValidationMessage',
'level':
'Error',
'name':
'ARM001',
'documents': []
}, resp_body['details']['messageList'])
self.assertEqual(('Failed to validate documents or generate Armada '
'Manifest from documents.'), resp_body['message'])
self.assertIn(
{
'message': (
'An error occurred while building chart group: '
'Could not build ChartGroup named '
'"keystone-infra-services".'),
'error': True,
'kind': 'ValidationMessage',
'level': 'Error',
'name': 'ARM001',
'documents': []
}, resp_body['details']['messageList'])
self.assertEqual(
(
'Failed to validate documents or generate Armada '
'Manifest from documents.'), resp_body['message'])
m_tiller.__exit__.assert_called()
@mock.patch('armada.utils.validate.Manifest')
@ -194,8 +197,8 @@ class TestReleasesManifestControllerNegativeTest(base.BaseControllerTest):
mock_manifest.return_value.get_manifest.side_effect = (
manifest_exceptions.ManifestException(details='foo'))
manifest_path = os.path.join(os.getcwd(), 'examples',
'keystone-manifest.yaml')
manifest_path = os.path.join(
os.getcwd(), 'examples', 'keystone-manifest.yaml')
with open(manifest_path, 'r') as f:
payload = f.read()
@ -208,27 +211,28 @@ class TestReleasesManifestControllerNegativeTest(base.BaseControllerTest):
resp_body = json.loads(resp.text)
self.assertEqual(400, resp_body['code'])
self.assertEqual(1, resp_body['details']['errorCount'])
self.assertEqual([{
'message':
('An error occurred while generating the manifest: foo.'),
'error':
True,
'kind':
'ValidationMessage',
'level':
'Error',
'name':
'ARM001',
'documents': []
}], resp_body['details']['messageList'])
self.assertEqual(('Failed to validate documents or generate Armada '
'Manifest from documents.'), resp_body['message'])
self.assertEqual(
[
{
'message': (
'An error occurred while generating the manifest: foo.'
),
'error': True,
'kind': 'ValidationMessage',
'level': 'Error',
'name': 'ARM001',
'documents': []
}
], resp_body['details']['messageList'])
self.assertEqual(
(
'Failed to validate documents or generate Armada '
'Manifest from documents.'), resp_body['message'])
m_tiller.__exit__.assert_called()
@test_utils.attr(type=['negative'])
class TestReleasesReleaseNameControllerNegativeTest(base.BaseControllerTest):
@mock.patch.object(api, 'Tiller')
@mock.patch.object(test.Test, 'test_release_for_success')
def test_test_controller_tiller_exc_returns_500(
@ -243,9 +247,8 @@ class TestReleasesReleaseNameControllerNegativeTest(base.BaseControllerTest):
self.assertEqual(500, resp.status_code)
class TestReleasesReleaseNameControllerNegativeRbacTest(
base.BaseControllerTest):
class TestReleasesReleaseNameControllerNegativeRbacTest(base.BaseControllerTest
):
@test_utils.attr(type=['negative'])
def test_test_release_insufficient_permissions(self):
"""Tests the GET /api/v1.0/test/{release} endpoint returns 403
@ -258,7 +261,6 @@ class TestReleasesReleaseNameControllerNegativeRbacTest(
class TestReleasesManifestControllerNegativeRbacTest(base.BaseControllerTest):
@test_utils.attr(type=['negative'])
def test_test_manifest_insufficient_permissions(self):
"""Tests the POST /api/v1.0/tests endpoint returns 403 following failed


@ -13,7 +13,6 @@
# limitations under the License.
import mock
from oslo_config import cfg
from armada import api
@ -25,7 +24,6 @@ CONF = cfg.CONF
class TillerControllerTest(base.BaseControllerTest):
@mock.patch.object(api, 'Tiller')
def test_get_tiller_status(self, mock_tiller):
"""Tests GET /api/v1.0/status endpoint."""
@ -140,7 +138,6 @@ class TillerControllerTest(base.BaseControllerTest):
class TillerControllerNegativeRbacTest(base.BaseControllerTest):
@test_utils.attr(type=['negative'])
def test_list_tiller_releases_insufficient_permissions(self):
"""Tests the GET /api/v1.0/releases endpoint returns 403 following


@ -18,7 +18,6 @@ from armada.tests.unit.api import base
class ValidationControllerNegativeRbacTest(base.BaseControllerTest):
@test_utils.attr(type=['negative'])
def test_validate_manifest_insufficient_permissions(self):
"""Tests the POST /api/v1.0/validate endpoint returns 403 following


@ -16,7 +16,6 @@ from armada.tests.unit.api import base
class VersionsControllerTest(base.BaseControllerTest):
def test_list_versions(self):
"""
Validate that the versions endpoint lists the available API versions.


@ -42,7 +42,6 @@ def is_connected():
class ArmadaTestCase(testtools.TestCase):
def setUp(self):
super(ArmadaTestCase, self).setUp()
self.useFixture(fixtures.FakeLogger('armada'))


@ -10,10 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
import testtools
import mock
from oslo_policy import policy as common_policy
import testtools
from armada.common import policy
from armada import conf as cfg
@ -24,7 +23,6 @@ CONF = cfg.CONF
class PolicyTestCase(testtools.TestCase):
def setUp(self):
super(PolicyTestCase, self).setUp()
self.rules = {
@ -48,8 +46,9 @@ class PolicyTestCase(testtools.TestCase):
action = "example:nope"
mock_ctx.to_policy_view.return_value = self.credentials
self.assertRaises(exc.ActionForbidden, policy._enforce_policy, action,
self.target, mock_ctx)
self.assertRaises(
exc.ActionForbidden, policy._enforce_policy, action, self.target,
mock_ctx)
mock_log.exception.assert_called_once_with(
'Policy not registered for %(action)s', {'action': 'example:nope'})
@ -67,5 +66,6 @@ class PolicyTestCase(testtools.TestCase):
action = "armada:create_endpoints"
mock_ctx.to_policy_view.return_value = self.credentials
self.assertRaises(exc.ActionForbidden, policy._enforce_policy, action,
self.target, mock_ctx)
self.assertRaises(
exc.ActionForbidden, policy._enforce_policy, action, self.target,
mock_ctx)


@ -12,15 +12,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import testtools
import responses
import testtools
from armada.common.session import ArmadaSession
class SessionTestCase(testtools.TestCase):
def test_create_session(self):
"""Tests setting up an Armada session"""
sess = ArmadaSession("testarmada")


@ -19,13 +19,13 @@
from __future__ import absolute_import
import os
import yaml
import fixtures
import mock
from oslo_config import cfg
from oslo_policy import opts as policy_opts
from oslo_policy import policy as oslo_policy
import yaml
from armada.common import policies
import armada.common.policy


@ -145,15 +145,15 @@ data:
enabled: true
"""
CHART_SOURCES = [('git://opendev.org/dummy/armada.git', 'chart_1'),
('/tmp/dummy/armada', 'chart_2'),
('/tmp/dummy/armada', 'chart_3'),
('/tmp/dummy/armada', 'chart_4')]
CHART_SOURCES = [
('git://opendev.org/dummy/armada.git', 'chart_1'),
('/tmp/dummy/armada', 'chart_2'), ('/tmp/dummy/armada', 'chart_3'),
('/tmp/dummy/armada', 'chart_4')
]
# TODO(seaneagan): Add unit tests with dependencies, including transitive.
class ArmadaHandlerTestCase(base.ArmadaTestCase):
def _test_pre_flight_ops(self, armada_obj):
armada_obj.pre_flight_ops()
@ -343,8 +343,8 @@ class ArmadaHandlerTestCase(base.ArmadaTestCase):
armada_obj.post_flight_ops()
for group in armada_obj.manifest['data']['chart_groups']:
for counter, chart in enumerate(
group.get(const.KEYWORD_DATA).get(const.KEYWORD_CHARTS)):
for counter, chart in enumerate(group.get(const.KEYWORD_DATA).get(
const.KEYWORD_CHARTS)):
if chart.get(
const.KEYWORD_DATA).get('source').get('type') == 'git':
mock_source.source_cleanup.assert_called_with(
@ -355,20 +355,22 @@ class ArmadaHandlerTestCase(base.ArmadaTestCase):
# run sync tests for unsequenced as well by moving them to separate test
# class with two separate subclasses which set chart group `sequenced`
# field, one to true, one to false.
def _test_sync(self,
known_releases,
test_success=True,
test_failure_to_run=False,
expected_last_test_result=None,
diff={'some_key': {'some diff'}}):
def _test_sync(
self,
known_releases,
test_success=True,
test_failure_to_run=False,
expected_last_test_result=None,
diff={'some_key': {'some diff'}}):
"""Test install functionality from the sync() method."""
@mock.patch.object(armada.Armada, 'post_flight_ops')
@mock.patch.object(armada.Armada, 'pre_flight_ops')
@mock.patch('armada.handlers.chart_deploy.ChartBuilder')
@mock.patch('armada.handlers.chart_deploy.Test')
def _do_test(mock_test, mock_chartbuilder, mock_pre_flight,
mock_post_flight):
def _do_test(
mock_test, mock_chartbuilder, mock_pre_flight,
mock_post_flight):
# Instantiate Armada object.
yaml_documents = list(yaml.safe_load_all(TEST_YAML))
@ -417,8 +419,8 @@ class ArmadaHandlerTestCase(base.ArmadaTestCase):
release_name = release_prefixer(prefix, release)
# Simplified check because the actual code uses logical-or's
# multiple conditions, so this is enough.
native_wait_enabled = (chart['wait'].get('native', {}).get(
'enabled', True))
native_wait_enabled = (
chart['wait'].get('native', {}).get('enabled', True))
if release_name not in [x.name for x in known_releases]:
expected_install_release_calls.append(
@ -503,8 +505,8 @@ class ArmadaHandlerTestCase(base.ArmadaTestCase):
any_order = not chart_group['sequenced']
# Verify that at least 1 release is either installed or updated.
self.assertTrue(
len(expected_install_release_calls) >= 1 or
len(expected_update_release_calls) >= 1)
len(expected_install_release_calls) >= 1
or len(expected_update_release_calls) >= 1)
# Verify that the expected number of non-deployed releases are
# installed with expected arguments.
self.assertEqual(
@ -549,8 +551,8 @@ class ArmadaHandlerTestCase(base.ArmadaTestCase):
chart = self._get_chart_by_name(name)
def get_test_result(success):
status = (TESTRUN_STATUS_SUCCESS
if success else TESTRUN_STATUS_FAILURE)
status = (
TESTRUN_STATUS_SUCCESS if success else TESTRUN_STATUS_FAILURE)
return mock.Mock(status=status)
last_test_suite_run = None
@ -658,14 +660,12 @@ class ArmadaHandlerTestCase(base.ArmadaTestCase):
self.assertRaises(ChartDeployException, _test_method)
def test_armada_sync_test_failure(self):
def _test_method():
self._test_sync([], test_success=False)
self.assertRaises(ChartDeployException, _test_method)
def test_armada_sync_test_failure_to_run(self):
def _test_method():
self._test_sync([], test_failure_to_run=True)
@ -673,15 +673,16 @@ class ArmadaHandlerTestCase(base.ArmadaTestCase):
class ArmadaNegativeHandlerTestCase(base.ArmadaTestCase):
@mock.patch.object(armada, 'source')
def test_armada_get_manifest_exception(self, mock_source):
"""Test armada handling with invalid manifest."""
yaml_documents = list(yaml.safe_load_all(TEST_YAML))
error_re = ('.*Documents must include at least one of each of .* and '
'only one .*')
self.assertRaisesRegexp(ManifestException, error_re, armada.Armada,
yaml_documents[:1], mock.MagicMock())
error_re = (
'.*Documents must include at least one of each of .* and '
'only one .*')
self.assertRaisesRegexp(
ManifestException, error_re, armada.Armada, yaml_documents[:1],
mock.MagicMock())
@mock.patch.object(armada, 'source')
def test_armada_override_exception(self, mock_source):


@ -15,13 +15,13 @@
import inspect
import os
import shutil
import yaml
import fixtures
from hapi.chart.chart_pb2 import Chart
from hapi.chart.metadata_pb2 import Metadata
import mock
import testtools
import yaml
from armada import const
from armada.handlers.chartbuilder import ChartBuilder
@ -137,14 +137,13 @@ class BaseChartBuilderTestCase(testtools.TestCase):
class ChartBuilderTestCase(BaseChartBuilderTestCase):
def test_source_clone(self):
# Create a temporary directory with Chart.yaml that contains data
# from ``self.chart_yaml``.
chart_dir = self.useFixture(fixtures.TempDir())
self.addCleanup(shutil.rmtree, chart_dir.path)
self._write_temporary_file_contents(chart_dir.path, 'Chart.yaml',
self.chart_yaml)
self._write_temporary_file_contents(
chart_dir.path, 'Chart.yaml', self.chart_yaml)
chartbuilder = ChartBuilder(self._get_test_chart(chart_dir))
@ -158,8 +157,9 @@ class ChartBuilderTestCase(BaseChartBuilderTestCase):
chartbuilder = ChartBuilder(self._get_test_chart(chart_dir))
self.assertRaises(chartbuilder_exceptions.MetadataLoadException,
chartbuilder.get_metadata)
self.assertRaises(
chartbuilder_exceptions.MetadataLoadException,
chartbuilder.get_metadata)
def test_get_files(self):
"""Validates that ``get_files()`` ignores 'Chart.yaml', 'values.yaml'
@ -206,8 +206,8 @@ class ChartBuilderTestCase(BaseChartBuilderTestCase):
# that that logic has already been performed.
chart_dir = self.useFixture(fixtures.TempDir())
self.addCleanup(shutil.rmtree, chart_dir.path)
self._write_temporary_file_contents(chart_dir.path, 'Chart.yaml',
self.chart_yaml)
self._write_temporary_file_contents(
chart_dir.path, 'Chart.yaml', self.chart_yaml)
ch = yaml.safe_load(self.chart_stream)
ch['data']['source_dir'] = (chart_dir.path, '')
@ -215,7 +215,8 @@ class ChartBuilderTestCase(BaseChartBuilderTestCase):
chartbuilder = ChartBuilder(test_chart)
helm_chart = chartbuilder.get_helm_chart()
expected = inspect.cleandoc("""
expected = inspect.cleandoc(
"""
metadata {
name: "hello-world-chart"
version: "0.1.0"
@ -234,10 +235,10 @@ class ChartBuilderTestCase(BaseChartBuilderTestCase):
chart_dir = self.useFixture(fixtures.TempDir())
self.addCleanup(shutil.rmtree, chart_dir.path)
self._write_temporary_file_contents(chart_dir.path, 'Chart.yaml',
self.chart_yaml)
self._write_temporary_file_contents(chart_dir.path, 'values.yaml',
self.chart_value)
self._write_temporary_file_contents(
chart_dir.path, 'Chart.yaml', self.chart_yaml)
self._write_temporary_file_contents(
chart_dir.path, 'values.yaml', self.chart_value)
ch = yaml.safe_load(self.chart_stream)
ch['data']['source_dir'] = (chart_dir.path, '')
@ -257,15 +258,15 @@ class ChartBuilderTestCase(BaseChartBuilderTestCase):
chart_dir = self.useFixture(fixtures.TempDir())
self.addCleanup(shutil.rmtree, chart_dir.path)
# Chart.yaml is mandatory for `ChartBuilder.get_metadata`.
self._write_temporary_file_contents(chart_dir.path, 'Chart.yaml',
self.chart_yaml)
self._write_temporary_file_contents(
chart_dir.path, 'Chart.yaml', self.chart_yaml)
self._write_temporary_file_contents(chart_dir.path, 'foo', "foobar")
self._write_temporary_file_contents(chart_dir.path, 'bar', "bazqux")
# Also create a nested directory and verify that files from it are also
# added.
nested_dir = self._make_temporary_subdirectory(chart_dir.path,
'nested')
nested_dir = self._make_temporary_subdirectory(
chart_dir.path, 'nested')
self._write_temporary_file_contents(nested_dir, 'nested0', "random")
ch = yaml.safe_load(self.chart_stream)
@ -275,10 +276,11 @@ class ChartBuilderTestCase(BaseChartBuilderTestCase):
chartbuilder = ChartBuilder(test_chart)
helm_chart = chartbuilder.get_helm_chart()
expected_files = ('[type_url: "%s"\nvalue: "bazqux"\n, '
'type_url: "%s"\nvalue: "foobar"\n, '
'type_url: "%s"\nvalue: "random"\n]' %
('./bar', './foo', 'nested/nested0'))
expected_files = (
'[type_url: "%s"\nvalue: "bazqux"\n, '
'type_url: "%s"\nvalue: "foobar"\n, '
'type_url: "%s"\nvalue: "random"\n]' %
('./bar', './foo', 'nested/nested0'))
self.assertIsInstance(helm_chart, Chart)
self.assertTrue(hasattr(helm_chart, 'metadata'))
@ -300,8 +302,8 @@ class ChartBuilderTestCase(BaseChartBuilderTestCase):
charts_nested_subdir = self._make_temporary_subdirectory(
charts_subdir, 'extra')
self._write_temporary_file_contents(chart_dir.path, 'Chart.yaml',
self.chart_yaml)
self._write_temporary_file_contents(
chart_dir.path, 'Chart.yaml', self.chart_yaml)
self._write_temporary_file_contents(chart_dir.path, 'foo', "foobar")
self._write_temporary_file_contents(chart_dir.path, 'bar', "bazqux")
@ -311,16 +313,16 @@ class ChartBuilderTestCase(BaseChartBuilderTestCase):
self._write_temporary_file_contents(chart_dir.path, file, "")
file_to_ignore = 'file_to_ignore'
# Files to ignore within templates/ subdirectory.
self._write_temporary_file_contents(templates_subdir, file_to_ignore,
"")
self._write_temporary_file_contents(
templates_subdir, file_to_ignore, "")
# Files to ignore within charts/ subdirectory.
self._write_temporary_file_contents(charts_subdir, file_to_ignore, "")
# Files to ignore within templates/bin subdirectory.
self._write_temporary_file_contents(templates_nested_subdir,
file_to_ignore, "")
self._write_temporary_file_contents(
templates_nested_subdir, file_to_ignore, "")
# Files to ignore within charts/extra subdirectory.
self._write_temporary_file_contents(charts_nested_subdir,
file_to_ignore, "")
self._write_temporary_file_contents(
charts_nested_subdir, file_to_ignore, "")
# Files to **include** within charts/ subdirectory.
self._write_temporary_file_contents(charts_subdir, '.prov', "xyzzy")
@ -331,10 +333,11 @@ class ChartBuilderTestCase(BaseChartBuilderTestCase):
chartbuilder = ChartBuilder(test_chart)
helm_chart = chartbuilder.get_helm_chart()
expected_files = ('[type_url: "%s"\nvalue: "bazqux"\n, '
'type_url: "%s"\nvalue: "foobar"\n, '
'type_url: "%s"\nvalue: "xyzzy"\n]' %
('./bar', './foo', 'charts/.prov'))
expected_files = (
'[type_url: "%s"\nvalue: "bazqux"\n, '
'type_url: "%s"\nvalue: "foobar"\n, '
'type_url: "%s"\nvalue: "xyzzy"\n]' %
('./bar', './foo', 'charts/.prov'))
# Validate that only relevant files are included, that the ignored
# files are present.
@ -349,16 +352,16 @@ class ChartBuilderTestCase(BaseChartBuilderTestCase):
# Main chart directory and files.
chart_dir = self.useFixture(fixtures.TempDir())
self.addCleanup(shutil.rmtree, chart_dir.path)
self._write_temporary_file_contents(chart_dir.path, 'Chart.yaml',
self.chart_yaml)
self._write_temporary_file_contents(
chart_dir.path, 'Chart.yaml', self.chart_yaml)
ch = yaml.safe_load(self.chart_stream)
ch['data']['source_dir'] = (chart_dir.path, '')
# Dependency chart directory and files.
dep_chart_dir = self.useFixture(fixtures.TempDir())
self.addCleanup(shutil.rmtree, dep_chart_dir.path)
self._write_temporary_file_contents(dep_chart_dir.path, 'Chart.yaml',
self.dependency_chart_yaml)
self._write_temporary_file_contents(
dep_chart_dir.path, 'Chart.yaml', self.dependency_chart_yaml)
dep_ch = yaml.safe_load(self.dependency_chart_stream)
dep_ch['data']['source_dir'] = (dep_chart_dir.path, '')
@ -369,7 +372,8 @@ class ChartBuilderTestCase(BaseChartBuilderTestCase):
chartbuilder = ChartBuilder(main_chart)
helm_chart = chartbuilder.get_helm_chart()
expected_dependency = inspect.cleandoc("""
expected_dependency = inspect.cleandoc(
"""
metadata {
name: "dependency-chart"
version: "0.1.0"
@ -379,7 +383,8 @@ class ChartBuilderTestCase(BaseChartBuilderTestCase):
}
""").strip()
expected = inspect.cleandoc("""
expected = inspect.cleandoc(
"""
metadata {
name: "hello-world-chart"
version: "0.1.0"
@ -418,8 +423,8 @@ class ChartBuilderTestCase(BaseChartBuilderTestCase):
# Validate base case.
chart_dir = self.useFixture(fixtures.TempDir())
self.addCleanup(shutil.rmtree, chart_dir.path)
self._write_temporary_file_contents(chart_dir.path, 'Chart.yaml',
self.chart_yaml)
self._write_temporary_file_contents(
chart_dir.path, 'Chart.yaml', self.chart_yaml)
ch = yaml.safe_load(self.chart_stream)
ch['data']['source_dir'] = (chart_dir.path, '')
@ -432,8 +437,8 @@ class ChartBuilderTestCase(BaseChartBuilderTestCase):
# Validate recursive case (with dependencies).
dep_chart_dir = self.useFixture(fixtures.TempDir())
self.addCleanup(shutil.rmtree, dep_chart_dir.path)
self._write_temporary_file_contents(dep_chart_dir.path, 'Chart.yaml',
self.dependency_chart_yaml)
self._write_temporary_file_contents(
dep_chart_dir.path, 'Chart.yaml', self.dependency_chart_yaml)
dep_ch = yaml.safe_load(self.dependency_chart_stream)
dep_ch['data']['source_dir'] = (dep_chart_dir.path, '')
@ -441,7 +446,8 @@ class ChartBuilderTestCase(BaseChartBuilderTestCase):
test_chart['data']['dependencies'] = [dependency_chart]
chartbuilder = ChartBuilder(test_chart)
re = inspect.cleandoc("""
re = inspect.cleandoc(
"""
hello-world-chart.*A sample Helm chart for Kubernetes.*
dependency-chart.*Another sample Helm chart for Kubernetes.*
""").replace('\n', '').strip()
@ -449,7 +455,6 @@ class ChartBuilderTestCase(BaseChartBuilderTestCase):
class ChartBuilderNegativeTestCase(BaseChartBuilderTestCase):
def setUp(self):
super(ChartBuilderNegativeTestCase, self).setUp()
# Create an exception for testing since instantiating one manually
@ -471,13 +476,15 @@ class ChartBuilderNegativeTestCase(BaseChartBuilderTestCase):
chartbuilder = ChartBuilder(self._get_test_chart(chart_dir))
# Confirm it failed for both encodings.
error_re = (r'.*A str exception occurred while trying to read file:'
r'.*Details:\n.*\(encoding=utf-8\).*\n\(encoding=latin1\)')
error_re = (
r'.*A str exception occurred while trying to read file:'
r'.*Details:\n.*\(encoding=utf-8\).*\n\(encoding=latin1\)')
with mock.patch("builtins.open", mock.mock_open(read_data="")) \
as mock_file:
mock_file.return_value.read.side_effect = self.exc_to_raise
self.assertRaisesRegexp(chartbuilder_exceptions.FilesLoadException,
error_re, chartbuilder.get_files)
self.assertRaisesRegexp(
chartbuilder_exceptions.FilesLoadException, error_re,
chartbuilder.get_files)
def test_get_files_fails_once_to_read_binary_file_passes(self):
chart_dir = self.useFixture(fixtures.TempDir())


@ -11,20 +11,20 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from datetime import datetime
from kubernetes.client.rest import ApiException
import mock
import testtools
from kubernetes.client.rest import ApiException
from armada.handlers import lock
@mock.patch('armada.handlers.lock.K8s')
@mock.patch.object(lock.time, 'sleep', lambda x: True)
class LockTestCase(testtools.TestCase):
def __init__(self, *args, **kwargs):
super(LockTestCase, self).__init__(*args, **kwargs)
self.resp = None

@ -14,9 +14,9 @@
import copy
import os
import yaml
import testtools
import testtools
import yaml
from armada import exceptions
from armada.handlers import manifest
@ -25,11 +25,10 @@ from armada.utils import validate
class ManifestTestCase(testtools.TestCase):
def setUp(self):
super(ManifestTestCase, self).setUp()
examples_dir = os.path.join(os.getcwd(), 'armada', 'tests', 'unit',
'resources')
examples_dir = os.path.join(
os.getcwd(), 'armada', 'tests', 'unit', 'resources')
with open(os.path.join(examples_dir, 'keystone-manifest.yaml')) as f:
self.documents = list(yaml.safe_load_all(f.read()))
@ -43,10 +42,10 @@ class ManifestTestCase(testtools.TestCase):
self.assertEqual(5, len(armada_manifest.charts))
self.assertEqual(3, len(armada_manifest.groups))
self.assertEqual([self.documents[x] for x in range(5)],
armada_manifest.charts)
self.assertEqual([self.documents[x] for x in range(5, 8)],
armada_manifest.groups)
self.assertEqual(
[self.documents[x] for x in range(5)], armada_manifest.charts)
self.assertEqual(
[self.documents[x] for x in range(5, 8)], armada_manifest.groups)
self.assertEqual(self.documents[-1], armada_manifest.manifest)
def test_get_documents_with_target_manifest(self):
@ -62,13 +61,13 @@ class ManifestTestCase(testtools.TestCase):
self.assertEqual(5, len(armada_manifest.charts))
self.assertEqual(3, len(armada_manifest.groups))
self.assertEqual([self.documents[x] for x in range(5)],
armada_manifest.charts)
self.assertEqual([self.documents[x] for x in range(5, 8)],
armada_manifest.groups)
self.assertEqual(
[self.documents[x] for x in range(5)], armada_manifest.charts)
self.assertEqual(
[self.documents[x] for x in range(5, 8)], armada_manifest.groups)
self.assertEqual(self.documents[-1], armada_manifest.manifest)
self.assertEqual('armada-manifest',
self.documents[-1]['metadata']['name'])
self.assertEqual(
'armada-manifest', self.documents[-1]['metadata']['name'])
def test_get_documents_with_multi_manifest_and_target_manifest(self):
# Validate that specifying `target_manifest` flag returns the correct
@ -90,29 +89,30 @@ class ManifestTestCase(testtools.TestCase):
self.assertEqual(5, len(armada_manifest.charts))
self.assertEqual(3, len(armada_manifest.groups))
self.assertEqual([self.documents[x] for x in range(5)],
armada_manifest.charts)
self.assertEqual([self.documents[x] for x in range(5, 8)],
armada_manifest.groups)
self.assertEqual(
[self.documents[x] for x in range(5)], armada_manifest.charts)
self.assertEqual(
[self.documents[x] for x in range(5, 8)], armada_manifest.groups)
self.assertEqual(armada_manifest.manifest, self.documents[-1])
self.assertEqual('armada-manifest',
armada_manifest.manifest['metadata']['name'])
self.assertEqual(
'armada-manifest', armada_manifest.manifest['metadata']['name'])
# Specify the alternative manifest and verify it works.
armada_manifest = manifest.Manifest(
documents, target_manifest='alt-armada-manifest')
self.assertIsNotNone(armada_manifest.manifest)
self.assertEqual(other_manifest, armada_manifest.manifest)
self.assertEqual('alt-armada-manifest',
armada_manifest.manifest['metadata']['name'])
self.assertEqual(
'alt-armada-manifest',
armada_manifest.manifest['metadata']['name'])
def test_get_manifest(self):
armada_manifest = manifest.Manifest(
self.documents, target_manifest='armada-manifest')
obtained_manifest = armada_manifest.get_manifest()
self.assertIsInstance(obtained_manifest, dict)
self.assertEqual(obtained_manifest['data'],
armada_manifest.manifest['data'])
self.assertEqual(
obtained_manifest['data'], armada_manifest.manifest['data'])
def test_find_documents(self):
armada_manifest = manifest.Manifest(self.documents)
@ -195,15 +195,17 @@ class ManifestTestCase(testtools.TestCase):
keystone_infra_services_chart_group = armada_manifest. \
find_chart_group_document('keystone-infra-services')
self.assertEqual(keystone_infra_services_chart_group,
built_armada_manifest['data']['chart_groups'][0])
self.assertEqual(
keystone_infra_services_chart_group,
built_armada_manifest['data']['chart_groups'][0])
# the first chart group in the Armada manifest
openstack_keystone_chart_group = armada_manifest. \
find_chart_group_document('openstack-keystone')
self.assertEqual(openstack_keystone_chart_group,
built_armada_manifest['data']['chart_groups'][1])
self.assertEqual(
openstack_keystone_chart_group,
built_armada_manifest['data']['chart_groups'][1])
def test_verify_build_chart_group_deps(self):
armada_manifest = manifest.Manifest(self.documents)
@ -223,8 +225,9 @@ class ManifestTestCase(testtools.TestCase):
keystone_dependencies = keystone_chart_with_deps['data'][
'dependencies']
self.assertEqual(openstack_keystone_chart_group_deps_dep_added[0],
keystone_dependencies[0])
self.assertEqual(
openstack_keystone_chart_group_deps_dep_added[0],
keystone_dependencies[0])
# building the deps for openstack-keystone chart group
chart_group = armada_manifest.find_chart_group_document(
@ -248,10 +251,10 @@ class ManifestTestCase(testtools.TestCase):
memcached_dependencies = memcached_chart_with_deps['data'][
'dependencies']
self.assertEqual(keystone_infra_services_dep_added[0],
mariadb_dependencies[0])
self.assertEqual(keystone_infra_services_dep_added[0],
memcached_dependencies[0])
self.assertEqual(
keystone_infra_services_dep_added[0], mariadb_dependencies[0])
self.assertEqual(
keystone_infra_services_dep_added[0], memcached_dependencies[0])
def test_verify_build_chart_deps(self):
armada_manifest = manifest.Manifest(self.documents)
@ -265,8 +268,8 @@ class ManifestTestCase(testtools.TestCase):
# since not dependent on other charts, the original and modified
# dependencies are the same
self.assertEqual(helm_toolkit_original_dependency,
helm_toolkit_chart_with_deps)
self.assertEqual(
helm_toolkit_original_dependency, helm_toolkit_chart_with_deps)
# helm-toolkit dependency, the basis for comparison of d
# ependencies in other charts
@ -287,8 +290,8 @@ class ManifestTestCase(testtools.TestCase):
self.assertIsInstance(keystone_dependencies, list)
self.assertEqual(1, len(keystone_dependencies))
self.assertEqual(expected_helm_toolkit_dependency,
keystone_dependencies[0])
self.assertEqual(
expected_helm_toolkit_dependency, keystone_dependencies[0])
# mariadb chart dependencies
mariadb_chart = armada_manifest.find_chart_document('mariadb')
@ -304,8 +307,8 @@ class ManifestTestCase(testtools.TestCase):
self.assertIsInstance(mariadb_dependencies, list)
self.assertEqual(1, len(mariadb_dependencies))
self.assertEqual(expected_helm_toolkit_dependency,
mariadb_dependencies[0])
self.assertEqual(
expected_helm_toolkit_dependency, mariadb_dependencies[0])
# memcached chart dependencies
memcached_chart = armada_manifest.find_chart_document('memcached')
@ -313,8 +316,8 @@ class ManifestTestCase(testtools.TestCase):
memcached_chart_with_deps = armada_manifest.build_chart_deps(
memcached_chart)
self.assertNotEqual(original_memcached_chart,
memcached_chart_with_deps)
self.assertNotEqual(
original_memcached_chart, memcached_chart_with_deps)
self.assertIn('data', memcached_chart_with_deps)
self.assertIn('dependencies', memcached_chart_with_deps['data'])
@ -323,16 +326,15 @@ class ManifestTestCase(testtools.TestCase):
self.assertIsInstance(memcached_dependencies, list)
self.assertEqual(1, len(memcached_dependencies))
self.assertEqual(expected_helm_toolkit_dependency,
memcached_dependencies[0])
self.assertEqual(
expected_helm_toolkit_dependency, memcached_dependencies[0])
class ManifestNegativeTestCase(testtools.TestCase):
def setUp(self):
super(ManifestNegativeTestCase, self).setUp()
examples_dir = os.path.join(os.getcwd(), 'armada', 'tests', 'unit',
'resources')
examples_dir = os.path.join(
os.getcwd(), 'armada', 'tests', 'unit', 'resources')
with open(os.path.join(examples_dir, 'keystone-manifest.yaml')) as f:
self.documents = list(yaml.safe_load_all(f.read()))
@ -343,8 +345,9 @@ class ManifestNegativeTestCase(testtools.TestCase):
documents.append(documents[-1]) # Copy the last manifest.
error_re = r'Multiple manifests are not supported.*'
self.assertRaisesRegexp(exceptions.ManifestException, error_re,
manifest.Manifest, documents)
self.assertRaisesRegexp(
exceptions.ManifestException, error_re, manifest.Manifest,
documents)
def test_get_documents_multi_target_manifests_raises_value_error(self):
# Validates that finding multiple manifests with `target_manifest`
@ -361,10 +364,12 @@ class ManifestNegativeTestCase(testtools.TestCase):
target_manifest='armada-manifest')
def _assert_missing_documents_raises(self, documents):
error_re = ('.*Documents must include at least one of each of .* and '
'only one .*')
self.assertRaisesRegexp(exceptions.ManifestException, error_re,
manifest.Manifest, documents)
error_re = (
'.*Documents must include at least one of each of .* and '
'only one .*')
self.assertRaisesRegexp(
exceptions.ManifestException, error_re, manifest.Manifest,
documents)
def test_get_documents_missing_manifest(self):
# Validates exceptions.ManifestException is thrown if no manifest is
@ -384,18 +389,19 @@ class ManifestNegativeTestCase(testtools.TestCase):
def test_find_chart_document_negative(self):
armada_manifest = manifest.Manifest(self.documents)
error_re = r'.*Could not find %s named "%s"' % (schema.TYPE_CHART,
'invalid')
self.assertRaisesRegexp(exceptions.BuildChartException, error_re,
armada_manifest.find_chart_document, 'invalid')
error_re = r'.*Could not find %s named "%s"' % (
schema.TYPE_CHART, 'invalid')
self.assertRaisesRegexp(
exceptions.BuildChartException, error_re,
armada_manifest.find_chart_document, 'invalid')
def test_find_group_document_negative(self):
armada_manifest = manifest.Manifest(self.documents)
error_re = r'.*Could not find %s named "%s"' % (schema.TYPE_CHARTGROUP,
'invalid')
self.assertRaisesRegexp(exceptions.BuildChartGroupException, error_re,
armada_manifest.find_chart_group_document,
'invalid')
error_re = r'.*Could not find %s named "%s"' % (
schema.TYPE_CHARTGROUP, 'invalid')
self.assertRaisesRegexp(
exceptions.BuildChartGroupException, error_re,
armada_manifest.find_chart_group_document, 'invalid')
def test_build_chart_deps_with_missing_dependency_fails(self):
"""Validate that attempting to build a chart that points to

@ -15,9 +15,9 @@
import copy
import json
import os
import yaml
import testtools
import yaml
from armada.handlers.override import Override
from armada.handlers import schema
@ -25,7 +25,6 @@ from armada.exceptions import override_exceptions
class OverrideTestCase(testtools.TestCase):
def setUp(self):
super(OverrideTestCase, self).setUp()
self.basepath = os.path.join(os.path.dirname(__file__))
@ -80,8 +79,9 @@ class OverrideTestCase(testtools.TestCase):
][0]
self.assertEqual('overridden', target_doc['data']['release_prefix'])
override = ('manifest:simple-armada:chart_groups='
'blog-group3,blog-group4', )
override = (
'manifest:simple-armada:chart_groups='
'blog-group3,blog-group4', )
# Case 2: Checking if list gets updated.
ovr = Override(original_documents, override, [values_yaml])
@ -93,8 +93,9 @@ class OverrideTestCase(testtools.TestCase):
with open(comparison_yaml) as c:
comparison_documents = list(yaml.safe_load_all(c.read()))
# verifying that the override is correct
self.assertEqual(original_documents[2]['data']['chart_groups'],
comparison_documents[0]['data']['chart_groups'])
self.assertEqual(
original_documents[2]['data']['chart_groups'],
comparison_documents[0]['data']['chart_groups'])
def test_update_manifests_invalid_override_format(self):
with open(self.base_manifest) as f:
@ -141,8 +142,9 @@ class OverrideTestCase(testtools.TestCase):
ovr.update_document(documents_modified[0])
# after the update, both documents are equal
self.assertEqual(ovr.documents[0]['data']['chart_name'],
documents_modified[0]['data']['chart_name'])
self.assertEqual(
ovr.documents[0]['data']['chart_name'],
documents_modified[0]['data']['chart_name'])
self.assertEqual(ovr.documents[0], documents_modified[0])
# Case 2: Checking if dictionaries get updated
@ -151,8 +153,9 @@ class OverrideTestCase(testtools.TestCase):
ovr.update_document(documents_modified[0])
# after the update, both documents are equal
self.assertEqual(ovr.documents[0]['data']['values'],
documents_modified[0]['data']['values'])
self.assertEqual(
ovr.documents[0]['data']['values'],
documents_modified[0]['data']['values'])
self.assertEqual(ovr.documents[0], documents_modified[0])
# Case 3: Checking if lists get updated
@ -161,10 +164,11 @@ class OverrideTestCase(testtools.TestCase):
ovr.update_document(documents_modified[0])
# after the update, both documents are equal
self.assertEqual(['foo', 'bar'],
ovr.documents[0]['data']['dependencies'])
self.assertEqual(documents_modified[0]['data']['dependencies'],
ovr.documents[0]['data']['dependencies'])
self.assertEqual(
['foo', 'bar'], ovr.documents[0]['data']['dependencies'])
self.assertEqual(
documents_modified[0]['data']['dependencies'],
ovr.documents[0]['data']['dependencies'])
self.assertEqual(ovr.documents[0], documents_modified[0])
def test_update_chart_document_keys_not_removed_with_override(self):
@ -198,8 +202,9 @@ class OverrideTestCase(testtools.TestCase):
ovr.update_document(documents_modified[1])
# after the update, both documents are equal
self.assertEqual(ovr.documents[1]['data']['sequenced'],
documents_modified[1]['data']['sequenced'])
self.assertEqual(
ovr.documents[1]['data']['sequenced'],
documents_modified[1]['data']['sequenced'])
self.assertEqual(ovr.documents[1], documents_modified[1])
def test_update_chart_group_document_keys_not_removed_with_override(self):
@ -233,8 +238,9 @@ class OverrideTestCase(testtools.TestCase):
ovr.update_document(documents_modified[2])
# after the update, both documents are equal
self.assertEqual(ovr.documents[2]['data']['release_prefix'],
documents_modified[2]['data']['release_prefix'])
self.assertEqual(
ovr.documents[2]['data']['release_prefix'],
documents_modified[2]['data']['release_prefix'])
self.assertEqual(ovr.documents[2], documents_modified[2])
def test_update_armada_manifest_keys_not_removed_with_override(self):
@ -278,7 +284,8 @@ class OverrideTestCase(testtools.TestCase):
with open(self.base_manifest) as f, open(expected) as e:
documents = list(yaml.safe_load_all(f.read()))
doc_path = ['manifest', 'simple-armada']
override = ('manifest:simple-armada:chart_groups=\
override = (
'manifest:simple-armada:chart_groups=\
blog-group3,blog-group4', )
ovr = Override(documents, override)
ovr.update_manifests()
@ -312,7 +319,6 @@ class OverrideTestCase(testtools.TestCase):
class OverrideNegativeTestCase(testtools.TestCase):
def setUp(self):
super(OverrideNegativeTestCase, self).setUp()
self.basepath = os.path.join(os.path.dirname(__file__))
@ -342,8 +348,9 @@ class OverrideNegativeTestCase(testtools.TestCase):
override = ('manifest:simple-armada:name=' 'overridden', )
ovr = Override(original_documents, override)
self.assertRaises(override_exceptions.InvalidOverrideValueException,
ovr.update_manifests)
self.assertRaises(
override_exceptions.InvalidOverrideValueException,
ovr.update_manifests)
def test_load_yaml_file_invalid(self):
missing_yaml = "{}/templates/non_existing_yaml.yaml". \
@ -351,15 +358,16 @@ class OverrideNegativeTestCase(testtools.TestCase):
with open(self.base_manifest) as f:
documents = list(yaml.safe_load_all(f.read()))
ovr = Override(documents)
self.assertRaises(override_exceptions.InvalidOverrideFileException,
ovr._load_yaml_file, missing_yaml)
self.assertRaises(
override_exceptions.InvalidOverrideFileException,
ovr._load_yaml_file, missing_yaml)
def test_find_document_type_invalid(self):
with open(self.base_manifest) as f:
documents = list(yaml.safe_load_all(f.read()))
ovr = Override(documents)
self.assertRaises(ValueError, ovr.find_document_type,
'non_existing_document')
self.assertRaises(
ValueError, ovr.find_document_type, 'non_existing_document')
def test_convert_array_to_dict_invalid(self):
data_path = ['a', 'b', 'c']

@ -12,20 +12,19 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from armada.handlers.release_diff import ReleaseDiff
from armada.tests.unit import base
from google.protobuf.any_pb2 import Any
from hapi.chart.chart_pb2 import Chart
from hapi.chart.config_pb2 import Config
from hapi.chart.metadata_pb2 import Metadata
from hapi.chart.template_pb2 import Template
from armada.handlers.release_diff import ReleaseDiff
from armada.tests.unit import base
# Tests for diffs which can occur in both top-level or dependency charts,
# and thus are inherited by both of those test classes.
class _BaseReleaseDiffTestCase():
def setUp(self):
super(base.ArmadaTestCase, self).setUp()
self.old_chart = self.make_chart()
@ -67,8 +66,9 @@ class _BaseReleaseDiffTestCase():
new_chart = self.make_chart()
chart_to_update = self.get_chart_to_update(new_chart)
update_chart(chart_to_update)
diff = ReleaseDiff(self.old_chart, self.old_values, new_chart,
self.old_values).get_diff()
diff = ReleaseDiff(
self.old_chart, self.old_values, new_chart,
self.old_values).get_diff()
self.assertTrue(diff)
def get_chart_to_update(self, chart):
@ -78,89 +78,82 @@ class _BaseReleaseDiffTestCase():
new_chart = self.make_chart()
chart_to_update = self.get_chart_to_update(new_chart)
chart_to_update.metadata.description = 'new chart description'
diff = ReleaseDiff(self.old_chart, self.old_values, new_chart,
self.old_values).get_diff()
diff = ReleaseDiff(
self.old_chart, self.old_values, new_chart,
self.old_values).get_diff()
self.assertFalse(diff)
def test_metadata_name_diff(self):
def update_chart(chart):
chart.metadata.name = 'new_chart_name'
self._test_chart_diff(update_chart)
def test_default_values_diff(self):
def update_chart(chart):
chart.values.raw = '{param: d2}'
self._test_chart_diff(update_chart)
def test_template_name_diff(self):
def update_chart(chart):
chart.templates[0].name = 'new_template_name'
self._test_chart_diff(update_chart)
def test_template_data_diff(self):
def update_chart(chart):
chart.templates[0].data = 'new template content'.encode()
self._test_chart_diff(update_chart)
def test_add_template_diff(self):
def update_chart(chart):
chart.templates.extend([
Template(
name='new_template_name',
data='new template content'.encode())
])
chart.templates.extend(
[
Template(
name='new_template_name',
data='new template content'.encode())
])
self._test_chart_diff(update_chart)
def test_remove_template_diff(self):
def update_chart(chart):
del chart.templates[0]
self._test_chart_diff(update_chart)
def test_file_type_url_diff(self):
def update_chart(chart):
chart.files[0].type_url = './new_file_name.ext'
self._test_chart_diff(update_chart)
def test_file_value_diff(self):
def update_chart(chart):
chart.files[0].value = 'new file content'.encode()
self._test_chart_diff(update_chart)
def test_add_file_diff(self):
def update_chart(chart):
chart.files.extend([
Any(type_url='./new_file_name.ext',
value='new file content'.encode())
])
chart.files.extend(
[
Any(
type_url='./new_file_name.ext',
value='new file content'.encode())
])
self._test_chart_diff(update_chart)
def test_remove_file_diff(self):
def update_chart(chart):
del chart.files[0]
self._test_chart_diff(update_chart)
def test_add_dependency_diff(self):
def update_chart(chart):
dep = self._make_chart()
dep.metadata.name = 'dep2'
@ -169,7 +162,6 @@ class _BaseReleaseDiffTestCase():
self._test_chart_diff(update_chart)
def test_remove_dependency_diff(self):
def update_chart(chart):
del chart.dependencies[0]
@ -178,26 +170,26 @@ class _BaseReleaseDiffTestCase():
# Test diffs (or absence of) in top-level chart / values.
class ReleaseDiffTestCase(_BaseReleaseDiffTestCase, base.ArmadaTestCase):
def get_chart_to_update(self, chart):
return chart
def test_same_input_no_diff(self):
diff = ReleaseDiff(self.old_chart, self.old_values, self.make_chart(),
self.make_values()).get_diff()
diff = ReleaseDiff(
self.old_chart, self.old_values, self.make_chart(),
self.make_values()).get_diff()
self.assertFalse(diff)
def test_override_values_diff(self):
new_values = {'param': 'o2'}
diff = ReleaseDiff(self.old_chart, self.old_values, self.old_chart,
new_values).get_diff()
diff = ReleaseDiff(
self.old_chart, self.old_values, self.old_chart,
new_values).get_diff()
self.assertTrue(diff)
# Test diffs in dependencies.
class DependencyReleaseDiffTestCase(_BaseReleaseDiffTestCase,
base.ArmadaTestCase):
def get_chart_to_update(self, chart):
return chart.dependencies[0]
@ -205,6 +197,5 @@ class DependencyReleaseDiffTestCase(_BaseReleaseDiffTestCase,
# Test diffs in transitive dependencies.
class TransitiveDependencyReleaseDiffTestCase(_BaseReleaseDiffTestCase,
base.ArmadaTestCase):
def get_chart_to_update(self, chart):
return chart.dependencies[0].dependencies[0]

@ -15,7 +15,6 @@
import mock
from armada import const
from armada.handlers import test
from armada.handlers import tiller
from armada.tests.unit import base
@ -24,9 +23,7 @@ from armada.utils import helm
class TestHandlerTestCase(base.ArmadaTestCase):
def _test_test_release_for_success(self, expected_success, results):
@mock.patch('armada.handlers.tiller.K8s')
def do_test(_):
tiller_obj = tiller.Tiller('host', '8080', None)
@ -47,26 +44,29 @@ class TestHandlerTestCase(base.ArmadaTestCase):
self._test_test_release_for_success(True, [])
def test_unknown(self):
self._test_test_release_for_success(False, [
AttrDict(**{'status': helm.TESTRUN_STATUS_SUCCESS}),
AttrDict(**{'status': helm.TESTRUN_STATUS_UNKNOWN})
])
self._test_test_release_for_success(
False, [
AttrDict(**{'status': helm.TESTRUN_STATUS_SUCCESS}),
AttrDict(**{'status': helm.TESTRUN_STATUS_UNKNOWN})
])
def test_success(self):
self._test_test_release_for_success(
True, [AttrDict(**{'status': helm.TESTRUN_STATUS_SUCCESS})])
def test_failure(self):
self._test_test_release_for_success(False, [
AttrDict(**{'status': helm.TESTRUN_STATUS_SUCCESS}),
AttrDict(**{'status': helm.TESTRUN_STATUS_FAILURE})
])
self._test_test_release_for_success(
False, [
AttrDict(**{'status': helm.TESTRUN_STATUS_SUCCESS}),
AttrDict(**{'status': helm.TESTRUN_STATUS_FAILURE})
])
def test_running(self):
self._test_test_release_for_success(False, [
AttrDict(**{'status': helm.TESTRUN_STATUS_SUCCESS}),
AttrDict(**{'status': helm.TESTRUN_STATUS_RUNNING})
])
self._test_test_release_for_success(
False, [
AttrDict(**{'status': helm.TESTRUN_STATUS_SUCCESS}),
AttrDict(**{'status': helm.TESTRUN_STATUS_RUNNING})
])
def test_cg_disabled(self):
"""Test that tests are disabled when a chart group disables all

@ -23,15 +23,15 @@ from armada.tests.test_utils import AttrDict
class TillerTestCase(base.ArmadaTestCase):
@mock.patch.object(tiller.Tiller, '_get_tiller_ip')
@mock.patch('armada.handlers.tiller.K8s')
@mock.patch('armada.handlers.tiller.grpc')
@mock.patch('armada.handlers.tiller.Config')
@mock.patch('armada.handlers.tiller.InstallReleaseRequest')
@mock.patch('armada.handlers.tiller.ReleaseServiceStub')
def test_install_release(self, mock_stub, mock_install_request,
mock_config, mock_grpc, mock_k8s, mock_ip):
def test_install_release(
self, mock_stub, mock_install_request, mock_config, mock_grpc,
mock_k8s, mock_ip):
# instantiate Tiller object
mock_grpc.insecure_channel.return_value = mock.Mock()
mock_ip.return_value = '0.0.0.0'
@ -63,8 +63,9 @@ class TillerTestCase(base.ArmadaTestCase):
namespace=namespace,
wait=wait,
timeout=timeout)
(mock_stub(tiller_obj.channel).InstallRelease.assert_called_with(
release_request, timeout + 60, metadata=tiller_obj.metadata))
(
mock_stub(tiller_obj.channel).InstallRelease.assert_called_with(
release_request, timeout + 60, metadata=tiller_obj.metadata))
@mock.patch('armada.handlers.tiller.K8s', autospec=True)
@mock.patch.object(tiller.Tiller, '_get_tiller_ip', autospec=True)
@ -85,10 +86,10 @@ class TillerTestCase(base.ArmadaTestCase):
mock_grpc.insecure_channel.assert_called_once_with(
'%s:%s' % (str(mock.sentinel.ip), str(mock.sentinel.port)),
options=[('grpc.max_send_message_length',
tiller.MAX_MESSAGE_LENGTH),
('grpc.max_receive_message_length',
tiller.MAX_MESSAGE_LENGTH)])
options=[
('grpc.max_send_message_length', tiller.MAX_MESSAGE_LENGTH),
('grpc.max_receive_message_length', tiller.MAX_MESSAGE_LENGTH)
])
@mock.patch('armada.handlers.tiller.K8s', autospec=True)
@mock.patch('armada.handlers.tiller.grpc', autospec=True)
@ -100,8 +101,8 @@ class TillerTestCase(base.ArmadaTestCase):
@mock.patch.object(tiller.Tiller, '_get_tiller_pod', autospec=True)
@mock.patch('armada.handlers.tiller.K8s', autospec=True)
@mock.patch('armada.handlers.tiller.grpc', autospec=True)
def test_get_tiller_ip_with_mocked_pod(self, mock_grpc, mock_k8s,
mock_pod):
def test_get_tiller_ip_with_mocked_pod(
self, mock_grpc, mock_k8s, mock_pod):
status = mock.Mock(pod_ip='1.1.1.1')
mock_pod.return_value.status = status
tiller_obj = tiller.Tiller()
@ -110,14 +111,14 @@ class TillerTestCase(base.ArmadaTestCase):
@mock.patch.object(tiller.Tiller, '_get_tiller_ip', autospec=True)
@mock.patch('armada.handlers.tiller.K8s', autospec=True)
@mock.patch('armada.handlers.tiller.grpc', autospec=True)
def test_get_tiller_pod_throws_exception(self, mock_grpc, mock_k8s,
mock_ip):
def test_get_tiller_pod_throws_exception(
self, mock_grpc, mock_k8s, mock_ip):
mock_k8s.get_namespace_pod.return_value.items = []
tiller_obj = tiller.Tiller()
mock_grpc.insecure_channel.side_effect = ex.ChannelException()
self.assertRaises(ex.TillerPodNotRunningException,
tiller_obj._get_tiller_pod)
self.assertRaises(
ex.TillerPodNotRunningException, tiller_obj._get_tiller_pod)
@mock.patch.object(tiller.Tiller, '_get_tiller_ip', autospec=True)
@mock.patch('armada.handlers.tiller.K8s', autospec=True)
@ -241,22 +242,25 @@ class TillerTestCase(base.ArmadaTestCase):
@mock.patch('armada.handlers.tiller.grpc')
@mock.patch.object(tiller, 'ListReleasesRequest')
@mock.patch.object(tiller, 'ReleaseServiceStub')
def test_list_releases_paged(self, mock_stub, mock_list_releases_request,
mock_grpc, _):
def test_list_releases_paged(
self, mock_stub, mock_list_releases_request, mock_grpc, _):
page_count = 3
release_count = tiller.LIST_RELEASES_PAGE_SIZE * page_count
releases = [mock.Mock() for i in range(release_count)]
for i, release in enumerate(releases):
release.name = mock.PropertyMock(return_value=str(i))
pages = [[
mock.Mock(
count=release_count,
total=release_count + 5,
next='' if i == page_count - 1 else str(
(tiller.LIST_RELEASES_PAGE_SIZE * (i + 1))),
releases=releases[tiller.LIST_RELEASES_PAGE_SIZE *
i:tiller.LIST_RELEASES_PAGE_SIZE * (i + 1)])
] for i in range(page_count)]
pages = [
[
mock.Mock(
count=release_count,
total=release_count + 5,
next='' if i == page_count - 1 else str(
(tiller.LIST_RELEASES_PAGE_SIZE * (i + 1))),
releases=releases[tiller.LIST_RELEASES_PAGE_SIZE
* i:tiller.LIST_RELEASES_PAGE_SIZE
* (i + 1)])
] for i in range(page_count)
]
mock_stub.return_value.ListReleases.side_effect = pages
mock_list_releases_side_effect = [
@ -280,8 +284,8 @@ class TillerTestCase(base.ArmadaTestCase):
list_release_request_calls = [
mock.call(
offset=''
if i == 0 else str(tiller.LIST_RELEASES_PAGE_SIZE * i),
offset='' if i == 0 else str(
tiller.LIST_RELEASES_PAGE_SIZE * i),
limit=tiller.LIST_RELEASES_PAGE_SIZE,
status_codes=tiller.const.STATUS_ALL)
for i in range(page_count)
@ -292,8 +296,9 @@ class TillerTestCase(base.ArmadaTestCase):
@mock.patch('armada.handlers.tiller.grpc')
@mock.patch.object(tiller, 'GetReleaseContentRequest')
@mock.patch.object(tiller, 'ReleaseServiceStub')
def test_get_release_content(self, mock_release_service_stub,
mock_release_content_request, mock_grpc, _):
def test_get_release_content(
self, mock_release_service_stub, mock_release_content_request,
mock_grpc, _):
mock_release_service_stub.return_value.GetReleaseContent\
.return_value = {}
@ -311,8 +316,9 @@ class TillerTestCase(base.ArmadaTestCase):
@mock.patch('armada.handlers.tiller.grpc')
@mock.patch.object(tiller, 'GetVersionRequest')
@mock.patch.object(tiller, 'ReleaseServiceStub')
def test_tiller_version(self, mock_release_service_stub,
mock_version_request, mock_grpc, _):
def test_tiller_version(
self, mock_release_service_stub, mock_version_request, mock_grpc,
_):
mock_version = mock.Mock()
mock_version.Version.sem_ver = mock.sentinel.sem_ver
@ -336,9 +342,9 @@ class TillerTestCase(base.ArmadaTestCase):
@mock.patch.object(tiller, 'GetVersionRequest')
@mock.patch.object(tiller, 'GetReleaseStatusRequest')
@mock.patch.object(tiller, 'ReleaseServiceStub')
def test_get_release_status(self, mock_release_service_stub,
mock_rel_status_request, mock_version_request,
mock_grpc, _):
def test_get_release_status(
self, mock_release_service_stub, mock_rel_status_request,
mock_version_request, mock_grpc, _):
mock_release_service_stub.return_value.GetReleaseStatus. \
return_value = {}
@ -357,8 +363,9 @@ class TillerTestCase(base.ArmadaTestCase):
@mock.patch('armada.handlers.tiller.grpc')
@mock.patch.object(tiller, 'UninstallReleaseRequest')
@mock.patch.object(tiller, 'ReleaseServiceStub')
def test_uninstall_release(self, mock_release_service_stub,
mock_uninstall_release_request, mock_grpc, _):
def test_uninstall_release(
self, mock_release_service_stub, mock_uninstall_release_request,
mock_grpc, _):
mock_release_service_stub.return_value.UninstallRelease\
.return_value = {}
@ -379,8 +386,9 @@ class TillerTestCase(base.ArmadaTestCase):
@mock.patch('armada.handlers.tiller.grpc')
@mock.patch.object(tiller, 'RollbackReleaseRequest')
@mock.patch.object(tiller, 'ReleaseServiceStub')
def test_rollback_release(self, mock_release_service_stub,
mock_rollback_release_request, _, __):
def test_rollback_release(
self, mock_release_service_stub, mock_rollback_release_request, _,
__):
mock_release_service_stub.return_value.RollbackRelease\
.return_value = {}
@ -427,8 +435,9 @@ class TillerTestCase(base.ArmadaTestCase):
@mock.patch('armada.handlers.tiller.Config')
@mock.patch.object(tiller, 'UpdateReleaseRequest')
@mock.patch.object(tiller, 'ReleaseServiceStub')
def test_update_release(self, mock_release_service_stub,
mock_update_release_request, mock_config, _, __):
def test_update_release(
self, mock_release_service_stub, mock_update_release_request,
mock_config, _, __):
release = 'release'
chart = {}
namespace = 'namespace'
@ -507,20 +516,20 @@ class TillerTestCase(base.ArmadaTestCase):
timeout + tiller.GRPC_EPSILON,
metadata=tiller_obj.metadata)
expected_result = tiller.TillerResult(release, namespace, status,
description, version)
expected_result = tiller.TillerResult(
release, namespace, status, description, version)
self.assertEqual(expected_result, result)
def _test_test_release(self, grpc_response_mock):
@mock.patch('armada.handlers.tiller.K8s')
@mock.patch('armada.handlers.tiller.grpc')
@mock.patch('armada.handlers.tiller.Config')
@mock.patch.object(tiller, 'TestReleaseRequest')
@mock.patch.object(tiller, 'ReleaseServiceStub')
def do_test(self, mock_release_service_stub, mock_test_release_request,
mock_config, _, __):
def do_test(
self, mock_release_service_stub, mock_test_release_request,
mock_config, _, __):
tiller_obj = tiller.Tiller('host', '8080', None)
release = 'release'
test_suite_run = {}
@ -531,14 +540,11 @@ class TillerTestCase(base.ArmadaTestCase):
tiller_obj.get_release_status = mock.Mock()
tiller_obj.get_release_status.return_value = AttrDict(
**{
'info':
AttrDict(
'info': AttrDict(
**{
'status':
AttrDict(
'status': AttrDict(
**{'last_test_suite_run': test_suite_run}),
'Description':
'Failed'
'Description': 'Failed'
})
})
@ -549,41 +555,47 @@ class TillerTestCase(base.ArmadaTestCase):
do_test(self)
def test_test_release_no_tests(self):
self._test_test_release([
AttrDict(**{
'msg': 'No Tests Found',
'status': helm.TESTRUN_STATUS_UNKNOWN
})
])
self._test_test_release(
[
AttrDict(
**{
'msg': 'No Tests Found',
'status': helm.TESTRUN_STATUS_UNKNOWN
})
])
def test_test_release_success(self):
self._test_test_release([
AttrDict(**{
'msg': 'RUNNING: ...',
'status': helm.TESTRUN_STATUS_RUNNING
}),
AttrDict(**{
'msg': 'SUCCESS: ...',
'status': helm.TESTRUN_STATUS_SUCCESS
})
])
self._test_test_release(
[
AttrDict(
**{
'msg': 'RUNNING: ...',
'status': helm.TESTRUN_STATUS_RUNNING
}),
AttrDict(
**{
'msg': 'SUCCESS: ...',
'status': helm.TESTRUN_STATUS_SUCCESS
})
])
def test_test_release_failure(self):
self._test_test_release([
AttrDict(**{
'msg': 'RUNNING: ...',
'status': helm.TESTRUN_STATUS_RUNNING
}),
AttrDict(**{
'msg': 'FAILURE: ...',
'status': helm.TESTRUN_STATUS_FAILURE
})
])
self._test_test_release(
[
AttrDict(
**{
'msg': 'RUNNING: ...',
'status': helm.TESTRUN_STATUS_RUNNING
}),
AttrDict(
**{
'msg': 'FAILURE: ...',
'status': helm.TESTRUN_STATUS_FAILURE
})
])
def test_test_release_failure_to_run(self):
class Iterator:
def __iter__(self):
return self

@ -23,7 +23,6 @@ test_chart = {'wait': {'timeout': 10, 'native': {'enabled': False}}}
class ChartWaitTestCase(base.ArmadaTestCase):
def get_unit(self, chart_data, timeout=None, version=2):
chart = {
'schema': 'armada/Chart/v{}'.format(str(version)),
@ -118,60 +117,66 @@ class ChartWaitTestCase(base.ArmadaTestCase):
self.assertIsInstance(unit.waits[4], wait.StatefulSetWait)
def test_waits_init_min_ready_fails_if_not_controller(self):
def create_pod_wait_min_ready():
self.get_unit({
'wait': {
'resources': [{
'type': 'pod',
'labels': {
'foo': 'bar'
},
'min_ready': 5
}]
}
})
self.get_unit(
{
'wait': {
'resources': [
{
'type': 'pod',
'labels': {
'foo': 'bar'
},
'min_ready': 5
}
]
}
})
self.assertRaises(manifest_exceptions.ManifestException,
create_pod_wait_min_ready)
self.assertRaises(
manifest_exceptions.ManifestException, create_pod_wait_min_ready)
def create_job_wait_min_ready():
self.get_unit({
'wait': {
'resources': [{
'type': 'job',
'labels': {
'foo': 'bar'
},
'min_ready': 5
}]
}
})
self.get_unit(
{
'wait': {
'resources': [
{
'type': 'job',
'labels': {
'foo': 'bar'
},
'min_ready': 5
}
]
}
})
self.assertRaises(manifest_exceptions.ManifestException,
create_job_wait_min_ready)
self.assertRaises(
manifest_exceptions.ManifestException, create_job_wait_min_ready)
def test_waits_init_invalid_type(self):
def create_with_invalid_type():
self.get_unit({
'wait': {
'resources': [{
'type': 'invalid',
'labels': {
'foo': 'bar'
},
'min_ready': 5
}]
}
})
self.get_unit(
{
'wait': {
'resources': [
{
'type': 'invalid',
'labels': {
'foo': 'bar'
},
'min_ready': 5
}
]
}
})
self.assertRaises(manifest_exceptions.ManifestException,
create_with_invalid_type)
self.assertRaises(
manifest_exceptions.ManifestException, create_with_invalid_type)
@mock.patch.object(wait.ChartWait, 'get_resource_wait')
def test_wait(self, get_resource_wait):
def return_mock(*args, **kwargs):
return mock.MagicMock()
@ -194,7 +199,6 @@ class ChartWaitTestCase(base.ArmadaTestCase):
class PodWaitTestCase(base.ArmadaTestCase):
def get_unit(self, labels, version=2):
return wait.PodWait(
resource_type='pod',
@ -202,7 +206,6 @@ class PodWaitTestCase(base.ArmadaTestCase):
labels=labels)
def test_include_resource(self):
def mock_resource(annotations={}, owner_references=None):
resource = mock.Mock()
resource.metadata.annotations = annotations
@ -219,10 +222,11 @@ class PodWaitTestCase(base.ArmadaTestCase):
]
job_pods = [
mock_resource(owner_references=[mock.Mock(kind='Job')]),
mock_resource(owner_references=[
mock.Mock(kind='NotAJob'),
mock.Mock(kind='Job')
])
mock_resource(
owner_references=[
mock.Mock(kind='NotAJob'),
mock.Mock(kind='Job')
])
]
included_pods = [
mock_resource(),
@ -248,13 +252,11 @@ class PodWaitTestCase(base.ArmadaTestCase):
class JobWaitTestCase(base.ArmadaTestCase):
def get_unit(self, labels):
return wait.JobWait(
resource_type='job', chart_wait=mock.MagicMock(), labels=labels)
def test_include_resource(self):
def mock_resource(annotations={}, owner_references=None):
resource = mock.Mock()
resource.metadata.annotations = annotations
@ -263,10 +265,11 @@ class JobWaitTestCase(base.ArmadaTestCase):
cronjob_jobs = [
mock_resource(owner_references=[mock.Mock(kind='CronJob')]),
mock_resource(owner_references=[
mock.Mock(kind='NotACronJob'),
mock.Mock(kind='CronJob')
])
mock_resource(
owner_references=[
mock.Mock(kind='NotACronJob'),
mock.Mock(kind='CronJob')
])
]
included_jobs = [
mock_resource(),

@ -18,7 +18,6 @@ from armada.utils import schema
class SchemaTestCase(unittest.TestCase):
def test_validate_load_schemas(self):
expected_schemas = [
'armada/Chart/v1', 'armada/ChartGroup/v1', 'armada/Manifest/v1'

@ -18,7 +18,6 @@ from armada.utils import release as rel
class ReleaseTestCase(unittest.TestCase):
def test_release_prefix_pass(self):
expected = 'armada-test'
prefix, release = ('armada', 'test')

@ -25,7 +25,6 @@ from armada.utils import source
class GitTestCase(base.ArmadaTestCase):
def _validate_git_clone(self, repo_dir, expected_ref=None):
self.assertTrue(os.path.isdir(repo_dir))
self.addCleanup(shutil.rmtree, repo_dir)
@ -38,23 +37,23 @@ class GitTestCase(base.ArmadaTestCase):
as git_file:
self.assertIn(expected_ref, git_file.read())
@testtools.skipUnless(base.is_connected(),
'git clone requires network connectivity.')
@testtools.skipUnless(
base.is_connected(), 'git clone requires network connectivity.')
def test_git_clone_good_url(self):
url = 'https://opendev.org/airship/armada.git'
git_dir = source.git_clone(url)
self._validate_git_clone(git_dir)
@testtools.skipUnless(base.is_connected(),
'git clone requires network connectivity.')
@testtools.skipUnless(
base.is_connected(), 'git clone requires network connectivity.')
def test_git_clone_commit(self):
url = 'https://opendev.org/airship/armada.git'
commit = 'cba78d1d03e4910f6ab1691bae633c5bddce893d'
git_dir = source.git_clone(url, commit)
self._validate_git_clone(git_dir, commit)
@testtools.skipUnless(base.is_connected(),
'git clone requires network connectivity.')
@testtools.skipUnless(
base.is_connected(), 'git clone requires network connectivity.')
def test_git_clone_ref(self):
ref = 'refs/changes/54/457754/73'
git_dir = source.git_clone(
@ -62,29 +61,29 @@ class GitTestCase(base.ArmadaTestCase):
self._validate_git_clone(git_dir, ref)
@test_utils.attr(type=['negative'])
@testtools.skipUnless(base.is_connected(),
'git clone requires network connectivity.')
@testtools.skipUnless(
base.is_connected(), 'git clone requires network connectivity.')
def test_git_clone_empty_url(self):
url = ''
# error_re = '%s is not a valid git repository.' % url
self.assertRaises(source_exceptions.GitException, source.git_clone,
url)
self.assertRaises(
source_exceptions.GitException, source.git_clone, url)
@test_utils.attr(type=['negative'])
@testtools.skipUnless(base.is_connected(),
'git clone requires network connectivity.')
@testtools.skipUnless(
base.is_connected(), 'git clone requires network connectivity.')
def test_git_clone_bad_url(self):
url = 'https://opendev.org/dummy/armada'
self.assertRaises(source_exceptions.GitException, source.git_clone,
url)
self.assertRaises(
source_exceptions.GitException, source.git_clone, url)
# TODO need to design a positive proxy test,
# difficult to achieve behind a corporate proxy
@test_utils.attr(type=['negative'])
@testtools.skipUnless(base.is_connected(),
'git clone requires network connectivity.')
@testtools.skipUnless(
base.is_connected(), 'git clone requires network connectivity.')
def test_git_clone_fake_proxy(self):
url = 'https://opendev.org/airship/armada.git'
proxy_url = test_utils.rand_name(
@ -140,14 +139,15 @@ class GitTestCase(base.ArmadaTestCase):
mock_path.exists.return_value = False
path = '/tmp/armada'
self.assertRaises(source_exceptions.InvalidPathException,
source.extract_tarball, path)
self.assertRaises(
source_exceptions.InvalidPathException, source.extract_tarball,
path)
mock_tarfile.open.assert_not_called()
mock_tarfile.extractall.assert_not_called()
@testtools.skipUnless(base.is_connected(),
'git clone requires network connectivity.')
@testtools.skipUnless(
base.is_connected(), 'git clone requires network connectivity.')
@mock.patch.object(source, 'LOG')
def test_source_cleanup(self, mock_log):
url = 'https://opendev.org/airship/armada.git'
@ -159,8 +159,8 @@ class GitTestCase(base.ArmadaTestCase):
@mock.patch.object(source, 'LOG')
@mock.patch('armada.utils.source.shutil')
@mock.patch('armada.utils.source.os.path')
def test_source_cleanup_missing_git_path(self, mock_path, mock_shutil,
mock_log):
def test_source_cleanup_missing_git_path(
self, mock_path, mock_shutil, mock_log):
# Verify that passing in a missing path does nothing but log a warning.
mock_path.exists.return_value = False
path = 'armada'
@ -169,11 +169,11 @@ class GitTestCase(base.ArmadaTestCase):
mock_shutil.rmtree.assert_not_called()
self.assertTrue(mock_log.warning.called)
actual_call = mock_log.warning.mock_calls[0][1]
self.assertEqual(('Could not find the chart path %s to delete.', path),
actual_call)
self.assertEqual(
('Could not find the chart path %s to delete.', path), actual_call)
@testtools.skipUnless(base.is_connected(),
'git clone requires network connectivity.')
@testtools.skipUnless(
base.is_connected(), 'git clone requires network connectivity.')
@test_utils.attr(type=['negative'])
@mock.patch.object(source, 'os')
def test_git_clone_ssh_auth_method_fails_auth(self, mock_os):
@ -187,8 +187,8 @@ class GitTestCase(base.ArmadaTestCase):
ref='refs/changes/17/388517/5',
auth_method='SSH')
@testtools.skipUnless(base.is_connected(),
'git clone requires network connectivity.')
@testtools.skipUnless(
base.is_connected(), 'git clone requires network connectivity.')
@test_utils.attr(type=['negative'])
@mock.patch.object(source, 'os')
def test_git_clone_ssh_auth_method_missing_ssh_key(self, mock_os):

@ -13,9 +13,9 @@
# limitations under the License.
import os
import yaml
import testtools
import yaml
from armada.tests.unit import base
from armada.utils import validate
@ -67,7 +67,6 @@ data:
class BaseValidateTest(base.ArmadaTestCase):
def setUp(self):
super(BaseValidateTest, self).setUp()
self.basepath = os.path.join(os.path.dirname(__file__), os.pardir)
@ -110,7 +109,6 @@ class ValidateOwnExamplesTestCase(BaseValidateTest):
class ValidateTestCase(BaseValidateTest):
def test_validate_armada_yaml_passes(self):
template = '{}/resources/valid_armada_document.yaml'.format(
self.basepath)
@ -215,12 +213,11 @@ data:
class ValidateNegativeTestCase(BaseValidateTest):
def test_validate_no_dictionary_expect_type_error(self):
expected_error = 'The provided input "invalid" must be a dictionary.'
self.assertRaisesRegexp(TypeError, expected_error,
validate.validate_armada_documents,
['invalid'])
self.assertRaisesRegexp(
TypeError, expected_error, validate.validate_armada_documents,
['invalid'])
def test_validate_invalid_chart_armada_manifest(self):
template = '{}/resources/valid_armada_document.yaml'.format(

@ -12,10 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from armada.utils.helm import get_test_suite_run_success
import time
from armada.utils.helm import get_test_suite_run_success
def release_prefixer(prefix, release):
'''

@ -65,8 +65,9 @@ def git_clone(repo_url, ref='master', proxy_server=None, auth_method=None):
'with SSH authentication.', repo_url, ref)
if not os.path.exists(CONF.ssh_key_path):
LOG.error('SSH auth method was specified for cloning repo but '
'the SSH key under CONF.ssh_key_path was not found.')
LOG.error(
'SSH auth method was specified for cloning repo but '
'the SSH key under CONF.ssh_key_path was not found.')
raise source_exceptions.GitSSHException(CONF.ssh_key_path)
ssh_cmd = (
@ -99,8 +100,8 @@ def git_clone(repo_url, ref='master', proxy_server=None, auth_method=None):
except git_exc.GitCommandError as e:
LOG.exception('Encountered GitCommandError during clone.')
if ssh_cmd and ssh_cmd in e.stderr:
raise source_exceptions.GitAuthException(repo_url,
CONF.ssh_key_path)
raise source_exceptions.GitAuthException(
repo_url, CONF.ssh_key_path)
elif 'Could not resolve proxy' in e.stderr:
raise source_exceptions.GitProxyException(proxy_server)
else:
@ -165,7 +166,7 @@ def source_cleanup(chart_path):
try:
shutil.rmtree(chart_path)
except OSError as e:
LOG.warning('Could not delete the path %s. Details: %s',
chart_path, e)
LOG.warning(
'Could not delete the path %s. Details: %s', chart_path, e)
else:
LOG.warning('Could not find the chart path %s to delete.', chart_path)

@ -12,10 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import jsonschema
import requests
import traceback
import jsonschema
import requests
from oslo_log import log as logging
from armada import const
@ -124,8 +124,9 @@ def validate_armada_document(document):
LOG.info('ValidationMessage: %s', vmsg.get_output_json())
details.append(vmsg.get_output())
except jsonschema.SchemaError as e:
error_message = ('The built-in Armada JSON schema %s is invalid. '
'Details: %s.' % (e.schema, e.message))
error_message = (
'The built-in Armada JSON schema %s is invalid. '
'Details: %s.' % (e.schema, e.message))
vmsg = ValidationMessage(
message=error_message,
error=True,

@ -32,14 +32,15 @@ class ValidationMessage(object):
or details for resolution.
"""
def __init__(self,
message='Document validation error.',
error=True,
name='Armada error',
level='Error',
schema=None,
doc_name=None,
diagnostic=None):
def __init__(
self,
message='Document validation error.',
error=True,
name='Armada error',
level='Error',
schema=None,
doc_name=None,
diagnostic=None):
# TODO(MarshM) should validate error and level inputs

@ -13,6 +13,7 @@ os-testr>=1.0.0 # Apache-2.0
flake8>=3.3.0
mock
responses>=0.8.1
yapf==0.26.0
yapf==0.27.0
flake8-import-order==0.18.1
grpcio-tools==1.16.0

tox.ini
@ -72,7 +72,7 @@ commands =
# Whitespace linter (for chart files)
bash {toxinidir}/tools/whitespace-linter.sh
yapf -dr {toxinidir}/armada {toxinidir}/setup.py
flake8 {posargs}
flake8 {toxinidir}/armada {toxinidir}/setup.py
# Run security linter as part of the pep8 gate instead of a separate zuul job.
bandit -r armada -n 5 -x armada/tests/*
@ -94,7 +94,7 @@ commands =
coverage xml -o cover/coverage.xml
coverage report
[testenv:yapf]
[testenv:fmt]
basepython = python3
deps = {[testenv]deps}
commands =
@ -102,8 +102,14 @@ commands =
[flake8]
filename = *.py
# These are ignored intentionally:
# W504 - line break after binary operator, we cannot have both
# W503 and W504 enabled
ignore = W504
show-source = true
# [H106] Don't put vim configuration in source files.
# [H201] No 'except:' at least use 'except Exception:'
# [H904] Delay string interpolations at logging calls.
enable-extensions = H106,H201,H904
# [W503] line break before binary operator
ignore = W503
exclude = .git,.tox,dist,*lib/python*,*egg,build,releasenotes,doc/*,hapi,venv
max-complexity = 24
application-import-names = armada
import-order-style = pep8
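
For reference, a minimal sketch (not part of the change itself) of previewing the wrapping style these hunks apply, driven through yapf's FormatCode API. The style-file path and every name in the snippet are illustrative assumptions, not code taken from this repository:

    # Illustrative only: how yapf 0.27.0 rewraps an over-long call under the
    # repository's .style.yapf, matching the pattern seen throughout the
    # hunks above.
    from yapf.yapflib.yapf_api import FormatCode

    # A snippet in the old wrapping style (continuation aligned with the
    # opening parenthesis); class, method and variable names are placeholders.
    unformatted = (
        "class ManifestTestCase(object):\n"
        "    def test_build_chart_deps(self):\n"
        "        self.assertEqual(expected_helm_toolkit_dependency,\n"
        "                         keystone_dependencies[0])\n")

    # style_config='.style.yapf' assumes this is run from the repository root;
    # FormatCode is expected to return (formatted_source, changed) here.
    formatted, changed = FormatCode(unformatted, style_config='.style.yapf')
    print(formatted)
    # Expected result (roughly): the call is split before its first argument,
    #         self.assertEqual(
    #             expected_helm_toolkit_dependency, keystone_dependencies[0])

The yapf -dr check in the pep8 environment above reads the same style file, so its diff output should agree with what this sketch prints.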