Implement bootaction API

- Implement boot action rendering and API
- Reorganize DB integration tests and add a tox -e postgres entrypoint
- Add boot action unit tests
- Add node filter unit test
- Add boot action context creation to deployment workflow
- Fix regression bug in MaaS Machines model
- Downgrade to Python 3.5 due to CICD limitations

Change-Id: I6c8f100cbe209f9b1c6c6ff1285365d89343ae2a
This commit is contained in:
Scott Hussey 2017-10-26 11:28:51 -05:00
parent 7af8623d91
commit f4dba218ac
45 changed files with 1394 additions and 454 deletions

View File

@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
FROM ubuntu:16.04 FROM python:3.5
ENV DEBIAN_FRONTEND noninteractive ENV DEBIAN_FRONTEND noninteractive
ENV container docker ENV container docker
@ -19,21 +19,6 @@ ENV PORT 9000
ENV LC_ALL C.UTF-8 ENV LC_ALL C.UTF-8
ENV LANG C.UTF-8 ENV LANG C.UTF-8
RUN apt -qq update && \
apt -y install git \
netbase \
python3-minimal \
python3-setuptools \
python3-pip \
python3-dev \
ca-certificates \
gcc \
g++ \
make \
libffi-dev \
libssl-dev --no-install-recommends
RUN pip3 install wheel
# Copy direct dependency requirements only to build a dependency layer # Copy direct dependency requirements only to build a dependency layer
COPY ./requirements-lock.txt /tmp/drydock/ COPY ./requirements-lock.txt /tmp/drydock/
RUN pip3 install -r /tmp/drydock/requirements-lock.txt RUN pip3 install -r /tmp/drydock/requirements-lock.txt

View File

@ -23,12 +23,15 @@ def upgrade():
*tables.ResultMessage.__schema__) *tables.ResultMessage.__schema__)
op.create_table(tables.ActiveInstance.__tablename__, op.create_table(tables.ActiveInstance.__tablename__,
*tables.ActiveInstance.__schema__) *tables.ActiveInstance.__schema__)
op.create_table(tables.BuildData.__tablename__, op.create_table(tables.BootAction.__tablename__,
*tables.BuildData.__schema__) *tables.BootAction.__schema__)
op.create_table(tables.BootActionStatus.__tablename__,
*tables.BootActionStatus.__schema__)
def downgrade(): def downgrade():
op.drop_table(tables.Tasks.__tablename__) op.drop_table(tables.Tasks.__tablename__)
op.drop_table(tables.ResultMessage.__tablename__) op.drop_table(tables.ResultMessage.__tablename__)
op.drop_table(tables.ActiveInstance.__tablename__) op.drop_table(tables.ActiveInstance.__tablename__)
op.drop_table(tables.BuildData.__tablename__) op.drop_table(tables.BootAction.__tablename__)
op.drop_table(tables.BootActionStatus.__tablename__)

View File

@ -70,8 +70,9 @@ are separate pipelines for the ``location`` field to build the URL that referenc
be sourced from and the ``data`` field (or the data sourced from resolving the ``location`` field). be sourced from and the ``data`` field (or the data sourced from resolving the ``location`` field).
The ``location`` string will be passed through the ``location_pipeline`` before it is queried. This response The ``location`` string will be passed through the ``location_pipeline`` before it is queried. This response
or the ``data`` field will then be passed through the ``data_pipeline``. Below are pipeline segments available or the ``data`` field will then be passed through the ``data_pipeline``. The data entity will start the pipeline
for use. as a bytestring meaning if it is defined in the ``data`` field, it will first be encoded into a bytestring.
Below are pipeline segments available for use.
base64_decode base64_decode
Decode the data element from base64 Decode the data element from base64
@ -79,6 +80,12 @@ base64_decode
base64_encode base64_encode
Encode the data element in base64 Encode the data element in base64
utf8_decode
Decode the data element from bytes to UTF-8 string
utf8_encode
Encode the data element from a UTF-8 string to bytes
template template
Treat the data element as a Jinja2 template and apply a node context to it. The defined context available Treat the data element as a Jinja2 template and apply a node context to it. The defined context available
to the template is below. to the template is below.

View File

@ -99,7 +99,7 @@ class TaskCreate(CliAction): # pylint: disable=too-few-public-methods
while True: while True:
time.sleep(self.poll_interval) time.sleep(self.poll_interval)
task = self.api_client.get_task(task_id=task_id) task = self.api_client.get_task(task_id=task_id)
if task.status in ['completed', 'terminated']: if task.get('status', '') in ['completed', 'terminated']:
return task return task

View File

@ -87,6 +87,12 @@ class DrydockConfig(object):
help='The URI database connect string.'), help='The URI database connect string.'),
] ]
# Options for the boot action framework
bootactions_options = [
cfg.StrOpt(
'report_url',
default='http://localhost:9000/api/v1.0/bootactions/')
]
# Enabled plugins # Enabled plugins
plugin_options = [ plugin_options = [
cfg.StrOpt( cfg.StrOpt(
@ -151,22 +157,31 @@ class DrydockConfig(object):
'deploy_node', 'deploy_node',
default=45, default=45,
help='Timeout in minutes for deploying a node'), help='Timeout in minutes for deploying a node'),
cfg.IntOpt(
'bootaction_final_status',
default=15,
help=
'Timeout in minutes between deployment completion and the all boot actions reporting status'
),
] ]
def __init__(self): def __init__(self):
self.conf = cfg.CONF self.conf = cfg.CONF
def register_options(self): def register_options(self, enable_keystone=True):
self.conf.register_opts(DrydockConfig.options) self.conf.register_opts(DrydockConfig.options)
self.conf.register_opts(
DrydockConfig.bootactions_options, group='bootactions')
self.conf.register_opts(DrydockConfig.logging_options, group='logging') self.conf.register_opts(DrydockConfig.logging_options, group='logging')
self.conf.register_opts(DrydockConfig.plugin_options, group='plugins') self.conf.register_opts(DrydockConfig.plugin_options, group='plugins')
self.conf.register_opts( self.conf.register_opts(
DrydockConfig.database_options, group='database') DrydockConfig.database_options, group='database')
self.conf.register_opts( self.conf.register_opts(
DrydockConfig.timeout_options, group='timeouts') DrydockConfig.timeout_options, group='timeouts')
self.conf.register_opts( if enable_keystone:
loading.get_auth_plugin_conf_options('password'), self.conf.register_opts(
group='keystone_authtoken') loading.get_auth_plugin_conf_options('password'),
group='keystone_authtoken')
config_mgr = DrydockConfig() config_mgr = DrydockConfig()

View File

@ -20,9 +20,11 @@ from .designs import DesignsPartsKindsResource
from .designs import DesignsPartResource from .designs import DesignsPartResource
from .tasks import TasksResource from .tasks import TasksResource
from .tasks import TaskResource from .tasks import TaskResource
from .bootdata import BootdataResource
from .nodes import NodesResource from .nodes import NodesResource
from .health import HealthResource from .health import HealthResource
from .bootaction import BootactionUnitsResource
from .bootaction import BootactionFilesResource
from .bootaction import BootactionResource
from .base import DrydockRequest, BaseResource from .base import DrydockRequest, BaseResource
from .middleware import AuthMiddleware, ContextMiddleware, LoggingMiddleware from .middleware import AuthMiddleware, ContextMiddleware, LoggingMiddleware
@ -67,12 +69,15 @@ def start_api(state_manager=None, ingester=None, orchestrator=None):
('/designs/{design_id}/parts/{kind}/{name}', DesignsPartResource( ('/designs/{design_id}/parts/{kind}/{name}', DesignsPartResource(
state_manager=state_manager, orchestrator=orchestrator)), state_manager=state_manager, orchestrator=orchestrator)),
# API for nodes to discover their bootdata during curtin install
('/bootdata/{hostname}/{data_key}', BootdataResource(
state_manager=state_manager, orchestrator=orchestrator)),
# API to list current MaaS nodes # API to list current MaaS nodes
('/nodes', NodesResource()), ('/nodes', NodesResource()),
# API for nodes to discover their boot actions during curtin install
('/bootactions/nodes/{hostname}/units', BootactionUnitsResource(
state_manager=state_manager, orchestrator=orchestrator)),
('/bootactions/nodes/{hostname}/files', BootactionFilesResource(
state_manager=state_manager, orchestrator=orchestrator)),
('/bootactions/{action_id}', BootactionResource(
state_manager=state_manager, orchestrator=orchestrator)),
] ]
for path, res in v1_0_routes: for path, res in v1_0_routes:

View File

@ -23,7 +23,7 @@ import drydock_provisioner.error as errors
class BaseResource(object): class BaseResource(object):
def __init__(self): def __init__(self):
self.logger = logging.getLogger('control') self.logger = logging.getLogger('drydock')
def on_options(self, req, resp): def on_options(self, req, resp):
self_attrs = dir(self) self_attrs = dir(self)

View File

@ -0,0 +1,180 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handle resources for boot action API endpoints. """
import falcon
import ulid2
import tarfile
import io
import logging
from .base import StatefulResource
logger = logging.getLogger('drydock')
class BootactionResource(StatefulResource):
    """Resource handler for the ``/bootactions/{action_id}`` endpoint."""

    def __init__(self, orchestrator=None, **kwargs):
        super().__init__(**kwargs)
        # Orchestrator instance used to resolve site designs for boot actions.
        self.orchestrator = orchestrator

    def on_post(self, req, resp, action_id):
        """Post status messages or final status for a boot action.

        This endpoint does not use the standard oslo_policy enforcement as this
        endpoint is accessed by unmanned nodes. Instead it uses an internal key
        authentication.

        NOTE(review): no handler body is visible here — as written this method
        accepts the POST and does nothing; confirm implementation follows.

        :param req: falcon request
        :param resp: falcon response
        :param action_id: ULID ID of the boot action
        """
class BootactionAssetsResource(StatefulResource):
    """Shared GET handler that renders and tars boot action assets for a node."""

    def __init__(self, orchestrator=None, **kwargs):
        super().__init__(**kwargs)
        # Orchestrator used to compute the effective site design for the task.
        self.orchestrator = orchestrator

    def do_get(self, req, resp, hostname, asset_type):
        """Render ``unit`` type boot action assets for hostname.

        Get the boot action context for ``hostname`` from the database
        and render all ``unit`` type assets for the host. Validate host
        is providing the correct identity key in the ``X-Bootaction-Key``
        header.

        :param req: falcon request object
        :param resp: falcon response object
        :param hostname: URL path parameter indicating the calling host
        :param asset_type: Asset type to include in the response - ``unit``, ``file``, ``pkg_list``, ``all``
        """
        try:
            ba_ctx = self.state_manager.get_boot_action_context(hostname)
        except Exception as ex:
            # A DB failure while looking up the context is reported to the
            # caller as a 404 rather than a 500.
            self.logger.error(
                "Error locating boot action for %s" % hostname, exc_info=ex)
            raise falcon.HTTPNotFound()

        if ba_ctx is None:
            raise falcon.HTTPNotFound(
                description="Error locating boot action for %s" % hostname)

        # Raises HTTPUnauthorized/HTTPForbidden on bad or missing identity key.
        BootactionUtils.check_auth(ba_ctx, req)

        # ``all`` means no filtering; any other value filters assets by type.
        asset_type_filter = None if asset_type == 'all' else asset_type

        try:
            task = self.state_manager.get_task(ba_ctx['task_id'])
            design_status, site_design = self.orchestrator.get_effective_site(
                task.design_ref)

            assets = list()
            for ba in site_design.bootactions:
                if hostname in ba.target_nodes:
                    # Each matching boot action gets its own ULID action id.
                    action_id = ulid2.generate_binary_ulid()
                    assets.extend(
                        ba.render_assets(
                            hostname,
                            site_design,
                            action_id,
                            type_filter=asset_type_filter))
                    # NOTE(review): assumed to run once per matching boot
                    # action inside this loop — source indentation was
                    # ambiguous; confirm against the repository.
                    self.state_manager.post_boot_action(
                        hostname, ba_ctx['task_id'], ba_ctx['identity_key'],
                        action_id)

            # Stream all rendered assets back as a single gzipped tarball.
            tarball = BootactionUtils.tarbuilder(asset_list=assets)
            resp.set_header('Content-Type', 'application/gzip')
            resp.set_header('Content-Disposition',
                            "attachment; filename=\"%s-%s.tar.gz\"" %
                            (hostname, asset_type))
            resp.data = tarball
            resp.status = falcon.HTTP_200
            return
        except Exception as ex:
            self.logger.debug("Exception in boot action API.", exc_info=ex)
            raise falcon.HTTPInternalServerError(str(ex))
class BootactionUnitsResource(BootactionAssetsResource):
    """Resource serving ``unit`` type boot action assets for a node."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def on_get(self, req, resp, hostname):
        """Handle GET by delegating to the shared asset renderer for units."""
        msg = "Accessing boot action units resource for host %s." % hostname
        self.logger.debug(msg)
        super().do_get(req, resp, hostname, 'unit')
class BootactionFilesResource(BootactionAssetsResource):
    """Resource serving ``file`` type boot action assets for a node."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def on_get(self, req, resp, hostname):
        """Handle GET by delegating to the shared asset renderer for files."""
        # Log access for parity with the units resource, which logs each call.
        self.logger.debug(
            "Accessing boot action files resource for host %s." % hostname)
        super().do_get(req, resp, hostname, 'file')
class BootactionUtils(object):
    """Utility class shared by Boot Action API resources."""

    @staticmethod
    def check_auth(ba_ctx, req):
        """Check request authentication based on boot action context.

        Raise proper Falcon exception if authentication fails, otherwise
        silently return.

        :param ba_ctx: Boot Action context from database
        :param req: The falcon request object of the API call
        :raises falcon.HTTPUnauthorized: when no key header is supplied
        :raises falcon.HTTPForbidden: when the key is malformed or wrong
        """
        identity_key = req.get_header('X-Bootaction-Key', default='')

        if identity_key == '':
            raise falcon.HTTPUnauthorized(
                title='Unauthorized',
                description='No X-Bootaction-Key',
                challenges=['Bootaction-Key'])

        try:
            # bytes.fromhex raises ValueError on non-hex input; previously
            # that escaped as an unhandled 500 instead of a client error.
            req_key = bytes.fromhex(identity_key)
        except ValueError:
            raise falcon.HTTPForbidden(
                title='Unauthorized', description='Invalid X-Bootaction-Key')

        if ba_ctx['identity_key'] != req_key:
            # Do not log the key material itself: identity keys are secrets
            # and writing them to the log would leak credentials.
            # ``warning`` replaces the deprecated ``warn`` alias.
            logger.warning(
                "Forbidding boot action access - node: %s presented an invalid identity key."
                % ba_ctx['node_name'])
            raise falcon.HTTPForbidden(
                title='Unauthorized', description='Invalid X-Bootaction-Key')

    @staticmethod
    def tarbuilder(asset_list=None):
        """Create a tar file from rendered assets.

        Add each asset in ``asset_list`` to a gzip-compressed tar archive with
        the asset's defined path and permission. The assets need to have the
        rendered_bytes field populated. Return the archive contents as bytes.

        :param asset_list: list of objects.BootActionAsset instances
        """
        tarbytes = io.BytesIO()
        tarball = tarfile.open(
            mode='w:gz', fileobj=tarbytes, format=tarfile.GNU_FORMAT)
        asset_list = asset_list or []
        for a in asset_list:
            fileobj = io.BytesIO(a.rendered_bytes)
            tarasset = tarfile.TarInfo(name=a.path)
            tarasset.size = len(a.rendered_bytes)
            # Default to owner read/write when no explicit mode is given.
            tarasset.mode = a.permissions if a.permissions else 0o600
            # Files are owned by root inside the archive.
            tarasset.uid = 0
            tarasset.gid = 0
            tarball.addfile(tarasset, fileobj=fileobj)
        tarball.close()
        return tarbytes.getvalue()

View File

@ -1,120 +0,0 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handle resources for bootdata API endpoints.
THIS API IS DEPRECATED
"""
from oslo_config import cfg
from .base import StatefulResource
class BootdataResource(StatefulResource):
    """DEPRECATED resource serving node bootdata during curtin install."""

    # oslo.config options registered under the [bootdata] group.
    bootdata_options = [
        cfg.StrOpt(
            'prom_init',
            default='/etc/drydock/bootdata/join.sh',
            help='Path to file to distribute for prom_init.sh')
    ]

    def __init__(self, orchestrator=None, **kwargs):
        super(BootdataResource, self).__init__(**kwargs)
        # Nodes calling this API are unauthenticated, so permit any caller.
        self.authorized_roles = ['anyone']
        self.orchestrator = orchestrator
        cfg.CONF.register_opts(
            BootdataResource.bootdata_options, group='bootdata')
        # Read the prom_init script once at construction and cache it.
        init_file = open(cfg.CONF.bootdata.prom_init, 'r')
        self.prom_init = init_file.read()
        init_file.close()

    def on_get(self, req, resp, hostname, data_key):
        """Return the bootdata item named ``data_key`` for ``hostname``.

        Unrecognized ``data_key`` values fall through with no response body.
        """
        if data_key == 'promservice':
            resp.body = BootdataResource.prom_init_service
            resp.content_type = 'text/plain'
            return
        elif data_key == 'vfservice':
            resp.body = BootdataResource.vfs_service
            resp.content_type = 'text/plain'
            return
        elif data_key == 'prominit':
            resp.body = self.prom_init
            resp.content_type = 'text/plain'
            return
        elif data_key == 'promconfig':
            # The next PS will be a complete rewrite of the bootdata system
            # so not wasting time refactoring this
            # TODO(sh8121att) rebuild bootdata API for BootAction framework
            # NOTE(review): ``resp.content`` looks like it should be
            # ``resp.content_type`` — left as-is in this deprecated path.
            resp.content = 'text/plain'
            return
            # bootdata = self.state_manager.get_bootdata_key(hostname)
            #
            # if bootdata is None:
            #     resp.status = falcon.HTTP_404
            #     return
            # else:
            #     resp.content_type = 'text/plain'
            #
            #     host_design_id = bootdata.get('design_id', None)
            #     host_design = self.orchestrator.get_effective_site(
            #         host_design_id)
            #
            #     host_model = host_design.get_baremetal_node(hostname)
            #
            #     part_selectors = ['all', hostname]
            #
            #     if host_model.tags is not None:
            #         part_selectors.extend(host_model.tags)
            #
            #     all_configs = host_design.get_promenade_config(part_selectors)
            #
            #     part_list = [i.document for i in all_configs]
            #
            #     resp.body = "---\n" + "---\n".join([
            #         base64.b64decode(i.encode()).decode('utf-8')
            #         for i in part_list
            #     ]) + "\n..."
            #     return

    # Static systemd unit text served for the ``promservice`` data_key.
    prom_init_service = (
        "[Unit]\n"
        "Description=Promenade Initialization Service\n"
        "Documentation=http://github.com/att-comdev/drydock\n"
        "After=network-online.target local-fs.target\n"
        "ConditionPathExists=!/var/lib/prom.done\n\n"
        "[Service]\n"
        "Type=simple\n"
        "ExecStart=/var/tmp/prom_init.sh /etc/prom_init.yaml\n\n"
        "[Install]\n"
        "WantedBy=multi-user.target\n")

    # Static systemd unit text served for the ``vfservice`` data_key.
    vfs_service = (
        "[Unit]\n"
        "Description=SR-IOV Virtual Function configuration\n"
        "Documentation=http://github.com/att-comdev/drydock\n"
        "After=network.target local-fs.target\n\n"
        "[Service]\n"
        "Type=simple\n"
        "ExecStart=/bin/sh -c '/bin/echo 4 >/sys/class/net/ens3f0/device/sriov_numvfs'\n\n"
        "[Install]\n"
        "WantedBy=multi-user.target\n")
def list_opts():
    """Enumerate the oslo.config option groups exposed by this module."""
    opts = {'bootdata': BootdataResource.bootdata_options}
    return opts

View File

@ -15,11 +15,13 @@ import falcon
from drydock_provisioner.control.base import BaseResource from drydock_provisioner.control.base import BaseResource
class HealthResource(BaseResource): class HealthResource(BaseResource):
""" """
Return empty response/body to show Return empty response/body to show
that Drydock is healthy that Drydock is healthy
""" """
def on_get(self, req, resp): def on_get(self, req, resp):
""" """
It really does nothing right now. It may do more later It really does nothing right now. It may do more later

View File

@ -73,6 +73,7 @@ class AuthMiddleware(object):
'Request from authenticated user %s with roles %s' % 'Request from authenticated user %s with roles %s' %
(ctx.user, ','.join(ctx.roles))) (ctx.user, ','.join(ctx.roles)))
else: else:
self.logger.debug('Request from unauthenticated client.')
ctx.authenticated = False ctx.authenticated = False

View File

@ -213,7 +213,7 @@ class CreateNetworkTemplate(BaseMaasAction):
site_design = self._load_site_design() site_design = self._load_site_design()
except errors.OrchestratorError: except errors.OrchestratorError:
self.task.add_status_msg( self.task.add_status_msg(
"Error loading site design.", msg="Error loading site design.",
error=True, error=True,
ctx='NA', ctx='NA',
ctx_type='NA') ctx_type='NA')
@ -593,7 +593,7 @@ class ConfigureUserCredentials(BaseMaasAction):
site_design = self._load_site_design() site_design = self._load_site_design()
except errors.OrchestratorError: except errors.OrchestratorError:
self.task.add_status_msg( self.task.add_status_msg(
"Error loading site design.", msg="Error loading site design.",
error=True, error=True,
ctx='NA', ctx='NA',
ctx_type='NA') ctx_type='NA')
@ -605,7 +605,8 @@ class ConfigureUserCredentials(BaseMaasAction):
try: try:
key_list = maas_keys.SshKeys(self.maas_client) key_list = maas_keys.SshKeys(self.maas_client)
key_list.refresh() key_list.refresh()
except Exception: except Exception as ex:
self.logger.debug("Error accessing the MaaS API.", exc_info=ex)
self.task.set_status(hd_fields.TaskStatus.Complete) self.task.set_status(hd_fields.TaskStatus.Complete)
self.task.failure() self.task.failure()
self.task.add_status_msg( self.task.add_status_msg(
@ -655,11 +656,12 @@ class IdentifyNode(BaseMaasAction):
try: try:
machine_list = maas_machine.Machines(self.maas_client) machine_list = maas_machine.Machines(self.maas_client)
machine_list.refresh() machine_list.refresh()
except Exception: except Exception as ex:
self.logger.debug("Error accessing the MaaS API.", exc_info=ex)
self.task.set_status(hd_fields.TaskStatus.Complete) self.task.set_status(hd_fields.TaskStatus.Complete)
self.task.failure() self.task.failure()
self.task.add_status_msg( self.task.add_status_msg(
msg='Error accessing MaaS Machines API.', msg='Error accessing MaaS Machines API: %s' % str(ex),
error=True, error=True,
ctx='NA', ctx='NA',
ctx_type='NA') ctx_type='NA')
@ -673,7 +675,7 @@ class IdentifyNode(BaseMaasAction):
site_design = self._load_site_design() site_design = self._load_site_design()
except errors.OrchestratorError: except errors.OrchestratorError:
self.task.add_status_msg( self.task.add_status_msg(
"Error loading site design.", msg="Error loading site design.",
error=True, error=True,
ctx='NA', ctx='NA',
ctx_type='NA') ctx_type='NA')
@ -725,6 +727,7 @@ class ConfigureHardware(BaseMaasAction):
machine_list = maas_machine.Machines(self.maas_client) machine_list = maas_machine.Machines(self.maas_client)
machine_list.refresh() machine_list.refresh()
except Exception as ex: except Exception as ex:
self.logger.debug("Error accessing the MaaS API.", exc_info=ex)
self.task.set_status(hd_fields.TaskStatus.Complete) self.task.set_status(hd_fields.TaskStatus.Complete)
self.task.failure() self.task.failure()
self.task.add_status_msg( self.task.add_status_msg(
@ -742,7 +745,7 @@ class ConfigureHardware(BaseMaasAction):
site_design = self._load_site_design() site_design = self._load_site_design()
except errors.OrchestratorError: except errors.OrchestratorError:
self.task.add_status_msg( self.task.add_status_msg(
"Error loading site design.", msg="Error loading site design.",
error=True, error=True,
ctx='NA', ctx='NA',
ctx_type='NA') ctx_type='NA')
@ -856,6 +859,7 @@ class ApplyNodeNetworking(BaseMaasAction):
subnets = maas_subnet.Subnets(self.maas_client) subnets = maas_subnet.Subnets(self.maas_client)
subnets.refresh() subnets.refresh()
except Exception as ex: except Exception as ex:
self.logger.debug("Error accessing the MaaS API.", exc_info=ex)
self.task.set_status(hd_fields.TaskStatus.Complete) self.task.set_status(hd_fields.TaskStatus.Complete)
self.task.failure() self.task.failure()
self.task.add_status_msg( self.task.add_status_msg(
@ -873,7 +877,7 @@ class ApplyNodeNetworking(BaseMaasAction):
site_design = self._load_site_design() site_design = self._load_site_design()
except errors.OrchestratorError: except errors.OrchestratorError:
self.task.add_status_msg( self.task.add_status_msg(
"Error loading site design.", msg="Error loading site design.",
error=True, error=True,
ctx='NA', ctx='NA',
ctx_type='NA') ctx_type='NA')
@ -1155,6 +1159,7 @@ class ApplyNodePlatform(BaseMaasAction):
tag_list = maas_tag.Tags(self.maas_client) tag_list = maas_tag.Tags(self.maas_client)
tag_list.refresh() tag_list.refresh()
except Exception as ex: except Exception as ex:
self.logger.debug("Error accessing the MaaS API.", exc_info=ex)
self.task.set_status(hd_fields.TaskStatus.Complete) self.task.set_status(hd_fields.TaskStatus.Complete)
self.task.failure() self.task.failure()
self.task.add_status_msg( self.task.add_status_msg(
@ -1172,7 +1177,7 @@ class ApplyNodePlatform(BaseMaasAction):
site_design = self._load_site_design() site_design = self._load_site_design()
except errors.OrchestratorError: except errors.OrchestratorError:
self.task.add_status_msg( self.task.add_status_msg(
"Error loading site design.", msg="Error loading site design.",
error=True, error=True,
ctx='NA', ctx='NA',
ctx_type='NA') ctx_type='NA')
@ -1313,6 +1318,7 @@ class ApplyNodeStorage(BaseMaasAction):
machine_list = maas_machine.Machines(self.maas_client) machine_list = maas_machine.Machines(self.maas_client)
machine_list.refresh() machine_list.refresh()
except Exception as ex: except Exception as ex:
self.logger.debug("Error accessing the MaaS API.", exc_info=ex)
self.task.set_status(hd_fields.TaskStatus.Complete) self.task.set_status(hd_fields.TaskStatus.Complete)
self.task.failure() self.task.failure()
self.task.add_status_msg( self.task.add_status_msg(
@ -1330,7 +1336,7 @@ class ApplyNodeStorage(BaseMaasAction):
site_design = self._load_site_design() site_design = self._load_site_design()
except errors.OrchestratorError: except errors.OrchestratorError:
self.task.add_status_msg( self.task.add_status_msg(
"Error loading site design.", msg="Error loading site design.",
error=True, error=True,
ctx='NA', ctx='NA',
ctx_type='NA') ctx_type='NA')
@ -1563,7 +1569,7 @@ class ApplyNodeStorage(BaseMaasAction):
except Exception as ex: except Exception as ex:
self.task.failure(focus=n.get_id()) self.task.failure(focus=n.get_id())
self.task.add_status_msg( self.task.add_status_msg(
"Error configuring storage.", msg="Error configuring storage.",
error=True, error=True,
ctx=n.name, ctx=n.name,
ctx_type='node') ctx_type='node')
@ -1640,6 +1646,7 @@ class DeployNode(BaseMaasAction):
machine_list = maas_machine.Machines(self.maas_client) machine_list = maas_machine.Machines(self.maas_client)
machine_list.refresh() machine_list.refresh()
except Exception as ex: except Exception as ex:
self.logger.debug("Error accessing the MaaS API.", exc_info=ex)
self.task.set_status(hd_fields.TaskStatus.Complete) self.task.set_status(hd_fields.TaskStatus.Complete)
self.task.failure() self.task.failure()
self.task.add_status_msg( self.task.add_status_msg(
@ -1657,7 +1664,7 @@ class DeployNode(BaseMaasAction):
site_design = self._load_site_design() site_design = self._load_site_design()
except errors.OrchestratorError: except errors.OrchestratorError:
self.task.add_status_msg( self.task.add_status_msg(
"Error loading site design.", msg="Error loading site design.",
error=True, error=True,
ctx='NA', ctx='NA',
ctx_type='NA') ctx_type='NA')
@ -1721,6 +1728,35 @@ class DeployNode(BaseMaasAction):
msg=msg, error=True, ctx=n.name, ctx_type='node') msg=msg, error=True, ctx=n.name, ctx_type='node')
continue continue
# Saving boot action context for a node
self.logger.info("Saving Boot Action context for node %s." %
(n.name))
try:
ba_key = self.orchestrator.create_bootaction_context(
n.name, self.task)
tag_list = maas_tag.Tags(self.maas_client)
tag_list.refresh()
node_id_tags = tag_list.startswith("%s_baid-" % (n.name))
for t in node_id_tags:
t.delete()
if ba_key is not None:
msg = "Creating boot action id key tag for node %s" % (
n.name)
self.logger.debug(msg)
node_baid_tag = maas_tag.Tag(
self.maas_client,
name="%s_baid-%s" % (n.name, ba_key.hex()))
node_baid_tag = tag_list.add(node_baid_tag)
node_baid_tag.apply_to_node(machine.resource_id)
self.task.add_status_msg(
msg=msg, error=False, ctx=n.name, ctx_type='node')
except Exception as ex:
self.logger.error(
"Error setting boot action id key tag for %s." % n.name,
exc_info=ex)
self.logger.info("Deploying node %s" % (n.name)) self.logger.info("Deploying node %s" % (n.name))
try: try:

View File

@ -337,8 +337,9 @@ class Machine(model_base.ResourceBase):
if isinstance(obj_dict['boot_interface'], dict): if isinstance(obj_dict['boot_interface'], dict):
refined_dict['boot_mac'] = obj_dict['boot_interface'][ refined_dict['boot_mac'] = obj_dict['boot_interface'][
'mac_address'] 'mac_address']
refined_dict['boot_ip'] = obj_dict['boot_interface']['links'][ if len(obj_dict['boot_interface']['links']) > 0:
0]['ip_address'] refined_dict['boot_ip'] = obj_dict['boot_interface'][
'links'][0].get('ip_address', None)
i = cls(api_client, **refined_dict) i = cls(api_client, **refined_dict)
return i return i

View File

@ -129,6 +129,19 @@ class Tags(model_base.ResourceCollectionBase):
def __init__(self, api_client, **kwargs): def __init__(self, api_client, **kwargs):
super(Tags, self).__init__(api_client) super(Tags, self).__init__(api_client)
def startswith(self, partial_tag):
"""Find the set of tags that start with ``partial_tag``.
Return a list of Tag instances that start with ``partial_tag``.
:param partial_tag: string to compare to tags
"""
results = list()
for k, v in self.resources.items():
if k.startswith(partial_tag):
results.append(v)
return results
def add(self, res): def add(self, res):
""" """
Create a new resource in this collection in MaaS Create a new resource in this collection in MaaS

View File

@ -54,6 +54,22 @@ class PersistentOrchestratorError(OrchestratorError):
pass pass
class BootactionError(Exception):
    """Base exception for boot action processing failures."""
    pass


class UnknownPipelineSegment(BootactionError):
    """Raised when a pipeline names a segment that is not recognized."""
    pass


class PipelineFailure(BootactionError):
    """Raised when a pipeline segment fails while processing data."""
    pass


class InvalidAssetLocation(BootactionError):
    """Raised when a boot action asset location cannot be resolved."""
    pass
class DriverError(Exception): class DriverError(Exception):
pass pass

View File

@ -12,12 +12,24 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Object models for BootActions.""" """Object models for BootActions."""
import requests
import base64
from jinja2 import Template
from urllib.parse import urlparse
from urllib.parse import urlunparse
import re
import ulid2
import oslo_versionedobjects.fields as ovo_fields import oslo_versionedobjects.fields as ovo_fields
import drydock_provisioner.objects.base as base import drydock_provisioner.objects.base as base
import drydock_provisioner.objects.fields as hd_fields import drydock_provisioner.objects.fields as hd_fields
import drydock_provisioner.config as config
import drydock_provisioner.error as errors
from drydock_provisioner.util import KeystoneUtils
@base.DrydockObjectRegistry.register @base.DrydockObjectRegistry.register
class BootAction(base.DrydockPersistentObject, base.DrydockObject): class BootAction(base.DrydockPersistentObject, base.DrydockObject):
@ -33,6 +45,8 @@ class BootAction(base.DrydockPersistentObject, base.DrydockObject):
ovo_fields.ObjectField('BootActionAssetList', nullable=False), ovo_fields.ObjectField('BootActionAssetList', nullable=False),
'node_filter': 'node_filter':
ovo_fields.ObjectField('NodeFilterSet', nullable=True), ovo_fields.ObjectField('NodeFilterSet', nullable=True),
'target_nodes':
ovo_fields.ListOfStringsField(nullable=True),
} }
def __init__(self, **kwargs): def __init__(self, **kwargs):
@ -45,6 +59,30 @@ class BootAction(base.DrydockPersistentObject, base.DrydockObject):
def get_name(self): def get_name(self):
return self.name return self.name
    def render_assets(self, nodename, site_design, action_id,
                      type_filter=None):
        """Render all of the assets in this bootaction.

        Render the assets of this bootaction and return them in a list.
        The ``nodename`` and ``action_id`` will be
        used to build the context for any assets utilizing the ``template``
        pipeline segment.

        :param nodename: name of the node the assets are destined for
        :param site_design: a objects.SiteDesign instance holding the design sets
        :param action_id: a 128-bit ULID action_id of the boot action
                          the assets are part of
        :param type_filter: optional filter of the types of assets to render;
                            ``None`` renders every asset
        """
        assets = list()
        for a in self.asset_list:
            # A None filter matches everything; otherwise only assets whose
            # type equals the filter are rendered and returned.
            if type_filter is None or (type_filter is not None
                                       and a.type == type_filter):
                # render() mutates the asset, populating rendered_bytes.
                a.render(nodename, site_design, action_id)
                assets.append(a)

        return assets
@base.DrydockObjectRegistry.register @base.DrydockObjectRegistry.register
class BootActionList(base.DrydockObjectListBase, base.DrydockObject): class BootActionList(base.DrydockObjectListBase, base.DrydockObject):
@ -72,7 +110,174 @@ class BootActionAsset(base.DrydockObject):
} }
def __init__(self, **kwargs): def __init__(self, **kwargs):
super().__init__(**kwargs) if 'permissions' in kwargs:
mode = kwargs.pop('permissions')
if isinstance(mode, str):
mode = int(mode, base=8)
else:
mode = None
super().__init__(permissions=mode, **kwargs)
self.rendered_bytes = None
def render(self, nodename, site_design, action_id):
    """Render this asset and store the result in ``self.rendered_bytes``.

    The ``nodename`` and ``action_id`` are used to construct the context
    for evaluating the ``template`` pipeline segment. The final value is
    always stored as ``bytes``.

    :param nodename: the name of the node where the asset will be deployed
    :param site_design: instance of objects.SiteDesign
    :param action_id: a 128-bit ULID boot action id
    """
    node = site_design.get_baremetal_node(nodename)
    # Context made available to Jinja2 'template' pipeline segments.
    tpl_ctx = {
        'node': {
            'hostname': nodename,
            'tags': [t for t in node.tags],
            'labels': {k: v
                       for (k, v) in node.owner_data.items()},
            'network': {},
        },
        'action': {
            # Boot action key is exposed as a base32 ULID string.
            'key': ulid2.ulid_to_base32(action_id),
            'report_url': config.config_mgr.conf.bootactions.report_url,
        }
    }
    # Expose per-network addressing only for networks with an assigned address.
    for a in node.addressing:
        if a.address is not None:
            tpl_ctx['node']['network'][a.network] = dict()
            tpl_ctx['node']['network'][a.network]['ip'] = a.address
            network = site_design.get_network(a.network)
            tpl_ctx['node']['network'][a.network]['cidr'] = network.cidr
            tpl_ctx['node']['network'][a.network][
                'dns_suffix'] = network.dns_domain

    if self.location is not None:
        # The location string may itself contain template markup; render
        # it first, then fetch the referenced asset data.
        rendered_location = self.execute_pipeline(
            self.location, self.location_pipeline, tpl_ctx=tpl_ctx)
        data_block = self.resolve_asset_location(rendered_location)
    else:
        # Inline asset data is carried as a UTF-8 string in the design.
        data_block = self.data.encode('utf-8')

    value = self.execute_pipeline(
        data_block, self.data_pipeline, tpl_ctx=tpl_ctx)

    # Normalize the pipeline output to bytes regardless of final segment.
    if isinstance(value, str):
        value = value.encode('utf-8')

    self.rendered_bytes = value
def resolve_asset_location(self, asset_url):
    """Retrieve the data asset from the url.

    Returns the asset contents as a bytestring.

    :param asset_url: URL to retrieve the data asset from
    :raises errors.InvalidAssetLocation: on retrieval failure or an
        unsupported URL scheme
    """
    url_parts = urlparse(asset_url)

    if url_parts.scheme in ['http', 'https']:
        try:
            resp = requests.get(asset_url)
        except Exception as ex:
            raise errors.InvalidAssetLocation(
                "Failed retrieving asset: %s - %s" % (type(ex).__name__,
                                                      str(ex)))
        return resp.content
    elif url_parts.scheme in [
            'promenade+http', 'promenade+https', 'deckhand+http',
            'deckhand+https'
    ]:
        try:
            ks_sess = KeystoneUtils.get_session()
            # urlparse returns an immutable namedtuple, so the scheme
            # cannot be assigned directly; use _replace to strip the
            # service prefix (e.g. 'promenade+https' -> 'https').
            url_parts = url_parts._replace(
                scheme=re.sub(r'^[^+]+\+', '', url_parts.scheme))
            new_url = urlunparse(url_parts)
            resp = ks_sess.get(new_url)
        except Exception as ex:
            raise errors.InvalidAssetLocation(
                "Failed retrieving asset: %s - %s" % (type(ex).__name__,
                                                      str(ex)))
        return resp.content
    else:
        raise errors.InvalidAssetLocation(
            "Unknown scheme %s" % url_parts.scheme)
def execute_pipeline(self, data, pipeline, tpl_ctx=None):
    """Execute a pipeline against a data element.

    Returns the manipulated ``data`` element.

    :param data: The data element to be manipulated by the pipeline
    :param pipeline: list of pipeline segment names to execute, in order
    :param tpl_ctx: The optional context to be made available to the ``template`` pipeline
    :raises errors.UnknownPipelineSegment: when a segment name is not recognized
    :raises errors.PipelineFailure: when a segment raises during execution
    """
    segment_funcs = {
        'base64_encode': self.eval_base64_encode,
        'base64_decode': self.eval_base64_decode,
        'utf8_decode': self.eval_utf8_decode,
        'utf8_encode': self.eval_utf8_encode,
        'template': self.eval_template,
    }

    for s in pipeline:
        # Resolve the segment before invoking it so that a KeyError
        # raised *inside* a segment function is reported as a pipeline
        # failure rather than being misreported as an unknown segment.
        try:
            segment_func = segment_funcs[s]
        except KeyError:
            raise errors.UnknownPipelineSegment(
                "Bootaction pipeline segment %s unknown." % s)

        try:
            data = segment_func(data, ctx=tpl_ctx)
        except Exception as ex:
            raise errors.PipelineFailure(
                "Error when running bootaction pipeline segment %s: %s - %s"
                % (s, type(ex).__name__, str(ex)))

    return data
def eval_base64_encode(self, data, ctx=None):
    """Return ``data`` encoded as base64 bytes.

    Thin wrapper over the stdlib base64 module; the unused ``ctx``
    keyword keeps the signature uniform across pipeline segments.

    :param data: bytes-like data to be encoded
    :param ctx: ignored; present only for the generic segment interface
    """
    encoded = base64.b64encode(data)
    return encoded
def eval_base64_decode(self, data, ctx=None):
    """Return ``data`` decoded from base64.

    Thin wrapper over the stdlib base64 module; the unused ``ctx``
    keyword keeps the signature uniform across pipeline segments.

    :param data: base64 data to be decoded
    :param ctx: ignored; present only for the generic segment interface
    """
    decoded = base64.b64decode(data)
    return decoded
def eval_utf8_decode(self, data, ctx=None):
    """Return ``data`` (bytes) decoded into a UTF-8 string.

    :param data: bytes to be decoded
    :param ctx: ignored; present only for the generic segment interface
    """
    text = data.decode('utf-8')
    return text
def eval_utf8_encode(self, data, ctx=None):
    """Return ``data`` (string) encoded into UTF-8 bytes.

    :param data: string to be encoded
    :param ctx: ignored; present only for the generic segment interface
    """
    raw = data.encode('utf-8')
    return raw
def eval_template(self, data, ctx=None):
    """Render ``data`` as a Jinja2 template.

    :param data: the template source
    :param ctx: optional mapping injected into the template render
    """
    return Template(data).render(ctx)
@base.DrydockObjectRegistry.register @base.DrydockObjectRegistry.register

View File

@ -144,8 +144,6 @@ class SiteDesign(base.DrydockPersistentObject, base.DrydockObject):
ovo_fields.ObjectField('HardwareProfileList', nullable=True), ovo_fields.ObjectField('HardwareProfileList', nullable=True),
'baremetal_nodes': 'baremetal_nodes':
ovo_fields.ObjectField('BaremetalNodeList', nullable=True), ovo_fields.ObjectField('BaremetalNodeList', nullable=True),
'prom_configs':
ovo_fields.ObjectField('PromenadeConfigList', nullable=True),
'racks': 'racks':
ovo_fields.ObjectField('RackList', nullable=True), ovo_fields.ObjectField('RackList', nullable=True),
'bootactions': 'bootactions':

View File

@ -18,6 +18,7 @@ import importlib
import logging import logging
import uuid import uuid
import concurrent.futures import concurrent.futures
import os
import drydock_provisioner.config as config import drydock_provisioner.config as config
import drydock_provisioner.objects as objects import drydock_provisioner.objects as objects
@ -294,6 +295,7 @@ class Orchestrator(object):
status, site_design = self.get_described_site(design_ref) status, site_design = self.get_described_site(design_ref)
if status.status == hd_fields.ActionResult.Success: if status.status == hd_fields.ActionResult.Success:
self.compute_model_inheritance(site_design) self.compute_model_inheritance(site_design)
self.compute_bootaction_targets(site_design)
status = self._validate_design(site_design, result_status=status) status = self._validate_design(site_design, result_status=status)
except Exception as ex: except Exception as ex:
if status is not None: if status is not None:
@ -303,9 +305,8 @@ class Orchestrator(object):
ctx='NA', ctx='NA',
ctx_type='NA') ctx_type='NA')
status.set_status(hd_fields.ActionResult.Failure) status.set_status(hd_fields.ActionResult.Failure)
else: self.logger.error(
self.logger.error( "Error getting site definition: %s" % str(ex), exc_info=ex)
"Error getting site definition: %s" % str(ex), exc_info=ex)
else: else:
status.add_status_msg( status.add_status_msg(
msg="Successfully computed effective design.", msg="Successfully computed effective design.",
@ -369,24 +370,48 @@ class Orchestrator(object):
return nf return nf
def compute_bootaction_targets(self, site_design):
    """Find target nodes for each bootaction in ``site_design``.

    Evaluate each bootaction's node_filter against the design and
    record the resulting node names on the bootaction.

    :param site_design: an instance of objects.SiteDesign
    """
    bootactions = site_design.bootactions

    if bootactions is None:
        return

    for bootaction in bootactions:
        matched_nodes = self.process_node_filter(bootaction.node_filter,
                                                 site_design)
        bootaction.target_nodes = [n.get_id() for n in matched_nodes]
def process_node_filter(self, node_filter, site_design): def process_node_filter(self, node_filter, site_design):
target_nodes = site_design.baremetal_nodes target_nodes = site_design.baremetal_nodes
if node_filter is None: if node_filter is None:
return target_nodes return target_nodes
if not isinstance(node_filter, dict): if not isinstance(node_filter, dict) and not isinstance(
node_filter, objects.NodeFilterSet):
msg = "Invalid node_filter, must be a dictionary with keys 'filter_set_type' and 'filter_set'." msg = "Invalid node_filter, must be a dictionary with keys 'filter_set_type' and 'filter_set'."
self.logger.error(msg) self.logger.error(msg)
raise errors.OrchestratorError(msg) raise errors.OrchestratorError(msg)
result_sets = [] result_sets = []
for f in node_filter.get('filter_set', []): if isinstance(node_filter, dict):
result_sets.append(self.process_filter(target_nodes, f)) for f in node_filter.get('filter_set', []):
result_sets.append(self.process_filter(target_nodes, f))
return self.join_filter_sets( return self.join_filter_sets(
node_filter.get('filter_set_type'), result_sets) node_filter.get('filter_set_type'), result_sets)
elif isinstance(node_filter, objects.NodeFilterSet):
for f in node_filter.filter_set:
result_sets.append(self.process_filter(target_nodes, f))
return self.join_filter_sets(node_filter.filter_set_type,
result_sets)
def join_filter_sets(self, filter_set_type, result_sets): def join_filter_sets(self, filter_set_type, result_sets):
if filter_set_type == 'union': if filter_set_type == 'union':
@ -401,38 +426,50 @@ class Orchestrator(object):
"""Take a filter and apply it to the node_set. """Take a filter and apply it to the node_set.
:param node_set: A full set of objects.BaremetalNode :param node_set: A full set of objects.BaremetalNode
:param filter_set: A filter set describing filters to apply to the node set :param filter_set: A node filter describing filters to apply to the node set.
Either a dict or objects.NodeFilter
""" """
try: try:
set_type = filter_set.get('filter_type', None) if isinstance(filter_set, dict):
set_type = filter_set.get('filter_type', None)
node_names = filter_set.get('node_names', []) node_names = filter_set.get('node_names', [])
node_tags = filter_set.get('node_tags', []) node_tags = filter_set.get('node_tags', [])
node_labels = filter_set.get('node_labels', {}) node_labels = filter_set.get('node_labels', {})
rack_names = filter_set.get('rack_names', []) rack_names = filter_set.get('rack_names', [])
rack_labels = filter_set.get('rack_labels', {}) rack_labels = filter_set.get('rack_labels', {})
elif isinstance(filter_set, objects.NodeFilter):
set_type = filter_set.filter_type
node_names = filter_set.node_names
node_tags = filter_set.node_tags
node_labels = filter_set.node_labels
rack_names = filter_set.rack_names
rack_labels = filter_set.rack_labels
else:
raise errors.OrchestratorError(
"Node filter must be a dictionary or a NodeFilter instance"
)
target_nodes = dict() target_nodes = dict()
if len(node_names) > 0: if node_names and len(node_names) > 0:
self.logger.debug("Filtering nodes based on node names.") self.logger.debug("Filtering nodes based on node names.")
target_nodes['node_names'] = [ target_nodes['node_names'] = [
x for x in node_set if x.get_name() in node_names x for x in node_set if x.get_name() in node_names
] ]
if len(node_tags) > 0: if node_tags and len(node_tags) > 0:
self.logger.debug("Filtering nodes based on node tags.") self.logger.debug("Filtering nodes based on node tags.")
target_nodes['node_tags'] = [ target_nodes['node_tags'] = [
x for x in node_set for t in node_tags if x.has_tag(t) x for x in node_set for t in node_tags if x.has_tag(t)
] ]
if len(rack_names) > 0: if rack_names and len(rack_names) > 0:
self.logger.debug("Filtering nodes based on rack names.") self.logger.debug("Filtering nodes based on rack names.")
target_nodes['rack_names'] = [ target_nodes['rack_names'] = [
x for x in node_set if x.get_rack() in rack_names x for x in node_set if x.get_rack() in rack_names
] ]
if len(node_labels) > 0: if node_labels and len(node_labels) > 0:
self.logger.debug("Filtering nodes based on node labels.") self.logger.debug("Filtering nodes based on node labels.")
target_nodes['node_labels'] = [] target_nodes['node_labels'] = []
for k, v in node_labels.items(): for k, v in node_labels.items():
@ -441,27 +478,27 @@ class Orchestrator(object):
if getattr(x, 'owner_data', {}).get(k, None) == v if getattr(x, 'owner_data', {}).get(k, None) == v
]) ])
if len(rack_labels) > 0: if rack_labels and len(rack_labels) > 0:
self.logger.info( self.logger.info(
"Rack label filtering not yet implemented, returning all nodes." "Rack label filtering not yet implemented, returning all nodes."
) )
target_nodes['rack_labels'] = node_set target_nodes['rack_labels'] = node_set
if set_type == 'union': if set_type == 'union':
result_set = self.list_union( return self.list_union(
target_nodes.get('node_names', []), target_nodes.get('node_names', []),
target_nodes.get('node_tags', []), target_nodes.get('node_tags', []),
target_nodes.get('rack_names', []), target_nodes.get('rack_names', []),
target_nodes.get('node_labels', [])) target_nodes.get('node_labels', []))
elif set_type == 'intersection': elif set_type == 'intersection':
result_set = self.list_intersection( return self.list_intersection(
target_nodes.get('node_names', []), target_nodes.get('node_names', None),
target_nodes.get('node_tags', []), target_nodes.get('node_tags', None),
target_nodes.get('rack_names', []), target_nodes.get('rack_names', None),
target_nodes.get('node_labels', [])) target_nodes.get('node_labels', None))
return result_set
except Exception as ex: except Exception as ex:
self.logger.error("Error processing node filter.", exc_info=ex)
raise errors.OrchestratorError( raise errors.OrchestratorError(
"Error processing node filter: %s" % str(ex)) "Error processing node filter: %s" % str(ex))
@ -472,11 +509,20 @@ class Orchestrator(object):
:params rest: 0 or more lists of values :params rest: 0 or more lists of values
""" """
if len(rest) > 1: if len(rest) > 1:
return list( result = self.list_intersection(rest[0], *rest[1:])
set(a).intersection( if a is None:
set(Orchestrator.list_intersection(rest[0], rest[1:])))) return result
elif result is None:
return a
else:
return list(set(a).intersection(set(result)))
elif len(rest) == 1: elif len(rest) == 1:
return list(set(a).intersection(set(rest[0]))) if a is None and rest[0] is None:
return None
elif rest is None or rest[0]:
return a
else:
return list(set(a).intersection(set(rest[0])))
else: else:
return a return a
@ -494,3 +540,27 @@ class Orchestrator(object):
return list(set(lists[0])) return list(set(lists[0]))
else: else:
return None return None
def create_bootaction_context(self, nodename, task):
    """Save a boot action context for ``nodename``.

    Generate an identity key and persist the boot action context
    for nodename pointing at the top level task. Return the
    generated identity key as ``bytes``, or ``None`` when no
    bootaction in the design targets ``nodename``.

    :param nodename: Name of the node the bootaction context is targeted for
    :param task: The task instigating the node deployment
    """
    design_status, site_design = self.get_effective_site(task.design_ref)

    if site_design.bootactions is None:
        return None

    # A node gets at most one context/identity key: the first bootaction
    # targeting the node triggers persistence and the key is returned.
    for ba in site_design.bootactions:
        if nodename in ba.target_nodes:
            identity_key = os.urandom(32)
            self.state_manager.post_boot_action_context(
                nodename, task.get_id(), identity_key)
            return identity_key
    return None

View File

@ -44,8 +44,11 @@ data:
- 'base64_encode' - 'base64_encode'
- 'template' - 'template'
- 'base64_decode' - 'base64_decode'
- 'utf8_encode'
- 'utf8_decode'
permissions: permissions:
type: 'integer' type: 'string'
pattern: '\d{3}'
required: required:
- 'type' - 'type'
node_filter: node_filter:

View File

@ -70,13 +70,27 @@ class ActiveInstance(ExtendTable):
] ]
class BuildData(ExtendTable): class BootAction(ExtendTable):
"""Table persisting node build data.""" """Table persisting node build data."""
__tablename__ = 'build_data' __tablename__ = 'boot_action'
__schema__ = [ __schema__ = [
Column('node_name', String(16), primary_key=True), Column('node_name', String(16), primary_key=True),
Column('task_id', pg.BYTEA(16)), Column('task_id', pg.BYTEA(16)),
Column('message', String(128)), Column('identity_key', pg.BYTEA(32)),
]
class BootActionStatus(ExtendTable):
"""Table tracking status of node boot actions."""
__tablename__ = 'boot_action_status'
__schema__ = [
Column('node_name', String(32)),
Column('bootaction_id', pg.BYTEA(16), primary_key=True),
Column('task_id', pg.BYTEA(16)),
Column('identity_key', pg.BYTEA(32)),
Column('action_status', String(32)),
] ]

View File

@ -14,7 +14,9 @@
"""Access methods for managing external data access and persistence.""" """Access methods for managing external data access and persistence."""
import logging import logging
import uuid
from datetime import datetime from datetime import datetime
import ulid2
from sqlalchemy import create_engine from sqlalchemy import create_engine
from sqlalchemy import sql from sqlalchemy import sql
@ -48,7 +50,8 @@ class DrydockState(object):
self.tasks_tbl = tables.Tasks(self.db_metadata) self.tasks_tbl = tables.Tasks(self.db_metadata)
self.result_message_tbl = tables.ResultMessage(self.db_metadata) self.result_message_tbl = tables.ResultMessage(self.db_metadata)
self.active_instance_tbl = tables.ActiveInstance(self.db_metadata) self.active_instance_tbl = tables.ActiveInstance(self.db_metadata)
self.build_data_tbl = tables.BuildData(self.db_metadata) self.boot_action_tbl = tables.BootAction(self.db_metadata)
self.ba_status_tbl = tables.BootActionStatus(self.db_metadata)
return return
def tabularasa(self): def tabularasa(self):
@ -60,7 +63,8 @@ class DrydockState(object):
'tasks', 'tasks',
'result_message', 'result_message',
'active_instance', 'active_instance',
'build_data', 'boot_action',
'boot_action_status',
] ]
conn = self.db_engine.connect() conn = self.db_engine.connect()
@ -379,7 +383,7 @@ class DrydockState(object):
"INSERT INTO active_instance (dummy_key, identity, last_ping) " "INSERT INTO active_instance (dummy_key, identity, last_ping) "
"VALUES (1, :instance_id, timezone('UTC', now())) " "VALUES (1, :instance_id, timezone('UTC', now())) "
"ON CONFLICT (dummy_key) DO UPDATE SET " "ON CONFLICT (dummy_key) DO UPDATE SET "
"identity = :instance_id " "identity = :instance_id, last_ping = timezone('UTC', now()) "
"WHERE active_instance.last_ping < (now() - interval '%d seconds')" "WHERE active_instance.last_ping < (now() - interval '%d seconds')"
% (config.config_mgr.conf.leader_grace_period % (config.config_mgr.conf.leader_grace_period
)).execution_options(autocommit=True) )).execution_options(autocommit=True)
@ -420,6 +424,119 @@ class DrydockState(object):
except Exception as ex: except Exception as ex:
self.logger.error("Error abidcating leadership: %s" % str(ex)) self.logger.error("Error abidcating leadership: %s" % str(ex))
def post_boot_action_context(self, nodename, task_id, identity):
    """Save the context for a boot action for later access by a node.

    The ``task_id`` passed here will be maintained for the context of the boot action
    so that the design_ref can be accessed for loading the design document set. When
    status messages for the boot actions are reported, they will be attached to this task.

    Returns ``True`` on success, ``False`` on failure.

    :param nodename: The name of the node
    :param task_id: The uuid.UUID task id instigating the node deployment
    :param identity: A 32 byte string that the node must provide in the ``X-BootAction-Key``
                     header when accessing the boot action API
    """
    try:
        with self.db_engine.connect() as conn:
            # Upsert: a node carries at most one active boot action
            # context, keyed on node_name.
            query = sql.text(
                "INSERT INTO boot_action AS ba1 (node_name, task_id, identity_key) "
                "VALUES (:node, :task_id, :identity) "
                "ON CONFLICT (node_name) DO UPDATE SET "
                "task_id = :task_id, identity_key = :identity "
                "WHERE ba1.node_name = :node").execution_options(
                    autocommit=True)

            conn.execute(
                query,
                node=nodename,
                task_id=task_id.bytes,
                identity=identity)

        return True
    except Exception as ex:
        self.logger.error(
            "Error posting boot action context for node %s" % nodename,
            exc_info=ex)
        return False
def get_boot_action_context(self, nodename):
    """Get the boot action context for a node.

    Returns a dictionary with ``node_name``, ``task_id`` and ``identity_key``
    keys, or ``None`` when no context exists or the query fails.

    :param nodename: Name of the node
    """
    try:
        with self.db_engine.connect() as conn:
            query = self.boot_action_tbl.select().where(
                self.boot_action_tbl.c.node_name == nodename)
            rs = conn.execute(query)
            r = rs.fetchone()
            if r is not None:
                result_dict = dict(r)
                # Convert stored BYTEA columns back to native types.
                result_dict['task_id'] = uuid.UUID(
                    bytes=bytes(result_dict['task_id']))
                result_dict['identity_key'] = bytes(
                    result_dict['identity_key'])
                return result_dict
            return None
    except Exception as ex:
        self.logger.error(
            "Error retrieving boot action context for node %s" % nodename,
            exc_info=ex)

    return None
def post_boot_action(self,
                     nodename,
                     task_id,
                     identity_key,
                     action_id,
                     action_status=hd_fields.ActionResult.Incomplete):
    """Post an individual boot action.

    Returns ``True`` on success, ``False`` on failure.

    :param nodename: The name of the node the boot action is running on
    :param task_id: The uuid.UUID task_id of the task that instigated the node deployment
    :param identity_key: A 256-bit key the node must provide when accessing the boot action API
    :param action_id: The string ULID id of the boot action
    :param action_status: The status of the action.
    """
    try:
        with self.db_engine.connect() as conn:
            query = self.ba_status_tbl.insert().values(
                node_name=nodename,
                bootaction_id=action_id,
                task_id=task_id.bytes,
                identity_key=identity_key,
                action_status=action_status)
            conn.execute(query)
            return True
    except Exception as ex:
        self.logger.error(
            "Error saving boot action %s." % action_id, exc_info=ex)
        # Explicit failure return, consistent with the error contract of
        # post_boot_action_context.
        return False
def get_boot_action(self, action_id):
    """Query for a single boot action by ID.

    Returns a dictionary of the boot action row, or ``None`` when the
    action is not found or the query fails.

    :param action_id: string ULID bootaction id
    """
    try:
        with self.db_engine.connect() as conn:
            # SQLAlchemy's where() takes a SQL expression, not keyword
            # arguments; compare against the table column explicitly.
            query = self.ba_status_tbl.select().where(
                self.ba_status_tbl.c.bootaction_id ==
                ulid2.decode_ulid_base32(action_id))
            rs = conn.execute(query)
            r = rs.fetchone()
            if r is not None:
                ba_dict = dict(r)
                ba_dict['bootaction_id'] = bytes(ba_dict['bootaction_id'])
                ba_dict['identity_key'] = bytes(
                    ba_dict['identity_key']).hex()
                return ba_dict
            else:
                return None
    except Exception as ex:
        self.logger.error(
            "Error querying boot action %s" % action_id, exc_info=ex)
        return None
def post_promenade_part(self, part): def post_promenade_part(self, part):
my_lock = self.promenade_lock.acquire(blocking=True, timeout=10) my_lock = self.promenade_lock.acquire(blocking=True, timeout=10)
if my_lock: if my_lock:

View File

@ -0,0 +1,40 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Utility classes."""
from keystoneauth1 import session
from keystoneauth1.identity import v3
import drydock_provisioner.config as config
class KeystoneUtils(object):
    """Utility methods for using Keystone."""

    @staticmethod
    def get_session():
        """Get an initialized keystone session.

        Authentication is based on the keystone_authtoken section of the config file.
        """
        ks_cfg = config.config_mgr.conf.keystone_authtoken
        fields = (
            'auth_url',
            'username',
            'password',
            'project_id',
            'user_domain_name',
        )
        auth_info = {f: getattr(ks_cfg, f) for f in fields}

        auth = v3.Password(**auth_info)
        return session.Session(auth=auth)

View File

@ -19,3 +19,5 @@ alembic==0.8.2
sqlalchemy==1.1.14 sqlalchemy==1.1.14
psycopg2==2.7.3.1 psycopg2==2.7.3.1
jsonschema==2.6.0 jsonschema==2.6.0
jinja2==2.9.6
ulid2==0.1.1

110
tests/conftest.py Normal file
View File

@ -0,0 +1,110 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared fixtures used by integration tests."""
import logging
import os
import shutil
import drydock_provisioner.config as config
import drydock_provisioner.objects as objects
from drydock_provisioner.statemgmt.state import DrydockState
from drydock_provisioner.ingester.ingester import Ingester
from drydock_provisioner.orchestrator.orchestrator import Orchestrator
import pytest
@pytest.fixture()
def test_ingester():
    """Provide an Ingester with the YAML plugin enabled."""
    ingester = Ingester()
    ingester.enable_plugin(
        'drydock_provisioner.ingester.plugins.yaml.YamlIngester')
    return ingester
@pytest.fixture()
def test_orchestrator(drydock_state, test_ingester):
    """Provide an Orchestrator wired to the shared state manager and ingester."""
    orchestrator = Orchestrator(
        state_manager=drydock_state, ingester=test_ingester)
    return orchestrator
@pytest.fixture()
def blank_state(drydock_state):
    """Provide a state manager whose database tables have been wiped."""
    drydock_state.tabularasa()
    return drydock_state
@pytest.fixture(scope='session')
def drydock_state(setup):
    """Provide a session-scoped DrydockState connected to the test database."""
    state_mgr = DrydockState()
    state_mgr.connect_db()
    return state_mgr
@pytest.fixture(scope='module')
def input_files(tmpdir_factory, request):
    """Copy the sample YAML inputs into a per-module temp directory.

    The directory containing the samples is derived from the YAMLDIR
    environment variable (assumed to point at a file inside the samples
    directory -- confirm against the test runner setup).
    """
    tmpdir = tmpdir_factory.mktemp('data')
    samples_dir = os.path.dirname(os.getenv('YAMLDIR'))
    samples = os.listdir(samples_dir)

    for f in samples:
        src_file = samples_dir + "/" + f
        dst_file = str(tmpdir) + "/" + f
        shutil.copyfile(src_file, dst_file)

    return tmpdir
@pytest.fixture(scope='session')
def setup(setup_logging):
    """Register object models and seed test configuration overrides."""
    objects.register_all()

    config.config_mgr.register_options(enable_keystone=False)

    config.config_mgr.conf([])
    # Point at the Postgres instance started by the integration environment.
    config.config_mgr.conf.set_override(
        name="database_connect_string",
        group="database",
        override="postgresql+psycopg2://drydock:drydock@localhost:5432/drydock"
    )
    # Short leadership/poll intervals keep the integration tests fast.
    config.config_mgr.conf.set_override(
        name="leader_grace_period", override=15)
    config.config_mgr.conf.set_override(name="poll_interval", override=3)
    return
@pytest.fixture(scope='session')
def setup_logging():
    """Configure the drydock loggers for test output."""
    # Setup root logger
    logger = logging.getLogger('drydock')
    logger.setLevel('DEBUG')
    ch = logging.StreamHandler()
    formatter = logging.Formatter(
        '%(asctime)s - %(levelname)s - %(filename)s:%(funcName)s - %(message)s'
    )
    ch.setFormatter(formatter)
    logger.addHandler(ch)

    # Specialized format for API logging; does not propagate to the root
    # handler so API records are formatted exactly once.
    logger = logging.getLogger('drydock.control')
    logger.propagate = False
    formatter = logging.Formatter(
        '%(asctime)s - %(levelname)s - %(user)s - %(req_id)s - %(external_ctx)s - %(message)s'
    )

    ch = logging.StreamHandler()
    ch.setFormatter(formatter)
    logger.addHandler(ch)

View File

@ -1,68 +0,0 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared fixtures used by integration tests."""
import logging
from oslo_config import cfg
import drydock_provisioner.config as config
import drydock_provisioner.objects as objects
from drydock_provisioner.statemgmt.state import DrydockState
import pytest
@pytest.fixture()
def blank_state(drydock_state):
drydock_state.tabularasa()
return drydock_state
@pytest.fixture(scope='session')
def drydock_state(setup):
state_mgr = DrydockState()
state_mgr.connect_db()
return state_mgr
@pytest.fixture(scope='session')
def setup():
objects.register_all()
logging.basicConfig(level='DEBUG')
req_opts = {
'default':
[cfg.IntOpt('leader_grace_period'),
cfg.IntOpt('poll_interval')],
'database': [cfg.StrOpt('database_connect_string')],
'logging': [
cfg.StrOpt('global_logger_name', default='drydock'),
]
}
for k, v in req_opts.items():
config.config_mgr.conf.register_opts(v, group=k)
config.config_mgr.conf([])
config.config_mgr.conf.set_override(
name="database_connect_string",
group="database",
override="postgresql+psycopg2://drydock:drydock@localhost:5432/drydock"
)
config.config_mgr.conf.set_override(
name="leader_grace_period", group="default", override=15)
config.config_mgr.conf.set_override(
name="poll_interval", group="default", override=3)
return

View File

@ -0,0 +1,94 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic testing for the orchestrator."""
from falcon import testing
import pytest
import os
import tarfile
import io
import falcon
import drydock_provisioner.objects.fields as hd_fields
from drydock_provisioner.control.api import start_api
class TestClass(object):
    """API tests for the boot action units endpoint."""

    def test_bootaction_context(self, falcontest, seed_bootaction):
        """Test that the API will return a boot action context"""
        url = "/api/v1.0/bootactions/nodes/%s/units" % seed_bootaction[
            'nodename']
        auth_hdr = {'X-Bootaction-Key': "%s" % seed_bootaction['identity_key']}

        result = falcontest.simulate_get(url, headers=auth_hdr)

        assert result.status == falcon.HTTP_200

        # The body must be a readable gzipped tarball of the rendered assets.
        fileobj = io.BytesIO(result.content)
        tarfile.open(mode='r:gz', fileobj=fileobj)

    def test_bootaction_context_notfound(self, falcontest):
        """Test that the API will return a 404 for unknown node"""
        url = "/api/v1.0/bootactions/nodes/%s/units" % 'foo'
        auth_hdr = {'X-Bootaction-Key': "%s" % 'bar'}

        result = falcontest.simulate_get(url, headers=auth_hdr)

        assert result.status == falcon.HTTP_404

    def test_bootaction_context_noauth(self, falcontest, seed_bootaction):
        """Test that the API returns 401 when the identity key header is missing"""
        url = "/api/v1.0/bootactions/nodes/%s/units" % seed_bootaction[
            'nodename']

        result = falcontest.simulate_get(url)

        assert result.status == falcon.HTTP_401

    def test_bootaction_context_badauth(self, falcontest, seed_bootaction):
        """Test that the API returns 403 when the identity key is wrong"""
        url = "/api/v1.0/bootactions/nodes/%s/units" % seed_bootaction[
            'nodename']
        auth_hdr = {'X-Bootaction-Key': 'deadbeef'}

        result = falcontest.simulate_get(url, headers=auth_hdr)

        assert result.status == falcon.HTTP_403

    @pytest.fixture()
    def seed_bootaction(self, blank_state, test_orchestrator, input_files):
        """Add a task and boot action to the database for testing."""
        input_file = input_files.join("fullsite.yaml")
        design_ref = "file://%s" % input_file
        test_task = test_orchestrator.create_task(
            action=hd_fields.OrchestratorAction.Noop, design_ref=design_ref)

        id_key = os.urandom(32)
        blank_state.post_boot_action_context('compute01',
                                             test_task.get_id(), id_key)
        ba_ctx = dict(
            nodename='compute01',
            task_id=test_task.get_id(),
            identity_key=id_key.hex())
        return ba_ctx

    @pytest.fixture()
    def falcontest(self, drydock_state, test_ingester, test_orchestrator):
        """Create a test harness for the the Falcon API framework."""
        return testing.TestClient(
            start_api(
                state_manager=drydock_state,
                ingester=test_ingester,
                orchestrator=test_orchestrator))

View File

@ -6,6 +6,11 @@ sleep 15
psql -h localhost -c "create user drydock with password 'drydock';" postgres postgres psql -h localhost -c "create user drydock with password 'drydock';" postgres postgres
psql -h localhost -c "create database drydock;" postgres postgres psql -h localhost -c "create database drydock;" postgres postgres
export DRYDOCK_DB_URL="postgresql+psycopg2://drydock:drydock@localhost:5432/drydock" sudo docker run --rm -t --net=host -e DRYDOCK_DB_URL="postgresql+psycopg2://drydock:drydock@localhost:5432/drydock" --entrypoint /usr/local/bin/alembic drydock:latest upgrade head
alembic upgrade head
py.test $1
RESULT=$?
sudo docker stop psql_integration
exit $RESULT

View File

@ -0,0 +1,41 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test that boot action assets are rendered correctly."""
import ulid2
from drydock_provisioner.ingester.ingester import Ingester
from drydock_provisioner.statemgmt.state import DrydockState
import drydock_provisioner.objects as objects
class TestClass(object):
    def test_bootaction_render(self, input_files, setup):
        """Render the 'helloworld' boot action and verify node substitution.

        The first rendered asset is templated and must contain the
        target node's hostname after rendering.
        """
        objects.register_all()

        design_ref = "file://%s" % str(input_files.join("fullsite.yaml"))

        ingester = Ingester()
        ingester.enable_plugin(
            'drydock_provisioner.ingester.plugins.yaml.YamlIngester')
        design_status, design_data = ingester.ingest_data(
            design_state=DrydockState(), design_ref=design_ref)

        bootaction = design_data.get_bootaction('helloworld')
        rendered = bootaction.render_assets('compute01', design_data,
                                            ulid2.generate_binary_ulid())

        assert 'compute01' in rendered[0].rendered_bytes.decode('utf-8')

View File

@ -0,0 +1,43 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test that rack models are properly parsed."""
import base64
import drydock_provisioner.objects as objects
class TestClass(object):
    def test_bootaction_pipeline_base64(self):
        """The 'base64_encode' pipeline segment must base64-encode bytes."""
        objects.register_all()

        asset = objects.BootActionAsset()
        source = 'Test 1 2 3!'.encode('utf-8')

        result = asset.execute_pipeline(source, ['base64_encode'])

        assert result == base64.b64encode(source)

    def test_bootaction_pipeline_utf8(self):
        """The 'utf8_decode' pipeline segment must decode UTF-8 bytes to str."""
        objects.register_all()

        asset = objects.BootActionAsset()
        expected = 'Test 1 2 3!'

        result = asset.execute_pipeline(
            expected.encode('utf-8'), ['utf8_decode'])

        assert result == expected

View File

@ -0,0 +1,54 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import drydock_provisioner.objects as objects
class TestClass(object):
    def test_bootaction_scoping_blankfilter(self, input_files,
                                            test_orchestrator):
        """Test a boot action with no node filter scopes correctly.

        Without a node_filter, the boot action should target every node
        in the design.
        """
        input_file = input_files.join("fullsite.yaml")
        design_ref = "file://%s" % str(input_file)

        design_status, design_data = test_orchestrator.get_effective_site(
            design_ref)

        assert design_status.status == objects.fields.ActionResult.Success
        assert len(design_data.bootactions) > 0

        found = False
        for ba in design_data.bootactions:
            if ba.get_id() == 'helloworld':
                found = True
                assert 'compute01' in ba.target_nodes
                assert 'controller01' in ba.target_nodes
        # Guard against a vacuous pass: the per-node assertions above only
        # run if the boot action was actually found in the design.
        assert found, "Boot action 'helloworld' not found in design"

    def test_bootaction_scoping_unionfilter(self, input_files,
                                            test_orchestrator):
        """Test a boot action with a union node filter scopes correctly.

        The 'hw_filtered' boot action names only compute01, so
        controller01 must be excluded from its target nodes.
        """
        input_file = input_files.join("fullsite.yaml")
        design_ref = "file://%s" % str(input_file)

        design_status, design_data = test_orchestrator.get_effective_site(
            design_ref)

        assert design_status.status == objects.fields.ActionResult.Success
        assert len(design_data.bootactions) > 0

        found = False
        for ba in design_data.bootactions:
            if ba.get_id() == 'hw_filtered':
                found = True
                assert 'compute01' in ba.target_nodes
                assert 'controller01' not in ba.target_nodes
        # Guard against a vacuous pass if the boot action is missing.
        assert found, "Boot action 'hw_filtered' not found in design"

View File

@ -0,0 +1,58 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test that rack models are properly parsed."""
import ulid2
import tarfile
import io
import drydock_provisioner.objects as objects
from drydock_provisioner.ingester.ingester import Ingester
from drydock_provisioner.statemgmt.state import DrydockState
from drydock_provisioner.control.bootaction import BootactionUtils
class TestClass(object):
    def test_bootaction_tarbuilder(self, input_files, setup):
        """Test that boot action assets are packed into a valid tarball.

        Renders the 'helloworld' boot action assets, builds a gzipped
        tarball from them, and verifies the asset's declared permissions
        ('555' in the design) round-trip to the tar member mode.
        """
        objects.register_all()

        input_file = input_files.join("fullsite.yaml")
        design_state = DrydockState()
        design_ref = "file://%s" % str(input_file)

        ingester = Ingester()
        ingester.enable_plugin(
            'drydock_provisioner.ingester.plugins.yaml.YamlIngester')
        design_status, design_data = ingester.ingest_data(
            design_state=design_state, design_ref=design_ref)

        target_host = 'compute01'

        ba = design_data.get_bootaction('helloworld')
        action_id = ulid2.generate_binary_ulid()
        assets = ba.render_assets(target_host, design_data, action_id)

        assert len(assets) > 0

        tarbytes = BootactionUtils.tarbuilder(assets)

        assert tarbytes is not None

        fileobj = io.BytesIO(tarbytes)
        # Close the tarball deterministically instead of leaking the handle.
        with tarfile.open(mode='r:gz', fileobj=fileobj) as tarball:
            tarasset = tarball.getmember('/var/tmp/hello.sh')
            assert tarasset.mode == 0o555

View File

@ -12,23 +12,10 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import pytest
import shutil
import os
import logging
from oslo_config import cfg
import drydock_provisioner.config as config
import drydock_provisioner.objects as objects
from drydock_provisioner.ingester.ingester import Ingester from drydock_provisioner.ingester.ingester import Ingester
from drydock_provisioner.statemgmt.state import DrydockState from drydock_provisioner.statemgmt.state import DrydockState
from drydock_provisioner.orchestrator.orchestrator import Orchestrator from drydock_provisioner.orchestrator.orchestrator import Orchestrator
logging.basicConfig(level=logging.DEBUG)
class TestClass(object): class TestClass(object):
def test_design_inheritance(self, input_files, setup): def test_design_inheritance(self, input_files, setup):
input_file = input_files.join("fullsite.yaml") input_file = input_files.join("fullsite.yaml")
@ -59,44 +46,3 @@ class TestClass(object):
iface = node.get_applied_interface('pxe') iface = node.get_applied_interface('pxe')
assert len(iface.get_hw_slaves()) == 1 assert len(iface.get_hw_slaves()) == 1
@pytest.fixture(scope='module')
def input_files(self, tmpdir_factory, request):
tmpdir = tmpdir_factory.mktemp('data')
samples_dir = os.path.dirname(str(
request.fspath)) + "/" + "../yaml_samples"
samples = os.listdir(samples_dir)
for f in samples:
src_file = samples_dir + "/" + f
dst_file = str(tmpdir) + "/" + f
shutil.copyfile(src_file, dst_file)
return tmpdir
@pytest.fixture(scope='module')
def setup(self):
objects.register_all()
logging.basicConfig()
req_opts = {
'default': [cfg.IntOpt('leader_grace_period')],
'database': [cfg.StrOpt('database_connect_string')],
'logging': [
cfg.StrOpt('global_logger_name', default='drydock'),
]
}
for k, v in req_opts.items():
config.config_mgr.conf.register_opts(v, group=k)
config.config_mgr.conf([])
config.config_mgr.conf.set_override(
name="database_connect_string",
group="database",
override=
"postgresql+psycopg2://drydock:drydock@localhost:5432/drydock")
config.config_mgr.conf.set_override(
name="leader_grace_period", group="default", override=15)
return

View File

@ -11,19 +11,11 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Test YAML data ingestion."""
from drydock_provisioner.ingester.ingester import Ingester from drydock_provisioner.ingester.ingester import Ingester
from drydock_provisioner.statemgmt.state import DrydockState from drydock_provisioner.statemgmt.state import DrydockState
import drydock_provisioner.objects as objects import drydock_provisioner.objects as objects
import drydock_provisioner.config as config
from oslo_config import cfg
import logging
import pytest
import shutil
import os
class TestClass(object): class TestClass(object):
def test_ingest_full_site(self, input_files, setup): def test_ingest_full_site(self, input_files, setup):
@ -42,44 +34,3 @@ class TestClass(object):
assert len(design_data.host_profiles) == 2 assert len(design_data.host_profiles) == 2
assert len(design_data.baremetal_nodes) == 2 assert len(design_data.baremetal_nodes) == 2
@pytest.fixture(scope='module')
def input_files(self, tmpdir_factory, request):
tmpdir = tmpdir_factory.mktemp('data')
samples_dir = os.path.dirname(str(
request.fspath)) + "/" + "../yaml_samples"
samples = os.listdir(samples_dir)
for f in samples:
src_file = samples_dir + "/" + f
dst_file = str(tmpdir) + "/" + f
shutil.copyfile(src_file, dst_file)
return tmpdir
@pytest.fixture(scope='module')
def setup(self):
objects.register_all()
logging.basicConfig()
req_opts = {
'default': [cfg.IntOpt('leader_grace_period')],
'database': [cfg.StrOpt('database_connect_string')],
'logging': [
cfg.StrOpt('global_logger_name', default='drydock'),
]
}
for k, v in req_opts.items():
config.config_mgr.conf.register_opts(v, group=k)
config.config_mgr.conf([])
config.config_mgr.conf.set_override(
name="database_connect_string",
group="database",
override=
"postgresql+psycopg2://drydock:drydock@localhost:5432/drydock")
config.config_mgr.conf.set_override(
name="leader_grace_period", group="default", override=15)
return

View File

@ -0,0 +1,37 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test that boot action models are properly parsed."""
from drydock_provisioner.ingester.ingester import Ingester
from drydock_provisioner.statemgmt.state import DrydockState
import drydock_provisioner.objects as objects
class TestClass(object):
    def test_bootaction_parse(self, input_files, setup):
        """Ingest bootaction.yaml and check the parsed asset list."""
        objects.register_all()

        design_ref = "file://%s" % str(input_files.join("bootaction.yaml"))

        ingester = Ingester()
        ingester.enable_plugin(
            'drydock_provisioner.ingester.plugins.yaml.YamlIngester')
        design_status, design_data = ingester.ingest_data(
            design_state=DrydockState(), design_ref=design_ref)

        parsed_ba = design_data.get_bootaction('helloworld')

        assert len(parsed_ba.asset_list) == 2

View File

@ -16,15 +16,9 @@
from drydock_provisioner.ingester.ingester import Ingester from drydock_provisioner.ingester.ingester import Ingester
from drydock_provisioner.statemgmt.state import DrydockState from drydock_provisioner.statemgmt.state import DrydockState
import drydock_provisioner.objects as objects import drydock_provisioner.objects as objects
import drydock_provisioner.config as config
import drydock_provisioner.error as errors import drydock_provisioner.error as errors
from oslo_config import cfg
import logging
import pytest import pytest
import shutil
import os
class TestClass(object): class TestClass(object):
@ -62,44 +56,3 @@ class TestClass(object):
with pytest.raises(errors.DesignError): with pytest.raises(errors.DesignError):
design_data.get_rack('foo') design_data.get_rack('foo')
@pytest.fixture(scope='module')
def input_files(self, tmpdir_factory, request):
tmpdir = tmpdir_factory.mktemp('data')
samples_dir = os.path.dirname(str(
request.fspath)) + "/" + "../yaml_samples"
samples = os.listdir(samples_dir)
for f in samples:
src_file = samples_dir + "/" + f
dst_file = str(tmpdir) + "/" + f
shutil.copyfile(src_file, dst_file)
return tmpdir
@pytest.fixture(scope='module')
def setup(self):
objects.register_all()
logging.basicConfig()
req_opts = {
'default': [cfg.IntOpt('leader_grace_period')],
'database': [cfg.StrOpt('database_connect_string')],
'logging': [
cfg.StrOpt('global_logger_name', default='drydock'),
]
}
for k, v in req_opts.items():
config.config_mgr.conf.register_opts(v, group=k)
config.config_mgr.conf([])
config.config_mgr.conf.set_override(
name="database_connect_string",
group="database",
override=
"postgresql+psycopg2://drydock:drydock@localhost:5432/drydock")
config.config_mgr.conf.set_override(
name="leader_grace_period", group="default", override=15)
return

View File

@ -11,16 +11,10 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import pytest """Test that YAML ingestion works."""
import shutil
import os
import logging
from drydock_provisioner.ingester.plugins.yaml import YamlIngester from drydock_provisioner.ingester.plugins.yaml import YamlIngester
logging.basicConfig(level=logging.DEBUG)
class TestClass(object): class TestClass(object):
def test_ingest_singledoc(self, input_files): def test_ingest_singledoc(self, input_files):
input_file = input_files.join("singledoc.yaml") input_file = input_files.join("singledoc.yaml")
@ -47,17 +41,3 @@ class TestClass(object):
assert status.status == 'success' assert status.status == 'success'
assert len(models) == 3 assert len(models) == 3
@pytest.fixture(scope='module')
def input_files(self, tmpdir_factory, request):
tmpdir = tmpdir_factory.mktemp('data')
samples_dir = os.path.dirname(str(
request.fspath)) + "/" + "../yaml_samples"
samples = os.listdir(samples_dir)
for f in samples:
src_file = samples_dir + "/" + f
dst_file = str(tmpdir) + "/" + f
shutil.copyfile(src_file, dst_file)
return tmpdir

View File

@ -0,0 +1,68 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the node filter logic in the orchestrator."""
from drydock_provisioner.ingester.ingester import Ingester
from drydock_provisioner.statemgmt.state import DrydockState
import drydock_provisioner.objects as objects
class TestClass(object):
    def _ingest_design(self, input_files):
        """Ingest fullsite.yaml and return the resulting site design."""
        input_file = input_files.join("fullsite.yaml")
        design_ref = "file://%s" % str(input_file)

        ingester = Ingester()
        ingester.enable_plugin(
            'drydock_provisioner.ingester.plugins.yaml.YamlIngester')
        design_status, design_data = ingester.ingest_data(
            design_state=DrydockState(), design_ref=design_ref)
        return design_data

    def test_node_filter_obj(self, input_files, setup, test_orchestrator):
        """A NodeFilterSet object filter selects exactly the named node."""
        design_data = self._ingest_design(input_files)

        nf = objects.NodeFilter()
        nf.filter_type = 'intersection'
        nf.node_names = ['compute01']

        nfs = objects.NodeFilterSet(
            filter_set_type='intersection', filter_set=[nf])

        node_list = test_orchestrator.process_node_filter(nfs, design_data)

        assert len(node_list) == 1

    def test_node_filter_dict(self, input_files, setup, test_orchestrator):
        """A plain-dict node filter selects exactly the named node."""
        design_data = self._ingest_design(input_files)

        nfs = {
            'filter_set_type':
            'intersection',
            'filter_set': [
                {
                    'filter_type': 'intersection',
                    # node_names given as a bare string rather than a list —
                    # presumably exercises scalar handling; TODO confirm.
                    'node_names': 'compute01',
                },
            ],
        }

        node_list = test_orchestrator.process_node_filter(nfs, design_data)

        assert len(node_list) == 1

View File

@ -10,14 +10,15 @@ spec:
assets: assets:
- path: /var/tmp/hello.sh - path: /var/tmp/hello.sh
type: file type: file
permissions: 555 permissions: '555'
data: | data: |-
IyEvYmluL2Jhc2gKCmVjaG8gJ0hlbGxvIFdvcmxkIScK IyEvYmluL2Jhc2gKCmVjaG8gJ0hlbGxvIFdvcmxkIScK
data_pipeline: data_pipeline:
- base64_decode - base64_decode
- utf8_encode
- path: /lib/systemd/system/hello.service - path: /lib/systemd/system/hello.service
type: unit type: unit
data: | data: |-
W1VuaXRdCkRlc2NyaXB0aW9uPUhlbGxvIFdvcmxkCgpbU2VydmljZV0KVHlwZT1vbmVzaG90CkV4 W1VuaXRdCkRlc2NyaXB0aW9uPUhlbGxvIFdvcmxkCgpbU2VydmljZV0KVHlwZT1vbmVzaG90CkV4
ZWNTdGFydD0vdmFyL3RtcC9oZWxsby5zaAoKW0luc3RhbGxdCldhbnRlZEJ5PW11bHRpLXVzZXIu ZWNTdGFydD0vdmFyL3RtcC9oZWxsby5zaAoKW0luc3RhbGxdCldhbnRlZEJ5PW11bHRpLXVzZXIu
dGFyZ2V0Cg== dGFyZ2V0Cg==

View File

@ -491,3 +491,71 @@ spec:
dev_type: 'VBOX HARDDISK' dev_type: 'VBOX HARDDISK'
bus_type: 'scsi' bus_type: 'scsi'
... ...
---
apiVersion: 'drydock/v1'
kind: BootAction
metadata:
name: helloworld
region: sitename
date: 17-FEB-2017
author: Scott Hussey
spec:
assets:
- path: /var/tmp/hello.sh
type: file
permissions: '555'
data: |-
IyEvYmluL2Jhc2gKCmVjaG8gJ0hlbGxvIFdvcmxkISAtZnJvbSB7eyBub2RlLmhvc3RuYW1lIH19
Jwo=
data_pipeline:
- base64_decode
- utf8_decode
- template
- path: /lib/systemd/system/hello.service
type: unit
permissions: '600'
data: |-
W1VuaXRdCkRlc2NyaXB0aW9uPUhlbGxvIFdvcmxkCgpbU2VydmljZV0KVHlwZT1vbmVzaG90CkV4
ZWNTdGFydD0vdmFyL3RtcC9oZWxsby5zaAoKW0luc3RhbGxdCldhbnRlZEJ5PW11bHRpLXVzZXIu
dGFyZ2V0Cg==
data_pipeline:
- base64_decode
- utf8_decode
...
---
apiVersion: 'drydock/v1'
kind: BootAction
metadata:
name: hw_filtered
region: sitename
date: 17-FEB-2017
author: Scott Hussey
spec:
node_filter:
filter_set_type: 'union'
filter_set:
- filter_type: 'union'
node_names:
- 'compute01'
assets:
- path: /var/tmp/hello.sh
type: file
permissions: '555'
data: |-
IyEvYmluL2Jhc2gKCmVjaG8gJ0hlbGxvIFdvcmxkISAtZnJvbSB7eyBub2RlLmhvc3RuYW1lIH19
Jwo=
data_pipeline:
- base64_decode
- utf8_decode
- template
- path: /lib/systemd/system/hello.service
type: unit
permissions: '600'
data: |-
W1VuaXRdCkRlc2NyaXB0aW9uPUhlbGxvIFdvcmxkCgpbU2VydmljZV0KVHlwZT1vbmVzaG90CkV4
ZWNTdGFydD0vdmFyL3RtcC9oZWxsby5zaAoKW0luc3RhbGxdCldhbnRlZEJ5PW11bHRpLXVzZXIu
dGFyZ2V0Cg==
data_pipeline:
- base64_decode
- utf8_decode
...

View File

@ -1,7 +1,8 @@
[tox] [tox]
envlist = py35 envlist = unit,pep8,bandit
[testenv] [testenv]
setenv = YAMLDIR = {toxinidir}/tests/yaml_samples/
basepython=python3.5 basepython=python3.5
deps= deps=
-rrequirements-direct.txt -rrequirements-direct.txt
@ -27,6 +28,7 @@ commands=
[testenv:unit] [testenv:unit]
setenv= setenv=
PYTHONWARNING=all PYTHONWARNING=all
YAMLDIR={toxinidir}/tests/yaml_samples/
commands= commands=
py.test \ py.test \
tests/unit/{posargs} tests/unit/{posargs}
@ -38,6 +40,10 @@ commands=
py.test \ py.test \
tests/integration/{posargs} tests/integration/{posargs}
[testenv:postgres]
commands=
{toxinidir}/tests/integration/postgres/test_postgres.sh {toxinidir}/tests/integration/postgres/{posargs}
[testenv:genconfig] [testenv:genconfig]
commands = oslo-config-generator --config-file=etc/drydock/drydock-config-generator.conf commands = oslo-config-generator --config-file=etc/drydock/drydock-config-generator.conf