diff --git a/Dockerfile b/Dockerfile index 7ee1961b..f7a91b64 100644 --- a/Dockerfile +++ b/Dockerfile @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -FROM ubuntu:16.04 +FROM python:3.5 ENV DEBIAN_FRONTEND noninteractive ENV container docker @@ -19,21 +19,6 @@ ENV PORT 9000 ENV LC_ALL C.UTF-8 ENV LANG C.UTF-8 -RUN apt -qq update && \ - apt -y install git \ - netbase \ - python3-minimal \ - python3-setuptools \ - python3-pip \ - python3-dev \ - ca-certificates \ - gcc \ - g++ \ - make \ - libffi-dev \ - libssl-dev --no-install-recommends - -RUN pip3 install wheel # Copy direct dependency requirements only to build a dependency layer COPY ./requirements-lock.txt /tmp/drydock/ RUN pip3 install -r /tmp/drydock/requirements-lock.txt diff --git a/alembic/versions/9593a123e7c5_create_base_database_tables.py b/alembic/versions/9593a123e7c5_create_base_database_tables.py index 2709507e..e8ebab52 100644 --- a/alembic/versions/9593a123e7c5_create_base_database_tables.py +++ b/alembic/versions/9593a123e7c5_create_base_database_tables.py @@ -23,12 +23,15 @@ def upgrade(): *tables.ResultMessage.__schema__) op.create_table(tables.ActiveInstance.__tablename__, *tables.ActiveInstance.__schema__) - op.create_table(tables.BuildData.__tablename__, - *tables.BuildData.__schema__) + op.create_table(tables.BootAction.__tablename__, + *tables.BootAction.__schema__) + op.create_table(tables.BootActionStatus.__tablename__, + *tables.BootActionStatus.__schema__) def downgrade(): op.drop_table(tables.Tasks.__tablename__) op.drop_table(tables.ResultMessage.__tablename__) op.drop_table(tables.ActiveInstance.__tablename__) - op.drop_table(tables.BuildData.__tablename__) + op.drop_table(tables.BootAction.__tablename__) + op.drop_table(tables.BootActionStatus.__tablename__) diff --git a/docs/source/bootaction.rst b/docs/source/bootaction.rst index 
cec13c28..a7d7c090 100644 --- a/docs/source/bootaction.rst +++ b/docs/source/bootaction.rst @@ -70,8 +70,9 @@ are separate pipelines for the ``location`` field to build the URL that referenc be sourced from and the ``data`` field (or the data sourced from resolving the ``location`` field). The ``location`` string will be passed through the ``location_pipeline`` before it is queried. This response -or the ``data`` field will then be passed through the ``data_pipeline``. Below are pipeline segments available -for use. +or the ``data`` field will then be passed through the ``data_pipeline``. The data entity will start the pipeline +as a bytestring meaning if it is defined in the ``data`` field, it will first be encoded into a bytestring. +Below are pipeline segments available for use. base64_decode Decode the data element from base64 @@ -79,6 +80,12 @@ base64_decode base64_encode Encode the data element in base64 +utf8_decode + Decode the data element from bytes to UTF-8 string + +utf8_encode + Encode the data element from a UTF-8 string to bytes + template Treat the data element as a Jinja2 template and apply a node context to it. The defined context available to the template is below. 
diff --git a/drydock_provisioner/cli/task/actions.py b/drydock_provisioner/cli/task/actions.py index 7c9a5091..0a27395b 100644 --- a/drydock_provisioner/cli/task/actions.py +++ b/drydock_provisioner/cli/task/actions.py @@ -99,7 +99,7 @@ class TaskCreate(CliAction): # pylint: disable=too-few-public-methods while True: time.sleep(self.poll_interval) task = self.api_client.get_task(task_id=task_id) - if task.status in ['completed', 'terminated']: + if task.get('status', '') in ['completed', 'terminated']: return task diff --git a/drydock_provisioner/config.py b/drydock_provisioner/config.py index 0db636d0..ea63ff71 100644 --- a/drydock_provisioner/config.py +++ b/drydock_provisioner/config.py @@ -87,6 +87,12 @@ class DrydockConfig(object): help='The URI database connect string.'), ] + # Options for the boot action framework + bootactions_options = [ + cfg.StrOpt( + 'report_url', + default='http://localhost:9000/api/v1.0/bootactions/') + ] # Enabled plugins plugin_options = [ cfg.StrOpt( @@ -151,22 +157,31 @@ class DrydockConfig(object): 'deploy_node', default=45, help='Timeout in minutes for deploying a node'), + cfg.IntOpt( + 'bootaction_final_status', + default=15, + help= + 'Timeout in minutes between deployment completion and the all boot actions reporting status' + ), ] def __init__(self): self.conf = cfg.CONF - def register_options(self): + def register_options(self, enable_keystone=True): self.conf.register_opts(DrydockConfig.options) + self.conf.register_opts( + DrydockConfig.bootactions_options, group='bootactions') self.conf.register_opts(DrydockConfig.logging_options, group='logging') self.conf.register_opts(DrydockConfig.plugin_options, group='plugins') self.conf.register_opts( DrydockConfig.database_options, group='database') self.conf.register_opts( DrydockConfig.timeout_options, group='timeouts') - self.conf.register_opts( - loading.get_auth_plugin_conf_options('password'), - group='keystone_authtoken') + if enable_keystone: + self.conf.register_opts( + 
loading.get_auth_plugin_conf_options('password'), + group='keystone_authtoken') config_mgr = DrydockConfig() diff --git a/drydock_provisioner/control/api.py b/drydock_provisioner/control/api.py index a4b3ab31..8dd3131f 100644 --- a/drydock_provisioner/control/api.py +++ b/drydock_provisioner/control/api.py @@ -20,9 +20,11 @@ from .designs import DesignsPartsKindsResource from .designs import DesignsPartResource from .tasks import TasksResource from .tasks import TaskResource -from .bootdata import BootdataResource from .nodes import NodesResource from .health import HealthResource +from .bootaction import BootactionUnitsResource +from .bootaction import BootactionFilesResource +from .bootaction import BootactionResource from .base import DrydockRequest, BaseResource from .middleware import AuthMiddleware, ContextMiddleware, LoggingMiddleware @@ -67,12 +69,15 @@ def start_api(state_manager=None, ingester=None, orchestrator=None): ('/designs/{design_id}/parts/{kind}/{name}', DesignsPartResource( state_manager=state_manager, orchestrator=orchestrator)), - # API for nodes to discover their bootdata during curtin install - ('/bootdata/{hostname}/{data_key}', BootdataResource( - state_manager=state_manager, orchestrator=orchestrator)), - # API to list current MaaS nodes ('/nodes', NodesResource()), + # API for nodes to discover their boot actions during curtin install + ('/bootactions/nodes/{hostname}/units', BootactionUnitsResource( + state_manager=state_manager, orchestrator=orchestrator)), + ('/bootactions/nodes/{hostname}/files', BootactionFilesResource( + state_manager=state_manager, orchestrator=orchestrator)), + ('/bootactions/{action_id}', BootactionResource( + state_manager=state_manager, orchestrator=orchestrator)), ] for path, res in v1_0_routes: diff --git a/drydock_provisioner/control/base.py b/drydock_provisioner/control/base.py index 42afb72f..5001c601 100644 --- a/drydock_provisioner/control/base.py +++ b/drydock_provisioner/control/base.py @@ -23,7 +23,7 
@@ import drydock_provisioner.error as errors class BaseResource(object): def __init__(self): - self.logger = logging.getLogger('control') + self.logger = logging.getLogger('drydock') def on_options(self, req, resp): self_attrs = dir(self) diff --git a/drydock_provisioner/control/bootaction.py b/drydock_provisioner/control/bootaction.py new file mode 100644 index 00000000..1aee068b --- /dev/null +++ b/drydock_provisioner/control/bootaction.py @@ -0,0 +1,180 @@ +# Copyright 2017 AT&T Intellectual Property. All other rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Handle resources for boot action API endpoints. """ + +import falcon +import ulid2 +import tarfile +import io +import logging + +from .base import StatefulResource + +logger = logging.getLogger('drydock') + +class BootactionResource(StatefulResource): + def __init__(self, orchestrator=None, **kwargs): + super().__init__(**kwargs) + self.orchestrator = orchestrator + + def on_post(self, req, resp, action_id): + """Post status messages or final status for a boot action. + + This endpoint does not use the standard oslo_policy enforcement as this endpoint + is accessed by unmanned nodes. 
Instead it uses an internal key authentication + + :param req: falcon request + :param resp: falcon response + :param action_id: ULID ID of the boot action + """ + + +class BootactionAssetsResource(StatefulResource): + def __init__(self, orchestrator=None, **kwargs): + super().__init__(**kwargs) + self.orchestrator = orchestrator + + def do_get(self, req, resp, hostname, asset_type): + """Render ``unit`` type boot action assets for hostname. + + Get the boot action context for ``hostname`` from the database + and render all ``unit`` type assets for the host. Validate host + is providing the correct identity key in the ``X-Bootaction-Key`` + header. + + :param req: falcon request object + :param resp: falcon response object + :param hostname: URL path parameter indicating the calling host + :param asset_type: Asset type to include in the response - ``unit``, ``file``, ``pkg_list``, ``all`` + """ + try: + ba_ctx = self.state_manager.get_boot_action_context(hostname) + except Exception as ex: + self.logger.error( + "Error locating boot action for %s" % hostname, exc_info=ex) + raise falcon.HTTPNotFound() + + if ba_ctx is None: + raise falcon.HTTPNotFound( + description="Error locating boot action for %s" % hostname) + + BootactionUtils.check_auth(ba_ctx, req) + + asset_type_filter = None if asset_type == 'all' else asset_type + + try: + task = self.state_manager.get_task(ba_ctx['task_id']) + design_status, site_design = self.orchestrator.get_effective_site( + task.design_ref) + + assets = list() + for ba in site_design.bootactions: + if hostname in ba.target_nodes: + action_id = ulid2.generate_binary_ulid() + assets.extend( + ba.render_assets( + hostname, + site_design, + action_id, + type_filter=asset_type_filter)) + self.state_manager.post_boot_action( + hostname, ba_ctx['task_id'], ba_ctx['identity_key'], + action_id) + + tarball = BootactionUtils.tarbuilder(asset_list=assets) + resp.set_header('Content-Type', 'application/gzip') + 
resp.set_header('Content-Disposition', + "attachment; filename=\"%s-%s.tar.gz\"" % + (hostname, asset_type)) + resp.data = tarball + resp.status = falcon.HTTP_200 + return + except Exception as ex: + self.logger.debug("Exception in boot action API.", exc_info=ex) + raise falcon.HTTPInternalServerError(str(ex)) + + +class BootactionUnitsResource(BootactionAssetsResource): + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def on_get(self, req, resp, hostname): + self.logger.debug( + "Accessing boot action units resource for host %s." % hostname) + super().do_get(req, resp, hostname, 'unit') + + +class BootactionFilesResource(BootactionAssetsResource): + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def on_get(self, req, resp, hostname): + super().do_get(req, resp, hostname, 'file') + + +class BootactionUtils(object): + """Utility class shared by Boot Action API resources.""" + + @staticmethod + def check_auth(ba_ctx, req): + """Check request authentication based on boot action context. + + Raise proper Falcon exception if authentication fails, otherwise + silently return + + :param ba_ctx: Boot Action context from database + :param req: The falcon request object of the API call + """ + identity_key = req.get_header('X-Bootaction-Key', default='') + + if identity_key == '': + raise falcon.HTTPUnauthorized( + title='Unauthorized', + description='No X-Bootaction-Key', + challenges=['Bootaction-Key']) + + if ba_ctx['identity_key'] != bytes.fromhex(identity_key): + logger.warn( + "Forbidding boot action access - node: %s, identity_key: %s, req header: %s" + % (ba_ctx['node_name'], str(ba_ctx['identity_key']), + str(bytes.fromhex(identity_key)))) + raise falcon.HTTPForbidden( + title='Unauthorized', description='Invalid X-Bootaction-Key') + + @staticmethod + def tarbuilder(asset_list=None): + """Create a tar file from rendered assets. + + Add each asset in ``asset_list`` to a tar file with the defined + path and permission. 
The assets need to have the rendered_bytes field + populated. Return a tarfile.TarFile. + + :param hostname: the hostname the tar is destined for + :param balltype: the type of assets being included + :param asset_list: list of objects.BootActionAsset instances + """ + tarbytes = io.BytesIO() + tarball = tarfile.open( + mode='w:gz', fileobj=tarbytes, format=tarfile.GNU_FORMAT) + asset_list = asset_list or [] + for a in asset_list: + fileobj = io.BytesIO(a.rendered_bytes) + tarasset = tarfile.TarInfo(name=a.path) + tarasset.size = len(a.rendered_bytes) + tarasset.mode = a.permissions if a.permissions else 0o600 + tarasset.uid = 0 + tarasset.gid = 0 + tarball.addfile(tarasset, fileobj=fileobj) + tarball.close() + return tarbytes.getvalue() diff --git a/drydock_provisioner/control/bootdata.py b/drydock_provisioner/control/bootdata.py deleted file mode 100644 index 2493f757..00000000 --- a/drydock_provisioner/control/bootdata.py +++ /dev/null @@ -1,120 +0,0 @@ -# Copyright 2017 AT&T Intellectual Property. All other rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Handle resources for bootdata API endpoints. 
- -THIS API IS DEPRECATED -""" - -from oslo_config import cfg - -from .base import StatefulResource - - -class BootdataResource(StatefulResource): - - bootdata_options = [ - cfg.StrOpt( - 'prom_init', - default='/etc/drydock/bootdata/join.sh', - help='Path to file to distribute for prom_init.sh') - ] - - def __init__(self, orchestrator=None, **kwargs): - super(BootdataResource, self).__init__(**kwargs) - self.authorized_roles = ['anyone'] - self.orchestrator = orchestrator - - cfg.CONF.register_opts( - BootdataResource.bootdata_options, group='bootdata') - - init_file = open(cfg.CONF.bootdata.prom_init, 'r') - self.prom_init = init_file.read() - init_file.close() - - def on_get(self, req, resp, hostname, data_key): - if data_key == 'promservice': - resp.body = BootdataResource.prom_init_service - resp.content_type = 'text/plain' - return - elif data_key == 'vfservice': - resp.body = BootdataResource.vfs_service - resp.content_type = 'text/plain' - return - elif data_key == 'prominit': - resp.body = self.prom_init - resp.content_type = 'text/plain' - return - elif data_key == 'promconfig': - # The next PS will be a complete rewrite of the bootdata system - # so not wasting time refactoring this - # TODO(sh8121att) rebuild bootdata API for BootAction framework - resp.content = 'text/plain' - return - - -# bootdata = self.state_manager.get_bootdata_key(hostname) -# -# if bootdata is None: -# resp.status = falcon.HTTP_404 -# return -# else: -# resp.content_type = 'text/plain' -# -# host_design_id = bootdata.get('design_id', None) -# host_design = self.orchestrator.get_effective_site( -# host_design_id) -# -# host_model = host_design.get_baremetal_node(hostname) -# -# part_selectors = ['all', hostname] -# -# if host_model.tags is not None: -# part_selectors.extend(host_model.tags) -# -# all_configs = host_design.get_promenade_config(part_selectors) -# -# part_list = [i.document for i in all_configs] -# -# resp.body = "---\n" + "---\n".join([ -# 
base64.b64decode(i.encode()).decode('utf-8') -# for i in part_list -# ]) + "\n..." -# return - - prom_init_service = ( - "[Unit]\n" - "Description=Promenade Initialization Service\n" - "Documentation=http://github.com/att-comdev/drydock\n" - "After=network-online.target local-fs.target\n" - "ConditionPathExists=!/var/lib/prom.done\n\n" - "[Service]\n" - "Type=simple\n" - "ExecStart=/var/tmp/prom_init.sh /etc/prom_init.yaml\n\n" - "[Install]\n" - "WantedBy=multi-user.target\n") - - vfs_service = ( - "[Unit]\n" - "Description=SR-IOV Virtual Function configuration\n" - "Documentation=http://github.com/att-comdev/drydock\n" - "After=network.target local-fs.target\n\n" - "[Service]\n" - "Type=simple\n" - "ExecStart=/bin/sh -c '/bin/echo 4 >/sys/class/net/ens3f0/device/sriov_numvfs'\n\n" - "[Install]\n" - "WantedBy=multi-user.target\n") - - -def list_opts(): - return {'bootdata': BootdataResource.bootdata_options} diff --git a/drydock_provisioner/control/health.py b/drydock_provisioner/control/health.py index a37cae35..0c2915a4 100644 --- a/drydock_provisioner/control/health.py +++ b/drydock_provisioner/control/health.py @@ -15,11 +15,13 @@ import falcon from drydock_provisioner.control.base import BaseResource + class HealthResource(BaseResource): """ Return empty response/body to show that Drydock is healthy """ + def on_get(self, req, resp): """ It really does nothing right now. 
It may do more later diff --git a/drydock_provisioner/control/middleware.py b/drydock_provisioner/control/middleware.py index 83ed0c9e..21f6dfab 100644 --- a/drydock_provisioner/control/middleware.py +++ b/drydock_provisioner/control/middleware.py @@ -73,6 +73,7 @@ class AuthMiddleware(object): 'Request from authenticated user %s with roles %s' % (ctx.user, ','.join(ctx.roles))) else: + self.logger.debug('Request from unauthenticated client.') ctx.authenticated = False diff --git a/drydock_provisioner/drivers/node/maasdriver/actions/node.py b/drydock_provisioner/drivers/node/maasdriver/actions/node.py index b745c4c6..bcdbf98a 100644 --- a/drydock_provisioner/drivers/node/maasdriver/actions/node.py +++ b/drydock_provisioner/drivers/node/maasdriver/actions/node.py @@ -213,7 +213,7 @@ class CreateNetworkTemplate(BaseMaasAction): site_design = self._load_site_design() except errors.OrchestratorError: self.task.add_status_msg( - "Error loading site design.", + msg="Error loading site design.", error=True, ctx='NA', ctx_type='NA') @@ -593,7 +593,7 @@ class ConfigureUserCredentials(BaseMaasAction): site_design = self._load_site_design() except errors.OrchestratorError: self.task.add_status_msg( - "Error loading site design.", + msg="Error loading site design.", error=True, ctx='NA', ctx_type='NA') @@ -605,7 +605,8 @@ class ConfigureUserCredentials(BaseMaasAction): try: key_list = maas_keys.SshKeys(self.maas_client) key_list.refresh() - except Exception: + except Exception as ex: + self.logger.debug("Error accessing the MaaS API.", exc_info=ex) self.task.set_status(hd_fields.TaskStatus.Complete) self.task.failure() self.task.add_status_msg( @@ -655,11 +656,12 @@ class IdentifyNode(BaseMaasAction): try: machine_list = maas_machine.Machines(self.maas_client) machine_list.refresh() - except Exception: + except Exception as ex: + self.logger.debug("Error accessing the MaaS API.", exc_info=ex) self.task.set_status(hd_fields.TaskStatus.Complete) self.task.failure() 
self.task.add_status_msg( - msg='Error accessing MaaS Machines API.', + msg='Error accessing MaaS Machines API: %s' % str(ex), error=True, ctx='NA', ctx_type='NA') @@ -673,7 +675,7 @@ class IdentifyNode(BaseMaasAction): site_design = self._load_site_design() except errors.OrchestratorError: self.task.add_status_msg( - "Error loading site design.", + msg="Error loading site design.", error=True, ctx='NA', ctx_type='NA') @@ -725,6 +727,7 @@ class ConfigureHardware(BaseMaasAction): machine_list = maas_machine.Machines(self.maas_client) machine_list.refresh() except Exception as ex: + self.logger.debug("Error accessing the MaaS API.", exc_info=ex) self.task.set_status(hd_fields.TaskStatus.Complete) self.task.failure() self.task.add_status_msg( @@ -742,7 +745,7 @@ class ConfigureHardware(BaseMaasAction): site_design = self._load_site_design() except errors.OrchestratorError: self.task.add_status_msg( - "Error loading site design.", + msg="Error loading site design.", error=True, ctx='NA', ctx_type='NA') @@ -856,6 +859,7 @@ class ApplyNodeNetworking(BaseMaasAction): subnets = maas_subnet.Subnets(self.maas_client) subnets.refresh() except Exception as ex: + self.logger.debug("Error accessing the MaaS API.", exc_info=ex) self.task.set_status(hd_fields.TaskStatus.Complete) self.task.failure() self.task.add_status_msg( @@ -873,7 +877,7 @@ class ApplyNodeNetworking(BaseMaasAction): site_design = self._load_site_design() except errors.OrchestratorError: self.task.add_status_msg( - "Error loading site design.", + msg="Error loading site design.", error=True, ctx='NA', ctx_type='NA') @@ -1155,6 +1159,7 @@ class ApplyNodePlatform(BaseMaasAction): tag_list = maas_tag.Tags(self.maas_client) tag_list.refresh() except Exception as ex: + self.logger.debug("Error accessing the MaaS API.", exc_info=ex) self.task.set_status(hd_fields.TaskStatus.Complete) self.task.failure() self.task.add_status_msg( @@ -1172,7 +1177,7 @@ class ApplyNodePlatform(BaseMaasAction): site_design = 
self._load_site_design() except errors.OrchestratorError: self.task.add_status_msg( - "Error loading site design.", + msg="Error loading site design.", error=True, ctx='NA', ctx_type='NA') @@ -1313,6 +1318,7 @@ class ApplyNodeStorage(BaseMaasAction): machine_list = maas_machine.Machines(self.maas_client) machine_list.refresh() except Exception as ex: + self.logger.debug("Error accessing the MaaS API.", exc_info=ex) self.task.set_status(hd_fields.TaskStatus.Complete) self.task.failure() self.task.add_status_msg( @@ -1330,7 +1336,7 @@ class ApplyNodeStorage(BaseMaasAction): site_design = self._load_site_design() except errors.OrchestratorError: self.task.add_status_msg( - "Error loading site design.", + msg="Error loading site design.", error=True, ctx='NA', ctx_type='NA') @@ -1563,7 +1569,7 @@ class ApplyNodeStorage(BaseMaasAction): except Exception as ex: self.task.failure(focus=n.get_id()) self.task.add_status_msg( - "Error configuring storage.", + msg="Error configuring storage.", error=True, ctx=n.name, ctx_type='node') @@ -1640,6 +1646,7 @@ class DeployNode(BaseMaasAction): machine_list = maas_machine.Machines(self.maas_client) machine_list.refresh() except Exception as ex: + self.logger.debug("Error accessing the MaaS API.", exc_info=ex) self.task.set_status(hd_fields.TaskStatus.Complete) self.task.failure() self.task.add_status_msg( @@ -1657,7 +1664,7 @@ class DeployNode(BaseMaasAction): site_design = self._load_site_design() except errors.OrchestratorError: self.task.add_status_msg( - "Error loading site design.", + msg="Error loading site design.", error=True, ctx='NA', ctx_type='NA') @@ -1721,6 +1728,35 @@ class DeployNode(BaseMaasAction): msg=msg, error=True, ctx=n.name, ctx_type='node') continue + # Saving boot action context for a node + self.logger.info("Saving Boot Action context for node %s." 
% + (n.name)) + try: + ba_key = self.orchestrator.create_bootaction_context( + n.name, self.task) + + tag_list = maas_tag.Tags(self.maas_client) + tag_list.refresh() + node_id_tags = tag_list.startswith("%s_baid-" % (n.name)) + for t in node_id_tags: + t.delete() + + if ba_key is not None: + msg = "Creating boot action id key tag for node %s" % ( + n.name) + self.logger.debug(msg) + node_baid_tag = maas_tag.Tag( + self.maas_client, + name="%s_baid-%s" % (n.name, ba_key.hex())) + node_baid_tag = tag_list.add(node_baid_tag) + node_baid_tag.apply_to_node(machine.resource_id) + self.task.add_status_msg( + msg=msg, error=False, ctx=n.name, ctx_type='node') + except Exception as ex: + self.logger.error( + "Error setting boot action id key tag for %s." % n.name, + exc_info=ex) + self.logger.info("Deploying node %s" % (n.name)) try: diff --git a/drydock_provisioner/drivers/node/maasdriver/models/machine.py b/drydock_provisioner/drivers/node/maasdriver/models/machine.py index dd628d40..7edfbcf4 100644 --- a/drydock_provisioner/drivers/node/maasdriver/models/machine.py +++ b/drydock_provisioner/drivers/node/maasdriver/models/machine.py @@ -337,8 +337,9 @@ class Machine(model_base.ResourceBase): if isinstance(obj_dict['boot_interface'], dict): refined_dict['boot_mac'] = obj_dict['boot_interface'][ 'mac_address'] - refined_dict['boot_ip'] = obj_dict['boot_interface']['links'][ - 0]['ip_address'] + if len(obj_dict['boot_interface']['links']) > 0: + refined_dict['boot_ip'] = obj_dict['boot_interface'][ + 'links'][0].get('ip_address', None) i = cls(api_client, **refined_dict) return i diff --git a/drydock_provisioner/drivers/node/maasdriver/models/tag.py b/drydock_provisioner/drivers/node/maasdriver/models/tag.py index 351eca40..a240320d 100644 --- a/drydock_provisioner/drivers/node/maasdriver/models/tag.py +++ b/drydock_provisioner/drivers/node/maasdriver/models/tag.py @@ -129,6 +129,19 @@ class Tags(model_base.ResourceCollectionBase): def __init__(self, api_client, **kwargs): 
super(Tags, self).__init__(api_client) + def startswith(self, partial_tag): + """Find the set of tags that start with ``partial_tag``. + + Return a list of Tag instances that start with ``partial_tag``. + + :param partial_tag: string to compare to tags + """ + results = list() + for k, v in self.resources.items(): + if k.startswith(partial_tag): + results.append(v) + return results + def add(self, res): """ Create a new resource in this collection in MaaS diff --git a/drydock_provisioner/error.py b/drydock_provisioner/error.py index 4f5b4545..a7fabf89 100644 --- a/drydock_provisioner/error.py +++ b/drydock_provisioner/error.py @@ -54,6 +54,22 @@ class PersistentOrchestratorError(OrchestratorError): pass +class BootactionError(Exception): + pass + + +class UnknownPipelineSegment(BootactionError): + pass + + +class PipelineFailure(BootactionError): + pass + + +class InvalidAssetLocation(BootactionError): + pass + + class DriverError(Exception): pass diff --git a/drydock_provisioner/objects/bootaction.py b/drydock_provisioner/objects/bootaction.py index bdd0bc8f..6e29c0ce 100644 --- a/drydock_provisioner/objects/bootaction.py +++ b/drydock_provisioner/objects/bootaction.py @@ -12,12 +12,24 @@ # See the License for the specific language governing permissions and # limitations under the License. 
"""Object models for BootActions.""" +import requests +import base64 +from jinja2 import Template +from urllib.parse import urlparse +from urllib.parse import urlunparse +import re +import ulid2 import oslo_versionedobjects.fields as ovo_fields import drydock_provisioner.objects.base as base import drydock_provisioner.objects.fields as hd_fields +import drydock_provisioner.config as config +import drydock_provisioner.error as errors + +from drydock_provisioner.util import KeystoneUtils + @base.DrydockObjectRegistry.register class BootAction(base.DrydockPersistentObject, base.DrydockObject): @@ -33,6 +45,8 @@ class BootAction(base.DrydockPersistentObject, base.DrydockObject): ovo_fields.ObjectField('BootActionAssetList', nullable=False), 'node_filter': ovo_fields.ObjectField('NodeFilterSet', nullable=True), + 'target_nodes': + ovo_fields.ListOfStringsField(nullable=True), } def __init__(self, **kwargs): @@ -45,6 +59,30 @@ class BootAction(base.DrydockPersistentObject, base.DrydockObject): def get_name(self): return self.name + def render_assets(self, nodename, site_design, action_id, + type_filter=None): + """Render all of the assets in this bootaction. + + Render the assets of this bootaction and return them in a list. + The ``nodename`` and ``action_id`` will be + used to build the context for any assets utilizing the ``template`` + pipeline segment. 
+ + :param nodename: name of the node the assets are destined for + :param site_design: a objects.SiteDesign instance holding the design sets + :param action_id: a 128-bit ULID action_id of the boot action + the assets are part of + :param type_filter: optional filter of the types of assets to render + """ + assets = list() + for a in self.asset_list: + if type_filter is None or (type_filter is not None + and a.type == type_filter): + a.render(nodename, site_design, action_id) + assets.append(a) + + return assets + @base.DrydockObjectRegistry.register class BootActionList(base.DrydockObjectListBase, base.DrydockObject): @@ -72,7 +110,174 @@ class BootActionAsset(base.DrydockObject): } def __init__(self, **kwargs): - super().__init__(**kwargs) + if 'permissions' in kwargs: + mode = kwargs.pop('permissions') + if isinstance(mode, str): + mode = int(mode, base=8) + else: + mode = None + + super().__init__(permissions=mode, **kwargs) + self.rendered_bytes = None + + def render(self, nodename, site_design, action_id): + """Render this asset into a base64 encoded string. 
+ + The ``nodename`` and ``action_id`` will be used to construct + the context for evaluating the ``template`` pipeline segment + + :param nodename: the name of the node where the asset will be deployed + :param site_design: instance of objects.SiteDesign + :param action_id: a 128-bit ULID boot action id + """ + node = site_design.get_baremetal_node(nodename) + + tpl_ctx = { + 'node': { + 'hostname': nodename, + 'tags': [t for t in node.tags], + 'labels': {k: v + for (k, v) in node.owner_data.items()}, + 'network': {}, + }, + 'action': { + 'key': ulid2.ulid_to_base32(action_id), + 'report_url': config.config_mgr.conf.bootactions.report_url, + } + } + + for a in node.addressing: + if a.address is not None: + tpl_ctx['node']['network'][a.network] = dict() + tpl_ctx['node']['network'][a.network]['ip'] = a.address + network = site_design.get_network(a.network) + tpl_ctx['node']['network'][a.network]['cidr'] = network.cidr + tpl_ctx['node']['network'][a.network][ + 'dns_suffix'] = network.dns_domain + + if self.location is not None: + rendered_location = self.execute_pipeline( + self.location, self.location_pipeline, tpl_ctx=tpl_ctx) + data_block = self.resolve_asset_location(rendered_location) + else: + data_block = self.data.encode('utf-8') + + value = self.execute_pipeline( + data_block, self.data_pipeline, tpl_ctx=tpl_ctx) + + if isinstance(value, str): + value = value.encode('utf-8') + self.rendered_bytes = value + + def resolve_asset_location(self, asset_url): + """Retrieve the data asset from the url. + + Returns the asset as a bytestring. 
+ + :param asset_url: URL to retrieve the data asset from + """ + url_parts = urlparse(asset_url) + + if url_parts.scheme in ['http', 'https']: + try: + resp = requests.get(asset_url) + except Exception as ex: + raise errors.InvalidAssetLocation( + "Failed retrieving asset: %s - %s" % (type(ex).__name__, + str(ex))) + return resp.content + elif url_parts.scheme in [ + 'promenade+http', 'promenade+https', 'deckhand+http', + 'deckhand+https' + ]: + try: + ks_sess = KeystoneUtils.get_session() + url_parts = url_parts._replace(scheme=re.sub('^[^+]+\+', '', url_parts.scheme)) + new_url = urlunparse(url_parts) + resp = ks_sess.get(new_url) + except Exception as ex: + raise errors.InvalidAssetLocation( + "Failed retrieving asset: %s - %s" % (type(ex).__name__, + str(ex))) + return resp.content + else: + raise errors.InvalidAssetLocation( + "Unknown scheme %s" % url_parts.scheme) + + def execute_pipeline(self, data, pipeline, tpl_ctx=None): + """Execute a pipeline against a data element. + + Returns the manipulated ``data`` element + + :param data: The data element to be manipulated by the pipeline + :param pipeline: list of pipeline segments to execute + :param tpl_ctx: The optional context to be made available to the ``template`` pipeline + """ + segment_funcs = { + 'base64_encode': self.eval_base64_encode, + 'base64_decode': self.eval_base64_decode, + 'utf8_decode': self.eval_utf8_decode, + 'utf8_encode': self.eval_utf8_encode, + 'template': self.eval_template, + } + + for s in pipeline: + try: + data = segment_funcs[s](data, ctx=tpl_ctx) + except KeyError: + raise errors.UnknownPipelineSegment( + "Bootaction pipeline segment %s unknown." % s) + except Exception as ex: + raise errors.PipelineFailure( + "Error when running bootaction pipeline segment %s: %s - %s" + % (s, type(ex).__name__, str(ex))) + + return data + + def eval_base64_encode(self, data, ctx=None): + """Encode data as base64. 
+ + Light weight wrapper around base64 library to shed the ctx kwarg + + :param data: data to be encoded + :param ctx: throwaway, just allows a generic interface for pipeline segments + """ + return base64.b64encode(data) + + def eval_base64_decode(self, data, ctx=None): + """Decode data from base64. + + Light weight wrapper around base64 library to shed the ctx kwarg + + :param data: data to be decoded + :param ctx: throwaway, just allows a generic interface for pipeline segments + """ + return base64.b64decode(data) + + def eval_utf8_decode(self, data, ctx=None): + """Decode data from bytes to UTF-8 string. + + :param data: data to be decoded + :param ctx: throwaway, just allows a generic interface for pipeline segments + """ + return data.decode('utf-8') + + def eval_utf8_encode(self, data, ctx=None): + """Encode data from UTF-8 to bytes. + + :param data: data to be encoded + :param ctx: throwaway, just allows a generic interface for pipeline segments + """ + return data.encode('utf-8') + + def eval_template(self, data, ctx=None): + """Evaluate data as a Jinja2 template. 
+ + :param data: The template + :param ctx: Optional ctx to inject into the template render + """ + template = Template(data) + return template.render(ctx) @base.DrydockObjectRegistry.register diff --git a/drydock_provisioner/objects/site.py b/drydock_provisioner/objects/site.py index baccbb5c..999354cb 100644 --- a/drydock_provisioner/objects/site.py +++ b/drydock_provisioner/objects/site.py @@ -144,8 +144,6 @@ class SiteDesign(base.DrydockPersistentObject, base.DrydockObject): ovo_fields.ObjectField('HardwareProfileList', nullable=True), 'baremetal_nodes': ovo_fields.ObjectField('BaremetalNodeList', nullable=True), - 'prom_configs': - ovo_fields.ObjectField('PromenadeConfigList', nullable=True), 'racks': ovo_fields.ObjectField('RackList', nullable=True), 'bootactions': diff --git a/drydock_provisioner/orchestrator/orchestrator.py b/drydock_provisioner/orchestrator/orchestrator.py index 745d97b1..b24488c7 100644 --- a/drydock_provisioner/orchestrator/orchestrator.py +++ b/drydock_provisioner/orchestrator/orchestrator.py @@ -18,6 +18,7 @@ import importlib import logging import uuid import concurrent.futures +import os import drydock_provisioner.config as config import drydock_provisioner.objects as objects @@ -294,6 +295,7 @@ class Orchestrator(object): status, site_design = self.get_described_site(design_ref) if status.status == hd_fields.ActionResult.Success: self.compute_model_inheritance(site_design) + self.compute_bootaction_targets(site_design) status = self._validate_design(site_design, result_status=status) except Exception as ex: if status is not None: @@ -303,9 +305,8 @@ class Orchestrator(object): ctx='NA', ctx_type='NA') status.set_status(hd_fields.ActionResult.Failure) - else: - self.logger.error( - "Error getting site definition: %s" % str(ex), exc_info=ex) + self.logger.error( + "Error getting site definition: %s" % str(ex), exc_info=ex) else: status.add_status_msg( msg="Successfully computed effective design.", @@ -369,24 +370,48 @@ class 
Orchestrator(object): return nf + def compute_bootaction_targets(self, site_design): + """Find target nodes for each bootaction in ``site_design``. + + Calculate the node_filter for each bootaction and save the list + of target node names. + + :param site_design: an instance of objects.SiteDesign + """ + if site_design.bootactions is None: + return + for ba in site_design.bootactions: + nf = ba.node_filter + target_nodes = self.process_node_filter(nf, site_design) + ba.target_nodes = [x.get_id() for x in target_nodes] + def process_node_filter(self, node_filter, site_design): target_nodes = site_design.baremetal_nodes if node_filter is None: return target_nodes - if not isinstance(node_filter, dict): + if not isinstance(node_filter, dict) and not isinstance( + node_filter, objects.NodeFilterSet): msg = "Invalid node_filter, must be a dictionary with keys 'filter_set_type' and 'filter_set'." self.logger.error(msg) raise errors.OrchestratorError(msg) result_sets = [] - for f in node_filter.get('filter_set', []): - result_sets.append(self.process_filter(target_nodes, f)) + if isinstance(node_filter, dict): + for f in node_filter.get('filter_set', []): + result_sets.append(self.process_filter(target_nodes, f)) - return self.join_filter_sets( - node_filter.get('filter_set_type'), result_sets) + return self.join_filter_sets( + node_filter.get('filter_set_type'), result_sets) + + elif isinstance(node_filter, objects.NodeFilterSet): + for f in node_filter.filter_set: + result_sets.append(self.process_filter(target_nodes, f)) + + return self.join_filter_sets(node_filter.filter_set_type, + result_sets) def join_filter_sets(self, filter_set_type, result_sets): if filter_set_type == 'union': @@ -401,38 +426,50 @@ class Orchestrator(object): """Take a filter and apply it to the node_set. 
:param node_set: A full set of objects.BaremetalNode - :param filter_set: A filter set describing filters to apply to the node set + :param filter_set: A node filter describing filters to apply to the node set. + Either a dict or objects.NodeFilter """ try: - set_type = filter_set.get('filter_type', None) - - node_names = filter_set.get('node_names', []) - node_tags = filter_set.get('node_tags', []) - node_labels = filter_set.get('node_labels', {}) - rack_names = filter_set.get('rack_names', []) - rack_labels = filter_set.get('rack_labels', {}) + if isinstance(filter_set, dict): + set_type = filter_set.get('filter_type', None) + node_names = filter_set.get('node_names', []) + node_tags = filter_set.get('node_tags', []) + node_labels = filter_set.get('node_labels', {}) + rack_names = filter_set.get('rack_names', []) + rack_labels = filter_set.get('rack_labels', {}) + elif isinstance(filter_set, objects.NodeFilter): + set_type = filter_set.filter_type + node_names = filter_set.node_names + node_tags = filter_set.node_tags + node_labels = filter_set.node_labels + rack_names = filter_set.rack_names + rack_labels = filter_set.rack_labels + else: + raise errors.OrchestratorError( + "Node filter must be a dictionary or a NodeFilter instance" + ) target_nodes = dict() - if len(node_names) > 0: + if node_names and len(node_names) > 0: self.logger.debug("Filtering nodes based on node names.") target_nodes['node_names'] = [ x for x in node_set if x.get_name() in node_names ] - if len(node_tags) > 0: + if node_tags and len(node_tags) > 0: self.logger.debug("Filtering nodes based on node tags.") target_nodes['node_tags'] = [ x for x in node_set for t in node_tags if x.has_tag(t) ] - if len(rack_names) > 0: + if rack_names and len(rack_names) > 0: self.logger.debug("Filtering nodes based on rack names.") target_nodes['rack_names'] = [ x for x in node_set if x.get_rack() in rack_names ] - if len(node_labels) > 0: + if node_labels and len(node_labels) > 0: 
self.logger.debug("Filtering nodes based on node labels.") target_nodes['node_labels'] = [] for k, v in node_labels.items(): @@ -441,27 +478,27 @@ class Orchestrator(object): if getattr(x, 'owner_data', {}).get(k, None) == v ]) - if len(rack_labels) > 0: + if rack_labels and len(rack_labels) > 0: self.logger.info( "Rack label filtering not yet implemented, returning all nodes." ) target_nodes['rack_labels'] = node_set if set_type == 'union': - result_set = self.list_union( + return self.list_union( target_nodes.get('node_names', []), target_nodes.get('node_tags', []), target_nodes.get('rack_names', []), target_nodes.get('node_labels', [])) elif set_type == 'intersection': - result_set = self.list_intersection( - target_nodes.get('node_names', []), - target_nodes.get('node_tags', []), - target_nodes.get('rack_names', []), - target_nodes.get('node_labels', [])) + return self.list_intersection( + target_nodes.get('node_names', None), + target_nodes.get('node_tags', None), + target_nodes.get('rack_names', None), + target_nodes.get('node_labels', None)) - return result_set except Exception as ex: + self.logger.error("Error processing node filter.", exc_info=ex) raise errors.OrchestratorError( "Error processing node filter: %s" % str(ex)) @@ -472,11 +509,20 @@ class Orchestrator(object): :params rest: 0 or more lists of values """ if len(rest) > 1: - return list( - set(a).intersection( - set(Orchestrator.list_intersection(rest[0], rest[1:])))) + result = self.list_intersection(rest[0], *rest[1:]) + if a is None: + return result + elif result is None: + return a + else: + return list(set(a).intersection(set(result))) elif len(rest) == 1: - return list(set(a).intersection(set(rest[0]))) + if a is None and rest[0] is None: + return None + elif rest is None or rest[0]: + return a + else: + return list(set(a).intersection(set(rest[0]))) else: return a @@ -494,3 +540,27 @@ class Orchestrator(object): return list(set(lists[0])) else: return None + + def 
create_bootaction_context(self, nodename, task): + """Save a boot action context for ``nodename`` + + Generate a identity key and persist the boot action context + for nodename pointing at the top level task. Return the + generated identity key as ``bytes``. + + :param nodename: Name of the node the bootaction context is targeted for + :param task: The task instigating the ndoe deployment + """ + design_status, site_design = self.get_effective_site(task.design_ref) + + if site_design.bootactions is None: + return None + + for ba in site_design.bootactions: + if nodename in ba.target_nodes: + identity_key = os.urandom(32) + self.state_manager.post_boot_action_context( + nodename, task.get_id(), identity_key) + return identity_key + + return None diff --git a/drydock_provisioner/schemas/bootaction.yaml b/drydock_provisioner/schemas/bootaction.yaml index acdfcbe8..7230d8ac 100644 --- a/drydock_provisioner/schemas/bootaction.yaml +++ b/drydock_provisioner/schemas/bootaction.yaml @@ -44,8 +44,11 @@ data: - 'base64_encode' - 'template' - 'base64_decode' + - 'utf8_encode' + - 'utf8_decode' permissions: - type: 'integer' + type: 'string' + pattern: '\d{3}' required: - 'type' node_filter: diff --git a/drydock_provisioner/statemgmt/db/tables.py b/drydock_provisioner/statemgmt/db/tables.py index dc4a79af..e04207be 100644 --- a/drydock_provisioner/statemgmt/db/tables.py +++ b/drydock_provisioner/statemgmt/db/tables.py @@ -70,13 +70,27 @@ class ActiveInstance(ExtendTable): ] -class BuildData(ExtendTable): +class BootAction(ExtendTable): """Table persisting node build data.""" - __tablename__ = 'build_data' + __tablename__ = 'boot_action' __schema__ = [ Column('node_name', String(16), primary_key=True), Column('task_id', pg.BYTEA(16)), - Column('message', String(128)), + Column('identity_key', pg.BYTEA(32)), + ] + + +class BootActionStatus(ExtendTable): + """Table tracking status of node boot actions.""" + + __tablename__ = 'boot_action_status' + + __schema__ = [ + 
Column('node_name', String(32)), + Column('bootaction_id', pg.BYTEA(16), primary_key=True), + Column('task_id', pg.BYTEA(16)), + Column('identity_key', pg.BYTEA(32)), + Column('action_status', String(32)), ] diff --git a/drydock_provisioner/statemgmt/state.py b/drydock_provisioner/statemgmt/state.py index 0c26e360..934c01ce 100644 --- a/drydock_provisioner/statemgmt/state.py +++ b/drydock_provisioner/statemgmt/state.py @@ -14,7 +14,9 @@ """Access methods for managing external data access and persistence.""" import logging +import uuid from datetime import datetime +import ulid2 from sqlalchemy import create_engine from sqlalchemy import sql @@ -48,7 +50,8 @@ class DrydockState(object): self.tasks_tbl = tables.Tasks(self.db_metadata) self.result_message_tbl = tables.ResultMessage(self.db_metadata) self.active_instance_tbl = tables.ActiveInstance(self.db_metadata) - self.build_data_tbl = tables.BuildData(self.db_metadata) + self.boot_action_tbl = tables.BootAction(self.db_metadata) + self.ba_status_tbl = tables.BootActionStatus(self.db_metadata) return def tabularasa(self): @@ -60,7 +63,8 @@ class DrydockState(object): 'tasks', 'result_message', 'active_instance', - 'build_data', + 'boot_action', + 'boot_action_status', ] conn = self.db_engine.connect() @@ -379,7 +383,7 @@ class DrydockState(object): "INSERT INTO active_instance (dummy_key, identity, last_ping) " "VALUES (1, :instance_id, timezone('UTC', now())) " "ON CONFLICT (dummy_key) DO UPDATE SET " - "identity = :instance_id " + "identity = :instance_id, last_ping = timezone('UTC', now()) " "WHERE active_instance.last_ping < (now() - interval '%d seconds')" % (config.config_mgr.conf.leader_grace_period )).execution_options(autocommit=True) @@ -420,6 +424,119 @@ class DrydockState(object): except Exception as ex: self.logger.error("Error abidcating leadership: %s" % str(ex)) + def post_boot_action_context(self, nodename, task_id, identity): + """Save the context for a boot action for later access by a node. 
+ + The ``task_id`` passed here will be maintained for the context of the boot action + so that the design_ref can be accessed for loading the design document set. When + status messages for the boot actions are reported, they will be attached to this task. + + :param nodename: The name of the node + :param task_id: The uuid.UUID task id instigating the node deployment + :param identity: A 32 byte string that the node must provide in the ``X-BootAction-Key`` + header when accessing the boot action API + """ + try: + with self.db_engine.connect() as conn: + query = sql.text( + "INSERT INTO boot_action AS ba1 (node_name, task_id, identity_key) " + "VALUES (:node, :task_id, :identity) " + "ON CONFLICT (node_name) DO UPDATE SET " + "task_id = :task_id, identity_key = :identity " + "WHERE ba1.node_name = :node").execution_options( + autocommit=True) + + conn.execute( + query, + node=nodename, + task_id=task_id.bytes, + identity=identity) + + return True + except Exception as ex: + self.logger.error( + "Error posting boot action context for node %s" % nodename, + exc_info=ex) + return False + + def get_boot_action_context(self, nodename): + """Get the boot action context for a node. 
+ + Returns dictionary with ``node_name``, ``task_id`` and ``identity_key`` keys + + :param nodename: Name of the node + """ + try: + with self.db_engine.connect() as conn: + query = self.boot_action_tbl.select().where( + self.boot_action_tbl.c.node_name == nodename) + rs = conn.execute(query) + r = rs.fetchone() + if r is not None: + result_dict = dict(r) + result_dict['task_id'] = uuid.UUID( + bytes=bytes(result_dict['task_id'])) + result_dict['identity_key'] = bytes( + result_dict['identity_key']) + return result_dict + return None + except Exception as ex: + self.logger.error( + "Error retrieving boot action context for node %s" % nodename, + exc_info=ex) + return None + + def post_boot_action(self, + nodename, + task_id, + identity_key, + action_id, + action_status=hd_fields.ActionResult.Incomplete): + """Post a individual boot action. + + :param nodename: The name of the node the boot action is running on + :param task_id: The uuid.UUID task_id of the task that instigated the node deployment + :param identity_key: A 256-bit key the node must provide when accessing the boot action API + :param action_id: The string ULID id of the boot action + :param action_status: The status of the action. + """ + try: + with self.db_engine.connect() as conn: + query = self.ba_status_tbl.insert().values( + node_name=nodename, + bootaction_id=action_id, + task_id=task_id.bytes, + identity_key=identity_key, + action_status=action_status) + conn.execute(query) + return True + except Exception as ex: + self.logger.error( + "Error saving boot action %s." % action_id, exc_info=ex) + + def get_boot_action(self, action_id): + """Query for a single boot action by ID. 
+ + :param action_id: string ULID bootaction id + """ + try: + with self.db_engine.connect() as conn: + query = self.ba_status_tbl.select().where( + bootaction_id=ulid2.decode_ulid_base32(action_id)) + rs = conn.execute(query) + r = rs.fetchone() + if r is not None: + ba_dict = dict(r) + ba_dict['bootaction_id'] = bytes(ba_dict['bootaction_id']) + ba_dict['identity_key'] = bytes( + ba_dict['identity_key']).hex() + return ba_dict + else: + return None + except Exception as ex: + self.logger.error( + "Error querying boot action %s" % action_id, exc_info=ex) + def post_promenade_part(self, part): my_lock = self.promenade_lock.acquire(blocking=True, timeout=10) if my_lock: diff --git a/drydock_provisioner/util.py b/drydock_provisioner/util.py new file mode 100644 index 00000000..101d9ddb --- /dev/null +++ b/drydock_provisioner/util.py @@ -0,0 +1,40 @@ +# Copyright 2017 AT&T Intellectual Property. All other rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +"""Utility classes.""" +from keystoneauth1 import session +from keystoneauth1.identity import v3 + +import drydock_provisioner.config as config + + +class KeystoneUtils(object): + """Utility methods for using Keystone.""" + + @staticmethod + def get_session(): + """Get an initialized keystone session. + + Authentication is based on the keystone_authtoken section of the config file. 
+ """ + auth_info = dict() + for f in [ + 'auth_url', 'username', 'password', 'project_id', + 'user_domain_name' + ]: + auth_info[f] = getattr(config.config_mgr.conf.keystone_authtoken, + f) + + auth = v3.Password(**auth_info) + return session.Session(auth=auth) diff --git a/requirements-direct.txt b/requirements-direct.txt index ce90a9b2..88172fcf 100644 --- a/requirements-direct.txt +++ b/requirements-direct.txt @@ -19,3 +19,5 @@ alembic==0.8.2 sqlalchemy==1.1.14 psycopg2==2.7.3.1 jsonschema==2.6.0 +jinja2==2.9.6 +ulid2==0.1.1 diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 00000000..9a030319 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,110 @@ +# Copyright 2017 AT&T Intellectual Property. All other rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Shared fixtures used by integration tests.""" +import logging +import os +import shutil + +import drydock_provisioner.config as config +import drydock_provisioner.objects as objects + +from drydock_provisioner.statemgmt.state import DrydockState +from drydock_provisioner.ingester.ingester import Ingester +from drydock_provisioner.orchestrator.orchestrator import Orchestrator + +import pytest + + +@pytest.fixture() +def test_ingester(): + ingester = Ingester() + ingester.enable_plugin( + 'drydock_provisioner.ingester.plugins.yaml.YamlIngester') + return ingester + + +@pytest.fixture() +def test_orchestrator(drydock_state, test_ingester): + orchestrator = Orchestrator( + state_manager=drydock_state, ingester=test_ingester) + return orchestrator + + +@pytest.fixture() +def blank_state(drydock_state): + drydock_state.tabularasa() + return drydock_state + + +@pytest.fixture(scope='session') +def drydock_state(setup): + state_mgr = DrydockState() + state_mgr.connect_db() + return state_mgr + + +@pytest.fixture(scope='module') +def input_files(tmpdir_factory, request): + tmpdir = tmpdir_factory.mktemp('data') + samples_dir = os.path.dirname(os.getenv('YAMLDIR')) + samples = os.listdir(samples_dir) + + for f in samples: + src_file = samples_dir + "/" + f + dst_file = str(tmpdir) + "/" + f + shutil.copyfile(src_file, dst_file) + + return tmpdir + + +@pytest.fixture(scope='session') +def setup(setup_logging): + objects.register_all() + + config.config_mgr.register_options(enable_keystone=False) + + config.config_mgr.conf([]) + config.config_mgr.conf.set_override( + name="database_connect_string", + group="database", + override="postgresql+psycopg2://drydock:drydock@localhost:5432/drydock" + ) + config.config_mgr.conf.set_override( + name="leader_grace_period", override=15) + config.config_mgr.conf.set_override(name="poll_interval", override=3) + return + + +@pytest.fixture(scope='session') +def setup_logging(): + # Setup root logger + logger = 
logging.getLogger('drydock') + logger.setLevel('DEBUG') + ch = logging.StreamHandler() + formatter = logging.Formatter( + '%(asctime)s - %(levelname)s - %(filename)s:%(funcName)s - %(message)s' + ) + ch.setFormatter(formatter) + logger.addHandler(ch) + + # Specalized format for API logging + logger = logging.getLogger('drydock.control') + logger.propagate = False + formatter = logging.Formatter( + '%(asctime)s - %(levelname)s - %(user)s - %(req_id)s - %(external_ctx)s - %(message)s' + ) + + ch = logging.StreamHandler() + ch.setFormatter(formatter) + logger.addHandler(ch) diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py deleted file mode 100644 index 19de7131..00000000 --- a/tests/integration/conftest.py +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright 2017 AT&T Intellectual Property. All other rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-"""Shared fixtures used by integration tests.""" -import logging - -from oslo_config import cfg - -import drydock_provisioner.config as config -import drydock_provisioner.objects as objects - -from drydock_provisioner.statemgmt.state import DrydockState - -import pytest - - -@pytest.fixture() -def blank_state(drydock_state): - drydock_state.tabularasa() - return drydock_state - - -@pytest.fixture(scope='session') -def drydock_state(setup): - state_mgr = DrydockState() - state_mgr.connect_db() - return state_mgr - - -@pytest.fixture(scope='session') -def setup(): - objects.register_all() - logging.basicConfig(level='DEBUG') - - req_opts = { - 'default': - [cfg.IntOpt('leader_grace_period'), - cfg.IntOpt('poll_interval')], - 'database': [cfg.StrOpt('database_connect_string')], - 'logging': [ - cfg.StrOpt('global_logger_name', default='drydock'), - ] - } - - for k, v in req_opts.items(): - config.config_mgr.conf.register_opts(v, group=k) - - config.config_mgr.conf([]) - config.config_mgr.conf.set_override( - name="database_connect_string", - group="database", - override="postgresql+psycopg2://drydock:drydock@localhost:5432/drydock" - ) - config.config_mgr.conf.set_override( - name="leader_grace_period", group="default", override=15) - config.config_mgr.conf.set_override( - name="poll_interval", group="default", override=3) - return diff --git a/tests/integration/postgres/test_api_bootaction.py b/tests/integration/postgres/test_api_bootaction.py new file mode 100644 index 00000000..1beb1a55 --- /dev/null +++ b/tests/integration/postgres/test_api_bootaction.py @@ -0,0 +1,94 @@ +# Copyright 2017 AT&T Intellectual Property. All other rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Generic testing for the orchestrator.""" +from falcon import testing +import pytest +import os +import tarfile +import io +import falcon + +import drydock_provisioner.objects.fields as hd_fields + +from drydock_provisioner.control.api import start_api + + +class TestClass(object): + def test_bootaction_context(self, falcontest, seed_bootaction): + """Test that the API will return a boot action context""" + url = "/api/v1.0/bootactions/nodes/%s/units" % seed_bootaction[ + 'nodename'] + auth_hdr = {'X-Bootaction-Key': "%s" % seed_bootaction['identity_key']} + + result = falcontest.simulate_get(url, headers=auth_hdr) + + assert result.status == falcon.HTTP_200 + + fileobj = io.BytesIO(result.content) + tarfile.open(mode='r:gz', fileobj=fileobj) + + def test_bootaction_context_notfound(self, falcontest): + """Test that the API will return a 404 for unknown node""" + url = "/api/v1.0/bootactions/nodes/%s/units" % 'foo' + auth_hdr = {'X-Bootaction-Key': "%s" % 'bar'} + + result = falcontest.simulate_get(url, headers=auth_hdr) + + assert result.status == falcon.HTTP_404 + + def test_bootaction_context_noauth(self, falcontest, seed_bootaction): + """Test that the API will return a boot action context""" + url = "/api/v1.0/bootactions/nodes/%s/units" % seed_bootaction[ + 'nodename'] + + result = falcontest.simulate_get(url) + + assert result.status == falcon.HTTP_401 + + def test_bootaction_context_badauth(self, falcontest, seed_bootaction): + """Test that the API will return a boot action context""" + url = "/api/v1.0/bootactions/nodes/%s/units" % 
seed_bootaction[ + 'nodename'] + auth_hdr = {'X-Bootaction-Key': 'deadbeef'} + + result = falcontest.simulate_get(url, headers=auth_hdr) + + assert result.status == falcon.HTTP_403 + + @pytest.fixture() + def seed_bootaction(self, blank_state, test_orchestrator, input_files): + """Add a task and boot action to the database for testing.""" + input_file = input_files.join("fullsite.yaml") + design_ref = "file://%s" % input_file + test_task = test_orchestrator.create_task( + action=hd_fields.OrchestratorAction.Noop, design_ref=design_ref) + + id_key = os.urandom(32) + blank_state.post_boot_action_context('compute01', + test_task.get_id(), id_key) + + ba_ctx = dict( + nodename='compute01', + task_id=test_task.get_id(), + identity_key=id_key.hex()) + return ba_ctx + + @pytest.fixture() + def falcontest(self, drydock_state, test_ingester, test_orchestrator): + """Create a test harness for the the Falcon API framework.""" + return testing.TestClient( + start_api( + state_manager=drydock_state, + ingester=test_ingester, + orchestrator=test_orchestrator)) diff --git a/tests/integration/test_api_tasks.py b/tests/integration/postgres/test_api_tasks.py similarity index 100% rename from tests/integration/test_api_tasks.py rename to tests/integration/postgres/test_api_tasks.py diff --git a/tests/integration/test_orch_generic.py b/tests/integration/postgres/test_orch_generic.py similarity index 100% rename from tests/integration/test_orch_generic.py rename to tests/integration/postgres/test_orch_generic.py diff --git a/tests/integration/bs_psql.sh b/tests/integration/postgres/test_postgres.sh similarity index 50% rename from tests/integration/bs_psql.sh rename to tests/integration/postgres/test_postgres.sh index 1fa4b723..217dd0f1 100755 --- a/tests/integration/bs_psql.sh +++ b/tests/integration/postgres/test_postgres.sh @@ -6,6 +6,11 @@ sleep 15 psql -h localhost -c "create user drydock with password 'drydock';" postgres postgres psql -h localhost -c "create database drydock;" 
postgres postgres -export DRYDOCK_DB_URL="postgresql+psycopg2://drydock:drydock@localhost:5432/drydock" -alembic upgrade head +sudo docker run --rm -t --net=host -e DRYDOCK_DB_URL="postgresql+psycopg2://drydock:drydock@localhost:5432/drydock" --entrypoint /usr/local/bin/alembic drydock:latest upgrade head +py.test $1 +RESULT=$? + +sudo docker stop psql_integration + +exit $RESULT diff --git a/tests/integration/test_postgres_leadership.py b/tests/integration/postgres/test_postgres_leadership.py similarity index 100% rename from tests/integration/test_postgres_leadership.py rename to tests/integration/postgres/test_postgres_leadership.py diff --git a/tests/integration/test_postgres_results.py b/tests/integration/postgres/test_postgres_results.py similarity index 100% rename from tests/integration/test_postgres_results.py rename to tests/integration/postgres/test_postgres_results.py diff --git a/tests/integration/test_postgres_tasks.py b/tests/integration/postgres/test_postgres_tasks.py similarity index 100% rename from tests/integration/test_postgres_tasks.py rename to tests/integration/postgres/test_postgres_tasks.py diff --git a/tests/unit/test_bootaction_asset_render.py b/tests/unit/test_bootaction_asset_render.py new file mode 100644 index 00000000..b5ba1a66 --- /dev/null +++ b/tests/unit/test_bootaction_asset_render.py @@ -0,0 +1,41 @@ +# Copyright 2017 AT&T Intellectual Property. All other rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Test that boot action assets are rendered correctly.""" + +import ulid2 + +from drydock_provisioner.ingester.ingester import Ingester +from drydock_provisioner.statemgmt.state import DrydockState +import drydock_provisioner.objects as objects + +class TestClass(object): + def test_bootaction_render(self, input_files, setup): + objects.register_all() + + input_file = input_files.join("fullsite.yaml") + + design_state = DrydockState() + design_ref = "file://%s" % str(input_file) + + ingester = Ingester() + ingester.enable_plugin( + 'drydock_provisioner.ingester.plugins.yaml.YamlIngester') + design_status, design_data = ingester.ingest_data( + design_state=design_state, design_ref=design_ref) + + ba = design_data.get_bootaction('helloworld') + action_id = ulid2.generate_binary_ulid() + assets = ba.render_assets('compute01', design_data, action_id) + + assert 'compute01' in assets[0].rendered_bytes.decode('utf-8') diff --git a/tests/unit/test_bootaction_pipeline.py b/tests/unit/test_bootaction_pipeline.py new file mode 100644 index 00000000..b9858ad4 --- /dev/null +++ b/tests/unit/test_bootaction_pipeline.py @@ -0,0 +1,43 @@ +# Copyright 2017 AT&T Intellectual Property. All other rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Test that rack models are properly parsed.""" +import base64 + +import drydock_provisioner.objects as objects + + +class TestClass(object): + def test_bootaction_pipeline_base64(self): + objects.register_all() + + ba = objects.BootActionAsset() + + orig = 'Test 1 2 3!'.encode('utf-8') + expected_value = base64.b64encode(orig) + + test_value = ba.execute_pipeline(orig, ['base64_encode']) + + assert expected_value == test_value + + def test_bootaction_pipeline_utf8(self): + objects.register_all() + + ba = objects.BootActionAsset() + + expected_value = 'Test 1 2 3!' + orig = expected_value.encode('utf-8') + + test_value = ba.execute_pipeline(orig, ['utf8_decode']) + + assert test_value == expected_value diff --git a/tests/unit/test_bootaction_scoping.py b/tests/unit/test_bootaction_scoping.py new file mode 100644 index 00000000..88e7283c --- /dev/null +++ b/tests/unit/test_bootaction_scoping.py @@ -0,0 +1,54 @@ +# Copyright 2017 AT&T Intellectual Property. All other rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import drydock_provisioner.objects as objects + +class TestClass(object): + def test_bootaction_scoping_blankfilter(self, input_files, + test_orchestrator): + """Test a boot action with no node filter scopes correctly.""" + input_file = input_files.join("fullsite.yaml") + + design_ref = "file://%s" % str(input_file) + + design_status, design_data = test_orchestrator.get_effective_site( + design_ref) + + assert design_status.status == objects.fields.ActionResult.Success + + assert len(design_data.bootactions) > 0 + + for ba in design_data.bootactions: + if ba.get_id() == 'helloworld': + assert 'compute01' in ba.target_nodes + assert 'controller01' in ba.target_nodes + + def test_bootaction_scoping_unionfilter(self, input_files, + test_orchestrator): + """Test a boot action with a union node filter scopes correctly.""" + input_file = input_files.join("fullsite.yaml") + + design_ref = "file://%s" % str(input_file) + + design_status, design_data = test_orchestrator.get_effective_site( + design_ref) + + assert design_status.status == objects.fields.ActionResult.Success + + assert len(design_data.bootactions) > 0 + + for ba in design_data.bootactions: + if ba.get_id() == 'hw_filtered': + assert 'compute01' in ba.target_nodes + assert 'controller01' not in ba.target_nodes diff --git a/tests/unit/test_bootaction_tarbuilder.py b/tests/unit/test_bootaction_tarbuilder.py new file mode 100644 index 00000000..2d956b56 --- /dev/null +++ b/tests/unit/test_bootaction_tarbuilder.py @@ -0,0 +1,58 @@ +# Copyright 2017 AT&T Intellectual Property. All other rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Test that rack models are properly parsed.""" + +import ulid2 +import tarfile +import io + +import drydock_provisioner.objects as objects +from drydock_provisioner.ingester.ingester import Ingester +from drydock_provisioner.statemgmt.state import DrydockState +from drydock_provisioner.control.bootaction import BootactionUtils + + +class TestClass(object): + def test_bootaction_tarbuilder(self, input_files, setup): + objects.register_all() + + input_file = input_files.join("fullsite.yaml") + + design_state = DrydockState() + design_ref = "file://%s" % str(input_file) + + ingester = Ingester() + ingester.enable_plugin( + 'drydock_provisioner.ingester.plugins.yaml.YamlIngester') + design_status, design_data = ingester.ingest_data( + design_state=design_state, design_ref=design_ref) + + target_host = 'compute01' + + ba = design_data.get_bootaction('helloworld') + action_id = ulid2.generate_binary_ulid() + assets = ba.render_assets(target_host, design_data, action_id) + + assert len(assets) > 0 + + tarbytes = BootactionUtils.tarbuilder(assets) + + assert tarbytes is not None + + fileobj = io.BytesIO(tarbytes) + tarball = tarfile.open(mode='r:gz', fileobj=fileobj) + + tarasset = tarball.getmember('/var/tmp/hello.sh') + + assert tarasset.mode == 0o555 diff --git a/tests/unit/test_design_inheritance.py b/tests/unit/test_design_inheritance.py index 7e63df9f..2dc4013d 100644 --- a/tests/unit/test_design_inheritance.py +++ b/tests/unit/test_design_inheritance.py @@ -12,23 +12,10 @@ # See the License for the specific language governing permissions and # limitations 
under the License. -import pytest -import shutil -import os -import logging - -from oslo_config import cfg - -import drydock_provisioner.config as config -import drydock_provisioner.objects as objects - from drydock_provisioner.ingester.ingester import Ingester from drydock_provisioner.statemgmt.state import DrydockState from drydock_provisioner.orchestrator.orchestrator import Orchestrator -logging.basicConfig(level=logging.DEBUG) - - class TestClass(object): def test_design_inheritance(self, input_files, setup): input_file = input_files.join("fullsite.yaml") @@ -59,44 +46,3 @@ class TestClass(object): iface = node.get_applied_interface('pxe') assert len(iface.get_hw_slaves()) == 1 - - @pytest.fixture(scope='module') - def input_files(self, tmpdir_factory, request): - tmpdir = tmpdir_factory.mktemp('data') - samples_dir = os.path.dirname(str( - request.fspath)) + "/" + "../yaml_samples" - samples = os.listdir(samples_dir) - - for f in samples: - src_file = samples_dir + "/" + f - dst_file = str(tmpdir) + "/" + f - shutil.copyfile(src_file, dst_file) - - return tmpdir - - @pytest.fixture(scope='module') - def setup(self): - objects.register_all() - logging.basicConfig() - - req_opts = { - 'default': [cfg.IntOpt('leader_grace_period')], - 'database': [cfg.StrOpt('database_connect_string')], - 'logging': [ - cfg.StrOpt('global_logger_name', default='drydock'), - ] - } - - for k, v in req_opts.items(): - config.config_mgr.conf.register_opts(v, group=k) - - config.config_mgr.conf([]) - config.config_mgr.conf.set_override( - name="database_connect_string", - group="database", - override= - "postgresql+psycopg2://drydock:drydock@localhost:5432/drydock") - config.config_mgr.conf.set_override( - name="leader_grace_period", group="default", override=15) - - return diff --git a/tests/unit/test_ingester.py b/tests/unit/test_ingester.py index 1110c8ef..2ab71343 100644 --- a/tests/unit/test_ingester.py +++ b/tests/unit/test_ingester.py @@ -11,19 +11,11 @@ # WITHOUT WARRANTIES 
OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +"""Test YAML data ingestion.""" from drydock_provisioner.ingester.ingester import Ingester from drydock_provisioner.statemgmt.state import DrydockState import drydock_provisioner.objects as objects -import drydock_provisioner.config as config - -from oslo_config import cfg - -import logging -import pytest -import shutil -import os - class TestClass(object): def test_ingest_full_site(self, input_files, setup): @@ -42,44 +34,3 @@ class TestClass(object): assert len(design_data.host_profiles) == 2 assert len(design_data.baremetal_nodes) == 2 - - @pytest.fixture(scope='module') - def input_files(self, tmpdir_factory, request): - tmpdir = tmpdir_factory.mktemp('data') - samples_dir = os.path.dirname(str( - request.fspath)) + "/" + "../yaml_samples" - samples = os.listdir(samples_dir) - - for f in samples: - src_file = samples_dir + "/" + f - dst_file = str(tmpdir) + "/" + f - shutil.copyfile(src_file, dst_file) - - return tmpdir - - @pytest.fixture(scope='module') - def setup(self): - objects.register_all() - logging.basicConfig() - - req_opts = { - 'default': [cfg.IntOpt('leader_grace_period')], - 'database': [cfg.StrOpt('database_connect_string')], - 'logging': [ - cfg.StrOpt('global_logger_name', default='drydock'), - ] - } - - for k, v in req_opts.items(): - config.config_mgr.conf.register_opts(v, group=k) - - config.config_mgr.conf([]) - config.config_mgr.conf.set_override( - name="database_connect_string", - group="database", - override= - "postgresql+psycopg2://drydock:drydock@localhost:5432/drydock") - config.config_mgr.conf.set_override( - name="leader_grace_period", group="default", override=15) - - return diff --git a/tests/unit/test_ingester_bootaction.py b/tests/unit/test_ingester_bootaction.py new file mode 100644 index 00000000..b622d2e1 --- /dev/null +++ b/tests/unit/test_ingester_bootaction.py @@ 
-0,0 +1,37 @@ +# Copyright 2017 AT&T Intellectual Property. All other rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Test that boot action models are properly parsed.""" + +from drydock_provisioner.ingester.ingester import Ingester +from drydock_provisioner.statemgmt.state import DrydockState +import drydock_provisioner.objects as objects + +class TestClass(object): + def test_bootaction_parse(self, input_files, setup): + objects.register_all() + + input_file = input_files.join("bootaction.yaml") + + design_state = DrydockState() + design_ref = "file://%s" % str(input_file) + + ingester = Ingester() + ingester.enable_plugin( + 'drydock_provisioner.ingester.plugins.yaml.YamlIngester') + design_status, design_data = ingester.ingest_data( + design_state=design_state, design_ref=design_ref) + + ba = design_data.get_bootaction('helloworld') + + assert len(ba.asset_list) == 2 diff --git a/tests/unit/test_ingester_rack_model.py b/tests/unit/test_ingester_rack_model.py index d2176e1e..8032c00e 100644 --- a/tests/unit/test_ingester_rack_model.py +++ b/tests/unit/test_ingester_rack_model.py @@ -16,15 +16,9 @@ from drydock_provisioner.ingester.ingester import Ingester from drydock_provisioner.statemgmt.state import DrydockState import drydock_provisioner.objects as objects -import drydock_provisioner.config as config import drydock_provisioner.error as errors -from oslo_config import cfg - -import logging import pytest -import shutil -import os class 
TestClass(object): @@ -62,44 +56,3 @@ class TestClass(object): with pytest.raises(errors.DesignError): design_data.get_rack('foo') - - @pytest.fixture(scope='module') - def input_files(self, tmpdir_factory, request): - tmpdir = tmpdir_factory.mktemp('data') - samples_dir = os.path.dirname(str( - request.fspath)) + "/" + "../yaml_samples" - samples = os.listdir(samples_dir) - - for f in samples: - src_file = samples_dir + "/" + f - dst_file = str(tmpdir) + "/" + f - shutil.copyfile(src_file, dst_file) - - return tmpdir - - @pytest.fixture(scope='module') - def setup(self): - objects.register_all() - logging.basicConfig() - - req_opts = { - 'default': [cfg.IntOpt('leader_grace_period')], - 'database': [cfg.StrOpt('database_connect_string')], - 'logging': [ - cfg.StrOpt('global_logger_name', default='drydock'), - ] - } - - for k, v in req_opts.items(): - config.config_mgr.conf.register_opts(v, group=k) - - config.config_mgr.conf([]) - config.config_mgr.conf.set_override( - name="database_connect_string", - group="database", - override= - "postgresql+psycopg2://drydock:drydock@localhost:5432/drydock") - config.config_mgr.conf.set_override( - name="leader_grace_period", group="default", override=15) - - return diff --git a/tests/unit/test_ingester_yaml.py b/tests/unit/test_ingester_yaml.py index b0a5c935..75d23027 100644 --- a/tests/unit/test_ingester_yaml.py +++ b/tests/unit/test_ingester_yaml.py @@ -11,16 +11,10 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-import pytest -import shutil -import os -import logging +"""Test that YAML ingestion works.""" from drydock_provisioner.ingester.plugins.yaml import YamlIngester -logging.basicConfig(level=logging.DEBUG) - - class TestClass(object): def test_ingest_singledoc(self, input_files): input_file = input_files.join("singledoc.yaml") @@ -47,17 +41,3 @@ class TestClass(object): assert status.status == 'success' assert len(models) == 3 - - @pytest.fixture(scope='module') - def input_files(self, tmpdir_factory, request): - tmpdir = tmpdir_factory.mktemp('data') - samples_dir = os.path.dirname(str( - request.fspath)) + "/" + "../yaml_samples" - samples = os.listdir(samples_dir) - - for f in samples: - src_file = samples_dir + "/" + f - dst_file = str(tmpdir) + "/" + f - shutil.copyfile(src_file, dst_file) - - return tmpdir diff --git a/tests/unit/test_orch_node_filter.py b/tests/unit/test_orch_node_filter.py new file mode 100644 index 00000000..c9113e8f --- /dev/null +++ b/tests/unit/test_orch_node_filter.py @@ -0,0 +1,68 @@ +# Copyright 2017 AT&T Intellectual Property. All other rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Test the node filter logic in the orchestrator.""" + +from drydock_provisioner.ingester.ingester import Ingester +from drydock_provisioner.statemgmt.state import DrydockState +import drydock_provisioner.objects as objects + +class TestClass(object): + def test_node_filter_obj(self, input_files, setup, test_orchestrator): + input_file = input_files.join("fullsite.yaml") + + design_state = DrydockState() + design_ref = "file://%s" % str(input_file) + + ingester = Ingester() + ingester.enable_plugin( + 'drydock_provisioner.ingester.plugins.yaml.YamlIngester') + design_status, design_data = ingester.ingest_data( + design_state=design_state, design_ref=design_ref) + + nf = objects.NodeFilter() + nf.filter_type = 'intersection' + nf.node_names = ['compute01'] + nfs = objects.NodeFilterSet( + filter_set_type='intersection', filter_set=[nf]) + + node_list = test_orchestrator.process_node_filter(nfs, design_data) + + assert len(node_list) == 1 + + def test_node_filter_dict(self, input_files, setup, test_orchestrator): + input_file = input_files.join("fullsite.yaml") + + design_state = DrydockState() + design_ref = "file://%s" % str(input_file) + + ingester = Ingester() + ingester.enable_plugin( + 'drydock_provisioner.ingester.plugins.yaml.YamlIngester') + design_status, design_data = ingester.ingest_data( + design_state=design_state, design_ref=design_ref) + + nfs = { + 'filter_set_type': + 'intersection', + 'filter_set': [ + { + 'filter_type': 'intersection', + 'node_names': 'compute01', + }, + ], + } + + node_list = test_orchestrator.process_node_filter(nfs, design_data) + + assert len(node_list) == 1 diff --git a/tests/yaml_samples/bootaction.yaml b/tests/yaml_samples/bootaction.yaml index 3eb3b6db..e3f33e0d 100644 --- a/tests/yaml_samples/bootaction.yaml +++ b/tests/yaml_samples/bootaction.yaml @@ -10,14 +10,15 @@ spec: assets: - path: /var/tmp/hello.sh type: file - permissions: 555 - data: | + permissions: '555' + data: |- 
IyEvYmluL2Jhc2gKCmVjaG8gJ0hlbGxvIFdvcmxkIScK data_pipeline: - base64_decode + - utf8_encode - path: /lib/systemd/system/hello.service type: unit - data: | + data: |- W1VuaXRdCkRlc2NyaXB0aW9uPUhlbGxvIFdvcmxkCgpbU2VydmljZV0KVHlwZT1vbmVzaG90CkV4 ZWNTdGFydD0vdmFyL3RtcC9oZWxsby5zaAoKW0luc3RhbGxdCldhbnRlZEJ5PW11bHRpLXVzZXIu dGFyZ2V0Cg== diff --git a/tests/yaml_samples/fullsite.yaml b/tests/yaml_samples/fullsite.yaml index 3370224a..4560f555 100644 --- a/tests/yaml_samples/fullsite.yaml +++ b/tests/yaml_samples/fullsite.yaml @@ -491,3 +491,71 @@ spec: dev_type: 'VBOX HARDDISK' bus_type: 'scsi' ... +--- +apiVersion: 'drydock/v1' +kind: BootAction +metadata: + name: helloworld + region: sitename + date: 17-FEB-2017 + author: Scott Hussey +spec: + assets: + - path: /var/tmp/hello.sh + type: file + permissions: '555' + data: |- + IyEvYmluL2Jhc2gKCmVjaG8gJ0hlbGxvIFdvcmxkISAtZnJvbSB7eyBub2RlLmhvc3RuYW1lIH19 + Jwo= + data_pipeline: + - base64_decode + - utf8_decode + - template + - path: /lib/systemd/system/hello.service + type: unit + permissions: '600' + data: |- + W1VuaXRdCkRlc2NyaXB0aW9uPUhlbGxvIFdvcmxkCgpbU2VydmljZV0KVHlwZT1vbmVzaG90CkV4 + ZWNTdGFydD0vdmFyL3RtcC9oZWxsby5zaAoKW0luc3RhbGxdCldhbnRlZEJ5PW11bHRpLXVzZXIu + dGFyZ2V0Cg== + data_pipeline: + - base64_decode + - utf8_decode +... 
+--- +apiVersion: 'drydock/v1' +kind: BootAction +metadata: + name: hw_filtered + region: sitename + date: 17-FEB-2017 + author: Scott Hussey +spec: + node_filter: + filter_set_type: 'union' + filter_set: + - filter_type: 'union' + node_names: + - 'compute01' + assets: + - path: /var/tmp/hello.sh + type: file + permissions: '555' + data: |- + IyEvYmluL2Jhc2gKCmVjaG8gJ0hlbGxvIFdvcmxkISAtZnJvbSB7eyBub2RlLmhvc3RuYW1lIH19 + Jwo= + data_pipeline: + - base64_decode + - utf8_decode + - template + - path: /lib/systemd/system/hello.service + type: unit + permissions: '600' + data: |- + W1VuaXRdCkRlc2NyaXB0aW9uPUhlbGxvIFdvcmxkCgpbU2VydmljZV0KVHlwZT1vbmVzaG90CkV4 + ZWNTdGFydD0vdmFyL3RtcC9oZWxsby5zaAoKW0luc3RhbGxdCldhbnRlZEJ5PW11bHRpLXVzZXIu + dGFyZ2V0Cg== + data_pipeline: + - base64_decode + - utf8_decode +... diff --git a/tox.ini b/tox.ini index d8b81081..9d763ef7 100644 --- a/tox.ini +++ b/tox.ini @@ -1,7 +1,8 @@ [tox] -envlist = py35 +envlist = unit,pep8,bandit [testenv] +setenv = YAMLDIR = {toxinidir}/tests/yaml_samples/ basepython=python3.5 deps= -rrequirements-direct.txt @@ -27,6 +28,7 @@ commands= [testenv:unit] setenv= PYTHONWARNING=all + YAMLDIR={toxinidir}/tests/yaml_samples/ commands= py.test \ tests/unit/{posargs} @@ -38,6 +40,10 @@ commands= py.test \ tests/integration/{posargs} +[testenv:postgres] +commands= + {toxinidir}/tests/integration/postgres/test_postgres.sh {toxinidir}/tests/integration/postgres/{posargs} + [testenv:genconfig] commands = oslo-config-generator --config-file=etc/drydock/drydock-config-generator.conf