diff --git a/alembic/versions/4a5bef3702b_create_build_data_table.py b/alembic/versions/4a5bef3702b_create_build_data_table.py index 7adc8579..79e0ec46 100644 --- a/alembic/versions/4a5bef3702b_create_build_data_table.py +++ b/alembic/versions/4a5bef3702b_create_build_data_table.py @@ -18,7 +18,9 @@ from drydock_provisioner.statemgmt.db import tables def upgrade(): - op.create_table(tables.BuildData.__tablename__, *tables.BuildData.__schema__) + op.create_table(tables.BuildData.__tablename__, + *tables.BuildData.__schema__) + def downgrade(): op.drop_table(tables.BuildData.__tablename__) diff --git a/drydock_provisioner/cli/const.py b/drydock_provisioner/cli/const.py index c6faa7a2..62cf59f9 100644 --- a/drydock_provisioner/cli/const.py +++ b/drydock_provisioner/cli/const.py @@ -14,6 +14,7 @@ """Constants for the CLI and API client.""" from enum import Enum + class TaskStatus(Enum): Requested = 'requested' Queued = 'queued' diff --git a/drydock_provisioner/cli/task/actions.py b/drydock_provisioner/cli/task/actions.py index 2afe4ffe..1453c22a 100644 --- a/drydock_provisioner/cli/task/actions.py +++ b/drydock_provisioner/cli/task/actions.py @@ -17,6 +17,7 @@ import time from drydock_provisioner.cli.action import CliAction from drydock_provisioner.cli.const import TaskStatus + class TaskList(CliAction): # pylint: disable=too-few-public-methods """Action to list tasks.""" @@ -99,7 +100,8 @@ class TaskCreate(CliAction): # pylint: disable=too-few-public-methods while True: time.sleep(self.poll_interval) task = self.api_client.get_task(task_id=task_id) - if task.get('status', '') in [TaskStatus.Complete, TaskStatus.Terminated]: + if task.get('status', + '') in [TaskStatus.Complete, TaskStatus.Terminated]: return task diff --git a/drydock_provisioner/control/api.py b/drydock_provisioner/control/api.py index 14598358..b9505990 100644 --- a/drydock_provisioner/control/api.py +++ b/drydock_provisioner/control/api.py @@ -55,34 +55,41 @@ def start_api(state_manager=None, 
ingester=None, orchestrator=None): v1_0_routes = [ # API for managing orchestrator tasks ('/health', HealthResource()), - ('/tasks', TasksResource( - state_manager=state_manager, orchestrator=orchestrator)), + ('/tasks', + TasksResource(state_manager=state_manager, + orchestrator=orchestrator)), ('/tasks/{task_id}', TaskResource(state_manager=state_manager)), # API for managing site design data ('/designs', DesignsResource(state_manager=state_manager)), - ('/designs/{design_id}', DesignResource( - state_manager=state_manager, orchestrator=orchestrator)), - ('/designs/{design_id}/parts', DesignsPartsResource( - state_manager=state_manager, ingester=ingester)), + ('/designs/{design_id}', + DesignResource( + state_manager=state_manager, orchestrator=orchestrator)), + ('/designs/{design_id}/parts', + DesignsPartsResource(state_manager=state_manager, ingester=ingester)), ('/designs/{design_id}/parts/{kind}', DesignsPartsKindsResource(state_manager=state_manager)), - ('/designs/{design_id}/parts/{kind}/{name}', DesignsPartResource( - state_manager=state_manager, orchestrator=orchestrator)), + ('/designs/{design_id}/parts/{kind}/{name}', + DesignsPartResource( + state_manager=state_manager, orchestrator=orchestrator)), # API to list current MaaS nodes ('/nodes', NodesResource()), # API for nodes to discover their boot actions during curtin install - ('/bootactions/nodes/{hostname}/units', BootactionUnitsResource( - state_manager=state_manager, orchestrator=orchestrator)), - ('/bootactions/nodes/{hostname}/files', BootactionFilesResource( - state_manager=state_manager, orchestrator=orchestrator)), - ('/bootactions/{action_id}', BootactionResource( - state_manager=state_manager, orchestrator=orchestrator)), + ('/bootactions/nodes/{hostname}/units', + BootactionUnitsResource( + state_manager=state_manager, orchestrator=orchestrator)), + ('/bootactions/nodes/{hostname}/files', + BootactionFilesResource( + state_manager=state_manager, orchestrator=orchestrator)), + 
('/bootactions/{action_id}', + BootactionResource( + state_manager=state_manager, orchestrator=orchestrator)), # API to validate schemas - ('/validatedesign', ValidationResource( - state_manager=state_manager, orchestrator=orchestrator)), + ('/validatedesign', + ValidationResource( + state_manager=state_manager, orchestrator=orchestrator)), ] for path, res in v1_0_routes: diff --git a/drydock_provisioner/control/tasks.py b/drydock_provisioner/control/tasks.py index aba3581d..37889096 100644 --- a/drydock_provisioner/control/tasks.py +++ b/drydock_provisioner/control/tasks.py @@ -45,8 +45,9 @@ class TasksResource(StatefulResource): resp.body = json.dumps(task_list) resp.status = falcon.HTTP_200 except Exception as ex: - self.error(req.context, "Unknown error: %s\n%s" % - (str(ex), traceback.format_exc())) + self.error(req.context, + "Unknown error: %s\n%s" % (str(ex), + traceback.format_exc())) self.return_error( resp, falcon.HTTP_500, message="Unknown error", retry=False) @@ -78,8 +79,9 @@ class TasksResource(StatefulResource): else: supported_actions.get(action)(self, req, resp, json_data) except Exception as ex: - self.error(req.context, "Unknown error: %s\n%s" % - (str(ex), traceback.format_exc())) + self.error(req.context, + "Unknown error: %s\n%s" % (str(ex), + traceback.format_exc())) self.return_error( resp, falcon.HTTP_500, message="Unknown error", retry=False) diff --git a/drydock_provisioner/error.py b/drydock_provisioner/error.py index a7fabf89..5db609b6 100644 --- a/drydock_provisioner/error.py +++ b/drydock_provisioner/error.py @@ -70,6 +70,10 @@ class InvalidAssetLocation(BootactionError): pass +class BuildDataError(Exception): + pass + + class DriverError(Exception): pass diff --git a/drydock_provisioner/objects/__init__.py b/drydock_provisioner/objects/__init__.py index 77fb3665..3323f56f 100644 --- a/drydock_provisioner/objects/__init__.py +++ b/drydock_provisioner/objects/__init__.py @@ -31,6 +31,7 @@ def register_all(): 
importlib.import_module('drydock_provisioner.objects.rack') importlib.import_module('drydock_provisioner.objects.bootaction') importlib.import_module('drydock_provisioner.objects.task') + importlib.import_module('drydock_provisioner.objects.builddata') # Utility class for calculating inheritance diff --git a/drydock_provisioner/objects/bootaction.py b/drydock_provisioner/objects/bootaction.py index 400922cc..2dfa204d 100644 --- a/drydock_provisioner/objects/bootaction.py +++ b/drydock_provisioner/objects/bootaction.py @@ -57,7 +57,11 @@ class BootAction(base.DrydockPersistentObject, base.DrydockObject): def get_name(self): return self.name - def render_assets(self, nodename, site_design, action_id, design_ref, + def render_assets(self, + nodename, + site_design, + action_id, + design_ref, type_filter=None): """Render all of the assets in this bootaction. diff --git a/drydock_provisioner/objects/builddata.py b/drydock_provisioner/objects/builddata.py new file mode 100644 index 00000000..174b8c3c --- /dev/null +++ b/drydock_provisioner/objects/builddata.py @@ -0,0 +1,132 @@ +# Copyright 2017 AT&T Intellectual Property. All other rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Models for representing build data."""
+import uuid
+
+from datetime import datetime
+
+from drydock_provisioner import objects
+
+import drydock_provisioner.error as errors
+
+
+class BuildData(object):
+    """Build data
+
+    :param node_name: The name of the node the data was collected from.
+    :param task_id: The uuid.UUID ID of the task initiating the collection
+    :param collected_date: Date/time the data was collected
+    :param generator: String description of the source of data (e.g. ``lshw``)
+    :param data_format: String MIME-type of ``data_element``
+    :param data_element: Data to be saved, will be cast to ``str``
+    """
+
+    def __init__(self,
+                 node_name=None,
+                 task_id=None,
+                 collected_date=None,
+                 generator=None,
+                 data_format=None,
+                 data_element=None):
+        """Initializer for BuildData."""
+        if not all((node_name, task_id, generator, data_format, data_element)):
+            raise ValueError("Required field missing.")
+
+        try:
+            if isinstance(data_element, bytes):
+                data_element = data_element.decode('utf-8')
+            elif not isinstance(data_element, str):
+                data_element = str(data_element)
+        except Exception as ex:
+            raise errors.BuildDataError(
+                "Error saving build data - data_element type %s could "
+                "not be cast to string." % str(type(data_element)))
+
+        self.node_name = node_name
+        self.task_id = task_id
+        self.collected_date = collected_date or datetime.utcnow()
+        self.generator = generator
+        self.data_format = data_format
+        self.data_element = data_element
+
+    @classmethod
+    def obj_name(cls):
+        return cls.__name__
+
+    def to_db(self):
+        """Convert this instance to a dictionary for use persisting to a db.
+
+        The returned ``task_id`` value is the raw 16-byte form of the
+        UUID, matching the BYTEA column of the build_data table
+
+        :returns: dictionary of column names to values
+        """
+        _dict = {
+            'node_name':
+            self.node_name,
+            'task_id':
+            self.task_id.bytes,
+            'collected_date':
+            None if self.collected_date is None else str(self.collected_date),
+            'generator':
+            self.generator,
+            'data_format':
+            self.data_format,
+            'data_element':
+            self.data_element,
+        }
+
+        return _dict
+
+    def to_dict(self, verbosity=2):
+        """Convert this instance to a dictionary.
+
+        Intended for use in JSON serialization
+        ``verbosity`` of 1 omits the data_element
+
+        :param verbosity: integer of how verbose to make the result.
+        """
+        _dict = {
+            'node_name':
+            self.node_name,
+            'task_id':
+            str(self.task_id),
+            'collected_date':
+            None if self.collected_date is None else str(self.collected_date),
+            'generator':
+            self.generator,
+            'data_format':
+            self.data_format,
+        }
+
+        if verbosity > 1:
+            _dict['data_element'] = self.data_element
+
+        return _dict
+
+    @classmethod
+    def from_db(cls, d):
+        """Create an instance from a DB-based dictionary.
+ + :param d: Dictionary of instance data + """ + d['task_id'] = uuid.UUID(bytes=bytes(d.get('task_id'))) + + i = BuildData(**d) + + return i + + +# Add BuildData to objects scope +setattr(objects, BuildData.obj_name(), BuildData) diff --git a/drydock_provisioner/objects/hostprofile.py b/drydock_provisioner/objects/hostprofile.py index 8a1e52b1..277886bf 100644 --- a/drydock_provisioner/objects/hostprofile.py +++ b/drydock_provisioner/objects/hostprofile.py @@ -364,8 +364,8 @@ class HostVolumeGroup(base.DrydockObject): for f in inheritable_field_list: setattr(p, f, objects.Utils.apply_field_inheritance( - getattr(j, f, None), - getattr(i, f, None))) + getattr(j, f, None), getattr( + i, f, None))) p.partitions = HostPartitionList.from_basic_list( HostPartition.merge_lists( @@ -478,8 +478,8 @@ class HostStorageDevice(base.DrydockObject): for f in inherit_field_list: setattr(p, f, objects.Utils.apply_field_inheritance( - getattr(j, f, None), - getattr(i, f, None))) + getattr(j, f, None), getattr( + i, f, None))) p.labels = objects.Utils.merge_dicts( getattr(j, 'labels', None), @@ -625,8 +625,8 @@ class HostPartition(base.DrydockObject): for f in inherit_field_list: setattr(p, f, objects.Utils.apply_field_inheritance( - getattr(j, f, None), - getattr(i, f, None))) + getattr(j, f, None), getattr( + i, f, None))) add = False p.source = hd_fields.ModelSource.Compiled effective_list.append(p) @@ -752,8 +752,8 @@ class HostVolume(base.DrydockObject): for f in inherit_field_list: setattr(p, f, objects.Utils.apply_field_inheritance( - getattr(j, f, None), - getattr(i, f, None))) + getattr(j, f, None), getattr( + i, f, None))) add = False p.source = hd_fields.ModelSource.Compiled effective_list.append(p) diff --git a/drydock_provisioner/objects/task.py b/drydock_provisioner/objects/task.py index ebfb2408..ba30fea1 100644 --- a/drydock_provisioner/objects/task.py +++ b/drydock_provisioner/objects/task.py @@ -625,8 +625,8 @@ class TaskStatusMessage(object): :param d: dictionary 
of values """ i = TaskStatusMessage( - d.get('message', None), - d.get('error'), d.get('context_type'), d.get('context')) + d.get('message', None), d.get('error'), d.get('context_type'), + d.get('context')) if 'extra' in d: i.extra = d.get('extra') i.ts = d.get('ts', None) diff --git a/drydock_provisioner/orchestrator/orchestrator.py b/drydock_provisioner/orchestrator/orchestrator.py index 0191e13e..564288e6 100644 --- a/drydock_provisioner/orchestrator/orchestrator.py +++ b/drydock_provisioner/orchestrator/orchestrator.py @@ -252,7 +252,9 @@ class Orchestrator(object): for n in nodes or []: n.compile_applied_model(site_design) except AttributeError: - self.logger.debug("Model inheritance skipped, no node definitions in site design.") + self.logger.debug( + "Model inheritance skipped, no node definitions in site design." + ) return @@ -552,8 +554,8 @@ class Orchestrator(object): (ba.name, nodename)) action_id = ulid2.generate_binary_ulid() self.state_manager.post_boot_action( - nodename, - task.get_id(), identity_key, action_id, ba.name) + nodename, task.get_id(), identity_key, action_id, + ba.name) else: self.logger.debug( "Boot action %s has disabled signaling." 
% ba.name) diff --git a/drydock_provisioner/orchestrator/util.py b/drydock_provisioner/orchestrator/util.py index 6aad9e2f..4c150c7f 100644 --- a/drydock_provisioner/orchestrator/util.py +++ b/drydock_provisioner/orchestrator/util.py @@ -16,6 +16,7 @@ import re import drydock_provisioner.error as errors + class SimpleBytes(): def calulate_bytes(size_str): """ @@ -33,7 +34,8 @@ class SimpleBytes(): match = regex.match(size_str) if not match: - raise errors.InvalidSizeFormat("Invalid size string format: %s" % size_str) + raise errors.InvalidSizeFormat( + "Invalid size string format: %s" % size_str) base_size = int(match.group(1)) diff --git a/drydock_provisioner/orchestrator/validations/validator.py b/drydock_provisioner/orchestrator/validations/validator.py index 1364c954..81998e65 100644 --- a/drydock_provisioner/orchestrator/validations/validator.py +++ b/drydock_provisioner/orchestrator/validations/validator.py @@ -235,8 +235,10 @@ class Validator(): # error if both are defined if all([fstype, partition_volume_group]): - msg = ('Storage Partitioning Error: Both a volume group AND file system cannot be ' - 'defined in a sigle partition; on BaremetalNode %s' % baremetal_node.get('name')) + msg = ( + 'Storage Partitioning Error: Both a volume group AND file system cannot be ' + 'defined in a sigle partition; on BaremetalNode %s' + % baremetal_node.get('name')) message_list.append( TaskStatusMessage( @@ -255,9 +257,10 @@ class Validator(): for volume_group in all_volume_groups: if volume_group.get('name') not in volume_group_check_list: - msg = ('Storage Partitioning Error: A volume group must be assigned to a storage device or ' - 'partition; volume group %s on BaremetalNode %s' % (volume_group.get('name'), - baremetal_node.get('name'))) + msg = ( + 'Storage Partitioning Error: A volume group must be assigned to a storage device or ' + 'partition; volume group %s on BaremetalNode %s' % + (volume_group.get('name'), baremetal_node.get('name'))) message_list.append( 
TaskStatusMessage( @@ -308,9 +311,13 @@ class Validator(): ] for name in duplicated_names: - msg = ('Unique Network Error: Allowed network %s duplicated on NetworkLink %s and NetworkLink ' - '%s' % (name, network_link_name, network_link_name_2)) - message_list.append(TaskStatusMessage(msg=msg, error=True, ctx_type='NA', ctx='NA')) + msg = ( + 'Unique Network Error: Allowed network %s duplicated on NetworkLink %s and NetworkLink ' + '%s' % (name, network_link_name, + network_link_name_2)) + message_list.append( + TaskStatusMessage( + msg=msg, error=True, ctx_type='NA', ctx='NA')) if not message_list: message_list.append( @@ -339,8 +346,11 @@ class Validator(): mtu = network_link.get('mtu') # check mtu > 1400 and < 64000 if mtu and (mtu < 1400 or mtu > 64000): - msg = 'Mtu Error: Mtu must be between 1400 and 64000; on Network Link %s.' % network_link.get('name') - message_list.append(TaskStatusMessage(msg=msg, error=True, ctx_type='NA', ctx='NA')) + msg = 'Mtu Error: Mtu must be between 1400 and 64000; on Network Link %s.' % network_link.get( + 'name') + message_list.append( + TaskStatusMessage( + msg=msg, error=True, ctx_type='NA', ctx='NA')) # add assigned network to dict with parent mtu assigned_network = network_link.get('native_network') @@ -351,19 +361,27 @@ class Validator(): # check mtu > 1400 and < 64000 if network_mtu and (network_mtu < 1400 or network_mtu > 64000): - msg = 'Mtu Error: Mtu must be between 1400 and 64000; on Network %s.' % network.get('name') - message_list.append(TaskStatusMessage(msg=msg, error=True, ctx_type='NA', ctx='NA')) + msg = 'Mtu Error: Mtu must be between 1400 and 64000; on Network %s.' 
% network.get( + 'name') + message_list.append( + TaskStatusMessage( + msg=msg, error=True, ctx_type='NA', ctx='NA')) name = network.get('name') parent_mtu = parent_mtu_check.get(name) if network_mtu and parent_mtu: # check to make sure mtu for network is <= parent network link if network_mtu > parent_mtu: - msg = 'Mtu Error: Mtu must be <= the parent Network Link; for Network %s' % (network.get('name')) - message_list.append(TaskStatusMessage(msg=msg, error=True, ctx_type='NA', ctx='NA')) + msg = 'Mtu Error: Mtu must be <= the parent Network Link; for Network %s' % ( + network.get('name')) + message_list.append( + TaskStatusMessage( + msg=msg, error=True, ctx_type='NA', ctx='NA')) if not message_list: - message_list.append(TaskStatusMessage(msg='Mtu', error=False, ctx_type='NA', ctx='NA')) + message_list.append( + TaskStatusMessage( + msg='Mtu', error=False, ctx_type='NA', ctx='NA')) return message_list @classmethod @@ -390,38 +408,64 @@ class Validator(): percent = size.split('%') if len(percent) == 2: if int(percent[0]) < 0: - msg = ('Storage Sizing Error: Storage partition size is < 0 ' - 'on Baremetal Node %s' % baremetal_node.get('name')) - message_list.append(TaskStatusMessage(msg=msg, error=True, ctx_type='NA', ctx='NA')) + msg = ( + 'Storage Sizing Error: Storage partition size is < 0 ' + 'on Baremetal Node %s' % + baremetal_node.get('name')) + message_list.append( + TaskStatusMessage( + msg=msg, + error=True, + ctx_type='NA', + ctx='NA')) partition_sum += int(percent[0]) if partition_sum > 99: - msg = ('Storage Sizing Error: Storage partition size is greater than ' - '99 on Baremetal Node %s' % baremetal_node.get('name')) - message_list.append(TaskStatusMessage(msg=msg, error=True, ctx_type='NA', ctx='NA')) + msg = ( + 'Storage Sizing Error: Storage partition size is greater than ' + '99 on Baremetal Node %s' % + baremetal_node.get('name')) + message_list.append( + TaskStatusMessage( + msg=msg, error=True, ctx_type='NA', ctx='NA')) volume_groups = 
baremetal_node.get('volume_groups', []) volume_sum = 0 for volume_group in volume_groups: - logical_volume_list = volume_group.get('logical_volumes', []) + logical_volume_list = volume_group.get( + 'logical_volumes', []) for logical_volume in logical_volume_list: size = logical_volume.get('size') percent = size.split('%') if len(percent) == 2: if int(percent[0]) < 0: - msg = ('Storage Sizing Error: Storage volume size is < 0 ' - 'on Baremetal Node %s' % baremetal_node.get('name')) - message_list.append(TaskStatusMessage(msg=msg, error=True, ctx_type='NA', ctx='NA')) + msg = ( + 'Storage Sizing Error: Storage volume size is < 0 ' + 'on Baremetal Node %s' % + baremetal_node.get('name')) + message_list.append( + TaskStatusMessage( + msg=msg, + error=True, + ctx_type='NA', + ctx='NA')) volume_sum += int(percent[0]) if volume_sum > 99: - msg = ('Storage Sizing Error: Storage volume size is greater ' - 'than 99 on Baremetal Node %s.' % baremetal_node.get('name')) - message_list.append(TaskStatusMessage(msg=msg, error=True, ctx_type='NA', ctx='NA')) + msg = ( + 'Storage Sizing Error: Storage volume size is greater ' + 'than 99 on Baremetal Node %s.' % + baremetal_node.get('name')) + message_list.append( + TaskStatusMessage( + msg=msg, error=True, ctx_type='NA', ctx='NA')) if not message_list: - message_list.append(TaskStatusMessage(msg='Storage Sizing', error=False, ctx_type='NA', ctx='NA')) + message_list.append( + TaskStatusMessage( + msg='Storage Sizing', error=False, ctx_type='NA', + ctx='NA')) return message_list @classmethod @@ -450,8 +494,11 @@ class Validator(): if address in found_ips and address is not None: msg = ('Error! Duplicate IP Address Found: %s ' - 'is in use by both %s and %s.' % (address, found_ips[address], node_name)) - message_list.append(TaskStatusMessage(msg=msg, error=True, ctx_type='NA', ctx='NA')) + 'is in use by both %s and %s.' 
% + (address, found_ips[address], node_name)) + message_list.append( + TaskStatusMessage( + msg=msg, error=True, ctx_type='NA', ctx='NA')) elif address is not None: found_ips[address] = node_name @@ -488,13 +535,25 @@ class Validator(): root_set = True # check if size < 20GB if cal_size < 20 * BYTES_IN_GB: - msg = ('Boot Storage Error: Root volume must be > 20GB on BaremetalNode ' - '%s' % baremetal_node.get('name')) - message_list.append(TaskStatusMessage(msg=msg, error=True, ctx_type='NA', ctx='NA')) + msg = ( + 'Boot Storage Error: Root volume must be > 20GB on BaremetalNode ' + '%s' % baremetal_node.get('name')) + message_list.append( + TaskStatusMessage( + msg=msg, + error=True, + ctx_type='NA', + ctx='NA')) except errors.InvalidSizeFormat as e: - msg = ('Boot Storage Error: Root volume has an invalid size format on BaremetalNode' - '%s.' % baremetal_node.get('name')) - message_list.append(TaskStatusMessage(msg=msg, error=True, ctx_type='NA', ctx='NA')) + msg = ( + 'Boot Storage Error: Root volume has an invalid size format on BaremetalNode' + '%s.' % baremetal_node.get('name')) + message_list.append( + TaskStatusMessage( + msg=msg, + error=True, + ctx_type='NA', + ctx='NA')) # check make sure root has been defined and boot volume > 1GB if root_set and host_partition.get('name') == 'boot': @@ -504,22 +563,39 @@ class Validator(): cal_size = SimpleBytes.calulate_bytes(size) # check if size < 1GB if cal_size < BYTES_IN_GB: - msg = ('Boot Storage Error: Boot volume must be > 1GB on BaremetalNode ' - '%s' % baremetal_node.get('name')) - message_list.append(TaskStatusMessage(msg=msg, error=True, ctx_type='NA', ctx='NA')) + msg = ( + 'Boot Storage Error: Boot volume must be > 1GB on BaremetalNode ' + '%s' % baremetal_node.get('name')) + message_list.append( + TaskStatusMessage( + msg=msg, + error=True, + ctx_type='NA', + ctx='NA')) except errors.InvalidSizeFormat as e: - msg = ('Boot Storage Error: Boot volume has an invalid size format on BaremetalNode ' - '%s.' 
% baremetal_node.get('name')) - message_list.append(TaskStatusMessage(msg=msg, error=True, ctx_type='NA', ctx='NA')) + msg = ( + 'Boot Storage Error: Boot volume has an invalid size format on BaremetalNode ' + '%s.' % baremetal_node.get('name')) + message_list.append( + TaskStatusMessage( + msg=msg, + error=True, + ctx_type='NA', + ctx='NA')) # This must be set if not root_set: - msg = ('Boot Storage Error: Root volume has to be set and must be > 20GB on BaremetalNode ' - '%s' % baremetal_node.get('name')) - message_list.append(TaskStatusMessage(msg=msg, error=True, ctx_type='NA', ctx='NA')) + msg = ( + 'Boot Storage Error: Root volume has to be set and must be > 20GB on BaremetalNode ' + '%s' % baremetal_node.get('name')) + message_list.append( + TaskStatusMessage( + msg=msg, error=True, ctx_type='NA', ctx='NA')) if not message_list: - message_list.append(TaskStatusMessage(msg='Boot Storage', error=False, ctx_type='NA', ctx='NA')) + message_list.append( + TaskStatusMessage( + msg='Boot Storage', error=False, ctx_type='NA', ctx='NA')) return message_list @classmethod @@ -553,13 +629,25 @@ class Validator(): if not gateway: msg = 'No gateway found for route %s.' % routes - message_list.append(TaskStatusMessage(msg=msg, error=True, ctx_type='NA', ctx='NA')) + message_list.append( + TaskStatusMessage( + msg=msg, + error=True, + ctx_type='NA', + ctx='NA')) else: ip = IPAddress(gateway) if ip not in cidr_range: - msg = ('IP Locality Error: The gateway IP Address %s ' - 'is not within the defined CIDR: %s of %s.' % (gateway, cidr, name)) - message_list.append(TaskStatusMessage(msg=msg, error=True, ctx_type='NA', ctx='NA')) + msg = ( + 'IP Locality Error: The gateway IP Address %s ' + 'is not within the defined CIDR: %s of %s.' + % (gateway, cidr, name)) + message_list.append( + TaskStatusMessage( + msg=msg, + error=True, + ctx_type='NA', + ctx='NA')) if not baremetal_nodes_list: msg = 'No baremetal_nodes found.' 
message_list.append(TaskStatusMessage(msg=msg, error=False, ctx_type='NA', ctx='NA')) @@ -576,13 +664,27 @@ class Validator(): if ip_address_network_name not in network_dict: msg = 'IP Locality Error: %s is not a valid network.' \ % (ip_address_network_name) - message_list.append(TaskStatusMessage(msg=msg, error=True, ctx_type='NA', ctx='NA')) + message_list.append( + TaskStatusMessage( + msg=msg, + error=True, + ctx_type='NA', + ctx='NA')) else: - if IPAddress(address) not in IPNetwork(network_dict[ip_address_network_name]): - msg = ('IP Locality Error: The IP Address %s ' - 'is not within the defined CIDR: %s of %s .' % - (address, network_dict[ip_address_network_name], ip_address_network_name)) - message_list.append(TaskStatusMessage(msg=msg, error=True, ctx_type='NA', ctx='NA')) + if IPAddress(address) not in IPNetwork( + network_dict[ip_address_network_name]): + msg = ( + 'IP Locality Error: The IP Address %s ' + 'is not within the defined CIDR: %s of %s .' + % (address, + network_dict[ip_address_network_name], + ip_address_network_name)) + message_list.append( + TaskStatusMessage( + msg=msg, + error=True, + ctx_type='NA', + ctx='NA')) if not message_list: msg = 'IP Locality Success' message_list.append(TaskStatusMessage(msg=msg, error=False, ctx_type='NA', ctx='NA')) diff --git a/drydock_provisioner/statemgmt/db/tables.py b/drydock_provisioner/statemgmt/db/tables.py index baa88b08..b6d8b1a2 100644 --- a/drydock_provisioner/statemgmt/db/tables.py +++ b/drydock_provisioner/statemgmt/db/tables.py @@ -121,6 +121,6 @@ class BuildData(ExtendTable): Column('task_id', pg.BYTEA(16), index=True), Column('collected_date', DateTime), Column('generator', String(256)), - Column('format', String(32)), + Column('data_format', String(32)), Column('data_element', Text), ] diff --git a/drydock_provisioner/statemgmt/state.py b/drydock_provisioner/statemgmt/state.py index 09a7cc1f..e5375eb4 100644 --- a/drydock_provisioner/statemgmt/state.py +++ 
b/drydock_provisioner/statemgmt/state.py @@ -24,6 +24,7 @@ from sqlalchemy import MetaData import drydock_provisioner.objects as objects import drydock_provisioner.objects.fields as hd_fields +import drydock_provisioner.error as errors from .db import tables @@ -49,6 +50,7 @@ class DrydockState(object): self.active_instance_tbl = tables.ActiveInstance(self.db_metadata) self.boot_action_tbl = tables.BootAction(self.db_metadata) self.ba_status_tbl = tables.BootActionStatus(self.db_metadata) + self.build_data_tbl = tables.BuildData(self.db_metadata) return def tabularasa(self): @@ -62,6 +64,7 @@ class DrydockState(object): 'active_instance', 'boot_action', 'boot_action_status', + 'build_data', ] conn = self.db_engine.connect() @@ -585,3 +588,85 @@ class DrydockState(object): except Exception as ex: self.logger.error( "Error querying boot action %s" % action_id, exc_info=ex) + + def post_build_data(self, build_data): + """Write a new build data element to the database. + + :param build_data: objects.BuildData instance to write + """ + try: + with self.db_engine.connect() as conn: + query = self.build_data_tbl.insert().values( + **build_data.to_db()) + conn.execute(query) + return True + except Exception as ex: + self.logger.error("Error saving build data.", exc_info=ex) + return False + + def get_build_data(self, + node_name=None, + task_id=None, + latest=False, + verbosity=2): + """Retrieve build data from the database. + + If ``node_name`` or ``task_id`` are defined, use them as + filters for the build_data retrieved. If ``task_id`` is not + defined, ``latest`` determines if all build data is returned, + or only the chronologically latest version for each generator + description. + + :param node_name: String name of the node to filter on + :param task_id: uuid.UUID ID of the task to filter on + :param latest: boolean whether to return only the latest + version for each generator + :param verbosity: integer of how verbose the response should + be. 
1 is summary, 2 includes the collected data
+        :returns: list of objects.BuildData instances
+        """
+        # TODO(sh8121att) possibly optimize queries by changing select column
+        # list based on verbosity
+        try:
+            with self.db_engine.connect() as conn:
+                if node_name and task_id:
+                    query = self.build_data_tbl.select().where(
+                        self.build_data_tbl.c.node_name == node_name).where(
+                        self.build_data_tbl.c.task_id == task_id.bytes
+                    ).order_by(self.build_data_tbl.c.collected_date.desc())
+                    rs = conn.execute(query)
+                elif node_name:
+                    if latest:
+                        query = sql.text(
+                            'SELECT DISTINCT ON (generator) build_data.* '
+                            'FROM build_data '
+                            'WHERE build_data.node_name = :nodename '
+                            'ORDER BY generator, build_data.collected_date DESC'
+                        )
+                        rs = conn.execute(query, nodename=node_name)
+                    else:
+                        query = self.build_data_tbl.select().where(
+                            self.build_data_tbl.c.node_name == node_name)
+                        rs = conn.execute(query)
+                elif task_id:
+                    query = self.build_data_tbl.select().where(
+                        self.build_data_tbl.c.task_id == task_id.bytes)
+                    rs = conn.execute(query)
+                else:
+                    if latest:
+                        query = sql.text(
+                            'SELECT DISTINCT ON (generator) build_data.* '
+                            'FROM build_data '
+                            'ORDER BY generator, build_data.collected_date DESC'
+                        )
+                        rs = conn.execute(query)
+                    else:
+                        query = self.build_data_tbl.select()
+                        rs = conn.execute(query)
+
+                result_data = rs.fetchall()
+
+            return [objects.BuildData.from_db(dict(r)) for r in result_data]
+        except Exception as ex:
+            self.logger.error("Error selecting build data.", exc_info=ex)
+            raise errors.BuildDataError("Error selecting build data.")
diff --git a/tests/integration/postgres/test_api_bootaction_status.py b/tests/integration/postgres/test_api_bootaction_status.py
index b789d6f4..e79525e7 100644
--- a/tests/integration/postgres/test_api_bootaction_status.py
+++ b/tests/integration/postgres/test_api_bootaction_status.py
@@ -103,9 +103,8 @@ class TestClass(object):
         id_key = os.urandom(32)
         action_id = ulid2.generate_binary_ulid()
-
blank_state.post_boot_action('compute01', - test_task.get_id(), id_key, action_id, - 'helloworld') + blank_state.post_boot_action('compute01', test_task.get_id(), id_key, + action_id, 'helloworld') ba = dict( nodename='compute01', diff --git a/tests/integration/postgres/test_bootaction_signalling.py b/tests/integration/postgres/test_bootaction_signalling.py index d9db9792..9f5df2d0 100644 --- a/tests/integration/postgres/test_bootaction_signalling.py +++ b/tests/integration/postgres/test_bootaction_signalling.py @@ -15,15 +15,16 @@ from drydock_provisioner.objects import fields as hd_fields + class TestBootActionSignal(object): - def test_bootaction_signal_disable(self, deckhand_orchestrator, drydock_state, input_files): + def test_bootaction_signal_disable(self, deckhand_orchestrator, + drydock_state, input_files): """Test that disabled signaling omits a status entry in the DB.""" input_file = input_files.join("deckhand_fullsite.yaml") design_ref = "file://%s" % str(input_file) task = deckhand_orchestrator.create_task( - design_ref=design_ref, - action=hd_fields.OrchestratorAction.Noop) + design_ref=design_ref, action=hd_fields.OrchestratorAction.Noop) deckhand_orchestrator.create_bootaction_context("compute01", task) diff --git a/tests/integration/postgres/test_orch_generic.py b/tests/integration/postgres/test_orch_generic.py index de4659ee..950cb0fe 100644 --- a/tests/integration/postgres/test_orch_generic.py +++ b/tests/integration/postgres/test_orch_generic.py @@ -20,15 +20,15 @@ import drydock_provisioner.objects.fields as hd_fields class TestClass(object): - def test_task_complete(self, yaml_ingester, input_files, setup, blank_state): + def test_task_complete(self, yaml_ingester, input_files, setup, + blank_state): input_file = input_files.join("fullsite.yaml") design_ref = "file://%s" % str(input_file) orchestrator = orch.Orchestrator( state_manager=blank_state, ingester=yaml_ingester) orch_task = orchestrator.create_task( - 
action=hd_fields.OrchestratorAction.Noop, - design_ref=design_ref) + action=hd_fields.OrchestratorAction.Noop, design_ref=design_ref) orch_task.set_status(hd_fields.TaskStatus.Queued) orch_task.save() @@ -45,15 +45,15 @@ class TestClass(object): orchestrator.stop_orchestrator() orch_thread.join(10) - def test_task_termination(self, input_files, yaml_ingester, setup, blank_state): + def test_task_termination(self, input_files, yaml_ingester, setup, + blank_state): input_file = input_files.join("fullsite.yaml") design_ref = "file://%s" % str(input_file) orchestrator = orch.Orchestrator( state_manager=blank_state, ingester=yaml_ingester) orch_task = orchestrator.create_task( - action=hd_fields.OrchestratorAction.Noop, - design_ref=design_ref) + action=hd_fields.OrchestratorAction.Noop, design_ref=design_ref) orch_task.set_status(hd_fields.TaskStatus.Queued) orch_task.save() diff --git a/tests/integration/postgres/test_postgres_bootaction_status.py b/tests/integration/postgres/test_postgres_bootaction_status.py index 166ffe9d..541bc806 100644 --- a/tests/integration/postgres/test_postgres_bootaction_status.py +++ b/tests/integration/postgres/test_postgres_bootaction_status.py @@ -26,9 +26,8 @@ class TestPostgresBootAction(object): id_key = os.urandom(32) action_id = ulid2.generate_binary_ulid() nodename = 'testnode' - result = drydock_state.post_boot_action(nodename, - populateddb.get_id(), id_key, - action_id, 'helloworld') + result = drydock_state.post_boot_action( + nodename, populateddb.get_id(), id_key, action_id, 'helloworld') assert result @@ -37,9 +36,8 @@ class TestPostgresBootAction(object): id_key = os.urandom(32) action_id = ulid2.generate_binary_ulid() nodename = 'testnode' - drydock_state.post_boot_action(nodename, - populateddb.get_id(), id_key, action_id, - 'helloworld') + drydock_state.post_boot_action(nodename, populateddb.get_id(), id_key, + action_id, 'helloworld') result = drydock_state.put_bootaction_status( ulid2.encode_ulid_base32(action_id), @@ 
-52,9 +50,8 @@ class TestPostgresBootAction(object): id_key = os.urandom(32) action_id = ulid2.generate_binary_ulid() nodename = 'testnode' - drydock_state.post_boot_action(nodename, - populateddb.get_id(), id_key, action_id, - 'helloworld') + drydock_state.post_boot_action(nodename, populateddb.get_id(), id_key, + action_id, 'helloworld') ba = drydock_state.get_boot_action(ulid2.encode_ulid_base32(action_id)) diff --git a/tests/integration/postgres/test_postgres_builddata.py b/tests/integration/postgres/test_postgres_builddata.py new file mode 100644 index 00000000..377ee62b --- /dev/null +++ b/tests/integration/postgres/test_postgres_builddata.py @@ -0,0 +1,113 @@ +# Copyright 2017 AT&T Intellectual Property. All other rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Test postgres integration for build data management.""" + +import uuid +import copy + +from datetime import datetime, timedelta + +from drydock_provisioner import objects + + +class TestBuildData(object): + def test_build_data_insert_no_collected_date(self, blank_state): + """Test that build data can be inserted omitting collection date.""" + build_data_fields = { + 'node_name': 'foo', + 'generator': 'hello_world', + 'data_format': 'text/plain', + 'data_element': 'Hello World!', + 'task_id': uuid.uuid4(), + } + + build_data = objects.BuildData(**build_data_fields) + + result = blank_state.post_build_data(build_data) + + assert result + + def test_build_data_insert_with_collected_date(self, blank_state): + """Test that build data can be inserted specifying collection date.""" + build_data_fields = { + 'node_name': 'foo', + 'generator': 'hello_world', + 'data_format': 'text/plain', + 'data_element': 'Hello World!', + 'task_id': uuid.uuid4(), + 'collected_date': datetime.utcnow(), + } + + build_data = objects.BuildData(**build_data_fields) + + result = blank_state.post_build_data(build_data) + + assert result + + def test_build_data_select(self, blank_state): + """Test that build data can be deserialized from the database.""" + build_data_fields = { + 'node_name': 'foo', + 'generator': 'hello_world', + 'data_format': 'text/plain', + 'data_element': 'Hello World!', + 'task_id': uuid.uuid4(), + 'collected_date': datetime.utcnow(), + } + + build_data = objects.BuildData(**build_data_fields) + + result = blank_state.post_build_data(build_data) + + assert result + + bd_list = blank_state.get_build_data() + + assert len(bd_list) == 1 + + assert bd_list[0].to_dict() == build_data.to_dict() + + def test_build_data_select_latest(self, blank_state): + """Test that build data can be selected for only latest instance.""" + build_data_latest = { + 'node_name': 'foo', + 'generator': 'hello_world', + 'data_format': 'text/plain', + 'data_element': 'Hello World!', + 'task_id':
uuid.uuid4(), + 'collected_date': datetime.utcnow(), + } + + build_data_old = copy.deepcopy(build_data_latest) + build_data_old[ + 'collected_date'] = build_data_latest['collected_date'] - timedelta( + days=1) + build_data_old['task_id'] = uuid.uuid4() + + build_data1 = objects.BuildData(**build_data_latest) + build_data2 = objects.BuildData(**build_data_old) + + result = blank_state.post_build_data(build_data1) + + assert result + + result = blank_state.post_build_data(build_data2) + + assert result + + bd_list = blank_state.get_build_data(node_name='foo', latest=True) + + assert len(bd_list) == 1 + + assert bd_list[0].to_dict() == build_data1.to_dict() diff --git a/tests/unit/test_bootaction_asset_render.py b/tests/unit/test_bootaction_asset_render.py index 1baeb29e..c2ca5202 100644 --- a/tests/unit/test_bootaction_asset_render.py +++ b/tests/unit/test_bootaction_asset_render.py @@ -20,7 +20,8 @@ import drydock_provisioner.objects as objects class TestClass(object): - def test_bootaction_render_nodename(self, input_files, deckhand_ingester, setup): + def test_bootaction_render_nodename(self, input_files, deckhand_ingester, + setup): """Test the bootaction render routine provides expected output.""" objects.register_all() @@ -34,11 +35,13 @@ class TestClass(object): ba = design_data.get_bootaction('helloworld') action_id = ulid2.generate_binary_ulid() - assets = ba.render_assets('compute01', design_data, action_id, design_ref) + assets = ba.render_assets('compute01', design_data, action_id, + design_ref) assert 'compute01' in assets[0].rendered_bytes.decode('utf-8') - def test_bootaction_render_design_ref(self, input_files, deckhand_ingester, setup): + def test_bootaction_render_design_ref(self, input_files, deckhand_ingester, + setup): """Test the bootaction render routine provides expected output.""" objects.register_all() @@ -52,6 +55,8 @@ class TestClass(object): ba = design_data.get_bootaction('helloworld') action_id = ulid2.generate_binary_ulid() - assets = 
ba.render_assets('compute01', design_data, action_id, design_ref) + assets = ba.render_assets('compute01', design_data, action_id, + design_ref) - assert 'deckhand_fullsite.yaml' in assets[2].rendered_bytes.decode('utf-8') + assert 'deckhand_fullsite.yaml' in assets[2].rendered_bytes.decode( + 'utf-8') diff --git a/tests/unit/test_bootaction_tarbuilder.py b/tests/unit/test_bootaction_tarbuilder.py index 2c35798c..0649cb4c 100644 --- a/tests/unit/test_bootaction_tarbuilder.py +++ b/tests/unit/test_bootaction_tarbuilder.py @@ -39,7 +39,8 @@ class TestClass(object): ba = design_data.get_bootaction('helloworld') action_id = ulid2.generate_binary_ulid() - assets = ba.render_assets(target_host, design_data, action_id, design_ref) + assets = ba.render_assets(target_host, design_data, action_id, + design_ref) assert len(assets) > 0 diff --git a/tests/unit/test_validate_design.py b/tests/unit/test_validate_design.py index 550683b0..b0e7226c 100644 --- a/tests/unit/test_validate_design.py +++ b/tests/unit/test_validate_design.py @@ -18,13 +18,15 @@ from drydock_provisioner.orchestrator.validations.validator import Validator class TestDesignValidator(object): - def test_validate_design(self, deckhand_ingester, drydock_state, input_files): + def test_validate_design(self, deckhand_ingester, drydock_state, + input_files): """Test the basic validation engine.""" input_file = input_files.join("deckhand_fullsite.yaml") design_ref = "file://%s" % str(input_file) - orch = Orchestrator(state_manager=drydock_state, ingester=deckhand_ingester) + orch = Orchestrator( + state_manager=drydock_state, ingester=deckhand_ingester) status, site_design = Orchestrator.get_effective_site(orch, design_ref) diff --git a/tests/unit/test_validation_rule_boot_storage.py b/tests/unit/test_validation_rule_boot_storage.py index 826ed3b0..08a117e6 100644 --- a/tests/unit/test_validation_rule_boot_storage.py +++ b/tests/unit/test_validation_rule_boot_storage.py @@ -20,12 +20,14 @@ from 
drydock_provisioner.orchestrator.validations.validator import Validator class TestRationalBootStorage(object): - def test_boot_storage_rational(self, deckhand_ingester, drydock_state, input_files): + def test_boot_storage_rational(self, deckhand_ingester, drydock_state, + input_files): input_file = input_files.join("validation.yaml") design_ref = "file://%s" % str(input_file) - orch = Orchestrator(state_manager=drydock_state, ingester=deckhand_ingester) + orch = Orchestrator( + state_manager=drydock_state, ingester=deckhand_ingester) status, site_design = Orchestrator.get_effective_site(orch, design_ref) @@ -36,18 +38,21 @@ class TestRationalBootStorage(object): assert msg.get('error') is False assert len(message_list) == 1 - def test_invalid_boot_storage_small(self, deckhand_ingester, drydock_state, input_files): + def test_invalid_boot_storage_small(self, deckhand_ingester, drydock_state, + input_files): input_file = input_files.join("invalid_boot_storage_small.yaml") design_ref = "file://%s" % str(input_file) - orch = Orchestrator(state_manager=drydock_state, ingester=deckhand_ingester) + orch = Orchestrator( + state_manager=drydock_state, ingester=deckhand_ingester) status, site_design = Orchestrator.get_effective_site(orch, design_ref) message_list = Validator.boot_storage_rational(site_design) - regex = re.compile('Boot Storage Error: .+ volume must be > .+GB on BaremetalNode .+') + regex = re.compile( + 'Boot Storage Error: .+ volume must be > .+GB on BaremetalNode .+') for msg in message_list: msg = msg.to_dict() @@ -56,18 +61,22 @@ class TestRationalBootStorage(object): assert len(message_list) == 4 - def test_invalid_boot_storage_root_not_set(self, deckhand_ingester, drydock_state, input_files): + def test_invalid_boot_storage_root_not_set(self, deckhand_ingester, + drydock_state, input_files): input_file = input_files.join("invalid_validation.yaml") design_ref = "file://%s" % str(input_file) - orch = Orchestrator(state_manager=drydock_state, 
ingester=deckhand_ingester) + orch = Orchestrator( + state_manager=drydock_state, ingester=deckhand_ingester) status, site_design = Orchestrator.get_effective_site(orch, design_ref) message_list = Validator.boot_storage_rational(site_design) - regex = re.compile('Boot Storage Error: Root volume has to be set and must be > 20GB on BaremetalNode .+') + regex = re.compile( + 'Boot Storage Error: Root volume has to be set and must be > 20GB on BaremetalNode .+' + ) for msg in message_list: msg = msg.to_dict() diff --git a/tests/unit/test_validation_rule_ip_locality.py b/tests/unit/test_validation_rule_ip_locality.py index 97f4c8db..eecfbbd2 100644 --- a/tests/unit/test_validation_rule_ip_locality.py +++ b/tests/unit/test_validation_rule_ip_locality.py @@ -23,7 +23,8 @@ class TestIPLocality(object): input_file = input_files.join("validation.yaml") design_ref = "file://%s" % str(input_file) - orch = Orchestrator(state_manager=drydock_state, ingester=deckhand_ingester) + orch = Orchestrator( + state_manager=drydock_state, ingester=deckhand_ingester) status, site_design = Orchestrator.get_effective_site(orch, design_ref) @@ -33,11 +34,13 @@ class TestIPLocality(object): assert msg.get('message') == 'IP Locality Success' assert msg.get('error') is False - def test_ip_locality_no_networks(self, input_files, drydock_state, deckhand_ingester): + def test_ip_locality_no_networks(self, input_files, drydock_state, + deckhand_ingester): input_file = input_files.join("ip_locality_no_networks.yaml") design_ref = "file://%s" % str(input_file) - orch = Orchestrator(state_manager=drydock_state, ingester=deckhand_ingester) + orch = Orchestrator( + state_manager=drydock_state, ingester=deckhand_ingester) status, site_design = Orchestrator.get_effective_site(orch, design_ref) @@ -47,11 +50,13 @@ class TestIPLocality(object): assert msg.get('message') == 'No networks found.' 
assert msg.get('error') is False - def test_ip_locality_no_gateway(self, input_files, drydock_state, deckhand_ingester): + def test_ip_locality_no_gateway(self, input_files, drydock_state, + deckhand_ingester): input_file = input_files.join("ip_locality_no_gateway.yaml") design_ref = "file://%s" % str(input_file) - orch = Orchestrator(state_manager=drydock_state, ingester=deckhand_ingester) + orch = Orchestrator( + state_manager=drydock_state, ingester=deckhand_ingester) status, site_design = Orchestrator.get_effective_site(orch, design_ref) @@ -61,11 +66,13 @@ class TestIPLocality(object): assert 'No gateway found' in msg.get('message') assert msg.get('error') is True - def test_no_baremetal_node(self, input_files, drydock_state, deckhand_ingester): + def test_no_baremetal_node(self, input_files, drydock_state, + deckhand_ingester): input_file = input_files.join("no_baremetal_node.yaml") design_ref = "file://%s" % str(input_file) - orch = Orchestrator(state_manager=drydock_state, ingester=deckhand_ingester) + orch = Orchestrator( + state_manager=drydock_state, ingester=deckhand_ingester) status, site_design = Orchestrator.get_effective_site(orch, design_ref) @@ -75,24 +82,30 @@ class TestIPLocality(object): assert msg.get('message') == 'No baremetal_nodes found.' 
assert msg.get('error') is False - def test_invalid_ip_locality_invalid_network(self, input_files, drydock_state, deckhand_ingester): + def test_invalid_ip_locality_invalid_network( + self, input_files, drydock_state, deckhand_ingester): input_file = input_files.join("invalid_validation.yaml") design_ref = "file://%s" % str(input_file) - orch = Orchestrator(state_manager=drydock_state, ingester=deckhand_ingester) + orch = Orchestrator( + state_manager=drydock_state, ingester=deckhand_ingester) status, site_design = Orchestrator.get_effective_site(orch, design_ref) message_list = Validator.ip_locality_check(site_design) - regex = re.compile('IP Locality Error: The gateway IP Address .+ is not within the defined CIDR: .+ of .+') + regex = re.compile( + 'IP Locality Error: The gateway IP Address .+ is not within the defined CIDR: .+ of .+' + ) regex_1 = re.compile('IP Locality Error: .+ is not a valid network.') - regex_2 = re.compile('IP Locality Error: The IP Address .+ is not within the defined CIDR: .+ of .+ .') + regex_2 = re.compile( + 'IP Locality Error: The IP Address .+ is not within the defined CIDR: .+ of .+ .' 
+ ) assert len(message_list) == 3 for msg in message_list: msg = msg.to_dict() assert msg.get('error') - assert (regex.match(msg.get('message')) is not None or - regex_1.match(msg.get('message')) is not None or - regex_2.match(msg.get('message')) is not None) + assert (regex.match(msg.get('message')) is not None + or regex_1.match(msg.get('message')) is not None + or regex_2.match(msg.get('message')) is not None) diff --git a/tests/unit/test_validation_rule_mtu_rational.py b/tests/unit/test_validation_rule_mtu_rational.py index 2d2a01cf..790a2e67 100644 --- a/tests/unit/test_validation_rule_mtu_rational.py +++ b/tests/unit/test_validation_rule_mtu_rational.py @@ -25,7 +25,8 @@ class TestMtu(object): input_file = input_files.join("validation.yaml") design_ref = "file://%s" % str(input_file) - orch = Orchestrator(state_manager=drydock_state, ingester=deckhand_ingester) + orch = Orchestrator( + state_manager=drydock_state, ingester=deckhand_ingester) status, site_design = Orchestrator.get_effective_site(orch, design_ref) @@ -36,23 +37,30 @@ class TestMtu(object): assert msg.get('error') is False assert len(message_list) == 1 - def test_invalid_mtu(self, mocker, deckhand_ingester, drydock_state, input_files): + def test_invalid_mtu(self, mocker, deckhand_ingester, drydock_state, + input_files): input_file = input_files.join("invalid_validation.yaml") design_ref = "file://%s" % str(input_file) - orch = Orchestrator(state_manager=drydock_state, ingester=deckhand_ingester) + orch = Orchestrator( + state_manager=drydock_state, ingester=deckhand_ingester) status, site_design = Orchestrator.get_effective_site(orch, design_ref) message_list = Validator.mtu_rational(site_design) - regex = re.compile('Mtu Error: Mtu must be between 1400 and 64000; on Network .+') - regex_1 = re.compile('Mtu Error: Mtu must be <= the parent Network Link; for Network .+') + regex = re.compile( + 'Mtu Error: Mtu must be between 1400 and 64000; on Network .+') + regex_1 = re.compile( + 'Mtu Error: 
Mtu must be <= the parent Network Link; for Network .+' + ) for msg in message_list: msg = msg.to_dict() assert msg.get('error') - assert regex.match(msg.get('message')) is not None or regex_1.match(msg.get('message')) is not None + assert regex.match( + msg.get('message')) is not None or regex_1.match( + msg.get('message')) is not None assert len(message_list) == 4 diff --git a/tests/unit/test_validation_rule_network_bond.py b/tests/unit/test_validation_rule_network_bond.py index 505e4d9b..8b7d2559 100644 --- a/tests/unit/test_validation_rule_network_bond.py +++ b/tests/unit/test_validation_rule_network_bond.py @@ -50,12 +50,18 @@ class TestRationalNetworkLinkBond(object): message_list = Validator.rational_network_bond(site_design) - regex = re.compile('Network Link Bonding Error: Down delay is less than mon rate on BaremetalNode .+') - regex_1 = re.compile('Network Link Bonding Error: Up delay is less than mon rate on BaremetalNode .+') + regex = re.compile( + 'Network Link Bonding Error: Down delay is less than mon rate on BaremetalNode .+' + ) + regex_1 = re.compile( + 'Network Link Bonding Error: Up delay is less than mon rate on BaremetalNode .+' + ) for msg in message_list: msg = msg.to_dict() assert msg.get('error') is True - assert regex.match(msg.get('message')) is not None or regex_1.match(msg.get('message')) is not None + assert regex.match( + msg.get('message')) is not None or regex_1.match( + msg.get('message')) is not None assert len(message_list) == 2 diff --git a/tests/unit/test_validation_rule_network_trunking.py b/tests/unit/test_validation_rule_network_trunking.py index 618df6e9..2680d245 100644 --- a/tests/unit/test_validation_rule_network_trunking.py +++ b/tests/unit/test_validation_rule_network_trunking.py @@ -48,15 +48,19 @@ class TestRationalNetworkTrunking(object): message_list = Validator.network_trunking_rational(site_design) - regex = re.compile('Rational Network Trunking Error: Trunking mode is disabled, a trunking' - 'default_network 
must be defined; on NetworkLink .+') + regex = re.compile( + 'Rational Network Trunking Error: Trunking mode is disabled, a trunking' + 'default_network must be defined; on NetworkLink .+') - regex_1 = re.compile('Rational Network Trunking Error: If there is more than 1 allowed network,' - 'trunking mode must be enabled; on NetworkLink .+') + regex_1 = re.compile( + 'Rational Network Trunking Error: If there is more than 1 allowed network,' + 'trunking mode must be enabled; on NetworkLink .+') for msg in message_list: msg = msg.to_dict() assert msg.get('error') - assert regex.match(msg.get('message')) is not None or regex_1.match(msg.get('message')) is not None + assert regex.match( + msg.get('message')) is not None or regex_1.match( + msg.get('message')) is not None assert len(message_list) == 2 diff --git a/tests/unit/test_validation_rule_no_duplicate_IPs.py b/tests/unit/test_validation_rule_no_duplicate_IPs.py index 059ea855..20adfed8 100644 --- a/tests/unit/test_validation_rule_no_duplicate_IPs.py +++ b/tests/unit/test_validation_rule_no_duplicate_IPs.py @@ -24,8 +24,8 @@ class TestDuplicateIPs(object): input_file = input_files.join("validation.yaml") design_ref = "file://%s" % str(input_file) - orch = Orchestrator(state_manager=drydock_state, - ingester=deckhand_ingester) + orch = Orchestrator( + state_manager=drydock_state, ingester=deckhand_ingester) status, site_design = Orchestrator.get_effective_site(orch, design_ref) @@ -37,12 +37,11 @@ class TestDuplicateIPs(object): def test_no_duplicate_IPs_no_baremetal_node( self, input_files, drydock_state, deckhand_ingester): - input_file = input_files.join( - "no_baremetal_node.yaml") + input_file = input_files.join("no_baremetal_node.yaml") design_ref = "file://%s" % str(input_file) - orch = Orchestrator(state_manager=drydock_state, - ingester=deckhand_ingester) + orch = Orchestrator( + state_manager=drydock_state, ingester=deckhand_ingester) status, site_design = Orchestrator.get_effective_site(orch, design_ref) 
@@ -52,13 +51,13 @@ class TestDuplicateIPs(object): assert msg.get('message') == 'No BaremetalNodes Found.' assert msg.get('error') is False - def test_no_duplicate_IPs_no_addressing( - self, input_files, drydock_state, deckhand_ingester): + def test_no_duplicate_IPs_no_addressing(self, input_files, drydock_state, + deckhand_ingester): input_file = input_files.join("no_duplicate_IPs_no_addressing.yaml") design_ref = "file://%s" % str(input_file) - orch = Orchestrator(state_manager=drydock_state, - ingester=deckhand_ingester) + orch = Orchestrator( + state_manager=drydock_state, ingester=deckhand_ingester) status, site_design = Orchestrator.get_effective_site(orch, design_ref) @@ -68,19 +67,21 @@ class TestDuplicateIPs(object): assert msg.get('message') == 'No BaremetalNodes Found.' assert msg.get('error') is False - def test_invalid_no_duplicate_IPs( - self, input_files, drydock_state, deckhand_ingester): + def test_invalid_no_duplicate_IPs(self, input_files, drydock_state, + deckhand_ingester): input_file = input_files.join("invalid_validation.yaml") design_ref = "file://%s" % str(input_file) - orch = Orchestrator(state_manager=drydock_state, - ingester=deckhand_ingester) + orch = Orchestrator( + state_manager=drydock_state, ingester=deckhand_ingester) status, site_design = Orchestrator.get_effective_site(orch, design_ref) message_list = Validator.no_duplicate_IPs_check(site_design) - regex = re.compile('Error! Duplicate IP Address Found: .+ is in use by both .+ and .+.') + regex = re.compile( + 'Error! Duplicate IP Address Found: .+ is in use by both .+ and .+.' 
+ ) for msg in message_list: msg = msg.to_dict() assert msg.get('error') is True diff --git a/tests/unit/test_validation_rule_storage_partitioning.py b/tests/unit/test_validation_rule_storage_partitioning.py index 42605172..78200ec1 100644 --- a/tests/unit/test_validation_rule_storage_partitioning.py +++ b/tests/unit/test_validation_rule_storage_partitioning.py @@ -37,11 +37,14 @@ class TestRationalNetworkTrunking(object): assert msg.get('message') == 'Storage Partitioning' assert msg.get('error') is False - def test_storage_partitioning_unassigned_partition(self, deckhand_ingester, drydock_state, input_files): - input_file = input_files.join("storage_partitioning_unassigned_partition.yaml") + def test_storage_partitioning_unassigned_partition( + self, deckhand_ingester, drydock_state, input_files): + input_file = input_files.join( + "storage_partitioning_unassigned_partition.yaml") design_ref = "file://%s" % str(input_file) - orch = Orchestrator(state_manager=drydock_state, ingester=deckhand_ingester) + orch = Orchestrator( + state_manager=drydock_state, ingester=deckhand_ingester) status, site_design = Orchestrator.get_effective_site(orch, design_ref) @@ -65,8 +68,9 @@ class TestRationalNetworkTrunking(object): message_list = Validator.storage_partitioning(site_design) - regex = re.compile('Storage Partitioning Error: A volume group must be assigned to a storage device or ' - 'partition; volume group .+ on BaremetalNode .+') + regex = re.compile( + 'Storage Partitioning Error: A volume group must be assigned to a storage device or ' + 'partition; volume group .+ on BaremetalNode .+') for msg in message_list: msg = msg.to_dict() diff --git a/tests/unit/test_validation_rule_storage_sizing.py b/tests/unit/test_validation_rule_storage_sizing.py index c45dc82f..f91ebc69 100644 --- a/tests/unit/test_validation_rule_storage_sizing.py +++ b/tests/unit/test_validation_rule_storage_sizing.py @@ -20,12 +20,14 @@ from drydock_provisioner.orchestrator.validations.validator 
import Validator class TestStorageSizing(object): - def test_storage_sizing(self, deckhand_ingester, drydock_state, input_files): + def test_storage_sizing(self, deckhand_ingester, drydock_state, + input_files): input_file = input_files.join("storage_sizing.yaml") design_ref = "file://%s" % str(input_file) - orch = Orchestrator(state_manager=drydock_state, ingester=deckhand_ingester) + orch = Orchestrator( + state_manager=drydock_state, ingester=deckhand_ingester) status, site_design = Orchestrator.get_effective_site(orch, design_ref) @@ -36,22 +38,30 @@ class TestStorageSizing(object): assert msg.get('message') == 'Storage Sizing' assert msg.get('error') is False - def test_invalid_storage_sizing(self, deckhand_ingester, drydock_state, input_files): + def test_invalid_storage_sizing(self, deckhand_ingester, drydock_state, + input_files): input_file = input_files.join("invalid_validation.yaml") design_ref = "file://%s" % str(input_file) - orch = Orchestrator(state_manager=drydock_state, ingester=deckhand_ingester) + orch = Orchestrator( + state_manager=drydock_state, ingester=deckhand_ingester) status, site_design = Orchestrator.get_effective_site(orch, design_ref) message_list = Validator.storage_sizing(site_design) - regex = re.compile('Storage Sizing Error: Storage .+ size is < 0 on Baremetal Node .+') - regex_1 = re.compile('Storage Sizing Error: Storage .+ size is greater than 99 on Baremetal Node .+') + regex = re.compile( + 'Storage Sizing Error: Storage .+ size is < 0 on Baremetal Node .+' + ) + regex_1 = re.compile( + 'Storage Sizing Error: Storage .+ size is greater than 99 on Baremetal Node .+' + ) assert len(message_list) == 6 for msg in message_list: msg = msg.to_dict() - assert regex.match(msg.get('message')) is not None or regex_1.match(msg.get('message')) is not None + assert regex.match( + msg.get('message')) is not None or regex_1.match( + msg.get('message')) is not None assert msg.get('error') is True diff --git 
a/tests/unit/test_validation_rule_unique_network.py b/tests/unit/test_validation_rule_unique_network.py index 63db0ada..f2200d1d 100644 --- a/tests/unit/test_validation_rule_unique_network.py +++ b/tests/unit/test_validation_rule_unique_network.py @@ -20,7 +20,8 @@ from drydock_provisioner.orchestrator.validations.validator import Validator class TestUniqueNetwork(object): - def test_unique_network(self, mocker, deckhand_ingester, drydock_state, input_files): + def test_unique_network(self, mocker, deckhand_ingester, drydock_state, + input_files): input_file = input_files.join("validation.yaml") design_ref = "file://%s" % str(input_file) @@ -37,7 +38,8 @@ class TestUniqueNetwork(object): assert msg.get('error') is False assert len(message_list) == 1 - def test_invalid_unique_network(self, mocker, deckhand_ingester, drydock_state, input_files): + def test_invalid_unique_network(self, mocker, deckhand_ingester, + drydock_state, input_files): input_file = input_files.join("invalid_unique_network.yaml") design_ref = "file://%s" % str(input_file) @@ -49,7 +51,9 @@ class TestUniqueNetwork(object): message_list = Validator.unique_network_check(site_design) - regex = re.compile('Unique Network Error: Allowed network .+ duplicated on NetworkLink .+ and NetworkLink .+') + regex = re.compile( + 'Unique Network Error: Allowed network .+ duplicated on NetworkLink .+ and NetworkLink .+' + ) for msg in message_list: msg = msg.to_dict()