From f9cfc239973c68a34612747e065671dbecf5a269 Mon Sep 17 00:00:00 2001 From: Scott Hussey Date: Tue, 21 Mar 2017 09:16:41 -0500 Subject: [PATCH 01/11] Refactor models to clearly delineate attributes for design, applied and build data. --- helm_drydock/model/hostprofile.py | 414 +++++++++++++++--------- helm_drydock/model/node.py | 74 +++-- helm_drydock/orchestrator/designdata.py | 11 +- tests/test_design_inheritance.py | 16 +- 4 files changed, 312 insertions(+), 203 deletions(-) diff --git a/helm_drydock/model/hostprofile.py b/helm_drydock/model/hostprofile.py index b239219c..595272c3 100644 --- a/helm_drydock/model/hostprofile.py +++ b/helm_drydock/model/hostprofile.py @@ -38,50 +38,55 @@ class HostProfile(object): self.name = metadata.get('name', '') self.site = metadata.get('region', '') - self.parent_profile = spec.get('host_profile', None) - self.hardware_profile = spec.get('hardware_profile', None) + # Design Data + self.design = {} + + self.design['parent_profile'] = spec.get('host_profile', None) + self.design['hardware_profile'] = spec.get('hardware_profile', None) + oob = spec.get('oob', {}) - self.oob_type = oob.get('type', None) - self.oob_network = oob.get('network', None) - self.oob_account = oob.get('account', None) - self.oob_credential = oob.get('credential', None) + + self.design['oob_type'] = oob.get('type', None) + self.design['oob_network'] = oob.get('network', None) + self.design['oob_account'] = oob.get('account', None) + self.design['oob_credential'] = oob.get('credential', None) storage = spec.get('storage', {}) - self.storage_layout = storage.get('layout', 'lvm') + self.design['storage_layout'] = storage.get('layout', 'lvm') bootdisk = storage.get('bootdisk', {}) - self.bootdisk_device = bootdisk.get('device', None) - self.bootdisk_root_size = bootdisk.get('root_size', None) - self.bootdisk_boot_size = bootdisk.get('boot_size', None) + self.design['bootdisk_device'] = bootdisk.get('device', None) + self.design['bootdisk_root_size'] = 
bootdisk.get('root_size', None) + self.design['bootdisk_boot_size'] = bootdisk.get('boot_size', None) partitions = storage.get('partitions', []) - self.partitions = [] + self.design['partitions'] = [] for p in partitions: - self.partitions.append(HostPartition(self.api_version, **p)) + self.design['partitions'].append(HostPartition(self.api_version, **p)) interfaces = spec.get('interfaces', []) - self.interfaces = [] + self.design['interfaces'] = [] for i in interfaces: - self.interfaces.append(HostInterface(self.api_version, **i)) + self.design['interfaces'].append(HostInterface(self.api_version, **i)) node_metadata = spec.get('metadata', {}) metadata_tags = node_metadata.get('tags', []) - self.tags = [] + self.design['tags'] = [] for t in metadata_tags: - self.tags.append(t) + self.design['tags'].append(t) owner_data = node_metadata.get('owner_data', {}) - self.owner_data = {} + self.design['owner_data'] = {} for k, v in owner_data.items(): - self.owner_data[k] = v + self.design['owner_data'][k] = v - self.rack = node_metadata.get('rack', None) + self.design['rack'] = node_metadata.get('rack', None) else: self.log.error("Unknown API version %s of %s" % @@ -89,32 +94,31 @@ class HostProfile(object): raise ValueError('Unknown API version of object') def get_rack(self): - return self.rack + return self.design['rack'] def get_name(self): return self.name def has_tag(self, tag): - if tag in self.tags: + if tag in self.design['tags']: return True return False def apply_inheritance(self, site): - # We return a deep copy of the profile so as not to corrupt - # the original model - self_copy = deepcopy(self) + # No parent to inherit from, just apply design values + # and return + if self.design['parent_profile'] is None: + self.applied = deepcopy(self.design) + return - if self.parent_profile is None: - return self_copy - - parent = site.get_host_profile(self.parent_profile) + parent = site.get_host_profile(self.design['parent_profile']) if parent is None: raise 
NameError("Cannot find parent profile %s for %s" - % (self.parent_profile, self.name)) + % (self.design['parent_profile'], self.name)) - parent = parent.apply_inheritance(site) + parent.apply_inheritance(site) # First compute inheritance for simple fields inheritable_field_list = [ @@ -123,24 +127,34 @@ class HostProfile(object): "bootdisk_device", "bootdisk_root_size", "bootdisk_boot_size", "rack"] + # Create applied data from self design values and parent + # applied values + + self.applied = {} + for f in inheritable_field_list: - setattr(self_copy, f, - Utils.apply_field_inheritance(getattr(self, f, None), - getattr(parent, f, None))) + self.applied[f] = Utils.apply_field_inheritance( + self.design.get(f, None), + parent.applied.get(f, None)) # Now compute inheritance for complex types - self_copy.tags = Utils.merge_lists(self.tags, parent.tags) + self.applied['tags'] = Utils.merge_lists(self.design['tags'], + parent.applied['tags']) - self_copy.owner_data = Utils.merge_dicts( - self.owner_data, parent.owner_data) + self.applied['owner_data'] = Utils.merge_dicts( + self.design['owner_data'], parent.applied['owner_data']) - self_copy.interfaces = HostInterface.merge_lists( - self.interfaces, parent.interfaces) + self.applied['interfaces'] = HostInterface.merge_lists( + self.design['interfaces'], parent.applied['interfaces']) + for i in self.applied.get('interfaces', []): + i.ensure_applied_data() - self_copy.partitions = HostPartition.merge_lists( - self.partitions, parent.partitions) + self.applied['partitions'] = HostPartition.merge_lists( + self.design['partitions'], parent.applied['partitions']) + for p in self.applied.get('partitions', []): + p. 
ensure_applied_data() - return self_copy + return class HostInterface(object): @@ -152,24 +166,56 @@ class HostInterface(object): if self.api_version == "v1.0": self.device_name = kwargs.get('device_name', None) - self.network_link = kwargs.get('device_link', None) - self.hardware_slaves = [] + self.design = {} + self.design['network_link'] = kwargs.get('device_link', None) + + self.design['hardware_slaves'] = [] slaves = kwargs.get('slaves', []) for s in slaves: - self.hardware_slaves.append(s) + self.design['hardware_slaves'].append(s) - self.networks = [] + self.design['networks'] = [] networks = kwargs.get('networks', []) for n in networks: - self.networks.append(n) + self.design['networks'].append(n) else: self.log.error("Unknown API version %s of %s" % (self.api_version, self.__class__)) raise ValueError('Unknown API version of object') + # Ensure applied_data exists + def ensure_applied_data(self): + if getattr(self, 'applied', None) is None: + self.applied = deepcopy(self.design) + + return + + def get_name(self): + return self.device_name + + def get_applied_hw_slaves(self): + self.ensure_applied_data() + + return self.applied.get('hardware_slaves', []) + + def get_applied_slave_selectors(self): + self.ensure_applied_data() + + return self.applied.get('selectors', None) + + # Return number of slaves for this interface + def get_applied_slave_count(self): + self.ensure_applied_data() + + return len(self.applied.get('hardware_slaves', [])) + + def get_network_configs(self): + self.ensure_applied_data() + return self.applied.get('attached_networks', []) + # The device attribute may be hardware alias that translates to a # physical device address. If the device attribute does not match an # alias, we assume it directly identifies a OS device name. 
When the @@ -177,51 +223,48 @@ class HostInterface(object): # device, the selector will be decided and applied def add_selector(self, sel_type, address='', dev_type=''): - if getattr(self, 'selectors', None) is None: - self.selectors = [] + self.ensure_applied_data() + + if self.applied.get('selectors', None) is None: + self.applied['selectors'] = [] new_selector = {} new_selector['selector_type'] = sel_type new_selector['address'] = address new_selector['device_type'] = dev_type - self.selectors.append(new_selector) - - def get_slave_selectors(self): - return self.selectors - - # Return number of slaves for this interface - def get_slave_count(self): - return len(self.hardware_slaves) + self.applied['selectors'].append(new_selector) def apply_link_config(self, net_link): if (net_link is not None and isinstance(net_link, NetworkLink) and - net_link.name == self.network_link): + net_link.name == self.design.get('network_link', '')): - self.attached_link = deepcopy(net_link) + self.ensure_applied_data() + + self.applied['attached_link'] = deepcopy(net_link) return True return False def apply_network_config(self, network): - if network in self.networks: - if getattr(self, 'attached_networks', None) is None: - self.attached_networks = [] - self.attached_networks.append(deepcopy(network)) + if network.name in self.design['networks']: + self.ensure_applied_data() + if self.applied.get('attached_networks', None) is None: + self.applied['attached_networks'] = [] + self.applied['attached_networks'].append(deepcopy(network)) return True else: return False def set_network_address(self, network_name, address): - if getattr(self, 'attached_networks', None) is None: + self.ensure_applied_data() + + if self.applied.get('attached_networks', None) is None: return False - for n in self.attached_neteworks: + for n in self.applied.get('attached_networks', []): if n.name == network_name: - n.assigned_address = address - - def get_network_configs(self): - return self.attached_networks + 
setattr(n, 'assigned_address', address) """ Merge two lists of HostInterface models with child_list taking @@ -232,60 +275,81 @@ class HostInterface(object): @staticmethod def merge_lists(child_list, parent_list): - if len(child_list) == 0: - return deepcopy(parent_list) - effective_list = [] - if len(parent_list) == 0: + + if len(child_list) == 0 and len(parent_list) > 0: + for p in parent_list: + pp = deepcopy(p) + pp.ensure_applied_data() + effective_list.append(pp) + elif len(parent_list) == 0 and len(child_list) > 0: for i in child_list: - if i.device_name.startswith('!'): + if i.get_name().startswith('!'): continue else: - effective_list.append(deepcopy(i)) - return effective_list + ii = deepcopy(i) + ii.ensure_applied_data() + effective_list.append(ii) + elif len(parent_list) > 0 and len(child_list) > 0: + parent_interfaces = [] + for i in parent_list: + parent_name = i.device_name + parent_interfaces.append(parent_name) + add = True + for j in child_list: + if j.device_name == ("!" + parent_name): + add = False + break + elif j.device_name == parent_name: + m = HostInterface(j.api_version) + m.device_name = j.get_name() + m.design['network_link'] = \ + Utils.apply_field_inheritance( + j.design.get('network_link', None), + i.applied.get('network_link', None)) + + s = [x for x + in i.applied.get('hardware_slaves', []) + if ("!" + x) not in j.design.get( + 'hardware_slaves', [])] + s = list(s) + + s.extend( + [x for x + in j.design.get('hardware_slaves', []) + if not x.startswith("!")]) + + m.design['hardware_slaves'] = s + + n = [x for x + in i.applied.get('networks',[]) + if ("!" 
+ x) not in j.design.get( + 'networks', [])] + n = list(n) + + n.extend( + [x for x + in j.design.get('networks', []) + if not x.startswith("!")]) + + m.design['networks'] = n + m.ensure_applied_data() + + effective_list.append(m) + add = False + break + + if add: + ii = deepcopy(i) + ii.ensure_applied_data() + effective_list.append(ii) - parent_interfaces = [] - for i in parent_list: - parent_name = i.device_name - parent_interfaces.append(parent_name) - add = True for j in child_list: - if j.device_name == ("!" + parent_name): - add = False - break - elif j.device_name == parent_name: - m = HostInterface(j.api_version) - m.device_name = j.device_name - m.network_link = \ - Utils.apply_field_inheritance(j.network_link, - i.network_link) - s = filter(lambda x: ("!" + x) not in j.hardware_slaves, - i.hardware_slaves) - s = list(s) - - s.extend(filter(lambda x: not x.startswith("!"), - j.hardware_slaves)) - m.hardware_slaves = s - - n = filter(lambda x: ("!" + x) not in j.networks, - i.networks) - n = list(n) - - n.extend(filter(lambda x: not x.startswith("!"), - j.networks)) - m.networks = n - - effective_list.append(m) - add = False - break - - if add: - effective_list.append(deepcopy(i)) - - for j in child_list: - if (j.device_name not in parent_interfaces - and not j.device_name.startswith("!")): - effective_list.append(deepcopy(j)) + if (j.device_name not in parent_interfaces + and not j.device_name.startswith("!")): + jj = deepcopy(j) + jj.ensure_applied_data() + effective_list.append(jj) return effective_list @@ -297,17 +361,37 @@ class HostPartition(object): if self.api_version == "v1.0": self.name = kwargs.get('name', None) - self.device = kwargs.get('device', None) - self.part_uuid = kwargs.get('part_uuid', None) - self.size = kwargs.get('size', None) - self.mountpoint = kwargs.get('mountpoint', None) - self.fstype = kwargs.get('fstype', 'ext4') - self.mount_options = kwargs.get('mount_options', 'defaults') - self.fs_uuid = kwargs.get('fs_uuid', None) - 
self.fs_label = kwargs.get('fs_label', None) + + self.design = {} + self.design['device'] = kwargs.get('device', None) + self.design['part_uuid'] = kwargs.get('part_uuid', None) + self.design['size'] = kwargs.get('size', None) + self.design['mountpoint'] = kwargs.get('mountpoint', None) + self.design['fstype'] = kwargs.get('fstype', 'ext4') + self.design['mount_options'] = kwargs.get('mount_options', 'defaults') + self.design['fs_uuid'] = kwargs.get('fs_uuid', None) + self.design['fs_label'] = kwargs.get('fs_label', None) + + self.applied = kwargs.get('applied', None) + self.build = kwargs.get('build', None) else: raise ValueError('Unknown API version of object') + # Ensure applied_data exists + def ensure_applied_data(self): + if getattr(self, 'applied', None) is None: + self.applied = deepcopy(self.design) + + return + + def get_applied_device(self): + self.ensure_applied_data() + + return self.applied.get('device', '') + + def get_name(self): + return self.name + # The device attribute may be hardware alias that translates to a # physical device address. If the device attribute does not match an # alias, we assume it directly identifies a OS device name. 
When the @@ -315,15 +399,18 @@ class HostPartition(object): # device, the selector will be decided and applied def set_selector(self, sel_type, address='', dev_type=''): + self.ensure_applied_data() + selector = {} selector['type'] = sel_type selector['address'] = address selector['device_type'] = dev_type - self.selector = selector + self.applied['selector'] = selector def get_selector(self): - return self.selector + self.ensure_applied_data() + return self.applied.get('selector', None) """ Merge two lists of HostPartition models with child_list taking @@ -334,45 +421,56 @@ class HostPartition(object): @staticmethod def merge_lists(child_list, parent_list): - if len(child_list) == 0: - return deepcopy(parent_list) - effective_list = [] - if len(parent_list) == 0: + + if len(child_list) == 0 and len(parent_list) > 0: + for p in parent_list: + pp = deepcopy(p) + pp.ensure_applied_data() + effective_list.append(pp) + elif len(parent_list) == 0 and len(child_list) > 0: for i in child_list: - if i.name.startswith('!'): + if i.get_name().startswith('!'): continue else: - effective_list.append(deepcopy(i)) + ii = deepcopy(i) + ii.ensure_applied_data() + effective_list.append(ii) + elif len(parent_list) > 0 and len(child_list) > 0: + inherit_field_list = ["device", "part_uuid", "size", + "mountpoint", "fstype", "mount_options", + "fs_uuid", "fs_label"] + parent_partitions = [] + for i in parent_list: + parent_name = i.get_name() + parent_partitions.append(parent_name) + add = True + for j in child_list: + if j.get_name() == ("!" + parent_name): + add = False + break + elif j.get_name() == parent_name: + p = HostPartition(j.api_version) + p.name = j.get_name() - inherit_field_list = ["device", "part_uuid", "size", - "mountpoint", "fstype", "mount_options", - "fs_uuid", "fs_label"] - - parent_partitions = [] - for i in parent_list: - parent_name = i.name - parent_partitions.append(parent_name) - add = True - for j in child_list: - if j.name == ("!" 
+ parent_name): - add = False - break - elif j.name == parent_name: - p = HostPartition(j.api_version) - p.name = j.name - - for f in inherit_field_list: - setattr(p, Utils.apply_field_inheritance(getattr(j, f), - getattr(i, f)) - ) - add = False - effective_list.append(p) + for f in inherit_field_list: + j_f = j.design.get(f, None) + i_f = i.applied.get(f, None) + p.design.set(p, + Utils.apply_field_inheritance(j_f, i_f)) + add = False + p.ensure_applied_data() + effective_list.append(p) if add: - effective_list.append(deepcopy(i)) + ii = deepcopy(i) + ii.ensure_applied_data() + effective_list.append(ii) for j in child_list: - if j.name not in parent_list: - effective_list.append(deepcopy(j)) + if (j.get_name() not in parent_list and + not j.get_name().startswith("!")): + jj = deepcopy(j) + jj.ensure_applied_data + effective_list.append(jj) return effective_list diff --git a/helm_drydock/model/node.py b/helm_drydock/model/node.py index 22d41b2e..c6d4c6a1 100644 --- a/helm_drydock/model/node.py +++ b/helm_drydock/model/node.py @@ -58,38 +58,50 @@ class BaremetalNode(HostProfile): self.log.error("Invalid address assignment %s on Node %s" % (address, self.name)) - self.build = kwargs.get('build', {}) + self.applied = kwargs.get('applied_data', None) + self.build = kwargs.get('build', None) - def start_build(self): - if self.build.get('status','') == '': + # Compile the applied version of this model sourcing referenced + # data from the passed site design + def compile_applied_model(self, site): + self.apply_host_profile(site) + self.apply_hardware_profile(site) + self.apply_network_connections(site) + return + + def init_build(self): + if self.build is None: + self.build = {} self.build['status'] = NodeStatus.Unknown def apply_host_profile(self, site): - return self.apply_inheritance(site) + self.apply_inheritance(site) + return # Translate device alises to physical selectors and copy # other hardware attributes into this object def apply_hardware_profile(self, 
site): - self_copy = deepcopy(self) - - if self.hardware_profile is None: + if self.applied['hardware_profile'] is None: raise ValueError("Hardware profile not set") - hw_profile = site.get_hardware_profile(self.hardware_profile) + hw_profile = site.get_hardware_profile( + self.applied['hardware_profile']) - for i in self_copy.interfaces: - for s in i.hardware_slaves: + for i in self.applied.get('interfaces', []): + for s in i.get_applied_hw_slaves(): selector = hw_profile.resolve_alias("pci", s) if selector is None: - i.add_selector("name", address=p.device) + i.add_selector("name", address=s) else: i.add_selector("address", address=selector['address'], dev_type=selector['device_type']) - for p in self_copy.partitions: - selector = hw_profile.resolve_alias("scsi", p.device) + for p in self.applied.get('partitions', []): + selector = hw_profile.resolve_alias("scsi", + p.get_applied_device()) if selector is None: - p.set_selector("name", address=p.device) + p.set_selector("name", + address=p.get_applied_device()) else: p.set_selector("address", address=selector['address'], dev_type=selector['device_type']) @@ -106,44 +118,50 @@ class BaremetalNode(HostProfile): "pxe_interface": getattr(hw_profile, 'pxe_interface', None) } - self_copy.hardware = hardware + self.applied['hardware'] = hardware - return self_copy + return def apply_network_connections(self, site): - self_copy = deepcopy(self) - for n in site.network_links: - for i in self_copy.interfaces: + for i in self.applied.get('interfaces', []): i.apply_link_config(n) for n in site.networks: - for i in self_copy.interfaces: + for i in self.applied.get('interfaces', []): i.apply_network_config(n) - for a in self_copy.addressing: - for i in self_copy.interfaces: + for a in self.applied.get('addressing', []): + for i in self.applied.get('interfaces', []): i.set_network_address(a.get('network'), a.get('address')) - return self_copy + return + + def get_applied_interface(self, iface_name): + if getattr(self, 'applied', 
None) is not None: + for i in self.applied.get('interfaces', []): + if i.get_name() == iface_name: + return i - def get_interface(self, iface_name): - for i in self.interfaces: - if i.device_name == iface_name: - return i return None def get_status(self): - return self.build['status'] + self.init_build() + return self.build.get('status', NodeStatus.Unknown) def set_status(self, status): if isinstance(status, NodeStatus): + self.init_build() self.build['status'] = status def get_last_build_action(self): + if getattr(self, 'build', None) is None: + return None + return self.build.get('last_action', None) def set_last_build_action(self, action, result, detail=None): + self.init_build() last_action = self.build.get('last_action', None) if last_action is None: self.build['last_action'] = {} diff --git a/helm_drydock/orchestrator/designdata.py b/helm_drydock/orchestrator/designdata.py index bc2cfbba..b953e5b1 100644 --- a/helm_drydock/orchestrator/designdata.py +++ b/helm_drydock/orchestrator/designdata.py @@ -90,15 +90,10 @@ class DesignStateClient(object): site_copy = deepcopy(site_root) - effective_nodes = [] - for n in site_copy.baremetal_nodes: - resolved = n.apply_host_profile(site_copy) - resolved = resolved.apply_hardware_profile(site_copy) - resolved = resolved.apply_network_connections(site_copy) - effective_nodes.append(resolved) - - site_copy.baremetal_nodes = effective_nodes + n.apply_host_profile(site_copy) + n.apply_hardware_profile(site_copy) + n.apply_network_connections(site_copy) return site_copy """ diff --git a/tests/test_design_inheritance.py b/tests/test_design_inheritance.py index 0e13e8f1..b629b94c 100644 --- a/tests/test_design_inheritance.py +++ b/tests/test_design_inheritance.py @@ -37,23 +37,21 @@ class TestClass(object): assert len(design_data.baremetal_nodes) == 2 - print(yaml.dump(design_data, default_flow_style=False)) - design_data = client.compute_model_inheritance(design_data) node = design_data.get_baremetal_node("controller01") - - 
print(yaml.dump(node, default_flow_style=False)) - assert node.hardware_profile == 'HPGen9v3' + assert node.applied.get('hardware_profile') == 'HPGen9v3' - iface = node.get_interface('bond0') + iface = node.get_applied_interface('bond0') - assert iface.get_slave_count() == 2 + print(yaml.dump(iface, default_flow_style=False)) + + assert iface.get_applied_slave_count() == 2 - iface = node.get_interface('pxe') + iface = node.get_applied_interface('pxe') - assert iface.get_slave_count() == 1 + assert iface.get_applied_slave_count() == 1 @pytest.fixture(scope='module') def loaded_design(self, input_files): From 37daf1f95f09c0446a4cbce1b03a844ad20333f7 Mon Sep 17 00:00:00 2001 From: Scott Hussey Date: Tue, 21 Mar 2017 16:30:35 -0500 Subject: [PATCH 02/11] Add locking around DesignState change methods to prepare for multithreaded access Support design and build change merging instead of just replacing. Includes basic unit test. Add readme for model design --- helm_drydock/error.py | 3 + helm_drydock/model/hostprofile.py | 12 +- helm_drydock/model/hwprofile.py | 3 + helm_drydock/model/network.py | 5 + helm_drydock/model/readme.md | 37 +++++ helm_drydock/model/site.py | 3 + helm_drydock/orchestrator/designdata.py | 4 +- helm_drydock/statemgmt/__init__.py | 210 ++++++++++++++++++++---- tests/test_statemgmt.py | 62 +++++++ 9 files changed, 294 insertions(+), 45 deletions(-) create mode 100644 helm_drydock/model/readme.md create mode 100644 tests/test_statemgmt.py diff --git a/helm_drydock/error.py b/helm_drydock/error.py index 9319eeca..074e389a 100644 --- a/helm_drydock/error.py +++ b/helm_drydock/error.py @@ -14,3 +14,6 @@ class DesignError(Exception): pass + +class StateError(Exception): + pass \ No newline at end of file diff --git a/helm_drydock/model/hostprofile.py b/helm_drydock/model/hostprofile.py index 595272c3..9f054cb0 100644 --- a/helm_drydock/model/hostprofile.py +++ b/helm_drydock/model/hostprofile.py @@ -146,13 +146,9 @@ class HostProfile(object): 
self.applied['interfaces'] = HostInterface.merge_lists( self.design['interfaces'], parent.applied['interfaces']) - for i in self.applied.get('interfaces', []): - i.ensure_applied_data() self.applied['partitions'] = HostPartition.merge_lists( self.design['partitions'], parent.applied['partitions']) - for p in self.applied.get('partitions', []): - p. ensure_applied_data() return @@ -215,7 +211,7 @@ class HostInterface(object): def get_network_configs(self): self.ensure_applied_data() return self.applied.get('attached_networks', []) - + # The device attribute may be hardware alias that translates to a # physical device address. If the device attribute does not match an # alias, we assume it directly identifies a OS device name. When the @@ -293,11 +289,11 @@ class HostInterface(object): elif len(parent_list) > 0 and len(child_list) > 0: parent_interfaces = [] for i in parent_list: - parent_name = i.device_name + parent_name = i.get_name() parent_interfaces.append(parent_name) add = True for j in child_list: - if j.device_name == ("!" + parent_name): + if j.get_name() == ("!" + parent_name): add = False break elif j.device_name == parent_name: @@ -312,7 +308,6 @@ class HostInterface(object): in i.applied.get('hardware_slaves', []) if ("!" + x) not in j.design.get( 'hardware_slaves', [])] - s = list(s) s.extend( [x for x @@ -325,7 +320,6 @@ class HostInterface(object): in i.applied.get('networks',[]) if ("!" 
+ x) not in j.design.get( 'networks', [])] - n = list(n) n.extend( [x for x diff --git a/helm_drydock/model/hwprofile.py b/helm_drydock/model/hwprofile.py index cb8d3779..193ab7ee 100644 --- a/helm_drydock/model/hwprofile.py +++ b/helm_drydock/model/hwprofile.py @@ -67,6 +67,9 @@ class HardwareProfile(object): return + def get_name(self): + return self.name + def resolve_alias(self, alias_type, alias): selector = {} for d in self.devices: diff --git a/helm_drydock/model/network.py b/helm_drydock/model/network.py index ef784114..e60ce1cf 100644 --- a/helm_drydock/model/network.py +++ b/helm_drydock/model/network.py @@ -58,6 +58,8 @@ class NetworkLink(object): (self.api_version, self.__class__)) raise ValueError('Unknown API version of object') + def get_name(self): + return self.name class Network(object): @@ -98,6 +100,9 @@ class Network(object): (self.api_version, self.__class__)) raise ValueError('Unknown API version of object') + def get_name(self): + return self.name + class NetworkAddressRange(object): diff --git a/helm_drydock/model/readme.md b/helm_drydock/model/readme.md new file mode 100644 index 00000000..61966ec8 --- /dev/null +++ b/helm_drydock/model/readme.md @@ -0,0 +1,37 @@ +# Drydock Model # + +Models for the drydock design parts and subparts + +## Features ## + +### Inheritance ### + +Drydock supports inheritance in the design data model. + +Currently this only supports BaremetalNode inheriting from HostProfile and +HostProfile inheriting from HostProfile. + +Inheritance rules: + +1. A child overrides a parent for part and subpart attributes +2. For attributes that are lists, the parent list and child list +are merged. +3. A child can remove a list member by prefixing the value with '!' +4. For lists of subparts (i.e. HostInterface and HostPartition) if +there is a member in the parent list and child list with the same name +(as defined by the get_name() method), the child member inherits from +the parent member. The '!' 
prefix applies here for deleting a member +based on the name. + +### Phased Data ### + +In other words, as a modeled object goes from design to apply +to build the model keeps the data separated to retain reference +values and provide context around particular attribute values. + +* Design - The data ingested from sources such as Formation +* Apply - Computing inheritance of design data to render an effective site design +* Build - Maintaining actions taken to implement the design and the results + +Currently only applies to BaremetalNodes as no other design parts +flow through the build process. \ No newline at end of file diff --git a/helm_drydock/model/site.py b/helm_drydock/model/site.py index 0bddb45a..cfb0c7a1 100644 --- a/helm_drydock/model/site.py +++ b/helm_drydock/model/site.py @@ -60,6 +60,9 @@ class Site(object): (self.api_version, self.__class__)) raise ValueError('Unknown API version of object') + def get_name(self): + return self.name + def start_build(self): if self.build.get('status', '') == '': self.build['status'] = SiteStatus.Unknown diff --git a/helm_drydock/orchestrator/designdata.py b/helm_drydock/orchestrator/designdata.py index b953e5b1..135d13f9 100644 --- a/helm_drydock/orchestrator/designdata.py +++ b/helm_drydock/orchestrator/designdata.py @@ -91,9 +91,7 @@ class DesignStateClient(object): site_copy = deepcopy(site_root) for n in site_copy.baremetal_nodes: - n.apply_host_profile(site_copy) - n.apply_hardware_profile(site_copy) - n.apply_network_connections(site_copy) + n.compile_applied_model(site_copy) return site_copy """ diff --git a/helm_drydock/statemgmt/__init__.py b/helm_drydock/statemgmt/__init__.py index b630cdfd..91dfa1f5 100644 --- a/helm_drydock/statemgmt/__init__.py +++ b/helm_drydock/statemgmt/__init__.py @@ -14,6 +14,7 @@ from copy import deepcopy from datetime import datetime from datetime import timezone +from threading import Lock import uuid @@ -23,16 +24,20 @@ import helm_drydock.model.network as network import 
helm_drydock.model.site as site import helm_drydock.model.hwprofile as hwprofile -from helm_drydock.error import DesignError +from helm_drydock.error import DesignError, StateError class DesignState(object): def __init__(self): self.design_base = None + self.design_base_lock = Lock() self.design_changes = [] + self.design_changes_lock = Lock() self.builds = [] + self.builds_lock = Lock() + return # TODO Need to lock a design base or change once implementation @@ -45,14 +50,27 @@ class DesignState(object): def post_design_base(self, site_design): if site_design is not None and isinstance(site_design, SiteDesign): - self.design_base = deepcopy(site_design) - return True + my_lock = self.design_base_lock.acquire(blocking=True, + timeout=10) + if my_lock: + self.design_base = deepcopy(site_design) + self.design_base_lock.release() + return True + raise StateError("Could not acquire lock") + else: + raise DesignError("Design change must be a SiteDesign instance") def put_design_base(self, site_design): - # TODO Support merging if site_design is not None and isinstance(site_design, SiteDesign): - self.design_base = deepcopy(site_design) - return True + my_lock = self.design_base_lock.acquire(blocking=True, + timeout=10) + if my_lock: + self.design_base.merge_updates(site_design) + self.design_base_lock.release() + return True + raise StateError("Could not acquire lock") + else: + raise DesignError("Design base must be a SiteDesign instance") def get_design_change(self, changeid): match = [x for x in self.design_changes if x.changeid == changeid] @@ -64,27 +82,35 @@ class DesignState(object): def post_design_change(self, site_design): if site_design is not None and isinstance(site_design, SiteDesign): - exists = [(x) for x - in self.design_changes - if x.changeid == site_design.changeid] - if len(exists) > 0: - raise DesignError("Existing change %s found" % - (site_design.changeid)) - self.design_changes.append(deepcopy(site_design)) - return True + my_lock = 
self.design_changes_lock.acquire(block=True, + timeout=10) + if my_lock: + exists = [(x) for x + in self.design_changes + if x.changeid == site_design.changeid] + if len(exists) > 0: + self.design_changs_lock.release() + raise DesignError("Existing change %s found" % + (site_design.changeid)) + + self.design_changes.append(deepcopy(site_design)) + self.design_changes_lock.release() + return True + raise StateError("Could not acquire lock") else: raise DesignError("Design change must be a SiteDesign instance") def put_design_change(self, site_design): - # TODO Support merging if site_design is not None and isinstance(site_design, SiteDesign): - design_copy = deepcopy(site_design) - self.design_changes = [design_copy - if x.changeid == design_copy.changeid - else x - for x - in self.design_changes] - return True + my_lock = self.design_changes_lock.acquire(block=True, + timeout=10) + if my_lock: + changeid = site_design.changeid + for c in self.design_changes: + if c.changeid == changeid: + c.merge_updates(site_design) + return True + raise StateError("Could not acquire lock") else: raise DesignError("Design change must be a SiteDesign instance") @@ -108,23 +134,48 @@ class DesignState(object): def post_build(self, site_build): if site_build is not None and isinstance(site_build, SiteBuild): - exists = [b for b in self.builds - if b.build_id == site_build.build_id] + my_lock = self.builds_lock.acquire(block=True, timeout=10) + if my_lock: + exists = [b for b in self.builds + if b.build_id == site_build.build_id] - if len(exists) > 0: - raise DesignError("Already a site build with ID %s" % - (str(site_build.build_id))) + if len(exists) > 0: + self.builds_lock.release() + raise DesignError("Already a site build with ID %s" % + (str(site_build.build_id))) + self.builds.append(deepcopy(site_build)) + self.builds_lock.release() + return True + raise StateError("Could not acquire lock") else: - self.builds.append(deepcopy(site_build)) - return True + raise 
DesignError("Design change must be a SiteDesign instance") + def put_build(self, site_build): + if site_build is not None and isinstance(site_build, SiteBuild): + my_lock = self.builds_lock.acquire(block=True, timeout=10) + if my_lock: + buildid = site_build.buildid + for b in self.builds: + if b.buildid == buildid: + b.merge_updates(site_build) + self.builds_lock.release() + return True + self.builds_lock.release() + return False + raise StateError("Could not acquire lock") + else: + raise DesignError("Design change must be a SiteDesign instance") class SiteDesign(object): - def __init__(self, ischange=False): + def __init__(self, ischange=False, changeid=None): if ischange: - self.changeid = uuid.uuid4() + if changeid is not None: + self.changeid = changeid + else: + self.changeid = uuid.uuid4() else: + # Base design self.changeid = 0 self.sites = [] @@ -140,6 +191,17 @@ class SiteDesign(object): self.sites.append(new_site) + def update_site(self, update): + if update is None or not isinstance(update, site.Site): + raise DesignError("Invalid Site model") + + for i, s in enumerate(self.sites): + if s.get_name() == update.get_name(): + self.sites[i] = deepcopy(update) + return True + + return False + def get_sites(self): return self.sites @@ -156,6 +218,17 @@ class SiteDesign(object): self.networks.append(new_network) + def update_network(self, update): + if update is None or not isinstance(update, network.Network): + raise DesignError("Invalid Network model") + + for i, n in enumerate(self.networks): + if n.get_name() == update.get_name(): + self.networks[i] = deepcopy(update) + return True + + return False + def get_networks(self): return self.networks @@ -174,6 +247,17 @@ class SiteDesign(object): self.network_links.append(new_network_link) + def update_network_link(self, update): + if update is None or not isinstance(update, network.NetworkLink): + raise DesignError("Invalid NetworkLink model") + + for i, n in enumerate(self.network_links): + if n.get_name() == 
update.get_name(): + self.network_links[i] = deepcopy(update) + return True + + return False + def get_network_links(self): return self.network_links @@ -192,6 +276,17 @@ class SiteDesign(object): self.host_profiles.append(new_host_profile) + def update_host_profile(self, update): + if update is None or not isinstance(update, hostprofile.HostProfile): + raise DesignError("Invalid HostProfile model") + + for i, h in enumerate(self.host_profiles): + if h.get_name() == update.get_name(): + self.host_profiles[i] = deepcopy(h) + return True + + return False + def get_host_profiles(self): return self.host_profiles @@ -210,6 +305,17 @@ class SiteDesign(object): self.hardware_profiles.append(new_hardware_profile) + def update_hardware_profile(self, update): + if update is None or not isinstance(update, hwprofile.HardwareProfile): + raise DesignError("Invalid HardwareProfile model") + + for i, h in enumerate(self.hardware_profiles): + if h.get_name() == update.get_name(): + self.hardware_profiles[i] = deepcopy(h) + return True + + return False + def get_hardware_profiles(self): return self.hardware_profiles @@ -228,6 +334,17 @@ class SiteDesign(object): self.baremetal_nodes.append(new_baremetal_node) + def update_baremetal_node(self, update): + if (update is None or not isinstance(update, node.BaremetalNode)): + raise DesignError("Invalid BaremetalNode model") + + for i, b in enumerate(self.baremetal_nodes): + if b.get_name() == update.get_name(): + self.baremetal_nodes[i] = deepcopy(b) + return True + + return False + def get_baremetal_nodes(self): return self.baremetal_nodes @@ -239,6 +356,33 @@ class SiteDesign(object): raise DesignError("BaremetalNode %s not found in design state" % node_name) + # Only merge the design parts included in the updated site + # design. 
Changes are merged at the part level, not for fields + # within a design part + # + # TODO convert update_* methods to use exceptions and convert to try block + def merge_updates(self, updates): + if updates is not None and isinstance(updates, SiteDesign): + if updates.changeid == self.changeid: + for u in updates.sites: + if not self.update_site(u): + self.add_site(u) + for u in updates.networks: + if not self.update_network(u): + self.add_network(u) + for u in updates.network_links: + if not self.update_network_link(u): + self.add_network_link(u) + for u in updates.host_profiles: + if not self.update_host_profile(u): + self.add_host_profile(u) + for u in updates.hardware_profiles: + if not self.update_hardware_profile(u): + self.add_hardware_profile(u) + for u in updates.baremetal_nodes: + if not self.update_baremetal_node(u): + self.add_baremetal_node(u) + class SiteBuild(SiteDesign): @@ -246,9 +390,9 @@ class SiteBuild(SiteDesign): super(SiteBuild, self).__init__() if build_id is None: - self.build_id = datetime.datetime.now(timezone.utc).timestamp() + self.buildid = datetime.datetime.now(timezone.utc).timestamp() else: - self.build_id = build_id + self.buildid = build_id def get_filtered_nodes(self, node_filter): effective_nodes = self.get_baremetal_nodes() diff --git a/tests/test_statemgmt.py b/tests/test_statemgmt.py new file mode 100644 index 00000000..4d9ea560 --- /dev/null +++ b/tests/test_statemgmt.py @@ -0,0 +1,62 @@ +# Copyright 2017 AT&T Intellectual Property. All other rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +from helm_drydock.statemgmt import SiteDesign + +import helm_drydock.model.site as site +import helm_drydock.model.network as network + +import pytest +import shutil +import os +import helm_drydock.ingester.plugins.yaml + +class TestClass(object): + + def setup_method(self, method): + print("Running test {0}".format(method.__name__)) + + def test_sitedesign_merge(self): + design_data = SiteDesign() + + initial_site = site.Site(**{'apiVersion': 'v1.0', + 'metadata': { + 'name': 'testsite', + }, + }) + net_a = network.Network(**{ 'apiVersion': 'v1.0', + 'metadata': { + 'name': 'net_a', + 'region': 'testsite', + }, + 'spec': { + 'cidr': '172.16.0.0/24', + }}) + net_b = network.Network(**{ 'apiVersion': 'v1.0', + 'metadata': { + 'name': 'net_b', + 'region': 'testsite', + }, + 'spec': { + 'cidr': '172.16.0.1/24', + }}) + + design_data.add_site(initial_site) + design_data.add_network(net_a) + + design_update = SiteDesign() + design_update.add_network(net_b) + + design_data.merge_updates(design_update) + + assert len(design_data.get_networks()) == 2 \ No newline at end of file From 55860428584822018acd1cf18f87632159b655fe Mon Sep 17 00:00:00 2001 From: Scott Hussey Date: Wed, 22 Mar 2017 09:04:19 -0500 Subject: [PATCH 03/11] Update orchestration docs and test push with two-factor and SSH --- helm_drydock/orchestrator/readme.md | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/helm_drydock/orchestrator/readme.md b/helm_drydock/orchestrator/readme.md index fd11755f..fb6324ae 100644 --- a/helm_drydock/orchestrator/readme.md +++ b/helm_drydock/orchestrator/readme.md @@ -10,17 +10,18 @@ Orchestrator should persist the state of each task such that on failure the task can retried and only the steps needed will be executed. 
+## Drydock Tasks ## Bullet points listed below are not exhaustive and will change as we move through testing -## ValidateDesign ## +### ValidateDesign ### Load design data from the statemgmt persistent store and validate that the current state of design data represents a valid site design. No claim is made that the design data is compatible with the physical state of the site. -## VerifySite ## +### VerifySite ### Verify site-wide resources are in a useful state @@ -29,7 +30,7 @@ Verify site-wide resources are in a useful state * Promenade or other next-step services are up and available * Verify credentials are available -## PrepareSite ## +### PrepareSite ### Begin preparing site-wide resources for bootstrapping. This action will lock site design data for changes. @@ -37,7 +38,7 @@ action will lock site design data for changes. * Configure bootstrapper with site network configs * Shuffle images so they are correctly configured for bootstrapping -## VerifyNode ## +### VerifyNode ### Verification of per-node configurations within the context of the current node status @@ -54,7 +55,7 @@ of the current node status - Possibly network connectivity - Firmware versions -## PrepareNode ## +### PrepareNode ### Prepare a node for bootstrapping @@ -67,7 +68,7 @@ Prepare a node for bootstrapping * Configure node networking * Configure node storage -## DeployNode ## +### DeployNode ### Begin bootstrapping the node and monitor success @@ -78,6 +79,13 @@ success * Reboot node from local disk * Monitor platform bootstrapping -## DestroyNode ## +### DestroyNode ### -Destroy current node configuration and rebootstrap from scratch \ No newline at end of file +Destroy current node configuration and rebootstrap from scratch + +## Integration with Drivers ## + +Based on the requested task and the current known state of a node +the orchestrator will call the enabled downstream drivers with one +or more tasks. 
Each call will provide the driver with the desired +state (the applied model) and current known state (the build model). \ No newline at end of file From e5dc950f1fd64f18a8fae779ff35e9f1e3832ec6 Mon Sep 17 00:00:00 2001 From: Scott Hussey Date: Fri, 14 Apr 2017 10:52:24 -0500 Subject: [PATCH 04/11] WIP - Initial attempt at task-based orchestration - Refactored a few files to prevent import loops - Created basic Orchestrator and ProviderDriver scheme for task exection - Created task model more concurrent-friendly task mgmt --- helm_drydock/control/readme.md | 10 ++- helm_drydock/drivers/__init__.py | 111 +++++++++++++++++++++++- helm_drydock/{orchestrator => }/enum.py | 13 ++- helm_drydock/error.py | 3 + helm_drydock/model/hostprofile.py | 4 +- helm_drydock/model/hwprofile.py | 4 +- helm_drydock/model/network.py | 4 +- helm_drydock/model/node.py | 4 +- helm_drydock/model/site.py | 4 +- helm_drydock/model/task.py | 101 +++++++++++++++++++++ helm_drydock/orchestrator/__init__.py | 71 ++++++++------- helm_drydock/statemgmt/__init__.py | 44 ++++++++++ helm_drydock/statemgmt/readme.md | 6 ++ setup.py | 6 +- tests/test_orch_generic.py | 61 +++++++++++++ 15 files changed, 395 insertions(+), 51 deletions(-) rename helm_drydock/{orchestrator => }/enum.py (90%) create mode 100644 helm_drydock/model/task.py create mode 100644 tests/test_orch_generic.py diff --git a/helm_drydock/control/readme.md b/helm_drydock/control/readme.md index f3c602fb..2dad5b24 100644 --- a/helm_drydock/control/readme.md +++ b/helm_drydock/control/readme.md @@ -3,4 +3,12 @@ This is the external facing API service to control the rest of Drydock and query Drydock-managed data. 
-Anticipate basing this service on the falcon Python library \ No newline at end of file +Anticipate basing this service on the falcon Python library + +## Endpoints ## + +### /tasks ### + +POST - Create a new orchestration task and submit it for execution +GET - Get status of a task +DELETE - Cancel execution of a task if permitted diff --git a/helm_drydock/drivers/__init__.py b/helm_drydock/drivers/__init__.py index a0e27676..227914d7 100644 --- a/helm_drydock/drivers/__init__.py +++ b/helm_drydock/drivers/__init__.py @@ -11,11 +11,114 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from threading import Thread, Lock +import uuid +import time +import helm_drydock.statemgmt as statemgmt +import helm_drydock.enum as enum +import helm_drydock.model.task as tasks + +# This is the interface for the orchestrator to access a driver +# TODO Need to have each driver spin up a seperate thread to manage +# driver tasks and feed them via queue class ProviderDriver(object): - __init__(self): - pass - + def __init__(self, state_manager): + self.state_manager = state_manager -class DriverTask(object): \ No newline at end of file + def execute_task(self, task): + task_manager = DriverTaskManager(task, self.state_manager) + task_manager.start() + + while task_manager.is_alive(): + self.state_manager.put_task(task) + time.sleep(1) + + return + +# Execute a single task in a separate thread +class DriverTaskManager(Thread): + + def __init__(self, task, state_manager): + super(DriverTaskManager, self).__init__() + + if isinstance(task, DriverTask): + self.task = task + else: + raise DriverError("DriverTaskManager must be initialized" \ + "with a DriverTask instance") + + if isinstance(state_manager, statemgmt.DesignState): + self.state_manager = state_manager + else: + raise DriverError("Invalid state manager specified") + + return + + def run(): + 
self.task.set_manager(self.name) + + if self.task.action == enum.OrchestratorAction.Noop: + task.set_status(enum.TaskStatus.Running) + self.state_manager.put_task(task) + i = 0 + while i < 5: + i = i + 1 + if task.terminate: + task.set_status(enum.TaskStatus.Terminated) + self.state_manager.put_task(task) + return + else: + time.sleep(1) + task.set_status(enum.TaskStatus.Complete) + self.state_manager.put_task(task) + return + else: + raise DriverError("Unknown Task action") + + + +class DriverTask(tasks.Task): + # subclasses implemented by each driver should override this with the list + # of actions that driver supports + + supported_actions = [enum.OrchestratorAction.Noop] + + def __init__(self, target_design_id=None, + target_action=None, task_scope={}, **kwargs): + super(DriverTask, self).__init__(**kwargs) + + if target_design_id is None: + raise DriverError("target_design_id cannot be None") + + self.target_design_id = target_design_id + + if target_action in self.supported_actions: + self.target_action = target_action + else: + raise DriverError("DriverTask does not support action %s" + % (target_action)) + + self.task_scope = task_scope + + # The DriverTaskManager thread that is managing this task. 
We + # don't want a task to be submitted multiple times + + self.task_manager = None + + def set_manager(self, manager_name): + my_lock = self.get_lock() + if my_lock: + if self.task_manager is None: + self.task_manager = manager_name + else: + self.release_lock() + raise DriverError("Task %s already managed by %s" + % (self.taskid, self.task_manager)) + self.release_lock() + return True + raise DriverError("Could not acquire lock") + + def get_manager(self): + return self.task_manager \ No newline at end of file diff --git a/helm_drydock/orchestrator/enum.py b/helm_drydock/enum.py similarity index 90% rename from helm_drydock/orchestrator/enum.py rename to helm_drydock/enum.py index ffbf958a..308bb7c3 100644 --- a/helm_drydock/orchestrator/enum.py +++ b/helm_drydock/enum.py @@ -14,7 +14,7 @@ from enum import Enum, unique @unique -class Action(Enum): +class OrchestratorAction(Enum): Noop = 'noop' ValidateDesign = 'validate_design' VerifySite = 'verify_site' @@ -62,3 +62,14 @@ class NodeStatus(Enum): FailedBootstrap = 'failed_bootstrap' # Node bootstrapping failed Bootstrapped = 'bootstrapped' # Node fully bootstrapped Complete = 'complete' # Node is complete + +@unique +class TaskStatus(Enum): + Created = 'created' + Waiting = 'waiting' + Running = 'running' + Stopping = 'stopping' + Terminated = 'terminated' + Errored = 'errored' + Complete = 'complete' + Stopped = 'stopped' \ No newline at end of file diff --git a/helm_drydock/error.py b/helm_drydock/error.py index 074e389a..5f0b72e0 100644 --- a/helm_drydock/error.py +++ b/helm_drydock/error.py @@ -16,4 +16,7 @@ class DesignError(Exception): pass class StateError(Exception): + pass + +class OrchestratorError(Exception): pass \ No newline at end of file diff --git a/helm_drydock/model/hostprofile.py b/helm_drydock/model/hostprofile.py index 9f054cb0..68c0ea22 100644 --- a/helm_drydock/model/hostprofile.py +++ b/helm_drydock/model/hostprofile.py @@ -18,8 +18,8 @@ import logging from copy import deepcopy -from 
helm_drydock.orchestrator.enum import SiteStatus -from helm_drydock.orchestrator.enum import NodeStatus +from helm_drydock.enum import SiteStatus +from helm_drydock.enum import NodeStatus from helm_drydock.model.network import Network from helm_drydock.model.network import NetworkLink from helm_drydock.model import Utils diff --git a/helm_drydock/model/hwprofile.py b/helm_drydock/model/hwprofile.py index 193ab7ee..a37053aa 100644 --- a/helm_drydock/model/hwprofile.py +++ b/helm_drydock/model/hwprofile.py @@ -18,8 +18,8 @@ import logging from copy import deepcopy -from helm_drydock.orchestrator.enum import SiteStatus -from helm_drydock.orchestrator.enum import NodeStatus +from helm_drydock.enum import SiteStatus +from helm_drydock.enum import NodeStatus class HardwareProfile(object): diff --git a/helm_drydock/model/network.py b/helm_drydock/model/network.py index e60ce1cf..4369b002 100644 --- a/helm_drydock/model/network.py +++ b/helm_drydock/model/network.py @@ -18,8 +18,8 @@ import logging from copy import deepcopy -from helm_drydock.orchestrator.enum import SiteStatus -from helm_drydock.orchestrator.enum import NodeStatus +from helm_drydock.enum import SiteStatus +from helm_drydock.enum import NodeStatus class NetworkLink(object): diff --git a/helm_drydock/model/node.py b/helm_drydock/model/node.py index c6d4c6a1..f9091e6f 100644 --- a/helm_drydock/model/node.py +++ b/helm_drydock/model/node.py @@ -19,8 +19,8 @@ import logging from copy import deepcopy -from helm_drydock.orchestrator.enum import SiteStatus -from helm_drydock.orchestrator.enum import NodeStatus +from helm_drydock.enum import SiteStatus +from helm_drydock.enum import NodeStatus from helm_drydock.model.hostprofile import HostProfile from helm_drydock.model import Utils diff --git a/helm_drydock/model/site.py b/helm_drydock/model/site.py index cfb0c7a1..0b6105f3 100644 --- a/helm_drydock/model/site.py +++ b/helm_drydock/model/site.py @@ -18,8 +18,8 @@ import logging from copy import deepcopy -from 
helm_drydock.orchestrator.enum import SiteStatus -from helm_drydock.orchestrator.enum import NodeStatus +from helm_drydock.enum import SiteStatus +from helm_drydock.enum import NodeStatus class Site(object): diff --git a/helm_drydock/model/task.py b/helm_drydock/model/task.py new file mode 100644 index 00000000..da583fc3 --- /dev/null +++ b/helm_drydock/model/task.py @@ -0,0 +1,101 @@ +# Copyright 2017 AT&T Intellectual Property. All other rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import uuid + +from threading import Lock + +import helm_drydock.error as errors + +from helm_drydock.enum import TaskStatus, OrchestratorAction + +class Task(object): + + def __init__(self, **kwargs): + self.task_id = uuid.uuid4() + self.status = TaskStatus.Created + self.terminate = False + self.subtasks = [] + + parent_task = kwargs.get('parent_task','') + + # A lock to prevent concurrency race conditions + self.update_lock = Lock() + + def get_id(self): + return self.task_id + + # Mark this task and all subtasks as requested termination + # so that the task manager knows termination has been requested + def terminate_task(self): + locked = self.get_lock() + if locked: + # TODO Naively assume subtask termination will succeed for now + for t in self.subtasks: + t.terminate_task() + self.terminate = True + self.release_lock() + else: + raise errors.OrchestratorError("Could not get task update lock") + + def set_status(self, status): + locked = self.get_lock() + if locked: + self.status = status + self.release_lock() + else: + raise errors.OrchestratorError("Could not get task update lock") + + def get_status(self): + return self.status + + def get_lock(self): + locked = self.update_lock.acquire(blocking=True, timeout=10) + return locked + + def release_lock(self): + self.update_lock.release() + return + + def create_subtask(self, subtask_class, **kwargs): + if self.terminate: + raise errors.OrchestratorError("Cannot create subtask for parent" \ + " marked for termination") + locked = self.get_lock() + if locked: + subtask = subtask_class(parent_task=self.get_id(), **kwargs) + self.subtasks.append(subtask.get_id()) + self.release_lock() + return subtask + else: + raise errors.OrchestratorError("Could not get task update lock") + + +class OrchestratorTask(Task): + + def __init__(self, **kwargs): + super(OrchestratorTask, self).__init__(**kwargs) + + self.action = kwargs.get('action', OrchestratorAction.Noop) + + # Validate parameters based on action + self.site = 
kwargs.get('site', '') + + if self.site == '': + raise ValueError("Orchestration Task requires 'site' parameter") + + if self.action in [OrchestratorAction.VerifyNode, + OrchestratorAction.PrepareNode, + OrchestratorAction.DeployNode, + OrchestratorAction.DestroyNode]: + self.node_filter = kwargs.get('node_filter', None) \ No newline at end of file diff --git a/helm_drydock/orchestrator/__init__.py b/helm_drydock/orchestrator/__init__.py index 6ae6f0fb..d50499bf 100644 --- a/helm_drydock/orchestrator/__init__.py +++ b/helm_drydock/orchestrator/__init__.py @@ -11,23 +11,34 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import uuid +import time + from enum import Enum, unique -import uuid +import helm_drydock.drivers as drivers +import helm_drydock.model.task as tasks +import helm_drydock.error as errors + +from helm_drydock.enum import TaskStatus, OrchestratorAction class Orchestrator(object): # enabled_drivers is a map which provider drivers # should be enabled for use by this orchestrator - - def __init__(self, enabled_drivers=None, design_state=None): + def __init__(self, enabled_drivers=None, state_manager=None): self.enabled_drivers = {} - self.enabled_drivers['oob'] = enabled_drivers.get('oob', None) - self.enabled_drivers['server'] = enabled_drivers.get('server', None) - self.enabled_drivers['network'] = enabled_drivers.get('network', None) + if enabled_drivers is not None: + self.enabled_drivers['oob'] = enabled_drivers.get('oob', None) + self.enabled_drivers['server'] = enabled_drivers.get( + 'server', None) + self.enabled_drivers['network'] = enabled_drivers.get( + 'network', None) - self.design_state = design_state + self.state_manager = state_manager + + self.thread_objs = {} """ execute_task @@ -38,34 +49,28 @@ class Orchestrator(object): module. Based on those 3 inputs, we'll decide what is needed next. 
""" def execute_task(self, task): - if design_state is None: - raise Exception("Cannot execute task without initialized state manager") + if self.state_manager is None: + raise errors.OrchestratorError("Cannot execute task without" \ + " initialized state manager") + # Just for testing now, need to implement with enabled_drivers + # logic + if task.action == OrchestratorAction.Noop: + task.set_status(TaskStatus.Running) + self.state_manager.put_task(task) -class OrchestrationTask(object): - - def __init__(self, action, **kwargs): - self.taskid = uuid.uuid4() - - self.action = action - - parent_task = kwargs.get('parent_task','') - - # Validate parameters based on action - self.site = kwargs.get('site', '') - - - if self.site == '': - raise ValueError("Task requires 'site' parameter") - - if action in [Action.VerifyNode, Action.PrepareNode, - Action.DeployNode, Action.DestroyNode]: - self.node_filter = kwargs.get('node_filter', None) - - def child_task(self, action, **kwargs): - child_task = OrchestrationTask(action, parent_task=self.taskid, site=self.site, **kwargs) - return child_task - + driver_task = task.create_subtask(drivers.DriverTask, + target_design_id=0, + target_action=OrchestratorAction.Noop) + self.state_manager.post_task(driver_task) + driver = drivers.ProviderDriver(self.state_manager) + driver.execute_task(driver_task) + task.set_status(driver_task.get_status()) + self.state_manager.put_task(task) + return + else: + raise errors.OrchestratorError("Action %s not supported" + % (task.action)) \ No newline at end of file diff --git a/helm_drydock/statemgmt/__init__.py b/helm_drydock/statemgmt/__init__.py index 91dfa1f5..752042b8 100644 --- a/helm_drydock/statemgmt/__init__.py +++ b/helm_drydock/statemgmt/__init__.py @@ -23,6 +23,7 @@ import helm_drydock.model.hostprofile as hostprofile import helm_drydock.model.network as network import helm_drydock.model.site as site import helm_drydock.model.hwprofile as hwprofile +import helm_drydock.model.task as 
tasks from helm_drydock.error import DesignError, StateError @@ -38,6 +39,9 @@ class DesignState(object): self.builds = [] self.builds_lock = Lock() + self.tasks = [] + self.tasks_lock = Lock() + return # TODO Need to lock a design base or change once implementation @@ -166,6 +170,46 @@ class DesignState(object): else: raise DesignError("Design change must be a SiteDesign instance") + def get_task(self, task_id): + for t in self.tasks: + if t.get_id() == task_id: + return t + return None + + def post_task(self, task): + if task is not None and isinstance(task, tasks.Task): + my_lock = self.tasks_lock.acquire(blocking=True, timeout=10) + if my_lock: + task_id = task.get_id() + matching_tasks = [t for t in self.tasks + if t.get_id() == task_id] + if len(matching_tasks) > 0: + self.tasks_lock.release() + raise StateError("Task %s already created" % task_id) + + self.tasks.append(deepcopy(task)) + self.tasks_lock.release() + return True + else: + raise StateError("Could not acquire lock") + else: + raise StateError("Task is not the correct type") + + def put_task(self, task): + if task is not None and isinstance(task, tasks.Task): + my_lock = self.tasks_lock.acquire(blocking=True, timeout=10) + if my_lock: + task_id = task.get_id() + self.tasks = [t + if t.get_id() != task_id else deepcopy(task) + for t in self.tasks] + self.tasks_lock.release() + return True + else: + raise StateError("Could not acquire lock") + else: + raise StateError("Task is not the correct type") + class SiteDesign(object): def __init__(self, ischange=False, changeid=None): diff --git a/helm_drydock/statemgmt/readme.md b/helm_drydock/statemgmt/readme.md index 85e4e0d5..6477014a 100644 --- a/helm_drydock/statemgmt/readme.md +++ b/helm_drydock/statemgmt/readme.md @@ -20,6 +20,12 @@ Serialization of Drydock internal model as rendered to effective implementation /drydock/build /drydock/build/[datestamp] - A point-in-time view of what was deployed with deployment results +## Tasks ## + +Management of 
task state for the internal orchestrator + +/drydock/tasks + ## Node data ## Per-node data that can drive introspection as well as accept updates from nodes diff --git a/setup.py b/setup.py index 69a958b3..eea0d7ed 100644 --- a/setup.py +++ b/setup.py @@ -45,12 +45,14 @@ setup(name='helm_drydock', 'helm_drydock.ingester.plugins', 'helm_drydock.statemgmt', 'helm_drydock.orchestrator', - 'helm_drydock.control'], + 'helm_drydock.control', + 'helm_drydock.drivers', + 'helm_drydock.drivers.oob'], install_requires=[ 'PyYAML', 'oauth', 'requests-oauthlib', - 'pyghmi', + 'pyghmi>=1.0.18', 'netaddr', 'pecan', 'webob' diff --git a/tests/test_orch_generic.py b/tests/test_orch_generic.py new file mode 100644 index 00000000..25262a66 --- /dev/null +++ b/tests/test_orch_generic.py @@ -0,0 +1,61 @@ +# Copyright 2017 AT&T Intellectual Property. All other rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# +# Generic testing for the orchestrator +# + +import helm_drydock.orchestrator as orch +import helm_drydock.enum as enum +import helm_drydock.statemgmt as statemgmt +import helm_drydock.model.task as task +import helm_drydock.drivers as drivers +import threading +import time + +class TestClass(object): + + def test_driver_threading(self): + state_mgr = statemgmt.DesignState() + orchestrator = orch.Orchestrator(state_manager=state_mgr) + orch_task = task.OrchestratorTask(action=enum.OrchestratorAction.Noop, + site="default") + + orchestrator.execute_task(orch_task) + + # Check that each subtask executed in a different thread than this one + for t in orch_task.subtasks: + if isinstance(t, drivers.DriverTask): + assert t.get_manager() != threading.current_thread().name + + def test_task_termination(self): + state_mgr = statemgmt.DesignState() + orchestrator = orch.Orchestrator(state_manager=state_mgr) + orch_task = task.OrchestratorTask(action=enum.OrchestratorAction.Noop, + site="default") + + orch_thread = threading.Thread(target=orchestrator.execute_task, + args=(orch_task,)) + orch_thread.start() + + time.sleep(1) + orch_task.terminate_task() + + while orch_thread.is_alive(): + time.sleep(1) + + assert orch_task.get_status() == enum.TaskStatus.Terminated + + for t in orch_task.subtasks: + assert t.get_status() == enum.TaskStatus.Terminated \ No newline at end of file From 93605c42faee3c74cddefdcd3049012351359d9d Mon Sep 17 00:00:00 2001 From: Scott Hussey Date: Mon, 17 Apr 2017 16:06:35 -0500 Subject: [PATCH 05/11] Basic orchestrator and driver task mgmt and threading works --- helm_drydock/drivers/__init__.py | 63 ++++++------------ helm_drydock/model/task.py | 48 +++----------- helm_drydock/orchestrator/__init__.py | 93 +++++++++++++++++++++++---- helm_drydock/statemgmt/__init__.py | 46 +++++++++++-- tests/test_orch_generic.py | 33 ++++++---- 5 files changed, 172 insertions(+), 111 deletions(-) diff --git a/helm_drydock/drivers/__init__.py 
b/helm_drydock/drivers/__init__.py index 227914d7..2449c64b 100644 --- a/helm_drydock/drivers/__init__.py +++ b/helm_drydock/drivers/__init__.py @@ -24,15 +24,16 @@ import helm_drydock.model.task as tasks # driver tasks and feed them via queue class ProviderDriver(object): - def __init__(self, state_manager): + def __init__(self, state_manager, orchestrator): + self.orchestrator = orchestrator self.state_manager = state_manager - def execute_task(self, task): - task_manager = DriverTaskManager(task, self.state_manager) + def execute_task(self, task_id): + task_manager = DriverTaskManager(task_id, self.state_manager, + self.orchestrator) task_manager.start() while task_manager.is_alive(): - self.state_manager.put_task(task) time.sleep(1) return @@ -40,39 +41,38 @@ class ProviderDriver(object): # Execute a single task in a separate thread class DriverTaskManager(Thread): - def __init__(self, task, state_manager): + def __init__(self, task_id, state_manager, orchestrator): super(DriverTaskManager, self).__init__() - if isinstance(task, DriverTask): - self.task = task - else: - raise DriverError("DriverTaskManager must be initialized" \ - "with a DriverTask instance") + self.orchestrator = orchestrator if isinstance(state_manager, statemgmt.DesignState): self.state_manager = state_manager else: raise DriverError("Invalid state manager specified") + self.task = self.state_manager.get_task(task_id) + return - def run(): - self.task.set_manager(self.name) + def run(self): + if self.task.target_action == enum.OrchestratorAction.Noop: + self.orchestrator.task_field_update(self.task.get_id(), + status=enum.TaskStatus.Running) - if self.task.action == enum.OrchestratorAction.Noop: - task.set_status(enum.TaskStatus.Running) - self.state_manager.put_task(task) i = 0 while i < 5: + self.task = self.state_manager.get_task(self.task.get_id()) i = i + 1 - if task.terminate: - task.set_status(enum.TaskStatus.Terminated) - self.state_manager.put_task(task) + if self.task.terminate: + 
self.orchestrator.task_field_update(self.task.get_id(), + status=enum.TaskStatus.Terminated) return else: time.sleep(1) - task.set_status(enum.TaskStatus.Complete) - self.state_manager.put_task(task) + + self.orchestrator.task_field_update(self.task.get_id(), + status=enum.TaskStatus.Complete) return else: raise DriverError("Unknown Task action") @@ -100,25 +100,4 @@ class DriverTask(tasks.Task): raise DriverError("DriverTask does not support action %s" % (target_action)) - self.task_scope = task_scope - - # The DriverTaskManager thread that is managing this task. We - # don't want a task to be submitted multiple times - - self.task_manager = None - - def set_manager(self, manager_name): - my_lock = self.get_lock() - if my_lock: - if self.task_manager is None: - self.task_manager = manager_name - else: - self.release_lock() - raise DriverError("Task %s already managed by %s" - % (self.taskid, self.task_manager)) - self.release_lock() - return True - raise DriverError("Could not acquire lock") - - def get_manager(self): - return self.task_manager \ No newline at end of file + self.task_scope = task_scope \ No newline at end of file diff --git a/helm_drydock/model/task.py b/helm_drydock/model/task.py index da583fc3..5ca699af 100644 --- a/helm_drydock/model/task.py +++ b/helm_drydock/model/task.py @@ -26,60 +26,30 @@ class Task(object): self.status = TaskStatus.Created self.terminate = False self.subtasks = [] + self.lock_id = None - parent_task = kwargs.get('parent_task','') - - # A lock to prevent concurrency race conditions - self.update_lock = Lock() + self.parent_task_id = kwargs.get('parent_task_id','') def get_id(self): return self.task_id - # Mark this task and all subtasks as requested termination - # so that the task manager knows termination has been requested def terminate_task(self): - locked = self.get_lock() - if locked: - # TODO Naively assume subtask termination will succeed for now - for t in self.subtasks: - t.terminate_task() - self.terminate = 
True - self.release_lock() - else: - raise errors.OrchestratorError("Could not get task update lock") + self.terminate = True def set_status(self, status): - locked = self.get_lock() - if locked: - self.status = status - self.release_lock() - else: - raise errors.OrchestratorError("Could not get task update lock") + self.status = status def get_status(self): return self.status - def get_lock(self): - locked = self.update_lock.acquire(blocking=True, timeout=10) - return locked - - def release_lock(self): - self.update_lock.release() - return - - def create_subtask(self, subtask_class, **kwargs): + def register_subtask(self, subtask_id): if self.terminate: - raise errors.OrchestratorError("Cannot create subtask for parent" \ + raise errors.OrchestratorError("Cannot add subtask for parent" \ " marked for termination") - locked = self.get_lock() - if locked: - subtask = subtask_class(parent_task=self.get_id(), **kwargs) - self.subtasks.append(subtask.get_id()) - self.release_lock() - return subtask - else: - raise errors.OrchestratorError("Could not get task update lock") + self.subtasks.append(subtask_id) + def get_subtasks(self): + return self.subtasks class OrchestratorTask(Task): diff --git a/helm_drydock/orchestrator/__init__.py b/helm_drydock/orchestrator/__init__.py index d50499bf..bf5b5578 100644 --- a/helm_drydock/orchestrator/__init__.py +++ b/helm_drydock/orchestrator/__init__.py @@ -13,6 +13,7 @@ # limitations under the License. import uuid import time +import threading from enum import Enum, unique @@ -38,7 +39,6 @@ class Orchestrator(object): self.state_manager = state_manager - self.thread_objs = {} """ execute_task @@ -48,29 +48,98 @@ class Orchestrator(object): the current designed state and current built state from the statemgmt module. Based on those 3 inputs, we'll decide what is needed next. 
""" - def execute_task(self, task): + def execute_task(self, task_id): if self.state_manager is None: raise errors.OrchestratorError("Cannot execute task without" \ " initialized state manager") + task = self.state_manager.get_task(task_id) + + if task is None: + raise errors.OrchestratorError("Task %s not found." + % (task_id)) # Just for testing now, need to implement with enabled_drivers # logic if task.action == OrchestratorAction.Noop: - task.set_status(TaskStatus.Running) - self.state_manager.put_task(task) + self.task_field_update(task_id, + status=TaskStatus.Running) - driver_task = task.create_subtask(drivers.DriverTask, + driver_task = self.create_task(drivers.DriverTask, target_design_id=0, - target_action=OrchestratorAction.Noop) - self.state_manager.post_task(driver_task) + target_action=OrchestratorAction.Noop, + parent_task_id=task.get_id()) - driver = drivers.ProviderDriver(self.state_manager) - driver.execute_task(driver_task) - task.set_status(driver_task.get_status()) - self.state_manager.put_task(task) + driver = drivers.ProviderDriver(self.state_manager, self) + driver.execute_task(driver_task.get_id()) + driver_task = self.state_manager.get_task(driver_task.get_id()) + + self.task_field_update(task_id, status=driver_task.get_status()) + return else: raise errors.OrchestratorError("Action %s not supported" - % (task.action)) \ No newline at end of file + % (task.action)) + + """ + terminate_task + + Mark a task for termination and optionally propagate the termination + recursively to all subtasks + """ + def terminate_task(self, task_id, propagate=True): + task = self.state_manager.get_task(task_id) + + if task is None: + raise errors.OrchestratorError("Could find task %s" % task_id) + else: + # Terminate initial task first to prevent add'l subtasks + + self.task_field_update(task_id, terminate=True) + + if propagate: + # Get subtasks list + subtasks = task.get_subtasks() + + for st in subtasks: + self.terminate_task(st, propagate=True) + else: 
+ return True + + def create_task(self, task_class, **kwargs): + parent_task_id = kwargs.get('parent_task_id', None) + new_task = task_class(**kwargs) + self.state_manager.post_task(new_task) + + if parent_task_id is not None: + self.task_subtask_add(parent_task_id, new_task.get_id()) + + return new_task + + # Lock a task and make all field updates, then unlock it + def task_field_update(self, task_id, **kwargs): + lock_id = self.state_manager.lock_task(task_id) + if lock_id is not None: + task = self.state_manager.get_task(task_id) + + for k,v in kwargs.items(): + print("Setting task %s field %s to %s" % (task_id, k, v)) + setattr(task, k, v) + + self.state_manager.put_task(task, lock_id=lock_id) + self.state_manager.unlock_task(task_id, lock_id) + return True + else: + return False + + def task_subtask_add(self, task_id, subtask_id): + lock_id = self.state_manager.lock_task(task_id) + if lock_id is not None: + task = self.state_manager.get_task(task_id) + task.register_subtask(subtask_id) + self.state_manager.put_task(task, lock_id=lock_id) + self.state_manager.unlock_task(task_id, lock_id) + return True + else: + return False \ No newline at end of file diff --git a/helm_drydock/statemgmt/__init__.py b/helm_drydock/statemgmt/__init__.py index 752042b8..252ef561 100644 --- a/helm_drydock/statemgmt/__init__.py +++ b/helm_drydock/statemgmt/__init__.py @@ -173,7 +173,7 @@ class DesignState(object): def get_task(self, task_id): for t in self.tasks: if t.get_id() == task_id: - return t + return deepcopy(t) return None def post_task(self, task): @@ -195,14 +195,22 @@ class DesignState(object): else: raise StateError("Task is not the correct type") - def put_task(self, task): + def put_task(self, task, lock_id=None): if task is not None and isinstance(task, tasks.Task): my_lock = self.tasks_lock.acquire(blocking=True, timeout=10) if my_lock: task_id = task.get_id() - self.tasks = [t - if t.get_id() != task_id else deepcopy(task) - for t in self.tasks] + t = 
self.get_task(task_id) + if t.lock_id is not None and t.lock_id != lock_id: + self.tasks_lock.release() + raise StateError("Task locked for updates") + + task.lock_id = lock_id + self.tasks = [i + if i.get_id() != task_id + else deepcopy(task) + for i in self.tasks] + self.tasks_lock.release() return True else: @@ -210,6 +218,34 @@ class DesignState(object): else: raise StateError("Task is not the correct type") + def lock_task(self, task_id): + my_lock = self.tasks_lock.acquire(blocking=True, timeout=10) + if my_lock: + lock_id = uuid.uuid4() + for t in self.tasks: + if t.get_id() == task_id and t.lock_id is None: + t.lock_id = lock_id + self.tasks_lock.release() + return lock_id + self.tasks_lock.release() + return None + else: + raise StateError("Could not acquire lock") + + def unlock_task(self, task_id, lock_id): + my_lock = self.tasks_lock.acquire(blocking=True, timeout=10) + if my_lock: + for t in self.tasks: + if t.get_id() == task_id and t.lock_id == lock_id: + t.lock_id = None + self.tasks_lock.release() + return True + self.tasks_lock.release() + return False + else: + raise StateError("Could not acquire lock") + + class SiteDesign(object): def __init__(self, ischange=False, changeid=None): diff --git a/tests/test_orch_generic.py b/tests/test_orch_generic.py index 25262a66..faa6178f 100644 --- a/tests/test_orch_generic.py +++ b/tests/test_orch_generic.py @@ -26,36 +26,43 @@ import time class TestClass(object): - def test_driver_threading(self): + def test_task_complete(self): state_mgr = statemgmt.DesignState() orchestrator = orch.Orchestrator(state_manager=state_mgr) - orch_task = task.OrchestratorTask(action=enum.OrchestratorAction.Noop, - site="default") + orch_task = orchestrator.create_task(task.OrchestratorTask, + site='default', + action=enum.OrchestratorAction.Noop) - orchestrator.execute_task(orch_task) + orchestrator.execute_task(orch_task.get_id()) - # Check that each subtask executed in a different thread than this one - for t in 
orch_task.subtasks: - if isinstance(t, drivers.DriverTask): - assert t.get_manager() != threading.current_thread().name + orch_task = state_mgr.get_task(orch_task.get_id()) + + assert orch_task.get_status() == enum.TaskStatus.Complete + + for t_id in orch_task.subtasks: + t = state_mgr.get_task(t_id) + assert t.get_status() == enum.TaskStatus.Complete def test_task_termination(self): state_mgr = statemgmt.DesignState() orchestrator = orch.Orchestrator(state_manager=state_mgr) - orch_task = task.OrchestratorTask(action=enum.OrchestratorAction.Noop, - site="default") + orch_task = orchestrator.create_task(task.OrchestratorTask, + site='default', + action=enum.OrchestratorAction.Noop) orch_thread = threading.Thread(target=orchestrator.execute_task, - args=(orch_task,)) + args=(orch_task.get_id(),)) orch_thread.start() time.sleep(1) - orch_task.terminate_task() + orchestrator.terminate_task(orch_task.get_id()) while orch_thread.is_alive(): time.sleep(1) + orch_task = state_mgr.get_task(orch_task.get_id()) assert orch_task.get_status() == enum.TaskStatus.Terminated - for t in orch_task.subtasks: + for t_id in orch_task.subtasks: + t = state_mgr.get_task(t_id) assert t.get_status() == enum.TaskStatus.Terminated \ No newline at end of file From aeef4b13720df2d520246220838113a8652dad81 Mon Sep 17 00:00:00 2001 From: Scott Hussey Date: Fri, 21 Apr 2017 13:49:58 -0500 Subject: [PATCH 06/11] Finish initial orchestration for OOB actions using the pyghmi_driver. Orchestration is working, but pyghmi_driver is not Python 3 compatible. 
--- helm_drydock/drivers/__init__.py | 75 +++-- helm_drydock/drivers/oob/__init__.py | 33 +- .../drivers/oob/pyghmi_driver/__init__.py | 294 +++++++++++++++++ helm_drydock/enum.py | 10 + helm_drydock/error.py | 3 + helm_drydock/ingester/__init__.py | 1 + helm_drydock/model/hostprofile.py | 15 +- helm_drydock/model/node.py | 21 +- helm_drydock/model/task.py | 47 ++- helm_drydock/orchestrator/__init__.py | 295 ++++++++++++++++-- helm_drydock/orchestrator/designdata.py | 102 ------ setup.py | 3 +- testrequirements.txt | 2 + tests/test_design_inheritance.py | 9 +- tests/test_orch_oob.py | 99 ++++++ tests/yaml_samples/fullsite.yaml | 4 + tests/yaml_samples/oob.yaml | 226 ++++++++++++++ 17 files changed, 1047 insertions(+), 192 deletions(-) create mode 100644 helm_drydock/drivers/oob/pyghmi_driver/__init__.py delete mode 100644 helm_drydock/orchestrator/designdata.py create mode 100644 tests/test_orch_oob.py create mode 100644 tests/yaml_samples/oob.yaml diff --git a/helm_drydock/drivers/__init__.py b/helm_drydock/drivers/__init__.py index 2449c64b..b060fb03 100644 --- a/helm_drydock/drivers/__init__.py +++ b/helm_drydock/drivers/__init__.py @@ -18,31 +18,53 @@ import time import helm_drydock.statemgmt as statemgmt import helm_drydock.enum as enum import helm_drydock.model.task as tasks +import helm_drydock.error as errors # This is the interface for the orchestrator to access a driver # TODO Need to have each driver spin up a seperate thread to manage # driver tasks and feed them via queue class ProviderDriver(object): - def __init__(self, state_manager, orchestrator): + def __init__(self, orchestrator=None, state_manager=None, **kwargs): + if orchestrator is None: + raise ValueError("ProviderDriver requires valid orchestrator") + self.orchestrator = orchestrator + + if state_manager is None: + raise ValueError("ProviderDriver requires valid state manager") + self.state_manager = state_manager + + # These are the actions that this driver supports + 
self.supported_actions = [enum.OrchestratorAction.Noop] + + self.driver_name = "generic" + self.driver_key = "generic" + self.driver_desc = "Generic Provider Driver" def execute_task(self, task_id): - task_manager = DriverTaskManager(task_id, self.state_manager, - self.orchestrator) - task_manager.start() - - while task_manager.is_alive(): - time.sleep(1) + task = self.state_manager.get_task(task_id) + task_action = task.action - return + if task_action in self.supported_actions: + task_runner = DriverTaskRunner(task_id, self.state_manager, + self.orchestrator) + task_runner.start() + + while task_runner.is_alive(): + time.sleep(1) + + return + else: + raise errors.DriverError("Unsupported action %s for driver %s" % + (task_action, self.driver_desc)) # Execute a single task in a separate thread -class DriverTaskManager(Thread): +class DriverTaskRunner(Thread): - def __init__(self, task_id, state_manager, orchestrator): - super(DriverTaskManager, self).__init__() + def __init__(self, task_id, state_manager=None, orchestrator=None): + super(DriverTaskRunner, self).__init__() self.orchestrator = orchestrator @@ -56,7 +78,10 @@ class DriverTaskManager(Thread): return def run(self): - if self.task.target_action == enum.OrchestratorAction.Noop: + self.execute_task() + + def execute_task(self): + if self.task.action == enum.OrchestratorAction.Noop: self.orchestrator.task_field_update(self.task.get_id(), status=enum.TaskStatus.Running) @@ -74,30 +99,4 @@ class DriverTaskManager(Thread): self.orchestrator.task_field_update(self.task.get_id(), status=enum.TaskStatus.Complete) return - else: - raise DriverError("Unknown Task action") - - -class DriverTask(tasks.Task): - # subclasses implemented by each driver should override this with the list - # of actions that driver supports - - supported_actions = [enum.OrchestratorAction.Noop] - - def __init__(self, target_design_id=None, - target_action=None, task_scope={}, **kwargs): - super(DriverTask, self).__init__(**kwargs) - - if 
target_design_id is None: - raise DriverError("target_design_id cannot be None") - - self.target_design_id = target_design_id - - if target_action in self.supported_actions: - self.target_action = target_action - else: - raise DriverError("DriverTask does not support action %s" - % (target_action)) - - self.task_scope = task_scope \ No newline at end of file diff --git a/helm_drydock/drivers/oob/__init__.py b/helm_drydock/drivers/oob/__init__.py index 9dd08ff8..f6a78e42 100644 --- a/helm_drydock/drivers/oob/__init__.py +++ b/helm_drydock/drivers/oob/__init__.py @@ -19,24 +19,33 @@ # initiate_reboot # set_power_off # set_power_on +import helm_drydock.enum as enum +import helm_drydock.error as errors from helm_drydock.drivers import ProviderDriver class OobDriver(ProviderDriver): - def __init__(self): - pass + def __init__(self, **kwargs): + super(OobDriver, self).__init__(**kwargs) - def execute_action(self, action, **kwargs): - if action == + self.supported_actions = [enum.OobAction.ConfigNodePxe, + enum.OobAction.SetNodeBoot, + enum.OobAction.PowerOffNode, + enum.OobAction.PowerOnNode, + enum.OobAction.PowerCycleNode, + enum.OobAction.InterrogateNode] + self.driver_name = "oob_generic" + self.driver_key = "oob_generic" + self.driver_desc = "Generic OOB Driver" + def execute_task(self, task_id): + task = self.state_manager.get_task(task_id) + task_action = task.action -class OobAction(Enum): - ConfigNodePxe = 'config_node_pxe' - SetNodeBoot = 'set_node_boot' - PowerOffNode = 'power_off_node' - PowerOnNode = 'power_on_node' - PowerCycleNode = 'power_cycle_node' - InterrogateNode = 'interrogate_node' - + if task_action in self.supported_actions: + return + else: + raise DriverError("Unsupported action %s for driver %s" % + (task_action, self.driver_desc)) diff --git a/helm_drydock/drivers/oob/pyghmi_driver/__init__.py b/helm_drydock/drivers/oob/pyghmi_driver/__init__.py new file mode 100644 index 00000000..ffab675a --- /dev/null +++ 
b/helm_drydock/drivers/oob/pyghmi_driver/__init__.py @@ -0,0 +1,294 @@ +# Copyright 2017 AT&T Intellectual Property. All other rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import time + +from pyghmi.ipmi.command import Command + +import helm_drydock.error as errors +import helm_drydock.enum as enum +import helm_drydock.model.task as task_model + +import helm_drydock.drivers.oob as oob +import helm_drydock.drivers as drivers + + +class PyghmiDriver(oob.OobDriver): + + def __init__(self, **kwargs): + super(PyghmiDriver, self).__init__(**kwargs) + + self.driver_name = "pyghmi_driver" + self.driver_key = "pyghmi_driver" + self.driver_desc = "Pyghmi OOB Driver" + + def execute_task(self, task_id): + task = self.state_manager.get_task(task_id) + + if task is None: + raise errors.DriverError("Invalid task %s" % (task_id)) + + if task.action not in self.supported_actions: + raise errors.DriverError("Driver %s doesn't support task action %s" + % (self.driver_desc, task.action)) + + design_id = getattr(task, 'design_id', None) + + if design_id is None: + raise errors.DriverError("No design ID specified in task %s" % + (task_id)) + + + if task.site_name is None: + raise errors.DriverError("Not site specified for task %s." 
% + (task_id)) + + self.orchestrator.task_field_update(task.get_id(), + status=enum.TaskStatus.Running) + + site_design = self.orchestrator.get_effective_site(task.site_name, + change_id=design_id) + + target_nodes = [] + + if len(task.node_list) > 0: + target_nodes.extend([x + for x in site_design.baremetal_nodes + if x.get_name() in task.node_list]) + else: + target_nodes.extend(site_design.baremetal_nodes) + + incomplete_subtasks = [] + # For each target node, create a subtask and kick off a runner + for n in target_nodes: + subtask = self.orchestrator.create_task(task_model.DriverTask, + parent_task_id=task.get_id(), design_id=design_id, + action=task.action, + task_scope={'site': task.site_name, + 'node_names': [n.get_name()]}) + incomplete_subtasks.append(subtask.get_id()) + + runner = PyghmiTaskRunner(state_manager=self.state_manager, + orchestrator=self.orchestrator, + task_id=subtask.get_id(), node=n) + runner.start() + + # Wait for subtasks to complete + # TODO need some kind of timeout + i = 0 + while len(incomplete_subtasks) > 0: + for n in incomplete_subtasks: + t = self.state_manager.get_task(n) + if t.get_status() in [enum.TaskStatus.Terminated, + enum.TaskStatus.Complete, + enum.TaskStatus.Errored]: + incomplete_subtasks.remove(n) + time.sleep(2) + i = i+1 + if i == 5: + break + + task = self.state_manager.get_task(task.get_id()) + subtasks = map(self.state_manager.get_task, task.get_subtasks()) + + success_subtasks = [x + for x in subtasks + if x.get_result() == enum.ActionResult.Success] + nosuccess_subtasks = [x + for x in subtasks + if x.get_result() in [enum.ActionResult.PartialSuccess, + enum.ActionResult.Failure]] + + print("Task %s successful subtasks: %s" % + (task.get_id(), len(success_subtasks))) + print("Task %s unsuccessful subtasks: %s" % + (task.get_id(), len(nosuccess_subtasks))) + print("Task %s total subtasks: %s" % + (task.get_id(), len(task.get_subtasks()))) + + task_result = None + if len(success_subtasks) > 0 and 
len(nosuccess_subtasks) > 0: + task_result = enum.ActionResult.PartialSuccess + elif len(success_subtasks) == 0 and len(nosuccess_subtasks) > 0: + task_result = enum.ActionResult.Failure + elif len(success_subtasks) > 0 and len(nosuccess_subtasks) == 0: + task_result = enum.ActionResult.Success + else: + task_result = enum.ActionResult.Incomplete + + self.orchestrator.task_field_update(task.get_id(), + result=task_result, + status=enum.TaskStatus.Complete) + return + +class PyghmiTaskRunner(drivers.DriverTaskRunner): + + def __init__(self, node=None, **kwargs): + super(PyghmiTaskRunner, self).__init__(**kwargs) + + # We cheat here by providing the Node model instead + # of making the runner source it from statemgmt + if node is None: + raise errors.DriverError("Did not specify target node") + + self.node = node + + def execute_task(self): + task_action = self.task.action + + if len(self.task.node_list) != 1: + self.orchestrator.task_field_update(self.task.get_id(), + result=enum.ActionResult.Incomplete, + status=enum.TaskStatus.Errored) + raise errors.DriverError("Multiple names (%s) in task %s node_list" + % (len(self.task.node_list), self.task.get_id())) + + target_node_name = self.task.node_list[0] + + if self.node.get_name() != target_node_name: + self.orchestrator.task_field_update(self.task.get_id(), + result=enum.ActionResult.Incomplete, + status=enum.TaskStatus.Errored) + raise errors.DriverError("Runner node does not match " \ + "task node scope") + + + ipmi_network = self.node.applied.get('oob_network') + ipmi_address = self.node.get_network_address(ipmi_network) + + if ipmi_address is None: + self.orchestrator.task_field_update(self.task.get_id(), + result=enum.ActionResult.Incomplete, + status=enum.TaskStatus.Errored) + raise errors.DriverError("Node %s has no IPMI address" % + (target_node_name)) + + self.orchestrator.task_field_update(self.task.get_id(), + status=enum.TaskStatus.Running) + ipmi_account = self.node.applied.get('oob_account', '') + 
ipmi_credential = self.node.applied.get('oob_credential', '') + + ipmi_session = Command(bmc=ipmi_address, userid=ipmi_account, + password=ipmi_credential) + + if task_action == enum.OobAction.ConfigNodePxe: + self.orchestrator.task_field_update(self.task.get_id(), + result=enum.ActionResult.Failure, + status=enum.TaskStatus.Complete) + return + elif task_action == enum.OobAction.SetNodeBoot: + ipmi_session.set_bootdev('pxe') + + time.sleep(3) + + bootdev = ipmi_session.get_bootdev() + + if bootdev.get('bootdev', '') == 'network': + self.orchestrator.task_field_update(self.task.get_id(), + result=enum.ActionResult.Success, + status=enum.TaskStatus.Complete) + else: + self.orchestrator.task_field_update(self.task.get_id(), + result=enum.ActionResult.Failure, + status=enum.TaskStatus.Complete) + return + elif task_action == enum.OobAction.PowerOffNode: + ipmi_session.set_power('off') + + i = 18 + + while i > 0: + power_state = ipmi_session.get_power() + if power_state.get('powerstate', '') == 'off': + break + time.sleep(10) + i = i - 1 + + if power_state.get('powerstate', '') == 'off': + self.orchestrator.task_field_update(self.task.get_id(), + result=enum.ActionResult.Success, + status=enum.TaskStatus.Complete) + else: + self.orchestrator.task_field_update(self.task.get_id(), + result=enum.ActionResult.Failure, + status=enum.TaskStatus.Complete) + return + elif task_action == enum.OobAction.PowerOnNode: + ipmi_session.set_power('on') + + i = 18 + + while i > 0: + power_state = ipmi_session.get_power() + if power_state.get('powerstate', '') == 'on': + break + time.sleep(10) + i = i - 1 + + if power_state.get('powerstate', '') == 'on': + self.orchestrator.task_field_update(self.task.get_id(), + result=enum.ActionResult.Success, + status=enum.TaskStatus.Complete) + else: + self.orchestrator.task_field_update(self.task.get_id(), + result=enum.ActionResult.Failure, + status=enum.TaskStatus.Complete) + return + elif task_action == enum.OobAction.PowerCycleNode: + 
ipmi_session.set_power('off') + + # Wait for power state of off before booting back up + # We'll wait for up to 3 minutes to power off + i = 18 + + while i > 0: + power_state = ipmi_session.get_power() + if power_state.get('powerstate', '') == 'off': + break + time.sleep(10) + i = i - 1 + + if power_state.get('powerstate', '') == 'on': + self.orchestrator.task_field_update(self.task.get_id(), + result=enum.ActionResult.Failure, + status=enum.TaskStatus.Complete) + return + + ipmi_session.set_power('on') + + i = 18 + + while i > 0: + power_state = ipmi_session.get_power() + if power_state.get('powerstate', '') == 'on': + break + time.sleep(10) + i = i - 1 + + if power_state.get('powerstate', '') == 'on': + self.orchestrator.task_field_update(self.task.get_id(), + result=enum.ActionResult.Success, + status=enum.TaskStatus.Complete) + else: + self.orchestrator.task_field_update(self.task.get_id(), + result=enum.ActionResult.Failure, + status=enum.TaskStatus.Complete) + return + elif task_action == enum.OobAction.InterrogateNode: + mci_id = ipmi_session.get_mci() + + self.orchestrator.task_field_update(self.task.get_id(), + result=enum.ActionResult.Success, + status=enum.TaskStatus.Complete, + result_detail=mci_id) + return \ No newline at end of file diff --git a/helm_drydock/enum.py b/helm_drydock/enum.py index 308bb7c3..ec14d666 100644 --- a/helm_drydock/enum.py +++ b/helm_drydock/enum.py @@ -24,8 +24,18 @@ class OrchestratorAction(Enum): DeployNode = 'deploy_node' DestroyNode = 'destroy_node' +@unique +class OobAction(Enum): + ConfigNodePxe = 'config_node_pxe' + SetNodeBoot = 'set_node_boot' + PowerOffNode = 'power_off_node' + PowerOnNode = 'power_on_node' + PowerCycleNode = 'power_cycle_node' + InterrogateNode = 'interrogate_node' + @unique class ActionResult(Enum): + Incomplete = 'incomplete' Success = 'success' PartialSuccess = 'partial_success' Failure = 'failure' diff --git a/helm_drydock/error.py b/helm_drydock/error.py index 5f0b72e0..a1accb22 100644 --- 
a/helm_drydock/error.py +++ b/helm_drydock/error.py @@ -19,4 +19,7 @@ class StateError(Exception): pass class OrchestratorError(Exception): + pass + +class DriverError(Exception): pass \ No newline at end of file diff --git a/helm_drydock/ingester/__init__.py b/helm_drydock/ingester/__init__.py index 48553c2d..507b8aab 100644 --- a/helm_drydock/ingester/__init__.py +++ b/helm_drydock/ingester/__init__.py @@ -72,6 +72,7 @@ class Ingester(object): design_data = design_state.get_design_base() except DesignError: design_data = SiteDesign() + design_state.post_design_base(design_data) if plugin_name in self.registered_plugins: design_items = self.registered_plugins[plugin_name].ingest_data(**kwargs) diff --git a/helm_drydock/model/hostprofile.py b/helm_drydock/model/hostprofile.py index 68c0ea22..a3692743 100644 --- a/helm_drydock/model/hostprofile.py +++ b/helm_drydock/model/hostprofile.py @@ -94,15 +94,22 @@ class HostProfile(object): raise ValueError('Unknown API version of object') def get_rack(self): - return self.design['rack'] + if getattr(self, 'applied', None) is not None: + return self.applied.get('rack', None) + else: + return self.design.get('rack', None) def get_name(self): return self.name def has_tag(self, tag): - if tag in self.design['tags']: - return True - + if getattr(self, 'applied', None) is not None: + if tag in self.applied.get('tags', []): + return True + else: + if tag in self.design.get('tags', []): + return True + return False def apply_inheritance(self, site): diff --git a/helm_drydock/model/node.py b/helm_drydock/model/node.py index f9091e6f..fed005e6 100644 --- a/helm_drydock/model/node.py +++ b/helm_drydock/model/node.py @@ -33,7 +33,9 @@ class BaremetalNode(HostProfile): super(BaremetalNode, self).__init__(**kwargs) if self.api_version == "v1.0": - self.addressing = [] + addressing = [] + + self.design['addressing'] = addressing spec = kwargs.get('spec', {}) addresses = spec.get('addressing', []) @@ -48,12 +50,12 @@ class 
BaremetalNode(HostProfile): assignment['type'] = 'dhcp' assignment['address'] = None assignment['network'] = a.get('network') - self.addressing.append(assignment) + addressing.append(assignment) elif address != '': assignment['type'] = 'static' assignment['address'] = a.get('address') assignment['network'] = a.get('network') - self.addressing.append(assignment) + addressing.append(assignment) else: self.log.error("Invalid address assignment %s on Node %s" % (address, self.name)) @@ -65,6 +67,9 @@ class BaremetalNode(HostProfile): # data from the passed site design def compile_applied_model(self, site): self.apply_host_profile(site) + + self.applied['addressing'] = deepcopy(self.design['addressing']) + self.apply_hardware_profile(site) self.apply_network_connections(site) return @@ -170,3 +175,13 @@ class BaremetalNode(HostProfile): last_action['result'] = result if detail is not None: last_action['detail'] = detail + + def get_network_address(self, network_name): + if self.applied is None: + return None + + for a in self.applied.get('addressing', []): + if a.get('network', None) == network_name: + return a.get('address', None) + + return None diff --git a/helm_drydock/model/task.py b/helm_drydock/model/task.py index 5ca699af..ff6fb922 100644 --- a/helm_drydock/model/task.py +++ b/helm_drydock/model/task.py @@ -17,16 +17,19 @@ from threading import Lock import helm_drydock.error as errors -from helm_drydock.enum import TaskStatus, OrchestratorAction +import helm_drydock.enum as enum class Task(object): def __init__(self, **kwargs): self.task_id = uuid.uuid4() - self.status = TaskStatus.Created + self.status = enum.TaskStatus.Created self.terminate = False self.subtasks = [] self.lock_id = None + self.result = enum.ActionResult.Incomplete + self.result_detail = None + self.action = kwargs.get('action', enum.OrchestratorAction.Noop) self.parent_task_id = kwargs.get('parent_task_id','') @@ -42,6 +45,18 @@ class Task(object): def get_status(self): return self.status + 
def set_result(self, result): + self.result = result + + def get_result(self): + return self.result + + def set_result_detail(self, detail): + self.result_detail = detail + + def get_result_detail(self): + return self.result_detail + def register_subtask(self, subtask_id): if self.terminate: raise errors.OrchestratorError("Cannot add subtask for parent" \ @@ -56,16 +71,30 @@ class OrchestratorTask(Task): def __init__(self, **kwargs): super(OrchestratorTask, self).__init__(**kwargs) - self.action = kwargs.get('action', OrchestratorAction.Noop) - # Validate parameters based on action self.site = kwargs.get('site', '') if self.site == '': raise ValueError("Orchestration Task requires 'site' parameter") - if self.action in [OrchestratorAction.VerifyNode, - OrchestratorAction.PrepareNode, - OrchestratorAction.DeployNode, - OrchestratorAction.DestroyNode]: - self.node_filter = kwargs.get('node_filter', None) \ No newline at end of file + self.design_id = kwargs.get('design_id', 0) + + if self.action in [enum.OrchestratorAction.VerifyNode, + enum.OrchestratorAction.PrepareNode, + enum.OrchestratorAction.DeployNode, + enum.OrchestratorAction.DestroyNode]: + self.node_filter = kwargs.get('node_filter', None) + + +class DriverTask(Task): + # subclasses implemented by each driver should override this with the list + # of actions that driver supports + + def __init__(self, task_scope={}, **kwargs): + super(DriverTask, self).__init__(**kwargs) + + self.design_id = kwargs.get('design_id', 0) + + self.site_name = task_scope.get('site', None) + + self.node_list = task_scope.get('node_names', []) \ No newline at end of file diff --git a/helm_drydock/orchestrator/__init__.py b/helm_drydock/orchestrator/__init__.py index bf5b5578..cc01924f 100644 --- a/helm_drydock/orchestrator/__init__.py +++ b/helm_drydock/orchestrator/__init__.py @@ -14,14 +14,15 @@ import uuid import time import threading +import importlib from enum import Enum, unique +from copy import deepcopy import 
helm_drydock.drivers as drivers import helm_drydock.model.task as tasks import helm_drydock.error as errors - -from helm_drydock.enum import TaskStatus, OrchestratorAction +import helm_drydock.enum as enum class Orchestrator(object): @@ -30,15 +31,36 @@ class Orchestrator(object): def __init__(self, enabled_drivers=None, state_manager=None): self.enabled_drivers = {} - if enabled_drivers is not None: - self.enabled_drivers['oob'] = enabled_drivers.get('oob', None) - self.enabled_drivers['server'] = enabled_drivers.get( - 'server', None) - self.enabled_drivers['network'] = enabled_drivers.get( - 'network', None) - self.state_manager = state_manager + if enabled_drivers is not None: + oob_driver_name = enabled_drivers.get('oob', None) + if oob_driver_name is not None: + m, c = oob_driver_name.rsplit('.', 1) + oob_driver_class = \ + getattr(importlib.import_module(m), c, None) + if oob_driver_class is not None: + self.enabled_drivers['oob'] = oob_driver_class(state_manager=state_manager, + orchestrator=self) + + node_driver_name = enabled_drivers.get('node', None) + if node_driver_name is not None: + m, c = node_driver_name.rsplit('.', 1) + node_driver_class = \ + getattr(importlib.import_module(m), c, None) + if node_driver_class is not None: + self.enabled_drivers['node'] = node_driver_class(state_manager=state_manager, + orchestrator=self) + + network_driver_name = enabled_drivers.get('network', None) + if network_driver_name is not None: + m, c = network_driver_name.rsplit('.', 1) + network_driver_class = \ + getattr(importlib.import_module(m), c, None) + if network_driver_class is not None: + self.enabled_drivers['network'] = network_driver_class(state_manager=state_manager, + orchestrator=self) + """ execute_task @@ -58,25 +80,139 @@ class Orchestrator(object): if task is None: raise errors.OrchestratorError("Task %s not found." 
% (task_id)) + + design_id = task.design_id + task_site = task.site + # Just for testing now, need to implement with enabled_drivers # logic - if task.action == OrchestratorAction.Noop: + if task.action == enum.OrchestratorAction.Noop: self.task_field_update(task_id, - status=TaskStatus.Running) + status=enum.TaskStatus.Running) - driver_task = self.create_task(drivers.DriverTask, - target_design_id=0, - target_action=OrchestratorAction.Noop, + driver_task = self.create_task(tasks.DriverTask, + design_id=0, + action=enum.OrchestratorAction.Noop, parent_task_id=task.get_id()) - - - driver = drivers.ProviderDriver(self.state_manager, self) + driver = drivers.ProviderDriver(state_manager=self.state_manager, + orchestrator=self) driver.execute_task(driver_task.get_id()) driver_task = self.state_manager.get_task(driver_task.get_id()) self.task_field_update(task_id, status=driver_task.get_status()) + return + elif task.action == enum.OrchestratorAction.ValidateDesign: + self.task_field_update(task_id, + status=enum.TaskStatus.Running) + try: + site_design = self.get_effective_site(task_site, + change_id=design_id) + self.task_field_update(task_id, + result=enum.ActionResult.Success) + except: + self.task_field_update(task_id, + result=enum.ActionResult.Failure) + + self.task_field_update(task_id, status=enum.TaskStatus.Complete) + return + elif task.action == enum.OrchestratorAction.VerifyNode: + self.task_field_update(task_id, + status=enum.TaskStatus.Running) + + driver = self.enabled_drivers['oob'] + + if driver is None: + self.task_field_update(task_id, + status=enum.TaskStatus.Errored, + result=enum.ActionResult.Failure) + return + + site_design = self.get_effective_site(task_site, + change_id=design_id) + + node_filter = task.node_filter + + target_nodes = self.process_node_filter(node_filter, site_design) + + target_names = [x.get_name() for x in target_nodes] + + task_scope = {'site' : task_site, + 'node_names' : target_names} + + driver_task = 
self.create_task(tasks.DriverTask, + parent_task_id=task.get_id(), + design_id=design_id, + action=enum.OobAction.InterrogateNode, + task_scope=task_scope) + + driver.execute_task(driver_task.get_id()) + + driver_task = self.state_manager.get_task(driver_task.get_id()) + + self.task_field_update(task_id, + status=enum.TaskStatus.Complete, + result=driver_task.get_result()) + return + elif task.action == enum.OrchestratorAction.PrepareNode: + self.task_field_update(task_id, + status=enum.TaskStatus.Running) + + driver = self.enabled_drivers['oob'] + + if driver is None: + self.task_field_update(task_id, + status=enum.TaskStatus.Errored, + result=enum.ActionResult.Failure) + return + + site_design = self.get_effective_site(task_site, + change_id=design_id) + + node_filter = task.node_filter + + target_nodes = self.process_node_filter(node_filter, site_design) + + target_names = [x.get_name() for x in target_nodes] + + task_scope = {'site' : task_site, + 'node_names' : target_names} + + setboot_task = self.create_task(tasks.DriverTask, + parent_task_id=task.get_id(), + design_id=design_id, + action=enum.OobAction.SetNodeBoot, + task_scope=task_scope) + + driver.execute_task(setboot_task.get_id()) + + setboot_task = self.state_manager.get_task(setboot_task.get_id()) + + cycle_task = self.create_task(tasks.DriverTask, + parent_task_id=task.get_id(), + design_id=design_id, + action=enum.OobAction.PowerCycleNode, + task_scope=task_scope) + driver.execute_task(cycle_task.get_id()) + + cycle_task = self.state_manager.get_task(cycle_task.get_id()) + + if (setboot_task.get_result() == enum.ActionResult.Success and + cycle_task.get_result() == enum.ActionResult.Success): + self.task_field_update(task_id, + status=enum.TaskStatus.Complete, + result=enum.ActionResult.Success) + elif (setboot_task.get_result() == enum.ActionResult.Success or + cycle_task.get_result() == enum.ActionResult.Success): + self.task_field_update(task_id, + status=enum.TaskStatus.Complete, + 
result=enum.ActionResult.PartialSuccess) + else: + self.task_field_update(task_id, + status=enum.TaskStatus.Complete, + result=enum.ActionResult.Failure) + return else: raise errors.OrchestratorError("Action %s not supported" @@ -124,7 +260,6 @@ class Orchestrator(object): task = self.state_manager.get_task(task_id) for k,v in kwargs.items(): - print("Setting task %s field %s to %s" % (task_id, k, v)) setattr(task, k, v) self.state_manager.put_task(task, lock_id=lock_id) @@ -142,4 +277,126 @@ class Orchestrator(object): self.state_manager.unlock_task(task_id, lock_id) return True else: - return False \ No newline at end of file + return False + + """ + load_design_data - Pull all the defined models in statemgmt and assemble + them into a representation of the site. Does not compute inheritance. + Throws an exception if multiple Site models are found. + + param design_state - Instance of statemgmt.DesignState to load data from + + return a Site model populated with all components from the design state + """ + + def load_design_data(self, site_name, change_id=None): + design_data = None + + if change_id is None or change_id == 0: + try: + design_data = self.state_manager.get_design_base() + except DesignError(e): + raise e + else: + design_data = self.state_manager.get_design_change(change_id) + + site = design_data.get_site(site_name) + + networks = design_data.get_networks() + + for n in networks: + if n.site == site_name: + site.networks.append(n) + + network_links = design_data.get_network_links() + + for l in network_links: + if l.site == site_name: + site.network_links.append(l) + + host_profiles = design_data.get_host_profiles() + + for p in host_profiles: + if p.site == site_name: + site.host_profiles.append(p) + + hardware_profiles = design_data.get_hardware_profiles() + + for p in hardware_profiles: + if p.site == site_name: + site.hardware_profiles.append(p) + + baremetal_nodes = design_data.get_baremetal_nodes() + + for n in baremetal_nodes: + if n.site 
== site_name: + site.baremetal_nodes.append(n) + + return site + + def compute_model_inheritance(self, site_root): + + # For now the only thing that really incorporates inheritance is + # host profiles and baremetal nodes. So we'll just resolve it for + # the baremetal nodes which recursively resolves it for host profiles + # assigned to those nodes + + site_copy = deepcopy(site_root) + + for n in site_copy.baremetal_nodes: + n.compile_applied_model(site_copy) + + return site_copy + """ + compute_model_inheritance - given a fully populated Site model, + compute the effecitve design by applying inheritance and references + + return a Site model reflecting the effective design for the site + """ + + def get_described_site(self, site_name, change_id=None): + site_design = None + + if site_name is None: + raise errors.OrchestratorError("Cannot source design for site None") + + site_design = self.load_design_data(site_name, change_id=change_id) + + return site_design + + def get_effective_site(self, site_name, change_id=None): + site_design = self.get_described_site(site_name, change_id=change_id) + + site_model = self.compute_model_inheritance(site_design) + + return site_model + + def process_node_filter(self, node_filter, site_design): + target_nodes = site_design.baremetal_nodes + + if node_filter is None: + return target_nodes + + node_names = node_filter.get('node_names', []) + node_racks = node_filter.get('rack_names', []) + node_tags = node_filter.get('node_tags', []) + + if len(node_names) > 0: + target_nodes = [x + for x in target_nodes + if x.get_name() in node_names] + + if len(node_racks) > 0: + target_nodes = [x + for x in target_nodes + if x.get_rack() in node_racks] + + if len(node_tags) > 0: + target_nodes = [x + for x in target_nodes + for t in node_tags + if x.has_tag(t)] + + return target_nodes + + diff --git a/helm_drydock/orchestrator/designdata.py b/helm_drydock/orchestrator/designdata.py deleted file mode 100644 index 135d13f9..00000000 --- 
a/helm_drydock/orchestrator/designdata.py +++ /dev/null @@ -1,102 +0,0 @@ -# Copyright 2017 AT&T Intellectual Property. All other rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging - -from copy import deepcopy - -from helm_drydock.error import DesignError - -class DesignStateClient(object): - - def __init__(self): - self.log = logging.Logger('orchestrator') - - """ - load_design_data - Pull all the defined models in statemgmt and assemble - them into a representation of the site. Does not compute inheritance. - Throws an exception if multiple Site models are found. 
- - param design_state - Instance of statemgmt.DesignState to load data from - - return a Site model populated with all components from the design state - """ - - def load_design_data(self, site_name, design_state=None, change_id=None): - if design_state is None: - raise ValueError("Design state is None") - - design_data = None - - if change_id is None: - try: - design_data = design_state.get_design_base() - except DesignError(e): - raise e - else: - design_data = design_state.get_design_change(change_id) - - site = design_data.get_site(site_name) - - networks = design_data.get_networks() - - for n in networks: - if n.site == site_name: - site.networks.append(n) - - network_links = design_data.get_network_links() - - for l in network_links: - if l.site == site_name: - site.network_links.append(l) - - host_profiles = design_data.get_host_profiles() - - for p in host_profiles: - if p.site == site_name: - site.host_profiles.append(p) - - hardware_profiles = design_data.get_hardware_profiles() - - for p in hardware_profiles: - if p.site == site_name: - site.hardware_profiles.append(p) - - baremetal_nodes = design_data.get_baremetal_nodes() - - for n in baremetal_nodes: - if n.site == site_name: - site.baremetal_nodes.append(n) - - return site - - def compute_model_inheritance(self, site_root): - - # For now the only thing that really incorporates inheritance is - # host profiles and baremetal nodes. 
So we'll just resolve it for - # the baremetal nodes which recursively resolves it for host profiles - # assigned to those nodes - - site_copy = deepcopy(site_root) - - for n in site_copy.baremetal_nodes: - n.compile_applied_model(site_copy) - - return site_copy - """ - compute_model_inheritance - given a fully populated Site model, - compute the effecitve design by applying inheritance and references - - return a Site model reflecting the effective design for the site - """ diff --git a/setup.py b/setup.py index eea0d7ed..7e55ed61 100644 --- a/setup.py +++ b/setup.py @@ -47,7 +47,8 @@ setup(name='helm_drydock', 'helm_drydock.orchestrator', 'helm_drydock.control', 'helm_drydock.drivers', - 'helm_drydock.drivers.oob'], + 'helm_drydock.drivers.oob', + 'helm_drydock.drivers.oob.pyghmi_driver'], install_requires=[ 'PyYAML', 'oauth', diff --git a/testrequirements.txt b/testrequirements.txt index 642edbdd..89f9905f 100644 --- a/testrequirements.txt +++ b/testrequirements.txt @@ -1,2 +1,4 @@ +pytest-mock pytest +mock tox \ No newline at end of file diff --git a/tests/test_design_inheritance.py b/tests/test_design_inheritance.py index b629b94c..027e9bb8 100644 --- a/tests/test_design_inheritance.py +++ b/tests/test_design_inheritance.py @@ -14,7 +14,7 @@ from helm_drydock.ingester import Ingester from helm_drydock.statemgmt import DesignState, SiteDesign -from helm_drydock.orchestrator.designdata import DesignStateClient +from helm_drydock.orchestrator import Orchestrator from copy import deepcopy @@ -31,13 +31,14 @@ class TestClass(object): def test_design_inheritance(self, loaded_design): - client = DesignStateClient() + orchestrator = Orchestrator(state_manager=loaded_design, + enabled_drivers={'oob': 'helm_drydock.drivers.oob.pyghmi_driver.PyghmiDriver'}) - design_data = client.load_design_data("sitename", design_state=loaded_design) + design_data = orchestrator.load_design_data("sitename") assert len(design_data.baremetal_nodes) == 2 - design_data = 
client.compute_model_inheritance(design_data) + design_data = orchestrator.compute_model_inheritance(design_data) node = design_data.get_baremetal_node("controller01") diff --git a/tests/test_orch_oob.py b/tests/test_orch_oob.py new file mode 100644 index 00000000..2f6737c1 --- /dev/null +++ b/tests/test_orch_oob.py @@ -0,0 +1,99 @@ +# Copyright 2017 AT&T Intellectual Property. All other rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# Generic testing for the orchestrator +# +import pytest +#from pytest_mock import mocker +#import mock + +import os +import shutil + +from helm_drydock.ingester import Ingester + +import helm_drydock.orchestrator as orch +import helm_drydock.enum as enum +import helm_drydock.statemgmt as statemgmt +import helm_drydock.model.task as task +import helm_drydock.drivers as drivers +import helm_drydock.ingester.plugins.yaml as yaml_ingester + +class TestClass(object): + + + # sthussey None of these work right until I figure out correct + # mocking of pyghmi + def test_oob_verify_all_node(self, loaded_design): + #mocker.patch('pyghmi.ipmi.private.session.Session') + #mocker.patch.object('pyghmi.ipmi.command.Command','get_asset_tag') + + orchestrator = orch.Orchestrator(state_manager=loaded_design, + enabled_drivers={'oob': 'helm_drydock.drivers.oob.pyghmi_driver.PyghmiDriver'}) + + orch_task = orchestrator.create_task(task.OrchestratorTask, + site='sitename', + action=enum.OrchestratorAction.VerifyNode) + + 
orchestrator.execute_task(orch_task.get_id()) + + orch_task = loaded_design.get_task(orch_task.get_id()) + + assert True + + """ + def test_oob_prepare_all_nodes(self, loaded_design): + #mocker.patch('pyghmi.ipmi.private.session.Session') + #mocker.patch.object('pyghmi.ipmi.command.Command','set_bootdev') + + orchestrator = orch.Orchestrator(state_manager=loaded_design, + enabled_drivers={'oob': 'helm_drydock.drivers.oob.pyghmi_driver.PyghmiDriver'}) + + orch_task = orchestrator.create_task(task.OrchestratorTask, + site='sitename', + action=enum.OrchestratorAction.PrepareNode) + + orchestrator.execute_task(orch_task.get_id()) + + #assert pyghmi.ipmi.command.Command.set_bootdev.call_count == 3 + #assert pyghmi.ipmi.command.Command.set_power.call_count == 6 + """ + + @pytest.fixture(scope='module') + def loaded_design(self, input_files): + input_file = input_files.join("oob.yaml") + + design_state = statemgmt.DesignState() + design_data = statemgmt.SiteDesign() + design_state.post_design_base(design_data) + + ingester = Ingester() + ingester.enable_plugins([yaml_ingester.YamlIngester]) + ingester.ingest_data(plugin_name='yaml', design_state=design_state, filenames=[str(input_file)]) + + return design_state + + @pytest.fixture(scope='module') + def input_files(self, tmpdir_factory, request): + tmpdir = tmpdir_factory.mktemp('data') + samples_dir = os.path.dirname(str(request.fspath)) + "/yaml_samples" + samples = os.listdir(samples_dir) + + for f in samples: + src_file = samples_dir + "/" + f + dst_file = str(tmpdir) + "/" + f + shutil.copyfile(src_file, dst_file) + + return tmpdir \ No newline at end of file diff --git a/tests/yaml_samples/fullsite.yaml b/tests/yaml_samples/fullsite.yaml index df2f39ba..5618372e 100644 --- a/tests/yaml_samples/fullsite.yaml +++ b/tests/yaml_samples/fullsite.yaml @@ -395,6 +395,8 @@ spec: address: 172.16.1.20 - network: public address: 172.16.3.20 + - network: oob + address: 172.16.100.20 metadata: roles: os_ctl rack: rack01 @@ -416,6 
+418,8 @@ spec: address: 172.16.1.21 - network: private address: 172.16.2.21 + - network: oob + address: 172.16.100.21 --- apiVersion: 'v1.0' kind: HardwareProfile diff --git a/tests/yaml_samples/oob.yaml b/tests/yaml_samples/oob.yaml new file mode 100644 index 00000000..5c463c55 --- /dev/null +++ b/tests/yaml_samples/oob.yaml @@ -0,0 +1,226 @@ +# Copyright 2017 AT&T Intellectual Property. All other rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#################### +# +# bootstrap_seed.yaml - Site server design definition for physical layer +# +#################### +# version the schema in this file so consumers can rationally parse it +--- +apiVersion: 'v1.0' +kind: Region +metadata: + name: sitename + date: 17-FEB-2017 + description: Sample site design + author: sh8121@att.com +# Not sure if we have site wide data that doesn't fall into another 'Kind' +--- +apiVersion: 'v1.0' +kind: NetworkLink +metadata: + name: oob + region: sitename + date: 17-FEB-2017 + author: sh8121@att.com + description: Describe layer 1 attributes. Primary key is 'name'. These settings will generally be things the switch and server have to agree on +spec: + bonding: + mode: none + mtu: 1500 + linkspeed: 100full + trunking: + mode: none + default_network: oob +--- +apiVersion: 'v1.0' +kind: Network +metadata: + name: oob + region: sitename + date: 17-FEB-2017 + author: sh8121@att.com + description: Describe layer 2/3 attributes. 
Primarily CIs used for configuring server interfaces +spec: + allocation: static + cidr: 172.16.100.0/24 + ranges: + - type: static + start: 172.16.100.15 + end: 172.16.100.254 + dns: + domain: ilo.sitename.att.com + servers: 172.16.100.10 +--- +apiVersion: 'v1.0' +kind: HostProfile +metadata: + name: defaults + region: sitename + date: 17-FEB-2017 + author: sh8121@att.com + description: Describe layer 2/3 attributes. Primarily CIs used for configuring server interfaces + # No magic to this host_profile, it just provides a way to specify + # sitewide settings. If it is absent from a node's inheritance chain + # then these values will NOT be applied +spec: + # OOB (iLO, iDRAC, etc...) settings. Should prefer open standards such + # as IPMI over vender-specific when possible. + oob: + type: ipmi + # OOB networking should be preconfigured, but we can include a network + # definition for validation or enhancement (DNS registration) + network: oob + account: admin + credential: admin + # Specify storage layout of base OS. Ceph out of scope + storage: + # How storage should be carved up: lvm (logical volumes), flat + # (single partition) + layout: lvm + # Info specific to the boot and root disk/partitions + bootdisk: + # Device will specify an alias defined in hwdefinition.yaml + device: primary_boot + # For LVM, the size of the partition added to VG as a PV + # For flat, the size of the partition formatted as ext4 + root_size: 50g + # The /boot partition. If not specified, /boot will in root + boot_size: 2g + # Info for additional partitions. Need to balance between + # flexibility and complexity + partitions: + - name: logs + device: primary_boot + # Partition uuid if needed + part_uuid: 84db9664-f45e-11e6-823d-080027ef795a + size: 10g + # Optional, can carve up unformatted block devices + mountpoint: /var/log + fstype: ext4 + mount_options: defaults + # Filesystem UUID or label can be specified. 
UUID recommended + fs_uuid: cdb74f1c-9e50-4e51-be1d-068b0e9ff69e + fs_label: logs + # Platform (Operating System) settings + platform: + image: ubuntu_16.04_hwe + kernel_params: default + # Additional metadata to apply to a node + metadata: + # Base URL of the introspection service - may go in curtin data + introspection_url: http://172.16.1.10:9090 +--- +apiVersion: 'v1.0' +kind: HostProfile +metadata: + name: k8-node + region: sitename + date: 17-FEB-2017 + author: sh8121@att.com + description: Describe layer 2/3 attributes. Primarily CIs used for configuring server interfaces +spec: + # host_profile inheritance allows for deduplication of common CIs + # Inheritance is additive for CIs that are lists of multiple items + # To remove an inherited list member, prefix the primary key value + # with '!'. + host_profile: defaults + # Hardware profile will map hardware specific details to the abstract + # names uses in the host profile as well as specify hardware specific + # configs. A viable model should be to build a host profile without a + # hardware_profile and then for each node inherit the host profile and + # specify a hardware_profile to map that node's hardware to the abstract + # settings of the host_profile + hardware_profile: HPGen9v3 + metadata: + # Explicit tag assignment + tags: + - 'test' + # MaaS supports key/value pairs. Not sure of the use yet + owner_data: + foo: bar +--- +apiVersion: 'v1.0' +kind: BaremetalNode +metadata: + name: node01 + region: sitename + date: 17-FEB-2017 + author: sh8121@att.com + description: Describe layer 2/3 attributes. Primarily CIs used for configuring server interfaces +spec: + host_profile: k8-node + addressing: + - network: oob + address: 172.16.100.20 + metadata: + rack: rack01 + tags: + - 'odd' +--- +apiVersion: 'v1.0' +kind: BaremetalNode +metadata: + name: node02 + region: sitename + date: 17-FEB-2017 + author: sh8121@att.com + description: Describe layer 2/3 attributes. 
Primarily CIs used for configuring server interfaces +spec: + host_profile: k8-node + addressing: + - network: oob + address: 172.16.100.21 + metadata: + rack: rack01 + tags: + - 'even' +--- +apiVersion: 'v1.0' +kind: HardwareProfile +metadata: + name: HPGen9v3 + region: sitename + date: 17-FEB-2017 + author: Scott Hussey +spec: + # Vendor of the server chassis + vendor: HP + # Generation of the chassis model + generation: '8' + # Version of the chassis model within its generation - not version of the hardware definition + hw_version: '3' + # The certified version of the chassis BIOS + bios_version: '2.2.3' + # Mode of the default boot of hardware - bios, uefi + boot_mode: bios + # Protocol of boot of the hardware - pxe, usb, hdd + bootstrap_protocol: pxe + # Which interface to use for network booting within the OOB manager, not OS device + pxe_interface: 0 + # Map hardware addresses to aliases/roles to allow a mix of hardware configs + # in a site to result in a consistent configuration + device_aliases: + pci: + - address: pci@0000:00:03.0 + alias: prim_nic01 + # type could identify expected hardware - used for hardware manifest validation + type: '82540EM Gigabit Ethernet Controller' + - address: pci@0000:00:04.0 + alias: prim_nic02 + type: '82540EM Gigabit Ethernet Controller' + scsi: + - address: scsi@2:0.0.0 + alias: primary_boot + type: 'VBOX HARDDISK' From 2f1968fa2565ab15b95d4b4cd4a9f8a5cd301c65 Mon Sep 17 00:00:00 2001 From: Scott Hussey Date: Tue, 2 May 2017 10:57:22 -0500 Subject: [PATCH 07/11] Convert the object model to use oslo.versionedobjects (OVO) - Initially this is a naive implementation and not fully utilizing the abilites of OVO - Updated test YAML files to match model - Did not convert Task objects as they will be refactored for Celery in the near future --- helm_drydock/drivers/__init__.py | 14 +- helm_drydock/drivers/oob/__init__.py | 14 +- .../drivers/oob/pyghmi_driver/__init__.py | 102 ++-- helm_drydock/ingester/__init__.py | 34 +- 
helm_drydock/ingester/plugins/yaml.py | 295 ++++++++++- helm_drydock/model/hostprofile.py | 477 ------------------ helm_drydock/model/hwprofile.py | 98 ---- helm_drydock/model/network.py | 138 ----- helm_drydock/model/node.py | 187 ------- helm_drydock/model/site.py | 125 ----- helm_drydock/{model => objects}/__init__.py | 53 +- helm_drydock/objects/base.py | 68 +++ helm_drydock/{enum.py => objects/fields.py} | 106 +++- helm_drydock/objects/hostprofile.py | 381 ++++++++++++++ helm_drydock/objects/hwprofile.py | 125 +++++ helm_drydock/objects/network.py | 109 ++++ helm_drydock/objects/node.py | 134 +++++ helm_drydock/{model => objects}/readme.md | 18 +- helm_drydock/objects/site.py | 244 +++++++++ helm_drydock/{model => objects}/task.py | 16 +- helm_drydock/orchestrator/__init__.py | 147 ++---- helm_drydock/statemgmt/__init__.py | 367 ++------------ setup.py | 7 +- testrequirements.txt | 3 +- tests/test_design_inheritance.py | 1 - tests/test_ingester.py | 32 +- tests/test_ingester_yaml.py | 5 +- tests/test_models.py | 110 ++-- tests/test_orch_generic.py | 25 +- tests/test_orch_oob.py | 20 +- tests/test_statemgmt.py | 56 +- tests/yaml_samples/fullsite.yaml | 37 +- tests/yaml_samples/fullsite_networks.yaml | 13 +- tests/yaml_samples/fullsite_nodes.yaml | 3 +- tests/yaml_samples/fullsite_profiles.yaml | 23 +- tests/yaml_samples/multidoc.yaml | 15 +- tests/yaml_samples/oob.yaml | 29 +- tests/yaml_samples/singledoc.yaml | 23 +- 38 files changed, 1828 insertions(+), 1826 deletions(-) delete mode 100644 helm_drydock/model/hostprofile.py delete mode 100644 helm_drydock/model/hwprofile.py delete mode 100644 helm_drydock/model/network.py delete mode 100644 helm_drydock/model/node.py delete mode 100644 helm_drydock/model/site.py rename helm_drydock/{model => objects}/__init__.py (68%) create mode 100644 helm_drydock/objects/base.py rename helm_drydock/{enum.py => objects/fields.py} (53%) create mode 100644 helm_drydock/objects/hostprofile.py create mode 100644 
helm_drydock/objects/hwprofile.py create mode 100644 helm_drydock/objects/network.py create mode 100644 helm_drydock/objects/node.py rename helm_drydock/{model => objects}/readme.md (51%) create mode 100644 helm_drydock/objects/site.py rename helm_drydock/{model => objects}/task.py (82%) diff --git a/helm_drydock/drivers/__init__.py b/helm_drydock/drivers/__init__.py index b060fb03..2ced9f00 100644 --- a/helm_drydock/drivers/__init__.py +++ b/helm_drydock/drivers/__init__.py @@ -15,9 +15,9 @@ from threading import Thread, Lock import uuid import time +import helm_drydock.objects.fields as hd_fields import helm_drydock.statemgmt as statemgmt -import helm_drydock.enum as enum -import helm_drydock.model.task as tasks +import helm_drydock.objects.task as tasks import helm_drydock.error as errors # This is the interface for the orchestrator to access a driver @@ -37,7 +37,7 @@ class ProviderDriver(object): self.state_manager = state_manager # These are the actions that this driver supports - self.supported_actions = [enum.OrchestratorAction.Noop] + self.supported_actions = [hd_fields.OrchestratorAction.Noop] self.driver_name = "generic" self.driver_key = "generic" @@ -81,9 +81,9 @@ class DriverTaskRunner(Thread): self.execute_task() def execute_task(self): - if self.task.action == enum.OrchestratorAction.Noop: + if self.task.action == hd_fields.OrchestratorAction.Noop: self.orchestrator.task_field_update(self.task.get_id(), - status=enum.TaskStatus.Running) + status=hd_fields.TaskStatus.Running) i = 0 while i < 5: @@ -91,12 +91,12 @@ class DriverTaskRunner(Thread): i = i + 1 if self.task.terminate: self.orchestrator.task_field_update(self.task.get_id(), - status=enum.TaskStatus.Terminated) + status=hd_fields.TaskStatus.Terminated) return else: time.sleep(1) self.orchestrator.task_field_update(self.task.get_id(), - status=enum.TaskStatus.Complete) + status=hd_fields.TaskStatus.Complete) return diff --git a/helm_drydock/drivers/oob/__init__.py 
b/helm_drydock/drivers/oob/__init__.py index f6a78e42..50f353a5 100644 --- a/helm_drydock/drivers/oob/__init__.py +++ b/helm_drydock/drivers/oob/__init__.py @@ -19,7 +19,7 @@ # initiate_reboot # set_power_off # set_power_on -import helm_drydock.enum as enum +import helm_drydock.objects.fields as hd_fields import helm_drydock.error as errors from helm_drydock.drivers import ProviderDriver @@ -29,12 +29,12 @@ class OobDriver(ProviderDriver): def __init__(self, **kwargs): super(OobDriver, self).__init__(**kwargs) - self.supported_actions = [enum.OobAction.ConfigNodePxe, - enum.OobAction.SetNodeBoot, - enum.OobAction.PowerOffNode, - enum.OobAction.PowerOnNode, - enum.OobAction.PowerCycleNode, - enum.OobAction.InterrogateNode] + self.supported_actions = [hd_fields.OrchestratorAction.ConfigNodePxe, + hd_fields.OrchestratorAction.SetNodeBoot, + hd_fields.OrchestratorAction.PowerOffNode, + hd_fields.OrchestratorAction.PowerOnNode, + hd_fields.OrchestratorAction.PowerCycleNode, + hd_fields.OrchestratorAction.InterrogateNode] self.driver_name = "oob_generic" self.driver_key = "oob_generic" diff --git a/helm_drydock/drivers/oob/pyghmi_driver/__init__.py b/helm_drydock/drivers/oob/pyghmi_driver/__init__.py index ffab675a..33e4d3df 100644 --- a/helm_drydock/drivers/oob/pyghmi_driver/__init__.py +++ b/helm_drydock/drivers/oob/pyghmi_driver/__init__.py @@ -16,8 +16,9 @@ import time from pyghmi.ipmi.command import Command import helm_drydock.error as errors -import helm_drydock.enum as enum -import helm_drydock.model.task as task_model + +import helm_drydock.objects.fields as hd_fields +import helm_drydock.objects.task as task_model import helm_drydock.drivers.oob as oob import helm_drydock.drivers as drivers @@ -54,10 +55,9 @@ class PyghmiDriver(oob.OobDriver): (task_id)) self.orchestrator.task_field_update(task.get_id(), - status=enum.TaskStatus.Running) + status=hd_fields.TaskStatus.Running) - site_design = self.orchestrator.get_effective_site(task.site_name, - 
change_id=design_id) + site_design = self.orchestrator.get_effective_site(design_id, task.site_name) target_nodes = [] @@ -89,9 +89,9 @@ class PyghmiDriver(oob.OobDriver): while len(incomplete_subtasks) > 0: for n in incomplete_subtasks: t = self.state_manager.get_task(n) - if t.get_status() in [enum.TaskStatus.Terminated, - enum.TaskStatus.Complete, - enum.TaskStatus.Errored]: + if t.get_status() in [hd_fields.TaskStatus.Terminated, + hd_fields.TaskStatus.Complete, + hd_fields.TaskStatus.Errored]: incomplete_subtasks.remove(n) time.sleep(2) i = i+1 @@ -103,11 +103,11 @@ class PyghmiDriver(oob.OobDriver): success_subtasks = [x for x in subtasks - if x.get_result() == enum.ActionResult.Success] + if x.get_result() == hd_fields.ActionResult.Success] nosuccess_subtasks = [x for x in subtasks - if x.get_result() in [enum.ActionResult.PartialSuccess, - enum.ActionResult.Failure]] + if x.get_result() in [hd_fields.ActionResult.PartialSuccess, + hd_fields.ActionResult.Failure]] print("Task %s successful subtasks: %s" % (task.get_id(), len(success_subtasks))) @@ -118,17 +118,17 @@ class PyghmiDriver(oob.OobDriver): task_result = None if len(success_subtasks) > 0 and len(nosuccess_subtasks) > 0: - task_result = enum.ActionResult.PartialSuccess + task_result = hd_fields.ActionResult.PartialSuccess elif len(success_subtasks) == 0 and len(nosuccess_subtasks) > 0: - task_result = enum.ActionResult.Failure + task_result = hd_fields.ActionResult.Failure elif len(success_subtasks) > 0 and len(nosuccess_subtasks) == 0: - task_result = enum.ActionResult.Success + task_result = hd_fields.ActionResult.Success else: - task_result = enum.ActionResult.Incomplete + task_result = hd_fields.ActionResult.Incomplete self.orchestrator.task_field_update(task.get_id(), result=task_result, - status=enum.TaskStatus.Complete) + status=hd_fields.TaskStatus.Complete) return class PyghmiTaskRunner(drivers.DriverTaskRunner): @@ -148,8 +148,8 @@ class PyghmiTaskRunner(drivers.DriverTaskRunner): if 
len(self.task.node_list) != 1: self.orchestrator.task_field_update(self.task.get_id(), - result=enum.ActionResult.Incomplete, - status=enum.TaskStatus.Errored) + result=hd_fields.ActionResult.Incomplete, + status=hd_fields.TaskStatus.Errored) raise errors.DriverError("Multiple names (%s) in task %s node_list" % (len(self.task.node_list), self.task.get_id())) @@ -157,8 +157,8 @@ class PyghmiTaskRunner(drivers.DriverTaskRunner): if self.node.get_name() != target_node_name: self.orchestrator.task_field_update(self.task.get_id(), - result=enum.ActionResult.Incomplete, - status=enum.TaskStatus.Errored) + result=hd_fields.ActionResult.Incomplete, + status=hd_fields.TaskStatus.Errored) raise errors.DriverError("Runner node does not match " \ "task node scope") @@ -168,25 +168,25 @@ class PyghmiTaskRunner(drivers.DriverTaskRunner): if ipmi_address is None: self.orchestrator.task_field_update(self.task.get_id(), - result=enum.ActionResult.Incomplete, - status=enum.TaskStatus.Errored) + result=hd_fields.ActionResult.Incomplete, + status=hd_fields.TaskStatus.Errored) raise errors.DriverError("Node %s has no IPMI address" % (target_node_name)) self.orchestrator.task_field_update(self.task.get_id(), - status=enum.TaskStatus.Running) + status=hd_fields.TaskStatus.Running) ipmi_account = self.node.applied.get('oob_account', '') ipmi_credential = self.node.applied.get('oob_credential', '') ipmi_session = Command(bmc=ipmi_address, userid=ipmi_account, password=ipmi_credential) - if task_action == enum.OobAction.ConfigNodePxe: + if task_action == hd_fields.OrchestratorAction.ConfigNodePxe: self.orchestrator.task_field_update(self.task.get_id(), - result=enum.ActionResult.Failure, - status=enum.TaskStatus.Complete) + result=hd_fields.ActionResult.Failure, + status=hd_fields.TaskStatus.Complete) return - elif task_action == enum.OobAction.SetNodeBoot: + elif task_action == hd_fields.OrchestratorAction.SetNodeBoot: ipmi_session.set_bootdev('pxe') time.sleep(3) @@ -195,14 +195,14 @@ 
class PyghmiTaskRunner(drivers.DriverTaskRunner): if bootdev.get('bootdev', '') == 'network': self.orchestrator.task_field_update(self.task.get_id(), - result=enum.ActionResult.Success, - status=enum.TaskStatus.Complete) + result=hd_fields.ActionResult.Success, + status=hd_fields.TaskStatus.Complete) else: self.orchestrator.task_field_update(self.task.get_id(), - result=enum.ActionResult.Failure, - status=enum.TaskStatus.Complete) + result=hd_fields.ActionResult.Failure, + status=hd_fields.TaskStatus.Complete) return - elif task_action == enum.OobAction.PowerOffNode: + elif task_action == hd_fields.OrchestratorAction.PowerOffNode: ipmi_session.set_power('off') i = 18 @@ -216,14 +216,14 @@ class PyghmiTaskRunner(drivers.DriverTaskRunner): if power_state.get('powerstate', '') == 'off': self.orchestrator.task_field_update(self.task.get_id(), - result=enum.ActionResult.Success, - status=enum.TaskStatus.Complete) + result=hd_fields.ActionResult.Success, + status=hd_fields.TaskStatus.Complete) else: self.orchestrator.task_field_update(self.task.get_id(), - result=enum.ActionResult.Failure, - status=enum.TaskStatus.Complete) + result=hd_fields.ActionResult.Failure, + status=hd_fields.TaskStatus.Complete) return - elif task_action == enum.OobAction.PowerOnNode: + elif task_action == hd_fields.OrchestratorAction.PowerOnNode: ipmi_session.set_power('on') i = 18 @@ -237,14 +237,14 @@ class PyghmiTaskRunner(drivers.DriverTaskRunner): if power_state.get('powerstate', '') == 'on': self.orchestrator.task_field_update(self.task.get_id(), - result=enum.ActionResult.Success, - status=enum.TaskStatus.Complete) + result=hd_fields.ActionResult.Success, + status=hd_fields.TaskStatus.Complete) else: self.orchestrator.task_field_update(self.task.get_id(), - result=enum.ActionResult.Failure, - status=enum.TaskStatus.Complete) + result=hd_fields.ActionResult.Failure, + status=hd_fields.TaskStatus.Complete) return - elif task_action == enum.OobAction.PowerCycleNode: + elif task_action == 
hd_fields.OrchestratorAction.PowerCycleNode: ipmi_session.set_power('off') # Wait for power state of off before booting back up @@ -260,8 +260,8 @@ class PyghmiTaskRunner(drivers.DriverTaskRunner): if power_state.get('powerstate', '') == 'on': self.orchestrator.task_field_update(self.task.get_id(), - result=enum.ActionResult.Failure, - status=enum.TaskStatus.Complete) + result=hd_fields.ActionResult.Failure, + status=hd_fields.TaskStatus.Complete) return ipmi_session.set_power('on') @@ -277,18 +277,18 @@ class PyghmiTaskRunner(drivers.DriverTaskRunner): if power_state.get('powerstate', '') == 'on': self.orchestrator.task_field_update(self.task.get_id(), - result=enum.ActionResult.Success, - status=enum.TaskStatus.Complete) + result=hd_fields.ActionResult.Success, + status=hd_fields.TaskStatus.Complete) else: self.orchestrator.task_field_update(self.task.get_id(), - result=enum.ActionResult.Failure, - status=enum.TaskStatus.Complete) + result=hd_fields.ActionResult.Failure, + status=hd_fields.TaskStatus.Complete) return - elif task_action == enum.OobAction.InterrogateNode: + elif task_action == hd_fields.OrchestratorAction.InterrogateNode: mci_id = ipmi_session.get_mci() self.orchestrator.task_field_update(self.task.get_id(), - result=enum.ActionResult.Success, - status=enum.TaskStatus.Complete, + result=hd_fields.ActionResult.Success, + status=hd_fields.TaskStatus.Complete, result_detail=mci_id) return \ No newline at end of file diff --git a/helm_drydock/ingester/__init__.py b/helm_drydock/ingester/__init__.py index 507b8aab..b2b69072 100644 --- a/helm_drydock/ingester/__init__.py +++ b/helm_drydock/ingester/__init__.py @@ -17,14 +17,16 @@ import logging import yaml +import uuid -import helm_drydock.model.site as site -import helm_drydock.model.network as network -import helm_drydock.model.hwprofile as hwprofile -import helm_drydock.model.node as node -import helm_drydock.model.hostprofile as hostprofile +import helm_drydock.objects as objects +import 
helm_drydock.objects.site as site +import helm_drydock.objects.network as network +import helm_drydock.objects.hwprofile as hwprofile +import helm_drydock.objects.node as node +import helm_drydock.objects.hostprofile as hostprofile -from helm_drydock.statemgmt import DesignState, SiteDesign, DesignError +from helm_drydock.statemgmt import DesignState class Ingester(object): @@ -64,22 +66,22 @@ class Ingester(object): self.log.error("ingest_data called without valid DesignState handler") raise Exception("Invalid design_state handler") - # TODO this method needs refactored to handle design base vs change - design_data = None - try: - design_data = design_state.get_design_base() - except DesignError: - design_data = SiteDesign() - design_state.post_design_base(design_data) + # If no design_id is specified, instantiate a new one + if 'design_id' not in kwargs.keys(): + design_id = str(uuid.uuid4()) + design_data = objects.SiteDesign(id=design_id) + design_state.post_design(design_data) + else: + design_id = kwargs.get('design_id') + design_data = design_state.get_design(design_id) if plugin_name in self.registered_plugins: design_items = self.registered_plugins[plugin_name].ingest_data(**kwargs) - # Need to persist data here, but we don't yet have the statemgmt service working for m in design_items: if type(m) is site.Site: - design_data.add_site(m) + design_data.set_site(m) elif type(m) is network.Network: design_data.add_network(m) elif type(m) is network.NetworkLink: @@ -90,7 +92,7 @@ class Ingester(object): design_data.add_hardware_profile(m) elif type(m) is node.BaremetalNode: design_data.add_baremetal_node(m) - design_state.put_design_base(design_data) + design_state.put_design(design_data) else: self.log.error("Could not find plugin %s to ingest data." 
% (plugin_name)) raise LookupError("Could not find plugin %s" % plugin_name) diff --git a/helm_drydock/ingester/plugins/yaml.py b/helm_drydock/ingester/plugins/yaml.py index 20a4d902..02fb797e 100644 --- a/helm_drydock/ingester/plugins/yaml.py +++ b/helm_drydock/ingester/plugins/yaml.py @@ -19,25 +19,13 @@ import yaml import logging -import helm_drydock.model.hwprofile as hwprofile -import helm_drydock.model.node as node -import helm_drydock.model.site as site -import helm_drydock.model.hostprofile as hostprofile -import helm_drydock.model.network as network +import helm_drydock.objects.fields as hd_fields +from helm_drydock import objects from helm_drydock.ingester.plugins import IngesterPlugin class YamlIngester(IngesterPlugin): - kind_map = { - "Region": site.Site, - "NetworkLink": network.NetworkLink, - "HardwareProfile": hwprofile.HardwareProfile, - "Network": network.Network, - "HostProfile": hostprofile.HostProfile, - "BaremetalNode": node.BaremetalNode, - } - def __init__(self): super(YamlIngester, self).__init__() @@ -91,19 +79,274 @@ class YamlIngester(IngesterPlugin): for d in parsed_data: kind = d.get('kind', '') if kind != '': - if kind in YamlIngester.kind_map: - try: - model = YamlIngester.kind_map[kind](**d) + if kind == 'Region': + api_version = d.get('apiVersion', '') + + if api_version == 'v1.0': + model = objects.Site() + + metadata = d.get('metadata', {}) + + # Need to add validation logic, we'll assume the input is + # valid for now + model.name = metadata.get('name', '') + model.status = hd_fields.SiteStatus.Unknown + model.source = hd_fields.ModelSource.Designed + + spec = d.get('spec', {}) + + model.tag_definitions = objects.NodeTagDefinitionList() + + tag_defs = spec.get('tag_definitions', []) + + for t in tag_defs: + tag_model = objects.NodeTagDefinition() + tag_model.tag = t.get('tag', '') + tag_model.type = t.get('definition_type', '') + tag_model.definition = t.get('definition', '') + + if tag_model.type not in ['lshw_xpath']: + raise 
ValueError('Unknown definition type in ' \ + 'NodeTagDefinition: %s' % (tag_model.type)) + model.tag_definitions.append(tag_model) + models.append(model) - except Exception as err: - self.log.error("Error building model %s: %s" - % (kind, str(err))) - continue - else: - self.log.error( - "Error processing document, unknown kind %s" - % (kind)) - continue + else: + raise ValueError('Unknown API version %s of Region kind' % (api_version)) + elif kind == 'NetworkLink': + api_version = d.get('apiVersion', '') + + if api_version == "v1.0": + model = objects.NetworkLink() + + metadata = d.get('metadata', {}) + spec = d.get('spec', {}) + + model.name = metadata.get('name', '') + model.site = metadata.get('region', '') + + bonding = spec.get('bonding', {}) + model.bonding_mode = bonding.get('mode', + hd_fields.NetworkLinkBondingMode.Disabled) + + # How should we define defaults for CIs not in the input? + if model.bonding_mode == hd_fields.NetworkLinkBondingMode.LACP: + model.bonding_xmit_hash = bonding.get('hash', 'layer3+4') + model.bonding_peer_rate = bonding.get('peer_rate', 'fast') + model.bonding_mon_rate = bonding.get('mon_rate', '100') + model.bonding_up_delay = bonding.get('up_delay', '200') + model.bonding_down_delay = bonding.get('down_delay', '200') + + model.mtu = spec.get('mtu', None) + model.linkspeed = spec.get('linkspeed', None) + + trunking = spec.get('trunking', {}) + model.trunk_mode = trunking.get('mode', hd_fields.NetworkLinkTrunkingMode.Disabled) + model.native_network = trunking.get('default_network', None) + + models.append(model) + else: + raise ValueError('Unknown API version of object') + elif kind == 'Network': + api_version = d.get('apiVersion', '') + + if api_version == "v1.0": + model = objects.Network() + + metadata = d.get('metadata', {}) + spec = d.get('spec', {}) + + model.name = metadata.get('name', '') + model.site = metadata.get('region', '') + + model.cidr = spec.get('cidr', None) + model.allocation_strategy = 
spec.get('allocation', 'static') + model.vlan_id = spec.get('vlan_id', 1) + model.mtu = spec.get('mtu', None) + + dns = spec.get('dns', {}) + model.dns_domain = dns.get('domain', 'local') + model.dns_servers = dns.get('servers', None) + + ranges = spec.get('ranges', []) + model.ranges = [] + + for r in ranges: + model.ranges.append({'type': r.get('type', None), + 'start': r.get('start', None), + 'end': r.get('end', None), + }) + + routes = spec.get('routes', []) + model.routes = [] + + for r in routes: + model.routes.append({'subnet': r.get('subnet', None), + 'gateway': r.get('gateway', None), + 'metric': r.get('metric', None), + }) + models.append(model) + elif kind == 'HardwareProfile': + api_version = d.get('apiVersion', '') + + if api_version == 'v1.0': + metadata = d.get('metadata', {}) + spec = d.get('spec', {}) + + model = objects.HardwareProfile() + + # Need to add validation logic, we'll assume the input is + # valid for now + model.name = metadata.get('name', '') + model.site = metadata.get('region', '') + model.source = hd_fields.ModelSource.Designed + + model.vendor = spec.get('vendor', None) + model.generation = spec.get('generation', None) + model.hw_version = spec.get('hw_version', None) + model.bios_version = spec.get('bios_version', None) + model.boot_mode = spec.get('boot_mode', None) + model.bootstrap_protocol = spec.get('bootstrap_protocol', None) + model.pxe_interface = spec.get('pxe_interface', None) + + model.devices = objects.HardwareDeviceAliasList() + + device_aliases = spec.get('device_aliases', {}) + + for d in device_aliases: + dev_model = objects.HardwareDeviceAlias() + dev_model.source = hd_fields.ModelSource.Designed + dev_model.alias = d.get('alias', None) + dev_model.bus_type = d.get('bus_type', None) + dev_model.dev_type = d.get('dev_type', None) + dev_model.address = d.get('address', None) + model.devices.append(dev_model) + + models.append(model) + elif kind == 'HostProfile' or kind == 'BaremetalNode': + api_version = 
d.get('apiVersion', '') + + if api_version == "v1.0": + model = None + + if kind == 'HostProfile': + model = objects.HostProfile() + else: + model = objects.BaremetalNode() + + metadata = d.get('metadata', {}) + spec = d.get('spec', {}) + + model.name = metadata.get('name', '') + model.site = metadata.get('region', '') + model.source = hd_fields.ModelSource.Designed + + model.parent_profile = spec.get('host_profile', None) + model.hardware_profile = spec.get('hardware_profile', None) + + oob = spec.get('oob', {}) + + model.oob_type = oob.get('type', None) + model.oob_network = oob.get('network', None) + model.oob_account = oob.get('account', None) + model.oob_credential = oob.get('credential', None) + + storage = spec.get('storage', {}) + model.storage_layout = storage.get('layout', 'lvm') + + bootdisk = storage.get('bootdisk', {}) + model.bootdisk_device = bootdisk.get('device', None) + model.bootdisk_root_size = bootdisk.get('root_size', None) + model.bootdisk_boot_size = bootdisk.get('boot_size', None) + + partitions = storage.get('partitions', []) + model.partitions = objects.HostPartitionList() + + for p in partitions: + part_model = objects.HostPartition() + + part_model.name = p.get('name', None) + part_model.source = hd_fields.ModelSource.Designed + part_model.device = p.get('device', None) + part_model.part_uuid = p.get('part_uuid', None) + part_model.size = p.get('size', None) + part_model.mountpoint = p.get('mountpoint', None) + part_model.fstype = p.get('fstype', 'ext4') + part_model.mount_options = p.get('mount_options', 'defaults') + part_model.fs_uuid = p.get('fs_uuid', None) + part_model.fs_label = p.get('fs_label', None) + + model.partitions.append(part_model) + + interfaces = spec.get('interfaces', []) + model.interfaces = objects.HostInterfaceList() + + for i in interfaces: + int_model = objects.HostInterface() + + int_model.device_name = i.get('device_name', None) + int_model.network_link = i.get('device_link', None) + + 
int_model.hardware_slaves = [] + slaves = i.get('slaves', []) + + for s in slaves: + int_model.hardware_slaves.append(s) + + int_model.networks = [] + networks = i.get('networks', []) + + for n in networks: + int_model.networks.append(n) + + model.interfaces.append(int_model) + + node_metadata = spec.get('metadata', {}) + metadata_tags = node_metadata.get('tags', []) + model.tags = [] + + for t in metadata_tags: + model.tags.append(t) + + owner_data = node_metadata.get('owner_data', {}) + model.owner_data = {} + + for k, v in owner_data.items(): + model.owner_data[k] = v + + model.rack = node_metadata.get('rack', None) + + if kind == 'BaremetalNode': + addresses = spec.get('addressing', []) + + if len(addresses) == 0: + raise ValueError('BaremetalNode needs at least' \ + ' 1 assigned address') + + model.addressing = objects.IpAddressAssignmentList() + + for a in addresses: + assignment = objects.IpAddressAssignment() + + address = a.get('address', '') + if address == 'dhcp': + assignment.type = 'dhcp' + assignment.address = None + assignment.network = a.get('network') + + model.addressing.append(assignment) + elif address != '': + assignment.type = 'static' + assignment.address = a.get('address') + assignment.network = a.get('network') + + model.addressing.append(assignment) + else: + self.log.error("Invalid address assignment %s on Node %s" + % (address, model.name)) + models.append(model) + else: + raise ValueError('Unknown API version %s of Kind HostProfile' % (api_version)) else: self.log.error( "Error processing document in %s, no kind field" diff --git a/helm_drydock/model/hostprofile.py b/helm_drydock/model/hostprofile.py deleted file mode 100644 index a3692743..00000000 --- a/helm_drydock/model/hostprofile.py +++ /dev/null @@ -1,477 +0,0 @@ -# Copyright 2017 AT&T Intellectual Property. All other rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Models for helm_drydock -# -import logging - -from copy import deepcopy - -from helm_drydock.enum import SiteStatus -from helm_drydock.enum import NodeStatus -from helm_drydock.model.network import Network -from helm_drydock.model.network import NetworkLink -from helm_drydock.model import Utils - -class HostProfile(object): - - def __init__(self, **kwargs): - self.log = logging.Logger('model') - - self.api_version = kwargs.get('apiVersion', '') - - if self.api_version == "v1.0": - metadata = kwargs.get('metadata', {}) - spec = kwargs.get('spec', {}) - - self.name = metadata.get('name', '') - self.site = metadata.get('region', '') - - # Design Data - self.design = {} - - self.design['parent_profile'] = spec.get('host_profile', None) - self.design['hardware_profile'] = spec.get('hardware_profile', None) - - - oob = spec.get('oob', {}) - - self.design['oob_type'] = oob.get('type', None) - self.design['oob_network'] = oob.get('network', None) - self.design['oob_account'] = oob.get('account', None) - self.design['oob_credential'] = oob.get('credential', None) - - storage = spec.get('storage', {}) - self.design['storage_layout'] = storage.get('layout', 'lvm') - - bootdisk = storage.get('bootdisk', {}) - self.design['bootdisk_device'] = bootdisk.get('device', None) - self.design['bootdisk_root_size'] = bootdisk.get('root_size', None) - self.design['bootdisk_boot_size'] = bootdisk.get('boot_size', None) - - partitions = storage.get('partitions', []) - self.design['partitions'] = [] - - for p in partitions: - 
self.design['partitions'].append(HostPartition(self.api_version, **p)) - - interfaces = spec.get('interfaces', []) - self.design['interfaces'] = [] - - for i in interfaces: - self.design['interfaces'].append(HostInterface(self.api_version, **i)) - - node_metadata = spec.get('metadata', {}) - - metadata_tags = node_metadata.get('tags', []) - self.design['tags'] = [] - - for t in metadata_tags: - self.design['tags'].append(t) - - owner_data = node_metadata.get('owner_data', {}) - self.design['owner_data'] = {} - - for k, v in owner_data.items(): - self.design['owner_data'][k] = v - - self.design['rack'] = node_metadata.get('rack', None) - - else: - self.log.error("Unknown API version %s of %s" % - (self.api_version, self.__class__)) - raise ValueError('Unknown API version of object') - - def get_rack(self): - if getattr(self, 'applied', None) is not None: - return self.applied.get('rack', None) - else: - return self.design.get('rack', None) - - def get_name(self): - return self.name - - def has_tag(self, tag): - if getattr(self, 'applied', None) is not None: - if tag in self.applied.get('tags', []): - return True - else: - if tag in self.design.get('tags', []): - return True - - return False - - def apply_inheritance(self, site): - # No parent to inherit from, just apply design values - # and return - if self.design['parent_profile'] is None: - self.applied = deepcopy(self.design) - return - - parent = site.get_host_profile(self.design['parent_profile']) - - if parent is None: - raise NameError("Cannot find parent profile %s for %s" - % (self.design['parent_profile'], self.name)) - - parent.apply_inheritance(site) - - # First compute inheritance for simple fields - inheritable_field_list = [ - "hardware_profile", "oob_type", "oob_network", - "oob_credential", "oob_account", "storage_layout", - "bootdisk_device", "bootdisk_root_size", "bootdisk_boot_size", - "rack"] - - # Create applied data from self design values and parent - # applied values - - self.applied = {} - 
- for f in inheritable_field_list: - self.applied[f] = Utils.apply_field_inheritance( - self.design.get(f, None), - parent.applied.get(f, None)) - - # Now compute inheritance for complex types - self.applied['tags'] = Utils.merge_lists(self.design['tags'], - parent.applied['tags']) - - self.applied['owner_data'] = Utils.merge_dicts( - self.design['owner_data'], parent.applied['owner_data']) - - self.applied['interfaces'] = HostInterface.merge_lists( - self.design['interfaces'], parent.applied['interfaces']) - - self.applied['partitions'] = HostPartition.merge_lists( - self.design['partitions'], parent.applied['partitions']) - - return - - -class HostInterface(object): - - def __init__(self, api_version, **kwargs): - self.log = logging.Logger('model') - - self.api_version = api_version - - if self.api_version == "v1.0": - self.device_name = kwargs.get('device_name', None) - - self.design = {} - self.design['network_link'] = kwargs.get('device_link', None) - - self.design['hardware_slaves'] = [] - slaves = kwargs.get('slaves', []) - - for s in slaves: - self.design['hardware_slaves'].append(s) - - self.design['networks'] = [] - networks = kwargs.get('networks', []) - - for n in networks: - self.design['networks'].append(n) - else: - self.log.error("Unknown API version %s of %s" % - (self.api_version, self.__class__)) - raise ValueError('Unknown API version of object') - - # Ensure applied_data exists - def ensure_applied_data(self): - if getattr(self, 'applied', None) is None: - self.applied = deepcopy(self.design) - - return - - def get_name(self): - return self.device_name - - def get_applied_hw_slaves(self): - self.ensure_applied_data() - - return self.applied.get('hardware_slaves', []) - - def get_applied_slave_selectors(self): - self.ensure_applied_data() - - return self.applied.get('selectors', None) - - # Return number of slaves for this interface - def get_applied_slave_count(self): - self.ensure_applied_data() - - return 
len(self.applied.get('hardware_slaves', [])) - - def get_network_configs(self): - self.ensure_applied_data() - return self.applied.get('attached_networks', []) - - # The device attribute may be hardware alias that translates to a - # physical device address. If the device attribute does not match an - # alias, we assume it directly identifies a OS device name. When the - # apply_hardware_profile method is called on the parent Node of this - # device, the selector will be decided and applied - - def add_selector(self, sel_type, address='', dev_type=''): - self.ensure_applied_data() - - if self.applied.get('selectors', None) is None: - self.applied['selectors'] = [] - - new_selector = {} - new_selector['selector_type'] = sel_type - new_selector['address'] = address - new_selector['device_type'] = dev_type - - self.applied['selectors'].append(new_selector) - - def apply_link_config(self, net_link): - if (net_link is not None and - isinstance(net_link, NetworkLink) and - net_link.name == self.design.get('network_link', '')): - - self.ensure_applied_data() - - self.applied['attached_link'] = deepcopy(net_link) - return True - return False - - def apply_network_config(self, network): - if network.name in self.design['networks']: - self.ensure_applied_data() - if self.applied.get('attached_networks', None) is None: - self.applied['attached_networks'] = [] - self.applied['attached_networks'].append(deepcopy(network)) - return True - else: - return False - - def set_network_address(self, network_name, address): - self.ensure_applied_data() - - if self.applied.get('attached_networks', None) is None: - return False - - for n in self.applied.get('attached_networks', []): - if n.name == network_name: - setattr(n, 'assigned_address', address) - - """ - Merge two lists of HostInterface models with child_list taking - priority when conflicts. If a member of child_list has a device_name - beginning with '!' 
it indicates that HostInterface should be - removed from the merged list - """ - - @staticmethod - def merge_lists(child_list, parent_list): - effective_list = [] - - if len(child_list) == 0 and len(parent_list) > 0: - for p in parent_list: - pp = deepcopy(p) - pp.ensure_applied_data() - effective_list.append(pp) - elif len(parent_list) == 0 and len(child_list) > 0: - for i in child_list: - if i.get_name().startswith('!'): - continue - else: - ii = deepcopy(i) - ii.ensure_applied_data() - effective_list.append(ii) - elif len(parent_list) > 0 and len(child_list) > 0: - parent_interfaces = [] - for i in parent_list: - parent_name = i.get_name() - parent_interfaces.append(parent_name) - add = True - for j in child_list: - if j.get_name() == ("!" + parent_name): - add = False - break - elif j.device_name == parent_name: - m = HostInterface(j.api_version) - m.device_name = j.get_name() - m.design['network_link'] = \ - Utils.apply_field_inheritance( - j.design.get('network_link', None), - i.applied.get('network_link', None)) - - s = [x for x - in i.applied.get('hardware_slaves', []) - if ("!" + x) not in j.design.get( - 'hardware_slaves', [])] - - s.extend( - [x for x - in j.design.get('hardware_slaves', []) - if not x.startswith("!")]) - - m.design['hardware_slaves'] = s - - n = [x for x - in i.applied.get('networks',[]) - if ("!" 
+ x) not in j.design.get( - 'networks', [])] - - n.extend( - [x for x - in j.design.get('networks', []) - if not x.startswith("!")]) - - m.design['networks'] = n - m.ensure_applied_data() - - effective_list.append(m) - add = False - break - - if add: - ii = deepcopy(i) - ii.ensure_applied_data() - effective_list.append(ii) - - for j in child_list: - if (j.device_name not in parent_interfaces - and not j.device_name.startswith("!")): - jj = deepcopy(j) - jj.ensure_applied_data() - effective_list.append(jj) - - return effective_list - - -class HostPartition(object): - - def __init__(self, api_version, **kwargs): - self.api_version = api_version - - if self.api_version == "v1.0": - self.name = kwargs.get('name', None) - - self.design = {} - self.design['device'] = kwargs.get('device', None) - self.design['part_uuid'] = kwargs.get('part_uuid', None) - self.design['size'] = kwargs.get('size', None) - self.design['mountpoint'] = kwargs.get('mountpoint', None) - self.design['fstype'] = kwargs.get('fstype', 'ext4') - self.design['mount_options'] = kwargs.get('mount_options', 'defaults') - self.design['fs_uuid'] = kwargs.get('fs_uuid', None) - self.design['fs_label'] = kwargs.get('fs_label', None) - - self.applied = kwargs.get('applied', None) - self.build = kwargs.get('build', None) - else: - raise ValueError('Unknown API version of object') - - # Ensure applied_data exists - def ensure_applied_data(self): - if getattr(self, 'applied', None) is None: - self.applied = deepcopy(self.design) - - return - - def get_applied_device(self): - self.ensure_applied_data() - - return self.applied.get('device', '') - - def get_name(self): - return self.name - - # The device attribute may be hardware alias that translates to a - # physical device address. If the device attribute does not match an - # alias, we assume it directly identifies a OS device name. 
When the - # apply_hardware_profile method is called on the parent Node of this - # device, the selector will be decided and applied - - def set_selector(self, sel_type, address='', dev_type=''): - self.ensure_applied_data() - - selector = {} - selector['type'] = sel_type - selector['address'] = address - selector['device_type'] = dev_type - - self.applied['selector'] = selector - - def get_selector(self): - self.ensure_applied_data() - return self.applied.get('selector', None) - - """ - Merge two lists of HostPartition models with child_list taking - priority when conflicts. If a member of child_list has a name - beginning with '!' it indicates that HostPartition should be - removed from the merged list - """ - - @staticmethod - def merge_lists(child_list, parent_list): - effective_list = [] - - if len(child_list) == 0 and len(parent_list) > 0: - for p in parent_list: - pp = deepcopy(p) - pp.ensure_applied_data() - effective_list.append(pp) - elif len(parent_list) == 0 and len(child_list) > 0: - for i in child_list: - if i.get_name().startswith('!'): - continue - else: - ii = deepcopy(i) - ii.ensure_applied_data() - effective_list.append(ii) - elif len(parent_list) > 0 and len(child_list) > 0: - inherit_field_list = ["device", "part_uuid", "size", - "mountpoint", "fstype", "mount_options", - "fs_uuid", "fs_label"] - parent_partitions = [] - for i in parent_list: - parent_name = i.get_name() - parent_partitions.append(parent_name) - add = True - for j in child_list: - if j.get_name() == ("!" 
+ parent_name): - add = False - break - elif j.get_name() == parent_name: - p = HostPartition(j.api_version) - p.name = j.get_name() - - for f in inherit_field_list: - j_f = j.design.get(f, None) - i_f = i.applied.get(f, None) - p.design.set(p, - Utils.apply_field_inheritance(j_f, i_f)) - add = False - p.ensure_applied_data() - effective_list.append(p) - if add: - ii = deepcopy(i) - ii.ensure_applied_data() - effective_list.append(ii) - - for j in child_list: - if (j.get_name() not in parent_list and - not j.get_name().startswith("!")): - jj = deepcopy(j) - jj.ensure_applied_data - effective_list.append(jj) - - return effective_list diff --git a/helm_drydock/model/hwprofile.py b/helm_drydock/model/hwprofile.py deleted file mode 100644 index a37053aa..00000000 --- a/helm_drydock/model/hwprofile.py +++ /dev/null @@ -1,98 +0,0 @@ -# Copyright 2017 AT&T Intellectual Property. All other rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -# Models for helm_drydock -# -import logging - -from copy import deepcopy - -from helm_drydock.enum import SiteStatus -from helm_drydock.enum import NodeStatus - -class HardwareProfile(object): - - def __init__(self, **kwargs): - self.log = logging.Logger('model') - - self.api_version = kwargs.get('apiVersion', '') - - if self.api_version == "v1.0": - metadata = kwargs.get('metadata', {}) - spec = kwargs.get('spec', {}) - - # Need to add validation logic, we'll assume the input is - # valid for now - self.name = metadata.get('name', '') - self.site = metadata.get('region', '') - - self.vendor = spec.get('vendor', None) - self.generation = spec.get('generation', None) - self.hw_version = spec.get('hw_version', None) - self.bios_version = spec.get('bios_version', None) - self.boot_mode = spec.get('boot_mode', None) - self.bootstrap_protocol = spec.get('bootstrap_protocol', None) - self.pxe_interface = spec.get('pxe_interface', None) - self.devices = [] - - device_aliases = spec.get('device_aliases', {}) - - pci_devices = device_aliases.get('pci', []) - scsi_devices = device_aliases.get('scsi', []) - - for d in pci_devices: - d['bus_type'] = 'pci' - self.devices.append( - HardwareDeviceAlias(self.api_version, **d)) - - for d in scsi_devices: - d['bus_type'] = 'scsi' - self.devices.append( - HardwareDeviceAlias(self.api_version, **d)) - else: - self.log.error("Unknown API version %s of %s" % - (self.api_version, self.__class__)) - raise ValueError('Unknown API version of object') - - return - - def get_name(self): - return self.name - - def resolve_alias(self, alias_type, alias): - selector = {} - for d in self.devices: - if d.alias == alias and d.bus_type == alias_type: - selector['address'] = d.address - selector['device_type'] = d.type - return selector - - return None - -class HardwareDeviceAlias(object): - - def __init__(self, api_version, **kwargs): - self.log = logging.Logger('model') - - self.api_version = api_version - - if self.api_version == "v1.0": - 
self.bus_type = kwargs.get('bus_type', None) - self.address = kwargs.get('address', None) - self.alias = kwargs.get('alias', None) - self.type = kwargs.get('type', None) - else: - self.log.error("Unknown API version %s of %s" % - (self.api_version, self.__class__)) - raise ValueError('Unknown API version of object') diff --git a/helm_drydock/model/network.py b/helm_drydock/model/network.py deleted file mode 100644 index 4369b002..00000000 --- a/helm_drydock/model/network.py +++ /dev/null @@ -1,138 +0,0 @@ -# Copyright 2017 AT&T Intellectual Property. All other rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Models for helm_drydock -# -import logging - -from copy import deepcopy - -from helm_drydock.enum import SiteStatus -from helm_drydock.enum import NodeStatus - -class NetworkLink(object): - - def __init__(self, **kwargs): - self.log = logging.Logger('model') - - self.api_version = kwargs.get('apiVersion', '') - - if self.api_version == "v1.0": - metadata = kwargs.get('metadata', {}) - spec = kwargs.get('spec', {}) - - self.name = metadata.get('name', '') - self.site = metadata.get('region', '') - - bonding = spec.get('bonding', {}) - self.bonding_mode = bonding.get('mode', 'none') - - # How should we define defaults for CIs not in the input? 
- if self.bonding_mode == '802.3ad': - self.bonding_xmit_hash = bonding.get('hash', 'layer3+4') - self.bonding_peer_rate = bonding.get('peer_rate', 'fast') - self.bonding_mon_rate = bonding.get('mon_rate', '100') - self.bonding_up_delay = bonding.get('up_delay', '200') - self.bonding_down_delay = bonding.get('down_delay', '200') - - self.mtu = spec.get('mtu', 1500) - self.linkspeed = spec.get('linkspeed', 'auto') - - trunking = spec.get('trunking', {}) - self.trunk_mode = trunking.get('mode', 'none') - - self.native_network = spec.get('default_network', '') - else: - self.log.error("Unknown API version %s of %s" % - (self.api_version, self.__class__)) - raise ValueError('Unknown API version of object') - - def get_name(self): - return self.name - -class Network(object): - - def __init__(self, **kwargs): - self.log = logging.Logger('model') - - self.api_version = kwargs.get('apiVersion', '') - - if self.api_version == "v1.0": - metadata = kwargs.get('metadata', {}) - spec = kwargs.get('spec', {}) - - self.name = metadata.get('name', '') - self.site = metadata.get('region', '') - - self.cidr = spec.get('cidr', None) - self.allocation_strategy = spec.get('allocation', 'static') - self.vlan_id = spec.get('vlan_id', 1) - self.mtu = spec.get('mtu', 0) - - dns = spec.get('dns', {}) - self.dns_domain = dns.get('domain', 'local') - self.dns_servers = dns.get('servers', None) - - ranges = spec.get('ranges', []) - self.ranges = [] - - for r in ranges: - self.ranges.append(NetworkAddressRange(self.api_version, **r)) - - routes = spec.get('routes', []) - self.routes = [] - - for r in routes: - self.routes.append(NetworkRoute(self.api_version, **r)) - else: - self.log.error("Unknown API version %s of %s" % - (self.api_version, self.__class__)) - raise ValueError('Unknown API version of object') - - def get_name(self): - return self.name - - -class NetworkAddressRange(object): - - def __init__(self, api_version, **kwargs): - self.log = logging.Logger('model') - - self.api_version 
= api_version - - if self.api_version == "v1.0": - self.type = kwargs.get('type', None) - self.start = kwargs.get('start', None) - self.end = kwargs.get('end', None) - else: - self.log.error("Unknown API version %s of %s" % - (self.api_version, self.__class__)) - raise ValueError('Unknown API version of object') - - -class NetworkRoute(object): - - def __init__(self, api_version, **kwargs): - self.log = logging.Logger('model') - - self.api_version = api_version - - if self.api_version == "v1.0": - self.type = kwargs.get('subnet', None) - self.start = kwargs.get('gateway', None) - self.end = kwargs.get('metric', 100) - else: - self.log.error("Unknown API version %s of %s" % - (self.api_version, self.__class__)) - raise ValueError('Unknown API version of object') diff --git a/helm_drydock/model/node.py b/helm_drydock/model/node.py deleted file mode 100644 index fed005e6..00000000 --- a/helm_drydock/model/node.py +++ /dev/null @@ -1,187 +0,0 @@ -# Copyright 2017 AT&T Intellectual Property. All other rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -# Models for helm_drydock -# -import logging - -from copy import deepcopy - - -from helm_drydock.enum import SiteStatus -from helm_drydock.enum import NodeStatus -from helm_drydock.model.hostprofile import HostProfile -from helm_drydock.model import Utils - -class BaremetalNode(HostProfile): - - # A BaremetalNode is really nothing more than a physical - # instantiation of a HostProfile, so they both represent - # the same set of CIs - def __init__(self, **kwargs): - super(BaremetalNode, self).__init__(**kwargs) - - if self.api_version == "v1.0": - addressing = [] - - self.design['addressing'] = addressing - - spec = kwargs.get('spec', {}) - addresses = spec.get('addressing', []) - - if len(addresses) == 0: - raise ValueError('BaremetalNode needs at least' \ - ' 1 assigned address') - for a in addresses: - assignment = {} - address = a.get('address', '') - if address == 'dhcp': - assignment['type'] = 'dhcp' - assignment['address'] = None - assignment['network'] = a.get('network') - addressing.append(assignment) - elif address != '': - assignment['type'] = 'static' - assignment['address'] = a.get('address') - assignment['network'] = a.get('network') - addressing.append(assignment) - else: - self.log.error("Invalid address assignment %s on Node %s" - % (address, self.name)) - - self.applied = kwargs.get('applied_data', None) - self.build = kwargs.get('build', None) - - # Compile the applied version of this model sourcing referenced - # data from the passed site design - def compile_applied_model(self, site): - self.apply_host_profile(site) - - self.applied['addressing'] = deepcopy(self.design['addressing']) - - self.apply_hardware_profile(site) - self.apply_network_connections(site) - return - - def init_build(self): - if self.build is None: - self.build = {} - self.build['status'] = NodeStatus.Unknown - - def apply_host_profile(self, site): - self.apply_inheritance(site) - return - - # Translate device alises to physical selectors and copy - # other hardware 
attributes into this object - def apply_hardware_profile(self, site): - if self.applied['hardware_profile'] is None: - raise ValueError("Hardware profile not set") - - hw_profile = site.get_hardware_profile( - self.applied['hardware_profile']) - - for i in self.applied.get('interfaces', []): - for s in i.get_applied_hw_slaves(): - selector = hw_profile.resolve_alias("pci", s) - if selector is None: - i.add_selector("name", address=s) - else: - i.add_selector("address", address=selector['address'], - dev_type=selector['device_type']) - - for p in self.applied.get('partitions', []): - selector = hw_profile.resolve_alias("scsi", - p.get_applied_device()) - if selector is None: - p.set_selector("name", - address=p.get_applied_device()) - else: - p.set_selector("address", address=selector['address'], - dev_type=selector['device_type']) - - - hardware = {"vendor": getattr(hw_profile, 'vendor', None), - "generation": getattr(hw_profile, 'generation', None), - "hw_version": getattr(hw_profile, 'hw_version', None), - "bios_version": getattr(hw_profile, 'bios_version', None), - "boot_mode": getattr(hw_profile, 'boot_mode', None), - "bootstrap_protocol": getattr(hw_profile, - 'bootstrap_protocol', - None), - "pxe_interface": getattr(hw_profile, 'pxe_interface', None) - } - - self.applied['hardware'] = hardware - - return - - def apply_network_connections(self, site): - for n in site.network_links: - for i in self.applied.get('interfaces', []): - i.apply_link_config(n) - - for n in site.networks: - for i in self.applied.get('interfaces', []): - i.apply_network_config(n) - - for a in self.applied.get('addressing', []): - for i in self.applied.get('interfaces', []): - i.set_network_address(a.get('network'), a.get('address')) - - return - - def get_applied_interface(self, iface_name): - if getattr(self, 'applied', None) is not None: - for i in self.applied.get('interfaces', []): - if i.get_name() == iface_name: - return i - - return None - - def get_status(self): - 
self.init_build() - return self.build.get('status', NodeStatus.Unknown) - - def set_status(self, status): - if isinstance(status, NodeStatus): - self.init_build() - self.build['status'] = status - - def get_last_build_action(self): - if getattr(self, 'build', None) is None: - return None - - return self.build.get('last_action', None) - - def set_last_build_action(self, action, result, detail=None): - self.init_build() - last_action = self.build.get('last_action', None) - if last_action is None: - self.build['last_action'] = {} - last_action = self.build['last_action'] - last_action['action'] = action - last_action['result'] = result - if detail is not None: - last_action['detail'] = detail - - def get_network_address(self, network_name): - if self.applied is None: - return None - - for a in self.applied.get('addressing', []): - if a.get('network', None) == network_name: - return a.get('address', None) - - return None diff --git a/helm_drydock/model/site.py b/helm_drydock/model/site.py deleted file mode 100644 index 0b6105f3..00000000 --- a/helm_drydock/model/site.py +++ /dev/null @@ -1,125 +0,0 @@ -# Copyright 2017 AT&T Intellectual Property. All other rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -# Models for helm_drydock -# -import logging - -from copy import deepcopy - -from helm_drydock.enum import SiteStatus -from helm_drydock.enum import NodeStatus - -class Site(object): - - def __init__(self, **kwargs): - self.log = logging.Logger('model') - - if kwargs is None: - raise ValueError("Empty arguments") - - self.api_version = kwargs.get('apiVersion', '') - - self.build = kwargs.get('build', {}) - - if self.api_version == "v1.0": - metadata = kwargs.get('metadata', {}) - - # Need to add validation logic, we'll assume the input is - # valid for now - self.name = metadata.get('name', '') - - spec = kwargs.get('spec', {}) - - self.tag_definitions = [] - tag_defs = spec.get('tag_definitions', []) - - for t in tag_defs: - self.tag_definitions.append( - NodeTagDefinition(self.api_version, **t)) - - self.networks = [] - self.network_links = [] - self.host_profiles = [] - self.hardware_profiles = [] - self.baremetal_nodes = [] - - else: - self.log.error("Unknown API version %s of %s" % - (self.api_version, self.__class__)) - raise ValueError('Unknown API version of object') - - def get_name(self): - return self.name - - def start_build(self): - if self.build.get('status', '') == '': - self.build['status'] = SiteStatus.Unknown - - def get_network(self, network_name): - for n in self.networks: - if n.name == network_name: - return n - - return None - - def get_network_link(self, link_name): - for l in self.network_links: - if l.name == link_name: - return l - - return None - - def get_host_profile(self, profile_name): - for p in self.host_profiles: - if p.name == profile_name: - return p - - return None - - def get_hardware_profile(self, profile_name): - for p in self.hardware_profiles: - if p.name == profile_name: - return p - - return None - - def get_baremetal_node(self, node_name): - for n in self.baremetal_nodes: - if n.name == node_name: - return n - - return None - - def set_status(self, status): - if isinstance(status, SiteStatus): - self.build['status'] 
= status - -class NodeTagDefinition(object): - - def __init__(self, api_version, **kwargs): - self.api_version = api_version - - if self.api_version == "v1.0": - self.tag = kwargs.get('tag', '') - self.definition_type = kwargs.get('definition_type', '') - self.definition = kwargs.get('definition', '') - - if self.definition_type not in ['lshw_xpath']: - raise ValueError('Unknown definition type in ' \ - 'NodeTagDefinition: %s' % (self.definition_type)) - else: - self.log.error("Unknown API version %s of %s" % - (self.api_version, self.__class__)) - raise ValueError('Unknown API version of object') \ No newline at end of file diff --git a/helm_drydock/model/__init__.py b/helm_drydock/objects/__init__.py similarity index 68% rename from helm_drydock/model/__init__.py rename to helm_drydock/objects/__init__.py index e682cefd..b88365d2 100644 --- a/helm_drydock/model/__init__.py +++ b/helm_drydock/objects/__init__.py @@ -18,6 +18,17 @@ import logging from copy import deepcopy + +def register_all(): + # NOTE(sh8121att) - Import all versioned objects so + # they are available via RPC. Any new object definitions + # need to be added here. + __import__('helm_drydock.objects.network') + __import__('helm_drydock.objects.node') + __import__('helm_drydock.objects.hostprofile') + __import__('helm_drydock.objects.hwprofile') + __import__('helm_drydock.objects.site') + # Utility class for calculating inheritance class Utils(object): @@ -74,18 +85,18 @@ class Utils(object): @staticmethod def merge_lists(child_list, parent_list): - if type(child_list) is not list or type(parent_list) is not list: - raise ValueError("One parameter is not a list") - effective_list = [] - # Probably should handle non-string values - effective_list.extend( - filter(lambda x: not x.startswith("!"), child_list)) + try: + # Probably should handle non-string values + effective_list.extend( + filter(lambda x: not x.startswith("!"), child_list)) - effective_list.extend( - filter(lambda x: ("!" 
+ x) not in child_list, - filter(lambda x: x not in effective_list, parent_list))) + effective_list.extend( + filter(lambda x: ("!" + x) not in child_list, + filter(lambda x: x not in effective_list, parent_list))) + except TypeError: + raise TypeError("Error iterating list argument") return effective_list @@ -107,21 +118,21 @@ class Utils(object): @staticmethod def merge_dicts(child_dict, parent_dict): - if type(child_dict) is not dict or type(parent_dict) is not dict: - raise ValueError("One parameter is not a dict") - effective_dict = {} - # Probably should handle non-string keys - use_keys = filter(lambda x: ("!" + x) not in child_dict.keys(), - parent_dict) + try: + # Probably should handle non-string keys + use_keys = filter(lambda x: ("!" + x) not in child_dict.keys(), + parent_dict) - for k in use_keys: - effective_dict[k] = deepcopy(parent_dict[k]) + for k in use_keys: + effective_dict[k] = deepcopy(parent_dict[k]) - use_keys = filter(lambda x: not x.startswith("!"), child_dict) - - for k in use_keys: - effective_dict[k] = deepcopy(child_dict[k]) + use_keys = filter(lambda x: not x.startswith("!"), child_dict) + for k in use_keys: + effective_dict[k] = deepcopy(child_dict[k]) + except TypeError: + raise TypeError("Error iterating dict argument") + return effective_dict diff --git a/helm_drydock/objects/base.py b/helm_drydock/objects/base.py new file mode 100644 index 00000000..f481d7c9 --- /dev/null +++ b/helm_drydock/objects/base.py @@ -0,0 +1,68 @@ +# Copyright 2017 AT&T Intellectual Property. All other rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from oslo_versionedobjects import base +from oslo_versionedobjects import fields as obj_fields + +import helm_drydock.objects as objects + +class DrydockObjectRegistry(base.VersionedObjectRegistry): + + # Steal this from Cinder to bring all registered objects + # into the helm_drydock.objects namespace + + def registration_hook(self, cls, index): + setattr(objects, cls.obj_name(), cls) + +class DrydockObject(base.VersionedObject): + + VERSION = '1.0' + + OBJ_PROJECT_NAMESPACE = 'helm_drydock.objects' + +class DrydockPersistentObject(base.VersionedObject): + + fields = { + 'created_at': obj_fields.DateTimeField(nullable=False), + 'created_by': obj_fields.StringField(nullable=False), + 'updated_at': obj_fields.DateTimeField(nullable=True), + 'updated_by': obj_fields.StringField(nullable=True), + } + +class DrydockObjectListBase(base.ObjectListBase): + + def __init__(self, **kwargs): + super(DrydockObjectListBase, self).__init__(**kwargs) + + def append(self, obj): + self.objects.append(obj) + + def replace_by_id(self, obj): + i = 0 + while i < len(self.objects): + if self.objects[i].get_id() == obj.get_id(): + self.objects[i] = obj + return True + i = i + 1 + + return False + + @classmethod + def from_basic_list(cls, obj_list): + model_list = cls() + + for o in obj_list: + model_list.append(o) + + return model_list diff --git a/helm_drydock/enum.py b/helm_drydock/objects/fields.py similarity index 53% rename from helm_drydock/enum.py rename to helm_drydock/objects/fields.py index ec14d666..cdcf152a 100644 --- a/helm_drydock/enum.py +++
b/helm_drydock/objects/fields.py @@ -11,10 +11,15 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from enum import Enum, unique -@unique -class OrchestratorAction(Enum): +from oslo_versionedobjects import fields + +class BaseDrydockEnum(fields.Enum): + def __init__(self): + super(BaseDrydockEnum, self).__init__(valid_values=self.__class__.ALL) + +class OrchestratorAction(BaseDrydockEnum): + # Orchestrator actions Noop = 'noop' ValidateDesign = 'validate_design' VerifySite = 'verify_site' @@ -24,8 +29,7 @@ class OrchestratorAction(Enum): DeployNode = 'deploy_node' DestroyNode = 'destroy_node' -@unique -class OobAction(Enum): + # OOB driver actions ConfigNodePxe = 'config_node_pxe' SetNodeBoot = 'set_node_boot' PowerOffNode = 'power_off_node' @@ -33,16 +37,53 @@ class OobAction(Enum): PowerCycleNode = 'power_cycle_node' InterrogateNode = 'interrogate_node' -@unique -class ActionResult(Enum): + ALL = (Noop, ValidateDesign, VerifySite, PrepareSite, VerifyNode, + PrepareNode, DeployNode, DestroyNode, ConfigNodePxe, + SetNodeBoot, PowerOffNode, PowerOnNode, PowerCycleNode, + InterrogateNode) + +class OrchestratorActionField(fields.BaseEnumField): + AUTO_TYPE = OrchestratorAction() + +class ActionResult(BaseDrydockEnum): Incomplete = 'incomplete' Success = 'success' PartialSuccess = 'partial_success' Failure = 'failure' DependentFailure = 'dependent_failure' -@unique -class SiteStatus(Enum): + ALL = (Incomplete, Success, PartialSuccess, Failure) + +class ActionResultField(fields.BaseEnumField): + AUTO_TYPE = ActionResult() + +class TaskStatus(BaseDrydockEnum): + Created = 'created' + Waiting = 'waiting' + Running = 'running' + Stopping = 'stopping' + Terminated = 'terminated' + Errored = 'errored' + Complete = 'complete' + Stopped = 'stopped' + + ALL = (Created, Waiting, Running, Stopping, Terminated, + Errored, Complete, Stopped) + +class 
TaskStatusField(fields.BaseEnumField): + AUTO_TYPE = TaskStatus() + +class ModelSource(BaseDrydockEnum): + Designed = 'designed' + Compiled = 'compiled' + Build = 'build' + + ALL = (Designed, Compiled, Build) + +class ModelSourceField(fields.BaseEnumField): + AUTO_TYPE = ModelSource() + +class SiteStatus(BaseDrydockEnum): Unknown = 'unknown' DesignStarted = 'design_started' DesignAvailable = 'design_available' @@ -51,10 +92,15 @@ class SiteStatus(Enum): Deployed = 'deployed' DesignUpdated = 'design_updated' -@unique -class NodeStatus(Enum): + ALL = (Unknown, Deploying, Deployed) + +class SiteStatusField(fields.BaseEnumField): + AUTO_TYPE = SiteStatus() + +class NodeStatus(BaseDrydockEnum): Unknown = 'unknown' Designed = 'designed' + Compiled = 'compiled' # Node attributes represent effective config after inheritance/merge Present = 'present' # IPMI access verified BasicVerifying = 'basic_verifying' # Base node verification in process FailedBasicVerify = 'failed_basic_verify' # Base node verification failed @@ -73,13 +119,31 @@ class NodeStatus(Enum): Bootstrapped = 'bootstrapped' # Node fully bootstrapped Complete = 'complete' # Node is complete -@unique -class TaskStatus(Enum): - Created = 'created' - Waiting = 'waiting' - Running = 'running' - Stopping = 'stopping' - Terminated = 'terminated' - Errored = 'errored' - Complete = 'complete' - Stopped = 'stopped' \ No newline at end of file + ALL = (Unknown, Designed, Compiled, Present, BasicVerifying, FailedBasicVerify, + BasicVerified, Preparing, FailedPrepare, Prepared, FullyVerifying, + FailedFullVerify, FullyVerified, Deploying, FailedDeploy, Deployed, + Bootstrapping, FailedBootstrap, Bootstrapped, Complete) + + +class NodeStatusField(fields.BaseEnumField): + AUTO_TYPE = NodeStatus() + +class NetworkLinkBondingMode(BaseDrydockEnum): + Disabled = 'disabled' + LACP = '802.3ad' + RoundRobin = 'balanced-rr' + Standby = 'active-backup' + + ALL = (Disabled, LACP, RoundRobin, Standby) + +class 
NetworkLinkBondingModeField(fields.BaseEnumField): + AUTO_TYPE = NetworkLinkBondingMode() + +class NetworkLinkTrunkingMode(BaseDrydockEnum): + Disabled = 'disabled' + Tagged = '802.1q' + + ALL = (Disabled, Tagged) + +class NetworkLinkTrunkingModeField(fields.BaseEnumField): + AUTO_TYPE = NetworkLinkTrunkingMode() diff --git a/helm_drydock/objects/hostprofile.py b/helm_drydock/objects/hostprofile.py new file mode 100644 index 00000000..8e822205 --- /dev/null +++ b/helm_drydock/objects/hostprofile.py @@ -0,0 +1,381 @@ +# Copyright 2017 AT&T Intellectual Property. All other rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from copy import deepcopy + +import oslo_versionedobjects.fields as obj_fields + +import helm_drydock.objects as objects +import helm_drydock.objects.base as base +import helm_drydock.objects.fields as hd_fields + + +@base.DrydockObjectRegistry.register +class HostProfile(base.DrydockPersistentObject, base.DrydockObject): + + VERSION = '1.0' + + fields = { + 'name': obj_fields.StringField(nullable=False), + 'site': obj_fields.StringField(nullable=False), + 'source': hd_fields.ModelSourceField(nullable=False), + 'parent_profile': obj_fields.StringField(nullable=True), + 'hardware_profile': obj_fields.StringField(nullable=True), + 'oob_type': obj_fields.StringField(nullable=True), + 'oob_network': obj_fields.StringField(nullable=True), + 'oob_account': obj_fields.StringField(nullable=True), + 'oob_credential': obj_fields.StringField(nullable=True), + 'storage_layout': obj_fields.StringField(nullable=True), + 'bootdisk_device': obj_fields.StringField(nullable=True), + # Consider a custom field for storage size + 'bootdisk_root_size': obj_fields.StringField(nullable=True), + 'bootdisk_boot_size': obj_fields.StringField(nullable=True), + 'partitions': obj_fields.ObjectField('HostPartitionList', + nullable=True), + 'interfaces': obj_fields.ObjectField('HostInterfaceList', + nullable=True), + 'tags': obj_fields.ListOfStringsField(nullable=True), + 'owner_data': obj_fields.DictOfStringsField(nullable=True), + 'rack': obj_fields.StringField(nullable=True), + } + + def __init__(self, **kwargs): + super(HostProfile, self).__init__(**kwargs) + + + def get_rack(self): + return self.rack + + # HostProfile is keyed by name + def get_id(self): + return self.get_name() + + def get_name(self): + return self.name + + def has_tag(self, tag): + if tag in self.tags: + return True + + return False + + def apply_inheritance(self, site_design): + # No parent to inherit from, just apply design values + # and return + if self.parent_profile is None: + self.source = 
hd_fields.ModelSource.Compiled + return + + parent = site_design.get_host_profile(self.parent_profile) + + if parent is None: + raise NameError("Cannot find parent profile %s for %s" + % (self.parent_profile, self.name)) + + parent.apply_inheritance(site_design) + + # First compute inheritance for simple fields + inheritable_field_list = [ + "hardware_profile", "oob_type", "oob_network", + "oob_credential", "oob_account", "storage_layout", + "bootdisk_device", "bootdisk_root_size", "bootdisk_boot_size", + "rack"] + + # Create applied data from self design values and parent + # applied values + + for f in inheritable_field_list: + setattr(self, f, objects.Utils.apply_field_inheritance( + getattr(self, f, None), + getattr(parent, f, None))) + + # Now compute inheritance for complex types + self.tags = objects.Utils.merge_lists(self.tags, parent.tags) + + self.owner_data = objects.Utils.merge_dicts(self.owner_data, parent.owner_data) + + self.interfaces = HostInterfaceList.from_basic_list( + HostInterface.merge_lists(self.interfaces, parent.interfaces)) + + self.partitions = HostPartitionList.from_basic_list( + HostPartition.merge_lists(self.partitions, parent.partitions)) + + self.source = hd_fields.ModelSource.Compiled + + return + +@base.DrydockObjectRegistry.register +class HostProfileList(base.DrydockObjectListBase, base.DrydockObject): + + VERSION = '1.0' + + fields = { + 'objects': obj_fields.ListOfObjectsField('HostProfile') + } + +@base.DrydockObjectRegistry.register +class HostInterface(base.DrydockObject): + + VERSION = '1.0' + + fields = { + 'device_name': obj_fields.StringField(), + 'source': hd_fields.ModelSourceField(), + 'network_link': obj_fields.StringField(nullable=True), + 'hardware_slaves': obj_fields.ListOfStringsField(nullable=True), + 'slave_selectors': obj_fields.ObjectField('HardwareDeviceSelectorList', + nullable=True), + 'networks': obj_fields.ListOfStringsField(nullable=True), + } + + def __init__(self, **kwargs): + 
super(HostInterface, self).__init__(**kwargs) + + # HostInterface is keyed by device_name + def get_id(self): + return self.get_name() + + def get_name(self): + return self.device_name + + def get_hw_slaves(self): + return self.hardware_slaves + + def get_slave_selectors(self): + return self.slave_selectors + + # Return number of slaves for this interface + def get_slave_count(self): + return len(self.hardware_slaves) + + # The device attribute may be hardware alias that translates to a + # physical device address. If the device attribute does not match an + # alias, we assume it directly identifies a OS device name. When the + # apply_hardware_profile method is called on the parent Node of this + # device, the selector will be decided and applied + + def add_selector(self, slave_selector): + if self.slave_selectors is None: + self.slave_selectors = objects.HardwareDeviceSelectorList() + + self.slave_selectors.append(slave_selector) + + """ + Merge two lists of HostInterface models with child_list taking + priority when conflicts. If a member of child_list has a device_name + beginning with '!' it indicates that HostInterface should be + removed from the merged list + """ + + @staticmethod + def merge_lists(child_list, parent_list): + effective_list = [] + + if len(child_list) == 0 and len(parent_list) > 0: + for p in parent_list: + pp = deepcopy(p) + pp.source = hd_obj_fields.ModelSource.Compiled + effective_list.append(pp) + elif len(parent_list) == 0 and len(child_list) > 0: + for i in child_list: + if i.get_name().startswith('!'): + continue + else: + ii = deepcopy(i) + ii.source = hd_obj_fields.ModelSource.Compiled + effective_list.append(ii) + elif len(parent_list) > 0 and len(child_list) > 0: + parent_interfaces = [] + for i in parent_list: + parent_name = i.get_name() + parent_interfaces.append(parent_name) + add = True + for j in child_list: + if j.get_name() == ("!" 
+ parent_name): + add = False + break + elif j.get_name() == parent_name: + m = objects.HostInterface() + m.device_name = j.get_name() + m.network_link = \ + objects.Utils.apply_field_inheritance( + getattr(j, 'network_link', None), + getattr(i, 'network_link', None)) + + s = [x for x + in getattr(i, 'hardware_slaves', []) + if ("!" + x) not in getattr(j, 'hardware_slaves', [])] + + s.extend( + [x for x + in getattr(j, 'hardware_slaves', []) + if not x.startswith("!")]) + + m.hardware_slaves = s + + n = [x for x + in getattr(i, 'networks',[]) + if ("!" + x) not in getattr(j, 'networks', [])] + + n.extend( + [x for x + in getattr(j, 'networks', []) + if not x.startswith("!")]) + + m.networks = n + m.source = hd_obj_fields.ModelSource.Compiled + + effective_list.append(m) + add = False + break + + if add: + ii = deepcopy(i) + ii.source = hd_obj_fields.ModelSource.Compiled + effective_list.append(ii) + + for j in child_list: + if (j.device_name not in parent_interfaces + and not j.get_name().startswith("!")): + jj = deepcopy(j) + jj.source = hd_obj_fields.ModelSource.Compiled + effective_list.append(jj) + + return effective_list + +@base.DrydockObjectRegistry.register +class HostInterfaceList(base.DrydockObjectListBase, base.DrydockObject): + + VERSION = '1.0' + + fields = { + 'objects': obj_fields.ListOfObjectsField('HostInterface') + } + +@base.DrydockObjectRegistry.register +class HostPartition(base.DrydockObject): + + VERSION = '1.0' + + fields = { + 'name': obj_fields.StringField(), + 'source': hd_fields.ModelSourceField(), + 'device': obj_fields.StringField(nullable=True), + 'part_uuid': obj_fields.UUIDField(nullable=True), + 'size': obj_fields.StringField(nullable=True), + 'mountpoint': obj_fields.StringField(nullable=True), + 'fstype': obj_fields.StringField(nullable=True, default='ext4'), + 'mount_options': obj_fields.StringField(nullable=True, default='defaults'), + 'fs_uuid': obj_fields.UUIDField(nullable=True), + 'fs_label': 
obj_fields.StringField(nullable=True), + 'selector': obj_fields.ObjectField('HardwareDeviceSelector', + nullable=True), + } + + def __init__(self, **kwargs): + super(HostPartition, self).__init__(**kwargs) + + def get_device(self): + return self.device + + # HostPartition keyed by name + def get_id(self): + return self.get_name() + + def get_name(self): + return self.name + + # The device attribute may be hardware alias that translates to a + # physical device address. If the device attribute does not match an + # alias, we assume it directly identifies a OS device name. When the + # apply_hardware_profile method is called on the parent Node of this + # device, the selector will be decided and applied + + def set_selector(self, selector): + self.selector = selector + + def get_selector(self): + return self.selector + + """ + Merge two lists of HostPartition models with child_list taking + priority when conflicts. If a member of child_list has a name + beginning with '!' it indicates that HostPartition should be + removed from the merged list + """ + + @staticmethod + def merge_lists(child_list, parent_list): + effective_list = [] + + if len(child_list) == 0 and len(parent_list) > 0: + for p in parent_list: + pp = deepcopy(p) + pp.source = hd_fields.ModelSource.Compiled + effective_list.append(pp) + elif len(parent_list) == 0 and len(child_list) > 0: + for i in child_list: + if i.get_name().startswith('!'): + continue + else: + ii = deepcopy(i) + ii.source = hd_fields.ModelSource.Compiled + effective_list.append(ii) + elif len(parent_list) > 0 and len(child_list) > 0: + inherit_field_list = ["device", "part_uuid", "size", + "mountpoint", "fstype", "mount_options", + "fs_uuid", "fs_label"] + parent_partitions = [] + for i in parent_list: + parent_name = i.get_name() + parent_partitions.append(parent_name) + add = True + for j in child_list: + if j.get_name() == ("!" 
+ parent_name): + add = False + break + elif j.get_name() == parent_name: + p = objects.HostPartition() + p.name = j.get_name() + + for f in inherit_field_list: + setattr(p, f, + objects.Utils.apply_field_inheritance(getattr(j, f, None), + getattr(i, f, None))) + add = False + p.source = hd_fields.ModelSource.Compiled + effective_list.append(p) + if add: + ii = deepcopy(i) + ii.source = hd_fields.ModelSource.Compiled + effective_list.append(ii) + + for j in child_list: + if (j.get_name() not in parent_list and + not j.get_name().startswith("!")): + jj = deepcopy(j) + jj.source = hd_fields.ModelSource.Compiled + effective_list.append(jj) + + return effective_list + + +@base.DrydockObjectRegistry.register +class HostPartitionList(base.DrydockObjectListBase, base.DrydockObject): + + VERSION = '1.0' + + fields = { + 'objects': obj_fields.ListOfObjectsField('HostPartition') + } diff --git a/helm_drydock/objects/hwprofile.py b/helm_drydock/objects/hwprofile.py new file mode 100644 index 00000000..3ff5afbe --- /dev/null +++ b/helm_drydock/objects/hwprofile.py @@ -0,0 +1,125 @@ +# Copyright 2017 AT&T Intellectual Property. All other rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from copy import deepcopy + +from oslo_versionedobjects import fields as ovo_fields + +import helm_drydock.objects as objects +import helm_drydock.objects.base as base +import helm_drydock.objects.fields as hd_fields + +@base.DrydockObjectRegistry.register +class HardwareProfile(base.DrydockPersistentObject, base.DrydockObject): + + VERSION = '1.0' + + fields = { + 'name': ovo_fields.StringField(), + 'source': hd_fields.ModelSourceField(), + 'site': ovo_fields.StringField(), + 'vendor': ovo_fields.StringField(nullable=True), + 'generation': ovo_fields.StringField(nullable=True), + 'hw_version': ovo_fields.StringField(nullable=True), + 'bios_version': ovo_fields.StringField(nullable=True), + 'boot_mode': ovo_fields.StringField(nullable=True), + 'bootstrap_protocol': ovo_fields.StringField(nullable=True), + 'pxe_interface': ovo_fields.StringField(nullable=True), + 'devices': ovo_fields.ObjectField('HardwareDeviceAliasList', + nullable=True), + } + + def __init__(self, **kwargs): + super(HardwareProfile, self).__init__(**kwargs) + + return + + # HardwareProfile keyed on name + def get_id(self): + return self.get_name() + + def get_name(self): + return self.name + + def resolve_alias(self, alias_type, alias): + for d in self.devices: + if d.alias == alias and d.bus_type == alias_type: + selector = objects.HardwareDeviceSelector() + selector.selector_type = "address" + selector.address = d.address + selector.device_type = d.dev_type + return selector + + return None + +@base.DrydockObjectRegistry.register +class HardwareProfileList(base.DrydockObjectListBase, base.DrydockObject): + + VERSION = '1.0' + + fields = { + 'objects': ovo_fields.ListOfObjectsField('HardwareProfile') + } + +@base.DrydockObjectRegistry.register +class HardwareDeviceAlias(base.DrydockObject): + + VERSION = '1.0' + + fields = { + 'alias': ovo_fields.StringField(), + 'source': hd_fields.ModelSourceField(), + 'address': ovo_fields.StringField(), + 'bus_type': ovo_fields.StringField(), + 
'dev_type': ovo_fields.StringField(nullable=True), + } + + def __init__(self, **kwargs): + super(HardwareDeviceAlias, self).__init__(**kwargs) + + # HardwareDeviceAlias keyed on alias + def get_id(self): + return self.alias + +@base.DrydockObjectRegistry.register +class HardwareDeviceAliasList(base.DrydockObjectListBase, base.DrydockObject): + + VERSION = '1.0' + + fields = { + 'objects': ovo_fields.ListOfObjectsField('HardwareDeviceAlias') + } + +@base.DrydockObjectRegistry.register +class HardwareDeviceSelector(base.DrydockObject): + + VERSION = '1.0' + + fields = { + 'selector_type': ovo_fields.StringField(), + 'address': ovo_fields.StringField(), + 'device_type': ovo_fields.StringField() + } + + def __init__(self, **kwargs): + super(HardwareDeviceSelector, self).__init__(**kwargs) + +@base.DrydockObjectRegistry.register +class HardwareDeviceSelectorList(base.DrydockObjectListBase, base.DrydockObject): + + VERSION = '1.0' + + fields = { + 'objects': ovo_fields.ListOfObjectsField('HardwareDeviceSelector') + } \ No newline at end of file diff --git a/helm_drydock/objects/network.py b/helm_drydock/objects/network.py new file mode 100644 index 00000000..e2e0334f --- /dev/null +++ b/helm_drydock/objects/network.py @@ -0,0 +1,109 @@ +# Copyright 2017 AT&T Intellectual Property. All other rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Models for helm_drydock +# +import logging + +from copy import deepcopy + +import oslo_versionedobjects.fields as ovo_fields + +import helm_drydock.objects as objects +import helm_drydock.objects.base as base +import helm_drydock.objects.fields as hd_fields + +@base.DrydockObjectRegistry.register +class NetworkLink(base.DrydockPersistentObject, base.DrydockObject): + + VERSION = '1.0' + + fields = { + 'name': ovo_fields.StringField(), + 'site': ovo_fields.StringField(), + 'bonding_mode': hd_fields.NetworkLinkBondingModeField( + default=hd_fields.NetworkLinkBondingMode.Disabled), + 'bonding_xmit_hash': ovo_fields.StringField(nullable=True), + 'bonding_peer_rate': ovo_fields.StringField(nullable=True), + 'bonding_mon_rate': ovo_fields.IntegerField(nullable=True), + 'bonding_up_delay': ovo_fields.IntegerField(nullable=True), + 'bonding_down_delay': ovo_fields.IntegerField(nullable=True), + 'mtu': ovo_fields.IntegerField(default=1500), + 'linkspeed': ovo_fields.StringField(default='auto'), + 'trunk_mode': hd_fields.NetworkLinkTrunkingModeField( + default=hd_fields.NetworkLinkTrunkingMode.Disabled), + 'native_network': ovo_fields.StringField(nullable=True), + } + + def __init__(self, **kwargs): + super(NetworkLink, self).__init__(**kwargs) + + # NetworkLink keyed by name + def get_id(self): + return self.get_name() + + def get_name(self): + return self.name + + +@base.DrydockObjectRegistry.register +class NetworkLinkList(base.DrydockObjectListBase, base.DrydockObject): + + VERSION = '1.0' + + fields = { + 'objects': ovo_fields.ListOfObjectsField('NetworkLink'), + } + + +@base.DrydockObjectRegistry.register +class Network(base.DrydockPersistentObject, base.DrydockObject): + + VERSION = '1.0' + + fields = { + 'name': ovo_fields.StringField(), + 'site': ovo_fields.StringField(), + 'cidr': ovo_fields.StringField(), + 'allocation_strategy': ovo_fields.StringField(), + 'vlan_id': ovo_fields.StringField(nullable=True), + 'mtu': ovo_fields.IntegerField(nullable=True), + 
'dns_domain': ovo_fields.StringField(nullable=True), + 'dns_servers': ovo_fields.StringField(nullable=True), + 'ranges': ovo_fields.ListOfDictOfNullableStringsField(), + 'routes': ovo_fields.ListOfDictOfNullableStringsField(), + } + + def __init__(self, **kwargs): + super(Network, self).__init__(**kwargs) + + # Network keyed on name + def get_id(self): + return self.get_name() + + def get_name(self): + return self.name + + +@base.DrydockObjectRegistry.register +class NetworkList(base.DrydockObjectListBase, base.DrydockObject): + + VERSION = '1.0' + + fields = { + 'objects': ovo_fields.ListOfObjectsField('Network'), + } + + def __init__(self, **kwargs): + super(NetworkList, self).__init__(**kwargs) \ No newline at end of file diff --git a/helm_drydock/objects/node.py b/helm_drydock/objects/node.py new file mode 100644 index 00000000..1d353576 --- /dev/null +++ b/helm_drydock/objects/node.py @@ -0,0 +1,134 @@ +# Copyright 2017 AT&T Intellectual Property. All other rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Models for helm_drydock +# +import logging + +from copy import deepcopy + +from oslo_versionedobjects import fields as ovo_fields + +import helm_drydock.objects as objects +import helm_drydock.objects.hostprofile +import helm_drydock.objects.base as base +import helm_drydock.objects.fields as hd_fields + +@base.DrydockObjectRegistry.register +class BaremetalNode(helm_drydock.objects.hostprofile.HostProfile): + + VERSION = '1.0' + + fields = { + 'addressing': ovo_fields.ObjectField('IpAddressAssignmentList') + } + + # A BaremetalNode is really nothing more than a physical + # instantiation of a HostProfile, so they both represent + # the same set of CIs + def __init__(self, **kwargs): + super(BaremetalNode, self).__init__(**kwargs) + + # Compile the applied version of this model sourcing referenced + # data from the passed site design + def compile_applied_model(self, site_design): + self.apply_host_profile(site_design) + self.apply_hardware_profile(site_design) + self.source = hd_fields.ModelSource.Compiled + return + + def apply_host_profile(self, site_design): + self.apply_inheritance(site_design) + return + + # Translate device alises to physical selectors and copy + # other hardware attributes into this object + def apply_hardware_profile(self, site_design): + if self.hardware_profile is None: + raise ValueError("Hardware profile not set") + + hw_profile = site_design.get_hardware_profile(self.hardware_profile) + + for i in getattr(self, 'interfaces', []): + for s in i.get_hw_slaves(): + selector = hw_profile.resolve_alias("pci", s) + if selector is None: + selector = objects.HardwareDeviceSelector() + selector.selector_type = 'name' + selector.address = s + + i.add_selector(selector) + + for p in getattr(self, 'partitions', []): + selector = hw_profile.resolve_alias("scsi", p.get_device()) + if selector is None: + selector = objects.HardwareDeviceSelector() + selector.selector_type = 'name' + selector.address = p.get_device() + p.set_selector(selector) + 
+ return + + def get_applied_interface(self, iface_name): + for i in getattr(self, 'interfaces', []): + if i.get_name() == iface_name: + return i + + return None + + + def get_network_address(self, network_name): + for a in getattr(self, 'addressing', []): + if a.network == network_name: + return a.address + + return None + + +@base.DrydockObjectRegistry.register +class BaremetalNodeList(base.DrydockObjectListBase, base.DrydockObject): + + VERSION = '1.0' + + fields = { + 'objects': ovo_fields.ListOfObjectsField('BaremetalNode') + } + + +@base.DrydockObjectRegistry.register +class IpAddressAssignment(base.DrydockObject): + + VERSION = '1.0' + + fields = { + 'type': ovo_fields.StringField(), + 'address': ovo_fields.StringField(nullable=True), + 'network': ovo_fields.StringField(), + } + + def __init__(self, **kwargs): + super(IpAddressAssignment, self).__init__(**kwargs) + + # IpAddressAssignment keyed by network + def get_id(self): + return self.network + +@base.DrydockObjectRegistry.register +class IpAddressAssignmentList(base.DrydockObjectListBase, base.DrydockObject): + + VERSION = '1.0' + + fields = { + 'objects': ovo_fields.ListOfObjectsField('IpAddressAssignment') + } \ No newline at end of file diff --git a/helm_drydock/model/readme.md b/helm_drydock/objects/readme.md similarity index 51% rename from helm_drydock/model/readme.md rename to helm_drydock/objects/readme.md index 61966ec8..58a5f34a 100644 --- a/helm_drydock/model/readme.md +++ b/helm_drydock/objects/readme.md @@ -1,6 +1,7 @@ # Drydock Model # -Models for the drydock design parts and subparts +Object models for the drydock design parts and subparts. We use oslo.versionedobjects as the supporting library for object management +to support RPC and versioned persistence. ## Features ## @@ -25,13 +26,10 @@ based on the name. 
### Phased Data ### -In other words, as a modeled object goes from design to apply -to build the model keeps the data separated to retain reference -values and provide context around particular attribute values. +The *source* of the data in a object instance can be one of three +types. -* Design - The data ingested from sources such as Formation -* Apply - Computing inheritance of design data to render an effective site design -* Build - Maintaining actions taken to implement the design and the results - -Currently only applies to BaremetalNodes as no other design parts -flow through the build process. \ No newline at end of file +* Designed - This is data directly ingested by Drydock representing a design part (Site, HostProfile, etc...) supplied by an external source +* Compiled - This is designed data that has been processed through the Drydock +inheritance / merge system. It is the effective design that will be implemented. +* Build - This is the result of actual implementation. It should basically match the compiled view of the model, but might have some additional information only available after implementation. \ No newline at end of file diff --git a/helm_drydock/objects/site.py b/helm_drydock/objects/site.py new file mode 100644 index 00000000..8b30e2e3 --- /dev/null +++ b/helm_drydock/objects/site.py @@ -0,0 +1,244 @@ +# Copyright 2017 AT&T Intellectual Property. All other rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Models for helm_drydock +# +from copy import deepcopy +import uuid + +import oslo_versionedobjects.fields as ovo_fields + +import helm_drydock.objects as objects +import helm_drydock.objects.base as base +import helm_drydock.objects.fields as hd_fields + + +@base.DrydockObjectRegistry.register +class Site(base.DrydockPersistentObject, base.DrydockObject): + + VERSION = '1.0' + + fields = { + 'name': ovo_fields.StringField(), + 'status': hd_fields.SiteStatusField(default=hd_fields.SiteStatus.Unknown), + 'source': hd_fields.ModelSourceField(), + 'tag_definitions': ovo_fields.ObjectField('NodeTagDefinitionList', + nullable=True), + } + + def __init__(self, **kwargs): + super(Site, self).__init__(**kwargs) + + def get_id(self): + return self.name + + def get_name(self): + return self.name + + def add_tag_definition(self, tag_definition): + self.tag_definitions.append(tag_definition) + +@base.DrydockObjectRegistry.register +class NodeTagDefinition(base.DrydockObject): + + VERSION = '1.0' + + fields = { + 'tag': ovo_fields.StringField(), + 'type': ovo_fields.StringField(), + 'definition': ovo_fields.StringField(), + 'source': hd_fields.ModelSourceField(), + } + + def __init__(self, **kwargs): + super(NodeTagDefinition, self).__init__(**kwargs) + + # TagDefinition keyed by tag + def get_id(self): + return self.tag + +@base.DrydockObjectRegistry.register +class NodeTagDefinitionList(base.DrydockObjectListBase, base.DrydockObject): + + VERSION = '1.0' + + fields = { + 'objects': ovo_fields.ListOfObjectsField('NodeTagDefinition'), + } + +@base.DrydockObjectRegistry.register +class SiteDesign(base.DrydockPersistentObject, base.DrydockObject): + + VERSION = '1.0' + + fields = { + 'id': ovo_fields.UUIDField(), + # if null, indicates this is the site base design + 'base_design_id': ovo_fields.UUIDField(nullable=True), + 'source': hd_fields.ModelSourceField(), + 'site': ovo_fields.ObjectField('Site', nullable=True), + 'networks': ovo_fields.ObjectField('NetworkList', 
nullable=True), + 'network_links': ovo_fields.ObjectField('NetworkLinkList', nullable=True), + 'host_profiles': ovo_fields.ObjectField('HostProfileList', nullable=True), + 'hardware_profiles': ovo_fields.ObjectField('HardwareProfileList', nullable=True), + 'baremetal_nodes': ovo_fields.ObjectField('BaremetalNodeList', nullable=True), + } + + def __init__(self, **kwargs): + super(SiteDesign, self).__init__(**kwargs) + + # Initialize lists for blank instances + def obj_load_attr(self, attrname): + if attrname in self.fields.keys(): + setattr(self, attrname, None) + else: + raise ValueError("Unknown field %s" % (attrname)) + + # Assign UUID id + def assign_id(self): + self.id = uuid.uuid4() + return self.id + + # SiteDesign Keyed by id + def get_id(self): + return self.id + + def get_site(self): + return self.site + + def set_site(self, site): + self.site = site + + def add_network(self, new_network): + if new_network is None: + raise DesignError("Invalid Network model") + + if self.networks is None: + self.networks = objects.NetworkList() + + self.networks.append(new_network) + + def get_network(self, network_key): + for n in self.networks: + if n.get_id() == network_key: + return n + + raise DesignError("Network %s not found in design state" + % network_key) + + def add_network_link(self, new_network_link): + if new_network_link is None: + raise DesignError("Invalid NetworkLink model") + + if self.network_links is None: + self.network_links = objects.NetworkLinkList() + + self.network_links.append(new_network_link) + + def get_network_link(self, link_key): + for l in self.network_links: + if l.get_id() == link_key: + return l + + raise DesignError("NetworkLink %s not found in design state" + % link_key) + + def add_host_profile(self, new_host_profile): + if new_host_profile is None: + raise DesignError("Invalid HostProfile model") + + if self.host_profiles is None: + self.host_profiles = objects.HostProfileList() + + self.host_profiles.append(new_host_profile) + + 
def get_host_profile(self, profile_key): + for p in self.host_profiles: + if p.get_id() == profile_key: + return p + + raise DesignError("HostProfile %s not found in design state" + % profile_key) + + def add_hardware_profile(self, new_hardware_profile): + if new_hardware_profile is None: + raise DesignError("Invalid HardwareProfile model") + + if self.hardware_profiles is None: + self.hardware_profiles = objects.HardwareProfileList() + + self.hardware_profiles.append(new_hardware_profile) + + def get_hardware_profile(self, profile_key): + for p in self.hardware_profiles: + if p.get_id() == profile_key: + return p + + raise DesignError("HardwareProfile %s not found in design state" + % profile_key) + + def add_baremetal_node(self, new_baremetal_node): + if new_baremetal_node is None: + raise DesignError("Invalid BaremetalNode model") + + if self.baremetal_nodes is None: + self.baremetal_nodes = objects.BaremetalNodeList() + + self.baremetal_nodes.append(new_baremetal_node) + + def get_baremetal_node(self, node_key): + for n in self.baremetal_nodes: + if n.get_id() == node_key: + return n + + raise DesignError("BaremetalNode %s not found in design state" + % node_key) + + """ + Support filtering on rack name, node name or node tag + for now. Each filter can be a comma-delimited list of + values. 
The final result is an intersection of all the + filters + """ + def get_filtered_nodes(self, node_filter): + effective_nodes = self.baremetal_nodes + + # filter by rack + rack_filter = node_filter.get('rackname', None) + + if rack_filter is not None: + rack_list = rack_filter.split(',') + effective_nodes = [x + for x in effective_nodes + if x.get_rack() in rack_list] + # filter by name + name_filter = node_filter.get('nodename', None) + + if name_filter is not None: + name_list = name_filter.split(',') + effective_nodes = [x + for x in effective_nodes + if x.get_name() in name_list] + # filter by tag + tag_filter = node_filter.get('tags', None) + + if tag_filter is not None: + tag_list = tag_filter.split(',') + effective_nodes = [x + for x in effective_nodes + for t in tag_list + if x.has_tag(t)] + + return effective_nodes + diff --git a/helm_drydock/model/task.py b/helm_drydock/objects/task.py similarity index 82% rename from helm_drydock/model/task.py rename to helm_drydock/objects/task.py index ff6fb922..0c05e678 100644 --- a/helm_drydock/model/task.py +++ b/helm_drydock/objects/task.py @@ -17,19 +17,19 @@ from threading import Lock import helm_drydock.error as errors -import helm_drydock.enum as enum +import helm_drydock.objects.fields as hd_fields class Task(object): def __init__(self, **kwargs): self.task_id = uuid.uuid4() - self.status = enum.TaskStatus.Created + self.status = hd_fields.TaskStatus.Created self.terminate = False self.subtasks = [] self.lock_id = None - self.result = enum.ActionResult.Incomplete + self.result = hd_fields.ActionResult.Incomplete self.result_detail = None - self.action = kwargs.get('action', enum.OrchestratorAction.Noop) + self.action = kwargs.get('action', hd_fields.OrchestratorAction.Noop) self.parent_task_id = kwargs.get('parent_task_id','') @@ -79,10 +79,10 @@ class OrchestratorTask(Task): self.design_id = kwargs.get('design_id', 0) - if self.action in [enum.OrchestratorAction.VerifyNode, - 
enum.OrchestratorAction.PrepareNode, - enum.OrchestratorAction.DeployNode, - enum.OrchestratorAction.DestroyNode]: + if self.action in [hd_fields.OrchestratorAction.VerifyNode, + hd_fields.OrchestratorAction.PrepareNode, + hd_fields.OrchestratorAction.DeployNode, + hd_fields.OrchestratorAction.DestroyNode]: self.node_filter = kwargs.get('node_filter', None) diff --git a/helm_drydock/orchestrator/__init__.py b/helm_drydock/orchestrator/__init__.py index cc01924f..28def977 100644 --- a/helm_drydock/orchestrator/__init__.py +++ b/helm_drydock/orchestrator/__init__.py @@ -16,13 +16,12 @@ import time import threading import importlib -from enum import Enum, unique from copy import deepcopy import helm_drydock.drivers as drivers -import helm_drydock.model.task as tasks +import helm_drydock.objects.task as tasks import helm_drydock.error as errors -import helm_drydock.enum as enum +import helm_drydock.objects.fields as hd_fields class Orchestrator(object): @@ -86,13 +85,13 @@ class Orchestrator(object): # Just for testing now, need to implement with enabled_drivers # logic - if task.action == enum.OrchestratorAction.Noop: + if task.action == hd_fields.OrchestratorAction.Noop: self.task_field_update(task_id, - status=enum.TaskStatus.Running) + status=hd_fields.TaskStatus.Running) driver_task = self.create_task(tasks.DriverTask, design_id=0, - action=enum.OrchestratorAction.Noop, + action=hd_fields.OrchestratorAction.Noop, parent_task_id=task.get_id()) driver = drivers.ProviderDriver(state_manager=self.state_manager, @@ -103,34 +102,33 @@ class Orchestrator(object): self.task_field_update(task_id, status=driver_task.get_status()) return - elif task.action == enum.OrchestratorAction.ValidateDesign: + elif task.action == hd_fields.OrchestratorAction.ValidateDesign: self.task_field_update(task_id, - status=enum.TaskStatus.Running) + status=hd_fields.TaskStatus.Running) try: site_design = self.get_effective_site(task_site, change_id=design_id) self.task_field_update(task_id, - 
result=enum.ActionResult.Success) + result=hd_fields.ActionResult.Success) except: self.task_field_update(task_id, - result=enum.ActionResult.Failure) + result=hd_fields.ActionResult.Failure) - self.task_field_update(task_id, status=enum.TaskStatus.Complete) + self.task_field_update(task_id, status=hd_fields.TaskStatus.Complete) return - elif task.action == enum.OrchestratorAction.VerifyNode: + elif task.action == hd_fields.OrchestratorAction.VerifyNode: self.task_field_update(task_id, - status=enum.TaskStatus.Running) + status=hd_fields.TaskStatus.Running) driver = self.enabled_drivers['oob'] if driver is None: self.task_field_update(task_id, - status=enum.TaskStatus.Errored, - result=enum.ActionResult.Failure) + status=hd_fields.TaskStatus.Errored, + result=hd_fields.ActionResult.Failure) return - site_design = self.get_effective_site(task_site, - change_id=design_id) + site_design = self.get_effective_site(design_id, task_site) node_filter = task.node_filter @@ -144,7 +142,7 @@ class Orchestrator(object): driver_task = self.create_task(tasks.DriverTask, parent_task_id=task.get_id(), design_id=design_id, - action=enum.OobAction.InterrogateNode, + action=hd_fields.OrchestratorAction.InterrogateNode, task_scope=task_scope) driver.execute_task(driver_task.get_id()) @@ -152,19 +150,19 @@ class Orchestrator(object): driver_task = self.state_manager.get_task(driver_task.get_id()) self.task_field_update(task_id, - status=enum.TaskStatus.Complete, + status=hd_fields.TaskStatus.Complete, result=driver_task.get_result()) return - elif task.action == enum.OrchestratorAction.PrepareNode: + elif task.action == hd_fields.OrchestratorAction.PrepareNode: self.task_field_update(task_id, - status=enum.TaskStatus.Running) + status=hd_fields.TaskStatus.Running) driver = self.enabled_drivers['oob'] if driver is None: self.task_field_update(task_id, - status=enum.TaskStatus.Errored, - result=enum.ActionResult.Failure) + status=hd_fields.TaskStatus.Errored, + 
result=hd_fields.ActionResult.Failure) return site_design = self.get_effective_site(task_site, @@ -182,7 +180,7 @@ class Orchestrator(object): setboot_task = self.create_task(tasks.DriverTask, parent_task_id=task.get_id(), design_id=design_id, - action=enum.OobAction.SetNodeBoot, + action=hd_fields.OrchestratorAction.SetNodeBoot, task_scope=task_scope) driver.execute_task(setboot_task.get_id()) @@ -192,26 +190,26 @@ class Orchestrator(object): cycle_task = self.create_task(tasks.DriverTask, parent_task_id=task.get_id(), design_id=design_id, - action=enum.OobAction.PowerCycleNode, + action=hd_fields.OrchestratorAction.PowerCycleNode, task_scope=task_scope) driver.execute_task(cycle_task.get_id()) cycle_task = self.state_manager.get_task(cycle_task.get_id()) - if (setboot_task.get_result() == enum.ActionResult.Success and - cycle_task.get_result() == enum.ActionResult.Success): + if (setboot_task.get_result() == hd_fields.ActionResult.Success and + cycle_task.get_result() == hd_fields.ActionResult.Success): self.task_field_update(task_id, - status=enum.TaskStatus.Complete, - result=enum.ActionResult.Success) - elif (setboot_task.get_result() == enum.ActionResult.Success or - cycle_task.get_result() == enum.ActionResult.Success): + status=hd_fields.TaskStatus.Complete, + result=hd_fields.ActionResult.Success) + elif (setboot_task.get_result() == hd_fields.ActionResult.Success or + cycle_task.get_result() == hd_fields.ActionResult.Success): self.task_field_update(task_id, - status=enum.TaskStatus.Complete, - result=enum.ActionResult.PartialSuccess) + status=hd_fields.TaskStatus.Complete, + result=hd_fields.ActionResult.PartialSuccess) else: self.task_field_update(task_id, - status=enum.TaskStatus.Complete, - result=enum.ActionResult.Failure) + status=hd_fields.TaskStatus.Complete, + result=hd_fields.ActionResult.Failure) return else: @@ -279,74 +277,17 @@ class Orchestrator(object): else: return False - """ - load_design_data - Pull all the defined models in statemgmt 
and assemble - them into a representation of the site. Does not compute inheritance. - Throws an exception if multiple Site models are found. - - param design_state - Instance of statemgmt.DesignState to load data from - - return a Site model populated with all components from the design state - """ - - def load_design_data(self, site_name, change_id=None): - design_data = None - - if change_id is None or change_id == 0: - try: - design_data = self.state_manager.get_design_base() - except DesignError(e): - raise e - else: - design_data = self.state_manager.get_design_change(change_id) - - site = design_data.get_site(site_name) - - networks = design_data.get_networks() - - for n in networks: - if n.site == site_name: - site.networks.append(n) - - network_links = design_data.get_network_links() - - for l in network_links: - if l.site == site_name: - site.network_links.append(l) - - host_profiles = design_data.get_host_profiles() - - for p in host_profiles: - if p.site == site_name: - site.host_profiles.append(p) - - hardware_profiles = design_data.get_hardware_profiles() - - for p in hardware_profiles: - if p.site == site_name: - site.hardware_profiles.append(p) - - baremetal_nodes = design_data.get_baremetal_nodes() - - for n in baremetal_nodes: - if n.site == site_name: - site.baremetal_nodes.append(n) - - return site - - def compute_model_inheritance(self, site_root): + def compute_model_inheritance(self, site_design): # For now the only thing that really incorporates inheritance is # host profiles and baremetal nodes. 
So we'll just resolve it for # the baremetal nodes which recursively resolves it for host profiles # assigned to those nodes - site_copy = deepcopy(site_root) - - for n in site_copy.baremetal_nodes: - n.compile_applied_model(site_copy) + for n in site_design.baremetal_nodes: + n.compile_applied_model(site_design) - return site_copy + return """ compute_model_inheritance - given a fully populated Site model, compute the effecitve design by applying inheritance and references @@ -354,22 +295,22 @@ class Orchestrator(object): return a Site model reflecting the effective design for the site """ - def get_described_site(self, site_name, change_id=None): + def get_described_site(self, design_id, site_name): site_design = None if site_name is None: raise errors.OrchestratorError("Cannot source design for site None") - site_design = self.load_design_data(site_name, change_id=change_id) + site_design = self.state_manager.get_design(design_id) return site_design - def get_effective_site(self, site_name, change_id=None): - site_design = self.get_described_site(site_name, change_id=change_id) + def get_effective_site(self, design_id, site_name): + site_design = self.get_described_site(design_id, site_name) - site_model = self.compute_model_inheritance(site_design) + self.compute_model_inheritance(site_design) - return site_model + return site_design def process_node_filter(self, node_filter, site_design): target_nodes = site_design.baremetal_nodes @@ -398,5 +339,3 @@ class Orchestrator(object): if x.has_tag(t)] return target_nodes - - diff --git a/helm_drydock/statemgmt/__init__.py b/helm_drydock/statemgmt/__init__.py index 252ef561..22112c29 100644 --- a/helm_drydock/statemgmt/__init__.py +++ b/helm_drydock/statemgmt/__init__.py @@ -18,23 +18,16 @@ from threading import Lock import uuid -import helm_drydock.model.node as node -import helm_drydock.model.hostprofile as hostprofile -import helm_drydock.model.network as network -import helm_drydock.model.site as site -import 
helm_drydock.model.hwprofile as hwprofile -import helm_drydock.model.task as tasks +import helm_drydock.objects as objects +import helm_drydock.objects.task as tasks from helm_drydock.error import DesignError, StateError class DesignState(object): def __init__(self): - self.design_base = None - self.design_base_lock = Lock() - - self.design_changes = [] - self.design_changes_lock = Lock() + self.designs = {} + self.designs_lock = Lock() self.builds = [] self.builds_lock = Lock() @@ -46,78 +39,46 @@ class DesignState(object): # TODO Need to lock a design base or change once implementation # has started - def get_design_base(self): - if self.design_base is None: - raise DesignError("No design base submitted") + def get_design(self, design_id): + if design_id not in self.designs.keys(): + raise DesignError("Design ID %s not found" % (design_id)) - return deepcopy(self.design_base) + return objects.SiteDesign.obj_from_primitive(self.designs[design_id]) - def post_design_base(self, site_design): - if site_design is not None and isinstance(site_design, SiteDesign): - my_lock = self.design_base_lock.acquire(blocking=True, + def post_design(self, site_design): + if site_design is not None: + my_lock = self.designs_lock.acquire(blocking=True, timeout=10) if my_lock: - self.design_base = deepcopy(site_design) - self.design_base_lock.release() + design_id = site_design.id + if design_id not in self.designs.keys(): + self.designs[design_id] = site_design.obj_to_primitive() + else: + self.designs_lock.release() + raise StateError("Design ID %s already exists" % design_id) + self.designs_lock.release() return True raise StateError("Could not acquire lock") else: raise DesignError("Design change must be a SiteDesign instance") - def put_design_base(self, site_design): - if site_design is not None and isinstance(site_design, SiteDesign): - my_lock = self.design_base_lock.acquire(blocking=True, + def put_design(self, site_design): + if site_design is not None: + my_lock = 
self.designs_lock.acquire(blocking=True, timeout=10) if my_lock: - self.design_base.merge_updates(site_design) - self.design_base_lock.release() - return True + design_id = site_design.id + if design_id not in self.designs.keys(): + self.designs_lock.release() + raise StateError("Design ID %s does not exist" % design_id) + else: + self.designs[design_id] = site_design.obj_to_primitive() + self.designs_lock.release() + return True raise StateError("Could not acquire lock") else: raise DesignError("Design base must be a SiteDesign instance") - def get_design_change(self, changeid): - match = [x for x in self.design_changes if x.changeid == changeid] - - if len(match) == 0: - raise DesignError("No design change %s found." % (changeid)) - else: - return deepcopy(match[0]) - - def post_design_change(self, site_design): - if site_design is not None and isinstance(site_design, SiteDesign): - my_lock = self.design_changes_lock.acquire(block=True, - timeout=10) - if my_lock: - exists = [(x) for x - in self.design_changes - if x.changeid == site_design.changeid] - if len(exists) > 0: - self.design_changs_lock.release() - raise DesignError("Existing change %s found" % - (site_design.changeid)) - - self.design_changes.append(deepcopy(site_design)) - self.design_changes_lock.release() - return True - raise StateError("Could not acquire lock") - else: - raise DesignError("Design change must be a SiteDesign instance") - - def put_design_change(self, site_design): - if site_design is not None and isinstance(site_design, SiteDesign): - my_lock = self.design_changes_lock.acquire(block=True, - timeout=10) - if my_lock: - changeid = site_design.changeid - for c in self.design_changes: - if c.changeid == changeid: - c.merge_updates(site_design) - return True - raise StateError("Could not acquire lock") - else: - raise DesignError("Design change must be a SiteDesign instance") - def get_current_build(self): latest_stamp = 0 current_build = None @@ -246,273 +207,3 @@ class 
DesignState(object): raise StateError("Could not acquire lock") -class SiteDesign(object): - - def __init__(self, ischange=False, changeid=None): - if ischange: - if changeid is not None: - self.changeid = changeid - else: - self.changeid = uuid.uuid4() - else: - # Base design - self.changeid = 0 - - self.sites = [] - self.networks = [] - self.network_links = [] - self.host_profiles = [] - self.hardware_profiles = [] - self.baremetal_nodes = [] - - def add_site(self, new_site): - if new_site is None or not isinstance(new_site, site.Site): - raise DesignError("Invalid Site model") - - self.sites.append(new_site) - - def update_site(self, update): - if update is None or not isinstance(update, site.Site): - raise DesignError("Invalid Site model") - - for i, s in enumerate(self.sites): - if s.get_name() == update.get_name(): - self.sites[i] = deepcopy(update) - return True - - return False - - def get_sites(self): - return self.sites - - def get_site(self, site_name): - for s in self.sites: - if s.name == site_name: - return s - - raise DesignError("Site %s not found in design state" % site_name) - - def add_network(self, new_network): - if new_network is None or not isinstance(new_network, network.Network): - raise DesignError("Invalid Network model") - - self.networks.append(new_network) - - def update_network(self, update): - if update is None or not isinstance(update, network.Network): - raise DesignError("Invalid Network model") - - for i, n in enumerate(self.networks): - if n.get_name() == update.get_name(): - self.networks[i] = deepcopy(update) - return True - - return False - - def get_networks(self): - return self.networks - - def get_network(self, network_name): - for n in self.networks: - if n.name == network_name: - return n - - raise DesignError("Network %s not found in design state" - % network_name) - - def add_network_link(self, new_network_link): - if new_network_link is None or not isinstance(new_network_link, - network.NetworkLink): - raise 
DesignError("Invalid NetworkLink model") - - self.network_links.append(new_network_link) - - def update_network_link(self, update): - if update is None or not isinstance(update, network.NetworkLink): - raise DesignError("Invalid NetworkLink model") - - for i, n in enumerate(self.network_links): - if n.get_name() == update.get_name(): - self.network_links[i] = deepcopy(update) - return True - - return False - - def get_network_links(self): - return self.network_links - - def get_network_link(self, link_name): - for l in self.network_links: - if l.name == link_name: - return l - - raise DesignError("NetworkLink %s not found in design state" - % link_name) - - def add_host_profile(self, new_host_profile): - if new_host_profile is None or not isinstance(new_host_profile, - hostprofile.HostProfile): - raise DesignError("Invalid HostProfile model") - - self.host_profiles.append(new_host_profile) - - def update_host_profile(self, update): - if update is None or not isinstance(update, hostprofile.HostProfile): - raise DesignError("Invalid HostProfile model") - - for i, h in enumerate(self.host_profiles): - if h.get_name() == update.get_name(): - self.host_profiles[i] = deepcopy(h) - return True - - return False - - def get_host_profiles(self): - return self.host_profiles - - def get_host_profile(self, profile_name): - for p in self.host_profiles: - if p.name == profile_name: - return p - - raise DesignError("HostProfile %s not found in design state" - % profile_name) - - def add_hardware_profile(self, new_hardware_profile): - if (new_hardware_profile is None or - not isinstance(new_hardware_profile, hwprofile.HardwareProfile)): - raise DesignError("Invalid HardwareProfile model") - - self.hardware_profiles.append(new_hardware_profile) - - def update_hardware_profile(self, update): - if update is None or not isinstance(update, hwprofile.HardwareProfile): - raise DesignError("Invalid HardwareProfile model") - - for i, h in enumerate(self.hardware_profiles): - if h.get_name() 
== update.get_name(): - self.hardware_profiles[i] = deepcopy(h) - return True - - return False - - def get_hardware_profiles(self): - return self.hardware_profiles - - def get_hardware_profile(self, profile_name): - for p in self.hardware_profiles: - if p.name == profile_name: - return p - - raise DesignError("HardwareProfile %s not found in design state" - % profile_name) - - def add_baremetal_node(self, new_baremetal_node): - if (new_baremetal_node is None or - not isinstance(new_baremetal_node, node.BaremetalNode)): - raise DesignError("Invalid BaremetalNode model") - - self.baremetal_nodes.append(new_baremetal_node) - - def update_baremetal_node(self, update): - if (update is None or not isinstance(update, node.BaremetalNode)): - raise DesignError("Invalid BaremetalNode model") - - for i, b in enumerate(self.baremetal_nodes): - if b.get_name() == update.get_name(): - self.baremetal_nodes[i] = deepcopy(b) - return True - - return False - - def get_baremetal_nodes(self): - return self.baremetal_nodes - - def get_baremetal_node(self, node_name): - for n in self.baremetal_nodes: - if n.name == node_name: - return n - - raise DesignError("BaremetalNode %s not found in design state" - % node_name) - - # Only merge the design parts included in the updated site - # design. 
Changes are merged at the part level, not for fields - # within a design part - # - # TODO convert update_* methods to use exceptions and convert to try block - def merge_updates(self, updates): - if updates is not None and isinstance(updates, SiteDesign): - if updates.changeid == self.changeid: - for u in updates.sites: - if not self.update_site(u): - self.add_site(u) - for u in updates.networks: - if not self.update_network(u): - self.add_network(u) - for u in updates.network_links: - if not self.update_network_link(u): - self.add_network_link(u) - for u in updates.host_profiles: - if not self.update_host_profile(u): - self.add_host_profile(u) - for u in updates.hardware_profiles: - if not self.update_hardware_profile(u): - self.add_hardware_profile(u) - for u in updates.baremetal_nodes: - if not self.update_baremetal_node(u): - self.add_baremetal_node(u) - - -class SiteBuild(SiteDesign): - - def __init__(self, build_id=None): - super(SiteBuild, self).__init__() - - if build_id is None: - self.buildid = datetime.datetime.now(timezone.utc).timestamp() - else: - self.buildid = build_id - - def get_filtered_nodes(self, node_filter): - effective_nodes = self.get_baremetal_nodes() - - # filter by rack - rack_filter = node_filter.get('rackname', None) - - if rack_filter is not None: - rack_list = rack_filter.split(',') - effective_nodes = [x - for x in effective_nodes - if x.get_rack() in rack_list] - # filter by name - name_filter = node_filter.get('nodename', None) - - if name_filter is not None: - name_list = name_filter.split(',') - effective_nodes = [x - for x in effective_nodes - if x.get_name() in name_list] - # filter by tag - tag_filter = node_filter.get('tags', None) - - if tag_filter is not None: - tag_list = tag_filter.split(',') - effective_nodes = [x - for x in effective_nodes - for t in tag_list - if x.has_tag(t)] - - return effective_nodes - """ - Support filtering on rack name, node name or node tag - for now. 
Each filter can be a comma-delimited list of - values. The final result is an intersection of all the - filters - """ - - def set_nodes_status(self, node_filter, status): - target_nodes = self.get_filtered_nodes(node_filter) - - for n in target_nodes: - n.set_status(status) diff --git a/setup.py b/setup.py index 7e55ed61..0287e80f 100644 --- a/setup.py +++ b/setup.py @@ -40,7 +40,7 @@ setup(name='helm_drydock', author_email='sh8121@att.com', license='Apache 2.0', packages=['helm_drydock', - 'helm_drydock.model', + 'helm_drydock.objects', 'helm_drydock.ingester', 'helm_drydock.ingester.plugins', 'helm_drydock.statemgmt', @@ -55,8 +55,9 @@ setup(name='helm_drydock', 'requests-oauthlib', 'pyghmi>=1.0.18', 'netaddr', - 'pecan', - 'webob' + 'falcon', + 'webob', + 'oslo.versionedobjects>=1.23.0', ], dependency_link=[ 'git+https://github.com/maas/python-libmaas.git' diff --git a/testrequirements.txt b/testrequirements.txt index 89f9905f..16222260 100644 --- a/testrequirements.txt +++ b/testrequirements.txt @@ -1,4 +1,5 @@ pytest-mock pytest mock -tox \ No newline at end of file +tox +oslo.versionedobjects[fixtures]>=1.23.0 \ No newline at end of file diff --git a/tests/test_design_inheritance.py b/tests/test_design_inheritance.py index 027e9bb8..587f737d 100644 --- a/tests/test_design_inheritance.py +++ b/tests/test_design_inheritance.py @@ -68,7 +68,6 @@ class TestClass(object): return design_state - @pytest.fixture(scope='module') def input_files(self, tmpdir_factory, request): diff --git a/tests/test_ingester.py b/tests/test_ingester.py index e9db185f..4fcb2af6 100644 --- a/tests/test_ingester.py +++ b/tests/test_ingester.py @@ -13,7 +13,8 @@ # limitations under the License. 
from helm_drydock.ingester import Ingester -from helm_drydock.statemgmt import DesignState, SiteDesign +from helm_drydock.statemgmt import DesignState +import helm_drydock.objects as objects import pytest import shutil @@ -26,36 +27,43 @@ class TestClass(object): print("Running test {0}".format(method.__name__)) def test_ingest_full_site(self, input_files): + objects.register_all() + input_file = input_files.join("fullsite.yaml") design_state = DesignState() - design_data = SiteDesign() - design_state.post_design_base(design_data) + design_data = objects.SiteDesign() + design_id = design_data.assign_id() + design_state.post_design(design_data) ingester = Ingester() ingester.enable_plugins([helm_drydock.ingester.plugins.yaml.YamlIngester]) - ingester.ingest_data(plugin_name='yaml', design_state=design_state, filenames=[str(input_file)]) + ingester.ingest_data(plugin_name='yaml', design_state=design_state, + filenames=[str(input_file)], design_id=design_id) - design_data = design_state.get_design_base() + design_data = design_state.get_design(design_id) - assert len(design_data.get_host_profiles()) == 3 - assert len(design_data.get_baremetal_nodes()) == 2 + assert len(design_data.host_profiles) == 3 + assert len(design_data.baremetal_nodes) == 2 def test_ingest_federated_design(self, input_files): + objects.register_all() + profiles_file = input_files.join("fullsite_profiles.yaml") networks_file = input_files.join("fullsite_networks.yaml") nodes_file = input_files.join("fullsite_nodes.yaml") design_state = DesignState() - design_data = SiteDesign() - design_state.post_design_base(design_data) + design_data = objects.SiteDesign() + design_id = design_data.assign_id() + design_state.post_design(design_data) ingester = Ingester() ingester.enable_plugins([helm_drydock.ingester.plugins.yaml.YamlIngester]) - ingester.ingest_data(plugin_name='yaml', design_state=design_state, - filenames=[str(profiles_file), str(networks_file), str(nodes_file)]) + 
ingester.ingest_data(plugin_name='yaml', design_state=design_state, design_id=design_id, + filenames=[str(profiles_file), str(networks_file), str(nodes_file)]) - design_data = design_state.get_design_base() + design_data = design_state.get_design(design_id) assert len(design_data.host_profiles) == 3 diff --git a/tests/test_ingester_yaml.py b/tests/test_ingester_yaml.py index 081a006b..99992ebf 100644 --- a/tests/test_ingester_yaml.py +++ b/tests/test_ingester_yaml.py @@ -11,11 +11,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - -from helm_drydock.ingester.plugins.yaml import YamlIngester import pytest import shutil import os +import uuid + +from helm_drydock.ingester.plugins.yaml import YamlIngester class TestClass(object): diff --git a/tests/test_models.py b/tests/test_models.py index a2d6fe8f..e5014444 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -13,57 +13,73 @@ # limitations under the License. 
import pytest -import yaml -from helm_drydock.model.hwprofile import HardwareProfile + +import helm_drydock.objects as objects +from helm_drydock.objects import fields class TestClass(object): - def setup_method(self, method): - print("Running test {0}".format(method.__name__)) - def test_hardwareprofile(self): - yaml_snippet = ("---\n" - "apiVersion: 'v1.0'\n" - "kind: HardwareProfile\n" - "metadata:\n" - " name: HPGen8v3\n" - " region: sitename\n" - " date: 17-FEB-2017\n" - " name: Sample hardware definition\n" - " author: Scott Hussey\n" - "spec:\n" - " # Vendor of the server chassis\n" - " vendor: HP\n" - " # Generation of the chassis model\n" - " generation: '8'\n" - " # Version of the chassis model within its generation - not version of the hardware definition\n" - " hw_version: '3'\n" - " # The certified version of the chassis BIOS\n" - " bios_version: '2.2.3'\n" - " # Mode of the default boot of hardware - bios, uefi\n" - " boot_mode: bios\n" - " # Protocol of boot of the hardware - pxe, usb, hdd\n" - " bootstrap_protocol: pxe\n" - " # Which interface to use for network booting within the OOB manager, not OS device\n" - " pxe_interface: 0\n" - " # Map hardware addresses to aliases/roles to allow a mix of hardware configs\n" - " # in a site to result in a consistent configuration\n" - " device_aliases:\n" - " pci:\n" - " - address: pci@0000:00:03.0\n" - " alias: prim_nic01\n" - " # type could identify expected hardware - used for hardware manifest validation\n" - " type: '82540EM Gigabit Ethernet Controller'\n" - " - address: pci@0000:00:04.0\n" - " alias: prim_nic02\n" - " type: '82540EM Gigabit Ethernet Controller'\n" - " scsi:\n" - " - address: scsi@2:0.0.0\n" - " alias: primary_boot\n" - " type: 'VBOX HARDDISK'\n") + objects.register_all() - hw_profile = yaml.load(yaml_snippet) - hw_profile_model = HardwareProfile(**hw_profile) + model_attr = { + 'versioned_object.namespace': 'helm_drydock.objects', + 'versioned_object.name': 'HardwareProfile', + 
'versioned_object.version': '1.0', + 'versioned_object.data': { + 'name': 'server', + 'source': fields.ModelSource.Designed, + 'site': 'test_site', + 'vendor': 'Acme', + 'generation': '9', + 'hw_version': '3', + 'bios_version': '2.1.1', + 'boot_mode': 'bios', + 'bootstrap_protocol': 'pxe', + 'pxe_interface': '0', + 'devices': { + 'versioned_object.namespace': 'helm_drydock.objects', + 'versioned_object.name': 'HardwareDeviceAliasList', + 'versioned_object.version': '1.0', + 'versioned_object.data': { + 'objects': [ + { + 'versioned_object.namespace': 'helm_drydock.objects', + 'versioned_object.name': 'HardwareDeviceAlias', + 'versioned_object.version': '1.0', + 'versioned_object.data': { + 'alias': 'nic', + 'source': fields.ModelSource.Designed, + 'address': '0000:00:03.0', + 'bus_type': 'pci', + 'dev_type': '82540EM Gigabit Ethernet Controller', + } + }, + { + 'versioned_object.namespace': 'helm_drydock.objects', + 'versioned_object.name': 'HardwareDeviceAlias', + 'versioned_object.version': '1.0', + 'versioned_object.data': { + 'alias': 'bootdisk', + 'source': fields.ModelSource.Designed, + 'address': '2:0.0.0', + 'bus_type': 'scsi', + 'dev_type': 'SSD', + } + }, + ] - assert hasattr(hw_profile_model, 'bootstrap_protocol') + } + } + } + } + + hwprofile = objects.HardwareProfile.obj_from_primitive(model_attr) + + assert getattr(hwprofile, 'bootstrap_protocol') == 'pxe' + + hwprofile.bootstrap_protocol = 'network' + + assert 'bootstrap_protocol' in hwprofile.obj_what_changed() + assert 'bios_version' not in hwprofile.obj_what_changed() diff --git a/tests/test_orch_generic.py b/tests/test_orch_generic.py index faa6178f..86739965 100644 --- a/tests/test_orch_generic.py +++ b/tests/test_orch_generic.py @@ -15,15 +15,16 @@ # # Generic testing for the orchestrator # - -import helm_drydock.orchestrator as orch -import helm_drydock.enum as enum -import helm_drydock.statemgmt as statemgmt -import helm_drydock.model.task as task -import helm_drydock.drivers as drivers 
import threading import time +import helm_drydock.orchestrator as orch +import helm_drydock.objects.fields as hd_fields +import helm_drydock.statemgmt as statemgmt +import helm_drydock.objects.task as task +import helm_drydock.drivers as drivers + + class TestClass(object): def test_task_complete(self): @@ -31,24 +32,24 @@ class TestClass(object): orchestrator = orch.Orchestrator(state_manager=state_mgr) orch_task = orchestrator.create_task(task.OrchestratorTask, site='default', - action=enum.OrchestratorAction.Noop) + action=hd_fields.OrchestratorAction.Noop) orchestrator.execute_task(orch_task.get_id()) orch_task = state_mgr.get_task(orch_task.get_id()) - assert orch_task.get_status() == enum.TaskStatus.Complete + assert orch_task.get_status() == hd_fields.TaskStatus.Complete for t_id in orch_task.subtasks: t = state_mgr.get_task(t_id) - assert t.get_status() == enum.TaskStatus.Complete + assert t.get_status() == hd_fields.TaskStatus.Complete def test_task_termination(self): state_mgr = statemgmt.DesignState() orchestrator = orch.Orchestrator(state_manager=state_mgr) orch_task = orchestrator.create_task(task.OrchestratorTask, site='default', - action=enum.OrchestratorAction.Noop) + action=hd_fields.OrchestratorAction.Noop) orch_thread = threading.Thread(target=orchestrator.execute_task, args=(orch_task.get_id(),)) @@ -61,8 +62,8 @@ class TestClass(object): time.sleep(1) orch_task = state_mgr.get_task(orch_task.get_id()) - assert orch_task.get_status() == enum.TaskStatus.Terminated + assert orch_task.get_status() == hd_fields.TaskStatus.Terminated for t_id in orch_task.subtasks: t = state_mgr.get_task(t_id) - assert t.get_status() == enum.TaskStatus.Terminated \ No newline at end of file + assert t.get_status() == hd_fields.TaskStatus.Terminated \ No newline at end of file diff --git a/tests/test_orch_oob.py b/tests/test_orch_oob.py index 2f6737c1..42d2c30f 100644 --- a/tests/test_orch_oob.py +++ b/tests/test_orch_oob.py @@ -21,18 +21,21 @@ import pytest import os 
import shutil +import uuid from helm_drydock.ingester import Ingester import helm_drydock.orchestrator as orch -import helm_drydock.enum as enum +import helm_drydock.objects.fields as hd_fields import helm_drydock.statemgmt as statemgmt -import helm_drydock.model.task as task +import helm_drydock.objects as objects +import helm_drydock.objects.task as task import helm_drydock.drivers as drivers import helm_drydock.ingester.plugins.yaml as yaml_ingester class TestClass(object): + design_id = str(uuid.uuid4()) # sthussey None of these work right until I figure out correct # mocking of pyghmi @@ -45,7 +48,8 @@ class TestClass(object): orch_task = orchestrator.create_task(task.OrchestratorTask, site='sitename', - action=enum.OrchestratorAction.VerifyNode) + design_id=self.design_id, + action=hd_fields.OrchestratorAction.VerifyNode) orchestrator.execute_task(orch_task.get_id()) @@ -73,15 +77,19 @@ class TestClass(object): @pytest.fixture(scope='module') def loaded_design(self, input_files): + objects.register_all() + input_file = input_files.join("oob.yaml") design_state = statemgmt.DesignState() - design_data = statemgmt.SiteDesign() - design_state.post_design_base(design_data) + design_data = objects.SiteDesign(id=self.design_id) + + design_state.post_design(design_data) ingester = Ingester() ingester.enable_plugins([yaml_ingester.YamlIngester]) - ingester.ingest_data(plugin_name='yaml', design_state=design_state, filenames=[str(input_file)]) + ingester.ingest_data(plugin_name='yaml', design_state=design_state, + design_id=self.design_id, filenames=[str(input_file)]) return design_state diff --git a/tests/test_statemgmt.py b/tests/test_statemgmt.py index 4d9ea560..edb82764 100644 --- a/tests/test_statemgmt.py +++ b/tests/test_statemgmt.py @@ -11,52 +11,38 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from helm_drydock.statemgmt import SiteDesign - -import helm_drydock.model.site as site -import helm_drydock.model.network as network - import pytest import shutil -import os -import helm_drydock.ingester.plugins.yaml + + +import helm_drydock.objects as objects +import helm_drydock.statemgmt as statemgmt class TestClass(object): def setup_method(self, method): print("Running test {0}".format(method.__name__)) - def test_sitedesign_merge(self): - design_data = SiteDesign() + def test_sitedesign_post(self): + objects.register_all() - initial_site = site.Site(**{'apiVersion': 'v1.0', - 'metadata': { - 'name': 'testsite', - }, - }) - net_a = network.Network(**{ 'apiVersion': 'v1.0', - 'metadata': { - 'name': 'net_a', - 'region': 'testsite', - }, - 'spec': { - 'cidr': '172.16.0.0/24', - }}) - net_b = network.Network(**{ 'apiVersion': 'v1.0', - 'metadata': { - 'name': 'net_b', - 'region': 'testsite', - }, - 'spec': { - 'cidr': '172.16.0.1/24', - }}) + state_manager = statemgmt.DesignState() + design_data = objects.SiteDesign() + design_id = design_data.assign_id() - design_data.add_site(initial_site) + initial_site = objects.Site() + initial_site.name = 'testsite' + + net_a = objects.Network() + net_a.name = 'net_a' + net_a.region = 'testsite' + net_a.cidr = '172.16.0.0/24' + + design_data.set_site(initial_site) design_data.add_network(net_a) - design_update = SiteDesign() - design_update.add_network(net_b) + state_manager.post_design(design_data) - design_data.merge_updates(design_update) + my_design = state_manager.get_design(design_id) - assert len(design_data.get_networks()) == 2 \ No newline at end of file + assert design_data.obj_to_primitive() == my_design.obj_to_primitive() \ No newline at end of file diff --git a/tests/yaml_samples/fullsite.yaml b/tests/yaml_samples/fullsite.yaml index 5618372e..06980841 100644 --- a/tests/yaml_samples/fullsite.yaml +++ b/tests/yaml_samples/fullsite.yaml @@ -37,11 +37,11 @@ metadata: description: Describe layer 1 attributes. 
Primary key is 'name'. These settings will generally be things the switch and server have to agree on spec: bonding: - mode: none + mode: disabled mtu: 1500 linkspeed: 100full trunking: - mode: none + mode: disabled default_network: oob --- # pxe is a bit of 'magic' indicating the link config used when PXE booting @@ -57,14 +57,14 @@ metadata: description: Describe layer 1 attributes. Primary key is 'name'. These settings will generally be things the switch and server have to agree on spec: bonding: - mode: none + mode: disabled mtu: 1500 linkspeed: auto # Is this link supporting multiple layer 2 networks? # none is a port-based VLAN identified by default_network # tagged is is using 802.1q VLAN tagging. Untagged packets will default to default_netwokr trunking: - mode: none + mode: disabled # use name, will translate to VLAN ID default_network: pxe --- @@ -91,14 +91,11 @@ spec: hash: layer3+4 # 802.3ad specific options peer_rate: slow - mon_rate: default - up_delay: default - down_delay: default mtu: 9000 linkspeed: auto # Is this link supporting multiple layer 2 networks? 
trunking: - mode: tagged + mode: 802.1q default_network: mgmt --- apiVersion: 'v1.0' @@ -446,15 +443,17 @@ spec: # Map hardware addresses to aliases/roles to allow a mix of hardware configs # in a site to result in a consistent configuration device_aliases: - pci: - - address: pci@0000:00:03.0 - alias: prim_nic01 + - address: '0000:00:03.0' + alias: prim_nic01 # type could identify expected hardware - used for hardware manifest validation - type: '82540EM Gigabit Ethernet Controller' - - address: pci@0000:00:04.0 - alias: prim_nic02 - type: '82540EM Gigabit Ethernet Controller' - scsi: - - address: scsi@2:0.0.0 - alias: primary_boot - type: 'VBOX HARDDISK' + dev_type: '82540EM Gigabit Ethernet Controller' + bus_type: 'pci' + - address: '0000:00:04.0' + alias: prim_nic02 + dev_type: '82540EM Gigabit Ethernet Controller' + bus_type: 'pci' + - address: '2:0.0.0' + alias: primary_boot + dev_type: 'VBOX HARDDISK' + bus_type: 'scsi' + diff --git a/tests/yaml_samples/fullsite_networks.yaml b/tests/yaml_samples/fullsite_networks.yaml index 37af1412..cd16b296 100644 --- a/tests/yaml_samples/fullsite_networks.yaml +++ b/tests/yaml_samples/fullsite_networks.yaml @@ -28,11 +28,11 @@ metadata: description: Describe layer 1 attributes. Primary key is 'name'. These settings will generally be things the switch and server have to agree on spec: bonding: - mode: none + mode: disabled mtu: 1500 linkspeed: 100full trunking: - mode: none + mode: disabled default_network: oob --- # pxe is a bit of 'magic' indicating the link config used when PXE booting @@ -48,14 +48,14 @@ metadata: description: Describe layer 1 attributes. Primary key is 'name'. These settings will generally be things the switch and server have to agree on spec: bonding: - mode: none + mode: disabled mtu: 1500 linkspeed: auto # Is this link supporting multiple layer 2 networks? # none is a port-based VLAN identified by default_network # tagged is is using 802.1q VLAN tagging. 
Untagged packets will default to default_netwokr trunking: - mode: none + mode: disabled # use name, will translate to VLAN ID default_network: pxe --- @@ -82,14 +82,11 @@ spec: hash: layer3+4 # 802.3ad specific options peer_rate: slow - mon_rate: default - up_delay: default - down_delay: default mtu: 9000 linkspeed: auto # Is this link supporting multiple layer 2 networks? trunking: - mode: tagged + mode: 802.1q default_network: mgmt --- apiVersion: 'v1.0' diff --git a/tests/yaml_samples/fullsite_nodes.yaml b/tests/yaml_samples/fullsite_nodes.yaml index 05625e0e..95426caa 100644 --- a/tests/yaml_samples/fullsite_nodes.yaml +++ b/tests/yaml_samples/fullsite_nodes.yaml @@ -48,7 +48,8 @@ spec: - network: public address: 172.16.3.20 metadata: - roles: os_ctl + tags: + - os_ctl rack: rack01 --- apiVersion: 'v1.0' diff --git a/tests/yaml_samples/fullsite_profiles.yaml b/tests/yaml_samples/fullsite_profiles.yaml index c5430085..a7ba5414 100644 --- a/tests/yaml_samples/fullsite_profiles.yaml +++ b/tests/yaml_samples/fullsite_profiles.yaml @@ -182,15 +182,16 @@ spec: # Map hardware addresses to aliases/roles to allow a mix of hardware configs # in a site to result in a consistent configuration device_aliases: - pci: - - address: pci@0000:00:03.0 - alias: prim_nic01 + - address: 0000:00:03.0 + alias: prim_nic01 # type could identify expected hardware - used for hardware manifest validation - type: '82540EM Gigabit Ethernet Controller' - - address: pci@0000:00:04.0 - alias: prim_nic02 - type: '82540EM Gigabit Ethernet Controller' - scsi: - - address: scsi@2:0.0.0 - alias: primary_boot - type: 'VBOX HARDDISK' + dev_type: '82540EM Gigabit Ethernet Controller' + bus_type: 'pci' + - address: 0000:00:04.0 + alias: prim_nic02 + dev_type: '82540EM Gigabit Ethernet Controller' + bus_type: 'pci' + - address: 2:0.0.0 + alias: primary_boot + dev_type: 'VBOX HARDDISK' + bus_type: 'scsi' diff --git a/tests/yaml_samples/multidoc.yaml b/tests/yaml_samples/multidoc.yaml index 
0bd35190..2bb8bf49 100644 --- a/tests/yaml_samples/multidoc.yaml +++ b/tests/yaml_samples/multidoc.yaml @@ -10,11 +10,11 @@ metadata: description: Describe layer 1 attributes. Primary key is 'name'. These settings will generally be things the switch and server have to agree on spec: bonding: - mode: none + mode: disabled mtu: 1500 linkspeed: 100full trunking: - mode: none + mode: disabled default_network: oob --- # pxe is a bit of 'magic' indicating the link config used when PXE booting @@ -31,14 +31,14 @@ metadata: description: Describe layer 1 attributes. Primary key is 'name'. These settings will generally be things the switch and server have to agree on spec: bonding: - mode: none + mode: disabled mtu: 1500 linkspeed: auto # Is this link supporting multiple layer 2 networks? # none is a port-based VLAN identified by default_network # tagged is is using 802.1q VLAN tagging. Untagged packets will default to default_netwokr trunking: - mode: none + mode: disabled # use name, will translate to VLAN ID default_network: pxe --- @@ -61,17 +61,14 @@ spec: # balance-rr # Can add support for others down the road bonding: - mode: 802.3ad + mode: '802.3ad' # For LACP (802.3ad) xmit hashing policy: layer2, layer2+3, layer3+4, encap3+4 hash: layer3+4 # 802.3ad specific options peer_rate: slow - mon_rate: default - up_delay: default - down_delay: default mtu: 9000 linkspeed: auto # Is this link supporting multiple layer 2 networks? trunking: - mode: tagged + mode: '802.1q' default_network: mgmt \ No newline at end of file diff --git a/tests/yaml_samples/oob.yaml b/tests/yaml_samples/oob.yaml index 5c463c55..7f5aca5d 100644 --- a/tests/yaml_samples/oob.yaml +++ b/tests/yaml_samples/oob.yaml @@ -37,11 +37,11 @@ metadata: description: Describe layer 1 attributes. Primary key is 'name'. 
These settings will generally be things the switch and server have to agree on spec: bonding: - mode: none + mode: disabled mtu: 1500 linkspeed: 100full trunking: - mode: none + mode: disabled default_network: oob --- apiVersion: 'v1.0' @@ -212,15 +212,16 @@ spec: # Map hardware addresses to aliases/roles to allow a mix of hardware configs # in a site to result in a consistent configuration device_aliases: - pci: - - address: pci@0000:00:03.0 - alias: prim_nic01 - # type could identify expected hardware - used for hardware manifest validation - type: '82540EM Gigabit Ethernet Controller' - - address: pci@0000:00:04.0 - alias: prim_nic02 - type: '82540EM Gigabit Ethernet Controller' - scsi: - - address: scsi@2:0.0.0 - alias: primary_boot - type: 'VBOX HARDDISK' + - address: 0000:00:03.0 + alias: prim_nic01 + # type could identify expected hardware - used for hardware manifest validation + dev_type: '82540EM Gigabit Ethernet Controller' + bus_type: 'pci' + - address: 0000:00:04.0 + alias: prim_nic02 + dev_type: '82540EM Gigabit Ethernet Controller' + bus_type: 'pci' + - address: 2:0.0.0 + alias: primary_boot + dev_type: 'VBOX HARDDISK' + bus_type: 'scsi' diff --git a/tests/yaml_samples/singledoc.yaml b/tests/yaml_samples/singledoc.yaml index 6d24c8c8..69ad43d5 100644 --- a/tests/yaml_samples/singledoc.yaml +++ b/tests/yaml_samples/singledoc.yaml @@ -25,15 +25,16 @@ spec: # Map hardware addresses to aliases/roles to allow a mix of hardware configs # in a site to result in a consistent configuration device_aliases: - pci: - - address: pci@0000:00:03.0 - alias: prim_nic01 + - address: 0000:00:03.0 + alias: prim_nic01 # type could identify expected hardware - used for hardware manifest validation - type: '82540EM Gigabit Ethernet Controller' - - address: pci@0000:00:04.0 - alias: prim_nic02 - type: '82540EM Gigabit Ethernet Controller' - scsi: - - address: scsi@2:0.0.0 - alias: primary_boot - type: 'VBOX HARDDISK' \ No newline at end of file + dev_type: '82540EM Gigabit 
Ethernet Controller' + bus_type: 'pci' + - address: 0000:00:04.0 + alias: prim_nic02 + dev_type: '82540EM Gigabit Ethernet Controller' + bus_type: 'pci' + - address: 2:0.0.0 + alias: primary_boot + dev_type: 'VBOX HARDDISK' + bus_type: 'scsi' \ No newline at end of file From 7760769b25fc54e5ad52885fa5357af317333ac1 Mon Sep 17 00:00:00 2001 From: Scott Hussey Date: Fri, 5 May 2017 09:49:55 -0500 Subject: [PATCH 08/11] Add repository attribute to a site definition --- helm_drydock/objects/site.py | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/helm_drydock/objects/site.py b/helm_drydock/objects/site.py index 8b30e2e3..d092c853 100644 --- a/helm_drydock/objects/site.py +++ b/helm_drydock/objects/site.py @@ -35,6 +35,7 @@ class Site(base.DrydockPersistentObject, base.DrydockObject): 'source': hd_fields.ModelSourceField(), 'tag_definitions': ovo_fields.ObjectField('NodeTagDefinitionList', nullable=True), + 'repositories': ovo_fields.ObjectField('RepositoryList', nullable=True), } def __init__(self, **kwargs): @@ -77,6 +78,33 @@ class NodeTagDefinitionList(base.DrydockObjectListBase, base.DrydockObject): 'objects': ovo_fields.ListOfObjectsField('NodeTagDefinition'), } +# Need to determine how best to define a repository that can encompass +# all repositories needed +@base.DrydockObjectRegistry.register +class Repository(base.DrydockObject): + + VERSION = '1.0' + + fields = { + 'name': ovo_fields.StringField(), + } + + def __init__(self, **kwargs): + super(Repository, self).__init__(**kwargs) + + # TagDefinition keyed by tag + def get_id(self): + return self.name + +@base.DrydockObjectRegistry.register +class RepositoryList(base.DrydockObjectListBase, base.DrydockObject): + + VERSION = '1.0' + + fields = { + 'objects': ovo_fields.ListOfObjectsField('Repository'), + } + @base.DrydockObjectRegistry.register class SiteDesign(base.DrydockPersistentObject, base.DrydockObject): From 595f3d9fc370ad70498866bd53d12e55cfcace8c Mon Sep 17 00:00:00 
2001 From: Scott Hussey Date: Fri, 5 May 2017 10:30:21 -0500 Subject: [PATCH 09/11] Add attributes for base_os and kernel selection and custom kernel params to HostProfile Add attribute for defining the primary interface (i.e. default route) for a node. --- helm_drydock/objects/hostprofile.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/helm_drydock/objects/hostprofile.py b/helm_drydock/objects/hostprofile.py index 8e822205..47144734 100644 --- a/helm_drydock/objects/hostprofile.py +++ b/helm_drydock/objects/hostprofile.py @@ -48,6 +48,9 @@ class HostProfile(base.DrydockPersistentObject, base.DrydockObject): 'tags': obj_fields.ListOfStringsField(nullable=True), 'owner_data': obj_fields.DictOfStringsField(nullable=True), 'rack': obj_fields.StringField(nullable=True), + 'base_os': obj_fields.StringField(nullable=True), + 'kernel': obj_fields.StringField(nullable=True), + 'kernel_params': obj_fields.StringField(nullable=True), } def __init__(self, **kwargs): @@ -87,10 +90,10 @@ class HostProfile(base.DrydockPersistentObject, base.DrydockObject): # First compute inheritance for simple fields inheritable_field_list = [ - "hardware_profile", "oob_type", "oob_network", - "oob_credential", "oob_account", "storage_layout", - "bootdisk_device", "bootdisk_root_size", "bootdisk_boot_size", - "rack"] + 'hardware_profile', 'oob_type', 'oob_network', + 'oob_credential', 'oob_account', 'storage_layout', + 'bootdisk_device', 'bootdisk_root_size', 'bootdisk_boot_size', + 'rack', 'base_os', 'kernel', 'kernel_params'] # Create applied data from self design values and parent # applied values @@ -131,6 +134,7 @@ class HostInterface(base.DrydockObject): fields = { 'device_name': obj_fields.StringField(), + 'primary_network': obj_fields.BooleanField(nullable=False, default=False), 'source': hd_fields.ModelSourceField(), 'network_link': obj_fields.StringField(nullable=True), 'hardware_slaves': obj_fields.ListOfStringsField(nullable=True), @@ -208,6 
+212,11 @@ class HostInterface(base.DrydockObject): elif j.get_name() == parent_name: m = objects.HostInterface() m.device_name = j.get_name() + m.primary_network = + objects.Util.apply_field_inheritance( + getattr(j, 'primary_network', None), + getattr(i, 'primary_network', None)) + m.network_link = \ objects.Utils.apply_field_inheritance( getattr(j, 'network_link', None), From da736b74b79f486578ab72c4121ea527cddcdc75 Mon Sep 17 00:00:00 2001 From: Scott Hussey Date: Thu, 18 May 2017 09:06:15 -0500 Subject: [PATCH 10/11] Drydock orchestration of MaaS networking - Create a MaaS API client for managing API access/authentication - Start MaaS object model for accessing API resources - Add orchestration step for PrepareSite action - Create maasdriver logic to handle CreateNetworkTemplate action - Separate tests for unit and integration - Fix YAML ingester to use a default of None for VLAN tag instead of 1 --- helm_drydock/config.py | 28 +- helm_drydock/drivers/node/__init__.py | 40 ++- .../drivers/node/maasdriver/__init__.py | 7 - .../drivers/node/maasdriver/api_client.py | 147 +++++++++ .../drivers/node/maasdriver/driver.py | 306 ++++++++++++++++++ .../node/maasdriver/models/__init__.py | 13 + .../drivers/node/maasdriver/models/base.py | 273 ++++++++++++++++ .../drivers/node/maasdriver/models/fabric.py | 53 +++ .../drivers/node/maasdriver/models/subnet.py | 55 ++++ .../drivers/node/maasdriver/models/vlan.py | 86 +++++ .../drivers/node/maasdriver/readme.md | 46 +++ helm_drydock/drivers/oob/__init__.py | 12 +- .../drivers/oob/pyghmi_driver/__init__.py | 11 +- helm_drydock/drivers/readme.md | 35 +- helm_drydock/error.py | 12 + helm_drydock/ingester/plugins/yaml.py | 3 +- helm_drydock/objects/base.py | 7 + helm_drydock/objects/fields.py | 28 +- helm_drydock/objects/hostprofile.py | 14 +- helm_drydock/objects/network.py | 20 +- helm_drydock/objects/site.py | 7 +- helm_drydock/objects/task.py | 3 - helm_drydock/orchestrator/__init__.py | 47 +++ 
helm_drydock/orchestrator/readme.md | 12 + setup.py | 13 +- tests/integration/test_maasdriver_client.py | 30 ++ tests/integration/test_maasdriver_network.py | 58 ++++ tests/integration/test_orch_node_networks.py | 94 ++++++ tests/{ => unit}/test_design_inheritance.py | 4 +- tests/{ => unit}/test_ingester.py | 2 +- tests/{ => unit}/test_ingester_yaml.py | 2 +- tests/{ => unit}/test_models.py | 0 tests/{ => unit}/test_orch_generic.py | 0 tests/{ => unit}/test_orch_oob.py | 2 +- tests/{ => unit}/test_statemgmt.py | 0 35 files changed, 1390 insertions(+), 80 deletions(-) create mode 100644 helm_drydock/drivers/node/maasdriver/api_client.py create mode 100644 helm_drydock/drivers/node/maasdriver/driver.py create mode 100644 helm_drydock/drivers/node/maasdriver/models/__init__.py create mode 100644 helm_drydock/drivers/node/maasdriver/models/base.py create mode 100644 helm_drydock/drivers/node/maasdriver/models/fabric.py create mode 100644 helm_drydock/drivers/node/maasdriver/models/subnet.py create mode 100644 helm_drydock/drivers/node/maasdriver/models/vlan.py create mode 100644 helm_drydock/drivers/node/maasdriver/readme.md create mode 100644 tests/integration/test_maasdriver_client.py create mode 100644 tests/integration/test_maasdriver_network.py create mode 100644 tests/integration/test_orch_node_networks.py rename tests/{ => unit}/test_design_inheritance.py (94%) rename tests/{ => unit}/test_ingester.py (96%) rename tests/{ => unit}/test_ingester_yaml.py (94%) rename tests/{ => unit}/test_models.py (100%) rename tests/{ => unit}/test_orch_generic.py (100%) rename tests/{ => unit}/test_orch_oob.py (97%) rename tests/{ => unit}/test_statemgmt.py (100%) diff --git a/helm_drydock/config.py b/helm_drydock/config.py index 92b3d8ac..244e85c0 100644 --- a/helm_drydock/config.py +++ b/helm_drydock/config.py @@ -21,21 +21,13 @@ class DrydockConfig(object): - def __init__(self): - self.server_driver_config = { - selected_driver = helm_drydock.drivers.server.maasdriver, - 
params = { - maas_api_key = "" - maas_api_url = "" - } - } - self.selected_network_driver = helm_drydock.drivers.network.noopdriver - self.control_config = {} - self.ingester_config = { - plugins = [helm_drydock.ingester.plugins.aicyaml.AicYamlIngester] - } - self.introspection_config = {} - self.orchestrator_config = {} - self.statemgmt_config = { - backend_driver = helm_drydock.drivers.statemgmt.etcd, - } + node_driver = { + 'maasdriver': { + 'api_key': 'KTMHgA42cNSMnfmJ82:cdg4yQUhp542aHsCTV:7Dc2KB9hQpWq3LfQAAAKAj6wdg22yWxZ', + 'api_url': 'http://localhost:5240/MAAS/api/2.0/' + }, + } + + ingester_config = { + 'plugins': ['helm_drydock.ingester.plugins.yaml'] + } \ No newline at end of file diff --git a/helm_drydock/drivers/node/__init__.py b/helm_drydock/drivers/node/__init__.py index bae95f38..87ea3046 100644 --- a/helm_drydock/drivers/node/__init__.py +++ b/helm_drydock/drivers/node/__init__.py @@ -13,16 +13,44 @@ # limitations under the License. # +import helm_drydock.objects.fields as hd_fields +import helm_drydock.error as errors + from helm_drydock.drivers import ProviderDriver class NodeDriver(ProviderDriver): -class NodeAction(Enum): - PrepareNode = 'prepare_node' - ApplyNetworkConfig = 'apply_network_config' - ApplyStorageConfig = 'apply_storage_config' - InterrogateNode = 'interrogate_node' - DeployNode = 'deploy_node' + def __init__(self, **kwargs): + super(NodeDriver, self).__init__(**kwargs) + + self.supported_actions = [hd_fields.OrchestratorAction.ValidateNodeServices, + hd_fields.OrchestratorAction.CreateNetworkTemplate, + hd_fields.OrchestratorAction.CreateStorageTemplate, + hd_fields.OrchestratorAction.CreateBootMedia, + hd_fields.OrchestratorAction.PrepareHardwareConfig, + hd_fields.OrchestratorAction.ConfigureHardware, + hd_fields.OrchestratorAction.InterrogateNode, + hd_fields.OrchestratorAction.ApplyNodeNetworking, + hd_fields.OrchestratorAction.ApplyNodeStorage, + hd_fields.OrchestratorAction.ApplyNodePlatform, + 
hd_fields.OrchestratorAction.DeployNode, + hd_fields.OrchestratorAction.DestroyNode] + + self.driver_name = "node_generic" + self.driver_key = "node_generic" + self.driver_desc = "Generic Node Driver" + + def execute_task(self, task_id): + task = self.state_manager.get_task(task_id) + task_action = task.action + + if task_action in self.supported_actions: + return + else: + raise DriverError("Unsupported action %s for driver %s" % + (task_action, self.driver_desc)) + + diff --git a/helm_drydock/drivers/node/maasdriver/__init__.py b/helm_drydock/drivers/node/maasdriver/__init__.py index 7c0c2b74..f10bbbf6 100644 --- a/helm_drydock/drivers/node/maasdriver/__init__.py +++ b/helm_drydock/drivers/node/maasdriver/__init__.py @@ -11,10 +11,3 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from helm_drydock.drivers.node import NodeDriver - -class MaasNodeDriver(NodeDriver): - - def __init__(self, kwargs): - super(MaasNodeDriver, self).__init__(**kwargs) - \ No newline at end of file diff --git a/helm_drydock/drivers/node/maasdriver/api_client.py b/helm_drydock/drivers/node/maasdriver/api_client.py new file mode 100644 index 00000000..fa463109 --- /dev/null +++ b/helm_drydock/drivers/node/maasdriver/api_client.py @@ -0,0 +1,147 @@ +# Copyright 2017 AT&T Intellectual Property. All other rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +from oauthlib import oauth1 +import requests +import requests.auth as req_auth +import base64 + +class MaasOauth(req_auth.AuthBase): + def __init__(self, apikey): + self.consumer_key, self.token_key, self.token_secret = apikey.split(':') + self.consumer_secret = "" + self.realm = "OAuth" + + self.oauth_client = oauth1.Client(self.consumer_key, self.consumer_secret, + self.token_key, self.token_secret, signature_method=oauth1.SIGNATURE_PLAINTEXT, + realm=self.realm) + + def __call__(self, req): + headers = req.headers + url = req.url + method = req.method + body = None if req.body is None or len(req.body) == 0 else req.body + + new_url, signed_headers, new_body = self.oauth_client.sign(url, method, body, headers) + + req.headers['Authorization'] = signed_headers['Authorization'] + + return req + +class MaasRequestFactory(object): + + def __init__(self, base_url, apikey): + self.base_url = base_url + self.apikey = apikey + self.signer = MaasOauth(apikey) + self.http_session = requests.Session() + + def get(self, endpoint, **kwargs): + return self._send_request('GET', endpoint, **kwargs) + + def post(self, endpoint, **kwargs): + return self._send_request('POST', endpoint, **kwargs) + + def delete(self, endpoint, **kwargs): + return self._send_request('DELETE', endpoint, **kwargs) + + def put(self, endpoint, **kwargs): + return self._send_request('PUT', endpoint, **kwargs) + + def test_connectivity(self): + try: + resp = self.get('version/') + except requests.Timeout(ex): + raise errors.TransientDriverError("Timeout connection to MaaS") + + if resp.status_code in [500, 503]: + raise errors.TransientDriverError("Received 50x error from MaaS") + + if resp.status_code != 200: + raise errors.PersistentDriverError("Received unexpected error from MaaS") + + return True + + def test_authentication(self): + try: + resp = self.get('account/', op='list_authorisation_tokens') 
+ except requests.Timeout(ex): + raise errors.TransientDriverError("Timeout connection to MaaS") + except: + raise errors.PersistentDriverError("Error accessing MaaS") + + if resp.status_code in [401, 403] : + raise errors.PersistentDriverError("MaaS API Authentication Failed") + + if resp.status_code in [500, 503]: + raise errors.TransientDriverError("Received 50x error from MaaS") + + if resp.status_code != 200: + raise errors.PersistentDriverError("Received unexpected error from MaaS") + + return True + + def _send_request(self, method, endpoint, **kwargs): + # Delete auth mechanism if defined + kwargs.pop('auth', None) + + headers = kwargs.pop('headers', {}) + + if 'Accept' not in headers.keys(): + headers['Accept'] = 'application/json' + + if 'files' in kwargs.keys(): + files = kwargs.pop('files') + + files_tuples = {} + + for (k, v) in files.items(): + if v is None: + continue + files_tuples[k] = (None, base64.b64encode(str(v).encode('utf-8')).decode('utf-8'), 'text/plain; charset="utf-8"', {'Content-Transfer-Encoding': 'base64'}) + # elif isinstance(v, str): + # files_tuples[k] = (None, base64.b64encode(v.encode('utf-8')).decode('utf-8'), 'text/plain; charset="utf-8"', {'Content-Transfer-Encoding': 'base64'}) + # elif isinstance(v, int) or isinstance(v, bool): + # if isinstance(v, bool): + # v = int(v) + # files_tuples[k] = (None, base64.b64encode(v.to_bytes(2, byteorder='big')), 'application/octet-stream', {'Content-Transfer-Encoding': 'base64'}) + + + kwargs['files'] = files_tuples + + params = kwargs.get('params', None) + + if params is None and 'op' in kwargs.keys(): + params = {'op': kwargs.pop('op')} + elif 'op' in kwargs.keys() and 'op' not in params.keys(): + params['op'] = kwargs.pop('op') + elif 'op' in kwargs.keys(): + kwargs.pop('op') + + # TODO timeouts should be configurable + timeout = kwargs.pop('timeout', None) + if timeout is None: + timeout = (2, 30) + + request = requests.Request(method=method, url=self.base_url + endpoint, 
auth=self.signer, + headers=headers, params=params, **kwargs) + + prepared_req = self.http_session.prepare_request(request) + + resp = self.http_session.send(prepared_req, timeout=timeout) + + if resp.status_code >= 400: + print("FAILED API CALL:\nURL: %s %s\nBODY:\n%s\nRESPONSE: %s\nBODY:\n%s" % + (prepared_req.method, prepared_req.url, str(prepared_req.body).replace('\\r\\n','\n'), + resp.status_code, resp.text)) + return resp \ No newline at end of file diff --git a/helm_drydock/drivers/node/maasdriver/driver.py b/helm_drydock/drivers/node/maasdriver/driver.py new file mode 100644 index 00000000..83406a18 --- /dev/null +++ b/helm_drydock/drivers/node/maasdriver/driver.py @@ -0,0 +1,306 @@ +# Copyright 2017 AT&T Intellectual Property. All other rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import helm_drydock.error as errors +import helm_drydock.config as config +import helm_drydock.drivers as drivers +import helm_drydock.objects.fields as hd_fields +import helm_drydock.objects.task as task_model + +from helm_drydock.drivers.node import NodeDriver +from .api_client import MaasRequestFactory +import helm_drydock.drivers.node.maasdriver.models.fabric as maas_fabric +import helm_drydock.drivers.node.maasdriver.models.vlan as maas_vlan +import helm_drydock.drivers.node.maasdriver.models.subnet as maas_subnet + +class MaasNodeDriver(NodeDriver): + + def __init__(self, **kwargs): + super(MaasNodeDriver, self).__init__(**kwargs) + + self.driver_name = "maasdriver" + self.driver_key = "maasdriver" + self.driver_desc = "MaaS Node Provisioning Driver" + + self.config = config.DrydockConfig.node_driver[self.driver_key] + + def execute_task(self, task_id): + task = self.state_manager.get_task(task_id) + + if task is None: + raise errors.DriverError("Invalid task %s" % (task_id)) + + if task.action not in self.supported_actions: + raise errors.DriverError("Driver %s doesn't support task action %s" + % (self.driver_desc, task.action)) + + if task.action == hd_fields.OrchestratorAction.ValidateNodeServices: + self.orchestrator.task_field_update(task.get_id(), + status=hd_fields.TaskStatus.Running) + maas_client = MaasRequestFactory(self.config['api_url'], self.config['api_key']) + + try: + if maas_client.test_connectivity(): + if maas_client.test_authentication(): + self.orchestrator.task_field_update(task.get_id(), + status=hd_fields.TaskStatus.Complete, + result=hd_fields.ActionResult.Success) + return + except errors.TransientDriverError(ex): + result = { + 'retry': True, + 'detail': str(ex), + } + self.orchestrator.task_field_update(task.get_id(), + status=hd_fields.TaskStatus.Complete, + result=hd_fields.ActionResult.Failure, + result_details=result) + return + except errors.PersistentDriverError(ex): + result = { + 'retry': False, + 'detail': str(ex), + } + 
self.orchestrator.task_field_update(task.get_id(), + status=hd_fields.TaskStatus.Complete, + result=hd_fields.ActionResult.Failure, + result_details=result) + return + except Exception(ex): + result = { + 'retry': False, + 'detail': str(ex), + } + self.orchestrator.task_field_update(task.get_id(), + status=hd_fields.TaskStatus.Complete, + result=hd_fields.ActionResult.Failure, + result_details=result) + return + + design_id = getattr(task, 'design_id', None) + + if design_id is None: + raise errors.DriverError("No design ID specified in task %s" % + (task_id)) + + + if task.site_name is None: + raise errors.DriverError("No site specified for task %s." % + (task_id)) + + self.orchestrator.task_field_update(task.get_id(), + status=hd_fields.TaskStatus.Running) + + site_design = self.orchestrator.get_effective_site(design_id, task.site_name) + + if task.action == hd_fields.OrchestratorAction.CreateNetworkTemplate: + subtask = self.orchestrator.create_task(task_model.DriverTask, + parent_task_id=task.get_id(), design_id=design_id, + action=task.action, site_name=task.site_name, + task_scope={'site': task.site_name}) + runner = MaasTaskRunner(state_manager=self.state_manager, + orchestrator=self.orchestrator, + task_id=subtask.get_id(),config=self.config) + runner.start() + + runner.join(timeout=120) + + if runner.is_alive(): + result = { + 'retry': False, + 'detail': 'MaaS Network creation timed-out' + } + self.orchestrator.task_field_update(task.get_id(), + status=hd_fields.TaskStatus.Complete, + result=hd_fields.ActionResult.Failure, + result_detail=result) + else: + subtask = self.state_manager.get_task(subtask.get_id()) + self.orchestrator.task_field_update(task.get_id(), + status=hd_fields.TaskStatus.Complete, + result=subtask.get_result()) + + return + +class MaasTaskRunner(drivers.DriverTaskRunner): + + def __init__(self, config=None, **kwargs): + super(MaasTaskRunner, self).__init__(**kwargs) + + self.driver_config = config + + def execute_task(self): + 
task_action = self.task.action + + self.orchestrator.task_field_update(self.task.get_id(), + status=hd_fields.TaskStatus.Running, + result=hd_fields.ActionResult.Incomplete) + + self.maas_client = MaasRequestFactory(self.driver_config['api_url'], + self.driver_config['api_key']) + + site_design = self.orchestrator.get_effective_site(self.task.design_id, + self.task.site_name) + + if task_action == hd_fields.OrchestratorAction.CreateNetworkTemplate: + # Try to true up MaaS definitions of fabrics/vlans/subnets + # with the networks defined in Drydock + design_networks = site_design.networks + + subnets = maas_subnet.Subnets(self.maas_client) + subnets.refresh() + + result_detail = { + 'detail': [] + } + + for n in design_networks: + exists = subnets.query({'cidr': n.cidr}) + + subnet = None + + if len(exists) > 0: + subnet = exists[0] + + subnet.name = n.name + subnet.dns_servers = n.dns_servers + + vlan_list = maas_vlan.Vlans(self.maas_client, fabric_id=subnet.fabric) + vlan_list.refresh() + + vlan = vlan_list.select(subnet.vlan) + + if vlan is not None: + if ((n.vlan_id is None and vlan.vid != 0) or + (n.vlan_id is not None and vlan.vid != n.vlan_id)): + + # if the VLAN name matches, assume this is the correct resource + # and it needs to be updated + if vlan.name == n.name: + vlan.set_vid(n.vlan_id) + vlan.mtu = n.mtu + vlan.update() + else: + vlan_id = n.vlan_id if n.vlan_id is not None else 0 + target_vlan = vlan_list.query({'vid': vlan_id}) + if len(target_vlan) > 0: + subnet.vlan = target_vlan[0].resource_id + else: + # This is a flag that after creating a fabric and + # VLAN below, update the subnet + subnet.vlan = None + else: + subnet.vlan = None + + # Check if the routes have a default route + subnet.gateway_ip = n.get_default_gateway() + + + result_detail['detail'].append("Subnet %s found for network %s, updated attributes" + % (exists[0].resource_id, n.name)) + + # Need to create a Fabric/Vlan for this network + if (subnet is None or (subnet is not None 
and subnet.vlan is None)): + fabric_list = maas_fabric.Fabrics(self.maas_client) + fabric_list.refresh() + matching_fabrics = fabric_list.query({'name': n.name}) + + fabric = None + vlan = None + + if len(matching_fabrics) > 0: + # Fabric exists, update VLAN + fabric = matching_fabrics[0] + + vlan_list = maas_vlan.Vlans(self.maas_client, fabric_id=fabric.resource_id) + vlan_list.refresh() + vlan_id = n.vlan_id if n.vlan_id is not None else 0 + matching_vlans = vlan_list.query({'vid': vlan_id}) + + if len(matching_vlans) > 0: + vlan = matching_vlans[0] + + vlan.name = n.name + if getattr(n, 'mtu', None) is not None: + vlan.mtu = n.mtu + + if subnet is not None: + subnet.vlan = vlan.resource_id + subnet.update() + vlan.update() + else: + vlan = maas_vlan.Vlan(self.maas_client, name=n.name, vid=vlan_id, + mtu=getattr(n, 'mtu', None),fabric_id=fabric.resource_id) + vlan = vlan_list.add(vlan) + + if subnet is not None: + subnet.vlan = vlan.resource_id + subnet.update() + + else: + new_fabric = maas_fabric.Fabric(self.maas_client, name=n.name) + new_fabric = fabric_list.add(new_fabric) + new_fabric.refresh() + fabric = new_fabric + + vlan_list = maas_vlan.Vlans(self.maas_client, fabric_id=new_fabric.resource_id) + vlan_list.refresh() + vlan = vlan_list.single() + + vlan.name = n.name + vlan.vid = n.vlan_id if n.vlan_id is not None else 0 + if getattr(n, 'mtu', None) is not None: + vlan.mtu = n.mtu + + vlan.update() + + if subnet is not None: + subnet.vlan = vlan.resource_id + subnet.update() + + if subnet is None: + subnet = maas_subnet.Subnet(self.maas_client, name=n.name, cidr=n.cidr, fabric=fabric.resource_id, + vlan=vlan.resource_id, gateway_ip=n.get_default_gateway()) + + subnet_list = maas_subnet.Subnets(self.maas_client) + subnet = subnet_list.add(subnet) + + subnet_list = maas_subnet.Subnets(self.maas_client) + subnet_list.refresh() + + action_result = hd_fields.ActionResult.Incomplete + + success_rate = 0 + + for n in design_networks: + exists = 
subnet_list.query({'cidr': n.cidr}) + if len(exists) > 0: + subnet = exists[0] + if subnet.name == n.name: + success_rate = success_rate + 1 + else: + success_rate = success_rate + 1 + else: + success_rate = success_rate + 1 + + if success_rate == len(design_networks): + action_result = hd_fields.ActionResult.Success + elif success_rate == - (len(design_networks)): + action_result = hd_fields.ActionResult.Failure + else: + action_result = hd_fields.ActionResult.PartialSuccess + + self.orchestrator.task_field_update(self.task.get_id(), + status=hd_fields.TaskStatus.Complete, + result=action_result, + result_detail=result_detail) \ No newline at end of file diff --git a/helm_drydock/drivers/node/maasdriver/models/__init__.py b/helm_drydock/drivers/node/maasdriver/models/__init__.py new file mode 100644 index 00000000..2a385a45 --- /dev/null +++ b/helm_drydock/drivers/node/maasdriver/models/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2017 AT&T Intellectual Property. All other rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. \ No newline at end of file diff --git a/helm_drydock/drivers/node/maasdriver/models/base.py b/helm_drydock/drivers/node/maasdriver/models/base.py new file mode 100644 index 00000000..9f3aa336 --- /dev/null +++ b/helm_drydock/drivers/node/maasdriver/models/base.py @@ -0,0 +1,273 @@ +# Copyright 2017 AT&T Intellectual Property. All other rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import json +import re + +import helm_drydock.error as errors +""" +A representation of a MaaS REST resource. Should be subclassed +for different resources and augmented with operations specific +to those resources +""" +class ResourceBase(object): + + resource_url = '/{id}' + fields = ['resource_id'] + json_fields = ['resource_id'] + + def __init__(self, api_client, **kwargs): + self.api_client = api_client + + for f in self.fields: + if f in kwargs.keys(): + setattr(self, f, kwargs.get(f)) + + """ + Update resource attributes from MaaS + """ + def refresh(self): + url = self.interpolate_url() + resp = self.api_client.get(url) + + updated_fields = resp.json() + + for f in self.json_fields: + if f in updated_fields.keys(): + setattr(self, f, updated_fields.get(f)) + + """ + Parse URL for placeholders and replace them with current + instance values + """ + def interpolate_url(self): + pattern = '\{([a-z_]+)\}' + regex = re.compile(pattern) + start = 0 + new_url = self.resource_url + + while (start+1) < len(self.resource_url): + match = regex.search(self.resource_url, start) + if match is None: + return new_url + + param = match.group(1) + val = getattr(self, param, None) + if val is None: + raise ValueError("Missing variable value") + new_url = new_url.replace('{' + param + '}', str(val)) + start = match.end(1) + 1 + + return new_url + + """ + Update MaaS with current resource attributes + """ + def update(self): + 
data_dict = self.to_dict() + url = self.interpolate_url() + + resp = self.api_client.put(url, files=data_dict) + + if resp.status_code == 200: + return True + + raise errors.DriverError("Failed updating MAAS url %s - return code %s\n%s" + % (url, resp.status_code, resp.text)) + + """ + Set the resource_id for this instance + Should only be called when creating new instances and MAAS has assigned + an id + """ + def set_resource_id(self, res_id): + self.resource_id = res_id + + """ + Serialize this resource instance into JSON matching the + MaaS respresentation of this resource + """ + def to_json(self): + return json.dumps(self.to_dict()) + + """ + Serialize this resource instance into a dict matching the + MAAS representation of the resource + """ + def to_dict(self): + data_dict = {} + + for f in self.json_fields: + if getattr(self, f, None) is not None: + if f == 'resource_id': + data_dict['id'] = getattr(self, f) + else: + data_dict[f] = getattr(self, f) + + return data_dict + + """ + Create a instance of this resource class based on the MaaS + representation of this resource type + """ + @classmethod + def from_json(cls, api_client, json_string): + parsed = json.loads(json_string) + + if isinstance(parsed, dict): + return cls.from_dict(api_client, parsed) + + raise errors.DriverError("Invalid JSON for class %s" % (cls.__name__)) + + """ + Create a instance of this resource class based on a dict + of MaaS type attributes + """ + @classmethod + def from_dict(cls, api_client, obj_dict): + refined_dict = {k: obj_dict.get(k, None) for k in cls.fields} + if 'id' in obj_dict.keys(): + refined_dict['resource_id'] = obj_dict.get('id') + + i = cls(api_client, **refined_dict) + return i + + +""" +A collection of MaaS resources. + +Rather than a simple list, we will key the collection on resource +ID for more efficient access. 
+""" +class ResourceCollectionBase(object): + + collection_url = '' + collection_resource = ResourceBase + + def __init__(self, api_client): + self.api_client = api_client + self.resources = {} + + """ + Parse URL for placeholders and replace them with current + instance values + """ + def interpolate_url(self): + pattern = '\{([a-z_]+)\}' + regex = re.compile(pattern) + start = 0 + new_url = self.collection_url + + while (start+1) < len(self.collection_url): + match = regex.search(self.collection_url, start) + if match is None: + return new_url + + param = match.group(1) + val = getattr(self, param, None) + if val is None: + raise ValueError("Missing variable value") + new_url = new_url.replace('{' + param + '}', str(val)) + start = match.end(1) + 1 + + return new_url + + """ + Create a new resource in this collection in MaaS + """ + def add(self, res): + data_dict = res.to_dict() + url = self.interpolate_url() + + resp = self.api_client.post(url, files=data_dict) + + if resp.status_code == 200: + resp_json = resp.json() + res.set_resource_id(resp_json.get('id')) + return res + + raise errors.DriverError("Failed updating MAAS url %s - return code %s" + % (url, resp.status_code)) + + """ + Append a resource instance to the list locally only + """ + def append(self, res): + if isinstance(res, self.collection_resource): + self.resources[res.resource_id] = res + + """ + Initialize or refresh the collection list from MaaS + """ + def refresh(self): + url = self.interpolate_url() + resp = self.api_client.get(url) + + if resp.status_code == 200: + self.resource = {} + json_list = resp.json() + + for o in json_list: + if isinstance(o, dict): + i = self.collection_resource.from_dict(self.api_client, o) + self.resources[i.resource_id] = i + + return + + """ + Check if resource id is in this collection + """ + def contains(self, res_id): + if res_id in self.resources.keys(): + return True + + return False + + """ + Select a resource based on ID or None if not found + """ + 
def select(self, res_id): + return self.resources.get(res_id, None) + + """ + Query the collection based on a resource attribute other than primary id + """ + def query(self, query): + result = list(self.resources.values()) + for (k, v) in query.items(): + result = [i for i in result + if str(getattr(i, k, None)) == str(v)] + + return result + + """ + If the collection has a single item, return it + """ + def single(self): + if self.len() == 1: + for v in self.resources.values(): + return v + else: + return None + + """ + Iterate over the resources in the collection + """ + def __iter__(self): + return iter(self.resources.values()) + + """ + Resource count + """ + def len(self): + return len(self.resources) \ No newline at end of file diff --git a/helm_drydock/drivers/node/maasdriver/models/fabric.py b/helm_drydock/drivers/node/maasdriver/models/fabric.py new file mode 100644 index 00000000..a105f354 --- /dev/null +++ b/helm_drydock/drivers/node/maasdriver/models/fabric.py @@ -0,0 +1,53 @@ +# Copyright 2017 AT&T Intellectual Property. All other rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import json + +import helm_drydock.drivers.node.maasdriver.models.base as model_base +import helm_drydock.drivers.node.maasdriver.models.vlan as model_vlan + +class Fabric(model_base.ResourceBase): + + resource_url = 'fabrics/{resource_id}/' + fields = ['resource_id', 'name', 'description'] + json_fields = ['name', 'description'] + + def __init__(self, api_client, **kwargs): + super(Fabric, self).__init__(api_client, **kwargs) + + if hasattr(self, 'resource_id'): + self.refresh_vlans() + + def refresh(self): + super(Fabric, self).refresh() + + self.refresh_vlans() + + return + + def refresh_vlans(self): + self.vlans = model_vlan.Vlans(self.api_client, fabric_id=self.resource_id) + self.vlans.refresh() + + + def set_resource_id(self, res_id): + self.resource_id = res_id + self.refresh_vlans() + +class Fabrics(model_base.ResourceCollectionBase): + + collection_url = 'fabrics/' + collection_resource = Fabric + + def __init__(self, api_client): + super(Fabrics, self).__init__(api_client) \ No newline at end of file diff --git a/helm_drydock/drivers/node/maasdriver/models/subnet.py b/helm_drydock/drivers/node/maasdriver/models/subnet.py new file mode 100644 index 00000000..ccf677c2 --- /dev/null +++ b/helm_drydock/drivers/node/maasdriver/models/subnet.py @@ -0,0 +1,55 @@ +# Copyright 2017 AT&T Intellectual Property. All other rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import helm_drydock.drivers.node.maasdriver.models.base as model_base + +class Subnet(model_base.ResourceBase): + + resource_url = 'subnets/{resource_id}/' + fields = ['resource_id', 'name', 'description', 'fabric', 'vlan', 'vid', 'dhcp_on', + 'space', 'cidr', 'gateway_ip', 'rdns_mode', 'allow_proxy', 'dns_servers'] + json_fields = ['name', 'description','vlan', 'space', 'cidr', 'gateway_ip', 'rdns_mode', + 'allow_proxy', 'dns_servers'] + + def __init__(self, api_client, **kwargs): + super(Subnet, self).__init__(api_client, **kwargs) + + # For now all subnets will be part of the default space + self.space = 0 + + """ + Because MaaS decides to replace the VLAN id with the + representation of the VLAN, we must reverse it for a true + representation of the resource + """ + @classmethod + def from_dict(cls, api_client, obj_dict): + refined_dict = {k: obj_dict.get(k, None) for k in cls.fields} + if 'id' in obj_dict.keys(): + refined_dict['resource_id'] = obj_dict.get('id') + + if isinstance(refined_dict.get('vlan', None), dict): + refined_dict['fabric'] = refined_dict['vlan']['fabric_id'] + refined_dict['vlan'] = refined_dict['vlan']['id'] + + i = cls(api_client, **refined_dict) + return i + +class Subnets(model_base.ResourceCollectionBase): + + collection_url = 'subnets/' + collection_resource = Subnet + + def __init__(self, api_client, **kwargs): + super(Subnets, self).__init__(api_client) diff --git a/helm_drydock/drivers/node/maasdriver/models/vlan.py b/helm_drydock/drivers/node/maasdriver/models/vlan.py new file mode 100644 index 00000000..f4f506ef --- /dev/null +++ b/helm_drydock/drivers/node/maasdriver/models/vlan.py @@ -0,0 +1,86 @@ +# Copyright 2017 AT&T Intellectual Property. All other rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import json + +import helm_drydock.error as errors +import helm_drydock.drivers.node.maasdriver.models.base as model_base + +class Vlan(model_base.ResourceBase): + + resource_url = 'fabrics/{fabric_id}/vlans/{api_id}/' + fields = ['resource_id', 'name', 'description', 'vid', 'fabric_id', 'dhcp_on', 'mtu'] + json_fields = ['name', 'description', 'vid', 'dhcp_on', 'mtu'] + + def __init__(self, api_client, **kwargs): + super(Vlan, self).__init__(api_client, **kwargs) + + if self.vid is None: + self.vid = 0 + + # the MaaS API decided that the URL endpoint for VLANs should use + # the VLAN tag (vid) rather than the resource ID. 
So to update the + # vid, we have to keep two copies so that the resource_url + # is accurate for updates + self.api_id = self.vid + + def update(self): + super(Vlan, self).update() + + self.api_id = self.vid + + def set_vid(self, new_vid): + if new_vid is None: + self.vid = 0 + else: + self.vid = int(new_vid) + +class Vlans(model_base.ResourceCollectionBase): + + collection_url = 'fabrics/{fabric_id}/vlans/' + collection_resource = Vlan + + def __init__(self, api_client, **kwargs): + super(Vlans, self).__init__(api_client) + + self.fabric_id = kwargs.get('fabric_id', None) + """ + Create a new resource in this collection in MaaS + def add(self, res): + #MAAS API doesn't support all attributes in POST, so create and + # then promptly update via PUT + + min_fields = { + 'name': res.name, + 'description': getattr(res, 'description', None), + } + + if getattr(res, 'vid', None) is None: + min_fields['vid'] = 0 + else: + min_fields['vid'] = res.vid + + url = self.interpolate_url() + resp = self.api_client.post(url, files=min_fields) + + # Check on initial POST creation + if resp.status_code == 200: + resp_json = resp.json() + res.id = resp_json.get('id') + # Submit PUT for additonal fields + res.update() + return res + + raise errors.DriverError("Failed updating MAAS url %s - return code %s\n%s" + % (url, resp.status_code, resp.text)) + """ \ No newline at end of file diff --git a/helm_drydock/drivers/node/maasdriver/readme.md b/helm_drydock/drivers/node/maasdriver/readme.md new file mode 100644 index 00000000..c5d7e98b --- /dev/null +++ b/helm_drydock/drivers/node/maasdriver/readme.md @@ -0,0 +1,46 @@ +# MaaS Node Driver # + +This driver will handle node provisioning using Ubuntu MaaS 2.1. It expects +the Drydock config to hold a valid MaaS API URL (e.g. http://host:port/MAAS/api/2.0) +and a valid API key for authentication. 
+ +## Drydock Model to MaaS Model Relationship ## + +### Site ### + +Will provide some attributes used for configuring MaaS site-wide such +as tag definitions and repositories. + +### Network Link ### + +Will provide attributes for configuring Node/Machine interfaces + +### Network ### + +MaaS will be configured with a single 'space'. Each Network in Drydock +will translate to a unique MaaS fabric+vlan+subnet. Any network with +an address range of type 'dhcp' will cause DHCP to be enabled in MaaS +for that network. + +### Hardware Profile ### + +A foundation to a Baremetal Node definition. Not directly used in MaaS + +### Host Profile ### + +A foundation to a Baremetal Node definition. Not directly used in MaaS + +### Baremetal Node ### + +Defines all the attributes required to commission and deploy nodes via MaaS + +* bootdisk fields and partitions list - Define local node storage configuration +to be implemented by MaaS +* addressing and interface list - Combined with referenced network links and networks, define +interface (physical and virtual (bond / vlan)) configurations and network +addressing +* tags and owner data - Statically defined metadata that will propagate to +MaaS +* base_os - Select which stream a node will be deployed with +* kernel and kernel params - Allow for custom kernel selection and parameter +definition \ No newline at end of file diff --git a/helm_drydock/drivers/oob/__init__.py b/helm_drydock/drivers/oob/__init__.py index 50f353a5..ada30fb8 100644 --- a/helm_drydock/drivers/oob/__init__.py +++ b/helm_drydock/drivers/oob/__init__.py @@ -12,13 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-# OOB: -# sync_hardware_clock -# collect_chassis_sysinfo -# enable_netboot -# initiate_reboot -# set_power_off -# set_power_on import helm_drydock.objects.fields as hd_fields import helm_drydock.error as errors @@ -29,12 +22,13 @@ class OobDriver(ProviderDriver): def __init__(self, **kwargs): super(OobDriver, self).__init__(**kwargs) - self.supported_actions = [hd_fields.OrchestratorAction.ConfigNodePxe, + self.supported_actions = [hd_fields.OrchestrationAction.ValidateOobServices, + hd_fields.OrchestratorAction.ConfigNodePxe, hd_fields.OrchestratorAction.SetNodeBoot, hd_fields.OrchestratorAction.PowerOffNode, hd_fields.OrchestratorAction.PowerOnNode, hd_fields.OrchestratorAction.PowerCycleNode, - hd_fields.OrchestratorAction.InterrogateNode] + hd_fields.OrchestratorAction.InterrogateOob] self.driver_name = "oob_generic" self.driver_key = "oob_generic" diff --git a/helm_drydock/drivers/oob/pyghmi_driver/__init__.py b/helm_drydock/drivers/oob/pyghmi_driver/__init__.py index 33e4d3df..9a57efe9 100644 --- a/helm_drydock/drivers/oob/pyghmi_driver/__init__.py +++ b/helm_drydock/drivers/oob/pyghmi_driver/__init__.py @@ -16,6 +16,7 @@ import time from pyghmi.ipmi.command import Command import helm_drydock.error as errors +import helm_drydock.config as config import helm_drydock.objects.fields as hd_fields import helm_drydock.objects.task as task_model @@ -33,6 +34,8 @@ class PyghmiDriver(oob.OobDriver): self.driver_key = "pyghmi_driver" self.driver_desc = "Pyghmi OOB Driver" + self.config = config.DrydockConfig.node_driver[self.driver_key] + def execute_task(self, task_id): task = self.state_manager.get_task(task_id) @@ -57,6 +60,12 @@ class PyghmiDriver(oob.OobDriver): self.orchestrator.task_field_update(task.get_id(), status=hd_fields.TaskStatus.Running) + if task.action == hd_fields.OrchestratorAction.ValidateOobServices: + self.orchestrator.task_field_update(task.get_id(), + status=hd_fields.TaskStatus.Complete, + result=hd_fields.ActionResult.Success) + return + 
site_design = self.orchestrator.get_effective_site(design_id, task.site_name) target_nodes = [] @@ -284,7 +293,7 @@ class PyghmiTaskRunner(drivers.DriverTaskRunner): result=hd_fields.ActionResult.Failure, status=hd_fields.TaskStatus.Complete) return - elif task_action == hd_fields.OrchestratorAction.InterrogateNode: + elif task_action == hd_fields.OrchestratorAction.InterrogateOob: mci_id = ipmi_session.get_mci() self.orchestrator.task_field_update(self.task.get_id(), diff --git a/helm_drydock/drivers/readme.md b/helm_drydock/drivers/readme.md index 0a663a16..0aab4c1c 100644 --- a/helm_drydock/drivers/readme.md +++ b/helm_drydock/drivers/readme.md @@ -2,14 +2,23 @@ Drivers are downstream actors that Drydock will use to actually execute orchestration actions. It is intended to be a pluggable architecture -so that various downstream automation can be used. +so that various downstream automation can be used. A driver must implement all actions even if the implementation is effectively a no-op. ## oob ## The oob drivers will interface with physical servers' out-of-band management system (e.g. Dell iDRAC, HP iLO, etc...). OOB management will be used for setting a system to use PXE boot and power cycling -servers. +servers. + +### Actions ### + +* ConfigNodePxe - Where available, configure PXE boot options (e.g. PXE interface) +* SetNodeBoot - Set boot source (PXE, hard disk) of a node +* PowerOffNode - Power down a node +* PowerOnNode - Power up a node +* PowerCycleNode - Power cycle a node +* InterrogateOob - Interrogate a node's OOB interface. Resultant data is dependent on what functionality is implemented for a particular OOB interface ## node ## @@ -17,10 +26,30 @@ The node drivers will interface with an external bootstrapping system for loading the base OS on a server and configuring hardware, network, and storage. 
+### Actions ### + +* CreateNetworkTemplate - Configure site-wide network information in bootstrapper +* CreateStorageTemplate - Configure site-wide storage information in bootstrapper +* CreateBootMedia - Ensure all needed boot media is available to the bootstrapper including external repositories +* PrepareHardwareConfig - Prepare the bootstrapper to handle all hardware configuration actions (firmware updates, RAID configuration, driver installation) +* ConfigureHardware - Update and validate all hardware configurations on a node prior to deploying the OS on it +* InterrogateNode - Interrogate the bootstrapper about node information. Depending on the current state of the node, this interrogation will produce different information. +* ApplyNodeNetworking - Configure networking for a node +* ApplyNodeStorage - Configure storage for a node +* ApplyNodePlatform - Configure stream and kernel options for a node +* DeployNode - Deploy the OS to a node +* DestroyNode - Take steps to bring a node back to a blank undeployed state + ## network ## The network drivers will interface with switches for managing port configuration to support the bootstrapping of physical nodes. This is not intended to be a network provisioner, but instead is a support driver for node bootstrapping where temporary changes to network configurations -are required. \ No newline at end of file +are required. 
+ +### Actions ### + +* InterrogatePort - Request information about the current configuration of a network port +* ConfigurePortProvisioning - Configure a network port in provisioning (PXE) mode +* ConfigurePortProduction - Configure a network port in production (configuration post-deployment) mode \ No newline at end of file diff --git a/helm_drydock/error.py b/helm_drydock/error.py index a1accb22..a8988f97 100644 --- a/helm_drydock/error.py +++ b/helm_drydock/error.py @@ -21,5 +21,17 @@ class StateError(Exception): class OrchestratorError(Exception): pass +class TransientOrchestratorError(OrchestratorError): + pass + +class PersistentOrchestratorError(OrchestratorError): + pass + class DriverError(Exception): + pass + +class TransientDriverError(DriverError): + pass + +class PersistentDriverError(DriverError): pass \ No newline at end of file diff --git a/helm_drydock/ingester/plugins/yaml.py b/helm_drydock/ingester/plugins/yaml.py index 02fb797e..ce531f8c 100644 --- a/helm_drydock/ingester/plugins/yaml.py +++ b/helm_drydock/ingester/plugins/yaml.py @@ -161,7 +161,7 @@ class YamlIngester(IngesterPlugin): model.cidr = spec.get('cidr', None) model.allocation_strategy = spec.get('allocation', 'static') - model.vlan_id = spec.get('vlan_id', 1) + model.vlan_id = spec.get('vlan_id', None) model.mtu = spec.get('mtu', None) dns = spec.get('dns', {}) @@ -286,6 +286,7 @@ class YamlIngester(IngesterPlugin): int_model.device_name = i.get('device_name', None) int_model.network_link = i.get('device_link', None) + int_model.primary_netowrk = i.get('primary', False) int_model.hardware_slaves = [] slaves = i.get('slaves', []) diff --git a/helm_drydock/objects/base.py b/helm_drydock/objects/base.py index f481d7c9..d22b2183 100644 --- a/helm_drydock/objects/base.py +++ b/helm_drydock/objects/base.py @@ -31,6 +31,13 @@ class DrydockObject(base.VersionedObject): OBJ_PROJECT_NAMESPACE = 'helm_drydock.objects' + # Return None for undefined attributes + def obj_load_attr(self, 
attrname): + if attrname in self.fields.keys(): + setattr(self, attrname, None) + else: + raise ValueError("Unknown field %s" % (attrname)) + class DrydockPersistentObject(base.VersionedObject): fields = { diff --git a/helm_drydock/objects/fields.py b/helm_drydock/objects/fields.py index cdcf152a..c6ac8ac3 100644 --- a/helm_drydock/objects/fields.py +++ b/helm_drydock/objects/fields.py @@ -30,17 +30,41 @@ class OrchestratorAction(BaseDrydockEnum): DestroyNode = 'destroy_node' # OOB driver actions + ValidateOobServices = 'validate_oob_services' ConfigNodePxe = 'config_node_pxe' SetNodeBoot = 'set_node_boot' PowerOffNode = 'power_off_node' PowerOnNode = 'power_on_node' PowerCycleNode = 'power_cycle_node' + InterrogateOob = 'interrogate_oob' + + # Node driver actions + ValidateNodeServices = 'validate_node_services' + CreateNetworkTemplate = 'create_network_template' + CreateStorageTemplate = 'create_storage_template' + CreateBootMedia = 'create_boot_media' + PrepareHardwareConfig = 'prepare_hardware_config' + ConfigureHardware = 'configure_hardware' InterrogateNode = 'interrogate_node' + ApplyNodeNetworking = 'apply_node_networking' + ApplyNodeStorage = 'apply_node_storage' + ApplyNodePlatform = 'apply_node_platform' + DeployNode = 'deploy_node' + DestroyNode = 'destroy_node' + + # Network driver actions + ValidateNetworkServices = 'validate_network_services' + InterrogatePort = 'interrogate_port' + ConfigurePortProvisioning = 'config_port_provisioning' + ConfigurePortProduction = 'config_port_production' ALL = (Noop, ValidateDesign, VerifySite, PrepareSite, VerifyNode, PrepareNode, DeployNode, DestroyNode, ConfigNodePxe, SetNodeBoot, PowerOffNode, PowerOnNode, PowerCycleNode, - InterrogateNode) + InterrogateOob, CreateNetworkTemplate, CreateStorageTemplate, + CreateBootMedia, PrepareHardwareConfig, ConfigureHardware, + InterrogateNode, ApplyNodeNetworking, ApplyNodeStorage, + ApplyNodePlatform, DeployNode, DestroyNode) class 
OrchestratorActionField(fields.BaseEnumField): AUTO_TYPE = OrchestratorAction() @@ -52,7 +76,7 @@ class ActionResult(BaseDrydockEnum): Failure = 'failure' DependentFailure = 'dependent_failure' - ALL = (Incomplete, Success, PartialSuccess, Failure) + ALL = (Incomplete, Success, PartialSuccess, Failure, DependentFailure) class ActionResultField(fields.BaseEnumField): AUTO_TYPE = ActionResult() diff --git a/helm_drydock/objects/hostprofile.py b/helm_drydock/objects/hostprofile.py index 47144734..5a416dde 100644 --- a/helm_drydock/objects/hostprofile.py +++ b/helm_drydock/objects/hostprofile.py @@ -189,7 +189,7 @@ class HostInterface(base.DrydockObject): if len(child_list) == 0 and len(parent_list) > 0: for p in parent_list: pp = deepcopy(p) - pp.source = hd_obj_fields.ModelSource.Compiled + pp.source = hd_fields.ModelSource.Compiled effective_list.append(pp) elif len(parent_list) == 0 and len(child_list) > 0: for i in child_list: @@ -197,7 +197,7 @@ class HostInterface(base.DrydockObject): continue else: ii = deepcopy(i) - ii.source = hd_obj_fields.ModelSource.Compiled + ii.source = hd_fields.ModelSource.Compiled effective_list.append(ii) elif len(parent_list) > 0 and len(child_list) > 0: parent_interfaces = [] @@ -212,8 +212,8 @@ class HostInterface(base.DrydockObject): elif j.get_name() == parent_name: m = objects.HostInterface() m.device_name = j.get_name() - m.primary_network = - objects.Util.apply_field_inheritance( + m.primary_network = \ + objects.Utils.apply_field_inheritance( getattr(j, 'primary_network', None), getattr(i, 'primary_network', None)) @@ -243,7 +243,7 @@ class HostInterface(base.DrydockObject): if not x.startswith("!")]) m.networks = n - m.source = hd_obj_fields.ModelSource.Compiled + m.source = hd_fields.ModelSource.Compiled effective_list.append(m) add = False @@ -251,14 +251,14 @@ class HostInterface(base.DrydockObject): if add: ii = deepcopy(i) - ii.source = hd_obj_fields.ModelSource.Compiled + ii.source = hd_fields.ModelSource.Compiled 
effective_list.append(ii) for j in child_list: if (j.device_name not in parent_interfaces and not j.get_name().startswith("!")): jj = deepcopy(j) - jj.source = hd_obj_fields.ModelSource.Compiled + jj.source = hd_fields.ModelSource.Compiled effective_list.append(jj) return effective_list diff --git a/helm_drydock/objects/network.py b/helm_drydock/objects/network.py index e2e0334f..e1ccc693 100644 --- a/helm_drydock/objects/network.py +++ b/helm_drydock/objects/network.py @@ -34,11 +34,11 @@ class NetworkLink(base.DrydockPersistentObject, base.DrydockObject): 'site': ovo_fields.StringField(), 'bonding_mode': hd_fields.NetworkLinkBondingModeField( default=hd_fields.NetworkLinkBondingMode.Disabled), - 'bonding_xmit_hash': ovo_fields.StringField(nullable=True), - 'bonding_peer_rate': ovo_fields.StringField(nullable=True), - 'bonding_mon_rate': ovo_fields.IntegerField(nullable=True), - 'bonding_up_delay': ovo_fields.IntegerField(nullable=True), - 'bonding_down_delay': ovo_fields.IntegerField(nullable=True), + 'bonding_xmit_hash': ovo_fields.StringField(nullable=True, default='layer3+4'), + 'bonding_peer_rate': ovo_fields.StringField(nullable=True, default='slow'), + 'bonding_mon_rate': ovo_fields.IntegerField(nullable=True, default=100), + 'bonding_up_delay': ovo_fields.IntegerField(nullable=True, default=200), + 'bonding_down_delay': ovo_fields.IntegerField(nullable=True, default=200), 'mtu': ovo_fields.IntegerField(default=1500), 'linkspeed': ovo_fields.StringField(default='auto'), 'trunk_mode': hd_fields.NetworkLinkTrunkingModeField( @@ -81,7 +81,9 @@ class Network(base.DrydockPersistentObject, base.DrydockObject): 'mtu': ovo_fields.IntegerField(nullable=True), 'dns_domain': ovo_fields.StringField(nullable=True), 'dns_servers': ovo_fields.StringField(nullable=True), + # Keys of ranges are 'type', 'start', 'end' 'ranges': ovo_fields.ListOfDictOfNullableStringsField(), + # Keys of routes are 'subnet', 'gateway', 'metric' 'routes': 
ovo_fields.ListOfDictOfNullableStringsField(), } @@ -95,6 +97,14 @@ class Network(base.DrydockPersistentObject, base.DrydockObject): def get_name(self): return self.name + def get_default_gateway(self): + for r in getattr(self,'routes', []): + if r.get('subnet', '') == '0.0.0.0/0': + return r.get('gateway', None) + + return None + + @base.DrydockObjectRegistry.register class NetworkList(base.DrydockObjectListBase, base.DrydockObject): diff --git a/helm_drydock/objects/site.py b/helm_drydock/objects/site.py index d092c853..f786d911 100644 --- a/helm_drydock/objects/site.py +++ b/helm_drydock/objects/site.py @@ -126,12 +126,7 @@ class SiteDesign(base.DrydockPersistentObject, base.DrydockObject): def __init__(self, **kwargs): super(SiteDesign, self).__init__(**kwargs) - # Initialize lists for blank instances - def obj_load_attr(self, attrname): - if attrname in self.fields.keys(): - setattr(self, attrname, None) - else: - raise ValueError("Unknown field %s" % (attrname)) + # Assign UUID id def assign_id(self): diff --git a/helm_drydock/objects/task.py b/helm_drydock/objects/task.py index 0c05e678..9985b285 100644 --- a/helm_drydock/objects/task.py +++ b/helm_drydock/objects/task.py @@ -87,9 +87,6 @@ class OrchestratorTask(Task): class DriverTask(Task): - # subclasses implemented by each driver should override this with the list - # of actions that driver supports - def __init__(self, task_scope={}, **kwargs): super(DriverTask, self).__init__(**kwargs) diff --git a/helm_drydock/orchestrator/__init__.py b/helm_drydock/orchestrator/__init__.py index 28def977..2b589156 100644 --- a/helm_drydock/orchestrator/__init__.py +++ b/helm_drydock/orchestrator/__init__.py @@ -116,6 +116,53 @@ class Orchestrator(object): self.task_field_update(task_id, status=hd_fields.TaskStatus.Complete) return + elif task.action == hd_fields.OrchestratorAction.VerifySite: + self.task_field_update(task_id, + status=hd_fields.TaskStatus.Running) + + node_driver = self.enabled_drivers['node'] + + if 
node_driver is not None: + node_driver_task = self.create_task(tasks.DriverTask, + parent_task_id=task.get_id(), + design_id=design_id, + action=hd_fields.OrchestratorAction.ValidateNodeServices) + + node_driver.execute_task(node_driver_task.get_id()) + + node_driver_task = self.state_manager.get_task(node_driver_task.get_id()) + + self.task_field_update(task_id, + status=hd_fields.TaskStatus.Complete, + result=node_driver_task.get_result()) + return + elif task.action == hd_fields.OrchestratorAction.PrepareSite: + driver = self.enabled_drivers['node'] + + if driver is None: + self.task_field_update(task_id, + status=hd_fields.TaskStatus.Errored, + result=hd_fields.ActionResult.Failure) + return + + task_scope = { + 'site': task.site + } + + driver_task = self.create_task(tasks.DriverTask, + parent_task_id=task.get_id(), + design_id=design_id, + task_scope=task_scope, + action=hd_fields.OrchestratorAction.CreateNetworkTemplate) + + driver.execute_task(driver_task.get_id()) + + driver_task = self.state_manager.get_task(driver_task.get_id()) + + self.task_field_update(task_id, + status=hd_fields.TaskStatus.Complete, + result=driver_task.get_result()) + return elif task.action == hd_fields.OrchestratorAction.VerifyNode: self.task_field_update(task_id, status=hd_fields.TaskStatus.Running) diff --git a/helm_drydock/orchestrator/readme.md b/helm_drydock/orchestrator/readme.md index fb6324ae..abb48068 100644 --- a/helm_drydock/orchestrator/readme.md +++ b/helm_drydock/orchestrator/readme.md @@ -11,6 +11,7 @@ such that on failure the task can retried and only the steps needed will be executed. ## Drydock Tasks ## + Bullet points listed below are not exhaustive and will change as we move through testing @@ -21,6 +22,14 @@ validate that the current state of design data represents a valid site design. No claim is made that the design data is compatible with the physical state of the site. 
+#### Validations #### + +* All baremetal nodes have an address, either static or DHCP, for all networks they are attached to. +* No static IP assignments are duplicated +* No static IP assignments are outside of the network they are targeted for +* No network MTU mismatches due to a network riding different links on different nodes +* Boot drive is above minimum size + ### VerifySite ### Verify site-wide resources are in a useful state @@ -67,6 +76,9 @@ Prepare a node for bootstrapping - Hardware configuration (e.g. RAID) * Configure node networking * Configure node storage +* Interrogate node + - lshw output + - lldp output ### DeployNode ### diff --git a/setup.py b/setup.py index 0287e80f..01bbff5f 100644 --- a/setup.py +++ b/setup.py @@ -48,19 +48,18 @@ setup(name='helm_drydock', 'helm_drydock.control', 'helm_drydock.drivers', 'helm_drydock.drivers.oob', - 'helm_drydock.drivers.oob.pyghmi_driver'], + 'helm_drydock.drivers.oob.pyghmi_driver', + 'helm_drydock.drivers.node', + 'helm_drydock.drivers.node.maasdriver', + 'helm_drydock.drivers.node.maasdriver.models'], install_requires=[ 'PyYAML', - 'oauth', - 'requests-oauthlib', 'pyghmi>=1.0.18', 'netaddr', 'falcon', - 'webob', 'oslo.versionedobjects>=1.23.0', - ], - dependency_link=[ - 'git+https://github.com/maas/python-libmaas.git' + 'requests', + 'oauthlib', ] ) diff --git a/tests/integration/test_maasdriver_client.py b/tests/integration/test_maasdriver_client.py new file mode 100644 index 00000000..88b86e95 --- /dev/null +++ b/tests/integration/test_maasdriver_client.py @@ -0,0 +1,30 @@ +# Copyright 2017 AT&T Intellectual Property. All other rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import json + +import helm_drydock.config as config +import helm_drydock.drivers.node.maasdriver.api_client as client + +class TestClass(object): + + def test_client_authenticate(self): + client_config = config.DrydockConfig.node_driver['maasdriver'] + + maas_client = client.MaasRequestFactory(client_config['api_url'], client_config['api_key']) + + resp = maas_client.get('account/', params={'op': 'list_authorisation_tokens'}) + + parsed = resp.json() + + assert len(parsed) > 0 \ No newline at end of file diff --git a/tests/integration/test_maasdriver_network.py b/tests/integration/test_maasdriver_network.py new file mode 100644 index 00000000..36c8b324 --- /dev/null +++ b/tests/integration/test_maasdriver_network.py @@ -0,0 +1,58 @@ +# Copyright 2017 AT&T Intellectual Property. All other rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import json +import uuid + +import helm_drydock.config as config +import helm_drydock.drivers.node.maasdriver.api_client as client +import helm_drydock.drivers.node.maasdriver.models.fabric as maas_fabric +import helm_drydock.drivers.node.maasdriver.models.subnet as maas_subnet + +class TestClass(object): + + def test_maas_fabric(self): + client_config = config.DrydockConfig.node_driver['maasdriver'] + + maas_client = client.MaasRequestFactory(client_config['api_url'], client_config['api_key']) + + fabric_name = str(uuid.uuid4()) + + fabric_list = maas_fabric.Fabrics(maas_client) + fabric_list.refresh() + + test_fabric = maas_fabric.Fabric(maas_client, name=fabric_name, description='Test Fabric') + test_fabric = fabric_list.add(test_fabric) + + assert test_fabric.name == fabric_name + assert test_fabric.resource_id is not None + + query_fabric = maas_fabric.Fabric(maas_client, resource_id=test_fabric.resource_id) + query_fabric.refresh() + + assert query_fabric.name == test_fabric.name + + def test_maas_subnet(self): + client_config = config.DrydockConfig.node_driver['maasdriver'] + + maas_client = client.MaasRequestFactory(client_config['api_url'], client_config['api_key']) + + subnet_list = maas_subnet.Subnets(maas_client) + subnet_list.refresh() + + for s in subnet_list: + print(s.to_dict()) + assert False + + + diff --git a/tests/integration/test_orch_node_networks.py b/tests/integration/test_orch_node_networks.py new file mode 100644 index 00000000..85619a30 --- /dev/null +++ b/tests/integration/test_orch_node_networks.py @@ -0,0 +1,94 @@ +# Copyright 2017 AT&T Intellectual Property. All other rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import json +import pytest +import shutil +import os +import uuid + +import helm_drydock.config as config +import helm_drydock.drivers.node.maasdriver.api_client as client +import helm_drydock.ingester.plugins.yaml +import helm_drydock.statemgmt as statemgmt +import helm_drydock.objects as objects +import helm_drydock.orchestrator as orch +import helm_drydock.objects.fields as hd_fields +import helm_drydock.objects.task as task +import helm_drydock.drivers as drivers +from helm_drydock.ingester import Ingester + +class TestClass(object): + + def test_client_verify(self): + design_state = statemgmt.DesignState() + orchestrator = orch.Orchestrator(state_manager=design_state, + enabled_drivers={'node': 'helm_drydock.drivers.node.maasdriver.driver.MaasNodeDriver'}) + + orch_task = orchestrator.create_task(task.OrchestratorTask, + site='sitename', + design_id=None, + action=hd_fields.OrchestratorAction.VerifySite) + + orchestrator.execute_task(orch_task.get_id()) + + orch_task = design_state.get_task(orch_task.get_id()) + + assert orch_task.result == hd_fields.ActionResult.Success + + def test_orch_preparesite(self, input_files): + objects.register_all() + + input_file = input_files.join("fullsite.yaml") + + design_state = statemgmt.DesignState() + design_data = objects.SiteDesign() + design_id = design_data.assign_id() + design_state.post_design(design_data) + + ingester = Ingester() + ingester.enable_plugins([helm_drydock.ingester.plugins.yaml.YamlIngester]) + ingester.ingest_data(plugin_name='yaml', design_state=design_state, + filenames=[str(input_file)], 
design_id=design_id) + + design_data = design_state.get_design(design_id) + + orchestrator = orch.Orchestrator(state_manager=design_state, + enabled_drivers={'node': 'helm_drydock.drivers.node.maasdriver.driver.MaasNodeDriver'}) + + orch_task = orchestrator.create_task(task.OrchestratorTask, + site='sitename', + design_id=design_id, + action=hd_fields.OrchestratorAction.PrepareSite) + + orchestrator.execute_task(orch_task.get_id()) + + orch_task = design_state.get_task(orch_task.get_id()) + + assert orch_task.result == hd_fields.ActionResult.Success + + + + + @pytest.fixture(scope='module') + def input_files(self, tmpdir_factory, request): + tmpdir = tmpdir_factory.mktemp('data') + samples_dir = os.path.dirname(str(request.fspath)) + "/../yaml_samples" + samples = os.listdir(samples_dir) + + for f in samples: + src_file = samples_dir + "/" + f + dst_file = str(tmpdir) + "/" + f + shutil.copyfile(src_file, dst_file) + + return tmpdir \ No newline at end of file diff --git a/tests/test_design_inheritance.py b/tests/unit/test_design_inheritance.py similarity index 94% rename from tests/test_design_inheritance.py rename to tests/unit/test_design_inheritance.py index 587f737d..e5c57ce8 100644 --- a/tests/test_design_inheritance.py +++ b/tests/unit/test_design_inheritance.py @@ -13,7 +13,7 @@ # limitations under the License. 
from helm_drydock.ingester import Ingester -from helm_drydock.statemgmt import DesignState, SiteDesign +from helm_drydock.statemgmt import DesignState from helm_drydock.orchestrator import Orchestrator from copy import deepcopy @@ -72,7 +72,7 @@ class TestClass(object): @pytest.fixture(scope='module') def input_files(self, tmpdir_factory, request): tmpdir = tmpdir_factory.mktemp('data') - samples_dir = os.path.dirname(str(request.fspath)) + "/yaml_samples" + samples_dir = os.path.dirname(str(request.fspath)) + "/../yaml_samples" samples = os.listdir(samples_dir) for f in samples: diff --git a/tests/test_ingester.py b/tests/unit/test_ingester.py similarity index 96% rename from tests/test_ingester.py rename to tests/unit/test_ingester.py index 4fcb2af6..a719ad6c 100644 --- a/tests/test_ingester.py +++ b/tests/unit/test_ingester.py @@ -70,7 +70,7 @@ class TestClass(object): @pytest.fixture(scope='module') def input_files(self, tmpdir_factory, request): tmpdir = tmpdir_factory.mktemp('data') - samples_dir = os.path.dirname(str(request.fspath)) + "/yaml_samples" + samples_dir = os.path.dirname(str(request.fspath)) + "/../yaml_samples" samples = os.listdir(samples_dir) for f in samples: diff --git a/tests/test_ingester_yaml.py b/tests/unit/test_ingester_yaml.py similarity index 94% rename from tests/test_ingester_yaml.py rename to tests/unit/test_ingester_yaml.py index 99992ebf..3be5db08 100644 --- a/tests/test_ingester_yaml.py +++ b/tests/unit/test_ingester_yaml.py @@ -44,7 +44,7 @@ class TestClass(object): @pytest.fixture(scope='module') def input_files(self, tmpdir_factory, request): tmpdir = tmpdir_factory.mktemp('data') - samples_dir = os.path.dirname(str(request.fspath)) + "/yaml_samples" + samples_dir = os.path.dirname(str(request.fspath)) + "/../yaml_samples" samples = os.listdir(samples_dir) for f in samples: diff --git a/tests/test_models.py b/tests/unit/test_models.py similarity index 100% rename from tests/test_models.py rename to tests/unit/test_models.py diff 
--git a/tests/test_orch_generic.py b/tests/unit/test_orch_generic.py similarity index 100% rename from tests/test_orch_generic.py rename to tests/unit/test_orch_generic.py diff --git a/tests/test_orch_oob.py b/tests/unit/test_orch_oob.py similarity index 97% rename from tests/test_orch_oob.py rename to tests/unit/test_orch_oob.py index 42d2c30f..6c10d8f1 100644 --- a/tests/test_orch_oob.py +++ b/tests/unit/test_orch_oob.py @@ -96,7 +96,7 @@ class TestClass(object): @pytest.fixture(scope='module') def input_files(self, tmpdir_factory, request): tmpdir = tmpdir_factory.mktemp('data') - samples_dir = os.path.dirname(str(request.fspath)) + "/yaml_samples" + samples_dir = os.path.dirname(str(request.fspath)) + "/../yaml_samples" samples = os.listdir(samples_dir) for f in samples: diff --git a/tests/test_statemgmt.py b/tests/unit/test_statemgmt.py similarity index 100% rename from tests/test_statemgmt.py rename to tests/unit/test_statemgmt.py From 25e72f94cc26a8d125b0cec6b5cb3450c9e097cc Mon Sep 17 00:00:00 2001 From: Scott Hussey Date: Thu, 18 May 2017 15:47:59 -0500 Subject: [PATCH 11/11] Update the YAML design parts example to match model updates --- examples/bootstrap_hwdefinition.yaml | 58 ---- examples/bootstrap_seed.yaml | 420 --------------------------- examples/designparts_v1.0.yaml | 331 +++++++++++++++++++++ examples/readme.md | 7 + 4 files changed, 338 insertions(+), 478 deletions(-) delete mode 100644 examples/bootstrap_hwdefinition.yaml delete mode 100644 examples/bootstrap_seed.yaml create mode 100644 examples/designparts_v1.0.yaml create mode 100644 examples/readme.md diff --git a/examples/bootstrap_hwdefinition.yaml b/examples/bootstrap_hwdefinition.yaml deleted file mode 100644 index d7daa741..00000000 --- a/examples/bootstrap_hwdefinition.yaml +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright 2017 AT&T Intellectual Property. All other rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -############################################################################# -# -# bootstrap_hwdefinition.yaml - Definitions of server hardware layout -# -############################################################################# -# version the schema in this file so consumers can rationally parse it ---- -apiVersion: 'v1.0' -kind: HardwareProfile -metadata: - name: HPGen8v3 - region: sitename - date: 17-FEB-2017 - description: Sample hardware definition - author: Scott Hussey -spec: - # Vendor of the server chassis - vendor: HP - # Generation of the chassis model - generation: '8' - # Version of the chassis model within its generation - not version of the hardware definition - hw_version: '3' - # The certified version of the chassis BIOS - bios_version: '2.2.3' - # Mode of the default boot of hardware - bios, uefi - boot_mode: bios - # Protocol of boot of the hardware - pxe, usb, hdd - bootstrap_protocol: pxe - # Which interface to use for network booting within the OOB manager, not OS device - pxe_interface: 0 - # Map hardware addresses to aliases/roles to allow a mix of hardware configs - # in a site to result in a consistent configuration - device_aliases: - pci: - - address: pci@0000:00:03.0 - alias: prim_nic01 - # type could identify expected hardware - used for hardware manifest validation - type: '82540EM Gigabit Ethernet Controller' - - address: pci@0000:00:04.0 - alias: prim_nic02 - type: '82540EM Gigabit 
Ethernet Controller' - scsi: - - address: scsi@2:0.0.0 - alias: primary_boot - type: 'VBOX HARDDISK' \ No newline at end of file diff --git a/examples/bootstrap_seed.yaml b/examples/bootstrap_seed.yaml deleted file mode 100644 index 360075f9..00000000 --- a/examples/bootstrap_seed.yaml +++ /dev/null @@ -1,420 +0,0 @@ -# Copyright 2017 AT&T Intellectual Property. All other rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#################### -# -# bootstrap_seed.yaml - Site server design definition for physical layer -# -#################### -# version the schema in this file so consumers can rationally parse it ---- -apiVersion: 'v1.0' -kind: Region -metadata: - name: sitename - date: 17-FEB-2017 - description: Sample site design - author: sh8121@att.com -spec: - # Not sure if we have site wide data that doesn't fall into another 'Kind' ---- -apiVersion: 'v1.0' -kind: NetworkLink -metadata: - name: oob - region: sitename - date: 17-FEB-2017 - author: sh8121@att.com - description: Describe layer 1 attributes. Primary key is 'name'. These settings will generally be things the switch and server have to agree on -spec: - bonding: - mode: none - mtu: 1500 - linkspeed: 100full - trunking: - mode: none - default_network: oob ---- -# pxe is a bit of 'magic' indicating the link config used when PXE booting -# a node. All other links indicate network configs applied when the node -# is deployed. 
-apiVersion: 'v1.0' -kind: NetworkLink -metadata: - name: pxe - region: sitename - date: 17-FEB-2017 - author: sh8121@att.com - description: Describe layer 1 attributes. Primary key is 'name'. These settings will generally be things the switch and server have to agree on -spec: - bonding: - mode: none - mtu: 1500 - linkspeed: auto - # Is this link supporting multiple layer 2 networks? - # none is a port-based VLAN identified by default_network - # tagged is is using 802.1q VLAN tagging. Untagged packets will default to default_netwokr - trunking: - mode: none - # use name, will translate to VLAN ID - default_network: pxe ---- -apiVersion: 'v1.0' -kind: NetworkLink -metadata: - name: gp - region: sitename - date: 17-FEB-2017 - author: sh8121@att.com - description: Describe layer 1 attributes. These CIs will generally be things the switch and server have to agree on - # pxe is a bit of 'magic' indicating the link config used when PXE booting - # a node. All other links indicate network configs applied when the node - # is deployed. -spec: - # If this link is a bond of physical links, how is it configured - # 802.3ad - # active-backup - # balance-rr - # Can add support for others down the road - bonding: - mode: 802.3ad - # For LACP (802.3ad) xmit hashing policy: layer2, layer2+3, layer3+4, encap3+4 - hash: layer3+4 - # 802.3ad specific options - peer_rate: slow - mon_rate: default - up_delay: default - down_delay: default - mtu: 9000 - linkspeed: auto - # Is this link supporting multiple layer 2 networks? - trunking: - mode: tagged - default_network: mgmt ---- -apiVersion: 'v1.0' -kind: Network -metadata: - name: oob - region: sitename - date: 17-FEB-2017 - author: sh8121@att.com - description: Describe layer 2/3 attributes. 
Primarily CIs used for configuring server interfaces -spec: - allocation: static - cidr: 172.16.100.0/24 - ranges: - - type: static - start: 172.16.100.15 - end: 172.16.100.254 - dns: - domain: ilo.sitename.att.com - servers: 172.16.100.10 ---- -apiVersion: 'v1.0' -kind: Network -metadata: - name: pxe - region: sitename - date: 17-FEB-2017 - author: sh8121@att.com - description: Describe layer 2/3 attributes. Primarily CIs used for configuring server interfaces -spec: - # Layer 2 VLAN segment id, could support other segmentations. Optional - vlan_id: '99' - # How are addresses assigned? - allocation: dhcp - # MTU for this VLAN interface, if not specified it will be inherited from the link - mtu: 1500 - # Network address - cidr: 172.16.0.0/24 - # Desribe IP address ranges - ranges: - - type: dhcp - start: 172.16.0.5 - end: 172.16.0.254 - # DNS settings for this network - dns: - # Domain addresses on this network will be registered under - domain: admin.sitename.att.com - # DNS servers that a server using this network as its default gateway should use - servers: 172.16.0.10 ---- -apiVersion: 'v1.0' -kind: Network -metadata: - name: mgmt - region: sitename - date: 17-FEB-2017 - author: sh8121@att.com - description: Describe layer 2/3 attributes. Primarily CIs used for configuring server interfaces -spec: - vlan_id: '100' - # How are addresses assigned? 
- allocation: static - # Allow MTU to be inherited from link the network rides on - mtu: 1500 - # Network address - cidr: 172.16.1.0/24 - # Desribe IP address ranges - ranges: - - type: static - start: 172.16.1.15 - end: 172.16.1.254 - # Static routes to be added for this network - routes: - - subnet: 0.0.0.0/0 - # A blank gateway would leave to a static route specifying - # only the interface as a source - gateway: 172.16.1.1 - metric: 10 - # DNS settings for this network - dns: - # Domain addresses on this network will be registered under - domain: mgmt.sitename.example.com - # DNS servers that a server using this network as its default gateway should use - servers: 172.16.1.9,172.16.1.10 ---- -apiVersion: 'v1.0' -kind: Network -metadata: - name: private - region: sitename - date: 17-FEB-2017 - author: sh8121@att.com - description: Describe layer 2/3 attributes. Primarily CIs used for configuring server interfaces -spec: - vlan_id: '101' - allocation: static - mtu: 9000 - cidr: 172.16.2.0/24 - # Desribe IP address ranges - ranges: - # Type can be reserved (not used for baremetal), static (all explicit - # assignments should fall here), dhcp (will be used by a DHCP server on this network) - - type: static - start: 172.16.2.15 - end: 172.16.2.254 - dns: - domain: priv.sitename.example.com - servers: 172.16.2.9,172.16.2.10 ---- -apiVersion: 'v1.0' -kind: Network -metadata: - name: public - region: sitename - date: 17-FEB-2017 - author: sh8121@att.com - description: Describe layer 2/3 attributes. Primarily CIs used for configuring server interfaces -spec: - vlan_id: '102' - # How are addresses assigned? 
- allocation: static - # MTU size for the VLAN interface - mtu: 1500 - cidr: 172.16.3.0/24 - # Desribe IP address ranges - ranges: - - type: static - start: 172.16.3.15 - end: 172.16.3.254 - routes: - - subnet: 0.0.0.0/0 - gateway: 172.16.3.1 - metric: 9 - dns: - domain: sitename.example.com - servers: 8.8.8.8 ---- -apiVersion: 'v1.0' -kind: HostProfile -metadata: - name: default - region: sitename - date: 17-FEB-2017 - author: sh8121@att.com - description: Describe layer 2/3 attributes. Primarily CIs used for configuring server interfaces - # No magic to this host_profile, it just provides a way to specify - # sitewide settings. If it is absent from a node's inheritance chain - # then these values will NOT be applied -spec: - # OOB (iLO, iDRAC, etc...) settings. Should prefer open standards such - # as IPMI over vender-specific when possible. - oob: - type: ipmi - # OOB networking should be preconfigured, but we can include a network - # definition for validation or enhancement (DNS registration) - network: oob - account: admin - credential: admin - # Specify storage layout of base OS. Ceph out of scope - storage: - # How storage should be carved up: lvm (logical volumes), flat - # (single partition) - layout: lvm - # Info specific to the boot and root disk/partitions - bootdisk: - # Device will specify an alias defined in hwdefinition.yaml - device: primary_boot - # For LVM, the size of the partition added to VG as a PV - # For flat, the size of the partition formatted as ext4 - root_size: 50g - # The /boot partition. If not specified, /boot will in root - boot_size: 2g - # Info for additional partitions. Need to balance between - # flexibility and complexity - partitions: - - name: logs - device: primary_boot - # Partition uuid if needed - part_uuid: 84db9664-f45e-11e6-823d-080027ef795a - size: 10g - # Optional, can carve up unformatted block devices - mountpoint: /var/log - fstype: ext4 - mount_options: defaults - # Filesystem UUID or label can be specified. 
UUID recommended - fs_uuid: cdb74f1c-9e50-4e51-be1d-068b0e9ff69e - fs_label: logs - # Platform (Operating System) settings - platform: - image: ubuntu_16.04_hwe - kernel_params: default - # Additional metadata to apply to a node - metadata: - # Base URL of the introspection service - may go in curtin data - introspection_url: http://172.16.1.10:9090 ---- -apiVersion: 'v1.0' -kind: HostProfile -metadata: - name: k8-node - region: sitename - date: 17-FEB-2017 - author: sh8121@att.com - description: Describe layer 2/3 attributes. Primarily CIs used for configuring server interfaces -spec: - # host_profile inheritance allows for deduplication of common CIs - # Inheritance is additive for CIs that are lists of multiple items - # To remove an inherited list member, prefix the primary key value - # with '!'. - host_profile: defaults - # Hardware profile will map hardware specific details to the abstract - # names uses in the host profile as well as specify hardware specific - # configs. A viable model should be to build a host profile without a - # hardware_profile and then for each node inherit the host profile and - # specify a hardware_profile to map that node's hardware to the abstract - # settings of the host_profile - hardware_profile: HPGen9v3 - # Network interfaces. 
- interfaces: - # Keyed on device_name - # pxe is a special marker indicating which device should be used for pxe boot - - device_name: pxe - # The network link attached to this - network_link: pxe - # Slaves will specify aliases from hwdefinition.yaml - slaves: - - prim_nic01 - # Which networks will be configured on this interface - networks: - - name: pxe - - device_name: bond0 - network_link: gp - # If multiple slaves are specified, but no bonding config - # is applied to the link, design validation will fail - slaves: - - prim_nic01 - - prim_nic02 - # If multiple networks are specified, but no trunking - # config is applied to the link, design validation will fail - networks: - - name: mgmt - - name: private - metadata: - # Explicit tag assignment - tags: - - 'test' - # MaaS supports key/value pairs. Not sure of the use yet - owner_data: - foo: bar ---- -apiVersion: 'v1.0' -kind: HostProfile -metadata: - name: k8-node-public - region: sitename - date: 17-FEB-2017 - author: sh8121@att.com - description: Describe layer 2/3 attributes. Primarily CIs used for configuring server interfaces -spec: - host_profile: k8-node - interfaces: - - device_name: bond0 - networks: - # This is additive, so adds a network to those defined in the host_profile - # inheritance chain - - name: public ---- -apiVersion: 'v1.0' -kind: BaremetalNode -metadata: - name: controller01 - region: sitename - date: 17-FEB-2017 - author: sh8121@att.com - description: Describe layer 2/3 attributes. Primarily CIs used for configuring server interfaces -spec: - host_profile: k8-node-public - # the hostname for a server, could be used in multiple DNS domains to - # represent different interfaces - interfaces: - - device_name: bond0 - networks: - # '!' prefix for the value of the primary key indicates a record should be removed - - name: '!private' - # Addresses assigned to network interfaces - addressing: - # Which network the address applies to. 
If a network appears in addressing - # that isn't assigned to an interface, design validation will fail - - network: pxe - # The address assigned. Either a explicit IPv4 or IPv6 address - # or dhcp or slaac - address: dhcp - - network: mgmt - address: 172.16.1.20 - - network: public - address: 172.16.3.20 - metadata: - tags: - - os_ctl - rack: rack01 ---- -apiVersion: 'v1.0' -kind: BaremetalNode -metadata: - name: compute01 - region: sitename - date: 17-FEB-2017 - author: sh8121@att.com - description: Describe layer 2/3 attributes. Primarily CIs used for configuring server interfaces -spec: - host_profile: k8-node - addressing: - - network: pxe - address: dhcp - - network: mgmt - address: 172.16.1.21 - - network: private - address: 172.16.2.21 diff --git a/examples/designparts_v1.0.yaml b/examples/designparts_v1.0.yaml new file mode 100644 index 00000000..530efbc0 --- /dev/null +++ b/examples/designparts_v1.0.yaml @@ -0,0 +1,331 @@ +# Copyright 2017 AT&T Intellectual Property. All other rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +--- +# Site/Region wide definitions. 
Each design part will be a constituent +# of the design for exactly one Region +apiVersion: 'v1.0' +kind: Region +metadata: + name: sitename + date: 17-FEB-2017 + description: Sample site design + author: sh8121@att.com +spec: + # List of query-based definitions for applying tags to deployed nodes + tag_definitions: + - tag: 'high_memory' + # Tag to apply to nodes that qualify for the query + definition_type: 'lshw_xpath' + # Only support on type for now - 'lshw_xpath' used by MaaS + definition: //node[@id="memory"]/'size units="bytes"' > 137438953472 + # an xpath query that is run against the output of 'lshw -xml' from the node + # Image and package repositories needed by Drydock drivers. Needs to be defined + repositories: + - name: 'ubuntu-main' +--- +apiVersion: 'v1.0' +kind: NetworkLink +metadata: + name: oob + region: sitename + date: 17-FEB-2017 + author: sh8121@att.com + description: Describe layer 1 attributes. Primary key is 'name'. These settings will generally be things the switch and server have to agree on +spec: + bonding: + # Mode can be 'disabled', '802.3ad', 'balanced-rr', 'active-backup'. Defaults to disabled + mode: '802.3ad' + # The below apply to 802.3ad (LACP + # Link selection hash. Supports 'layer3+4', 'layer2', 'layer2+3'. Defaults to 'layer3+4' + hash: 'layer3+4' + # LACP peering rate. Supports 'slow', 'fast'. Defaults to 'fast' + peer_rate: 'fast' + # LACP link monitor rate in milliseconds. Defaults to 100ms + mon_rate: 100 + # LACP delay for marking link up in milliseconds. Must be greater than mon_rate. Defaults to 200ms + up_delay: 200 + # LACP dleay for marking link down in milliseconds. Must be greater than mon_rate. Defaults to 200ms + down_delay: 200 + # Physical link default MTU size. No default + mtu: 1500 + # Physical link speed. Supports 'auto', '100full'. Gigabit+ speeds require auto. No default + linkspeed: 'auto' + # Settings for using a link for multiple L2 networks + trunking: + # Trunking mode. 
Supports 'disabled', '802.1q'. Defaults to disabled + mode: disabled + # If disabled, what network is this port on. If '802.1q' what is the default network for the port. No default. + default_network: oob +--- +apiVersion: 'v1.0' +kind: Network +metadata: + name: oob + region: sitename + date: 17-FEB-2017 + author: sh8121@att.com + description: Describe layer 2 and 3 attributes. Primary key is 'name'. +spec: + # CIDR representation of network number and netmask + cidr: '172.16.1.0/24' + # How addresses are allocated on the network. Supports 'static', 'dhcp'. Defaults to 'static' + allocation: 'static' + # VLAN of this network. Defaults to None + vlan: 100 + # MTU of this network. Defaults to the MTU specified for the NetworkLink used for this network + dns: + # Domain name used to register addresses assigned from this network. Defaults to 'local' + domain: 'aic.att.com' + # Comma-separated list of DNS server IP addresses. These will be configured on the node if + # this network is identified as the node's primary network + servers: '8.8.8.8, 4.4.4.4' + # Defined IP address ranges. All node IP address assignments must fall into a defined range + # of the correct type + ranges: + # Type of range. Supports 'static' or 'dhcp'. No default + - type: 'dhcp' + # Start of the address range, inclusive. No default + start: '172.16.1.100' + # End of the address range, inclusive. No default + end: '172.16.1.254' + # Routes defined for this network, including the default route (i.e. default gateway) + routes: + # The network being routed to in CIDR notation. Default gateway is 0.0.0.0/0. + - subnet: '0.0.0.0/0' + # Next hop for traffic using this route + gateway: '172.16.1.3' + # Selection metric for the host selecting this route. No default + metric: 10 +--- +apiVersion: 'v1.0' +kind: HardwareProfile +metadata: + name: DellR720v2 + region: sitename + date: 17-FEB-2017 + author: sh8121@att.com + description: Describe server hardware attributes. 
Not a specific server, but profile adopted by a server definition. +spec: + # Chassis vendor + vendor: 'Dell' + # Chassis model generation + generation: '1' + # Chassis model version + hw_version: '2' + # Certified BIOS version for this chassis + bios_version: '2.2.3' + # Boot mode. Supports 'bios' or 'uefi' + boot_mode: 'bios' + # How the node should be initially bootstrapped. Supports 'pxe' + bootstrap_protocol: 'pxe' + # What network interface to use for PXE booting + # for chassis that support selection + pxe_interface: '0' + # Mapping of hardware alias/role to physical address + device_aliases: + # the device alias that will be referenced in HostProfile or BaremetalNode design parts + - alias: 'pnic01' + # The hardware bus the device resides on. Supports 'pci' and 'scsi'. No default + bus_type: 'pci' + # The type of device as reported by lshw. Can be used to validate hardware manifest. No default + dev_type: 'Intel 10Gbps NIC' + # Physical address on the bus + address: '0000:00:03.0' +--- +apiVersion: 'v1.0' +kind: HostProfile +metadata: + name: lcp_node + region: sitename + date: 17-FEB-2017 + author: sh8121@att.com + description: Describe server configuration attributes. Not a specific server, but profile adopted by a server definition +spec: + # The HostProfile this profile adopts initial state from. No default. + # See helm_drydock/objects/readme.md for information on how HostProfile and BaremetalNode inheritance works + host_profile: 'defaults' + # The HardwareProfile describing the node hardware. No default. + hardware_profile: 'DellR720v1' + # OOB access to node + oob: + # Type of OOB access. Supports 'ipmi' + type: 'ipmi' + # Which network - as defined in a Network design part - to access the OOB interface on + network: 'oob' + # Account name for authenticating on the OOB interface + account: 'admin' + # Credential for authentication on the OOB interface. The OOB driver will interpret this. 
+ credential: 'admin' + # How local node storage is configured + storage: + # How storage is laid out. Supports 'lvm' and 'flat'. Defaults to 'lvm' + layout: 'lvm' + # Configuration for the boot disk + bootdisk: + # Hardware disk (or hardware RAID device) used for booting. Can refer to a + # HardwareProfile device alias or an explicit device name + device: 'bootdisk' + # Size of the root volume. Can be specified by percentage or explicit size in + # megabytes or gigabytes. Defaults to 100% of boot device. + root_size: '100g' + # If a separate boot volume is needed, specify size. Defaults to 0 where /boot goes on root. + boot_size: '0' + # Non-boot volumes that should be carved out of local storage + partitions: + # Name of the volume. Doesn't translate to any operating system config + name: 'logs' + # Hardware device the volume should go on + device: 'bootdisk' + # Partition UUID. Defaults to None. A value of 'generate' means Drydock will generate a UUID + part_uuid: + # Size of the volume in megabytes or gigabytes + size: '10g' + # Filesystem mountpoint if volume should be a filesystem + mountpoint: '/var/logs' + # The below are ignored if mountpoint is None + # Format of filesystem. Defaults to ext4 + fstype: 'ext4' + # Mount options of the file system as used in /etc/fstab. Defaults to 'defaults' + mount_options: 'defaults' + # Filesystem UUID. Defaults to None. A value of 'generate' means Drydock will generate a UUID + fs_uuid: + # A filesystem label. Defaults to None + fs_label: + # Physical and logical network interfaces + interfaces: + # What the interface should be named in the operating system. May not match a hardware device name + - device_name: bond0 + # The NetworkLink connected to this interface. Must be the name of a NetworkLink design part + device_link: 'gp' + # Whether this interface is considered the primary interface on the server. Supports true and false. Defaults to false + primary: true + # Hardware devices that support this interface. 
For configuring a physical device, this would be a list of one + # For bonds, this would be a list of all the physical devices in the bond. These can refer to HardwareProfile device aliases + # or explicit device names + slaves: + - 'pnic01' + - 'pnic02' + # Network that will be accessed on this interface. These should each be the name of a Network design part + # Multiple networks listed here assume that this interface is attached to a NetworkLink supporting trunking + networks: + - 'mgmt' + - 'admin' + # Metadata about the node + metadata: + # Explicit tags to propagate to Kubernetes. Simple strings of any value + tags: + - 'lcp_node' + # Key/value mapping that will propagate to the node for next-step bootstrapping + owner_data: + nic_access: 'sriov' + # The rack a node sits in. Simple string + rack: r1 +--- +apiVersion: 'v1.0' +kind: BaremetalNode +metadata: + name: lcp_controller01 + region: sitename + date: 17-FEB-2017 + author: sh8121@att.com + description: Specify a physical server. +spec: + # The HostProfile this server adopts initial state from. No default. + # See helm_drydock/objects/readme.md for information on how HostProfile and BaremetalNode inheritance works + host_profile: 'defaults' + # The HardwareProfile describing the node hardware. No default. + hardware_profile: 'DellR720v1' + # OOB access to node + oob: + # Type of OOB access. Supports 'ipmi' + type: 'ipmi' + # Which network - as defined in a Network design part - to access the OOB interface on + network: 'oob' + # Account name for authenticating on the OOB interface + account: 'admin' + # Credential for authentication on the OOB interface. The OOB driver will interpret this. + credential: 'admin' + # How local node storage is configured + storage: + # How storage is laid out. Supports 'lvm' and 'flat'. Defaults to 'lvm' + layout: 'lvm' + # Configuration for the boot disk + bootdisk: + # Hardware disk (or hardware RAID device) used for booting. 
Can refer to a + # HardwareProfile device alias or an explicit device name + device: 'bootdisk' + # Size of the root volume. Can be specified by percentage or explicit size in + # megabytes or gigabytes. Defaults to 100% of boot device. + root_size: '100g' + # If a separate boot volume is needed, specify size. Defaults to 0 where /boot goes on root. + boot_size: '0' + # Non-boot volumes that should be carved out of local storage + partitions: + # Name of the volume. Doesn't translate to any operating system config + name: 'logs' + # Hardware device the volume should go on + device: 'bootdisk' + # Partition UUID. Defaults to None. A value of 'generate' means Drydock will generate a UUID + part_uuid: + # Size of the volume in megabytes or gigabytes + size: '10g' + # Filesystem mountpoint if volume should be a filesystem + mountpoint: '/var/logs' + # The below are ignored if mountpoint is None + # Format of filesystem. Defaults to ext4 + fstype: 'ext4' + # Mount options of the file system as used in /etc/fstab. Defaults to 'defaults' + mount_options: 'defaults' + # Filesystem UUID. Defaults to None. A value of 'generate' means Drydock will generate a UUID + fs_uuid: + # A filesystem label. Defaults to None + fs_label: + # Physical and logical network interfaces + interfaces: + # What the interface should be named in the operating system. May not match a hardware device name + - device_name: bond0 + # The NetworkLink connected to this interface. Must be the name of a NetworkLink design part + device_link: 'gp' + # Whether this interface is considered the primary interface on the server. Supports true and false. Defaults to false + primary: true + # Hardware devices that support this interface. For configuring a physical device, this would be a list of one + # For bonds, this would be a list of all the physical devices in the bond. 
These can refer to HardwareProfile device aliases + # or explicit device names + slaves: + - 'pnic01' + - 'pnic02' + # Network that will be accessed on this interface. These should each be the name of a Network design part + # Multiple networks listed here assume that this interface is attached to a NetworkLink supporting trunking + networks: + - 'mgmt' + - 'admin' + # Metadata about the node + metadata: + # Explicit tags to propagate to Kubernetes. Simple strings of any value + tags: + - 'lcp_node' + # Key/value mapping that will propagate to the node for next-step bootstrapping + owner_data: + nic_access: 'sriov' + # The rack a node sits in. Simple string + rack: r1 + # How each attached network is accessed by this node + addressing: + # The name of a defined Network design part also listed in the 'networks' section of an interface definition + - network: 'pxe' + # Address should be an explicit IP address assignment or 'dhcp' + address: 'dhcp' + - network: 'mgmt' + address: '172.16.1.83' +--- \ No newline at end of file diff --git a/examples/readme.md b/examples/readme.md new file mode 100644 index 00000000..567e9981 --- /dev/null +++ b/examples/readme.md @@ -0,0 +1,7 @@ +# File Definition Examples + +## designparts_v1.0.yaml + +This is a reference file for the YAML schema supported by the Drydock YAML +ingester. Each design part currently supported is listed with all supported +attributes and comments on attribute use and restrictions. \ No newline at end of file