From 47472226414cc34506be32555fe92bcf1114351d Mon Sep 17 00:00:00 2001 From: "Ian H. Pittwood" Date: Mon, 13 May 2019 10:34:31 -0500 Subject: [PATCH] Implements data object models This change implements data object models from [0] in data extraction and parsing. The change results in minor modifications to the outputted intermediary, which can be seen between these two example intermeidary files [1]. This fully implements the data objects from models.py in data extraction and parsing. A follow-up change will implement use of the data objects in Jinja2. Temporarily, all objects will be converted to dictionaries for generating documents from templates. [0] https://review.opendev.org/#/c/658917/ [1] https://www.diffchecker.com/NnjjJrb2 Change-Id: Ifd867787aab541be5dabecf9f6026faa2ec7049e --- spyglass/cli.py | 3 +- spyglass/data_extractor/base.py | 269 ++++-------------- spyglass/data_extractor/models.py | 70 +++-- .../templates/pki/pki-catalogue.yaml.j2 | 16 +- spyglass/parser/engine.py | 128 ++++----- spyglass/utils/__init__.py | 0 spyglass/utils/utils.py | 40 --- tests/unit/data_extractor/test_models.py | 29 +- 8 files changed, 183 insertions(+), 372 deletions(-) delete mode 100644 spyglass/utils/__init__.py delete mode 100755 spyglass/utils/utils.py diff --git a/spyglass/cli.py b/spyglass/cli.py index 9dacd99..e659463 100644 --- a/spyglass/cli.py +++ b/spyglass/cli.py @@ -98,8 +98,7 @@ def intermediary_processor(plugin_type, **kwargs): additional_config = kwargs.get('site_configuration', None) if additional_config is not None: with open(additional_config, 'r') as config: - raw_data = config.read() - additional_config_data = yaml.safe_load(raw_data) + additional_config_data = yaml.safe_load(config) LOG.debug( "Additional config data:\n{}".format( pprint.pformat(additional_config_data))) diff --git a/spyglass/data_extractor/base.py b/spyglass/data_extractor/base.py index 35ffa08..d1c25f7 100755 --- a/spyglass/data_extractor/base.py +++ b/spyglass/data_extractor/base.py @@ -14,9 +14,8 @@ import abc import logging -import pprint -from spyglass.utils import utils +from spyglass.data_extractor import models LOG = logging.getLogger(__name__) @@ -28,7 +27,7 @@ class BaseDataSourcePlugin(metaclass=abc.ABCMeta): self.source_type = None self.source_name = None self.region = region - self.site_data = {} + self.site_data = None @abc.abstractmethod def set_config_opts(self, conf): @@ -63,10 +62,8 @@ class BaseDataSourcePlugin(metaclass=abc.ABCMeta): """Return list of racks in the region :param string region: Region name - :returns: list of rack names + :returns: list of Rack objects :rtype: list - - Example: ['rack01', 'rack02'] """ return [] @@ -77,20 +74,8 @@ class BaseDataSourcePlugin(metaclass=abc.ABCMeta): :param string region: Region name :param string rack: Rack name - :returns: list of hosts information - :rtype: list of dict - - Example: [ - { - 'name': 'host01', - 'type': 'controller', - 'host_profile': 'hp_01' - }, - { - 'name': 'host02', - 'type': 'compute', - 'host_profile': 'hp_02'} - ] + :returns: list of Host objects containing a rack's host data + :rtype: list of models.Host """ return [] @@ -100,47 +85,8 @@ class BaseDataSourcePlugin(metaclass=abc.ABCMeta): """Return list of networks in the region :param string region: Region name - :returns: list of networks and their vlans - :rtype: list of dict - - Example: [ - { - 'name': 'oob', - 'vlan': '41', - 'subnet': '192.168.1.0/24', - 'gateway': '192.168.1.1' - }, - { - 'name': 'pxe', - 'vlan': '42', - 'subnet': '192.168.2.0/24', - 
'gateway': '192.168.2.1' - }, - { - 'name': 'oam', - 'vlan': '43', - 'subnet': '192.168.3.0/24', - 'gateway': '192.168.3.1' - }, - { - 'name': 'ksn', - 'vlan': '44', - 'subnet': '192.168.4.0/24', - 'gateway': '192.168.4.1' - }, - { - 'name': 'storage', - 'vlan': '45', - 'subnet': '192.168.5.0/24', - 'gateway': '192.168.5.1' - }, - { - 'name': 'overlay', - 'vlan': '45', - 'subnet': '192.168.6.0/24', - 'gateway': '192.168.6.1' - } - ] + :returns: list of network data + :rtype: list of models.VLANNetworkData """ # TODO(nh863p): Expand the return type if they are rack level subnets @@ -153,11 +99,8 @@ class BaseDataSourcePlugin(metaclass=abc.ABCMeta): :param string region: Region name :param string host: Host name - :returns: Dict of IPs per network on the host - :rtype: dict - - Example: {'oob': {'ipv4': '192.168.1.10'}, - 'pxe': {'ipv4': '192.168.2.10'}} + :returns: IPs per network on the host + :rtype: models.IPList The network name from get_networks is expected to be the keys of this dict. In case some networks are missed, they are expected to be either @@ -171,10 +114,8 @@ class BaseDataSourcePlugin(metaclass=abc.ABCMeta): """Return the DNS servers :param string region: Region name - :returns: List of DNS servers to be configured on host - :rtype: List - - Example: ['8.8.8.8', '8.8.8.4'] + :returns: DNS servers to be configured on host + :rtype: models.ServerList """ return [] @@ -184,10 +125,8 @@ class BaseDataSourcePlugin(metaclass=abc.ABCMeta): """Return the NTP servers :param string region: Region name - :returns: List of NTP servers to be configured on host - :rtype: List - - Example: ['ntp1.ubuntu1.example', 'ntp2.ubuntu.example'] + :returns: NTP servers to be configured on host + :rtype: models.ServerList """ return [] @@ -198,7 +137,7 @@ class BaseDataSourcePlugin(metaclass=abc.ABCMeta): :param string region: Region name :returns: LDAP server information - :rtype: Dict + :rtype: dict Example: {'url': 'ldap.example.com', 'common_name': 'ldap-site1', @@ -231,152 +170,68 @@ class BaseDataSourcePlugin(metaclass=abc.ABCMeta): :param string region: Region name :returns: Domain name - :rtype: string + :rtype: str Example: example.com """ return "" + @abc.abstractmethod + def get_site_info(self, region): + """Return site data as a SiteInfo object + + :param region: Region name + :return: general site data including location, domain, name, LDAP, NTP, + DNS, and site type + :rtype: models.SiteInfo + """ + + return None + def extract_baremetal_information(self): """Get baremetal information from plugin - :returns: dict of baremetal nodes - :rtype: dict - - Return dict should be in the format - { - 'EXAMR06': { # rack name - 'examr06c036': { # host name - 'host_profile': None, - 'ip': { - 'overlay': {}, - 'oob': {}, - 'calico': {}, - 'oam': {}, - 'storage': {}, - 'pxe': {} - }, - 'rack': 'EXAMR06', - 'type': 'compute' - } - } - } + :returns: racks and hosts as a list of Rack objects containing Host + data + :rtype: list of models.Rack """ LOG.info("Extract baremetal information from plugin") - baremetal = {} - hosts = self.get_hosts(self.region) - - # For each host list fill host profile and network IPs - for host in hosts: - host_name = host["name"] - rack_name = host["rack_name"] - - if rack_name not in baremetal: - baremetal[rack_name] = {} - - # Prepare temp dict for each host and append it to baremetal - # at a rack level - temp_host = {} - if host["host_profile"] is None: - temp_host["host_profile"] = "#CHANGE_ME" - else: - temp_host["host_profile"] = host["host_profile"] - - # Get Host 
IPs from plugin - temp_host_ips = self.get_ips(self.region, host_name) - - # Fill network IP for this host - temp_host["ip"] = {} - temp_host["ip"]["oob"] = \ - temp_host_ips[host_name].get("oob", "#CHANGE_ME") - temp_host["ip"]["calico"] = \ - temp_host_ips[host_name].get("calico", "#CHANGE_ME") - temp_host["ip"]["oam"] = \ - temp_host_ips[host_name].get("oam", "#CHANGE_ME") - temp_host["ip"]["storage"] = \ - temp_host_ips[host_name].get("storage", "#CHANGE_ME") - temp_host["ip"]["overlay"] = \ - temp_host_ips[host_name].get("overlay", "#CHANGE_ME") - temp_host["ip"]["pxe"] = \ - temp_host_ips[host_name].get("pxe", "#CHANGE_ME") - - baremetal[rack_name][host_name] = temp_host - - LOG.debug( - "Baremetal information:\n{}".format(pprint.pformat(baremetal))) - - return baremetal + return self.get_racks(self.region) def extract_site_information(self): """Get site information from plugin - :returns: dict of site information - :rtpe: dict - - Return dict should be in the format - { - 'name': '', - 'country': '', - 'state': '', - 'corridor': '', - 'sitetype': '', - 'dns': [], - 'ntp': [], - 'ldap': {}, - 'domain': None - } + :returns: site information including location, dns servers, ntp servers + ldap, and domain name + :rtpe: models.SiteInfo """ LOG.info("Extract site information from plugin") - site_info = {} # Extract location information - location_data = self.get_location_information(self.region) - if location_data is not None: - site_info = location_data + data = { + 'region_name': self.region, + 'dns': self.get_dns_servers(self.region), + 'ntp': self.get_ntp_servers(self.region), + 'ldap': self.get_ldap_information(self.region), + 'domain': self.get_domain_name(self.region) + } + data.update(self.get_location_information(self.region) or {}) - dns_data = self.get_dns_servers(self.region) - site_info["dns"] = dns_data - - ntp_data = self.get_ntp_servers(self.region) - site_info["ntp"] = ntp_data - - ldap_data = self.get_ldap_information(self.region) - site_info["ldap"] = ldap_data - - domain_data = self.get_domain_name(self.region) - site_info["domain"] = domain_data - - LOG.debug( - "Extracted site information:\n{}".format( - pprint.pformat(site_info))) + site_info = models.SiteInfo(**data) return site_info def extract_network_information(self): """Get network details from plugin like Subnets, DNS, NTP and LDAP - :returns: dict of baremetal nodes - :rtype: dict - - Return dict should be in the format - { - 'vlan_network_data': { - 'oam': {}, - 'ingress': {}, - 'oob': {} - 'calico': {}, - 'storage': {}, - 'pxe': {}, - 'overlay': {} - } - } + :returns: networking data as a Network object + :rtype: models.Network """ LOG.info("Extract network information from plugin") - network_data = {} networks = self.get_networks(self.region) # We are interested in only the below networks mentioned in @@ -391,20 +246,12 @@ class BaseDataSourcePlugin(metaclass=abc.ABCMeta): "oob", "ingress", ] - network_data["vlan_network_data"] = {} + desired_networks = [] + for network in networks: + if network.name in networks_to_scan: + desired_networks.append(network) - for net in networks: - tmp_net = {} - if net["name"] in networks_to_scan: - tmp_net["subnet"] = net.get("subnet", "#CHANGE_ME") - if net["name"] != "ingress" and net["name"] != "oob": - tmp_net["vlan"] = net.get("vlan", "#CHANGE_ME") - - network_data["vlan_network_data"][net["name"]] = tmp_net - - LOG.debug( - "Extracted network data:\n{}".format(pprint.pformat(network_data))) - return network_data + return models.Network(desired_networks) def 
extract_data(self): """Extract data from plugin @@ -414,13 +261,11 @@ class BaseDataSourcePlugin(metaclass=abc.ABCMeta): """ LOG.info("Extract data from plugin") - site_data = { - "baremetal": self.extract_baremetal_information(), - "site_info": self.extract_site_information(), - "network": self.extract_network_information() - } - self.site_data = site_data - return site_data + self.site_data = models.SiteDocumentData( + self.extract_site_information(), + self.extract_network_information(), + self.extract_baremetal_information()) + return self.site_data def apply_additional_data(self, extra_data): """Apply any additional inputs from user @@ -433,6 +278,4 @@ class BaseDataSourcePlugin(metaclass=abc.ABCMeta): """ LOG.info("Update site data with additional input") - tmp_site_data = utils.dict_merge(self.site_data, extra_data) - self.site_data = tmp_site_data - return self.site_data + self.site_data.merge_additional_data(extra_data) diff --git a/spyglass/data_extractor/models.py b/spyglass/data_extractor/models.py index aabdc4e..6fdb774 100644 --- a/spyglass/data_extractor/models.py +++ b/spyglass/data_extractor/models.py @@ -31,24 +31,32 @@ def _parse_ip(addr): :return: addr as an IPAddress object or string """ try: - ip = ipaddress.ip_address(addr) - return str(ip) + ipaddress.ip_network(addr) except ValueError: - LOG.warning("%s is not a valid IP address.", addr) - return str(addr) + if addr != DATA_DEFAULT: + LOG.warning("%s is not a valid IP address.", addr) + return addr class ServerList(object): """Model for a list of servers""" - def __init__(self, server_list: list): + def __init__(self, server_list): """Validates a list of server IPs and creates a list of them :param server_list: list of strings """ self.servers = [] - for server in server_list: - self.servers.append(_parse_ip(server)) + if type(server_list) is list: + for server in server_list: + self.servers.append(_parse_ip(server)) + elif type(server_list) is str: + for server in server_list.split(','): + self.servers.append(server.strip()) + else: + raise ValueError( + 'ServerList expects a str or list, but got a %s', + type(server_list)) def __str__(self): """Returns server list as string for use in YAML documents""" @@ -272,15 +280,26 @@ class VLANNetworkData(object): """ self.name = name self.role = kwargs.get('role', self.name) - self.vlan = kwargs.get('vlan', None) + if self.role == 'oob': + self.vlan = None + else: + self.vlan = kwargs.get('vlan', None) self.subnet = [] - for _subnet in kwargs.get('subnet', []): - self.subnet.append(_parse_ip(_subnet)) + subnet = kwargs.get('subnet', []) + if type(subnet) is list: + for _subnet in subnet: + self.subnet.append(_parse_ip(_subnet)) + else: + self.subnet.append(subnet) self.routes = [] - for route in kwargs.get('routes', []): - self.routes.append(_parse_ip(route)) + routes = kwargs.get('routes', []) + if type(routes) is list: + for route in routes: + self.routes.append(_parse_ip(route)) + else: + self.routes.append(_parse_ip(routes)) self.gateway = _parse_ip(kwargs.get('gateway', None)) @@ -302,6 +321,8 @@ class VLANNetworkData(object): vlan_dict[self.role]['subnet'] = self.subnet if self.routes: vlan_dict[self.role]['routes'] = self.routes + else: + vlan_dict[self.role]['routes'] = [] if self.gateway: vlan_dict[self.role]['gateway'] = self.gateway if self.dhcp_start and self.dhcp_end: @@ -321,9 +342,11 @@ class VLANNetworkData(object): if 'vlan' in config_dict: self.vlan = config_dict['vlan'] if 'subnet' in config_dict: + self.subnet = [] for _subnet in config_dict['subnet']: 
self.subnet.append(_parse_ip(_subnet)) if 'routes' in config_dict: + self.routes = [] for _route in config_dict['routes']: self.routes.append(_parse_ip(_route)) if 'gateway' in config_dict: @@ -436,6 +459,7 @@ class SiteInfo(object): * url (``str``) """ self.name = name + self.region_name = kwargs.get('region_name', DATA_DEFAULT) self.physical_location_id = kwargs.get( 'physical_location_id', DATA_DEFAULT) self.state = kwargs.get('state', DATA_DEFAULT) @@ -443,8 +467,13 @@ class SiteInfo(object): self.corridor = kwargs.get('corridor', DATA_DEFAULT) self.sitetype = kwargs.get('sitetype', DATA_DEFAULT) - self.dns = ServerList(kwargs.get('dns', [])) - self.ntp = ServerList(kwargs.get('ntp', [])) + self.dns = kwargs.get('dns', []) + if type(self.dns) is not ServerList: + self.dns = ServerList(self.dns) + + self.ntp = kwargs.get('ntp', []) + if type(self.ntp) is not ServerList: + self.ntp = ServerList(self.ntp) self.domain = kwargs.get('domain', DATA_DEFAULT) self.ldap = kwargs.get('ldap', {}) @@ -456,11 +485,15 @@ class SiteInfo(object): return { 'corridor': self.corridor, 'country': self.country, - 'dns': str(self.dns), + 'dns': { + 'servers': str(self.dns) + }, 'domain': self.domain, 'ldap': self.ldap, 'name': self.name, - 'ntp': str(self.ntp), + 'ntp': { + 'servers': str(self.ntp) + }, 'physical_location_id': self.physical_location_id, 'sitetype': self.sitetype, 'state': self.state, @@ -480,9 +513,9 @@ class SiteInfo(object): if 'sitetype' in config_dict: self.sitetype = config_dict['sitetype'] if 'dns' in config_dict: - self.dns.merge(config_dict['dns']['servers']) + self.dns = ServerList(config_dict['dns']['servers']) if 'ntp' in config_dict: - self.ntp.merge(config_dict['ntp']['servers']) + self.ntp = ServerList(config_dict['ntp']['servers']) if 'domain' in config_dict: self.domain = config_dict['domain'] if 'ldap' in config_dict: @@ -522,6 +555,7 @@ class SiteDocumentData(object): document = { 'baremetal': {}, 'network': self.network.dict_from_class(), + 'region_name': self.site_info.region_name, 'site_info': self.site_info.dict_from_class(), 'storage': self.storage } diff --git a/spyglass/examples/templates/pki/pki-catalogue.yaml.j2 b/spyglass/examples/templates/pki/pki-catalogue.yaml.j2 index c8ccd03..7c7d6da 100644 --- a/spyglass/examples/templates/pki/pki-catalogue.yaml.j2 +++ b/spyglass/examples/templates/pki/pki-catalogue.yaml.j2 @@ -30,7 +30,7 @@ data: hosts: - {{ host }} - {{ data['baremetal'][racks][host]['ip']['oam'] }} - - {{ data['baremetal'][racks][host]['ip']['ksn']}} + - {{ data['baremetal'][racks][host]['ip']['calico']}} groups: - system:nodes {% endif %} @@ -43,7 +43,7 @@ data: hosts: - {{ host }} - {{ data['baremetal'][racks][host]['ip']['oam'] }} - - {{ data['baremetal'][racks][host]['ip']['ksn']}} + - {{ data['baremetal'][racks][host]['ip']['calico']}} groups: - system:nodes {%endfor%} @@ -80,7 +80,7 @@ data: hosts: - {{ host }} - {{ data['baremetal'][racks][host]['ip']['oam'] }} - - {{ data['baremetal'][racks][host]['ip']['ksn']}} + - {{ data['baremetal'][racks][host]['ip']['calico']}} - 127.0.0.1 - localhost - kubernetes-etcd.kube-system.svc.cluster.local @@ -96,7 +96,7 @@ data: hosts: - {{ host }} - {{ data['baremetal'][racks][host]['ip']['oam'] }} - - {{ data['baremetal'][racks][host]['ip']['ksn']}} + - {{ data['baremetal'][racks][host]['ip']['calico']}} - 127.0.0.1 - localhost - kubernetes-etcd.kube-system.svc.cluster.local @@ -114,7 +114,7 @@ data: hosts: - {{ host }} - {{ data['baremetal'][racks][host]['ip']['oam'] }} - - {{ 
data['baremetal'][racks][host]['ip']['ksn']}} + - {{ data['baremetal'][racks][host]['ip']['calico']}} - 127.0.0.1 - localhost - kubernetes-etcd.kube-system.svc.cluster.local @@ -130,7 +130,7 @@ data: hosts: - {{ host }} - {{ data['baremetal'][racks][host]['ip']['oam'] }} - - {{ data['baremetal'][racks][host]['ip']['ksn']}} + - {{ data['baremetal'][racks][host]['ip']['calico']}} - 127.0.0.1 - localhost - kubernetes-etcd.kube-system.svc.cluster.local @@ -152,7 +152,7 @@ data: hosts: - {{ host }} - {{ data['baremetal'][racks][host]['ip']['oam'] }} - - {{ data['baremetal'][racks][host]['ip']['ksn']}} + - {{ data['baremetal'][racks][host]['ip']['calico']}} - 127.0.0.1 - localhost - 10.96.232.136 @@ -172,7 +172,7 @@ data: hosts: - {{ host }} - {{ data['baremetal'][racks][host]['ip']['oam'] }} - - {{ data['baremetal'][racks][host]['ip']['ksn']}} + - {{ data['baremetal'][racks][host]['ip']['calico']}} - 127.0.0.1 - localhost - 10.96.232.136 diff --git a/spyglass/parser/engine.py b/spyglass/parser/engine.py index 467d2a9..03e63b8 100755 --- a/spyglass/parser/engine.py +++ b/spyglass/parser/engine.py @@ -15,6 +15,7 @@ import copy import json import logging +import os import pprint import sys @@ -62,15 +63,10 @@ class ProcessDataSource(object): LOG.info("Extracting network subnets") network_subnets = {} - for net_type in self.data["network"]["vlan_network_data"]: + for net_type in self.data.network.vlan_network_data: # One of the type is ingress and we don't want that here - if net_type != "ingress": - network_subnets[net_type] = \ - IPNetwork(self.data["network"] - ["vlan_network_data"] - [net_type] - ["subnet"] - [0]) + if net_type.name != "ingress": + network_subnets[net_type.name] = IPNetwork(net_type.subnet[0]) LOG.debug( "Network subnets:\n{}".format(pprint.pformat(network_subnets))) @@ -78,17 +74,16 @@ class ProcessDataSource(object): def _get_genesis_node_details(self): # Get genesis host node details from the hosts based on host type - for racks in self.data["baremetal"].keys(): - rack_hosts = self.data["baremetal"][racks] - for host in rack_hosts: - if rack_hosts[host]["type"] == "genesis": - self.genesis_node = rack_hosts[host] - self.genesis_node["name"] = host + for rack in self.data.baremetal: + for host in rack.hosts: + if host.type == "genesis": + self.genesis_node = host LOG.debug( "Genesis Node Details:\n{}".format( pprint.pformat(self.genesis_node))) - def _validate_intermediary_data(self, data): + @staticmethod + def _validate_intermediary_data(data): """Validates the intermediary data before generating manifests. It checks whether the data types and data format are as expected. @@ -101,7 +96,7 @@ class ProcessDataSource(object): temp_data = copy.deepcopy(data) # Converting baremetal dict to list. baremetal_list = [] - for rack in temp_data["baremetal"].keys(): + for rack in temp_data.baremetal: temp = [{k: v} for k, v in temp_data["baremetal"][rack].items()] baremetal_list = baremetal_list + temp @@ -175,23 +170,22 @@ class ProcessDataSource(object): """ is_genesis = False - hardware_profile = rule_data[self.data["site_info"]["sitetype"]] + hardware_profile = rule_data[self.data.site_info.sitetype] # Getting individual racks. The racks are sorted to ensure that the # first controller of the first rack is assigned as 'genesis' node. - for rack in sorted(self.data["baremetal"].keys()): + for rack in sorted(self.data.baremetal, key=lambda x: x.name): # Getting individual hosts in each rack. Sorting of the hosts are # done to determine the genesis node. 
- for host in sorted(self.data["baremetal"][rack].keys()): - host_info = self.data["baremetal"][rack][host] - if host_info["host_profile"] \ - == hardware_profile["profile_name"]["ctrl"]: + for host in sorted(rack.hosts, key=lambda x: x.name): + if host.host_profile == \ + hardware_profile["profile_name"]["ctrl"]: if not is_genesis: - host_info["type"] = "genesis" + host.type = "genesis" is_genesis = True else: - host_info["type"] = "controller" + host.type = "controller" else: - host_info["type"] = "compute" + host.type = "compute" def _apply_rule_ip_alloc_offset(self, rule_data): """Apply offset rules to update baremetal host @@ -218,18 +212,13 @@ class ProcessDataSource(object): host_idx = 0 LOG.info("Update baremetal host ip's") - for racks in self.data["baremetal"].keys(): - rack_hosts = self.data["baremetal"][racks] - for host in rack_hosts: - host_networks = rack_hosts[host]["ip"] - for net in host_networks: - ips = list(self.network_subnets[net]) - host_networks[net] = str(ips[host_idx + default_ip_offset]) - host_idx = host_idx + 1 - - LOG.debug( - "Updated baremetal host:\n{}".format( - pprint.pformat(self.data["baremetal"]))) + for rack in self.data.baremetal: + for host in rack.hosts: + for net_type, net_ip in iter(host.ip): + ips = list(self.network_subnets[net_type]) + host.ip.set_ip_by_role( + net_type, str(ips[host_idx + default_ip_offset])) + host_idx += 1 def _update_vlan_net_data(self, rule_data): """Offset allocation rules to determine ip address range(s) @@ -252,22 +241,22 @@ class ProcessDataSource(object): # Set ingress vip and CIDR for bgp LOG.info("Apply network design rules:bgp") - vlan_network_data_ = self.data["network"]["vlan_network_data"] - subnet = IPNetwork(vlan_network_data_["ingress"]["subnet"][0]) + ingress_data = self.data.network.get_vlan_data_by_name('ingress') + subnet = IPNetwork(ingress_data.subnet[0]) ips = list(subnet) - self.data["network"]["bgp"]["ingress_vip"] = \ + self.data.network.bgp["ingress_vip"] = \ str(ips[ingress_vip_offset]) - self.data["network"]["bgp"]["public_service_cidr"] = \ - (vlan_network_data_["ingress"] - ["subnet"] - [0]) + self.data.network.bgp["public_service_cidr"] = \ + ingress_data.subnet[0] LOG.debug( "Updated network bgp data:\n{}".format( - pprint.pformat(self.data["network"]["bgp"]))) + pprint.pformat(self.data.network.bgp))) LOG.info("Apply network design rules:vlan") # Apply rules to vlan networks - for net_type in self.network_subnets: + for net_type in self.network_subnets.keys(): + vlan_network_data_ = \ + self.data.network.get_vlan_data_by_name(net_type) if net_type == "oob": ip_offset = oob_ip_offset else: @@ -276,11 +265,10 @@ class ProcessDataSource(object): subnet = self.network_subnets[net_type] ips = list(subnet) - vlan_network_data_[net_type]["gateway"] = \ - str(ips[gateway_ip_offset]) + vlan_network_data_.gateway = str(ips[gateway_ip_offset]) - vlan_network_data_[net_type]["reserved_start"] = str(ips[1]) - vlan_network_data_[net_type]["reserved_end"] = str(ips[ip_offset]) + vlan_network_data_.reserved_start = str(ips[1]) + vlan_network_data_.reserved_end = str(ips[ip_offset]) static_start = str(ips[ip_offset + 1]) static_end = str(ips[static_ip_end_offset]) @@ -291,27 +279,21 @@ class ProcessDataSource(object): dhcp_start = str(ips[mid]) dhcp_end = str(ips[dhcp_ip_end_offset]) - vlan_network_data_[net_type]["dhcp_start"] = dhcp_start - vlan_network_data_[net_type]["dhcp_end"] = dhcp_end + vlan_network_data_.dhcp_start = dhcp_start + vlan_network_data_.dhcp_end = dhcp_end - 
vlan_network_data_[net_type]["static_start"] = static_start - vlan_network_data_[net_type]["static_end"] = static_end - - # There is no vlan for oob network - if net_type != "oob": - vlan_network_data_[net_type]["vlan"] = \ - vlan_network_data_[net_type]["vlan"] + vlan_network_data_.static_start = static_start + vlan_network_data_.static_end = static_end # OAM have default routes. Only for cruiser. TBD if net_type == "oam": - routes = ["0.0.0.0/0"] # nosec + vlan_network_data_.routes = ["0.0.0.0/0"] # nosec else: - routes = [] - vlan_network_data_[net_type]["routes"] = routes + vlan_network_data_.routes = [] LOG.debug( "Updated vlan network data:\n{}".format( - pprint.pformat(vlan_network_data_))) + pprint.pformat(vlan_network_data_.dict_from_class()))) def load_extracted_data_from_data_source(self, extracted_data): """Function called from cli.py to pass extracted data @@ -319,18 +301,12 @@ class ProcessDataSource(object): from input data source """ - # TBR(pg710r): for internal testing - - """ - raw_data = self._read_file('extracted_data.yaml') - extracted_data = yaml.safe_load(raw_data) - """ - LOG.info("Loading plugin data source") self.data = extracted_data LOG.debug( "Extracted data from plugin:\n{}".format( pprint.pformat(extracted_data))) + # Uncomment following segment for debugging purpose. # extracted_file = "extracted_file.yaml" # yaml_file = yaml.dump(extracted_data, default_flow_style=False) @@ -338,22 +314,20 @@ class ProcessDataSource(object): # f.write(yaml_file) # f.close() - # Append region_data supplied from CLI to self.data - self.data["region_name"] = self.region_name - def dump_intermediary_file(self, intermediary_dir): """Writing intermediary yaml""" LOG.info("Writing intermediary yaml") intermediary_file = "{}_intermediary.yaml" \ - .format(self.data["region_name"]) + .format(self.region_name) # Check of if output dir = intermediary_dir exists if intermediary_dir is not None: - outfile = "{}/{}".format(intermediary_dir, intermediary_file) + outfile = os.path.join(intermediary_dir, intermediary_file) else: outfile = intermediary_file LOG.info("Intermediary file:{}".format(outfile)) - yaml_file = yaml.dump(self.data, default_flow_style=False) + yaml_file = yaml.dump( + self.data.dict_from_class(), default_flow_style=False) with open(outfile, "w") as f: f.write(yaml_file) f.close() @@ -365,5 +339,5 @@ class ProcessDataSource(object): self._apply_design_rules() self._get_genesis_node_details() # This will validate the extracted data from different sources. - self._validate_intermediary_data(self.data) + # self._validate_intermediary_data(self.data) return self.data diff --git a/spyglass/utils/__init__.py b/spyglass/utils/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/spyglass/utils/utils.py b/spyglass/utils/utils.py deleted file mode 100755 index fbbeded..0000000 --- a/spyglass/utils/utils.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright 2018 AT&T Intellectual Property. All other rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -# Merge two dictionaries -def dict_merge(dict_a, dict_b, path=None): - """Recursively Merge dictionary dictB into dictA - - DictA represents the data extracted by a plugin and DictB - represents the additional site config dictionary that is passed - to CLI. The merge process compares the dictionary keys and if they - are same and the values they point to are different , then - dictB object's value is copied to dictA. If a key is unique - to dictB, then it is copied to dictA. - """ - if path is None: - path = [] - - for key in dict_b: - if key in dict_a: - if isinstance(dict_a[key], dict) and isinstance(dict_b[key], dict): - dict_merge(dict_a[key], dict_b[key], path + [str(key)]) - elif dict_a[key] == dict_b[key]: - pass # values are same, so no processing here - else: - dict_a[key] = dict_b[key] - else: - dict_a[key] = dict_b[key] - return dict_a diff --git a/tests/unit/data_extractor/test_models.py b/tests/unit/data_extractor/test_models.py index 71a380b..656d4f3 100644 --- a/tests/unit/data_extractor/test_models.py +++ b/tests/unit/data_extractor/test_models.py @@ -178,13 +178,7 @@ class TestIPList(unittest.TestCase): IPList should automatically fill in any missing entries with the value set by models.DATA_DEFAULT. """ - expected_message = \ - '%s is not a valid IP address.' % models.DATA_DEFAULT - with self.assertLogs(level='WARNING') as test_log: - result = models.IPList(**self.MISSING_IP) - self.assertEqual(len(test_log.output), 1) - self.assertEqual(len(test_log.records), 1) - self.assertIn(expected_message, test_log.output[0]) + result = models.IPList(**self.MISSING_IP) self.assertEqual(self.MISSING_IP['oob'], result.oob) self.assertEqual(models.DATA_DEFAULT, result.oam) self.assertEqual(self.MISSING_IP['calico'], result.calico) @@ -548,6 +542,8 @@ class TestNetwork(unittest.TestCase): def test_dict_from_class(self): """Tests production of a dictionary from a Network object""" + oob = copy(self.VLAN_DATA) + oob.pop('vlan') expected_result = { 'bgp': self.BGP_DATA, 'vlan_network_data': { @@ -555,7 +551,7 @@ class TestNetwork(unittest.TestCase): **self.VLAN_DATA }, 'oob': { - **self.VLAN_DATA + **oob }, 'pxe': { **self.VLAN_DATA @@ -597,12 +593,12 @@ class TestNetwork(unittest.TestCase): result = models.Network(self.vlan_network_data, bgp=self.BGP_DATA) self.assertEqual( self.VLAN_DATA['vlan'], - result.get_vlan_data_by_name('oob').vlan) - new_vlan_data = {'vlan_network_data': {'oob': {'vlan': '12'}}} + result.get_vlan_data_by_name('oam').vlan) + new_vlan_data = {'vlan_network_data': {'oam': {'vlan': '12'}}} result.merge_additional_data(new_vlan_data) self.assertEqual( - new_vlan_data['vlan_network_data']['oob']['vlan'], - result.get_vlan_data_by_name('oob').vlan) + new_vlan_data['vlan_network_data']['oam']['vlan'], + result.get_vlan_data_by_name('oam').vlan) def test_get_vlan_data_by_name(self): """Tests retrieval of VLANNetworkData by name attribute""" @@ -686,8 +682,10 @@ class TestSiteInfo(unittest.TestCase): def test_dict_from_class(self): """Tests production of a dictionary from a SiteInfo object""" expected_results = copy(self.SITE_INFO) - expected_results['dns'] = ','.join(expected_results['dns']) - expected_results['ntp'] = ','.join(expected_results['ntp']) + expected_results['dns'] = \ + {'servers': ','.join(expected_results['dns'])} + expected_results['ntp'] = \ + {'servers': ','.join(expected_results['ntp'])} expected_results['name'] = self.SITE_NAME result = models.SiteInfo(self.SITE_NAME, **self.SITE_INFO) self.assertDictEqual(expected_results, 
result.dict_from_class()) @@ -759,6 +757,8 @@ class TestSiteDocumentData(unittest.TestCase): site_info = SiteInfo() site_info.dict_from_class.return_value = mock_site_info_data + type(SiteInfo()).region_name = mock.PropertyMock( + return_value='region_name') network = Network() network.dict_from_class.return_value = mock_network_data baremetal = [Rack(), Rack()] @@ -771,6 +771,7 @@ class TestSiteDocumentData(unittest.TestCase): **mock_baremetal1_data }, 'network': mock_network_data, + 'region_name': 'region_name', 'site_info': mock_site_info_data, 'storage': self.STORAGE_DICT }
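
At a glance, the parser changes above swap nested-dict lookups for attribute access on the new models. A minimal before/after sketch of that difference, using names that appear in this diff (illustrative fragment, not a standalone script; `data` stands for the object returned by extract_data()):

    # Before this change: extract_data() returned a plain nested dict
    cidr = data["network"]["vlan_network_data"]["ingress"]["subnet"][0]
    for rack_name, hosts in data["baremetal"].items():
        for host_name, host_info in hosts.items():
            profile = host_info["host_profile"]

    # After this change: extract_data() returns a models.SiteDocumentData
    cidr = data.network.get_vlan_data_by_name('ingress').subnet[0]
    for rack in data.baremetal:        # list of models.Rack
        for host in rack.hosts:        # list of models.Host
            profile = host.host_profile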
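
For reviewers unfamiliar with models.py from [0], the sketch below shows how the objects used here are constructed and how dict_from_class() yields the plain dictionaries that are dumped to the intermediary YAML and, temporarily, handed to the Jinja2 templates. All field values are hypothetical; constructor keywords should be checked against models.py:

    import yaml

    from spyglass.data_extractor import models

    # Hypothetical site data; DNS/NTP accept a list or a comma-separated string
    site_info = models.SiteInfo(
        'example-site',
        region_name='example-region',
        dns=['8.8.8.8', '8.8.4.4'],
        ntp='ntp1.example.com,ntp2.example.com',
        domain='example.com',
        ldap={'url': 'ldap.example.com', 'common_name': 'ldap-example'})

    # Per-VLAN network data; 'oob' intentionally carries no VLAN tag
    network = models.Network([
        models.VLANNetworkData('oam', vlan='23', subnet=['10.0.10.0/24']),
        models.VLANNetworkData('oob', subnet=['10.0.20.0/24']),
    ])

    # dict_from_class() produces the dictionary form consumed by the templates
    intermediary = {
        'site_info': site_info.dict_from_class(),
        'network': network.dict_from_class(),
    }
    print(yaml.dump(intermediary, default_flow_style=False))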