Merge pull request #32 from sh8121att/yaml_example_update

Yaml example update
This commit is contained in:
Alan Meadows 2017-05-24 19:54:05 -04:00 committed by GitHub
commit ad05a0976b
64 changed files with 4915 additions and 2080 deletions

View File

@ -1,58 +0,0 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#############################################################################
#
# bootstrap_hwdefinition.yaml - Definitions of server hardware layout
#
#############################################################################
# version the schema in this file so consumers can rationally parse it
---
apiVersion: 'v1.0'
kind: HardwareProfile
metadata:
name: HPGen8v3
region: sitename
date: 17-FEB-2017
description: Sample hardware definition
author: Scott Hussey
spec:
# Vendor of the server chassis
vendor: HP
# Generation of the chassis model
generation: '8'
# Version of the chassis model within its generation - not version of the hardware definition
hw_version: '3'
# The certified version of the chassis BIOS
bios_version: '2.2.3'
# Mode of the default boot of hardware - bios, uefi
boot_mode: bios
# Protocol of boot of the hardware - pxe, usb, hdd
bootstrap_protocol: pxe
# Which interface to use for network booting within the OOB manager, not OS device
pxe_interface: 0
# Map hardware addresses to aliases/roles to allow a mix of hardware configs
# in a site to result in a consistent configuration
device_aliases:
pci:
- address: pci@0000:00:03.0
alias: prim_nic01
# type could identify expected hardware - used for hardware manifest validation
type: '82540EM Gigabit Ethernet Controller'
- address: pci@0000:00:04.0
alias: prim_nic02
type: '82540EM Gigabit Ethernet Controller'
scsi:
- address: scsi@2:0.0.0
alias: primary_boot
type: 'VBOX HARDDISK'

View File

@ -1,420 +0,0 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################
#
# bootstrap_seed.yaml - Site server design definition for physical layer
#
####################
# version the schema in this file so consumers can rationally parse it
---
apiVersion: 'v1.0'
kind: Region
metadata:
name: sitename
date: 17-FEB-2017
description: Sample site design
author: sh8121@att.com
spec:
# Not sure if we have site wide data that doesn't fall into another 'Kind'
---
apiVersion: 'v1.0'
kind: NetworkLink
metadata:
name: oob
region: sitename
date: 17-FEB-2017
author: sh8121@att.com
description: Describe layer 1 attributes. Primary key is 'name'. These settings will generally be things the switch and server have to agree on
spec:
bonding:
mode: none
mtu: 1500
linkspeed: 100full
trunking:
mode: none
default_network: oob
---
# pxe is a bit of 'magic' indicating the link config used when PXE booting
# a node. All other links indicate network configs applied when the node
# is deployed.
apiVersion: 'v1.0'
kind: NetworkLink
metadata:
name: pxe
region: sitename
date: 17-FEB-2017
author: sh8121@att.com
description: Describe layer 1 attributes. Primary key is 'name'. These settings will generally be things the switch and server have to agree on
spec:
bonding:
mode: none
mtu: 1500
linkspeed: auto
# Is this link supporting multiple layer 2 networks?
# none is a port-based VLAN identified by default_network
# tagged is using 802.1q VLAN tagging. Untagged packets will default to default_network
trunking:
mode: none
# use name, will translate to VLAN ID
default_network: pxe
---
apiVersion: 'v1.0'
kind: NetworkLink
metadata:
name: gp
region: sitename
date: 17-FEB-2017
author: sh8121@att.com
description: Describe layer 1 attributes. These CIs will generally be things the switch and server have to agree on
# pxe is a bit of 'magic' indicating the link config used when PXE booting
# a node. All other links indicate network configs applied when the node
# is deployed.
spec:
# If this link is a bond of physical links, how is it configured
# 802.3ad
# active-backup
# balance-rr
# Can add support for others down the road
bonding:
mode: 802.3ad
# For LACP (802.3ad) xmit hashing policy: layer2, layer2+3, layer3+4, encap3+4
hash: layer3+4
# 802.3ad specific options
peer_rate: slow
mon_rate: default
up_delay: default
down_delay: default
mtu: 9000
linkspeed: auto
# Is this link supporting multiple layer 2 networks?
trunking:
mode: tagged
default_network: mgmt
---
apiVersion: 'v1.0'
kind: Network
metadata:
name: oob
region: sitename
date: 17-FEB-2017
author: sh8121@att.com
description: Describe layer 2/3 attributes. Primarily CIs used for configuring server interfaces
spec:
allocation: static
cidr: 172.16.100.0/24
ranges:
- type: static
start: 172.16.100.15
end: 172.16.100.254
dns:
domain: ilo.sitename.att.com
servers: 172.16.100.10
---
apiVersion: 'v1.0'
kind: Network
metadata:
name: pxe
region: sitename
date: 17-FEB-2017
author: sh8121@att.com
description: Describe layer 2/3 attributes. Primarily CIs used for configuring server interfaces
spec:
# Layer 2 VLAN segment id, could support other segmentations. Optional
vlan_id: '99'
# How are addresses assigned?
allocation: dhcp
# MTU for this VLAN interface, if not specified it will be inherited from the link
mtu: 1500
# Network address
cidr: 172.16.0.0/24
# Describe IP address ranges
ranges:
- type: dhcp
start: 172.16.0.5
end: 172.16.0.254
# DNS settings for this network
dns:
# Domain addresses on this network will be registered under
domain: admin.sitename.att.com
# DNS servers that a server using this network as its default gateway should use
servers: 172.16.0.10
---
apiVersion: 'v1.0'
kind: Network
metadata:
name: mgmt
region: sitename
date: 17-FEB-2017
author: sh8121@att.com
description: Describe layer 2/3 attributes. Primarily CIs used for configuring server interfaces
spec:
vlan_id: '100'
# How are addresses assigned?
allocation: static
# Allow MTU to be inherited from link the network rides on
mtu: 1500
# Network address
cidr: 172.16.1.0/24
# Describe IP address ranges
ranges:
- type: static
start: 172.16.1.15
end: 172.16.1.254
# Static routes to be added for this network
routes:
- subnet: 0.0.0.0/0
# A blank gateway would leave to a static route specifying
# only the interface as a source
gateway: 172.16.1.1
metric: 10
# DNS settings for this network
dns:
# Domain addresses on this network will be registered under
domain: mgmt.sitename.example.com
# DNS servers that a server using this network as its default gateway should use
servers: 172.16.1.9,172.16.1.10
---
apiVersion: 'v1.0'
kind: Network
metadata:
name: private
region: sitename
date: 17-FEB-2017
author: sh8121@att.com
description: Describe layer 2/3 attributes. Primarily CIs used for configuring server interfaces
spec:
vlan_id: '101'
allocation: static
mtu: 9000
cidr: 172.16.2.0/24
# Describe IP address ranges
ranges:
# Type can be reserved (not used for baremetal), static (all explicit
# assignments should fall here), dhcp (will be used by a DHCP server on this network)
- type: static
start: 172.16.2.15
end: 172.16.2.254
dns:
domain: priv.sitename.example.com
servers: 172.16.2.9,172.16.2.10
---
apiVersion: 'v1.0'
kind: Network
metadata:
name: public
region: sitename
date: 17-FEB-2017
author: sh8121@att.com
description: Describe layer 2/3 attributes. Primarily CIs used for configuring server interfaces
spec:
vlan_id: '102'
# How are addresses assigned?
allocation: static
# MTU size for the VLAN interface
mtu: 1500
cidr: 172.16.3.0/24
# Describe IP address ranges
ranges:
- type: static
start: 172.16.3.15
end: 172.16.3.254
routes:
- subnet: 0.0.0.0/0
gateway: 172.16.3.1
metric: 9
dns:
domain: sitename.example.com
servers: 8.8.8.8
---
apiVersion: 'v1.0'
kind: HostProfile
metadata:
name: default
region: sitename
date: 17-FEB-2017
author: sh8121@att.com
description: Describe layer 2/3 attributes. Primarily CIs used for configuring server interfaces
# No magic to this host_profile, it just provides a way to specify
# sitewide settings. If it is absent from a node's inheritance chain
# then these values will NOT be applied
spec:
# OOB (iLO, iDRAC, etc...) settings. Should prefer open standards such
# as IPMI over vendor-specific when possible.
oob:
type: ipmi
# OOB networking should be preconfigured, but we can include a network
# definition for validation or enhancement (DNS registration)
network: oob
account: admin
credential: admin
# Specify storage layout of base OS. Ceph out of scope
storage:
# How storage should be carved up: lvm (logical volumes), flat
# (single partition)
layout: lvm
# Info specific to the boot and root disk/partitions
bootdisk:
# Device will specify an alias defined in hwdefinition.yaml
device: primary_boot
# For LVM, the size of the partition added to VG as a PV
# For flat, the size of the partition formatted as ext4
root_size: 50g
# The /boot partition. If not specified, /boot will in root
boot_size: 2g
# Info for additional partitions. Need to balance between
# flexibility and complexity
partitions:
- name: logs
device: primary_boot
# Partition uuid if needed
part_uuid: 84db9664-f45e-11e6-823d-080027ef795a
size: 10g
# Optional, can carve up unformatted block devices
mountpoint: /var/log
fstype: ext4
mount_options: defaults
# Filesystem UUID or label can be specified. UUID recommended
fs_uuid: cdb74f1c-9e50-4e51-be1d-068b0e9ff69e
fs_label: logs
# Platform (Operating System) settings
platform:
image: ubuntu_16.04_hwe
kernel_params: default
# Additional metadata to apply to a node
metadata:
# Base URL of the introspection service - may go in curtin data
introspection_url: http://172.16.1.10:9090
---
apiVersion: 'v1.0'
kind: HostProfile
metadata:
name: k8-node
region: sitename
date: 17-FEB-2017
author: sh8121@att.com
description: Describe layer 2/3 attributes. Primarily CIs used for configuring server interfaces
spec:
# host_profile inheritance allows for deduplication of common CIs
# Inheritance is additive for CIs that are lists of multiple items
# To remove an inherited list member, prefix the primary key value
# with '!'.
host_profile: defaults
# Hardware profile will map hardware specific details to the abstract
# names uses in the host profile as well as specify hardware specific
# configs. A viable model should be to build a host profile without a
# hardware_profile and then for each node inherit the host profile and
# specify a hardware_profile to map that node's hardware to the abstract
# settings of the host_profile
hardware_profile: HPGen9v3
# Network interfaces.
interfaces:
# Keyed on device_name
# pxe is a special marker indicating which device should be used for pxe boot
- device_name: pxe
# The network link attached to this
network_link: pxe
# Slaves will specify aliases from hwdefinition.yaml
slaves:
- prim_nic01
# Which networks will be configured on this interface
networks:
- name: pxe
- device_name: bond0
network_link: gp
# If multiple slaves are specified, but no bonding config
# is applied to the link, design validation will fail
slaves:
- prim_nic01
- prim_nic02
# If multiple networks are specified, but no trunking
# config is applied to the link, design validation will fail
networks:
- name: mgmt
- name: private
metadata:
# Explicit tag assignment
tags:
- 'test'
# MaaS supports key/value pairs. Not sure of the use yet
owner_data:
foo: bar
---
apiVersion: 'v1.0'
kind: HostProfile
metadata:
name: k8-node-public
region: sitename
date: 17-FEB-2017
author: sh8121@att.com
description: Describe layer 2/3 attributes. Primarily CIs used for configuring server interfaces
spec:
host_profile: k8-node
interfaces:
- device_name: bond0
networks:
# This is additive, so adds a network to those defined in the host_profile
# inheritance chain
- name: public
---
apiVersion: 'v1.0'
kind: BaremetalNode
metadata:
name: controller01
region: sitename
date: 17-FEB-2017
author: sh8121@att.com
description: Describe layer 2/3 attributes. Primarily CIs used for configuring server interfaces
spec:
host_profile: k8-node-public
# the hostname for a server, could be used in multiple DNS domains to
# represent different interfaces
interfaces:
- device_name: bond0
networks:
# '!' prefix for the value of the primary key indicates a record should be removed
- name: '!private'
# Addresses assigned to network interfaces
addressing:
# Which network the address applies to. If a network appears in addressing
# that isn't assigned to an interface, design validation will fail
- network: pxe
# The address assigned. Either a explicit IPv4 or IPv6 address
# or dhcp or slaac
address: dhcp
- network: mgmt
address: 172.16.1.20
- network: public
address: 172.16.3.20
metadata:
tags:
- os_ctl
rack: rack01
---
apiVersion: 'v1.0'
kind: BaremetalNode
metadata:
name: compute01
region: sitename
date: 17-FEB-2017
author: sh8121@att.com
description: Describe layer 2/3 attributes. Primarily CIs used for configuring server interfaces
spec:
host_profile: k8-node
addressing:
- network: pxe
address: dhcp
- network: mgmt
address: 172.16.1.21
- network: private
address: 172.16.2.21

View File

@ -0,0 +1,331 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Site/Region wide definitions. Each design part will be a constituent
# of the design for exactly one Region
apiVersion: 'v1.0'
kind: Region
metadata:
name: sitename
date: 17-FEB-2017
description: Sample site design
author: sh8121@att.com
spec:
# List of query-based definitions for applying tags to deployed nodes
tag_definitions:
- tag: 'high_memory'
# Tag to apply to nodes that qualify for the query
definition_type: 'lshw_xpath'
# Only support one type for now - 'lshw_xpath' used by MaaS
definition: //node[@id="memory"]/'size units="bytes"' > 137438953472
# an xpath query that is run against the output of 'lshw -xml' from the node
# Image and package repositories needed by Drydock drivers. Needs to be defined
repositories:
- name: 'ubuntu-main'
---
apiVersion: 'v1.0'
kind: NetworkLink
metadata:
name: oob
region: sitename
date: 17-FEB-2017
author: sh8121@att.com
description: Describe layer 1 attributes. Primary key is 'name'. These settings will generally be things the switch and server have to agree on
spec:
bonding:
# Mode can be 'disabled', '802.3ad', 'balanced-rr', 'active-backup'. Defaults to disabled
mode: '802.3ad'
# The below apply to 802.3ad (LACP)
# Link selection hash. Supports 'layer3+4', 'layer2', 'layer2+3'. Defaults to 'layer3+4'
hash: 'layer3+4'
# LACP peering rate. Supports 'slow', 'fast'. Defaults to 'fast'
peer_rate: 'fast'
# LACP link monitor rate in milliseconds. Defaults to 100ms
mon_rate: 100
# LACP delay for marking link up in milliseconds. Must be greater than mon_rate. Defaults to 200ms
up_delay: 200
# LACP delay for marking link down in milliseconds. Must be greater than mon_rate. Defaults to 200ms
down_delay: 200
# Physical link default MTU size. No default
mtu: 1500
# Physical link speed. Supports 'auto', '100full'. Gigabit+ speeds require auto. No default
linkspeed: 'auto'
# Settings for using a link for multiple L2 networks
trunking:
# Trunking mode. Supports 'disabled', '802.1q'. Defaults to disabled
mode: disabled
# If disabled, what network is this port on. If '802.1q' what is the default network for the port. No default.
default_network: oob
---
apiVersion: 'v1.0'
kind: Network
metadata:
name: oob
region: sitename
date: 17-FEB-2017
author: sh8121@att.com
description: Describe layer 2 and 3 attributes. Primary key is 'name'.
spec:
# CIDR representation of network number and netmask
cidr: '172.16.1.0/24'
# How addresses are allocated on the network. Supports 'static', 'dhcp'. Defaults to 'static'
allocation: 'static'
# VLAN of this network. Defaults to None
vlan: 100
# MTU of this network. Defaults to the MTU specified for the NetworkLink used for this network
dns:
# Domain name used to register addresses assigned from this network. Defaults to 'local'
domain: 'aic.att.com'
# Comma-separated list of DNS server IP addresses. These will be configured on the node if
# this network is identified as the node's primary network
servers: '8.8.8.8, 4.4.4.4'
# Defined IP address ranges. All node IP address assignments must fall into a defined range
# of the correct type
ranges:
# Type of range. Supports 'static' or 'dhcp'. No default
- type: 'dhcp'
# Start of the address range, inclusive. No default
start: '172.16.1.100'
# End of the address range, inclusive. No default
end: '172.16.1.254'
# Routes defined for this network, including the default route (i.e. default gateway)
routes:
# The network being routed to in CIDR notation. Default gateway is 0.0.0.0/0.
- subnet: '0.0.0.0/0'
# Next hop for traffic using this route
gateway: '172.16.1.3'
# Selection metric for the host selecting this route. No default
metric: 10
---
apiVersion: 'v1.0'
kind: HardwareProfile
metadata:
name: DellR720v2
region: sitename
date: 17-FEB-2017
author: sh8121@att.com
description: Describe server hardware attributes. Not a specific server, but profile adopted by a server definition.
spec:
# Chassis vendor
vendor: 'Dell'
# Chassis model generation
generation: '1'
# Chassis model version
hw_version: '2'
# Certified BIOS version for this chassis
bios_version: '2.2.3'
# Boot mode. Supports 'bios' or 'uefi'
boot_mode: 'bios'
# How the node should be initially bootstrapped. Supports 'pxe'
bootstrap_protocol: 'pxe'
# What network interface to use for PXE booting
# for chassis that support selection
pxe_interface: '0'
# Mapping of hardware alias/role to physical address
device_aliases:
# the device alias that will be referenced in HostProfile or BaremetalNode design parts
- alias: 'pnic01'
# The hardware bus the device resides on. Supports 'pci' and 'scsi'. No default
bus_type: 'pci'
# The type of device as reported by lshw. Can be used to validate hardware manifest. No default
dev_type: 'Intel 10Gbps NIC'
# Physical address on the bus
address: '0000:00:03.0'
---
apiVersion: 'v1.0'
kind: HostProfile
metadata:
name: lcp_node
region: sitename
date: 17-FEB-2017
author: sh8121@att.com
description: Describe server configuration attributes. Not a specific server, but profile adopted by a server definition
spec:
# The HostProfile this profile adopts initial state from. No default.
# See helm_drydock/objects/readme.md for information on how HostProfile and BaremetalNode inheritance works
host_profile: 'defaults'
# The HardwareProfile describing the node hardware. No default.
hardware_profile: 'DellR720v1'
# OOB access to node
oob:
# Type of OOB access. Supports 'ipmi'
type: 'ipmi'
# Which network - as defined in a Network design part - to access the OOB interface on
network: 'oob'
# Account name for authenticating on the OOB interface
account: 'admin'
# Credential for authentication on the OOB interface. The OOB driver will interpret this.
credential: 'admin'
# How local node storage is configured
storage:
# How storage is laid out. Supports 'lvm' and 'flat'. Defaults to 'lvm'
layout: 'lvm'
# Configuration for the boot disk
bootdisk:
# Hardware disk (or hardware RAID device) used for booting. Can refer to a
# HardwareProfile device alias or a explicit device name
device: 'bootdisk'
# Size of the root volume. Can be specified by percentage or explicit size in
# megabytes or gigabytes. Defaults to 100% of boot device.
root_size: '100g'
# If a separate boot volume is needed, specify size. Defaults to 0 where /boot goes on root.
boot_size: '0'
# Non-boot volumes that should be carved out of local storage
partitions:
# Name of the volume. Doesn't translate to any operating system config
name: 'logs'
# Hardware device the volume should go on
device: 'bootdisk'
# Partition UUID. Defaults to None. A value of 'generate' means Drydock will generate a UUID
part_uuid:
# Size of the volume in megabytes or gigabytes
size: '10g'
# Filesystem mountpoint if volume should be a filesystem
mountpoint: '/var/logs'
# The below are ignored if mountpoint is None
# Format of filesystem. Defaults to ext4
fstype: 'ext4'
# Mount options of the file system as used in /etc/fstab. Defaults to 'defaults'
mount_options: 'defaults'
# Filesystem UUID. Defaults to None. A value of 'generate' means Drydock will generate a UUID
fs_uuid:
# A filesystem label. Defaults to None
fs_label:
# Physical and logical network interfaces
interfaces:
# What the interface should be named in the operating system. May not match a hardware device name
device_name: bond0
# The NetworkLink connected to this interface. Must be the name of a NetworkLink design part
device_link: 'gp'
# Whether this interface is considered the primary interface on the server. Supports true and false. Defaults to false
primary: true
# Hardware devices that support this interface. For configuring a physical device, this would be a list of one
# For bonds, this would be a list of all the physical devices in the bond. These can refer to HardwareProfile device aliases
# or explicit device names
slaves:
- 'pnic01'
- 'pnic02'
# Network that will be accessed on this interface. These should each be to the name of a Network design part
# Multiple networks listed here assume that this interface is attached to a NetworkLink supporting trunking
networks:
- 'mgmt'
- 'admin'
# Metadata about the node
metadata:
# Explicit tags to propagate to Kubernetes. Simple strings of any value
tags:
- 'lcp_node'
# Key/value mapping that will propagate to the node for next-step bootstrapping
owner_data:
nic_access: 'sriov'
# The rack a node sits in. Simple string
rack: r1
---
apiVersion: 'v1.0'
kind: BaremetalNode
metadata:
name: lcp_controller01
region: sitename
date: 17-FEB-2017
author: sh8121@att.com
description: Specify a physical server.
spec:
# The HostProfile this server adopts initial state from. No default.
# See helm_drydock/objects/readme.md for information on how HostProfile and BaremetalNode inheritance works
host_profile: 'defaults'
# The HardwareProfile describing the node hardware. No default.
hardware_profile: 'DellR720v1'
# OOB access to node
oob:
# Type of OOB access. Supports 'ipmi'
type: 'ipmi'
# Which network - as defined in a Network design part - to access the OOB interface on
network: 'oob'
# Account name for authenticating on the OOB interface
account: 'admin'
# Credential for authentication on the OOB interface. The OOB driver will interpret this.
credential: 'admin'
# How local node storage is configured
storage:
# How storage is laid out. Supports 'lvm' and 'flat'. Defaults to 'lvm'
layout: 'lvm'
# Configuration for the boot disk
bootdisk:
# Hardware disk (or hardware RAID device) used for booting. Can refer to a
# HardwareProfile device alias or a explicit device name
device: 'bootdisk'
# Size of the root volume. Can be specified by percentage or explicit size in
# megabytes or gigabytes. Defaults to 100% of boot device.
root_size: '100g'
# If a separate boot volume is needed, specify size. Defaults to 0 where /boot goes on root.
boot_size: '0'
# Non-boot volumes that should be carved out of local storage
partitions:
# Name of the volume. Doesn't translate to any operating system config
name: 'logs'
# Hardware device the volume should go on
device: 'bootdisk'
# Partition UUID. Defaults to None. A value of 'generate' means Drydock will generate a UUID
part_uuid:
# Size of the volume in megabytes or gigabytes
size: '10g'
# Filesystem mountpoint if volume should be a filesystem
mountpoint: '/var/logs'
# The below are ignored if mountpoint is None
# Format of filesystem. Defaults to ext4
fstype: 'ext4'
# Mount options of the file system as used in /etc/fstab. Defaults to 'defaults'
mount_options: 'defaults'
# Filesystem UUID. Defaults to None. A value of 'generate' means Drydock will generate a UUID
fs_uuid:
# A filesystem label. Defaults to None
fs_label:
# Physical and logical network interfaces
interfaces:
# What the interface should be named in the operating system. May not match a hardware device name
- device_name: bond0
# The NetworkLink connected to this interface. Must be the name of a NetworkLink design part
device_link: 'gp'
# Whether this interface is considered the primary interface on the server. Supports true and false. Defaults to false
primary: true
# Hardware devices that support this interface. For configuring a physical device, this would be a list of one
# For bonds, this would be a list of all the physical devices in the bond. These can refer to HardwareProfile device aliases
# or explicit device names
slaves:
- 'pnic01'
- 'pnic02'
# Network that will be accessed on this interface. These should each be to the name of a Network design part
# Multiple networks listed here assume that this interface is attached to a NetworkLink supporting trunking
networks:
- 'mgmt'
- 'admin'
# Metadata about the node
metadata:
# Explicit tags to propagate to Kubernetes. Simple strings of any value
tags:
- 'lcp_node'
# Key/value mapping that will propagate to the node for next-step bootstrapping
owner_data:
nic_access: 'sriov'
# The rack a node sits in. Simple string
rack: r1
# How each attached network is accessed by this node
addressing:
# The name of a defined Network design part also listed in the 'networks' section of a interface definition
- network: 'pxe'
# Address should be an explicit IP address assignment or 'dhcp'
address: 'dhcp'
- network: 'mgmt'
address: '172.16.1.83'
---

7
examples/readme.md Normal file
View File

@ -0,0 +1,7 @@
# File Definition Examples
## designparts_v1.0.yaml
This is a reference file for the YAML schema supported by the Drydock YAML
ingester. Each design part currently supported is listed with all supported
attributes and comments on attribute use and restrictions.

View File

@ -21,21 +21,13 @@
class DrydockConfig(object):
def __init__(self):
self.server_driver_config = {
selected_driver = helm_drydock.drivers.server.maasdriver,
params = {
maas_api_key = ""
maas_api_url = ""
}
}
self.selected_network_driver = helm_drydock.drivers.network.noopdriver
self.control_config = {}
self.ingester_config = {
plugins = [helm_drydock.ingester.plugins.aicyaml.AicYamlIngester]
}
self.introspection_config = {}
self.orchestrator_config = {}
self.statemgmt_config = {
backend_driver = helm_drydock.drivers.statemgmt.etcd,
}
node_driver = {
'maasdriver': {
'api_key': 'KTMHgA42cNSMnfmJ82:cdg4yQUhp542aHsCTV:7Dc2KB9hQpWq3LfQAAAKAj6wdg22yWxZ',
'api_url': 'http://localhost:5240/MAAS/api/2.0/'
},
}
ingester_config = {
'plugins': ['helm_drydock.ingester.plugins.yaml']
}

View File

@ -3,4 +3,12 @@
This is the external facing API service to control the rest
of Drydock and query Drydock-managed data.
Anticipate basing this service on the falcon Python library
Anticipate basing this service on the falcon Python library
## Endpoints ##
### /tasks ###
POST - Create a new orchestration task and submit it for execution
GET - Get status of a task
DELETE - Cancel execution of a task if permitted

View File

@ -11,11 +11,92 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from threading import Thread, Lock
import uuid
import time
import helm_drydock.objects.fields as hd_fields
import helm_drydock.statemgmt as statemgmt
import helm_drydock.objects.task as tasks
import helm_drydock.error as errors
# This is the interface for the orchestrator to access a driver
# TODO Need to have each driver spin up a separate thread to manage
# driver tasks and feed them via queue
class ProviderDriver(object):
__init__(self):
pass
def __init__(self, orchestrator=None, state_manager=None, **kwargs):
if orchestrator is None:
raise ValueError("ProviderDriver requires valid orchestrator")
self.orchestrator = orchestrator
if state_manager is None:
raise ValueError("ProviderDriver requires valid state manager")
self.state_manager = state_manager
# These are the actions that this driver supports
self.supported_actions = [hd_fields.OrchestratorAction.Noop]
self.driver_name = "generic"
self.driver_key = "generic"
self.driver_desc = "Generic Provider Driver"
def execute_task(self, task_id):
task = self.state_manager.get_task(task_id)
task_action = task.action
if task_action in self.supported_actions:
task_runner = DriverTaskRunner(task_id, self.state_manager,
self.orchestrator)
task_runner.start()
while task_runner.is_alive():
time.sleep(1)
return
else:
raise errors.DriverError("Unsupported action %s for driver %s" %
(task_action, self.driver_desc))
# Execute a single task in a separate thread
class DriverTaskRunner(Thread):
def __init__(self, task_id, state_manager=None, orchestrator=None):
super(DriverTaskRunner, self).__init__()
self.orchestrator = orchestrator
if isinstance(state_manager, statemgmt.DesignState):
self.state_manager = state_manager
else:
raise DriverError("Invalid state manager specified")
self.task = self.state_manager.get_task(task_id)
return
def run(self):
self.execute_task()
def execute_task(self):
if self.task.action == hd_fields.OrchestratorAction.Noop:
self.orchestrator.task_field_update(self.task.get_id(),
status=hd_fields.TaskStatus.Running)
i = 0
while i < 5:
self.task = self.state_manager.get_task(self.task.get_id())
i = i + 1
if self.task.terminate:
self.orchestrator.task_field_update(self.task.get_id(),
status=hd_fields.TaskStatus.Terminated)
return
else:
time.sleep(1)
self.orchestrator.task_field_update(self.task.get_id(),
status=hd_fields.TaskStatus.Complete)
return
class DriverTask(object):

View File

@ -13,16 +13,44 @@
# limitations under the License.
#
import helm_drydock.objects.fields as hd_fields
import helm_drydock.error as errors
from helm_drydock.drivers import ProviderDriver
class NodeDriver(ProviderDriver):
class NodeAction(Enum):
PrepareNode = 'prepare_node'
ApplyNetworkConfig = 'apply_network_config'
ApplyStorageConfig = 'apply_storage_config'
InterrogateNode = 'interrogate_node'
DeployNode = 'deploy_node'
def __init__(self, **kwargs):
super(NodeDriver, self).__init__(**kwargs)
self.supported_actions = [hd_fields.OrchestratorAction.ValidateNodeServices,
hd_fields.OrchestratorAction.CreateNetworkTemplate,
hd_fields.OrchestratorAction.CreateStorageTemplate,
hd_fields.OrchestratorAction.CreateBootMedia,
hd_fields.OrchestratorAction.PrepareHardwareConfig,
hd_fields.OrchestratorAction.ConfigureHardware,
hd_fields.OrchestratorAction.InterrogateNode,
hd_fields.OrchestratorAction.ApplyNodeNetworking,
hd_fields.OrchestratorAction.ApplyNodeStorage,
hd_fields.OrchestratorAction.ApplyNodePlatform,
hd_fields.OrchestratorAction.DeployNode,
hd_fields.OrchestratorAction.DestroyNode]
self.driver_name = "node_generic"
self.driver_key = "node_generic"
self.driver_desc = "Generic Node Driver"
def execute_task(self, task_id):
    """Check that the task's action is supported by this driver.

    :param task_id: ID of the task to execute
    :raises errors.DriverError: if the task's action is not supported
    """
    task = self.state_manager.get_task(task_id)
    task_action = task.action

    if task_action in self.supported_actions:
        return
    else:
        # Bugfix: DriverError is not a bare name in this module; it is
        # provided by helm_drydock.error, imported as `errors`.
        raise errors.DriverError("Unsupported action %s for driver %s" %
                                 (task_action, self.driver_desc))

View File

@ -11,10 +11,3 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from helm_drydock.drivers.node import NodeDriver
class MaasNodeDriver(NodeDriver):
    """MaaS implementation of the generic node provisioning driver."""

    def __init__(self, **kwargs):
        # Bugfix: the signature previously took a single positional dict
        # (`def __init__(self, kwargs)`) while every other driver in this
        # tree accepts keyword arguments; accept **kwargs like the parent.
        super(MaasNodeDriver, self).__init__(**kwargs)

View File

@ -0,0 +1,147 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oauthlib import oauth1
import requests
import requests.auth as req_auth
import base64
class MaasOauth(req_auth.AuthBase):
    """requests auth hook that signs MaaS API calls with OAuth1 PLAINTEXT.

    The MaaS API key has the form 'consumer_key:token_key:token_secret';
    the consumer secret is always empty.
    """

    def __init__(self, apikey):
        self.consumer_key, self.token_key, self.token_secret = apikey.split(':')
        self.consumer_secret = ""
        self.realm = "OAuth"

        self.oauth_client = oauth1.Client(
            self.consumer_key,
            self.consumer_secret,
            self.token_key,
            self.token_secret,
            signature_method=oauth1.SIGNATURE_PLAINTEXT,
            realm=self.realm)

    def __call__(self, req):
        """Sign the prepared request in place and return it."""
        # oauthlib treats an empty body the same as no body.
        payload = req.body if req.body else None

        signed = self.oauth_client.sign(req.url, req.method, payload, req.headers)

        # sign() returns (url, headers, body); only the Authorization
        # header is copied onto the outgoing request.
        req.headers['Authorization'] = signed[1]['Authorization']
        return req
class MaasRequestFactory(object):
    """HTTP client for the MaaS REST API.

    Wraps a requests.Session so that every call is OAuth1-signed and
    carries MaaS-friendly defaults: a JSON Accept header, base64-encoded
    multipart form fields, and 'op' query-parameter handling.

    :param base_url: root URL of the MaaS API
    :param apikey: MaaS API key ('consumer:token:secret')
    """

    # NOTE(review): this module as shown imports oauth1/requests/base64 but
    # not helm_drydock.error; confirm `errors` is imported at the top of
    # the file or the raises below will NameError.

    def __init__(self, base_url, apikey):
        self.base_url = base_url
        self.apikey = apikey

        self.signer = MaasOauth(apikey)
        self.http_session = requests.Session()

    def get(self, endpoint, **kwargs):
        return self._send_request('GET', endpoint, **kwargs)

    def post(self, endpoint, **kwargs):
        return self._send_request('POST', endpoint, **kwargs)

    def delete(self, endpoint, **kwargs):
        return self._send_request('DELETE', endpoint, **kwargs)

    def put(self, endpoint, **kwargs):
        return self._send_request('PUT', endpoint, **kwargs)

    def test_connectivity(self):
        """Check that the MaaS API answers at all.

        :returns: True when the version endpoint responds 200
        :raises errors.TransientDriverError: on timeout or 50x response
        :raises errors.PersistentDriverError: on any other non-200 response
        """
        try:
            resp = self.get('version/')
        except requests.Timeout:
            # Bugfix: was `except requests.Timeout(ex):`, which evaluates
            # the undefined name `ex` and raises NameError when entered.
            raise errors.TransientDriverError("Timeout connection to MaaS")

        if resp.status_code in [500, 503]:
            raise errors.TransientDriverError("Received 50x error from MaaS")

        if resp.status_code != 200:
            raise errors.PersistentDriverError("Received unexpected error from MaaS")

        return True

    def test_authentication(self):
        """Check that the configured API key is accepted by MaaS.

        :returns: True when the account endpoint responds 200
        :raises errors.TransientDriverError: on timeout or 50x response
        :raises errors.PersistentDriverError: on auth failure or other errors
        """
        try:
            resp = self.get('account/', op='list_authorisation_tokens')
        except requests.Timeout:
            # Bugfix: was `except requests.Timeout(ex):` (NameError at runtime).
            raise errors.TransientDriverError("Timeout connection to MaaS")
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are not swallowed; any other failure is treated as persistent.
            raise errors.PersistentDriverError("Error accessing MaaS")

        if resp.status_code in [401, 403]:
            raise errors.PersistentDriverError("MaaS API Authentication Failed")

        if resp.status_code in [500, 503]:
            raise errors.TransientDriverError("Received 50x error from MaaS")

        if resp.status_code != 200:
            raise errors.PersistentDriverError("Received unexpected error from MaaS")

        return True

    def _send_request(self, method, endpoint, **kwargs):
        """Build, sign and send a request to base_url + endpoint.

        Recognized kwargs: headers, files (dict of form fields), params,
        op (MaaS operation name, folded into the 'op' query parameter) and
        timeout; anything else is passed through to requests.Request.
        """
        # Drop any caller-supplied auth; the OAuth signer is always used.
        kwargs.pop('auth', None)

        headers = kwargs.pop('headers', {})

        if 'Accept' not in headers:
            headers['Accept'] = 'application/json'

        if 'files' in kwargs:
            files = kwargs.pop('files')

            files_tuples = {}

            # MaaS expects multipart form fields base64-encoded as
            # text/plain; None values are omitted entirely.
            for (k, v) in files.items():
                if v is None:
                    continue
                files_tuples[k] = (None,
                                   base64.b64encode(str(v).encode('utf-8')).decode('utf-8'),
                                   'text/plain; charset="utf-8"',
                                   {'Content-Transfer-Encoding': 'base64'})

            kwargs['files'] = files_tuples

        # Bugfix: params must be POPPED from kwargs - using get() left it in
        # **kwargs and requests.Request() then received the keyword twice
        # (TypeError) whenever a caller supplied params explicitly.
        params = kwargs.pop('params', None)

        # Fold the convenience 'op' kwarg into the query parameters without
        # clobbering an explicit params['op'].
        if params is None and 'op' in kwargs:
            params = {'op': kwargs.pop('op')}
        elif 'op' in kwargs and 'op' not in params:
            params['op'] = kwargs.pop('op')
        elif 'op' in kwargs:
            kwargs.pop('op')

        # TODO(review): timeouts should be configurable
        timeout = kwargs.pop('timeout', None)
        if timeout is None:
            timeout = (2, 30)  # (connect, read) seconds

        request = requests.Request(method=method, url=self.base_url + endpoint,
                                   auth=self.signer, headers=headers,
                                   params=params, **kwargs)

        prepared_req = self.http_session.prepare_request(request)

        resp = self.http_session.send(prepared_req, timeout=timeout)

        if resp.status_code >= 400:
            # TODO(review): replace print with the logging framework.
            print("FAILED API CALL:\nURL: %s %s\nBODY:\n%s\nRESPONSE: %s\nBODY:\n%s" %
                  (prepared_req.method, prepared_req.url,
                   str(prepared_req.body).replace('\\r\\n', '\n'),
                   resp.status_code, resp.text))
        return resp

View File

@ -0,0 +1,306 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import helm_drydock.error as errors
import helm_drydock.config as config
import helm_drydock.drivers as drivers
import helm_drydock.objects.fields as hd_fields
import helm_drydock.objects.task as task_model
from helm_drydock.drivers.node import NodeDriver
from .api_client import MaasRequestFactory
import helm_drydock.drivers.node.maasdriver.models.fabric as maas_fabric
import helm_drydock.drivers.node.maasdriver.models.vlan as maas_vlan
import helm_drydock.drivers.node.maasdriver.models.subnet as maas_subnet
class MaasNodeDriver(NodeDriver):
    """Node provisioning driver backed by Ubuntu MaaS 2.x."""

    def __init__(self, **kwargs):
        super(MaasNodeDriver, self).__init__(**kwargs)

        self.driver_name = "maasdriver"
        self.driver_key = "maasdriver"
        self.driver_desc = "MaaS Node Provisioning Driver"

        # Driver-specific settings (api_url, api_key) keyed by driver_key.
        self.config = config.DrydockConfig.node_driver[self.driver_key]

    def execute_task(self, task_id):
        """Execute a supported orchestrator action against MaaS.

        Handles ValidateNodeServices inline; CreateNetworkTemplate is
        delegated to a MaasTaskRunner subtask with a 120s join timeout.

        :param task_id: ID of the task to execute
        :raises errors.DriverError: on invalid task, unsupported action,
            or missing design_id/site_name
        """
        task = self.state_manager.get_task(task_id)

        if task is None:
            raise errors.DriverError("Invalid task %s" % (task_id))

        if task.action not in self.supported_actions:
            raise errors.DriverError("Driver %s doesn't support task action %s"
                                     % (self.driver_desc, task.action))

        if task.action == hd_fields.OrchestratorAction.ValidateNodeServices:
            self.orchestrator.task_field_update(task.get_id(),
                status=hd_fields.TaskStatus.Running)
            maas_client = MaasRequestFactory(self.config['api_url'],
                                             self.config['api_key'])
            try:
                if maas_client.test_connectivity():
                    if maas_client.test_authentication():
                        self.orchestrator.task_field_update(task.get_id(),
                            status=hd_fields.TaskStatus.Complete,
                            result=hd_fields.ActionResult.Success)
                        return
            # Bugfix (all three handlers): was `except E(ex):`, which
            # evaluates the undefined name `ex` and raises NameError the
            # moment the handler is entered. Use `as ex` binding instead.
            except errors.TransientDriverError as ex:
                result = {
                    'retry': True,
                    'detail': str(ex),
                }
                # NOTE(review): normalized `result_details=` to
                # `result_detail=`, the spelling used everywhere else in
                # this file - confirm against task_field_update's signature.
                self.orchestrator.task_field_update(task.get_id(),
                    status=hd_fields.TaskStatus.Complete,
                    result=hd_fields.ActionResult.Failure,
                    result_detail=result)
                return
            except errors.PersistentDriverError as ex:
                result = {
                    'retry': False,
                    'detail': str(ex),
                }
                self.orchestrator.task_field_update(task.get_id(),
                    status=hd_fields.TaskStatus.Complete,
                    result=hd_fields.ActionResult.Failure,
                    result_detail=result)
                return
            except Exception as ex:
                result = {
                    'retry': False,
                    'detail': str(ex),
                }
                self.orchestrator.task_field_update(task.get_id(),
                    status=hd_fields.TaskStatus.Complete,
                    result=hd_fields.ActionResult.Failure,
                    result_detail=result)
                return

        design_id = getattr(task, 'design_id', None)

        if design_id is None:
            raise errors.DriverError("No design ID specified in task %s" %
                                     (task_id))

        if task.site_name is None:
            raise errors.DriverError("No site specified for task %s." %
                                     (task_id))

        self.orchestrator.task_field_update(task.get_id(),
            status=hd_fields.TaskStatus.Running)

        site_design = self.orchestrator.get_effective_site(design_id,
                                                           task.site_name)

        if task.action == hd_fields.OrchestratorAction.CreateNetworkTemplate:
            subtask = self.orchestrator.create_task(task_model.DriverTask,
                parent_task_id=task.get_id(), design_id=design_id,
                action=task.action, site_name=task.site_name,
                task_scope={'site': task.site_name})
            runner = MaasTaskRunner(state_manager=self.state_manager,
                                    orchestrator=self.orchestrator,
                                    task_id=subtask.get_id(),
                                    config=self.config)
            runner.start()

            # Give the runner two minutes to true up networks in MaaS.
            runner.join(timeout=120)

            if runner.is_alive():
                result = {
                    'retry': False,
                    'detail': 'MaaS Network creation timed-out'
                }
                self.orchestrator.task_field_update(task.get_id(),
                    status=hd_fields.TaskStatus.Complete,
                    result=hd_fields.ActionResult.Failure,
                    result_detail=result)
            else:
                subtask = self.state_manager.get_task(subtask.get_id())
                self.orchestrator.task_field_update(task.get_id(),
                    status=hd_fields.TaskStatus.Complete,
                    result=subtask.get_result())
            return
class MaasTaskRunner(drivers.DriverTaskRunner):
    """Threaded runner that applies Drydock design data to MaaS.

    Currently implements CreateNetworkTemplate: reconciles Drydock
    networks with MaaS fabrics, VLANs and subnets.
    """

    def __init__(self, config=None, **kwargs):
        super(MaasTaskRunner, self).__init__(**kwargs)
        # MaaS API settings (api_url, api_key) from the parent driver.
        self.driver_config = config

    def execute_task(self):
        task_action = self.task.action

        self.orchestrator.task_field_update(self.task.get_id(),
            status=hd_fields.TaskStatus.Running,
            result=hd_fields.ActionResult.Incomplete)

        self.maas_client = MaasRequestFactory(self.driver_config['api_url'],
                                              self.driver_config['api_key'])

        site_design = self.orchestrator.get_effective_site(self.task.design_id,
                                                           self.task.site_name)

        if task_action == hd_fields.OrchestratorAction.CreateNetworkTemplate:
            # Try to true up MaaS definitions of fabrics/vlans/subnets
            # with the networks defined in Drydock
            design_networks = site_design.networks

            subnets = maas_subnet.Subnets(self.maas_client)
            subnets.refresh()

            result_detail = {
                'detail': []
            }

            for n in design_networks:
                exists = subnets.query({'cidr': n.cidr})

                subnet = None

                if len(exists) > 0:
                    # A subnet with this CIDR already exists - update its
                    # attributes and make sure its VLAN matches the design.
                    subnet = exists[0]

                    subnet.name = n.name
                    subnet.dns_servers = n.dns_servers

                    vlan_list = maas_vlan.Vlans(self.maas_client,
                                                fabric_id=subnet.fabric)
                    vlan_list.refresh()

                    vlan = vlan_list.select(subnet.vlan)

                    if vlan is not None:
                        if ((n.vlan_id is None and vlan.vid != 0) or
                            (n.vlan_id is not None and vlan.vid != n.vlan_id)):
                            # if the VLAN name matches, assume this is the
                            # correct resource and it needs to be updated
                            if vlan.name == n.name:
                                vlan.set_vid(n.vlan_id)
                                vlan.mtu = n.mtu
                                vlan.update()
                            else:
                                vlan_id = n.vlan_id if n.vlan_id is not None else 0
                                target_vlan = vlan_list.query({'vid': vlan_id})
                                if len(target_vlan) > 0:
                                    subnet.vlan = target_vlan[0].resource_id
                                else:
                                    # This is a flag that after creating a
                                    # fabric and VLAN below, update the subnet
                                    subnet.vlan = None
                    else:
                        subnet.vlan = None

                    # Check if the routes have a default route
                    subnet.gateway_ip = n.get_default_gateway()

                    result_detail['detail'].append(
                        "Subnet %s found for network %s, updated attributes"
                        % (exists[0].resource_id, n.name))

                # Need to create a Fabric/Vlan for this network
                if subnet is None or (subnet is not None and subnet.vlan is None):
                    fabric_list = maas_fabric.Fabrics(self.maas_client)
                    fabric_list.refresh()
                    matching_fabrics = fabric_list.query({'name': n.name})

                    fabric = None
                    vlan = None

                    if len(matching_fabrics) > 0:
                        # Fabric exists, update VLAN
                        fabric = matching_fabrics[0]

                        vlan_list = maas_vlan.Vlans(self.maas_client,
                                                    fabric_id=fabric.resource_id)
                        vlan_list.refresh()
                        vlan_id = n.vlan_id if n.vlan_id is not None else 0
                        matching_vlans = vlan_list.query({'vid': vlan_id})

                        if len(matching_vlans) > 0:
                            vlan = matching_vlans[0]

                            vlan.name = n.name
                            if getattr(n, 'mtu', None) is not None:
                                vlan.mtu = n.mtu

                            if subnet is not None:
                                subnet.vlan = vlan.resource_id
                                subnet.update()

                            vlan.update()
                        else:
                            vlan = maas_vlan.Vlan(self.maas_client, name=n.name,
                                vid=vlan_id, mtu=getattr(n, 'mtu', None),
                                fabric_id=fabric.resource_id)
                            vlan = vlan_list.add(vlan)

                            if subnet is not None:
                                subnet.vlan = vlan.resource_id
                                subnet.update()
                    else:
                        # No fabric matches this network name - create one,
                        # then adopt its automatically-created default VLAN.
                        new_fabric = maas_fabric.Fabric(self.maas_client,
                                                        name=n.name)
                        new_fabric = fabric_list.add(new_fabric)
                        new_fabric.refresh()
                        fabric = new_fabric

                        vlan_list = maas_vlan.Vlans(self.maas_client,
                                                    fabric_id=new_fabric.resource_id)
                        vlan_list.refresh()
                        vlan = vlan_list.single()

                        vlan.name = n.name
                        vlan.vid = n.vlan_id if n.vlan_id is not None else 0
                        if getattr(n, 'mtu', None) is not None:
                            vlan.mtu = n.mtu
                        vlan.update()

                        if subnet is not None:
                            subnet.vlan = vlan.resource_id
                            subnet.update()

                    if subnet is None:
                        subnet = maas_subnet.Subnet(self.maas_client, name=n.name,
                            cidr=n.cidr, fabric=fabric.resource_id,
                            vlan=vlan.resource_id,
                            gateway_ip=n.get_default_gateway())

                        subnet_list = maas_subnet.Subnets(self.maas_client)
                        subnet = subnet_list.add(subnet)

            # Verify the end state: every design network should now have a
            # MaaS subnet with the right CIDR and name.
            subnet_list = maas_subnet.Subnets(self.maas_client)
            subnet_list.refresh()

            action_result = hd_fields.ActionResult.Incomplete

            success_rate = 0

            for n in design_networks:
                exists = subnet_list.query({'cidr': n.cidr})
                if len(exists) > 0:
                    subnet = exists[0]
                    if subnet.name == n.name:
                        success_rate = success_rate + 1
                    else:
                        # Bugfix: all three branches previously incremented
                        # success_rate, making Failure/PartialSuccess
                        # unreachable (the Failure test below compares
                        # against -len(design_networks)). Failures must
                        # decrement the tally.
                        success_rate = success_rate - 1
                else:
                    success_rate = success_rate - 1

            if success_rate == len(design_networks):
                action_result = hd_fields.ActionResult.Success
            elif success_rate == -(len(design_networks)):
                action_result = hd_fields.ActionResult.Failure
            else:
                action_result = hd_fields.ActionResult.PartialSuccess

            self.orchestrator.task_field_update(self.task.get_id(),
                status=hd_fields.TaskStatus.Complete,
                result=action_result,
                result_detail=result_detail)

View File

@ -0,0 +1,13 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@ -0,0 +1,273 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import re
import helm_drydock.error as errors
"""
A representation of a MaaS REST resource. Should be subclassed
for different resources and augmented with operations specific
to those resources
"""
class ResourceBase(object):
    """A single MaaS REST resource.

    Subclasses override resource_url (placeholders in {braces} are
    interpolated from instance attributes), fields (attributes accepted
    from kwargs / MaaS) and json_fields (attributes serialized back to
    MaaS).
    """

    resource_url = '/{id}'
    fields = ['resource_id']
    json_fields = ['resource_id']

    def __init__(self, api_client, **kwargs):
        self.api_client = api_client

        # Only attributes listed in `fields` and present in kwargs are set;
        # absent fields leave no attribute on the instance.
        for f in self.fields:
            if f in kwargs.keys():
                setattr(self, f, kwargs.get(f))

    def refresh(self):
        """Update resource attributes from MaaS."""
        url = self.interpolate_url()
        resp = self.api_client.get(url)

        updated_fields = resp.json()

        for f in self.json_fields:
            if f in updated_fields.keys():
                setattr(self, f, updated_fields.get(f))

    def interpolate_url(self):
        """Replace {placeholders} in resource_url with instance attribute values.

        :returns: the interpolated URL
        :raises ValueError: if a placeholder has no matching attribute value
        """
        # Raw string so the regex braces/backslashes are not treated as
        # (deprecated) string escape sequences.
        pattern = r'\{([a-z_]+)\}'
        regex = re.compile(pattern)
        start = 0
        new_url = self.resource_url

        while (start + 1) < len(self.resource_url):
            match = regex.search(self.resource_url, start)
            if match is None:
                return new_url
            param = match.group(1)
            val = getattr(self, param, None)
            if val is None:
                raise ValueError("Missing variable value")
            new_url = new_url.replace('{' + param + '}', str(val))
            start = match.end(1) + 1

        return new_url

    def update(self):
        """Push current resource attributes to MaaS via PUT.

        :returns: True on a 200 response
        :raises errors.DriverError: on any other response
        """
        data_dict = self.to_dict()
        url = self.interpolate_url()

        resp = self.api_client.put(url, files=data_dict)

        if resp.status_code == 200:
            return True

        raise errors.DriverError("Failed updating MAAS url %s - return code %s\n%s"
                                 % (url, resp.status_code, resp.text))

    def set_resource_id(self, res_id):
        """Set the resource_id for this instance.

        Should only be called when creating new instances and MAAS has
        assigned an id.
        """
        self.resource_id = res_id

    def to_json(self):
        """Serialize this resource into JSON matching the MaaS representation."""
        return json.dumps(self.to_dict())

    def to_dict(self):
        """Serialize this resource into a dict matching the MAAS representation.

        The local 'resource_id' attribute is emitted under the MaaS key
        'id'; unset/None fields are omitted.
        """
        data_dict = {}

        for f in self.json_fields:
            if getattr(self, f, None) is not None:
                if f == 'resource_id':
                    data_dict['id'] = getattr(self, f)
                else:
                    data_dict[f] = getattr(self, f)

        return data_dict

    @classmethod
    def from_json(cls, api_client, json_string):
        """Create an instance of this class from a MaaS JSON document.

        :raises errors.DriverError: if the JSON does not decode to an object
        """
        parsed = json.loads(json_string)

        if isinstance(parsed, dict):
            return cls.from_dict(api_client, parsed)

        raise errors.DriverError("Invalid JSON for class %s" % (cls.__name__))

    @classmethod
    def from_dict(cls, api_client, obj_dict):
        """Create an instance of this class from a dict of MaaS attributes."""
        refined_dict = {k: obj_dict.get(k, None) for k in cls.fields}
        if 'id' in obj_dict.keys():
            refined_dict['resource_id'] = obj_dict.get('id')

        i = cls(api_client, **refined_dict)
        return i
"""
A collection of MaaS resources.
Rather than a simple list, we will key the collection on resource
ID for more efficient access.
"""
class ResourceCollectionBase(object):
    """A collection of MaaS resources, keyed by resource ID.

    Rather than a simple list, resources are kept in a dict keyed on
    resource_id for more efficient access.
    """

    collection_url = ''
    collection_resource = ResourceBase

    def __init__(self, api_client):
        self.api_client = api_client
        self.resources = {}

    def interpolate_url(self):
        """Replace {placeholders} in collection_url with instance attribute values.

        :returns: the interpolated URL
        :raises ValueError: if a placeholder has no matching attribute value
        """
        pattern = r'\{([a-z_]+)\}'
        regex = re.compile(pattern)
        start = 0
        new_url = self.collection_url

        while (start + 1) < len(self.collection_url):
            match = regex.search(self.collection_url, start)
            if match is None:
                return new_url
            param = match.group(1)
            val = getattr(self, param, None)
            if val is None:
                raise ValueError("Missing variable value")
            new_url = new_url.replace('{' + param + '}', str(val))
            start = match.end(1) + 1

        return new_url

    def add(self, res):
        """Create a new resource in this collection in MaaS.

        :param res: resource instance to create
        :returns: res with its MaaS-assigned resource_id set
        :raises errors.DriverError: on a non-200 response
        """
        data_dict = res.to_dict()
        url = self.interpolate_url()

        resp = self.api_client.post(url, files=data_dict)

        if resp.status_code == 200:
            resp_json = resp.json()
            res.set_resource_id(resp_json.get('id'))
            return res

        raise errors.DriverError("Failed updating MAAS url %s - return code %s"
                                 % (url, resp.status_code))

    def append(self, res):
        """Append a resource instance to the collection locally only."""
        if isinstance(res, self.collection_resource):
            self.resources[res.resource_id] = res

    def refresh(self):
        """Initialize or refresh the collection list from MaaS."""
        url = self.interpolate_url()
        resp = self.api_client.get(url)

        if resp.status_code == 200:
            # Bugfix: was `self.resource = {}` (typo) - the cached dict was
            # never cleared, so resources deleted in MaaS survived refresh.
            self.resources = {}
            json_list = resp.json()

            for o in json_list:
                if isinstance(o, dict):
                    i = self.collection_resource.from_dict(self.api_client, o)
                    self.resources[i.resource_id] = i

        return

    def contains(self, res_id):
        """Return True if res_id is present in this collection."""
        if res_id in self.resources.keys():
            return True

        return False

    def select(self, res_id):
        """Return the resource with this ID, or None if not found."""
        return self.resources.get(res_id, None)

    def query(self, query):
        """Filter the collection on attributes other than the primary id.

        :param query: dict of attribute name -> value; values are compared
            as strings
        :returns: list of matching resources
        """
        result = list(self.resources.values())
        for (k, v) in query.items():
            result = [i for i in result
                      if str(getattr(i, k, None)) == str(v)]

        return result

    def single(self):
        """Return the collection's only item, or None if there isn't exactly one."""
        if self.len() == 1:
            for v in self.resources.values():
                return v
        else:
            return None

    def __iter__(self):
        """Iterate over the resources in the collection."""
        return iter(self.resources.values())

    def len(self):
        """Return the number of resources in the collection."""
        return len(self.resources)

View File

@ -0,0 +1,53 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import helm_drydock.drivers.node.maasdriver.models.base as model_base
import helm_drydock.drivers.node.maasdriver.models.vlan as model_vlan
class Fabric(model_base.ResourceBase):
    """A MaaS network fabric, including its child VLAN collection."""

    resource_url = 'fabrics/{resource_id}/'
    fields = ['resource_id', 'name', 'description']
    json_fields = ['name', 'description']

    def __init__(self, api_client, **kwargs):
        super(Fabric, self).__init__(api_client, **kwargs)

        # Only load VLANs when MaaS has already assigned an id; base
        # __init__ sets resource_id only when it appears in kwargs.
        if hasattr(self, 'resource_id'):
            self.refresh_vlans()

    def refresh(self):
        """Refresh fabric attributes and its VLAN collection from MaaS."""
        super(Fabric, self).refresh()
        self.refresh_vlans()
        return

    def refresh_vlans(self):
        """(Re)load the collection of VLANs belonging to this fabric."""
        self.vlans = model_vlan.Vlans(self.api_client, fabric_id=self.resource_id)
        self.vlans.refresh()

    def set_resource_id(self, res_id):
        """Record the MaaS-assigned id, then load the fabric's VLANs."""
        self.resource_id = res_id
        self.refresh_vlans()
class Fabrics(model_base.ResourceCollectionBase):
    """Collection of all MaaS fabrics."""

    collection_url = 'fabrics/'
    collection_resource = Fabric

    def __init__(self, api_client):
        super(Fabrics, self).__init__(api_client)

View File

@ -0,0 +1,55 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import helm_drydock.drivers.node.maasdriver.models.base as model_base
class Subnet(model_base.ResourceBase):
    """A MaaS IP subnet attached to a fabric/VLAN."""

    resource_url = 'subnets/{resource_id}/'
    fields = ['resource_id', 'name', 'description', 'fabric', 'vlan', 'vid', 'dhcp_on',
              'space', 'cidr', 'gateway_ip', 'rdns_mode', 'allow_proxy', 'dns_servers']
    json_fields = ['name', 'description','vlan', 'space', 'cidr', 'gateway_ip', 'rdns_mode',
                   'allow_proxy', 'dns_servers']

    def __init__(self, api_client, **kwargs):
        super(Subnet, self).__init__(api_client, **kwargs)

        # For now all subnets will be part of the default space, regardless
        # of any 'space' value passed in kwargs.
        self.space = 0

    @classmethod
    def from_dict(cls, api_client, obj_dict):
        """Build a Subnet from the MaaS dict representation.

        MaaS replaces the VLAN id with a full nested representation of the
        VLAN, so unwrap it back into scalar 'vlan' and 'fabric' values.
        """
        refined = {f: obj_dict.get(f) for f in cls.fields}

        if 'id' in obj_dict:
            refined['resource_id'] = obj_dict['id']

        vlan_info = refined.get('vlan')
        if isinstance(vlan_info, dict):
            refined['fabric'] = vlan_info['fabric_id']
            refined['vlan'] = vlan_info['id']

        return cls(api_client, **refined)
class Subnets(model_base.ResourceCollectionBase):
    """Collection of all MaaS subnets."""

    collection_url = 'subnets/'
    collection_resource = Subnet

    def __init__(self, api_client, **kwargs):
        # NOTE(review): extra kwargs are accepted but silently ignored here -
        # confirm that is intentional (callers construct with no kwargs).
        super(Subnets, self).__init__(api_client)

View File

@ -0,0 +1,86 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import helm_drydock.error as errors
import helm_drydock.drivers.node.maasdriver.models.base as model_base
class Vlan(model_base.ResourceBase):
    """A MaaS VLAN within a fabric.

    The MaaS API addresses VLANs by fabric id plus VLAN tag (vid) rather
    than by resource id, so api_id mirrors vid for URL interpolation.
    """

    resource_url = 'fabrics/{fabric_id}/vlans/{api_id}/'
    fields = ['resource_id', 'name', 'description', 'vid', 'fabric_id', 'dhcp_on', 'mtu']
    json_fields = ['name', 'description', 'vid', 'dhcp_on', 'mtu']

    def __init__(self, api_client, **kwargs):
        super(Vlan, self).__init__(api_client, **kwargs)

        # Bugfix: the base __init__ only sets attributes present in kwargs,
        # so a bare `self.vid` read raised AttributeError when 'vid' was
        # omitted. Default a missing/None vid to 0.
        if getattr(self, 'vid', None) is None:
            self.vid = 0

        # the MaaS API decided that the URL endpoint for VLANs should use
        # the VLAN tag (vid) rather than the resource ID. So to update the
        # vid, we have to keep two copies so that the resource_url
        # is accurate for updates
        self.api_id = self.vid

    def update(self):
        """Push attributes to MaaS, then re-sync api_id with the (new) vid."""
        super(Vlan, self).update()

        self.api_id = self.vid

    def set_vid(self, new_vid):
        """Set the VLAN tag; None maps to 0."""
        if new_vid is None:
            self.vid = 0
        else:
            self.vid = int(new_vid)
class Vlans(model_base.ResourceCollectionBase):
    """Collection of Vlan resources scoped to a single fabric."""

    collection_url = 'fabrics/{fabric_id}/vlans/'
    collection_resource = Vlan

    def __init__(self, api_client, **kwargs):
        super(Vlans, self).__init__(api_client)

        # fabric_id is interpolated into collection_url.
        self.fabric_id = kwargs.get('fabric_id', None)

    # NOTE(review): the triple-quoted string below deliberately disables an
    # add() override (per its own comment, the MAAS POST endpoint does not
    # accept all VLAN attributes). Confirm whether it should be restored or
    # removed before relying on the inherited add().
    """
    Create a new resource in this collection in MaaS
    def add(self, res):
        #MAAS API doesn't support all attributes in POST, so create and
        # then promptly update via PUT

        min_fields = {
            'name': res.name,
            'description': getattr(res, 'description', None),
        }

        if getattr(res, 'vid', None) is None:
            min_fields['vid'] = 0
        else:
            min_fields['vid'] = res.vid

        url = self.interpolate_url()
        resp = self.api_client.post(url, files=min_fields)

        # Check on initial POST creation
        if resp.status_code == 200:
            resp_json = resp.json()
            res.id = resp_json.get('id')
            # Submit PUT for additonal fields
            res.update()
            return res

        raise errors.DriverError("Failed updating MAAS url %s - return code %s\n%s"
            % (url, resp.status_code, resp.text))
    """

View File

@ -0,0 +1,46 @@
# MaaS Node Driver #
This driver will handle node provisioning using Ubuntu MaaS 2.1. It expects
the Drydock config to hold a valid MaaS API URL (e.g. http://host:port/MAAS/api/2.0)
and a valid API key for authentication.
## Drydock Model to MaaS Model Relationship ##
### Site ###
Will provide some attributes used for configuring MaaS site-wide such
as tag definitions and repositories.
### Network Link ###
Will provide attributes for configuring Node/Machine interfaces
### Network ###
MaaS will be configured with a single 'space'. Each Network in Drydock
will translate to a unique MaaS fabric+vlan+subnet. Any network with
an address range of type 'dhcp' will cause DHCP to be enabled in MaaS
for that network.
### Hardware Profile ###
A foundation for a Baremetal Node definition. Not directly used in MaaS.
### Host Profile ###
A foundation for a Baremetal Node definition. Not directly used in MaaS.
### Baremetal Node ###
Defines all the attributes required to commission and deploy nodes via MaaS
* bootdisk fields and partitions list - Define local node storage configuration
to be implemented by MaaS
* addressing and interface list - Combined with referenced network links and networks, define
interface (physical and virtual (bond / vlan)) configurations and network
addressing
* tags and owner data - Statically defined metadata that will propagate to
MaaS
* base_os - Select which stream a node will be deployed with
* kernel and kernel params - Allow for custom kernel selection and parameter
definition

View File

@ -12,31 +12,34 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# OOB:
# sync_hardware_clock
# collect_chassis_sysinfo
# enable_netboot
# initiate_reboot
# set_power_off
# set_power_on
import helm_drydock.objects.fields as hd_fields
import helm_drydock.error as errors
from helm_drydock.drivers import ProviderDriver
class OobDriver(ProviderDriver):
    """Generic out-of-band (OOB) management driver.

    Declares the orchestrator actions OOB drivers support and validates
    task actions; concrete drivers (e.g. pyghmi) implement the work.
    NOTE(review): this span interleaved pre- and post-diff lines; the
    superseded __init__/execute_action stub and OobAction enum were removed
    in favor of the post-diff class - confirm against the merged file.
    """

    def __init__(self, **kwargs):
        super(OobDriver, self).__init__(**kwargs)

        # Bugfix: first entry was spelled hd_fields.OrchestrationAction
        # (no such attribute) - the field module defines OrchestratorAction.
        self.supported_actions = [hd_fields.OrchestratorAction.ValidateOobServices,
                                  hd_fields.OrchestratorAction.ConfigNodePxe,
                                  hd_fields.OrchestratorAction.SetNodeBoot,
                                  hd_fields.OrchestratorAction.PowerOffNode,
                                  hd_fields.OrchestratorAction.PowerOnNode,
                                  hd_fields.OrchestratorAction.PowerCycleNode,
                                  hd_fields.OrchestratorAction.InterrogateOob]

        self.driver_name = "oob_generic"
        self.driver_key = "oob_generic"
        self.driver_desc = "Generic OOB Driver"

    def execute_task(self, task_id):
        """Check that the task's action is supported by this driver family.

        :raises errors.DriverError: if the task's action is not supported
        """
        task = self.state_manager.get_task(task_id)
        task_action = task.action

        if task_action in self.supported_actions:
            return
        else:
            # Bugfix: DriverError is provided by helm_drydock.error,
            # imported as `errors` in this module.
            raise errors.DriverError("Unsupported action %s for driver %s" %
                                     (task_action, self.driver_desc))

View File

@ -0,0 +1,303 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from pyghmi.ipmi.command import Command
import helm_drydock.error as errors
import helm_drydock.config as config
import helm_drydock.objects.fields as hd_fields
import helm_drydock.objects.task as task_model
import helm_drydock.drivers.oob as oob
import helm_drydock.drivers as drivers
class PyghmiDriver(oob.OobDriver):
def __init__(self, **kwargs):
    """Initialize the pyghmi (IPMI) out-of-band driver."""
    super(PyghmiDriver, self).__init__(**kwargs)

    self.driver_name = "pyghmi_driver"
    self.driver_key = "pyghmi_driver"
    self.driver_desc = "Pyghmi OOB Driver"

    # NOTE(review): this OOB driver reads its settings from
    # DrydockConfig.node_driver - confirm it should not come from an
    # oob_driver config map instead.
    self.config = config.DrydockConfig.node_driver[self.driver_key]
def execute_task(self, task_id):
task = self.state_manager.get_task(task_id)
if task is None:
raise errors.DriverError("Invalid task %s" % (task_id))
if task.action not in self.supported_actions:
raise errors.DriverError("Driver %s doesn't support task action %s"
% (self.driver_desc, task.action))
design_id = getattr(task, 'design_id', None)
if design_id is None:
raise errors.DriverError("No design ID specified in task %s" %
(task_id))
if task.site_name is None:
raise errors.DriverError("Not site specified for task %s." %
(task_id))
self.orchestrator.task_field_update(task.get_id(),
status=hd_fields.TaskStatus.Running)
if task.action == hd_fields.OrchestratorAction.ValidateOobServices:
self.orchestrator.task_field_update(task.get_id(),
status=hd_fields.TaskStatus.Complete,
result=hd_fields.ActionResult.Success)
return
site_design = self.orchestrator.get_effective_site(design_id, task.site_name)
target_nodes = []
if len(task.node_list) > 0:
target_nodes.extend([x
for x in site_design.baremetal_nodes
if x.get_name() in task.node_list])
else:
target_nodes.extend(site_design.baremetal_nodes)
incomplete_subtasks = []
# For each target node, create a subtask and kick off a runner
for n in target_nodes:
subtask = self.orchestrator.create_task(task_model.DriverTask,
parent_task_id=task.get_id(), design_id=design_id,
action=task.action,
task_scope={'site': task.site_name,
'node_names': [n.get_name()]})
incomplete_subtasks.append(subtask.get_id())
runner = PyghmiTaskRunner(state_manager=self.state_manager,
orchestrator=self.orchestrator,
task_id=subtask.get_id(), node=n)
runner.start()
# Wait for subtasks to complete
# TODO need some kind of timeout
i = 0
while len(incomplete_subtasks) > 0:
for n in incomplete_subtasks:
t = self.state_manager.get_task(n)
if t.get_status() in [hd_fields.TaskStatus.Terminated,
hd_fields.TaskStatus.Complete,
hd_fields.TaskStatus.Errored]:
incomplete_subtasks.remove(n)
time.sleep(2)
i = i+1
if i == 5:
break
task = self.state_manager.get_task(task.get_id())
subtasks = map(self.state_manager.get_task, task.get_subtasks())
success_subtasks = [x
for x in subtasks
if x.get_result() == hd_fields.ActionResult.Success]
nosuccess_subtasks = [x
for x in subtasks
if x.get_result() in [hd_fields.ActionResult.PartialSuccess,
hd_fields.ActionResult.Failure]]
print("Task %s successful subtasks: %s" %
(task.get_id(), len(success_subtasks)))
print("Task %s unsuccessful subtasks: %s" %
(task.get_id(), len(nosuccess_subtasks)))
print("Task %s total subtasks: %s" %
(task.get_id(), len(task.get_subtasks())))
task_result = None
if len(success_subtasks) > 0 and len(nosuccess_subtasks) > 0:
task_result = hd_fields.ActionResult.PartialSuccess
elif len(success_subtasks) == 0 and len(nosuccess_subtasks) > 0:
task_result = hd_fields.ActionResult.Failure
elif len(success_subtasks) > 0 and len(nosuccess_subtasks) == 0:
task_result = hd_fields.ActionResult.Success
else:
task_result = hd_fields.ActionResult.Incomplete
self.orchestrator.task_field_update(task.get_id(),
result=task_result,
status=hd_fields.TaskStatus.Complete)
return
# Runner thread that executes a single-node OOB action over IPMI via pyghmi.
class PyghmiTaskRunner(drivers.DriverTaskRunner):
# node: the BaremetalNode model this runner targets (required).
def __init__(self, node=None, **kwargs):
super(PyghmiTaskRunner, self).__init__(**kwargs)
# We cheat here by providing the Node model instead
# of making the runner source it from statemgmt
if node is None:
raise errors.DriverError("Did not specify target node")
self.node = node
# Execute self.task against self.node: validate scope, open an IPMI
# session, then dispatch on the task action. Every branch marks the task
# Complete with a Success/Failure result before returning.
def execute_task(self):
task_action = self.task.action
# This runner handles exactly one node per task.
if len(self.task.node_list) != 1:
self.orchestrator.task_field_update(self.task.get_id(),
result=hd_fields.ActionResult.Incomplete,
status=hd_fields.TaskStatus.Errored)
raise errors.DriverError("Multiple names (%s) in task %s node_list"
% (len(self.task.node_list), self.task.get_id()))
target_node_name = self.task.node_list[0]
# The injected node model must match the task's node scope.
if self.node.get_name() != target_node_name:
self.orchestrator.task_field_update(self.task.get_id(),
result=hd_fields.ActionResult.Incomplete,
status=hd_fields.TaskStatus.Errored)
raise errors.DriverError("Runner node does not match " \
"task node scope")
# Resolve the node's BMC address from its designated OOB network.
ipmi_network = self.node.applied.get('oob_network')
ipmi_address = self.node.get_network_address(ipmi_network)
if ipmi_address is None:
self.orchestrator.task_field_update(self.task.get_id(),
result=hd_fields.ActionResult.Incomplete,
status=hd_fields.TaskStatus.Errored)
raise errors.DriverError("Node %s has no IPMI address" %
(target_node_name))
self.orchestrator.task_field_update(self.task.get_id(),
status=hd_fields.TaskStatus.Running)
ipmi_account = self.node.applied.get('oob_account', '')
ipmi_credential = self.node.applied.get('oob_credential', '')
# Open the IPMI session against the node's BMC.
ipmi_session = Command(bmc=ipmi_address, userid=ipmi_account,
password=ipmi_credential)
# ConfigNodePxe is not implemented for IPMI; report Failure.
if task_action == hd_fields.OrchestratorAction.ConfigNodePxe:
self.orchestrator.task_field_update(self.task.get_id(),
result=hd_fields.ActionResult.Failure,
status=hd_fields.TaskStatus.Complete)
return
# Set next boot device to PXE, then read it back to confirm.
elif task_action == hd_fields.OrchestratorAction.SetNodeBoot:
ipmi_session.set_bootdev('pxe')
# Give the BMC a moment to apply the boot device change.
time.sleep(3)
bootdev = ipmi_session.get_bootdev()
# pyghmi reports the PXE boot device as 'network'.
if bootdev.get('bootdev', '') == 'network':
self.orchestrator.task_field_update(self.task.get_id(),
result=hd_fields.ActionResult.Success,
status=hd_fields.TaskStatus.Complete)
else:
self.orchestrator.task_field_update(self.task.get_id(),
result=hd_fields.ActionResult.Failure,
status=hd_fields.TaskStatus.Complete)
return
# Power off and poll up to 18 x 10s (~3 minutes) for 'off'.
elif task_action == hd_fields.OrchestratorAction.PowerOffNode:
ipmi_session.set_power('off')
i = 18
while i > 0:
power_state = ipmi_session.get_power()
if power_state.get('powerstate', '') == 'off':
break
time.sleep(10)
i = i - 1
if power_state.get('powerstate', '') == 'off':
self.orchestrator.task_field_update(self.task.get_id(),
result=hd_fields.ActionResult.Success,
status=hd_fields.TaskStatus.Complete)
else:
self.orchestrator.task_field_update(self.task.get_id(),
result=hd_fields.ActionResult.Failure,
status=hd_fields.TaskStatus.Complete)
return
# Power on and poll up to ~3 minutes for 'on'.
elif task_action == hd_fields.OrchestratorAction.PowerOnNode:
ipmi_session.set_power('on')
i = 18
while i > 0:
power_state = ipmi_session.get_power()
if power_state.get('powerstate', '') == 'on':
break
time.sleep(10)
i = i - 1
if power_state.get('powerstate', '') == 'on':
self.orchestrator.task_field_update(self.task.get_id(),
result=hd_fields.ActionResult.Success,
status=hd_fields.TaskStatus.Complete)
else:
self.orchestrator.task_field_update(self.task.get_id(),
result=hd_fields.ActionResult.Failure,
status=hd_fields.TaskStatus.Complete)
return
# Full power cycle: off (confirmed), then on (confirmed).
elif task_action == hd_fields.OrchestratorAction.PowerCycleNode:
ipmi_session.set_power('off')
# Wait for power state of off before booting back up
# We'll wait for up to 3 minutes to power off
i = 18
while i > 0:
power_state = ipmi_session.get_power()
if power_state.get('powerstate', '') == 'off':
break
time.sleep(10)
i = i - 1
# Still 'on' after the wait means the power-off failed; abort.
if power_state.get('powerstate', '') == 'on':
self.orchestrator.task_field_update(self.task.get_id(),
result=hd_fields.ActionResult.Failure,
status=hd_fields.TaskStatus.Complete)
return
ipmi_session.set_power('on')
i = 18
while i > 0:
power_state = ipmi_session.get_power()
if power_state.get('powerstate', '') == 'on':
break
time.sleep(10)
i = i - 1
if power_state.get('powerstate', '') == 'on':
self.orchestrator.task_field_update(self.task.get_id(),
result=hd_fields.ActionResult.Success,
status=hd_fields.TaskStatus.Complete)
else:
self.orchestrator.task_field_update(self.task.get_id(),
result=hd_fields.ActionResult.Failure,
status=hd_fields.TaskStatus.Complete)
return
# Interrogation currently returns only the BMC's MCI data as detail.
elif task_action == hd_fields.OrchestratorAction.InterrogateOob:
mci_id = ipmi_session.get_mci()
self.orchestrator.task_field_update(self.task.get_id(),
result=hd_fields.ActionResult.Success,
status=hd_fields.TaskStatus.Complete,
result_detail=mci_id)
return

View File

@ -2,14 +2,23 @@
Drivers are downstream actors that Drydock will use to actually execute
orchestration actions. It is intended to be a pluggable architecture
so that various downstream automation can be used.
so that various downstream automation can be used. A driver must implement all actions even if the implementation is effectively a no-op.
## oob ##
The oob drivers will interface with physical servers' out-of-band
management system (e.g. Dell iDRAC, HP iLO, etc...). OOB management
will be used for setting a system to use PXE boot and power cycling
servers.
servers.
### Actions ###
* ConfigNodePxe - Where available, configure PXE boot options (e.g. PXE interface)
* SetNodeBoot - Set boot source (PXE, hard disk) of a node
* PowerOffNode - Power down a node
* PowerOnNode - Power up a node
* PowerCycleNode - Power cycle a node
* InterrogateOob - Interrogate a node's OOB interface. The data returned depends on what functionality is implemented for a particular OOB interface.
## node ##
@ -17,10 +26,30 @@ The node drivers will interface with an external bootstrapping system
for loading the base OS on a server and configuring hardware, network,
and storage.
### Actions ###
* CreateNetworkTemplate - Configure site-wide network information in bootstrapper
* CreateStorageTemplate - Configure site-wide storage information in bootstrapper
* CreateBootMedia - Ensure all needed boot media is available to the bootstrapper including external repositories
* PrepareHardwareConfig - Prepare the bootstrapper to handle all hardware configuration actions (firmware updates, RAID configuration, driver installation)
* ConfigureHardware - Update and validate all hardware configurations on a node prior to deploying the OS on it
* InterrogateNode - Interrogate the bootstrapper about node information. Depending on the current state of the node, this interrogation will produce different information.
* ApplyNodeNetworking - Configure networking for a node
* ApplyNodeStorage - Configure storage for a node
* ApplyNodePlatform - Configure stream and kernel options for a node
* DeployNode - Deploy the OS to a node
* DestroyNode - Take steps to bring a node back to a blank undeployed state
## network ##
The network drivers will interface with switches for managing port
configuration to support the bootstrapping of physical nodes. This is not
intended to be a network provisioner, but instead is a support driver
for node bootstrapping where temporary changes to network configurations
are required.
are required.
### Actions ###
* InterrogatePort - Request information about the current configuration of a network port
* ConfigurePortProvisioning - Configure a network port in provisioning (PXE) mode
* ConfigurePortProduction - Configure a network port in production (configuration post-deployment) mode

View File

@ -14,3 +14,24 @@
class DesignError(Exception):
    """Raised for invalid or unresolvable site design data."""
class StateError(Exception):
    """Raised for errors in the state management layer."""
class OrchestratorError(Exception):
    """Base class for errors raised by the orchestrator."""
class TransientOrchestratorError(OrchestratorError):
    """Orchestrator error that may succeed if the operation is retried."""
class PersistentOrchestratorError(OrchestratorError):
    """Orchestrator error that will not succeed on retry."""
class DriverError(Exception):
    """Base class for errors raised by downstream drivers."""
class TransientDriverError(DriverError):
    """Driver error that may succeed if the operation is retried."""
class PersistentDriverError(DriverError):
    """Driver error that will not succeed on retry."""

View File

@ -17,14 +17,16 @@
import logging
import yaml
import uuid
import helm_drydock.model.site as site
import helm_drydock.model.network as network
import helm_drydock.model.hwprofile as hwprofile
import helm_drydock.model.node as node
import helm_drydock.model.hostprofile as hostprofile
import helm_drydock.objects as objects
import helm_drydock.objects.site as site
import helm_drydock.objects.network as network
import helm_drydock.objects.hwprofile as hwprofile
import helm_drydock.objects.node as node
import helm_drydock.objects.hostprofile as hostprofile
from helm_drydock.statemgmt import DesignState, SiteDesign, DesignError
from helm_drydock.statemgmt import DesignState
class Ingester(object):
@ -64,21 +66,22 @@ class Ingester(object):
self.log.error("ingest_data called without valid DesignState handler")
raise Exception("Invalid design_state handler")
# TODO: this method needs to be refactored to handle a design base vs. a design change
design_data = None
try:
design_data = design_state.get_design_base()
except DesignError:
design_data = SiteDesign()
# If no design_id is specified, instantiate a new one
if 'design_id' not in kwargs.keys():
design_id = str(uuid.uuid4())
design_data = objects.SiteDesign(id=design_id)
design_state.post_design(design_data)
else:
design_id = kwargs.get('design_id')
design_data = design_state.get_design(design_id)
if plugin_name in self.registered_plugins:
design_items = self.registered_plugins[plugin_name].ingest_data(**kwargs)
# Need to persist data here, but we don't yet have the statemgmt service working
for m in design_items:
if type(m) is site.Site:
design_data.add_site(m)
design_data.set_site(m)
elif type(m) is network.Network:
design_data.add_network(m)
elif type(m) is network.NetworkLink:
@ -89,7 +92,7 @@ class Ingester(object):
design_data.add_hardware_profile(m)
elif type(m) is node.BaremetalNode:
design_data.add_baremetal_node(m)
design_state.put_design_base(design_data)
design_state.put_design(design_data)
else:
self.log.error("Could not find plugin %s to ingest data." % (plugin_name))
raise LookupError("Could not find plugin %s" % plugin_name)

View File

@ -19,25 +19,13 @@
import yaml
import logging
import helm_drydock.model.hwprofile as hwprofile
import helm_drydock.model.node as node
import helm_drydock.model.site as site
import helm_drydock.model.hostprofile as hostprofile
import helm_drydock.model.network as network
import helm_drydock.objects.fields as hd_fields
from helm_drydock import objects
from helm_drydock.ingester.plugins import IngesterPlugin
class YamlIngester(IngesterPlugin):
kind_map = {
"Region": site.Site,
"NetworkLink": network.NetworkLink,
"HardwareProfile": hwprofile.HardwareProfile,
"Network": network.Network,
"HostProfile": hostprofile.HostProfile,
"BaremetalNode": node.BaremetalNode,
}
# YAML plugin needs no state of its own; delegate shared plugin setup
# (whatever IngesterPlugin initializes) to the base class.
def __init__(self):
super(YamlIngester, self).__init__()
@ -91,19 +79,275 @@ class YamlIngester(IngesterPlugin):
for d in parsed_data:
kind = d.get('kind', '')
if kind != '':
if kind in YamlIngester.kind_map:
try:
model = YamlIngester.kind_map[kind](**d)
if kind == 'Region':
api_version = d.get('apiVersion', '')
if api_version == 'v1.0':
model = objects.Site()
metadata = d.get('metadata', {})
# Need to add validation logic, we'll assume the input is
# valid for now
model.name = metadata.get('name', '')
model.status = hd_fields.SiteStatus.Unknown
model.source = hd_fields.ModelSource.Designed
spec = d.get('spec', {})
model.tag_definitions = objects.NodeTagDefinitionList()
tag_defs = spec.get('tag_definitions', [])
for t in tag_defs:
tag_model = objects.NodeTagDefinition()
tag_model.tag = t.get('tag', '')
tag_model.type = t.get('definition_type', '')
tag_model.definition = t.get('definition', '')
if tag_model.type not in ['lshw_xpath']:
raise ValueError('Unknown definition type in ' \
'NodeTagDefinition: %s' % (self.definition_type))
model.tag_definitions.append(tag_model)
models.append(model)
except Exception as err:
self.log.error("Error building model %s: %s"
% (kind, str(err)))
continue
else:
self.log.error(
"Error processing document, unknown kind %s"
% (kind))
continue
else:
raise ValueError('Unknown API version %s of Region kind' %s (api_version))
elif kind == 'NetworkLink':
api_version = d.get('apiVersion', '')
if api_version == "v1.0":
model = objects.NetworkLink()
metadata = d.get('metadata', {})
spec = d.get('spec', {})
model.name = metadata.get('name', '')
model.site = metadata.get('region', '')
bonding = spec.get('bonding', {})
model.bonding_mode = bonding.get('mode',
hd_fields.NetworkLinkBondingMode.Disabled)
# How should we define defaults for CIs not in the input?
if model.bonding_mode == hd_fields.NetworkLinkBondingMode.LACP:
model.bonding_xmit_hash = bonding.get('hash', 'layer3+4')
model.bonding_peer_rate = bonding.get('peer_rate', 'fast')
model.bonding_mon_rate = bonding.get('mon_rate', '100')
model.bonding_up_delay = bonding.get('up_delay', '200')
model.bonding_down_delay = bonding.get('down_delay', '200')
model.mtu = spec.get('mtu', None)
model.linkspeed = spec.get('linkspeed', None)
trunking = spec.get('trunking', {})
model.trunk_mode = trunking.get('mode', hd_fields.NetworkLinkTrunkingMode.Disabled)
model.native_network = trunking.get('default_network', None)
models.append(model)
else:
raise ValueError('Unknown API version of object')
elif kind == 'Network':
api_version = d.get('apiVersion', '')
if api_version == "v1.0":
model = objects.Network()
metadata = d.get('metadata', {})
spec = d.get('spec', {})
model.name = metadata.get('name', '')
model.site = metadata.get('region', '')
model.cidr = spec.get('cidr', None)
model.allocation_strategy = spec.get('allocation', 'static')
model.vlan_id = spec.get('vlan_id', None)
model.mtu = spec.get('mtu', None)
dns = spec.get('dns', {})
model.dns_domain = dns.get('domain', 'local')
model.dns_servers = dns.get('servers', None)
ranges = spec.get('ranges', [])
model.ranges = []
for r in ranges:
model.ranges.append({'type': r.get('type', None),
'start': r.get('start', None),
'end': r.get('end', None),
})
routes = spec.get('routes', [])
model.routes = []
for r in routes:
model.routes.append({'subnet': r.get('subnet', None),
'gateway': r.get('gateway', None),
'metric': r.get('metric', None),
})
models.append(model)
elif kind == 'HardwareProfile':
api_version = d.get('apiVersion', '')
if api_version == 'v1.0':
metadata = d.get('metadata', {})
spec = d.get('spec', {})
model = objects.HardwareProfile()
# Need to add validation logic, we'll assume the input is
# valid for now
model.name = metadata.get('name', '')
model.site = metadata.get('region', '')
model.source = hd_fields.ModelSource.Designed
model.vendor = spec.get('vendor', None)
model.generation = spec.get('generation', None)
model.hw_version = spec.get('hw_version', None)
model.bios_version = spec.get('bios_version', None)
model.boot_mode = spec.get('boot_mode', None)
model.bootstrap_protocol = spec.get('bootstrap_protocol', None)
model.pxe_interface = spec.get('pxe_interface', None)
model.devices = objects.HardwareDeviceAliasList()
device_aliases = spec.get('device_aliases', {})
for d in device_aliases:
dev_model = objects.HardwareDeviceAlias()
dev_model.source = hd_fields.ModelSource.Designed
dev_model.alias = d.get('alias', None)
dev_model.bus_type = d.get('bus_type', None)
dev_model.dev_type = d.get('dev_type', None)
dev_model.address = d.get('address', None)
model.devices.append(dev_model)
models.append(model)
elif kind == 'HostProfile' or kind == 'BaremetalNode':
api_version = d.get('apiVersion', '')
if api_version == "v1.0":
model = None
if kind == 'HostProfile':
model = objects.HostProfile()
else:
model = objects.BaremetalNode()
metadata = d.get('metadata', {})
spec = d.get('spec', {})
model.name = metadata.get('name', '')
model.site = metadata.get('region', '')
model.source = hd_fields.ModelSource.Designed
model.parent_profile = spec.get('host_profile', None)
model.hardware_profile = spec.get('hardware_profile', None)
oob = spec.get('oob', {})
model.oob_type = oob.get('type', None)
model.oob_network = oob.get('network', None)
model.oob_account = oob.get('account', None)
model.oob_credential = oob.get('credential', None)
storage = spec.get('storage', {})
model.storage_layout = storage.get('layout', 'lvm')
bootdisk = storage.get('bootdisk', {})
model.bootdisk_device = bootdisk.get('device', None)
model.bootdisk_root_size = bootdisk.get('root_size', None)
model.bootdisk_boot_size = bootdisk.get('boot_size', None)
partitions = storage.get('partitions', [])
model.partitions = objects.HostPartitionList()
for p in partitions:
part_model = objects.HostPartition()
part_model.name = p.get('name', None)
part_model.source = hd_fields.ModelSource.Designed
part_model.device = p.get('device', None)
part_model.part_uuid = p.get('part_uuid', None)
part_model.size = p.get('size', None)
part_model.mountpoint = p.get('mountpoint', None)
part_model.fstype = p.get('fstype', 'ext4')
part_model.mount_options = p.get('mount_options', 'defaults')
part_model.fs_uuid = p.get('fs_uuid', None)
part_model.fs_label = p.get('fs_label', None)
model.partitions.append(part_model)
interfaces = spec.get('interfaces', [])
model.interfaces = objects.HostInterfaceList()
for i in interfaces:
int_model = objects.HostInterface()
int_model.device_name = i.get('device_name', None)
int_model.network_link = i.get('device_link', None)
int_model.primary_netowrk = i.get('primary', False)
int_model.hardware_slaves = []
slaves = i.get('slaves', [])
for s in slaves:
int_model.hardware_slaves.append(s)
int_model.networks = []
networks = i.get('networks', [])
for n in networks:
int_model.networks.append(n)
model.interfaces.append(int_model)
node_metadata = spec.get('metadata', {})
metadata_tags = node_metadata.get('tags', [])
model.tags = []
for t in metadata_tags:
model.tags.append(t)
owner_data = node_metadata.get('owner_data', {})
model.owner_data = {}
for k, v in owner_data.items():
model.owner_data[k] = v
model.rack = node_metadata.get('rack', None)
if kind == 'BaremetalNode':
addresses = spec.get('addressing', [])
if len(addresses) == 0:
raise ValueError('BaremetalNode needs at least' \
' 1 assigned address')
model.addressing = objects.IpAddressAssignmentList()
for a in addresses:
assignment = objects.IpAddressAssignment()
address = a.get('address', '')
if address == 'dhcp':
assignment.type = 'dhcp'
assignment.address = None
assignment.network = a.get('network')
model.addressing.append(assignment)
elif address != '':
assignment.type = 'static'
assignment.address = a.get('address')
assignment.network = a.get('network')
model.addressing.append(assignment)
else:
self.log.error("Invalid address assignment %s on Node %s"
% (address, self.name))
models.append(model)
else:
raise ValueError('Unknown API version %s of Kind HostProfile' % (api_version))
else:
self.log.error(
"Error processing document in %s, no kind field"

View File

@ -1,378 +0,0 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Models for helm_drydock
#
import logging
from copy import deepcopy
from helm_drydock.orchestrator.enum import SiteStatus
from helm_drydock.orchestrator.enum import NodeStatus
from helm_drydock.model.network import Network
from helm_drydock.model.network import NetworkLink
from helm_drydock.model import Utils
# A host profile parsed from a v1.0 definition document. Profiles form an
# inheritance chain (parent_profile) that apply_inheritance() flattens.
class HostProfile(object):
# Build the profile from a parsed YAML document's keyword fields
# (apiVersion, metadata, spec). Raises ValueError for unknown versions.
def __init__(self, **kwargs):
self.log = logging.Logger('model')
self.api_version = kwargs.get('apiVersion', '')
if self.api_version == "v1.0":
metadata = kwargs.get('metadata', {})
spec = kwargs.get('spec', {})
self.name = metadata.get('name', '')
# 'region' in the document maps to the site name on the model.
self.site = metadata.get('region', '')
# Name of the parent profile this one inherits from, if any.
self.parent_profile = spec.get('host_profile', None)
self.hardware_profile = spec.get('hardware_profile', None)
# Out-of-band (BMC) access settings.
oob = spec.get('oob', {})
self.oob_type = oob.get('type', None)
self.oob_network = oob.get('network', None)
self.oob_account = oob.get('account', None)
self.oob_credential = oob.get('credential', None)
storage = spec.get('storage', {})
self.storage_layout = storage.get('layout', 'lvm')
bootdisk = storage.get('bootdisk', {})
self.bootdisk_device = bootdisk.get('device', None)
self.bootdisk_root_size = bootdisk.get('root_size', None)
self.bootdisk_boot_size = bootdisk.get('boot_size', None)
partitions = storage.get('partitions', [])
self.partitions = []
for p in partitions:
self.partitions.append(HostPartition(self.api_version, **p))
interfaces = spec.get('interfaces', [])
self.interfaces = []
for i in interfaces:
self.interfaces.append(HostInterface(self.api_version, **i))
# Free-form node metadata: tags, owner data, rack location.
node_metadata = spec.get('metadata', {})
metadata_tags = node_metadata.get('tags', [])
self.tags = []
for t in metadata_tags:
self.tags.append(t)
owner_data = node_metadata.get('owner_data', {})
self.owner_data = {}
for k, v in owner_data.items():
self.owner_data[k] = v
self.rack = node_metadata.get('rack', None)
else:
self.log.error("Unknown API version %s of %s" %
(self.api_version, self.__class__))
raise ValueError('Unknown API version of object')
def get_rack(self):
return self.rack
def get_name(self):
return self.name
# True if this profile carries the given metadata tag.
def has_tag(self, tag):
if tag in self.tags:
return True
return False
# Return a copy of this profile with all values inherited from its
# (recursively flattened) parent profile applied. 'site' must resolve
# profile names via get_host_profile(); raises NameError when the
# declared parent cannot be found.
def apply_inheritance(self, site):
# We return a deep copy of the profile so as not to corrupt
# the original model
self_copy = deepcopy(self)
if self.parent_profile is None:
return self_copy
parent = site.get_host_profile(self.parent_profile)
if parent is None:
raise NameError("Cannot find parent profile %s for %s"
% (self.parent_profile, self.name))
# Flatten the parent first so inheritance composes transitively.
parent = parent.apply_inheritance(site)
# First compute inheritance for simple fields
inheritable_field_list = [
"hardware_profile", "oob_type", "oob_network",
"oob_credential", "oob_account", "storage_layout",
"bootdisk_device", "bootdisk_root_size", "bootdisk_boot_size",
"rack"]
for f in inheritable_field_list:
setattr(self_copy, f,
Utils.apply_field_inheritance(getattr(self, f, None),
getattr(parent, f, None)))
# Now compute inheritance for complex types
self_copy.tags = Utils.merge_lists(self.tags, parent.tags)
self_copy.owner_data = Utils.merge_dicts(
self.owner_data, parent.owner_data)
self_copy.interfaces = HostInterface.merge_lists(
self.interfaces, parent.interfaces)
self_copy.partitions = HostPartition.merge_lists(
self.partitions, parent.partitions)
return self_copy
class HostInterface(object):
def __init__(self, api_version, **kwargs):
self.log = logging.Logger('model')
self.api_version = api_version
if self.api_version == "v1.0":
self.device_name = kwargs.get('device_name', None)
self.network_link = kwargs.get('device_link', None)
self.hardware_slaves = []
slaves = kwargs.get('slaves', [])
for s in slaves:
self.hardware_slaves.append(s)
self.networks = []
networks = kwargs.get('networks', [])
for n in networks:
self.networks.append(n)
else:
self.log.error("Unknown API version %s of %s" %
(self.api_version, self.__class__))
raise ValueError('Unknown API version of object')
# The device attribute may be hardware alias that translates to a
# physical device address. If the device attribute does not match an
# alias, we assume it directly identifies a OS device name. When the
# apply_hardware_profile method is called on the parent Node of this
# device, the selector will be decided and applied
def add_selector(self, sel_type, address='', dev_type=''):
if getattr(self, 'selectors', None) is None:
self.selectors = []
new_selector = {}
new_selector['selector_type'] = sel_type
new_selector['address'] = address
new_selector['device_type'] = dev_type
self.selectors.append(new_selector)
def get_slave_selectors(self):
return self.selectors
# Return number of slaves for this interface
def get_slave_count(self):
return len(self.hardware_slaves)
def apply_link_config(self, net_link):
if (net_link is not None and
isinstance(net_link, NetworkLink) and
net_link.name == self.network_link):
self.attached_link = deepcopy(net_link)
return True
return False
def apply_network_config(self, network):
if network in self.networks:
if getattr(self, 'attached_networks', None) is None:
self.attached_networks = []
self.attached_networks.append(deepcopy(network))
return True
else:
return False
def set_network_address(self, network_name, address):
if getattr(self, 'attached_networks', None) is None:
return False
for n in self.attached_neteworks:
if n.name == network_name:
n.assigned_address = address
def get_network_configs(self):
return self.attached_networks
"""
Merge two lists of HostInterface models with child_list taking
priority when conflicts. If a member of child_list has a device_name
beginning with '!' it indicates that HostInterface should be
removed from the merged list
"""
@staticmethod
def merge_lists(child_list, parent_list):
if len(child_list) == 0:
return deepcopy(parent_list)
effective_list = []
if len(parent_list) == 0:
for i in child_list:
if i.device_name.startswith('!'):
continue
else:
effective_list.append(deepcopy(i))
return effective_list
parent_interfaces = []
for i in parent_list:
parent_name = i.device_name
parent_interfaces.append(parent_name)
add = True
for j in child_list:
if j.device_name == ("!" + parent_name):
add = False
break
elif j.device_name == parent_name:
m = HostInterface(j.api_version)
m.device_name = j.device_name
m.network_link = \
Utils.apply_field_inheritance(j.network_link,
i.network_link)
s = filter(lambda x: ("!" + x) not in j.hardware_slaves,
i.hardware_slaves)
s = list(s)
s.extend(filter(lambda x: not x.startswith("!"),
j.hardware_slaves))
m.hardware_slaves = s
n = filter(lambda x: ("!" + x) not in j.networks,
i.networks)
n = list(n)
n.extend(filter(lambda x: not x.startswith("!"),
j.networks))
m.networks = n
effective_list.append(m)
add = False
break
if add:
effective_list.append(deepcopy(i))
for j in child_list:
if (j.device_name not in parent_interfaces
and not j.device_name.startswith("!")):
effective_list.append(deepcopy(j))
return effective_list
class HostPartition(object):
    """A host storage partition from a v1.0 host profile document."""

    def __init__(self, api_version, **kwargs):
        """Build the partition from document fields.

        :param api_version: document apiVersion; only 'v1.0' is accepted
        :raises ValueError: for any other api_version
        """
        self.api_version = api_version

        if self.api_version == "v1.0":
            self.name = kwargs.get('name', None)
            self.device = kwargs.get('device', None)
            self.part_uuid = kwargs.get('part_uuid', None)
            self.size = kwargs.get('size', None)
            self.mountpoint = kwargs.get('mountpoint', None)
            self.fstype = kwargs.get('fstype', 'ext4')
            self.mount_options = kwargs.get('mount_options', 'defaults')
            self.fs_uuid = kwargs.get('fs_uuid', None)
            self.fs_label = kwargs.get('fs_label', None)
        else:
            raise ValueError('Unknown API version of object')

    # The device attribute may be a hardware alias that translates to a
    # physical device address. If the device attribute does not match an
    # alias, we assume it directly identifies an OS device name. When the
    # apply_hardware_profile method is called on the parent Node of this
    # device, the selector will be decided and applied
    def set_selector(self, sel_type, address='', dev_type=''):
        selector = {}
        selector['type'] = sel_type
        selector['address'] = address
        selector['device_type'] = dev_type

        self.selector = selector

    def get_selector(self):
        return self.selector

    @staticmethod
    def merge_lists(child_list, parent_list):
        """Merge two lists of HostPartition models, child taking priority
        on conflicts. A child name beginning with '!' removes that
        partition from the merged result.

        BUGFIXES vs original: the empty-parent path now returns its
        result (it previously fell through and re-appended entries,
        including '!'-prefixed ones); setattr() now names the attribute
        being set; the final pass compares against parent *names* (it
        compared against the object list) and skips '!'-prefixed child
        entries, matching HostInterface.merge_lists.
        """
        if len(child_list) == 0:
            return deepcopy(parent_list)

        effective_list = []

        if len(parent_list) == 0:
            for i in child_list:
                if i.name.startswith('!'):
                    continue
                else:
                    effective_list.append(deepcopy(i))
            return effective_list

        inherit_field_list = ["device", "part_uuid", "size",
                              "mountpoint", "fstype", "mount_options",
                              "fs_uuid", "fs_label"]

        parent_partitions = []
        for i in parent_list:
            parent_name = i.name
            parent_partitions.append(parent_name)
            add = True
            for j in child_list:
                if j.name == ("!" + parent_name):
                    # Child explicitly removes this parent partition.
                    add = False
                    break
                elif j.name == parent_name:
                    # Same partition in both: inherit field-by-field with
                    # child taking precedence.
                    p = HostPartition(j.api_version)
                    p.name = j.name
                    for f in inherit_field_list:
                        setattr(p, f,
                                Utils.apply_field_inheritance(
                                    getattr(j, f), getattr(i, f)))
                    add = False
                    effective_list.append(p)
                    break
            if add:
                effective_list.append(deepcopy(i))

        # Finally, child partitions that introduce new names.
        for j in child_list:
            if (j.name not in parent_partitions
                    and not j.name.startswith("!")):
                effective_list.append(deepcopy(j))

        return effective_list

View File

@ -1,95 +0,0 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Models for helm_drydock
#
import logging
from copy import deepcopy
from helm_drydock.orchestrator.enum import SiteStatus
from helm_drydock.orchestrator.enum import NodeStatus
class HardwareProfile(object):

    def __init__(self, **kwargs):
        """Build a hardware profile model from parsed YAML keyword args.

        Expects 'apiVersion', 'metadata' and 'spec' keys mirroring the
        hardware definition document schema.

        :raises ValueError: when apiVersion is not a supported version
        """
        self.log = logging.Logger('model')

        self.api_version = kwargs.get('apiVersion', '')

        if self.api_version != "v1.0":
            self.log.error("Unknown API version %s of %s" %
                           (self.api_version, self.__class__))
            raise ValueError('Unknown API version of object')

        metadata = kwargs.get('metadata', {})
        spec = kwargs.get('spec', {})

        # Need to add validation logic, we'll assume the input is
        # valid for now
        self.name = metadata.get('name', '')
        self.site = metadata.get('region', '')

        self.vendor = spec.get('vendor', None)
        self.generation = spec.get('generation', None)
        self.hw_version = spec.get('hw_version', None)
        self.bios_version = spec.get('bios_version', None)
        self.boot_mode = spec.get('boot_mode', None)
        self.bootstrap_protocol = spec.get('bootstrap_protocol', None)
        self.pxe_interface = spec.get('pxe_interface', None)

        # Flatten the pci/scsi alias maps into HardwareDeviceAlias
        # models, tagging each entry with its bus type.
        self.devices = []
        device_aliases = spec.get('device_aliases', {})
        for bus in ('pci', 'scsi'):
            for d in device_aliases.get(bus, []):
                d['bus_type'] = bus
                self.devices.append(
                    HardwareDeviceAlias(self.api_version, **d))

        return

    def resolve_alias(self, alias_type, alias):
        """Translate a device alias to a physical selector.

        :param alias_type: bus type of the alias ('pci' or 'scsi')
        :param alias: alias name to look up
        :return: dict with 'address' and 'device_type' keys, or None
                 when no matching alias is defined
        """
        for d in self.devices:
            if d.alias == alias and d.bus_type == alias_type:
                return {'address': d.address, 'device_type': d.type}
        return None
class HardwareDeviceAlias(object):

    def __init__(self, api_version, **kwargs):
        """Model one alias entry mapping a bus address to a role name.

        :param api_version: schema version of the enclosing profile
        :raises ValueError: when api_version is not supported
        """
        self.log = logging.Logger('model')

        self.api_version = api_version

        if self.api_version != "v1.0":
            self.log.error("Unknown API version %s of %s" %
                           (self.api_version, self.__class__))
            raise ValueError('Unknown API version of object')

        self.bus_type = kwargs.get('bus_type', None)
        self.address = kwargs.get('address', None)
        self.alias = kwargs.get('alias', None)
        self.type = kwargs.get('type', None)

View File

@ -1,133 +0,0 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Models for helm_drydock
#
import logging
from copy import deepcopy
from helm_drydock.orchestrator.enum import SiteStatus
from helm_drydock.orchestrator.enum import NodeStatus
class NetworkLink(object):

    def __init__(self, **kwargs):
        """Model a physical network link (bonding/trunking) definition.

        :raises ValueError: when apiVersion is not supported
        """
        self.log = logging.Logger('model')

        self.api_version = kwargs.get('apiVersion', '')

        if self.api_version != "v1.0":
            self.log.error("Unknown API version %s of %s" %
                           (self.api_version, self.__class__))
            raise ValueError('Unknown API version of object')

        metadata = kwargs.get('metadata', {})
        spec = kwargs.get('spec', {})

        self.name = metadata.get('name', '')
        self.site = metadata.get('region', '')

        bonding = spec.get('bonding', {})
        self.bonding_mode = bonding.get('mode', 'none')

        # How should we define defaults for CIs not in the input?
        # LACP-specific tuning attributes only exist for 802.3ad bonds.
        if self.bonding_mode == '802.3ad':
            self.bonding_xmit_hash = bonding.get('hash', 'layer3+4')
            self.bonding_peer_rate = bonding.get('peer_rate', 'fast')
            self.bonding_mon_rate = bonding.get('mon_rate', '100')
            self.bonding_up_delay = bonding.get('up_delay', '200')
            self.bonding_down_delay = bonding.get('down_delay', '200')

        self.mtu = spec.get('mtu', 1500)
        self.linkspeed = spec.get('linkspeed', 'auto')

        trunking = spec.get('trunking', {})
        self.trunk_mode = trunking.get('mode', 'none')

        self.native_network = spec.get('default_network', '')
class Network(object):

    def __init__(self, **kwargs):
        """Model a layer-3 network definition with ranges and routes.

        :raises ValueError: when apiVersion is not supported
        """
        self.log = logging.Logger('model')

        self.api_version = kwargs.get('apiVersion', '')

        if self.api_version != "v1.0":
            self.log.error("Unknown API version %s of %s" %
                           (self.api_version, self.__class__))
            raise ValueError('Unknown API version of object')

        metadata = kwargs.get('metadata', {})
        spec = kwargs.get('spec', {})

        self.name = metadata.get('name', '')
        self.site = metadata.get('region', '')

        self.cidr = spec.get('cidr', None)
        self.allocation_strategy = spec.get('allocation', 'static')
        self.vlan_id = spec.get('vlan_id', 1)
        self.mtu = spec.get('mtu', 0)

        dns = spec.get('dns', {})
        self.dns_domain = dns.get('domain', 'local')
        self.dns_servers = dns.get('servers', None)

        # Wrap raw range/route dicts in their model classes
        self.ranges = [NetworkAddressRange(self.api_version, **r)
                       for r in spec.get('ranges', [])]
        self.routes = [NetworkRoute(self.api_version, **r)
                       for r in spec.get('routes', [])]
class NetworkAddressRange(object):

    def __init__(self, api_version, **kwargs):
        """Model one address range (e.g. static or dhcp pool) of a Network.

        :param api_version: schema version of the enclosing Network
        :raises ValueError: when api_version is not supported
        """
        self.log = logging.Logger('model')

        self.api_version = api_version

        if self.api_version != "v1.0":
            self.log.error("Unknown API version %s of %s" %
                           (self.api_version, self.__class__))
            raise ValueError('Unknown API version of object')

        self.type = kwargs.get('type', None)
        self.start = kwargs.get('start', None)
        self.end = kwargs.get('end', None)
class NetworkRoute(object):

    def __init__(self, api_version, **kwargs):
        """Model one static route attached to a Network.

        Bug fix: the original stored the 'subnet'/'gateway'/'metric'
        inputs under the attribute names type/start/end — copy-pasted
        from NetworkAddressRange. Proper attribute names are now set;
        the legacy names are kept as aliases for backward compatibility.

        :param api_version: schema version of the enclosing Network
        :raises ValueError: when api_version is not supported
        """
        self.log = logging.Logger('model')

        self.api_version = api_version

        if self.api_version == "v1.0":
            self.subnet = kwargs.get('subnet', None)
            self.gateway = kwargs.get('gateway', None)
            self.metric = kwargs.get('metric', 100)

            # Legacy aliases retained so existing readers keep working
            self.type = self.subnet
            self.start = self.gateway
            self.end = self.metric
        else:
            self.log.error("Unknown API version %s of %s" %
                           (self.api_version, self.__class__))
            raise ValueError('Unknown API version of object')

View File

@ -1,154 +0,0 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Models for helm_drydock
#
import logging
from copy import deepcopy
from helm_drydock.orchestrator.enum import SiteStatus
from helm_drydock.orchestrator.enum import NodeStatus
from helm_drydock.model.hostprofile import HostProfile
from helm_drydock.model import Utils
class BaremetalNode(HostProfile):

    # A BaremetalNode is really nothing more than a physical
    # instantiation of a HostProfile, so they both represent
    # the same set of CIs
    def __init__(self, **kwargs):
        """Parse node-specific spec data on top of the HostProfile CIs.

        :raises ValueError: when the spec provides no address assignments
        """
        super(BaremetalNode, self).__init__(**kwargs)

        if self.api_version == "v1.0":
            self.addressing = []

            spec = kwargs.get('spec', {})
            addresses = spec.get('addressing', [])

            if len(addresses) == 0:
                raise ValueError('BaremetalNode needs at least'
                                 ' 1 assigned address')

            for a in addresses:
                assignment = {}
                address = a.get('address', '')
                if address == 'dhcp':
                    assignment['type'] = 'dhcp'
                    assignment['address'] = None
                    assignment['network'] = a.get('network')
                    self.addressing.append(assignment)
                elif address != '':
                    assignment['type'] = 'static'
                    assignment['address'] = a.get('address')
                    assignment['network'] = a.get('network')
                    self.addressing.append(assignment)
                else:
                    # Invalid assignments are logged and skipped rather
                    # than failing the whole node
                    self.log.error("Invalid address assignment %s on Node %s"
                                   % (address, self.name))

        self.build = kwargs.get('build', {})

    def start_build(self):
        # Seed the build status the first time a build starts
        if self.build.get('status', '') == '':
            self.build['status'] = NodeStatus.Unknown

    def apply_host_profile(self, site):
        """Fold inherited host-profile values into this node."""
        return self.apply_inheritance(site)

    # Translate device alises to physical selectors and copy
    # other hardware attributes into this object
    def apply_hardware_profile(self, site):
        """Return a deep copy of this node with hardware aliases resolved.

        :param site: Site model used to look up the hardware profile
        :raises ValueError: when this node has no hardware profile set
        """
        self_copy = deepcopy(self)

        if self.hardware_profile is None:
            raise ValueError("Hardware profile not set")

        hw_profile = site.get_hardware_profile(self.hardware_profile)

        for i in self_copy.interfaces:
            for s in i.hardware_slaves:
                selector = hw_profile.resolve_alias("pci", s)
                if selector is None:
                    # Bug fix: this branch previously referenced the
                    # undefined name 'p' (NameError at runtime); when no
                    # alias matches, the slave name itself is treated as
                    # the OS device name.
                    i.add_selector("name", address=s)
                else:
                    i.add_selector("address", address=selector['address'],
                                   dev_type=selector['device_type'])

        for p in self_copy.partitions:
            selector = hw_profile.resolve_alias("scsi", p.device)
            if selector is None:
                p.set_selector("name", address=p.device)
            else:
                p.set_selector("address", address=selector['address'],
                               dev_type=selector['device_type'])

        # Copy the profile's descriptive attributes onto the node
        hardware = {"vendor": getattr(hw_profile, 'vendor', None),
                    "generation": getattr(hw_profile, 'generation', None),
                    "hw_version": getattr(hw_profile, 'hw_version', None),
                    "bios_version": getattr(hw_profile, 'bios_version', None),
                    "boot_mode": getattr(hw_profile, 'boot_mode', None),
                    "bootstrap_protocol": getattr(hw_profile,
                                                  'bootstrap_protocol',
                                                  None),
                    "pxe_interface": getattr(hw_profile, 'pxe_interface', None)
                    }

        self_copy.hardware = hardware

        return self_copy

    def apply_network_connections(self, site):
        """Return a deep copy with link/network configs and addresses applied.

        :param site: Site model providing network_links and networks
        """
        self_copy = deepcopy(self)

        for n in site.network_links:
            for i in self_copy.interfaces:
                i.apply_link_config(n)

        for n in site.networks:
            for i in self_copy.interfaces:
                i.apply_network_config(n)

        for a in self_copy.addressing:
            for i in self_copy.interfaces:
                i.set_network_address(a.get('network'), a.get('address'))

        return self_copy

    def get_interface(self, iface_name):
        """Return the interface whose device_name matches, or None."""
        for i in self.interfaces:
            if i.device_name == iface_name:
                return i
        return None

    def get_status(self):
        # Raises KeyError if start_build()/set_status() never ran
        return self.build['status']

    def set_status(self, status):
        # Only accept proper NodeStatus enum values
        if isinstance(status, NodeStatus):
            self.build['status'] = status

    def get_last_build_action(self):
        return self.build.get('last_action', None)

    def set_last_build_action(self, action, result, detail=None):
        """Record the most recent build action and its result."""
        last_action = self.build.get('last_action', None)

        if last_action is None:
            self.build['last_action'] = {}
            last_action = self.build['last_action']

        last_action['action'] = action
        last_action['result'] = result
        if detail is not None:
            last_action['detail'] = detail

View File

@ -1,122 +0,0 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Models for helm_drydock
#
import logging
from copy import deepcopy
from helm_drydock.orchestrator.enum import SiteStatus
from helm_drydock.orchestrator.enum import NodeStatus
class Site(object):

    def __init__(self, **kwargs):
        """Model a site design: networks, profiles and nodes.

        :raises ValueError: on empty arguments or unknown apiVersion
        """
        self.log = logging.Logger('model')

        if kwargs is None:
            raise ValueError("Empty arguments")

        self.api_version = kwargs.get('apiVersion', '')

        self.build = kwargs.get('build', {})

        if self.api_version != "v1.0":
            self.log.error("Unknown API version %s of %s" %
                           (self.api_version, self.__class__))
            raise ValueError('Unknown API version of object')

        metadata = kwargs.get('metadata', {})

        # Need to add validation logic, we'll assume the input is
        # valid for now
        self.name = metadata.get('name', '')

        spec = kwargs.get('spec', {})

        self.tag_definitions = [
            NodeTagDefinition(self.api_version, **t)
            for t in spec.get('tag_definitions', [])]

        # Populated later by the ingester/orchestrator
        self.networks = []
        self.network_links = []
        self.host_profiles = []
        self.hardware_profiles = []
        self.baremetal_nodes = []

    def start_build(self):
        # Seed build status on first use
        if self.build.get('status', '') == '':
            self.build['status'] = SiteStatus.Unknown

    @staticmethod
    def _find_by_name(collection, name):
        """Return the first member of collection whose .name matches, else None."""
        for item in collection:
            if item.name == name:
                return item
        return None

    def get_network(self, network_name):
        return self._find_by_name(self.networks, network_name)

    def get_network_link(self, link_name):
        return self._find_by_name(self.network_links, link_name)

    def get_host_profile(self, profile_name):
        return self._find_by_name(self.host_profiles, profile_name)

    def get_hardware_profile(self, profile_name):
        return self._find_by_name(self.hardware_profiles, profile_name)

    def get_baremetal_node(self, node_name):
        return self._find_by_name(self.baremetal_nodes, node_name)

    def set_status(self, status):
        # Only accept proper SiteStatus enum values
        if isinstance(status, SiteStatus):
            self.build['status'] = status
class NodeTagDefinition(object):

    def __init__(self, api_version, **kwargs):
        """Model a rule that assigns a tag to matching nodes.

        :param api_version: schema version of the enclosing Site
        :raises ValueError: unknown api_version or definition_type
        """
        # Bug fix: self.log was never initialized, so the unknown-
        # version branch raised AttributeError instead of logging and
        # raising ValueError.
        self.log = logging.Logger('model')

        self.api_version = api_version

        if self.api_version == "v1.0":
            self.tag = kwargs.get('tag', '')
            self.definition_type = kwargs.get('definition_type', '')
            self.definition = kwargs.get('definition', '')

            # Only lshw XPath matching is supported today
            if self.definition_type not in ['lshw_xpath']:
                raise ValueError('Unknown definition type in '
                                 'NodeTagDefinition: %s' % (self.definition_type))
        else:
            self.log.error("Unknown API version %s of %s" %
                           (self.api_version, self.__class__))
            raise ValueError('Unknown API version of object')

View File

@ -18,6 +18,17 @@ import logging
from copy import deepcopy
def register_all():
    # NOTE(sh8121att) - Import all versioned objects so
    # they are available via RPC. Any new object definitions
    # need to be added here.
    for module in ('network', 'node', 'hostprofile', 'hwprofile', 'site'):
        __import__('helm_drydock.objects.' + module)
# Utility class for calculating inheritance
class Utils(object):
@ -74,18 +85,18 @@ class Utils(object):
@staticmethod
def merge_lists(child_list, parent_list):
if type(child_list) is not list or type(parent_list) is not list:
raise ValueError("One parameter is not a list")
effective_list = []
# Probably should handle non-string values
effective_list.extend(
filter(lambda x: not x.startswith("!"), child_list))
try:
# Probably should handle non-string values
effective_list.extend(
filter(lambda x: not x.startswith("!"), child_list))
effective_list.extend(
filter(lambda x: ("!" + x) not in child_list,
filter(lambda x: x not in effective_list, parent_list)))
effective_list.extend(
filter(lambda x: ("!" + x) not in child_list,
filter(lambda x: x not in effective_list, parent_list)))
except TypeError:
raise TypeError("Error iterating list argument")
return effective_list
@ -107,21 +118,21 @@ class Utils(object):
@staticmethod
def merge_dicts(child_dict, parent_dict):
if type(child_dict) is not dict or type(parent_dict) is not dict:
raise ValueError("One parameter is not a dict")
effective_dict = {}
# Probably should handle non-string keys
use_keys = filter(lambda x: ("!" + x) not in child_dict.keys(),
parent_dict)
try:
# Probably should handle non-string keys
use_keys = filter(lambda x: ("!" + x) not in child_dict.keys(),
parent_dict)
for k in use_keys:
effective_dict[k] = deepcopy(parent_dict[k])
for k in use_keys:
effective_dict[k] = deepcopy(parent_dict[k])
use_keys = filter(lambda x: not x.startswith("!"), child_dict)
for k in use_keys:
effective_dict[k] = deepcopy(child_dict[k])
use_keys = filter(lambda x: not x.startswith("!"), child_dict)
for k in use_keys:
effective_dict[k] = deepcopy(child_dict[k])
except TypeError:
raise TypeError("Error iterating dict argument")
return effective_dict

View File

@ -0,0 +1,75 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_versionedobjects import base
from oslo_versionedobjects import fields as obj_fields
import helm_drydock.objects as objects
class DrydockObjectRegistry(base.VersionedObjectRegistry):

    # Steal this from Cinder to bring all registered objects
    # into the helm_drydock.objects namespace
    def registration_hook(self, cls, index):
        # Expose each registered VersionedObject class as an attribute
        # of the helm_drydock.objects package (e.g. objects.HostProfile)
        setattr(objects, cls.obj_name(), cls)
class DrydockObject(base.VersionedObject):

    VERSION = '1.0'

    OBJ_PROJECT_NAMESPACE = 'helm_drydock.objects'

    # Return None for undefined attributes
    def obj_load_attr(self, attrname):
        """Lazily default any declared-but-unset field to None.

        :raises ValueError: when attrname is not a declared field
        """
        if attrname not in self.fields.keys():
            raise ValueError("Unknown field %s" % (attrname))
        setattr(self, attrname, None)
class DrydockPersistentObject(base.VersionedObject):

    # Audit fields mixed into persisted objects: creation info is
    # mandatory; update info stays null until the first modification.
    fields = {
        'created_at': obj_fields.DateTimeField(nullable=False),
        'created_by': obj_fields.StringField(nullable=False),
        'updated_at': obj_fields.DateTimeField(nullable=True),
        'updated_by': obj_fields.StringField(nullable=True),
    }
class DrydockObjectListBase(base.ObjectListBase):

    def __init__(self, **kwargs):
        super(DrydockObjectListBase, self).__init__(**kwargs)

    def append(self, obj):
        """Append obj to the wrapped object list."""
        self.objects.append(obj)

    def replace_by_id(self, obj):
        """Replace the member whose get_id() matches obj's.

        :param obj: replacement model instance
        :return: True if a member was replaced, False otherwise
        """
        for i, member in enumerate(self.objects):
            if member.get_id() == obj.get_id():
                # Bug fix: was `objects[i] = obj`, which indexed the
                # imported helm_drydock.objects module instead of the
                # instance's list (TypeError at runtime).
                self.objects[i] = obj
                return True
        return False

    @classmethod
    def from_basic_list(cls, obj_list):
        """Build a list object from a plain Python list of models."""
        model_list = cls()

        for o in obj_list:
            model_list.append(o)

        return model_list

View File

@ -0,0 +1,173 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_versionedobjects import fields
class BaseDrydockEnum(fields.Enum):
    # Common base for Drydock enums: each subclass declares its members
    # plus an ALL tuple, which becomes the set of valid field values.
    def __init__(self):
        super(BaseDrydockEnum, self).__init__(valid_values=self.__class__.ALL)
class OrchestratorAction(BaseDrydockEnum):
    # Action names accepted by the orchestrator and driver layers;
    # the string values are the wire-format identifiers.

    # Orchestrator actions
    Noop = 'noop'
    ValidateDesign = 'validate_design'
    VerifySite = 'verify_site'
    PrepareSite = 'prepare_site'
    VerifyNode = 'verify_node'
    PrepareNode = 'prepare_node'
    DeployNode = 'deploy_node'
    DestroyNode = 'destroy_node'

    # OOB driver actions
    ValidateOobServices = 'validate_oob_services'
    ConfigNodePxe = 'config_node_pxe'
    SetNodeBoot = 'set_node_boot'
    PowerOffNode = 'power_off_node'
    PowerOnNode = 'power_on_node'
    PowerCycleNode = 'power_cycle_node'
    InterrogateOob = 'interrogate_oob'

    # Node driver actions
    ValidateNodeServices = 'validate_node_services'
    CreateNetworkTemplate = 'create_network_template'
    CreateStorageTemplate = 'create_storage_template'
    CreateBootMedia = 'create_boot_media'
    PrepareHardwareConfig = 'prepare_hardware_config'
    ConfigureHardware = 'configure_hardware'
    InterrogateNode = 'interrogate_node'
    ApplyNodeNetworking = 'apply_node_networking'
    ApplyNodeStorage = 'apply_node_storage'
    ApplyNodePlatform = 'apply_node_platform'
    # NOTE(review): DeployNode/DestroyNode are re-assigned below with the
    # same values as the orchestrator actions above; the duplicates are
    # behavior-neutral (identical strings) but look unintentional — confirm.
    DeployNode = 'deploy_node'
    DestroyNode = 'destroy_node'

    # Network driver actions
    ValidateNetworkServices = 'validate_network_services'
    InterrogatePort = 'interrogate_port'
    ConfigurePortProvisioning = 'config_port_provisioning'
    ConfigurePortProduction = 'config_port_production'

    # Valid values for field validation.
    # NOTE(review): ALL omits ValidateOobServices and the four network
    # driver actions declared above — confirm whether that is intentional.
    ALL = (Noop, ValidateDesign, VerifySite, PrepareSite, VerifyNode,
           PrepareNode, DeployNode, DestroyNode, ConfigNodePxe,
           SetNodeBoot, PowerOffNode, PowerOnNode, PowerCycleNode,
           InterrogateOob, CreateNetworkTemplate, CreateStorageTemplate,
           CreateBootMedia, PrepareHardwareConfig, ConfigureHardware,
           InterrogateNode, ApplyNodeNetworking, ApplyNodeStorage,
           ApplyNodePlatform, DeployNode, DestroyNode)
class OrchestratorActionField(fields.BaseEnumField):
    # oslo.versionedobjects field wrapper for OrchestratorAction values
    AUTO_TYPE = OrchestratorAction()
class ActionResult(BaseDrydockEnum):
    # Outcome of an executed task/action
    Incomplete = 'incomplete'
    Success = 'success'
    PartialSuccess = 'partial_success'
    Failure = 'failure'
    DependentFailure = 'dependent_failure'

    # Full tuple of valid values for field validation
    ALL = (Incomplete, Success, PartialSuccess, Failure, DependentFailure)
class ActionResultField(fields.BaseEnumField):
    # oslo.versionedobjects field wrapper for ActionResult values
    AUTO_TYPE = ActionResult()
class TaskStatus(BaseDrydockEnum):
    # Lifecycle states of an orchestrator task
    Created = 'created'
    Waiting = 'waiting'
    Running = 'running'
    Stopping = 'stopping'
    Terminated = 'terminated'
    Errored = 'errored'
    Complete = 'complete'
    Stopped = 'stopped'

    # Full tuple of valid values for field validation
    ALL = (Created, Waiting, Running, Stopping, Terminated,
           Errored, Complete, Stopped)
class TaskStatusField(fields.BaseEnumField):
    # oslo.versionedobjects field wrapper for TaskStatus values
    AUTO_TYPE = TaskStatus()
class ModelSource(BaseDrydockEnum):
    # Provenance of a model's values: raw design input, compiled
    # (post-inheritance), or discovered from a build
    Designed = 'designed'
    Compiled = 'compiled'
    Build = 'build'

    ALL = (Designed, Compiled, Build)
class ModelSourceField(fields.BaseEnumField):
    # oslo.versionedobjects field wrapper for ModelSource values
    AUTO_TYPE = ModelSource()
class SiteStatus(BaseDrydockEnum):
    # Lifecycle states of a site design/deployment
    Unknown = 'unknown'
    DesignStarted = 'design_started'
    DesignAvailable = 'design_available'
    DesignValidated = 'design_validated'
    Deploying = 'deploying'
    Deployed = 'deployed'
    DesignUpdated = 'design_updated'

    # Bug fix: ALL previously listed only (Unknown, Deploying, Deployed),
    # so fields validating against these valid_values rejected the four
    # Design* members declared above.
    ALL = (Unknown, DesignStarted, DesignAvailable, DesignValidated,
           Deploying, Deployed, DesignUpdated)
class SiteStatusField(fields.BaseEnumField):
    # oslo.versionedobjects field wrapper for SiteStatus values
    AUTO_TYPE = SiteStatus()
class NodeStatus(BaseDrydockEnum):
    # Lifecycle states of a baremetal node during provisioning
    Unknown = 'unknown'
    Designed = 'designed'
    Compiled = 'compiled'  # Node attributes represent effective config after inheritance/merge
    Present = 'present'  # IPMI access verified
    BasicVerifying = 'basic_verifying'  # Base node verification in process
    FailedBasicVerify = 'failed_basic_verify'  # Base node verification failed
    BasicVerified = 'basic_verified'  # Base node verification successful
    Preparing = 'preparing'  # Node preparation in progress
    FailedPrepare = 'failed_prepare'  # Node preparation failed
    Prepared = 'prepared'  # Node preparation complete
    FullyVerifying = 'fully_verifying'  # Node full verification in progress
    FailedFullVerify = 'failed_full_verify'  # Node full verification failed
    FullyVerified = 'fully_verified'  # Deeper verification successful
    Deploying = 'deploy'  # Node deployment in progress
    FailedDeploy = 'failed_deploy'  # Node deployment failed
    Deployed = 'deployed'  # Node deployed successfully
    Bootstrapping = 'bootstrapping'  # Node bootstrapping
    FailedBootstrap = 'failed_bootstrap'  # Node bootstrapping failed
    Bootstrapped = 'bootstrapped'  # Node fully bootstrapped
    Complete = 'complete'  # Node is complete

    # Full tuple of valid values for field validation (all 20 members)
    ALL = (Unknown, Designed, Compiled, Present, BasicVerifying, FailedBasicVerify,
           BasicVerified, Preparing, FailedPrepare, Prepared, FullyVerifying,
           FailedFullVerify, FullyVerified, Deploying, FailedDeploy, Deployed,
           Bootstrapping, FailedBootstrap, Bootstrapped, Complete)
class NodeStatusField(fields.BaseEnumField):
    # oslo.versionedobjects field wrapper for NodeStatus values
    AUTO_TYPE = NodeStatus()
class NetworkLinkBondingMode(BaseDrydockEnum):
    # Supported NIC bonding modes; values match Linux bonding driver names
    Disabled = 'disabled'
    LACP = '802.3ad'
    RoundRobin = 'balanced-rr'
    Standby = 'active-backup'

    ALL = (Disabled, LACP, RoundRobin, Standby)
class NetworkLinkBondingModeField(fields.BaseEnumField):
    # oslo.versionedobjects field wrapper for NetworkLinkBondingMode values
    AUTO_TYPE = NetworkLinkBondingMode()
class NetworkLinkTrunkingMode(BaseDrydockEnum):
    # Supported VLAN trunking modes for a network link
    Disabled = 'disabled'
    Tagged = '802.1q'

    ALL = (Disabled, Tagged)
class NetworkLinkTrunkingModeField(fields.BaseEnumField):
    # oslo.versionedobjects field wrapper for NetworkLinkTrunkingMode values
    AUTO_TYPE = NetworkLinkTrunkingMode()

View File

@ -0,0 +1,390 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from copy import deepcopy
import oslo_versionedobjects.fields as obj_fields
import helm_drydock.objects as objects
import helm_drydock.objects.base as base
import helm_drydock.objects.fields as hd_fields
@base.DrydockObjectRegistry.register
class HostProfile(base.DrydockPersistentObject, base.DrydockObject):

    VERSION = '1.0'

    fields = {
        'name': obj_fields.StringField(nullable=False),
        'site': obj_fields.StringField(nullable=False),
        'source': hd_fields.ModelSourceField(nullable=False),
        'parent_profile': obj_fields.StringField(nullable=True),
        'hardware_profile': obj_fields.StringField(nullable=True),
        'oob_type': obj_fields.StringField(nullable=True),
        'oob_network': obj_fields.StringField(nullable=True),
        'oob_account': obj_fields.StringField(nullable=True),
        'oob_credential': obj_fields.StringField(nullable=True),
        'storage_layout': obj_fields.StringField(nullable=True),
        'bootdisk_device': obj_fields.StringField(nullable=True),
        # Consider a custom field for storage size
        'bootdisk_root_size': obj_fields.StringField(nullable=True),
        'bootdisk_boot_size': obj_fields.StringField(nullable=True),
        'partitions': obj_fields.ObjectField('HostPartitionList',
                                             nullable=True),
        'interfaces': obj_fields.ObjectField('HostInterfaceList',
                                             nullable=True),
        'tags': obj_fields.ListOfStringsField(nullable=True),
        'owner_data': obj_fields.DictOfStringsField(nullable=True),
        'rack': obj_fields.StringField(nullable=True),
        'base_os': obj_fields.StringField(nullable=True),
        'kernel': obj_fields.StringField(nullable=True),
        'kernel_params': obj_fields.StringField(nullable=True),
    }

    def __init__(self, **kwargs):
        super(HostProfile, self).__init__(**kwargs)

    def get_rack(self):
        """Return the rack this profile/node is assigned to."""
        return self.rack

    # HostProfile is keyed by name
    def get_id(self):
        return self.get_name()

    def get_name(self):
        return self.name

    def has_tag(self, tag):
        """Return True if tag is present in this profile's tag list."""
        if tag in self.tags:
            return True

        return False

    def apply_inheritance(self, site_design):
        """Resolve the parent-profile chain and fold inherited values in.

        Mutates this object in place and marks its source Compiled.

        :param site_design: design document used to look up parents
        :raises NameError: when the named parent profile does not exist
        """
        # No parent to inherit from, just apply design values
        # and return
        if self.parent_profile is None:
            self.source = hd_fields.ModelSource.Compiled
            return

        parent = site_design.get_host_profile(self.parent_profile)

        if parent is None:
            # Bug fix: the message previously read
            # self.design['parent_profile'] — an attribute that does
            # not exist on this object, so the error path itself
            # raised AttributeError.
            raise NameError("Cannot find parent profile %s for %s"
                            % (self.parent_profile, self.name))

        # Ensure the parent chain is fully resolved first
        parent.apply_inheritance(site_design)

        # First compute inheritance for simple fields
        inheritable_field_list = [
            'hardware_profile', 'oob_type', 'oob_network',
            'oob_credential', 'oob_account', 'storage_layout',
            'bootdisk_device', 'bootdisk_root_size', 'bootdisk_boot_size',
            'rack', 'base_os', 'kernel', 'kernel_params']

        # Create applied data from self design values and parent
        # applied values
        for f in inheritable_field_list:
            setattr(self, f, objects.Utils.apply_field_inheritance(
                getattr(self, f, None),
                getattr(parent, f, None)))

        # Now compute inheritance for complex types
        self.tags = objects.Utils.merge_lists(self.tags, parent.tags)

        self.owner_data = objects.Utils.merge_dicts(self.owner_data, parent.owner_data)

        self.interfaces = HostInterfaceList.from_basic_list(
            HostInterface.merge_lists(self.interfaces, parent.interfaces))

        self.partitions = HostPartitionList.from_basic_list(
            HostPartition.merge_lists(self.partitions, parent.partitions))

        self.source = hd_fields.ModelSource.Compiled

        return
@base.DrydockObjectRegistry.register
class HostProfileList(base.DrydockObjectListBase, base.DrydockObject):

    VERSION = '1.0'

    # Typed container for HostProfile models
    fields = {
        'objects': obj_fields.ListOfObjectsField('HostProfile')
    }
@base.DrydockObjectRegistry.register
class HostInterface(base.DrydockObject):

    VERSION = '1.0'

    fields = {
        'device_name': obj_fields.StringField(),
        'primary_network': obj_fields.BooleanField(nullable=False, default=False),
        'source': hd_fields.ModelSourceField(),
        'network_link': obj_fields.StringField(nullable=True),
        'hardware_slaves': obj_fields.ListOfStringsField(nullable=True),
        'slave_selectors': obj_fields.ObjectField('HardwareDeviceSelectorList',
                                                  nullable=True),
        'networks': obj_fields.ListOfStringsField(nullable=True),
    }

    def __init__(self, **kwargs):
        super(HostInterface, self).__init__(**kwargs)

    # HostInterface is keyed by device_name
    def get_id(self):
        return self.get_name()

    def get_name(self):
        return self.device_name

    def get_hw_slaves(self):
        # List of hardware slave names (aliases or OS device names)
        return self.hardware_slaves

    def get_slave_selectors(self):
        return self.slave_selectors

    # Return number of slaves for this interface
    def get_slave_count(self):
        return len(self.hardware_slaves)

    # The device attribute may be hardware alias that translates to a
    # physical device address. If the device attribute does not match an
    # alias, we assume it directly identifies a OS device name. When the
    # apply_hardware_profile method is called on the parent Node of this
    # device, the selector will be decided and applied
    def add_selector(self, slave_selector):
        # Lazily create the selector list on first use
        if self.slave_selectors is None:
            self.slave_selectors = objects.HardwareDeviceSelectorList()

        self.slave_selectors.append(slave_selector)

    """
    Merge two lists of HostInterface models with child_list taking
    priority when conflicts. If a member of child_list has a device_name
    beginning with '!' it indicates that HostInterface should be
    removed from the merged list
    """
    @staticmethod
    def merge_lists(child_list, parent_list):
        effective_list = []

        # Only a parent list: all parent members carry through
        if len(child_list) == 0 and len(parent_list) > 0:
            for p in parent_list:
                pp = deepcopy(p)
                pp.source = hd_fields.ModelSource.Compiled
                effective_list.append(pp)
        # Only a child list: keep all except '!'-prefixed removal markers
        elif len(parent_list) == 0 and len(child_list) > 0:
            for i in child_list:
                if i.get_name().startswith('!'):
                    continue
                else:
                    ii = deepcopy(i)
                    ii.source = hd_fields.ModelSource.Compiled
                    effective_list.append(ii)
        # Both lists populated: merge parent-by-parent, child wins
        elif len(parent_list) > 0 and len(child_list) > 0:
            parent_interfaces = []
            for i in parent_list:
                parent_name = i.get_name()
                parent_interfaces.append(parent_name)
                add = True
                for j in child_list:
                    if j.get_name() == ("!" + parent_name):
                        # Child explicitly removes this parent interface
                        add = False
                        break
                    elif j.get_name() == parent_name:
                        # Child overrides parent: build a merged model
                        m = objects.HostInterface()
                        m.device_name = j.get_name()

                        m.primary_network = \
                            objects.Utils.apply_field_inheritance(
                                getattr(j, 'primary_network', None),
                                getattr(i, 'primary_network', None))

                        m.network_link = \
                            objects.Utils.apply_field_inheritance(
                                getattr(j, 'network_link', None),
                                getattr(i, 'network_link', None))

                        # Keep parent slaves not '!'-removed by the
                        # child, then add the child's own (non-marker)
                        # slaves
                        s = [x for x
                             in getattr(i, 'hardware_slaves', [])
                             if ("!" + x) not in getattr(j, 'hardware_slaves', [])]

                        s.extend(
                            [x for x
                             in getattr(j, 'hardware_slaves', [])
                             if not x.startswith("!")])

                        m.hardware_slaves = s

                        # Same merge discipline for attached networks
                        n = [x for x
                             in getattr(i, 'networks',[])
                             if ("!" + x) not in getattr(j, 'networks', [])]

                        n.extend(
                            [x for x
                             in getattr(j, 'networks', [])
                             if not x.startswith("!")])

                        m.networks = n
                        m.source = hd_fields.ModelSource.Compiled

                        effective_list.append(m)
                        add = False
                        break

                if add:
                    # Parent member untouched by any child entry
                    ii = deepcopy(i)
                    ii.source = hd_fields.ModelSource.Compiled
                    effective_list.append(ii)

            # Finally, child members that are brand new (no parent
            # counterpart and not removal markers)
            for j in child_list:
                if (j.device_name not in parent_interfaces
                    and not j.get_name().startswith("!")):
                    jj = deepcopy(j)
                    jj.source = hd_fields.ModelSource.Compiled
                    effective_list.append(jj)

        return effective_list
@base.DrydockObjectRegistry.register
class HostInterfaceList(base.DrydockObjectListBase, base.DrydockObject):

    VERSION = '1.0'

    # Typed container for HostInterface models
    fields = {
        'objects': obj_fields.ListOfObjectsField('HostInterface')
    }
@base.DrydockObjectRegistry.register
class HostPartition(base.DrydockObject):
    """A single disk partition (device, size, filesystem) on a host."""

    VERSION = '1.0'

    fields = {
        'name': obj_fields.StringField(),
        'source': hd_fields.ModelSourceField(),
        'device': obj_fields.StringField(nullable=True),
        'part_uuid': obj_fields.UUIDField(nullable=True),
        'size': obj_fields.StringField(nullable=True),
        'mountpoint': obj_fields.StringField(nullable=True),
        'fstype': obj_fields.StringField(nullable=True, default='ext4'),
        'mount_options': obj_fields.StringField(nullable=True, default='defaults'),
        'fs_uuid': obj_fields.UUIDField(nullable=True),
        'fs_label': obj_fields.StringField(nullable=True),
        'selector': obj_fields.ObjectField('HardwareDeviceSelector',
                                           nullable=True),
    }

    def __init__(self, **kwargs):
        super(HostPartition, self).__init__(**kwargs)

    def get_device(self):
        return self.device

    # HostPartition keyed by name
    def get_id(self):
        return self.get_name()

    def get_name(self):
        return self.name

    # The device attribute may be a hardware alias that translates to a
    # physical device address. If the device attribute does not match an
    # alias, we assume it directly identifies an OS device name. When the
    # apply_hardware_profile method is called on the parent Node of this
    # device, the selector will be decided and applied
    def set_selector(self, selector):
        self.selector = selector

    def get_selector(self):
        return self.selector

    @staticmethod
    def merge_lists(child_list, parent_list):
        """Merge two lists of HostPartition models.

        child_list takes priority on conflicts (same partition name). If a
        member of child_list has a name beginning with '!' it indicates that
        HostPartition should be removed from the merged list.

        :param child_list: list of HostPartition overriding the parent
        :param parent_list: list of HostPartition being inherited from
        :return: merged list of HostPartition with source set to Compiled
        """
        effective_list = []

        if len(child_list) == 0 and len(parent_list) > 0:
            # No overrides: the parent list is the effective list
            for p in parent_list:
                pp = deepcopy(p)
                pp.source = hd_fields.ModelSource.Compiled
                effective_list.append(pp)
        elif len(parent_list) == 0 and len(child_list) > 0:
            # Nothing to inherit: keep children except removal ('!') markers
            for i in child_list:
                if i.get_name().startswith('!'):
                    continue
                else:
                    ii = deepcopy(i)
                    ii.source = hd_fields.ModelSource.Compiled
                    effective_list.append(ii)
        elif len(parent_list) > 0 and len(child_list) > 0:
            inherit_field_list = ["device", "part_uuid", "size",
                                  "mountpoint", "fstype", "mount_options",
                                  "fs_uuid", "fs_label"]
            parent_partitions = []
            for i in parent_list:
                parent_name = i.get_name()
                parent_partitions.append(parent_name)
                add = True
                for j in child_list:
                    if j.get_name() == ("!" + parent_name):
                        # Child explicitly removes this parent partition
                        add = False
                        break
                    elif j.get_name() == parent_name:
                        # Same name: merge field-by-field, child wins
                        p = objects.HostPartition()
                        p.name = j.get_name()

                        for f in inherit_field_list:
                            setattr(p, f,
                                    objects.Utils.apply_field_inheritance(
                                        getattr(j, f, None),
                                        getattr(i, f, None)))
                        add = False
                        p.source = hd_fields.ModelSource.Compiled
                        effective_list.append(p)
                if add:
                    ii = deepcopy(i)
                    ii.source = hd_fields.ModelSource.Compiled
                    effective_list.append(ii)

            # BUGFIX: membership must be tested against the collected parent
            # *names* (parent_partitions), not the list of parent model
            # objects. The old check `not in parent_list` never matched, so
            # children already merged above were appended a second time.
            # This mirrors the correct HostInterface.merge_lists logic.
            for j in child_list:
                if (j.get_name() not in parent_partitions and
                        not j.get_name().startswith("!")):
                    jj = deepcopy(j)
                    jj.source = hd_fields.ModelSource.Compiled
                    effective_list.append(jj)

        return effective_list
@base.DrydockObjectRegistry.register
class HostPartitionList(base.DrydockObjectListBase, base.DrydockObject):
    """Versioned object list wrapper for HostPartition models."""

    VERSION = '1.0'

    fields = {
        'objects': obj_fields.ListOfObjectsField('HostPartition')
    }

View File

@ -0,0 +1,125 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from copy import deepcopy
from oslo_versionedobjects import fields as ovo_fields
import helm_drydock.objects as objects
import helm_drydock.objects.base as base
import helm_drydock.objects.fields as hd_fields
@base.DrydockObjectRegistry.register
class HardwareProfile(base.DrydockPersistentObject, base.DrydockObject):
    """Versioned model of a server hardware configuration and its aliases."""

    VERSION = '1.0'

    fields = {
        'name': ovo_fields.StringField(),
        'source': hd_fields.ModelSourceField(),
        'site': ovo_fields.StringField(),
        'vendor': ovo_fields.StringField(nullable=True),
        'generation': ovo_fields.StringField(nullable=True),
        'hw_version': ovo_fields.StringField(nullable=True),
        'bios_version': ovo_fields.StringField(nullable=True),
        'boot_mode': ovo_fields.StringField(nullable=True),
        'bootstrap_protocol': ovo_fields.StringField(nullable=True),
        'pxe_interface': ovo_fields.StringField(nullable=True),
        'devices': ovo_fields.ObjectField('HardwareDeviceAliasList',
                                          nullable=True),
    }

    def __init__(self, **kwargs):
        super(HardwareProfile, self).__init__(**kwargs)

    # HardwareProfile keyed on name
    def get_id(self):
        return self.get_name()

    def get_name(self):
        return self.name

    def resolve_alias(self, alias_type, alias):
        """Resolve a device alias on a bus type to a physical selector.

        Scans this profile's device aliases for one matching both ``alias``
        and ``alias_type`` and returns an address-type
        HardwareDeviceSelector for it, or None when no alias matches.
        """
        for device in self.devices:
            if device.alias != alias or device.bus_type != alias_type:
                continue

            selector = objects.HardwareDeviceSelector()
            selector.selector_type = "address"
            selector.address = device.address
            selector.device_type = device.dev_type
            return selector

        return None
@base.DrydockObjectRegistry.register
class HardwareProfileList(base.DrydockObjectListBase, base.DrydockObject):
    """Versioned object list wrapper for HardwareProfile models."""

    VERSION = '1.0'

    fields = {
        'objects': ovo_fields.ListOfObjectsField('HardwareProfile')
    }
@base.DrydockObjectRegistry.register
class HardwareDeviceAlias(base.DrydockObject):
    """Mapping of a friendly device alias to a physical bus address."""

    VERSION = '1.0'

    fields = {
        'alias': ovo_fields.StringField(),
        'source': hd_fields.ModelSourceField(),
        # Physical address on the bus identified by bus_type
        'address': ovo_fields.StringField(),
        'bus_type': ovo_fields.StringField(),
        'dev_type': ovo_fields.StringField(nullable=True),
    }

    def __init__(self, **kwargs):
        super(HardwareDeviceAlias, self).__init__(**kwargs)

    # HardwareDeviceAlias keyed on alias
    def get_id(self):
        return self.alias
@base.DrydockObjectRegistry.register
class HardwareDeviceAliasList(base.DrydockObjectListBase, base.DrydockObject):
    """Versioned object list wrapper for HardwareDeviceAlias models."""

    VERSION = '1.0'

    fields = {
        'objects': ovo_fields.ListOfObjectsField('HardwareDeviceAlias')
    }
@base.DrydockObjectRegistry.register
class HardwareDeviceSelector(base.DrydockObject):
    """Resolved selector identifying a physical device.

    selector_type is 'address' when resolved from a hardware alias (see
    HardwareProfile.resolve_alias) or 'name' when the address field holds
    an OS device name directly.
    """

    VERSION = '1.0'

    fields = {
        'selector_type': ovo_fields.StringField(),
        'address': ovo_fields.StringField(),
        'device_type': ovo_fields.StringField()
    }

    def __init__(self, **kwargs):
        super(HardwareDeviceSelector, self).__init__(**kwargs)
@base.DrydockObjectRegistry.register
class HardwareDeviceSelectorList(base.DrydockObjectListBase, base.DrydockObject):
    """Versioned object list wrapper for HardwareDeviceSelector models."""

    VERSION = '1.0'

    fields = {
        'objects': ovo_fields.ListOfObjectsField('HardwareDeviceSelector')
    }

View File

@ -0,0 +1,119 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Models for helm_drydock
#
import logging
from copy import deepcopy
import oslo_versionedobjects.fields as ovo_fields
import helm_drydock.objects as objects
import helm_drydock.objects.base as base
import helm_drydock.objects.fields as hd_fields
@base.DrydockObjectRegistry.register
class NetworkLink(base.DrydockPersistentObject, base.DrydockObject):
    """Versioned model of a network link and its bonding/trunking config."""

    VERSION = '1.0'

    fields = {
        'name': ovo_fields.StringField(),
        'site': ovo_fields.StringField(),
        # Bonding configuration; defaults describe a non-bonded link
        'bonding_mode': hd_fields.NetworkLinkBondingModeField(
            default=hd_fields.NetworkLinkBondingMode.Disabled),
        'bonding_xmit_hash': ovo_fields.StringField(nullable=True, default='layer3+4'),
        'bonding_peer_rate': ovo_fields.StringField(nullable=True, default='slow'),
        'bonding_mon_rate': ovo_fields.IntegerField(nullable=True, default=100),
        'bonding_up_delay': ovo_fields.IntegerField(nullable=True, default=200),
        'bonding_down_delay': ovo_fields.IntegerField(nullable=True, default=200),
        'mtu': ovo_fields.IntegerField(default=1500),
        'linkspeed': ovo_fields.StringField(default='auto'),
        # VLAN trunking configuration; native_network applies when trunking
        'trunk_mode': hd_fields.NetworkLinkTrunkingModeField(
            default=hd_fields.NetworkLinkTrunkingMode.Disabled),
        'native_network': ovo_fields.StringField(nullable=True),
    }

    def __init__(self, **kwargs):
        super(NetworkLink, self).__init__(**kwargs)

    # NetworkLink keyed by name
    def get_id(self):
        return self.get_name()

    def get_name(self):
        return self.name
@base.DrydockObjectRegistry.register
class NetworkLinkList(base.DrydockObjectListBase, base.DrydockObject):
    """Versioned object list wrapper for NetworkLink models."""

    VERSION = '1.0'

    fields = {
        'objects': ovo_fields.ListOfObjectsField('NetworkLink'),
    }
@base.DrydockObjectRegistry.register
class Network(base.DrydockPersistentObject, base.DrydockObject):
    """Versioned model of a network (CIDR, ranges, routes) in a site."""

    VERSION = '1.0'

    fields = {
        'name': ovo_fields.StringField(),
        'site': ovo_fields.StringField(),
        'cidr': ovo_fields.StringField(),
        'allocation_strategy': ovo_fields.StringField(),
        'vlan_id': ovo_fields.StringField(nullable=True),
        'mtu': ovo_fields.IntegerField(nullable=True),
        'dns_domain': ovo_fields.StringField(nullable=True),
        'dns_servers': ovo_fields.StringField(nullable=True),
        # Keys of ranges are 'type', 'start', 'end'
        'ranges': ovo_fields.ListOfDictOfNullableStringsField(),
        # Keys of routes are 'subnet', 'gateway', 'metric'
        'routes': ovo_fields.ListOfDictOfNullableStringsField(),
    }

    def __init__(self, **kwargs):
        super(Network, self).__init__(**kwargs)

    # Network keyed on name
    def get_id(self):
        return self.get_name()

    def get_name(self):
        return self.name

    def get_default_gateway(self):
        """Return the gateway of the default (0.0.0.0/0) route, or None."""
        for route in getattr(self, 'routes', []):
            if route.get('subnet', '') == '0.0.0.0/0':
                return route.get('gateway', None)

        return None
@base.DrydockObjectRegistry.register
class NetworkList(base.DrydockObjectListBase, base.DrydockObject):
    """Versioned object list wrapper for Network models."""

    VERSION = '1.0'

    fields = {
        'objects': ovo_fields.ListOfObjectsField('Network'),
    }

    def __init__(self, **kwargs):
        super(NetworkList, self).__init__(**kwargs)

View File

@ -0,0 +1,134 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Models for helm_drydock
#
import logging
from copy import deepcopy
from oslo_versionedobjects import fields as ovo_fields
import helm_drydock.objects as objects
import helm_drydock.objects.hostprofile
import helm_drydock.objects.base as base
import helm_drydock.objects.fields as hd_fields
@base.DrydockObjectRegistry.register
class BaremetalNode(helm_drydock.objects.hostprofile.HostProfile):
    """Physical node model.

    A BaremetalNode is really nothing more than a physical instantiation
    of a HostProfile, so they both represent the same set of CIs.
    """

    VERSION = '1.0'

    fields = {
        'addressing': ovo_fields.ObjectField('IpAddressAssignmentList')
    }

    def __init__(self, **kwargs):
        super(BaremetalNode, self).__init__(**kwargs)

    def compile_applied_model(self, site_design):
        """Compile the applied version of this model.

        Sources referenced data from the passed site design, then marks
        this node as Compiled.
        """
        self.apply_host_profile(site_design)
        self.apply_hardware_profile(site_design)
        self.source = hd_fields.ModelSource.Compiled
        return

    def apply_host_profile(self, site_design):
        """Fold inherited host profile values into this node."""
        self.apply_inheritance(site_design)
        return

    def _name_selector(self, device_name):
        # Fallback when no hardware alias resolves: select the device
        # directly by its OS name.
        selector = objects.HardwareDeviceSelector()
        selector.selector_type = 'name'
        selector.address = device_name
        return selector

    def apply_hardware_profile(self, site_design):
        """Translate device aliases to physical selectors.

        Resolves each interface hardware slave against the 'pci' bus and
        each partition device against the 'scsi' bus of this node's
        hardware profile, attaching the resulting selectors.

        :raises ValueError: when hardware_profile is not set
        """
        if self.hardware_profile is None:
            raise ValueError("Hardware profile not set")

        hw_profile = site_design.get_hardware_profile(self.hardware_profile)

        for iface in getattr(self, 'interfaces', []):
            for slave in iface.get_hw_slaves():
                selector = hw_profile.resolve_alias("pci", slave)
                if selector is None:
                    selector = self._name_selector(slave)
                iface.add_selector(selector)

        for partition in getattr(self, 'partitions', []):
            selector = hw_profile.resolve_alias("scsi", partition.get_device())
            if selector is None:
                selector = self._name_selector(partition.get_device())
            partition.set_selector(selector)

        return

    def get_applied_interface(self, iface_name):
        """Return this node's interface named iface_name, or None."""
        for iface in getattr(self, 'interfaces', []):
            if iface.get_name() == iface_name:
                return iface

        return None

    def get_network_address(self, network_name):
        """Return this node's assigned address on network_name, or None."""
        for assignment in getattr(self, 'addressing', []):
            if assignment.network == network_name:
                return assignment.address

        return None
@base.DrydockObjectRegistry.register
class BaremetalNodeList(base.DrydockObjectListBase, base.DrydockObject):
    """Versioned object list wrapper for BaremetalNode models."""

    VERSION = '1.0'

    fields = {
        'objects': ovo_fields.ListOfObjectsField('BaremetalNode')
    }
@base.DrydockObjectRegistry.register
class IpAddressAssignment(base.DrydockObject):
    """An IP address assignment on a named network."""

    VERSION = '1.0'

    fields = {
        'type': ovo_fields.StringField(),
        # address may be null (e.g. when not statically assigned —
        # exact semantics of 'type' are defined by the consumer)
        'address': ovo_fields.StringField(nullable=True),
        'network': ovo_fields.StringField(),
    }

    def __init__(self, **kwargs):
        super(IpAddressAssignment, self).__init__(**kwargs)

    # IpAddressAssignment keyed by network
    def get_id(self):
        return self.network
@base.DrydockObjectRegistry.register
class IpAddressAssignmentList(base.DrydockObjectListBase, base.DrydockObject):
    """Versioned object list wrapper for IpAddressAssignment models."""

    VERSION = '1.0'

    fields = {
        'objects': ovo_fields.ListOfObjectsField('IpAddressAssignment')
    }

View File

@ -0,0 +1,35 @@
# Drydock Model #
Object models for the drydock design parts and subparts. We use oslo.versionedobjects as the supporting library for object management
to support RPC and versioned persistence.
## Features ##
### Inheritance ###
Drydock supports inheritance in the design data model.
Currently this only supports BaremetalNode inheriting from HostProfile and
HostProfile inheriting from HostProfile.
Inheritance rules:
1. A child overrides a parent for part and subpart attributes
2. For attributes that are lists, the parent list and child list
are merged.
3. A child can remove a list member by prefixing the value with '!'
4. For lists of subparts (i.e. HostInterface and HostPartition) if
there is a member in the parent list and child list with the same name
(as defined by the get_name() method), the child member inherits from
the parent member. The '!' prefix applies here for deleting a member
based on the name.
### Phased Data ###
The *source* of the data in an object instance can be one of three
types.
* Designed - This is data directly ingested by Drydock representing a design part (Site, HostProfile, etc...) supplied by an external source
* Compiled - This is designed data that has been processed through the Drydock
inheritance / merge system. It is the effective design that will be implemented.
* Build - This is the result of actual implementation. It should basically match the compiled view of the model, but might have some additional information only available after implementation.

View File

@ -0,0 +1,267 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Models for helm_drydock
#
from copy import deepcopy
import uuid

import oslo_versionedobjects.fields as ovo_fields

import helm_drydock.error as errors
import helm_drydock.objects as objects
import helm_drydock.objects.base as base
import helm_drydock.objects.fields as hd_fields
@base.DrydockObjectRegistry.register
class Site(base.DrydockPersistentObject, base.DrydockObject):
    """Top-level model for a site and its site-wide definitions."""

    VERSION = '1.0'

    fields = {
        'name': ovo_fields.StringField(),
        'status': hd_fields.SiteStatusField(default=hd_fields.SiteStatus.Unknown),
        'source': hd_fields.ModelSourceField(),
        'tag_definitions': ovo_fields.ObjectField('NodeTagDefinitionList',
                                                  nullable=True),
        'repositories': ovo_fields.ObjectField('RepositoryList', nullable=True),
    }

    def __init__(self, **kwargs):
        super(Site, self).__init__(**kwargs)

    # Site keyed by name
    def get_id(self):
        return self.name

    def get_name(self):
        return self.name

    def add_tag_definition(self, tag_definition):
        """Append a NodeTagDefinition to this site.

        BUGFIX: tag_definitions is a nullable field, so appending without
        lazy initialization fails on a fresh Site. Mirrors the lazy-init
        pattern used by SiteDesign.add_network et al.
        """
        if self.tag_definitions is None:
            self.tag_definitions = objects.NodeTagDefinitionList()

        self.tag_definitions.append(tag_definition)
@base.DrydockObjectRegistry.register
class NodeTagDefinition(base.DrydockObject):
    """Definition of a node tag and the rule used to apply it."""

    VERSION = '1.0'

    fields = {
        'tag': ovo_fields.StringField(),
        # 'type' describes how to interpret the 'definition' rule text
        'type': ovo_fields.StringField(),
        'definition': ovo_fields.StringField(),
        'source': hd_fields.ModelSourceField(),
    }

    def __init__(self, **kwargs):
        super(NodeTagDefinition, self).__init__(**kwargs)

    # TagDefinition keyed by tag
    def get_id(self):
        return self.tag
@base.DrydockObjectRegistry.register
class NodeTagDefinitionList(base.DrydockObjectListBase, base.DrydockObject):
    """Versioned object list wrapper for NodeTagDefinition models."""

    VERSION = '1.0'

    fields = {
        'objects': ovo_fields.ListOfObjectsField('NodeTagDefinition'),
    }
# Need to determine how best to define a repository that can encompass
# all repositories needed
@base.DrydockObjectRegistry.register
class Repository(base.DrydockObject):
    """Placeholder model for a repository definition (name only for now)."""

    VERSION = '1.0'

    fields = {
        'name': ovo_fields.StringField(),
    }

    def __init__(self, **kwargs):
        super(Repository, self).__init__(**kwargs)

    # Repository keyed by name
    # (previous comment said "TagDefinition keyed by tag" — copy-paste)
    def get_id(self):
        return self.name
@base.DrydockObjectRegistry.register
class RepositoryList(base.DrydockObjectListBase, base.DrydockObject):
    """Versioned object list wrapper for Repository models."""

    VERSION = '1.0'

    fields = {
        'objects': ovo_fields.ListOfObjectsField('Repository'),
    }
@base.DrydockObjectRegistry.register
class SiteDesign(base.DrydockPersistentObject, base.DrydockObject):
    """A collection of design parts making up one revision of a site design."""

    VERSION = '1.0'

    fields = {
        'id': ovo_fields.UUIDField(),
        # if null, indicates this is the site base design
        'base_design_id': ovo_fields.UUIDField(nullable=True),
        'source': hd_fields.ModelSourceField(),
        'site': ovo_fields.ObjectField('Site', nullable=True),
        'networks': ovo_fields.ObjectField('NetworkList', nullable=True),
        'network_links': ovo_fields.ObjectField('NetworkLinkList', nullable=True),
        'host_profiles': ovo_fields.ObjectField('HostProfileList', nullable=True),
        'hardware_profiles': ovo_fields.ObjectField('HardwareProfileList', nullable=True),
        'baremetal_nodes': ovo_fields.ObjectField('BaremetalNodeList', nullable=True),
    }

    def __init__(self, **kwargs):
        super(SiteDesign, self).__init__(**kwargs)

    # Assign UUID id
    def assign_id(self):
        self.id = uuid.uuid4()
        return self.id

    # SiteDesign keyed by id
    def get_id(self):
        return self.id

    def get_site(self):
        return self.site

    def set_site(self, site):
        self.site = site

    # NOTE: each add_* lazily initializes its nullable list field; each
    # get_* raises errors.DesignError when the key is not found.
    # BUGFIX throughout: DesignError was previously referenced unqualified
    # (a NameError at raise time); it lives in helm_drydock.error.

    def add_network(self, new_network):
        if new_network is None:
            raise errors.DesignError("Invalid Network model")

        if self.networks is None:
            self.networks = objects.NetworkList()

        self.networks.append(new_network)

    def get_network(self, network_key):
        for n in self.networks:
            if n.get_id() == network_key:
                return n

        raise errors.DesignError("Network %s not found in design state"
                                 % network_key)

    def add_network_link(self, new_network_link):
        if new_network_link is None:
            raise errors.DesignError("Invalid NetworkLink model")

        if self.network_links is None:
            self.network_links = objects.NetworkLinkList()

        self.network_links.append(new_network_link)

    def get_network_link(self, link_key):
        for l in self.network_links:
            if l.get_id() == link_key:
                return l

        raise errors.DesignError("NetworkLink %s not found in design state"
                                 % link_key)

    def add_host_profile(self, new_host_profile):
        if new_host_profile is None:
            raise errors.DesignError("Invalid HostProfile model")

        if self.host_profiles is None:
            self.host_profiles = objects.HostProfileList()

        self.host_profiles.append(new_host_profile)

    def get_host_profile(self, profile_key):
        for p in self.host_profiles:
            if p.get_id() == profile_key:
                return p

        raise errors.DesignError("HostProfile %s not found in design state"
                                 % profile_key)

    def add_hardware_profile(self, new_hardware_profile):
        if new_hardware_profile is None:
            raise errors.DesignError("Invalid HardwareProfile model")

        if self.hardware_profiles is None:
            self.hardware_profiles = objects.HardwareProfileList()

        self.hardware_profiles.append(new_hardware_profile)

    def get_hardware_profile(self, profile_key):
        for p in self.hardware_profiles:
            if p.get_id() == profile_key:
                return p

        raise errors.DesignError("HardwareProfile %s not found in design state"
                                 % profile_key)

    def add_baremetal_node(self, new_baremetal_node):
        if new_baremetal_node is None:
            raise errors.DesignError("Invalid BaremetalNode model")

        if self.baremetal_nodes is None:
            self.baremetal_nodes = objects.BaremetalNodeList()

        self.baremetal_nodes.append(new_baremetal_node)

    def get_baremetal_node(self, node_key):
        for n in self.baremetal_nodes:
            if n.get_id() == node_key:
                return n

        raise errors.DesignError("BaremetalNode %s not found in design state"
                                 % node_key)

    def get_filtered_nodes(self, node_filter):
        """Return nodes matching the intersection of the provided filters.

        Supports filtering on rack name ('rackname'), node name
        ('nodename') or node tag ('tags'). Each filter value is a
        comma-delimited list; the final result is the intersection of
        all supplied filters.
        """
        effective_nodes = self.baremetal_nodes

        # filter by rack
        rack_filter = node_filter.get('rackname', None)

        if rack_filter is not None:
            rack_list = rack_filter.split(',')
            effective_nodes = [x
                               for x in effective_nodes
                               if x.get_rack() in rack_list]
        # filter by name
        name_filter = node_filter.get('nodename', None)

        if name_filter is not None:
            name_list = name_filter.split(',')
            effective_nodes = [x
                               for x in effective_nodes
                               if x.get_name() in name_list]
        # filter by tag
        tag_filter = node_filter.get('tags', None)

        if tag_filter is not None:
            tag_list = tag_filter.split(',')
            # BUGFIX: the previous nested comprehension emitted a node once
            # per matching tag, duplicating nodes carrying several of the
            # requested tags. any() yields each node at most once.
            effective_nodes = [x
                               for x in effective_nodes
                               if any(x.has_tag(t) for t in tag_list)]

        return effective_nodes

View File

@ -0,0 +1,97 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
from threading import Lock
import helm_drydock.error as errors
import helm_drydock.objects.fields as hd_fields
class Task(object):
    """Base unit of work tracked by the orchestrator.

    Tracks status, result, a termination flag, and any registered
    subtask ids. Plain object (not a DrydockObject model).
    """

    def __init__(self, **kwargs):
        self.task_id = uuid.uuid4()
        self.status = hd_fields.TaskStatus.Created
        self.terminate = False
        self.subtasks = []
        # lock_id is populated externally when the task is locked
        self.lock_id = None
        self.result = hd_fields.ActionResult.Incomplete
        self.result_detail = None
        self.action = kwargs.get('action', hd_fields.OrchestratorAction.Noop)
        self.parent_task_id = kwargs.get('parent_task_id','')

    def get_id(self):
        return self.task_id

    def terminate_task(self):
        # Only sets the flag; consumers must check it and stop work
        self.terminate = True

    def set_status(self, status):
        self.status = status

    def get_status(self):
        return self.status

    def set_result(self, result):
        self.result = result

    def get_result(self):
        return self.result

    def set_result_detail(self, detail):
        self.result_detail = detail

    def get_result_detail(self):
        return self.result_detail

    def register_subtask(self, subtask_id):
        """Record a subtask id; refused once marked for termination.

        :raises errors.OrchestratorError: when this task is terminating
        """
        if self.terminate:
            raise errors.OrchestratorError("Cannot add subtask for parent" \
                    " marked for termination")
        self.subtasks.append(subtask_id)

    def get_subtasks(self):
        return self.subtasks
class OrchestratorTask(Task):
    """Task consumed by the orchestrator; validates action parameters."""

    def __init__(self, **kwargs):
        super(OrchestratorTask, self).__init__(**kwargs)

        # Validate parameters based on action
        self.site = kwargs.get('site', '')

        if self.site == '':
            raise ValueError("Orchestration Task requires 'site' parameter")

        self.design_id = kwargs.get('design_id', 0)

        # Node-scoped actions additionally accept a node_filter
        if self.action in [hd_fields.OrchestratorAction.VerifyNode,
                           hd_fields.OrchestratorAction.PrepareNode,
                           hd_fields.OrchestratorAction.DeployNode,
                           hd_fields.OrchestratorAction.DestroyNode]:
            self.node_filter = kwargs.get('node_filter', None)
class DriverTask(Task):
    """Task executed by a provider driver, scoped to a site and node list.

    task_scope keys used: 'site' (site name) and 'node_names' (list of
    target node names).
    """

    def __init__(self, task_scope=None, **kwargs):
        super(DriverTask, self).__init__(**kwargs)

        # BUGFIX: task_scope previously defaulted to a shared mutable {}
        # (classic Python mutable-default pitfall). None sentinel keeps
        # the interface backward-compatible.
        if task_scope is None:
            task_scope = {}

        self.design_id = kwargs.get('design_id', 0)

        self.site_name = task_scope.get('site', None)

        self.node_list = task_scope.get('node_names', [])

View File

@ -11,23 +11,55 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum, unique
import uuid
import time
import threading
import importlib
from copy import deepcopy
import helm_drydock.drivers as drivers
import helm_drydock.objects.task as tasks
import helm_drydock.error as errors
import helm_drydock.objects.fields as hd_fields
class Orchestrator(object):
# enabled_drivers is a map which provider drivers
# should be enabled for use by this orchestrator
def __init__(self, enabled_drivers=None, design_state=None):
def __init__(self, enabled_drivers=None, state_manager=None):
self.enabled_drivers = {}
self.enabled_drivers['oob'] = enabled_drivers.get('oob', None)
self.enabled_drivers['server'] = enabled_drivers.get('server', None)
self.enabled_drivers['network'] = enabled_drivers.get('network', None)
self.state_manager = state_manager
if enabled_drivers is not None:
oob_driver_name = enabled_drivers.get('oob', None)
if oob_driver_name is not None:
m, c = oob_driver_name.rsplit('.', 1)
oob_driver_class = \
getattr(importlib.import_module(m), c, None)
if oob_driver_class is not None:
self.enabled_drivers['oob'] = oob_driver_class(state_manager=state_manager,
orchestrator=self)
node_driver_name = enabled_drivers.get('node', None)
if node_driver_name is not None:
m, c = node_driver_name.rsplit('.', 1)
node_driver_class = \
getattr(importlib.import_module(m), c, None)
if node_driver_class is not None:
self.enabled_drivers['node'] = node_driver_class(state_manager=state_manager,
orchestrator=self)
network_driver_name = enabled_drivers.get('network', None)
if network_driver_name is not None:
m, c = network_driver_name.rsplit('.', 1)
network_driver_class = \
getattr(importlib.import_module(m), c, None)
if network_driver_class is not None:
self.enabled_drivers['network'] = network_driver_class(state_manager=state_manager,
orchestrator=self)
self.design_state = design_state
"""
execute_task
@ -37,35 +69,320 @@ class Orchestrator(object):
the current designed state and current built state from the statemgmt
module. Based on those 3 inputs, we'll decide what is needed next.
"""
def execute_task(self, task):
if design_state is None:
raise Exception("Cannot execute task without initialized state manager")
def execute_task(self, task_id):
if self.state_manager is None:
raise errors.OrchestratorError("Cannot execute task without" \
" initialized state manager")
task = self.state_manager.get_task(task_id)
class OrchestrationTask(object):
if task is None:
raise errors.OrchestratorError("Task %s not found."
% (task_id))
def __init__(self, action, **kwargs):
self.taskid = uuid.uuid4()
design_id = task.design_id
task_site = task.site
self.action = action
# Just for testing now, need to implement with enabled_drivers
# logic
if task.action == hd_fields.OrchestratorAction.Noop:
self.task_field_update(task_id,
status=hd_fields.TaskStatus.Running)
parent_task = kwargs.get('parent_task','')
driver_task = self.create_task(tasks.DriverTask,
design_id=0,
action=hd_fields.OrchestratorAction.Noop,
parent_task_id=task.get_id())
# Validate parameters based on action
self.site = kwargs.get('site', '')
driver = drivers.ProviderDriver(state_manager=self.state_manager,
orchestrator=self)
driver.execute_task(driver_task.get_id())
driver_task = self.state_manager.get_task(driver_task.get_id())
self.task_field_update(task_id, status=driver_task.get_status())
return
elif task.action == hd_fields.OrchestratorAction.ValidateDesign:
self.task_field_update(task_id,
status=hd_fields.TaskStatus.Running)
try:
site_design = self.get_effective_site(task_site,
change_id=design_id)
self.task_field_update(task_id,
result=hd_fields.ActionResult.Success)
except:
self.task_field_update(task_id,
result=hd_fields.ActionResult.Failure)
self.task_field_update(task_id, status=hd_fields.TaskStatus.Complete)
return
elif task.action == hd_fields.OrchestratorAction.VerifySite:
self.task_field_update(task_id,
status=hd_fields.TaskStatus.Running)
if self.site == '':
raise ValueError("Task requires 'site' parameter")
node_driver = self.enabled_drivers['node']
if action in [Action.VerifyNode, Action.PrepareNode,
Action.DeployNode, Action.DestroyNode]:
self.node_filter = kwargs.get('node_filter', None)
if node_driver is not None:
node_driver_task = self.create_task(tasks.DriverTask,
parent_task_id=task.get_id(),
design_id=design_id,
action=hd_fields.OrchestratorAction.ValidateNodeServices)
def child_task(self, action, **kwargs):
child_task = OrchestrationTask(action, parent_task=self.taskid, site=self.site, **kwargs)
return child_task
node_driver.execute_task(node_driver_task.get_id())
node_driver_task = self.state_manager.get_task(node_driver_task.get_id())
self.task_field_update(task_id,
status=hd_fields.TaskStatus.Complete,
result=node_driver_task.get_result())
return
elif task.action == hd_fields.OrchestratorAction.PrepareSite:
driver = self.enabled_drivers['node']
if driver is None:
self.task_field_update(task_id,
status=hd_fields.TaskStatus.Errored,
result=hd_fields.ActionResult.Failure)
return
task_scope = {
'site': task.site
}
driver_task = self.create_task(tasks.DriverTask,
parent_task_id=task.get_id(),
design_id=design_id,
task_scope=task_scope,
action=hd_fields.OrchestratorAction.CreateNetworkTemplate)
driver.execute_task(driver_task.get_id())
driver_task = self.state_manager.get_task(driver_task.get_id())
self.task_field_update(task_id,
status=hd_fields.TaskStatus.Complete,
result=driver_task.get_result())
return
elif task.action == hd_fields.OrchestratorAction.VerifyNode:
self.task_field_update(task_id,
status=hd_fields.TaskStatus.Running)
driver = self.enabled_drivers['oob']
if driver is None:
self.task_field_update(task_id,
status=hd_fields.TaskStatus.Errored,
result=hd_fields.ActionResult.Failure)
return
site_design = self.get_effective_site(design_id, task_site)
node_filter = task.node_filter
target_nodes = self.process_node_filter(node_filter, site_design)
target_names = [x.get_name() for x in target_nodes]
task_scope = {'site' : task_site,
'node_names' : target_names}
driver_task = self.create_task(tasks.DriverTask,
parent_task_id=task.get_id(),
design_id=design_id,
action=hd_fields.OrchestratorAction.InterrogateNode,
task_scope=task_scope)
driver.execute_task(driver_task.get_id())
driver_task = self.state_manager.get_task(driver_task.get_id())
self.task_field_update(task_id,
status=hd_fields.TaskStatus.Complete,
result=driver_task.get_result())
return
elif task.action == hd_fields.OrchestratorAction.PrepareNode:
self.task_field_update(task_id,
status=hd_fields.TaskStatus.Running)
driver = self.enabled_drivers['oob']
if driver is None:
self.task_field_update(task_id,
status=hd_fields.TaskStatus.Errored,
result=hd_fields.ActionResult.Failure)
return
site_design = self.get_effective_site(task_site,
change_id=design_id)
node_filter = task.node_filter
target_nodes = self.process_node_filter(node_filter, site_design)
target_names = [x.get_name() for x in target_nodes]
task_scope = {'site' : task_site,
'node_names' : target_names}
setboot_task = self.create_task(tasks.DriverTask,
parent_task_id=task.get_id(),
design_id=design_id,
action=hd_fields.OrchestratorAction.SetNodeBoot,
task_scope=task_scope)
driver.execute_task(setboot_task.get_id())
setboot_task = self.state_manager.get_task(setboot_task.get_id())
cycle_task = self.create_task(tasks.DriverTask,
parent_task_id=task.get_id(),
design_id=design_id,
action=hd_fields.OrchestratorAction.PowerCycleNode,
task_scope=task_scope)
driver.execute_task(cycle_task.get_id())
cycle_task = self.state_manager.get_task(cycle_task.get_id())
if (setboot_task.get_result() == hd_fields.ActionResult.Success and
cycle_task.get_result() == hd_fields.ActionResult.Success):
self.task_field_update(task_id,
status=hd_fields.TaskStatus.Complete,
result=hd_fields.ActionResult.Success)
elif (setboot_task.get_result() == hd_fields.ActionResult.Success or
cycle_task.get_result() == hd_fields.ActionResult.Success):
self.task_field_update(task_id,
status=hd_fields.TaskStatus.Complete,
result=hd_fields.ActionResult.PartialSuccess)
else:
self.task_field_update(task_id,
status=hd_fields.TaskStatus.Complete,
result=hd_fields.ActionResult.Failure)
return
else:
raise errors.OrchestratorError("Action %s not supported"
% (task.action))
"""
terminate_task
Mark a task for termination and optionally propagate the termination
recursively to all subtasks
"""
def terminate_task(self, task_id, propagate=True):
    """Mark a task for termination, optionally propagating to subtasks.

    :param task_id: ID of the task to terminate
    :param propagate: if True, recursively mark all known subtasks
                      for termination as well
    :return: True once the termination flag(s) have been set
    :raises OrchestratorError: if no task with task_id exists
    """
    task = self.state_manager.get_task(task_id)

    if task is None:
        # Fixed message: previously read "Could find task %s"
        raise errors.OrchestratorError("Could not find task %s" % task_id)

    # Terminate the initial task first to prevent add'l subtasks
    self.task_field_update(task_id, terminate=True)

    if propagate:
        # Recurse into the subtask tree
        for st in task.get_subtasks():
            self.terminate_task(st, propagate=True)

    # Original returned True only on the non-propagating branch (None
    # otherwise); return consistently so callers can rely on the result
    return True
def create_task(self, task_class, **kwargs):
    """Instantiate a new task, persist it, and link it to its parent.

    :param task_class: class of the task object to construct
    :param kwargs: forwarded to the task constructor; may include
                   'parent_task_id' to register this task as a subtask
    :return: the newly created task instance
    """
    parent_id = kwargs.get('parent_task_id', None)

    new_task = task_class(**kwargs)
    self.state_manager.post_task(new_task)

    # Register with the parent only when one was requested
    if parent_id is not None:
        self.task_subtask_add(parent_id, new_task.get_id())

    return new_task
# Lock a task, apply all requested field updates, then unlock it
def task_field_update(self, task_id, **kwargs):
    """Atomically set attributes on a task under its lock.

    :param task_id: ID of the task to update
    :param kwargs: attribute name/value pairs to set on the task
    :return: True when the update was persisted, False if the task
             lock could not be acquired
    """
    lock_id = self.state_manager.lock_task(task_id)
    if lock_id is None:
        # Could not obtain the task lock; nothing was changed
        return False

    task = self.state_manager.get_task(task_id)
    for field, value in kwargs.items():
        setattr(task, field, value)

    self.state_manager.put_task(task, lock_id=lock_id)
    self.state_manager.unlock_task(task_id, lock_id)
    return True
def task_subtask_add(self, task_id, subtask_id):
    """Register subtask_id as a subtask of task_id under the task lock.

    :param task_id: ID of the parent task
    :param subtask_id: ID of the subtask to register
    :return: True when the subtask was registered, False if the parent
             task lock could not be acquired
    """
    lock_id = self.state_manager.lock_task(task_id)
    if lock_id is None:
        return False

    parent = self.state_manager.get_task(task_id)
    parent.register_subtask(subtask_id)

    self.state_manager.put_task(parent, lock_id=lock_id)
    self.state_manager.unlock_task(task_id, lock_id)
    return True
def compute_model_inheritance(self, site_design):
    """Resolve design inheritance for all baremetal nodes, in place.

    Only host profiles and baremetal nodes participate in inheritance
    today; compiling the applied model on each node recursively
    resolves the host profiles assigned to it.

    :param site_design: fully populated site design model
    :return: None (site_design is mutated in place)
    """
    for node in site_design.baremetal_nodes:
        node.compile_applied_model(site_design)
"""
compute_model_inheritance - given a fully populated Site model,
compute the effective design by applying inheritance and references
return a Site model reflecting the effective design for the site
"""
def get_described_site(self, design_id, site_name):
    """Fetch the raw (pre-inheritance) design for a site.

    :param design_id: ID of the design to load from the state manager
    :param site_name: name of the site being sourced
    :return: the stored site design model
    :raises OrchestratorError: when site_name is None
    """
    if site_name is None:
        raise errors.OrchestratorError(
            "Cannot source design for site None")

    return self.state_manager.get_design(design_id)
def get_effective_site(self, design_id, site_name=None, change_id=None):
    """Return the site design with inheritance fully computed.

    Accepts both calling conventions present in this module:

      get_effective_site(design_id, site_name)
      get_effective_site(site_name, change_id=design_id)   # legacy caller

    The second form (used by the PrepareNode handler above) previously
    raised TypeError because `change_id` was not an accepted parameter.

    :param design_id: design ID (or, legacy form, the site name)
    :param site_name: name of the site being sourced
    :param change_id: legacy keyword carrying the design ID
    :return: site design model with inheritance applied
    """
    if change_id is not None and site_name is None:
        # Legacy call: first positional arg is the site name and the
        # design id arrives via change_id
        design_id, site_name = change_id, design_id

    site_design = self.get_described_site(design_id, site_name)
    self.compute_model_inheritance(site_design)
    return site_design
def process_node_filter(self, node_filter, site_design):
    """Select baremetal nodes from site_design matching node_filter.

    Filters intersect: a node must satisfy every non-empty criterion.
    A node matching several entries in 'node_tags' appears once per
    matching tag (original behavior preserved).

    :param node_filter: dict with optional keys 'node_names',
                        'rack_names', 'node_tags'; None selects all
    :param site_design: design model providing baremetal_nodes
    :return: list of matching node models
    """
    candidates = site_design.baremetal_nodes

    if node_filter is None:
        return candidates

    wanted_names = node_filter.get('node_names', [])
    wanted_racks = node_filter.get('rack_names', [])
    wanted_tags = node_filter.get('node_tags', [])

    if len(wanted_names) > 0:
        candidates = [n for n in candidates
                      if n.get_name() in wanted_names]

    if len(wanted_racks) > 0:
        candidates = [n for n in candidates
                      if n.get_rack() in wanted_racks]

    if len(wanted_tags) > 0:
        candidates = [n for n in candidates
                      for t in wanted_tags
                      if n.has_tag(t)]

    return candidates

View File

@ -1,109 +0,0 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from copy import deepcopy
from helm_drydock.error import DesignError
class DesignStateClient(object):
    """Load site design data from a DesignState store and resolve it."""

    def __init__(self):
        # Use getLogger so the logger participates in the standard
        # logging hierarchy/handler configuration (direct Logger()
        # instantiation bypasses the logging manager)
        self.log = logging.getLogger('orchestrator')

    def load_design_data(self, site_name, design_state=None, change_id=None):
        """Assemble a Site model from the persisted design data.

        Pulls all defined models from the state manager and attaches
        the components belonging to site_name. Does not compute
        inheritance.

        :param site_name: name of the site to assemble
        :param design_state: instance of statemgmt.DesignState to read
        :param change_id: optional design change to load instead of the
                          design base
        :return: a Site model populated with the site's components
        :raises ValueError: if design_state is None
        :raises DesignError: propagated from the state manager
        """
        if design_state is None:
            raise ValueError("Design state is None")

        if change_id is None:
            # Original code read `except DesignError(e): raise e`, which
            # calls DesignError with an undefined name at handling time
            # (NameError) instead of binding the exception. Use the
            # correct `except ... :` form with a bare re-raise.
            try:
                design_data = design_state.get_design_base()
            except DesignError:
                raise
        else:
            design_data = design_state.get_design_change(change_id)

        site = design_data.get_site(site_name)

        # Attach every component whose `site` field names this site
        site.networks.extend(
            n for n in design_data.get_networks() if n.site == site_name)
        site.network_links.extend(
            l for l in design_data.get_network_links()
            if l.site == site_name)
        site.host_profiles.extend(
            p for p in design_data.get_host_profiles()
            if p.site == site_name)
        site.hardware_profiles.extend(
            p for p in design_data.get_hardware_profiles()
            if p.site == site_name)
        site.baremetal_nodes.extend(
            n for n in design_data.get_baremetal_nodes()
            if n.site == site_name)

        return site

    def compute_model_inheritance(self, site_root):
        """Return a copy of site_root with inheritance fully resolved.

        Only host profiles and baremetal nodes participate in
        inheritance today; resolving each node recursively resolves the
        host profiles assigned to it. The input model is not mutated.

        :param site_root: fully populated Site model
        :return: deep copy of site_root with effective node models
        """
        site_copy = deepcopy(site_root)

        effective_nodes = []
        for n in site_copy.baremetal_nodes:
            resolved = n.apply_host_profile(site_copy)
            resolved = resolved.apply_hardware_profile(site_copy)
            resolved = resolved.apply_network_connections(site_copy)
            effective_nodes.append(resolved)

        site_copy.baremetal_nodes = effective_nodes
        return site_copy
"""
compute_model_inheritance - given a fully populated Site model,
compute the effective design by applying inheritance and references
return a Site model reflecting the effective design for the site
"""

View File

@ -1,64 +0,0 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum, unique
@unique
class Action(Enum):
    """Orchestration actions that can be requested of a Drydock task.

    Values are the serialized wire form; changing them affects any
    persisted tasks and external consumers.
    """
    Noop = 'noop'
    ValidateDesign = 'validate_design'
    VerifySite = 'verify_site'
    PrepareSite = 'prepare_site'
    VerifyNode = 'verify_node'
    PrepareNode = 'prepare_node'
    DeployNode = 'deploy_node'
    DestroyNode = 'destroy_node'
@unique
class ActionResult(Enum):
    """Outcome of an orchestration action.

    PartialSuccess indicates some, but not all, sub-steps succeeded;
    DependentFailure indicates failure caused by a prerequisite step.
    """
    Success = 'success'
    PartialSuccess = 'partial_success'
    Failure = 'failure'
    DependentFailure = 'dependent_failure'
@unique
class SiteStatus(Enum):
    """Lifecycle states for a site, from design through deployment."""
    Unknown = 'unknown'
    DesignStarted = 'design_started'
    DesignAvailable = 'design_available'
    DesignValidated = 'design_validated'
    Deploying = 'deploying'
    Deployed = 'deployed'
    DesignUpdated = 'design_updated'
@unique
class NodeStatus(Enum):
    """Lifecycle states for a single node during site deployment."""
    Unknown = 'unknown'
    Designed = 'designed'
    Present = 'present'  # IPMI access verified
    BasicVerifying = 'basic_verifying'  # Base node verification in process
    FailedBasicVerify = 'failed_basic_verify'  # Base node verification failed
    BasicVerified = 'basic_verified'  # Base node verification successful
    Preparing = 'preparing'  # Node preparation in progress
    FailedPrepare = 'failed_prepare'  # Node preparation failed
    Prepared = 'prepared'  # Node preparation complete
    FullyVerifying = 'fully_verifying'  # Node full verification in progress
    FailedFullVerify = 'failed_full_verify'  # Node full verification failed
    FullyVerified = 'fully_verified'  # Deeper verification successful
    # NOTE(review): value 'deploy' breaks the '<verb>ing' pattern used by
    # every other in-progress state (cf. SiteStatus.Deploying = 'deploying').
    # Looks like a typo, but the value may already be persisted/consumed —
    # confirm downstream consumers before changing it.
    Deploying = 'deploy'  # Node deployment in progress
    FailedDeploy = 'failed_deploy'  # Node deployment failed
    Deployed = 'deployed'  # Node deployed successfully
    Bootstrapping = 'bootstrapping'  # Node bootstrapping
    FailedBootstrap = 'failed_bootstrap'  # Node bootstrapping failed
    Bootstrapped = 'bootstrapped'  # Node fully bootstrapped
    Complete = 'complete'  # Node is complete

View File

@ -10,17 +10,27 @@ Orchestrator should persist the state of each task
such that on failure the task can be retried and only the
steps needed will be executed.
## Drydock Tasks ##
Bullet points listed below are not exhaustive and will
change as we move through testing
## ValidateDesign ##
### ValidateDesign ###
Load design data from the statemgmt persistent store and
validate that the current state of design data represents
a valid site design. No claim is made that the design data
is compatible with the physical state of the site.
## VerifySite ##
#### Validations ####
* All baremetal nodes have an address, either static or DHCP, for all networks they are attached to.
* No static IP assignments are duplicated
* No static IP assignments are outside of the network they are targeted for
* No network MTU mismatches due to a network riding different links on different nodes
* Boot drive is above minimum size
### VerifySite ###
Verify site-wide resources are in a useful state
@ -29,7 +39,7 @@ Verify site-wide resources are in a useful state
* Promenade or other next-step services are up and available
* Verify credentials are available
## PrepareSite ##
### PrepareSite ###
Begin preparing site-wide resources for bootstrapping. This
action will lock site design data for changes.
@ -37,7 +47,7 @@ action will lock site design data for changes.
* Configure bootstrapper with site network configs
* Shuffle images so they are correctly configured for bootstrapping
## VerifyNode ##
### VerifyNode ###
Verification of per-node configurations within the context
of the current node status
@ -54,7 +64,7 @@ of the current node status
- Possibly network connectivity
- Firmware versions
## PrepareNode ##
### PrepareNode ###
Prepare a node for bootstrapping
@ -66,8 +76,11 @@ Prepare a node for bootstrapping
- Hardware configuration (e.g. RAID)
* Configure node networking
* Configure node storage
* Interrogate node
- lshw output
- lldp output
## DeployNode ##
### DeployNode ###
Begin bootstrapping the node and monitor
success
@ -78,6 +91,13 @@ success
* Reboot node from local disk
* Monitor platform bootstrapping
## DestroyNode ##
### DestroyNode ###
Destroy current node configuration and rebootstrap from scratch
Destroy current node configuration and rebootstrap from scratch
## Integration with Drivers ##
Based on the requested task and the current known state of a node
the orchestrator will call the enabled downstream drivers with one
or more tasks. Each call will provide the driver with the desired
state (the applied model) and current known state (the build model).

View File

@ -14,79 +14,70 @@
from copy import deepcopy
from datetime import datetime
from datetime import timezone
from threading import Lock
import uuid
import helm_drydock.model.node as node
import helm_drydock.model.hostprofile as hostprofile
import helm_drydock.model.network as network
import helm_drydock.model.site as site
import helm_drydock.model.hwprofile as hwprofile
import helm_drydock.objects as objects
import helm_drydock.objects.task as tasks
from helm_drydock.error import DesignError
from helm_drydock.error import DesignError, StateError
class DesignState(object):
def __init__(self):
self.design_base = None
self.design_changes = []
self.designs = {}
self.designs_lock = Lock()
self.builds = []
self.builds_lock = Lock()
self.tasks = []
self.tasks_lock = Lock()
return
# TODO Need to lock a design base or change once implementation
# has started
def get_design_base(self):
if self.design_base is None:
raise DesignError("No design base submitted")
def get_design(self, design_id):
if design_id not in self.designs.keys():
raise DesignError("Design ID %s not found" % (design_id))
return deepcopy(self.design_base)
return objects.SiteDesign.obj_from_primitive(self.designs[design_id])
def post_design_base(self, site_design):
if site_design is not None and isinstance(site_design, SiteDesign):
self.design_base = deepcopy(site_design)
return True
def put_design_base(self, site_design):
# TODO Support merging
if site_design is not None and isinstance(site_design, SiteDesign):
self.design_base = deepcopy(site_design)
return True
def get_design_change(self, changeid):
match = [x for x in self.design_changes if x.changeid == changeid]
if len(match) == 0:
raise DesignError("No design change %s found." % (changeid))
else:
return deepcopy(match[0])
def post_design_change(self, site_design):
if site_design is not None and isinstance(site_design, SiteDesign):
exists = [(x) for x
in self.design_changes
if x.changeid == site_design.changeid]
if len(exists) > 0:
raise DesignError("Existing change %s found" %
(site_design.changeid))
self.design_changes.append(deepcopy(site_design))
return True
def post_design(self, site_design):
if site_design is not None:
my_lock = self.designs_lock.acquire(blocking=True,
timeout=10)
if my_lock:
design_id = site_design.id
if design_id not in self.designs.keys():
self.designs[design_id] = site_design.obj_to_primitive()
else:
self.designs_lock.release()
raise StateError("Design ID %s already exists" % design_id)
self.designs_lock.release()
return True
raise StateError("Could not acquire lock")
else:
raise DesignError("Design change must be a SiteDesign instance")
def put_design_change(self, site_design):
# TODO Support merging
if site_design is not None and isinstance(site_design, SiteDesign):
design_copy = deepcopy(site_design)
self.design_changes = [design_copy
if x.changeid == design_copy.changeid
else x
for x
in self.design_changes]
return True
def put_design(self, site_design):
if site_design is not None:
my_lock = self.designs_lock.acquire(blocking=True,
timeout=10)
if my_lock:
design_id = site_design.id
if design_id not in self.designs.keys():
self.designs_lock.release()
raise StateError("Design ID %s does not exist" % design_id)
else:
self.designs[design_id] = site_design.obj_to_primitive()
self.designs_lock.release()
return True
raise StateError("Could not acquire lock")
else:
raise DesignError("Design change must be a SiteDesign instance")
raise DesignError("Design base must be a SiteDesign instance")
def get_current_build(self):
latest_stamp = 0
@ -108,187 +99,111 @@ class DesignState(object):
def post_build(self, site_build):
if site_build is not None and isinstance(site_build, SiteBuild):
exists = [b for b in self.builds
if b.build_id == site_build.build_id]
my_lock = self.builds_lock.acquire(block=True, timeout=10)
if my_lock:
exists = [b for b in self.builds
if b.build_id == site_build.build_id]
if len(exists) > 0:
raise DesignError("Already a site build with ID %s" %
(str(site_build.build_id)))
if len(exists) > 0:
self.builds_lock.release()
raise DesignError("Already a site build with ID %s" %
(str(site_build.build_id)))
self.builds.append(deepcopy(site_build))
self.builds_lock.release()
return True
raise StateError("Could not acquire lock")
else:
self.builds.append(deepcopy(site_build))
return True
raise DesignError("Design change must be a SiteDesign instance")
class SiteDesign(object):
def __init__(self, ischange=False):
if ischange:
self.changeid = uuid.uuid4()
def put_build(self, site_build):
if site_build is not None and isinstance(site_build, SiteBuild):
my_lock = self.builds_lock.acquire(block=True, timeout=10)
if my_lock:
buildid = site_build.buildid
for b in self.builds:
if b.buildid == buildid:
b.merge_updates(site_build)
self.builds_lock.release()
return True
self.builds_lock.release()
return False
raise StateError("Could not acquire lock")
else:
self.changeid = 0
raise DesignError("Design change must be a SiteDesign instance")
self.sites = []
self.networks = []
self.network_links = []
self.host_profiles = []
self.hardware_profiles = []
self.baremetal_nodes = []
def get_task(self, task_id):
for t in self.tasks:
if t.get_id() == task_id:
return deepcopy(t)
return None
def add_site(self, new_site):
if new_site is None or not isinstance(new_site, site.Site):
raise DesignError("Invalid Site model")
def post_task(self, task):
if task is not None and isinstance(task, tasks.Task):
my_lock = self.tasks_lock.acquire(blocking=True, timeout=10)
if my_lock:
task_id = task.get_id()
matching_tasks = [t for t in self.tasks
if t.get_id() == task_id]
if len(matching_tasks) > 0:
self.tasks_lock.release()
raise StateError("Task %s already created" % task_id)
self.sites.append(new_site)
def get_sites(self):
return self.sites
def get_site(self, site_name):
for s in self.sites:
if s.name == site_name:
return s
raise DesignError("Site %s not found in design state" % site_name)
def add_network(self, new_network):
if new_network is None or not isinstance(new_network, network.Network):
raise DesignError("Invalid Network model")
self.networks.append(new_network)
def get_networks(self):
return self.networks
def get_network(self, network_name):
for n in self.networks:
if n.name == network_name:
return n
raise DesignError("Network %s not found in design state"
% network_name)
def add_network_link(self, new_network_link):
if new_network_link is None or not isinstance(new_network_link,
network.NetworkLink):
raise DesignError("Invalid NetworkLink model")
self.network_links.append(new_network_link)
def get_network_links(self):
return self.network_links
def get_network_link(self, link_name):
for l in self.network_links:
if l.name == link_name:
return l
raise DesignError("NetworkLink %s not found in design state"
% link_name)
def add_host_profile(self, new_host_profile):
if new_host_profile is None or not isinstance(new_host_profile,
hostprofile.HostProfile):
raise DesignError("Invalid HostProfile model")
self.host_profiles.append(new_host_profile)
def get_host_profiles(self):
return self.host_profiles
def get_host_profile(self, profile_name):
for p in self.host_profiles:
if p.name == profile_name:
return p
raise DesignError("HostProfile %s not found in design state"
% profile_name)
def add_hardware_profile(self, new_hardware_profile):
if (new_hardware_profile is None or
not isinstance(new_hardware_profile, hwprofile.HardwareProfile)):
raise DesignError("Invalid HardwareProfile model")
self.hardware_profiles.append(new_hardware_profile)
def get_hardware_profiles(self):
return self.hardware_profiles
def get_hardware_profile(self, profile_name):
for p in self.hardware_profiles:
if p.name == profile_name:
return p
raise DesignError("HardwareProfile %s not found in design state"
% profile_name)
def add_baremetal_node(self, new_baremetal_node):
if (new_baremetal_node is None or
not isinstance(new_baremetal_node, node.BaremetalNode)):
raise DesignError("Invalid BaremetalNode model")
self.baremetal_nodes.append(new_baremetal_node)
def get_baremetal_nodes(self):
return self.baremetal_nodes
def get_baremetal_node(self, node_name):
for n in self.baremetal_nodes:
if n.name == node_name:
return n
raise DesignError("BaremetalNode %s not found in design state"
% node_name)
class SiteBuild(SiteDesign):
def __init__(self, build_id=None):
super(SiteBuild, self).__init__()
if build_id is None:
self.build_id = datetime.datetime.now(timezone.utc).timestamp()
self.tasks.append(deepcopy(task))
self.tasks_lock.release()
return True
else:
raise StateError("Could not acquire lock")
else:
self.build_id = build_id
raise StateError("Task is not the correct type")
def get_filtered_nodes(self, node_filter):
effective_nodes = self.get_baremetal_nodes()
def put_task(self, task, lock_id=None):
if task is not None and isinstance(task, tasks.Task):
my_lock = self.tasks_lock.acquire(blocking=True, timeout=10)
if my_lock:
task_id = task.get_id()
t = self.get_task(task_id)
if t.lock_id is not None and t.lock_id != lock_id:
self.tasks_lock.release()
raise StateError("Task locked for updates")
# filter by rack
rack_filter = node_filter.get('rackname', None)
task.lock_id = lock_id
self.tasks = [i
if i.get_id() != task_id
else deepcopy(task)
for i in self.tasks]
if rack_filter is not None:
rack_list = rack_filter.split(',')
effective_nodes = [x
for x in effective_nodes
if x.get_rack() in rack_list]
# filter by name
name_filter = node_filter.get('nodename', None)
self.tasks_lock.release()
return True
else:
raise StateError("Could not acquire lock")
else:
raise StateError("Task is not the correct type")
if name_filter is not None:
name_list = name_filter.split(',')
effective_nodes = [x
for x in effective_nodes
if x.get_name() in name_list]
# filter by tag
tag_filter = node_filter.get('tags', None)
def lock_task(self, task_id):
my_lock = self.tasks_lock.acquire(blocking=True, timeout=10)
if my_lock:
lock_id = uuid.uuid4()
for t in self.tasks:
if t.get_id() == task_id and t.lock_id is None:
t.lock_id = lock_id
self.tasks_lock.release()
return lock_id
self.tasks_lock.release()
return None
else:
raise StateError("Could not acquire lock")
if tag_filter is not None:
tag_list = tag_filter.split(',')
effective_nodes = [x
for x in effective_nodes
for t in tag_list
if x.has_tag(t)]
def unlock_task(self, task_id, lock_id):
my_lock = self.tasks_lock.acquire(blocking=True, timeout=10)
if my_lock:
for t in self.tasks:
if t.get_id() == task_id and t.lock_id == lock_id:
t.lock_id = None
self.tasks_lock.release()
return True
self.tasks_lock.release()
return False
else:
raise StateError("Could not acquire lock")
return effective_nodes
"""
Support filtering on rack name, node name or node tag
for now. Each filter can be a comma-delimited list of
values. The final result is an intersection of all the
filters
"""
def set_nodes_status(self, node_filter, status):
target_nodes = self.get_filtered_nodes(node_filter)
for n in target_nodes:
n.set_status(status)

View File

@ -20,6 +20,12 @@ Serialization of Drydock internal model as rendered to effective implementation
/drydock/build
/drydock/build/[datestamp] - A point-in-time view of what was deployed with deployment results
## Tasks ##
Management of task state for the internal orchestrator
/drydock/tasks
## Node data ##
Per-node data that can drive introspection as well as accept updates from nodes

View File

@ -40,23 +40,26 @@ setup(name='helm_drydock',
author_email='sh8121@att.com',
license='Apache 2.0',
packages=['helm_drydock',
'helm_drydock.model',
'helm_drydock.objects',
'helm_drydock.ingester',
'helm_drydock.ingester.plugins',
'helm_drydock.statemgmt',
'helm_drydock.orchestrator',
'helm_drydock.control'],
'helm_drydock.control',
'helm_drydock.drivers',
'helm_drydock.drivers.oob',
'helm_drydock.drivers.oob.pyghmi_driver',
'helm_drydock.drivers.node',
'helm_drydock.drivers.node.maasdriver',
'helm_drydock.drivers.node.maasdriver.models'],
install_requires=[
'PyYAML',
'oauth',
'requests-oauthlib',
'pyghmi',
'pyghmi>=1.0.18',
'netaddr',
'pecan',
'webob'
],
dependency_link=[
'git+https://github.com/maas/python-libmaas.git'
'falcon',
'oslo.versionedobjects>=1.23.0',
'requests',
'oauthlib',
]
)

View File

@ -1,2 +1,5 @@
pytest-mock
pytest
tox
mock
tox
oslo.versionedobjects[fixtures]>=1.23.0

View File

@ -0,0 +1,30 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import helm_drydock.config as config
import helm_drydock.drivers.node.maasdriver.api_client as client
class TestClass(object):
    """Integration check for MaaS API client authentication.

    Requires network access to a configured MaaS instance; connection
    settings come from the static Drydock config.
    """

    def test_client_authenticate(self):
        # Pull the maasdriver connection settings from config
        cfg = config.DrydockConfig.node_driver['maasdriver']
        factory = client.MaasRequestFactory(cfg['api_url'], cfg['api_key'])

        # Listing authorisation tokens only succeeds with a valid
        # OAuth handshake, so a non-empty list proves authentication
        response = factory.get(
            'account/', params={'op': 'list_authorisation_tokens'})
        tokens = response.json()

        assert len(tokens) > 0

View File

@ -0,0 +1,58 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import uuid
import helm_drydock.config as config
import helm_drydock.drivers.node.maasdriver.api_client as client
import helm_drydock.drivers.node.maasdriver.models.fabric as maas_fabric
import helm_drydock.drivers.node.maasdriver.models.subnet as maas_subnet
class TestClass(object):
    """Integration tests against a live MaaS API endpoint.

    NOTE(review): these tests require network access and a configured
    MaaS instance; they cannot run in isolation.
    """

    def test_maas_fabric(self):
        # Round-trip a fabric: create it, then fetch it back by id
        client_config = config.DrydockConfig.node_driver['maasdriver']
        maas_client = client.MaasRequestFactory(client_config['api_url'], client_config['api_key'])

        # Random name avoids collisions on a shared MaaS instance
        fabric_name = str(uuid.uuid4())

        fabric_list = maas_fabric.Fabrics(maas_client)
        fabric_list.refresh()

        test_fabric = maas_fabric.Fabric(maas_client, name=fabric_name, description='Test Fabric')
        test_fabric = fabric_list.add(test_fabric)

        assert test_fabric.name == fabric_name
        assert test_fabric.resource_id is not None

        # Re-query by resource id to confirm the fabric persisted server-side
        query_fabric = maas_fabric.Fabric(maas_client, resource_id=test_fabric.resource_id)
        query_fabric.refresh()

        assert query_fabric.name == test_fabric.name

    def test_maas_subnet(self):
        client_config = config.DrydockConfig.node_driver['maasdriver']
        maas_client = client.MaasRequestFactory(client_config['api_url'], client_config['api_key'])

        subnet_list = maas_subnet.Subnets(maas_client)
        subnet_list.refresh()

        # Dump every subnet the endpoint reports
        for s in subnet_list:
            print(s.to_dict())

        # NOTE(review): unconditional failure after dumping subnets looks
        # like debugging scaffolding left in place (forces pytest to show
        # the printed output) — confirm before relying on this test.
        assert False

View File

@ -0,0 +1,94 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import pytest
import shutil
import os
import uuid
import helm_drydock.config as config
import helm_drydock.drivers.node.maasdriver.api_client as client
import helm_drydock.ingester.plugins.yaml
import helm_drydock.statemgmt as statemgmt
import helm_drydock.objects as objects
import helm_drydock.orchestrator as orch
import helm_drydock.objects.fields as hd_fields
import helm_drydock.objects.task as task
import helm_drydock.drivers as drivers
from helm_drydock.ingester import Ingester
class TestClass(object):
    """Integration tests driving the Orchestrator with the MaaS driver.

    NOTE(review): these depend on project state management, the MaaS
    driver, and sample YAML fixtures; they are not unit tests.
    """

    def test_client_verify(self):
        # VerifySite with no design: exercises driver availability checks
        design_state = statemgmt.DesignState()
        orchestrator = orch.Orchestrator(state_manager=design_state,
                        enabled_drivers={'node': 'helm_drydock.drivers.node.maasdriver.driver.MaasNodeDriver'})
        orch_task = orchestrator.create_task(task.OrchestratorTask,
                                             site='sitename',
                                             design_id=None,
                                             action=hd_fields.OrchestratorAction.VerifySite)
        orchestrator.execute_task(orch_task.get_id())

        # Re-fetch the task to observe the result recorded by execution
        orch_task = design_state.get_task(orch_task.get_id())

        assert orch_task.result == hd_fields.ActionResult.Success

    def test_orch_preparesite(self, input_files):
        # Full pipeline: ingest a YAML site design, then run PrepareSite
        objects.register_all()

        input_file = input_files.join("fullsite.yaml")

        design_state = statemgmt.DesignState()
        design_data = objects.SiteDesign()
        design_id = design_data.assign_id()
        design_state.post_design(design_data)

        ingester = Ingester()
        ingester.enable_plugins([helm_drydock.ingester.plugins.yaml.YamlIngester])
        ingester.ingest_data(plugin_name='yaml', design_state=design_state,
                             filenames=[str(input_file)], design_id=design_id)

        design_data = design_state.get_design(design_id)

        orchestrator = orch.Orchestrator(state_manager=design_state,
                        enabled_drivers={'node': 'helm_drydock.drivers.node.maasdriver.driver.MaasNodeDriver'})
        orch_task = orchestrator.create_task(task.OrchestratorTask,
                                             site='sitename',
                                             design_id=design_id,
                                             action=hd_fields.OrchestratorAction.PrepareSite)
        orchestrator.execute_task(orch_task.get_id())

        orch_task = design_state.get_task(orch_task.get_id())

        assert orch_task.result == hd_fields.ActionResult.Success

    @pytest.fixture(scope='module')
    def input_files(self, tmpdir_factory, request):
        # Copy the repo's sample YAML designs into a per-module tmpdir
        # so tests can consume them as ordinary files
        tmpdir = tmpdir_factory.mktemp('data')
        samples_dir = os.path.dirname(str(request.fspath)) + "/../yaml_samples"
        samples = os.listdir(samples_dir)

        for f in samples:
            src_file = samples_dir + "/" + f
            dst_file = str(tmpdir) + "/" + f
            shutil.copyfile(src_file, dst_file)

        return tmpdir

View File

@ -1,69 +0,0 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import yaml
from helm_drydock.model.hwprofile import HardwareProfile
class TestClass(object):
    """Unit test constructing a HardwareProfile from a YAML definition."""

    def setup_method(self, method):
        # Echo the test name for easier log correlation
        print("Running test {0}".format(method.__name__))

    def test_hardwareprofile(self):
        """A YAML hardware definition should map onto HardwareProfile.

        Fixes vs. the prior snippet:
        - metadata contained a duplicate `name:` key (silent last-wins in
          PyYAML); the second entry is `description:` per the project's
          sample hardware definition file.
        - yaml.load without an explicit Loader is unsafe/deprecated; use
          yaml.safe_load for plain data documents.
        """
        yaml_snippet = ("---\n"
                        "apiVersion: 'v1.0'\n"
                        "kind: HardwareProfile\n"
                        "metadata:\n"
                        "  name: HPGen8v3\n"
                        "  region: sitename\n"
                        "  date: 17-FEB-2017\n"
                        "  description: Sample hardware definition\n"
                        "  author: Scott Hussey\n"
                        "spec:\n"
                        "  # Vendor of the server chassis\n"
                        "  vendor: HP\n"
                        "  # Generation of the chassis model\n"
                        "  generation: '8'\n"
                        "  # Version of the chassis model within its generation - not version of the hardware definition\n"
                        "  hw_version: '3'\n"
                        "  # The certified version of the chassis BIOS\n"
                        "  bios_version: '2.2.3'\n"
                        "  # Mode of the default boot of hardware - bios, uefi\n"
                        "  boot_mode: bios\n"
                        "  # Protocol of boot of the hardware - pxe, usb, hdd\n"
                        "  bootstrap_protocol: pxe\n"
                        "  # Which interface to use for network booting within the OOB manager, not OS device\n"
                        "  pxe_interface: 0\n"
                        "  # Map hardware addresses to aliases/roles to allow a mix of hardware configs\n"
                        "  # in a site to result in a consistent configuration\n"
                        "  device_aliases:\n"
                        "    pci:\n"
                        "      - address: pci@0000:00:03.0\n"
                        "        alias: prim_nic01\n"
                        "        # type could identify expected hardware - used for hardware manifest validation\n"
                        "        type: '82540EM Gigabit Ethernet Controller'\n"
                        "      - address: pci@0000:00:04.0\n"
                        "        alias: prim_nic02\n"
                        "        type: '82540EM Gigabit Ethernet Controller'\n"
                        "    scsi:\n"
                        "      - address: scsi@2:0.0.0\n"
                        "        alias: primary_boot\n"
                        "        type: 'VBOX HARDDISK'\n")

        hw_profile = yaml.safe_load(yaml_snippet)
        hw_profile_model = HardwareProfile(**hw_profile)

        assert hasattr(hw_profile_model, 'bootstrap_protocol')

View File

@ -13,8 +13,8 @@
# limitations under the License.
from helm_drydock.ingester import Ingester
from helm_drydock.statemgmt import DesignState, SiteDesign
from helm_drydock.orchestrator.designdata import DesignStateClient
from helm_drydock.statemgmt import DesignState
from helm_drydock.orchestrator import Orchestrator
from copy import deepcopy
@ -31,29 +31,28 @@ class TestClass(object):
def test_design_inheritance(self, loaded_design):
client = DesignStateClient()
orchestrator = Orchestrator(state_manager=loaded_design,
enabled_drivers={'oob': 'helm_drydock.drivers.oob.pyghmi_driver.PyghmiDriver'})
design_data = client.load_design_data("sitename", design_state=loaded_design)
design_data = orchestrator.load_design_data("sitename")
assert len(design_data.baremetal_nodes) == 2
print(yaml.dump(design_data, default_flow_style=False))
design_data = client.compute_model_inheritance(design_data)
design_data = orchestrator.compute_model_inheritance(design_data)
node = design_data.get_baremetal_node("controller01")
print(yaml.dump(node, default_flow_style=False))
assert node.hardware_profile == 'HPGen9v3'
assert node.applied.get('hardware_profile') == 'HPGen9v3'
iface = node.get_interface('bond0')
iface = node.get_applied_interface('bond0')
assert iface.get_slave_count() == 2
print(yaml.dump(iface, default_flow_style=False))
assert iface.get_applied_slave_count() == 2
iface = node.get_interface('pxe')
iface = node.get_applied_interface('pxe')
assert iface.get_slave_count() == 1
assert iface.get_applied_slave_count() == 1
@pytest.fixture(scope='module')
def loaded_design(self, input_files):
@ -69,12 +68,11 @@ class TestClass(object):
return design_state
@pytest.fixture(scope='module')
def input_files(self, tmpdir_factory, request):
tmpdir = tmpdir_factory.mktemp('data')
samples_dir = os.path.dirname(str(request.fspath)) + "/yaml_samples"
samples_dir = os.path.dirname(str(request.fspath)) + "../yaml_samples"
samples = os.listdir(samples_dir)
for f in samples:

View File

@ -13,7 +13,8 @@
# limitations under the License.
from helm_drydock.ingester import Ingester
from helm_drydock.statemgmt import DesignState, SiteDesign
from helm_drydock.statemgmt import DesignState
import helm_drydock.objects as objects
import pytest
import shutil
@ -26,43 +27,50 @@ class TestClass(object):
print("Running test {0}".format(method.__name__))
def test_ingest_full_site(self, input_files):
objects.register_all()
input_file = input_files.join("fullsite.yaml")
design_state = DesignState()
design_data = SiteDesign()
design_state.post_design_base(design_data)
design_data = objects.SiteDesign()
design_id = design_data.assign_id()
design_state.post_design(design_data)
ingester = Ingester()
ingester.enable_plugins([helm_drydock.ingester.plugins.yaml.YamlIngester])
ingester.ingest_data(plugin_name='yaml', design_state=design_state, filenames=[str(input_file)])
ingester.ingest_data(plugin_name='yaml', design_state=design_state,
filenames=[str(input_file)], design_id=design_id)
design_data = design_state.get_design_base()
design_data = design_state.get_design(design_id)
assert len(design_data.get_host_profiles()) == 3
assert len(design_data.get_baremetal_nodes()) == 2
assert len(design_data.host_profiles) == 3
assert len(design_data.baremetal_nodes) == 2
def test_ingest_federated_design(self, input_files):
objects.register_all()
profiles_file = input_files.join("fullsite_profiles.yaml")
networks_file = input_files.join("fullsite_networks.yaml")
nodes_file = input_files.join("fullsite_nodes.yaml")
design_state = DesignState()
design_data = SiteDesign()
design_state.post_design_base(design_data)
design_data = objects.SiteDesign()
design_id = design_data.assign_id()
design_state.post_design(design_data)
ingester = Ingester()
ingester.enable_plugins([helm_drydock.ingester.plugins.yaml.YamlIngester])
ingester.ingest_data(plugin_name='yaml', design_state=design_state,
filenames=[str(profiles_file), str(networks_file), str(nodes_file)])
ingester.ingest_data(plugin_name='yaml', design_state=design_state, design_id=design_id,
filenames=[str(profiles_file), str(networks_file), str(nodes_file)])
design_data = design_state.get_design_base()
design_data = design_state.get_design(design_id)
assert len(design_data.host_profiles) == 3
@pytest.fixture(scope='module')
def input_files(self, tmpdir_factory, request):
tmpdir = tmpdir_factory.mktemp('data')
samples_dir = os.path.dirname(str(request.fspath)) + "/yaml_samples"
samples_dir = os.path.dirname(str(request.fspath)) + "../yaml_samples"
samples = os.listdir(samples_dir)
for f in samples:

View File

@ -11,11 +11,12 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from helm_drydock.ingester.plugins.yaml import YamlIngester
import pytest
import shutil
import os
import uuid
from helm_drydock.ingester.plugins.yaml import YamlIngester
class TestClass(object):
@ -43,7 +44,7 @@ class TestClass(object):
@pytest.fixture(scope='module')
def input_files(self, tmpdir_factory, request):
tmpdir = tmpdir_factory.mktemp('data')
samples_dir = os.path.dirname(str(request.fspath)) + "/yaml_samples"
samples_dir = os.path.dirname(str(request.fspath)) + "../yaml_samples"
samples = os.listdir(samples_dir)
for f in samples:

85
tests/unit/test_models.py Normal file
View File

@ -0,0 +1,85 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import helm_drydock.objects as objects
from helm_drydock.objects import fields
class TestClass(object):
    """Unit tests for the versioned HardwareProfile object model."""

    def test_hardwareprofile(self):
        """Round-trip a HardwareProfile through its primitive (serialized)
        form and verify change tracking on attribute mutation.
        """
        objects.register_all()

        def alias_primitive(alias, address, bus_type, dev_type):
            # Build the versioned-object primitive for one device alias.
            return {
                'versioned_object.namespace': 'helm_drydock.objects',
                'versioned_object.name': 'HardwareDeviceAlias',
                'versioned_object.version': '1.0',
                'versioned_object.data': {
                    'alias': alias,
                    'source': fields.ModelSource.Designed,
                    'address': address,
                    'bus_type': bus_type,
                    'dev_type': dev_type,
                }
            }

        device_list = {
            'versioned_object.namespace': 'helm_drydock.objects',
            'versioned_object.name': 'HardwareDeviceAliasList',
            'versioned_object.version': '1.0',
            'versioned_object.data': {
                'objects': [
                    alias_primitive('nic', '0000:00:03.0', 'pci',
                                    '82540EM Gigabit Ethernet Controller'),
                    alias_primitive('bootdisk', '2:0.0.0', 'scsi', 'SSD'),
                ]
            }
        }

        model_attr = {
            'versioned_object.namespace': 'helm_drydock.objects',
            'versioned_object.name': 'HardwareProfile',
            'versioned_object.version': '1.0',
            'versioned_object.data': {
                'name': 'server',
                'source': fields.ModelSource.Designed,
                'site': 'test_site',
                'vendor': 'Acme',
                'generation': '9',
                'hw_version': '3',
                'bios_version': '2.1.1',
                'boot_mode': 'bios',
                'bootstrap_protocol': 'pxe',
                'pxe_interface': '0',
                'devices': device_list,
            }
        }

        hwprofile = objects.HardwareProfile.obj_from_primitive(model_attr)
        assert getattr(hwprofile, 'bootstrap_protocol') == 'pxe'

        # Mutate one field and confirm only that field is flagged dirty.
        hwprofile.bootstrap_protocol = 'network'

        assert 'bootstrap_protocol' in hwprofile.obj_what_changed()
        assert 'bios_version' not in hwprofile.obj_what_changed()

View File

@ -0,0 +1,69 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generic testing for the orchestrator
#
import threading
import time
import helm_drydock.orchestrator as orch
import helm_drydock.objects.fields as hd_fields
import helm_drydock.statemgmt as statemgmt
import helm_drydock.objects.task as task
import helm_drydock.drivers as drivers
class TestClass(object):
    """Generic orchestrator behavior: task completion and termination."""

    def test_task_complete(self):
        """A Noop orchestration task (and every subtask) ends Complete."""
        state_mgr = statemgmt.DesignState()
        orchestrator = orch.Orchestrator(state_manager=state_mgr)

        orch_task = orchestrator.create_task(
            task.OrchestratorTask,
            site='default',
            action=hd_fields.OrchestratorAction.Noop)

        orchestrator.execute_task(orch_task.get_id())

        orch_task = state_mgr.get_task(orch_task.get_id())
        assert orch_task.get_status() == hd_fields.TaskStatus.Complete

        for subtask_id in orch_task.subtasks:
            subtask = state_mgr.get_task(subtask_id)
            assert subtask.get_status() == hd_fields.TaskStatus.Complete

    def test_task_termination(self):
        """Terminating a running task marks it and its subtasks Terminated."""
        state_mgr = statemgmt.DesignState()
        orchestrator = orch.Orchestrator(state_manager=state_mgr)

        orch_task = orchestrator.create_task(
            task.OrchestratorTask,
            site='default',
            action=hd_fields.OrchestratorAction.Noop)

        # Execute on a worker thread so the main thread can request
        # termination while the task is still in flight.
        worker = threading.Thread(target=orchestrator.execute_task,
                                  args=(orch_task.get_id(),))
        worker.start()

        time.sleep(1)
        orchestrator.terminate_task(orch_task.get_id())

        # Poll until the worker observes the termination and exits.
        while worker.is_alive():
            time.sleep(1)

        orch_task = state_mgr.get_task(orch_task.get_id())
        assert orch_task.get_status() == hd_fields.TaskStatus.Terminated

        for subtask_id in orch_task.subtasks:
            subtask = state_mgr.get_task(subtask_id)
            assert subtask.get_status() == hd_fields.TaskStatus.Terminated

107
tests/unit/test_orch_oob.py Normal file
View File

@ -0,0 +1,107 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generic testing for the orchestrator
#
import pytest
#from pytest_mock import mocker
#import mock
import os
import shutil
import uuid
from helm_drydock.ingester import Ingester
import helm_drydock.orchestrator as orch
import helm_drydock.objects.fields as hd_fields
import helm_drydock.statemgmt as statemgmt
import helm_drydock.objects as objects
import helm_drydock.objects.task as task
import helm_drydock.drivers as drivers
import helm_drydock.ingester.plugins.yaml as yaml_ingester
class TestClass(object):
    """Tests of OOB (out-of-band) orchestration against a loaded design.

    NOTE(review): the pyghmi-backed assertions are effectively disabled
    until the IPMI session can be mocked correctly (see comments below).
    """

    # Shared design id so the loaded_design fixture and the tests refer
    # to the same stored design document.
    design_id = str(uuid.uuid4())

    # sthussey None of these work right until I figure out correct
    # mocking of pyghmi
    def test_oob_verify_all_node(self, loaded_design):
        """Run a VerifyNode task across the design.

        Currently only verifies execution completes without raising;
        status assertions are pending proper pyghmi mocking.
        """
        #mocker.patch('pyghmi.ipmi.private.session.Session')
        #mocker.patch.object('pyghmi.ipmi.command.Command','get_asset_tag')

        orchestrator = orch.Orchestrator(state_manager=loaded_design,
                        enabled_drivers={'oob': 'helm_drydock.drivers.oob.pyghmi_driver.PyghmiDriver'})

        orch_task = orchestrator.create_task(task.OrchestratorTask,
                                             site='sitename',
                                             design_id=self.design_id,
                                             action=hd_fields.OrchestratorAction.VerifyNode)

        orchestrator.execute_task(orch_task.get_id())

        orch_task = loaded_design.get_task(orch_task.get_id())

        # TODO(review): assert on orch_task status once pyghmi is mocked.
        assert True

    """
    def test_oob_prepare_all_nodes(self, loaded_design):
        #mocker.patch('pyghmi.ipmi.private.session.Session')
        #mocker.patch.object('pyghmi.ipmi.command.Command','set_bootdev')

        orchestrator = orch.Orchestrator(state_manager=loaded_design,
                        enabled_drivers={'oob': 'helm_drydock.drivers.oob.pyghmi_driver.PyghmiDriver'})

        orch_task = orchestrator.create_task(task.OrchestratorTask,
                                             site='sitename',
                                             action=enum.OrchestratorAction.PrepareNode)

        orchestrator.execute_task(orch_task.get_id())

        #assert pyghmi.ipmi.command.Command.set_bootdev.call_count == 3
        #assert pyghmi.ipmi.command.Command.set_power.call_count == 6
    """

    @pytest.fixture(scope='module')
    def loaded_design(self, input_files):
        """Ingest the oob.yaml sample into a fresh DesignState."""
        objects.register_all()

        input_file = input_files.join("oob.yaml")

        design_state = statemgmt.DesignState()
        design_data = objects.SiteDesign(id=self.design_id)
        design_state.post_design(design_data)

        ingester = Ingester()
        ingester.enable_plugins([yaml_ingester.YamlIngester])
        ingester.ingest_data(plugin_name='yaml', design_state=design_state,
                             design_id=self.design_id, filenames=[str(input_file)])

        return design_state

    @pytest.fixture(scope='module')
    def input_files(self, tmpdir_factory, request):
        """Copy the sample YAML files into a per-module temp directory.

        BUG FIX: the samples path was built by string concatenation
        (dirname + "../yaml_samples"), which yields ".../unit../yaml_samples"
        because os.path.dirname() returns no trailing separator. Use
        os.path.join so the parent-directory traversal actually resolves
        to tests/yaml_samples.
        """
        tmpdir = tmpdir_factory.mktemp('data')
        samples_dir = os.path.join(
            os.path.dirname(str(request.fspath)), '..', 'yaml_samples')
        samples = os.listdir(samples_dir)

        for f in samples:
            src_file = os.path.join(samples_dir, f)
            dst_file = os.path.join(str(tmpdir), f)
            shutil.copyfile(src_file, dst_file)

        return tmpdir

View File

@ -0,0 +1,48 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import shutil
import helm_drydock.objects as objects
import helm_drydock.statemgmt as statemgmt
class TestClass(object):
    """Exercise posting and retrieving a SiteDesign via DesignState."""

    def setup_method(self, method):
        # Announce each test as it starts, for easier log reading.
        print("Running test {0}".format(method.__name__))

    def test_sitedesign_post(self):
        """A posted design must be retrievable, unchanged, by its id."""
        objects.register_all()

        state_manager = statemgmt.DesignState()

        design_data = objects.SiteDesign()
        design_id = design_data.assign_id()

        site = objects.Site()
        site.name = 'testsite'

        network = objects.Network()
        network.name = 'net_a'
        network.region = 'testsite'
        network.cidr = '172.16.0.0/24'

        design_data.set_site(site)
        design_data.add_network(network)

        state_manager.post_design(design_data)
        stored = state_manager.get_design(design_id)

        # Compare serialized forms: equality of primitives implies the
        # stored design round-tripped without loss.
        assert design_data.obj_to_primitive() == stored.obj_to_primitive()

View File

@ -37,11 +37,11 @@ metadata:
description: Describe layer 1 attributes. Primary key is 'name'. These settings will generally be things the switch and server have to agree on
spec:
bonding:
mode: none
mode: disabled
mtu: 1500
linkspeed: 100full
trunking:
mode: none
mode: disabled
default_network: oob
---
# pxe is a bit of 'magic' indicating the link config used when PXE booting
@ -57,14 +57,14 @@ metadata:
description: Describe layer 1 attributes. Primary key is 'name'. These settings will generally be things the switch and server have to agree on
spec:
bonding:
mode: none
mode: disabled
mtu: 1500
linkspeed: auto
# Is this link supporting multiple layer 2 networks?
# none is a port-based VLAN identified by default_network
# tagged is using 802.1q VLAN tagging. Untagged packets will default to default_network
trunking:
mode: none
mode: disabled
# use name, will translate to VLAN ID
default_network: pxe
---
@ -91,14 +91,11 @@ spec:
hash: layer3+4
# 802.3ad specific options
peer_rate: slow
mon_rate: default
up_delay: default
down_delay: default
mtu: 9000
linkspeed: auto
# Is this link supporting multiple layer 2 networks?
trunking:
mode: tagged
mode: 802.1q
default_network: mgmt
---
apiVersion: 'v1.0'
@ -395,6 +392,8 @@ spec:
address: 172.16.1.20
- network: public
address: 172.16.3.20
- network: oob
address: 172.16.100.20
metadata:
roles: os_ctl
rack: rack01
@ -416,6 +415,8 @@ spec:
address: 172.16.1.21
- network: private
address: 172.16.2.21
- network: oob
address: 172.16.100.21
---
apiVersion: 'v1.0'
kind: HardwareProfile
@ -442,15 +443,17 @@ spec:
# Map hardware addresses to aliases/roles to allow a mix of hardware configs
# in a site to result in a consistent configuration
device_aliases:
pci:
- address: pci@0000:00:03.0
alias: prim_nic01
- address: '0000:00:03.0'
alias: prim_nic01
# type could identify expected hardware - used for hardware manifest validation
type: '82540EM Gigabit Ethernet Controller'
- address: pci@0000:00:04.0
alias: prim_nic02
type: '82540EM Gigabit Ethernet Controller'
scsi:
- address: scsi@2:0.0.0
alias: primary_boot
type: 'VBOX HARDDISK'
dev_type: '82540EM Gigabit Ethernet Controller'
bus_type: 'pci'
- address: '0000:00:04.0'
alias: prim_nic02
dev_type: '82540EM Gigabit Ethernet Controller'
bus_type: 'pci'
- address: '2:0.0.0'
alias: primary_boot
dev_type: 'VBOX HARDDISK'
bus_type: 'scsi'

View File

@ -28,11 +28,11 @@ metadata:
description: Describe layer 1 attributes. Primary key is 'name'. These settings will generally be things the switch and server have to agree on
spec:
bonding:
mode: none
mode: disabled
mtu: 1500
linkspeed: 100full
trunking:
mode: none
mode: disabled
default_network: oob
---
# pxe is a bit of 'magic' indicating the link config used when PXE booting
@ -48,14 +48,14 @@ metadata:
description: Describe layer 1 attributes. Primary key is 'name'. These settings will generally be things the switch and server have to agree on
spec:
bonding:
mode: none
mode: disabled
mtu: 1500
linkspeed: auto
# Is this link supporting multiple layer 2 networks?
# none is a port-based VLAN identified by default_network
# tagged is using 802.1q VLAN tagging. Untagged packets will default to default_network
trunking:
mode: none
mode: disabled
# use name, will translate to VLAN ID
default_network: pxe
---
@ -82,14 +82,11 @@ spec:
hash: layer3+4
# 802.3ad specific options
peer_rate: slow
mon_rate: default
up_delay: default
down_delay: default
mtu: 9000
linkspeed: auto
# Is this link supporting multiple layer 2 networks?
trunking:
mode: tagged
mode: 802.1q
default_network: mgmt
---
apiVersion: 'v1.0'

View File

@ -48,7 +48,8 @@ spec:
- network: public
address: 172.16.3.20
metadata:
roles: os_ctl
tags:
- os_ctl
rack: rack01
---
apiVersion: 'v1.0'

View File

@ -182,15 +182,16 @@ spec:
# Map hardware addresses to aliases/roles to allow a mix of hardware configs
# in a site to result in a consistent configuration
device_aliases:
pci:
- address: pci@0000:00:03.0
alias: prim_nic01
- address: 0000:00:03.0
alias: prim_nic01
# type could identify expected hardware - used for hardware manifest validation
type: '82540EM Gigabit Ethernet Controller'
- address: pci@0000:00:04.0
alias: prim_nic02
type: '82540EM Gigabit Ethernet Controller'
scsi:
- address: scsi@2:0.0.0
alias: primary_boot
type: 'VBOX HARDDISK'
dev_type: '82540EM Gigabit Ethernet Controller'
bus_type: 'pci'
- address: 0000:00:04.0
alias: prim_nic02
dev_type: '82540EM Gigabit Ethernet Controller'
bus_type: 'pci'
- address: 2:0.0.0
alias: primary_boot
dev_type: 'VBOX HARDDISK'
bus_type: 'scsi'

View File

@ -10,11 +10,11 @@ metadata:
description: Describe layer 1 attributes. Primary key is 'name'. These settings will generally be things the switch and server have to agree on
spec:
bonding:
mode: none
mode: disabled
mtu: 1500
linkspeed: 100full
trunking:
mode: none
mode: disabled
default_network: oob
---
# pxe is a bit of 'magic' indicating the link config used when PXE booting
@ -31,14 +31,14 @@ metadata:
description: Describe layer 1 attributes. Primary key is 'name'. These settings will generally be things the switch and server have to agree on
spec:
bonding:
mode: none
mode: disabled
mtu: 1500
linkspeed: auto
# Is this link supporting multiple layer 2 networks?
# none is a port-based VLAN identified by default_network
# tagged is using 802.1q VLAN tagging. Untagged packets will default to default_network
trunking:
mode: none
mode: disabled
# use name, will translate to VLAN ID
default_network: pxe
---
@ -61,17 +61,14 @@ spec:
# balance-rr
# Can add support for others down the road
bonding:
mode: 802.3ad
mode: '802.3ad'
# For LACP (802.3ad) xmit hashing policy: layer2, layer2+3, layer3+4, encap3+4
hash: layer3+4
# 802.3ad specific options
peer_rate: slow
mon_rate: default
up_delay: default
down_delay: default
mtu: 9000
linkspeed: auto
# Is this link supporting multiple layer 2 networks?
trunking:
mode: tagged
mode: '802.1q'
default_network: mgmt

227
tests/yaml_samples/oob.yaml Normal file
View File

@ -0,0 +1,227 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################
#
# bootstrap_seed.yaml - Site server design definition for physical layer
#
####################
# version the schema in this file so consumers can rationally parse it
---
apiVersion: 'v1.0'
kind: Region
metadata:
name: sitename
date: 17-FEB-2017
description: Sample site design
author: sh8121@att.com
# Not sure if we have site wide data that doesn't fall into another 'Kind'
---
apiVersion: 'v1.0'
kind: NetworkLink
metadata:
name: oob
region: sitename
date: 17-FEB-2017
author: sh8121@att.com
description: Describe layer 1 attributes. Primary key is 'name'. These settings will generally be things the switch and server have to agree on
spec:
bonding:
mode: disabled
mtu: 1500
linkspeed: 100full
trunking:
mode: disabled
default_network: oob
---
apiVersion: 'v1.0'
kind: Network
metadata:
name: oob
region: sitename
date: 17-FEB-2017
author: sh8121@att.com
description: Describe layer 2/3 attributes. Primarily CIs used for configuring server interfaces
spec:
allocation: static
cidr: 172.16.100.0/24
ranges:
- type: static
start: 172.16.100.15
end: 172.16.100.254
dns:
domain: ilo.sitename.att.com
servers: 172.16.100.10
---
apiVersion: 'v1.0'
kind: HostProfile
metadata:
name: defaults
region: sitename
date: 17-FEB-2017
author: sh8121@att.com
description: Describe layer 2/3 attributes. Primarily CIs used for configuring server interfaces
# No magic to this host_profile, it just provides a way to specify
# sitewide settings. If it is absent from a node's inheritance chain
# then these values will NOT be applied
spec:
# OOB (iLO, iDRAC, etc...) settings. Should prefer open standards such
# as IPMI over vendor-specific when possible.
oob:
type: ipmi
# OOB networking should be preconfigured, but we can include a network
# definition for validation or enhancement (DNS registration)
network: oob
account: admin
credential: admin
# Specify storage layout of base OS. Ceph out of scope
storage:
# How storage should be carved up: lvm (logical volumes), flat
# (single partition)
layout: lvm
# Info specific to the boot and root disk/partitions
bootdisk:
# Device will specify an alias defined in hwdefinition.yaml
device: primary_boot
# For LVM, the size of the partition added to VG as a PV
# For flat, the size of the partition formatted as ext4
root_size: 50g
# The /boot partition. If not specified, /boot will reside in root
boot_size: 2g
# Info for additional partitions. Need to balance between
# flexibility and complexity
partitions:
- name: logs
device: primary_boot
# Partition uuid if needed
part_uuid: 84db9664-f45e-11e6-823d-080027ef795a
size: 10g
# Optional, can carve up unformatted block devices
mountpoint: /var/log
fstype: ext4
mount_options: defaults
# Filesystem UUID or label can be specified. UUID recommended
fs_uuid: cdb74f1c-9e50-4e51-be1d-068b0e9ff69e
fs_label: logs
# Platform (Operating System) settings
platform:
image: ubuntu_16.04_hwe
kernel_params: default
# Additional metadata to apply to a node
metadata:
# Base URL of the introspection service - may go in curtin data
introspection_url: http://172.16.1.10:9090
---
apiVersion: 'v1.0'
kind: HostProfile
metadata:
name: k8-node
region: sitename
date: 17-FEB-2017
author: sh8121@att.com
description: Describe layer 2/3 attributes. Primarily CIs used for configuring server interfaces
spec:
# host_profile inheritance allows for deduplication of common CIs
# Inheritance is additive for CIs that are lists of multiple items
# To remove an inherited list member, prefix the primary key value
# with '!'.
host_profile: defaults
# Hardware profile will map hardware specific details to the abstract
# names uses in the host profile as well as specify hardware specific
# configs. A viable model should be to build a host profile without a
# hardware_profile and then for each node inherit the host profile and
# specify a hardware_profile to map that node's hardware to the abstract
# settings of the host_profile
hardware_profile: HPGen9v3
metadata:
# Explicit tag assignment
tags:
- 'test'
# MaaS supports key/value pairs. Not sure of the use yet
owner_data:
foo: bar
---
apiVersion: 'v1.0'
kind: BaremetalNode
metadata:
name: node01
region: sitename
date: 17-FEB-2017
author: sh8121@att.com
description: Describe layer 2/3 attributes. Primarily CIs used for configuring server interfaces
spec:
host_profile: k8-node
addressing:
- network: oob
address: 172.16.100.20
metadata:
rack: rack01
tags:
- 'odd'
---
apiVersion: 'v1.0'
kind: BaremetalNode
metadata:
name: node02
region: sitename
date: 17-FEB-2017
author: sh8121@att.com
description: Describe layer 2/3 attributes. Primarily CIs used for configuring server interfaces
spec:
host_profile: k8-node
addressing:
- network: oob
address: 172.16.100.21
metadata:
rack: rack01
tags:
- 'even'
---
apiVersion: 'v1.0'
kind: HardwareProfile
metadata:
name: HPGen9v3
region: sitename
date: 17-FEB-2017
author: Scott Hussey
spec:
# Vendor of the server chassis
vendor: HP
# Generation of the chassis model
generation: '8'
# Version of the chassis model within its generation - not version of the hardware definition
hw_version: '3'
# The certified version of the chassis BIOS
bios_version: '2.2.3'
# Mode of the default boot of hardware - bios, uefi
boot_mode: bios
# Protocol of boot of the hardware - pxe, usb, hdd
bootstrap_protocol: pxe
# Which interface to use for network booting within the OOB manager, not OS device
pxe_interface: 0
# Map hardware addresses to aliases/roles to allow a mix of hardware configs
# in a site to result in a consistent configuration
device_aliases:
- address: 0000:00:03.0
alias: prim_nic01
# type could identify expected hardware - used for hardware manifest validation
dev_type: '82540EM Gigabit Ethernet Controller'
bus_type: 'pci'
- address: 0000:00:04.0
alias: prim_nic02
dev_type: '82540EM Gigabit Ethernet Controller'
bus_type: 'pci'
- address: 2:0.0.0
alias: primary_boot
dev_type: 'VBOX HARDDISK'
bus_type: 'scsi'

View File

@ -25,15 +25,16 @@ spec:
# Map hardware addresses to aliases/roles to allow a mix of hardware configs
# in a site to result in a consistent configuration
device_aliases:
pci:
- address: pci@0000:00:03.0
alias: prim_nic01
- address: 0000:00:03.0
alias: prim_nic01
# type could identify expected hardware - used for hardware manifest validation
type: '82540EM Gigabit Ethernet Controller'
- address: pci@0000:00:04.0
alias: prim_nic02
type: '82540EM Gigabit Ethernet Controller'
scsi:
- address: scsi@2:0.0.0
alias: primary_boot
type: 'VBOX HARDDISK'
dev_type: '82540EM Gigabit Ethernet Controller'
bus_type: 'pci'
- address: 0000:00:04.0
alias: prim_nic02
dev_type: '82540EM Gigabit Ethernet Controller'
bus_type: 'pci'
- address: 2:0.0.0
alias: primary_boot
dev_type: 'VBOX HARDDISK'
bus_type: 'scsi'