Libvirt OOB driver

- Create a driver to support OOB actions via libvirt API
- Update Makefile with external dependency target
- Update Makefile and tooling to support new chart pipeline
   - Add 'drydock' make target for chart building
   - Add step to install helm binary

Change-Id: I8a3984d8fd70f99a82a954b7a869eab8e30145b4
This commit is contained in:
Scott Hussey 2018-04-04 17:31:49 -05:00
parent cd9770a979
commit dbad75775b
14 changed files with 1146 additions and 12 deletions

View File

@ -12,11 +12,12 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
BUILD_DIR := $(shell mktemp -d)
DOCKER_REGISTRY ?= quay.io DOCKER_REGISTRY ?= quay.io
IMAGE_NAME ?= drydock IMAGE_NAME ?= drydock
IMAGE_PREFIX ?= attcomdev IMAGE_PREFIX ?= attcomdev
IMAGE_TAG ?= latest IMAGE_TAG ?= latest
HELM ?= helm HELM := $(BUILD_DIR)/helm
PROXY ?= http://one.proxy.att.com:8080 PROXY ?= http://one.proxy.att.com:8080
USE_PROXY ?= false USE_PROXY ?= false
PUSH_IMAGE ?= false PUSH_IMAGE ?= false
@ -34,21 +35,36 @@ run_images: run_drydock
# Run tests # Run tests
.PHONY: tests .PHONY: tests
tests: coverage_test tests: external_dep pep8 security docs unit_tests
# Install external (not managed by tox/pip) dependencies
.PHONY: external_dep
external_dep:
sudo ./hostdeps.sh
# Run unit and Postgres integration tests in coverage mode # Run unit and Postgres integration tests in coverage mode
.PHONY: coverage_test .PHONY: coverage_test
coverage_test: build_drydock coverage_test: build_drydock external_dep
tox -e coverage tox -re coverage
# Run just unit tests
.PHONY: unit_tests
unit_tests:
tox -re unit
# Run the drydock container and exercise simple tests # Run the drydock container and exercise simple tests
.PHONY: run_drydock .PHONY: run_drydock
run_drydock: build_drydock run_drydock: build_drydock
tools/drydock_image_run.sh tools/drydock_image_run.sh
# It seems CICD expects the target 'drydock' to
# build the chart
.PHONY: drydock
drydock: charts
# Create tgz of the chart # Create tgz of the chart
.PHONY: charts .PHONY: charts
charts: clean charts: clean helm-init
$(HELM) dep up charts/drydock $(HELM) dep up charts/drydock
$(HELM) package charts/drydock $(HELM) package charts/drydock
@ -58,18 +74,27 @@ lint: pep8 helm_lint
# Dry run templating of chart # Dry run templating of chart
.PHONY: dry-run .PHONY: dry-run
dry-run: clean dry-run: clean helm-init
tools/helm_tk.sh $(HELM)
$(HELM) template --set manifests.secret_ssh_key=true --set conf.ssh.private_key=foo charts/drydock $(HELM) template --set manifests.secret_ssh_key=true --set conf.ssh.private_key=foo charts/drydock
# Initialize local helm config
.PHONY: helm-init
helm-init: helm-install
tools/helm_tk.sh $(HELM)
# Install helm binary
.PHONY: helm-install
helm-install:
tools/helm_install.sh $(HELM)
# Make targets intended for use by the primary targets above. # Make targets intended for use by the primary targets above.
.PHONY: build_drydock .PHONY: build_drydock
build_drydock: build_drydock:
ifeq ($(USE_PROXY), true) ifeq ($(USE_PROXY), true)
docker build -t $(IMAGE) --label $(LABEL) -f images/drydock/Dockerfile . --build-arg http_proxy=$(PROXY) --build-arg https_proxy=$(PROXY) docker build --network host -t $(IMAGE) --label $(LABEL) -f images/drydock/Dockerfile . --build-arg http_proxy=$(PROXY) --build-arg https_proxy=$(PROXY)
else else
docker build -t $(IMAGE) --label $(LABEL) -f images/drydock/Dockerfile . docker build --network host -t $(IMAGE) --label $(LABEL) -f images/drydock/Dockerfile .
endif endif
ifeq ($(PUSH_IMAGE), true) ifeq ($(PUSH_IMAGE), true)
docker push $(IMAGE) docker push $(IMAGE)
@ -78,12 +103,17 @@ endif
.PHONY: docs .PHONY: docs
docs: clean drydock_docs docs: clean drydock_docs
.PHONY: security
security:
tox -e bandit
.PHONY: drydock_docs .PHONY: drydock_docs
drydock_docs: drydock_docs:
tox -e docs tox -e docs
.PHONY: clean .PHONY: clean
clean: clean:
rm -rf $(BUILD_DIR)/*
rm -rf build rm -rf build
rm -rf docs/build rm -rf docs/build
rm -rf charts/drydock/charts rm -rf charts/drydock/charts

View File

@ -0,0 +1,13 @@
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@ -0,0 +1,377 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Driver for controlling OOB interface via libvirt api."""
import time
import libvirt
from urllib.parse import urlparse
import defusedxml.ElementTree as ET
from drydock_provisioner.orchestrator.actions.orchestrator import BaseAction
import drydock_provisioner.error as errors
import drydock_provisioner.objects.fields as hd_fields
class LibvirtBaseAction(BaseAction):
    """Base action for OOB operations executed via the libvirt API."""

    def init_session(self, node):
        """Initialize a libvirt session to the node hypervisor.

        :param node: instance of objects.BaremetalNode
        :return: an open libvirt connection to the node's hypervisor
        :raises DriverError: if the node OOB configuration is missing or
            invalid, or the libvirt connection cannot be established
        """
        if node.oob_type != 'libvirt':
            raise errors.DriverError(
                "Node OOB type %s is not 'libvirt'" % node.oob_type)

        virsh_url = node.oob_parameters.get('libvirt_uri', None)

        if not virsh_url:
            # Fixed message: the OOB parameter key is 'libvirt_uri',
            # not 'libvirt_url'
            raise errors.DriverError("Node %s has no 'libvirt_uri' defined" %
                                     (node.name))

        url_parts = urlparse(virsh_url)

        if url_parts.scheme != "qemu+ssh":
            raise errors.DriverError(
                "Node %s has invalid libvirt URL scheme %s. "
                "Only 'qemu+ssh' supported." % (node.name, url_parts.scheme))

        self.logger.debug("Starting libvirt session to hypervisor %s " %
                          (virsh_url))
        virsh_ses = libvirt.open(virsh_url)

        if not virsh_ses:
            raise errors.DriverError(
                "Unable to establish libvirt session to %s." % virsh_url)

        return virsh_ses

    def set_node_pxe(self, node):
        """Set a node to PXE boot first.

        Rewrites the inactive domain XML so the boot order is
        'network' then 'hd'.

        :param node: instance of objects.BaremetalNode
        """
        ses = self.init_session(node)
        domain = ses.lookupByName(node.name)
        domain_xml = domain.XMLDesc(libvirt.VIR_DOMAIN_XML_SECURE
                                    | libvirt.VIR_DOMAIN_XML_INACTIVE)

        xmltree = ET.fromstring(domain_xml)

        # Delete all the current boot entries
        os_tree = xmltree.find("./os")
        boot_elements = os_tree.findall("./boot")
        for e in boot_elements:
            os_tree.remove(e)

        # Now apply our boot order which is 'network' and then 'hd'
        os_tree.append(ET.fromstring("<boot dev='network' />"))
        os_tree.append(ET.fromstring("<boot dev='hd' />"))

        # And now save the new XML def to the hypervisor
        domain_xml = ET.tostring(xmltree, encoding="utf-8")
        ses.defineXML(domain_xml.decode('utf-8'))
        ses.close()

    def get_node_status(self, node):
        """Get node power status via the libvirt API.

        :param node: instance of objects.BaremetalNode
        :return: truthy when the domain is active (powered on)
        """
        # Open the session outside the try block so the finally clause
        # never references an unbound ``ses`` when init_session raises.
        ses = self.init_session(node)
        try:
            domain = ses.lookupByName(node.name)
            status = domain.isActive()
        finally:
            ses.close()

        return status

    def poweroff_node(self, node):
        """Power off a node.

        Destroys the running domain, then polls up to 30 seconds for the
        domain to report inactive.

        :param node: instance of objects.BaremetalNode
        :raises DriverError: if the domain is still active after polling
        """
        ses = self.init_session(node)
        # Single try/finally so the session is closed on every path,
        # including the early return when the node is already off
        # (the original leaked the session in that branch).
        try:
            domain = ses.lookupByName(node.name)

            if domain.isActive():
                domain.destroy()
            else:
                self.logger.debug("Node already powered off.")
                return

            i = 3
            while i > 0:
                self.logger.debug("Polling powerstate waiting for success.")
                power_state = domain.isActive()
                if not power_state:
                    return
                time.sleep(10)
                i = i - 1
            raise errors.DriverError("Power state never matched off")
        finally:
            ses.close()

    def poweron_node(self, node):
        """Power on a node.

        Creates (starts) the domain, then polls up to 30 seconds for the
        domain to report active.

        :param node: instance of objects.BaremetalNode
        :raises DriverError: if the domain is still inactive after polling
        """
        ses = self.init_session(node)
        # Same session-safety structure as poweroff_node.
        try:
            domain = ses.lookupByName(node.name)

            if not domain.isActive():
                domain.create()
            else:
                self.logger.debug("Node already powered on.")
                return

            i = 3
            while i > 0:
                self.logger.debug("Polling powerstate waiting for success.")
                power_state = domain.isActive()
                if power_state:
                    return
                time.sleep(10)
                i = i - 1
            raise errors.DriverError("Power state never matched on")
        finally:
            ses.close()
class ValidateOobServices(LibvirtBaseAction):
    """Action to validate OOB services are available."""

    def start(self):
        """Run the action.

        The libvirt OOB driver needs no external services, so the task
        is immediately marked complete and successful.
        """
        self.task.add_status_msg(
            msg="OOB does not require services.",
            error=False,
            ctx='NA',
            ctx_type='NA')
        self.task.set_status(hd_fields.TaskStatus.Complete)
        self.task.success()
        self.task.save()

        return
class ConfigNodePxe(LibvirtBaseAction):
    """Action to configure PXE booting via OOB."""

    def start(self):
        """Run the action.

        Libvirt exposes no PXE options to configure out-of-band, so an
        error status message is recorded for every filtered node and the
        task is deliberately marked failed.
        """
        self.task.set_status(hd_fields.TaskStatus.Running)
        self.task.save()

        design_status, site_design = self.orchestrator.get_effective_site(
            self.task.design_ref)
        node_list = self.orchestrator.process_node_filter(
            self.task.node_filter, site_design)

        for n in node_list:
            self.task.add_status_msg(
                msg="Libvirt doesn't configure PXE options.",
                error=True,
                ctx=n.name,
                ctx_type='node')
        self.task.set_status(hd_fields.TaskStatus.Complete)
        self.task.failure()
        self.task.save()

        return
class SetNodeBoot(LibvirtBaseAction):
    """Action to configure a node to PXE boot."""

    def start(self):
        """Set each filtered node's domain to PXE (network) boot first.

        Per-node success/failure is recorded on the task via status
        messages and focus markers.
        """
        self.task.set_status(hd_fields.TaskStatus.Running)
        self.task.save()

        design_status, site_design = self.orchestrator.get_effective_site(
            self.task.design_ref)
        node_list = self.orchestrator.process_node_filter(
            self.task.node_filter, site_design)

        for n in node_list:
            self.logger.debug("Setting bootdev to PXE for %s" % n.name)

            self.task.add_status_msg(
                msg="Setting node to PXE boot.",
                error=False,
                ctx=n.name,
                ctx_type='node')
            try:
                # Delegates the domain XML rewrite to the base action
                self.set_node_pxe(n)
            except Exception as ex:
                self.task.add_status_msg(
                    msg="Unable to set bootdev to PXE: %s" % str(ex),
                    error=True,
                    ctx=n.name,
                    ctx_type='node')
                self.task.failure(focus=n.name)
                self.logger.warning("Unable to set node %s to PXE boot." %
                                    (n.name))
            else:
                self.task.add_status_msg(
                    msg="Set bootdev to PXE.",
                    error=False,
                    ctx=n.name,
                    ctx_type='node')
                self.logger.debug("%s reports bootdev of network" % n.name)
                self.task.success(focus=n.name)

        self.task.set_status(hd_fields.TaskStatus.Complete)
        self.task.save()

        return
class PowerOffNode(LibvirtBaseAction):
    """Action to power off a node via libvirt API."""

    def start(self):
        """Power off every filtered node's domain.

        Per-node success/failure is recorded on the task via status
        messages and focus markers.
        """
        self.task.set_status(hd_fields.TaskStatus.Running)
        self.task.save()

        design_status, site_design = self.orchestrator.get_effective_site(
            self.task.design_ref)
        node_list = self.orchestrator.process_node_filter(
            self.task.node_filter, site_design)

        for n in node_list:
            msg = "Shutting down domain %s" % n.name
            self.logger.debug(msg)
            self.task.add_status_msg(
                msg=msg, error=False, ctx=n.name, ctx_type='node')
            try:
                # Base action destroys the domain and polls until inactive
                self.poweroff_node(n)
            except Exception as ex:
                msg = "Node failed to power off: %s" % str(ex)
                self.task.add_status_msg(
                    msg=msg, error=True, ctx=n.name, ctx_type='node')
                self.logger.error(msg)
                self.task.failure(focus=n.name)
            else:
                msg = "Node %s powered off." % n.name
                self.task.add_status_msg(
                    msg=msg, error=False, ctx=n.name, ctx_type='node')
                self.logger.debug(msg)
                self.task.success(focus=n.name)

        self.task.set_status(hd_fields.TaskStatus.Complete)
        self.task.save()

        return
class PowerOnNode(LibvirtBaseAction):
    """Action to power on a node via libvirt API."""

    def start(self):
        """Power on every filtered node's domain.

        Per-node success/failure is recorded on the task via status
        messages and focus markers.
        """
        self.task.set_status(hd_fields.TaskStatus.Running)
        self.task.save()

        design_status, site_design = self.orchestrator.get_effective_site(
            self.task.design_ref)
        node_list = self.orchestrator.process_node_filter(
            self.task.node_filter, site_design)

        for n in node_list:
            msg = "Starting domain %s" % n.name
            self.logger.debug(msg)
            self.task.add_status_msg(
                msg=msg, error=False, ctx=n.name, ctx_type='node')
            try:
                # Base action creates the domain and polls until active
                self.poweron_node(n)
            except Exception as ex:
                msg = "Node failed to power on: %s" % str(ex)
                self.task.add_status_msg(
                    msg=msg, error=True, ctx=n.name, ctx_type='node')
                self.logger.error(msg)
                self.task.failure(focus=n.name)
            else:
                msg = "Node %s powered on." % n.name
                self.task.add_status_msg(
                    msg=msg, error=False, ctx=n.name, ctx_type='node')
                self.logger.debug(msg)
                self.task.success(focus=n.name)

        self.task.set_status(hd_fields.TaskStatus.Complete)
        self.task.save()

        return
class PowerCycleNode(LibvirtBaseAction):
    """Action to hard powercycle a node via the libvirt API."""

    def start(self):
        """Power cycle (off, then on) every filtered node's domain.

        Per-node success/failure is recorded on the task via status
        messages and focus markers.
        """
        self.task.set_status(hd_fields.TaskStatus.Running)
        self.task.save()

        design_status, site_design = self.orchestrator.get_effective_site(
            self.task.design_ref)
        node_list = self.orchestrator.process_node_filter(
            self.task.node_filter, site_design)

        for n in node_list:
            msg = ("Power cycling domain for node %s" % n.name)
            self.logger.debug(msg)
            self.task.add_status_msg(
                msg=msg, error=False, ctx=n.name, ctx_type='node')
            try:
                # Sequential off/on; each helper polls for the target state
                self.poweroff_node(n)
                self.poweron_node(n)
            except Exception as ex:
                msg = "Node failed to power cycle: %s" % str(ex)
                self.task.add_status_msg(
                    msg=msg, error=True, ctx=n.name, ctx_type='node')
                self.logger.error(msg)
                self.task.failure(focus=n.name)
            else:
                msg = "Node %s power cycled." % n.name
                self.task.add_status_msg(
                    msg=msg, error=False, ctx=n.name, ctx_type='node')
                self.logger.debug(msg)
                self.task.success(focus=n.name)

        self.task.set_status(hd_fields.TaskStatus.Complete)
        self.task.save()

        return
class InterrogateOob(LibvirtBaseAction):
    """Action to complete a basic interrogation of the node libvirt API."""

    def start(self):
        """Query and report the power status of each filtered node.

        Per-node success/failure is recorded on the task via status
        messages and focus markers.
        """
        self.task.set_status(hd_fields.TaskStatus.Running)
        self.task.save()

        design_status, site_design = self.orchestrator.get_effective_site(
            self.task.design_ref)
        node_list = self.orchestrator.process_node_filter(
            self.task.node_filter, site_design)

        for n in node_list:
            try:
                node_status = self.get_node_status(n)
            except Exception as ex:
                # Fixed typo in the reported message ('tatus' -> 'status')
                msg = "Node failed status check: %s" % str(ex)
                self.task.add_status_msg(
                    msg=msg, error=True, ctx=n.name, ctx_type='node')
                self.logger.error(msg)
                self.task.failure(focus=n.name)
            else:
                msg = "Node %s status is %s." % (n.name, node_status)
                self.task.add_status_msg(
                    msg=msg, error=False, ctx=n.name, ctx_type='node')
                self.logger.debug(msg)
                self.task.success(focus=n.name)

        self.task.set_status(hd_fields.TaskStatus.Complete)
        self.task.save()

        return

View File

@ -0,0 +1,156 @@
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Driver for controlling libvirt domains."""
import uuid
import logging
import concurrent.futures
from oslo_config import cfg
import drydock_provisioner.error as errors
import drydock_provisioner.config as config
import drydock_provisioner.objects.fields as hd_fields
import drydock_provisioner.drivers.oob.driver as oob_driver
import drydock_provisioner.drivers.driver as generic_driver
from .actions.oob import ValidateOobServices
from .actions.oob import ConfigNodePxe
from .actions.oob import SetNodeBoot
from .actions.oob import PowerOffNode
from .actions.oob import PowerOnNode
from .actions.oob import PowerCycleNode
from .actions.oob import InterrogateOob
class LibvirtDriver(oob_driver.OobDriver):
    """Driver for executing OOB actions via libvirt API."""

    # oslo.config options registered under the driver's config group
    libvirt_driver_options = [
        cfg.IntOpt(
            'poll_interval',
            default=10,
            help='Polling interval in seconds for querying libvirt status'),
    ]

    oob_types_supported = ['libvirt']

    driver_name = "libvirt_driver"
    driver_key = "libvirt_driver"
    driver_desc = "Libvirt OOB Driver"

    # Maps orchestrator actions to the action classes implementing them
    action_class_map = {
        hd_fields.OrchestratorAction.ValidateOobServices: ValidateOobServices,
        hd_fields.OrchestratorAction.ConfigNodePxe: ConfigNodePxe,
        hd_fields.OrchestratorAction.SetNodeBoot: SetNodeBoot,
        hd_fields.OrchestratorAction.PowerOffNode: PowerOffNode,
        hd_fields.OrchestratorAction.PowerOnNode: PowerOnNode,
        hd_fields.OrchestratorAction.PowerCycleNode: PowerCycleNode,
        hd_fields.OrchestratorAction.InterrogateOob: InterrogateOob,
    }

    def __init__(self, **kwargs):
        """Register driver config options and set up the driver logger."""
        super().__init__(**kwargs)

        cfg.CONF.register_opts(
            LibvirtDriver.libvirt_driver_options,
            group=LibvirtDriver.driver_key)

        self.logger = logging.getLogger(
            config.config_mgr.conf.logging.oobdriver_logger_name)

    def execute_task(self, task_id):
        """Execute the action for ``task_id``, fanning out one subtask per node.

        Subtasks run concurrently on a thread pool and are given
        ``drydock_timeout`` minutes to finish before being marked failed.

        :param task_id: identifier of a task known to the state manager
        :raises DriverError: for an unknown task or unsupported action
        """
        task = self.state_manager.get_task(task_id)

        if task is None:
            self.logger.error("Invalid task %s" % (task_id))
            raise errors.DriverError("Invalid task %s" % (task_id))

        if task.action not in self.supported_actions:
            self.logger.error("Driver %s doesn't support task action %s" %
                              (self.driver_desc, task.action))
            raise errors.DriverError(
                "Driver %s doesn't support task action %s" % (self.driver_desc,
                                                              task.action))

        task.set_status(hd_fields.TaskStatus.Running)
        task.save()

        target_nodes = self.orchestrator.get_target_nodes(task)

        with concurrent.futures.ThreadPoolExecutor(max_workers=16) as e:
            subtask_futures = dict()
            for n in target_nodes:
                sub_nf = self.orchestrator.create_nodefilter_from_nodelist([n])
                subtask = self.orchestrator.create_task(
                    action=task.action,
                    design_ref=task.design_ref,
                    node_filter=sub_nf)
                task.register_subtask(subtask)
                self.logger.debug(
                    "Starting Libvirt subtask %s for action %s on node %s" %
                    (str(subtask.get_id()), task.action, n.name))

                action_class = self.action_class_map.get(task.action, None)
                if action_class is None:
                    self.logger.error(
                        "Could not find action resource for action %s" %
                        task.action)
                    # Fixed: fail the task being executed; the driver has no
                    # ``self.task`` attribute, so the original raised
                    # AttributeError instead of recording the failure.
                    task.failure()
                    break
                action = action_class(subtask, self.orchestrator,
                                      self.state_manager)
                subtask_futures[subtask.get_id().bytes] = e.submit(
                    action.start)

            timeout = config.config_mgr.conf.timeouts.drydock_timeout
            finished, running = concurrent.futures.wait(
                subtask_futures.values(), timeout=(timeout * 60))

            for t, f in subtask_futures.items():
                if not f.done():
                    # Fixed: interpolate the subtask id so the '%s' in the
                    # message is actually filled in.
                    task.add_status_msg(
                        msg="Subtask %s timed out before completing." % str(
                            uuid.UUID(bytes=t)),
                        error=True,
                        ctx=str(uuid.UUID(bytes=t)),
                        ctx_type='task')
                    task.failure()
                else:
                    if f.exception():
                        self.logger.error(
                            "Uncaught exception in subtask %s" % str(
                                uuid.UUID(bytes=t)),
                            exc_info=f.exception())
        task.align_result()
        task.bubble_results()
        task.set_status(hd_fields.TaskStatus.Complete)
        task.save()

        return
class LibvirtActionRunner(generic_driver.DriverActionRunner):
    """Threaded runner for a Libvirt Action."""

    def __init__(self, **kwargs):
        """Set up the runner, attaching the OOB driver logger."""
        super().__init__(**kwargs)

        self.logger = logging.getLogger(
            config.config_mgr.conf.logging.oobdriver_logger_name)
def list_opts():
    """List the oslo.config options this driver registers.

    :return: dict mapping the driver's config group name to its option list
    """
    return {LibvirtDriver.driver_key: LibvirtDriver.libvirt_driver_options}

9
hostdeps.sh Executable file
View File

@ -0,0 +1,9 @@
#!/bin/bash
# Install host-level package dependencies
# needed for local testing

if [[ ! -z $(uname -a | grep Ubuntu) ]]
then
  # Install everything listed in requirements-host.txt, skipping comments
  apt install -y --no-install-recommends $(grep -v '^#' requirements-host.txt)
else
  # Fail loudly so callers (e.g. the make target) don't report success
  # on an unsupported host.
  echo "Only support testing on Ubuntu hosts at this time."
  exit 1
fi

View File

@ -13,13 +13,15 @@
# limitations under the License. # limitations under the License.
FROM python:3.5 FROM python:3.5
ENV DEBIAN_FRONTEND noninteractive
ENV container docker ENV container docker
ENV PORT 9000 ENV PORT 9000
ENV LC_ALL C.UTF-8 ENV LC_ALL C.UTF-8
ENV LANG C.UTF-8 ENV LANG C.UTF-8
# Copy direct dependency requirements only to build a dependency layer # Copy direct dependency requirements only to build a dependency layer
RUN DEBIAN_FRONTEND=noninteractive apt update && \
apt install -y libvirt-dev --no-install-recommends
COPY ./requirements-lock.txt /tmp/drydock/ COPY ./requirements-lock.txt /tmp/drydock/
RUN pip3 install \ RUN pip3 install \
--no-cache-dir \ --no-cache-dir \

View File

@ -22,3 +22,4 @@ jsonschema==2.6.0
jinja2==2.9.6 jinja2==2.9.6
ulid2==0.1.1 ulid2==0.1.1
defusedxml===0.5.0 defusedxml===0.5.0
libvirt-python==3.10.0

8
requirements-host.txt Normal file
View File

@ -0,0 +1,8 @@
# These are host packages needed for Drydock
# that don't come on a minimal Ubuntu install
libvirt-dev
pkg-config
python3-dev
python-tox
docker.io
gcc

View File

@ -20,6 +20,7 @@ jsonschema==2.6.0
keystoneauth1==2.13.0 keystoneauth1==2.13.0
keystonemiddleware==4.9.1 keystonemiddleware==4.9.1
kombu==4.1.0 kombu==4.1.0
libvirt-python==3.10.0
Mako==1.0.7 Mako==1.0.7
MarkupSafe==1.0 MarkupSafe==1.0
monotonic==1.5 monotonic==1.5

View File

@ -0,0 +1,69 @@
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import libvirt
import pytest
from drydock_provisioner.error import DriverError
from drydock_provisioner.drivers.oob.libvirt_driver.actions.oob import LibvirtBaseAction
LOG = logging.getLogger(__name__)
class TestLibvirtOobDriver():
    """Unit tests for libvirt OOB driver session initialization."""

    def test_libvirt_init_session(self, mocker, deckhand_orchestrator,
                                  input_files, setup):
        """Test session initialization against a valid libvirt URI."""
        # Stub out the real libvirt connection
        mocker.patch('libvirt.open')

        input_file = input_files.join("deckhand_fullsite_libvirt.yaml")
        design_ref = "file://%s" % str(input_file)

        design_status, design_data = deckhand_orchestrator.get_effective_site(
            design_ref)

        action = LibvirtBaseAction(None, None, None)

        # controller01 should have valid libvirt OOB description
        node = design_data.get_baremetal_node('controller01')
        LOG.debug("%s", str(node.obj_to_simple()))

        action.init_session(node)

        # The session must be opened with the node's configured URI
        expected_calls = [mocker.call('qemu+ssh://dummy@somehost/system')]
        libvirt.open.assert_has_calls(expected_calls)

    def test_libvirt_invalid_uri(self, mocker, deckhand_orchestrator,
                                 input_files, setup):
        """Test that an unsupported libvirt URI scheme is rejected."""
        mocker.patch('libvirt.open')

        input_file = input_files.join("deckhand_fullsite_libvirt.yaml")
        design_ref = "file://%s" % str(input_file)

        design_status, design_data = deckhand_orchestrator.get_effective_site(
            design_ref)

        action = LibvirtBaseAction(None, None, None)

        # compute01 should have invalid libvirt OOB description
        node = design_data.get_baremetal_node('compute01')
        LOG.debug("%s", str(node.obj_to_simple()))

        with pytest.raises(DriverError):
            action.init_session(node)

View File

@ -0,0 +1,424 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################
#
# deckhand_fullsite_libvirt.yaml - Full site definition in Deckhand format for libvirt based nodes
#
####################
---
schema: 'drydock/Region/v1'
metadata:
schema: 'metadata/Document/v1'
name: 'sitename'
storagePolicy: 'cleartext'
labels:
application: 'drydock'
data:
tag_definitions:
- tag: 'test'
definition_type: 'lshw_xpath'
definition: "//node[@id=\"display\"]/'clock units=\"Hz\"' > 1000000000"
authorized_keys:
- |
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDENeyO5hLPbLLQRZ0oafTYWs1ieo5Q+XgyZQs51Ju
jDGc8lKlWsg1/6yei2JewKMgcwG2Buu1eqU92Xn1SvMZLyt9GZURuBkyjcfVc/8GiU5QP1Of8B7CV0c
kfUpHWYJ17olTzT61Hgz10ioicBF6cjgQrLNcyn05xoaJHD2Vpf8Unxzi0YzA2e77yRqBo9jJVRaX2q
wUJuZrzb62x3zw8Knz6GGSZBn8xRKLaw1SKFpd1hwvL62GfqX5ZBAT1AYTZP1j8GcAoK8AFVn193SEU
vjSdUFa+RNWuJhkjBRfylJczIjTIFb5ls0jpbA3bMA9DE7lFKVQl6vVwFmiIVBI1 samplekey
---
schema: 'drydock/NetworkLink/v1'
metadata:
schema: 'metadata/Document/v1'
name: pxe
storagePolicy: 'cleartext'
labels:
application: 'drydock'
data:
bonding:
mode: disabled
mtu: 1500
linkspeed: auto
trunking:
mode: disabled
default_network: pxe
allowed_networks:
- pxe
---
schema: 'drydock/NetworkLink/v1'
metadata:
schema: 'metadata/Document/v1'
name: gp
storagePolicy: 'cleartext'
labels:
application: 'drydock'
data:
bonding:
mode: 802.3ad
hash: layer3+4
peer_rate: slow
mtu: 9000
linkspeed: auto
trunking:
mode: 802.1q
default_network: mgmt
allowed_networks:
- public
- private
- mgmt
---
schema: 'drydock/Rack/v1'
metadata:
schema: 'metadata/Document/v1'
name: rack1
storagePolicy: 'cleartext'
labels:
application: 'drydock'
data:
tor_switches:
switch01name:
mgmt_ip: 1.1.1.1
sdn_api_uri: polo+https://polo-api.web.att.com/switchmgmt?switch=switch01name
switch02name:
mgmt_ip: 1.1.1.2
sdn_api_uri: polo+https://polo-api.web.att.com/switchmgmt?switch=switch02name
location:
clli: HSTNTXMOCG0
grid: EG12
local_networks:
- pxe-rack1
---
schema: 'drydock/Network/v1'
metadata:
schema: 'metadata/Document/v1'
name: pxe
storagePolicy: 'cleartext'
labels:
application: 'drydock'
data:
dhcp_relay:
self_ip: 172.16.0.4
upstream_target: 172.16.5.5
mtu: 1500
cidr: 172.16.0.0/24
ranges:
- type: dhcp
start: 172.16.0.5
end: 172.16.0.254
dns:
domain: admin.sitename.att.com
servers: 172.16.0.10
---
schema: 'drydock/Network/v1'
metadata:
schema: 'metadata/Document/v1'
name: mgmt
storagePolicy: 'cleartext'
labels:
application: 'drydock'
data:
vlan: '100'
mtu: 1500
cidr: 172.16.1.0/24
ranges:
- type: static
start: 172.16.1.15
end: 172.16.1.254
routes:
- subnet: 0.0.0.0/0
gateway: 172.16.1.1
metric: 10
dns:
domain: mgmt.sitename.example.com
servers: 172.16.1.9,172.16.1.10
---
schema: 'drydock/Network/v1'
metadata:
schema: 'metadata/Document/v1'
name: private
storagePolicy: 'cleartext'
labels:
application: 'drydock'
data:
vlan: '101'
mtu: 9000
cidr: 172.16.2.0/24
ranges:
- type: static
start: 172.16.2.15
end: 172.16.2.254
dns:
domain: priv.sitename.example.com
servers: 172.16.2.9,172.16.2.10
---
schema: 'drydock/Network/v1'
metadata:
schema: 'metadata/Document/v1'
name: public
storagePolicy: 'cleartext'
labels:
application: 'drydock'
data:
vlan: '102'
mtu: 1500
cidr: 172.16.3.0/24
ranges:
- type: static
start: 172.16.3.15
end: 172.16.3.254
routes:
- subnet: 0.0.0.0/0
gateway: 172.16.3.1
metric: 10
dns:
domain: sitename.example.com
servers: 8.8.8.8
---
schema: 'drydock/HostProfile/v1'
metadata:
schema: 'metadata/Document/v1'
name: defaults
storagePolicy: 'cleartext'
labels:
application: 'drydock'
data:
oob:
type: libvirt
libvirt_uri: qemu+ssh://dummy@somehost/system
storage:
physical_devices:
sda:
labels:
role: rootdisk
partitions:
- name: root
size: 20g
bootable: true
filesystem:
mountpoint: '/'
fstype: 'ext4'
mount_options: 'defaults'
- name: boot
size: 1g
bootable: false
filesystem:
mountpoint: '/boot'
fstype: 'ext4'
mount_options: 'defaults'
sdb:
volume_group: 'log_vg'
volume_groups:
log_vg:
logical_volumes:
- name: 'log_lv'
size: '500m'
filesystem:
mountpoint: '/var/log'
fstype: 'xfs'
mount_options: 'defaults'
hardware_profile: HPGen9v3
primary_network: mgmt
interfaces:
pxe:
device_link: pxe
labels:
noconfig: true
slaves:
- prim_nic01
networks:
- pxe
bond0:
device_link: gp
slaves:
- prim_nic01
- prim_nic02
networks:
- mgmt
- private
sriov:
vf_count: 2
trustedmode: false
platform:
image: 'xenial'
kernel: 'ga-16.04'
kernel_params:
quiet: true
console: ttyS2
metadata:
owner_data:
foo: bar
---
schema: 'drydock/BaremetalNode/v1'
metadata:
schema: 'metadata/Document/v1'
name: controller01
storagePolicy: 'cleartext'
labels:
application: 'drydock'
data:
host_profile: defaults
addressing:
- network: pxe
address: dhcp
- network: mgmt
address: 172.16.1.20
- network: public
address: 172.16.3.20
metadata:
rack: rack1
---
schema: 'drydock/BaremetalNode/v1'
metadata:
schema: 'metadata/Document/v1'
name: compute01
storagePolicy: 'cleartext'
labels:
application: 'drydock'
data:
host_profile: defaults
oob:
type: libvirt
# This is an invalid libvirt uri, use it for testing
# sanity checks
libvirt_uri: http://dummy@somehost/system
addressing:
- network: pxe
address: dhcp
- network: mgmt
address: 172.16.1.21
- network: private
address: 172.16.2.21
- network: oob
address: 172.16.100.21
platform:
kernel_params:
isolcpus: hardwareprofile:cpuset.sriov
hugepagesz: hardwareprofile:hugepages.sriov.size
hugepages: hardwareprofile:hugepages.sriov.count
metadata:
rack: rack2
---
schema: 'drydock/HardwareProfile/v1'
metadata:
schema: 'metadata/Document/v1'
name: HPGen9v3
storagePolicy: 'cleartext'
labels:
application: 'drydock'
data:
vendor: HP
generation: '8'
hw_version: '3'
bios_version: '2.2.3'
boot_mode: bios
bootstrap_protocol: pxe
pxe_interface: 0
device_aliases:
prim_nic01:
address: '0000:00:03.0'
dev_type: '82540EM Gigabit Ethernet Controller'
bus_type: 'pci'
prim_nic02:
address: '0000:00:04.0'
dev_type: '82540EM Gigabit Ethernet Controller'
bus_type: 'pci'
primary_boot:
address: '2:0.0.0'
dev_type: 'VBOX HARDDISK'
bus_type: 'scsi'
cpu_sets:
sriov: '2,4'
hugepages:
sriov:
size: '1G'
count: 300
dpdk:
size: '2M'
count: 530000
---
schema: 'drydock/BootAction/v1'
metadata:
schema: 'metadata/Document/v1'
name: hw_filtered
storagePolicy: 'cleartext'
labels:
application: 'drydock'
data:
signaling: false
node_filter:
filter_set_type: 'union'
filter_set:
- filter_type: 'union'
node_names:
- 'compute01'
assets:
- path: /var/tmp/hello.sh
type: file
permissions: '555'
data: |-
IyEvYmluL2Jhc2gKCmVjaG8gJ0hlbGxvIFdvcmxkISAtZnJvbSB7eyBub2RlLmhvc3RuYW1lIH19
Jwo=
data_pipeline:
- base64_decode
- utf8_decode
- template
- path: /lib/systemd/system/hello.service
type: unit
permissions: '600'
data: |-
W1VuaXRdCkRlc2NyaXB0aW9uPUhlbGxvIFdvcmxkCgpbU2VydmljZV0KVHlwZT1vbmVzaG90CkV4
ZWNTdGFydD0vdmFyL3RtcC9oZWxsby5zaAoKW0luc3RhbGxdCldhbnRlZEJ5PW11bHRpLXVzZXIu
dGFyZ2V0Cg==
data_pipeline:
- base64_decode
- utf8_decode
...
---
schema: 'drydock/BootAction/v1'
metadata:
schema: 'metadata/Document/v1'
name: helloworld
storagePolicy: 'cleartext'
labels:
application: 'drydock'
data:
assets:
- path: /var/tmp/hello.sh
type: file
permissions: '555'
data: |-
IyEvYmluL2Jhc2gKCmVjaG8gJ0hlbGxvIFdvcmxkISAtZnJvbSB7eyBub2RlLmhvc3RuYW1lIH19
Jwo=
data_pipeline:
- base64_decode
- utf8_decode
- template
- path: /lib/systemd/system/hello.service
type: unit
permissions: '600'
data: |-
W1VuaXRdCkRlc2NyaXB0aW9uPUhlbGxvIFdvcmxkCgpbU2VydmljZV0KVHlwZT1vbmVzaG90CkV4
ZWNTdGFydD0vdmFyL3RtcC9oZWxsby5zaAoKW0luc3RhbGxdCldhbnRlZEJ5PW11bHRpLXVzZXIu
dGFyZ2V0Cg==
data_pipeline:
- base64_decode
- utf8_decode
- path: /var/tmp/designref.sh
type: file
permissions: '500'
data: e3sgYWN0aW9uLmRlc2lnbl9yZWYgfX0K
data_pipeline:
- base64_decode
- utf8_decode
- template
...

43
tools/helm_install.sh Executable file
View File

@ -0,0 +1,43 @@
#!/bin/bash
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Download the helm release tarball and install the helm binary
# at the path given as the first argument.

set -x

HELM=$1
HELM_ARTIFACT_URL=${HELM_ARTIFACT_URL:-"https://storage.googleapis.com/kubernetes-helm/helm-v2.7.2-linux-amd64.tar.gz"}


function install_helm_binary {
  if [[ -z "${HELM}" ]]
  then
    echo "No Helm binary target location."
    # 'exit -1' is outside the valid 0-255 exit-status range; use 1
    exit 1
  fi

  if [[ -w "$(dirname ${HELM})" ]]
  then
    TMP_DIR=${BUILD_DIR:-$(mktemp -d)}
    # -f: fail on HTTP errors instead of saving an error page,
    # -L: follow redirects to the artifact
    curl -fL -o "${TMP_DIR}/helm.tar.gz" "${HELM_ARTIFACT_URL}"
    cd ${TMP_DIR}
    tar -xvzf helm.tar.gz
    cp "${TMP_DIR}/linux-amd64/helm" "${HELM}"
  else
    echo "Cannot write to ${HELM}"
    exit 1
  fi
}

install_helm_binary

View File

@ -18,6 +18,7 @@
HELM=$1 HELM=$1
HTK_REPO=${HTK_REPO:-"https://github.com/openstack/openstack-helm"} HTK_REPO=${HTK_REPO:-"https://github.com/openstack/openstack-helm"}
HTK_PATH=${HTK_PATH:-""} HTK_PATH=${HTK_PATH:-""}
HTK_STABLE_COMMIT=${HTK_COMMIT:-"f902cd14fac7de4c4c9f7d019191268a6b4e9601"}
DEP_UP_LIST=${DEP_UP_LIST:-"drydock"} DEP_UP_LIST=${DEP_UP_LIST:-"drydock"}
if [[ ! -z $(echo $http_proxy) ]] if [[ ! -z $(echo $http_proxy) ]]
@ -52,10 +53,10 @@ function helm_serve {
mkdir -p build mkdir -p build
pushd build pushd build
git clone --depth 1 $HTK_REPO || true git clone $HTK_REPO || true
pushd openstack-helm/$HTK_PATH pushd openstack-helm/$HTK_PATH
git reset --hard "${HTK_STABLE_COMMIT}"
git pull
helm_serve helm_serve
make helm-toolkit make helm-toolkit
popd && popd popd && popd