Drydock focal related upgrades

This PS implements the following changes:
- switches freeze approach to requirements-direct.txt and
  requirements-frozen.txt files
- adjusts code tabulation style according to yapf recommendations
- replaces deprecated usage of response.body attribute with
  response.text
- fixes integration tests controlled by Makefile + tox
- uplifts Helm to v3.9.4

Change-Id: I751db72eb8f670825382f11a36657112faeb169a
This commit is contained in:
Sergiy Markin 2023-04-26 12:31:09 +00:00
parent 415a8b52c5
commit d00eaf0303
181 changed files with 2811 additions and 4894 deletions

View File

@ -22,5 +22,5 @@ python:
version: 3.8 version: 3.8
install: install:
- requirements: doc/requirements-doc.txt - requirements: doc/requirements-doc.txt
- requirements: python/requirements-lock.txt - requirements: python/requirements-frozen.txt
- requirements: python/requirements-test.txt - requirements: python/test-requirements.txt

View File

@ -19,7 +19,7 @@ IMAGE_PREFIX ?= airshipit
IMAGE_TAG ?= dev IMAGE_TAG ?= dev
HELM := $(shell realpath $(BUILD_DIR))/helm HELM := $(shell realpath $(BUILD_DIR))/helm
UBUNTU_BASE_IMAGE ?= UBUNTU_BASE_IMAGE ?=
DISTRO ?= ubuntu_focal DISTRO ?= ubuntu_focal
PROXY ?= http://proxy.foo.com:8000 PROXY ?= http://proxy.foo.com:8000
NO_PROXY ?= localhost,127.0.0.1,.svc.cluster.local NO_PROXY ?= localhost,127.0.0.1,.svc.cluster.local
USE_PROXY ?= false USE_PROXY ?= false

View File

@ -32,7 +32,7 @@ and policy file templates to be customized
$ tox -e genpolicy $ tox -e genpolicy
$ virtualenv -p python3.5 /var/tmp/drydock $ virtualenv -p python3.5 /var/tmp/drydock
$ . /var/tmp/drydock/bin/activate $ . /var/tmp/drydock/bin/activate
$ pip install -r requirements-lock.txt $ pip install -r requirements-frozen.txt
$ pip install . $ pip install .
$ cp -r etc/drydock /etc/drydock $ cp -r etc/drydock /etc/drydock

View File

@ -39,8 +39,9 @@ def run_migrations_offline():
""" """
return # We don't support offline return # We don't support offline
url = config.get_main_option("sqlalchemy.url") url = config.get_main_option("sqlalchemy.url")
context.configure( context.configure(url=url,
url=url, target_metadata=target_metadata, literal_binds=True) target_metadata=target_metadata,
literal_binds=True)
with context.begin_transaction(): with context.begin_transaction():
context.run_migrations() context.run_migrations()
@ -55,15 +56,15 @@ def run_migrations_online():
""" """
db_url = os.environ['DRYDOCK_DB_URL'] db_url = os.environ['DRYDOCK_DB_URL']
connectable = engine_from_config( connectable = engine_from_config(config.get_section(
config.get_section(config.config_ini_section), config.config_ini_section),
prefix='sqlalchemy.', prefix='sqlalchemy.',
poolclass=pool.NullPool, poolclass=pool.NullPool,
url=db_url) url=db_url)
with connectable.connect() as connection: with connectable.connect() as connection:
context.configure( context.configure(connection=connection,
connection=connection, target_metadata=target_metadata) target_metadata=target_metadata)
with context.begin_transaction(): with context.begin_transaction():
context.run_migrations() context.run_migrations()

View File

@ -15,7 +15,7 @@
apiVersion: v1 apiVersion: v1
description: A Helm chart for Drydock description: A Helm chart for Drydock
name: drydock name: drydock
version: 0.1.1 version: 0.1.2
keywords: keywords:
- drydock - drydock
home: https://github.com/openstack/airship-drydock home: https://github.com/openstack/airship-drydock

View File

@ -1,5 +1,5 @@
sphinx_rtd_theme==1.2.0 sphinx_rtd_theme==1.2.0
pylibyaml==0.1.0 pylibyaml==0.1.0
oslo_versionedobjects==3.1.0 oslo.versionedobjects==3.1.0
falcon==3.1.1 falcon
keystoneauth1==5.1.2 keystoneauth1<=5.1.1

View File

@ -1,112 +1,8 @@
[DEFAULT] [DEFAULT]
#
# From drydock_provisioner
#
# Polling interval in seconds for checking subtask or downstream status (integer
# value)
# Minimum value: 1
#poll_interval = 10
# How long a leader has to check-in before leadership can be usurped, in seconds
# (integer value)
#leader_grace_period = 300
# How often will an instance attempt to claim leadership, in seconds (integer
# value)
#leadership_claim_interval = 30
[database]
#
# From drydock_provisioner
#
# The URI database connect string. (string value)
#database_connect_string = <None>
# The SQLalchemy database connection pool size. (integer value)
#pool_size = 15
# Should DB connections be validated prior to use. (boolean value)
#pool_pre_ping = true
# How long a request for a connection should wait before one becomes available.
# (integer value)
#pool_timeout = 30
# How many connections above pool_size are allowed to be open during high usage.
# (integer value)
#pool_overflow = 10
# Time, in seconds, when a connection should be closed and re-established. -1
# for no recycling. (integer value)
#connection_recycle = -1
[keystone_authtoken] [keystone_authtoken]
#
# From drydock_provisioner
#
# Authentication URL (string value)
#auth_url = <None>
# Scope for system operations (string value)
#system_scope = <None>
# Domain ID to scope to (string value)
#domain_id = <None>
# Domain name to scope to (string value)
#domain_name = <None>
# Project ID to scope to (string value)
# Deprecated group/name - [keystone_authtoken]/tenant_id
#project_id = <None>
# Project name to scope to (string value)
# Deprecated group/name - [keystone_authtoken]/tenant_name
#project_name = <None>
# Domain ID containing project (string value)
#project_domain_id = <None>
# Domain name containing project (string value)
#project_domain_name = <None>
# ID of the trust to use as a trustee use (string value)
#trust_id = <None>
# Optional domain ID to use with v3 and v2 parameters. It will be used for both
# the user and project domain in v3 and ignored in v2 authentication. (string
# value)
#default_domain_id = <None>
# Optional domain name to use with v3 API and v2 parameters. It will be used for
# both the user and project domain in v3 and ignored in v2 authentication.
# (string value)
#default_domain_name = <None>
# User id (string value)
#user_id = <None>
# Username (string value)
# Deprecated group/name - [keystone_authtoken]/user_name
#username = <None>
# User's domain id (string value)
#user_domain_id = <None>
# User's domain name (string value)
#user_domain_name = <None>
# User's password (string value)
#password = <None>
# #
# From keystonemiddleware.auth_token # From keystonemiddleware.auth_token
# #
@ -266,84 +162,6 @@
#auth_section = <None> #auth_section = <None>
[libvirt_driver]
#
# From drydock_provisioner
#
# Polling interval in seconds for querying libvirt status (integer value)
#poll_interval = 10
[logging]
#
# From drydock_provisioner
#
# Global log level for Drydock (string value)
#log_level = INFO
# Logger name for the top-level logger (string value)
#global_logger_name = drydock_provisioner
# Logger name for OOB driver logging (string value)
#oobdriver_logger_name = ${global_logger_name}.oobdriver
# Logger name for Node driver logging (string value)
#nodedriver_logger_name = ${global_logger_name}.nodedriver
# Logger name for Kubernetes driver logging (string value)
#kubernetesdriver_logger_name = ${global_logger_name}.kubernetesdriver
# Logger name for API server logging (string value)
#control_logger_name = ${global_logger_name}.control
[maasdriver]
#
# From drydock_provisioner
#
# The API key for accessing MaaS (string value)
#maas_api_key = <None>
# The URL for accessing MaaS API (string value)
#maas_api_url = <None>
# Update MAAS to use the provided Node OOB params, overwriting discovered values
# (boolean value)
#use_node_oob_params = false
# Skip BMC reconfiguration during commissioning (requires MAAS 2.7+) (boolean
# value)
#skip_bmc_config = false
# Polling interval for querying MaaS status in seconds (integer value)
#poll_interval = 10
[network]
#
# From drydock_provisioner
#
# Timeout for initial read of outgoing HTTP calls from Drydock in seconds.
# (integer value)
#http_client_connect_timeout = 16
# Timeout for initial read of outgoing HTTP calls from Drydock in seconds.
# (integer value)
#http_client_read_timeout = 300
# Number of retries for transient errors of outgoing HTTP calls from Drydock.
# (integer value)
#http_client_retries = 3
[oslo_policy] [oslo_policy]
# #
@ -402,102 +220,3 @@
# Absolute path client key file REST based policy check (string value) # Absolute path client key file REST based policy check (string value)
#remote_ssl_client_key_file = <None> #remote_ssl_client_key_file = <None>
[plugins]
#
# From drydock_provisioner
#
# Module path string of a input ingester to enable (string value)
#ingester = drydock_provisioner.ingester.plugins.yaml.YamlIngester
# List of module path strings of OOB drivers to enable (list value)
#oob_driver = drydock_provisioner.drivers.oob.pyghmi_driver.PyghmiDriver
# Module path string of the Node driver to enable (string value)
#node_driver = drydock_provisioner.drivers.node.maasdriver.driver.MaasNodeDriver
# Module path string of the Kubernetes driver to enable (string value)
#kubernetes_driver = drydock_provisioner.drivers.kubernetes.promenade_driver.driver.PromenadeDriver
# Module path string of the Network driver enable (string value)
#network_driver = <None>
[pyghmi_driver]
#
# From drydock_provisioner
#
# Polling interval in seconds for querying IPMI status (integer value)
#poll_interval = 10
[redfish_driver]
#
# From drydock_provisioner
#
# Maximum number of connection retries to Redfish server (integer value)
# Minimum value: 1
#max_retries = 10
# Maximum reties to wait for power state change (integer value)
# Minimum value: 1
#power_state_change_max_retries = 18
# Polling interval in seconds between retries for power state change (integer
# value)
#power_state_change_retry_interval = 10
# Use SSL to communicate with Redfish API server (boolean value)
#use_ssl = true
[timeouts]
#
# From drydock_provisioner
#
# Fallback timeout when a specific one is not configured (integer value)
#drydock_timeout = 5
# Timeout in minutes for creating site network templates (integer value)
#create_network_template = 2
# Timeout in minutes for creating user credentials (integer value)
#configure_user_credentials = 2
# Timeout in minutes for initial node identification (integer value)
#identify_node = 10
# Timeout in minutes for node commissioning and hardware configuration (integer
# value)
#configure_hardware = 30
# Timeout in minutes for configuring node networking (integer value)
#apply_node_networking = 5
# Timeout in minutes for configuring node storage (integer value)
#apply_node_storage = 5
# Timeout in minutes for configuring node platform (integer value)
#apply_node_platform = 5
# Timeout in minutes for deploying a node (integer value)
#deploy_node = 45
# Timeout in minutes between deployment completion and the all boot actions
# reporting status (integer value)
#bootaction_final_status = 15
# Timeout in minutes for releasing a node (integer value)
#destroy_node = 30
# Timeout in minutes for relabeling a node (integer value)
#relabel_node = 5

View File

@ -1,70 +0,0 @@
# Actions requiring admin authority
#"admin_required": "role:admin or is_admin:1"
# Get task status
# GET /api/v1.0/tasks
# GET /api/v1.0/tasks/{task_id}
#"physical_provisioner:read_task": "role:admin"
# Create a task
# POST /api/v1.0/tasks
#"physical_provisioner:create_task": "role:admin"
# Create validate_design task
# POST /api/v1.0/tasks
#"physical_provisioner:validate_design": "role:admin"
# Create verify_site task
# POST /api/v1.0/tasks
#"physical_provisioner:verify_site": "role:admin"
# Create prepare_site task
# POST /api/v1.0/tasks
#"physical_provisioner:prepare_site": "role:admin"
# Create verify_nodes task
# POST /api/v1.0/tasks
#"physical_provisioner:verify_nodes": "role:admin"
# Create prepare_nodes task
# POST /api/v1.0/tasks
#"physical_provisioner:prepare_nodes": "role:admin"
# Create deploy_nodes task
# POST /api/v1.0/tasks
#"physical_provisioner:deploy_nodes": "role:admin"
# Create destroy_nodes task
# POST /api/v1.0/tasks
#"physical_provisioner:destroy_nodes": "role:admin"
# Deletes tasks by age
# DELETE /api/v1.0/tasks
#"physical_provisioner:delete_tasks": "role:admin"
# Create relabel_nodes task
# POST /api/v1.0/tasks
#"physical_provisioner:relabel_nodes": "role:admin"
# Read build data for a node
# GET /api/v1.0/nodes/{nodename}/builddata
#"physical_provisioner:read_build_data": "role:admin"
# Read loaded design data
# GET /api/v1.0/designs
# GET /api/v1.0/designs/{design_id}
#"physical_provisioner:read_data": "role:admin"
# Load design data
# POST /api/v1.0/designs
# POST /api/v1.0/designs/{design_id}/parts
#"physical_provisioner:ingest_data": "role:admin"
# et health status
# GET /api/v1.0/health/extended
#"physical_provisioner:health_data": "role:admin"
# Validate site design
# POST /api/v1.0/validatedesign
#"physical_provisioner:validate_site_design": "role:admin"

View File

@ -1,112 +1,8 @@
[DEFAULT] [DEFAULT]
#
# From drydock_provisioner
#
# Polling interval in seconds for checking subtask or downstream status (integer
# value)
# Minimum value: 1
#poll_interval = 10
# How long a leader has to check-in before leadership can be usurped, in seconds
# (integer value)
#leader_grace_period = 300
# How often will an instance attempt to claim leadership, in seconds (integer
# value)
#leadership_claim_interval = 30
[database]
#
# From drydock_provisioner
#
# The URI database connect string. (string value)
#database_connect_string = <None>
# The SQLalchemy database connection pool size. (integer value)
#pool_size = 15
# Should DB connections be validated prior to use. (boolean value)
#pool_pre_ping = true
# How long a request for a connection should wait before one becomes available.
# (integer value)
#pool_timeout = 30
# How many connections above pool_size are allowed to be open during high usage.
# (integer value)
#pool_overflow = 10
# Time, in seconds, when a connection should be closed and re-established. -1
# for no recycling. (integer value)
#connection_recycle = -1
[keystone_authtoken] [keystone_authtoken]
#
# From drydock_provisioner
#
# Authentication URL (string value)
#auth_url = <None>
# Scope for system operations (string value)
#system_scope = <None>
# Domain ID to scope to (string value)
#domain_id = <None>
# Domain name to scope to (string value)
#domain_name = <None>
# Project ID to scope to (string value)
# Deprecated group/name - [keystone_authtoken]/tenant_id
#project_id = <None>
# Project name to scope to (string value)
# Deprecated group/name - [keystone_authtoken]/tenant_name
#project_name = <None>
# Domain ID containing project (string value)
#project_domain_id = <None>
# Domain name containing project (string value)
#project_domain_name = <None>
# ID of the trust to use as a trustee use (string value)
#trust_id = <None>
# Optional domain ID to use with v3 and v2 parameters. It will be used for both
# the user and project domain in v3 and ignored in v2 authentication. (string
# value)
#default_domain_id = <None>
# Optional domain name to use with v3 API and v2 parameters. It will be used for
# both the user and project domain in v3 and ignored in v2 authentication.
# (string value)
#default_domain_name = <None>
# User id (string value)
#user_id = <None>
# Username (string value)
# Deprecated group/name - [keystone_authtoken]/user_name
#username = <None>
# User's domain id (string value)
#user_domain_id = <None>
# User's domain name (string value)
#user_domain_name = <None>
# User's password (string value)
#password = <None>
# #
# From keystonemiddleware.auth_token # From keystonemiddleware.auth_token
# #
@ -266,84 +162,6 @@
#auth_section = <None> #auth_section = <None>
[libvirt_driver]
#
# From drydock_provisioner
#
# Polling interval in seconds for querying libvirt status (integer value)
#poll_interval = 10
[logging]
#
# From drydock_provisioner
#
# Global log level for Drydock (string value)
#log_level = INFO
# Logger name for the top-level logger (string value)
#global_logger_name = drydock_provisioner
# Logger name for OOB driver logging (string value)
#oobdriver_logger_name = ${global_logger_name}.oobdriver
# Logger name for Node driver logging (string value)
#nodedriver_logger_name = ${global_logger_name}.nodedriver
# Logger name for Kubernetes driver logging (string value)
#kubernetesdriver_logger_name = ${global_logger_name}.kubernetesdriver
# Logger name for API server logging (string value)
#control_logger_name = ${global_logger_name}.control
[maasdriver]
#
# From drydock_provisioner
#
# The API key for accessing MaaS (string value)
#maas_api_key = <None>
# The URL for accessing MaaS API (string value)
#maas_api_url = <None>
# Update MAAS to use the provided Node OOB params, overwriting discovered values
# (boolean value)
#use_node_oob_params = false
# Skip BMC reconfiguration during commissioning (requires MAAS 2.7+) (boolean
# value)
#skip_bmc_config = false
# Polling interval for querying MaaS status in seconds (integer value)
#poll_interval = 10
[network]
#
# From drydock_provisioner
#
# Timeout for initial read of outgoing HTTP calls from Drydock in seconds.
# (integer value)
#http_client_connect_timeout = 16
# Timeout for initial read of outgoing HTTP calls from Drydock in seconds.
# (integer value)
#http_client_read_timeout = 300
# Number of retries for transient errors of outgoing HTTP calls from Drydock.
# (integer value)
#http_client_retries = 3
[oslo_policy] [oslo_policy]
# #
@ -402,102 +220,3 @@
# Absolute path client key file REST based policy check (string value) # Absolute path client key file REST based policy check (string value)
#remote_ssl_client_key_file = <None> #remote_ssl_client_key_file = <None>
[plugins]
#
# From drydock_provisioner
#
# Module path string of a input ingester to enable (string value)
#ingester = drydock_provisioner.ingester.plugins.yaml.YamlIngester
# List of module path strings of OOB drivers to enable (list value)
#oob_driver = drydock_provisioner.drivers.oob.pyghmi_driver.PyghmiDriver
# Module path string of the Node driver to enable (string value)
#node_driver = drydock_provisioner.drivers.node.maasdriver.driver.MaasNodeDriver
# Module path string of the Kubernetes driver to enable (string value)
#kubernetes_driver = drydock_provisioner.drivers.kubernetes.promenade_driver.driver.PromenadeDriver
# Module path string of the Network driver enable (string value)
#network_driver = <None>
[pyghmi_driver]
#
# From drydock_provisioner
#
# Polling interval in seconds for querying IPMI status (integer value)
#poll_interval = 10
[redfish_driver]
#
# From drydock_provisioner
#
# Maximum number of connection retries to Redfish server (integer value)
# Minimum value: 1
#max_retries = 10
# Maximum reties to wait for power state change (integer value)
# Minimum value: 1
#power_state_change_max_retries = 18
# Polling interval in seconds between retries for power state change (integer
# value)
#power_state_change_retry_interval = 10
# Use SSL to communicate with Redfish API server (boolean value)
#use_ssl = true
[timeouts]
#
# From drydock_provisioner
#
# Fallback timeout when a specific one is not configured (integer value)
#drydock_timeout = 5
# Timeout in minutes for creating site network templates (integer value)
#create_network_template = 2
# Timeout in minutes for creating user credentials (integer value)
#configure_user_credentials = 2
# Timeout in minutes for initial node identification (integer value)
#identify_node = 10
# Timeout in minutes for node commissioning and hardware configuration (integer
# value)
#configure_hardware = 30
# Timeout in minutes for configuring node networking (integer value)
#apply_node_networking = 5
# Timeout in minutes for configuring node storage (integer value)
#apply_node_storage = 5
# Timeout in minutes for configuring node platform (integer value)
#apply_node_platform = 5
# Timeout in minutes for deploying a node (integer value)
#deploy_node = 45
# Timeout in minutes between deployment completion and the all boot actions
# reporting status (integer value)
#bootaction_final_status = 15
# Timeout in minutes for releasing a node (integer value)
#destroy_node = 30
# Timeout in minutes for relabeling a node (integer value)
#relabel_node = 5

View File

@ -1,70 +0,0 @@
# Actions requiring admin authority
#"admin_required": "role:admin or is_admin:1"
# Get task status
# GET /api/v1.0/tasks
# GET /api/v1.0/tasks/{task_id}
#"physical_provisioner:read_task": "role:admin"
# Create a task
# POST /api/v1.0/tasks
#"physical_provisioner:create_task": "role:admin"
# Create validate_design task
# POST /api/v1.0/tasks
#"physical_provisioner:validate_design": "role:admin"
# Create verify_site task
# POST /api/v1.0/tasks
#"physical_provisioner:verify_site": "role:admin"
# Create prepare_site task
# POST /api/v1.0/tasks
#"physical_provisioner:prepare_site": "role:admin"
# Create verify_nodes task
# POST /api/v1.0/tasks
#"physical_provisioner:verify_nodes": "role:admin"
# Create prepare_nodes task
# POST /api/v1.0/tasks
#"physical_provisioner:prepare_nodes": "role:admin"
# Create deploy_nodes task
# POST /api/v1.0/tasks
#"physical_provisioner:deploy_nodes": "role:admin"
# Create destroy_nodes task
# POST /api/v1.0/tasks
#"physical_provisioner:destroy_nodes": "role:admin"
# Deletes tasks by age
# DELETE /api/v1.0/tasks
#"physical_provisioner:delete_tasks": "role:admin"
# Create relabel_nodes task
# POST /api/v1.0/tasks
#"physical_provisioner:relabel_nodes": "role:admin"
# Read build data for a node
# GET /api/v1.0/nodes/{nodename}/builddata
#"physical_provisioner:read_build_data": "role:admin"
# Read loaded design data
# GET /api/v1.0/designs
# GET /api/v1.0/designs/{design_id}
#"physical_provisioner:read_data": "role:admin"
# Load design data
# POST /api/v1.0/designs
# POST /api/v1.0/designs/{design_id}/parts
#"physical_provisioner:ingest_data": "role:admin"
# et health status
# GET /api/v1.0/health/extended
#"physical_provisioner:health_data": "role:admin"
# Validate site design
# POST /api/v1.0/validatedesign
#"physical_provisioner:validate_site_design": "role:admin"

View File

@ -92,10 +92,10 @@ ENV LD_LIBRARY_PATH=/usr/local/lib
COPY --from=baclient_builder /usr/local/lib /usr/local/lib COPY --from=baclient_builder /usr/local/lib /usr/local/lib
COPY --from=baclient_builder /usr/local/include/yaml.h /usr/local/include/yaml.h COPY --from=baclient_builder /usr/local/include/yaml.h /usr/local/include/yaml.h
COPY ./python/requirements-lock.txt /tmp/drydock/ COPY ./python/requirements-frozen.txt /tmp/drydock/
RUN pip3 install \ RUN pip3 install \
--no-cache-dir \ --no-cache-dir \
-r /tmp/drydock/requirements-lock.txt -r /tmp/drydock/requirements-frozen.txt
COPY ./python /tmp/drydock/python COPY ./python /tmp/drydock/python
WORKDIR /tmp/drydock/python WORKDIR /tmp/drydock/python

View File

@ -106,14 +106,15 @@ ENV LD_LIBRARY_PATH=/usr/local/lib
COPY --from=baclient_builder /usr/local/lib /usr/local/lib COPY --from=baclient_builder /usr/local/lib /usr/local/lib
COPY --from=baclient_builder /usr/local/include/yaml.h /usr/local/include/yaml.h COPY --from=baclient_builder /usr/local/include/yaml.h /usr/local/include/yaml.h
COPY ./python/requirements-lock.txt /tmp/drydock/ COPY ./python/requirements-frozen.txt /tmp/drydock/
RUN pip3 install \ RUN pip3 install \
--no-cache-dir \ --no-cache-dir \
-r /tmp/drydock/requirements-lock.txt -r /tmp/drydock/requirements-frozen.txt
COPY ./python /tmp/drydock/python COPY ./python /tmp/drydock/python
WORKDIR /tmp/drydock/python WORKDIR /tmp/drydock/python
RUN python3 setup.py install RUN cd /tmp/drydock/python \
&& pip3 install $(pwd)
COPY ./alembic /tmp/drydock/alembic COPY ./alembic /tmp/drydock/alembic
COPY ./alembic.ini /tmp/drydock/alembic.ini COPY ./alembic.ini /tmp/drydock/alembic.ini

View File

@ -25,28 +25,27 @@ from .node import commands as node
@click.group() @click.group()
@click.option( @click.option('--debug/--no-debug',
'--debug/--no-debug', help='Enable or disable debugging', default=False) help='Enable or disable debugging',
default=False)
# Supported Environment Variables # Supported Environment Variables
@click.option( @click.option('--os_project_domain_name',
'--os_project_domain_name', envvar='OS_PROJECT_DOMAIN_NAME',
envvar='OS_PROJECT_DOMAIN_NAME', required=False)
required=False) @click.option('--os_user_domain_name',
@click.option( envvar='OS_USER_DOMAIN_NAME',
'--os_user_domain_name', envvar='OS_USER_DOMAIN_NAME', required=False) required=False)
@click.option('--os_project_name', envvar='OS_PROJECT_NAME', required=False) @click.option('--os_project_name', envvar='OS_PROJECT_NAME', required=False)
@click.option('--os_username', envvar='OS_USERNAME', required=False) @click.option('--os_username', envvar='OS_USERNAME', required=False)
@click.option('--os_password', envvar='OS_PASSWORD', required=False) @click.option('--os_password', envvar='OS_PASSWORD', required=False)
@click.option('--os_auth_url', envvar='OS_AUTH_URL', required=False) @click.option('--os_auth_url', envvar='OS_AUTH_URL', required=False)
@click.option( @click.option('--os_token',
'--os_token', help='The Keystone token to be used',
help='The Keystone token to be used', default=lambda: os.environ.get('OS_TOKEN', ''))
default=lambda: os.environ.get('OS_TOKEN', '')) @click.option('--url',
@click.option( '-u',
'--url', help='The url of the running drydock instance',
'-u', default=lambda: os.environ.get('DD_URL', ''))
help='The url of the running drydock instance',
default=lambda: os.environ.get('DD_URL', ''))
@click.pass_context @click.pass_context
def drydock(ctx, debug, url, os_project_domain_name, os_user_domain_name, def drydock(ctx, debug, url, os_project_domain_name, os_user_domain_name,
os_project_name, os_username, os_password, os_auth_url, os_token): os_project_name, os_username, os_password, os_auth_url, os_token):
@ -83,8 +82,8 @@ def drydock(ctx, debug, url, os_project_domain_name, os_user_domain_name,
str(keystone_env)) str(keystone_env))
ks_sess = KeystoneClient.get_ks_session(**keystone_env) ks_sess = KeystoneClient.get_ks_session(**keystone_env)
else: else:
logger.debug( logger.debug("Generating Keystone session by explicit token: %s" %
"Generating Keystone session by explicit token: %s" % os_token) os_token)
ks_sess = KeystoneClient.get_ks_session(token=os_token) ks_sess = KeystoneClient.get_ks_session(token=os_token)
KeystoneClient.get_token(ks_sess=ks_sess) KeystoneClient.get_token(ks_sess=ks_sess)
except Exception as ex: except Exception as ex:
@ -94,8 +93,8 @@ def drydock(ctx, debug, url, os_project_domain_name, os_user_domain_name,
try: try:
if not url: if not url:
url = KeystoneClient.get_endpoint( url = KeystoneClient.get_endpoint('physicalprovisioner',
'physicalprovisioner', ks_sess=ks_sess) ks_sess=ks_sess)
except Exception as ex: except Exception as ex:
logger.debug("Exception getting Drydock endpoint.", exc_info=ex) logger.debug("Exception getting Drydock endpoint.", exc_info=ex)
ctx.fail('Error: Unable to discover Drydock API URL') ctx.fail('Error: Unable to discover Drydock API URL')
@ -109,11 +108,10 @@ def drydock(ctx, debug, url, os_project_domain_name, os_user_domain_name,
if not url_parse_result.scheme: if not url_parse_result.scheme:
ctx.fail('URL must specify a scheme and hostname, optionally a port') ctx.fail('URL must specify a scheme and hostname, optionally a port')
ctx.obj['CLIENT'] = DrydockClient( ctx.obj['CLIENT'] = DrydockClient(
DrydockSession( DrydockSession(scheme=url_parse_result.scheme,
scheme=url_parse_result.scheme, host=url_parse_result.hostname,
host=url_parse_result.hostname, port=url_parse_result.port,
port=url_parse_result.port, auth_gen=auth_gen))
auth_gen=auth_gen))
drydock.add_command(task.task) drydock.add_command(task.task)

View File

@ -59,8 +59,8 @@ class DesignShow(CliAction): # pylint: disable=too-few-public-methods
design_id) design_id)
def invoke(self): def invoke(self):
return self.api_client.get_design( return self.api_client.get_design(design_id=self.design_id,
design_id=self.design_id, source=self.source) source=self.source)
class DesignValidate(CliAction): # pylint: disable=too-few-public-methods class DesignValidate(CliAction): # pylint: disable=too-few-public-methods

View File

@ -31,10 +31,9 @@ def design():
@design.command(name='create') @design.command(name='create')
@click.option( @click.option('--base-design',
'--base-design', '-b',
'-b', help='The base design to model this new design after')
help='The base design to model this new design after')
@click.pass_context @click.pass_context
def design_create(ctx, base_design=None): def design_create(ctx, base_design=None):
"""Create a design.""" """Create a design."""
@ -61,8 +60,9 @@ def design_show(ctx, design_id):
@design.command(name='validate') @design.command(name='validate')
@click.option( @click.option('--design-href',
'--design-href', '-h', help='The design href key to the design ref') '-h',
help='The design href key to the design ref')
@click.pass_context @click.pass_context
def design_validate(ctx, design_href=None): def design_validate(ctx, design_href=None):
"""Validate a design.""" """Validate a design."""

View File

@ -47,5 +47,5 @@ class NodeBuildData(CliAction):
self.logger.debug('NodeBuildData action initialized') self.logger.debug('NodeBuildData action initialized')
def invoke(self): def invoke(self):
return self.api_client.get_node_build_data( return self.api_client.get_node_build_data(self.nodename,
self.nodename, latest=self.latest) latest=self.latest)

View File

@ -31,8 +31,10 @@ def node():
@node.command(name='list') @node.command(name='list')
@click.option( @click.option('--output',
'--output', '-o', help='Output format: table|json', default='table') '-o',
help='Output format: table|json',
default='table')
@click.pass_context @click.pass_context
def node_list(ctx, output='table'): def node_list(ctx, output='table'):
"""List nodes.""" """List nodes."""
@ -59,12 +61,13 @@ def node_list(ctx, output='table'):
@node.command(name='builddata') @node.command(name='builddata')
@click.option( @click.option('--latest/--no-latest',
'--latest/--no-latest', help='Retrieve only the latest data items.',
help='Retrieve only the latest data items.', default=True)
default=True) @click.option('--output',
@click.option( '-o',
'--output', '-o', help='Output format: yaml|json', default='yaml') help='Output format: yaml|json',
default='yaml')
@click.argument('nodename') @click.argument('nodename')
@click.pass_context @click.pass_context
def node_builddata(ctx, nodename, latest=True, output='yaml'): def node_builddata(ctx, nodename, latest=True, output='yaml'):
@ -78,5 +81,6 @@ def node_builddata(ctx, nodename, latest=True, output='yaml'):
click.echo( click.echo(
"Invalid output format {}, default to YAML.".format(output)) "Invalid output format {}, default to YAML.".format(output))
click.echo( click.echo(
yaml.safe_dump( yaml.safe_dump(node_bd,
node_bd, allow_unicode=True, default_flow_style=False)) allow_unicode=True,
default_flow_style=False))

View File

@ -85,8 +85,7 @@ class PartShow(PartBase): # pylint: disable=too-few-public-methods
' kind=%s, key=%s, source=%s', design_id, kind, key, source) ' kind=%s, key=%s, source=%s', design_id, kind, key, source)
def invoke(self): def invoke(self):
return self.api_client.get_part( return self.api_client.get_part(design_id=self.design_id,
design_id=self.design_id, kind=self.kind,
kind=self.kind, key=self.key,
key=self.key, source=self.source)
source=self.source)

View File

@ -25,10 +25,9 @@ from drydock_provisioner.cli.part.actions import PartCreate
@click.group() @click.group()
@click.option( @click.option('--design-id',
'--design-id', '-d',
'-d', help='The id of the design containing the target parts')
help='The id of the design containing the target parts')
@click.pass_context @click.pass_context
def part(ctx, design_id=None): def part(ctx, design_id=None):
"""Drydock part commands.""" """Drydock part commands."""
@ -39,8 +38,9 @@ def part(ctx, design_id=None):
@part.command(name='create') @part.command(name='create')
@click.option( @click.option('--file',
'--file', '-f', help='The file name containing the part to create') '-f',
help='The file name containing the part to create')
@click.pass_context @click.pass_context
def part_create(ctx, file=None): def part_create(ctx, file=None):
"""Create a part.""" """Create a part."""
@ -52,10 +52,9 @@ def part_create(ctx, file=None):
# here is where some potential validation could be done on the input file # here is where some potential validation could be done on the input file
click.echo( click.echo(
json.dumps( json.dumps(
PartCreate( PartCreate(ctx.obj['CLIENT'],
ctx.obj['CLIENT'], design_id=ctx.obj['DESIGN_ID'],
design_id=ctx.obj['DESIGN_ID'], in_file=file_contents).invoke()))
in_file=file_contents).invoke()))
@part.command(name='list') @part.command(name='list')
@ -83,9 +82,8 @@ def part_show(ctx, source, kind, key):
click.echo( click.echo(
json.dumps( json.dumps(
PartShow( PartShow(ctx.obj['CLIENT'],
ctx.obj['CLIENT'], design_id=ctx.obj['DESIGN_ID'],
design_id=ctx.obj['DESIGN_ID'], kind=kind,
kind=kind, key=key,
key=key, source=source).invoke()))
source=source).invoke()))

View File

@ -90,10 +90,9 @@ class TaskCreate(CliAction): # pylint: disable=too-few-public-methods
def invoke(self): def invoke(self):
"""Invoke execution of this action.""" """Invoke execution of this action."""
task = self.api_client.create_task( task = self.api_client.create_task(design_ref=self.design_ref,
design_ref=self.design_ref, task_action=self.action_name,
task_action=self.action_name, node_filter=self.node_filter)
node_filter=self.node_filter)
if not self.block: if not self.block:
return task return task
@ -157,6 +156,7 @@ class TaskBuildData(CliAction):
def invoke(self): def invoke(self):
return self.api_client.get_task_build_data(self.task_id) return self.api_client.get_task_build_data(self.task_id)
class TasksDelete(CliAction): class TasksDelete(CliAction):
"""Action to delete tasks in database.""" """Action to delete tasks in database."""

View File

@ -29,17 +29,16 @@ def task():
@task.command(name='create') @task.command(name='create')
@click.option( @click.option('--design-ref',
'--design-ref', '-d', help='The design reference for this action') '-d',
help='The design reference for this action')
@click.option('--action', '-a', help='The action to perform') @click.option('--action', '-a', help='The action to perform')
@click.option( @click.option('--node-names',
'--node-names', '-n',
'-n', help='The nodes targeted by this action, comma separated')
help='The nodes targeted by this action, comma separated') @click.option('--rack-names',
@click.option( '-r',
'--rack-names', help='The racks targeted by this action, comma separated')
'-r',
help='The racks targeted by this action, comma separated')
@click.option( @click.option(
'--node-tags', '--node-tags',
'-t', '-t',
@ -49,10 +48,9 @@ def task():
'-b', '-b',
help='The CLI will wait until the created completes before exitting', help='The CLI will wait until the created completes before exitting',
default=False) default=False)
@click.option( @click.option('--poll-interval',
'--poll-interval', help='Polling interval to check task status in blocking mode.',
help='Polling interval to check task status in blocking mode.', default=15)
default=15)
@click.pass_context @click.pass_context
def task_create(ctx, def task_create(ctx,
design_ref=None, design_ref=None,
@ -112,8 +110,10 @@ def task_show(ctx, task_id=None, block=False):
@task.command(name='builddata') @task.command(name='builddata')
@click.option('--task-id', '-t', help='The required task id') @click.option('--task-id', '-t', help='The required task id')
@click.option( @click.option('--output',
'--output', '-o', help='The output format (yaml|json)', default='yaml') '-o',
help='The output format (yaml|json)',
default='yaml')
@click.pass_context @click.pass_context
def task_builddata(ctx, task_id=None, output='yaml'): def task_builddata(ctx, task_id=None, output='yaml'):
"""Show builddata assoicated with ``task_id``.""" """Show builddata assoicated with ``task_id``."""
@ -129,17 +129,19 @@ def task_builddata(ctx, task_id=None, output='yaml'):
click.echo( click.echo(
'Invalid output format {}, defaulting to YAML.'.format(output)) 'Invalid output format {}, defaulting to YAML.'.format(output))
click.echo( click.echo(
yaml.safe_dump( yaml.safe_dump(task_bd,
task_bd, allow_unicode=True, default_flow_style=False)) allow_unicode=True,
default_flow_style=False))
@task.command(name='delete') @task.command(name='delete')
@click.option('--days', '-d', help='The required number of days to retain tasks') @click.option('--days',
'-d',
help='The required number of days to retain tasks')
@click.pass_context @click.pass_context
def task_delete(ctx, days=None): def task_delete(ctx, days=None):
"""Delete tasks from database""" """Delete tasks from database"""
if not days: if not days:
ctx.fail('The number of days must be specified using --days or -d') ctx.fail('The number of days must be specified using --days or -d')
click.echo( click.echo(TasksDelete(ctx.obj['CLIENT'], days=days).invoke())
TasksDelete(ctx.obj['CLIENT'], days=days).invoke())

View File

@ -67,43 +67,36 @@ class DrydockConfig(object):
# Logging options # Logging options
logging_options = [ logging_options = [
cfg.StrOpt( cfg.StrOpt('log_level',
'log_level', default='INFO', help='Global log level for Drydock'), default='INFO',
cfg.StrOpt( help='Global log level for Drydock'),
'global_logger_name', cfg.StrOpt('global_logger_name',
default='drydock_provisioner', default='drydock_provisioner',
help='Logger name for the top-level logger'), help='Logger name for the top-level logger'),
cfg.StrOpt( cfg.StrOpt('oobdriver_logger_name',
'oobdriver_logger_name', default='${global_logger_name}.oobdriver',
default='${global_logger_name}.oobdriver', help='Logger name for OOB driver logging'),
help='Logger name for OOB driver logging'), cfg.StrOpt('nodedriver_logger_name',
cfg.StrOpt( default='${global_logger_name}.nodedriver',
'nodedriver_logger_name', help='Logger name for Node driver logging'),
default='${global_logger_name}.nodedriver', cfg.StrOpt('kubernetesdriver_logger_name',
help='Logger name for Node driver logging'), default='${global_logger_name}.kubernetesdriver',
cfg.StrOpt( help='Logger name for Kubernetes driver logging'),
'kubernetesdriver_logger_name', cfg.StrOpt('control_logger_name',
default='${global_logger_name}.kubernetesdriver', default='${global_logger_name}.control',
help='Logger name for Kubernetes driver logging'), help='Logger name for API server logging'),
cfg.StrOpt(
'control_logger_name',
default='${global_logger_name}.control',
help='Logger name for API server logging'),
] ]
# Database options # Database options
database_options = [ database_options = [
cfg.StrOpt( cfg.StrOpt('database_connect_string',
'database_connect_string', help='The URI database connect string.'),
help='The URI database connect string.'), cfg.IntOpt('pool_size',
cfg.IntOpt( default=15,
'pool_size', help='The SQLalchemy database connection pool size.'),
default=15, cfg.BoolOpt('pool_pre_ping',
help='The SQLalchemy database connection pool size.'), default=True,
cfg.BoolOpt( help='Should DB connections be validated prior to use.'),
'pool_pre_ping',
default=True,
help='Should DB connections be validated prior to use.'),
cfg.IntOpt( cfg.IntOpt(
'pool_timeout', 'pool_timeout',
default=30, default=30,
@ -126,9 +119,8 @@ class DrydockConfig(object):
# Options for the boot action framework # Options for the boot action framework
bootactions_options = [ bootactions_options = [
cfg.StrOpt( cfg.StrOpt('report_url',
'report_url', default='http://localhost:9000/api/v1.0/bootactions/')
default='http://localhost:9000/api/v1.0/bootactions/')
] ]
# Options for network traffic # Options for network traffic
@ -176,10 +168,9 @@ class DrydockConfig(object):
'drydock_provisioner.drivers.kubernetes.promenade_driver.driver.PromenadeDriver', 'drydock_provisioner.drivers.kubernetes.promenade_driver.driver.PromenadeDriver',
help='Module path string of the Kubernetes driver to enable'), help='Module path string of the Kubernetes driver to enable'),
# TODO(sh8121att) Network driver not yet implemented # TODO(sh8121att) Network driver not yet implemented
cfg.StrOpt( cfg.StrOpt('network_driver',
'network_driver', default=None,
default=None, help='Module path string of the Network driver enable'),
help='Module path string of the Network driver enable'),
] ]
# Timeouts for various tasks specified in minutes # Timeouts for various tasks specified in minutes
@ -192,36 +183,30 @@ class DrydockConfig(object):
'create_network_template', 'create_network_template',
default=2, default=2,
help='Timeout in minutes for creating site network templates'), help='Timeout in minutes for creating site network templates'),
cfg.IntOpt( cfg.IntOpt('configure_user_credentials',
'configure_user_credentials', default=2,
default=2, help='Timeout in minutes for creating user credentials'),
help='Timeout in minutes for creating user credentials'), cfg.IntOpt('identify_node',
cfg.IntOpt( default=10,
'identify_node', help='Timeout in minutes for initial node identification'),
default=10,
help='Timeout in minutes for initial node identification'),
cfg.IntOpt( cfg.IntOpt(
'configure_hardware', 'configure_hardware',
default=30, default=30,
help= help=
'Timeout in minutes for node commissioning and hardware configuration' 'Timeout in minutes for node commissioning and hardware configuration'
), ),
cfg.IntOpt( cfg.IntOpt('apply_node_networking',
'apply_node_networking', default=5,
default=5, help='Timeout in minutes for configuring node networking'),
help='Timeout in minutes for configuring node networking'), cfg.IntOpt('apply_node_storage',
cfg.IntOpt( default=5,
'apply_node_storage', help='Timeout in minutes for configuring node storage'),
default=5, cfg.IntOpt('apply_node_platform',
help='Timeout in minutes for configuring node storage'), default=5,
cfg.IntOpt( help='Timeout in minutes for configuring node platform'),
'apply_node_platform', cfg.IntOpt('deploy_node',
default=5, default=45,
help='Timeout in minutes for configuring node platform'), help='Timeout in minutes for deploying a node'),
cfg.IntOpt(
'deploy_node',
default=45,
help='Timeout in minutes for deploying a node'),
cfg.IntOpt( cfg.IntOpt(
'bootaction_final_status', 'bootaction_final_status',
default=15, default=15,
@ -233,10 +218,9 @@ class DrydockConfig(object):
default=30, default=30,
help='Timeout in minutes for releasing a node', help='Timeout in minutes for releasing a node',
), ),
cfg.IntOpt( cfg.IntOpt('relabel_node',
'relabel_node', default=5,
default=5, help='Timeout in minutes for relabeling a node'),
help='Timeout in minutes for relabeling a node'),
] ]
def __init__(self): def __init__(self):
@ -244,15 +228,15 @@ class DrydockConfig(object):
def register_options(self, enable_keystone=True): def register_options(self, enable_keystone=True):
self.conf.register_opts(DrydockConfig.options) self.conf.register_opts(DrydockConfig.options)
self.conf.register_opts( self.conf.register_opts(DrydockConfig.bootactions_options,
DrydockConfig.bootactions_options, group='bootactions') group='bootactions')
self.conf.register_opts(DrydockConfig.logging_options, group='logging') self.conf.register_opts(DrydockConfig.logging_options, group='logging')
self.conf.register_opts(DrydockConfig.plugin_options, group='plugins') self.conf.register_opts(DrydockConfig.plugin_options, group='plugins')
self.conf.register_opts(DrydockConfig.network_options, group='network') self.conf.register_opts(DrydockConfig.network_options, group='network')
self.conf.register_opts( self.conf.register_opts(DrydockConfig.database_options,
DrydockConfig.database_options, group='database') group='database')
self.conf.register_opts( self.conf.register_opts(DrydockConfig.timeout_options,
DrydockConfig.timeout_options, group='timeouts') group='timeouts')
if enable_keystone: if enable_keystone:
self.conf.register_opts( self.conf.register_opts(
loading.get_auth_plugin_conf_options('password'), loading.get_auth_plugin_conf_options('password'),

View File

@ -45,13 +45,12 @@ def start_api(state_manager=None, ingester=None, orchestrator=None):
part input part input
:param orchestrator: Instance of drydock_provisioner.orchestrator.Orchestrator for managing tasks :param orchestrator: Instance of drydock_provisioner.orchestrator.Orchestrator for managing tasks
""" """
control_api = falcon.App( control_api = falcon.App(request_type=DrydockRequest,
request_type=DrydockRequest, middleware=[
middleware=[ AuthMiddleware(),
AuthMiddleware(), ContextMiddleware(),
ContextMiddleware(), LoggingMiddleware()
LoggingMiddleware() ])
])
control_api.add_route('/versions', VersionsResource()) control_api.add_route('/versions', VersionsResource())
@ -59,11 +58,11 @@ def start_api(state_manager=None, ingester=None, orchestrator=None):
v1_0_routes = [ v1_0_routes = [
# API for managing orchestrator tasks # API for managing orchestrator tasks
('/health', ('/health',
HealthResource( HealthResource(state_manager=state_manager,
state_manager=state_manager, orchestrator=orchestrator)), orchestrator=orchestrator)),
('/health/extended', ('/health/extended',
HealthExtendedResource( HealthExtendedResource(state_manager=state_manager,
state_manager=state_manager, orchestrator=orchestrator)), orchestrator=orchestrator)),
('/tasks', ('/tasks',
TasksResource(state_manager=state_manager, TasksResource(state_manager=state_manager,
orchestrator=orchestrator)), orchestrator=orchestrator)),
@ -74,15 +73,15 @@ def start_api(state_manager=None, ingester=None, orchestrator=None):
# API for managing site design data # API for managing site design data
('/designs', DesignsResource(state_manager=state_manager)), ('/designs', DesignsResource(state_manager=state_manager)),
('/designs/{design_id}', ('/designs/{design_id}',
DesignResource( DesignResource(state_manager=state_manager,
state_manager=state_manager, orchestrator=orchestrator)), orchestrator=orchestrator)),
('/designs/{design_id}/parts', ('/designs/{design_id}/parts',
DesignsPartsResource(state_manager=state_manager, ingester=ingester)), DesignsPartsResource(state_manager=state_manager, ingester=ingester)),
('/designs/{design_id}/parts/{kind}', ('/designs/{design_id}/parts/{kind}',
DesignsPartsKindsResource(state_manager=state_manager)), DesignsPartsKindsResource(state_manager=state_manager)),
('/designs/{design_id}/parts/{kind}/{name}', ('/designs/{design_id}/parts/{kind}/{name}',
DesignsPartResource( DesignsPartResource(state_manager=state_manager,
state_manager=state_manager, orchestrator=orchestrator)), orchestrator=orchestrator)),
# API to list current MaaS nodes # API to list current MaaS nodes
('/nodes', NodesResource()), ('/nodes', NodesResource()),
@ -91,23 +90,23 @@ def start_api(state_manager=None, ingester=None, orchestrator=None):
NodeBuildDataResource(state_manager=state_manager)), NodeBuildDataResource(state_manager=state_manager)),
# API to list current node names based # API to list current node names based
('/nodefilter', ('/nodefilter',
NodeFilterResource( NodeFilterResource(state_manager=state_manager,
state_manager=state_manager, orchestrator=orchestrator)), orchestrator=orchestrator)),
# API for nodes to discover their boot actions during curtin install # API for nodes to discover their boot actions during curtin install
('/bootactions/nodes/{hostname}/units', ('/bootactions/nodes/{hostname}/units',
BootactionUnitsResource( BootactionUnitsResource(state_manager=state_manager,
state_manager=state_manager, orchestrator=orchestrator)), orchestrator=orchestrator)),
('/bootactions/nodes/{hostname}/files', ('/bootactions/nodes/{hostname}/files',
BootactionFilesResource( BootactionFilesResource(state_manager=state_manager,
state_manager=state_manager, orchestrator=orchestrator)), orchestrator=orchestrator)),
('/bootactions/{action_id}', ('/bootactions/{action_id}',
BootactionResource( BootactionResource(state_manager=state_manager,
state_manager=state_manager, orchestrator=orchestrator)), orchestrator=orchestrator)),
# API to validate schemas # API to validate schemas
('/validatedesign', ('/validatedesign',
ValidationResource( ValidationResource(state_manager=state_manager,
state_manager=state_manager, orchestrator=orchestrator)), orchestrator=orchestrator)),
] ]
for path, res in v1_0_routes: for path, res in v1_0_routes:
@ -122,10 +121,9 @@ class VersionsResource(BaseResource):
""" """
def on_get(self, req, resp): def on_get(self, req, resp):
resp.body = self.to_json({ resp.text = self.to_json(
'v1.0': { {'v1.0': {
'path': '/api/v1.0', 'path': '/api/v1.0',
'status': 'stable' 'status': 'stable'
} }})
})
resp.status = falcon.HTTP_200 resp.status = falcon.HTTP_200

View File

@ -22,6 +22,7 @@ import drydock_provisioner.error as errors
class BaseResource(object): class BaseResource(object):
def __init__(self): def __init__(self):
self.logger = logging.getLogger('drydock') self.logger = logging.getLogger('drydock')
@ -52,18 +53,18 @@ class BaseResource(object):
json_body = json.loads(raw_body.decode('utf-8')) json_body = json.loads(raw_body.decode('utf-8'))
return json_body return json_body
except json.JSONDecodeError as jex: except json.JSONDecodeError as jex:
print( print("Invalid JSON in request: \n%s" %
"Invalid JSON in request: \n%s" % raw_body.decode('utf-8')) raw_body.decode('utf-8'))
self.error( self.error(
req.context, req.context,
"Invalid JSON in request: \n%s" % raw_body.decode('utf-8')) "Invalid JSON in request: \n%s" % raw_body.decode('utf-8'))
raise errors.InvalidFormat( raise errors.InvalidFormat("%s: Invalid JSON in body: %s" %
"%s: Invalid JSON in body: %s" % (req.path, jex)) (req.path, jex))
else: else:
raise errors.InvalidFormat("Requires application/json payload") raise errors.InvalidFormat("Requires application/json payload")
def return_error(self, resp, status_code, message="", retry=False): def return_error(self, resp, status_code, message="", retry=False):
resp.body = json.dumps({ resp.text = json.dumps({
'type': 'error', 'type': 'error',
'message': message, 'message': message,
'retry': retry 'retry': retry
@ -71,8 +72,12 @@ class BaseResource(object):
resp.status = status_code resp.status = status_code
def log_error(self, ctx, level, msg): def log_error(self, ctx, level, msg):
extra = {'user': 'N/A', 'req_id': 'N/A', 'external_ctx': 'N/A', extra = {
'end_user': 'N/A'} 'user': 'N/A',
'req_id': 'N/A',
'external_ctx': 'N/A',
'end_user': 'N/A'
}
if ctx is not None: if ctx is not None:
extra = { extra = {
@ -104,6 +109,7 @@ class BaseResource(object):
class StatefulResource(BaseResource): class StatefulResource(BaseResource):
def __init__(self, state_manager=None, **kwargs): def __init__(self, state_manager=None, **kwargs):
super(StatefulResource, self).__init__(**kwargs) super(StatefulResource, self).__init__(**kwargs)
@ -119,6 +125,7 @@ class StatefulResource(BaseResource):
class DrydockRequestContext(object): class DrydockRequestContext(object):
def __init__(self): def __init__(self):
self.log_level = 'ERROR' self.log_level = 'ERROR'
self.user = None # Username self.user = None # Username

View File

@ -76,8 +76,8 @@ class BootactionResource(StatefulResource):
try: try:
ba_entry = self.state_manager.get_boot_action(action_id) ba_entry = self.state_manager.get_boot_action(action_id)
except Exception as ex: except Exception as ex:
self.logger.error( self.logger.error("Error querying for boot action %s" % action_id,
"Error querying for boot action %s" % action_id, exc_info=ex) exc_info=ex)
raise falcon.HTTPInternalServerError(str(ex)) raise falcon.HTTPInternalServerError(str(ex))
if ba_entry is None: if ba_entry is None:
@ -103,8 +103,8 @@ class BootactionResource(StatefulResource):
action_id) action_id)
for m in json_body.get('details', []): for m in json_body.get('details', []):
rm = objects.TaskStatusMessage( rm = objects.TaskStatusMessage(m.get('message'), m.get('error'),
m.get('message'), m.get('error'), 'bootaction', action_id) 'bootaction', action_id)
for f, v in m.items(): for f, v in m.items():
if f not in ['message', 'error']: if f not in ['message', 'error']:
rm['extra'] = dict() rm['extra'] = dict()
@ -124,11 +124,12 @@ class BootactionResource(StatefulResource):
resp.content_type = 'application/json' resp.content_type = 'application/json'
ba_entry['task_id'] = str(ba_entry['task_id']) ba_entry['task_id'] = str(ba_entry['task_id'])
ba_entry['action_id'] = ulid2.encode_ulid_base32(ba_entry['action_id']) ba_entry['action_id'] = ulid2.encode_ulid_base32(ba_entry['action_id'])
resp.body = json.dumps(ba_entry) resp.text = json.dumps(ba_entry)
return return
class BootactionAssetsResource(StatefulResource): class BootactionAssetsResource(StatefulResource):
def __init__(self, orchestrator=None, **kwargs): def __init__(self, orchestrator=None, **kwargs):
super().__init__(**kwargs) super().__init__(**kwargs)
self.orchestrator = orchestrator self.orchestrator = orchestrator
@ -149,8 +150,8 @@ class BootactionAssetsResource(StatefulResource):
try: try:
ba_ctx = self.state_manager.get_boot_action_context(hostname) ba_ctx = self.state_manager.get_boot_action_context(hostname)
except Exception as ex: except Exception as ex:
self.logger.error( self.logger.error("Error locating boot action for %s" % hostname,
"Error locating boot action for %s" % hostname, exc_info=ex) exc_info=ex)
raise falcon.HTTPNotFound() raise falcon.HTTPNotFound()
if ba_ctx is None: if ba_ctx is None:
@ -178,19 +179,19 @@ class BootactionAssetsResource(StatefulResource):
action_id = ba_status.get('action_id') action_id = ba_status.get('action_id')
action_key = ba_status.get('identity_key') action_key = ba_status.get('identity_key')
assets.extend( assets.extend(
ba.render_assets( ba.render_assets(hostname,
hostname, site_design,
site_design, action_id,
action_id, action_key,
action_key, task.design_ref,
task.design_ref, type_filter=asset_type_filter))
type_filter=asset_type_filter))
tarball = BootactionUtils.tarbuilder(asset_list=assets) tarball = BootactionUtils.tarbuilder(asset_list=assets)
resp.set_header('Content-Type', 'application/gzip') resp.set_header('Content-Type', 'application/gzip')
resp.set_header( resp.set_header(
'Content-Disposition', "attachment; filename=\"%s-%s.tar.gz\"" 'Content-Disposition',
% (hostname, asset_type)) "attachment; filename=\"%s-%s.tar.gz\"" %
(hostname, asset_type))
resp.data = tarball resp.data = tarball
resp.status = falcon.HTTP_200 resp.status = falcon.HTTP_200
return return
@ -200,16 +201,18 @@ class BootactionAssetsResource(StatefulResource):
class BootactionUnitsResource(BootactionAssetsResource): class BootactionUnitsResource(BootactionAssetsResource):
def __init__(self, **kwargs): def __init__(self, **kwargs):
super().__init__(**kwargs) super().__init__(**kwargs)
def on_get(self, req, resp, hostname): def on_get(self, req, resp, hostname):
self.logger.debug( self.logger.debug("Accessing boot action units resource for host %s." %
"Accessing boot action units resource for host %s." % hostname) hostname)
self.do_get(req, resp, hostname, 'unit') self.do_get(req, resp, hostname, 'unit')
class BootactionFilesResource(BootactionAssetsResource): class BootactionFilesResource(BootactionAssetsResource):
def __init__(self, **kwargs): def __init__(self, **kwargs):
super().__init__(**kwargs) super().__init__(**kwargs)
@ -233,18 +236,17 @@ class BootactionUtils(object):
identity_key = req.get_header('X-Bootaction-Key', default='') identity_key = req.get_header('X-Bootaction-Key', default='')
if identity_key == '': if identity_key == '':
raise falcon.HTTPUnauthorized( raise falcon.HTTPUnauthorized(title='Unauthorized',
title='Unauthorized', description='No X-Bootaction-Key',
description='No X-Bootaction-Key', challenges=['Bootaction-Key'])
challenges=['Bootaction-Key'])
if ba_ctx['identity_key'] != bytes.fromhex(identity_key): if ba_ctx['identity_key'] != bytes.fromhex(identity_key):
logger.warn( logger.warn(
"Forbidding boot action access - node: %s, identity_key: %s, req header: %s" "Forbidding boot action access - node: %s, identity_key: %s, req header: %s"
% (ba_ctx['node_name'], str(ba_ctx['identity_key']), % (ba_ctx['node_name'], str(
str(bytes.fromhex(identity_key)))) ba_ctx['identity_key']), str(bytes.fromhex(identity_key))))
raise falcon.HTTPForbidden( raise falcon.HTTPForbidden(title='Unauthorized',
title='Unauthorized', description='Invalid X-Bootaction-Key') description='Invalid X-Bootaction-Key')
@staticmethod @staticmethod
def tarbuilder(asset_list=None): def tarbuilder(asset_list=None):
@ -259,8 +261,9 @@ class BootactionUtils(object):
:param asset_list: list of objects.BootActionAsset instances :param asset_list: list of objects.BootActionAsset instances
""" """
tarbytes = io.BytesIO() tarbytes = io.BytesIO()
tarball = tarfile.open( tarball = tarfile.open(mode='w:gz',
mode='w:gz', fileobj=tarbytes, format=tarfile.GNU_FORMAT) fileobj=tarbytes,
format=tarfile.GNU_FORMAT)
asset_list = [ asset_list = [
a for a in asset_list if a.type != BootactionAssetType.PackageList a for a in asset_list if a.type != BootactionAssetType.PackageList
] ]

View File

@ -44,15 +44,14 @@ class DesignsResource(StatefulResource):
try: try:
designs = list(state.designs.keys()) designs = list(state.designs.keys())
resp.body = json.dumps(designs) resp.text = json.dumps(designs)
resp.status = falcon.HTTP_200 resp.status = falcon.HTTP_200
except Exception as ex: except Exception as ex:
self.error(req.context, "Exception raised: %s" % str(ex)) self.error(req.context, "Exception raised: %s" % str(ex))
self.return_error( self.return_error(resp,
resp, falcon.HTTP_500,
falcon.HTTP_500, message="Error accessing design list",
message="Error accessing design list", retry=True)
retry=True)
@policy.ApiEnforcer('physical_provisioner:ingest_data') @policy.ApiEnforcer('physical_provisioner:ingest_data')
def on_post(self, req, resp): def on_post(self, req, resp):
@ -75,19 +74,20 @@ class DesignsResource(StatefulResource):
design.assign_id() design.assign_id()
design.create(req.context, self.state_manager) design.create(req.context, self.state_manager)
resp.body = json.dumps(design.obj_to_simple()) resp.text = json.dumps(design.obj_to_simple())
resp.status = falcon.HTTP_201 resp.status = falcon.HTTP_201
except errors.StateError: except errors.StateError:
self.error(req.context, "Error updating persistence") self.error(req.context, "Error updating persistence")
self.return_error( self.return_error(resp,
resp, falcon.HTTP_500,
falcon.HTTP_500, message="Error updating persistence",
message="Error updating persistence", retry=True)
retry=True)
except errors.InvalidFormat as fex: except errors.InvalidFormat as fex:
self.error(req.context, str(fex)) self.error(req.context, str(fex))
self.return_error( self.return_error(resp,
resp, falcon.HTTP_400, message=str(fex), retry=False) falcon.HTTP_400,
message=str(fex),
retry=False)
class DesignResource(StatefulResource): class DesignResource(StatefulResource):
@ -115,17 +115,17 @@ class DesignResource(StatefulResource):
elif source == 'designed': elif source == 'designed':
design = self.orchestrator.get_described_site(design_id) design = self.orchestrator.get_described_site(design_id)
resp.body = json.dumps(design.obj_to_simple()) resp.text = json.dumps(design.obj_to_simple())
except errors.DesignError: except errors.DesignError:
self.error(req.context, "Design %s not found" % design_id) self.error(req.context, "Design %s not found" % design_id)
self.return_error( self.return_error(resp,
resp, falcon.HTTP_404,
falcon.HTTP_404, message="Design %s not found" % design_id,
message="Design %s not found" % design_id, retry=False)
retry=False)
class DesignsPartsResource(StatefulResource): class DesignsPartsResource(StatefulResource):
def __init__(self, ingester=None, **kwargs): def __init__(self, ingester=None, **kwargs):
super(DesignsPartsResource, self).__init__(**kwargs) super(DesignsPartsResource, self).__init__(**kwargs)
self.ingester = ingester self.ingester = ingester
@ -146,11 +146,10 @@ class DesignsPartsResource(StatefulResource):
self.error( self.error(
None, None,
"DesignsPartsResource POST requires parameter 'ingester'") "DesignsPartsResource POST requires parameter 'ingester'")
self.return_error( self.return_error(resp,
resp, falcon.HTTP_400,
falcon.HTTP_400, message="POST requires parameter 'ingester'",
message="POST requires parameter 'ingester'", retry=False)
retry=False)
else: else:
try: try:
raw_body = req.stream.read(req.content_length or 0) raw_body = req.stream.read(req.content_length or 0)
@ -162,37 +161,34 @@ class DesignsPartsResource(StatefulResource):
design_id=design_id, design_id=design_id,
context=req.context) context=req.context)
resp.status = falcon.HTTP_201 resp.status = falcon.HTTP_201
resp.body = json.dumps( resp.text = json.dumps(
[x.obj_to_simple() for x in parsed_items]) [x.obj_to_simple() for x in parsed_items])
else: else:
self.return_error( self.return_error(resp,
resp, falcon.HTTP_400,
falcon.HTTP_400, message="Empty body not supported",
message="Empty body not supported", retry=False)
retry=False)
except ValueError: except ValueError:
self.return_error( self.return_error(resp,
resp, falcon.HTTP_500,
falcon.HTTP_500, message="Error processing input",
message="Error processing input", retry=False)
retry=False)
except LookupError: except LookupError:
self.return_error( self.return_error(resp,
resp, falcon.HTTP_400,
falcon.HTTP_400, message="Ingester %s not registered" %
message="Ingester %s not registered" % ingester_name, ingester_name,
retry=False) retry=False)
@policy.ApiEnforcer('physical_provisioner:ingest_data') @policy.ApiEnforcer('physical_provisioner:ingest_data')
def on_get(self, req, resp, design_id): def on_get(self, req, resp, design_id):
try: try:
design = self.state_manager.get_design(design_id) design = self.state_manager.get_design(design_id)
except errors.DesignError: except errors.DesignError:
self.return_error( self.return_error(resp,
resp, falcon.HTTP_404,
falcon.HTTP_404, message="Design %s nout found" % design_id,
message="Design %s nout found" % design_id, retry=False)
retry=False)
part_catalog = [] part_catalog = []
@ -225,12 +221,13 @@ class DesignsPartsResource(StatefulResource):
'key': n.get_id() 'key': n.get_id()
} for n in design.baremetal_nodes]) } for n in design.baremetal_nodes])
resp.body = json.dumps(part_catalog) resp.text = json.dumps(part_catalog)
resp.status = falcon.HTTP_200 resp.status = falcon.HTTP_200
return return
class DesignsPartsKindsResource(StatefulResource): class DesignsPartsKindsResource(StatefulResource):
def __init__(self, **kwargs): def __init__(self, **kwargs):
super(DesignsPartsKindsResource, self).__init__(**kwargs) super(DesignsPartsKindsResource, self).__init__(**kwargs)
self.authorized_roles = ['user'] self.authorized_roles = ['user']
@ -242,6 +239,7 @@ class DesignsPartsKindsResource(StatefulResource):
class DesignsPartResource(StatefulResource): class DesignsPartResource(StatefulResource):
def __init__(self, orchestrator=None, **kwargs): def __init__(self, orchestrator=None, **kwargs):
super(DesignsPartResource, self).__init__(**kwargs) super(DesignsPartResource, self).__init__(**kwargs)
self.authorized_roles = ['user'] self.authorized_roles = ['user']
@ -273,19 +271,21 @@ class DesignsPartResource(StatefulResource):
part = design.get_baremetal_node(name) part = design.get_baremetal_node(name)
else: else:
self.error(req.context, "Kind %s unknown" % kind) self.error(req.context, "Kind %s unknown" % kind)
self.return_error( self.return_error(resp,
resp, falcon.HTTP_404,
falcon.HTTP_404, message="Kind %s unknown" % kind,
message="Kind %s unknown" % kind, retry=False)
retry=False)
return return
resp.body = json.dumps(part.obj_to_simple()) resp.text = json.dumps(part.obj_to_simple())
except errors.DesignError as dex: except errors.DesignError as dex:
self.error(req.context, str(dex)) self.error(req.context, str(dex))
self.return_error( self.return_error(resp,
resp, falcon.HTTP_404, message=str(dex), retry=False) falcon.HTTP_404,
message=str(dex),
retry=False)
except Exception as exc: except Exception as exc:
self.error(req.context, str(exc)) self.error(req.context, str(exc))
self.return_error( self.return_error(resp.falcon.HTTP_500,
resp.falcon.HTTP_500, message=str(exc), retry=False) message=str(exc),
retry=False)

View File

@ -40,10 +40,9 @@ class HealthResource(StatefulResource):
""" """
Returns 204 on healthy, otherwise 503, without response body. Returns 204 on healthy, otherwise 503, without response body.
""" """
hc = HealthCheckCombined( hc = HealthCheckCombined(state_manager=self.state_manager,
state_manager=self.state_manager, orchestrator=self.orchestrator,
orchestrator=self.orchestrator, extended=False)
extended=False)
return hc.get(req, resp) return hc.get(req, resp)
@ -65,10 +64,9 @@ class HealthExtendedResource(StatefulResource):
""" """
Returns 200 on success, otherwise 503, with a response body. Returns 200 on success, otherwise 503, with a response body.
""" """
hc = HealthCheckCombined( hc = HealthCheckCombined(state_manager=self.state_manager,
state_manager=self.state_manager, orchestrator=self.orchestrator,
orchestrator=self.orchestrator, extended=True)
extended=True)
return hc.get(req, resp) return hc.get(req, resp)
@ -97,8 +95,8 @@ class HealthCheckCombined(object):
if now is None: if now is None:
raise Exception('None received from database for now()') raise Exception('None received from database for now()')
except Exception: except Exception:
hcm = HealthCheckMessage( hcm = HealthCheckMessage(msg='Unable to connect to database',
msg='Unable to connect to database', error=True) error=True)
health_check.add_detail_msg(msg=hcm) health_check.add_detail_msg(msg=hcm)
# Test MaaS connection # Test MaaS connection
@ -111,12 +109,12 @@ class HealthCheckCombined(object):
if maas_validation.task.get_status() == ActionResult.Failure: if maas_validation.task.get_status() == ActionResult.Failure:
raise Exception('MaaS task failure') raise Exception('MaaS task failure')
except Exception: except Exception:
hcm = HealthCheckMessage( hcm = HealthCheckMessage(msg='Unable to connect to MaaS',
msg='Unable to connect to MaaS', error=True) error=True)
health_check.add_detail_msg(msg=hcm) health_check.add_detail_msg(msg=hcm)
if self.extended: if self.extended:
resp.body = json.dumps(health_check.to_dict()) resp.text = json.dumps(health_check.to_dict())
if health_check.is_healthy() and self.extended: if health_check.is_healthy() and self.extended:
resp.status = falcon.HTTP_200 resp.status = falcon.HTTP_200

View File

@ -22,6 +22,7 @@ from drydock_provisioner import policy
class AuthMiddleware(object): class AuthMiddleware(object):
def __init__(self): def __init__(self):
self.logger = logging.getLogger('drydock') self.logger = logging.getLogger('drydock')
@ -31,8 +32,8 @@ class AuthMiddleware(object):
ctx.set_policy_engine(policy.policy_engine) ctx.set_policy_engine(policy.policy_engine)
self.logger.debug( self.logger.debug("Request with headers: %s" %
"Request with headers: %s" % ','.join(req.headers.keys())) ','.join(req.headers.keys()))
auth_status = req.get_header('X-SERVICE-IDENTITY-STATUS') auth_status = req.get_header('X-SERVICE-IDENTITY-STATUS')
service = True service = True
@ -78,6 +79,7 @@ class AuthMiddleware(object):
class ContextMiddleware(object): class ContextMiddleware(object):
def __init__(self): def __init__(self):
# Setup validation pattern for external marker # Setup validation pattern for external marker
UUIDv4_pattern = '^[0-9A-F]{8}-[0-9A-F]{4}-4[0-9A-F]{3}-[89AB][0-9A-F]{3}-[0-9A-F]{12}$' UUIDv4_pattern = '^[0-9A-F]{8}-[0-9A-F]{4}-4[0-9A-F]{3}-[89AB][0-9A-F]{3}-[0-9A-F]{12}$'
@ -101,6 +103,7 @@ class ContextMiddleware(object):
class LoggingMiddleware(object): class LoggingMiddleware(object):
def __init__(self): def __init__(self):
self.logger = logging.getLogger(cfg.CONF.logging.control_logger_name) self.logger = logging.getLogger(cfg.CONF.logging.control_logger_name)
@ -111,9 +114,9 @@ class LoggingMiddleware(object):
'external_ctx': req.context.external_marker, 'external_ctx': req.context.external_marker,
'end_user': req.context.end_user, 'end_user': req.context.end_user,
} }
self.logger.info( self.logger.info("Request: %s %s %s" %
"Request: %s %s %s" % (req.method, req.uri, req.query_string), (req.method, req.uri, req.query_string),
extra=extra) extra=extra)
def process_response(self, req, resp, resource, req_succeeded): def process_response(self, req, resp, resource, req_succeeded):
ctx = req.context ctx = req.context
@ -124,6 +127,6 @@ class LoggingMiddleware(object):
'end_user': ctx.end_user, 'end_user': ctx.end_user,
} }
resp.append_header('X-Drydock-Req', ctx.request_id) resp.append_header('X-Drydock-Req', ctx.request_id)
self.logger.info( self.logger.info("Response: %s %s - %s" %
"Response: %s %s - %s" % (req.method, req.uri, resp.status), (req.method, req.uri, resp.status),
extra=extra) extra=extra)

View File

@ -24,6 +24,7 @@ from .base import BaseResource, StatefulResource
class NodesResource(BaseResource): class NodesResource(BaseResource):
def __init__(self): def __init__(self):
super().__init__() super().__init__()
@ -41,22 +42,23 @@ class NodesResource(BaseResource):
for m in machine_list: for m in machine_list:
m.get_power_params() m.get_power_params()
node_view.append( node_view.append(
dict( dict(hostname=m.hostname,
hostname=m.hostname, memory=m.memory,
memory=m.memory, cpu_count=m.cpu_count,
cpu_count=m.cpu_count, status_name=m.status_name,
status_name=m.status_name, boot_mac=m.boot_mac,
boot_mac=m.boot_mac, power_state=m.power_state,
power_state=m.power_state, power_address=m.power_parameters.get('power_address'),
power_address=m.power_parameters.get('power_address'), boot_ip=m.boot_ip))
boot_ip=m.boot_ip))
resp.body = json.dumps(node_view) resp.text = json.dumps(node_view)
resp.status = falcon.HTTP_200 resp.status = falcon.HTTP_200
except Exception as ex: except Exception as ex:
self.error(req.context, "Unknown error: %s" % str(ex), exc_info=ex) self.error(req.context, "Unknown error: %s" % str(ex), exc_info=ex)
self.return_error( self.return_error(resp,
resp, falcon.HTTP_500, message="Unknown error", retry=False) falcon.HTTP_500,
message="Unknown error",
retry=False)
class NodeBuildDataResource(StatefulResource): class NodeBuildDataResource(StatefulResource):
@ -68,27 +70,29 @@ class NodeBuildDataResource(StatefulResource):
latest = req.params.get('latest', 'false').upper() latest = req.params.get('latest', 'false').upper()
latest = True if latest == 'TRUE' else False latest = True if latest == 'TRUE' else False
node_bd = self.state_manager.get_build_data( node_bd = self.state_manager.get_build_data(node_name=hostname,
node_name=hostname, latest=latest) latest=latest)
if not node_bd: if not node_bd:
self.return_error( self.return_error(resp,
resp, falcon.HTTP_404,
falcon.HTTP_404, message="No build data found",
message="No build data found", retry=False)
retry=False)
else: else:
node_bd = [bd.to_dict() for bd in node_bd] node_bd = [bd.to_dict() for bd in node_bd]
resp.status = falcon.HTTP_200 resp.status = falcon.HTTP_200
resp.body = json.dumps(node_bd) resp.text = json.dumps(node_bd)
resp.content_type = falcon.MEDIA_JSON resp.content_type = falcon.MEDIA_JSON
except Exception as ex: except Exception as ex:
self.error(req.context, "Unknown error: %s" % str(ex), exc_info=ex) self.error(req.context, "Unknown error: %s" % str(ex), exc_info=ex)
self.return_error( self.return_error(resp,
resp, falcon.HTTP_500, message="Unknown error", retry=False) falcon.HTTP_500,
message="Unknown error",
retry=False)
class NodeFilterResource(StatefulResource): class NodeFilterResource(StatefulResource):
def __init__(self, orchestrator=None, **kwargs): def __init__(self, orchestrator=None, **kwargs):
"""Object initializer. """Object initializer.
@ -117,9 +121,11 @@ class NodeFilterResource(StatefulResource):
node_filter=node_filter, site_design=site_design) node_filter=node_filter, site_design=site_design)
resp_list = [n.name for n in nodes if nodes] resp_list = [n.name for n in nodes if nodes]
resp.body = json.dumps(resp_list) resp.text = json.dumps(resp_list)
resp.status = falcon.HTTP_200 resp.status = falcon.HTTP_200
except Exception as ex: except Exception as ex:
self.error(req.context, "Unknown error: %s" % str(ex), exc_info=ex) self.error(req.context, "Unknown error: %s" % str(ex), exc_info=ex)
self.return_error( self.return_error(resp,
resp, falcon.HTTP_500, message="Unknown error", retry=False) falcon.HTTP_500,
message="Unknown error",
retry=False)

View File

@ -42,14 +42,16 @@ class TasksResource(StatefulResource):
try: try:
task_model_list = self.state_manager.get_tasks() task_model_list = self.state_manager.get_tasks()
task_list = [x.to_dict() for x in task_model_list] task_list = [x.to_dict() for x in task_model_list]
resp.body = json.dumps(task_list) resp.text = json.dumps(task_list)
resp.status = falcon.HTTP_200 resp.status = falcon.HTTP_200
except Exception as ex: except Exception as ex:
self.error( self.error(
req.context, req.context,
"Unknown error: %s\n%s" % (str(ex), traceback.format_exc())) "Unknown error: %s\n%s" % (str(ex), traceback.format_exc()))
self.return_error( self.return_error(resp,
resp, falcon.HTTP_500, message="Unknown error", retry=False) falcon.HTTP_500,
message="Unknown error",
retry=False)
@policy.ApiEnforcer('physical_provisioner:create_task') @policy.ApiEnforcer('physical_provisioner:create_task')
def on_post(self, req, resp): def on_post(self, req, resp):
@ -72,19 +74,20 @@ class TasksResource(StatefulResource):
action = json_data.get('action', None) action = json_data.get('action', None)
if supported_actions.get(action, None) is None: if supported_actions.get(action, None) is None:
self.error(req.context, "Unsupported action %s" % action) self.error(req.context, "Unsupported action %s" % action)
self.return_error( self.return_error(resp,
resp, falcon.HTTP_400,
falcon.HTTP_400, message="Unsupported action %s" % action,
message="Unsupported action %s" % action, retry=False)
retry=False)
else: else:
supported_actions.get(action)(self, req, resp, json_data) supported_actions.get(action)(self, req, resp, json_data)
except Exception as ex: except Exception as ex:
self.error( self.error(
req.context, req.context,
"Unknown error: %s\n%s" % (str(ex), traceback.format_exc())) "Unknown error: %s\n%s" % (str(ex), traceback.format_exc()))
self.return_error( self.return_error(resp,
resp, falcon.HTTP_500, message="Unknown error", retry=False) falcon.HTTP_500,
message="Unknown error",
retry=False)
@policy.ApiEnforcer('physical_provisioner:delete_tasks') @policy.ApiEnforcer('physical_provisioner:delete_tasks')
def on_delete(self, req, resp): def on_delete(self, req, resp):
@ -100,10 +103,10 @@ class TasksResource(StatefulResource):
if not retention_status: if not retention_status:
resp.status = falcon.HTTP_404 resp.status = falcon.HTTP_404
return return
resp.body = "Tables purged successfully." resp.text = "Tables purged successfully."
except Exception as e: except Exception as e:
self.error(req.context, "Unknown error: %s" % (str(e))) self.error(req.context, "Unknown error: %s" % (str(e)))
resp.body = "Unexpected error." resp.text = "Unexpected error."
resp.status = falcon.HTTP_500 resp.status = falcon.HTTP_500
return return
resp.status = falcon.HTTP_200 resp.status = falcon.HTTP_200
@ -118,19 +121,23 @@ class TasksResource(StatefulResource):
req.context, req.context,
"Task body ended up in wrong handler: action %s in task_validate_design" "Task body ended up in wrong handler: action %s in task_validate_design"
% action) % action)
self.return_error( self.return_error(resp,
resp, falcon.HTTP_500, message="Error", retry=False) falcon.HTTP_500,
message="Error",
retry=False)
try: try:
task = self.create_task(json_data, req.context) task = self.create_task(json_data, req.context)
resp.body = json.dumps(task.to_dict()) resp.text = json.dumps(task.to_dict())
resp.append_header('Location', resp.append_header('Location',
"/api/v1.0/tasks/%s" % str(task.task_id)) "/api/v1.0/tasks/%s" % str(task.task_id))
resp.status = falcon.HTTP_201 resp.status = falcon.HTTP_201
except errors.InvalidFormat as ex: except errors.InvalidFormat as ex:
self.error(req.context, ex.msg) self.error(req.context, ex.msg)
self.return_error( self.return_error(resp,
resp, falcon.HTTP_400, message=ex.msg, retry=False) falcon.HTTP_400,
message=ex.msg,
retry=False)
@policy.ApiEnforcer('physical_provisioner:verify_site') @policy.ApiEnforcer('physical_provisioner:verify_site')
def task_verify_site(self, req, resp, json_data): def task_verify_site(self, req, resp, json_data):
@ -142,19 +149,23 @@ class TasksResource(StatefulResource):
req.context, req.context,
"Task body ended up in wrong handler: action %s in task_verify_site" "Task body ended up in wrong handler: action %s in task_verify_site"
% action) % action)
self.return_error( self.return_error(resp,
resp, falcon.HTTP_500, message="Error", retry=False) falcon.HTTP_500,
message="Error",
retry=False)
try: try:
task = self.create_task(json_data, req.context) task = self.create_task(json_data, req.context)
resp.body = json.dumps(task.to_dict()) resp.text = json.dumps(task.to_dict())
resp.append_header('Location', resp.append_header('Location',
"/api/v1.0/tasks/%s" % str(task.task_id)) "/api/v1.0/tasks/%s" % str(task.task_id))
resp.status = falcon.HTTP_201 resp.status = falcon.HTTP_201
except errors.InvalidFormat as ex: except errors.InvalidFormat as ex:
self.error(req.context, ex.msg) self.error(req.context, ex.msg)
self.return_error( self.return_error(resp,
resp, falcon.HTTP_400, message=ex.msg, retry=False) falcon.HTTP_400,
message=ex.msg,
retry=False)
@policy.ApiEnforcer('physical_provisioner:prepare_site') @policy.ApiEnforcer('physical_provisioner:prepare_site')
def task_prepare_site(self, req, resp, json_data): def task_prepare_site(self, req, resp, json_data):
@ -166,19 +177,23 @@ class TasksResource(StatefulResource):
req.context, req.context,
"Task body ended up in wrong handler: action %s in task_prepare_site" "Task body ended up in wrong handler: action %s in task_prepare_site"
% action) % action)
self.return_error( self.return_error(resp,
resp, falcon.HTTP_500, message="Error", retry=False) falcon.HTTP_500,
message="Error",
retry=False)
try: try:
task = self.create_task(json_data, req.context) task = self.create_task(json_data, req.context)
resp.body = json.dumps(task.to_dict()) resp.text = json.dumps(task.to_dict())
resp.append_header('Location', resp.append_header('Location',
"/api/v1.0/tasks/%s" % str(task.task_id)) "/api/v1.0/tasks/%s" % str(task.task_id))
resp.status = falcon.HTTP_201 resp.status = falcon.HTTP_201
except errors.InvalidFormat as ex: except errors.InvalidFormat as ex:
self.error(req.context, ex.msg) self.error(req.context, ex.msg)
self.return_error( self.return_error(resp,
resp, falcon.HTTP_400, message=ex.msg, retry=False) falcon.HTTP_400,
message=ex.msg,
retry=False)
@policy.ApiEnforcer('physical_provisioner:verify_nodes') @policy.ApiEnforcer('physical_provisioner:verify_nodes')
def task_verify_nodes(self, req, resp, json_data): def task_verify_nodes(self, req, resp, json_data):
@ -190,19 +205,23 @@ class TasksResource(StatefulResource):
req.context, req.context,
"Task body ended up in wrong handler: action %s in task_verify_nodes" "Task body ended up in wrong handler: action %s in task_verify_nodes"
% action) % action)
self.return_error( self.return_error(resp,
resp, falcon.HTTP_500, message="Error", retry=False) falcon.HTTP_500,
message="Error",
retry=False)
try: try:
task = self.create_task(json_data, req.context) task = self.create_task(json_data, req.context)
resp.body = json.dumps(task.to_dict()) resp.text = json.dumps(task.to_dict())
resp.append_header('Location', resp.append_header('Location',
"/api/v1.0/tasks/%s" % str(task.task_id)) "/api/v1.0/tasks/%s" % str(task.task_id))
resp.status = falcon.HTTP_201 resp.status = falcon.HTTP_201
except errors.InvalidFormat as ex: except errors.InvalidFormat as ex:
self.error(req.context, ex.msg) self.error(req.context, ex.msg)
self.return_error( self.return_error(resp,
resp, falcon.HTTP_400, message=ex.msg, retry=False) falcon.HTTP_400,
message=ex.msg,
retry=False)
@policy.ApiEnforcer('physical_provisioner:prepare_nodes') @policy.ApiEnforcer('physical_provisioner:prepare_nodes')
def task_prepare_nodes(self, req, resp, json_data): def task_prepare_nodes(self, req, resp, json_data):
@ -214,19 +233,23 @@ class TasksResource(StatefulResource):
req.context, req.context,
"Task body ended up in wrong handler: action %s in task_prepare_nodes" "Task body ended up in wrong handler: action %s in task_prepare_nodes"
% action) % action)
self.return_error( self.return_error(resp,
resp, falcon.HTTP_500, message="Error", retry=False) falcon.HTTP_500,
message="Error",
retry=False)
try: try:
task = self.create_task(json_data, req.context) task = self.create_task(json_data, req.context)
resp.body = json.dumps(task.to_dict()) resp.text = json.dumps(task.to_dict())
resp.append_header('Location', resp.append_header('Location',
"/api/v1.0/tasks/%s" % str(task.task_id)) "/api/v1.0/tasks/%s" % str(task.task_id))
resp.status = falcon.HTTP_201 resp.status = falcon.HTTP_201
except errors.InvalidFormat as ex: except errors.InvalidFormat as ex:
self.error(req.context, ex.msg) self.error(req.context, ex.msg)
self.return_error( self.return_error(resp,
resp, falcon.HTTP_400, message=ex.msg, retry=False) falcon.HTTP_400,
message=ex.msg,
retry=False)
@policy.ApiEnforcer('physical_provisioner:deploy_nodes') @policy.ApiEnforcer('physical_provisioner:deploy_nodes')
def task_deploy_nodes(self, req, resp, json_data): def task_deploy_nodes(self, req, resp, json_data):
@ -238,19 +261,23 @@ class TasksResource(StatefulResource):
req.context, req.context,
"Task body ended up in wrong handler: action %s in task_deploy_nodes" "Task body ended up in wrong handler: action %s in task_deploy_nodes"
% action) % action)
self.return_error( self.return_error(resp,
resp, falcon.HTTP_500, message="Error", retry=False) falcon.HTTP_500,
message="Error",
retry=False)
try: try:
task = self.create_task(json_data, req.context) task = self.create_task(json_data, req.context)
resp.body = json.dumps(task.to_dict()) resp.text = json.dumps(task.to_dict())
resp.append_header('Location', resp.append_header('Location',
"/api/v1.0/tasks/%s" % str(task.task_id)) "/api/v1.0/tasks/%s" % str(task.task_id))
resp.status = falcon.HTTP_201 resp.status = falcon.HTTP_201
except errors.InvalidFormat as ex: except errors.InvalidFormat as ex:
self.error(req.context, ex.msg) self.error(req.context, ex.msg)
self.return_error( self.return_error(resp,
resp, falcon.HTTP_400, message=ex.msg, retry=False) falcon.HTTP_400,
message=ex.msg,
retry=False)
@policy.ApiEnforcer('physical_provisioner:destroy_nodes') @policy.ApiEnforcer('physical_provisioner:destroy_nodes')
def task_destroy_nodes(self, req, resp, json_data): def task_destroy_nodes(self, req, resp, json_data):
@ -262,19 +289,23 @@ class TasksResource(StatefulResource):
req.context, req.context,
"Task body ended up in wrong handler: action %s in task_destroy_nodes" "Task body ended up in wrong handler: action %s in task_destroy_nodes"
% action) % action)
self.return_error( self.return_error(resp,
resp, falcon.HTTP_500, message="Error", retry=False) falcon.HTTP_500,
message="Error",
retry=False)
try: try:
task = self.create_task(json_data, req.context) task = self.create_task(json_data, req.context)
resp.body = json.dumps(task.to_dict()) resp.text = json.dumps(task.to_dict())
resp.append_header('Location', resp.append_header('Location',
"/api/v1.0/tasks/%s" % str(task.task_id)) "/api/v1.0/tasks/%s" % str(task.task_id))
resp.status = falcon.HTTP_201 resp.status = falcon.HTTP_201
except errors.InvalidFormat as ex: except errors.InvalidFormat as ex:
self.error(req.context, ex.msg) self.error(req.context, ex.msg)
self.return_error( self.return_error(resp,
resp, falcon.HTTP_400, message=ex.msg, retry=False) falcon.HTTP_400,
message=ex.msg,
retry=False)
@policy.ApiEnforcer('physical_provisioner:relabel_nodes') @policy.ApiEnforcer('physical_provisioner:relabel_nodes')
def task_relabel_nodes(self, req, resp, json_data): def task_relabel_nodes(self, req, resp, json_data):
@ -286,19 +317,23 @@ class TasksResource(StatefulResource):
req.context, req.context,
"Task body ended up in wrong handler: action %s in task_relabel_nodes" "Task body ended up in wrong handler: action %s in task_relabel_nodes"
% action) % action)
self.return_error( self.return_error(resp,
resp, falcon.HTTP_500, message="Error", retry=False) falcon.HTTP_500,
message="Error",
retry=False)
try: try:
task = self.create_task(json_data, req.context) task = self.create_task(json_data, req.context)
resp.body = json.dumps(task.to_dict()) resp.text = json.dumps(task.to_dict())
resp.append_header('Location', resp.append_header('Location',
"/api/v1.0/tasks/%s" % str(task.task_id)) "/api/v1.0/tasks/%s" % str(task.task_id))
resp.status = falcon.HTTP_201 resp.status = falcon.HTTP_201
except errors.InvalidFormat as ex: except errors.InvalidFormat as ex:
self.error(req.context, ex.msg) self.error(req.context, ex.msg)
self.return_error( self.return_error(resp,
resp, falcon.HTTP_400, message=ex.msg, retry=False) falcon.HTTP_400,
message=ex.msg,
retry=False)
def create_task(self, task_body, req_context): def create_task(self, task_body, req_context):
"""General task creation. """General task creation.
@ -320,11 +355,10 @@ class TasksResource(StatefulResource):
raise errors.InvalidFormat( raise errors.InvalidFormat(
'Task creation requires fields design_ref, action') 'Task creation requires fields design_ref, action')
task = self.orchestrator.create_task( task = self.orchestrator.create_task(design_ref=design_ref,
design_ref=design_ref, action=action,
action=action, node_filter=node_filter,
node_filter=node_filter, context=req_context)
context=req_context)
task.set_status(hd_fields.TaskStatus.Queued) task.set_status(hd_fields.TaskStatus.Queued)
task.save() task.save()
@ -357,11 +391,10 @@ class TaskResource(StatefulResource):
if first_task is None: if first_task is None:
self.info(req.context, "Task %s does not exist" % task_id) self.info(req.context, "Task %s does not exist" % task_id)
self.return_error( self.return_error(resp,
resp, falcon.HTTP_404,
falcon.HTTP_404, message="Task %s does not exist" % task_id,
message="Task %s does not exist" % task_id, retry=False)
retry=False)
else: else:
# If layers is passed in then it returns a dict of tasks instead of the task dict. # If layers is passed in then it returns a dict of tasks instead of the task dict.
if layers: if layers:
@ -380,12 +413,14 @@ class TaskResource(StatefulResource):
1, first_task) 1, first_task)
resp_data['subtask_errors'] = errors resp_data['subtask_errors'] = errors
resp.body = json.dumps(resp_data) resp.text = json.dumps(resp_data)
resp.status = falcon.HTTP_200 resp.status = falcon.HTTP_200
except Exception as ex: except Exception as ex:
self.error(req.context, "Unknown error: %s" % (str(ex))) self.error(req.context, "Unknown error: %s" % (str(ex)))
self.return_error( self.return_error(resp,
resp, falcon.HTTP_500, message="Unknown error", retry=False) falcon.HTTP_500,
message="Unknown error",
retry=False)
def get_task(self, req, resp, task_id, builddata): def get_task(self, req, resp, task_id, builddata):
try: try:
@ -403,8 +438,10 @@ class TaskResource(StatefulResource):
return task_dict return task_dict
except Exception as ex: except Exception as ex:
self.error(req.context, "Unknown error: %s" % (str(ex))) self.error(req.context, "Unknown error: %s" % (str(ex)))
self.return_error( self.return_error(resp,
resp, falcon.HTTP_500, message="Unknown error", retry=False) falcon.HTTP_500,
message="Unknown error",
retry=False)
def handle_layers(self, req, resp, task_id, builddata, subtask_errors, def handle_layers(self, req, resp, task_id, builddata, subtask_errors,
layers, first_task): layers, first_task):
@ -450,10 +487,10 @@ class TaskBuilddataResource(StatefulResource):
if not bd_list: if not bd_list:
resp.status = falcon.HTTP_404 resp.status = falcon.HTTP_404
return return
resp.body = json.dumps([bd.to_dict() for bd in bd_list]) resp.text = json.dumps([bd.to_dict() for bd in bd_list])
except Exception as e: except Exception as e:
self.error(req.context, "Unknown error: %s" % (str(e))) self.error(req.context, "Unknown error: %s" % (str(e)))
resp.body = "Unexpected error." resp.text = "Unexpected error."
resp.status = falcon.HTTP_500 resp.status = falcon.HTTP_500
return return
resp.status = falcon.HTTP_200 resp.status = falcon.HTTP_200

View File

@ -25,8 +25,9 @@ def get_internal_api_href(ver):
if ver in supported_versions: if ver in supported_versions:
ks_sess = KeystoneUtils.get_session() ks_sess = KeystoneUtils.get_session()
url = KeystoneClient.get_endpoint( url = KeystoneClient.get_endpoint("physicalprovisioner",
"physicalprovisioner", ks_sess=ks_sess, interface='internal') ks_sess=ks_sess,
interface='internal')
return url return url
else: else:
raise ApiError("API version %s unknown." % ver) raise ApiError("API version %s unknown." % ver)

View File

@ -62,12 +62,12 @@ class ValidationResource(StatefulResource):
resp_message = validation.to_dict() resp_message = validation.to_dict()
resp_message['code'] = 200 resp_message['code'] = 200
resp.status = falcon.HTTP_200 resp.status = falcon.HTTP_200
resp.body = json.dumps(resp_message) resp.text = json.dumps(resp_message)
else: else:
resp_message = validation.to_dict() resp_message = validation.to_dict()
resp_message['code'] = 400 resp_message['code'] = 400
resp.status = falcon.HTTP_400 resp.status = falcon.HTTP_400
resp.body = json.dumps(resp_message) resp.text = json.dumps(resp_message)
except errors.InvalidFormat as e: except errors.InvalidFormat as e:
err_message = str(e) err_message = str(e)

View File

@ -64,6 +64,7 @@ class ProviderDriver(object):
# Execute a single task in a separate thread # Execute a single task in a separate thread
class DriverActionRunner(Thread): class DriverActionRunner(Thread):
def __init__(self, action=None): def __init__(self, action=None):
super().__init__() super().__init__()

View File

@ -22,6 +22,7 @@ from drydock_provisioner.orchestrator.actions.orchestrator import BaseAction
class PromenadeAction(BaseAction): class PromenadeAction(BaseAction):
def __init__(self, *args, prom_client=None): def __init__(self, *args, prom_client=None):
super().__init__(*args) super().__init__(*args)
@ -42,11 +43,10 @@ class RelabelNode(PromenadeAction):
try: try:
site_design = self._load_site_design() site_design = self._load_site_design()
except errors.OrchestratorError: except errors.OrchestratorError:
self.task.add_status_msg( self.task.add_status_msg(msg="Error loading site design.",
msg="Error loading site design.", error=True,
error=True, ctx='NA',
ctx='NA', ctx_type='NA')
ctx_type='NA')
self.task.set_status(hd_fields.TaskStatus.Complete) self.task.set_status(hd_fields.TaskStatus.Complete)
self.task.failure() self.task.failure()
self.task.save() self.task.save()
@ -58,14 +58,16 @@ class RelabelNode(PromenadeAction):
for n in nodes: for n in nodes:
# Relabel node through Promenade # Relabel node through Promenade
try: try:
self.logger.info( self.logger.info("Relabeling node %s with node label data." %
"Relabeling node %s with node label data." % n.name) n.name)
labels_dict = n.get_node_labels() labels_dict = n.get_node_labels()
msg = "Set labels %s for node %s" % (str(labels_dict), n.name) msg = "Set labels %s for node %s" % (str(labels_dict), n.name)
self.task.add_status_msg( self.task.add_status_msg(msg=msg,
msg=msg, error=False, ctx=n.name, ctx_type='node') error=False,
ctx=n.name,
ctx_type='node')
# Call promenade to invoke relabel node # Call promenade to invoke relabel node
self.promenade_client.relabel_node(n.get_id(), labels_dict) self.promenade_client.relabel_node(n.get_id(), labels_dict)
@ -74,8 +76,10 @@ class RelabelNode(PromenadeAction):
msg = "Error relabeling node %s with label data" % n.name msg = "Error relabeling node %s with label data" % n.name
self.logger.warning(msg + ": " + str(ex)) self.logger.warning(msg + ": " + str(ex))
self.task.failure(focus=n.get_id()) self.task.failure(focus=n.get_id())
self.task.add_status_msg( self.task.add_status_msg(msg=msg,
msg=msg, error=True, ctx=n.name, ctx_type='node') error=True,
ctx=n.name,
ctx_type='node')
continue continue
self.task.set_status(hd_fields.TaskStatus.Complete) self.task.set_status(hd_fields.TaskStatus.Complete)

View File

@ -61,8 +61,9 @@ class PromenadeDriver(KubernetesDriver):
raise errors.DriverError("Invalid task %s" % (task_id)) raise errors.DriverError("Invalid task %s" % (task_id))
if task.action not in self.supported_actions: if task.action not in self.supported_actions:
raise errors.DriverError("Driver %s doesn't support task action %s" raise errors.DriverError(
% (self.driver_desc, task.action)) "Driver %s doesn't support task action %s" %
(self.driver_desc, task.action))
task.set_status(hd_fields.TaskStatus.Running) task.set_status(hd_fields.TaskStatus.Running)
task.save() task.save()
@ -71,11 +72,10 @@ class PromenadeDriver(KubernetesDriver):
if task.retry > 0: if task.retry > 0:
msg = "Retrying task %s on previous failed entities." % str( msg = "Retrying task %s on previous failed entities." % str(
task.get_id()) task.get_id())
task.add_status_msg( task.add_status_msg(msg=msg,
msg=msg, error=False,
error=False, ctx=str(task.get_id()),
ctx=str(task.get_id()), ctx_type='task')
ctx_type='task')
target_nodes = self.orchestrator.get_target_nodes( target_nodes = self.orchestrator.get_target_nodes(
task, failures=True) task, failures=True)
else: else:
@ -108,22 +108,20 @@ class PromenadeDriver(KubernetesDriver):
for t, f in subtask_futures.items(): for t, f in subtask_futures.items():
if not f.done(): if not f.done():
task.add_status_msg( task.add_status_msg("Subtask timed out before completing.",
"Subtask timed out before completing.", error=True,
error=True, ctx=str(uuid.UUID(bytes=t)),
ctx=str(uuid.UUID(bytes=t)), ctx_type='task')
ctx_type='task')
task.failure() task.failure()
else: else:
if f.exception(): if f.exception():
msg = ("Subtask %s raised unexpected exception: %s" % msg = ("Subtask %s raised unexpected exception: %s" %
(str(uuid.UUID(bytes=t)), str(f.exception()))) (str(uuid.UUID(bytes=t)), str(f.exception())))
self.logger.error(msg, exc_info=f.exception()) self.logger.error(msg, exc_info=f.exception())
task.add_status_msg( task.add_status_msg(msg=msg,
msg=msg, error=True,
error=True, ctx=str(uuid.UUID(bytes=t)),
ctx=str(uuid.UUID(bytes=t)), ctx_type='task')
ctx_type='task')
task.failure() task.failure()
task.bubble_results() task.bubble_results()
@ -138,14 +136,14 @@ class PromenadeDriver(KubernetesDriver):
prom_client=prom_client) prom_client=prom_client)
action.start() action.start()
except Exception as e: except Exception as e:
msg = ("Subtask for action %s raised unexpected exception: %s" msg = (
% (task.action, str(e))) "Subtask for action %s raised unexpected exception: %s" %
(task.action, str(e)))
self.logger.error(msg, exc_info=e) self.logger.error(msg, exc_info=e)
task.add_status_msg( task.add_status_msg(msg=msg,
msg=msg, error=True,
error=True, ctx=str(task.get_id()),
ctx=str(task.get_id()), ctx_type='task')
ctx_type='task')
task.failure() task.failure()
task.set_status(hd_fields.TaskStatus.Complete) task.set_status(hd_fields.TaskStatus.Complete)

View File

@ -79,8 +79,9 @@ class PromenadeSession(object):
url = self.base_url + route url = self.base_url + route
self.logger.debug('GET ' + url) self.logger.debug('GET ' + url)
self.logger.debug('Query Params: ' + str(query)) self.logger.debug('Query Params: ' + str(query))
resp = self.__session.get( resp = self.__session.get(url,
url, params=query, timeout=self._timeout(timeout)) params=query,
timeout=self._timeout(timeout))
if resp.status_code == 401 and not auth_refresh: if resp.status_code == 401 and not auth_refresh:
self.set_auth() self.set_auth()
@ -109,21 +110,19 @@ class PromenadeSession(object):
self.logger.debug('PUT ' + url) self.logger.debug('PUT ' + url)
self.logger.debug('Query Params: ' + str(query)) self.logger.debug('Query Params: ' + str(query))
if body is not None: if body is not None:
self.logger.debug( self.logger.debug("Sending PUT with explicit body: \n%s" %
"Sending PUT with explicit body: \n%s" % body) body)
resp = self.__session.put( resp = self.__session.put(self.base_url + endpoint,
self.base_url + endpoint, params=query,
params=query, data=body,
data=body, timeout=self._timeout(timeout))
timeout=self._timeout(timeout))
else: else:
self.logger.debug( self.logger.debug("Sending PUT with JSON body: \n%s" %
"Sending PUT with JSON body: \n%s" % str(data)) str(data))
resp = self.__session.put( resp = self.__session.put(self.base_url + endpoint,
self.base_url + endpoint, params=query,
params=query, json=data,
json=data, timeout=self._timeout(timeout))
timeout=self._timeout(timeout))
if resp.status_code == 401 and not auth_refresh: if resp.status_code == 401 and not auth_refresh:
self.set_auth() self.set_auth()
auth_refresh = True auth_refresh = True
@ -151,21 +150,19 @@ class PromenadeSession(object):
self.logger.debug('POST ' + url) self.logger.debug('POST ' + url)
self.logger.debug('Query Params: ' + str(query)) self.logger.debug('Query Params: ' + str(query))
if body is not None: if body is not None:
self.logger.debug( self.logger.debug("Sending POST with explicit body: \n%s" %
"Sending POST with explicit body: \n%s" % body) body)
resp = self.__session.post( resp = self.__session.post(self.base_url + endpoint,
self.base_url + endpoint, params=query,
params=query, data=body,
data=body, timeout=self._timeout(timeout))
timeout=self._timeout(timeout))
else: else:
self.logger.debug( self.logger.debug("Sending POST with JSON body: \n%s" %
"Sending POST with JSON body: \n%s" % str(data)) str(data))
resp = self.__session.post( resp = self.__session.post(self.base_url + endpoint,
self.base_url + endpoint, params=query,
params=query, json=data,
json=data, timeout=self._timeout(timeout))
timeout=self._timeout(timeout))
if resp.status_code == 401 and not auth_refresh: if resp.status_code == 401 and not auth_refresh:
self.set_auth() self.set_auth()
auth_refresh = True auth_refresh = True
@ -284,9 +281,9 @@ class PromenadeClient(object):
raise errors.ClientUnauthorizedError( raise errors.ClientUnauthorizedError(
"Unauthorized access to %s, include valid token." % resp.url) "Unauthorized access to %s, include valid token." % resp.url)
elif resp.status_code == 403: elif resp.status_code == 403:
raise errors.ClientForbiddenError( raise errors.ClientForbiddenError("Forbidden access to %s" %
"Forbidden access to %s" % resp.url) resp.url)
elif not resp.ok: elif not resp.ok:
raise errors.ClientError( raise errors.ClientError("Error - received %d: %s" %
"Error - received %d: %s" % (resp.status_code, resp.text), (resp.status_code, resp.text),
code=resp.status_code) code=resp.status_code)

View File

@ -26,6 +26,7 @@ import drydock_provisioner.error as errors
class MaasOauth(req_auth.AuthBase): class MaasOauth(req_auth.AuthBase):
def __init__(self, apikey): def __init__(self, apikey):
self.consumer_key, self.token_key, self.token_secret = apikey.split( self.consumer_key, self.token_key, self.token_secret = apikey.split(
':') ':')
@ -55,18 +56,19 @@ class MaasOauth(req_auth.AuthBase):
class MaasRequestFactory(object): class MaasRequestFactory(object):
def __init__(self, base_url, apikey): def __init__(self, base_url, apikey):
# The URL in the config should end in /MAAS/, but the api is behind /MAAS/api/2.0/ # The URL in the config should end in /MAAS/, but the api is behind /MAAS/api/2.0/
self.base_url = base_url + "/api/2.0/" self.base_url = base_url + "/api/2.0/"
self.apikey = apikey self.apikey = apikey
# Adapter for maas for request retries # Adapter for maas for request retries
retry_strategy = Retry( retry_strategy = Retry(total=3,
total=3, status_forcelist=[429, 500, 502, 503, 504],
status_forcelist=[429, 500, 502, 503, 504], method_whitelist=[
method_whitelist=["HEAD", "GET", "POST", "PUT", "DELETE", "HEAD", "GET", "POST", "PUT", "DELETE",
"OPTIONS", "TRACE"] "OPTIONS", "TRACE"
) ])
self.maas_adapter = HTTPAdapter(max_retries=retry_strategy) self.maas_adapter = HTTPAdapter(max_retries=retry_strategy)
self.signer = MaasOauth(apikey) self.signer = MaasOauth(apikey)
@ -109,8 +111,8 @@ class MaasRequestFactory(object):
except requests.Timeout: except requests.Timeout:
raise errors.TransientDriverError("Timeout connection to MaaS") raise errors.TransientDriverError("Timeout connection to MaaS")
except Exception as ex: except Exception as ex:
raise errors.PersistentDriverError( raise errors.PersistentDriverError("Error accessing MaaS: %s" %
"Error accessing MaaS: %s" % str(ex)) str(ex))
if resp.status_code in [401, 403]: if resp.status_code in [401, 403]:
raise errors.PersistentDriverError( raise errors.PersistentDriverError(
@ -149,15 +151,15 @@ class MaasRequestFactory(object):
str(i).encode('utf-8')).decode('utf-8') str(i).encode('utf-8')).decode('utf-8')
content_type = 'text/plain; charset="utf-8"' content_type = 'text/plain; charset="utf-8"'
part_headers = {'Content-Transfer-Encoding': 'base64'} part_headers = {'Content-Transfer-Encoding': 'base64'}
files_tuples.append((k, (None, value, content_type, files_tuples.append(
part_headers))) (k, (None, value, content_type, part_headers)))
else: else:
value = base64.b64encode( value = base64.b64encode(
str(v).encode('utf-8')).decode('utf-8') str(v).encode('utf-8')).decode('utf-8')
content_type = 'text/plain; charset="utf-8"' content_type = 'text/plain; charset="utf-8"'
part_headers = {'Content-Transfer-Encoding': 'base64'} part_headers = {'Content-Transfer-Encoding': 'base64'}
files_tuples.append((k, (None, value, content_type, files_tuples.append(
part_headers))) (k, (None, value, content_type, part_headers)))
kwargs['files'] = files_tuples kwargs['files'] = files_tuples
params = kwargs.pop('params', None) params = kwargs.pop('params', None)
@ -174,13 +176,12 @@ class MaasRequestFactory(object):
if timeout is None: if timeout is None:
timeout = (5, 60) timeout = (5, 60)
request = requests.Request( request = requests.Request(method=method,
method=method, url=self.base_url + endpoint,
url=self.base_url + endpoint, auth=self.signer,
auth=self.signer, headers=headers,
headers=headers, params=params,
params=params, **kwargs)
**kwargs)
prepared_req = self.http_session.prepare_request(request) prepared_req = self.http_session.prepare_request(request)
@ -191,6 +192,6 @@ class MaasRequestFactory(object):
"Received error response - URL: %s %s - RESPONSE: %s" % "Received error response - URL: %s %s - RESPONSE: %s" %
(prepared_req.method, prepared_req.url, resp.status_code)) (prepared_req.method, prepared_req.url, resp.status_code))
self.logger.debug("Response content: %s" % resp.text) self.logger.debug("Response content: %s" % resp.text)
raise errors.DriverError( raise errors.DriverError("MAAS Error: %s - %s" %
"MAAS Error: %s - %s" % (resp.status_code, resp.text)) (resp.status_code, resp.text))
return resp return resp

View File

@ -45,19 +45,21 @@ from .actions.node import ConfigureNodeProvisioner
class MaasNodeDriver(NodeDriver): class MaasNodeDriver(NodeDriver):
maasdriver_options = [ maasdriver_options = [
cfg.StrOpt( cfg.StrOpt('maas_api_key',
'maas_api_key', help='The API key for accessing MaaS', help='The API key for accessing MaaS',
secret=True), secret=True),
cfg.StrOpt('maas_api_url', help='The URL for accessing MaaS API'), cfg.StrOpt('maas_api_url', help='The URL for accessing MaaS API'),
cfg.BoolOpt( cfg.BoolOpt(
'use_node_oob_params', 'use_node_oob_params',
default=False, default=False,
help='Update MAAS to use the provided Node OOB params, overwriting discovered values', help=
'Update MAAS to use the provided Node OOB params, overwriting discovered values',
), ),
cfg.BoolOpt( cfg.BoolOpt(
'skip_bmc_config', 'skip_bmc_config',
default=False, default=False,
help='Skip BMC reconfiguration during commissioning (requires MAAS 2.7+)', help=
'Skip BMC reconfiguration during commissioning (requires MAAS 2.7+)',
), ),
cfg.IntOpt( cfg.IntOpt(
'poll_interval', 'poll_interval',
@ -105,8 +107,8 @@ class MaasNodeDriver(NodeDriver):
def __init__(self, **kwargs): def __init__(self, **kwargs):
super().__init__(**kwargs) super().__init__(**kwargs)
cfg.CONF.register_opts( cfg.CONF.register_opts(MaasNodeDriver.maasdriver_options,
MaasNodeDriver.maasdriver_options, group=MaasNodeDriver.driver_key) group=MaasNodeDriver.driver_key)
self.logger = logging.getLogger( self.logger = logging.getLogger(
cfg.CONF.logging.nodedriver_logger_name) cfg.CONF.logging.nodedriver_logger_name)
@ -139,8 +141,9 @@ class MaasNodeDriver(NodeDriver):
raise errors.DriverError("Invalid task %s" % (task_id)) raise errors.DriverError("Invalid task %s" % (task_id))
if task.action not in self.supported_actions: if task.action not in self.supported_actions:
raise errors.DriverError("Driver %s doesn't support task action %s" raise errors.DriverError(
% (self.driver_desc, task.action)) "Driver %s doesn't support task action %s" %
(self.driver_desc, task.action))
task.set_status(hd_fields.TaskStatus.Running) task.set_status(hd_fields.TaskStatus.Running)
task.save() task.save()
@ -149,11 +152,10 @@ class MaasNodeDriver(NodeDriver):
if task.retry > 0: if task.retry > 0:
msg = "Retrying task %s on previous failed entities." % str( msg = "Retrying task %s on previous failed entities." % str(
task.get_id()) task.get_id())
task.add_status_msg( task.add_status_msg(msg=msg,
msg=msg, error=False,
error=False, ctx=str(task.get_id()),
ctx=str(task.get_id()), ctx_type='task')
ctx_type='task')
target_nodes = self.orchestrator.get_target_nodes( target_nodes = self.orchestrator.get_target_nodes(
task, failures=True) task, failures=True)
else: else:
@ -197,10 +199,9 @@ class MaasNodeDriver(NodeDriver):
task.failure() task.failure()
else: else:
if f.exception(): if f.exception():
self.logger.error( self.logger.error("Uncaught exception in subtask %s." %
"Uncaught exception in subtask %s." % str( str(uuid.UUID(bytes=t)),
uuid.UUID(bytes=t)), exc_info=f.exception())
exc_info=f.exception())
task.failure() task.failure()
task.bubble_results() task.bubble_results()
task.align_result() task.align_result()
@ -216,14 +217,14 @@ class MaasNodeDriver(NodeDriver):
maas_client=maas_client) maas_client=maas_client)
action.start() action.start()
except Exception as e: except Exception as e:
msg = ("Subtask for action %s raised unexpected exception: %s" msg = (
% (task.action, str(e))) "Subtask for action %s raised unexpected exception: %s" %
(task.action, str(e)))
self.logger.error(msg, exc_info=e) self.logger.error(msg, exc_info=e)
task.add_status_msg( task.add_status_msg(msg=msg,
msg=msg, error=True,
error=True, ctx=str(task.get_id()),
ctx=str(task.get_id()), ctx_type='task')
ctx_type='task')
task.failure() task.failure()
task.set_status(hd_fields.TaskStatus.Complete) task.set_status(hd_fields.TaskStatus.Complete)

View File

@ -218,8 +218,9 @@ class ResourceCollectionBase(object):
res.set_resource_id(resp_json.get('id')) res.set_resource_id(resp_json.get('id'))
return res return res
raise errors.DriverError("Failed updating MAAS url %s - return code %s" raise errors.DriverError(
% (url, resp.status_code)) "Failed updating MAAS url %s - return code %s" %
(url, resp.status_code))
""" """
Append a resource instance to the list locally only Append a resource instance to the list locally only

View File

@ -93,8 +93,8 @@ class BlockDevice(model_base.ResourceBase):
resp = self.api_client.post(url, op='format', files=data) resp = self.api_client.post(url, op='format', files=data)
if not resp.ok: if not resp.ok:
raise Exception( raise Exception("MAAS error: %s - %s" %
"MAAS error: %s - %s" % (resp.status_code, resp.text)) (resp.status_code, resp.text))
self.refresh() self.refresh()
except Exception as ex: except Exception as ex:
@ -126,8 +126,8 @@ class BlockDevice(model_base.ResourceBase):
resp = self.api_client.post(url, op='unformat') resp = self.api_client.post(url, op='unformat')
if not resp.ok: if not resp.ok:
raise Exception( raise Exception("MAAS error: %s - %s" %
"MAAS error: %s - %s" % (resp.status_code, resp.text)) (resp.status_code, resp.text))
self.refresh() self.refresh()
except Exception as ex: except Exception as ex:
msg = "Error: unformat of device %s on node %s failed: %s" \ msg = "Error: unformat of device %s on node %s failed: %s" \
@ -156,8 +156,8 @@ class BlockDevice(model_base.ResourceBase):
resp = self.api_client.post(url, op='mount', files=data) resp = self.api_client.post(url, op='mount', files=data)
if not resp.ok: if not resp.ok:
raise Exception( raise Exception("MAAS error: %s - %s" %
"MAAS error: %s - %s" % (resp.status_code, resp.text)) (resp.status_code, resp.text))
self.refresh() self.refresh()
except Exception as ex: except Exception as ex:
@ -183,8 +183,8 @@ class BlockDevice(model_base.ResourceBase):
resp = self.api_client.post(url, op='unmount') resp = self.api_client.post(url, op='unmount')
if not resp.ok: if not resp.ok:
raise Exception( raise Exception("MAAS error: %s - %s" %
"MAAS error: %s - %s" % (resp.status_code, resp.text)) (resp.status_code, resp.text))
self.refresh() self.refresh()
except Exception as ex: except Exception as ex:
@ -202,8 +202,8 @@ class BlockDevice(model_base.ResourceBase):
resp = self.api_client.post(url, op='set_boot_disk') resp = self.api_client.post(url, op='set_boot_disk')
if not resp.ok: if not resp.ok:
raise Exception( raise Exception("MAAS error: %s - %s" %
"MAAS error: %s - %s" % (resp.status_code, resp.text)) (resp.status_code, resp.text))
self.refresh() self.refresh()
except Exception as ex: except Exception as ex:

View File

@ -37,8 +37,8 @@ class Fabric(model_base.ResourceBase):
return return
def refresh_vlans(self): def refresh_vlans(self):
self.vlans = model_vlan.Vlans( self.vlans = model_vlan.Vlans(self.api_client,
self.api_client, fabric_id=self.resource_id) fabric_id=self.resource_id)
self.vlans.refresh() self.vlans.refresh()
def set_resource_id(self, res_id): def set_resource_id(self, res_id):

View File

@ -85,8 +85,8 @@ class Interface(model_base.ResourceBase):
fabric_vlan = fabric.vlans.singleton({'vid': 0}) fabric_vlan = fabric.vlans.singleton({'vid': 0})
if fabric_vlan is None: if fabric_vlan is None:
self.logger.warning( self.logger.warning("Cannot locate untagged VLAN on fabric %s" %
"Cannot locate untagged VLAN on fabric %s" % (fabric_id)) (fabric_id))
raise errors.DriverError( raise errors.DriverError(
"Cannot locate untagged VLAN on fabric %s" % (fabric_id)) "Cannot locate untagged VLAN on fabric %s" % (fabric_id))
@ -112,8 +112,8 @@ class Interface(model_base.ResourceBase):
"""Disconnect this interface from subnets and VLANs.""" """Disconnect this interface from subnets and VLANs."""
url = self.interpolate_url() url = self.interpolate_url()
self.logger.debug( self.logger.debug("Disconnecting interface %s from networks." %
"Disconnecting interface %s from networks." % (self.name)) (self.name))
resp = self.api_client.post(url, op='disconnect') resp = self.api_client.post(url, op='disconnect')
if not resp.ok: if not resp.ok:
@ -242,7 +242,8 @@ class Interface(model_base.ResourceBase):
:return: true if this interface will respond to this MAC :return: true if this interface will respond to this MAC
""" """
if mac_address.replace(':', '').upper() == self.mac_address.replace(':', '').upper(): if mac_address.replace(':', '').upper() == self.mac_address.replace(
':', '').upper():
return True return True
return False return False
@ -311,10 +312,10 @@ class Interfaces(model_base.ResourceCollectionBase):
parent_iface = self.singleton({'name': parent_name}) parent_iface = self.singleton({'name': parent_name})
if parent_iface is None: if parent_iface is None:
self.logger.error( self.logger.error("Cannot locate parent interface %s" %
"Cannot locate parent interface %s" % (parent_name)) (parent_name))
raise errors.DriverError( raise errors.DriverError("Cannot locate parent interface %s" %
"Cannot locate parent interface %s" % (parent_name)) (parent_name))
if parent_iface.vlan is None: if parent_iface.vlan is None:
self.logger.error( self.logger.error(
@ -324,8 +325,8 @@ class Interfaces(model_base.ResourceCollectionBase):
"Cannot create VLAN interface on disconnected parent %s" % "Cannot create VLAN interface on disconnected parent %s" %
(parent_iface.resource_id)) (parent_iface.resource_id))
vlans = maas_vlan.Vlans( vlans = maas_vlan.Vlans(self.api_client,
self.api_client, fabric_id=parent_iface.fabric_id) fabric_id=parent_iface.fabric_id)
vlans.refresh() vlans.refresh()
vlan = vlans.singleton({'vid': vlan_tag}) vlan = vlans.singleton({'vid': vlan_tag})

View File

@ -72,5 +72,6 @@ class IpRanges(model_base.ResourceCollectionBase):
res.set_resource_id(resp_json.get('id')) res.set_resource_id(resp_json.get('id'))
return res return res
raise errors.DriverError("Failed updating MAAS url %s - return code %s" raise errors.DriverError(
% (url, resp.status_code)) "Failed updating MAAS url %s - return code %s" %
(url, resp.status_code))

View File

@ -31,6 +31,7 @@ LOG = logging.getLogger(__name__)
power_lock = Lock() power_lock = Lock()
power_cv = Condition(lock=power_lock) power_cv = Condition(lock=power_lock)
class Machine(model_base.ResourceBase): class Machine(model_base.ResourceBase):
resource_url = 'machines/{resource_id}/' resource_url = 'machines/{resource_id}/'
@ -62,8 +63,8 @@ class Machine(model_base.ResourceBase):
api_client, system_id=self.resource_id) api_client, system_id=self.resource_id)
self.volume_groups.refresh() self.volume_groups.refresh()
except Exception: except Exception:
self.logger.warning( self.logger.warning("Failed load node %s volume groups." %
"Failed load node %s volume groups." % (self.resource_id)) (self.resource_id))
else: else:
self.interfaces = None self.interfaces = None
self.block_devices = None self.block_devices = None
@ -123,28 +124,28 @@ class Machine(model_base.ResourceBase):
Removes all the volume groups/logical volumes and all the physical Removes all the volume groups/logical volumes and all the physical
device partitions on this machine. device partitions on this machine.
""" """
self.logger.info( self.logger.info("Resetting storage configuration on node %s" %
"Resetting storage configuration on node %s" % (self.resource_id)) (self.resource_id))
if self.volume_groups is not None and self.volume_groups.len() > 0: if self.volume_groups is not None and self.volume_groups.len() > 0:
for vg in self.volume_groups: for vg in self.volume_groups:
self.logger.debug("Removing VG %s" % vg.name) self.logger.debug("Removing VG %s" % vg.name)
vg.delete() vg.delete()
else: else:
self.logger.debug( self.logger.debug("No VGs configured on node %s" %
"No VGs configured on node %s" % (self.resource_id)) (self.resource_id))
if self.block_devices is not None: if self.block_devices is not None:
for d in self.block_devices: for d in self.block_devices:
if d.partitions is not None and d.partitions.len() > 0: if d.partitions is not None and d.partitions.len() > 0:
self.logger.debug( self.logger.debug("Clearing partitions on device %s" %
"Clearing partitions on device %s" % d.name) d.name)
d.clear_partitions() d.clear_partitions()
else: else:
self.logger.debug( self.logger.debug("No partitions found on device %s" %
"No partitions found on device %s" % d.name) d.name)
else: else:
self.logger.debug( self.logger.debug("No block devices found on node %s" %
"No block devices found on node %s" % (self.resource_id)) (self.resource_id))
def set_storage_layout(self, def set_storage_layout(self,
layout_type='flat', layout_type='flat',
@ -199,12 +200,13 @@ class Machine(model_base.ResourceBase):
if root_lv_name: if root_lv_name:
data['lv_name'] = root_lv_name data['lv_name'] = root_lv_name
resp = self.api_client.post( resp = self.api_client.post(url,
url, op='set_storage_layout', files=data) op='set_storage_layout',
files=data)
if not resp.ok: if not resp.ok:
raise Exception( raise Exception("MAAS Error: %s - %s" %
"MAAS Error: %s - %s" % (resp.status_code, resp.text)) (resp.status_code, resp.text))
except Exception as ex: except Exception as ex:
msg = "Error: failed configuring node %s storage layout: %s" % ( msg = "Error: failed configuring node %s storage layout: %s" % (
self.resource_id, str(ex)) self.resource_id, str(ex))
@ -356,10 +358,9 @@ class Machine(model_base.ResourceBase):
:param str result_type: the type of results to return. One of :param str result_type: the type of results to return. One of
``all``, ``commissioning``, ``testing``, ``deploy`` ``all``, ``commissioning``, ``testing``, ``deploy``
""" """
node_results = maas_nr.NodeResults( node_results = maas_nr.NodeResults(self.api_client,
self.api_client, system_id_list=[self.resource_id],
system_id_list=[self.resource_id], result_type=result_type)
result_type=result_type)
node_results.refresh() node_results.refresh()
return node_results return node_results
@ -375,8 +376,9 @@ class Machine(model_base.ResourceBase):
""" """
url = self.interpolate_url() url = self.interpolate_url()
resp = self.api_client.post( resp = self.api_client.post(url,
url, op='set_workload_annotations', files={key: value}) op='set_workload_annotations',
files={key: value})
if resp.status_code != 200: if resp.status_code != 200:
self.logger.error( self.logger.error(
@ -406,29 +408,26 @@ class Machine(model_base.ResourceBase):
if kwargs: if kwargs:
power_params = dict() power_params = dict()
self.logger.debug("Setting node power type to %s." % power_type) self.logger.debug("Setting node power type to %s." %
power_type)
self.power_type = power_type self.power_type = power_type
power_params['power_type'] = power_type power_params['power_type'] = power_type
for k, v in kwargs.items(): for k, v in kwargs.items():
power_params['power_parameters_' + k] = v power_params['power_parameters_' + k] = v
self.logger.debug( self.logger.debug("Updating node %s power parameters: %s" % (
"Updating node %s power parameters: %s" self.hostname,
% ( str({
self.hostname, **power_params,
str( **{
{ k: "<redacted>"
**power_params, for k in power_params if k in [
**{ "power_parameters_power_pass"
k: "<redacted>" ]
for k in power_params },
if k in ["power_parameters_power_pass"] }),
}, ))
}
),
)
)
resp = self.api_client.put(url, files=power_params) resp = self.api_client.put(url, files=power_params)
if resp.status_code == 200: if resp.status_code == 200:
@ -448,8 +447,9 @@ class Machine(model_base.ResourceBase):
with power_cv: with power_cv:
url = self.interpolate_url() url = self.interpolate_url()
self.logger.debug("Resetting node power type for machine {}".format( self.logger.debug(
self.resource_id)) "Resetting node power type for machine {}".format(
self.resource_id))
self.power_type = 'manual' self.power_type = 'manual'
power_params = {'power_type': 'manual'} power_params = {'power_type': 'manual'}
resp = self.api_client.put(url, files=power_params) resp = self.api_client.put(url, files=power_params)
@ -482,12 +482,11 @@ class Machine(model_base.ResourceBase):
'virsh', 'virsh',
power_address=oob_params.get('libvirt_uri'), power_address=oob_params.get('libvirt_uri'),
power_id=n.name) power_id=n.name)
elif use_node_oob_params and (n.oob_type == "ipmi" or n.oob_type == "redfish"): elif use_node_oob_params and (n.oob_type == "ipmi"
or n.oob_type == "redfish"):
self.logger.debug( self.logger.debug(
"Updating node {} MaaS power parameters for {}.".format( "Updating node {} MaaS power parameters for {}.".format(
n.name, n.oob_type n.name, n.oob_type))
)
)
oob_params = n.oob_parameters oob_params = n.oob_parameters
oob_network = oob_params.get("network") oob_network = oob_params.get("network")
oob_address = n.get_network_address(oob_network) oob_address = n.get_network_address(oob_network)
@ -585,21 +584,20 @@ class Machines(model_base.ResourceCollectionBase):
url = self.interpolate_url() url = self.interpolate_url()
resp = self.api_client.post( resp = self.api_client.post(url,
url, op='allocate', files={'system_id': node.resource_id}) op='allocate',
files={'system_id': node.resource_id})
if not resp.ok: if not resp.ok:
self.logger.error( self.logger.error("Error acquiring node, MaaS returned %s" %
"Error acquiring node, MaaS returned %s" % resp.status_code) resp.status_code)
self.logger.debug("MaaS response: %s" % resp.text) self.logger.debug("MaaS response: %s" % resp.text)
raise errors.DriverError( raise errors.DriverError("Error acquiring node, MaaS returned %s" %
"Error acquiring node, MaaS returned %s" % resp.status_code) resp.status_code)
return node return node
def identify_baremetal_node(self, def identify_baremetal_node(self, node_model, probably_exists=True):
node_model,
probably_exists=True):
"""Find MaaS node resource matching Drydock BaremetalNode. """Find MaaS node resource matching Drydock BaremetalNode.
Performs one or more queries to the MaaS API to find a Machine matching Performs one or more queries to the MaaS API to find a Machine matching
@ -642,8 +640,8 @@ class Machines(model_base.ResourceCollectionBase):
maas_node = self.find_node_with_mac(node_model.boot_mac) maas_node = self.find_node_with_mac(node_model.boot_mac)
if maas_node is None: if maas_node is None:
self.logger.info( self.logger.info("Could not locate node %s in MaaS" %
"Could not locate node %s in MaaS" % node_model.name) node_model.name)
else: else:
self.logger.debug("Found MaaS resource %s matching Node %s" % self.logger.debug("Found MaaS resource %s matching Node %s" %
(maas_node.resource_id, node_model.get_id())) (maas_node.resource_id, node_model.get_id()))
@ -656,11 +654,8 @@ class Machines(model_base.ResourceCollectionBase):
# query the MaaS API for machines with a matching mac address. # query the MaaS API for machines with a matching mac address.
# this call returns a json list, each member representing a complete # this call returns a json list, each member representing a complete
# Machine # Machine
self.logger.debug( self.logger.debug("Finding {} with hostname: {}".format(
"Finding {} with hostname: {}".format( self.collection_resource.__name__, hostname))
self.collection_resource.__name__, hostname
)
)
resp = self.api_client.get(url, params={"hostname": hostname}) resp = self.api_client.get(url, params={"hostname": hostname})
@ -675,9 +670,9 @@ class Machines(model_base.ResourceCollectionBase):
hostname, hostname,
node.get("system_id"), node.get("system_id"),
node.get("hostname"), node.get("hostname"),
) ))
) return self.collection_resource.from_dict(
return self.collection_resource.from_dict(self.api_client, node) self.api_client, node)
return None return None
@ -687,11 +682,8 @@ class Machines(model_base.ResourceCollectionBase):
# query the MaaS API for all power parameters at once. # query the MaaS API for all power parameters at once.
# this call returns a json dict, mapping system id to power parameters # this call returns a json dict, mapping system id to power parameters
self.logger.debug( self.logger.debug("Finding {} with power address: {}".format(
"Finding {} with power address: {}".format( self.collection_resource.__name__, power_address))
self.collection_resource.__name__, power_address
)
)
resp = self.api_client.get(url, op="power_parameters") resp = self.api_client.get(url, op="power_parameters")
@ -700,22 +692,22 @@ class Machines(model_base.ResourceCollectionBase):
for system_id, power_params in json_dict.items(): for system_id, power_params in json_dict.items():
self.logger.debug( self.logger.debug(
"Finding {} with power address: {}: Considering: {}: {}".format( "Finding {} with power address: {}: Considering: {}: {}".
format(
self.collection_resource.__name__, self.collection_resource.__name__,
power_address, power_address,
system_id, system_id,
power_params.get("power_address"), power_params.get("power_address"),
) ))
)
if power_params.get("power_address") == power_address: if power_params.get("power_address") == power_address:
self.logger.debug( self.logger.debug(
"Finding {} with power address: {}: Found: {}: {}".format( "Finding {} with power address: {}: Found: {}: {}".
format(
self.collection_resource.__name__, self.collection_resource.__name__,
power_address, power_address,
system_id, system_id,
power_params.get("power_address"), power_params.get("power_address"),
) ))
)
# the API result isn't quite enough to contruct a Machine, # the API result isn't quite enough to contruct a Machine,
# so construct one with the system_id and then refresh # so construct one with the system_id and then refresh
@ -758,8 +750,8 @@ class Machines(model_base.ResourceCollectionBase):
field = k[13:] field = k[13:]
result = [ result = [
i for i in result if str( i for i in result if str(
getattr(i, 'power_parameters', {}). getattr(i, 'power_parameters', {}).get(field, None))
get(field, None)) == str(v) == str(v)
] ]
else: else:
result = [ result = [
@ -785,8 +777,9 @@ class Machines(model_base.ResourceCollectionBase):
res.set_resource_id(resp_json.get('system_id')) res.set_resource_id(resp_json.get('system_id'))
return res return res
raise errors.DriverError("Failed updating MAAS url %s - return code %s" raise errors.DriverError(
% (url, resp.status_code)) "Failed updating MAAS url %s - return code %s" %
(url, resp.status_code))
def empty_refresh(self): def empty_refresh(self):
"""Check connectivity to MAAS machines API """Check connectivity to MAAS machines API

View File

@ -77,8 +77,8 @@ class Partition(model_base.ResourceBase):
resp = self.api_client.post(url, op='format', files=data) resp = self.api_client.post(url, op='format', files=data)
if not resp.ok: if not resp.ok:
raise Exception( raise Exception("MAAS error: %s - %s" %
"MAAS error: %s - %s" % (resp.status_code, resp.text)) (resp.status_code, resp.text))
self.refresh() self.refresh()
except Exception as ex: except Exception as ex:
@ -109,8 +109,8 @@ class Partition(model_base.ResourceBase):
(self.name, self.system_id)) (self.name, self.system_id))
resp = self.api_client.post(url, op='unformat') resp = self.api_client.post(url, op='unformat')
if not resp.ok: if not resp.ok:
raise Exception( raise Exception("MAAS error: %s - %s" %
"MAAS error: %s - %s" % (resp.status_code, resp.text)) (resp.status_code, resp.text))
self.refresh() self.refresh()
except Exception as ex: except Exception as ex:
msg = "Error: unformat of device %s on node %s failed: %s" \ msg = "Error: unformat of device %s on node %s failed: %s" \
@ -138,8 +138,8 @@ class Partition(model_base.ResourceBase):
(self.resource_id, self.system_id, mount_point)) (self.resource_id, self.system_id, mount_point))
resp = self.api_client.post(url, op='mount', files=data) resp = self.api_client.post(url, op='mount', files=data)
if not resp.ok: if not resp.ok:
raise Exception( raise Exception("MAAS error: %s - %s" %
"MAAS error: %s - %s" % (resp.status_code, resp.text)) (resp.status_code, resp.text))
self.refresh() self.refresh()
except Exception as ex: except Exception as ex:
msg = "Error: mount of device %s on node %s failed: %s" \ msg = "Error: mount of device %s on node %s failed: %s" \
@ -163,8 +163,8 @@ class Partition(model_base.ResourceBase):
(self.name, self.system_id)) (self.name, self.system_id))
resp = self.api_client.post(url, op='unmount') resp = self.api_client.post(url, op='unmount')
if not resp.ok: if not resp.ok:
raise Exception( raise Exception("MAAS error: %s - %s" %
"MAAS error: %s - %s" % (resp.status_code, resp.text)) (resp.status_code, resp.text))
self.refresh() self.refresh()
except Exception as ex: except Exception as ex:
msg = "Error: unmount of device %s on node %s failed: %s" \ msg = "Error: unmount of device %s on node %s failed: %s" \
@ -180,8 +180,8 @@ class Partition(model_base.ResourceBase):
(self.resource_id, self.system_id)) (self.resource_id, self.system_id))
resp = self.api_client.post(url, op='set_boot_disk') resp = self.api_client.post(url, op='set_boot_disk')
if not resp.ok: if not resp.ok:
raise Exception( raise Exception("MAAS error: %s - %s" %
"MAAS error: %s - %s" % (resp.status_code, resp.text)) (resp.status_code, resp.text))
self.refresh() self.refresh()
except Exception as ex: except Exception as ex:
msg = "Error: setting device %s on node %s to boot failed: %s" \ msg = "Error: setting device %s on node %s to boot failed: %s" \

View File

@ -66,8 +66,9 @@ class RackController(maas_machine.Machine):
def update_identity(self, n, domain="local"): def update_identity(self, n, domain="local"):
"""Cannot update rack controller identity.""" """Cannot update rack controller identity."""
self.logger.debug("Cannot update rack controller identity for %s, no-op." % self.logger.debug(
self.hostname) "Cannot update rack controller identity for %s, no-op." %
self.hostname)
return return
def is_healthy(self): def is_healthy(self):
@ -82,6 +83,7 @@ class RackController(maas_machine.Machine):
healthy = False healthy = False
return healthy return healthy
class RackControllers(maas_machine.Machines): class RackControllers(maas_machine.Machines):
"""Model for a collection of rack controllers.""" """Model for a collection of rack controllers."""

View File

@ -44,10 +44,8 @@ class Subnet(model_base.ResourceBase):
current_ranges.refresh() current_ranges.refresh()
exists = current_ranges.query({ exists = current_ranges.query({
'start_ip': 'start_ip': addr_range.get('start', None),
addr_range.get('start', None), 'end_ip': addr_range.get('end', None)
'end_ip':
addr_range.get('end', None)
}) })
if len(exists) > 0: if len(exists) > 0:
@ -90,12 +88,11 @@ class Subnet(model_base.ResourceBase):
if current_route is not None: if current_route is not None:
current_route.delete() current_route.delete()
new_route = maas_route.StaticRoute( new_route = maas_route.StaticRoute(self.api_client,
self.api_client, source=self.resource_id,
source=self.resource_id, destination=dest_subnet,
destination=dest_subnet, gateway_ip=gateway,
gateway_ip=gateway, metric=metric)
metric=metric)
new_route = sr.add(new_route) new_route = sr.add(new_route)
return new_route return new_route

View File

@ -64,13 +64,14 @@ class Tag(model_base.ResourceBase):
""" """
if system_id in self.get_applied_nodes(): if system_id in self.get_applied_nodes():
self.logger.debug( self.logger.debug("Tag %s already applied to node %s" %
"Tag %s already applied to node %s" % (self.name, system_id)) (self.name, system_id))
else: else:
url = self.interpolate_url() url = self.interpolate_url()
resp = self.api_client.post( resp = self.api_client.post(url,
url, op='update_nodes', files={'add': system_id}) op='update_nodes',
files={'add': system_id})
if not resp.ok: if not resp.ok:
self.logger.error( self.logger.error(

View File

@ -74,10 +74,13 @@ class Vlan(model_base.ResourceBase):
raise RackControllerConflict exception. raise RackControllerConflict exception.
""" """
if not self.primary_rack or self.primary_rack == rack_id: if not self.primary_rack or self.primary_rack == rack_id:
self.logger.debug("Setting primary DHCP controller %s on VLAN %s", rack_id, self.resource_id) self.logger.debug("Setting primary DHCP controller %s on VLAN %s",
rack_id, self.resource_id)
self.primary_rack = rack_id self.primary_rack = rack_id
elif not self.secondary_rack or self.secondary_rack == rack_id: elif not self.secondary_rack or self.secondary_rack == rack_id:
self.logger.debug("Setting secondary DHCP controller %s on VLAN %s.", rack_id, self.resource_id) self.logger.debug(
"Setting secondary DHCP controller %s on VLAN %s.", rack_id,
self.resource_id)
self.secondary_rack = rack_id self.secondary_rack = rack_id
else: else:
raise RackControllerConflict( raise RackControllerConflict(
@ -92,7 +95,8 @@ class Vlan(model_base.ResourceBase):
:param bool commit: Whether to commit reset to MAAS API :param bool commit: Whether to commit reset to MAAS API
""" """
self.logger.debug("Resetting DHCP control on VLAN %s.", self.resource_id) self.logger.debug("Resetting DHCP control on VLAN %s.",
self.resource_id)
self.relay_vlan = None self.relay_vlan = None
self.dhcp_on = False self.dhcp_on = False
self.primary_rack = None self.primary_rack = None

View File

@ -60,17 +60,19 @@ class VolumeGroup(model_base.ResourceBase):
data = {'name': name, 'uuid': uuid_str, 'size': size} data = {'name': name, 'uuid': uuid_str, 'size': size}
self.logger.debug("Creating logical volume %s in VG %s on node %s" self.logger.debug(
% (name, self.name, self.system_id)) "Creating logical volume %s in VG %s on node %s" %
(name, self.name, self.system_id))
url = self.interpolate_url() url = self.interpolate_url()
resp = self.api_client.post( resp = self.api_client.post(url,
url, op='create_logical_volume', files=data) op='create_logical_volume',
files=data)
if not resp.ok: if not resp.ok:
raise Exception( raise Exception("MAAS error - %s - %s" %
"MAAS error - %s - %s" % (resp.status_code, resp.txt)) (resp.status_code, resp.txt))
res = resp.json() res = resp.json()
if 'id' in res: if 'id' in res:
@ -101,12 +103,13 @@ class VolumeGroup(model_base.ResourceBase):
url = self.interpolate_url() url = self.interpolate_url()
resp = self.api_client.post( resp = self.api_client.post(url,
url, op='delete_logical_volume', files={'id': target_lv}) op='delete_logical_volume',
files={'id': target_lv})
if not resp.ok: if not resp.ok:
raise Exception( raise Exception("MAAS error - %s - %s" %
"MAAS error - %s - %s" % (resp.status_code, resp.text)) (resp.status_code, resp.text))
else: else:
raise Exception("VG %s has no logical volumes" % self.name) raise Exception("VG %s has no logical volumes" % self.name)
except Exception as ex: except Exception as ex:

View File

@ -35,14 +35,14 @@ class LibvirtBaseAction(BaseAction):
:param node: instance of objects.BaremetalNode :param node: instance of objects.BaremetalNode
""" """
if node.oob_type != 'libvirt': if node.oob_type != 'libvirt':
raise errors.DriverError( raise errors.DriverError("Node OOB type %s is not 'libvirt'" %
"Node OOB type %s is not 'libvirt'" % node.oob_type) node.oob_type)
virsh_url = node.oob_parameters.get('libvirt_uri', None) virsh_url = node.oob_parameters.get('libvirt_uri', None)
if not virsh_url: if not virsh_url:
raise errors.DriverError( raise errors.DriverError("Node %s has no 'libvirt_url' defined" %
"Node %s has no 'libvirt_url' defined" % (node.name)) (node.name))
url_parts = urlparse(virsh_url) url_parts = urlparse(virsh_url)
@ -51,8 +51,8 @@ class LibvirtBaseAction(BaseAction):
"Node %s has invalid libvirt URL scheme %s. " "Node %s has invalid libvirt URL scheme %s. "
"Only 'qemu+ssh' supported." % (node.name, url_parts.scheme)) "Only 'qemu+ssh' supported." % (node.name, url_parts.scheme))
self.logger.debug( self.logger.debug("Starting libvirt session to hypervisor %s " %
"Starting libvirt session to hypervisor %s " % (virsh_url)) (virsh_url))
virsh_ses = libvirt.open(virsh_url) virsh_ses = libvirt.open(virsh_url)
if not virsh_ses: if not virsh_ses:
@ -148,11 +148,10 @@ class ValidateOobServices(LibvirtBaseAction):
"""Action to validation OOB services are available.""" """Action to validation OOB services are available."""
def start(self): def start(self):
self.task.add_status_msg( self.task.add_status_msg(msg="OOB does not require services.",
msg="OOB does not require services.", error=False,
error=False, ctx='NA',
ctx='NA', ctx_type='NA')
ctx_type='NA')
self.task.set_status(hd_fields.TaskStatus.Complete) self.task.set_status(hd_fields.TaskStatus.Complete)
self.task.success() self.task.success()
self.task.save() self.task.save()
@ -198,11 +197,10 @@ class SetNodeBoot(LibvirtBaseAction):
for n in node_list: for n in node_list:
self.logger.debug("Setting bootdev to PXE for %s" % n.name) self.logger.debug("Setting bootdev to PXE for %s" % n.name)
self.task.add_status_msg( self.task.add_status_msg(msg="Setting node to PXE boot.",
msg="Setting node to PXE boot.", error=False,
error=False, ctx=n.name,
ctx=n.name, ctx_type='node')
ctx_type='node')
try: try:
self.set_node_pxe(n) self.set_node_pxe(n)
@ -213,14 +211,13 @@ class SetNodeBoot(LibvirtBaseAction):
ctx=n.name, ctx=n.name,
ctx_type='node') ctx_type='node')
self.task.failure(focus=n.name) self.task.failure(focus=n.name)
self.logger.warning( self.logger.warning("Unable to set node %s to PXE boot." %
"Unable to set node %s to PXE boot." % (n.name)) (n.name))
else: else:
self.task.add_status_msg( self.task.add_status_msg(msg="Set bootdev to PXE.",
msg="Set bootdev to PXE.", error=False,
error=False, ctx=n.name,
ctx=n.name, ctx_type='node')
ctx_type='node')
self.logger.debug("%s reports bootdev of network" % n.name) self.logger.debug("%s reports bootdev of network" % n.name)
self.task.success(focus=n.name) self.task.success(focus=n.name)
@ -244,21 +241,27 @@ class PowerOffNode(LibvirtBaseAction):
for n in node_list: for n in node_list:
msg = "Shutting down domain %s" % n.name msg = "Shutting down domain %s" % n.name
self.logger.debug(msg) self.logger.debug(msg)
self.task.add_status_msg( self.task.add_status_msg(msg=msg,
msg=msg, error=False, ctx=n.name, ctx_type='node') error=False,
ctx=n.name,
ctx_type='node')
try: try:
self.poweroff_node(n) self.poweroff_node(n)
except Exception as ex: except Exception as ex:
msg = "Node failed to power off: %s" % str(ex) msg = "Node failed to power off: %s" % str(ex)
self.task.add_status_msg( self.task.add_status_msg(msg=msg,
msg=msg, error=True, ctx=n.name, ctx_type='node') error=True,
ctx=n.name,
ctx_type='node')
self.logger.error(msg) self.logger.error(msg)
self.task.failure(focus=n.name) self.task.failure(focus=n.name)
else: else:
msg = "Node %s powered off." % n.name msg = "Node %s powered off." % n.name
self.task.add_status_msg( self.task.add_status_msg(msg=msg,
msg=msg, error=False, ctx=n.name, ctx_type='node') error=False,
ctx=n.name,
ctx_type='node')
self.logger.debug(msg) self.logger.debug(msg)
self.task.success(focus=n.name) self.task.success(focus=n.name)
@ -282,21 +285,27 @@ class PowerOnNode(LibvirtBaseAction):
for n in node_list: for n in node_list:
msg = "Starting domain %s" % n.name msg = "Starting domain %s" % n.name
self.logger.debug(msg) self.logger.debug(msg)
self.task.add_status_msg( self.task.add_status_msg(msg=msg,
msg=msg, error=False, ctx=n.name, ctx_type='node') error=False,
ctx=n.name,
ctx_type='node')
try: try:
self.poweron_node(n) self.poweron_node(n)
except Exception as ex: except Exception as ex:
msg = "Node failed to power on: %s" % str(ex) msg = "Node failed to power on: %s" % str(ex)
self.task.add_status_msg( self.task.add_status_msg(msg=msg,
msg=msg, error=True, ctx=n.name, ctx_type='node') error=True,
ctx=n.name,
ctx_type='node')
self.logger.error(msg) self.logger.error(msg)
self.task.failure(focus=n.name) self.task.failure(focus=n.name)
else: else:
msg = "Node %s powered on." % n.name msg = "Node %s powered on." % n.name
self.task.add_status_msg( self.task.add_status_msg(msg=msg,
msg=msg, error=False, ctx=n.name, ctx_type='node') error=False,
ctx=n.name,
ctx_type='node')
self.logger.debug(msg) self.logger.debug(msg)
self.task.success(focus=n.name) self.task.success(focus=n.name)
@ -320,22 +329,28 @@ class PowerCycleNode(LibvirtBaseAction):
for n in node_list: for n in node_list:
msg = ("Power cycling domain for node %s" % n.name) msg = ("Power cycling domain for node %s" % n.name)
self.logger.debug(msg) self.logger.debug(msg)
self.task.add_status_msg( self.task.add_status_msg(msg=msg,
msg=msg, error=False, ctx=n.name, ctx_type='node') error=False,
ctx=n.name,
ctx_type='node')
try: try:
self.poweroff_node(n) self.poweroff_node(n)
self.poweron_node(n) self.poweron_node(n)
except Exception as ex: except Exception as ex:
msg = "Node failed to power cycle: %s" % str(ex) msg = "Node failed to power cycle: %s" % str(ex)
self.task.add_status_msg( self.task.add_status_msg(msg=msg,
msg=msg, error=True, ctx=n.name, ctx_type='node') error=True,
ctx=n.name,
ctx_type='node')
self.logger.error(msg) self.logger.error(msg)
self.task.failure(focus=n.name) self.task.failure(focus=n.name)
else: else:
msg = "Node %s power cycled." % n.name msg = "Node %s power cycled." % n.name
self.task.add_status_msg( self.task.add_status_msg(msg=msg,
msg=msg, error=False, ctx=n.name, ctx_type='node') error=False,
ctx=n.name,
ctx_type='node')
self.logger.debug(msg) self.logger.debug(msg)
self.task.success(focus=n.name) self.task.success(focus=n.name)
@ -361,14 +376,18 @@ class InterrogateOob(LibvirtBaseAction):
node_status = self.get_node_status(n) node_status = self.get_node_status(n)
except Exception as ex: except Exception as ex:
msg = "Node failed tatus check: %s" % str(ex) msg = "Node failed tatus check: %s" % str(ex)
self.task.add_status_msg( self.task.add_status_msg(msg=msg,
msg=msg, error=True, ctx=n.name, ctx_type='node') error=True,
ctx=n.name,
ctx_type='node')
self.logger.error(msg) self.logger.error(msg)
self.task.failure(focus=n.name) self.task.failure(focus=n.name)
else: else:
msg = "Node %s status is %s." % (n.name, node_status) msg = "Node %s status is %s." % (n.name, node_status)
self.task.add_status_msg( self.task.add_status_msg(msg=msg,
msg=msg, error=False, ctx=n.name, ctx_type='node') error=False,
ctx=n.name,
ctx_type='node')
self.logger.debug(msg) self.logger.debug(msg)
self.task.success(focus=n.name) self.task.success(focus=n.name)

View File

@ -65,9 +65,8 @@ class LibvirtDriver(oob_driver.OobDriver):
def __init__(self, **kwargs): def __init__(self, **kwargs):
super().__init__(**kwargs) super().__init__(**kwargs)
cfg.CONF.register_opts( cfg.CONF.register_opts(LibvirtDriver.libvirt_driver_options,
LibvirtDriver.libvirt_driver_options, group=LibvirtDriver.driver_key)
group=LibvirtDriver.driver_key)
self.logger = logging.getLogger( self.logger = logging.getLogger(
config.config_mgr.conf.logging.oobdriver_logger_name) config.config_mgr.conf.logging.oobdriver_logger_name)
@ -82,8 +81,9 @@ class LibvirtDriver(oob_driver.OobDriver):
if task.action not in self.supported_actions: if task.action not in self.supported_actions:
self.logger.error("Driver %s doesn't support task action %s" % self.logger.error("Driver %s doesn't support task action %s" %
(self.driver_desc, task.action)) (self.driver_desc, task.action))
raise errors.DriverError("Driver %s doesn't support task action %s" raise errors.DriverError(
% (self.driver_desc, task.action)) "Driver %s doesn't support task action %s" %
(self.driver_desc, task.action))
task.set_status(hd_fields.TaskStatus.Running) task.set_status(hd_fields.TaskStatus.Running)
task.save() task.save()
@ -129,10 +129,9 @@ class LibvirtDriver(oob_driver.OobDriver):
task.failure() task.failure()
else: else:
if f.exception(): if f.exception():
self.logger.error( self.logger.error("Uncaught exception in subtask %s" %
"Uncaught exception in subtask %s" % str( str(uuid.UUID(bytes=t)),
uuid.UUID(bytes=t)), exc_info=f.exception())
exc_info=f.exception())
task.align_result() task.align_result()
task.bubble_results() task.bubble_results()
task.set_status(hd_fields.TaskStatus.Complete) task.set_status(hd_fields.TaskStatus.Complete)

View File

@ -48,20 +48,22 @@ class ManualDriver(oob.OobDriver):
if task.action not in self.supported_actions: if task.action not in self.supported_actions:
self.logger.error("Driver %s doesn't support task action %s" % self.logger.error("Driver %s doesn't support task action %s" %
(self.driver_desc, task.action)) (self.driver_desc, task.action))
raise errors.DriverError("Driver %s doesn't support task action %s" raise errors.DriverError(
% (self.driver_desc, task.action)) "Driver %s doesn't support task action %s" %
(self.driver_desc, task.action))
design_ref = task.design_ref design_ref = task.design_ref
if design_ref is None: if design_ref is None:
raise errors.DriverError( raise errors.DriverError("No design ID specified in task %s" %
"No design ID specified in task %s" % (task_id)) (task_id))
self.orchestrator.task_field_update( self.orchestrator.task_field_update(
task.get_id(), status=hd_fields.TaskStatus.Running) task.get_id(), status=hd_fields.TaskStatus.Running)
self.logger.info("Sleeping 60s to allow time for manual OOB %s action" self.logger.info(
% task.action) "Sleeping 60s to allow time for manual OOB %s action" %
task.action)
time.sleep(60) time.sleep(60)

View File

@ -44,16 +44,17 @@ class PyghmiBaseAction(BaseAction):
ipmi_address = node.get_network_address(ipmi_network) ipmi_address = node.get_network_address(ipmi_network)
if ipmi_address is None: if ipmi_address is None:
raise errors.DriverError( raise errors.DriverError("Node %s has no IPMI address" %
"Node %s has no IPMI address" % (node.name)) (node.name))
ipmi_account = node.oob_parameters['account'] ipmi_account = node.oob_parameters['account']
ipmi_credential = node.oob_parameters['credential'] ipmi_credential = node.oob_parameters['credential']
self.logger.debug("Starting IPMI session to %s with %s/%s" % self.logger.debug("Starting IPMI session to %s with %s/%s" %
(ipmi_address, ipmi_account, ipmi_credential[:1])) (ipmi_address, ipmi_account, ipmi_credential[:1]))
ipmi_session = Command( ipmi_session = Command(bmc=ipmi_address,
bmc=ipmi_address, userid=ipmi_account, password=ipmi_credential) userid=ipmi_account,
password=ipmi_credential)
return ipmi_session return ipmi_session
@ -99,11 +100,10 @@ class ValidateOobServices(PyghmiBaseAction):
"""Action to validation OOB services are available.""" """Action to validation OOB services are available."""
def start(self): def start(self):
self.task.add_status_msg( self.task.add_status_msg(msg="OOB does not require services.",
msg="OOB does not require services.", error=False,
error=False, ctx='NA',
ctx='NA', ctx_type='NA')
ctx_type='NA')
self.task.set_status(hd_fields.TaskStatus.Complete) self.task.set_status(hd_fields.TaskStatus.Complete)
self.task.success() self.task.success()
self.task.save() self.task.save()
@ -149,35 +149,32 @@ class SetNodeBoot(PyghmiBaseAction):
for n in node_list: for n in node_list:
self.logger.debug("Setting bootdev to PXE for %s" % n.name) self.logger.debug("Setting bootdev to PXE for %s" % n.name)
self.task.add_status_msg( self.task.add_status_msg(msg="Setting node to PXE boot.",
msg="Setting node to PXE boot.", error=False,
error=False, ctx=n.name,
ctx=n.name, ctx_type='node')
ctx_type='node')
self.exec_ipmi_command(n, Command.set_bootdev, 'pxe') self.exec_ipmi_command(n, Command.set_bootdev, 'pxe')
time.sleep(3) time.sleep(3)
bootdev = self.exec_ipmi_command(n, Command.get_bootdev) bootdev = self.exec_ipmi_command(n, Command.get_bootdev)
if bootdev is not None and (bootdev.get('bootdev', if bootdev is not None and (bootdev.get('bootdev', '')
'') == 'network'): == 'network'):
self.task.add_status_msg( self.task.add_status_msg(msg="Set bootdev to PXE.",
msg="Set bootdev to PXE.", error=False,
error=False, ctx=n.name,
ctx=n.name, ctx_type='node')
ctx_type='node')
self.logger.debug("%s reports bootdev of network" % n.name) self.logger.debug("%s reports bootdev of network" % n.name)
self.task.success(focus=n.name) self.task.success(focus=n.name)
else: else:
self.task.add_status_msg( self.task.add_status_msg(msg="Unable to set bootdev to PXE.",
msg="Unable to set bootdev to PXE.", error=True,
error=True, ctx=n.name,
ctx=n.name, ctx_type='node')
ctx_type='node')
self.task.failure(focus=n.name) self.task.failure(focus=n.name)
self.logger.warning( self.logger.warning("Unable to set node %s to PXE boot." %
"Unable to set node %s to PXE boot." % (n.name)) (n.name))
self.task.set_status(hd_fields.TaskStatus.Complete) self.task.set_status(hd_fields.TaskStatus.Complete)
self.task.save() self.task.save()
@ -198,11 +195,10 @@ class PowerOffNode(PyghmiBaseAction):
for n in node_list: for n in node_list:
self.logger.debug("Sending set_power = off command to %s" % n.name) self.logger.debug("Sending set_power = off command to %s" % n.name)
self.task.add_status_msg( self.task.add_status_msg(msg="Sending set_power = off command.",
msg="Sending set_power = off command.", error=False,
error=False, ctx=n.name,
ctx=n.name, ctx_type='node')
ctx_type='node')
self.exec_ipmi_command(n, Command.set_power, 'off') self.exec_ipmi_command(n, Command.set_power, 'off')
i = 18 i = 18
@ -212,13 +208,12 @@ class PowerOffNode(PyghmiBaseAction):
power_state = self.exec_ipmi_command(n, Command.get_power) power_state = self.exec_ipmi_command(n, Command.get_power)
if power_state is not None and (power_state.get( if power_state is not None and (power_state.get(
'powerstate', '') == 'off'): 'powerstate', '') == 'off'):
self.task.add_status_msg( self.task.add_status_msg(msg="Node reports power off.",
msg="Node reports power off.", error=False,
error=False, ctx=n.name,
ctx=n.name, ctx_type='node')
ctx_type='node') self.logger.debug("Node %s reports powerstate of off" %
self.logger.debug( n.name)
"Node %s reports powerstate of off" % n.name)
self.task.success(focus=n.name) self.task.success(focus=n.name)
break break
time.sleep(10) time.sleep(10)
@ -226,11 +221,10 @@ class PowerOffNode(PyghmiBaseAction):
if power_state is not None and (power_state.get('powerstate', '') if power_state is not None and (power_state.get('powerstate', '')
!= 'off'): != 'off'):
self.task.add_status_msg( self.task.add_status_msg(msg="Node failed to power off.",
msg="Node failed to power off.", error=True,
error=True, ctx=n.name,
ctx=n.name, ctx_type='node')
ctx_type='node')
self.logger.error("Giving up on IPMI command to %s" % n.name) self.logger.error("Giving up on IPMI command to %s" % n.name)
self.task.failure(focus=n.name) self.task.failure(focus=n.name)
@ -253,11 +247,10 @@ class PowerOnNode(PyghmiBaseAction):
for n in node_list: for n in node_list:
self.logger.debug("Sending set_power = off command to %s" % n.name) self.logger.debug("Sending set_power = off command to %s" % n.name)
self.task.add_status_msg( self.task.add_status_msg(msg="Sending set_power = on command.",
msg="Sending set_power = on command.", error=False,
error=False, ctx=n.name,
ctx=n.name, ctx_type='node')
ctx_type='node')
self.exec_ipmi_command(n, Command.set_power, 'off') self.exec_ipmi_command(n, Command.set_power, 'off')
i = 18 i = 18
@ -267,13 +260,12 @@ class PowerOnNode(PyghmiBaseAction):
power_state = self.exec_ipmi_command(n, Command.get_power) power_state = self.exec_ipmi_command(n, Command.get_power)
if power_state is not None and (power_state.get( if power_state is not None and (power_state.get(
'powerstate', '') == 'on'): 'powerstate', '') == 'on'):
self.logger.debug( self.logger.debug("Node %s reports powerstate of on" %
"Node %s reports powerstate of on" % n.name) n.name)
self.task.add_status_msg( self.task.add_status_msg(msg="Node reports power on.",
msg="Node reports power on.", error=False,
error=False, ctx=n.name,
ctx=n.name, ctx_type='node')
ctx_type='node')
self.task.success(focus=n.name) self.task.success(focus=n.name)
break break
time.sleep(10) time.sleep(10)
@ -281,11 +273,10 @@ class PowerOnNode(PyghmiBaseAction):
if power_state is not None and (power_state.get('powerstate', '') if power_state is not None and (power_state.get('powerstate', '')
!= 'on'): != 'on'):
self.task.add_status_msg( self.task.add_status_msg(msg="Node failed to power on.",
msg="Node failed to power on.", error=True,
error=True, ctx=n.name,
ctx=n.name, ctx_type='node')
ctx_type='node')
self.logger.error("Giving up on IPMI command to %s" % n.name) self.logger.error("Giving up on IPMI command to %s" % n.name)
self.task.failure(focus=n.name) self.task.failure(focus=n.name)
@ -308,11 +299,10 @@ class PowerCycleNode(PyghmiBaseAction):
for n in node_list: for n in node_list:
self.logger.debug("Sending set_power = off command to %s" % n.name) self.logger.debug("Sending set_power = off command to %s" % n.name)
self.task.add_status_msg( self.task.add_status_msg(msg="Power cycling node via IPMI.",
msg="Power cycling node via IPMI.", error=False,
error=False, ctx=n.name,
ctx=n.name, ctx_type='node')
ctx_type='node')
self.exec_ipmi_command(n, Command.set_power, 'off') self.exec_ipmi_command(n, Command.set_power, 'off')
# Wait for power state of off before booting back up # Wait for power state of off before booting back up
@ -326,8 +316,8 @@ class PowerCycleNode(PyghmiBaseAction):
self.logger.debug("%s reports powerstate of off" % n.name) self.logger.debug("%s reports powerstate of off" % n.name)
break break
elif power_state is None: elif power_state is None:
self.logger.debug( self.logger.debug("No response on IPMI power query to %s" %
"No response on IPMI power query to %s" % n.name) n.name)
time.sleep(10) time.sleep(10)
i = i - 1 i = i - 1
@ -355,18 +345,17 @@ class PowerCycleNode(PyghmiBaseAction):
self.logger.debug("%s reports powerstate of on" % n.name) self.logger.debug("%s reports powerstate of on" % n.name)
break break
elif power_state is None: elif power_state is None:
self.logger.debug( self.logger.debug("No response on IPMI power query to %s" %
"No response on IPMI power query to %s" % n.name) n.name)
time.sleep(10) time.sleep(10)
i = i - 1 i = i - 1
if power_state is not None and (power_state.get('powerstate', if power_state is not None and (power_state.get('powerstate', '')
'') == 'on'): == 'on'):
self.task.add_status_msg( self.task.add_status_msg(msg="Node power cycle complete.",
msg="Node power cycle complete.", error=False,
error=False, ctx=n.name,
ctx=n.name, ctx_type='node')
ctx_type='node')
self.task.success(focus=n.name) self.task.success(focus=n.name)
else: else:
self.task.add_status_msg( self.task.add_status_msg(
@ -398,8 +387,8 @@ class InterrogateOob(PyghmiBaseAction):
for n in node_list: for n in node_list:
try: try:
self.logger.debug( self.logger.debug("Interrogating node %s IPMI interface." %
"Interrogating node %s IPMI interface." % n.name) n.name)
powerstate = self.exec_ipmi_command(n, Command.get_power) powerstate = self.exec_ipmi_command(n, Command.get_power)
if powerstate is None: if powerstate is None:
raise errors.DriverError() raise errors.DriverError()

View File

@ -70,8 +70,8 @@ class PyghmiDriver(oob_driver.OobDriver):
def __init__(self, **kwargs): def __init__(self, **kwargs):
super().__init__(**kwargs) super().__init__(**kwargs)
cfg.CONF.register_opts( cfg.CONF.register_opts(PyghmiDriver.pyghmi_driver_options,
PyghmiDriver.pyghmi_driver_options, group=PyghmiDriver.driver_key) group=PyghmiDriver.driver_key)
self.logger = logging.getLogger( self.logger = logging.getLogger(
config.config_mgr.conf.logging.oobdriver_logger_name) config.config_mgr.conf.logging.oobdriver_logger_name)
@ -86,8 +86,9 @@ class PyghmiDriver(oob_driver.OobDriver):
if task.action not in self.supported_actions: if task.action not in self.supported_actions:
self.logger.error("Driver %s doesn't support task action %s" % self.logger.error("Driver %s doesn't support task action %s" %
(self.driver_desc, task.action)) (self.driver_desc, task.action))
raise errors.DriverError("Driver %s doesn't support task action %s" raise errors.DriverError(
% (self.driver_desc, task.action)) "Driver %s doesn't support task action %s" %
(self.driver_desc, task.action))
task.set_status(hd_fields.TaskStatus.Running) task.set_status(hd_fields.TaskStatus.Running)
task.save() task.save()
@ -133,10 +134,9 @@ class PyghmiDriver(oob_driver.OobDriver):
task.failure() task.failure()
else: else:
if f.exception(): if f.exception():
self.logger.error( self.logger.error("Uncaught exception in subtask %s" %
"Uncaught exception in subtask %s" % str( str(uuid.UUID(bytes=t)),
uuid.UUID(bytes=t)), exc_info=f.exception())
exc_info=f.exception())
task.align_result() task.align_result()
task.bubble_results() task.bubble_results()
task.set_status(hd_fields.TaskStatus.Complete) task.set_status(hd_fields.TaskStatus.Complete)

View File

@ -29,6 +29,7 @@ import drydock_provisioner.objects.fields as hd_fields
REDFISH_MAX_ATTEMPTS = 3 REDFISH_MAX_ATTEMPTS = 3
class RedfishBaseAction(BaseAction): class RedfishBaseAction(BaseAction):
"""Base action for Redfish executed actions.""" """Base action for Redfish executed actions."""
@ -44,8 +45,8 @@ class RedfishBaseAction(BaseAction):
oob_network = node.oob_parameters['network'] oob_network = node.oob_parameters['network']
oob_address = node.get_network_address(oob_network) oob_address = node.get_network_address(oob_network)
if oob_address is None: if oob_address is None:
raise errors.DriverError( raise errors.DriverError("Node %s has no OOB Redfish address" %
"Node %s has no OOB Redfish address" % (node.name)) (node.name))
oob_account = node.oob_parameters['account'] oob_account = node.oob_parameters['account']
oob_credential = node.oob_parameters['credential'] oob_credential = node.oob_parameters['credential']
@ -53,11 +54,12 @@ class RedfishBaseAction(BaseAction):
self.logger.debug("Starting Redfish session to %s with %s" % self.logger.debug("Starting Redfish session to %s with %s" %
(oob_address, oob_account)) (oob_address, oob_account))
try: try:
redfish_obj = RedfishSession(host=oob_address, redfish_obj = RedfishSession(
account=oob_account, host=oob_address,
password=oob_credential, account=oob_account,
use_ssl=cfg.CONF.redfish_driver.use_ssl, password=oob_credential,
connection_retries=cfg.CONF.redfish_driver.max_retries) use_ssl=cfg.CONF.redfish_driver.use_ssl,
connection_retries=cfg.CONF.redfish_driver.max_retries)
except (RedfishException, errors.DriverError) as iex: except (RedfishException, errors.DriverError) as iex:
self.logger.error( self.logger.error(
"Error initializing Redfish session for node %s" % node.name) "Error initializing Redfish session for node %s" % node.name)
@ -81,7 +83,8 @@ class RedfishBaseAction(BaseAction):
return response return response
except RedfishException as iex: except RedfishException as iex:
self.logger.error( self.logger.error(
"Error executing Redfish command %s for node %s" % (func.__name__, node.name)) "Error executing Redfish command %s for node %s" %
(func.__name__, node.name))
self.logger.error("Redfish Exception: %s" % str(iex)) self.logger.error("Redfish Exception: %s" % str(iex))
raise errors.DriverError("Redfish command failed.") raise errors.DriverError("Redfish command failed.")
@ -91,11 +94,10 @@ class ValidateOobServices(RedfishBaseAction):
"""Action to validate OOB services are available.""" """Action to validate OOB services are available."""
def start(self): def start(self):
self.task.add_status_msg( self.task.add_status_msg(msg="OOB does not require services.",
msg="OOB does not require services.", error=False,
error=False, ctx='NA',
ctx='NA', ctx_type='NA')
ctx_type='NA')
self.task.set_status(hd_fields.TaskStatus.Complete) self.task.set_status(hd_fields.TaskStatus.Complete)
self.task.success() self.task.success()
self.task.save() self.task.save()
@ -134,34 +136,38 @@ class SetNodeBoot(RedfishBaseAction):
node_list = self.orchestrator.get_target_nodes(self.task) node_list = self.orchestrator.get_target_nodes(self.task)
for n in node_list: for n in node_list:
self.task.add_status_msg( self.task.add_status_msg(msg="Setting node to PXE boot.",
msg="Setting node to PXE boot.", error=False,
error=False, ctx=n.name,
ctx=n.name, ctx_type='node')
ctx_type='node')
for i in range(REDFISH_MAX_ATTEMPTS): for i in range(REDFISH_MAX_ATTEMPTS):
bootdev = None bootdev = None
self.logger.debug("Setting bootdev to PXE for %s attempt #%s" % (n.name, i + 1)) self.logger.debug("Setting bootdev to PXE for %s attempt #%s" %
(n.name, i + 1))
try: try:
session = self.get_redfish_session(n) session = self.get_redfish_session(n)
bootdev = self.exec_redfish_command(n, session, RedfishSession.get_bootdev) bootdev = self.exec_redfish_command(
n, session, RedfishSession.get_bootdev)
if bootdev.get('bootdev', '') != 'Pxe': if bootdev.get('bootdev', '') != 'Pxe':
self.exec_redfish_command(n, session, RedfishSession.set_bootdev, 'Pxe') self.exec_redfish_command(n, session,
RedfishSession.set_bootdev,
'Pxe')
time.sleep(1) time.sleep(1)
bootdev = self.exec_redfish_command(n, session, RedfishSession.get_bootdev) bootdev = self.exec_redfish_command(
n, session, RedfishSession.get_bootdev)
session.close_session() session.close_session()
except errors.DriverError as e: except errors.DriverError as e:
self.logger.warning( self.logger.warning(
"An exception '%s' occurred while attempting to set boot device on %s" % (e, n.name)) "An exception '%s' occurred while attempting to set boot device on %s"
% (e, n.name))
if bootdev is not None and (bootdev.get('bootdev', if bootdev is not None and (bootdev.get('bootdev', '')
'') == 'Pxe'): == 'Pxe'):
self.task.add_status_msg( self.task.add_status_msg(msg="Set bootdev to PXE.",
msg="Set bootdev to PXE.", error=False,
error=False, ctx=n.name,
ctx=n.name, ctx_type='node')
ctx_type='node')
self.logger.debug("%s reports bootdev of network" % n.name) self.logger.debug("%s reports bootdev of network" % n.name)
self.task.success(focus=n.name) self.task.success(focus=n.name)
break break
@ -173,8 +179,8 @@ class SetNodeBoot(RedfishBaseAction):
ctx=n.name, ctx=n.name,
ctx_type='node') ctx_type='node')
self.task.failure(focus=n.name) self.task.failure(focus=n.name)
self.logger.warning( self.logger.warning("Unable to set node %s to PXE boot." %
"Unable to set node %s to PXE boot." % (n.name)) (n.name))
self.task.set_status(hd_fields.TaskStatus.Complete) self.task.set_status(hd_fields.TaskStatus.Complete)
self.task.save() self.task.save()
@ -192,56 +198,58 @@ class PowerOffNode(RedfishBaseAction):
for n in node_list: for n in node_list:
self.logger.debug("Sending set_power = off command to %s" % n.name) self.logger.debug("Sending set_power = off command to %s" % n.name)
self.task.add_status_msg( self.task.add_status_msg(msg="Sending set_power = off command.",
msg="Sending set_power = off command.", error=False,
error=False, ctx=n.name,
ctx=n.name, ctx_type='node')
ctx_type='node')
session = self.get_redfish_session(n) session = self.get_redfish_session(n)
# If power is already off, continue with the next node # If power is already off, continue with the next node
power_state = self.exec_redfish_command(n, RedfishSession.get_power) power_state = self.exec_redfish_command(n,
if power_state is not None and (power_state.get( RedfishSession.get_power)
'powerstate', '') == 'Off'): if power_state is not None and (power_state.get('powerstate', '')
self.task.add_status_msg( == 'Off'):
msg="Node reports power off.", self.task.add_status_msg(msg="Node reports power off.",
error=False, error=False,
ctx=n.name, ctx=n.name,
ctx_type='node') ctx_type='node')
self.logger.debug( self.logger.debug(
"Node %s reports powerstate already off. No action required" % n.name) "Node %s reports powerstate already off. No action required"
% n.name)
self.task.success(focus=n.name) self.task.success(focus=n.name)
continue continue
self.exec_redfish_command(n, session, RedfishSession.set_power, 'ForceOff') self.exec_redfish_command(n, session, RedfishSession.set_power,
'ForceOff')
attempts = cfg.CONF.redfish_driver.power_state_change_max_retries attempts = cfg.CONF.redfish_driver.power_state_change_max_retries
while attempts > 0: while attempts > 0:
self.logger.debug("Polling powerstate waiting for success.") self.logger.debug("Polling powerstate waiting for success.")
power_state = self.exec_redfish_command(n, RedfishSession.get_power) power_state = self.exec_redfish_command(
n, RedfishSession.get_power)
if power_state is not None and (power_state.get( if power_state is not None and (power_state.get(
'powerstate', '') == 'Off'): 'powerstate', '') == 'Off'):
self.task.add_status_msg( self.task.add_status_msg(msg="Node reports power off.",
msg="Node reports power off.", error=False,
error=False, ctx=n.name,
ctx=n.name, ctx_type='node')
ctx_type='node') self.logger.debug("Node %s reports powerstate of off" %
self.logger.debug( n.name)
"Node %s reports powerstate of off" % n.name)
self.task.success(focus=n.name) self.task.success(focus=n.name)
break break
time.sleep(cfg.CONF.redfish_driver.power_state_change_retry_interval) time.sleep(
cfg.CONF.redfish_driver.power_state_change_retry_interval)
attempts = attempts - 1 attempts = attempts - 1
if power_state is not None and (power_state.get('powerstate', '') if power_state is not None and (power_state.get('powerstate', '')
!= 'Off'): != 'Off'):
self.task.add_status_msg( self.task.add_status_msg(msg="Node failed to power off.",
msg="Node failed to power off.", error=True,
error=True, ctx=n.name,
ctx=n.name, ctx_type='node')
ctx_type='node') self.logger.error("Giving up on Redfish command to %s" %
self.logger.error("Giving up on Redfish command to %s" % n.name) n.name)
self.task.failure(focus=n.name) self.task.failure(focus=n.name)
session.close_session() session.close_session()
@ -262,56 +270,58 @@ class PowerOnNode(RedfishBaseAction):
for n in node_list: for n in node_list:
self.logger.debug("Sending set_power = on command to %s" % n.name) self.logger.debug("Sending set_power = on command to %s" % n.name)
self.task.add_status_msg( self.task.add_status_msg(msg="Sending set_power = on command.",
msg="Sending set_power = on command.", error=False,
error=False, ctx=n.name,
ctx=n.name, ctx_type='node')
ctx_type='node')
session = self.get_redfish_session(n) session = self.get_redfish_session(n)
# If power is already on, continue with the next node # If power is already on, continue with the next node
power_state = self.exec_redfish_command(n, RedfishSession.get_power) power_state = self.exec_redfish_command(n,
if power_state is not None and (power_state.get( RedfishSession.get_power)
'powerstate', '') == 'On'): if power_state is not None and (power_state.get('powerstate', '')
self.task.add_status_msg( == 'On'):
msg="Node reports power on.", self.task.add_status_msg(msg="Node reports power on.",
error=False, error=False,
ctx=n.name, ctx=n.name,
ctx_type='node') ctx_type='node')
self.logger.debug( self.logger.debug(
"Node %s reports powerstate already on. No action required" % n.name) "Node %s reports powerstate already on. No action required"
% n.name)
self.task.success(focus=n.name) self.task.success(focus=n.name)
continue continue
self.exec_redfish_command(n, session, RedfishSession.set_power, 'On') self.exec_redfish_command(n, session, RedfishSession.set_power,
'On')
attempts = cfg.CONF.redfish_driver.power_state_change_max_retries attempts = cfg.CONF.redfish_driver.power_state_change_max_retries
while attempts > 0: while attempts > 0:
self.logger.debug("Polling powerstate waiting for success.") self.logger.debug("Polling powerstate waiting for success.")
power_state = self.exec_redfish_command(n, session, RedfishSession.get_power) power_state = self.exec_redfish_command(
n, session, RedfishSession.get_power)
if power_state is not None and (power_state.get( if power_state is not None and (power_state.get(
'powerstate', '') == 'On'): 'powerstate', '') == 'On'):
self.logger.debug( self.logger.debug("Node %s reports powerstate of on" %
"Node %s reports powerstate of on" % n.name) n.name)
self.task.add_status_msg( self.task.add_status_msg(msg="Node reports power on.",
msg="Node reports power on.", error=False,
error=False, ctx=n.name,
ctx=n.name, ctx_type='node')
ctx_type='node')
self.task.success(focus=n.name) self.task.success(focus=n.name)
break break
time.sleep(cfg.CONF.redfish_driver.power_state_change_retry_interval) time.sleep(
cfg.CONF.redfish_driver.power_state_change_retry_interval)
attempts = attempts - 1 attempts = attempts - 1
if power_state is not None and (power_state.get('powerstate', '') if power_state is not None and (power_state.get('powerstate', '')
!= 'On'): != 'On'):
self.task.add_status_msg( self.task.add_status_msg(msg="Node failed to power on.",
msg="Node failed to power on.", error=True,
error=True, ctx=n.name,
ctx=n.name, ctx_type='node')
ctx_type='node') self.logger.error("Giving up on Redfish command to %s" %
self.logger.error("Giving up on Redfish command to %s" % n.name) n.name)
self.task.failure(focus=n.name) self.task.failure(focus=n.name)
session.close_session() session.close_session()
@ -332,19 +342,20 @@ class PowerCycleNode(RedfishBaseAction):
for n in node_list: for n in node_list:
self.logger.debug("Sending set_power = off command to %s" % n.name) self.logger.debug("Sending set_power = off command to %s" % n.name)
self.task.add_status_msg( self.task.add_status_msg(msg="Power cycling node via Redfish.",
msg="Power cycling node via Redfish.", error=False,
error=False, ctx=n.name,
ctx=n.name, ctx_type='node')
ctx_type='node')
session = self.get_redfish_session(n) session = self.get_redfish_session(n)
self.exec_redfish_command(n, session, RedfishSession.set_power, 'ForceOff') self.exec_redfish_command(n, session, RedfishSession.set_power,
'ForceOff')
# Wait for power state of off before booting back up # Wait for power state of off before booting back up
attempts = cfg.CONF.redfish_driver.power_state_change_max_retries attempts = cfg.CONF.redfish_driver.power_state_change_max_retries
while attempts > 0: while attempts > 0:
power_state = self.exec_redfish_command(n, session, RedfishSession.get_power) power_state = self.exec_redfish_command(
n, session, RedfishSession.get_power)
if power_state is not None and power_state.get( if power_state is not None and power_state.get(
'powerstate', '') == 'Off': 'powerstate', '') == 'Off':
self.logger.debug("%s reports powerstate of off" % n.name) self.logger.debug("%s reports powerstate of off" % n.name)
@ -352,7 +363,8 @@ class PowerCycleNode(RedfishBaseAction):
elif power_state is None: elif power_state is None:
self.logger.debug( self.logger.debug(
"No response on Redfish power query to %s" % n.name) "No response on Redfish power query to %s" % n.name)
time.sleep(cfg.CONF.redfish_driver.power_state_change_retry_interval) time.sleep(
cfg.CONF.redfish_driver.power_state_change_retry_interval)
attempts = attempts - 1 attempts = attempts - 1
if power_state.get('powerstate', '') != 'Off': if power_state.get('powerstate', '') != 'Off':
@ -368,12 +380,14 @@ class PowerCycleNode(RedfishBaseAction):
break break
self.logger.debug("Sending set_power = on command to %s" % n.name) self.logger.debug("Sending set_power = on command to %s" % n.name)
self.exec_redfish_command(n, session, RedfishSession.set_power, 'On') self.exec_redfish_command(n, session, RedfishSession.set_power,
'On')
attempts = cfg.CONF.redfish_driver.power_state_change_max_retries attempts = cfg.CONF.redfish_driver.power_state_change_max_retries
while attempts > 0: while attempts > 0:
power_state = self.exec_redfish_command(n, session, RedfishSession.get_power) power_state = self.exec_redfish_command(
n, session, RedfishSession.get_power)
if power_state is not None and power_state.get( if power_state is not None and power_state.get(
'powerstate', '') == 'On': 'powerstate', '') == 'On':
self.logger.debug("%s reports powerstate of on" % n.name) self.logger.debug("%s reports powerstate of on" % n.name)
@ -381,16 +395,16 @@ class PowerCycleNode(RedfishBaseAction):
elif power_state is None: elif power_state is None:
self.logger.debug( self.logger.debug(
"No response on Redfish power query to %s" % n.name) "No response on Redfish power query to %s" % n.name)
time.sleep(cfg.CONF.redfish_driver.power_state_change_retry_interval) time.sleep(
cfg.CONF.redfish_driver.power_state_change_retry_interval)
attempts = attempts - 1 attempts = attempts - 1
if power_state is not None and (power_state.get('powerstate', if power_state is not None and (power_state.get('powerstate', '')
'') == 'On'): == 'On'):
self.task.add_status_msg( self.task.add_status_msg(msg="Node power cycle complete.",
msg="Node power cycle complete.", error=False,
error=False, ctx=n.name,
ctx=n.name, ctx_type='node')
ctx_type='node')
self.task.success(focus=n.name) self.task.success(focus=n.name)
else: else:
self.task.add_status_msg( self.task.add_status_msg(
@ -421,16 +435,17 @@ class InterrogateOob(RedfishBaseAction):
for n in node_list: for n in node_list:
try: try:
self.logger.debug( self.logger.debug("Interrogating node %s Redfish interface." %
"Interrogating node %s Redfish interface." % n.name) n.name)
session = self.get_redfish_session(n) session = self.get_redfish_session(n)
powerstate = self.exec_redfish_command(n, session, RedfishSession.get_power) powerstate = self.exec_redfish_command(
n, session, RedfishSession.get_power)
session.close_session() session.close_session()
if powerstate is None: if powerstate is None:
raise errors.DriverError() raise errors.DriverError()
self.task.add_status_msg( self.task.add_status_msg(
msg="Redfish interface interrogation yielded powerstate %s" % msg="Redfish interface interrogation yielded powerstate %s"
powerstate.get('powerstate'), % powerstate.get('powerstate'),
error=False, error=False,
ctx=n.name, ctx=n.name,
ctx_type='node') ctx_type='node')

View File

@ -21,10 +21,16 @@ from redfish.rest.v1 import ServerDownOrUnreachableError
from redfish.rest.v1 import InvalidCredentialsError from redfish.rest.v1 import InvalidCredentialsError
from redfish.rest.v1 import RetriesExhaustedError from redfish.rest.v1 import RetriesExhaustedError
class RedfishSession(object): class RedfishSession(object):
"""Redfish Client to provide OOB commands""" """Redfish Client to provide OOB commands"""
def __init__(self, host, account, password, use_ssl=True, connection_retries=10): def __init__(self,
host,
account,
password,
use_ssl=True,
connection_retries=10):
try: try:
if use_ssl: if use_ssl:
redfish_url = 'https://' + host redfish_url = 'https://' + host
@ -57,7 +63,8 @@ class RedfishSession(object):
# Assumption that only one system is available on Node # Assumption that only one system is available on Node
if response.dict["Members@odata.count"] != 1: if response.dict["Members@odata.count"] != 1:
raise RedfishException("Number of systems are more than one in the node") raise RedfishException(
"Number of systems are more than one in the node")
instance = response.dict["Members"][0]["@odata.id"] instance = response.dict["Members"][0]["@odata.id"]
return instance return instance
@ -152,7 +159,9 @@ class RedfishSession(object):
""" """
instance = self.get_system_instance() instance = self.get_system_instance()
if powerstate not in ["On", "ForceOff", "PushPowerButton", "GracefulRestart"]: if powerstate not in [
"On", "ForceOff", "PushPowerButton", "GracefulRestart"
]:
raise RedfishException("Unsupported powerstate") raise RedfishException("Unsupported powerstate")
current_state = self.get_power() current_state = self.get_power()
@ -160,9 +169,7 @@ class RedfishSession(object):
(powerstate == "ForceOff" and current_state["powerstate"] == "Off"): (powerstate == "ForceOff" and current_state["powerstate"] == "Off"):
return {'powerstate': powerstate} return {'powerstate': powerstate}
payload = { payload = {"ResetType": powerstate}
"ResetType": powerstate
}
url = instance + "/Actions/ComputerSystem.Reset" url = instance + "/Actions/ComputerSystem.Reset"
response = self.redfish_client.post(path=url, body=payload) response = self.redfish_client.post(path=url, body=payload)

View File

@ -48,19 +48,19 @@ class RedfishDriver(oob_driver.OobDriver):
default=10, default=10,
min=1, min=1,
help='Maximum number of connection retries to Redfish server'), help='Maximum number of connection retries to Redfish server'),
cfg.IntOpt( cfg.IntOpt('power_state_change_max_retries',
'power_state_change_max_retries', default=18,
default=18, min=1,
min=1, help='Maximum reties to wait for power state change'),
help='Maximum reties to wait for power state change'),
cfg.IntOpt( cfg.IntOpt(
'power_state_change_retry_interval', 'power_state_change_retry_interval',
default=10, default=10,
help='Polling interval in seconds between retries for power state change'), help=
cfg.BoolOpt( 'Polling interval in seconds between retries for power state change'
'use_ssl', ),
default=True, cfg.BoolOpt('use_ssl',
help='Use SSL to communicate with Redfish API server'), default=True,
help='Use SSL to communicate with Redfish API server'),
] ]
oob_types_supported = ['redfish'] oob_types_supported = ['redfish']
@ -82,8 +82,8 @@ class RedfishDriver(oob_driver.OobDriver):
def __init__(self, **kwargs): def __init__(self, **kwargs):
super().__init__(**kwargs) super().__init__(**kwargs)
cfg.CONF.register_opts( cfg.CONF.register_opts(RedfishDriver.redfish_driver_options,
RedfishDriver.redfish_driver_options, group=RedfishDriver.driver_key) group=RedfishDriver.driver_key)
self.logger = logging.getLogger( self.logger = logging.getLogger(
config.config_mgr.conf.logging.oobdriver_logger_name) config.config_mgr.conf.logging.oobdriver_logger_name)
@ -98,8 +98,9 @@ class RedfishDriver(oob_driver.OobDriver):
if task.action not in self.supported_actions: if task.action not in self.supported_actions:
self.logger.error("Driver %s doesn't support task action %s" % self.logger.error("Driver %s doesn't support task action %s" %
(self.driver_desc, task.action)) (self.driver_desc, task.action))
raise errors.DriverError("Driver %s doesn't support task action %s" raise errors.DriverError(
% (self.driver_desc, task.action)) "Driver %s doesn't support task action %s" %
(self.driver_desc, task.action))
task.set_status(hd_fields.TaskStatus.Running) task.set_status(hd_fields.TaskStatus.Running)
task.save() task.save()
@ -145,10 +146,9 @@ class RedfishDriver(oob_driver.OobDriver):
task.failure() task.failure()
else: else:
if f.exception(): if f.exception():
self.logger.error( self.logger.error("Uncaught exception in subtask %s" %
"Uncaught exception in subtask %s" % str( str(uuid.UUID(bytes=t)),
uuid.UUID(bytes=t)), exc_info=f.exception())
exc_info=f.exception())
task.align_result() task.align_result()
task.bubble_results() task.bubble_results()
task.set_status(hd_fields.TaskStatus.Complete) task.set_status(hd_fields.TaskStatus.Complete)

View File

@ -34,8 +34,10 @@ def start_drydock(enable_keystone=True):
# Setup configuration parsing # Setup configuration parsing
cli_options = [ cli_options = [
cfg.BoolOpt( cfg.BoolOpt('debug',
'debug', short='d', default=False, help='Enable debug logging'), short='d',
default=False,
help='Enable debug logging'),
] ]
config.config_mgr.conf.register_cli_opts(cli_options) config.config_mgr.conf.register_cli_opts(cli_options)
@ -43,8 +45,9 @@ def start_drydock(enable_keystone=True):
config.config_mgr.conf(sys.argv[1:]) config.config_mgr.conf(sys.argv[1:])
if config.config_mgr.conf.debug: if config.config_mgr.conf.debug:
config.config_mgr.conf.set_override( config.config_mgr.conf.set_override(name='log_level',
name='log_level', override='DEBUG', group='logging') override='DEBUG',
group='logging')
# Setup root logger # Setup root logger
logger = logging.getLogger( logger = logging.getLogger(
@ -64,8 +67,7 @@ def start_drydock(enable_keystone=True):
logger.propagate = False logger.propagate = False
formatter = logging.Formatter( formatter = logging.Formatter(
"%(asctime)s - %(levelname)s - %(user)s - %(req_id)s" "%(asctime)s - %(levelname)s - %(user)s - %(req_id)s"
" - %(external_ctx)s - %(end_user)s - %(message)s" " - %(external_ctx)s - %(end_user)s - %(message)s")
)
ch = logging.StreamHandler() ch = logging.StreamHandler()
ch.setFormatter(formatter) ch.setFormatter(formatter)
@ -77,10 +79,9 @@ def start_drydock(enable_keystone=True):
input_ingester = Ingester() input_ingester = Ingester()
input_ingester.enable_plugin(config.config_mgr.conf.plugins.ingester) input_ingester.enable_plugin(config.config_mgr.conf.plugins.ingester)
orchestrator = Orchestrator( orchestrator = Orchestrator(enabled_drivers=config.config_mgr.conf.plugins,
enabled_drivers=config.config_mgr.conf.plugins, state_manager=state,
state_manager=state, ingester=input_ingester)
ingester=input_ingester)
orch_thread = threading.Thread(target=orchestrator.watch_for_tasks) orch_thread = threading.Thread(target=orchestrator.watch_for_tasks)
orch_thread.start() orch_thread.start()
@ -98,10 +99,9 @@ def start_drydock(enable_keystone=True):
policy.policy_engine.register_policy() policy.policy_engine.register_policy()
# Ensure that the policy_engine is initialized before starting the API # Ensure that the policy_engine is initialized before starting the API
wsgi_callable = api.start_api( wsgi_callable = api.start_api(state_manager=state,
state_manager=state, ingester=input_ingester,
ingester=input_ingester, orchestrator=orchestrator)
orchestrator=orchestrator)
# Now that loggers are configured, log the effective config # Now that loggers are configured, log the effective config
config.config_mgr.conf.log_opt_values( config.config_mgr.conf.log_opt_values(

View File

@ -192,9 +192,9 @@ class DrydockClient(object):
raise errors.ClientUnauthorizedError( raise errors.ClientUnauthorizedError(
"Unauthorized access to %s, include valid token." % resp.url) "Unauthorized access to %s, include valid token." % resp.url)
elif resp.status_code == 403: elif resp.status_code == 403:
raise errors.ClientForbiddenError( raise errors.ClientForbiddenError("Forbidden access to %s" %
"Forbidden access to %s" % resp.url) resp.url)
elif not resp.ok: elif not resp.ok:
raise errors.ClientError( raise errors.ClientError("Error - received %d: %s" %
"Error - received %d: %s" % (resp.status_code, resp.text), (resp.status_code, resp.text),
code=resp.status_code) code=resp.status_code)

View File

@ -91,8 +91,9 @@ class DrydockSession(object):
url = self.base_url + endpoint url = self.base_url + endpoint
self.logger.debug('GET ' + url) self.logger.debug('GET ' + url)
self.logger.debug('Query Params: ' + str(query)) self.logger.debug('Query Params: ' + str(query))
resp = self.__session.get( resp = self.__session.get(url,
url, params=query, timeout=self._timeout(timeout)) params=query,
timeout=self._timeout(timeout))
if resp.status_code == 401 and not auth_refresh: if resp.status_code == 401 and not auth_refresh:
self.set_auth() self.set_auth()
@ -121,21 +122,19 @@ class DrydockSession(object):
self.logger.debug('POST ' + url) self.logger.debug('POST ' + url)
self.logger.debug('Query Params: ' + str(query)) self.logger.debug('Query Params: ' + str(query))
if body is not None: if body is not None:
self.logger.debug( self.logger.debug("Sending POST with explicit body: \n%s" %
"Sending POST with explicit body: \n%s" % body) body)
resp = self.__session.post( resp = self.__session.post(self.base_url + endpoint,
self.base_url + endpoint, params=query,
params=query, data=body,
data=body, timeout=self._timeout(timeout))
timeout=self._timeout(timeout))
else: else:
self.logger.debug( self.logger.debug("Sending POST with JSON body: \n%s" %
"Sending POST with JSON body: \n%s" % str(data)) str(data))
resp = self.__session.post( resp = self.__session.post(self.base_url + endpoint,
self.base_url + endpoint, params=query,
params=query, json=data,
json=data, timeout=self._timeout(timeout))
timeout=self._timeout(timeout))
if resp.status_code == 401 and not auth_refresh: if resp.status_code == 401 and not auth_refresh:
self.set_auth() self.set_auth()
auth_refresh = True auth_refresh = True
@ -161,8 +160,9 @@ class DrydockSession(object):
url = self.base_url + endpoint url = self.base_url + endpoint
self.logger.debug('DELETE ' + url) self.logger.debug('DELETE ' + url)
self.logger.debug('Query Params: ' + str(query)) self.logger.debug('Query Params: ' + str(query))
resp = self.__session.delete( resp = self.__session.delete(url,
url, params=query, timeout=self._timeout(timeout)) params=query,
timeout=self._timeout(timeout))
if resp.status_code == 401 and not auth_refresh: if resp.status_code == 401 and not auth_refresh:
self.set_auth() self.set_auth()
@ -212,6 +212,7 @@ class DrydockSession(object):
class KeystoneClient(object): class KeystoneClient(object):
@staticmethod @staticmethod
def get_endpoint(endpoint, def get_endpoint(endpoint,
ks_sess=None, ks_sess=None,

View File

@ -381,6 +381,7 @@ class InvalidSizeFormat(DriverError):
class ApiError(Exception): class ApiError(Exception):
def __init__(self, msg, code=500): def __init__(self, msg, code=500):
super().__init__(msg) super().__init__(msg)
self.message = msg self.message = msg

View File

@ -29,6 +29,7 @@ import drydock_provisioner.objects.bootaction as bootaction
class Ingester(object): class Ingester(object):
def __init__(self): def __init__(self):
self.logger = logging.getLogger("drydock.ingester") self.logger = logging.getLogger("drydock.ingester")
self.registered_plugin = None self.registered_plugin = None
@ -50,8 +51,8 @@ class Ingester(object):
klass = getattr(mod, classname) klass = getattr(mod, classname)
self.registered_plugin = klass() self.registered_plugin = klass()
except Exception as ex: except Exception as ex:
self.logger.error( self.logger.error("Could not enable plugin %s - %s" %
"Could not enable plugin %s - %s" % (plugin, str(ex))) (plugin, str(ex)))
if self.registered_plugin is None: if self.registered_plugin is None:
self.logger.error("Could not enable at least one plugin") self.logger.error("Could not enable at least one plugin")
@ -89,8 +90,8 @@ class Ingester(object):
"Ingester:ingest_data ingesting design parts for design %s" % "Ingester:ingest_data ingesting design parts for design %s" %
design_ref) design_ref)
design_blob = design_state.get_design_documents(design_ref) design_blob = design_state.get_design_documents(design_ref)
self.logger.debug( self.logger.debug("Ingesting design data of %d bytes." %
"Ingesting design data of %d bytes." % len(design_blob)) len(design_blob))
try: try:
status, design_items = self.registered_plugin.ingest_data( status, design_items = self.registered_plugin.ingest_data(
@ -100,8 +101,8 @@ class Ingester(object):
"Ingester:ingest_data - Unexpected error processing data - %s" "Ingester:ingest_data - Unexpected error processing data - %s"
% (str(vex))) % (str(vex)))
return None, None return None, None
self.logger.debug("Ingester:ingest_data parsed %s design parts" % str( self.logger.debug("Ingester:ingest_data parsed %s design parts" %
len(design_items))) str(len(design_items)))
design_data = objects.SiteDesign() design_data = objects.SiteDesign()
for m in design_items: for m in design_items:
if context is not None: if context is not None:

View File

@ -19,6 +19,7 @@ import logging
class IngesterPlugin(object): class IngesterPlugin(object):
def __init__(self): def __init__(self):
self.log = logging.Logger('ingester') self.log = logging.Logger('ingester')
return return

View File

@ -37,7 +37,9 @@ cache_opts = {
cache = CacheManager(**parse_cache_config_options(cache_opts)) cache = CacheManager(**parse_cache_config_options(cache_opts))
class DeckhandIngester(IngesterPlugin): class DeckhandIngester(IngesterPlugin):
def __init__(self): def __init__(self):
super().__init__() super().__init__()
self.logger = logging.getLogger('drydock.ingester.deckhand') self.logger = logging.getLogger('drydock.ingester.deckhand')
@ -54,6 +56,7 @@ class DeckhandIngester(IngesterPlugin):
:returns: a tuple of a status response and a list of parsed objects from drydock_provisioner.objects :returns: a tuple of a status response and a list of parsed objects from drydock_provisioner.objects
""" """
def local_parse(): def local_parse():
return self.parse_docs(kwargs.get('content')) return self.parse_docs(kwargs.get('content'))
@ -66,7 +69,9 @@ class DeckhandIngester(IngesterPlugin):
results = local_cache.get(key=hv, createfunc=local_parse) results = local_cache.get(key=hv, createfunc=local_parse)
parse_status, models = results parse_status, models = results
except Exception as ex: except Exception as ex:
self.logger.debug("Error parsing design - hash %s", hv, exc_info=ex) self.logger.debug("Error parsing design - hash %s",
hv,
exc_info=ex)
raise ex raise ex
else: else:
raise ValueError('Missing parameter "content"') raise ValueError('Missing parameter "content"')
@ -103,8 +108,8 @@ class DeckhandIngester(IngesterPlugin):
(schema_ns, doc_kind, doc_version) = d.get('schema', (schema_ns, doc_kind, doc_version) = d.get('schema',
'').split('/') '').split('/')
except ValueError as ex: except ValueError as ex:
self.logger.error( self.logger.error("Error with document structure.",
"Error with document structure.", exc_info=ex) exc_info=ex)
self.logger.debug("Error document\n%s" % yaml.dump(d)) self.logger.debug("Error document\n%s" % yaml.dump(d))
continue continue
if schema_ns == 'drydock': if schema_ns == 'drydock':
@ -230,9 +235,9 @@ class DeckhandIngester(IngesterPlugin):
tag_model.definition = t.get('definition', '') tag_model.definition = t.get('definition', '')
if tag_model.type not in ['lshw_xpath']: if tag_model.type not in ['lshw_xpath']:
raise errors.IngesterError( raise errors.IngesterError('Unknown definition_type in '
'Unknown definition_type in ' 'tag_definition instance: %s' %
'tag_definition instance: %s' % (t.definition_type)) (t.definition_type))
model.tag_definitions.append(tag_model) model.tag_definitions.append(tag_model)
auth_keys = data.get('authorized_keys', []) auth_keys = data.get('authorized_keys', [])
@ -419,8 +424,9 @@ class DeckhandIngester(IngesterPlugin):
model.hugepages_confs = objects.HugepagesConfList() model.hugepages_confs = objects.HugepagesConfList()
for c, d in data.get('hugepages', {}).items(): for c, d in data.get('hugepages', {}).items():
conf = objects.HugepagesConf( conf = objects.HugepagesConf(name=c,
name=c, size=d.get('size'), count=d.get('count')) size=d.get('size'),
count=d.get('count'))
model.hugepages_confs.append(conf) model.hugepages_confs.append(conf)
return model return model
@ -589,8 +595,8 @@ class DeckhandIngester(IngesterPlugin):
if 'sriov' in v: if 'sriov' in v:
int_model.sriov = True int_model.sriov = True
int_model.vf_count = v.get('sriov', {}).get('vf_count', 0) int_model.vf_count = v.get('sriov', {}).get('vf_count', 0)
int_model.trustedmode = v.get('sriov', {}).get( int_model.trustedmode = v.get('sriov',
'trustedmode', False) {}).get('trustedmode', False)
model.interfaces.append(int_model) model.interfaces.append(int_model)
@ -705,8 +711,8 @@ class DeckhandIngester(IngesterPlugin):
self.logger.warning( self.logger.warning(
"Duplicate document schemas found for document kind %s." "Duplicate document schemas found for document kind %s."
% schema_for) % schema_for)
self.logger.debug( self.logger.debug("Loaded schema for document kind %s." %
"Loaded schema for document kind %s." % schema_for) schema_for)
self.v1_doc_schemas[schema_for] = schema.get('data') self.v1_doc_schemas[schema_for] = schema.get('data')
f.close() f.close()

View File

@ -28,6 +28,7 @@ from drydock_provisioner.ingester.plugins import IngesterPlugin
class YamlIngester(IngesterPlugin): class YamlIngester(IngesterPlugin):
def __init__(self): def __init__(self):
super().__init__() super().__init__()
self.logger = logging.getLogger('drydock.ingester.yaml') self.logger = logging.getLogger('drydock.ingester.yaml')
@ -96,8 +97,10 @@ class YamlIngester(IngesterPlugin):
ctx = d.get('metadata').get('name') ctx = d.get('metadata').get('name')
else: else:
ctx = 'Unknown' ctx = 'Unknown'
ps.add_status_msg( ps.add_status_msg(msg=msg,
msg=msg, error=True, ctx_type='document', ctx=ctx) error=True,
ctx_type='document',
ctx=ctx)
ps.set_status(hd_fields.ValidationResult.Failure) ps.set_status(hd_fields.ValidationResult.Failure)
except Exception as ex: except Exception as ex:
msg = "Unexpected error processing document: %s" % str(ex) msg = "Unexpected error processing document: %s" % str(ex)
@ -106,8 +109,10 @@ class YamlIngester(IngesterPlugin):
ctx = d.get('metadata').get('name') ctx = d.get('metadata').get('name')
else: else:
ctx = 'Unknown' ctx = 'Unknown'
ps.add_status_msg( ps.add_status_msg(msg=msg,
msg=msg, error=True, ctx_type='document', ctx=ctx) error=True,
ctx_type='document',
ctx=ctx)
ps.set_status(hd_fields.ValidationResult.Failure) ps.set_status(hd_fields.ValidationResult.Failure)
elif api.startswith('promenade/'): elif api.startswith('promenade/'):
(foo, api_version) = api.split('/') (foo, api_version) = api.split('/')
@ -193,9 +198,9 @@ class YamlIngester(IngesterPlugin):
tag_model.definition = t.get('definition', '') tag_model.definition = t.get('definition', '')
if tag_model.type not in ['lshw_xpath']: if tag_model.type not in ['lshw_xpath']:
raise errors.IngesterError( raise errors.IngesterError('Unknown definition_type in '
'Unknown definition_type in ' 'tag_definition instance: %s' %
'tag_definition instance: %s' % (t.definition_type)) (t.definition_type))
model.tag_definitions.append(tag_model) model.tag_definitions.append(tag_model)
auth_keys = data.get('authorized_keys', []) auth_keys = data.get('authorized_keys', [])
@ -637,8 +642,8 @@ class YamlIngester(IngesterPlugin):
self.logger.warning( self.logger.warning(
"Duplicate document schemas found for document kind %s." "Duplicate document schemas found for document kind %s."
% schema_for) % schema_for)
self.logger.debug( self.logger.debug("Loaded schema for document kind %s." %
"Loaded schema for document kind %s." % schema_for) schema_for)
self.v1_doc_schemas[schema_for] = schema self.v1_doc_schemas[schema_for] = schema
f.close() f.close()

View File

@ -104,10 +104,10 @@ class Utils(object):
effective_list.extend( effective_list.extend(
[x for x in child_list if not x.startswith("!")]) [x for x in child_list if not x.startswith("!")])
effective_list.extend( effective_list.extend([
[x for x in parent_list x for x in parent_list
if ("!" + x) not in child_list if ("!" + x) not in child_list and x not in effective_list
and x not in effective_list]) ])
except TypeError: except TypeError:
raise TypeError("Error iterating list argument") raise TypeError("Error iterating list argument")

View File

@ -91,6 +91,7 @@ class DrydockPersistentObject(base.VersionedObject):
class DrydockObjectListBase(base.ObjectListBase): class DrydockObjectListBase(base.ObjectListBase):
def __init__(self, **kwargs): def __init__(self, **kwargs):
super(DrydockObjectListBase, self).__init__(**kwargs) super(DrydockObjectListBase, self).__init__(**kwargs)

View File

@ -34,18 +34,13 @@ class BootAction(base.DrydockPersistentObject, base.DrydockObject):
VERSION = '1.0' VERSION = '1.0'
fields = { fields = {
'name': 'name': ovo_fields.StringField(),
ovo_fields.StringField(), 'source': hd_fields.ModelSourceField(nullable=False),
'source': 'asset_list': ovo_fields.ObjectField('BootActionAssetList',
hd_fields.ModelSourceField(nullable=False), nullable=False),
'asset_list': 'node_filter': ovo_fields.ObjectField('NodeFilterSet', nullable=True),
ovo_fields.ObjectField('BootActionAssetList', nullable=False), 'target_nodes': ovo_fields.ListOfStringsField(nullable=True),
'node_filter': 'signaling': ovo_fields.BooleanField(default=True),
ovo_fields.ObjectField('NodeFilterSet', nullable=True),
'target_nodes':
ovo_fields.ListOfStringsField(nullable=True),
'signaling':
ovo_fields.BooleanField(default=True),
} }
def __init__(self, **kwargs): def __init__(self, **kwargs):
@ -160,8 +155,9 @@ class BootActionAsset(base.DrydockObject):
action_key, design_ref) action_key, design_ref)
if self.location is not None: if self.location is not None:
rendered_location = self.execute_pipeline( rendered_location = self.execute_pipeline(self.location,
self.location, self.location_pipeline, tpl_ctx=tpl_ctx) self.location_pipeline,
tpl_ctx=tpl_ctx)
data_block = self.resolve_asset_location(rendered_location) data_block = self.resolve_asset_location(rendered_location)
if self.type == hd_fields.BootactionAssetType.PackageList: if self.type == hd_fields.BootactionAssetType.PackageList:
self._parse_package_list(data_block) self._parse_package_list(data_block)
@ -169,8 +165,9 @@ class BootActionAsset(base.DrydockObject):
data_block = self.data.encode('utf-8') data_block = self.data.encode('utf-8')
if self.type != hd_fields.BootactionAssetType.PackageList: if self.type != hd_fields.BootactionAssetType.PackageList:
value = self.execute_pipeline( value = self.execute_pipeline(data_block,
data_block, self.data_pipeline, tpl_ctx=tpl_ctx) self.data_pipeline,
tpl_ctx=tpl_ctx)
if isinstance(value, str): if isinstance(value, str):
value = value.encode('utf-8') value = value.encode('utf-8')
@ -222,9 +219,9 @@ class BootActionAsset(base.DrydockObject):
:param design_ref: The design reference representing ``site_design`` :param design_ref: The design reference representing ``site_design``
""" """
return dict( return dict(node=self._get_node_context(nodename, site_design),
node=self._get_node_context(nodename, site_design), action=self._get_action_context(action_id, action_key,
action=self._get_action_context(action_id, action_key, design_ref)) design_ref))
def _get_action_context(self, action_id, action_key, design_ref): def _get_action_context(self, action_id, action_key, design_ref):
"""Create the action-specific context items for template rendering. """Create the action-specific context items for template rendering.
@ -233,11 +230,10 @@ class BootActionAsset(base.DrydockObject):
:param action_key: random key of this boot action :param action_key: random key of this boot action
:param design_ref: Design reference representing the site design :param design_ref: Design reference representing the site design
""" """
return dict( return dict(action_id=ulid2.ulid_to_base32(action_id),
action_id=ulid2.ulid_to_base32(action_id), action_key=action_key.hex(),
action_key=action_key.hex(), report_url=config.config_mgr.conf.bootactions.report_url,
report_url=config.config_mgr.conf.bootactions.report_url, design_ref=design_ref)
design_ref=design_ref)
def _get_node_context(self, nodename, site_design): def _get_node_context(self, nodename, site_design):
"""Create the node-specific context items for template rendering. """Create the node-specific context items for template rendering.
@ -246,14 +242,13 @@ class BootActionAsset(base.DrydockObject):
:param site_design: full site design :param site_design: full site design
""" """
node = site_design.get_baremetal_node(nodename) node = site_design.get_baremetal_node(nodename)
return dict( return dict(hostname=nodename,
hostname=nodename, domain=node.get_domain(site_design),
domain=node.get_domain(site_design), tags=[t for t in node.tags],
tags=[t for t in node.tags], labels={k: v
labels={k: v for (k, v) in node.owner_data.items()},
for (k, v) in node.owner_data.items()}, network=self._get_node_network_context(node, site_design),
network=self._get_node_network_context(node, site_design), interfaces=self._get_node_interface_context(node))
interfaces=self._get_node_interface_context(node))
def _get_node_network_context(self, node, site_design): def _get_node_network_context(self, node, site_design):
"""Create a node's network configuration context. """Create a node's network configuration context.
@ -298,8 +293,8 @@ class BootActionAsset(base.DrydockObject):
return ReferenceResolver.resolve_reference(asset_url) return ReferenceResolver.resolve_reference(asset_url)
except Exception as ex: except Exception as ex:
raise errors.InvalidAssetLocation( raise errors.InvalidAssetLocation(
"Unable to resolve asset reference %s: %s" % (asset_url, "Unable to resolve asset reference %s: %s" %
str(ex))) (asset_url, str(ex)))
def execute_pipeline(self, data, pipeline, tpl_ctx=None): def execute_pipeline(self, data, pipeline, tpl_ctx=None):
"""Execute a pipeline against a data element. """Execute a pipeline against a data element.

View File

@ -16,6 +16,7 @@ from oslo_versionedobjects import fields
class BaseDrydockEnum(fields.Enum): class BaseDrydockEnum(fields.Enum):
def __init__(self): def __init__(self):
super(BaseDrydockEnum, self).__init__(valid_values=self.__class__.ALL) super(BaseDrydockEnum, self).__init__(valid_values=self.__class__.ALL)

View File

@ -116,8 +116,9 @@ class HostProfile(base.DrydockPersistentObject, base.DrydockObject):
for f in inheritable_field_list: for f in inheritable_field_list:
setattr( setattr(
self, f, self, f,
objects.Utils.apply_field_inheritance( objects.Utils.apply_field_inheritance(getattr(self, f, None),
getattr(self, f, None), getattr(parent, f, None))) getattr(parent, f,
None)))
# Now compute inheritance for complex types # Now compute inheritance for complex types
self.oob_parameters = objects.Utils.merge_dicts( self.oob_parameters = objects.Utils.merge_dicts(
@ -310,8 +311,8 @@ class HostVolumeGroup(base.DrydockObject):
fields = { fields = {
'name': obj_fields.StringField(), 'name': obj_fields.StringField(),
'vg_uuid': obj_fields.StringField(nullable=True), 'vg_uuid': obj_fields.StringField(nullable=True),
'logical_volumes': obj_fields.ObjectField( 'logical_volumes': obj_fields.ObjectField('HostVolumeList',
'HostVolumeList', nullable=True), nullable=True),
} }
def __init__(self, **kwargs): def __init__(self, **kwargs):
@ -431,8 +432,8 @@ class HostStorageDevice(base.DrydockObject):
'name': obj_fields.StringField(), 'name': obj_fields.StringField(),
'volume_group': obj_fields.StringField(nullable=True), 'volume_group': obj_fields.StringField(nullable=True),
'labels': obj_fields.DictOfStringsField(nullable=True), 'labels': obj_fields.DictOfStringsField(nullable=True),
'partitions': obj_fields.ObjectField( 'partitions': obj_fields.ObjectField('HostPartitionList',
'HostPartitionList', nullable=True), nullable=True),
} }
def __init__(self, **kwargs): def __init__(self, **kwargs):
@ -535,28 +536,18 @@ class HostPartition(base.DrydockObject):
VERSION = '1.0' VERSION = '1.0'
fields = { fields = {
'name': 'name': obj_fields.StringField(),
obj_fields.StringField(), 'source': hd_fields.ModelSourceField(),
'source': 'bootable': obj_fields.BooleanField(default=False),
hd_fields.ModelSourceField(), 'volume_group': obj_fields.StringField(nullable=True),
'bootable': 'part_uuid': obj_fields.UUIDField(nullable=True),
obj_fields.BooleanField(default=False), 'size': obj_fields.StringField(nullable=True),
'volume_group': 'mountpoint': obj_fields.StringField(nullable=True),
obj_fields.StringField(nullable=True), 'fstype': obj_fields.StringField(nullable=True, default='ext4'),
'part_uuid': 'mount_options': obj_fields.StringField(nullable=True,
obj_fields.UUIDField(nullable=True), default='defaults'),
'size': 'fs_uuid': obj_fields.UUIDField(nullable=True),
obj_fields.StringField(nullable=True), 'fs_label': obj_fields.StringField(nullable=True),
'mountpoint':
obj_fields.StringField(nullable=True),
'fstype':
obj_fields.StringField(nullable=True, default='ext4'),
'mount_options':
obj_fields.StringField(nullable=True, default='defaults'),
'fs_uuid':
obj_fields.UUIDField(nullable=True),
'fs_label':
obj_fields.StringField(nullable=True),
} }
def __init__(self, **kwargs): def __init__(self, **kwargs):
@ -672,24 +663,16 @@ class HostVolume(base.DrydockObject):
VERSION = '1.0' VERSION = '1.0'
fields = { fields = {
'name': 'name': obj_fields.StringField(),
obj_fields.StringField(), 'source': hd_fields.ModelSourceField(),
'source': 'lv_uuid': obj_fields.UUIDField(nullable=True),
hd_fields.ModelSourceField(), 'size': obj_fields.StringField(nullable=True),
'lv_uuid': 'mountpoint': obj_fields.StringField(nullable=True),
obj_fields.UUIDField(nullable=True), 'fstype': obj_fields.StringField(nullable=True, default='ext4'),
'size': 'mount_options': obj_fields.StringField(nullable=True,
obj_fields.StringField(nullable=True), default='defaults'),
'mountpoint': 'fs_uuid': obj_fields.UUIDField(nullable=True),
obj_fields.StringField(nullable=True), 'fs_label': obj_fields.StringField(nullable=True),
'fstype':
obj_fields.StringField(nullable=True, default='ext4'),
'mount_options':
obj_fields.StringField(nullable=True, default='defaults'),
'fs_uuid':
obj_fields.UUIDField(nullable=True),
'fs_label':
obj_fields.StringField(nullable=True),
} }
def __init__(self, **kwargs): def __init__(self, **kwargs):

View File

@ -60,8 +60,8 @@ class BaremetalNode(drydock_provisioner.objects.hostprofile.HostProfile):
self.source = hd_fields.ModelSource.Compiled self.source = hd_fields.ModelSource.Compiled
self.resolve_kernel_params(site_design) self.resolve_kernel_params(site_design)
if resolve_aliases: if resolve_aliases:
self.logger.debug( self.logger.debug("Resolving device aliases on node %s" %
"Resolving device aliases on node %s" % self.name) self.name)
self.apply_logicalnames(site_design, state_manager) self.apply_logicalnames(site_design, state_manager)
return return
@ -261,8 +261,7 @@ class BaremetalNode(drydock_provisioner.objects.hostprofile.HostProfile):
""" """
if "regexp:" in address: if "regexp:" in address:
self.logger.info( self.logger.info(
"Regexp: prefix has been detected in address: %s" % "Regexp: prefix has been detected in address: %s" % (address))
(address))
address_regexp = address.replace("regexp:", "") address_regexp = address.replace("regexp:", "")
nodes = xml_root.findall(".//node") nodes = xml_root.findall(".//node")
logicalnames = [] logicalnames = []
@ -272,8 +271,8 @@ class BaremetalNode(drydock_provisioner.objects.hostprofile.HostProfile):
if node.get('class') == "network": if node.get('class') == "network":
address = node.find('businfo').text.replace("pci@", "") address = node.find('businfo').text.replace("pci@", "")
self.logger.debug( self.logger.debug(
"A network device PCI address found. Address=%s. Checking for regexp %s match..." % "A network device PCI address found. Address=%s. Checking for regexp %s match..."
(address, address_regexp)) % (address, address_regexp))
if re.match(address_regexp, address): if re.match(address_regexp, address):
logicalnames.append(node.find('logicalname').text) logicalnames.append(node.find('logicalname').text)
addresses.append(address) addresses.append(address)
@ -282,26 +281,25 @@ class BaremetalNode(drydock_provisioner.objects.hostprofile.HostProfile):
(address, address_regexp)) (address, address_regexp))
else: else:
self.logger.debug( self.logger.debug(
"A network device with PCI address=%s does not match the regex %s." % "A network device with PCI address=%s does not match the regex %s."
(address, address_regexp)) % (address, address_regexp))
if len(logicalnames) >= 1 and logicalnames[0]: if len(logicalnames) >= 1 and logicalnames[0]:
if len(logicalnames) > 1: if len(logicalnames) > 1:
self.logger.info( self.logger.info("Multiple nodes found for businfo=%s@%s" %
"Multiple nodes found for businfo=%s@%s" % (bus_type, address_regexp))
(bus_type, address_regexp))
for logicalname in reversed(logicalnames[0].split("/")): for logicalname in reversed(logicalnames[0].split("/")):
address = addresses[0] address = addresses[0]
self.logger.info( self.logger.info(
"Logicalname build dict: node_name = %s, alias_name = %s, " "Logicalname build dict: node_name = %s, alias_name = %s, "
"bus_type = %s, address = %s, to logicalname = %s" % "bus_type = %s, address = %s, to logicalname = %s" %
(self.get_name(), alias_name, bus_type, address, (self.get_name(), alias_name, bus_type, address,
logicalname)) logicalname))
return logicalname return logicalname
else: else:
self.logger.info( self.logger.info("No prefix has been detected in address: %s" %
"No prefix has been detected in address: %s" % (address))
(address)) nodes = xml_root.findall(".//node[businfo='" + bus_type + "@"
nodes = xml_root.findall(".//node[businfo='" + bus_type + "@" + address + "'].logicalname") + address + "'].logicalname")
if len(nodes) >= 1 and nodes[0].text: if len(nodes) >= 1 and nodes[0].text:
if (len(nodes) > 1): if (len(nodes) > 1):
self.logger.info("Multiple nodes found for businfo=%s@%s" % self.logger.info("Multiple nodes found for businfo=%s@%s" %
@ -311,7 +309,7 @@ class BaremetalNode(drydock_provisioner.objects.hostprofile.HostProfile):
"Logicalname build dict: node_name = %s, alias_name = %s, " "Logicalname build dict: node_name = %s, alias_name = %s, "
"bus_type = %s, address = %s, to logicalname = %s" % "bus_type = %s, address = %s, to logicalname = %s" %
(self.get_name(), alias_name, bus_type, address, (self.get_name(), alias_name, bus_type, address,
logicalname)) logicalname))
return logicalname return logicalname
self.logger.debug( self.logger.debug(
"Logicalname build dict: alias_name = %s, bus_type = %s, address = %s, not found" "Logicalname build dict: alias_name = %s, bus_type = %s, address = %s, not found"
@ -327,8 +325,8 @@ class BaremetalNode(drydock_provisioner.objects.hostprofile.HostProfile):
""" """
logicalnames = {} logicalnames = {}
results = state_manager.get_build_data( results = state_manager.get_build_data(node_name=self.get_name(),
node_name=self.get_name(), latest=True) latest=True)
xml_data = None xml_data = None
for result in results: for result in results:
if result.generator == "lshw": if result.generator == "lshw":
@ -351,8 +349,8 @@ class BaremetalNode(drydock_provisioner.objects.hostprofile.HostProfile):
"resolving logical names for node %s", self.get_name()) "resolving logical names for node %s", self.get_name())
raise raise
else: else:
self.logger.info( self.logger.info("No Build Data found for node_name %s" %
"No Build Data found for node_name %s" % (self.get_name())) (self.get_name()))
self.logicalnames = logicalnames self.logicalnames = logicalnames

View File

@ -29,8 +29,8 @@ class Rack(base.DrydockPersistentObject, base.DrydockObject):
'name': obj_fields.StringField(nullable=False), 'name': obj_fields.StringField(nullable=False),
'site': obj_fields.StringField(nullable=False), 'site': obj_fields.StringField(nullable=False),
'source': hd_fields.ModelSourceField(nullable=False), 'source': hd_fields.ModelSourceField(nullable=False),
'tor_switches': obj_fields.ObjectField( 'tor_switches': obj_fields.ObjectField('TorSwitchList',
'TorSwitchList', nullable=False), nullable=False),
'location': obj_fields.DictOfStringsField(nullable=False), 'location': obj_fields.DictOfStringsField(nullable=False),
'local_networks': obj_fields.ListOfStringsField(nullable=True), 'local_networks': obj_fields.ListOfStringsField(nullable=True),
} }

View File

@ -208,8 +208,8 @@ class SiteDesign(base.DrydockPersistentObject, base.DrydockObject):
if n.get_id() == network_key: if n.get_id() == network_key:
return n return n
raise errors.DesignError( raise errors.DesignError("Network %s not found in design state" %
"Network %s not found in design state" % network_key) network_key)
def add_network_link(self, new_network_link): def add_network_link(self, new_network_link):
if new_network_link is None: if new_network_link is None:
@ -226,8 +226,8 @@ class SiteDesign(base.DrydockPersistentObject, base.DrydockObject):
if network_link.get_id() == link_key: if network_link.get_id() == link_key:
return network_link return network_link
raise errors.DesignError( raise errors.DesignError("NetworkLink %s not found in design state" %
"NetworkLink %s not found in design state" % link_key) link_key)
def add_rack(self, new_rack): def add_rack(self, new_rack):
if new_rack is None: if new_rack is None:
@ -243,8 +243,8 @@ class SiteDesign(base.DrydockPersistentObject, base.DrydockObject):
for r in self.racks: for r in self.racks:
if r.get_id() == rack_key: if r.get_id() == rack_key:
return r return r
raise errors.DesignError( raise errors.DesignError("Rack %s not found in design state" %
"Rack %s not found in design state" % rack_key) rack_key)
def add_bootaction(self, new_ba): def add_bootaction(self, new_ba):
"""Add a bootaction definition to this site design. """Add a bootaction definition to this site design.
@ -265,8 +265,8 @@ class SiteDesign(base.DrydockPersistentObject, base.DrydockObject):
for ba in self.bootactions: for ba in self.bootactions:
if ba.get_id() == ba_key: if ba.get_id() == ba_key:
return ba return ba
raise errors.DesignError( raise errors.DesignError("BootAction %s not found in design state" %
"BootAction %s not found in design state" % ba_key) ba_key)
def add_host_profile(self, new_host_profile): def add_host_profile(self, new_host_profile):
if new_host_profile is None: if new_host_profile is None:
@ -283,8 +283,8 @@ class SiteDesign(base.DrydockPersistentObject, base.DrydockObject):
if p.get_id() == profile_key: if p.get_id() == profile_key:
return p return p
raise errors.DesignError( raise errors.DesignError("HostProfile %s not found in design state" %
"HostProfile %s not found in design state" % profile_key) profile_key)
def add_hardware_profile(self, new_hardware_profile): def add_hardware_profile(self, new_hardware_profile):
if new_hardware_profile is None: if new_hardware_profile is None:
@ -319,8 +319,8 @@ class SiteDesign(base.DrydockPersistentObject, base.DrydockObject):
if n.get_id() == node_key: if n.get_id() == node_key:
return n return n
raise errors.DesignError( raise errors.DesignError("BaremetalNode %s not found in design state" %
"BaremetalNode %s not found in design state" % node_key) node_key)
def add_promenade_config(self, prom_conf): def add_promenade_config(self, prom_conf):
if self.prom_configs is None: if self.prom_configs is None:

View File

@ -91,11 +91,10 @@ class Task(object):
self.result.failures) > 0): self.result.failures) > 0):
if not max_attempts or (max_attempts if not max_attempts or (max_attempts
and self.retry < max_attempts): and self.retry < max_attempts):
self.add_status_msg( self.add_status_msg(msg="Retrying task for failed entities.",
msg="Retrying task for failed entities.", error=False,
error=False, ctx='NA',
ctx='NA', ctx_type='NA')
ctx_type='NA')
self.retry = self.retry + 1 self.retry = self.retry + 1
if len(self.result.successes) > 0: if len(self.result.successes) > 0:
self.result.status = hd_fields.ActionResult.Success self.result.status = hd_fields.ActionResult.Success
@ -104,11 +103,10 @@ class Task(object):
self.save() self.save()
return True return True
else: else:
self.add_status_msg( self.add_status_msg(msg="Retry requested, out of attempts.",
msg="Retry requested, out of attempts.", error=False,
error=False, ctx='NA',
ctx='NA', ctx_type='NA')
ctx_type='NA')
raise errors.MaxRetriesReached("Retries reached max attempts.") raise errors.MaxRetriesReached("Retries reached max attempts.")
else: else:
return False return False
@ -182,12 +180,11 @@ class Task(object):
raise errors.OrchestratorError("Cannot add subtask for parent" raise errors.OrchestratorError("Cannot add subtask for parent"
" marked for termination") " marked for termination")
if self.statemgr.add_subtask(self.task_id, subtask.task_id): if self.statemgr.add_subtask(self.task_id, subtask.task_id):
self.add_status_msg( self.add_status_msg(msg="Started subtask %s for action %s" %
msg="Started subtask %s for action %s" % (str( (str(subtask.get_id()), subtask.action),
subtask.get_id()), subtask.action), error=False,
error=False, ctx=str(self.get_id()),
ctx=str(self.get_id()), ctx_type='task')
ctx_type='task')
self.subtask_id_list.append(subtask.task_id) self.subtask_id_list.append(subtask.task_id)
subtask.parent_task_id = self.task_id subtask.parent_task_id = self.task_id
subtask.save() subtask.save()
@ -261,8 +258,8 @@ class Task(object):
:param action_filter: string action name to filter subtasks on :param action_filter: string action name to filter subtasks on
""" """
self.logger.debug( self.logger.debug("Bubbling subtask results up to task %s." %
"Bubbling subtask results up to task %s." % str(self.task_id)) str(self.task_id))
self.result.successes = [] self.result.successes = []
self.result.failures = [] self.result.failures = []
for st in self.statemgr.get_complete_subtasks(self.task_id): for st in self.statemgr.get_complete_subtasks(self.task_id):
@ -340,13 +337,12 @@ class Task(object):
msg_list = task_result.message_list msg_list = task_result.message_list
for m in msg_list: for m in msg_list:
self.add_status_msg( self.add_status_msg(msg=m.msg,
msg=m.msg, error=m.error,
error=m.error, ctx_type=m.ctx_type,
ctx_type=m.ctx_type, ctx=m.ctx,
ctx=m.ctx, ts=m.ts,
ts=m.ts, **m.extra)
**m.extra)
def to_db(self, include_id=True): def to_db(self, include_id=True):
"""Convert this instance to a dictionary for use persisting to a db. """Convert this instance to a dictionary for use persisting to a db.
@ -666,9 +662,8 @@ class TaskStatusMessage(object):
:param d: dictionary of values :param d: dictionary of values
""" """
i = TaskStatusMessage( i = TaskStatusMessage(d.get('message', None), d.get('error'),
d.get('message', None), d.get('error'), d.get('context_type'), d.get('context_type'), d.get('context'))
d.get('context'))
if 'extra' in d: if 'extra' in d:
i.extra = d.get('extra') i.extra = d.get('extra')
i.ts = d.get('ts', None) i.ts = d.get('ts', None)

View File

@ -123,8 +123,8 @@ class DocumentReference(base.DrydockObject):
def __hash__(self): def __hash__(self):
"""Override default hashing function.""" """Override default hashing function."""
return hash( return hash(str(self.doc_type), str(self.doc_schema),
str(self.doc_type), str(self.doc_schema), str(self.doc_name)) str(self.doc_name))
def to_dict(self): def to_dict(self):
"""Serialize to a dictionary for further serialization.""" """Serialize to a dictionary for further serialization."""

View File

@ -72,8 +72,8 @@ class BaseAction(object):
if len(target_nodes) > 1: if len(target_nodes) > 1:
self.logger.info( self.logger.info(
"Found multiple target nodes in task %s, splitting..." % str( "Found multiple target nodes in task %s, splitting..." %
self.task.get_id())) str(self.task.get_id()))
split_tasks = dict() split_tasks = dict()
with concurrent.futures.ThreadPoolExecutor(max_workers=64) as te: with concurrent.futures.ThreadPoolExecutor(max_workers=64) as te:
@ -101,8 +101,8 @@ class BaseAction(object):
:param timeout: The number of seconds to wait for all Futures to complete :param timeout: The number of seconds to wait for all Futures to complete
:param bubble: Whether to bubble results from collected subtasks :param bubble: Whether to bubble results from collected subtasks
""" """
finished, timed_out = concurrent.futures.wait( finished, timed_out = concurrent.futures.wait(subtask_futures.values(),
subtask_futures.values(), timeout=timeout) timeout=timeout)
for k, v in subtask_futures.items(): for k, v in subtask_futures.items():
if not v.done(): if not v.done():
@ -116,8 +116,8 @@ class BaseAction(object):
else: else:
if v.exception(): if v.exception():
self.logger.error( self.logger.error(
"Uncaught excetion in subtask %s future:" % str( "Uncaught excetion in subtask %s future:" %
uuid.UUID(bytes=k)), str(uuid.UUID(bytes=k)),
exc_info=v.exception()) exc_info=v.exception())
st = self.state_manager.get_task(uuid.UUID(bytes=k)) st = self.state_manager.get_task(uuid.UUID(bytes=k))
st.bubble_results() st.bubble_results()
@ -184,16 +184,20 @@ class Noop(BaseAction):
self.logger.debug("Terminating action.") self.logger.debug("Terminating action.")
self.task.set_status(hd_fields.TaskStatus.Terminated) self.task.set_status(hd_fields.TaskStatus.Terminated)
self.task.failure() self.task.failure()
self.task.add_status_msg( self.task.add_status_msg(msg="Action terminated.",
msg="Action terminated.", ctx_type='NA', ctx='NA', error=False) ctx_type='NA',
ctx='NA',
error=False)
else: else:
self.logger.debug("Marked task as successful.") self.logger.debug("Marked task as successful.")
self.task.set_status(hd_fields.TaskStatus.Complete) self.task.set_status(hd_fields.TaskStatus.Complete)
target_nodes = self.orchestrator.get_target_nodes(self.task) target_nodes = self.orchestrator.get_target_nodes(self.task)
for n in target_nodes: for n in target_nodes:
self.task.success(focus=n.name) self.task.success(focus=n.name)
self.task.add_status_msg( self.task.add_status_msg(msg="Noop action.",
msg="Noop action.", ctx_type='NA', ctx='NA', error=False) ctx_type='NA',
ctx='NA',
error=False)
self.task.save() self.task.save()
self.logger.debug("Saved task state.") self.logger.debug("Saved task state.")
self.logger.debug("Finished Noop Action.") self.logger.debug("Finished Noop Action.")
@ -226,11 +230,10 @@ class DestroyNodes(BaseAction):
target_nodes = self.orchestrator.get_target_nodes(self.task) target_nodes = self.orchestrator.get_target_nodes(self.task)
if not target_nodes: if not target_nodes:
self.task.add_status_msg( self.task.add_status_msg(msg="No nodes in scope, no work to do.",
msg="No nodes in scope, no work to do.", error=False,
error=False, ctx='NA',
ctx='NA', ctx_type='NA')
ctx_type='NA')
self.task.success() self.task.success()
self.task.set_status(hd_fields.TaskStatus.Complete) self.task.set_status(hd_fields.TaskStatus.Complete)
self.task.save() self.task.save()
@ -325,11 +328,11 @@ class VerifySite(BaseAction):
node_driver_task = self.state_manager.get_task( node_driver_task = self.state_manager.get_task(
node_driver_task.get_id()) node_driver_task.get_id())
self.task.add_status_msg( self.task.add_status_msg(msg="Collected subtask %s" %
msg="Collected subtask %s" % str(node_driver_task.get_id()), str(node_driver_task.get_id()),
error=False, error=False,
ctx=str(node_driver_task.get_id()), ctx=str(node_driver_task.get_id()),
ctx_type='task') ctx_type='task')
self.task = self.state_manager.get_task(self.task.get_id()) self.task = self.state_manager.get_task(self.task.get_id())
self.task.set_status(hd_fields.TaskStatus.Complete) self.task.set_status(hd_fields.TaskStatus.Complete)
@ -386,11 +389,11 @@ class PrepareSite(BaseAction):
driver.execute_task(config_prov_task.get_id()) driver.execute_task(config_prov_task.get_id())
self.task.add_status_msg( self.task.add_status_msg(msg="Collected subtask %s" %
msg="Collected subtask %s" % str(config_prov_task.get_id()), str(config_prov_task.get_id()),
error=False, error=False,
ctx=str(config_prov_task.get_id()), ctx=str(config_prov_task.get_id()),
ctx_type='task') ctx_type='task')
self.logger.info("Node driver task %s:%s is complete." % self.logger.info("Node driver task %s:%s is complete." %
(config_prov_task.get_id(), config_prov_task.action)) (config_prov_task.get_id(), config_prov_task.action))
@ -410,13 +413,13 @@ class PrepareSite(BaseAction):
driver.execute_task(site_network_task.get_id()) driver.execute_task(site_network_task.get_id())
self.task.add_status_msg( self.task.add_status_msg(msg="Collected subtask %s" %
msg="Collected subtask %s" % str(site_network_task.get_id()), str(site_network_task.get_id()),
error=False, error=False,
ctx=str(site_network_task.get_id()), ctx=str(site_network_task.get_id()),
ctx_type='task') ctx_type='task')
self.logger.info( self.logger.info("Node driver task %s complete" %
"Node driver task %s complete" % (site_network_task.get_id())) (site_network_task.get_id()))
def step_usercredentials(self, driver): def step_usercredentials(self, driver):
"""Run the ConfigureUserCredentials step of this action. """Run the ConfigureUserCredentials step of this action.
@ -434,13 +437,13 @@ class PrepareSite(BaseAction):
driver.execute_task(user_creds_task.get_id()) driver.execute_task(user_creds_task.get_id())
self.task.add_status_msg( self.task.add_status_msg(msg="Collected subtask %s" %
msg="Collected subtask %s" % str(user_creds_task.get_id()), str(user_creds_task.get_id()),
error=False, error=False,
ctx=str(user_creds_task.get_id()), ctx=str(user_creds_task.get_id()),
ctx_type='task') ctx_type='task')
self.logger.info( self.logger.info("Node driver task %s complete" %
"Node driver task %s complete" % (user_creds_task.get_id())) (user_creds_task.get_id()))
class VerifyNodes(BaseAction): class VerifyNodes(BaseAction):
@ -504,19 +507,18 @@ class VerifyNodes(BaseAction):
try: try:
self._collect_subtask_futures( self._collect_subtask_futures(
task_futures, task_futures,
timeout=( timeout=(config.config_mgr.conf.timeouts.drydock_timeout
config.config_mgr.conf.timeouts.drydock_timeout * 60)) * 60))
self.logger.debug( self.logger.debug("Collected subtasks for task %s" %
"Collected subtasks for task %s" % str(self.task.get_id())) str(self.task.get_id()))
except errors.CollectSubtaskTimeout as ex: except errors.CollectSubtaskTimeout as ex:
self.logger.warning(str(ex)) self.logger.warning(str(ex))
else: else:
# no target nodes # no target nodes
self.task.add_status_msg( self.task.add_status_msg(msg="No nodes in scope, no work to do.",
msg="No nodes in scope, no work to do.", error=False,
error=False, ctx='NA',
ctx='NA', ctx_type='NA')
ctx_type='NA')
self.task.success() self.task.success()
# Set task complete and persist that info. # Set task complete and persist that info.
@ -554,11 +556,10 @@ class PrepareNodes(BaseAction):
target_nodes = self.orchestrator.get_target_nodes(self.task) target_nodes = self.orchestrator.get_target_nodes(self.task)
if not target_nodes: if not target_nodes:
self.task.add_status_msg( self.task.add_status_msg(msg="No nodes in scope, no work to do.",
msg="No nodes in scope, no work to do.", error=False,
error=False, ctx='NA',
ctx='NA', ctx_type='NA')
ctx_type='NA')
self.task.success() self.task.success()
self.task.set_status(hd_fields.TaskStatus.Complete) self.task.set_status(hd_fields.TaskStatus.Complete)
self.task.save() self.task.save()
@ -701,8 +702,9 @@ class PrepareNodes(BaseAction):
create_nodefilter_from_nodelist(node_list)) create_nodefilter_from_nodelist(node_list))
self.task.register_subtask(node_identify_task) self.task.register_subtask(node_identify_task)
self.logger.info("Starting node driver task %s to identify nodes." self.logger.info(
% (node_identify_task.get_id())) "Starting node driver task %s to identify nodes." %
(node_identify_task.get_id()))
node_driver.execute_task(node_identify_task.get_id()) node_driver.execute_task(node_identify_task.get_id())
@ -742,8 +744,8 @@ class PrepareNodes(BaseAction):
oob_driver = self._get_driver('oob', oob_type) oob_driver = self._get_driver('oob', oob_type)
if oob_driver is None: if oob_driver is None:
self.logger.warning( self.logger.warning("Node OOB type %s has no enabled driver." %
"Node OOB type %s has no enabled driver." % oob_type) oob_type)
self.task.failure() self.task.failure()
for n in oob_nodes: for n in oob_nodes:
self.task.add_status_msg( self.task.add_status_msg(
@ -772,8 +774,8 @@ class PrepareNodes(BaseAction):
self._collect_subtask_futures( self._collect_subtask_futures(
task_futures, task_futures,
timeout=(config.config_mgr.conf.timeouts.drydock_timeout * 60)) timeout=(config.config_mgr.conf.timeouts.drydock_timeout * 60))
self.logger.debug( self.logger.debug("Collected subtasks for task %s" %
"Collected subtasks for task %s" % str(self.task.get_id())) str(self.task.get_id()))
except errors.CollectSubtaskTimeout as ex: except errors.CollectSubtaskTimeout as ex:
self.logger.warning(str(ex)) self.logger.warning(str(ex))
@ -799,8 +801,8 @@ class PrepareNodes(BaseAction):
oob_driver = self._get_driver('oob', oob_type) oob_driver = self._get_driver('oob', oob_type)
if oob_driver is None: if oob_driver is None:
self.logger.warning( self.logger.warning("Node OOB type %s has no enabled driver." %
"Node OOB type %s has no enabled driver." % oob_type) oob_type)
self.task.failure() self.task.failure()
for n in oob_nodes: for n in oob_nodes:
self.task.add_status_msg( self.task.add_status_msg(
@ -830,8 +832,8 @@ class PrepareNodes(BaseAction):
self._collect_subtask_futures( self._collect_subtask_futures(
task_futures, task_futures,
timeout=(config.config_mgr.conf.timeouts.drydock_timeout * 60)) timeout=(config.config_mgr.conf.timeouts.drydock_timeout * 60))
self.logger.debug( self.logger.debug("Collected subtasks for task %s" %
"Collected subtasks for task %s" % str(self.task.get_id())) str(self.task.get_id()))
except errors.CollectSubtaskTimeout as ex: except errors.CollectSubtaskTimeout as ex:
self.logger.warning(str(ex)) self.logger.warning(str(ex))
@ -897,11 +899,10 @@ class DeployNodes(BaseAction):
target_nodes = self.orchestrator.get_target_nodes(self.task) target_nodes = self.orchestrator.get_target_nodes(self.task)
if not target_nodes: if not target_nodes:
self.task.add_status_msg( self.task.add_status_msg(msg="No nodes in scope, no work to do.",
msg="No nodes in scope, no work to do.", error=False,
error=False, ctx='NA',
ctx='NA', ctx_type='NA')
ctx_type='NA')
self.task.success() self.task.success()
self.task.set_status(hd_fields.TaskStatus.Complete) self.task.set_status(hd_fields.TaskStatus.Complete)
self.task.save() self.task.save()
@ -952,8 +953,8 @@ class DeployNodes(BaseAction):
if (node_storage_task is not None if (node_storage_task is not None
and len(node_storage_task.result.successes) > 0): and len(node_storage_task.result.successes) > 0):
self.logger.info( self.logger.info(
"Configured storage on %s nodes, configuring platform." % (len( "Configured storage on %s nodes, configuring platform." %
node_storage_task.result.successes))) (len(node_storage_task.result.successes)))
node_platform_task = self.orchestrator.create_task( node_platform_task = self.orchestrator.create_task(
design_ref=self.task.design_ref, design_ref=self.task.design_ref,
@ -979,8 +980,8 @@ class DeployNodes(BaseAction):
if node_platform_task is not None and len( if node_platform_task is not None and len(
node_platform_task.result.successes) > 0: node_platform_task.result.successes) > 0:
self.logger.info( self.logger.info(
"Configured platform on %s nodes, starting deployment." % (len( "Configured platform on %s nodes, starting deployment." %
node_platform_task.result.successes))) (len(node_platform_task.result.successes)))
while True: while True:
if node_deploy_task is None: if node_deploy_task is None:
@ -1078,8 +1079,9 @@ class RelabelNodes(BaseAction):
node_filter=nf) node_filter=nf)
self.task.register_subtask(relabel_node_task) self.task.register_subtask(relabel_node_task)
self.logger.info("Starting kubernetes driver task %s to relabel nodes." self.logger.info(
% (relabel_node_task.get_id())) "Starting kubernetes driver task %s to relabel nodes." %
(relabel_node_task.get_id()))
kubernetes_driver.execute_task(relabel_node_task.get_id()) kubernetes_driver.execute_task(relabel_node_task.get_id())
relabel_node_task = self.state_manager.get_task( relabel_node_task = self.state_manager.get_task(
@ -1118,8 +1120,8 @@ class BootactionReport(BaseAction):
bas = self.state_manager.get_boot_actions_for_node(n) bas = self.state_manager.get_boot_actions_for_node(n)
running_bas = { running_bas = {
k: v k: v
for (k, v) in bas.items() if v. for (k, v) in bas.items() if v.get('action_status')
get('action_status') == hd_fields.ActionResult.Incomplete == hd_fields.ActionResult.Incomplete
} }
if len(running_bas) > 0: if len(running_bas) > 0:
still_running = True still_running = True
@ -1166,11 +1168,11 @@ class BootactionReport(BaseAction):
ctx=n, ctx=n,
ctx_type='node') ctx_type='node')
for ba in running_bas.values(): for ba in running_bas.values():
self.task.add_status_msg( self.task.add_status_msg(msg="Boot action %s timed out." %
msg="Boot action %s timed out." % (ba['action_name']), (ba['action_name']),
error=True, error=True,
ctx=n, ctx=n,
ctx_type='node') ctx_type='node')
if len(failure_bas) == 0 and len(running_bas) == 0: if len(failure_bas) == 0 and len(running_bas) == 0:
self.task.success(focus=n) self.task.success(focus=n)

View File

@ -41,7 +41,9 @@ from .validations.validator import Validator
class Orchestrator(object): class Orchestrator(object):
"""Defines functionality for task execution workflow.""" """Defines functionality for task execution workflow."""
def __init__(self, enabled_drivers=None, state_manager=None, def __init__(self,
enabled_drivers=None,
state_manager=None,
ingester=None): ingester=None):
"""Initialize the orchestrator. A single instance should be executing at a time. """Initialize the orchestrator. A single instance should be executing at a time.
@ -81,9 +83,8 @@ class Orchestrator(object):
if self.enabled_drivers.get('oob', None) is None: if self.enabled_drivers.get('oob', None) is None:
self.enabled_drivers['oob'] = [] self.enabled_drivers['oob'] = []
self.enabled_drivers['oob'].append( self.enabled_drivers['oob'].append(
oob_driver_class( oob_driver_class(state_manager=state_manager,
state_manager=state_manager, orchestrator=self))
orchestrator=self))
node_driver_name = enabled_drivers.node_driver node_driver_name = enabled_drivers.node_driver
if node_driver_name is not None: if node_driver_name is not None:
@ -97,8 +98,8 @@ class Orchestrator(object):
network_driver_name = enabled_drivers.network_driver network_driver_name = enabled_drivers.network_driver
if network_driver_name is not None: if network_driver_name is not None:
m, c = network_driver_name.rsplit('.', 1) m, c = network_driver_name.rsplit('.', 1)
network_driver_class = getattr( network_driver_class = getattr(importlib.import_module(m), c,
importlib.import_module(m), c, None) None)
if network_driver_class is not None: if network_driver_class is not None:
self.enabled_drivers['network'] = network_driver_class( self.enabled_drivers['network'] = network_driver_class(
state_manager=state_manager, orchestrator=self) state_manager=state_manager, orchestrator=self)
@ -106,8 +107,8 @@ class Orchestrator(object):
kubernetes_driver_name = enabled_drivers.kubernetes_driver kubernetes_driver_name = enabled_drivers.kubernetes_driver
if kubernetes_driver_name is not None: if kubernetes_driver_name is not None:
m, c = kubernetes_driver_name.rsplit('.', 1) m, c = kubernetes_driver_name.rsplit('.', 1)
kubernetes_driver_class = getattr( kubernetes_driver_class = getattr(importlib.import_module(m),
importlib.import_module(m), c, None) c, None)
if kubernetes_driver_class is not None: if kubernetes_driver_class is not None:
self.enabled_drivers[ self.enabled_drivers[
'kubernetes'] = kubernetes_driver_class( 'kubernetes'] = kubernetes_driver_class(
@ -191,8 +192,8 @@ class Orchestrator(object):
else: else:
self.logger.warning( self.logger.warning(
"Task %s has unsupported action %s, ending execution." "Task %s has unsupported action %s, ending execution."
% (str(next_task.get_id()), % (str(
next_task.action)) next_task.get_id()), next_task.action))
next_task.add_status_msg( next_task.add_status_msg(
msg="Unsupported action %s." % msg="Unsupported action %s." %
next_task.action, next_task.action,
@ -230,8 +231,8 @@ class Orchestrator(object):
:param propagate: whether the termination should propagatge to subtasks :param propagate: whether the termination should propagatge to subtasks
""" """
if task is None: if task is None:
raise errors.OrchestratorError( raise errors.OrchestratorError("Could find task %s" %
"Could find task %s" % str(task.get_id())) str(task.get_id()))
else: else:
# Terminate initial task first to prevent add'l subtasks # Terminate initial task first to prevent add'l subtasks
self.logger.debug("Terminating task %s." % str(task.get_id())) self.logger.debug("Terminating task %s." % str(task.get_id()))
@ -243,8 +244,9 @@ class Orchestrator(object):
for st_id in subtasks: for st_id in subtasks:
st = self.state_manager.get_task(st_id) st = self.state_manager.get_task(st_id)
self.terminate_task( self.terminate_task(st,
st, propagate=True, terminated_by=terminated_by) propagate=True,
terminated_by=terminated_by)
def create_task(self, **kwargs): def create_task(self, **kwargs):
"""Create a new task and persist it.""" """Create a new task and persist it."""
@ -263,13 +265,14 @@ class Orchestrator(object):
nodes = site_design.baremetal_nodes nodes = site_design.baremetal_nodes
for n in nodes or []: for n in nodes or []:
try: try:
n.compile_applied_model( n.compile_applied_model(site_design,
site_design, state_manager=self.state_manager,
state_manager=self.state_manager, resolve_aliases=resolve_aliases)
resolve_aliases=resolve_aliases)
except Exception as ex: except Exception as ex:
self.logger.debug( self.logger.debug(
"Failed to build applied model for node %s.", n.name, exc_info=ex) "Failed to build applied model for node %s.",
n.name,
exc_info=ex)
raise ex raise ex
except AttributeError: except AttributeError:
self.logger.debug( self.logger.debug(
@ -305,21 +308,21 @@ class Orchestrator(object):
try: try:
status, site_design = self.get_described_site(design_ref) status, site_design = self.get_described_site(design_ref)
if status.status == hd_fields.ValidationResult.Success: if status.status == hd_fields.ValidationResult.Success:
self.compute_model_inheritance( self.compute_model_inheritance(site_design,
site_design, resolve_aliases=resolve_aliases) resolve_aliases=resolve_aliases)
self.compute_bootaction_targets(site_design) self.compute_bootaction_targets(site_design)
self.render_route_domains(site_design) self.render_route_domains(site_design)
status = val.validate_design(site_design, result_status=status) status = val.validate_design(site_design, result_status=status)
except Exception as ex: except Exception as ex:
if status is not None: if status is not None:
status.add_status_msg( status.add_status_msg("Error loading effective site: %s" %
"Error loading effective site: %s" % str(ex), str(ex),
error=True, error=True,
ctx='NA', ctx='NA',
ctx_type='NA') ctx_type='NA')
status.set_status(hd_fields.ActionResult.Failure) status.set_status(hd_fields.ActionResult.Failure)
self.logger.error( self.logger.error("Error getting site definition: %s" % str(ex),
"Error getting site definition: %s" % str(ex), exc_info=ex) exc_info=ex)
return status, site_design return status, site_design
@ -368,9 +371,8 @@ class Orchestrator(object):
nf['filter_set_type'] = 'intersection' nf['filter_set_type'] = 'intersection'
nf['filter_set'] = [ nf['filter_set'] = [
dict( dict(node_names=[x.get_id() for x in node_list],
node_names=[x.get_id() for x in node_list], filter_type='union')
filter_type='union')
] ]
return nf return nf
@ -418,8 +420,8 @@ class Orchestrator(object):
for f in node_filter.get('filter_set', []): for f in node_filter.get('filter_set', []):
result_sets.append(self.process_filter(target_nodes, f)) result_sets.append(self.process_filter(target_nodes, f))
return self.join_filter_sets( return self.join_filter_sets(node_filter.get('filter_set_type'),
node_filter.get('filter_set_type'), result_sets) result_sets)
elif isinstance(node_filter, objects.NodeFilterSet): elif isinstance(node_filter, objects.NodeFilterSet):
for f in node_filter.filter_set: for f in node_filter.filter_set:
@ -434,8 +436,8 @@ class Orchestrator(object):
elif filter_set_type == 'intersection': elif filter_set_type == 'intersection':
return self.list_intersection(*result_sets) return self.list_intersection(*result_sets)
else: else:
raise errors.OrchestratorError( raise errors.OrchestratorError("Unknown filter set type %s" %
"Unknown filter set type %s" % filter_set_type) filter_set_type)
def process_filter(self, node_set, filter_set): def process_filter(self, node_set, filter_set):
"""Take a filter and apply it to the node_set. """Take a filter and apply it to the node_set.
@ -500,11 +502,10 @@ class Orchestrator(object):
target_nodes['rack_labels'] = node_set target_nodes['rack_labels'] = node_set
if set_type == 'union': if set_type == 'union':
return self.list_union( return self.list_union(target_nodes.get('node_names', []),
target_nodes.get('node_names', []), target_nodes.get('node_tags', []),
target_nodes.get('node_tags', []), target_nodes.get('rack_names', []),
target_nodes.get('rack_names', []), target_nodes.get('node_labels', []))
target_nodes.get('node_labels', []))
elif set_type == 'intersection': elif set_type == 'intersection':
return self.list_intersection( return self.list_intersection(
target_nodes.get('node_names', None), target_nodes.get('node_names', None),
@ -514,8 +515,8 @@ class Orchestrator(object):
except Exception as ex: except Exception as ex:
self.logger.error("Error processing node filter.", exc_info=ex) self.logger.error("Error processing node filter.", exc_info=ex)
raise errors.OrchestratorError( raise errors.OrchestratorError("Error processing node filter: %s" %
"Error processing node filter: %s" % str(ex)) str(ex))
def list_intersection(self, a, *rest): def list_intersection(self, a, *rest):
"""Take the intersection of a with the intersection of all the rest. """Take the intersection of a with the intersection of all the rest.
@ -569,12 +570,12 @@ class Orchestrator(object):
identity_key = None identity_key = None
self.logger.debug( self.logger.debug("Creating boot action context for node %s" %
"Creating boot action context for node %s" % nodename) nodename)
for ba in site_design.bootactions: for ba in site_design.bootactions:
self.logger.debug( self.logger.debug("Boot actions target nodes: %s" %
"Boot actions target nodes: %s" % ba.target_nodes) ba.target_nodes)
if nodename in ba.target_nodes: if nodename in ba.target_nodes:
if identity_key is None: if identity_key is None:
identity_key = os.urandom(32) identity_key = os.urandom(32)
@ -591,13 +592,12 @@ class Orchestrator(object):
"Boot action %s has disabled signaling, marking unreported." "Boot action %s has disabled signaling, marking unreported."
% ba.name) % ba.name)
action_id = ulid2.generate_binary_ulid() action_id = ulid2.generate_binary_ulid()
self.state_manager.post_boot_action( self.state_manager.post_boot_action(nodename,
nodename, task.get_id(),
task.get_id(), identity_key,
identity_key, action_id,
action_id, ba.name,
ba.name, action_status=init_status)
action_status=init_status)
return identity_key return identity_key
def find_node_package_lists(self, nodename, task): def find_node_package_lists(self, nodename, task):
@ -611,8 +611,8 @@ class Orchestrator(object):
if site_design.bootactions is None: if site_design.bootactions is None:
return None return None
self.logger.debug( self.logger.debug("Extracting package install list for node %s" %
"Extracting package install list for node %s" % nodename) nodename)
pkg_list = dict() pkg_list = dict()
@ -668,22 +668,22 @@ class Orchestrator(object):
metric = None metric = None
if 'routes' in n and n.routes is not None: if 'routes' in n and n.routes is not None:
for r in n.routes: for r in n.routes:
if 'routedomain' in r and r.get('routedomain', if 'routedomain' in r and r.get(
None) == rd: 'routedomain', None) == rd:
gw = r.get('gateway') gw = r.get('gateway')
metric = r.get('metric') metric = r.get('metric')
self.logger.debug( self.logger.debug(
"Use gateway %s for routedomain %s " "Use gateway %s for routedomain %s "
"on network %s." % (gw, rd, "on network %s." %
n.get_name())) (gw, rd, n.get_name()))
break break
if gw is not None and metric is not None: if gw is not None and metric is not None:
for cidr in rd_cidrs: for cidr in rd_cidrs:
if cidr != n.cidr: if cidr != n.cidr:
n.routes.append( n.routes.append(
dict( dict(subnet=cidr,
subnet=cidr, gateway=gw, gateway=gw,
metric=metric)) metric=metric))
else: else:
msg = "Invalid network model: {}. Cannot find " \ msg = "Invalid network model: {}. Cannot find " \
"routes field in network with routedomain: " \ "routes field in network with routedomain: " \

View File

@ -18,6 +18,7 @@ import drydock_provisioner.error as errors
class SimpleBytes(): class SimpleBytes():
def calculate_bytes(size_str): def calculate_bytes(size_str):
""" """
Calculate the size in bytes of a size_str. Calculate the size in bytes of a size_str.
@ -34,8 +35,8 @@ class SimpleBytes():
match = regex.match(size_str) match = regex.match(size_str)
if not match: if not match:
raise errors.InvalidSizeFormat( raise errors.InvalidSizeFormat("Invalid size string format: %s" %
"Invalid size string format: %s" % size_str) size_str)
base_size = int(match.group(1)) base_size = int(match.group(1))

View File

@ -18,6 +18,7 @@ from drydock_provisioner.orchestrator.util import SimpleBytes
class BootStorageRational(Validators): class BootStorageRational(Validators):
def __init__(self): def __init__(self):
super().__init__('Rational Boot Storage', 'DD1001') super().__init__('Rational Boot Storage', 'DD1001')

View File

@ -17,6 +17,7 @@ from drydock_provisioner.orchestrator.validations.validators import Validators
class CidrValidity(Validators): class CidrValidity(Validators):
def __init__(self): def __init__(self):
super().__init__('CIDR Validity', 'DD2006') super().__init__('CIDR Validity', 'DD2006')
@ -40,8 +41,10 @@ class CidrValidity(Validators):
except ValueError as e: except ValueError as e:
if str(e) == (net.cidr + " has host bits set"): if str(e) == (net.cidr + " has host bits set"):
msg = 'The provided CIDR %s has host bits set' % net.cidr msg = 'The provided CIDR %s has host bits set' % net.cidr
valid_cidr = ipaddress.ip_network(net.cidr, strict=False) valid_cidr = ipaddress.ip_network(net.cidr,
strict=False)
self.report_error( self.report_error(
msg, [net.doc_ref], msg, [net.doc_ref],
"Provide a CIDR acceptable by MAAS: %s" % str(valid_cidr)) "Provide a CIDR acceptable by MAAS: %s" %
str(valid_cidr))
return return

View File

@ -17,6 +17,7 @@ from drydock_provisioner.orchestrator.validations.validators import Validators
class HostnameValidity(Validators): class HostnameValidity(Validators):
def __init__(self): def __init__(self):
super().__init__('Hostname Validity', 'DD3003') super().__init__('Hostname Validity', 'DD3003')

View File

@ -2,6 +2,7 @@ from drydock_provisioner.orchestrator.validations.validators import Validators
class HugepagesValidity(Validators): class HugepagesValidity(Validators):
def __init__(self): def __init__(self):
super().__init__('Hugepages', 'DD1008') super().__init__('Hugepages', 'DD1008')

View File

@ -17,6 +17,7 @@ from netaddr import IPNetwork, IPAddress
class IpLocalityCheck(Validators): class IpLocalityCheck(Validators):
def __init__(self): def __init__(self):
super().__init__('IP Locality Check', "DD2002") super().__init__('IP Locality Check', "DD2002")

View File

@ -37,8 +37,9 @@ class MtuRational(Validators):
mtu = network_link.mtu mtu = network_link.mtu
if mtu and (mtu < MtuRational.MIN_MTU_SIZE if mtu and (mtu < MtuRational.MIN_MTU_SIZE
or mtu > MtuRational.MAX_MTU_SIZE): or mtu > MtuRational.MAX_MTU_SIZE):
msg = ("MTU must be between %d and %d, value is %d" % ( msg = (
MtuRational.MIN_MTU_SIZE, MtuRational.MAX_MTU_SIZE, mtu)) "MTU must be between %d and %d, value is %d" %
(MtuRational.MIN_MTU_SIZE, MtuRational.MAX_MTU_SIZE, mtu))
self.report_error( self.report_error(
msg, [network_link.doc_ref], msg, [network_link.doc_ref],
"Define a valid MTU. Standard is 1500, Jumbo is 9100.") "Define a valid MTU. Standard is 1500, Jumbo is 9100.")
@ -52,8 +53,9 @@ class MtuRational(Validators):
if network_mtu and (network_mtu < MtuRational.MIN_MTU_SIZE if network_mtu and (network_mtu < MtuRational.MIN_MTU_SIZE
or network_mtu > MtuRational.MAX_MTU_SIZE): or network_mtu > MtuRational.MAX_MTU_SIZE):
msg = ("MTU must be between %d and %d, value is %d" % ( msg = (
MtuRational.MIN_MTU_SIZE, MtuRational.MAX_MTU_SIZE, mtu)) "MTU must be between %d and %d, value is %d" %
(MtuRational.MIN_MTU_SIZE, MtuRational.MAX_MTU_SIZE, mtu))
self.report_error( self.report_error(
msg, [network.doc_ref], msg, [network.doc_ref],
"Define a valid MTU. Standard is 1500, Jumbo is 9100.") "Define a valid MTU. Standard is 1500, Jumbo is 9100.")

View File

@ -17,6 +17,7 @@ import drydock_provisioner.objects.fields as hd_fields
class NetworkTrunkingRational(Validators): class NetworkTrunkingRational(Validators):
def __init__(self): def __init__(self):
super().__init__('Network Trunking Rationalty', "DD2004") super().__init__('Network Trunking Rationalty', "DD2004")
@ -30,8 +31,8 @@ class NetworkTrunkingRational(Validators):
for network_link in network_link_list: for network_link in network_link_list:
allowed_networks = network_link.allowed_networks allowed_networks = network_link.allowed_networks
# if allowed networks > 1 trunking must be enabled # if allowed networks > 1 trunking must be enabled
if (len(allowed_networks) > 1 and network_link. if (len(allowed_networks) > 1 and network_link.trunk_mode
trunk_mode == hd_fields.NetworkLinkTrunkingMode.Disabled): == hd_fields.NetworkLinkTrunkingMode.Disabled):
msg = ('If there is more than 1 allowed network,' msg = ('If there is more than 1 allowed network,'
'trunking mode must be enabled') 'trunking mode must be enabled')
self.report_error( self.report_error(
@ -40,15 +41,17 @@ class NetworkTrunkingRational(Validators):
) )
# trunking mode is disabled, default_network must be defined # trunking mode is disabled, default_network must be defined
if (network_link.trunk_mode == hd_fields.NetworkLinkTrunkingMode. if (network_link.trunk_mode
Disabled and network_link.native_network is None): == hd_fields.NetworkLinkTrunkingMode.Disabled
and network_link.native_network is None):
msg = 'Trunking mode is disabled, a trunking default_network must be defined' msg = 'Trunking mode is disabled, a trunking default_network must be defined'
self.report_error( self.report_error(
msg, [network_link.doc_ref], msg, [network_link.doc_ref],
"Non-trunked links must have a native network defined.") "Non-trunked links must have a native network defined.")
elif (network_link.trunk_mode == hd_fields.NetworkLinkTrunkingMode. elif (network_link.trunk_mode
Disabled and network_link.native_network is not None): == hd_fields.NetworkLinkTrunkingMode.Disabled
and network_link.native_network is not None):
network = site_design.get_network(network_link.native_network) network = site_design.get_network(network_link.native_network)
if network and network.vlan_id: if network and network.vlan_id:
msg = "Network link native network has a defined VLAN tag." msg = "Network link native network has a defined VLAN tag."

View File

@ -15,6 +15,7 @@ from drydock_provisioner.orchestrator.validations.validators import Validators
class NoDuplicateIpsCheck(Validators): class NoDuplicateIpsCheck(Validators):
def __init__(self): def __init__(self):
super().__init__('Duplicated IP Check', "DD2005") super().__init__('Duplicated IP Check', "DD2005")

View File

@ -15,6 +15,7 @@ from drydock_provisioner.orchestrator.validations.validators import Validators
class IpmiValidity(Validators): class IpmiValidity(Validators):
def __init__(self): def __init__(self):
super().__init__('Valid IPMI Configuration', 'DD4001') super().__init__('Valid IPMI Configuration', 'DD4001')
@ -32,9 +33,8 @@ class IpmiValidity(Validators):
if baremetal_node.oob_type == 'ipmi': if baremetal_node.oob_type == 'ipmi':
for p in required_params: for p in required_params:
if not baremetal_node.oob_parameters.get(p, None): if not baremetal_node.oob_parameters.get(p, None):
msg = ( msg = ('OOB parameter %s for IPMI node %s missing.' %
'OOB parameter %s for IPMI node %s missing.' % p, p, baremetal_node.name)
baremetal_node.name)
self.report_error(msg, [baremetal_node.doc_ref], self.report_error(msg, [baremetal_node.doc_ref],
"Define OOB parameter %s" % p) "Define OOB parameter %s" % p)
oob_addr = None oob_addr = None

View File

@ -15,6 +15,7 @@ from drydock_provisioner.orchestrator.validations.validators import Validators
class LibvirtValidity(Validators): class LibvirtValidity(Validators):
def __init__(self): def __init__(self):
super().__init__('Valid Libvirt Configuration', 'DD4002') super().__init__('Valid Libvirt Configuration', 'DD4002')

View File

@ -15,6 +15,7 @@ from drydock_provisioner.orchestrator.validations.validators import Validators
class PlatformSelection(Validators): class PlatformSelection(Validators):
def __init__(self): def __init__(self):
super().__init__('Platform Selection', 'DD3001') super().__init__('Platform Selection', 'DD3001')
@ -39,8 +40,9 @@ class PlatformSelection(Validators):
try: try:
valid_images = node_driver.get_available_images() valid_images = node_driver.get_available_images()
except Exception: except Exception:
msg = ("Platform validation: Could not load images from driver, skipping" msg = (
"image and kernel selection validation.") "Platform validation: Could not load images from driver, skipping"
"image and kernel selection validation.")
self.report_warn( self.report_warn(
msg, [], msg, [],
"Cannot validate platform selection without accessing the node provisioner." "Cannot validate platform selection without accessing the node provisioner."
@ -53,8 +55,9 @@ class PlatformSelection(Validators):
try: try:
valid_kernels[i] = node_driver.get_available_kernels(i) valid_kernels[i] = node_driver.get_available_kernels(i)
except Exception: except Exception:
msg = ("Platform validation: Could not load kernels from driver, skipping" msg = (
"image and kernel selection validation.") "Platform validation: Could not load kernels from driver, skipping"
"image and kernel selection validation.")
self.report_warn( self.report_warn(
msg, [], msg, [],
"Cannot validate platform selection without accessing the node provisioner." "Cannot validate platform selection without accessing the node provisioner."

View File

@ -15,6 +15,7 @@ from drydock_provisioner.orchestrator.validations.validators import Validators
class RationalNetworkBond(Validators): class RationalNetworkBond(Validators):
def __init__(self): def __init__(self):
super().__init__('Network Bond Rationality', 'DD1006') super().__init__('Network Bond Rationality', 'DD1006')

View File

@ -14,7 +14,9 @@
from drydock_provisioner.orchestrator.validations.validators import Validators from drydock_provisioner.orchestrator.validations.validators import Validators
class StorageMountpoints(Validators): class StorageMountpoints(Validators):
def __init__(self): def __init__(self):
super().__init__('Storage Mountpoint', "DD2004") super().__init__('Storage Mountpoint', "DD2004")
@ -43,11 +45,10 @@ class StorageMountpoints(Validators):
if mountpoint is None: if mountpoint is None:
continue continue
if mountpoint in mountpoint_list: if mountpoint in mountpoint_list:
msg = ('Mountpoint "{}" already exists' msg = ('Mountpoint "{}" already exists'.format(
.format(mountpoint)) mountpoint))
self.report_error( self.report_error(msg, [baremetal_node.doc_ref],
msg, [baremetal_node.doc_ref], 'Please use unique mountpoints.')
'Please use unique mountpoints.')
return return
else: else:
mountpoint_list.append(mountpoint) mountpoint_list.append(mountpoint)
@ -66,8 +67,8 @@ class StorageMountpoints(Validators):
if mountpoint is None: if mountpoint is None:
continue continue
if mountpoint in mountpoint_list: if mountpoint in mountpoint_list:
msg = ('Mountpoint "{}" already exists' msg = ('Mountpoint "{}" already exists'.
.format(mountpoint)) format(mountpoint))
self.report_error( self.report_error(
msg, [baremetal_node.doc_ref], msg, [baremetal_node.doc_ref],
'Please use unique mountpoints.') 'Please use unique mountpoints.')

View File

@ -15,6 +15,7 @@ from drydock_provisioner.orchestrator.validations.validators import Validators
class StoragePartitioning(Validators): class StoragePartitioning(Validators):
def __init__(self): def __init__(self):
super().__init__('Storage Partitioning', "DD2002") super().__init__('Storage Partitioning', "DD2002")
@ -70,8 +71,9 @@ class StoragePartitioning(Validators):
all_volume_groups = baremetal_node.volume_groups or [] all_volume_groups = baremetal_node.volume_groups or []
for volume_group in all_volume_groups: for volume_group in all_volume_groups:
if volume_group.name not in volume_group_check_list: if volume_group.name not in volume_group_check_list:
msg = ('Volume group %s not assigned any physical volumes' msg = (
% (volume_group.name)) 'Volume group %s not assigned any physical volumes' %
(volume_group.name))
self.report_error( self.report_error(
msg, [baremetal_node.doc_ref], msg, [baremetal_node.doc_ref],
"Each volume group should be assigned at least one storage device " "Each volume group should be assigned at least one storage device "

View File

@ -15,6 +15,7 @@ from drydock_provisioner.orchestrator.validations.validators import Validators
class StorageSizing(Validators): class StorageSizing(Validators):
def __init__(self): def __init__(self):
super().__init__('Storage Sizing', 'DD2003') super().__init__('Storage Sizing', 'DD2003')

View File

@ -15,6 +15,7 @@ from drydock_provisioner.orchestrator.validations.validators import Validators
class UniqueNetworkCheck(Validators): class UniqueNetworkCheck(Validators):
def __init__(self): def __init__(self):
super().__init__('Allowed Network Check', 'DD1007') super().__init__('Allowed Network Check', 'DD1007')
@ -53,8 +54,8 @@ class UniqueNetworkCheck(Validators):
for name in duplicated_names: for name in duplicated_names:
msg = ( msg = (
'Allowed network %s duplicated on NetworkLink %s and NetworkLink ' 'Allowed network %s duplicated on NetworkLink %s and NetworkLink '
'%s' % (name, network_link_name, '%s' %
network_link_name_2)) (name, network_link_name, network_link_name_2))
self.report_error( self.report_error(
msg, [], msg, [],
"Each network is only allowed to cross a single network link." "Each network is only allowed to cross a single network link."

View File

@ -38,6 +38,7 @@ from drydock_provisioner.orchestrator.validations.storage_mountpoints import Sto
class Validator(): class Validator():
def __init__(self, orchestrator): def __init__(self, orchestrator):
"""Create a validator with a reference to the orchestrator. """Create a validator with a reference to the orchestrator.
@ -63,8 +64,8 @@ class Validator():
validation_error = False validation_error = False
for rule in rule_set: for rule in rule_set:
message_list = rule.execute( message_list = rule.execute(site_design=site_design,
site_design=site_design, orchestrator=self.orchestrator) orchestrator=self.orchestrator)
result_status.message_list.extend(message_list) result_status.message_list.extend(message_list)
error_msg = [m for m in message_list if m.error] error_msg = [m for m in message_list if m.error]
result_status.error_count = result_status.error_count + len( result_status.error_count = result_status.error_count + len(

View File

@ -20,7 +20,9 @@ from drydock_provisioner.objects import fields as hd_fields
import drydock_provisioner.config as config import drydock_provisioner.config as config
class Validators: class Validators:
def __init__(self, long_name, name): def __init__(self, long_name, name):
self.name = name self.name = name
self.long_name = long_name self.long_name = long_name
@ -42,13 +44,12 @@ class Validators:
:param level: String - More detailed of the severity level of this message :param level: String - More detailed of the severity level of this message
""" """
fmt_msg = "%s: %s" % (self.long_name, msg) fmt_msg = "%s: %s" % (self.long_name, msg)
msg_obj = objects.ValidationMessage( msg_obj = objects.ValidationMessage(fmt_msg,
fmt_msg, self.name,
self.name, error=error,
error=error, level=level,
level=level, docs=docs,
docs=docs, diagnostic=diagnostic)
diagnostic=diagnostic)
self.messages.append(msg_obj) self.messages.append(msg_obj)
def report_error(self, msg, docs, diagnostic): def report_error(self, msg, docs, diagnostic):

View File

@ -30,35 +30,35 @@ class DrydockPolicy(object):
# Base Policy # Base Policy
base_rules = [ base_rules = [
policy.RuleDefault( policy.RuleDefault('admin_required',
'admin_required', 'role:admin or is_admin:1',
'role:admin or is_admin:1', description='Actions requiring admin authority'),
description='Actions requiring admin authority'),
] ]
# Orchestrator Policy # Orchestrator Policy
task_rules = [ task_rules = [
policy.DocumentedRuleDefault( policy.DocumentedRuleDefault('physical_provisioner:read_task',
'physical_provisioner:read_task', 'role:admin', 'Get task status', 'role:admin', 'Get task status',
[{ [{
'path': '/api/v1.0/tasks', 'path': '/api/v1.0/tasks',
'method': 'GET' 'method': 'GET'
}, { }, {
'path': '/api/v1.0/tasks/{task_id}', 'path': '/api/v1.0/tasks/{task_id}',
'method': 'GET' 'method': 'GET'
}]), }]),
policy.DocumentedRuleDefault('physical_provisioner:create_task', policy.DocumentedRuleDefault('physical_provisioner:create_task',
'role:admin', 'Create a task', 'role:admin', 'Create a task',
[{ [{
'path': '/api/v1.0/tasks', 'path': '/api/v1.0/tasks',
'method': 'POST' 'method': 'POST'
}]), }]),
policy.DocumentedRuleDefault( policy.DocumentedRuleDefault('physical_provisioner:validate_design',
'physical_provisioner:validate_design', 'role:admin', 'role:admin',
'Create validate_design task', [{ 'Create validate_design task',
'path': '/api/v1.0/tasks', [{
'method': 'POST' 'path': '/api/v1.0/tasks',
}]), 'method': 'POST'
}]),
policy.DocumentedRuleDefault('physical_provisioner:verify_site', policy.DocumentedRuleDefault('physical_provisioner:verify_site',
'role:admin', 'Create verify_site task', 'role:admin', 'Create verify_site task',
[{ [{
@ -95,12 +95,12 @@ class DrydockPolicy(object):
'path': '/api/v1.0/tasks', 'path': '/api/v1.0/tasks',
'method': 'POST' 'method': 'POST'
}]), }]),
policy.DocumentedRuleDefault('physical_provisioner:delete_tasks', policy.DocumentedRuleDefault(
'role:admin', 'Deletes tasks by age', 'physical_provisioner:delete_tasks', 'role:admin',
[{ 'Deletes tasks by age', [{
'path': '/api/v1.0/tasks', 'path': '/api/v1.0/tasks',
'method': 'DELETE' 'method': 'DELETE'
}]), }]),
policy.DocumentedRuleDefault('physical_provisioner:relabel_nodes', policy.DocumentedRuleDefault('physical_provisioner:relabel_nodes',
'role:admin', 'Create relabel_nodes task', 'role:admin', 'Create relabel_nodes task',
[{ [{
@ -110,10 +110,8 @@ class DrydockPolicy(object):
policy.DocumentedRuleDefault( policy.DocumentedRuleDefault(
'physical_provisioner:read_build_data', 'role:admin', 'physical_provisioner:read_build_data', 'role:admin',
'Read build data for a node', [{ 'Read build data for a node', [{
'path': 'path': '/api/v1.0/nodes/{nodename}/builddata',
'/api/v1.0/nodes/{nodename}/builddata', 'method': 'GET',
'method':
'GET',
}]), }]),
] ]
@ -121,8 +119,7 @@ class DrydockPolicy(object):
data_rules = [ data_rules = [
policy.DocumentedRuleDefault( policy.DocumentedRuleDefault(
'physical_provisioner:read_data', 'role:admin', 'physical_provisioner:read_data', 'role:admin',
'Read loaded design data', 'Read loaded design data', [{
[{
'path': '/api/v1.0/designs', 'path': '/api/v1.0/designs',
'method': 'GET' 'method': 'GET'
}, { }, {
@ -131,8 +128,7 @@ class DrydockPolicy(object):
}]), }]),
policy.DocumentedRuleDefault( policy.DocumentedRuleDefault(
'physical_provisioner:ingest_data', 'role:admin', 'physical_provisioner:ingest_data', 'role:admin',
'Load design data', 'Load design data', [{
[{
'path': '/api/v1.0/designs', 'path': '/api/v1.0/designs',
'method': 'POST' 'method': 'POST'
}, { }, {
@ -182,6 +178,7 @@ class ApiEnforcer(object):
self.logger = logging.getLogger('drydock.policy') self.logger = logging.getLogger('drydock.policy')
def __call__(self, f): def __call__(self, f):
@functools.wraps(f) @functools.wraps(f)
def secure_handler(slf, req, resp, *args, **kwargs): def secure_handler(slf, req, resp, *args, **kwargs):
ctx = req.context ctx = req.context
@ -199,18 +196,16 @@ class ApiEnforcer(object):
slf.info( slf.info(
ctx, ctx,
"Error - Forbidden access - action: %s" % self.action) "Error - Forbidden access - action: %s" % self.action)
slf.return_error( slf.return_error(resp,
resp, falcon.HTTP_403,
falcon.HTTP_403, message="Forbidden",
message="Forbidden", retry=False)
retry=False)
else: else:
slf.info(ctx, "Error - Unauthenticated access") slf.info(ctx, "Error - Unauthenticated access")
slf.return_error( slf.return_error(resp,
resp, falcon.HTTP_401,
falcon.HTTP_401, message="Unauthenticated",
message="Unauthenticated", retry=False)
retry=False)
return secure_handler return secure_handler

View File

@ -20,6 +20,7 @@ from sqlalchemy.dialects import postgresql as pg
class ExtendTable(Table): class ExtendTable(Table):
def __new__(cls, metadata): def __new__(cls, metadata):
self = super().__new__(cls, cls.__tablename__, metadata, self = super().__new__(cls, cls.__tablename__, metadata,
*cls.__schema__) *cls.__schema__)

Some files were not shown because too many files have changed in this diff Show More