From d00eaf0303f50478d6503c2c2096e931134189ac Mon Sep 17 00:00:00 2001
From: Sergiy Markin
Date: Wed, 26 Apr 2023 12:31:09 +0000
Subject: [PATCH] Drydock focal related upgrades

This PS implements the following changes:
- switches the dependency freeze approach to requirements-direct.txt and requirements-frozen.txt files
- adjusts code indentation style according to yapf recommendations
- replaces deprecated usage of the response.body attribute with response.text
- fixes integration tests controlled by Makefile + tox
- uplifts Helm to v3.9.4

Change-Id: I751db72eb8f670825382f11a36657112faeb169a
---
 .readthedocs.yaml | 4 +-
 Makefile | 2 +-
 README.md | 2 +-
 alembic/env.py | 19 +-
 charts/drydock/Chart.yaml | 2 +-
 doc/requirements-doc.txt | 6 +-
 doc/source/_static/drydock.conf.sample | 281 ---
 doc/source/_static/policy.yaml.sample | 70 -
 etc/drydock/drydock.conf.sample | 281 ---
 etc/drydock/policy.yaml.sample | 70 -
 images/drydock/Dockerfile.ubuntu_bionic | 4 +-
 images/drydock/Dockerfile.ubuntu_focal | 7 +-
 python/drydock_provisioner/cli/commands.py | 50 +-
 .../drydock_provisioner/cli/design/actions.py | 4 +-
 .../cli/design/commands.py | 12 +-
 .../drydock_provisioner/cli/node/actions.py | 4 +-
 .../drydock_provisioner/cli/node/commands.py | 24 +-
 .../drydock_provisioner/cli/part/actions.py | 9 +-
 .../drydock_provisioner/cli/part/commands.py | 30 +-
 .../drydock_provisioner/cli/task/actions.py | 8 +-
 .../drydock_provisioner/cli/task/commands.py | 44 +-
 python/drydock_provisioner/config.py | 132 +-
 python/drydock_provisioner/control/api.py | 56 +-
 python/drydock_provisioner/control/base.py | 21 +-
 .../drydock_provisioner/control/bootaction.py | 59 +-
 python/drydock_provisioner/control/designs.py | 114 +-
 python/drydock_provisioner/control/health.py | 24 +-
 .../drydock_provisioner/control/middleware.py | 19 +-
 python/drydock_provisioner/control/nodes.py | 56 +-
 python/drydock_provisioner/control/tasks.py | 175 +-
 python/drydock_provisioner/control/util.py | 5 +-
 .../drydock_provisioner/control/validation.py | 4 +-
 python/drydock_provisioner/drivers/driver.py | 1 +
 .../promenade_driver/actions/k8s_node.py | 26 +-
 .../kubernetes/promenade_driver/driver.py | 46 +-
 .../promenade_driver/promenade_client.py | 67 +-
 .../drivers/node/maasdriver/actions/node.py | 1174 ++++++------
 .../drivers/node/maasdriver/api_client.py | 43 +-
 .../drivers/node/maasdriver/driver.py | 51 +-
 .../drivers/node/maasdriver/models/base.py | 5 +-
 .../node/maasdriver/models/blockdev.py | 20 +-
 .../drivers/node/maasdriver/models/fabric.py | 4 +-
 .../node/maasdriver/models/interface.py | 23 +-
 .../drivers/node/maasdriver/models/iprange.py | 5 +-
 .../drivers/node/maasdriver/models/machine.py | 151 +-
 .../node/maasdriver/models/partition.py | 20 +-
 .../node/maasdriver/models/rack_controller.py | 6 +-
 .../drivers/node/maasdriver/models/subnet.py | 17 +-
 .../drivers/node/maasdriver/models/tag.py | 9 +-
 .../drivers/node/maasdriver/models/vlan.py | 10 +-
 .../node/maasdriver/models/volumegroup.py | 23 +-
 .../drivers/oob/libvirt_driver/actions/oob.py | 109 +-
 .../drivers/oob/libvirt_driver/driver.py | 17 +-
 .../drivers/oob/manual_driver/driver.py | 14 +-
 .../drivers/oob/pyghmi_driver/actions/oob.py | 149 +-
 .../drivers/oob/pyghmi_driver/driver.py | 16 +-
 .../drivers/oob/redfish_driver/actions/oob.py | 245 +--
 .../drivers/oob/redfish_driver/client.py | 19 +-
 .../drivers/oob/redfish_driver/driver.py | 36 +-
 python/drydock_provisioner/drydock.py | 28 +-
 .../drydock_client/client.py | 10 +-
 .../drydock_client/session.py | 37 +-
 python/drydock_provisioner/error.py
| 1 + .../drydock_provisioner/ingester/ingester.py | 13 +- .../ingester/plugins/__init__.py | 1 + .../ingester/plugins/deckhand.py | 30 +- .../ingester/plugins/yaml.py | 23 +- .../drydock_provisioner/objects/__init__.py | 8 +- python/drydock_provisioner/objects/base.py | 1 + .../drydock_provisioner/objects/bootaction.py | 63 +- python/drydock_provisioner/objects/fields.py | 1 + .../objects/hostprofile.py | 75 +- python/drydock_provisioner/objects/node.py | 40 +- python/drydock_provisioner/objects/rack.py | 4 +- python/drydock_provisioner/objects/site.py | 24 +- python/drydock_provisioner/objects/task.py | 51 +- .../drydock_provisioner/objects/validation.py | 4 +- .../orchestrator/actions/orchestrator.py | 164 +- .../orchestrator/orchestrator.py | 124 +- .../drydock_provisioner/orchestrator/util.py | 5 +- .../validations/boot_storage_rational.py | 1 + .../orchestrator/validations/cidr_validity.py | 9 +- .../validations/hostname_validity.py | 1 + .../validations/hugepages_validity.py | 1 + .../validations/ip_locality_check.py | 1 + .../orchestrator/validations/mtu_rational.py | 10 +- .../validations/network_trunking_rational.py | 15 +- .../validations/no_duplicate_ips_check.py | 1 + .../validations/oob_valid_ipmi.py | 6 +- .../validations/oob_valid_libvirt.py | 1 + .../validations/platform_selection.py | 11 +- .../validations/rational_network_bond.py | 1 + .../validations/storage_mountpoints.py | 15 +- .../validations/storage_partititioning.py | 6 +- .../validations/storage_sizing.py | 1 + .../validations/unique_network_check.py | 5 +- .../orchestrator/validations/validator.py | 5 +- .../orchestrator/validations/validators.py | 15 +- python/drydock_provisioner/policy.py | 81 +- .../statemgmt/db/tables.py | 1 + .../statemgmt/design/resolver.py | 12 +- python/drydock_provisioner/statemgmt/state.py | 120 +- python/requirements-direct.txt | 60 +- ...ments-lock.txt => requirements-frozen.txt} | 61 +- python/requirements-tree.txt | 1625 ----------------- python/requirements.txt | 3 + python/setup.py | 3 +- ...rements-test.txt => test-requirements.txt} | 4 +- python/tests/conftest.py | 27 +- .../postgres/test_action_config_node_prov.py | 1 + .../postgres/test_action_prepare_nodes.py | 8 +- .../postgres/test_action_prepare_site.py | 8 +- .../postgres/test_api_bootaction.py | 15 +- .../postgres/test_api_bootaction_status.py | 37 +- .../postgres/test_api_builddata.py | 35 +- .../integration/postgres/test_api_health.py | 4 +- .../integration/postgres/test_api_tasks.py | 24 +- .../postgres/test_bootaction_context.py | 12 +- .../postgres/test_bootaction_signalling.py | 1 + .../postgres/test_build_data_collection.py | 1 + .../postgres/test_noderesult_links.py | 25 +- .../integration/postgres/test_orch_generic.py | 9 +- .../test_postgres_bootaction_status.py | 10 +- .../postgres/test_postgres_builddata.py | 1 + .../postgres/test_postgres_leadership.py | 5 +- .../postgres/test_postgres_results.py | 5 +- .../postgres/test_postgres_tasks.py | 32 +- .../integration/test_maasdriver_client.py | 5 +- .../integration/test_maasdriver_network.py | 10 +- python/tests/postgres/start_postgres.sh | 7 +- python/tests/unit/test_api_nodes_unit.py | 23 +- python/tests/unit/test_api_tasks_unit.py | 46 +- python/tests/unit/test_api_validation.py | 36 +- python/tests/unit/test_api_versions.py | 2 +- python/tests/unit/test_apienforcer.py | 1 + .../unit/test_bootaction_asset_render.py | 5 +- python/tests/unit/test_bootaction_pipeline.py | 1 + python/tests/unit/test_bootaction_scoping.py | 15 +- 
.../tests/unit/test_bootaction_tarbuilder.py | 1 + python/tests/unit/test_cli_task.py | 15 +- python/tests/unit/test_design_inheritance.py | 5 +- python/tests/unit/test_drydock_client.py | 43 +- .../tests/unit/test_drydock_client_session.py | 1 + python/tests/unit/test_ingester.py | 1 + python/tests/unit/test_ingester_bootaction.py | 1 + .../tests/unit/test_ingester_invalidation.py | 1 + python/tests/unit/test_ingester_rack_model.py | 1 + .../unit/test_k8sdriver_promenade_client.py | 60 +- python/tests/unit/test_libvirt_driver.py | 1 + .../unit/test_maasdriver_calculate_bytes.py | 109 +- .../tests/unit/test_maasdriver_noderesults.py | 25 +- python/tests/unit/test_maasdriver_vlan.py | 1 + python/tests/unit/test_models.py | 13 +- python/tests/unit/test_node_logicalnames.py | 26 +- python/tests/unit/test_node_naming.py | 1 + python/tests/unit/test_orch_node_filter.py | 23 +- python/tests/unit/test_param_reference.py | 5 +- python/tests/unit/test_policy_engine.py | 1 + python/tests/unit/test_reference_resolver.py | 1 + python/tests/unit/test_render_routedomain.py | 9 +- python/tests/unit/test_schema_validation.py | 2 + python/tests/unit/test_task_link.py | 1 + python/tests/unit/test_task_node_filter.py | 21 +- python/tests/unit/test_validate_design.py | 9 +- .../unit/test_validation_rule_boot_storage.py | 13 +- .../unit/test_validation_rule_bootactions.py | 13 +- .../test_validation_rule_hostname_validity.py | 9 +- .../unit/test_validation_rule_hugepages.py | 15 +- .../unit/test_validation_rule_ip_locality.py | 26 +- .../unit/test_validation_rule_mtu_rational.py | 9 +- .../unit/test_validation_rule_network_bond.py | 9 +- .../unit/test_validation_rule_network_cidr.py | 11 +- .../test_validation_rule_network_trunking.py | 9 +- .../test_validation_rule_no_duplicate_IPs.py | 22 +- ...test_validation_rule_storage_mountpoint.py | 18 +- ...st_validation_rule_storage_partitioning.py | 18 +- .../test_validation_rule_storage_sizing.py | 9 +- .../test_validation_rule_unique_network.py | 9 +- .../test_validation_rule_valid_platform.py | 15 +- tools/helm_install.sh | 2 +- tox.ini | 24 +- 181 files changed, 2811 insertions(+), 4894 deletions(-) rename python/{requirements-lock.txt => requirements-frozen.txt} (77%) delete mode 100644 python/requirements-tree.txt create mode 100644 python/requirements.txt rename python/{requirements-test.txt => test-requirements.txt} (86%) diff --git a/.readthedocs.yaml b/.readthedocs.yaml index 845af27e..22f832dd 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -22,5 +22,5 @@ python: version: 3.8 install: - requirements: doc/requirements-doc.txt - - requirements: python/requirements-lock.txt - - requirements: python/requirements-test.txt + - requirements: python/requirements-frozen.txt + - requirements: python/test-requirements.txt diff --git a/Makefile b/Makefile index 0232a029..ace042ed 100644 --- a/Makefile +++ b/Makefile @@ -19,7 +19,7 @@ IMAGE_PREFIX ?= airshipit IMAGE_TAG ?= dev HELM := $(shell realpath $(BUILD_DIR))/helm UBUNTU_BASE_IMAGE ?= -DISTRO ?= ubuntu_focal +DISTRO ?= ubuntu_focal PROXY ?= http://proxy.foo.com:8000 NO_PROXY ?= localhost,127.0.0.1,.svc.cluster.local USE_PROXY ?= false diff --git a/README.md b/README.md index 75eca6c2..1d5a437e 100644 --- a/README.md +++ b/README.md @@ -32,7 +32,7 @@ and policy file templates to be customized $ tox -e genpolicy $ virtualenv -p python3.5 /var/tmp/drydock $ . /var/tmp/drydock/bin/activate - $ pip install -r requirements-lock.txt + $ pip install -r requirements-frozen.txt $ pip install . 
$ cp -r etc/drydock /etc/drydock diff --git a/alembic/env.py b/alembic/env.py index 6a9baebf..235c9764 100644 --- a/alembic/env.py +++ b/alembic/env.py @@ -39,8 +39,9 @@ def run_migrations_offline(): """ return # We don't support offline url = config.get_main_option("sqlalchemy.url") - context.configure( - url=url, target_metadata=target_metadata, literal_binds=True) + context.configure(url=url, + target_metadata=target_metadata, + literal_binds=True) with context.begin_transaction(): context.run_migrations() @@ -55,15 +56,15 @@ def run_migrations_online(): """ db_url = os.environ['DRYDOCK_DB_URL'] - connectable = engine_from_config( - config.get_section(config.config_ini_section), - prefix='sqlalchemy.', - poolclass=pool.NullPool, - url=db_url) + connectable = engine_from_config(config.get_section( + config.config_ini_section), + prefix='sqlalchemy.', + poolclass=pool.NullPool, + url=db_url) with connectable.connect() as connection: - context.configure( - connection=connection, target_metadata=target_metadata) + context.configure(connection=connection, + target_metadata=target_metadata) with context.begin_transaction(): context.run_migrations() diff --git a/charts/drydock/Chart.yaml b/charts/drydock/Chart.yaml index 5b30a576..9083e359 100644 --- a/charts/drydock/Chart.yaml +++ b/charts/drydock/Chart.yaml @@ -15,7 +15,7 @@ apiVersion: v1 description: A Helm chart for Drydock name: drydock -version: 0.1.1 +version: 0.1.2 keywords: - drydock home: https://github.com/openstack/airship-drydock diff --git a/doc/requirements-doc.txt b/doc/requirements-doc.txt index c17209f4..199e1121 100644 --- a/doc/requirements-doc.txt +++ b/doc/requirements-doc.txt @@ -1,5 +1,5 @@ sphinx_rtd_theme==1.2.0 pylibyaml==0.1.0 -oslo_versionedobjects==3.1.0 -falcon==3.1.1 -keystoneauth1==5.1.2 \ No newline at end of file +oslo.versionedobjects==3.1.0 +falcon +keystoneauth1<=5.1.1 \ No newline at end of file diff --git a/doc/source/_static/drydock.conf.sample b/doc/source/_static/drydock.conf.sample index 323d4df9..f4a7e5d0 100644 --- a/doc/source/_static/drydock.conf.sample +++ b/doc/source/_static/drydock.conf.sample @@ -1,112 +1,8 @@ [DEFAULT] -# -# From drydock_provisioner -# - -# Polling interval in seconds for checking subtask or downstream status (integer -# value) -# Minimum value: 1 -#poll_interval = 10 - -# How long a leader has to check-in before leadership can be usurped, in seconds -# (integer value) -#leader_grace_period = 300 - -# How often will an instance attempt to claim leadership, in seconds (integer -# value) -#leadership_claim_interval = 30 - - -[database] - -# -# From drydock_provisioner -# - -# The URI database connect string. (string value) -#database_connect_string = - -# The SQLalchemy database connection pool size. (integer value) -#pool_size = 15 - -# Should DB connections be validated prior to use. (boolean value) -#pool_pre_ping = true - -# How long a request for a connection should wait before one becomes available. -# (integer value) -#pool_timeout = 30 - -# How many connections above pool_size are allowed to be open during high usage. -# (integer value) -#pool_overflow = 10 - -# Time, in seconds, when a connection should be closed and re-established. -1 -# for no recycling. 
(integer value) -#connection_recycle = -1 - [keystone_authtoken] -# -# From drydock_provisioner -# - -# Authentication URL (string value) -#auth_url = - -# Scope for system operations (string value) -#system_scope = - -# Domain ID to scope to (string value) -#domain_id = - -# Domain name to scope to (string value) -#domain_name = - -# Project ID to scope to (string value) -# Deprecated group/name - [keystone_authtoken]/tenant_id -#project_id = - -# Project name to scope to (string value) -# Deprecated group/name - [keystone_authtoken]/tenant_name -#project_name = - -# Domain ID containing project (string value) -#project_domain_id = - -# Domain name containing project (string value) -#project_domain_name = - -# ID of the trust to use as a trustee use (string value) -#trust_id = - -# Optional domain ID to use with v3 and v2 parameters. It will be used for both -# the user and project domain in v3 and ignored in v2 authentication. (string -# value) -#default_domain_id = - -# Optional domain name to use with v3 API and v2 parameters. It will be used for -# both the user and project domain in v3 and ignored in v2 authentication. -# (string value) -#default_domain_name = - -# User id (string value) -#user_id = - -# Username (string value) -# Deprecated group/name - [keystone_authtoken]/user_name -#username = - -# User's domain id (string value) -#user_domain_id = - -# User's domain name (string value) -#user_domain_name = - -# User's password (string value) -#password = - # # From keystonemiddleware.auth_token # @@ -266,84 +162,6 @@ #auth_section = -[libvirt_driver] - -# -# From drydock_provisioner -# - -# Polling interval in seconds for querying libvirt status (integer value) -#poll_interval = 10 - - -[logging] - -# -# From drydock_provisioner -# - -# Global log level for Drydock (string value) -#log_level = INFO - -# Logger name for the top-level logger (string value) -#global_logger_name = drydock_provisioner - -# Logger name for OOB driver logging (string value) -#oobdriver_logger_name = ${global_logger_name}.oobdriver - -# Logger name for Node driver logging (string value) -#nodedriver_logger_name = ${global_logger_name}.nodedriver - -# Logger name for Kubernetes driver logging (string value) -#kubernetesdriver_logger_name = ${global_logger_name}.kubernetesdriver - -# Logger name for API server logging (string value) -#control_logger_name = ${global_logger_name}.control - - -[maasdriver] - -# -# From drydock_provisioner -# - -# The API key for accessing MaaS (string value) -#maas_api_key = - -# The URL for accessing MaaS API (string value) -#maas_api_url = - -# Update MAAS to use the provided Node OOB params, overwriting discovered values -# (boolean value) -#use_node_oob_params = false - -# Skip BMC reconfiguration during commissioning (requires MAAS 2.7+) (boolean -# value) -#skip_bmc_config = false - -# Polling interval for querying MaaS status in seconds (integer value) -#poll_interval = 10 - - -[network] - -# -# From drydock_provisioner -# - -# Timeout for initial read of outgoing HTTP calls from Drydock in seconds. -# (integer value) -#http_client_connect_timeout = 16 - -# Timeout for initial read of outgoing HTTP calls from Drydock in seconds. -# (integer value) -#http_client_read_timeout = 300 - -# Number of retries for transient errors of outgoing HTTP calls from Drydock. 
-# (integer value) -#http_client_retries = 3 - - [oslo_policy] # @@ -402,102 +220,3 @@ # Absolute path client key file REST based policy check (string value) #remote_ssl_client_key_file = - - -[plugins] - -# -# From drydock_provisioner -# - -# Module path string of a input ingester to enable (string value) -#ingester = drydock_provisioner.ingester.plugins.yaml.YamlIngester - -# List of module path strings of OOB drivers to enable (list value) -#oob_driver = drydock_provisioner.drivers.oob.pyghmi_driver.PyghmiDriver - -# Module path string of the Node driver to enable (string value) -#node_driver = drydock_provisioner.drivers.node.maasdriver.driver.MaasNodeDriver - -# Module path string of the Kubernetes driver to enable (string value) -#kubernetes_driver = drydock_provisioner.drivers.kubernetes.promenade_driver.driver.PromenadeDriver - -# Module path string of the Network driver enable (string value) -#network_driver = - - -[pyghmi_driver] - -# -# From drydock_provisioner -# - -# Polling interval in seconds for querying IPMI status (integer value) -#poll_interval = 10 - - -[redfish_driver] - -# -# From drydock_provisioner -# - -# Maximum number of connection retries to Redfish server (integer value) -# Minimum value: 1 -#max_retries = 10 - -# Maximum reties to wait for power state change (integer value) -# Minimum value: 1 -#power_state_change_max_retries = 18 - -# Polling interval in seconds between retries for power state change (integer -# value) -#power_state_change_retry_interval = 10 - -# Use SSL to communicate with Redfish API server (boolean value) -#use_ssl = true - - -[timeouts] - -# -# From drydock_provisioner -# - -# Fallback timeout when a specific one is not configured (integer value) -#drydock_timeout = 5 - -# Timeout in minutes for creating site network templates (integer value) -#create_network_template = 2 - -# Timeout in minutes for creating user credentials (integer value) -#configure_user_credentials = 2 - -# Timeout in minutes for initial node identification (integer value) -#identify_node = 10 - -# Timeout in minutes for node commissioning and hardware configuration (integer -# value) -#configure_hardware = 30 - -# Timeout in minutes for configuring node networking (integer value) -#apply_node_networking = 5 - -# Timeout in minutes for configuring node storage (integer value) -#apply_node_storage = 5 - -# Timeout in minutes for configuring node platform (integer value) -#apply_node_platform = 5 - -# Timeout in minutes for deploying a node (integer value) -#deploy_node = 45 - -# Timeout in minutes between deployment completion and the all boot actions -# reporting status (integer value) -#bootaction_final_status = 15 - -# Timeout in minutes for releasing a node (integer value) -#destroy_node = 30 - -# Timeout in minutes for relabeling a node (integer value) -#relabel_node = 5 diff --git a/doc/source/_static/policy.yaml.sample b/doc/source/_static/policy.yaml.sample index 54f02d18..e69de29b 100644 --- a/doc/source/_static/policy.yaml.sample +++ b/doc/source/_static/policy.yaml.sample @@ -1,70 +0,0 @@ -# Actions requiring admin authority -#"admin_required": "role:admin or is_admin:1" - -# Get task status -# GET /api/v1.0/tasks -# GET /api/v1.0/tasks/{task_id} -#"physical_provisioner:read_task": "role:admin" - -# Create a task -# POST /api/v1.0/tasks -#"physical_provisioner:create_task": "role:admin" - -# Create validate_design task -# POST /api/v1.0/tasks -#"physical_provisioner:validate_design": "role:admin" - -# Create verify_site task -# POST /api/v1.0/tasks 
-#"physical_provisioner:verify_site": "role:admin" - -# Create prepare_site task -# POST /api/v1.0/tasks -#"physical_provisioner:prepare_site": "role:admin" - -# Create verify_nodes task -# POST /api/v1.0/tasks -#"physical_provisioner:verify_nodes": "role:admin" - -# Create prepare_nodes task -# POST /api/v1.0/tasks -#"physical_provisioner:prepare_nodes": "role:admin" - -# Create deploy_nodes task -# POST /api/v1.0/tasks -#"physical_provisioner:deploy_nodes": "role:admin" - -# Create destroy_nodes task -# POST /api/v1.0/tasks -#"physical_provisioner:destroy_nodes": "role:admin" - -# Deletes tasks by age -# DELETE /api/v1.0/tasks -#"physical_provisioner:delete_tasks": "role:admin" - -# Create relabel_nodes task -# POST /api/v1.0/tasks -#"physical_provisioner:relabel_nodes": "role:admin" - -# Read build data for a node -# GET /api/v1.0/nodes/{nodename}/builddata -#"physical_provisioner:read_build_data": "role:admin" - -# Read loaded design data -# GET /api/v1.0/designs -# GET /api/v1.0/designs/{design_id} -#"physical_provisioner:read_data": "role:admin" - -# Load design data -# POST /api/v1.0/designs -# POST /api/v1.0/designs/{design_id}/parts -#"physical_provisioner:ingest_data": "role:admin" - -# et health status -# GET /api/v1.0/health/extended -#"physical_provisioner:health_data": "role:admin" - -# Validate site design -# POST /api/v1.0/validatedesign -#"physical_provisioner:validate_site_design": "role:admin" - diff --git a/etc/drydock/drydock.conf.sample b/etc/drydock/drydock.conf.sample index 323d4df9..f4a7e5d0 100644 --- a/etc/drydock/drydock.conf.sample +++ b/etc/drydock/drydock.conf.sample @@ -1,112 +1,8 @@ [DEFAULT] -# -# From drydock_provisioner -# - -# Polling interval in seconds for checking subtask or downstream status (integer -# value) -# Minimum value: 1 -#poll_interval = 10 - -# How long a leader has to check-in before leadership can be usurped, in seconds -# (integer value) -#leader_grace_period = 300 - -# How often will an instance attempt to claim leadership, in seconds (integer -# value) -#leadership_claim_interval = 30 - - -[database] - -# -# From drydock_provisioner -# - -# The URI database connect string. (string value) -#database_connect_string = - -# The SQLalchemy database connection pool size. (integer value) -#pool_size = 15 - -# Should DB connections be validated prior to use. (boolean value) -#pool_pre_ping = true - -# How long a request for a connection should wait before one becomes available. -# (integer value) -#pool_timeout = 30 - -# How many connections above pool_size are allowed to be open during high usage. -# (integer value) -#pool_overflow = 10 - -# Time, in seconds, when a connection should be closed and re-established. -1 -# for no recycling. 
(integer value) -#connection_recycle = -1 - [keystone_authtoken] -# -# From drydock_provisioner -# - -# Authentication URL (string value) -#auth_url = - -# Scope for system operations (string value) -#system_scope = - -# Domain ID to scope to (string value) -#domain_id = - -# Domain name to scope to (string value) -#domain_name = - -# Project ID to scope to (string value) -# Deprecated group/name - [keystone_authtoken]/tenant_id -#project_id = - -# Project name to scope to (string value) -# Deprecated group/name - [keystone_authtoken]/tenant_name -#project_name = - -# Domain ID containing project (string value) -#project_domain_id = - -# Domain name containing project (string value) -#project_domain_name = - -# ID of the trust to use as a trustee use (string value) -#trust_id = - -# Optional domain ID to use with v3 and v2 parameters. It will be used for both -# the user and project domain in v3 and ignored in v2 authentication. (string -# value) -#default_domain_id = - -# Optional domain name to use with v3 API and v2 parameters. It will be used for -# both the user and project domain in v3 and ignored in v2 authentication. -# (string value) -#default_domain_name = - -# User id (string value) -#user_id = - -# Username (string value) -# Deprecated group/name - [keystone_authtoken]/user_name -#username = - -# User's domain id (string value) -#user_domain_id = - -# User's domain name (string value) -#user_domain_name = - -# User's password (string value) -#password = - # # From keystonemiddleware.auth_token # @@ -266,84 +162,6 @@ #auth_section = -[libvirt_driver] - -# -# From drydock_provisioner -# - -# Polling interval in seconds for querying libvirt status (integer value) -#poll_interval = 10 - - -[logging] - -# -# From drydock_provisioner -# - -# Global log level for Drydock (string value) -#log_level = INFO - -# Logger name for the top-level logger (string value) -#global_logger_name = drydock_provisioner - -# Logger name for OOB driver logging (string value) -#oobdriver_logger_name = ${global_logger_name}.oobdriver - -# Logger name for Node driver logging (string value) -#nodedriver_logger_name = ${global_logger_name}.nodedriver - -# Logger name for Kubernetes driver logging (string value) -#kubernetesdriver_logger_name = ${global_logger_name}.kubernetesdriver - -# Logger name for API server logging (string value) -#control_logger_name = ${global_logger_name}.control - - -[maasdriver] - -# -# From drydock_provisioner -# - -# The API key for accessing MaaS (string value) -#maas_api_key = - -# The URL for accessing MaaS API (string value) -#maas_api_url = - -# Update MAAS to use the provided Node OOB params, overwriting discovered values -# (boolean value) -#use_node_oob_params = false - -# Skip BMC reconfiguration during commissioning (requires MAAS 2.7+) (boolean -# value) -#skip_bmc_config = false - -# Polling interval for querying MaaS status in seconds (integer value) -#poll_interval = 10 - - -[network] - -# -# From drydock_provisioner -# - -# Timeout for initial read of outgoing HTTP calls from Drydock in seconds. -# (integer value) -#http_client_connect_timeout = 16 - -# Timeout for initial read of outgoing HTTP calls from Drydock in seconds. -# (integer value) -#http_client_read_timeout = 300 - -# Number of retries for transient errors of outgoing HTTP calls from Drydock. 
-# (integer value) -#http_client_retries = 3 - - [oslo_policy] # @@ -402,102 +220,3 @@ # Absolute path client key file REST based policy check (string value) #remote_ssl_client_key_file = - - -[plugins] - -# -# From drydock_provisioner -# - -# Module path string of a input ingester to enable (string value) -#ingester = drydock_provisioner.ingester.plugins.yaml.YamlIngester - -# List of module path strings of OOB drivers to enable (list value) -#oob_driver = drydock_provisioner.drivers.oob.pyghmi_driver.PyghmiDriver - -# Module path string of the Node driver to enable (string value) -#node_driver = drydock_provisioner.drivers.node.maasdriver.driver.MaasNodeDriver - -# Module path string of the Kubernetes driver to enable (string value) -#kubernetes_driver = drydock_provisioner.drivers.kubernetes.promenade_driver.driver.PromenadeDriver - -# Module path string of the Network driver enable (string value) -#network_driver = - - -[pyghmi_driver] - -# -# From drydock_provisioner -# - -# Polling interval in seconds for querying IPMI status (integer value) -#poll_interval = 10 - - -[redfish_driver] - -# -# From drydock_provisioner -# - -# Maximum number of connection retries to Redfish server (integer value) -# Minimum value: 1 -#max_retries = 10 - -# Maximum reties to wait for power state change (integer value) -# Minimum value: 1 -#power_state_change_max_retries = 18 - -# Polling interval in seconds between retries for power state change (integer -# value) -#power_state_change_retry_interval = 10 - -# Use SSL to communicate with Redfish API server (boolean value) -#use_ssl = true - - -[timeouts] - -# -# From drydock_provisioner -# - -# Fallback timeout when a specific one is not configured (integer value) -#drydock_timeout = 5 - -# Timeout in minutes for creating site network templates (integer value) -#create_network_template = 2 - -# Timeout in minutes for creating user credentials (integer value) -#configure_user_credentials = 2 - -# Timeout in minutes for initial node identification (integer value) -#identify_node = 10 - -# Timeout in minutes for node commissioning and hardware configuration (integer -# value) -#configure_hardware = 30 - -# Timeout in minutes for configuring node networking (integer value) -#apply_node_networking = 5 - -# Timeout in minutes for configuring node storage (integer value) -#apply_node_storage = 5 - -# Timeout in minutes for configuring node platform (integer value) -#apply_node_platform = 5 - -# Timeout in minutes for deploying a node (integer value) -#deploy_node = 45 - -# Timeout in minutes between deployment completion and the all boot actions -# reporting status (integer value) -#bootaction_final_status = 15 - -# Timeout in minutes for releasing a node (integer value) -#destroy_node = 30 - -# Timeout in minutes for relabeling a node (integer value) -#relabel_node = 5 diff --git a/etc/drydock/policy.yaml.sample b/etc/drydock/policy.yaml.sample index 54f02d18..e69de29b 100644 --- a/etc/drydock/policy.yaml.sample +++ b/etc/drydock/policy.yaml.sample @@ -1,70 +0,0 @@ -# Actions requiring admin authority -#"admin_required": "role:admin or is_admin:1" - -# Get task status -# GET /api/v1.0/tasks -# GET /api/v1.0/tasks/{task_id} -#"physical_provisioner:read_task": "role:admin" - -# Create a task -# POST /api/v1.0/tasks -#"physical_provisioner:create_task": "role:admin" - -# Create validate_design task -# POST /api/v1.0/tasks -#"physical_provisioner:validate_design": "role:admin" - -# Create verify_site task -# POST /api/v1.0/tasks 
-#"physical_provisioner:verify_site": "role:admin" - -# Create prepare_site task -# POST /api/v1.0/tasks -#"physical_provisioner:prepare_site": "role:admin" - -# Create verify_nodes task -# POST /api/v1.0/tasks -#"physical_provisioner:verify_nodes": "role:admin" - -# Create prepare_nodes task -# POST /api/v1.0/tasks -#"physical_provisioner:prepare_nodes": "role:admin" - -# Create deploy_nodes task -# POST /api/v1.0/tasks -#"physical_provisioner:deploy_nodes": "role:admin" - -# Create destroy_nodes task -# POST /api/v1.0/tasks -#"physical_provisioner:destroy_nodes": "role:admin" - -# Deletes tasks by age -# DELETE /api/v1.0/tasks -#"physical_provisioner:delete_tasks": "role:admin" - -# Create relabel_nodes task -# POST /api/v1.0/tasks -#"physical_provisioner:relabel_nodes": "role:admin" - -# Read build data for a node -# GET /api/v1.0/nodes/{nodename}/builddata -#"physical_provisioner:read_build_data": "role:admin" - -# Read loaded design data -# GET /api/v1.0/designs -# GET /api/v1.0/designs/{design_id} -#"physical_provisioner:read_data": "role:admin" - -# Load design data -# POST /api/v1.0/designs -# POST /api/v1.0/designs/{design_id}/parts -#"physical_provisioner:ingest_data": "role:admin" - -# et health status -# GET /api/v1.0/health/extended -#"physical_provisioner:health_data": "role:admin" - -# Validate site design -# POST /api/v1.0/validatedesign -#"physical_provisioner:validate_site_design": "role:admin" - diff --git a/images/drydock/Dockerfile.ubuntu_bionic b/images/drydock/Dockerfile.ubuntu_bionic index 69e56280..b17c9340 100644 --- a/images/drydock/Dockerfile.ubuntu_bionic +++ b/images/drydock/Dockerfile.ubuntu_bionic @@ -92,10 +92,10 @@ ENV LD_LIBRARY_PATH=/usr/local/lib COPY --from=baclient_builder /usr/local/lib /usr/local/lib COPY --from=baclient_builder /usr/local/include/yaml.h /usr/local/include/yaml.h -COPY ./python/requirements-lock.txt /tmp/drydock/ +COPY ./python/requirements-frozen.txt /tmp/drydock/ RUN pip3 install \ --no-cache-dir \ - -r /tmp/drydock/requirements-lock.txt + -r /tmp/drydock/requirements-frozen.txt COPY ./python /tmp/drydock/python WORKDIR /tmp/drydock/python diff --git a/images/drydock/Dockerfile.ubuntu_focal b/images/drydock/Dockerfile.ubuntu_focal index 803ded7e..933a2a75 100644 --- a/images/drydock/Dockerfile.ubuntu_focal +++ b/images/drydock/Dockerfile.ubuntu_focal @@ -106,14 +106,15 @@ ENV LD_LIBRARY_PATH=/usr/local/lib COPY --from=baclient_builder /usr/local/lib /usr/local/lib COPY --from=baclient_builder /usr/local/include/yaml.h /usr/local/include/yaml.h -COPY ./python/requirements-lock.txt /tmp/drydock/ +COPY ./python/requirements-frozen.txt /tmp/drydock/ RUN pip3 install \ --no-cache-dir \ - -r /tmp/drydock/requirements-lock.txt + -r /tmp/drydock/requirements-frozen.txt COPY ./python /tmp/drydock/python WORKDIR /tmp/drydock/python -RUN python3 setup.py install +RUN cd /tmp/drydock/python \ + && pip3 install $(pwd) COPY ./alembic /tmp/drydock/alembic COPY ./alembic.ini /tmp/drydock/alembic.ini diff --git a/python/drydock_provisioner/cli/commands.py b/python/drydock_provisioner/cli/commands.py index fda417b2..27392260 100644 --- a/python/drydock_provisioner/cli/commands.py +++ b/python/drydock_provisioner/cli/commands.py @@ -25,28 +25,27 @@ from .node import commands as node @click.group() -@click.option( - '--debug/--no-debug', help='Enable or disable debugging', default=False) +@click.option('--debug/--no-debug', + help='Enable or disable debugging', + default=False) # Supported Environment Variables -@click.option( - 
'--os_project_domain_name', - envvar='OS_PROJECT_DOMAIN_NAME', - required=False) -@click.option( - '--os_user_domain_name', envvar='OS_USER_DOMAIN_NAME', required=False) +@click.option('--os_project_domain_name', + envvar='OS_PROJECT_DOMAIN_NAME', + required=False) +@click.option('--os_user_domain_name', + envvar='OS_USER_DOMAIN_NAME', + required=False) @click.option('--os_project_name', envvar='OS_PROJECT_NAME', required=False) @click.option('--os_username', envvar='OS_USERNAME', required=False) @click.option('--os_password', envvar='OS_PASSWORD', required=False) @click.option('--os_auth_url', envvar='OS_AUTH_URL', required=False) -@click.option( - '--os_token', - help='The Keystone token to be used', - default=lambda: os.environ.get('OS_TOKEN', '')) -@click.option( - '--url', - '-u', - help='The url of the running drydock instance', - default=lambda: os.environ.get('DD_URL', '')) +@click.option('--os_token', + help='The Keystone token to be used', + default=lambda: os.environ.get('OS_TOKEN', '')) +@click.option('--url', + '-u', + help='The url of the running drydock instance', + default=lambda: os.environ.get('DD_URL', '')) @click.pass_context def drydock(ctx, debug, url, os_project_domain_name, os_user_domain_name, os_project_name, os_username, os_password, os_auth_url, os_token): @@ -83,8 +82,8 @@ def drydock(ctx, debug, url, os_project_domain_name, os_user_domain_name, str(keystone_env)) ks_sess = KeystoneClient.get_ks_session(**keystone_env) else: - logger.debug( - "Generating Keystone session by explicit token: %s" % os_token) + logger.debug("Generating Keystone session by explicit token: %s" % + os_token) ks_sess = KeystoneClient.get_ks_session(token=os_token) KeystoneClient.get_token(ks_sess=ks_sess) except Exception as ex: @@ -94,8 +93,8 @@ def drydock(ctx, debug, url, os_project_domain_name, os_user_domain_name, try: if not url: - url = KeystoneClient.get_endpoint( - 'physicalprovisioner', ks_sess=ks_sess) + url = KeystoneClient.get_endpoint('physicalprovisioner', + ks_sess=ks_sess) except Exception as ex: logger.debug("Exception getting Drydock endpoint.", exc_info=ex) ctx.fail('Error: Unable to discover Drydock API URL') @@ -109,11 +108,10 @@ def drydock(ctx, debug, url, os_project_domain_name, os_user_domain_name, if not url_parse_result.scheme: ctx.fail('URL must specify a scheme and hostname, optionally a port') ctx.obj['CLIENT'] = DrydockClient( - DrydockSession( - scheme=url_parse_result.scheme, - host=url_parse_result.hostname, - port=url_parse_result.port, - auth_gen=auth_gen)) + DrydockSession(scheme=url_parse_result.scheme, + host=url_parse_result.hostname, + port=url_parse_result.port, + auth_gen=auth_gen)) drydock.add_command(task.task) diff --git a/python/drydock_provisioner/cli/design/actions.py b/python/drydock_provisioner/cli/design/actions.py index b6fe4a6d..ed5fa0ba 100644 --- a/python/drydock_provisioner/cli/design/actions.py +++ b/python/drydock_provisioner/cli/design/actions.py @@ -59,8 +59,8 @@ class DesignShow(CliAction): # pylint: disable=too-few-public-methods design_id) def invoke(self): - return self.api_client.get_design( - design_id=self.design_id, source=self.source) + return self.api_client.get_design(design_id=self.design_id, + source=self.source) class DesignValidate(CliAction): # pylint: disable=too-few-public-methods diff --git a/python/drydock_provisioner/cli/design/commands.py b/python/drydock_provisioner/cli/design/commands.py index 3f91c7af..c4600230 100644 --- a/python/drydock_provisioner/cli/design/commands.py +++ 
b/python/drydock_provisioner/cli/design/commands.py @@ -31,10 +31,9 @@ def design(): @design.command(name='create') -@click.option( - '--base-design', - '-b', - help='The base design to model this new design after') +@click.option('--base-design', + '-b', + help='The base design to model this new design after') @click.pass_context def design_create(ctx, base_design=None): """Create a design.""" @@ -61,8 +60,9 @@ def design_show(ctx, design_id): @design.command(name='validate') -@click.option( - '--design-href', '-h', help='The design href key to the design ref') +@click.option('--design-href', + '-h', + help='The design href key to the design ref') @click.pass_context def design_validate(ctx, design_href=None): """Validate a design.""" diff --git a/python/drydock_provisioner/cli/node/actions.py b/python/drydock_provisioner/cli/node/actions.py index cbcd9875..a1e11d11 100644 --- a/python/drydock_provisioner/cli/node/actions.py +++ b/python/drydock_provisioner/cli/node/actions.py @@ -47,5 +47,5 @@ class NodeBuildData(CliAction): self.logger.debug('NodeBuildData action initialized') def invoke(self): - return self.api_client.get_node_build_data( - self.nodename, latest=self.latest) + return self.api_client.get_node_build_data(self.nodename, + latest=self.latest) diff --git a/python/drydock_provisioner/cli/node/commands.py b/python/drydock_provisioner/cli/node/commands.py index 40ed14de..99fd11d2 100644 --- a/python/drydock_provisioner/cli/node/commands.py +++ b/python/drydock_provisioner/cli/node/commands.py @@ -31,8 +31,10 @@ def node(): @node.command(name='list') -@click.option( - '--output', '-o', help='Output format: table|json', default='table') +@click.option('--output', + '-o', + help='Output format: table|json', + default='table') @click.pass_context def node_list(ctx, output='table'): """List nodes.""" @@ -59,12 +61,13 @@ def node_list(ctx, output='table'): @node.command(name='builddata') -@click.option( - '--latest/--no-latest', - help='Retrieve only the latest data items.', - default=True) -@click.option( - '--output', '-o', help='Output format: yaml|json', default='yaml') +@click.option('--latest/--no-latest', + help='Retrieve only the latest data items.', + default=True) +@click.option('--output', + '-o', + help='Output format: yaml|json', + default='yaml') @click.argument('nodename') @click.pass_context def node_builddata(ctx, nodename, latest=True, output='yaml'): @@ -78,5 +81,6 @@ def node_builddata(ctx, nodename, latest=True, output='yaml'): click.echo( "Invalid output format {}, default to YAML.".format(output)) click.echo( - yaml.safe_dump( - node_bd, allow_unicode=True, default_flow_style=False)) + yaml.safe_dump(node_bd, + allow_unicode=True, + default_flow_style=False)) diff --git a/python/drydock_provisioner/cli/part/actions.py b/python/drydock_provisioner/cli/part/actions.py index eb2bfc96..f816bf10 100644 --- a/python/drydock_provisioner/cli/part/actions.py +++ b/python/drydock_provisioner/cli/part/actions.py @@ -85,8 +85,7 @@ class PartShow(PartBase): # pylint: disable=too-few-public-methods ' kind=%s, key=%s, source=%s', design_id, kind, key, source) def invoke(self): - return self.api_client.get_part( - design_id=self.design_id, - kind=self.kind, - key=self.key, - source=self.source) + return self.api_client.get_part(design_id=self.design_id, + kind=self.kind, + key=self.key, + source=self.source) diff --git a/python/drydock_provisioner/cli/part/commands.py b/python/drydock_provisioner/cli/part/commands.py index b4abe9df..38f1bc81 100644 --- 
a/python/drydock_provisioner/cli/part/commands.py +++ b/python/drydock_provisioner/cli/part/commands.py @@ -25,10 +25,9 @@ from drydock_provisioner.cli.part.actions import PartCreate @click.group() -@click.option( - '--design-id', - '-d', - help='The id of the design containing the target parts') +@click.option('--design-id', + '-d', + help='The id of the design containing the target parts') @click.pass_context def part(ctx, design_id=None): """Drydock part commands.""" @@ -39,8 +38,9 @@ def part(ctx, design_id=None): @part.command(name='create') -@click.option( - '--file', '-f', help='The file name containing the part to create') +@click.option('--file', + '-f', + help='The file name containing the part to create') @click.pass_context def part_create(ctx, file=None): """Create a part.""" @@ -52,10 +52,9 @@ def part_create(ctx, file=None): # here is where some potential validation could be done on the input file click.echo( json.dumps( - PartCreate( - ctx.obj['CLIENT'], - design_id=ctx.obj['DESIGN_ID'], - in_file=file_contents).invoke())) + PartCreate(ctx.obj['CLIENT'], + design_id=ctx.obj['DESIGN_ID'], + in_file=file_contents).invoke())) @part.command(name='list') @@ -83,9 +82,8 @@ def part_show(ctx, source, kind, key): click.echo( json.dumps( - PartShow( - ctx.obj['CLIENT'], - design_id=ctx.obj['DESIGN_ID'], - kind=kind, - key=key, - source=source).invoke())) + PartShow(ctx.obj['CLIENT'], + design_id=ctx.obj['DESIGN_ID'], + kind=kind, + key=key, + source=source).invoke())) diff --git a/python/drydock_provisioner/cli/task/actions.py b/python/drydock_provisioner/cli/task/actions.py index a1ad6dec..bffa532c 100644 --- a/python/drydock_provisioner/cli/task/actions.py +++ b/python/drydock_provisioner/cli/task/actions.py @@ -90,10 +90,9 @@ class TaskCreate(CliAction): # pylint: disable=too-few-public-methods def invoke(self): """Invoke execution of this action.""" - task = self.api_client.create_task( - design_ref=self.design_ref, - task_action=self.action_name, - node_filter=self.node_filter) + task = self.api_client.create_task(design_ref=self.design_ref, + task_action=self.action_name, + node_filter=self.node_filter) if not self.block: return task @@ -157,6 +156,7 @@ class TaskBuildData(CliAction): def invoke(self): return self.api_client.get_task_build_data(self.task_id) + class TasksDelete(CliAction): """Action to delete tasks in database.""" diff --git a/python/drydock_provisioner/cli/task/commands.py b/python/drydock_provisioner/cli/task/commands.py index a97bc629..5d78305e 100644 --- a/python/drydock_provisioner/cli/task/commands.py +++ b/python/drydock_provisioner/cli/task/commands.py @@ -29,17 +29,16 @@ def task(): @task.command(name='create') -@click.option( - '--design-ref', '-d', help='The design reference for this action') +@click.option('--design-ref', + '-d', + help='The design reference for this action') @click.option('--action', '-a', help='The action to perform') -@click.option( - '--node-names', - '-n', - help='The nodes targeted by this action, comma separated') -@click.option( - '--rack-names', - '-r', - help='The racks targeted by this action, comma separated') +@click.option('--node-names', + '-n', + help='The nodes targeted by this action, comma separated') +@click.option('--rack-names', + '-r', + help='The racks targeted by this action, comma separated') @click.option( '--node-tags', '-t', @@ -49,10 +48,9 @@ def task(): '-b', help='The CLI will wait until the created completes before exitting', default=False) -@click.option( - '--poll-interval', - help='Polling 
interval to check task status in blocking mode.', - default=15) +@click.option('--poll-interval', + help='Polling interval to check task status in blocking mode.', + default=15) @click.pass_context def task_create(ctx, design_ref=None, @@ -112,8 +110,10 @@ def task_show(ctx, task_id=None, block=False): @task.command(name='builddata') @click.option('--task-id', '-t', help='The required task id') -@click.option( - '--output', '-o', help='The output format (yaml|json)', default='yaml') +@click.option('--output', + '-o', + help='The output format (yaml|json)', + default='yaml') @click.pass_context def task_builddata(ctx, task_id=None, output='yaml'): """Show builddata assoicated with ``task_id``.""" @@ -129,17 +129,19 @@ def task_builddata(ctx, task_id=None, output='yaml'): click.echo( 'Invalid output format {}, defaulting to YAML.'.format(output)) click.echo( - yaml.safe_dump( - task_bd, allow_unicode=True, default_flow_style=False)) + yaml.safe_dump(task_bd, + allow_unicode=True, + default_flow_style=False)) @task.command(name='delete') -@click.option('--days', '-d', help='The required number of days to retain tasks') +@click.option('--days', + '-d', + help='The required number of days to retain tasks') @click.pass_context def task_delete(ctx, days=None): """Delete tasks from database""" if not days: ctx.fail('The number of days must be specified using --days or -d') - click.echo( - TasksDelete(ctx.obj['CLIENT'], days=days).invoke()) + click.echo(TasksDelete(ctx.obj['CLIENT'], days=days).invoke()) diff --git a/python/drydock_provisioner/config.py b/python/drydock_provisioner/config.py index a7248763..0173a5c2 100644 --- a/python/drydock_provisioner/config.py +++ b/python/drydock_provisioner/config.py @@ -67,43 +67,36 @@ class DrydockConfig(object): # Logging options logging_options = [ - cfg.StrOpt( - 'log_level', default='INFO', help='Global log level for Drydock'), - cfg.StrOpt( - 'global_logger_name', - default='drydock_provisioner', - help='Logger name for the top-level logger'), - cfg.StrOpt( - 'oobdriver_logger_name', - default='${global_logger_name}.oobdriver', - help='Logger name for OOB driver logging'), - cfg.StrOpt( - 'nodedriver_logger_name', - default='${global_logger_name}.nodedriver', - help='Logger name for Node driver logging'), - cfg.StrOpt( - 'kubernetesdriver_logger_name', - default='${global_logger_name}.kubernetesdriver', - help='Logger name for Kubernetes driver logging'), - cfg.StrOpt( - 'control_logger_name', - default='${global_logger_name}.control', - help='Logger name for API server logging'), + cfg.StrOpt('log_level', + default='INFO', + help='Global log level for Drydock'), + cfg.StrOpt('global_logger_name', + default='drydock_provisioner', + help='Logger name for the top-level logger'), + cfg.StrOpt('oobdriver_logger_name', + default='${global_logger_name}.oobdriver', + help='Logger name for OOB driver logging'), + cfg.StrOpt('nodedriver_logger_name', + default='${global_logger_name}.nodedriver', + help='Logger name for Node driver logging'), + cfg.StrOpt('kubernetesdriver_logger_name', + default='${global_logger_name}.kubernetesdriver', + help='Logger name for Kubernetes driver logging'), + cfg.StrOpt('control_logger_name', + default='${global_logger_name}.control', + help='Logger name for API server logging'), ] # Database options database_options = [ - cfg.StrOpt( - 'database_connect_string', - help='The URI database connect string.'), - cfg.IntOpt( - 'pool_size', - default=15, - help='The SQLalchemy database connection pool size.'), - cfg.BoolOpt( - 
'pool_pre_ping', - default=True, - help='Should DB connections be validated prior to use.'), + cfg.StrOpt('database_connect_string', + help='The URI database connect string.'), + cfg.IntOpt('pool_size', + default=15, + help='The SQLalchemy database connection pool size.'), + cfg.BoolOpt('pool_pre_ping', + default=True, + help='Should DB connections be validated prior to use.'), cfg.IntOpt( 'pool_timeout', default=30, @@ -126,9 +119,8 @@ class DrydockConfig(object): # Options for the boot action framework bootactions_options = [ - cfg.StrOpt( - 'report_url', - default='http://localhost:9000/api/v1.0/bootactions/') + cfg.StrOpt('report_url', + default='http://localhost:9000/api/v1.0/bootactions/') ] # Options for network traffic @@ -176,10 +168,9 @@ class DrydockConfig(object): 'drydock_provisioner.drivers.kubernetes.promenade_driver.driver.PromenadeDriver', help='Module path string of the Kubernetes driver to enable'), # TODO(sh8121att) Network driver not yet implemented - cfg.StrOpt( - 'network_driver', - default=None, - help='Module path string of the Network driver enable'), + cfg.StrOpt('network_driver', + default=None, + help='Module path string of the Network driver enable'), ] # Timeouts for various tasks specified in minutes @@ -192,36 +183,30 @@ class DrydockConfig(object): 'create_network_template', default=2, help='Timeout in minutes for creating site network templates'), - cfg.IntOpt( - 'configure_user_credentials', - default=2, - help='Timeout in minutes for creating user credentials'), - cfg.IntOpt( - 'identify_node', - default=10, - help='Timeout in minutes for initial node identification'), + cfg.IntOpt('configure_user_credentials', + default=2, + help='Timeout in minutes for creating user credentials'), + cfg.IntOpt('identify_node', + default=10, + help='Timeout in minutes for initial node identification'), cfg.IntOpt( 'configure_hardware', default=30, help= 'Timeout in minutes for node commissioning and hardware configuration' ), - cfg.IntOpt( - 'apply_node_networking', - default=5, - help='Timeout in minutes for configuring node networking'), - cfg.IntOpt( - 'apply_node_storage', - default=5, - help='Timeout in minutes for configuring node storage'), - cfg.IntOpt( - 'apply_node_platform', - default=5, - help='Timeout in minutes for configuring node platform'), - cfg.IntOpt( - 'deploy_node', - default=45, - help='Timeout in minutes for deploying a node'), + cfg.IntOpt('apply_node_networking', + default=5, + help='Timeout in minutes for configuring node networking'), + cfg.IntOpt('apply_node_storage', + default=5, + help='Timeout in minutes for configuring node storage'), + cfg.IntOpt('apply_node_platform', + default=5, + help='Timeout in minutes for configuring node platform'), + cfg.IntOpt('deploy_node', + default=45, + help='Timeout in minutes for deploying a node'), cfg.IntOpt( 'bootaction_final_status', default=15, @@ -233,10 +218,9 @@ class DrydockConfig(object): default=30, help='Timeout in minutes for releasing a node', ), - cfg.IntOpt( - 'relabel_node', - default=5, - help='Timeout in minutes for relabeling a node'), + cfg.IntOpt('relabel_node', + default=5, + help='Timeout in minutes for relabeling a node'), ] def __init__(self): @@ -244,15 +228,15 @@ class DrydockConfig(object): def register_options(self, enable_keystone=True): self.conf.register_opts(DrydockConfig.options) - self.conf.register_opts( - DrydockConfig.bootactions_options, group='bootactions') + self.conf.register_opts(DrydockConfig.bootactions_options, + group='bootactions') 
self.conf.register_opts(DrydockConfig.logging_options, group='logging') self.conf.register_opts(DrydockConfig.plugin_options, group='plugins') self.conf.register_opts(DrydockConfig.network_options, group='network') - self.conf.register_opts( - DrydockConfig.database_options, group='database') - self.conf.register_opts( - DrydockConfig.timeout_options, group='timeouts') + self.conf.register_opts(DrydockConfig.database_options, + group='database') + self.conf.register_opts(DrydockConfig.timeout_options, + group='timeouts') if enable_keystone: self.conf.register_opts( loading.get_auth_plugin_conf_options('password'), diff --git a/python/drydock_provisioner/control/api.py b/python/drydock_provisioner/control/api.py index 07916462..4473d41b 100644 --- a/python/drydock_provisioner/control/api.py +++ b/python/drydock_provisioner/control/api.py @@ -45,13 +45,12 @@ def start_api(state_manager=None, ingester=None, orchestrator=None): part input :param orchestrator: Instance of drydock_provisioner.orchestrator.Orchestrator for managing tasks """ - control_api = falcon.App( - request_type=DrydockRequest, - middleware=[ - AuthMiddleware(), - ContextMiddleware(), - LoggingMiddleware() - ]) + control_api = falcon.App(request_type=DrydockRequest, + middleware=[ + AuthMiddleware(), + ContextMiddleware(), + LoggingMiddleware() + ]) control_api.add_route('/versions', VersionsResource()) @@ -59,11 +58,11 @@ def start_api(state_manager=None, ingester=None, orchestrator=None): v1_0_routes = [ # API for managing orchestrator tasks ('/health', - HealthResource( - state_manager=state_manager, orchestrator=orchestrator)), + HealthResource(state_manager=state_manager, + orchestrator=orchestrator)), ('/health/extended', - HealthExtendedResource( - state_manager=state_manager, orchestrator=orchestrator)), + HealthExtendedResource(state_manager=state_manager, + orchestrator=orchestrator)), ('/tasks', TasksResource(state_manager=state_manager, orchestrator=orchestrator)), @@ -74,15 +73,15 @@ def start_api(state_manager=None, ingester=None, orchestrator=None): # API for managing site design data ('/designs', DesignsResource(state_manager=state_manager)), ('/designs/{design_id}', - DesignResource( - state_manager=state_manager, orchestrator=orchestrator)), + DesignResource(state_manager=state_manager, + orchestrator=orchestrator)), ('/designs/{design_id}/parts', DesignsPartsResource(state_manager=state_manager, ingester=ingester)), ('/designs/{design_id}/parts/{kind}', DesignsPartsKindsResource(state_manager=state_manager)), ('/designs/{design_id}/parts/{kind}/{name}', - DesignsPartResource( - state_manager=state_manager, orchestrator=orchestrator)), + DesignsPartResource(state_manager=state_manager, + orchestrator=orchestrator)), # API to list current MaaS nodes ('/nodes', NodesResource()), @@ -91,23 +90,23 @@ def start_api(state_manager=None, ingester=None, orchestrator=None): NodeBuildDataResource(state_manager=state_manager)), # API to list current node names based ('/nodefilter', - NodeFilterResource( - state_manager=state_manager, orchestrator=orchestrator)), + NodeFilterResource(state_manager=state_manager, + orchestrator=orchestrator)), # API for nodes to discover their boot actions during curtin install ('/bootactions/nodes/{hostname}/units', - BootactionUnitsResource( - state_manager=state_manager, orchestrator=orchestrator)), + BootactionUnitsResource(state_manager=state_manager, + orchestrator=orchestrator)), ('/bootactions/nodes/{hostname}/files', - BootactionFilesResource( - state_manager=state_manager, 
orchestrator=orchestrator)), + BootactionFilesResource(state_manager=state_manager, + orchestrator=orchestrator)), ('/bootactions/{action_id}', - BootactionResource( - state_manager=state_manager, orchestrator=orchestrator)), + BootactionResource(state_manager=state_manager, + orchestrator=orchestrator)), # API to validate schemas ('/validatedesign', - ValidationResource( - state_manager=state_manager, orchestrator=orchestrator)), + ValidationResource(state_manager=state_manager, + orchestrator=orchestrator)), ] for path, res in v1_0_routes: @@ -122,10 +121,9 @@ class VersionsResource(BaseResource): """ def on_get(self, req, resp): - resp.body = self.to_json({ - 'v1.0': { + resp.text = self.to_json( + {'v1.0': { 'path': '/api/v1.0', 'status': 'stable' - } - }) + }}) resp.status = falcon.HTTP_200 diff --git a/python/drydock_provisioner/control/base.py b/python/drydock_provisioner/control/base.py index 039efceb..7e6f89d7 100644 --- a/python/drydock_provisioner/control/base.py +++ b/python/drydock_provisioner/control/base.py @@ -22,6 +22,7 @@ import drydock_provisioner.error as errors class BaseResource(object): + def __init__(self): self.logger = logging.getLogger('drydock') @@ -52,18 +53,18 @@ class BaseResource(object): json_body = json.loads(raw_body.decode('utf-8')) return json_body except json.JSONDecodeError as jex: - print( - "Invalid JSON in request: \n%s" % raw_body.decode('utf-8')) + print("Invalid JSON in request: \n%s" % + raw_body.decode('utf-8')) self.error( req.context, "Invalid JSON in request: \n%s" % raw_body.decode('utf-8')) - raise errors.InvalidFormat( - "%s: Invalid JSON in body: %s" % (req.path, jex)) + raise errors.InvalidFormat("%s: Invalid JSON in body: %s" % + (req.path, jex)) else: raise errors.InvalidFormat("Requires application/json payload") def return_error(self, resp, status_code, message="", retry=False): - resp.body = json.dumps({ + resp.text = json.dumps({ 'type': 'error', 'message': message, 'retry': retry @@ -71,8 +72,12 @@ class BaseResource(object): resp.status = status_code def log_error(self, ctx, level, msg): - extra = {'user': 'N/A', 'req_id': 'N/A', 'external_ctx': 'N/A', - 'end_user': 'N/A'} + extra = { + 'user': 'N/A', + 'req_id': 'N/A', + 'external_ctx': 'N/A', + 'end_user': 'N/A' + } if ctx is not None: extra = { @@ -104,6 +109,7 @@ class BaseResource(object): class StatefulResource(BaseResource): + def __init__(self, state_manager=None, **kwargs): super(StatefulResource, self).__init__(**kwargs) @@ -119,6 +125,7 @@ class StatefulResource(BaseResource): class DrydockRequestContext(object): + def __init__(self): self.log_level = 'ERROR' self.user = None # Username diff --git a/python/drydock_provisioner/control/bootaction.py b/python/drydock_provisioner/control/bootaction.py index 72aa434a..656bce2d 100644 --- a/python/drydock_provisioner/control/bootaction.py +++ b/python/drydock_provisioner/control/bootaction.py @@ -76,8 +76,8 @@ class BootactionResource(StatefulResource): try: ba_entry = self.state_manager.get_boot_action(action_id) except Exception as ex: - self.logger.error( - "Error querying for boot action %s" % action_id, exc_info=ex) + self.logger.error("Error querying for boot action %s" % action_id, + exc_info=ex) raise falcon.HTTPInternalServerError(str(ex)) if ba_entry is None: @@ -103,8 +103,8 @@ class BootactionResource(StatefulResource): action_id) for m in json_body.get('details', []): - rm = objects.TaskStatusMessage( - m.get('message'), m.get('error'), 'bootaction', action_id) + rm = 
objects.TaskStatusMessage(m.get('message'), m.get('error'), + 'bootaction', action_id) for f, v in m.items(): if f not in ['message', 'error']: rm['extra'] = dict() @@ -124,11 +124,12 @@ class BootactionResource(StatefulResource): resp.content_type = 'application/json' ba_entry['task_id'] = str(ba_entry['task_id']) ba_entry['action_id'] = ulid2.encode_ulid_base32(ba_entry['action_id']) - resp.body = json.dumps(ba_entry) + resp.text = json.dumps(ba_entry) return class BootactionAssetsResource(StatefulResource): + def __init__(self, orchestrator=None, **kwargs): super().__init__(**kwargs) self.orchestrator = orchestrator @@ -149,8 +150,8 @@ class BootactionAssetsResource(StatefulResource): try: ba_ctx = self.state_manager.get_boot_action_context(hostname) except Exception as ex: - self.logger.error( - "Error locating boot action for %s" % hostname, exc_info=ex) + self.logger.error("Error locating boot action for %s" % hostname, + exc_info=ex) raise falcon.HTTPNotFound() if ba_ctx is None: @@ -178,19 +179,19 @@ class BootactionAssetsResource(StatefulResource): action_id = ba_status.get('action_id') action_key = ba_status.get('identity_key') assets.extend( - ba.render_assets( - hostname, - site_design, - action_id, - action_key, - task.design_ref, - type_filter=asset_type_filter)) + ba.render_assets(hostname, + site_design, + action_id, + action_key, + task.design_ref, + type_filter=asset_type_filter)) tarball = BootactionUtils.tarbuilder(asset_list=assets) resp.set_header('Content-Type', 'application/gzip') resp.set_header( - 'Content-Disposition', "attachment; filename=\"%s-%s.tar.gz\"" - % (hostname, asset_type)) + 'Content-Disposition', + "attachment; filename=\"%s-%s.tar.gz\"" % + (hostname, asset_type)) resp.data = tarball resp.status = falcon.HTTP_200 return @@ -200,16 +201,18 @@ class BootactionAssetsResource(StatefulResource): class BootactionUnitsResource(BootactionAssetsResource): + def __init__(self, **kwargs): super().__init__(**kwargs) def on_get(self, req, resp, hostname): - self.logger.debug( - "Accessing boot action units resource for host %s." % hostname) + self.logger.debug("Accessing boot action units resource for host %s." 
% + hostname) self.do_get(req, resp, hostname, 'unit') class BootactionFilesResource(BootactionAssetsResource): + def __init__(self, **kwargs): super().__init__(**kwargs) @@ -233,18 +236,17 @@ class BootactionUtils(object): identity_key = req.get_header('X-Bootaction-Key', default='') if identity_key == '': - raise falcon.HTTPUnauthorized( - title='Unauthorized', - description='No X-Bootaction-Key', - challenges=['Bootaction-Key']) + raise falcon.HTTPUnauthorized(title='Unauthorized', + description='No X-Bootaction-Key', + challenges=['Bootaction-Key']) if ba_ctx['identity_key'] != bytes.fromhex(identity_key): logger.warn( "Forbidding boot action access - node: %s, identity_key: %s, req header: %s" - % (ba_ctx['node_name'], str(ba_ctx['identity_key']), - str(bytes.fromhex(identity_key)))) - raise falcon.HTTPForbidden( - title='Unauthorized', description='Invalid X-Bootaction-Key') + % (ba_ctx['node_name'], str( + ba_ctx['identity_key']), str(bytes.fromhex(identity_key)))) + raise falcon.HTTPForbidden(title='Unauthorized', + description='Invalid X-Bootaction-Key') @staticmethod def tarbuilder(asset_list=None): @@ -259,8 +261,9 @@ class BootactionUtils(object): :param asset_list: list of objects.BootActionAsset instances """ tarbytes = io.BytesIO() - tarball = tarfile.open( - mode='w:gz', fileobj=tarbytes, format=tarfile.GNU_FORMAT) + tarball = tarfile.open(mode='w:gz', + fileobj=tarbytes, + format=tarfile.GNU_FORMAT) asset_list = [ a for a in asset_list if a.type != BootactionAssetType.PackageList ] diff --git a/python/drydock_provisioner/control/designs.py b/python/drydock_provisioner/control/designs.py index 3142f498..a522769b 100644 --- a/python/drydock_provisioner/control/designs.py +++ b/python/drydock_provisioner/control/designs.py @@ -44,15 +44,14 @@ class DesignsResource(StatefulResource): try: designs = list(state.designs.keys()) - resp.body = json.dumps(designs) + resp.text = json.dumps(designs) resp.status = falcon.HTTP_200 except Exception as ex: self.error(req.context, "Exception raised: %s" % str(ex)) - self.return_error( - resp, - falcon.HTTP_500, - message="Error accessing design list", - retry=True) + self.return_error(resp, + falcon.HTTP_500, + message="Error accessing design list", + retry=True) @policy.ApiEnforcer('physical_provisioner:ingest_data') def on_post(self, req, resp): @@ -75,19 +74,20 @@ class DesignsResource(StatefulResource): design.assign_id() design.create(req.context, self.state_manager) - resp.body = json.dumps(design.obj_to_simple()) + resp.text = json.dumps(design.obj_to_simple()) resp.status = falcon.HTTP_201 except errors.StateError: self.error(req.context, "Error updating persistence") - self.return_error( - resp, - falcon.HTTP_500, - message="Error updating persistence", - retry=True) + self.return_error(resp, + falcon.HTTP_500, + message="Error updating persistence", + retry=True) except errors.InvalidFormat as fex: self.error(req.context, str(fex)) - self.return_error( - resp, falcon.HTTP_400, message=str(fex), retry=False) + self.return_error(resp, + falcon.HTTP_400, + message=str(fex), + retry=False) class DesignResource(StatefulResource): @@ -115,17 +115,17 @@ class DesignResource(StatefulResource): elif source == 'designed': design = self.orchestrator.get_described_site(design_id) - resp.body = json.dumps(design.obj_to_simple()) + resp.text = json.dumps(design.obj_to_simple()) except errors.DesignError: self.error(req.context, "Design %s not found" % design_id) - self.return_error( - resp, - falcon.HTTP_404, - message="Design %s not found" % 
design_id, - retry=False) + self.return_error(resp, + falcon.HTTP_404, + message="Design %s not found" % design_id, + retry=False) class DesignsPartsResource(StatefulResource): + def __init__(self, ingester=None, **kwargs): super(DesignsPartsResource, self).__init__(**kwargs) self.ingester = ingester @@ -146,11 +146,10 @@ class DesignsPartsResource(StatefulResource): self.error( None, "DesignsPartsResource POST requires parameter 'ingester'") - self.return_error( - resp, - falcon.HTTP_400, - message="POST requires parameter 'ingester'", - retry=False) + self.return_error(resp, + falcon.HTTP_400, + message="POST requires parameter 'ingester'", + retry=False) else: try: raw_body = req.stream.read(req.content_length or 0) @@ -162,37 +161,34 @@ class DesignsPartsResource(StatefulResource): design_id=design_id, context=req.context) resp.status = falcon.HTTP_201 - resp.body = json.dumps( + resp.text = json.dumps( [x.obj_to_simple() for x in parsed_items]) else: - self.return_error( - resp, - falcon.HTTP_400, - message="Empty body not supported", - retry=False) + self.return_error(resp, + falcon.HTTP_400, + message="Empty body not supported", + retry=False) except ValueError: - self.return_error( - resp, - falcon.HTTP_500, - message="Error processing input", - retry=False) + self.return_error(resp, + falcon.HTTP_500, + message="Error processing input", + retry=False) except LookupError: - self.return_error( - resp, - falcon.HTTP_400, - message="Ingester %s not registered" % ingester_name, - retry=False) + self.return_error(resp, + falcon.HTTP_400, + message="Ingester %s not registered" % + ingester_name, + retry=False) @policy.ApiEnforcer('physical_provisioner:ingest_data') def on_get(self, req, resp, design_id): try: design = self.state_manager.get_design(design_id) except errors.DesignError: - self.return_error( - resp, - falcon.HTTP_404, - message="Design %s nout found" % design_id, - retry=False) + self.return_error(resp, + falcon.HTTP_404, + message="Design %s nout found" % design_id, + retry=False) part_catalog = [] @@ -225,12 +221,13 @@ class DesignsPartsResource(StatefulResource): 'key': n.get_id() } for n in design.baremetal_nodes]) - resp.body = json.dumps(part_catalog) + resp.text = json.dumps(part_catalog) resp.status = falcon.HTTP_200 return class DesignsPartsKindsResource(StatefulResource): + def __init__(self, **kwargs): super(DesignsPartsKindsResource, self).__init__(**kwargs) self.authorized_roles = ['user'] @@ -242,6 +239,7 @@ class DesignsPartsKindsResource(StatefulResource): class DesignsPartResource(StatefulResource): + def __init__(self, orchestrator=None, **kwargs): super(DesignsPartResource, self).__init__(**kwargs) self.authorized_roles = ['user'] @@ -273,19 +271,21 @@ class DesignsPartResource(StatefulResource): part = design.get_baremetal_node(name) else: self.error(req.context, "Kind %s unknown" % kind) - self.return_error( - resp, - falcon.HTTP_404, - message="Kind %s unknown" % kind, - retry=False) + self.return_error(resp, + falcon.HTTP_404, + message="Kind %s unknown" % kind, + retry=False) return - resp.body = json.dumps(part.obj_to_simple()) + resp.text = json.dumps(part.obj_to_simple()) except errors.DesignError as dex: self.error(req.context, str(dex)) - self.return_error( - resp, falcon.HTTP_404, message=str(dex), retry=False) + self.return_error(resp, + falcon.HTTP_404, + message=str(dex), + retry=False) except Exception as exc: self.error(req.context, str(exc)) - self.return_error( - resp.falcon.HTTP_500, message=str(exc), retry=False) + 
self.return_error(resp.falcon.HTTP_500, + message=str(exc), + retry=False) diff --git a/python/drydock_provisioner/control/health.py b/python/drydock_provisioner/control/health.py index 4c1894a5..d79fa2ab 100644 --- a/python/drydock_provisioner/control/health.py +++ b/python/drydock_provisioner/control/health.py @@ -40,10 +40,9 @@ class HealthResource(StatefulResource): """ Returns 204 on healthy, otherwise 503, without response body. """ - hc = HealthCheckCombined( - state_manager=self.state_manager, - orchestrator=self.orchestrator, - extended=False) + hc = HealthCheckCombined(state_manager=self.state_manager, + orchestrator=self.orchestrator, + extended=False) return hc.get(req, resp) @@ -65,10 +64,9 @@ class HealthExtendedResource(StatefulResource): """ Returns 200 on success, otherwise 503, with a response body. """ - hc = HealthCheckCombined( - state_manager=self.state_manager, - orchestrator=self.orchestrator, - extended=True) + hc = HealthCheckCombined(state_manager=self.state_manager, + orchestrator=self.orchestrator, + extended=True) return hc.get(req, resp) @@ -97,8 +95,8 @@ class HealthCheckCombined(object): if now is None: raise Exception('None received from database for now()') except Exception: - hcm = HealthCheckMessage( - msg='Unable to connect to database', error=True) + hcm = HealthCheckMessage(msg='Unable to connect to database', + error=True) health_check.add_detail_msg(msg=hcm) # Test MaaS connection @@ -111,12 +109,12 @@ class HealthCheckCombined(object): if maas_validation.task.get_status() == ActionResult.Failure: raise Exception('MaaS task failure') except Exception: - hcm = HealthCheckMessage( - msg='Unable to connect to MaaS', error=True) + hcm = HealthCheckMessage(msg='Unable to connect to MaaS', + error=True) health_check.add_detail_msg(msg=hcm) if self.extended: - resp.body = json.dumps(health_check.to_dict()) + resp.text = json.dumps(health_check.to_dict()) if health_check.is_healthy() and self.extended: resp.status = falcon.HTTP_200 diff --git a/python/drydock_provisioner/control/middleware.py b/python/drydock_provisioner/control/middleware.py index b5db9840..5b87af6e 100644 --- a/python/drydock_provisioner/control/middleware.py +++ b/python/drydock_provisioner/control/middleware.py @@ -22,6 +22,7 @@ from drydock_provisioner import policy class AuthMiddleware(object): + def __init__(self): self.logger = logging.getLogger('drydock') @@ -31,8 +32,8 @@ class AuthMiddleware(object): ctx.set_policy_engine(policy.policy_engine) - self.logger.debug( - "Request with headers: %s" % ','.join(req.headers.keys())) + self.logger.debug("Request with headers: %s" % + ','.join(req.headers.keys())) auth_status = req.get_header('X-SERVICE-IDENTITY-STATUS') service = True @@ -78,6 +79,7 @@ class AuthMiddleware(object): class ContextMiddleware(object): + def __init__(self): # Setup validation pattern for external marker UUIDv4_pattern = '^[0-9A-F]{8}-[0-9A-F]{4}-4[0-9A-F]{3}-[89AB][0-9A-F]{3}-[0-9A-F]{12}$' @@ -101,6 +103,7 @@ class ContextMiddleware(object): class LoggingMiddleware(object): + def __init__(self): self.logger = logging.getLogger(cfg.CONF.logging.control_logger_name) @@ -111,9 +114,9 @@ class LoggingMiddleware(object): 'external_ctx': req.context.external_marker, 'end_user': req.context.end_user, } - self.logger.info( - "Request: %s %s %s" % (req.method, req.uri, req.query_string), - extra=extra) + self.logger.info("Request: %s %s %s" % + (req.method, req.uri, req.query_string), + extra=extra) def process_response(self, req, resp, resource, req_succeeded): ctx = 
req.context @@ -124,6 +127,6 @@ class LoggingMiddleware(object): 'end_user': ctx.end_user, } resp.append_header('X-Drydock-Req', ctx.request_id) - self.logger.info( - "Response: %s %s - %s" % (req.method, req.uri, resp.status), - extra=extra) + self.logger.info("Response: %s %s - %s" % + (req.method, req.uri, resp.status), + extra=extra) diff --git a/python/drydock_provisioner/control/nodes.py b/python/drydock_provisioner/control/nodes.py index 72065835..66890b3d 100644 --- a/python/drydock_provisioner/control/nodes.py +++ b/python/drydock_provisioner/control/nodes.py @@ -24,6 +24,7 @@ from .base import BaseResource, StatefulResource class NodesResource(BaseResource): + def __init__(self): super().__init__() @@ -41,22 +42,23 @@ class NodesResource(BaseResource): for m in machine_list: m.get_power_params() node_view.append( - dict( - hostname=m.hostname, - memory=m.memory, - cpu_count=m.cpu_count, - status_name=m.status_name, - boot_mac=m.boot_mac, - power_state=m.power_state, - power_address=m.power_parameters.get('power_address'), - boot_ip=m.boot_ip)) + dict(hostname=m.hostname, + memory=m.memory, + cpu_count=m.cpu_count, + status_name=m.status_name, + boot_mac=m.boot_mac, + power_state=m.power_state, + power_address=m.power_parameters.get('power_address'), + boot_ip=m.boot_ip)) - resp.body = json.dumps(node_view) + resp.text = json.dumps(node_view) resp.status = falcon.HTTP_200 except Exception as ex: self.error(req.context, "Unknown error: %s" % str(ex), exc_info=ex) - self.return_error( - resp, falcon.HTTP_500, message="Unknown error", retry=False) + self.return_error(resp, + falcon.HTTP_500, + message="Unknown error", + retry=False) class NodeBuildDataResource(StatefulResource): @@ -68,27 +70,29 @@ class NodeBuildDataResource(StatefulResource): latest = req.params.get('latest', 'false').upper() latest = True if latest == 'TRUE' else False - node_bd = self.state_manager.get_build_data( - node_name=hostname, latest=latest) + node_bd = self.state_manager.get_build_data(node_name=hostname, + latest=latest) if not node_bd: - self.return_error( - resp, - falcon.HTTP_404, - message="No build data found", - retry=False) + self.return_error(resp, + falcon.HTTP_404, + message="No build data found", + retry=False) else: node_bd = [bd.to_dict() for bd in node_bd] resp.status = falcon.HTTP_200 - resp.body = json.dumps(node_bd) + resp.text = json.dumps(node_bd) resp.content_type = falcon.MEDIA_JSON except Exception as ex: self.error(req.context, "Unknown error: %s" % str(ex), exc_info=ex) - self.return_error( - resp, falcon.HTTP_500, message="Unknown error", retry=False) + self.return_error(resp, + falcon.HTTP_500, + message="Unknown error", + retry=False) class NodeFilterResource(StatefulResource): + def __init__(self, orchestrator=None, **kwargs): """Object initializer. 
@@ -117,9 +121,11 @@ class NodeFilterResource(StatefulResource): node_filter=node_filter, site_design=site_design) resp_list = [n.name for n in nodes if nodes] - resp.body = json.dumps(resp_list) + resp.text = json.dumps(resp_list) resp.status = falcon.HTTP_200 except Exception as ex: self.error(req.context, "Unknown error: %s" % str(ex), exc_info=ex) - self.return_error( - resp, falcon.HTTP_500, message="Unknown error", retry=False) + self.return_error(resp, + falcon.HTTP_500, + message="Unknown error", + retry=False) diff --git a/python/drydock_provisioner/control/tasks.py b/python/drydock_provisioner/control/tasks.py index 98def5b4..435f4a0c 100644 --- a/python/drydock_provisioner/control/tasks.py +++ b/python/drydock_provisioner/control/tasks.py @@ -42,14 +42,16 @@ class TasksResource(StatefulResource): try: task_model_list = self.state_manager.get_tasks() task_list = [x.to_dict() for x in task_model_list] - resp.body = json.dumps(task_list) + resp.text = json.dumps(task_list) resp.status = falcon.HTTP_200 except Exception as ex: self.error( req.context, "Unknown error: %s\n%s" % (str(ex), traceback.format_exc())) - self.return_error( - resp, falcon.HTTP_500, message="Unknown error", retry=False) + self.return_error(resp, + falcon.HTTP_500, + message="Unknown error", + retry=False) @policy.ApiEnforcer('physical_provisioner:create_task') def on_post(self, req, resp): @@ -72,19 +74,20 @@ class TasksResource(StatefulResource): action = json_data.get('action', None) if supported_actions.get(action, None) is None: self.error(req.context, "Unsupported action %s" % action) - self.return_error( - resp, - falcon.HTTP_400, - message="Unsupported action %s" % action, - retry=False) + self.return_error(resp, + falcon.HTTP_400, + message="Unsupported action %s" % action, + retry=False) else: supported_actions.get(action)(self, req, resp, json_data) except Exception as ex: self.error( req.context, "Unknown error: %s\n%s" % (str(ex), traceback.format_exc())) - self.return_error( - resp, falcon.HTTP_500, message="Unknown error", retry=False) + self.return_error(resp, + falcon.HTTP_500, + message="Unknown error", + retry=False) @policy.ApiEnforcer('physical_provisioner:delete_tasks') def on_delete(self, req, resp): @@ -100,10 +103,10 @@ class TasksResource(StatefulResource): if not retention_status: resp.status = falcon.HTTP_404 return - resp.body = "Tables purged successfully." + resp.text = "Tables purged successfully." except Exception as e: self.error(req.context, "Unknown error: %s" % (str(e))) - resp.body = "Unexpected error." + resp.text = "Unexpected error." 
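# The resp.body -> resp.text substitution above repeats through every handler
# in this module. For reference, a minimal standalone sketch of the Falcon 3
# convention; the EchoResource name and the /echo route are hypothetical and
# not part of this patch.
import json

import falcon


class EchoResource:

    def on_get(self, req, resp):
        # Falcon 3 deprecates resp.body in favour of resp.text, which takes a
        # str; binary payloads go through resp.data instead.
        resp.text = json.dumps({'status': 'ok'})
        resp.content_type = falcon.MEDIA_JSON
        resp.status = falcon.HTTP_200


app = falcon.App()
app.add_route('/echo', EchoResource())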
resp.status = falcon.HTTP_500 return resp.status = falcon.HTTP_200 @@ -118,19 +121,23 @@ class TasksResource(StatefulResource): req.context, "Task body ended up in wrong handler: action %s in task_validate_design" % action) - self.return_error( - resp, falcon.HTTP_500, message="Error", retry=False) + self.return_error(resp, + falcon.HTTP_500, + message="Error", + retry=False) try: task = self.create_task(json_data, req.context) - resp.body = json.dumps(task.to_dict()) + resp.text = json.dumps(task.to_dict()) resp.append_header('Location', "/api/v1.0/tasks/%s" % str(task.task_id)) resp.status = falcon.HTTP_201 except errors.InvalidFormat as ex: self.error(req.context, ex.msg) - self.return_error( - resp, falcon.HTTP_400, message=ex.msg, retry=False) + self.return_error(resp, + falcon.HTTP_400, + message=ex.msg, + retry=False) @policy.ApiEnforcer('physical_provisioner:verify_site') def task_verify_site(self, req, resp, json_data): @@ -142,19 +149,23 @@ class TasksResource(StatefulResource): req.context, "Task body ended up in wrong handler: action %s in task_verify_site" % action) - self.return_error( - resp, falcon.HTTP_500, message="Error", retry=False) + self.return_error(resp, + falcon.HTTP_500, + message="Error", + retry=False) try: task = self.create_task(json_data, req.context) - resp.body = json.dumps(task.to_dict()) + resp.text = json.dumps(task.to_dict()) resp.append_header('Location', "/api/v1.0/tasks/%s" % str(task.task_id)) resp.status = falcon.HTTP_201 except errors.InvalidFormat as ex: self.error(req.context, ex.msg) - self.return_error( - resp, falcon.HTTP_400, message=ex.msg, retry=False) + self.return_error(resp, + falcon.HTTP_400, + message=ex.msg, + retry=False) @policy.ApiEnforcer('physical_provisioner:prepare_site') def task_prepare_site(self, req, resp, json_data): @@ -166,19 +177,23 @@ class TasksResource(StatefulResource): req.context, "Task body ended up in wrong handler: action %s in task_prepare_site" % action) - self.return_error( - resp, falcon.HTTP_500, message="Error", retry=False) + self.return_error(resp, + falcon.HTTP_500, + message="Error", + retry=False) try: task = self.create_task(json_data, req.context) - resp.body = json.dumps(task.to_dict()) + resp.text = json.dumps(task.to_dict()) resp.append_header('Location', "/api/v1.0/tasks/%s" % str(task.task_id)) resp.status = falcon.HTTP_201 except errors.InvalidFormat as ex: self.error(req.context, ex.msg) - self.return_error( - resp, falcon.HTTP_400, message=ex.msg, retry=False) + self.return_error(resp, + falcon.HTTP_400, + message=ex.msg, + retry=False) @policy.ApiEnforcer('physical_provisioner:verify_nodes') def task_verify_nodes(self, req, resp, json_data): @@ -190,19 +205,23 @@ class TasksResource(StatefulResource): req.context, "Task body ended up in wrong handler: action %s in task_verify_nodes" % action) - self.return_error( - resp, falcon.HTTP_500, message="Error", retry=False) + self.return_error(resp, + falcon.HTTP_500, + message="Error", + retry=False) try: task = self.create_task(json_data, req.context) - resp.body = json.dumps(task.to_dict()) + resp.text = json.dumps(task.to_dict()) resp.append_header('Location', "/api/v1.0/tasks/%s" % str(task.task_id)) resp.status = falcon.HTTP_201 except errors.InvalidFormat as ex: self.error(req.context, ex.msg) - self.return_error( - resp, falcon.HTTP_400, message=ex.msg, retry=False) + self.return_error(resp, + falcon.HTTP_400, + message=ex.msg, + retry=False) @policy.ApiEnforcer('physical_provisioner:prepare_nodes') def task_prepare_nodes(self, req, 
resp, json_data): @@ -214,19 +233,23 @@ class TasksResource(StatefulResource): req.context, "Task body ended up in wrong handler: action %s in task_prepare_nodes" % action) - self.return_error( - resp, falcon.HTTP_500, message="Error", retry=False) + self.return_error(resp, + falcon.HTTP_500, + message="Error", + retry=False) try: task = self.create_task(json_data, req.context) - resp.body = json.dumps(task.to_dict()) + resp.text = json.dumps(task.to_dict()) resp.append_header('Location', "/api/v1.0/tasks/%s" % str(task.task_id)) resp.status = falcon.HTTP_201 except errors.InvalidFormat as ex: self.error(req.context, ex.msg) - self.return_error( - resp, falcon.HTTP_400, message=ex.msg, retry=False) + self.return_error(resp, + falcon.HTTP_400, + message=ex.msg, + retry=False) @policy.ApiEnforcer('physical_provisioner:deploy_nodes') def task_deploy_nodes(self, req, resp, json_data): @@ -238,19 +261,23 @@ class TasksResource(StatefulResource): req.context, "Task body ended up in wrong handler: action %s in task_deploy_nodes" % action) - self.return_error( - resp, falcon.HTTP_500, message="Error", retry=False) + self.return_error(resp, + falcon.HTTP_500, + message="Error", + retry=False) try: task = self.create_task(json_data, req.context) - resp.body = json.dumps(task.to_dict()) + resp.text = json.dumps(task.to_dict()) resp.append_header('Location', "/api/v1.0/tasks/%s" % str(task.task_id)) resp.status = falcon.HTTP_201 except errors.InvalidFormat as ex: self.error(req.context, ex.msg) - self.return_error( - resp, falcon.HTTP_400, message=ex.msg, retry=False) + self.return_error(resp, + falcon.HTTP_400, + message=ex.msg, + retry=False) @policy.ApiEnforcer('physical_provisioner:destroy_nodes') def task_destroy_nodes(self, req, resp, json_data): @@ -262,19 +289,23 @@ class TasksResource(StatefulResource): req.context, "Task body ended up in wrong handler: action %s in task_destroy_nodes" % action) - self.return_error( - resp, falcon.HTTP_500, message="Error", retry=False) + self.return_error(resp, + falcon.HTTP_500, + message="Error", + retry=False) try: task = self.create_task(json_data, req.context) - resp.body = json.dumps(task.to_dict()) + resp.text = json.dumps(task.to_dict()) resp.append_header('Location', "/api/v1.0/tasks/%s" % str(task.task_id)) resp.status = falcon.HTTP_201 except errors.InvalidFormat as ex: self.error(req.context, ex.msg) - self.return_error( - resp, falcon.HTTP_400, message=ex.msg, retry=False) + self.return_error(resp, + falcon.HTTP_400, + message=ex.msg, + retry=False) @policy.ApiEnforcer('physical_provisioner:relabel_nodes') def task_relabel_nodes(self, req, resp, json_data): @@ -286,19 +317,23 @@ class TasksResource(StatefulResource): req.context, "Task body ended up in wrong handler: action %s in task_relabel_nodes" % action) - self.return_error( - resp, falcon.HTTP_500, message="Error", retry=False) + self.return_error(resp, + falcon.HTTP_500, + message="Error", + retry=False) try: task = self.create_task(json_data, req.context) - resp.body = json.dumps(task.to_dict()) + resp.text = json.dumps(task.to_dict()) resp.append_header('Location', "/api/v1.0/tasks/%s" % str(task.task_id)) resp.status = falcon.HTTP_201 except errors.InvalidFormat as ex: self.error(req.context, ex.msg) - self.return_error( - resp, falcon.HTTP_400, message=ex.msg, retry=False) + self.return_error(resp, + falcon.HTTP_400, + message=ex.msg, + retry=False) def create_task(self, task_body, req_context): """General task creation. 
@@ -320,11 +355,10 @@ class TasksResource(StatefulResource): raise errors.InvalidFormat( 'Task creation requires fields design_ref, action') - task = self.orchestrator.create_task( - design_ref=design_ref, - action=action, - node_filter=node_filter, - context=req_context) + task = self.orchestrator.create_task(design_ref=design_ref, + action=action, + node_filter=node_filter, + context=req_context) task.set_status(hd_fields.TaskStatus.Queued) task.save() @@ -357,11 +391,10 @@ class TaskResource(StatefulResource): if first_task is None: self.info(req.context, "Task %s does not exist" % task_id) - self.return_error( - resp, - falcon.HTTP_404, - message="Task %s does not exist" % task_id, - retry=False) + self.return_error(resp, + falcon.HTTP_404, + message="Task %s does not exist" % task_id, + retry=False) else: # If layers is passed in then it returns a dict of tasks instead of the task dict. if layers: @@ -380,12 +413,14 @@ class TaskResource(StatefulResource): 1, first_task) resp_data['subtask_errors'] = errors - resp.body = json.dumps(resp_data) + resp.text = json.dumps(resp_data) resp.status = falcon.HTTP_200 except Exception as ex: self.error(req.context, "Unknown error: %s" % (str(ex))) - self.return_error( - resp, falcon.HTTP_500, message="Unknown error", retry=False) + self.return_error(resp, + falcon.HTTP_500, + message="Unknown error", + retry=False) def get_task(self, req, resp, task_id, builddata): try: @@ -403,8 +438,10 @@ class TaskResource(StatefulResource): return task_dict except Exception as ex: self.error(req.context, "Unknown error: %s" % (str(ex))) - self.return_error( - resp, falcon.HTTP_500, message="Unknown error", retry=False) + self.return_error(resp, + falcon.HTTP_500, + message="Unknown error", + retry=False) def handle_layers(self, req, resp, task_id, builddata, subtask_errors, layers, first_task): @@ -450,10 +487,10 @@ class TaskBuilddataResource(StatefulResource): if not bd_list: resp.status = falcon.HTTP_404 return - resp.body = json.dumps([bd.to_dict() for bd in bd_list]) + resp.text = json.dumps([bd.to_dict() for bd in bd_list]) except Exception as e: self.error(req.context, "Unknown error: %s" % (str(e))) - resp.body = "Unexpected error." + resp.text = "Unexpected error." resp.status = falcon.HTTP_500 return resp.status = falcon.HTTP_200 diff --git a/python/drydock_provisioner/control/util.py b/python/drydock_provisioner/control/util.py index 371da1c8..6db8125d 100644 --- a/python/drydock_provisioner/control/util.py +++ b/python/drydock_provisioner/control/util.py @@ -25,8 +25,9 @@ def get_internal_api_href(ver): if ver in supported_versions: ks_sess = KeystoneUtils.get_session() - url = KeystoneClient.get_endpoint( - "physicalprovisioner", ks_sess=ks_sess, interface='internal') + url = KeystoneClient.get_endpoint("physicalprovisioner", + ks_sess=ks_sess, + interface='internal') return url else: raise ApiError("API version %s unknown." 
% ver) diff --git a/python/drydock_provisioner/control/validation.py b/python/drydock_provisioner/control/validation.py index 73dd2f76..8c499a0b 100644 --- a/python/drydock_provisioner/control/validation.py +++ b/python/drydock_provisioner/control/validation.py @@ -62,12 +62,12 @@ class ValidationResource(StatefulResource): resp_message = validation.to_dict() resp_message['code'] = 200 resp.status = falcon.HTTP_200 - resp.body = json.dumps(resp_message) + resp.text = json.dumps(resp_message) else: resp_message = validation.to_dict() resp_message['code'] = 400 resp.status = falcon.HTTP_400 - resp.body = json.dumps(resp_message) + resp.text = json.dumps(resp_message) except errors.InvalidFormat as e: err_message = str(e) diff --git a/python/drydock_provisioner/drivers/driver.py b/python/drydock_provisioner/drivers/driver.py index 5cc3dbbc..d7fe745e 100644 --- a/python/drydock_provisioner/drivers/driver.py +++ b/python/drydock_provisioner/drivers/driver.py @@ -64,6 +64,7 @@ class ProviderDriver(object): # Execute a single task in a separate thread class DriverActionRunner(Thread): + def __init__(self, action=None): super().__init__() diff --git a/python/drydock_provisioner/drivers/kubernetes/promenade_driver/actions/k8s_node.py b/python/drydock_provisioner/drivers/kubernetes/promenade_driver/actions/k8s_node.py index 79307272..cdd8a8ad 100644 --- a/python/drydock_provisioner/drivers/kubernetes/promenade_driver/actions/k8s_node.py +++ b/python/drydock_provisioner/drivers/kubernetes/promenade_driver/actions/k8s_node.py @@ -22,6 +22,7 @@ from drydock_provisioner.orchestrator.actions.orchestrator import BaseAction class PromenadeAction(BaseAction): + def __init__(self, *args, prom_client=None): super().__init__(*args) @@ -42,11 +43,10 @@ class RelabelNode(PromenadeAction): try: site_design = self._load_site_design() except errors.OrchestratorError: - self.task.add_status_msg( - msg="Error loading site design.", - error=True, - ctx='NA', - ctx_type='NA') + self.task.add_status_msg(msg="Error loading site design.", + error=True, + ctx='NA', + ctx_type='NA') self.task.set_status(hd_fields.TaskStatus.Complete) self.task.failure() self.task.save() @@ -58,14 +58,16 @@ class RelabelNode(PromenadeAction): for n in nodes: # Relabel node through Promenade try: - self.logger.info( - "Relabeling node %s with node label data." % n.name) + self.logger.info("Relabeling node %s with node label data." 
% + n.name) labels_dict = n.get_node_labels() msg = "Set labels %s for node %s" % (str(labels_dict), n.name) - self.task.add_status_msg( - msg=msg, error=False, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='node') # Call promenade to invoke relabel node self.promenade_client.relabel_node(n.get_id(), labels_dict) @@ -74,8 +76,10 @@ class RelabelNode(PromenadeAction): msg = "Error relabeling node %s with label data" % n.name self.logger.warning(msg + ": " + str(ex)) self.task.failure(focus=n.get_id()) - self.task.add_status_msg( - msg=msg, error=True, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=True, + ctx=n.name, + ctx_type='node') continue self.task.set_status(hd_fields.TaskStatus.Complete) diff --git a/python/drydock_provisioner/drivers/kubernetes/promenade_driver/driver.py b/python/drydock_provisioner/drivers/kubernetes/promenade_driver/driver.py index c22e838a..73da4b2f 100644 --- a/python/drydock_provisioner/drivers/kubernetes/promenade_driver/driver.py +++ b/python/drydock_provisioner/drivers/kubernetes/promenade_driver/driver.py @@ -61,8 +61,9 @@ class PromenadeDriver(KubernetesDriver): raise errors.DriverError("Invalid task %s" % (task_id)) if task.action not in self.supported_actions: - raise errors.DriverError("Driver %s doesn't support task action %s" - % (self.driver_desc, task.action)) + raise errors.DriverError( + "Driver %s doesn't support task action %s" % + (self.driver_desc, task.action)) task.set_status(hd_fields.TaskStatus.Running) task.save() @@ -71,11 +72,10 @@ class PromenadeDriver(KubernetesDriver): if task.retry > 0: msg = "Retrying task %s on previous failed entities." % str( task.get_id()) - task.add_status_msg( - msg=msg, - error=False, - ctx=str(task.get_id()), - ctx_type='task') + task.add_status_msg(msg=msg, + error=False, + ctx=str(task.get_id()), + ctx_type='task') target_nodes = self.orchestrator.get_target_nodes( task, failures=True) else: @@ -108,22 +108,20 @@ class PromenadeDriver(KubernetesDriver): for t, f in subtask_futures.items(): if not f.done(): - task.add_status_msg( - "Subtask timed out before completing.", - error=True, - ctx=str(uuid.UUID(bytes=t)), - ctx_type='task') + task.add_status_msg("Subtask timed out before completing.", + error=True, + ctx=str(uuid.UUID(bytes=t)), + ctx_type='task') task.failure() else: if f.exception(): msg = ("Subtask %s raised unexpected exception: %s" % (str(uuid.UUID(bytes=t)), str(f.exception()))) self.logger.error(msg, exc_info=f.exception()) - task.add_status_msg( - msg=msg, - error=True, - ctx=str(uuid.UUID(bytes=t)), - ctx_type='task') + task.add_status_msg(msg=msg, + error=True, + ctx=str(uuid.UUID(bytes=t)), + ctx_type='task') task.failure() task.bubble_results() @@ -138,14 +136,14 @@ class PromenadeDriver(KubernetesDriver): prom_client=prom_client) action.start() except Exception as e: - msg = ("Subtask for action %s raised unexpected exception: %s" - % (task.action, str(e))) + msg = ( + "Subtask for action %s raised unexpected exception: %s" % + (task.action, str(e))) self.logger.error(msg, exc_info=e) - task.add_status_msg( - msg=msg, - error=True, - ctx=str(task.get_id()), - ctx_type='task') + task.add_status_msg(msg=msg, + error=True, + ctx=str(task.get_id()), + ctx_type='task') task.failure() task.set_status(hd_fields.TaskStatus.Complete) diff --git a/python/drydock_provisioner/drivers/kubernetes/promenade_driver/promenade_client.py 
b/python/drydock_provisioner/drivers/kubernetes/promenade_driver/promenade_client.py index c4b6ef91..78110ad3 100644 --- a/python/drydock_provisioner/drivers/kubernetes/promenade_driver/promenade_client.py +++ b/python/drydock_provisioner/drivers/kubernetes/promenade_driver/promenade_client.py @@ -79,8 +79,9 @@ class PromenadeSession(object): url = self.base_url + route self.logger.debug('GET ' + url) self.logger.debug('Query Params: ' + str(query)) - resp = self.__session.get( - url, params=query, timeout=self._timeout(timeout)) + resp = self.__session.get(url, + params=query, + timeout=self._timeout(timeout)) if resp.status_code == 401 and not auth_refresh: self.set_auth() @@ -109,21 +110,19 @@ class PromenadeSession(object): self.logger.debug('PUT ' + url) self.logger.debug('Query Params: ' + str(query)) if body is not None: - self.logger.debug( - "Sending PUT with explicit body: \n%s" % body) - resp = self.__session.put( - self.base_url + endpoint, - params=query, - data=body, - timeout=self._timeout(timeout)) + self.logger.debug("Sending PUT with explicit body: \n%s" % + body) + resp = self.__session.put(self.base_url + endpoint, + params=query, + data=body, + timeout=self._timeout(timeout)) else: - self.logger.debug( - "Sending PUT with JSON body: \n%s" % str(data)) - resp = self.__session.put( - self.base_url + endpoint, - params=query, - json=data, - timeout=self._timeout(timeout)) + self.logger.debug("Sending PUT with JSON body: \n%s" % + str(data)) + resp = self.__session.put(self.base_url + endpoint, + params=query, + json=data, + timeout=self._timeout(timeout)) if resp.status_code == 401 and not auth_refresh: self.set_auth() auth_refresh = True @@ -151,21 +150,19 @@ class PromenadeSession(object): self.logger.debug('POST ' + url) self.logger.debug('Query Params: ' + str(query)) if body is not None: - self.logger.debug( - "Sending POST with explicit body: \n%s" % body) - resp = self.__session.post( - self.base_url + endpoint, - params=query, - data=body, - timeout=self._timeout(timeout)) + self.logger.debug("Sending POST with explicit body: \n%s" % + body) + resp = self.__session.post(self.base_url + endpoint, + params=query, + data=body, + timeout=self._timeout(timeout)) else: - self.logger.debug( - "Sending POST with JSON body: \n%s" % str(data)) - resp = self.__session.post( - self.base_url + endpoint, - params=query, - json=data, - timeout=self._timeout(timeout)) + self.logger.debug("Sending POST with JSON body: \n%s" % + str(data)) + resp = self.__session.post(self.base_url + endpoint, + params=query, + json=data, + timeout=self._timeout(timeout)) if resp.status_code == 401 and not auth_refresh: self.set_auth() auth_refresh = True @@ -284,9 +281,9 @@ class PromenadeClient(object): raise errors.ClientUnauthorizedError( "Unauthorized access to %s, include valid token." 
% resp.url) elif resp.status_code == 403: - raise errors.ClientForbiddenError( - "Forbidden access to %s" % resp.url) + raise errors.ClientForbiddenError("Forbidden access to %s" % + resp.url) elif not resp.ok: - raise errors.ClientError( - "Error - received %d: %s" % (resp.status_code, resp.text), - code=resp.status_code) + raise errors.ClientError("Error - received %d: %s" % + (resp.status_code, resp.text), + code=resp.status_code) diff --git a/python/drydock_provisioner/drivers/node/maasdriver/actions/node.py b/python/drydock_provisioner/drivers/node/maasdriver/actions/node.py index 0ff5a641..0d1830c4 100644 --- a/python/drydock_provisioner/drivers/node/maasdriver/actions/node.py +++ b/python/drydock_provisioner/drivers/node/maasdriver/actions/node.py @@ -47,6 +47,7 @@ import drydock_provisioner.drivers.node.maasdriver.models.domain as maas_domain class BaseMaasAction(BaseAction): + def __init__(self, *args, maas_client=None): super().__init__(*args) @@ -59,13 +60,12 @@ class BaseMaasAction(BaseAction): result_details = machine.get_task_results(result_type=result_type) for r in result_details: if r.get_decoded_data(): - bd = objects.BuildData( - node_name=node.name, - task_id=self.task.task_id, - collected_date=r.updated, - generator="{}:{}".format(stage, r.name), - data_format='text/plain', - data_element=r.get_decoded_data()) + bd = objects.BuildData(node_name=node.name, + task_id=self.task.task_id, + collected_date=r.updated, + generator="{}:{}".format(stage, r.name), + data_format='text/plain', + data_element=r.get_decoded_data()) self.state_manager.post_build_data(bd) log_href = "%s/tasks/%s/builddata" % (get_internal_api_href("v1.0"), str(self.task.task_id)) @@ -82,11 +82,10 @@ class ValidateNodeServices(BaseMaasAction): try: if self.maas_client.test_connectivity(): self.logger.info("Able to connect to MaaS.") - self.task.add_status_msg( - msg='Able to connect to MaaS.', - error=False, - ctx='NA', - ctx_type='NA') + self.task.add_status_msg(msg='Able to connect to MaaS.', + error=False, + ctx='NA', + ctx_type='NA') self.task.success() if self.maas_client.test_authentication(): self.logger.info("Able to authenticate with MaaS API.") @@ -147,32 +146,37 @@ class ValidateNodeServices(BaseMaasAction): else: msg = "Rack controller %s not healthy." % r.hostname self.logger.info(msg) - self.task.add_status_msg( - msg=msg, - error=True, - ctx=r.hostname, - ctx_type='rack_ctlr') + self.task.add_status_msg(msg=msg, + error=True, + ctx=r.hostname, + ctx_type='rack_ctlr') if not healthy_rackd: msg = "No healthy rack controllers found." 
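# Most of the remaining churn in these hunks is the same mechanical
# re-indentation: yapf keeps the first argument on the line that opens the
# call and aligns the rest under the opening parenthesis. A runnable
# before/after sketch; add_status_msg below is a stand-in helper, not the
# real Task.add_status_msg.
def add_status_msg(msg=None, error=False, ctx=None, ctx_type=None):
    """Stand-in used only to make the sketch runnable."""
    return dict(msg=msg, error=error, ctx=ctx, ctx_type=ctx_type)


# Layout before this patch: the call breaks right after the open parenthesis
# and the arguments sit on indented continuation lines.
before = add_status_msg(
    msg='No healthy rack controllers found.', error=True, ctx='maas',
    ctx_type='cluster')

# Layout after this patch: the first keyword stays on the call line and the
# remaining keywords are aligned under the opening parenthesis.
after = add_status_msg(msg='No healthy rack controllers found.',
                       error=True,
                       ctx='maas',
                       ctx_type='cluster')

assert before == after  # only the formatting differs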
self.logger.info(msg) - self.task.add_status_msg( - msg=msg, - error=True, - ctx='maas', - ctx_type='cluster') + self.task.add_status_msg(msg=msg, + error=True, + ctx='maas', + ctx_type='cluster') self.task.failure() except errors.TransientDriverError as ex: - self.task.add_status_msg( - msg=str(ex), error=True, ctx='NA', ctx_type='NA', retry=True) + self.task.add_status_msg(msg=str(ex), + error=True, + ctx='NA', + ctx_type='NA', + retry=True) self.task.failure() except errors.PersistentDriverError as ex: - self.task.add_status_msg( - msg=str(ex), error=True, ctx='NA', ctx_type='NA') + self.task.add_status_msg(msg=str(ex), + error=True, + ctx='NA', + ctx_type='NA') self.task.failure() except Exception as ex: - self.task.add_status_msg( - msg=str(ex), error=True, ctx='NA', ctx_type='NA') + self.task.add_status_msg(msg=str(ex), + error=True, + ctx='NA', + ctx_type='NA') self.task.failure() self.task.set_status(hd_fields.TaskStatus.Complete) @@ -268,11 +272,10 @@ class DestroyNode(BaseMaasAction): try: site_design = self._load_site_design() except errors.OrchestratorError: - self.task.add_status_msg( - msg="Error loading site design.", - error=True, - ctx='NA', - ctx_type='NA') + self.task.add_status_msg(msg="Error loading site design.", + error=True, + ctx='NA', + ctx_type='NA') self.task.set_status(hd_fields.TaskStatus.Complete) self.task.failure() self.task.save() @@ -287,15 +290,19 @@ class DestroyNode(BaseMaasAction): if machine is None: msg = "Could not locate machine for node {}".format(n.name) self.logger.info(msg) - self.task.add_status_msg( - msg=msg, error=False, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='node') self.task.success(focus=n.get_id()) continue elif type(machine) == maas_rack.RackController: msg = "Cannot delete rack controller {}.".format(n.name) self.logger.info(msg) - self.task.add_status_msg( - msg=msg, error=False, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='node') self.task.failure(focus=n.get_id()) continue @@ -311,8 +318,10 @@ class DestroyNode(BaseMaasAction): msg = "Error Releasing node {}, skipping".format( n.name) self.logger.warning(msg) - self.task.add_status_msg( - msg=msg, error=True, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=True, + ctx=n.name, + ctx_type='node') self.task.failure(focus=n.get_id()) continue @@ -323,9 +332,8 @@ class DestroyNode(BaseMaasAction): * 60) // config.config_mgr.conf.maasdriver.poll_interval while (attempts < max_attempts - and (not machine.status_name.startswith('Ready') - and not - machine.status_name.startswith('Failed'))): + and (not machine.status_name.startswith('Ready') + and not machine.status_name.startswith('Failed'))): attempts = attempts + 1 time.sleep( config.config_mgr.conf.maasdriver.poll_interval) @@ -343,14 +351,18 @@ class DestroyNode(BaseMaasAction): msg = "Node {} released and disk erased.".format( n.name) self.logger.info(msg) - self.task.add_status_msg( - msg=msg, error=False, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='node') self.task.success(focus=n.get_id()) else: msg = "Node {} release timed out".format(n.name) self.logger.warning(msg) - self.task.add_status_msg( - msg=msg, error=True, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=True, + ctx=n.name, + ctx_type='node') self.task.failure(focus=n.get_id()) else: # Node is in a state that cannot be released from 
MaaS API. @@ -359,8 +371,10 @@ class DestroyNode(BaseMaasAction): n.name, machine.status_name) self.logger.info(msg) machine.reset_storage_config() - self.task.add_status_msg( - msg=msg, error=False, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='node') # for both cases above delete the node to force re-commissioning # But, before deleting the node reset it power type in maas if @@ -380,16 +394,20 @@ class DestroyNode(BaseMaasAction): msg = "Deleted Node: {} in status: {}.".format( n.name, machine.status_name) self.logger.info(msg) - self.task.add_status_msg( - msg=msg, error=False, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='node') self.task.success(focus=n.get_id()) except errors.DriverError as dex: msg = "Driver error, while destroying node {}, skipping".format( n.name) self.logger.warning(msg, exc_info=dex) - self.task.add_status_msg( - msg=msg, error=True, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=True, + ctx=n.name, + ctx_type='node') self.task.failure(focus=n.get_id()) continue @@ -409,11 +427,10 @@ class CreateNetworkTemplate(BaseMaasAction): try: site_design = self._load_site_design() except errors.OrchestratorError: - self.task.add_status_msg( - msg="Error loading site design.", - error=True, - ctx='NA', - ctx_type='NA') + self.task.add_status_msg(msg="Error loading site design.", + error=True, + ctx='NA', + ctx_type='NA') self.task.set_status(hd_fields.TaskStatus.Complete) self.task.failure() self.task.save() @@ -422,8 +439,10 @@ class CreateNetworkTemplate(BaseMaasAction): if not site_design.network_links: msg = ("Site design has no network links, no work to do.") self.logger.debug(msg) - self.task.add_status_msg( - msg=msg, error=False, ctx='NA', ctx_type='NA') + self.task.add_status_msg(msg=msg, + error=False, + ctx='NA', + ctx_type='NA') self.task.success() self.task.set_status(hd_fields.TaskStatus.Complete) self.task.save() @@ -466,11 +485,10 @@ class CreateNetworkTemplate(BaseMaasAction): msg = "Network %s allowed on link %s, but not defined." 
% ( net_name, design_link.name) self.logger.warning(msg) - self.task.add_status_msg( - msg=msg, - error=True, - ctx=design_link.name, - ctx_type='network_link') + self.task.add_status_msg(msg=msg, + error=True, + ctx=design_link.name, + ctx_type='network_link') continue maas_net = subnets.singleton({'cidr': n.cidr}) @@ -481,8 +499,10 @@ class CreateNetworkTemplate(BaseMaasAction): if len(fabrics_found) > 1: msg = "MaaS self-discovered network incompatible with NetworkLink %s" % design_link.name self.logger.warning(msg) - self.task.add_status_msg( - msg=msg, error=True, ctx=design_link.name, ctx_type='network_link') + self.task.add_status_msg(msg=msg, + error=True, + ctx=design_link.name, + ctx_type='network_link') continue elif len(fabrics_found) == 1: link_fabric_id = fabrics_found.pop() @@ -493,28 +513,31 @@ class CreateNetworkTemplate(BaseMaasAction): link_fabric = fabrics.singleton({'name': design_link.name}) if link_fabric is None: - link_fabric = maas_fabric.Fabric( - self.maas_client, name=design_link.name) + link_fabric = maas_fabric.Fabric(self.maas_client, + name=design_link.name) link_fabric = fabrics.add(link_fabric) # Ensure that the MTU of the untagged VLAN on the fabric # matches that on the NetworkLink config - vlan_list = maas_vlan.Vlans( - self.maas_client, fabric_id=link_fabric.resource_id) + vlan_list = maas_vlan.Vlans(self.maas_client, + fabric_id=link_fabric.resource_id) vlan_list.refresh() - msg = "Updating native VLAN MTU = %d on network link %s" % (design_link.mtu, - design_link.name) + msg = "Updating native VLAN MTU = %d on network link %s" % ( + design_link.mtu, design_link.name) self.logger.debug(msg) - self.task.add_status_msg( - msg=msg, error=False, ctx=design_link.name, ctx_type='network_link') + self.task.add_status_msg(msg=msg, + error=False, + ctx=design_link.name, + ctx_type='network_link') vlan = vlan_list.singleton({'vid': 0}) if vlan: vlan.mtu = design_link.mtu vlan.update() else: - self.logger.warning("Unable to find native VLAN on fabric %s." - % link_fabric.resource_id) + self.logger.warning( + "Unable to find native VLAN on fabric %s." % + link_fabric.resource_id) # Now that we have the fabrics sorted out, check # that VLAN tags and subnet attributes are correct @@ -532,10 +555,9 @@ class CreateNetworkTemplate(BaseMaasAction): self.logger.info( 'Network domain not found, adding: %s', n.dns_domain) - domain = maas_domain.Domain( - self.maas_client, - name=n.dns_domain, - authoritative=False) + domain = maas_domain.Domain(self.maas_client, + name=n.dns_domain, + authoritative=False) domain = domains.add(domain) domains.refresh() @@ -545,12 +567,15 @@ class CreateNetworkTemplate(BaseMaasAction): msg = "Subnet for network %s not found, creating..." 
% ( n.name) self.logger.info(msg) - self.task.add_status_msg( - msg=msg, error=False, ctx='NA', ctx_type='NA') + self.task.add_status_msg(msg=msg, + error=False, + ctx='NA', + ctx_type='NA') fabric_list = maas_fabric.Fabrics(self.maas_client) fabric_list.refresh() - fabric = fabric_list.singleton({'name': design_link.name}) + fabric = fabric_list.singleton( + {'name': design_link.name}) if fabric is not None: vlan_list = maas_vlan.Vlans( @@ -572,11 +597,10 @@ class CreateNetworkTemplate(BaseMaasAction): msg = "VLAN %s found for network %s, updated attributes" % ( vlan.resource_id, n.name) self.logger.debug(msg) - self.task.add_status_msg( - msg=msg, - error=False, - ctx=n.name, - ctx_type='network') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='network') else: # Create a new VLAN in this fabric and assign subnet to it vlan = maas_vlan.Vlan( @@ -590,11 +614,10 @@ class CreateNetworkTemplate(BaseMaasAction): msg = "VLAN %s created for network %s" % ( vlan.resource_id, n.name) self.logger.debug(msg) - self.task.add_status_msg( - msg=msg, - error=False, - ctx=n.name, - ctx_type='network') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='network') # If subnet did not exist, create it here and attach it to the fabric/VLAN subnet = maas_subnet.Subnet( @@ -612,35 +635,32 @@ class CreateNetworkTemplate(BaseMaasAction): msg = "Created subnet %s for CIDR %s on VLAN %s" % ( subnet.resource_id, subnet.cidr, subnet.vlan) self.logger.debug(msg) - self.task.add_status_msg( - msg=msg, - error=False, - ctx=n.name, - ctx_type='network') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='network') else: msg = "Fabric %s should be created, but cannot locate it." % ( design_link.name) self.logger.error(msg) - self.task.add_status_msg( - msg=msg, - error=True, - ctx=n.name, - ctx_type='network_link') + self.task.add_status_msg(msg=msg, + error=True, + ctx=n.name, + ctx_type='network_link') else: subnet.name = n.name subnet.dns_servers = n.dns_servers msg = "Subnet %s found for network %s, updated attributes" % ( subnet.resource_id, n.name) - self.task.add_status_msg( - msg=msg, - error=False, - ctx=n.name, - ctx_type='network') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='network') self.logger.info(msg) - vlan_list = maas_vlan.Vlans( - self.maas_client, fabric_id=subnet.fabric) + vlan_list = maas_vlan.Vlans(self.maas_client, + fabric_id=subnet.fabric) vlan_list.refresh() vlan = vlan_list.select(subnet.vlan) @@ -656,20 +676,18 @@ class CreateNetworkTemplate(BaseMaasAction): msg = "VLAN %s found for network %s, updated attributes" % ( vlan.resource_id, n.name) self.logger.debug(msg) - self.task.add_status_msg( - msg=msg, - error=False, - ctx=n.name, - ctx_type='network') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='network') else: msg = "MaaS subnet %s does not have a matching VLAN" % ( subnet.resource_id) self.logger.error(msg) - self.task.add_status_msg( - msg=msg, - error=True, - ctx=n.name, - ctx_type='network') + self.task.add_status_msg(msg=msg, + error=True, + ctx=n.name, + ctx_type='network') self.task.failure(focus=n.name) # Check if the routes have a default route @@ -687,14 +705,13 @@ class CreateNetworkTemplate(BaseMaasAction): msg = "Error adding range to network %s: %s" % ( n.name, str(r)) self.logger.error(msg, exc_info=e) - self.task.add_status_msg( - msg=msg, - error=True, - ctx=n.name, - ctx_type='network') + self.task.add_status_msg(msg=msg, + 
error=True, + ctx=n.name, + ctx_type='network') - vlan_list = maas_vlan.Vlans( - self.maas_client, fabric_id=subnet.fabric) + vlan_list = maas_vlan.Vlans(self.maas_client, + fabric_id=subnet.fabric) vlan_list.refresh() vlan = vlan_list.select(subnet.vlan) @@ -703,11 +720,10 @@ class CreateNetworkTemplate(BaseMaasAction): msg = "DHCP enabled for subnet %s, activating in MaaS" % ( subnet.name) self.logger.info(msg) - self.task.add_status_msg( - msg=msg, - error=False, - ctx=n.name, - ctx_type='network') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='network') rack_ctlrs = maas_rack.RackControllers( self.maas_client) @@ -725,8 +741,9 @@ class CreateNetworkTemplate(BaseMaasAction): if r.interface_for_ip( n.dhcp_relay_upstream_target): if not r.is_healthy(): - msg = ("Rack controller %s with DHCP relay is not healthy." % - r.hostname) + msg = ( + "Rack controller %s with DHCP relay is not healthy." + % r.hostname) self.logger.info(msg) self.task.add_status_msg( msg=msg, @@ -757,8 +774,9 @@ class CreateNetworkTemplate(BaseMaasAction): rackctl_id = r.resource_id if not r.is_healthy(): - msg = ("Rack controller %s not healthy, skipping DHCP config." % - r.resource_id) + msg = ( + "Rack controller %s not healthy, skipping DHCP config." + % r.resource_id) self.logger.info(msg) self.task.add_status_msg( msg=msg, @@ -797,11 +815,10 @@ class CreateNetworkTemplate(BaseMaasAction): msg = "Network %s requires DHCP, but could not locate a rack controller to serve it." % ( n.name) self.logger.error(msg) - self.task.add_status_msg( - msg=msg, - error=True, - ctx=n.name, - ctx_type='network') + self.task.add_status_msg(msg=msg, + error=True, + ctx=n.name, + ctx_type='network') self.task.failure(focus=n.name) except ValueError: @@ -823,14 +840,15 @@ class CreateNetworkTemplate(BaseMaasAction): continue dest_subnet = subnet_list.singleton({'cidr': route_net}) if dest_subnet is not None: - src_subnet.add_static_route( - dest_subnet.resource_id, - r.get('gateway'), - metric=r.get('metric', 100)) + src_subnet.add_static_route(dest_subnet.resource_id, + r.get('gateway'), + metric=r.get('metric', 100)) else: msg = "Could not locate destination network for static route to %s." % route_net - self.task.add_status_msg( - msg=msg, error=True, ctx=n.name, ctx_type='network') + self.task.add_status_msg(msg=msg, + error=True, + ctx=n.name, + ctx_type='network') self.task.failure(focus=n.name) self.logger.info(msg) continue @@ -852,8 +870,10 @@ class CreateNetworkTemplate(BaseMaasAction): else: msg = "Network %s defined, but not found in MaaS after network config task." 
% n.name self.logger.error(msg) - self.task.add_status_msg( - msg=msg, error=True, ctx=n.name, ctx_type='network') + self.task.add_status_msg(msg=msg, + error=True, + ctx=n.name, + ctx_type='network') self.task.failure(focus=n.name) self.task.set_status(hd_fields.TaskStatus.Complete) @@ -874,11 +894,10 @@ class ConfigureNodeProvisioner(BaseMaasAction): try: site_design = self._load_site_design() except errors.OrchestratorError: - self.task.add_status_msg( - msg="Error loading site design.", - error=True, - ctx='NA', - ctx_type='NA') + self.task.add_status_msg(msg="Error loading site design.", + error=True, + ctx='NA', + ctx_type='NA') self.task.set_status(hd_fields.TaskStatus.Complete) self.task.failure() self.task.save() @@ -891,11 +910,10 @@ class ConfigureNodeProvisioner(BaseMaasAction): self.logger.debug("Error accessing the MaaS API.", exc_info=ex) self.task.set_status(hd_fields.TaskStatus.Complete) self.task.failure() - self.task.add_status_msg( - msg='Error accessing MaaS SshKeys API', - error=True, - ctx='NA', - ctx_type='NA') + self.task.add_status_msg(msg='Error accessing MaaS SshKeys API', + error=True, + ctx='NA', + ctx_type='NA') self.task.save() return @@ -906,9 +924,8 @@ class ConfigureNodeProvisioner(BaseMaasAction): if repo_list: for r in repo_list: try: - existing_repo = current_repos.singleton({ - 'name': r.get_id() - }) + existing_repo = current_repos.singleton( + {'name': r.get_id()}) new_repo = self.create_maas_repo(self.maas_client, r) if existing_repo: new_repo.resource_id = existing_repo.resource_id @@ -916,22 +933,28 @@ class ConfigureNodeProvisioner(BaseMaasAction): msg = "Updating repository definition for %s." % ( r.name) self.logger.debug(msg) - self.task.add_status_msg( - msg=msg, error=False, ctx='NA', ctx_type='NA') + self.task.add_status_msg(msg=msg, + error=False, + ctx='NA', + ctx_type='NA') self.task.success() else: new_repo = current_repos.add(new_repo) msg = "Adding repository definition for %s." 
% (r.name) self.logger.debug(msg) - self.task.add_status_msg( - msg=msg, error=False, ctx='NA', ctx_type='NA') + self.task.add_status_msg(msg=msg, + error=False, + ctx='NA', + ctx_type='NA') self.task.success() except Exception as ex: msg = "Error adding repository to MaaS configuration: %s" % str( ex) self.logger.warning(msg) - self.task.add_status_msg( - msg=msg, error=True, ctx='NA', ctx_type='NA') + self.task.add_status_msg(msg=msg, + error=True, + ctx='NA', + ctx_type='NA') self.task.failure() if repo_list.remove_unlisted: defined_repos = [x.get_id() for x in repo_list] @@ -946,8 +969,10 @@ class ConfigureNodeProvisioner(BaseMaasAction): msg = ("No repositories to add, no work to do.") self.logger.debug(msg) self.task.success() - self.task.add_status_msg( - msg=msg, error=False, ctx='NA', ctx_type='NA') + self.task.add_status_msg(msg=msg, + error=False, + ctx='NA', + ctx_type='NA') self.task.set_status(hd_fields.TaskStatus.Complete) self.task.save() @@ -997,11 +1022,10 @@ class ConfigureUserCredentials(BaseMaasAction): try: site_design = self._load_site_design() except errors.OrchestratorError: - self.task.add_status_msg( - msg="Error loading site design.", - error=True, - ctx='NA', - ctx_type='NA') + self.task.add_status_msg(msg="Error loading site design.", + error=True, + ctx='NA', + ctx_type='NA') self.task.set_status(hd_fields.TaskStatus.Complete) self.task.failure() self.task.save() @@ -1014,11 +1038,10 @@ class ConfigureUserCredentials(BaseMaasAction): self.logger.debug("Error accessing the MaaS API.", exc_info=ex) self.task.set_status(hd_fields.TaskStatus.Complete) self.task.failure() - self.task.add_status_msg( - msg='Error accessing MaaS SshKeys API', - error=True, - ctx='NA', - ctx_type='NA') + self.task.add_status_msg(msg='Error accessing MaaS SshKeys API', + error=True, + ctx='NA', + ctx_type='NA') self.task.save() return @@ -1029,37 +1052,44 @@ class ConfigureUserCredentials(BaseMaasAction): if key_list: for k in key_list: try: - if len(current_keys.query({ - 'key': k.replace("\n", "") - })) == 0: + if len(current_keys.query({'key': k.replace("\n", + "")})) == 0: new_key = maas_keys.SshKey(self.maas_client, key=k) new_key = current_keys.add(new_key) msg = "Added SSH key %s to MaaS user profile. Will be installed on all deployed nodes." % ( k[:16]) self.logger.debug(msg) - self.task.add_status_msg( - msg=msg, error=False, ctx='NA', ctx_type='NA') + self.task.add_status_msg(msg=msg, + error=False, + ctx='NA', + ctx_type='NA') self.task.success() else: msg = "SSH key %s already exists in MaaS user profile." 
% k[: 16] self.logger.debug(msg) - self.task.add_status_msg( - msg=msg, error=False, ctx='NA', ctx_type='NA') + self.task.add_status_msg(msg=msg, + error=False, + ctx='NA', + ctx_type='NA') self.task.success() except Exception as ex: msg = "Error adding SSH key to MaaS user profile: %s" % str( ex) self.logger.warning(msg) - self.task.add_status_msg( - msg=msg, error=True, ctx='NA', ctx_type='NA') + self.task.add_status_msg(msg=msg, + error=True, + ctx='NA', + ctx_type='NA') self.task.failure() else: msg = ("No keys to add, no work to do.") self.logger.debug(msg) self.task.success() - self.task.add_status_msg( - msg=msg, error=False, ctx='NA', ctx_type='NA') + self.task.add_status_msg(msg=msg, + error=False, + ctx='NA', + ctx_type='NA') self.task.set_status(hd_fields.TaskStatus.Complete) self.task.save() @@ -1076,11 +1106,10 @@ class IdentifyNode(BaseMaasAction): try: site_design = self._load_site_design() except errors.OrchestratorError: - self.task.add_status_msg( - msg="Error loading site design.", - error=True, - ctx='NA', - ctx_type='NA') + self.task.add_status_msg(msg="Error loading site design.", + error=True, + ctx='NA', + ctx_type='NA') self.task.set_status(hd_fields.TaskStatus.Complete) self.task.failure() self.task.save() @@ -1091,49 +1120,51 @@ class IdentifyNode(BaseMaasAction): for n in nodes: try: - machine = find_node_in_maas(self.maas_client, n, probably_exists=False) + machine = find_node_in_maas(self.maas_client, + n, + probably_exists=False) if machine is None: self.task.failure(focus=n.get_id()) - self.task.add_status_msg( - msg="Node %s not found in MaaS" % n.name, - error=True, - ctx=n.name, - ctx_type='node') + self.task.add_status_msg(msg="Node %s not found in MaaS" % + n.name, + error=True, + ctx=n.name, + ctx_type='node') elif type(machine) == maas_machine.Machine: machine.update_identity( n, domain=n.get_domain(site_design), - use_node_oob_params=config.config_mgr.conf.maasdriver.use_node_oob_params, + use_node_oob_params=config.config_mgr.conf.maasdriver. 
+ use_node_oob_params, ) msg = "Node %s identified in MaaS" % n.name self.logger.debug(msg) - self.task.add_status_msg( - msg=msg, - error=False, - ctx=n.name, - ctx_type='node') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='node') self.task.success(focus=n.get_id()) elif type(machine) == maas_rack.RackController: msg = "Rack controller %s identified in MaaS" % n.name self.logger.debug(msg) - self.task.add_status_msg( - msg=msg, - error=False, - ctx=n.name, - ctx_type='node') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='node') self.task.success(focus=n.get_id()) except ApiNotAvailable as api_ex: - self.logger.debug("Error accessing the MaaS API.", exc_info=api_ex) + self.logger.debug("Error accessing the MaaS API.", + exc_info=api_ex) self.task.failure() - self.task.add_status_msg( - msg='Error accessing MaaS API: %s' % str(api_ex), - error=True, - ctx='NA', - ctx_type='NA') + self.task.add_status_msg(msg='Error accessing MaaS API: %s' % + str(api_ex), + error=True, + ctx='NA', + ctx_type='NA') self.task.save() except Exception as ex: - self.logger.debug( - "Exception caught in identify node.", exc_info=ex) + self.logger.debug("Exception caught in identify node.", + exc_info=ex) self.task.failure(focus=n.get_id()) self.task.add_status_msg( msg="Error trying to location %s in MAAS" % n.name, @@ -1145,6 +1176,7 @@ class IdentifyNode(BaseMaasAction): self.task.save() return + class ConfigureHardware(BaseMaasAction): """Action to start commissioning a server.""" @@ -1169,11 +1201,10 @@ class ConfigureHardware(BaseMaasAction): try: site_design = self._load_site_design() except errors.OrchestratorError: - self.task.add_status_msg( - msg="Error loading site design.", - error=True, - ctx='NA', - ctx_type='NA') + self.task.add_status_msg(msg="Error loading site design.", + error=True, + ctx='NA', + ctx_type='NA') self.task.set_status(hd_fields.TaskStatus.Complete) self.task.failure() self.task.save() @@ -1185,15 +1216,17 @@ class ConfigureHardware(BaseMaasAction): # TODO(sh8121att): Better way of representing the node statuses than static strings for n in nodes: try: - self.logger.debug( - "Locating node %s for commissioning" % (n.name)) + self.logger.debug("Locating node %s for commissioning" % + (n.name)) machine = find_node_in_maas(self.maas_client, n) if type(machine) == maas_rack.RackController: msg = "Located node %s in MaaS as rack controller. Skipping." % ( n.name) self.logger.info(msg) - self.task.add_status_msg( - msg=msg, error=False, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='node') self.task.success(focus=n.get_id()) elif machine is not None: if machine.status_name in [ @@ -1203,9 +1236,8 @@ class ConfigureHardware(BaseMaasAction): self.logger.debug( "Located node %s in MaaS, starting commissioning" % (n.name)) - machine.commission( - skip_bmc_config=config.config_mgr.conf.maasdriver.skip_bmc_config - ) + machine.commission(skip_bmc_config=config.config_mgr. + conf.maasdriver.skip_bmc_config) # Poll machine status attempts = 0 @@ -1216,8 +1248,7 @@ class ConfigureHardware(BaseMaasAction): while (attempts < max_attempts and (machine.status_name != 'Ready' - and not - machine.status_name.startswith('Failed'))): + and not machine.status_name.startswith('Failed'))): attempts = attempts + 1 time.sleep(config.config_mgr.conf.maasdriver. 
poll_interval) @@ -1235,35 +1266,36 @@ class ConfigureHardware(BaseMaasAction): if machine.status_name == 'Ready': msg = "Node %s commissioned." % (n.name) self.logger.info(msg) - self.task.add_status_msg( - msg=msg, - error=False, - ctx=n.name, - ctx_type='node') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='node') self.task.success(focus=n.get_id()) self.collect_build_data(machine) else: msg = "Node %s failed commissioning." % (n.name) self.logger.info(msg) - self.task.add_status_msg( - msg=msg, - error=True, - ctx=n.name, - ctx_type='node') + self.task.add_status_msg(msg=msg, + error=True, + ctx=n.name, + ctx_type='node') self.task.failure(focus=n.get_id()) - self._add_detail_logs( - n, - machine, - 'commission', - result_type='commissioning') - self._add_detail_logs( - n, machine, 'testing', result_type='testing') + self._add_detail_logs(n, + machine, + 'commission', + result_type='commissioning') + self._add_detail_logs(n, + machine, + 'testing', + result_type='testing') elif machine.status_name in ['Commissioning', 'Testing']: msg = "Located node %s in MaaS, node already being commissioned. Skipping..." % ( n.name) self.logger.info(msg) - self.task.add_status_msg( - msg=msg, error=False, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='node') self.task.success(focus=n.get_id()) elif machine.status_name in [ 'Ready', 'Deploying', 'Allocated', 'Deployed' @@ -1271,21 +1303,27 @@ class ConfigureHardware(BaseMaasAction): msg = "Located node %s in MaaS, node commissioned. Skipping..." % ( n.name) self.logger.info(msg) - self.task.add_status_msg( - msg=msg, error=False, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='node') self.task.success(focus=n.get_id()) else: msg = "Located node %s in MaaS, unknown status %s. Skipping." 
% ( n, machine.status_name) self.logger.warning(msg) - self.task.add_status_msg( - msg=msg, error=True, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=True, + ctx=n.name, + ctx_type='node') self.task.failure(focus=n.get_id()) else: msg = "Node %s not found in MaaS" % n.name self.logger.warning(msg) - self.task.add_status_msg( - msg=msg, error=True, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=True, + ctx=n.name, + ctx_type='node') self.task.failure(focus=n.get_id()) except Exception as ex: msg = "Error commissioning node %s: %s" % (n.name, str(ex)) @@ -1293,8 +1331,10 @@ class ConfigureHardware(BaseMaasAction): self.logger.debug( "Unhandled exception attempting to commission node.", exc_info=ex) - self.task.add_status_msg( - msg=msg, error=True, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=True, + ctx=n.name, + ctx_type='node') self.task.failure(focus=n.get_id()) self.task.set_status(hd_fields.TaskStatus.Complete) @@ -1312,25 +1352,23 @@ class ConfigureHardware(BaseMaasAction): df = 'text/xml' else: df = 'text/plain' - bd = objects.BuildData( - node_name=machine.hostname, - task_id=self.task.get_id(), - generator=t, - collected_date=datetime.utcnow(), - data_format=df, - data_element=d.decode()) - self.logger.debug( - "Saving build data from generator %s" % t) + bd = objects.BuildData(node_name=machine.hostname, + task_id=self.task.get_id(), + generator=t, + collected_date=datetime.utcnow(), + data_format=df, + data_element=d.decode()) + self.logger.debug("Saving build data from generator %s" % + t) self.state_manager.post_build_data(bd) - self.task.add_status_msg( - msg="Saving build data element.", - error=False, - ctx=machine.hostname, - ctx_type='node') + self.task.add_status_msg(msg="Saving build data element.", + error=False, + ctx=machine.hostname, + ctx_type='node') except Exception as ex: - self.logger.error( - "Error collecting node build data for %s" % machine.hostname, - exc_info=ex) + self.logger.error("Error collecting node build data for %s" % + machine.hostname, + exc_info=ex) class ApplyNodeNetworking(BaseMaasAction): @@ -1349,11 +1387,11 @@ class ApplyNodeNetworking(BaseMaasAction): self.logger.debug("Error accessing the MaaS API.", exc_info=ex) self.task.set_status(hd_fields.TaskStatus.Complete) self.task.failure() - self.task.add_status_msg( - msg="Error accessing MaaS API: %s" % str(ex), - error=True, - ctx='NA', - ctx_type='NA') + self.task.add_status_msg(msg="Error accessing MaaS API: %s" % + str(ex), + error=True, + ctx='NA', + ctx_type='NA') self.task.save() return @@ -1363,11 +1401,10 @@ class ApplyNodeNetworking(BaseMaasAction): try: site_design = self._load_site_design(resolve_aliases=True) except errors.OrchestratorError: - self.task.add_status_msg( - msg="Error loading site design.", - error=True, - ctx='NA', - ctx_type='NA') + self.task.add_status_msg(msg="Error loading site design.", + error=True, + ctx='NA', + ctx_type='NA') self.task.set_status(hd_fields.TaskStatus.Complete) self.task.failure() self.task.save() @@ -1385,14 +1422,14 @@ class ApplyNodeNetworking(BaseMaasAction): machine = find_node_in_maas(self.maas_client, n) if type(machine) is maas_rack.RackController: - msg = ("Node %s is a rack controller, skipping deploy action." % - n.name) + msg = ( + "Node %s is a rack controller, skipping deploy action." 
+ % n.name) self.logger.debug(msg) - self.task.add_status_msg( - msg=msg, - error=False, - ctx=n.name, - ctx_type='node') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='node') self.task.success(focus=n.name) continue elif machine is not None: @@ -1409,25 +1446,28 @@ class ApplyNodeNetworking(BaseMaasAction): "Node %s could not be released, skipping deployment." % n.name) self.logger.info(msg) - self.task.add_status_msg( - msg=msg, - error=True, - ctx=n.name, - ctx_type='node') + self.task.add_status_msg(msg=msg, + error=True, + ctx=n.name, + ctx_type='node') self.task.failure(focus=n.name) continue msg = ("Released failed node %s to retry deployment." % n.name) self.logger.info(msg) - self.task.add_status_msg( - msg=msg, error=False, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='node') if machine.status_name == 'Ready': msg = "Located node %s in MaaS, starting interface configuration" % ( n.name) self.logger.debug(msg) - self.task.add_status_msg( - msg=msg, error=False, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='node') machine.reset_network_config() machine.refresh() @@ -1454,11 +1494,10 @@ class ApplyNodeNetworking(BaseMaasAction): msg = "No fabric found for NetworkLink %s" % ( nl.name) self.logger.error(msg) - self.task.add_status_msg( - msg=msg, - error=True, - ctx=n.name, - ctx_type='node') + self.task.add_status_msg(msg=msg, + error=True, + ctx=n.name, + ctx_type='node') self.task.failure(focus=n.name) continue @@ -1467,11 +1506,10 @@ class ApplyNodeNetworking(BaseMaasAction): msg = "Building node %s interface %s as a bond." % ( n.name, i.device_name) self.logger.debug(msg) - self.task.add_status_msg( - msg=msg, - error=False, - ctx=n.name, - ctx_type='node') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='node') hw_iface_list = i.get_hw_slaves() hw_iface_logicalname_list = [] for hw_iface in hw_iface_list: @@ -1493,11 +1531,10 @@ class ApplyNodeNetworking(BaseMaasAction): "interface %s has less than 2 slaves." % \ (nl.name, i.device_name) self.logger.warning(msg) - self.task.add_status_msg( - msg=msg, - error=True, - ctx=n.name, - ctx_type='node') + self.task.add_status_msg(msg=msg, + error=True, + ctx=n.name, + ctx_type='node') self.task.failure(focus=n.name) continue else: @@ -1505,32 +1542,29 @@ class ApplyNodeNetworking(BaseMaasAction): msg = "Network link %s disables bonding, interface %s has multiple slaves." % \ (nl.name, i.device_name) self.logger.warning(msg) - self.task.add_status_msg( - msg=msg, - error=True, - ctx=n.name, - ctx_type='node') + self.task.add_status_msg(msg=msg, + error=True, + ctx=n.name, + ctx_type='node') self.task.failure(n.name) continue elif len(i.get_hw_slaves()) == 0: msg = "Interface %s has 0 slaves." 
% ( i.device_name) self.logger.warning(msg) - self.task.add_status_msg( - msg=msg, - error=True, - ctx=n.name, - ctx_type='node') + self.task.add_status_msg(msg=msg, + error=True, + ctx=n.name, + ctx_type='node') self.task.failure(focus=n.name) else: msg = "Configuring interface %s on node %s" % ( i.device_name, n.name) self.logger.debug(msg) - self.task.add_status_msg( - msg=msg, - error=False, - ctx=n.name, - ctx_type='node') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='node') hw_iface = i.get_hw_slaves()[0] # TODO(sh8121att): HardwareProfile device alias integration iface = machine.get_network_interface( @@ -1540,11 +1574,10 @@ class ApplyNodeNetworking(BaseMaasAction): msg = "Interface %s not found on node %s, skipping configuration" % ( i.device_name, machine.resource_id) self.logger.warning(msg) - self.task.add_status_msg( - msg=msg, - error=True, - ctx=n.name, - ctx_type='node') + self.task.add_status_msg(msg=msg, + error=True, + ctx=n.name, + ctx_type='node') self.task.failure(focus=n.name) continue @@ -1640,11 +1673,10 @@ class ApplyNodeNetworking(BaseMaasAction): msg = "Linking system %s interface %s to subnet %s" % ( n.name, i.device_name, dd_net.cidr) self.logger.info(msg) - self.task.add_status_msg( - msg=msg, - error=False, - ctx=n.name, - ctx_type='node') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='node') link_iface.link_subnet(**link_options) self.task.success(focus=n.name) @@ -1652,18 +1684,20 @@ class ApplyNodeNetworking(BaseMaasAction): self.task.failure(focus=n.name) msg = "Did not find a defined Network %s to attach to interface" % iface_net self.logger.error(msg) - self.task.add_status_msg( - msg=msg, - error=True, - ctx=n.name, - ctx_type='node') + self.task.add_status_msg(msg=msg, + error=True, + ctx=n.name, + ctx_type='node') elif machine.status_name == 'Broken': - msg = ("Located node %s in MaaS, status broken. Run " - "ConfigureHardware before configurating network" - % (n.name)) + msg = ( + "Located node %s in MaaS, status broken. Run " + "ConfigureHardware before configurating network" % + (n.name)) self.logger.info(msg) - self.task.add_status_msg( - msg=msg, error=True, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=True, + ctx=n.name, + ctx_type='node') self.task.failure(focus=n.name) elif machine.status_name == 'Deployed': msg = ( @@ -1671,29 +1705,37 @@ class ApplyNodeNetworking(BaseMaasAction): "and considering success. Destroy node first if redeploy needed." % (n.name)) self.logger.info(msg) - self.task.add_status_msg( - msg=msg, error=False, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='node') self.task.success(focus=n.name) else: msg = "Located node %s in MaaS, unknown status %s. Skipping..." 
% ( n.name, machine.status_name) self.logger.warning(msg) - self.task.add_status_msg( - msg=msg, error=True, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=True, + ctx=n.name, + ctx_type='node') self.task.failure(focus=n.name) else: msg = "Node %s not found in MaaS" % n.name self.logger.warning(msg) - self.task.add_status_msg( - msg=msg, error=True, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=True, + ctx=n.name, + ctx_type='node') self.task.failure() except Exception as ex: msg = "Error configuring network for node %s: %s" % (n.name, str(ex)) self.logger.warning(msg) - self.task.add_status_msg( - msg=msg, error=True, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=True, + ctx=n.name, + ctx_type='node') self.task.failure(focus=n.name) self.task.set_status(hd_fields.TaskStatus.Complete) @@ -1713,11 +1755,11 @@ class ApplyNodePlatform(BaseMaasAction): self.logger.debug("Error accessing the MaaS API.", exc_info=ex) self.task.set_status(hd_fields.TaskStatus.Complete) self.task.failure() - self.task.add_status_msg( - msg="Error accessing MaaS API: %s" % str(ex), - error=True, - ctx='NA', - ctx_type='NA') + self.task.add_status_msg(msg="Error accessing MaaS API: %s" % + str(ex), + error=True, + ctx='NA', + ctx_type='NA') self.task.save() return @@ -1727,11 +1769,10 @@ class ApplyNodePlatform(BaseMaasAction): try: site_design = self._load_site_design() except errors.OrchestratorError: - self.task.add_status_msg( - msg="Error loading site design.", - error=True, - ctx='NA', - ctx_type='NA') + self.task.add_status_msg(msg="Error loading site design.", + error=True, + ctx='NA', + ctx_type='NA') self.task.set_status(hd_fields.TaskStatus.Complete) self.task.failure() self.task.save() @@ -1750,23 +1791,29 @@ class ApplyNodePlatform(BaseMaasAction): if machine is None: msg = "Could not locate machine for node %s" % n.name self.logger.warning(msg) - self.task.add_status_msg( - msg=msg, error=True, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=True, + ctx=n.name, + ctx_type='node') self.task.failure(focus=n.get_id()) continue except Exception as ex1: msg = "Error locating machine for node %s: %s" % (n, str(ex1)) self.task.failure(focus=n.get_id()) self.logger.error(msg) - self.task.add_status_msg( - msg=msg, error=True, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=True, + ctx=n.name, + ctx_type='node') continue if type(machine) is maas_rack.RackController: msg = ("Skipping changes to rack controller %s." % n.name) self.logger.info(msg) - self.task.add_status_msg( - msg=msg, error=False, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='node') self.task.success(focus=n.name) continue elif machine.status_name == 'Deployed': @@ -1775,8 +1822,10 @@ class ApplyNodePlatform(BaseMaasAction): "and considering success. Destroy node first if redeploy needed." 
% (n.name)) self.logger.info(msg) - self.task.add_status_msg( - msg=msg, error=False, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='node') self.task.success(focus=n.name) continue @@ -1796,39 +1845,46 @@ class ApplyNodePlatform(BaseMaasAction): msg = "Creating kernel_params tag for node %s: %s" % ( n.name, kp_string) self.logger.debug(msg) - node_kp_tag = maas_tag.Tag( - self.maas_client, - name="%s_kp" % (n.name), - kernel_opts=kp_string) + node_kp_tag = maas_tag.Tag(self.maas_client, + name="%s_kp" % (n.name), + kernel_opts=kp_string) node_kp_tag = tag_list.add(node_kp_tag) node_kp_tag.apply_to_node(machine.resource_id) - self.task.add_status_msg( - msg=msg, error=False, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='node') msg = "Applied kernel parameters to node %s" % n.name self.logger.info(msg) - self.task.add_status_msg( - msg=msg, error=False, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='node') self.task.success(focus=n.get_id()) else: msg = "No kernel parameters to apply for %s." % n.name self.logger.debug(msg) - self.task.add_status_msg( - msg=msg, error=False, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='node') self.task.success(focus=n.get_id()) except Exception as ex2: msg = "Error configuring kernel parameters for node %s" % ( n.name) self.logger.error(msg + ": %s" % str(ex2)) - self.task.add_status_msg( - msg=msg, error=True, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=True, + ctx=n.name, + ctx_type='node') self.task.failure(focus=n.get_id()) continue try: if n.tags is not None and len(n.tags) > 0: - self.logger.info( - "Configuring static tags for node %s" % (n.name)) + self.logger.info("Configuring static tags for node %s" % + (n.name)) for t in n.tags: tag_list.refresh() @@ -1853,23 +1909,29 @@ class ApplyNodePlatform(BaseMaasAction): msg = "Applying tag %s to node %s" % ( tag.resource_id, machine.resource_id) self.logger.debug(msg) - self.task.add_status_msg( - msg=msg, error=False, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='node') tag.apply_to_node(machine.resource_id) - self.logger.info( - "Applied static tags to node %s" % (n.name)) + self.logger.info("Applied static tags to node %s" % + (n.name)) self.task.success(focus=n.get_id()) else: msg = "No node tags to apply for %s." 
% n.name self.logger.debug(msg) - self.task.add_status_msg( - msg=msg, error=False, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='node') self.task.success(focus=n.get_id()) except Exception as ex3: msg = "Error configuring static tags for node %s" % (n.name) self.logger.error(msg + ": " + str(ex3)) - self.task.add_status_msg( - msg=msg, error=True, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=True, + ctx=n.name, + ctx_type='node') self.task.failure(focus=n.get_id()) continue @@ -1897,11 +1959,11 @@ class ApplyNodeStorage(BaseMaasAction): self.logger.debug("Error accessing the MaaS API.", exc_info=ex) self.task.set_status(hd_fields.TaskStatus.Complete) self.task.failure() - self.task.add_status_msg( - msg="Error accessing MaaS API: %s" % str(ex), - error=True, - ctx='NA', - ctx_type='NA') + self.task.add_status_msg(msg="Error accessing MaaS API: %s" % + str(ex), + error=True, + ctx='NA', + ctx_type='NA') self.task.save() return @@ -1911,11 +1973,10 @@ class ApplyNodeStorage(BaseMaasAction): try: site_design = self._load_site_design(resolve_aliases=True) except errors.OrchestratorError: - self.task.add_status_msg( - msg="Error loading site design.", - error=True, - ctx='NA', - ctx_type='NA') + self.task.add_status_msg(msg="Error loading site design.", + error=True, + ctx='NA', + ctx_type='NA') self.task.set_status(hd_fields.TaskStatus.Complete) self.task.failure() self.task.save() @@ -1934,24 +1995,31 @@ class ApplyNodeStorage(BaseMaasAction): if machine is None: msg = "Could not locate machine for node %s" % n.name self.logger.warning(msg) - self.task.add_status_msg( - msg=msg, error=True, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=True, + ctx=n.name, + ctx_type='node') self.task.failure(focus=n.get_id()) continue except Exception as ex: msg = "Error locating machine for node %s" % (n.name) self.logger.error(msg + ": " + str(ex)) - self.task.add_status_msg( - msg=msg, error=True, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=True, + ctx=n.name, + ctx_type='node') self.task.failure(focus=n.get_id()) continue if type(machine) is maas_rack.RackController: - msg = ("Skipping configuration updates to rack controller %s." % - n.name) + msg = ( + "Skipping configuration updates to rack controller %s." % + n.name) self.logger.info(msg) - self.task.add_status_msg( - msg=msg, error=False, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='node') self.task.success(focus=n.name) continue elif machine.status_name == 'Deployed': @@ -1960,8 +2028,10 @@ class ApplyNodeStorage(BaseMaasAction): "and considering success. Destroy node first if redeploy needed." % (n.name)) self.logger.info(msg) - self.task.add_status_msg( - msg=msg, error=False, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='node') self.task.success(focus=n.name) continue @@ -1975,8 +2045,10 @@ class ApplyNodeStorage(BaseMaasAction): """ msg = "Clearing current storage layout on node %s." 
% n.name self.logger.debug(msg) - self.task.add_status_msg( - msg=msg, error=False, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='node') machine.reset_storage_config() (root_dev, root_block) = n.find_fs_block_device('/') @@ -1987,56 +2059,63 @@ class ApplyNodeStorage(BaseMaasAction): storage_layout['layout_type'] = 'flat' storage_layout['root_device'] = n.get_logicalname( root_dev.name) - storage_layout['root_size'] = ApplyNodeStorage.calculate_bytes( - root_block.size) + storage_layout[ + 'root_size'] = ApplyNodeStorage.calculate_bytes( + root_block.size) elif isinstance(root_block, hostprofile.HostVolume): storage_layout['layout_type'] = 'lvm' if len(root_dev.physical_devices) != 1: msg = "Root LV in VG with multiple physical devices on node %s" % ( n.name) self.logger.error(msg) - self.task.add_status_msg( - msg=msg, error=True, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=True, + ctx=n.name, + ctx_type='node') self.task.failure(focus=n.get_id()) continue storage_layout['root_device'] = n.get_logicalname( root_dev.physical_devices[0]) - storage_layout['root_lv_size'] = ApplyNodeStorage.calculate_bytes( - root_block.size) + storage_layout[ + 'root_lv_size'] = ApplyNodeStorage.calculate_bytes( + root_block.size) storage_layout['root_lv_name'] = root_block.name storage_layout['root_vg_name'] = root_dev.name if boot_block is not None: - storage_layout['boot_size'] = ApplyNodeStorage.calculate_bytes( - boot_block.size) + storage_layout[ + 'boot_size'] = ApplyNodeStorage.calculate_bytes( + boot_block.size) msg = "Setting node %s root storage layout: %s" % ( n.name, str(storage_layout)) self.logger.debug(msg) - self.task.add_status_msg( - msg=msg, error=False, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='node') machine.set_storage_layout(**storage_layout) vg_devs = {} for d in n.storage_devices: - maas_dev = machine.block_devices.singleton({ - 'name': - n.get_logicalname(d.name) - }) + maas_dev = machine.block_devices.singleton( + {'name': n.get_logicalname(d.name)}) if maas_dev is None: msg = "Dev %s (%s) not found on node %s" % ( - d.name, n.get_logicalname(d.name), n.name) + d.name, n.get_logicalname(d.name), n.name) self.logger.warning(msg) - self.task.add_status_msg( - msg=msg, error=True, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=True, + ctx=n.name, + ctx_type='node') self.task.failure(focus=n.get_id()) continue if d.volume_group is not None: self.logger.debug( "Adding dev %s (%s) to volume group %s" % - (d.name, n.get_logicalname(d.name), - d.volume_group)) + (d.name, n.get_logicalname( + d.name), d.volume_group)) if d.volume_group not in vg_devs: vg_devs[d.volume_group] = {'b': [], 'p': []} vg_devs[d.volume_group]['b'].append( @@ -2055,16 +2134,19 @@ class ApplyNodeStorage(BaseMaasAction): maas_dev.refresh() size = ApplyNodeStorage.calculate_bytes( size_str=p.size, context=maas_dev) - part = maas_partition.Partition( - self.maas_client, size=size, bootable=p.bootable) + part = maas_partition.Partition(self.maas_client, + size=size, + bootable=p.bootable) if p.part_uuid is not None: part.uuid = p.part_uuid msg = "Creating partition %s sized %d bytes on dev %s (%s)" % ( p.name, size, d.name, n.get_logicalname(d.name)) self.logger.debug(msg) part = maas_dev.create_partition(part) - self.task.add_status_msg( - msg=msg, error=False, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=False, + 
ctx=n.name, + ctx_type='node') if p.volume_group is not None: self.logger.debug( @@ -2086,11 +2168,10 @@ class ApplyNodeStorage(BaseMaasAction): p.fstype) self.logger.debug(msg) part.format(**format_opts) - self.task.add_status_msg( - msg=msg, - error=False, - ctx=n.name, - ctx_type='node') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='node') mount_opts = { 'mount_point': p.mountpoint, 'mount_options': p.mount_options, @@ -2099,14 +2180,13 @@ class ApplyNodeStorage(BaseMaasAction): p.name, p.mountpoint) self.logger.debug(msg) part.mount(**mount_opts) - self.task.add_status_msg( - msg=msg, - error=False, - ctx=n.name, - ctx_type='node') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='node') - self.logger.debug( - "Finished configuring node %s partitions" % n.name) + self.logger.debug("Finished configuring node %s partitions" % + n.name) for v in n.volume_groups: if v.is_sys(): @@ -2119,8 +2199,8 @@ class ApplyNodeStorage(BaseMaasAction): % (v.name)) continue - maas_volgroup = maas_vg.VolumeGroup( - self.maas_client, name=v.name) + maas_volgroup = maas_vg.VolumeGroup(self.maas_client, + name=v.name) if v.vg_uuid is not None: maas_volgroup.uuid = v.vg_uuid @@ -2138,14 +2218,17 @@ class ApplyNodeStorage(BaseMaasAction): maas_volgroup = machine.volume_groups.add(maas_volgroup) maas_volgroup.refresh() - self.task.add_status_msg( - msg=msg, error=False, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='node') for lv in v.logical_volumes: calc_size = ApplyNodeStorage.calculate_bytes( size_str=lv.size, context=maas_volgroup) - bd_id = maas_volgroup.create_lv( - name=lv.name, uuid_str=lv.lv_uuid, size=calc_size) + bd_id = maas_volgroup.create_lv(name=lv.name, + uuid_str=lv.lv_uuid, + size=calc_size) if lv.mountpoint is not None: machine.refresh() @@ -2153,32 +2236,29 @@ class ApplyNodeStorage(BaseMaasAction): msg = "Formatting LV %s as filesystem on node %s." % ( lv.name, n.name) self.logger.debug(msg) - self.task.add_status_msg( - msg=msg, - error=False, - ctx=n.name, - ctx_type='node') - maas_lv.format( - fstype=lv.fstype, uuid_str=lv.fs_uuid) + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='node') + maas_lv.format(fstype=lv.fstype, + uuid_str=lv.fs_uuid) msg = "Mounting LV %s at %s on node %s." % ( lv.name, lv.mountpoint, n.name) self.logger.debug(msg) - maas_lv.mount( - mount_point=lv.mountpoint, - mount_options=lv.mount_options) - self.task.add_status_msg( - msg=msg, - error=False, - ctx=n.name, - ctx_type='node') + maas_lv.mount(mount_point=lv.mountpoint, + mount_options=lv.mount_options) + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='node') self.task.success(focus=n.get_id()) except Exception as ex: self.task.failure(focus=n.get_id()) - self.task.add_status_msg( - msg="Error configuring storage. %s" % str(ex), - error=True, - ctx=n.name, - ctx_type='node') + self.task.add_status_msg(msg="Error configuring storage. 
%s" % + str(ex), + error=True, + ctx=n.name, + ctx_type='node') self.logger.debug("Error configuring storage for node %s: %s" % (n.name, str(ex))) @@ -2219,8 +2299,8 @@ class ApplyNodeStorage(BaseMaasAction): match = regex.match(size_str) if not match: - raise errors.InvalidSizeFormat( - "Invalid size string format: %s" % size_str) + raise errors.InvalidSizeFormat("Invalid size string format: %s" % + size_str) if ((match.group(1) == '>' or match.group(3) == '%') and not context): raise errors.InvalidSizeFormat( @@ -2265,11 +2345,11 @@ class DeployNode(BaseMaasAction): self.logger.debug("Error accessing the MaaS API.", exc_info=ex) self.task.set_status(hd_fields.TaskStatus.Complete) self.task.failure() - self.task.add_status_msg( - msg="Error accessing MaaS API: %s" % str(ex), - error=True, - ctx='NA', - ctx_type='NA') + self.task.add_status_msg(msg="Error accessing MaaS API: %s" % + str(ex), + error=True, + ctx='NA', + ctx_type='NA') self.task.save() return @@ -2279,11 +2359,10 @@ class DeployNode(BaseMaasAction): try: site_design = self._load_site_design() except errors.OrchestratorError: - self.task.add_status_msg( - msg="Error loading site design.", - error=True, - ctx='NA', - ctx_type='NA') + self.task.add_status_msg(msg="Error loading site design.", + error=True, + ctx='NA', + ctx_type='NA') self.task.set_status(hd_fields.TaskStatus.Complete) self.task.failure() self.task.save() @@ -2299,8 +2378,10 @@ class DeployNode(BaseMaasAction): if type(machine) is maas_rack.RackController: msg = "Skipping configuration of rack controller %s." % n.name self.logger.info(msg) - self.task.add_status_msg( - msg=msg, error=False, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='node') self.task.success(focus=n.name) continue elif machine.status_name.startswith( @@ -2309,34 +2390,44 @@ class DeployNode(BaseMaasAction): msg = "Node %s already deployed or deploying, skipping." % ( n.name) self.logger.info(msg) - self.task.add_status_msg( - msg=msg, error=False, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='node') self.task.success(focus=n.name) continue elif machine.status_name == 'Ready': msg = "Acquiring node %s for deployment" % (n.name) self.logger.info(msg) machine = machine_list.acquire_node(n.name) - self.task.add_status_msg( - msg=msg, error=False, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='node') elif machine.status_name.startswith('Allocated'): msg = "Node %s already acquired." % (n.name) self.logger.info(msg) - self.task.add_status_msg( - msg=msg, error=False, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='node') else: msg = "Unexpected status %s for node %s, skipping deployment." 
% ( machine.status_name, n.name) self.logger.warning(msg) - self.task.add_status_msg( - msg=msg, error=True, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=True, + ctx=n.name, + ctx_type='node') self.task.failure(focus=n.get_id()) continue except errors.DriverError as dex: msg = "Error acquiring node %s, skipping" % n.name self.logger.warning(msg, exc_info=dex) - self.task.add_status_msg( - msg=msg, error=True, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=True, + ctx=n.name, + ctx_type='node') self.task.failure(focus=n.get_id()) continue @@ -2347,19 +2438,23 @@ class DeployNode(BaseMaasAction): msg = "Set owner data %s = %s for node %s" % (k, v, n.name) self.logger.debug(msg) machine.set_owner_data(k, v) - self.task.add_status_msg( - msg=msg, error=False, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='node') except Exception as ex: msg = "Error setting node %s owner data" % n.name self.logger.warning(msg + ": " + str(ex)) self.task.failure(focus=n.get_id()) - self.task.add_status_msg( - msg=msg, error=True, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=True, + ctx=n.name, + ctx_type='node') continue # Saving boot action context for a node - self.logger.info( - "Saving Boot Action context for node %s." % (n.name)) + self.logger.info("Saving Boot Action context for node %s." % + (n.name)) try: ba_key = self.orchestrator.create_bootaction_context( n.name, self.task) @@ -2374,13 +2469,15 @@ class DeployNode(BaseMaasAction): msg = "Creating boot action id key tag for node %s" % ( n.name) self.logger.debug(msg) - node_baid_tag = maas_tag.Tag( - self.maas_client, - name="%s__baid__%s" % (n.name, ba_key.hex())) + node_baid_tag = maas_tag.Tag(self.maas_client, + name="%s__baid__%s" % + (n.name, ba_key.hex())) node_baid_tag = tag_list.add(node_baid_tag) node_baid_tag.apply_to_node(machine.resource_id) - self.task.add_status_msg( - msg=msg, error=False, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='node') except Exception as ex: self.logger.error( "Error setting boot action id key tag for %s." 
% n.name, @@ -2408,15 +2505,16 @@ class DeployNode(BaseMaasAction): (n.name, n.image, n.kernel)) try: - machine.deploy( - platform=n.image, - kernel=n.kernel, - user_data=user_data_string) + machine.deploy(platform=n.image, + kernel=n.kernel, + user_data=user_data_string) except errors.DriverError: msg = "Error deploying node %s, skipping" % n.name self.logger.warning(msg) - self.task.add_status_msg( - msg=msg, error=True, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=True, + ctx=n.name, + ctx_type='node') self.task.failure(focus=n.get_id()) continue @@ -2442,20 +2540,26 @@ class DeployNode(BaseMaasAction): if machine.status_name.startswith('Deployed'): msg = "Node %s deployed" % (n.name) self.logger.info(msg) - self.task.add_status_msg( - msg=msg, error=False, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='node') self.task.success(focus=n.get_id()) elif machine.status_name.startswith('Failed'): msg = "Node %s deployment failed" % (n.name) self.logger.info(msg) - self.task.add_status_msg( - msg=msg, error=True, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=True, + ctx=n.name, + ctx_type='node') self.task.failure(focus=n.get_id()) else: msg = "Node %s deployment timed out" % (n.name) self.logger.warning(msg) - self.task.add_status_msg( - msg=msg, error=True, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=True, + ctx=n.name, + ctx_type='node') self.task.failure(focus=n.get_id()) self._add_detail_logs(n, machine, 'deploy', result_type='deploy') self.task.set_status(hd_fields.TaskStatus.Complete) @@ -2463,6 +2567,7 @@ class DeployNode(BaseMaasAction): return + def find_node_in_maas(maas_client, node_model, probably_exists=True): """Find a node in MAAS matching the node_model. 
@@ -2485,6 +2590,7 @@ def find_node_in_maas(maas_client, node_model, probably_exists=True): if not machine: # If node isn't found a normal node, check rack controllers rackd_list = maas_rack.RackControllers(maas_client) - machine = rackd_list.identify_baremetal_node(node_model, probably_exists) + machine = rackd_list.identify_baremetal_node(node_model, + probably_exists) return machine diff --git a/python/drydock_provisioner/drivers/node/maasdriver/api_client.py b/python/drydock_provisioner/drivers/node/maasdriver/api_client.py index 351b85d6..8c6eaeba 100644 --- a/python/drydock_provisioner/drivers/node/maasdriver/api_client.py +++ b/python/drydock_provisioner/drivers/node/maasdriver/api_client.py @@ -26,6 +26,7 @@ import drydock_provisioner.error as errors class MaasOauth(req_auth.AuthBase): + def __init__(self, apikey): self.consumer_key, self.token_key, self.token_secret = apikey.split( ':') @@ -55,18 +56,19 @@ class MaasOauth(req_auth.AuthBase): class MaasRequestFactory(object): + def __init__(self, base_url, apikey): # The URL in the config should end in /MAAS/, but the api is behind /MAAS/api/2.0/ self.base_url = base_url + "/api/2.0/" self.apikey = apikey # Adapter for maas for request retries - retry_strategy = Retry( - total=3, - status_forcelist=[429, 500, 502, 503, 504], - method_whitelist=["HEAD", "GET", "POST", "PUT", "DELETE", - "OPTIONS", "TRACE"] - ) + retry_strategy = Retry(total=3, + status_forcelist=[429, 500, 502, 503, 504], + method_whitelist=[ + "HEAD", "GET", "POST", "PUT", "DELETE", + "OPTIONS", "TRACE" + ]) self.maas_adapter = HTTPAdapter(max_retries=retry_strategy) self.signer = MaasOauth(apikey) @@ -109,8 +111,8 @@ class MaasRequestFactory(object): except requests.Timeout: raise errors.TransientDriverError("Timeout connection to MaaS") except Exception as ex: - raise errors.PersistentDriverError( - "Error accessing MaaS: %s" % str(ex)) + raise errors.PersistentDriverError("Error accessing MaaS: %s" % + str(ex)) if resp.status_code in [401, 403]: raise errors.PersistentDriverError( @@ -149,15 +151,15 @@ class MaasRequestFactory(object): str(i).encode('utf-8')).decode('utf-8') content_type = 'text/plain; charset="utf-8"' part_headers = {'Content-Transfer-Encoding': 'base64'} - files_tuples.append((k, (None, value, content_type, - part_headers))) + files_tuples.append( + (k, (None, value, content_type, part_headers))) else: value = base64.b64encode( str(v).encode('utf-8')).decode('utf-8') content_type = 'text/plain; charset="utf-8"' part_headers = {'Content-Transfer-Encoding': 'base64'} - files_tuples.append((k, (None, value, content_type, - part_headers))) + files_tuples.append( + (k, (None, value, content_type, part_headers))) kwargs['files'] = files_tuples params = kwargs.pop('params', None) @@ -174,13 +176,12 @@ class MaasRequestFactory(object): if timeout is None: timeout = (5, 60) - request = requests.Request( - method=method, - url=self.base_url + endpoint, - auth=self.signer, - headers=headers, - params=params, - **kwargs) + request = requests.Request(method=method, + url=self.base_url + endpoint, + auth=self.signer, + headers=headers, + params=params, + **kwargs) prepared_req = self.http_session.prepare_request(request) @@ -191,6 +192,6 @@ class MaasRequestFactory(object): "Received error response - URL: %s %s - RESPONSE: %s" % (prepared_req.method, prepared_req.url, resp.status_code)) self.logger.debug("Response content: %s" % resp.text) - raise errors.DriverError( - "MAAS Error: %s - %s" % (resp.status_code, resp.text)) + raise errors.DriverError("MAAS 
Error: %s - %s" % + (resp.status_code, resp.text)) return resp diff --git a/python/drydock_provisioner/drivers/node/maasdriver/driver.py b/python/drydock_provisioner/drivers/node/maasdriver/driver.py index a37085da..d4b6c60c 100644 --- a/python/drydock_provisioner/drivers/node/maasdriver/driver.py +++ b/python/drydock_provisioner/drivers/node/maasdriver/driver.py @@ -45,19 +45,21 @@ from .actions.node import ConfigureNodeProvisioner class MaasNodeDriver(NodeDriver): maasdriver_options = [ - cfg.StrOpt( - 'maas_api_key', help='The API key for accessing MaaS', - secret=True), + cfg.StrOpt('maas_api_key', + help='The API key for accessing MaaS', + secret=True), cfg.StrOpt('maas_api_url', help='The URL for accessing MaaS API'), cfg.BoolOpt( 'use_node_oob_params', default=False, - help='Update MAAS to use the provided Node OOB params, overwriting discovered values', + help= + 'Update MAAS to use the provided Node OOB params, overwriting discovered values', ), cfg.BoolOpt( 'skip_bmc_config', default=False, - help='Skip BMC reconfiguration during commissioning (requires MAAS 2.7+)', + help= + 'Skip BMC reconfiguration during commissioning (requires MAAS 2.7+)', ), cfg.IntOpt( 'poll_interval', @@ -105,8 +107,8 @@ class MaasNodeDriver(NodeDriver): def __init__(self, **kwargs): super().__init__(**kwargs) - cfg.CONF.register_opts( - MaasNodeDriver.maasdriver_options, group=MaasNodeDriver.driver_key) + cfg.CONF.register_opts(MaasNodeDriver.maasdriver_options, + group=MaasNodeDriver.driver_key) self.logger = logging.getLogger( cfg.CONF.logging.nodedriver_logger_name) @@ -139,8 +141,9 @@ class MaasNodeDriver(NodeDriver): raise errors.DriverError("Invalid task %s" % (task_id)) if task.action not in self.supported_actions: - raise errors.DriverError("Driver %s doesn't support task action %s" - % (self.driver_desc, task.action)) + raise errors.DriverError( + "Driver %s doesn't support task action %s" % + (self.driver_desc, task.action)) task.set_status(hd_fields.TaskStatus.Running) task.save() @@ -149,11 +152,10 @@ class MaasNodeDriver(NodeDriver): if task.retry > 0: msg = "Retrying task %s on previous failed entities." % str( task.get_id()) - task.add_status_msg( - msg=msg, - error=False, - ctx=str(task.get_id()), - ctx_type='task') + task.add_status_msg(msg=msg, + error=False, + ctx=str(task.get_id()), + ctx_type='task') target_nodes = self.orchestrator.get_target_nodes( task, failures=True) else: @@ -197,10 +199,9 @@ class MaasNodeDriver(NodeDriver): task.failure() else: if f.exception(): - self.logger.error( - "Uncaught exception in subtask %s." % str( - uuid.UUID(bytes=t)), - exc_info=f.exception()) + self.logger.error("Uncaught exception in subtask %s." 
% + str(uuid.UUID(bytes=t)), + exc_info=f.exception()) task.failure() task.bubble_results() task.align_result() @@ -216,14 +217,14 @@ class MaasNodeDriver(NodeDriver): maas_client=maas_client) action.start() except Exception as e: - msg = ("Subtask for action %s raised unexpected exception: %s" - % (task.action, str(e))) + msg = ( + "Subtask for action %s raised unexpected exception: %s" % + (task.action, str(e))) self.logger.error(msg, exc_info=e) - task.add_status_msg( - msg=msg, - error=True, - ctx=str(task.get_id()), - ctx_type='task') + task.add_status_msg(msg=msg, + error=True, + ctx=str(task.get_id()), + ctx_type='task') task.failure() task.set_status(hd_fields.TaskStatus.Complete) diff --git a/python/drydock_provisioner/drivers/node/maasdriver/models/base.py b/python/drydock_provisioner/drivers/node/maasdriver/models/base.py index 3cd378ef..1b1349e0 100644 --- a/python/drydock_provisioner/drivers/node/maasdriver/models/base.py +++ b/python/drydock_provisioner/drivers/node/maasdriver/models/base.py @@ -218,8 +218,9 @@ class ResourceCollectionBase(object): res.set_resource_id(resp_json.get('id')) return res - raise errors.DriverError("Failed updating MAAS url %s - return code %s" - % (url, resp.status_code)) + raise errors.DriverError( + "Failed updating MAAS url %s - return code %s" % + (url, resp.status_code)) """ Append a resource instance to the list locally only diff --git a/python/drydock_provisioner/drivers/node/maasdriver/models/blockdev.py b/python/drydock_provisioner/drivers/node/maasdriver/models/blockdev.py index 1674ebe3..1b879302 100644 --- a/python/drydock_provisioner/drivers/node/maasdriver/models/blockdev.py +++ b/python/drydock_provisioner/drivers/node/maasdriver/models/blockdev.py @@ -93,8 +93,8 @@ class BlockDevice(model_base.ResourceBase): resp = self.api_client.post(url, op='format', files=data) if not resp.ok: - raise Exception( - "MAAS error: %s - %s" % (resp.status_code, resp.text)) + raise Exception("MAAS error: %s - %s" % + (resp.status_code, resp.text)) self.refresh() except Exception as ex: @@ -126,8 +126,8 @@ class BlockDevice(model_base.ResourceBase): resp = self.api_client.post(url, op='unformat') if not resp.ok: - raise Exception( - "MAAS error: %s - %s" % (resp.status_code, resp.text)) + raise Exception("MAAS error: %s - %s" % + (resp.status_code, resp.text)) self.refresh() except Exception as ex: msg = "Error: unformat of device %s on node %s failed: %s" \ @@ -156,8 +156,8 @@ class BlockDevice(model_base.ResourceBase): resp = self.api_client.post(url, op='mount', files=data) if not resp.ok: - raise Exception( - "MAAS error: %s - %s" % (resp.status_code, resp.text)) + raise Exception("MAAS error: %s - %s" % + (resp.status_code, resp.text)) self.refresh() except Exception as ex: @@ -183,8 +183,8 @@ class BlockDevice(model_base.ResourceBase): resp = self.api_client.post(url, op='unmount') if not resp.ok: - raise Exception( - "MAAS error: %s - %s" % (resp.status_code, resp.text)) + raise Exception("MAAS error: %s - %s" % + (resp.status_code, resp.text)) self.refresh() except Exception as ex: @@ -202,8 +202,8 @@ class BlockDevice(model_base.ResourceBase): resp = self.api_client.post(url, op='set_boot_disk') if not resp.ok: - raise Exception( - "MAAS error: %s - %s" % (resp.status_code, resp.text)) + raise Exception("MAAS error: %s - %s" % + (resp.status_code, resp.text)) self.refresh() except Exception as ex: diff --git a/python/drydock_provisioner/drivers/node/maasdriver/models/fabric.py 
b/python/drydock_provisioner/drivers/node/maasdriver/models/fabric.py index 996e62c1..cc3568ce 100644 --- a/python/drydock_provisioner/drivers/node/maasdriver/models/fabric.py +++ b/python/drydock_provisioner/drivers/node/maasdriver/models/fabric.py @@ -37,8 +37,8 @@ class Fabric(model_base.ResourceBase): return def refresh_vlans(self): - self.vlans = model_vlan.Vlans( - self.api_client, fabric_id=self.resource_id) + self.vlans = model_vlan.Vlans(self.api_client, + fabric_id=self.resource_id) self.vlans.refresh() def set_resource_id(self, res_id): diff --git a/python/drydock_provisioner/drivers/node/maasdriver/models/interface.py b/python/drydock_provisioner/drivers/node/maasdriver/models/interface.py index 4b423793..56647665 100644 --- a/python/drydock_provisioner/drivers/node/maasdriver/models/interface.py +++ b/python/drydock_provisioner/drivers/node/maasdriver/models/interface.py @@ -85,8 +85,8 @@ class Interface(model_base.ResourceBase): fabric_vlan = fabric.vlans.singleton({'vid': 0}) if fabric_vlan is None: - self.logger.warning( - "Cannot locate untagged VLAN on fabric %s" % (fabric_id)) + self.logger.warning("Cannot locate untagged VLAN on fabric %s" % + (fabric_id)) raise errors.DriverError( "Cannot locate untagged VLAN on fabric %s" % (fabric_id)) @@ -112,8 +112,8 @@ class Interface(model_base.ResourceBase): """Disconnect this interface from subnets and VLANs.""" url = self.interpolate_url() - self.logger.debug( - "Disconnecting interface %s from networks." % (self.name)) + self.logger.debug("Disconnecting interface %s from networks." % + (self.name)) resp = self.api_client.post(url, op='disconnect') if not resp.ok: @@ -242,7 +242,8 @@ class Interface(model_base.ResourceBase): :return: true if this interface will respond to this MAC """ - if mac_address.replace(':', '').upper() == self.mac_address.replace(':', '').upper(): + if mac_address.replace(':', '').upper() == self.mac_address.replace( + ':', '').upper(): return True return False @@ -311,10 +312,10 @@ class Interfaces(model_base.ResourceCollectionBase): parent_iface = self.singleton({'name': parent_name}) if parent_iface is None: - self.logger.error( - "Cannot locate parent interface %s" % (parent_name)) - raise errors.DriverError( - "Cannot locate parent interface %s" % (parent_name)) + self.logger.error("Cannot locate parent interface %s" % + (parent_name)) + raise errors.DriverError("Cannot locate parent interface %s" % + (parent_name)) if parent_iface.vlan is None: self.logger.error( @@ -324,8 +325,8 @@ class Interfaces(model_base.ResourceCollectionBase): "Cannot create VLAN interface on disconnected parent %s" % (parent_iface.resource_id)) - vlans = maas_vlan.Vlans( - self.api_client, fabric_id=parent_iface.fabric_id) + vlans = maas_vlan.Vlans(self.api_client, + fabric_id=parent_iface.fabric_id) vlans.refresh() vlan = vlans.singleton({'vid': vlan_tag}) diff --git a/python/drydock_provisioner/drivers/node/maasdriver/models/iprange.py b/python/drydock_provisioner/drivers/node/maasdriver/models/iprange.py index a7a42a51..8a2f5272 100644 --- a/python/drydock_provisioner/drivers/node/maasdriver/models/iprange.py +++ b/python/drydock_provisioner/drivers/node/maasdriver/models/iprange.py @@ -72,5 +72,6 @@ class IpRanges(model_base.ResourceCollectionBase): res.set_resource_id(resp_json.get('id')) return res - raise errors.DriverError("Failed updating MAAS url %s - return code %s" - % (url, resp.status_code)) + raise errors.DriverError( + "Failed updating MAAS url %s - return code %s" % + (url, resp.status_code)) diff --git 
a/python/drydock_provisioner/drivers/node/maasdriver/models/machine.py b/python/drydock_provisioner/drivers/node/maasdriver/models/machine.py index 066e87b2..98aac0fd 100644 --- a/python/drydock_provisioner/drivers/node/maasdriver/models/machine.py +++ b/python/drydock_provisioner/drivers/node/maasdriver/models/machine.py @@ -31,6 +31,7 @@ LOG = logging.getLogger(__name__) power_lock = Lock() power_cv = Condition(lock=power_lock) + class Machine(model_base.ResourceBase): resource_url = 'machines/{resource_id}/' @@ -62,8 +63,8 @@ class Machine(model_base.ResourceBase): api_client, system_id=self.resource_id) self.volume_groups.refresh() except Exception: - self.logger.warning( - "Failed load node %s volume groups." % (self.resource_id)) + self.logger.warning("Failed load node %s volume groups." % + (self.resource_id)) else: self.interfaces = None self.block_devices = None @@ -123,28 +124,28 @@ class Machine(model_base.ResourceBase): Removes all the volume groups/logical volumes and all the physical device partitions on this machine. """ - self.logger.info( - "Resetting storage configuration on node %s" % (self.resource_id)) + self.logger.info("Resetting storage configuration on node %s" % + (self.resource_id)) if self.volume_groups is not None and self.volume_groups.len() > 0: for vg in self.volume_groups: self.logger.debug("Removing VG %s" % vg.name) vg.delete() else: - self.logger.debug( - "No VGs configured on node %s" % (self.resource_id)) + self.logger.debug("No VGs configured on node %s" % + (self.resource_id)) if self.block_devices is not None: for d in self.block_devices: if d.partitions is not None and d.partitions.len() > 0: - self.logger.debug( - "Clearing partitions on device %s" % d.name) + self.logger.debug("Clearing partitions on device %s" % + d.name) d.clear_partitions() else: - self.logger.debug( - "No partitions found on device %s" % d.name) + self.logger.debug("No partitions found on device %s" % + d.name) else: - self.logger.debug( - "No block devices found on node %s" % (self.resource_id)) + self.logger.debug("No block devices found on node %s" % + (self.resource_id)) def set_storage_layout(self, layout_type='flat', @@ -199,12 +200,13 @@ class Machine(model_base.ResourceBase): if root_lv_name: data['lv_name'] = root_lv_name - resp = self.api_client.post( - url, op='set_storage_layout', files=data) + resp = self.api_client.post(url, + op='set_storage_layout', + files=data) if not resp.ok: - raise Exception( - "MAAS Error: %s - %s" % (resp.status_code, resp.text)) + raise Exception("MAAS Error: %s - %s" % + (resp.status_code, resp.text)) except Exception as ex: msg = "Error: failed configuring node %s storage layout: %s" % ( self.resource_id, str(ex)) @@ -356,10 +358,9 @@ class Machine(model_base.ResourceBase): :param str result_type: the type of results to return. 
One of ``all``, ``commissioning``, ``testing``, ``deploy`` """ - node_results = maas_nr.NodeResults( - self.api_client, - system_id_list=[self.resource_id], - result_type=result_type) + node_results = maas_nr.NodeResults(self.api_client, + system_id_list=[self.resource_id], + result_type=result_type) node_results.refresh() return node_results @@ -375,8 +376,9 @@ class Machine(model_base.ResourceBase): """ url = self.interpolate_url() - resp = self.api_client.post( - url, op='set_workload_annotations', files={key: value}) + resp = self.api_client.post(url, + op='set_workload_annotations', + files={key: value}) if resp.status_code != 200: self.logger.error( @@ -406,29 +408,26 @@ class Machine(model_base.ResourceBase): if kwargs: power_params = dict() - self.logger.debug("Setting node power type to %s." % power_type) + self.logger.debug("Setting node power type to %s." % + power_type) self.power_type = power_type power_params['power_type'] = power_type for k, v in kwargs.items(): power_params['power_parameters_' + k] = v - self.logger.debug( - "Updating node %s power parameters: %s" - % ( - self.hostname, - str( - { - **power_params, - **{ - k: "" - for k in power_params - if k in ["power_parameters_power_pass"] - }, - } - ), - ) - ) + self.logger.debug("Updating node %s power parameters: %s" % ( + self.hostname, + str({ + **power_params, + **{ + k: "" + for k in power_params if k in [ + "power_parameters_power_pass" + ] + }, + }), + )) resp = self.api_client.put(url, files=power_params) if resp.status_code == 200: @@ -448,8 +447,9 @@ class Machine(model_base.ResourceBase): with power_cv: url = self.interpolate_url() - self.logger.debug("Resetting node power type for machine {}".format( - self.resource_id)) + self.logger.debug( + "Resetting node power type for machine {}".format( + self.resource_id)) self.power_type = 'manual' power_params = {'power_type': 'manual'} resp = self.api_client.put(url, files=power_params) @@ -482,12 +482,11 @@ class Machine(model_base.ResourceBase): 'virsh', power_address=oob_params.get('libvirt_uri'), power_id=n.name) - elif use_node_oob_params and (n.oob_type == "ipmi" or n.oob_type == "redfish"): + elif use_node_oob_params and (n.oob_type == "ipmi" + or n.oob_type == "redfish"): self.logger.debug( "Updating node {} MaaS power parameters for {}.".format( - n.name, n.oob_type - ) - ) + n.name, n.oob_type)) oob_params = n.oob_parameters oob_network = oob_params.get("network") oob_address = n.get_network_address(oob_network) @@ -585,21 +584,20 @@ class Machines(model_base.ResourceCollectionBase): url = self.interpolate_url() - resp = self.api_client.post( - url, op='allocate', files={'system_id': node.resource_id}) + resp = self.api_client.post(url, + op='allocate', + files={'system_id': node.resource_id}) if not resp.ok: - self.logger.error( - "Error acquiring node, MaaS returned %s" % resp.status_code) + self.logger.error("Error acquiring node, MaaS returned %s" % + resp.status_code) self.logger.debug("MaaS response: %s" % resp.text) - raise errors.DriverError( - "Error acquiring node, MaaS returned %s" % resp.status_code) + raise errors.DriverError("Error acquiring node, MaaS returned %s" % + resp.status_code) return node - def identify_baremetal_node(self, - node_model, - probably_exists=True): + def identify_baremetal_node(self, node_model, probably_exists=True): """Find MaaS node resource matching Drydock BaremetalNode. 
Performs one or more queries to the MaaS API to find a Machine matching @@ -642,8 +640,8 @@ class Machines(model_base.ResourceCollectionBase): maas_node = self.find_node_with_mac(node_model.boot_mac) if maas_node is None: - self.logger.info( - "Could not locate node %s in MaaS" % node_model.name) + self.logger.info("Could not locate node %s in MaaS" % + node_model.name) else: self.logger.debug("Found MaaS resource %s matching Node %s" % (maas_node.resource_id, node_model.get_id())) @@ -656,11 +654,8 @@ class Machines(model_base.ResourceCollectionBase): # query the MaaS API for machines with a matching mac address. # this call returns a json list, each member representing a complete # Machine - self.logger.debug( - "Finding {} with hostname: {}".format( - self.collection_resource.__name__, hostname - ) - ) + self.logger.debug("Finding {} with hostname: {}".format( + self.collection_resource.__name__, hostname)) resp = self.api_client.get(url, params={"hostname": hostname}) @@ -675,9 +670,9 @@ class Machines(model_base.ResourceCollectionBase): hostname, node.get("system_id"), node.get("hostname"), - ) - ) - return self.collection_resource.from_dict(self.api_client, node) + )) + return self.collection_resource.from_dict( + self.api_client, node) return None @@ -687,11 +682,8 @@ class Machines(model_base.ResourceCollectionBase): # query the MaaS API for all power parameters at once. # this call returns a json dict, mapping system id to power parameters - self.logger.debug( - "Finding {} with power address: {}".format( - self.collection_resource.__name__, power_address - ) - ) + self.logger.debug("Finding {} with power address: {}".format( + self.collection_resource.__name__, power_address)) resp = self.api_client.get(url, op="power_parameters") @@ -700,22 +692,22 @@ class Machines(model_base.ResourceCollectionBase): for system_id, power_params in json_dict.items(): self.logger.debug( - "Finding {} with power address: {}: Considering: {}: {}".format( + "Finding {} with power address: {}: Considering: {}: {}". + format( self.collection_resource.__name__, power_address, system_id, power_params.get("power_address"), - ) - ) + )) if power_params.get("power_address") == power_address: self.logger.debug( - "Finding {} with power address: {}: Found: {}: {}".format( + "Finding {} with power address: {}: Found: {}: {}". + format( self.collection_resource.__name__, power_address, system_id, power_params.get("power_address"), - ) - ) + )) # the API result isn't quite enough to contruct a Machine, # so construct one with the system_id and then refresh @@ -758,8 +750,8 @@ class Machines(model_base.ResourceCollectionBase): field = k[13:] result = [ i for i in result if str( - getattr(i, 'power_parameters', {}). 
- get(field, None)) == str(v) + getattr(i, 'power_parameters', {}).get(field, None)) + == str(v) ] else: result = [ @@ -785,8 +777,9 @@ class Machines(model_base.ResourceCollectionBase): res.set_resource_id(resp_json.get('system_id')) return res - raise errors.DriverError("Failed updating MAAS url %s - return code %s" - % (url, resp.status_code)) + raise errors.DriverError( + "Failed updating MAAS url %s - return code %s" % + (url, resp.status_code)) def empty_refresh(self): """Check connectivity to MAAS machines API diff --git a/python/drydock_provisioner/drivers/node/maasdriver/models/partition.py b/python/drydock_provisioner/drivers/node/maasdriver/models/partition.py index b78b0050..86c87a4f 100644 --- a/python/drydock_provisioner/drivers/node/maasdriver/models/partition.py +++ b/python/drydock_provisioner/drivers/node/maasdriver/models/partition.py @@ -77,8 +77,8 @@ class Partition(model_base.ResourceBase): resp = self.api_client.post(url, op='format', files=data) if not resp.ok: - raise Exception( - "MAAS error: %s - %s" % (resp.status_code, resp.text)) + raise Exception("MAAS error: %s - %s" % + (resp.status_code, resp.text)) self.refresh() except Exception as ex: @@ -109,8 +109,8 @@ class Partition(model_base.ResourceBase): (self.name, self.system_id)) resp = self.api_client.post(url, op='unformat') if not resp.ok: - raise Exception( - "MAAS error: %s - %s" % (resp.status_code, resp.text)) + raise Exception("MAAS error: %s - %s" % + (resp.status_code, resp.text)) self.refresh() except Exception as ex: msg = "Error: unformat of device %s on node %s failed: %s" \ @@ -138,8 +138,8 @@ class Partition(model_base.ResourceBase): (self.resource_id, self.system_id, mount_point)) resp = self.api_client.post(url, op='mount', files=data) if not resp.ok: - raise Exception( - "MAAS error: %s - %s" % (resp.status_code, resp.text)) + raise Exception("MAAS error: %s - %s" % + (resp.status_code, resp.text)) self.refresh() except Exception as ex: msg = "Error: mount of device %s on node %s failed: %s" \ @@ -163,8 +163,8 @@ class Partition(model_base.ResourceBase): (self.name, self.system_id)) resp = self.api_client.post(url, op='unmount') if not resp.ok: - raise Exception( - "MAAS error: %s - %s" % (resp.status_code, resp.text)) + raise Exception("MAAS error: %s - %s" % + (resp.status_code, resp.text)) self.refresh() except Exception as ex: msg = "Error: unmount of device %s on node %s failed: %s" \ @@ -180,8 +180,8 @@ class Partition(model_base.ResourceBase): (self.resource_id, self.system_id)) resp = self.api_client.post(url, op='set_boot_disk') if not resp.ok: - raise Exception( - "MAAS error: %s - %s" % (resp.status_code, resp.text)) + raise Exception("MAAS error: %s - %s" % + (resp.status_code, resp.text)) self.refresh() except Exception as ex: msg = "Error: setting device %s on node %s to boot failed: %s" \ diff --git a/python/drydock_provisioner/drivers/node/maasdriver/models/rack_controller.py b/python/drydock_provisioner/drivers/node/maasdriver/models/rack_controller.py index a2394634..3dbb86b5 100644 --- a/python/drydock_provisioner/drivers/node/maasdriver/models/rack_controller.py +++ b/python/drydock_provisioner/drivers/node/maasdriver/models/rack_controller.py @@ -66,8 +66,9 @@ class RackController(maas_machine.Machine): def update_identity(self, n, domain="local"): """Cannot update rack controller identity.""" - self.logger.debug("Cannot update rack controller identity for %s, no-op." % - self.hostname) + self.logger.debug( + "Cannot update rack controller identity for %s, no-op." 
% + self.hostname) return def is_healthy(self): @@ -82,6 +83,7 @@ class RackController(maas_machine.Machine): healthy = False return healthy + class RackControllers(maas_machine.Machines): """Model for a collection of rack controllers.""" diff --git a/python/drydock_provisioner/drivers/node/maasdriver/models/subnet.py b/python/drydock_provisioner/drivers/node/maasdriver/models/subnet.py index ae6a1c7b..0fba9a9c 100644 --- a/python/drydock_provisioner/drivers/node/maasdriver/models/subnet.py +++ b/python/drydock_provisioner/drivers/node/maasdriver/models/subnet.py @@ -44,10 +44,8 @@ class Subnet(model_base.ResourceBase): current_ranges.refresh() exists = current_ranges.query({ - 'start_ip': - addr_range.get('start', None), - 'end_ip': - addr_range.get('end', None) + 'start_ip': addr_range.get('start', None), + 'end_ip': addr_range.get('end', None) }) if len(exists) > 0: @@ -90,12 +88,11 @@ class Subnet(model_base.ResourceBase): if current_route is not None: current_route.delete() - new_route = maas_route.StaticRoute( - self.api_client, - source=self.resource_id, - destination=dest_subnet, - gateway_ip=gateway, - metric=metric) + new_route = maas_route.StaticRoute(self.api_client, + source=self.resource_id, + destination=dest_subnet, + gateway_ip=gateway, + metric=metric) new_route = sr.add(new_route) return new_route diff --git a/python/drydock_provisioner/drivers/node/maasdriver/models/tag.py b/python/drydock_provisioner/drivers/node/maasdriver/models/tag.py index b7a407a4..e8a41e2b 100644 --- a/python/drydock_provisioner/drivers/node/maasdriver/models/tag.py +++ b/python/drydock_provisioner/drivers/node/maasdriver/models/tag.py @@ -64,13 +64,14 @@ class Tag(model_base.ResourceBase): """ if system_id in self.get_applied_nodes(): - self.logger.debug( - "Tag %s already applied to node %s" % (self.name, system_id)) + self.logger.debug("Tag %s already applied to node %s" % + (self.name, system_id)) else: url = self.interpolate_url() - resp = self.api_client.post( - url, op='update_nodes', files={'add': system_id}) + resp = self.api_client.post(url, + op='update_nodes', + files={'add': system_id}) if not resp.ok: self.logger.error( diff --git a/python/drydock_provisioner/drivers/node/maasdriver/models/vlan.py b/python/drydock_provisioner/drivers/node/maasdriver/models/vlan.py index 87219f76..8dd18593 100644 --- a/python/drydock_provisioner/drivers/node/maasdriver/models/vlan.py +++ b/python/drydock_provisioner/drivers/node/maasdriver/models/vlan.py @@ -74,10 +74,13 @@ class Vlan(model_base.ResourceBase): raise RackControllerConflict exception. 
""" if not self.primary_rack or self.primary_rack == rack_id: - self.logger.debug("Setting primary DHCP controller %s on VLAN %s", rack_id, self.resource_id) + self.logger.debug("Setting primary DHCP controller %s on VLAN %s", + rack_id, self.resource_id) self.primary_rack = rack_id elif not self.secondary_rack or self.secondary_rack == rack_id: - self.logger.debug("Setting secondary DHCP controller %s on VLAN %s.", rack_id, self.resource_id) + self.logger.debug( + "Setting secondary DHCP controller %s on VLAN %s.", rack_id, + self.resource_id) self.secondary_rack = rack_id else: raise RackControllerConflict( @@ -92,7 +95,8 @@ class Vlan(model_base.ResourceBase): :param bool commit: Whether to commit reset to MAAS API """ - self.logger.debug("Resetting DHCP control on VLAN %s.", self.resource_id) + self.logger.debug("Resetting DHCP control on VLAN %s.", + self.resource_id) self.relay_vlan = None self.dhcp_on = False self.primary_rack = None diff --git a/python/drydock_provisioner/drivers/node/maasdriver/models/volumegroup.py b/python/drydock_provisioner/drivers/node/maasdriver/models/volumegroup.py index ab35971e..03ebdce1 100644 --- a/python/drydock_provisioner/drivers/node/maasdriver/models/volumegroup.py +++ b/python/drydock_provisioner/drivers/node/maasdriver/models/volumegroup.py @@ -60,17 +60,19 @@ class VolumeGroup(model_base.ResourceBase): data = {'name': name, 'uuid': uuid_str, 'size': size} - self.logger.debug("Creating logical volume %s in VG %s on node %s" - % (name, self.name, self.system_id)) + self.logger.debug( + "Creating logical volume %s in VG %s on node %s" % + (name, self.name, self.system_id)) url = self.interpolate_url() - resp = self.api_client.post( - url, op='create_logical_volume', files=data) + resp = self.api_client.post(url, + op='create_logical_volume', + files=data) if not resp.ok: - raise Exception( - "MAAS error - %s - %s" % (resp.status_code, resp.txt)) + raise Exception("MAAS error - %s - %s" % + (resp.status_code, resp.txt)) res = resp.json() if 'id' in res: @@ -101,12 +103,13 @@ class VolumeGroup(model_base.ResourceBase): url = self.interpolate_url() - resp = self.api_client.post( - url, op='delete_logical_volume', files={'id': target_lv}) + resp = self.api_client.post(url, + op='delete_logical_volume', + files={'id': target_lv}) if not resp.ok: - raise Exception( - "MAAS error - %s - %s" % (resp.status_code, resp.text)) + raise Exception("MAAS error - %s - %s" % + (resp.status_code, resp.text)) else: raise Exception("VG %s has no logical volumes" % self.name) except Exception as ex: diff --git a/python/drydock_provisioner/drivers/oob/libvirt_driver/actions/oob.py b/python/drydock_provisioner/drivers/oob/libvirt_driver/actions/oob.py index 60926794..f47398b9 100644 --- a/python/drydock_provisioner/drivers/oob/libvirt_driver/actions/oob.py +++ b/python/drydock_provisioner/drivers/oob/libvirt_driver/actions/oob.py @@ -35,14 +35,14 @@ class LibvirtBaseAction(BaseAction): :param node: instance of objects.BaremetalNode """ if node.oob_type != 'libvirt': - raise errors.DriverError( - "Node OOB type %s is not 'libvirt'" % node.oob_type) + raise errors.DriverError("Node OOB type %s is not 'libvirt'" % + node.oob_type) virsh_url = node.oob_parameters.get('libvirt_uri', None) if not virsh_url: - raise errors.DriverError( - "Node %s has no 'libvirt_url' defined" % (node.name)) + raise errors.DriverError("Node %s has no 'libvirt_url' defined" % + (node.name)) url_parts = urlparse(virsh_url) @@ -51,8 +51,8 @@ class LibvirtBaseAction(BaseAction): "Node %s has 
invalid libvirt URL scheme %s. " "Only 'qemu+ssh' supported." % (node.name, url_parts.scheme)) - self.logger.debug( - "Starting libvirt session to hypervisor %s " % (virsh_url)) + self.logger.debug("Starting libvirt session to hypervisor %s " % + (virsh_url)) virsh_ses = libvirt.open(virsh_url) if not virsh_ses: @@ -148,11 +148,10 @@ class ValidateOobServices(LibvirtBaseAction): """Action to validation OOB services are available.""" def start(self): - self.task.add_status_msg( - msg="OOB does not require services.", - error=False, - ctx='NA', - ctx_type='NA') + self.task.add_status_msg(msg="OOB does not require services.", + error=False, + ctx='NA', + ctx_type='NA') self.task.set_status(hd_fields.TaskStatus.Complete) self.task.success() self.task.save() @@ -198,11 +197,10 @@ class SetNodeBoot(LibvirtBaseAction): for n in node_list: self.logger.debug("Setting bootdev to PXE for %s" % n.name) - self.task.add_status_msg( - msg="Setting node to PXE boot.", - error=False, - ctx=n.name, - ctx_type='node') + self.task.add_status_msg(msg="Setting node to PXE boot.", + error=False, + ctx=n.name, + ctx_type='node') try: self.set_node_pxe(n) @@ -213,14 +211,13 @@ class SetNodeBoot(LibvirtBaseAction): ctx=n.name, ctx_type='node') self.task.failure(focus=n.name) - self.logger.warning( - "Unable to set node %s to PXE boot." % (n.name)) + self.logger.warning("Unable to set node %s to PXE boot." % + (n.name)) else: - self.task.add_status_msg( - msg="Set bootdev to PXE.", - error=False, - ctx=n.name, - ctx_type='node') + self.task.add_status_msg(msg="Set bootdev to PXE.", + error=False, + ctx=n.name, + ctx_type='node') self.logger.debug("%s reports bootdev of network" % n.name) self.task.success(focus=n.name) @@ -244,21 +241,27 @@ class PowerOffNode(LibvirtBaseAction): for n in node_list: msg = "Shutting down domain %s" % n.name self.logger.debug(msg) - self.task.add_status_msg( - msg=msg, error=False, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='node') try: self.poweroff_node(n) except Exception as ex: msg = "Node failed to power off: %s" % str(ex) - self.task.add_status_msg( - msg=msg, error=True, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=True, + ctx=n.name, + ctx_type='node') self.logger.error(msg) self.task.failure(focus=n.name) else: msg = "Node %s powered off." % n.name - self.task.add_status_msg( - msg=msg, error=False, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='node') self.logger.debug(msg) self.task.success(focus=n.name) @@ -282,21 +285,27 @@ class PowerOnNode(LibvirtBaseAction): for n in node_list: msg = "Starting domain %s" % n.name self.logger.debug(msg) - self.task.add_status_msg( - msg=msg, error=False, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='node') try: self.poweron_node(n) except Exception as ex: msg = "Node failed to power on: %s" % str(ex) - self.task.add_status_msg( - msg=msg, error=True, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=True, + ctx=n.name, + ctx_type='node') self.logger.error(msg) self.task.failure(focus=n.name) else: msg = "Node %s powered on." 
% n.name - self.task.add_status_msg( - msg=msg, error=False, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='node') self.logger.debug(msg) self.task.success(focus=n.name) @@ -320,22 +329,28 @@ class PowerCycleNode(LibvirtBaseAction): for n in node_list: msg = ("Power cycling domain for node %s" % n.name) self.logger.debug(msg) - self.task.add_status_msg( - msg=msg, error=False, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='node') try: self.poweroff_node(n) self.poweron_node(n) except Exception as ex: msg = "Node failed to power cycle: %s" % str(ex) - self.task.add_status_msg( - msg=msg, error=True, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=True, + ctx=n.name, + ctx_type='node') self.logger.error(msg) self.task.failure(focus=n.name) else: msg = "Node %s power cycled." % n.name - self.task.add_status_msg( - msg=msg, error=False, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='node') self.logger.debug(msg) self.task.success(focus=n.name) @@ -361,14 +376,18 @@ class InterrogateOob(LibvirtBaseAction): node_status = self.get_node_status(n) except Exception as ex: msg = "Node failed tatus check: %s" % str(ex) - self.task.add_status_msg( - msg=msg, error=True, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=True, + ctx=n.name, + ctx_type='node') self.logger.error(msg) self.task.failure(focus=n.name) else: msg = "Node %s status is %s." % (n.name, node_status) - self.task.add_status_msg( - msg=msg, error=False, ctx=n.name, ctx_type='node') + self.task.add_status_msg(msg=msg, + error=False, + ctx=n.name, + ctx_type='node') self.logger.debug(msg) self.task.success(focus=n.name) diff --git a/python/drydock_provisioner/drivers/oob/libvirt_driver/driver.py b/python/drydock_provisioner/drivers/oob/libvirt_driver/driver.py index d3762142..cb771a91 100644 --- a/python/drydock_provisioner/drivers/oob/libvirt_driver/driver.py +++ b/python/drydock_provisioner/drivers/oob/libvirt_driver/driver.py @@ -65,9 +65,8 @@ class LibvirtDriver(oob_driver.OobDriver): def __init__(self, **kwargs): super().__init__(**kwargs) - cfg.CONF.register_opts( - LibvirtDriver.libvirt_driver_options, - group=LibvirtDriver.driver_key) + cfg.CONF.register_opts(LibvirtDriver.libvirt_driver_options, + group=LibvirtDriver.driver_key) self.logger = logging.getLogger( config.config_mgr.conf.logging.oobdriver_logger_name) @@ -82,8 +81,9 @@ class LibvirtDriver(oob_driver.OobDriver): if task.action not in self.supported_actions: self.logger.error("Driver %s doesn't support task action %s" % (self.driver_desc, task.action)) - raise errors.DriverError("Driver %s doesn't support task action %s" - % (self.driver_desc, task.action)) + raise errors.DriverError( + "Driver %s doesn't support task action %s" % + (self.driver_desc, task.action)) task.set_status(hd_fields.TaskStatus.Running) task.save() @@ -129,10 +129,9 @@ class LibvirtDriver(oob_driver.OobDriver): task.failure() else: if f.exception(): - self.logger.error( - "Uncaught exception in subtask %s" % str( - uuid.UUID(bytes=t)), - exc_info=f.exception()) + self.logger.error("Uncaught exception in subtask %s" % + str(uuid.UUID(bytes=t)), + exc_info=f.exception()) task.align_result() task.bubble_results() task.set_status(hd_fields.TaskStatus.Complete) diff --git a/python/drydock_provisioner/drivers/oob/manual_driver/driver.py 
b/python/drydock_provisioner/drivers/oob/manual_driver/driver.py index d0029037..d27c58ef 100644 --- a/python/drydock_provisioner/drivers/oob/manual_driver/driver.py +++ b/python/drydock_provisioner/drivers/oob/manual_driver/driver.py @@ -48,20 +48,22 @@ class ManualDriver(oob.OobDriver): if task.action not in self.supported_actions: self.logger.error("Driver %s doesn't support task action %s" % (self.driver_desc, task.action)) - raise errors.DriverError("Driver %s doesn't support task action %s" - % (self.driver_desc, task.action)) + raise errors.DriverError( + "Driver %s doesn't support task action %s" % + (self.driver_desc, task.action)) design_ref = task.design_ref if design_ref is None: - raise errors.DriverError( - "No design ID specified in task %s" % (task_id)) + raise errors.DriverError("No design ID specified in task %s" % + (task_id)) self.orchestrator.task_field_update( task.get_id(), status=hd_fields.TaskStatus.Running) - self.logger.info("Sleeping 60s to allow time for manual OOB %s action" - % task.action) + self.logger.info( + "Sleeping 60s to allow time for manual OOB %s action" % + task.action) time.sleep(60) diff --git a/python/drydock_provisioner/drivers/oob/pyghmi_driver/actions/oob.py b/python/drydock_provisioner/drivers/oob/pyghmi_driver/actions/oob.py index 18d63c9a..a62ce4e5 100644 --- a/python/drydock_provisioner/drivers/oob/pyghmi_driver/actions/oob.py +++ b/python/drydock_provisioner/drivers/oob/pyghmi_driver/actions/oob.py @@ -44,16 +44,17 @@ class PyghmiBaseAction(BaseAction): ipmi_address = node.get_network_address(ipmi_network) if ipmi_address is None: - raise errors.DriverError( - "Node %s has no IPMI address" % (node.name)) + raise errors.DriverError("Node %s has no IPMI address" % + (node.name)) ipmi_account = node.oob_parameters['account'] ipmi_credential = node.oob_parameters['credential'] self.logger.debug("Starting IPMI session to %s with %s/%s" % (ipmi_address, ipmi_account, ipmi_credential[:1])) - ipmi_session = Command( - bmc=ipmi_address, userid=ipmi_account, password=ipmi_credential) + ipmi_session = Command(bmc=ipmi_address, + userid=ipmi_account, + password=ipmi_credential) return ipmi_session @@ -99,11 +100,10 @@ class ValidateOobServices(PyghmiBaseAction): """Action to validation OOB services are available.""" def start(self): - self.task.add_status_msg( - msg="OOB does not require services.", - error=False, - ctx='NA', - ctx_type='NA') + self.task.add_status_msg(msg="OOB does not require services.", + error=False, + ctx='NA', + ctx_type='NA') self.task.set_status(hd_fields.TaskStatus.Complete) self.task.success() self.task.save() @@ -149,35 +149,32 @@ class SetNodeBoot(PyghmiBaseAction): for n in node_list: self.logger.debug("Setting bootdev to PXE for %s" % n.name) - self.task.add_status_msg( - msg="Setting node to PXE boot.", - error=False, - ctx=n.name, - ctx_type='node') + self.task.add_status_msg(msg="Setting node to PXE boot.", + error=False, + ctx=n.name, + ctx_type='node') self.exec_ipmi_command(n, Command.set_bootdev, 'pxe') time.sleep(3) bootdev = self.exec_ipmi_command(n, Command.get_bootdev) - if bootdev is not None and (bootdev.get('bootdev', - '') == 'network'): - self.task.add_status_msg( - msg="Set bootdev to PXE.", - error=False, - ctx=n.name, - ctx_type='node') + if bootdev is not None and (bootdev.get('bootdev', '') + == 'network'): + self.task.add_status_msg(msg="Set bootdev to PXE.", + error=False, + ctx=n.name, + ctx_type='node') self.logger.debug("%s reports bootdev of network" % n.name) self.task.success(focus=n.name) 
else: - self.task.add_status_msg( - msg="Unable to set bootdev to PXE.", - error=True, - ctx=n.name, - ctx_type='node') + self.task.add_status_msg(msg="Unable to set bootdev to PXE.", + error=True, + ctx=n.name, + ctx_type='node') self.task.failure(focus=n.name) - self.logger.warning( - "Unable to set node %s to PXE boot." % (n.name)) + self.logger.warning("Unable to set node %s to PXE boot." % + (n.name)) self.task.set_status(hd_fields.TaskStatus.Complete) self.task.save() @@ -198,11 +195,10 @@ class PowerOffNode(PyghmiBaseAction): for n in node_list: self.logger.debug("Sending set_power = off command to %s" % n.name) - self.task.add_status_msg( - msg="Sending set_power = off command.", - error=False, - ctx=n.name, - ctx_type='node') + self.task.add_status_msg(msg="Sending set_power = off command.", + error=False, + ctx=n.name, + ctx_type='node') self.exec_ipmi_command(n, Command.set_power, 'off') i = 18 @@ -212,13 +208,12 @@ class PowerOffNode(PyghmiBaseAction): power_state = self.exec_ipmi_command(n, Command.get_power) if power_state is not None and (power_state.get( 'powerstate', '') == 'off'): - self.task.add_status_msg( - msg="Node reports power off.", - error=False, - ctx=n.name, - ctx_type='node') - self.logger.debug( - "Node %s reports powerstate of off" % n.name) + self.task.add_status_msg(msg="Node reports power off.", + error=False, + ctx=n.name, + ctx_type='node') + self.logger.debug("Node %s reports powerstate of off" % + n.name) self.task.success(focus=n.name) break time.sleep(10) @@ -226,11 +221,10 @@ class PowerOffNode(PyghmiBaseAction): if power_state is not None and (power_state.get('powerstate', '') != 'off'): - self.task.add_status_msg( - msg="Node failed to power off.", - error=True, - ctx=n.name, - ctx_type='node') + self.task.add_status_msg(msg="Node failed to power off.", + error=True, + ctx=n.name, + ctx_type='node') self.logger.error("Giving up on IPMI command to %s" % n.name) self.task.failure(focus=n.name) @@ -253,11 +247,10 @@ class PowerOnNode(PyghmiBaseAction): for n in node_list: self.logger.debug("Sending set_power = off command to %s" % n.name) - self.task.add_status_msg( - msg="Sending set_power = on command.", - error=False, - ctx=n.name, - ctx_type='node') + self.task.add_status_msg(msg="Sending set_power = on command.", + error=False, + ctx=n.name, + ctx_type='node') self.exec_ipmi_command(n, Command.set_power, 'off') i = 18 @@ -267,13 +260,12 @@ class PowerOnNode(PyghmiBaseAction): power_state = self.exec_ipmi_command(n, Command.get_power) if power_state is not None and (power_state.get( 'powerstate', '') == 'on'): - self.logger.debug( - "Node %s reports powerstate of on" % n.name) - self.task.add_status_msg( - msg="Node reports power on.", - error=False, - ctx=n.name, - ctx_type='node') + self.logger.debug("Node %s reports powerstate of on" % + n.name) + self.task.add_status_msg(msg="Node reports power on.", + error=False, + ctx=n.name, + ctx_type='node') self.task.success(focus=n.name) break time.sleep(10) @@ -281,11 +273,10 @@ class PowerOnNode(PyghmiBaseAction): if power_state is not None and (power_state.get('powerstate', '') != 'on'): - self.task.add_status_msg( - msg="Node failed to power on.", - error=True, - ctx=n.name, - ctx_type='node') + self.task.add_status_msg(msg="Node failed to power on.", + error=True, + ctx=n.name, + ctx_type='node') self.logger.error("Giving up on IPMI command to %s" % n.name) self.task.failure(focus=n.name) @@ -308,11 +299,10 @@ class PowerCycleNode(PyghmiBaseAction): for n in node_list: self.logger.debug("Sending 
set_power = off command to %s" % n.name) - self.task.add_status_msg( - msg="Power cycling node via IPMI.", - error=False, - ctx=n.name, - ctx_type='node') + self.task.add_status_msg(msg="Power cycling node via IPMI.", + error=False, + ctx=n.name, + ctx_type='node') self.exec_ipmi_command(n, Command.set_power, 'off') # Wait for power state of off before booting back up @@ -326,8 +316,8 @@ class PowerCycleNode(PyghmiBaseAction): self.logger.debug("%s reports powerstate of off" % n.name) break elif power_state is None: - self.logger.debug( - "No response on IPMI power query to %s" % n.name) + self.logger.debug("No response on IPMI power query to %s" % + n.name) time.sleep(10) i = i - 1 @@ -355,18 +345,17 @@ class PowerCycleNode(PyghmiBaseAction): self.logger.debug("%s reports powerstate of on" % n.name) break elif power_state is None: - self.logger.debug( - "No response on IPMI power query to %s" % n.name) + self.logger.debug("No response on IPMI power query to %s" % + n.name) time.sleep(10) i = i - 1 - if power_state is not None and (power_state.get('powerstate', - '') == 'on'): - self.task.add_status_msg( - msg="Node power cycle complete.", - error=False, - ctx=n.name, - ctx_type='node') + if power_state is not None and (power_state.get('powerstate', '') + == 'on'): + self.task.add_status_msg(msg="Node power cycle complete.", + error=False, + ctx=n.name, + ctx_type='node') self.task.success(focus=n.name) else: self.task.add_status_msg( @@ -398,8 +387,8 @@ class InterrogateOob(PyghmiBaseAction): for n in node_list: try: - self.logger.debug( - "Interrogating node %s IPMI interface." % n.name) + self.logger.debug("Interrogating node %s IPMI interface." % + n.name) powerstate = self.exec_ipmi_command(n, Command.get_power) if powerstate is None: raise errors.DriverError() diff --git a/python/drydock_provisioner/drivers/oob/pyghmi_driver/driver.py b/python/drydock_provisioner/drivers/oob/pyghmi_driver/driver.py index 08951c53..ad6917ce 100644 --- a/python/drydock_provisioner/drivers/oob/pyghmi_driver/driver.py +++ b/python/drydock_provisioner/drivers/oob/pyghmi_driver/driver.py @@ -70,8 +70,8 @@ class PyghmiDriver(oob_driver.OobDriver): def __init__(self, **kwargs): super().__init__(**kwargs) - cfg.CONF.register_opts( - PyghmiDriver.pyghmi_driver_options, group=PyghmiDriver.driver_key) + cfg.CONF.register_opts(PyghmiDriver.pyghmi_driver_options, + group=PyghmiDriver.driver_key) self.logger = logging.getLogger( config.config_mgr.conf.logging.oobdriver_logger_name) @@ -86,8 +86,9 @@ class PyghmiDriver(oob_driver.OobDriver): if task.action not in self.supported_actions: self.logger.error("Driver %s doesn't support task action %s" % (self.driver_desc, task.action)) - raise errors.DriverError("Driver %s doesn't support task action %s" - % (self.driver_desc, task.action)) + raise errors.DriverError( + "Driver %s doesn't support task action %s" % + (self.driver_desc, task.action)) task.set_status(hd_fields.TaskStatus.Running) task.save() @@ -133,10 +134,9 @@ class PyghmiDriver(oob_driver.OobDriver): task.failure() else: if f.exception(): - self.logger.error( - "Uncaught exception in subtask %s" % str( - uuid.UUID(bytes=t)), - exc_info=f.exception()) + self.logger.error("Uncaught exception in subtask %s" % + str(uuid.UUID(bytes=t)), + exc_info=f.exception()) task.align_result() task.bubble_results() task.set_status(hd_fields.TaskStatus.Complete) diff --git a/python/drydock_provisioner/drivers/oob/redfish_driver/actions/oob.py b/python/drydock_provisioner/drivers/oob/redfish_driver/actions/oob.py index 
260b89b6..a6492d94 100644 --- a/python/drydock_provisioner/drivers/oob/redfish_driver/actions/oob.py +++ b/python/drydock_provisioner/drivers/oob/redfish_driver/actions/oob.py @@ -29,6 +29,7 @@ import drydock_provisioner.objects.fields as hd_fields REDFISH_MAX_ATTEMPTS = 3 + class RedfishBaseAction(BaseAction): """Base action for Redfish executed actions.""" @@ -44,8 +45,8 @@ class RedfishBaseAction(BaseAction): oob_network = node.oob_parameters['network'] oob_address = node.get_network_address(oob_network) if oob_address is None: - raise errors.DriverError( - "Node %s has no OOB Redfish address" % (node.name)) + raise errors.DriverError("Node %s has no OOB Redfish address" % + (node.name)) oob_account = node.oob_parameters['account'] oob_credential = node.oob_parameters['credential'] @@ -53,11 +54,12 @@ class RedfishBaseAction(BaseAction): self.logger.debug("Starting Redfish session to %s with %s" % (oob_address, oob_account)) try: - redfish_obj = RedfishSession(host=oob_address, - account=oob_account, - password=oob_credential, - use_ssl=cfg.CONF.redfish_driver.use_ssl, - connection_retries=cfg.CONF.redfish_driver.max_retries) + redfish_obj = RedfishSession( + host=oob_address, + account=oob_account, + password=oob_credential, + use_ssl=cfg.CONF.redfish_driver.use_ssl, + connection_retries=cfg.CONF.redfish_driver.max_retries) except (RedfishException, errors.DriverError) as iex: self.logger.error( "Error initializing Redfish session for node %s" % node.name) @@ -81,7 +83,8 @@ class RedfishBaseAction(BaseAction): return response except RedfishException as iex: self.logger.error( - "Error executing Redfish command %s for node %s" % (func.__name__, node.name)) + "Error executing Redfish command %s for node %s" % + (func.__name__, node.name)) self.logger.error("Redfish Exception: %s" % str(iex)) raise errors.DriverError("Redfish command failed.") @@ -91,11 +94,10 @@ class ValidateOobServices(RedfishBaseAction): """Action to validate OOB services are available.""" def start(self): - self.task.add_status_msg( - msg="OOB does not require services.", - error=False, - ctx='NA', - ctx_type='NA') + self.task.add_status_msg(msg="OOB does not require services.", + error=False, + ctx='NA', + ctx_type='NA') self.task.set_status(hd_fields.TaskStatus.Complete) self.task.success() self.task.save() @@ -134,34 +136,38 @@ class SetNodeBoot(RedfishBaseAction): node_list = self.orchestrator.get_target_nodes(self.task) for n in node_list: - self.task.add_status_msg( - msg="Setting node to PXE boot.", - error=False, - ctx=n.name, - ctx_type='node') + self.task.add_status_msg(msg="Setting node to PXE boot.", + error=False, + ctx=n.name, + ctx_type='node') for i in range(REDFISH_MAX_ATTEMPTS): bootdev = None - self.logger.debug("Setting bootdev to PXE for %s attempt #%s" % (n.name, i + 1)) + self.logger.debug("Setting bootdev to PXE for %s attempt #%s" % + (n.name, i + 1)) try: session = self.get_redfish_session(n) - bootdev = self.exec_redfish_command(n, session, RedfishSession.get_bootdev) + bootdev = self.exec_redfish_command( + n, session, RedfishSession.get_bootdev) if bootdev.get('bootdev', '') != 'Pxe': - self.exec_redfish_command(n, session, RedfishSession.set_bootdev, 'Pxe') + self.exec_redfish_command(n, session, + RedfishSession.set_bootdev, + 'Pxe') time.sleep(1) - bootdev = self.exec_redfish_command(n, session, RedfishSession.get_bootdev) + bootdev = self.exec_redfish_command( + n, session, RedfishSession.get_bootdev) session.close_session() except errors.DriverError as e: self.logger.warning( - "An 
exception '%s' occurred while attempting to set boot device on %s" % (e, n.name)) + "An exception '%s' occurred while attempting to set boot device on %s" + % (e, n.name)) - if bootdev is not None and (bootdev.get('bootdev', - '') == 'Pxe'): - self.task.add_status_msg( - msg="Set bootdev to PXE.", - error=False, - ctx=n.name, - ctx_type='node') + if bootdev is not None and (bootdev.get('bootdev', '') + == 'Pxe'): + self.task.add_status_msg(msg="Set bootdev to PXE.", + error=False, + ctx=n.name, + ctx_type='node') self.logger.debug("%s reports bootdev of network" % n.name) self.task.success(focus=n.name) break @@ -173,8 +179,8 @@ class SetNodeBoot(RedfishBaseAction): ctx=n.name, ctx_type='node') self.task.failure(focus=n.name) - self.logger.warning( - "Unable to set node %s to PXE boot." % (n.name)) + self.logger.warning("Unable to set node %s to PXE boot." % + (n.name)) self.task.set_status(hd_fields.TaskStatus.Complete) self.task.save() @@ -192,56 +198,58 @@ class PowerOffNode(RedfishBaseAction): for n in node_list: self.logger.debug("Sending set_power = off command to %s" % n.name) - self.task.add_status_msg( - msg="Sending set_power = off command.", - error=False, - ctx=n.name, - ctx_type='node') + self.task.add_status_msg(msg="Sending set_power = off command.", + error=False, + ctx=n.name, + ctx_type='node') session = self.get_redfish_session(n) # If power is already off, continue with the next node - power_state = self.exec_redfish_command(n, RedfishSession.get_power) - if power_state is not None and (power_state.get( - 'powerstate', '') == 'Off'): - self.task.add_status_msg( - msg="Node reports power off.", - error=False, - ctx=n.name, - ctx_type='node') + power_state = self.exec_redfish_command(n, + RedfishSession.get_power) + if power_state is not None and (power_state.get('powerstate', '') + == 'Off'): + self.task.add_status_msg(msg="Node reports power off.", + error=False, + ctx=n.name, + ctx_type='node') self.logger.debug( - "Node %s reports powerstate already off. No action required" % n.name) + "Node %s reports powerstate already off. 
No action required" + % n.name) self.task.success(focus=n.name) continue - self.exec_redfish_command(n, session, RedfishSession.set_power, 'ForceOff') + self.exec_redfish_command(n, session, RedfishSession.set_power, + 'ForceOff') attempts = cfg.CONF.redfish_driver.power_state_change_max_retries while attempts > 0: self.logger.debug("Polling powerstate waiting for success.") - power_state = self.exec_redfish_command(n, RedfishSession.get_power) + power_state = self.exec_redfish_command( + n, RedfishSession.get_power) if power_state is not None and (power_state.get( 'powerstate', '') == 'Off'): - self.task.add_status_msg( - msg="Node reports power off.", - error=False, - ctx=n.name, - ctx_type='node') - self.logger.debug( - "Node %s reports powerstate of off" % n.name) + self.task.add_status_msg(msg="Node reports power off.", + error=False, + ctx=n.name, + ctx_type='node') + self.logger.debug("Node %s reports powerstate of off" % + n.name) self.task.success(focus=n.name) break - time.sleep(cfg.CONF.redfish_driver.power_state_change_retry_interval) + time.sleep( + cfg.CONF.redfish_driver.power_state_change_retry_interval) attempts = attempts - 1 if power_state is not None and (power_state.get('powerstate', '') != 'Off'): - self.task.add_status_msg( - msg="Node failed to power off.", - error=True, - ctx=n.name, - ctx_type='node') - self.logger.error("Giving up on Redfish command to %s" % n.name) + self.task.add_status_msg(msg="Node failed to power off.", + error=True, + ctx=n.name, + ctx_type='node') + self.logger.error("Giving up on Redfish command to %s" % + n.name) self.task.failure(focus=n.name) session.close_session() @@ -262,56 +270,58 @@ class PowerOnNode(RedfishBaseAction): for n in node_list: self.logger.debug("Sending set_power = on command to %s" % n.name) - self.task.add_status_msg( - msg="Sending set_power = on command.", - error=False, - ctx=n.name, - ctx_type='node') + self.task.add_status_msg(msg="Sending set_power = on command.", + error=False, + ctx=n.name, + ctx_type='node') session = self.get_redfish_session(n) # If power is already on, continue with the next node - power_state = self.exec_redfish_command(n, RedfishSession.get_power) - if power_state is not None and (power_state.get( - 'powerstate', '') == 'On'): - self.task.add_status_msg( - msg="Node reports power on.", - error=False, - ctx=n.name, - ctx_type='node') + power_state = self.exec_redfish_command(n, + RedfishSession.get_power) + if power_state is not None and (power_state.get('powerstate', '') + == 'On'): + self.task.add_status_msg(msg="Node reports power on.", + error=False, + ctx=n.name, + ctx_type='node') self.logger.debug( - "Node %s reports powerstate already on. No action required" % n.name) + "Node %s reports powerstate already on. 
No action required" + % n.name) self.task.success(focus=n.name) continue - self.exec_redfish_command(n, session, RedfishSession.set_power, 'On') + self.exec_redfish_command(n, session, RedfishSession.set_power, + 'On') attempts = cfg.CONF.redfish_driver.power_state_change_max_retries while attempts > 0: self.logger.debug("Polling powerstate waiting for success.") - power_state = self.exec_redfish_command(n, session, RedfishSession.get_power) + power_state = self.exec_redfish_command( + n, session, RedfishSession.get_power) if power_state is not None and (power_state.get( 'powerstate', '') == 'On'): - self.logger.debug( - "Node %s reports powerstate of on" % n.name) - self.task.add_status_msg( - msg="Node reports power on.", - error=False, - ctx=n.name, - ctx_type='node') + self.logger.debug("Node %s reports powerstate of on" % + n.name) + self.task.add_status_msg(msg="Node reports power on.", + error=False, + ctx=n.name, + ctx_type='node') self.task.success(focus=n.name) break - time.sleep(cfg.CONF.redfish_driver.power_state_change_retry_interval) + time.sleep( + cfg.CONF.redfish_driver.power_state_change_retry_interval) attempts = attempts - 1 if power_state is not None and (power_state.get('powerstate', '') != 'On'): - self.task.add_status_msg( - msg="Node failed to power on.", - error=True, - ctx=n.name, - ctx_type='node') - self.logger.error("Giving up on Redfish command to %s" % n.name) + self.task.add_status_msg(msg="Node failed to power on.", + error=True, + ctx=n.name, + ctx_type='node') + self.logger.error("Giving up on Redfish command to %s" % + n.name) self.task.failure(focus=n.name) session.close_session() @@ -332,19 +342,20 @@ class PowerCycleNode(RedfishBaseAction): for n in node_list: self.logger.debug("Sending set_power = off command to %s" % n.name) - self.task.add_status_msg( - msg="Power cycling node via Redfish.", - error=False, - ctx=n.name, - ctx_type='node') + self.task.add_status_msg(msg="Power cycling node via Redfish.", + error=False, + ctx=n.name, + ctx_type='node') session = self.get_redfish_session(n) - self.exec_redfish_command(n, session, RedfishSession.set_power, 'ForceOff') + self.exec_redfish_command(n, session, RedfishSession.set_power, + 'ForceOff') # Wait for power state of off before booting back up attempts = cfg.CONF.redfish_driver.power_state_change_max_retries while attempts > 0: - power_state = self.exec_redfish_command(n, session, RedfishSession.get_power) + power_state = self.exec_redfish_command( + n, session, RedfishSession.get_power) if power_state is not None and power_state.get( 'powerstate', '') == 'Off': self.logger.debug("%s reports powerstate of off" % n.name) @@ -352,7 +363,8 @@ class PowerCycleNode(RedfishBaseAction): elif power_state is None: self.logger.debug( "No response on Redfish power query to %s" % n.name) - time.sleep(cfg.CONF.redfish_driver.power_state_change_retry_interval) + time.sleep( + cfg.CONF.redfish_driver.power_state_change_retry_interval) attempts = attempts - 1 if power_state.get('powerstate', '') != 'Off': @@ -368,12 +380,14 @@ class PowerCycleNode(RedfishBaseAction): break self.logger.debug("Sending set_power = on command to %s" % n.name) - self.exec_redfish_command(n, session, RedfishSession.set_power, 'On') + self.exec_redfish_command(n, session, RedfishSession.set_power, + 'On') attempts = cfg.CONF.redfish_driver.power_state_change_max_retries while attempts > 0: - power_state = self.exec_redfish_command(n, session, RedfishSession.get_power) + power_state = self.exec_redfish_command( + n, session, 
RedfishSession.get_power) if power_state is not None and power_state.get( 'powerstate', '') == 'On': self.logger.debug("%s reports powerstate of on" % n.name) @@ -381,16 +395,16 @@ class PowerCycleNode(RedfishBaseAction): elif power_state is None: self.logger.debug( "No response on Redfish power query to %s" % n.name) - time.sleep(cfg.CONF.redfish_driver.power_state_change_retry_interval) + time.sleep( + cfg.CONF.redfish_driver.power_state_change_retry_interval) attempts = attempts - 1 - if power_state is not None and (power_state.get('powerstate', - '') == 'On'): - self.task.add_status_msg( - msg="Node power cycle complete.", - error=False, - ctx=n.name, - ctx_type='node') + if power_state is not None and (power_state.get('powerstate', '') + == 'On'): + self.task.add_status_msg(msg="Node power cycle complete.", + error=False, + ctx=n.name, + ctx_type='node') self.task.success(focus=n.name) else: self.task.add_status_msg( @@ -421,16 +435,17 @@ class InterrogateOob(RedfishBaseAction): for n in node_list: try: - self.logger.debug( - "Interrogating node %s Redfish interface." % n.name) + self.logger.debug("Interrogating node %s Redfish interface." % + n.name) session = self.get_redfish_session(n) - powerstate = self.exec_redfish_command(n, session, RedfishSession.get_power) + powerstate = self.exec_redfish_command( + n, session, RedfishSession.get_power) session.close_session() if powerstate is None: raise errors.DriverError() self.task.add_status_msg( - msg="Redfish interface interrogation yielded powerstate %s" % - powerstate.get('powerstate'), + msg="Redfish interface interrogation yielded powerstate %s" + % powerstate.get('powerstate'), error=False, ctx=n.name, ctx_type='node') diff --git a/python/drydock_provisioner/drivers/oob/redfish_driver/client.py b/python/drydock_provisioner/drivers/oob/redfish_driver/client.py index 48225deb..0b9c92b6 100644 --- a/python/drydock_provisioner/drivers/oob/redfish_driver/client.py +++ b/python/drydock_provisioner/drivers/oob/redfish_driver/client.py @@ -21,10 +21,16 @@ from redfish.rest.v1 import ServerDownOrUnreachableError from redfish.rest.v1 import InvalidCredentialsError from redfish.rest.v1 import RetriesExhaustedError + class RedfishSession(object): """Redfish Client to provide OOB commands""" - def __init__(self, host, account, password, use_ssl=True, connection_retries=10): + def __init__(self, + host, + account, + password, + use_ssl=True, + connection_retries=10): try: if use_ssl: redfish_url = 'https://' + host @@ -57,7 +63,8 @@ class RedfishSession(object): # Assumption that only one system is available on Node if response.dict["Members@odata.count"] != 1: - raise RedfishException("Number of systems are more than one in the node") + raise RedfishException( + "Number of systems are more than one in the node") instance = response.dict["Members"][0]["@odata.id"] return instance @@ -152,7 +159,9 @@ class RedfishSession(object): """ instance = self.get_system_instance() - if powerstate not in ["On", "ForceOff", "PushPowerButton", "GracefulRestart"]: + if powerstate not in [ + "On", "ForceOff", "PushPowerButton", "GracefulRestart" + ]: raise RedfishException("Unsupported powerstate") current_state = self.get_power() @@ -160,9 +169,7 @@ class RedfishSession(object): (powerstate == "ForceOff" and current_state["powerstate"] == "Off"): return {'powerstate': powerstate} - payload = { - "ResetType": powerstate - } + payload = {"ResetType": powerstate} url = instance + "/Actions/ComputerSystem.Reset" response = self.redfish_client.post(path=url, 
body=payload) diff --git a/python/drydock_provisioner/drivers/oob/redfish_driver/driver.py b/python/drydock_provisioner/drivers/oob/redfish_driver/driver.py index 27905e8c..e7deafcb 100644 --- a/python/drydock_provisioner/drivers/oob/redfish_driver/driver.py +++ b/python/drydock_provisioner/drivers/oob/redfish_driver/driver.py @@ -48,19 +48,19 @@ class RedfishDriver(oob_driver.OobDriver): default=10, min=1, help='Maximum number of connection retries to Redfish server'), - cfg.IntOpt( - 'power_state_change_max_retries', - default=18, - min=1, - help='Maximum reties to wait for power state change'), + cfg.IntOpt('power_state_change_max_retries', + default=18, + min=1, + help='Maximum retries to wait for power state change'), cfg.IntOpt( 'power_state_change_retry_interval', default=10, - help='Polling interval in seconds between retries for power state change'), - cfg.BoolOpt( - 'use_ssl', - default=True, - help='Use SSL to communicate with Redfish API server'), + help= + 'Polling interval in seconds between retries for power state change' + ), + cfg.BoolOpt('use_ssl', + default=True, + help='Use SSL to communicate with Redfish API server'), ] oob_types_supported = ['redfish'] @@ -82,8 +82,8 @@ class RedfishDriver(oob_driver.OobDriver): def __init__(self, **kwargs): super().__init__(**kwargs) - cfg.CONF.register_opts( - RedfishDriver.redfish_driver_options, group=RedfishDriver.driver_key) + cfg.CONF.register_opts(RedfishDriver.redfish_driver_options, + group=RedfishDriver.driver_key) self.logger = logging.getLogger( config.config_mgr.conf.logging.oobdriver_logger_name) @@ -98,8 +98,9 @@ class RedfishDriver(oob_driver.OobDriver): if task.action not in self.supported_actions: self.logger.error("Driver %s doesn't support task action %s" % (self.driver_desc, task.action)) - raise errors.DriverError("Driver %s doesn't support task action %s" - % (self.driver_desc, task.action)) + raise errors.DriverError( + "Driver %s doesn't support task action %s" % + (self.driver_desc, task.action)) task.set_status(hd_fields.TaskStatus.Running) task.save() @@ -145,10 +146,9 @@ class RedfishDriver(oob_driver.OobDriver): task.failure() else: if f.exception(): - self.logger.error( - "Uncaught exception in subtask %s" % str( - uuid.UUID(bytes=t)), - exc_info=f.exception()) + self.logger.error("Uncaught exception in subtask %s" % + str(uuid.UUID(bytes=t)), + exc_info=f.exception()) task.align_result() task.bubble_results() task.set_status(hd_fields.TaskStatus.Complete) diff --git a/python/drydock_provisioner/drydock.py b/python/drydock_provisioner/drydock.py index 944a5573..cbcc1a2c 100644 --- a/python/drydock_provisioner/drydock.py +++ b/python/drydock_provisioner/drydock.py @@ -34,8 +34,10 @@ def start_drydock(enable_keystone=True): # Setup configuration parsing cli_options = [ - cfg.BoolOpt( - 'debug', short='d', default=False, help='Enable debug logging'), + cfg.BoolOpt('debug', + short='d', + default=False, + help='Enable debug logging'), ] config.config_mgr.conf.register_cli_opts(cli_options) @@ -43,8 +45,9 @@ def start_drydock(enable_keystone=True): config.config_mgr.conf(sys.argv[1:]) if config.config_mgr.conf.debug: - config.config_mgr.conf.set_override( - name='log_level', override='DEBUG', group='logging') + config.config_mgr.conf.set_override(name='log_level', + override='DEBUG', + group='logging') # Setup root logger logger = logging.getLogger( @@ -64,8 +67,7 @@ def start_drydock(enable_keystone=True): logger.propagate = False formatter = logging.Formatter( "%(asctime)s - %(levelname)s - %(user)s - 
%(req_id)s" - " - %(external_ctx)s - %(end_user)s - %(message)s" - ) + " - %(external_ctx)s - %(end_user)s - %(message)s") ch = logging.StreamHandler() ch.setFormatter(formatter) @@ -77,10 +79,9 @@ def start_drydock(enable_keystone=True): input_ingester = Ingester() input_ingester.enable_plugin(config.config_mgr.conf.plugins.ingester) - orchestrator = Orchestrator( - enabled_drivers=config.config_mgr.conf.plugins, - state_manager=state, - ingester=input_ingester) + orchestrator = Orchestrator(enabled_drivers=config.config_mgr.conf.plugins, + state_manager=state, + ingester=input_ingester) orch_thread = threading.Thread(target=orchestrator.watch_for_tasks) orch_thread.start() @@ -98,10 +99,9 @@ def start_drydock(enable_keystone=True): policy.policy_engine.register_policy() # Ensure that the policy_engine is initialized before starting the API - wsgi_callable = api.start_api( - state_manager=state, - ingester=input_ingester, - orchestrator=orchestrator) + wsgi_callable = api.start_api(state_manager=state, + ingester=input_ingester, + orchestrator=orchestrator) # Now that loggers are configured, log the effective config config.config_mgr.conf.log_opt_values( diff --git a/python/drydock_provisioner/drydock_client/client.py b/python/drydock_provisioner/drydock_client/client.py index dec4a3d5..c7b0dfee 100644 --- a/python/drydock_provisioner/drydock_client/client.py +++ b/python/drydock_provisioner/drydock_client/client.py @@ -192,9 +192,9 @@ class DrydockClient(object): raise errors.ClientUnauthorizedError( "Unauthorized access to %s, include valid token." % resp.url) elif resp.status_code == 403: - raise errors.ClientForbiddenError( - "Forbidden access to %s" % resp.url) + raise errors.ClientForbiddenError("Forbidden access to %s" % + resp.url) elif not resp.ok: - raise errors.ClientError( - "Error - received %d: %s" % (resp.status_code, resp.text), - code=resp.status_code) + raise errors.ClientError("Error - received %d: %s" % + (resp.status_code, resp.text), + code=resp.status_code) diff --git a/python/drydock_provisioner/drydock_client/session.py b/python/drydock_provisioner/drydock_client/session.py index f236e356..0a2dbfd5 100644 --- a/python/drydock_provisioner/drydock_client/session.py +++ b/python/drydock_provisioner/drydock_client/session.py @@ -91,8 +91,9 @@ class DrydockSession(object): url = self.base_url + endpoint self.logger.debug('GET ' + url) self.logger.debug('Query Params: ' + str(query)) - resp = self.__session.get( - url, params=query, timeout=self._timeout(timeout)) + resp = self.__session.get(url, + params=query, + timeout=self._timeout(timeout)) if resp.status_code == 401 and not auth_refresh: self.set_auth() @@ -121,21 +122,19 @@ class DrydockSession(object): self.logger.debug('POST ' + url) self.logger.debug('Query Params: ' + str(query)) if body is not None: - self.logger.debug( - "Sending POST with explicit body: \n%s" % body) - resp = self.__session.post( - self.base_url + endpoint, - params=query, - data=body, - timeout=self._timeout(timeout)) + self.logger.debug("Sending POST with explicit body: \n%s" % + body) + resp = self.__session.post(self.base_url + endpoint, + params=query, + data=body, + timeout=self._timeout(timeout)) else: - self.logger.debug( - "Sending POST with JSON body: \n%s" % str(data)) - resp = self.__session.post( - self.base_url + endpoint, - params=query, - json=data, - timeout=self._timeout(timeout)) + self.logger.debug("Sending POST with JSON body: \n%s" % + str(data)) + resp = self.__session.post(self.base_url + endpoint, + params=query, + 
json=data, + timeout=self._timeout(timeout)) if resp.status_code == 401 and not auth_refresh: self.set_auth() auth_refresh = True @@ -161,8 +160,9 @@ class DrydockSession(object): url = self.base_url + endpoint self.logger.debug('DELETE ' + url) self.logger.debug('Query Params: ' + str(query)) - resp = self.__session.delete( - url, params=query, timeout=self._timeout(timeout)) + resp = self.__session.delete(url, + params=query, + timeout=self._timeout(timeout)) if resp.status_code == 401 and not auth_refresh: self.set_auth() @@ -212,6 +212,7 @@ class DrydockSession(object): class KeystoneClient(object): + @staticmethod def get_endpoint(endpoint, ks_sess=None, diff --git a/python/drydock_provisioner/error.py b/python/drydock_provisioner/error.py index e5878d44..c24019c0 100644 --- a/python/drydock_provisioner/error.py +++ b/python/drydock_provisioner/error.py @@ -381,6 +381,7 @@ class InvalidSizeFormat(DriverError): class ApiError(Exception): + def __init__(self, msg, code=500): super().__init__(msg) self.message = msg diff --git a/python/drydock_provisioner/ingester/ingester.py b/python/drydock_provisioner/ingester/ingester.py index 34b4aa24..36c7935e 100644 --- a/python/drydock_provisioner/ingester/ingester.py +++ b/python/drydock_provisioner/ingester/ingester.py @@ -29,6 +29,7 @@ import drydock_provisioner.objects.bootaction as bootaction class Ingester(object): + def __init__(self): self.logger = logging.getLogger("drydock.ingester") self.registered_plugin = None @@ -50,8 +51,8 @@ class Ingester(object): klass = getattr(mod, classname) self.registered_plugin = klass() except Exception as ex: - self.logger.error( - "Could not enable plugin %s - %s" % (plugin, str(ex))) + self.logger.error("Could not enable plugin %s - %s" % + (plugin, str(ex))) if self.registered_plugin is None: self.logger.error("Could not enable at least one plugin") @@ -89,8 +90,8 @@ class Ingester(object): "Ingester:ingest_data ingesting design parts for design %s" % design_ref) design_blob = design_state.get_design_documents(design_ref) - self.logger.debug( - "Ingesting design data of %d bytes." % len(design_blob)) + self.logger.debug("Ingesting design data of %d bytes." 
% + len(design_blob)) try: status, design_items = self.registered_plugin.ingest_data( @@ -100,8 +101,8 @@ class Ingester(object): "Ingester:ingest_data - Unexpected error processing data - %s" % (str(vex))) return None, None - self.logger.debug("Ingester:ingest_data parsed %s design parts" % str( - len(design_items))) + self.logger.debug("Ingester:ingest_data parsed %s design parts" % + str(len(design_items))) design_data = objects.SiteDesign() for m in design_items: if context is not None: diff --git a/python/drydock_provisioner/ingester/plugins/__init__.py b/python/drydock_provisioner/ingester/plugins/__init__.py index 2a488453..66282670 100644 --- a/python/drydock_provisioner/ingester/plugins/__init__.py +++ b/python/drydock_provisioner/ingester/plugins/__init__.py @@ -19,6 +19,7 @@ import logging class IngesterPlugin(object): + def __init__(self): self.log = logging.Logger('ingester') return diff --git a/python/drydock_provisioner/ingester/plugins/deckhand.py b/python/drydock_provisioner/ingester/plugins/deckhand.py index 4a8ae033..400249d0 100644 --- a/python/drydock_provisioner/ingester/plugins/deckhand.py +++ b/python/drydock_provisioner/ingester/plugins/deckhand.py @@ -37,7 +37,9 @@ cache_opts = { cache = CacheManager(**parse_cache_config_options(cache_opts)) + class DeckhandIngester(IngesterPlugin): + def __init__(self): super().__init__() self.logger = logging.getLogger('drydock.ingester.deckhand') @@ -54,6 +56,7 @@ class DeckhandIngester(IngesterPlugin): :returns: a tuple of a status response and a list of parsed objects from drydock_provisioner.objects """ + def local_parse(): return self.parse_docs(kwargs.get('content')) @@ -66,7 +69,9 @@ class DeckhandIngester(IngesterPlugin): results = local_cache.get(key=hv, createfunc=local_parse) parse_status, models = results except Exception as ex: - self.logger.debug("Error parsing design - hash %s", hv, exc_info=ex) + self.logger.debug("Error parsing design - hash %s", + hv, + exc_info=ex) raise ex else: raise ValueError('Missing parameter "content"') @@ -103,8 +108,8 @@ class DeckhandIngester(IngesterPlugin): (schema_ns, doc_kind, doc_version) = d.get('schema', '').split('/') except ValueError as ex: - self.logger.error( - "Error with document structure.", exc_info=ex) + self.logger.error("Error with document structure.", + exc_info=ex) self.logger.debug("Error document\n%s" % yaml.dump(d)) continue if schema_ns == 'drydock': @@ -230,9 +235,9 @@ class DeckhandIngester(IngesterPlugin): tag_model.definition = t.get('definition', '') if tag_model.type not in ['lshw_xpath']: - raise errors.IngesterError( - 'Unknown definition_type in ' - 'tag_definition instance: %s' % (t.definition_type)) + raise errors.IngesterError('Unknown definition_type in ' + 'tag_definition instance: %s' % + (t.definition_type)) model.tag_definitions.append(tag_model) auth_keys = data.get('authorized_keys', []) @@ -419,8 +424,9 @@ class DeckhandIngester(IngesterPlugin): model.hugepages_confs = objects.HugepagesConfList() for c, d in data.get('hugepages', {}).items(): - conf = objects.HugepagesConf( - name=c, size=d.get('size'), count=d.get('count')) + conf = objects.HugepagesConf(name=c, + size=d.get('size'), + count=d.get('count')) model.hugepages_confs.append(conf) return model @@ -589,8 +595,8 @@ class DeckhandIngester(IngesterPlugin): if 'sriov' in v: int_model.sriov = True int_model.vf_count = v.get('sriov', {}).get('vf_count', 0) - int_model.trustedmode = v.get('sriov', {}).get( - 'trustedmode', False) + int_model.trustedmode = v.get('sriov', + 
{}).get('trustedmode', False) model.interfaces.append(int_model) @@ -705,8 +711,8 @@ class DeckhandIngester(IngesterPlugin): self.logger.warning( "Duplicate document schemas found for document kind %s." % schema_for) - self.logger.debug( - "Loaded schema for document kind %s." % schema_for) + self.logger.debug("Loaded schema for document kind %s." % + schema_for) self.v1_doc_schemas[schema_for] = schema.get('data') f.close() diff --git a/python/drydock_provisioner/ingester/plugins/yaml.py b/python/drydock_provisioner/ingester/plugins/yaml.py index fdda7fe4..0fb6fba2 100644 --- a/python/drydock_provisioner/ingester/plugins/yaml.py +++ b/python/drydock_provisioner/ingester/plugins/yaml.py @@ -28,6 +28,7 @@ from drydock_provisioner.ingester.plugins import IngesterPlugin class YamlIngester(IngesterPlugin): + def __init__(self): super().__init__() self.logger = logging.getLogger('drydock.ingester.yaml') @@ -96,8 +97,10 @@ class YamlIngester(IngesterPlugin): ctx = d.get('metadata').get('name') else: ctx = 'Unknown' - ps.add_status_msg( - msg=msg, error=True, ctx_type='document', ctx=ctx) + ps.add_status_msg(msg=msg, + error=True, + ctx_type='document', + ctx=ctx) ps.set_status(hd_fields.ValidationResult.Failure) except Exception as ex: msg = "Unexpected error processing document: %s" % str(ex) @@ -106,8 +109,10 @@ class YamlIngester(IngesterPlugin): ctx = d.get('metadata').get('name') else: ctx = 'Unknown' - ps.add_status_msg( - msg=msg, error=True, ctx_type='document', ctx=ctx) + ps.add_status_msg(msg=msg, + error=True, + ctx_type='document', + ctx=ctx) ps.set_status(hd_fields.ValidationResult.Failure) elif api.startswith('promenade/'): (foo, api_version) = api.split('/') @@ -193,9 +198,9 @@ class YamlIngester(IngesterPlugin): tag_model.definition = t.get('definition', '') if tag_model.type not in ['lshw_xpath']: - raise errors.IngesterError( - 'Unknown definition_type in ' - 'tag_definition instance: %s' % (t.definition_type)) + raise errors.IngesterError('Unknown definition_type in ' + 'tag_definition instance: %s' % + (t.definition_type)) model.tag_definitions.append(tag_model) auth_keys = data.get('authorized_keys', []) @@ -637,8 +642,8 @@ class YamlIngester(IngesterPlugin): self.logger.warning( "Duplicate document schemas found for document kind %s." % schema_for) - self.logger.debug( - "Loaded schema for document kind %s." % schema_for) + self.logger.debug("Loaded schema for document kind %s." % + schema_for) self.v1_doc_schemas[schema_for] = schema f.close() diff --git a/python/drydock_provisioner/objects/__init__.py b/python/drydock_provisioner/objects/__init__.py index 0555e56f..52457aeb 100644 --- a/python/drydock_provisioner/objects/__init__.py +++ b/python/drydock_provisioner/objects/__init__.py @@ -104,10 +104,10 @@ class Utils(object): effective_list.extend( [x for x in child_list if not x.startswith("!")]) - effective_list.extend( - [x for x in parent_list - if ("!" + x) not in child_list - and x not in effective_list]) + effective_list.extend([ + x for x in parent_list + if ("!" 
+ x) not in child_list and x not in effective_list + ]) except TypeError: raise TypeError("Error iterating list argument") diff --git a/python/drydock_provisioner/objects/base.py b/python/drydock_provisioner/objects/base.py index 69f7edb9..68b6f742 100644 --- a/python/drydock_provisioner/objects/base.py +++ b/python/drydock_provisioner/objects/base.py @@ -91,6 +91,7 @@ class DrydockPersistentObject(base.VersionedObject): class DrydockObjectListBase(base.ObjectListBase): + def __init__(self, **kwargs): super(DrydockObjectListBase, self).__init__(**kwargs) diff --git a/python/drydock_provisioner/objects/bootaction.py b/python/drydock_provisioner/objects/bootaction.py index b04326b4..705fb633 100644 --- a/python/drydock_provisioner/objects/bootaction.py +++ b/python/drydock_provisioner/objects/bootaction.py @@ -34,18 +34,13 @@ class BootAction(base.DrydockPersistentObject, base.DrydockObject): VERSION = '1.0' fields = { - 'name': - ovo_fields.StringField(), - 'source': - hd_fields.ModelSourceField(nullable=False), - 'asset_list': - ovo_fields.ObjectField('BootActionAssetList', nullable=False), - 'node_filter': - ovo_fields.ObjectField('NodeFilterSet', nullable=True), - 'target_nodes': - ovo_fields.ListOfStringsField(nullable=True), - 'signaling': - ovo_fields.BooleanField(default=True), + 'name': ovo_fields.StringField(), + 'source': hd_fields.ModelSourceField(nullable=False), + 'asset_list': ovo_fields.ObjectField('BootActionAssetList', + nullable=False), + 'node_filter': ovo_fields.ObjectField('NodeFilterSet', nullable=True), + 'target_nodes': ovo_fields.ListOfStringsField(nullable=True), + 'signaling': ovo_fields.BooleanField(default=True), } def __init__(self, **kwargs): @@ -160,8 +155,9 @@ class BootActionAsset(base.DrydockObject): action_key, design_ref) if self.location is not None: - rendered_location = self.execute_pipeline( - self.location, self.location_pipeline, tpl_ctx=tpl_ctx) + rendered_location = self.execute_pipeline(self.location, + self.location_pipeline, + tpl_ctx=tpl_ctx) data_block = self.resolve_asset_location(rendered_location) if self.type == hd_fields.BootactionAssetType.PackageList: self._parse_package_list(data_block) @@ -169,8 +165,9 @@ class BootActionAsset(base.DrydockObject): data_block = self.data.encode('utf-8') if self.type != hd_fields.BootactionAssetType.PackageList: - value = self.execute_pipeline( - data_block, self.data_pipeline, tpl_ctx=tpl_ctx) + value = self.execute_pipeline(data_block, + self.data_pipeline, + tpl_ctx=tpl_ctx) if isinstance(value, str): value = value.encode('utf-8') @@ -222,9 +219,9 @@ class BootActionAsset(base.DrydockObject): :param design_ref: The design reference representing ``site_design`` """ - return dict( - node=self._get_node_context(nodename, site_design), - action=self._get_action_context(action_id, action_key, design_ref)) + return dict(node=self._get_node_context(nodename, site_design), + action=self._get_action_context(action_id, action_key, + design_ref)) def _get_action_context(self, action_id, action_key, design_ref): """Create the action-specific context items for template rendering. 
@@ -233,11 +230,10 @@ class BootActionAsset(base.DrydockObject): :param action_key: random key of this boot action :param design_ref: Design reference representing the site design """ - return dict( - action_id=ulid2.ulid_to_base32(action_id), - action_key=action_key.hex(), - report_url=config.config_mgr.conf.bootactions.report_url, - design_ref=design_ref) + return dict(action_id=ulid2.ulid_to_base32(action_id), + action_key=action_key.hex(), + report_url=config.config_mgr.conf.bootactions.report_url, + design_ref=design_ref) def _get_node_context(self, nodename, site_design): """Create the node-specific context items for template rendering. @@ -246,14 +242,13 @@ class BootActionAsset(base.DrydockObject): :param site_design: full site design """ node = site_design.get_baremetal_node(nodename) - return dict( - hostname=nodename, - domain=node.get_domain(site_design), - tags=[t for t in node.tags], - labels={k: v - for (k, v) in node.owner_data.items()}, - network=self._get_node_network_context(node, site_design), - interfaces=self._get_node_interface_context(node)) + return dict(hostname=nodename, + domain=node.get_domain(site_design), + tags=[t for t in node.tags], + labels={k: v + for (k, v) in node.owner_data.items()}, + network=self._get_node_network_context(node, site_design), + interfaces=self._get_node_interface_context(node)) def _get_node_network_context(self, node, site_design): """Create a node's network configuration context. @@ -298,8 +293,8 @@ class BootActionAsset(base.DrydockObject): return ReferenceResolver.resolve_reference(asset_url) except Exception as ex: raise errors.InvalidAssetLocation( - "Unable to resolve asset reference %s: %s" % (asset_url, - str(ex))) + "Unable to resolve asset reference %s: %s" % + (asset_url, str(ex))) def execute_pipeline(self, data, pipeline, tpl_ctx=None): """Execute a pipeline against a data element. 
diff --git a/python/drydock_provisioner/objects/fields.py b/python/drydock_provisioner/objects/fields.py index 87cf0f4d..3b4a636c 100644 --- a/python/drydock_provisioner/objects/fields.py +++ b/python/drydock_provisioner/objects/fields.py @@ -16,6 +16,7 @@ from oslo_versionedobjects import fields class BaseDrydockEnum(fields.Enum): + def __init__(self): super(BaseDrydockEnum, self).__init__(valid_values=self.__class__.ALL) diff --git a/python/drydock_provisioner/objects/hostprofile.py b/python/drydock_provisioner/objects/hostprofile.py index 35623c71..3f0de69a 100644 --- a/python/drydock_provisioner/objects/hostprofile.py +++ b/python/drydock_provisioner/objects/hostprofile.py @@ -116,8 +116,9 @@ class HostProfile(base.DrydockPersistentObject, base.DrydockObject): for f in inheritable_field_list: setattr( self, f, - objects.Utils.apply_field_inheritance( - getattr(self, f, None), getattr(parent, f, None))) + objects.Utils.apply_field_inheritance(getattr(self, f, None), + getattr(parent, f, + None))) # Now compute inheritance for complex types self.oob_parameters = objects.Utils.merge_dicts( @@ -310,8 +311,8 @@ class HostVolumeGroup(base.DrydockObject): fields = { 'name': obj_fields.StringField(), 'vg_uuid': obj_fields.StringField(nullable=True), - 'logical_volumes': obj_fields.ObjectField( - 'HostVolumeList', nullable=True), + 'logical_volumes': obj_fields.ObjectField('HostVolumeList', + nullable=True), } def __init__(self, **kwargs): @@ -431,8 +432,8 @@ class HostStorageDevice(base.DrydockObject): 'name': obj_fields.StringField(), 'volume_group': obj_fields.StringField(nullable=True), 'labels': obj_fields.DictOfStringsField(nullable=True), - 'partitions': obj_fields.ObjectField( - 'HostPartitionList', nullable=True), + 'partitions': obj_fields.ObjectField('HostPartitionList', + nullable=True), } def __init__(self, **kwargs): @@ -535,28 +536,18 @@ class HostPartition(base.DrydockObject): VERSION = '1.0' fields = { - 'name': - obj_fields.StringField(), - 'source': - hd_fields.ModelSourceField(), - 'bootable': - obj_fields.BooleanField(default=False), - 'volume_group': - obj_fields.StringField(nullable=True), - 'part_uuid': - obj_fields.UUIDField(nullable=True), - 'size': - obj_fields.StringField(nullable=True), - 'mountpoint': - obj_fields.StringField(nullable=True), - 'fstype': - obj_fields.StringField(nullable=True, default='ext4'), - 'mount_options': - obj_fields.StringField(nullable=True, default='defaults'), - 'fs_uuid': - obj_fields.UUIDField(nullable=True), - 'fs_label': - obj_fields.StringField(nullable=True), + 'name': obj_fields.StringField(), + 'source': hd_fields.ModelSourceField(), + 'bootable': obj_fields.BooleanField(default=False), + 'volume_group': obj_fields.StringField(nullable=True), + 'part_uuid': obj_fields.UUIDField(nullable=True), + 'size': obj_fields.StringField(nullable=True), + 'mountpoint': obj_fields.StringField(nullable=True), + 'fstype': obj_fields.StringField(nullable=True, default='ext4'), + 'mount_options': obj_fields.StringField(nullable=True, + default='defaults'), + 'fs_uuid': obj_fields.UUIDField(nullable=True), + 'fs_label': obj_fields.StringField(nullable=True), } def __init__(self, **kwargs): @@ -672,24 +663,16 @@ class HostVolume(base.DrydockObject): VERSION = '1.0' fields = { - 'name': - obj_fields.StringField(), - 'source': - hd_fields.ModelSourceField(), - 'lv_uuid': - obj_fields.UUIDField(nullable=True), - 'size': - obj_fields.StringField(nullable=True), - 'mountpoint': - obj_fields.StringField(nullable=True), - 'fstype': - 
obj_fields.StringField(nullable=True, default='ext4'), - 'mount_options': - obj_fields.StringField(nullable=True, default='defaults'), - 'fs_uuid': - obj_fields.UUIDField(nullable=True), - 'fs_label': - obj_fields.StringField(nullable=True), + 'name': obj_fields.StringField(), + 'source': hd_fields.ModelSourceField(), + 'lv_uuid': obj_fields.UUIDField(nullable=True), + 'size': obj_fields.StringField(nullable=True), + 'mountpoint': obj_fields.StringField(nullable=True), + 'fstype': obj_fields.StringField(nullable=True, default='ext4'), + 'mount_options': obj_fields.StringField(nullable=True, + default='defaults'), + 'fs_uuid': obj_fields.UUIDField(nullable=True), + 'fs_label': obj_fields.StringField(nullable=True), } def __init__(self, **kwargs): diff --git a/python/drydock_provisioner/objects/node.py b/python/drydock_provisioner/objects/node.py index 348a4a2c..118fbbcb 100644 --- a/python/drydock_provisioner/objects/node.py +++ b/python/drydock_provisioner/objects/node.py @@ -60,8 +60,8 @@ class BaremetalNode(drydock_provisioner.objects.hostprofile.HostProfile): self.source = hd_fields.ModelSource.Compiled self.resolve_kernel_params(site_design) if resolve_aliases: - self.logger.debug( - "Resolving device aliases on node %s" % self.name) + self.logger.debug("Resolving device aliases on node %s" % + self.name) self.apply_logicalnames(site_design, state_manager) return @@ -261,8 +261,7 @@ class BaremetalNode(drydock_provisioner.objects.hostprofile.HostProfile): """ if "regexp:" in address: self.logger.info( - "Regexp: prefix has been detected in address: %s" % - (address)) + "Regexp: prefix has been detected in address: %s" % (address)) address_regexp = address.replace("regexp:", "") nodes = xml_root.findall(".//node") logicalnames = [] @@ -272,8 +271,8 @@ class BaremetalNode(drydock_provisioner.objects.hostprofile.HostProfile): if node.get('class') == "network": address = node.find('businfo').text.replace("pci@", "") self.logger.debug( - "A network device PCI address found. Address=%s. Checking for regexp %s match..." % - (address, address_regexp)) + "A network device PCI address found. Address=%s. Checking for regexp %s match..." + % (address, address_regexp)) if re.match(address_regexp, address): logicalnames.append(node.find('logicalname').text) addresses.append(address) @@ -282,26 +281,25 @@ class BaremetalNode(drydock_provisioner.objects.hostprofile.HostProfile): (address, address_regexp)) else: self.logger.debug( - "A network device with PCI address=%s does not match the regex %s." % - (address, address_regexp)) + "A network device with PCI address=%s does not match the regex %s." 
+ % (address, address_regexp)) if len(logicalnames) >= 1 and logicalnames[0]: if len(logicalnames) > 1: - self.logger.info( - "Multiple nodes found for businfo=%s@%s" % - (bus_type, address_regexp)) + self.logger.info("Multiple nodes found for businfo=%s@%s" % + (bus_type, address_regexp)) for logicalname in reversed(logicalnames[0].split("/")): address = addresses[0] self.logger.info( "Logicalname build dict: node_name = %s, alias_name = %s, " "bus_type = %s, address = %s, to logicalname = %s" % (self.get_name(), alias_name, bus_type, address, - logicalname)) + logicalname)) return logicalname else: - self.logger.info( - "No prefix has been detected in address: %s" % - (address)) - nodes = xml_root.findall(".//node[businfo='" + bus_type + "@" + address + "'].logicalname") + self.logger.info("No prefix has been detected in address: %s" % + (address)) + nodes = xml_root.findall(".//node[businfo='" + bus_type + "@" + + address + "'].logicalname") if len(nodes) >= 1 and nodes[0].text: if (len(nodes) > 1): self.logger.info("Multiple nodes found for businfo=%s@%s" % @@ -311,7 +309,7 @@ class BaremetalNode(drydock_provisioner.objects.hostprofile.HostProfile): "Logicalname build dict: node_name = %s, alias_name = %s, " "bus_type = %s, address = %s, to logicalname = %s" % (self.get_name(), alias_name, bus_type, address, - logicalname)) + logicalname)) return logicalname self.logger.debug( "Logicalname build dict: alias_name = %s, bus_type = %s, address = %s, not found" @@ -327,8 +325,8 @@ class BaremetalNode(drydock_provisioner.objects.hostprofile.HostProfile): """ logicalnames = {} - results = state_manager.get_build_data( - node_name=self.get_name(), latest=True) + results = state_manager.get_build_data(node_name=self.get_name(), + latest=True) xml_data = None for result in results: if result.generator == "lshw": @@ -351,8 +349,8 @@ class BaremetalNode(drydock_provisioner.objects.hostprofile.HostProfile): "resolving logical names for node %s", self.get_name()) raise else: - self.logger.info( - "No Build Data found for node_name %s" % (self.get_name())) + self.logger.info("No Build Data found for node_name %s" % + (self.get_name())) self.logicalnames = logicalnames diff --git a/python/drydock_provisioner/objects/rack.py b/python/drydock_provisioner/objects/rack.py index 165a3c00..f4d19177 100644 --- a/python/drydock_provisioner/objects/rack.py +++ b/python/drydock_provisioner/objects/rack.py @@ -29,8 +29,8 @@ class Rack(base.DrydockPersistentObject, base.DrydockObject): 'name': obj_fields.StringField(nullable=False), 'site': obj_fields.StringField(nullable=False), 'source': hd_fields.ModelSourceField(nullable=False), - 'tor_switches': obj_fields.ObjectField( - 'TorSwitchList', nullable=False), + 'tor_switches': obj_fields.ObjectField('TorSwitchList', + nullable=False), 'location': obj_fields.DictOfStringsField(nullable=False), 'local_networks': obj_fields.ListOfStringsField(nullable=True), } diff --git a/python/drydock_provisioner/objects/site.py b/python/drydock_provisioner/objects/site.py index 021d7a33..c3bb75c3 100644 --- a/python/drydock_provisioner/objects/site.py +++ b/python/drydock_provisioner/objects/site.py @@ -208,8 +208,8 @@ class SiteDesign(base.DrydockPersistentObject, base.DrydockObject): if n.get_id() == network_key: return n - raise errors.DesignError( - "Network %s not found in design state" % network_key) + raise errors.DesignError("Network %s not found in design state" % + network_key) def add_network_link(self, new_network_link): if new_network_link is None: @@ -226,8 +226,8 
@@ class SiteDesign(base.DrydockPersistentObject, base.DrydockObject): if network_link.get_id() == link_key: return network_link - raise errors.DesignError( - "NetworkLink %s not found in design state" % link_key) + raise errors.DesignError("NetworkLink %s not found in design state" % + link_key) def add_rack(self, new_rack): if new_rack is None: @@ -243,8 +243,8 @@ class SiteDesign(base.DrydockPersistentObject, base.DrydockObject): for r in self.racks: if r.get_id() == rack_key: return r - raise errors.DesignError( - "Rack %s not found in design state" % rack_key) + raise errors.DesignError("Rack %s not found in design state" % + rack_key) def add_bootaction(self, new_ba): """Add a bootaction definition to this site design. @@ -265,8 +265,8 @@ class SiteDesign(base.DrydockPersistentObject, base.DrydockObject): for ba in self.bootactions: if ba.get_id() == ba_key: return ba - raise errors.DesignError( - "BootAction %s not found in design state" % ba_key) + raise errors.DesignError("BootAction %s not found in design state" % + ba_key) def add_host_profile(self, new_host_profile): if new_host_profile is None: @@ -283,8 +283,8 @@ class SiteDesign(base.DrydockPersistentObject, base.DrydockObject): if p.get_id() == profile_key: return p - raise errors.DesignError( - "HostProfile %s not found in design state" % profile_key) + raise errors.DesignError("HostProfile %s not found in design state" % + profile_key) def add_hardware_profile(self, new_hardware_profile): if new_hardware_profile is None: @@ -319,8 +319,8 @@ class SiteDesign(base.DrydockPersistentObject, base.DrydockObject): if n.get_id() == node_key: return n - raise errors.DesignError( - "BaremetalNode %s not found in design state" % node_key) + raise errors.DesignError("BaremetalNode %s not found in design state" % + node_key) def add_promenade_config(self, prom_conf): if self.prom_configs is None: diff --git a/python/drydock_provisioner/objects/task.py b/python/drydock_provisioner/objects/task.py index 5fc910c2..43186a8b 100644 --- a/python/drydock_provisioner/objects/task.py +++ b/python/drydock_provisioner/objects/task.py @@ -91,11 +91,10 @@ class Task(object): self.result.failures) > 0): if not max_attempts or (max_attempts and self.retry < max_attempts): - self.add_status_msg( - msg="Retrying task for failed entities.", - error=False, - ctx='NA', - ctx_type='NA') + self.add_status_msg(msg="Retrying task for failed entities.", + error=False, + ctx='NA', + ctx_type='NA') self.retry = self.retry + 1 if len(self.result.successes) > 0: self.result.status = hd_fields.ActionResult.Success @@ -104,11 +103,10 @@ class Task(object): self.save() return True else: - self.add_status_msg( - msg="Retry requested, out of attempts.", - error=False, - ctx='NA', - ctx_type='NA') + self.add_status_msg(msg="Retry requested, out of attempts.", + error=False, + ctx='NA', + ctx_type='NA') raise errors.MaxRetriesReached("Retries reached max attempts.") else: return False @@ -182,12 +180,11 @@ class Task(object): raise errors.OrchestratorError("Cannot add subtask for parent" " marked for termination") if self.statemgr.add_subtask(self.task_id, subtask.task_id): - self.add_status_msg( - msg="Started subtask %s for action %s" % (str( - subtask.get_id()), subtask.action), - error=False, - ctx=str(self.get_id()), - ctx_type='task') + self.add_status_msg(msg="Started subtask %s for action %s" % + (str(subtask.get_id()), subtask.action), + error=False, + ctx=str(self.get_id()), + ctx_type='task') self.subtask_id_list.append(subtask.task_id) subtask.parent_task_id 
= self.task_id subtask.save() @@ -261,8 +258,8 @@ class Task(object): :param action_filter: string action name to filter subtasks on """ - self.logger.debug( - "Bubbling subtask results up to task %s." % str(self.task_id)) + self.logger.debug("Bubbling subtask results up to task %s." % + str(self.task_id)) self.result.successes = [] self.result.failures = [] for st in self.statemgr.get_complete_subtasks(self.task_id): @@ -340,13 +337,12 @@ class Task(object): msg_list = task_result.message_list for m in msg_list: - self.add_status_msg( - msg=m.msg, - error=m.error, - ctx_type=m.ctx_type, - ctx=m.ctx, - ts=m.ts, - **m.extra) + self.add_status_msg(msg=m.msg, + error=m.error, + ctx_type=m.ctx_type, + ctx=m.ctx, + ts=m.ts, + **m.extra) def to_db(self, include_id=True): """Convert this instance to a dictionary for use persisting to a db. @@ -666,9 +662,8 @@ class TaskStatusMessage(object): :param d: dictionary of values """ - i = TaskStatusMessage( - d.get('message', None), d.get('error'), d.get('context_type'), - d.get('context')) + i = TaskStatusMessage(d.get('message', None), d.get('error'), + d.get('context_type'), d.get('context')) if 'extra' in d: i.extra = d.get('extra') i.ts = d.get('ts', None) diff --git a/python/drydock_provisioner/objects/validation.py b/python/drydock_provisioner/objects/validation.py index 2b408050..8feb9ec8 100644 --- a/python/drydock_provisioner/objects/validation.py +++ b/python/drydock_provisioner/objects/validation.py @@ -123,8 +123,8 @@ class DocumentReference(base.DrydockObject): def __hash__(self): """Override default hashing function.""" - return hash( - str(self.doc_type), str(self.doc_schema), str(self.doc_name)) + return hash(str(self.doc_type), str(self.doc_schema), + str(self.doc_name)) def to_dict(self): """Serialize to a dictionary for further serialization.""" diff --git a/python/drydock_provisioner/orchestrator/actions/orchestrator.py b/python/drydock_provisioner/orchestrator/actions/orchestrator.py index 1632e80b..54cca6d2 100644 --- a/python/drydock_provisioner/orchestrator/actions/orchestrator.py +++ b/python/drydock_provisioner/orchestrator/actions/orchestrator.py @@ -72,8 +72,8 @@ class BaseAction(object): if len(target_nodes) > 1: self.logger.info( - "Found multiple target nodes in task %s, splitting..." % str( - self.task.get_id())) + "Found multiple target nodes in task %s, splitting..." 
% + str(self.task.get_id())) split_tasks = dict() with concurrent.futures.ThreadPoolExecutor(max_workers=64) as te: @@ -101,8 +101,8 @@ class BaseAction(object): :param timeout: The number of seconds to wait for all Futures to complete :param bubble: Whether to bubble results from collected subtasks """ - finished, timed_out = concurrent.futures.wait( - subtask_futures.values(), timeout=timeout) + finished, timed_out = concurrent.futures.wait(subtask_futures.values(), + timeout=timeout) for k, v in subtask_futures.items(): if not v.done(): @@ -116,8 +116,8 @@ class BaseAction(object): else: if v.exception(): self.logger.error( - "Uncaught excetion in subtask %s future:" % str( - uuid.UUID(bytes=k)), + "Uncaught excetion in subtask %s future:" % + str(uuid.UUID(bytes=k)), exc_info=v.exception()) st = self.state_manager.get_task(uuid.UUID(bytes=k)) st.bubble_results() @@ -184,16 +184,20 @@ class Noop(BaseAction): self.logger.debug("Terminating action.") self.task.set_status(hd_fields.TaskStatus.Terminated) self.task.failure() - self.task.add_status_msg( - msg="Action terminated.", ctx_type='NA', ctx='NA', error=False) + self.task.add_status_msg(msg="Action terminated.", + ctx_type='NA', + ctx='NA', + error=False) else: self.logger.debug("Marked task as successful.") self.task.set_status(hd_fields.TaskStatus.Complete) target_nodes = self.orchestrator.get_target_nodes(self.task) for n in target_nodes: self.task.success(focus=n.name) - self.task.add_status_msg( - msg="Noop action.", ctx_type='NA', ctx='NA', error=False) + self.task.add_status_msg(msg="Noop action.", + ctx_type='NA', + ctx='NA', + error=False) self.task.save() self.logger.debug("Saved task state.") self.logger.debug("Finished Noop Action.") @@ -226,11 +230,10 @@ class DestroyNodes(BaseAction): target_nodes = self.orchestrator.get_target_nodes(self.task) if not target_nodes: - self.task.add_status_msg( - msg="No nodes in scope, no work to do.", - error=False, - ctx='NA', - ctx_type='NA') + self.task.add_status_msg(msg="No nodes in scope, no work to do.", + error=False, + ctx='NA', + ctx_type='NA') self.task.success() self.task.set_status(hd_fields.TaskStatus.Complete) self.task.save() @@ -325,11 +328,11 @@ class VerifySite(BaseAction): node_driver_task = self.state_manager.get_task( node_driver_task.get_id()) - self.task.add_status_msg( - msg="Collected subtask %s" % str(node_driver_task.get_id()), - error=False, - ctx=str(node_driver_task.get_id()), - ctx_type='task') + self.task.add_status_msg(msg="Collected subtask %s" % + str(node_driver_task.get_id()), + error=False, + ctx=str(node_driver_task.get_id()), + ctx_type='task') self.task = self.state_manager.get_task(self.task.get_id()) self.task.set_status(hd_fields.TaskStatus.Complete) @@ -386,11 +389,11 @@ class PrepareSite(BaseAction): driver.execute_task(config_prov_task.get_id()) - self.task.add_status_msg( - msg="Collected subtask %s" % str(config_prov_task.get_id()), - error=False, - ctx=str(config_prov_task.get_id()), - ctx_type='task') + self.task.add_status_msg(msg="Collected subtask %s" % + str(config_prov_task.get_id()), + error=False, + ctx=str(config_prov_task.get_id()), + ctx_type='task') self.logger.info("Node driver task %s:%s is complete." 
% (config_prov_task.get_id(), config_prov_task.action)) @@ -410,13 +413,13 @@ class PrepareSite(BaseAction): driver.execute_task(site_network_task.get_id()) - self.task.add_status_msg( - msg="Collected subtask %s" % str(site_network_task.get_id()), - error=False, - ctx=str(site_network_task.get_id()), - ctx_type='task') - self.logger.info( - "Node driver task %s complete" % (site_network_task.get_id())) + self.task.add_status_msg(msg="Collected subtask %s" % + str(site_network_task.get_id()), + error=False, + ctx=str(site_network_task.get_id()), + ctx_type='task') + self.logger.info("Node driver task %s complete" % + (site_network_task.get_id())) def step_usercredentials(self, driver): """Run the ConfigureUserCredentials step of this action. @@ -434,13 +437,13 @@ class PrepareSite(BaseAction): driver.execute_task(user_creds_task.get_id()) - self.task.add_status_msg( - msg="Collected subtask %s" % str(user_creds_task.get_id()), - error=False, - ctx=str(user_creds_task.get_id()), - ctx_type='task') - self.logger.info( - "Node driver task %s complete" % (user_creds_task.get_id())) + self.task.add_status_msg(msg="Collected subtask %s" % + str(user_creds_task.get_id()), + error=False, + ctx=str(user_creds_task.get_id()), + ctx_type='task') + self.logger.info("Node driver task %s complete" % + (user_creds_task.get_id())) class VerifyNodes(BaseAction): @@ -504,19 +507,18 @@ class VerifyNodes(BaseAction): try: self._collect_subtask_futures( task_futures, - timeout=( - config.config_mgr.conf.timeouts.drydock_timeout * 60)) - self.logger.debug( - "Collected subtasks for task %s" % str(self.task.get_id())) + timeout=(config.config_mgr.conf.timeouts.drydock_timeout + * 60)) + self.logger.debug("Collected subtasks for task %s" % + str(self.task.get_id())) except errors.CollectSubtaskTimeout as ex: self.logger.warning(str(ex)) else: # no target nodes - self.task.add_status_msg( - msg="No nodes in scope, no work to do.", - error=False, - ctx='NA', - ctx_type='NA') + self.task.add_status_msg(msg="No nodes in scope, no work to do.", + error=False, + ctx='NA', + ctx_type='NA') self.task.success() # Set task complete and persist that info. @@ -554,11 +556,10 @@ class PrepareNodes(BaseAction): target_nodes = self.orchestrator.get_target_nodes(self.task) if not target_nodes: - self.task.add_status_msg( - msg="No nodes in scope, no work to do.", - error=False, - ctx='NA', - ctx_type='NA') + self.task.add_status_msg(msg="No nodes in scope, no work to do.", + error=False, + ctx='NA', + ctx_type='NA') self.task.success() self.task.set_status(hd_fields.TaskStatus.Complete) self.task.save() @@ -701,8 +702,9 @@ class PrepareNodes(BaseAction): create_nodefilter_from_nodelist(node_list)) self.task.register_subtask(node_identify_task) - self.logger.info("Starting node driver task %s to identify nodes." - % (node_identify_task.get_id())) + self.logger.info( + "Starting node driver task %s to identify nodes." % + (node_identify_task.get_id())) node_driver.execute_task(node_identify_task.get_id()) @@ -742,8 +744,8 @@ class PrepareNodes(BaseAction): oob_driver = self._get_driver('oob', oob_type) if oob_driver is None: - self.logger.warning( - "Node OOB type %s has no enabled driver." % oob_type) + self.logger.warning("Node OOB type %s has no enabled driver." 
% + oob_type) self.task.failure() for n in oob_nodes: self.task.add_status_msg( @@ -772,8 +774,8 @@ class PrepareNodes(BaseAction): self._collect_subtask_futures( task_futures, timeout=(config.config_mgr.conf.timeouts.drydock_timeout * 60)) - self.logger.debug( - "Collected subtasks for task %s" % str(self.task.get_id())) + self.logger.debug("Collected subtasks for task %s" % + str(self.task.get_id())) except errors.CollectSubtaskTimeout as ex: self.logger.warning(str(ex)) @@ -799,8 +801,8 @@ class PrepareNodes(BaseAction): oob_driver = self._get_driver('oob', oob_type) if oob_driver is None: - self.logger.warning( - "Node OOB type %s has no enabled driver." % oob_type) + self.logger.warning("Node OOB type %s has no enabled driver." % + oob_type) self.task.failure() for n in oob_nodes: self.task.add_status_msg( @@ -830,8 +832,8 @@ class PrepareNodes(BaseAction): self._collect_subtask_futures( task_futures, timeout=(config.config_mgr.conf.timeouts.drydock_timeout * 60)) - self.logger.debug( - "Collected subtasks for task %s" % str(self.task.get_id())) + self.logger.debug("Collected subtasks for task %s" % + str(self.task.get_id())) except errors.CollectSubtaskTimeout as ex: self.logger.warning(str(ex)) @@ -897,11 +899,10 @@ class DeployNodes(BaseAction): target_nodes = self.orchestrator.get_target_nodes(self.task) if not target_nodes: - self.task.add_status_msg( - msg="No nodes in scope, no work to do.", - error=False, - ctx='NA', - ctx_type='NA') + self.task.add_status_msg(msg="No nodes in scope, no work to do.", + error=False, + ctx='NA', + ctx_type='NA') self.task.success() self.task.set_status(hd_fields.TaskStatus.Complete) self.task.save() @@ -952,8 +953,8 @@ class DeployNodes(BaseAction): if (node_storage_task is not None and len(node_storage_task.result.successes) > 0): self.logger.info( - "Configured storage on %s nodes, configuring platform." % (len( - node_storage_task.result.successes))) + "Configured storage on %s nodes, configuring platform." % + (len(node_storage_task.result.successes))) node_platform_task = self.orchestrator.create_task( design_ref=self.task.design_ref, @@ -979,8 +980,8 @@ class DeployNodes(BaseAction): if node_platform_task is not None and len( node_platform_task.result.successes) > 0: self.logger.info( - "Configured platform on %s nodes, starting deployment." % (len( - node_platform_task.result.successes))) + "Configured platform on %s nodes, starting deployment." % + (len(node_platform_task.result.successes))) while True: if node_deploy_task is None: @@ -1078,8 +1079,9 @@ class RelabelNodes(BaseAction): node_filter=nf) self.task.register_subtask(relabel_node_task) - self.logger.info("Starting kubernetes driver task %s to relabel nodes." - % (relabel_node_task.get_id())) + self.logger.info( + "Starting kubernetes driver task %s to relabel nodes." % + (relabel_node_task.get_id())) kubernetes_driver.execute_task(relabel_node_task.get_id()) relabel_node_task = self.state_manager.get_task( @@ -1118,8 +1120,8 @@ class BootactionReport(BaseAction): bas = self.state_manager.get_boot_actions_for_node(n) running_bas = { k: v - for (k, v) in bas.items() if v. - get('action_status') == hd_fields.ActionResult.Incomplete + for (k, v) in bas.items() if v.get('action_status') + == hd_fields.ActionResult.Incomplete } if len(running_bas) > 0: still_running = True @@ -1166,11 +1168,11 @@ class BootactionReport(BaseAction): ctx=n, ctx_type='node') for ba in running_bas.values(): - self.task.add_status_msg( - msg="Boot action %s timed out." 
% (ba['action_name']), - error=True, - ctx=n, - ctx_type='node') + self.task.add_status_msg(msg="Boot action %s timed out." % + (ba['action_name']), + error=True, + ctx=n, + ctx_type='node') if len(failure_bas) == 0 and len(running_bas) == 0: self.task.success(focus=n) diff --git a/python/drydock_provisioner/orchestrator/orchestrator.py b/python/drydock_provisioner/orchestrator/orchestrator.py index fcdeeff6..57a2d872 100644 --- a/python/drydock_provisioner/orchestrator/orchestrator.py +++ b/python/drydock_provisioner/orchestrator/orchestrator.py @@ -41,7 +41,9 @@ from .validations.validator import Validator class Orchestrator(object): """Defines functionality for task execution workflow.""" - def __init__(self, enabled_drivers=None, state_manager=None, + def __init__(self, + enabled_drivers=None, + state_manager=None, ingester=None): """Initialize the orchestrator. A single instance should be executing at a time. @@ -81,9 +83,8 @@ class Orchestrator(object): if self.enabled_drivers.get('oob', None) is None: self.enabled_drivers['oob'] = [] self.enabled_drivers['oob'].append( - oob_driver_class( - state_manager=state_manager, - orchestrator=self)) + oob_driver_class(state_manager=state_manager, + orchestrator=self)) node_driver_name = enabled_drivers.node_driver if node_driver_name is not None: @@ -97,8 +98,8 @@ class Orchestrator(object): network_driver_name = enabled_drivers.network_driver if network_driver_name is not None: m, c = network_driver_name.rsplit('.', 1) - network_driver_class = getattr( - importlib.import_module(m), c, None) + network_driver_class = getattr(importlib.import_module(m), c, + None) if network_driver_class is not None: self.enabled_drivers['network'] = network_driver_class( state_manager=state_manager, orchestrator=self) @@ -106,8 +107,8 @@ class Orchestrator(object): kubernetes_driver_name = enabled_drivers.kubernetes_driver if kubernetes_driver_name is not None: m, c = kubernetes_driver_name.rsplit('.', 1) - kubernetes_driver_class = getattr( - importlib.import_module(m), c, None) + kubernetes_driver_class = getattr(importlib.import_module(m), + c, None) if kubernetes_driver_class is not None: self.enabled_drivers[ 'kubernetes'] = kubernetes_driver_class( @@ -191,8 +192,8 @@ class Orchestrator(object): else: self.logger.warning( "Task %s has unsupported action %s, ending execution." - % (str(next_task.get_id()), - next_task.action)) + % (str( + next_task.get_id()), next_task.action)) next_task.add_status_msg( msg="Unsupported action %s." % next_task.action, @@ -230,8 +231,8 @@ class Orchestrator(object): :param propagate: whether the termination should propagatge to subtasks """ if task is None: - raise errors.OrchestratorError( - "Could find task %s" % str(task.get_id())) + raise errors.OrchestratorError("Could find task %s" % + str(task.get_id())) else: # Terminate initial task first to prevent add'l subtasks self.logger.debug("Terminating task %s." 
% str(task.get_id())) @@ -243,8 +244,9 @@ class Orchestrator(object): for st_id in subtasks: st = self.state_manager.get_task(st_id) - self.terminate_task( - st, propagate=True, terminated_by=terminated_by) + self.terminate_task(st, + propagate=True, + terminated_by=terminated_by) def create_task(self, **kwargs): """Create a new task and persist it.""" @@ -263,13 +265,14 @@ class Orchestrator(object): nodes = site_design.baremetal_nodes for n in nodes or []: try: - n.compile_applied_model( - site_design, - state_manager=self.state_manager, - resolve_aliases=resolve_aliases) + n.compile_applied_model(site_design, + state_manager=self.state_manager, + resolve_aliases=resolve_aliases) except Exception as ex: self.logger.debug( - "Failed to build applied model for node %s.", n.name, exc_info=ex) + "Failed to build applied model for node %s.", + n.name, + exc_info=ex) raise ex except AttributeError: self.logger.debug( @@ -305,21 +308,21 @@ class Orchestrator(object): try: status, site_design = self.get_described_site(design_ref) if status.status == hd_fields.ValidationResult.Success: - self.compute_model_inheritance( - site_design, resolve_aliases=resolve_aliases) + self.compute_model_inheritance(site_design, + resolve_aliases=resolve_aliases) self.compute_bootaction_targets(site_design) self.render_route_domains(site_design) status = val.validate_design(site_design, result_status=status) except Exception as ex: if status is not None: - status.add_status_msg( - "Error loading effective site: %s" % str(ex), - error=True, - ctx='NA', - ctx_type='NA') + status.add_status_msg("Error loading effective site: %s" % + str(ex), + error=True, + ctx='NA', + ctx_type='NA') status.set_status(hd_fields.ActionResult.Failure) - self.logger.error( - "Error getting site definition: %s" % str(ex), exc_info=ex) + self.logger.error("Error getting site definition: %s" % str(ex), + exc_info=ex) return status, site_design @@ -368,9 +371,8 @@ class Orchestrator(object): nf['filter_set_type'] = 'intersection' nf['filter_set'] = [ - dict( - node_names=[x.get_id() for x in node_list], - filter_type='union') + dict(node_names=[x.get_id() for x in node_list], + filter_type='union') ] return nf @@ -418,8 +420,8 @@ class Orchestrator(object): for f in node_filter.get('filter_set', []): result_sets.append(self.process_filter(target_nodes, f)) - return self.join_filter_sets( - node_filter.get('filter_set_type'), result_sets) + return self.join_filter_sets(node_filter.get('filter_set_type'), + result_sets) elif isinstance(node_filter, objects.NodeFilterSet): for f in node_filter.filter_set: @@ -434,8 +436,8 @@ class Orchestrator(object): elif filter_set_type == 'intersection': return self.list_intersection(*result_sets) else: - raise errors.OrchestratorError( - "Unknown filter set type %s" % filter_set_type) + raise errors.OrchestratorError("Unknown filter set type %s" % + filter_set_type) def process_filter(self, node_set, filter_set): """Take a filter and apply it to the node_set. 
@@ -500,11 +502,10 @@ class Orchestrator(object): target_nodes['rack_labels'] = node_set if set_type == 'union': - return self.list_union( - target_nodes.get('node_names', []), - target_nodes.get('node_tags', []), - target_nodes.get('rack_names', []), - target_nodes.get('node_labels', [])) + return self.list_union(target_nodes.get('node_names', []), + target_nodes.get('node_tags', []), + target_nodes.get('rack_names', []), + target_nodes.get('node_labels', [])) elif set_type == 'intersection': return self.list_intersection( target_nodes.get('node_names', None), @@ -514,8 +515,8 @@ class Orchestrator(object): except Exception as ex: self.logger.error("Error processing node filter.", exc_info=ex) - raise errors.OrchestratorError( - "Error processing node filter: %s" % str(ex)) + raise errors.OrchestratorError("Error processing node filter: %s" % + str(ex)) def list_intersection(self, a, *rest): """Take the intersection of a with the intersection of all the rest. @@ -569,12 +570,12 @@ class Orchestrator(object): identity_key = None - self.logger.debug( - "Creating boot action context for node %s" % nodename) + self.logger.debug("Creating boot action context for node %s" % + nodename) for ba in site_design.bootactions: - self.logger.debug( - "Boot actions target nodes: %s" % ba.target_nodes) + self.logger.debug("Boot actions target nodes: %s" % + ba.target_nodes) if nodename in ba.target_nodes: if identity_key is None: identity_key = os.urandom(32) @@ -591,13 +592,12 @@ class Orchestrator(object): "Boot action %s has disabled signaling, marking unreported." % ba.name) action_id = ulid2.generate_binary_ulid() - self.state_manager.post_boot_action( - nodename, - task.get_id(), - identity_key, - action_id, - ba.name, - action_status=init_status) + self.state_manager.post_boot_action(nodename, + task.get_id(), + identity_key, + action_id, + ba.name, + action_status=init_status) return identity_key def find_node_package_lists(self, nodename, task): @@ -611,8 +611,8 @@ class Orchestrator(object): if site_design.bootactions is None: return None - self.logger.debug( - "Extracting package install list for node %s" % nodename) + self.logger.debug("Extracting package install list for node %s" % + nodename) pkg_list = dict() @@ -668,22 +668,22 @@ class Orchestrator(object): metric = None if 'routes' in n and n.routes is not None: for r in n.routes: - if 'routedomain' in r and r.get('routedomain', - None) == rd: + if 'routedomain' in r and r.get( + 'routedomain', None) == rd: gw = r.get('gateway') metric = r.get('metric') self.logger.debug( "Use gateway %s for routedomain %s " - "on network %s." % (gw, rd, - n.get_name())) + "on network %s." % + (gw, rd, n.get_name())) break if gw is not None and metric is not None: for cidr in rd_cidrs: if cidr != n.cidr: n.routes.append( - dict( - subnet=cidr, gateway=gw, - metric=metric)) + dict(subnet=cidr, + gateway=gw, + metric=metric)) else: msg = "Invalid network model: {}. Cannot find " \ "routes field in network with routedomain: " \ diff --git a/python/drydock_provisioner/orchestrator/util.py b/python/drydock_provisioner/orchestrator/util.py index eb0aca7f..d2e08a3a 100644 --- a/python/drydock_provisioner/orchestrator/util.py +++ b/python/drydock_provisioner/orchestrator/util.py @@ -18,6 +18,7 @@ import drydock_provisioner.error as errors class SimpleBytes(): + def calculate_bytes(size_str): """ Calculate the size in bytes of a size_str. 
@@ -34,8 +35,8 @@ class SimpleBytes(): match = regex.match(size_str) if not match: - raise errors.InvalidSizeFormat( - "Invalid size string format: %s" % size_str) + raise errors.InvalidSizeFormat("Invalid size string format: %s" % + size_str) base_size = int(match.group(1)) diff --git a/python/drydock_provisioner/orchestrator/validations/boot_storage_rational.py b/python/drydock_provisioner/orchestrator/validations/boot_storage_rational.py index 186079d8..55319390 100644 --- a/python/drydock_provisioner/orchestrator/validations/boot_storage_rational.py +++ b/python/drydock_provisioner/orchestrator/validations/boot_storage_rational.py @@ -18,6 +18,7 @@ from drydock_provisioner.orchestrator.util import SimpleBytes class BootStorageRational(Validators): + def __init__(self): super().__init__('Rational Boot Storage', 'DD1001') diff --git a/python/drydock_provisioner/orchestrator/validations/cidr_validity.py b/python/drydock_provisioner/orchestrator/validations/cidr_validity.py index 7e96b9e1..2a4efb85 100644 --- a/python/drydock_provisioner/orchestrator/validations/cidr_validity.py +++ b/python/drydock_provisioner/orchestrator/validations/cidr_validity.py @@ -17,6 +17,7 @@ from drydock_provisioner.orchestrator.validations.validators import Validators class CidrValidity(Validators): + def __init__(self): super().__init__('CIDR Validity', 'DD2006') @@ -40,8 +41,10 @@ class CidrValidity(Validators): except ValueError as e: if str(e) == (net.cidr + " has host bits set"): msg = 'The provided CIDR %s has host bits set' % net.cidr - valid_cidr = ipaddress.ip_network(net.cidr, strict=False) + valid_cidr = ipaddress.ip_network(net.cidr, + strict=False) self.report_error( - msg, [net.doc_ref], - "Provide a CIDR acceptable by MAAS: %s" % str(valid_cidr)) + msg, [net.doc_ref], + "Provide a CIDR acceptable by MAAS: %s" % + str(valid_cidr)) return diff --git a/python/drydock_provisioner/orchestrator/validations/hostname_validity.py b/python/drydock_provisioner/orchestrator/validations/hostname_validity.py index b6c77a59..900521e3 100644 --- a/python/drydock_provisioner/orchestrator/validations/hostname_validity.py +++ b/python/drydock_provisioner/orchestrator/validations/hostname_validity.py @@ -17,6 +17,7 @@ from drydock_provisioner.orchestrator.validations.validators import Validators class HostnameValidity(Validators): + def __init__(self): super().__init__('Hostname Validity', 'DD3003') diff --git a/python/drydock_provisioner/orchestrator/validations/hugepages_validity.py b/python/drydock_provisioner/orchestrator/validations/hugepages_validity.py index 8e18ef30..dca9f592 100644 --- a/python/drydock_provisioner/orchestrator/validations/hugepages_validity.py +++ b/python/drydock_provisioner/orchestrator/validations/hugepages_validity.py @@ -2,6 +2,7 @@ from drydock_provisioner.orchestrator.validations.validators import Validators class HugepagesValidity(Validators): + def __init__(self): super().__init__('Hugepages', 'DD1008') diff --git a/python/drydock_provisioner/orchestrator/validations/ip_locality_check.py b/python/drydock_provisioner/orchestrator/validations/ip_locality_check.py index 3df4f0ef..35741828 100644 --- a/python/drydock_provisioner/orchestrator/validations/ip_locality_check.py +++ b/python/drydock_provisioner/orchestrator/validations/ip_locality_check.py @@ -17,6 +17,7 @@ from netaddr import IPNetwork, IPAddress class IpLocalityCheck(Validators): + def __init__(self): super().__init__('IP Locality Check', "DD2002") diff --git 
a/python/drydock_provisioner/orchestrator/validations/mtu_rational.py b/python/drydock_provisioner/orchestrator/validations/mtu_rational.py index baa70aba..64b70f13 100644 --- a/python/drydock_provisioner/orchestrator/validations/mtu_rational.py +++ b/python/drydock_provisioner/orchestrator/validations/mtu_rational.py @@ -37,8 +37,9 @@ class MtuRational(Validators): mtu = network_link.mtu if mtu and (mtu < MtuRational.MIN_MTU_SIZE or mtu > MtuRational.MAX_MTU_SIZE): - msg = ("MTU must be between %d and %d, value is %d" % ( - MtuRational.MIN_MTU_SIZE, MtuRational.MAX_MTU_SIZE, mtu)) + msg = ( + "MTU must be between %d and %d, value is %d" % + (MtuRational.MIN_MTU_SIZE, MtuRational.MAX_MTU_SIZE, mtu)) self.report_error( msg, [network_link.doc_ref], "Define a valid MTU. Standard is 1500, Jumbo is 9100.") @@ -52,8 +53,9 @@ class MtuRational(Validators): if network_mtu and (network_mtu < MtuRational.MIN_MTU_SIZE or network_mtu > MtuRational.MAX_MTU_SIZE): - msg = ("MTU must be between %d and %d, value is %d" % ( - MtuRational.MIN_MTU_SIZE, MtuRational.MAX_MTU_SIZE, mtu)) + msg = ( + "MTU must be between %d and %d, value is %d" % + (MtuRational.MIN_MTU_SIZE, MtuRational.MAX_MTU_SIZE, mtu)) self.report_error( msg, [network.doc_ref], "Define a valid MTU. Standard is 1500, Jumbo is 9100.") diff --git a/python/drydock_provisioner/orchestrator/validations/network_trunking_rational.py b/python/drydock_provisioner/orchestrator/validations/network_trunking_rational.py index 481feb17..deca18ed 100644 --- a/python/drydock_provisioner/orchestrator/validations/network_trunking_rational.py +++ b/python/drydock_provisioner/orchestrator/validations/network_trunking_rational.py @@ -17,6 +17,7 @@ import drydock_provisioner.objects.fields as hd_fields class NetworkTrunkingRational(Validators): + def __init__(self): super().__init__('Network Trunking Rationalty', "DD2004") @@ -30,8 +31,8 @@ class NetworkTrunkingRational(Validators): for network_link in network_link_list: allowed_networks = network_link.allowed_networks # if allowed networks > 1 trunking must be enabled - if (len(allowed_networks) > 1 and network_link. - trunk_mode == hd_fields.NetworkLinkTrunkingMode.Disabled): + if (len(allowed_networks) > 1 and network_link.trunk_mode + == hd_fields.NetworkLinkTrunkingMode.Disabled): msg = ('If there is more than 1 allowed network,' 'trunking mode must be enabled') self.report_error( @@ -40,15 +41,17 @@ class NetworkTrunkingRational(Validators): ) # trunking mode is disabled, default_network must be defined - if (network_link.trunk_mode == hd_fields.NetworkLinkTrunkingMode. - Disabled and network_link.native_network is None): + if (network_link.trunk_mode + == hd_fields.NetworkLinkTrunkingMode.Disabled + and network_link.native_network is None): msg = 'Trunking mode is disabled, a trunking default_network must be defined' self.report_error( msg, [network_link.doc_ref], "Non-trunked links must have a native network defined.") - elif (network_link.trunk_mode == hd_fields.NetworkLinkTrunkingMode. - Disabled and network_link.native_network is not None): + elif (network_link.trunk_mode + == hd_fields.NetworkLinkTrunkingMode.Disabled + and network_link.native_network is not None): network = site_design.get_network(network_link.native_network) if network and network.vlan_id: msg = "Network link native network has a defined VLAN tag." 
diff --git a/python/drydock_provisioner/orchestrator/validations/no_duplicate_ips_check.py b/python/drydock_provisioner/orchestrator/validations/no_duplicate_ips_check.py index 8b21b7f8..5d584540 100644 --- a/python/drydock_provisioner/orchestrator/validations/no_duplicate_ips_check.py +++ b/python/drydock_provisioner/orchestrator/validations/no_duplicate_ips_check.py @@ -15,6 +15,7 @@ from drydock_provisioner.orchestrator.validations.validators import Validators class NoDuplicateIpsCheck(Validators): + def __init__(self): super().__init__('Duplicated IP Check', "DD2005") diff --git a/python/drydock_provisioner/orchestrator/validations/oob_valid_ipmi.py b/python/drydock_provisioner/orchestrator/validations/oob_valid_ipmi.py index 1886152b..e4492d4f 100644 --- a/python/drydock_provisioner/orchestrator/validations/oob_valid_ipmi.py +++ b/python/drydock_provisioner/orchestrator/validations/oob_valid_ipmi.py @@ -15,6 +15,7 @@ from drydock_provisioner.orchestrator.validations.validators import Validators class IpmiValidity(Validators): + def __init__(self): super().__init__('Valid IPMI Configuration', 'DD4001') @@ -32,9 +33,8 @@ class IpmiValidity(Validators): if baremetal_node.oob_type == 'ipmi': for p in required_params: if not baremetal_node.oob_parameters.get(p, None): - msg = ( - 'OOB parameter %s for IPMI node %s missing.' % p, - baremetal_node.name) + msg = ('OOB parameter %s for IPMI node %s missing.' % + p, baremetal_node.name) self.report_error(msg, [baremetal_node.doc_ref], "Define OOB parameter %s" % p) oob_addr = None diff --git a/python/drydock_provisioner/orchestrator/validations/oob_valid_libvirt.py b/python/drydock_provisioner/orchestrator/validations/oob_valid_libvirt.py index 30012b54..a8d28c20 100644 --- a/python/drydock_provisioner/orchestrator/validations/oob_valid_libvirt.py +++ b/python/drydock_provisioner/orchestrator/validations/oob_valid_libvirt.py @@ -15,6 +15,7 @@ from drydock_provisioner.orchestrator.validations.validators import Validators class LibvirtValidity(Validators): + def __init__(self): super().__init__('Valid Libvirt Configuration', 'DD4002') diff --git a/python/drydock_provisioner/orchestrator/validations/platform_selection.py b/python/drydock_provisioner/orchestrator/validations/platform_selection.py index d2cd9a44..f1cedcdd 100644 --- a/python/drydock_provisioner/orchestrator/validations/platform_selection.py +++ b/python/drydock_provisioner/orchestrator/validations/platform_selection.py @@ -15,6 +15,7 @@ from drydock_provisioner.orchestrator.validations.validators import Validators class PlatformSelection(Validators): + def __init__(self): super().__init__('Platform Selection', 'DD3001') @@ -39,8 +40,9 @@ class PlatformSelection(Validators): try: valid_images = node_driver.get_available_images() except Exception: - msg = ("Platform validation: Could not load images from driver, skipping" - "image and kernel selection validation.") + msg = ( + "Platform validation: Could not load images from driver, skipping" + "image and kernel selection validation.") self.report_warn( msg, [], "Cannot validate platform selection without accessing the node provisioner." 
@@ -53,8 +55,9 @@ class PlatformSelection(Validators): try: valid_kernels[i] = node_driver.get_available_kernels(i) except Exception: - msg = ("Platform validation: Could not load kernels from driver, skipping" - "image and kernel selection validation.") + msg = ( + "Platform validation: Could not load kernels from driver, skipping" + "image and kernel selection validation.") self.report_warn( msg, [], "Cannot validate platform selection without accessing the node provisioner." diff --git a/python/drydock_provisioner/orchestrator/validations/rational_network_bond.py b/python/drydock_provisioner/orchestrator/validations/rational_network_bond.py index 6bf5804b..647e1aae 100644 --- a/python/drydock_provisioner/orchestrator/validations/rational_network_bond.py +++ b/python/drydock_provisioner/orchestrator/validations/rational_network_bond.py @@ -15,6 +15,7 @@ from drydock_provisioner.orchestrator.validations.validators import Validators class RationalNetworkBond(Validators): + def __init__(self): super().__init__('Network Bond Rationality', 'DD1006') diff --git a/python/drydock_provisioner/orchestrator/validations/storage_mountpoints.py b/python/drydock_provisioner/orchestrator/validations/storage_mountpoints.py index c5190cc2..807941e0 100644 --- a/python/drydock_provisioner/orchestrator/validations/storage_mountpoints.py +++ b/python/drydock_provisioner/orchestrator/validations/storage_mountpoints.py @@ -14,7 +14,9 @@ from drydock_provisioner.orchestrator.validations.validators import Validators + class StorageMountpoints(Validators): + def __init__(self): super().__init__('Storage Mountpoint', "DD2004") @@ -43,11 +45,10 @@ class StorageMountpoints(Validators): if mountpoint is None: continue if mountpoint in mountpoint_list: - msg = ('Mountpoint "{}" already exists' - .format(mountpoint)) - self.report_error( - msg, [baremetal_node.doc_ref], - 'Please use unique mountpoints.') + msg = ('Mountpoint "{}" already exists'.format( + mountpoint)) + self.report_error(msg, [baremetal_node.doc_ref], + 'Please use unique mountpoints.') return else: mountpoint_list.append(mountpoint) @@ -66,8 +67,8 @@ class StorageMountpoints(Validators): if mountpoint is None: continue if mountpoint in mountpoint_list: - msg = ('Mountpoint "{}" already exists' - .format(mountpoint)) + msg = ('Mountpoint "{}" already exists'. 
+ format(mountpoint)) self.report_error( msg, [baremetal_node.doc_ref], 'Please use unique mountpoints.') diff --git a/python/drydock_provisioner/orchestrator/validations/storage_partititioning.py b/python/drydock_provisioner/orchestrator/validations/storage_partititioning.py index cc34a36c..d29794a9 100644 --- a/python/drydock_provisioner/orchestrator/validations/storage_partititioning.py +++ b/python/drydock_provisioner/orchestrator/validations/storage_partititioning.py @@ -15,6 +15,7 @@ from drydock_provisioner.orchestrator.validations.validators import Validators class StoragePartitioning(Validators): + def __init__(self): super().__init__('Storage Partitioning', "DD2002") @@ -70,8 +71,9 @@ class StoragePartitioning(Validators): all_volume_groups = baremetal_node.volume_groups or [] for volume_group in all_volume_groups: if volume_group.name not in volume_group_check_list: - msg = ('Volume group %s not assigned any physical volumes' - % (volume_group.name)) + msg = ( + 'Volume group %s not assigned any physical volumes' % + (volume_group.name)) self.report_error( msg, [baremetal_node.doc_ref], "Each volume group should be assigned at least one storage device " diff --git a/python/drydock_provisioner/orchestrator/validations/storage_sizing.py b/python/drydock_provisioner/orchestrator/validations/storage_sizing.py index b3bd3a11..698727c5 100644 --- a/python/drydock_provisioner/orchestrator/validations/storage_sizing.py +++ b/python/drydock_provisioner/orchestrator/validations/storage_sizing.py @@ -15,6 +15,7 @@ from drydock_provisioner.orchestrator.validations.validators import Validators class StorageSizing(Validators): + def __init__(self): super().__init__('Storage Sizing', 'DD2003') diff --git a/python/drydock_provisioner/orchestrator/validations/unique_network_check.py b/python/drydock_provisioner/orchestrator/validations/unique_network_check.py index e4f9b562..227d838e 100644 --- a/python/drydock_provisioner/orchestrator/validations/unique_network_check.py +++ b/python/drydock_provisioner/orchestrator/validations/unique_network_check.py @@ -15,6 +15,7 @@ from drydock_provisioner.orchestrator.validations.validators import Validators class UniqueNetworkCheck(Validators): + def __init__(self): super().__init__('Allowed Network Check', 'DD1007') @@ -53,8 +54,8 @@ class UniqueNetworkCheck(Validators): for name in duplicated_names: msg = ( 'Allowed network %s duplicated on NetworkLink %s and NetworkLink ' - '%s' % (name, network_link_name, - network_link_name_2)) + '%s' % + (name, network_link_name, network_link_name_2)) self.report_error( msg, [], "Each network is only allowed to cross a single network link." diff --git a/python/drydock_provisioner/orchestrator/validations/validator.py b/python/drydock_provisioner/orchestrator/validations/validator.py index 41e0b5e6..10389ba4 100644 --- a/python/drydock_provisioner/orchestrator/validations/validator.py +++ b/python/drydock_provisioner/orchestrator/validations/validator.py @@ -38,6 +38,7 @@ from drydock_provisioner.orchestrator.validations.storage_mountpoints import Sto class Validator(): + def __init__(self, orchestrator): """Create a validator with a reference to the orchestrator. 
@@ -63,8 +64,8 @@ class Validator(): validation_error = False for rule in rule_set: - message_list = rule.execute( - site_design=site_design, orchestrator=self.orchestrator) + message_list = rule.execute(site_design=site_design, + orchestrator=self.orchestrator) result_status.message_list.extend(message_list) error_msg = [m for m in message_list if m.error] result_status.error_count = result_status.error_count + len( diff --git a/python/drydock_provisioner/orchestrator/validations/validators.py b/python/drydock_provisioner/orchestrator/validations/validators.py index 8f37f791..18c97419 100644 --- a/python/drydock_provisioner/orchestrator/validations/validators.py +++ b/python/drydock_provisioner/orchestrator/validations/validators.py @@ -20,7 +20,9 @@ from drydock_provisioner.objects import fields as hd_fields import drydock_provisioner.config as config + class Validators: + def __init__(self, long_name, name): self.name = name self.long_name = long_name @@ -42,13 +44,12 @@ class Validators: :param level: String - More detailed of the severity level of this message """ fmt_msg = "%s: %s" % (self.long_name, msg) - msg_obj = objects.ValidationMessage( - fmt_msg, - self.name, - error=error, - level=level, - docs=docs, - diagnostic=diagnostic) + msg_obj = objects.ValidationMessage(fmt_msg, + self.name, + error=error, + level=level, + docs=docs, + diagnostic=diagnostic) self.messages.append(msg_obj) def report_error(self, msg, docs, diagnostic): diff --git a/python/drydock_provisioner/policy.py b/python/drydock_provisioner/policy.py index dfeaf7ae..0b65019d 100644 --- a/python/drydock_provisioner/policy.py +++ b/python/drydock_provisioner/policy.py @@ -30,35 +30,35 @@ class DrydockPolicy(object): # Base Policy base_rules = [ - policy.RuleDefault( - 'admin_required', - 'role:admin or is_admin:1', - description='Actions requiring admin authority'), + policy.RuleDefault('admin_required', + 'role:admin or is_admin:1', + description='Actions requiring admin authority'), ] # Orchestrator Policy task_rules = [ - policy.DocumentedRuleDefault( - 'physical_provisioner:read_task', 'role:admin', 'Get task status', - [{ - 'path': '/api/v1.0/tasks', - 'method': 'GET' - }, { - 'path': '/api/v1.0/tasks/{task_id}', - 'method': 'GET' - }]), + policy.DocumentedRuleDefault('physical_provisioner:read_task', + 'role:admin', 'Get task status', + [{ + 'path': '/api/v1.0/tasks', + 'method': 'GET' + }, { + 'path': '/api/v1.0/tasks/{task_id}', + 'method': 'GET' + }]), policy.DocumentedRuleDefault('physical_provisioner:create_task', 'role:admin', 'Create a task', [{ 'path': '/api/v1.0/tasks', 'method': 'POST' }]), - policy.DocumentedRuleDefault( - 'physical_provisioner:validate_design', 'role:admin', - 'Create validate_design task', [{ - 'path': '/api/v1.0/tasks', - 'method': 'POST' - }]), + policy.DocumentedRuleDefault('physical_provisioner:validate_design', + 'role:admin', + 'Create validate_design task', + [{ + 'path': '/api/v1.0/tasks', + 'method': 'POST' + }]), policy.DocumentedRuleDefault('physical_provisioner:verify_site', 'role:admin', 'Create verify_site task', [{ @@ -95,12 +95,12 @@ class DrydockPolicy(object): 'path': '/api/v1.0/tasks', 'method': 'POST' }]), - policy.DocumentedRuleDefault('physical_provisioner:delete_tasks', - 'role:admin', 'Deletes tasks by age', - [{ - 'path': '/api/v1.0/tasks', - 'method': 'DELETE' - }]), + policy.DocumentedRuleDefault( + 'physical_provisioner:delete_tasks', 'role:admin', + 'Deletes tasks by age', [{ + 'path': '/api/v1.0/tasks', + 'method': 'DELETE' + }]), 
policy.DocumentedRuleDefault('physical_provisioner:relabel_nodes', 'role:admin', 'Create relabel_nodes task', [{ @@ -110,10 +110,8 @@ class DrydockPolicy(object): policy.DocumentedRuleDefault( 'physical_provisioner:read_build_data', 'role:admin', 'Read build data for a node', [{ - 'path': - '/api/v1.0/nodes/{nodename}/builddata', - 'method': - 'GET', + 'path': '/api/v1.0/nodes/{nodename}/builddata', + 'method': 'GET', }]), ] @@ -121,8 +119,7 @@ class DrydockPolicy(object): data_rules = [ policy.DocumentedRuleDefault( 'physical_provisioner:read_data', 'role:admin', - 'Read loaded design data', - [{ + 'Read loaded design data', [{ 'path': '/api/v1.0/designs', 'method': 'GET' }, { @@ -131,8 +128,7 @@ class DrydockPolicy(object): }]), policy.DocumentedRuleDefault( 'physical_provisioner:ingest_data', 'role:admin', - 'Load design data', - [{ + 'Load design data', [{ 'path': '/api/v1.0/designs', 'method': 'POST' }, { @@ -182,6 +178,7 @@ class ApiEnforcer(object): self.logger = logging.getLogger('drydock.policy') def __call__(self, f): + @functools.wraps(f) def secure_handler(slf, req, resp, *args, **kwargs): ctx = req.context @@ -199,18 +196,16 @@ class ApiEnforcer(object): slf.info( ctx, "Error - Forbidden access - action: %s" % self.action) - slf.return_error( - resp, - falcon.HTTP_403, - message="Forbidden", - retry=False) + slf.return_error(resp, + falcon.HTTP_403, + message="Forbidden", + retry=False) else: slf.info(ctx, "Error - Unauthenticated access") - slf.return_error( - resp, - falcon.HTTP_401, - message="Unauthenticated", - retry=False) + slf.return_error(resp, + falcon.HTTP_401, + message="Unauthenticated", + retry=False) return secure_handler diff --git a/python/drydock_provisioner/statemgmt/db/tables.py b/python/drydock_provisioner/statemgmt/db/tables.py index 9d551bc1..8c9d3681 100644 --- a/python/drydock_provisioner/statemgmt/db/tables.py +++ b/python/drydock_provisioner/statemgmt/db/tables.py @@ -20,6 +20,7 @@ from sqlalchemy.dialects import postgresql as pg class ExtendTable(Table): + def __new__(cls, metadata): self = super().__new__(cls, cls.__tablename__, metadata, *cls.__schema__) diff --git a/python/drydock_provisioner/statemgmt/design/resolver.py b/python/drydock_provisioner/statemgmt/design/resolver.py index 0feaeabd..025a377d 100644 --- a/python/drydock_provisioner/statemgmt/design/resolver.py +++ b/python/drydock_provisioner/statemgmt/design/resolver.py @@ -84,13 +84,13 @@ class ReferenceResolver(object): :param design_uri: Tuple as returned by urllib.parse for the design reference """ if design_uri.username is not None and design_uri.password is not None: - response = requests.get( - design_uri.geturl(), - auth=(design_uri.username, design_uri.password), - timeout=get_client_timeouts()) + response = requests.get(design_uri.geturl(), + auth=(design_uri.username, + design_uri.password), + timeout=get_client_timeouts()) else: - response = requests.get( - design_uri.geturl(), timeout=get_client_timeouts()) + response = requests.get(design_uri.geturl(), + timeout=get_client_timeouts()) return response.content diff --git a/python/drydock_provisioner/statemgmt/state.py b/python/drydock_provisioner/statemgmt/state.py index 277f85f6..24abd3bc 100644 --- a/python/drydock_provisioner/statemgmt/state.py +++ b/python/drydock_provisioner/statemgmt/state.py @@ -33,6 +33,7 @@ from .design.resolver import ReferenceResolver class DrydockState(object): + def __init__(self): self.logger = logging.getLogger( config.config_mgr.conf.logging.global_logger_name) @@ -74,8 +75,8 @@ class 
DrydockState(object): with self.db_engine.connect() as conn: for t in table_names: - query_text = sql.text( - "TRUNCATE TABLE %s" % t).execution_options(autocommit=True) + query_text = sql.text("TRUNCATE TABLE %s" % + t).execution_options(autocommit=True) conn.execute(query_text) def get_design_documents(self, design_ref): @@ -169,8 +170,9 @@ class DrydockState(object): with self.db_engine.connect() as conn: if allowed_actions is None: query = self.tasks_tbl.select().where( - self.tasks_tbl.c.status == hd_fields.TaskStatus. - Queued).order_by(self.tasks_tbl.c.created.asc()) + self.tasks_tbl.c.status + == hd_fields.TaskStatus.Queued).order_by( + self.tasks_tbl.c.created.asc()) rs = conn.execute(query) else: query = sql.text("SELECT * FROM tasks WHERE " @@ -192,9 +194,9 @@ class DrydockState(object): else: return None except Exception as ex: - self.logger.error( - "Error querying for next queued task: %s" % str(ex), - exc_info=True) + self.logger.error("Error querying for next queued task: %s" % + str(ex), + exc_info=True) return None def get_task(self, task_id): @@ -211,17 +213,17 @@ class DrydockState(object): task = objects.Task.from_db(dict(r)) - self.logger.debug( - "Assembling result messages for task %s." % str(task.task_id)) + self.logger.debug("Assembling result messages for task %s." % + str(task.task_id)) self._assemble_tasks(task_list=[task]) task.statemgr = self return task except Exception as ex: - self.logger.error( - "Error querying task %s: %s" % (str(task_id), str(ex)), - exc_info=True) + self.logger.error("Error querying task %s: %s" % + (str(task_id), str(ex)), + exc_info=True) return None def post_result_message(self, task_id, msg): @@ -237,8 +239,9 @@ class DrydockState(object): conn.execute(query) return True except Exception as ex: - self.logger.error("Error inserting result message for task %s: %s" - % (str(task_id), str(ex))) + self.logger.error( + "Error inserting result message for task %s: %s" % + (str(task_id), str(ex))) return False def delete_result_message(self, task_id, msg): @@ -254,8 +257,8 @@ class DrydockState(object): conn.execute(query) return True except Exception as ex: - self.logger.error("Error delete result message for task %s: %s" - % (str(task_id), str(ex))) + self.logger.error("Error delete result message for task %s: %s" % + (str(task_id), str(ex))) return False def _assemble_tasks(self, task_list=None): @@ -292,13 +295,13 @@ class DrydockState(object): """ try: with self.db_engine.connect() as conn: - query = self.tasks_tbl.insert().values( - **(task.to_db(include_id=True))) + query = self.tasks_tbl.insert().values(**(task.to_db( + include_id=True))) conn.execute(query) return True except Exception as ex: - self.logger.error( - "Error inserting task %s: %s" % (str(task.task_id), str(ex))) + self.logger.error("Error inserting task %s: %s" % + (str(task.task_id), str(ex))) return False def put_task(self, task): @@ -317,8 +320,8 @@ class DrydockState(object): else: return False except Exception as ex: - self.logger.error( - "Error updating task %s: %s" % (str(task.task_id), str(ex))) + self.logger.error("Error updating task %s: %s" % + (str(task.task_id), str(ex))) return False def task_retention(self, retain_days): @@ -335,22 +338,19 @@ class DrydockState(object): conn.execute(query_tasks_text) conn.close() except Exception as ex: - self.logger.error( - "Error deleting tasks: %s" % str(ex)) + self.logger.error("Error deleting tasks: %s" % str(ex)) return False with self.db_engine.connect() as conn: try: - query_subtasks_text = ( - "DELETE FROM 
tasks " - "WHERE parent_task_id IS NOT NULL AND " - "parent_task_id NOT IN " - "(SELECT task_id FROM tasks);") + query_subtasks_text = ("DELETE FROM tasks " + "WHERE parent_task_id IS NOT NULL AND " + "parent_task_id NOT IN " + "(SELECT task_id FROM tasks);") conn.execute(sql.text(query_subtasks_text)) conn.close() except Exception as ex: - self.logger.error( - "Error deleting subtasks: %s" % str(ex)) + self.logger.error("Error deleting subtasks: %s" % str(ex)) return False with self.db_engine.connect() as conn: @@ -364,8 +364,8 @@ class DrydockState(object): conn.execute(sql.text(query_result_message_text)) conn.close() except Exception as ex: - self.logger.error( - "Error deleting result messages: %s" % str(ex)) + self.logger.error("Error deleting result messages: %s" % + str(ex)) return False with self.db_engine.connect() as conn: @@ -378,8 +378,7 @@ class DrydockState(object): real_conn.set_isolation_level(old_isolation_level) conn.close() except Exception as ex: - self.logger.error( - "Error running vacuum full: %s" % str(ex)) + self.logger.error("Error running vacuum full: %s" % str(ex)) return False return True @@ -397,10 +396,9 @@ class DrydockState(object): try: with self.db_engine.connect() as conn: - rs = conn.execute( - query_string, - new_subtask=subtask_id.bytes, - task_id=task_id.bytes) + rs = conn.execute(query_string, + new_subtask=subtask_id.bytes, + task_id=task_id.bytes) rc = rs.rowcount if rc == 1: return True @@ -419,8 +417,8 @@ class DrydockState(object): try: with self.db_engine.connect() as conn: query = self.active_instance_tbl.update().where( - self.active_instance_tbl.c.identity == leader_id. - bytes).values(last_ping=datetime.utcnow()) + self.active_instance_tbl.c.identity + == leader_id.bytes).values(last_ping=datetime.utcnow()) rs = conn.execute(query) rc = rs.rowcount @@ -509,17 +507,16 @@ class DrydockState(object): "WHERE ba1.node_name = :node").execution_options( autocommit=True) - conn.execute( - query, - node=nodename, - task_id=task_id.bytes, - identity=identity) + conn.execute(query, + node=nodename, + task_id=task_id.bytes, + identity=identity) return True except Exception as ex: - self.logger.error( - "Error posting boot action context for node %s" % nodename, - exc_info=ex) + self.logger.error("Error posting boot action context for node %s" % + nodename, + exc_info=ex) return False def get_boot_action_context(self, nodename): @@ -576,8 +573,8 @@ class DrydockState(object): conn.execute(query) return True except Exception as ex: - self.logger.error( - "Error saving boot action %s." % action_id, exc_info=ex) + self.logger.error("Error saving boot action %s." % action_id, + exc_info=ex) return False def put_bootaction_status(self, @@ -596,9 +593,9 @@ class DrydockState(object): conn.execute(query) return True except Exception as ex: - self.logger.error( - "Error updating boot action %s status." % action_id, - exc_info=ex) + self.logger.error("Error updating boot action %s status." 
% + action_id, + exc_info=ex) return False def get_boot_actions_for_node(self, nodename): @@ -623,9 +620,9 @@ class DrydockState(object): actions[ba_dict.get('action_name', 'undefined')] = ba_dict return actions except Exception as ex: - self.logger.error( - "Error selecting boot actions for node %s" % nodename, - exc_info=ex) + self.logger.error("Error selecting boot actions for node %s" % + nodename, + exc_info=ex) return None def get_boot_action(self, action_id): @@ -649,8 +646,8 @@ class DrydockState(object): else: return None except Exception as ex: - self.logger.error( - "Error querying boot action %s" % action_id, exc_info=ex) + self.logger.error("Error querying boot action %s" % action_id, + exc_info=ex) def post_build_data(self, build_data): """Write a new build data element to the database. @@ -695,8 +692,9 @@ class DrydockState(object): if node_name and task_id: query = self.build_data_tbl.select().where( self.build_data_tbl.c.node_name == node_name - and self.build_data_tbl.c.task_id == task_id.bytes - ).order_by(self.build_data_tbl.c.collected_date.desc()) + and self.build_data_tbl.c.task_id + == task_id.bytes).order_by( + self.build_data_tbl.c.collected_date.desc()) rs = conn.execute(query) elif node_name: if latest: diff --git a/python/requirements-direct.txt b/python/requirements-direct.txt index 7909bfbb..3799f392 100644 --- a/python/requirements-direct.txt +++ b/python/requirements-direct.txt @@ -1,30 +1,30 @@ -# edited with compartibility with shipyard's apache-airflow 1.10.15 -alembic==1.4.3 -Beaker==1.12.0 -click==6.7 -defusedxml===0.6.0 -falcon==3.1.1 -iso8601==0.1.13 -jinja2==3.0.3 -jsonschema==3.2.0 -keystoneauth1==5.1.1 -keystonemiddleware==10.2.0 -libvirt-python==9.2.0 -netaddr==0.8.0 -oauthlib==3.1.0 -oslo.config==8.7.1 -oslo.policy==3.10.1 -oslo.versionedobjects==2.4.0 -Paste==3.5.0 -PasteDeploy==3.0.1 -psycopg2-binary==2.8.4 -PTable==0.9.2 -pyghmi==1.5.60 -pylibyaml==0.1.0 -pymongo==3.10.1 -PyYAML==5.4.1 -redfish==3.1.9 -requests==2.23.0 -SQLAlchemy==1.2.8 -ulid2==0.1.1 -uWSGI==2.0.21 + +alembic +Beaker +click +defusedxml +falcon +iso8601 +jinja2 +jsonschema +keystoneauth1 +keystonemiddleware +libvirt-python +netaddr +oauthlib +oslo.config +oslo.policy +oslo.versionedobjects +Paste +PasteDeploy +psycopg2-binary +PTable +pyghmi +pylibyaml +pymongo +PyYAML +redfish +requests +SQLAlchemy<=1.3.20 +ulid2 +uWSGI \ No newline at end of file diff --git a/python/requirements-lock.txt b/python/requirements-frozen.txt similarity index 77% rename from python/requirements-lock.txt rename to python/requirements-frozen.txt index b52049b5..f99bdedd 100644 --- a/python/requirements-lock.txt +++ b/python/requirements-frozen.txt @@ -1,25 +1,26 @@ alabaster==0.7.13 -alembic==1.4.3 +alembic==1.10.4 amqp==5.1.1 -attrs==22.2.0 +attrs==23.1.0 Babel==2.12.1 bandit==1.7.5 bcrypt==4.0.1 -Beaker==1.12.0 +Beaker==1.12.1 cachetools==5.3.0 certifi==2022.12.7 cffi==1.15.1 -chardet==3.0.4 +charset-normalizer==3.1.0 click==6.7 coverage==7.2.3 -cryptography==40.0.1 +cryptography==40.0.2 debtcollector==2.5.0 decorator==5.1.1 -defusedxml==0.6.0 +defusedxml==0.7.1 dnspython==2.3.0 docutils==0.19 dogpile.cache==1.1.8 eventlet==0.33.3 +exceptiongroup==1.1.1 falcon==3.1.1 fasteners==0.18 fixtures==4.0.1 @@ -28,16 +29,16 @@ futurist==2.4.1 gitdb==4.0.10 GitPython==3.1.31 greenlet==2.0.2 -idna==2.10 +idna==3.4 imagesize==1.4.1 iniconfig==2.0.0 -iso8601==0.1.13 -Jinja2==3.0.3 +iso8601==1.1.0 +Jinja2==3.1.2 jsonpatch==1.32 jsonpath-rw==1.4.0 jsonpointer==2.3 jsonschema==3.2.0 -keystoneauth1==5.1.1 
+keystoneauth1==5.1.2 keystonemiddleware==10.2.0 kombu==5.2.4 libvirt-python==9.2.0 @@ -50,83 +51,81 @@ mock==5.0.1 msgpack==1.0.5 netaddr==0.8.0 netifaces==0.11.0 -oauthlib==3.1.0 +oauthlib==3.2.2 os-service-types==1.7.0 oslo.cache==3.3.1 oslo.concurrency==5.1.1 -oslo.config==8.7.1 +oslo.config==9.1.1 oslo.context==5.1.1 oslo.i18n==6.0.0 oslo.log==5.2.0 oslo.messaging==14.2.0 oslo.metrics==0.6.0 oslo.middleware==5.1.1 -oslo.policy==3.10.1 +oslo.policy==4.1.1 oslo.serialization==5.1.1 oslo.service==3.1.1 oslo.utils==6.1.0 -oslo.versionedobjects==2.4.0 -packaging==23.0 -Paste==3.5.0 +oslo.versionedobjects==3.1.0 +packaging==23.1 +Paste==3.5.2 PasteDeploy==3.0.1 pbr==5.11.1 pip==23.0.1 pluggy==1.0.0 ply==3.11 prometheus-client==0.16.0 -psycopg2-binary==2.8.4 +psycopg2-binary==2.9.6 PTable==0.9.2 -py==1.11.0 pycadf==3.1.1 pycodestyle==2.10.0 pycparser==2.21 pyflakes==3.0.1 -pyghmi==1.5.60 -Pygments==2.14.0 +pyghmi==1.5.61 +Pygments==2.15.1 pylibyaml==0.1.0 pymongo==3.10.1 pyparsing==3.0.9 pyrsistent==0.19.3 -pytest==6.2.5 +pytest==7.3.1 pytest-cov==4.0.0 pytest-mock==3.10.0 python-dateutil==2.8.2 -python-editor==1.0.4 python-keystoneclient==5.1.0 pytz==2023.3 -PyYAML==5.4.1 +PyYAML==6.0 redfish==3.1.9 repoze.lru==0.7 -requests==2.23.0 +requests==2.29.0 requests-toolbelt==0.10.1 requests-unixsocket==0.3.0 responses==0.23.1 rfc3986==2.0.0 -rich==13.3.3 +rich==13.3.4 Routes==2.5.1 setuptools==56.0.0 six==1.16.0 smmap==5.0.0 snowballstemmer==2.2.0 -Sphinx==5.3.0 +Sphinx==6.2.1 sphinxcontrib-applehelp==1.0.4 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.1 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 -SQLAlchemy==1.2.8 +SQLAlchemy==1.3.21 statsd==4.0.1 stevedore==5.0.0 -toml==0.10.2 tomli==2.0.1 types-PyYAML==6.0.12.9 +typing_extensions==4.5.0 ulid2==0.1.1 -urllib3==1.25.11 +urllib3==1.26.15 uWSGI==2.0.21 vine==5.0.0 WebOb==1.8.7 -wheel==0.38.4 +wheel==0.40.0 wrapt==1.15.0 -yapf==0.32.0 +yapf==0.33.0 yappi==1.4.0 diff --git a/python/requirements-tree.txt b/python/requirements-tree.txt deleted file mode 100644 index dfb02a46..00000000 --- a/python/requirements-tree.txt +++ /dev/null @@ -1,1625 +0,0 @@ -Automat==20.2.0 -bandit==1.7.4 - - GitPython [required: >=1.0.1, installed: 3.1.31] - - gitdb [required: >=4.0.1,<5, installed: 4.0.10] - - smmap [required: >=3.0.1,<6, installed: 5.0.0] - - PyYAML [required: >=5.3.1, installed: 5.3.1] - - stevedore [required: >=1.20.0, installed: 4.1.1] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] -blinker==1.4 -cloud-init==22.4.2 -command-not-found==0.3 -configobj==5.0.6 -constantly==15.1.0 -coverage==5.3 -dbus-python==1.2.18 -Deckhand==1.0.0.dev717 - - alembic [required: ==1.4.3, installed: 1.4.3] - - Mako [required: Any, installed: 1.2.4] - - MarkupSafe [required: >=0.9.2, installed: 2.0.1] - - python-dateutil [required: Any, installed: 2.8.1] - - six [required: >=1.5, installed: 1.15.0] - - python-editor [required: >=0.3, installed: 1.0.4] - - SQLAlchemy [required: >=1.1.0, installed: 1.4.0] - - greenlet [required: !=0.4.17, installed: 2.0.2] - - amqp [required: ==2.6.1, installed: 2.6.1] - - vine [required: >=1.1.3,<5.0.0a1, installed: 1.3.0] - - Beaker [required: ==1.12.0, installed: 1.12.0] - - cryptography [required: ==3.2.1, installed: 3.2.1] - - cffi [required: >=1.8,!=1.11.3, installed: 1.15.1] - - pycparser [required: Any, installed: 2.21] - - six [required: >=1.4.1, installed: 1.15.0] - - deepdiff [required: ==5.8.1, installed: 5.8.1] - - ordered-set [required: >=4.1.0,<4.2.0, installed: 
4.1.0] - - falcon [required: ==3.1.1, installed: 3.1.1] - - hacking [required: ==4.1.0, installed: 4.1.0] - - flake8 [required: >=3.8.0,<3.9.0, installed: 3.8.4] - - mccabe [required: >=0.6.0,<0.7.0, installed: 0.6.1] - - pycodestyle [required: >=2.6.0a1,<2.7.0, installed: 2.6.0] - - pyflakes [required: >=2.2.0,<2.3.0, installed: 2.2.0] - - jsonpath-ng [required: ==1.5.3, installed: 1.5.3] - - decorator [required: Any, installed: 5.1.1] - - ply [required: Any, installed: 3.11] - - six [required: Any, installed: 1.15.0] - - jsonpickle [required: ==1.4.1, installed: 1.4.1] - - importlib-metadata [required: Any, installed: 4.6.4] - - jsonschema [required: ==3.2.0, installed: 3.2.0] - - keystoneauth1 [required: ==5.1.1, installed: 5.1.1] - - iso8601 [required: >=0.1.11, installed: 1.1.0] - - os-service-types [required: >=1.2.0, installed: 1.7.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - requests [required: >=2.14.2, installed: 2.28.2] - - certifi [required: >=2017.4.17, installed: 2020.6.20] - - charset-normalizer [required: >=2,<4, installed: 3.1.0] - - idna [required: >=2.5,<4, installed: 2.10] - - urllib3 [required: >=1.21.1,<1.27, installed: 1.25.11] - - six [required: >=1.10.0, installed: 1.15.0] - - stevedore [required: >=1.20.0, installed: 4.1.1] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - keystonemiddleware [required: ==10.2.0, installed: 10.2.0] - - keystoneauth1 [required: >=3.12.0, installed: 5.1.1] - - iso8601 [required: >=0.1.11, installed: 1.1.0] - - os-service-types [required: >=1.2.0, installed: 1.7.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - requests [required: >=2.14.2, installed: 2.28.2] - - certifi [required: >=2017.4.17, installed: 2020.6.20] - - charset-normalizer [required: >=2,<4, installed: 3.1.0] - - idna [required: >=2.5,<4, installed: 2.10] - - urllib3 [required: >=1.21.1,<1.27, installed: 1.25.11] - - six [required: >=1.10.0, installed: 1.15.0] - - stevedore [required: >=1.20.0, installed: 4.1.1] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - oslo.cache [required: >=1.26.0, installed: 2.10.1] - - dogpile.cache [required: >=1.1.5, installed: 1.1.8] - - decorator [required: >=4.0.0, installed: 5.1.1] - - stevedore [required: >=3.0.0, installed: 4.1.1] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - oslo.config [required: >=8.1.0, installed: 8.7.1] - - debtcollector [required: >=1.2.0, installed: 2.5.0] - - wrapt [required: >=1.7.0, installed: 1.15.0] - - netaddr [required: >=0.7.18, installed: 0.8.0] - - oslo.i18n [required: >=3.15.3, installed: 6.0.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - PyYAML [required: >=5.1, installed: 5.3.1] - - requests [required: >=2.18.0, installed: 2.28.2] - - certifi [required: >=2017.4.17, installed: 2020.6.20] - - charset-normalizer [required: >=2,<4, installed: 3.1.0] - - idna [required: >=2.5,<4, installed: 2.10] - - urllib3 [required: >=1.21.1,<1.27, installed: 1.25.11] - - rfc3986 [required: >=1.2.0, installed: 2.0.0] - - stevedore [required: >=1.20.0, installed: 4.1.1] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - oslo.i18n [required: >=5.0.0, installed: 6.0.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - oslo.log [required: >=4.2.1, installed: 4.6.0] - - debtcollector [required: >=1.19.0, installed: 2.5.0] - - wrapt [required: >=1.7.0, installed: 1.15.0] - - oslo.config [required: >=5.2.0, installed: 8.7.1] - - 
debtcollector [required: >=1.2.0, installed: 2.5.0] - - wrapt [required: >=1.7.0, installed: 1.15.0] - - netaddr [required: >=0.7.18, installed: 0.8.0] - - oslo.i18n [required: >=3.15.3, installed: 6.0.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - PyYAML [required: >=5.1, installed: 5.3.1] - - requests [required: >=2.18.0, installed: 2.28.2] - - certifi [required: >=2017.4.17, installed: 2020.6.20] - - charset-normalizer [required: >=2,<4, installed: 3.1.0] - - idna [required: >=2.5,<4, installed: 2.10] - - urllib3 [required: >=1.21.1,<1.27, installed: 1.25.11] - - rfc3986 [required: >=1.2.0, installed: 2.0.0] - - stevedore [required: >=1.20.0, installed: 4.1.1] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - oslo.context [required: >=2.20.0, installed: 4.1.0] - - debtcollector [required: >=1.2.0, installed: 2.5.0] - - wrapt [required: >=1.7.0, installed: 1.15.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - oslo.i18n [required: >=3.20.0, installed: 6.0.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - oslo.serialization [required: >=2.25.0, installed: 4.2.0] - - msgpack [required: >=0.5.2, installed: 1.0.5] - - oslo.utils [required: >=3.33.0, installed: 4.12.3] - - debtcollector [required: >=1.2.0, installed: 2.5.0] - - wrapt [required: >=1.7.0, installed: 1.15.0] - - iso8601 [required: >=0.1.11, installed: 1.1.0] - - netaddr [required: >=0.7.18, installed: 0.8.0] - - netifaces [required: >=0.10.4, installed: 0.11.0] - - oslo.i18n [required: >=3.15.3, installed: 6.0.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - packaging [required: >=20.4, installed: 23.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - pyparsing [required: >=2.1.0, installed: 2.4.7] - - pytz [required: >=2013.6, installed: 2022.1] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - pytz [required: >=2013.6, installed: 2022.1] - - oslo.utils [required: >=3.36.0, installed: 4.12.3] - - debtcollector [required: >=1.2.0, installed: 2.5.0] - - wrapt [required: >=1.7.0, installed: 1.15.0] - - iso8601 [required: >=0.1.11, installed: 1.1.0] - - netaddr [required: >=0.7.18, installed: 0.8.0] - - netifaces [required: >=0.10.4, installed: 0.11.0] - - oslo.i18n [required: >=3.15.3, installed: 6.0.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - packaging [required: >=20.4, installed: 23.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - pyparsing [required: >=2.1.0, installed: 2.4.7] - - pytz [required: >=2013.6, installed: 2022.1] - - pbr [required: >=3.1.1, installed: 5.5.1] - - pyinotify [required: >=0.9.6, installed: 0.9.6] - - python-dateutil [required: >=2.7.0, installed: 2.8.1] - - six [required: >=1.5, installed: 1.15.0] - - oslo.utils [required: >=4.2.0, installed: 4.12.3] - - debtcollector [required: >=1.2.0, installed: 2.5.0] - - wrapt [required: >=1.7.0, installed: 1.15.0] - - iso8601 [required: >=0.1.11, installed: 1.1.0] - - netaddr [required: >=0.7.18, installed: 0.8.0] - - netifaces [required: >=0.10.4, installed: 0.11.0] - - oslo.i18n [required: >=3.15.3, installed: 6.0.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - packaging [required: >=20.4, installed: 23.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - pyparsing [required: >=2.1.0, installed: 2.4.7] - - pytz [required: >=2013.6, installed: 2022.1] - - oslo.config [required: >=5.2.0, installed: 8.7.1] - - debtcollector [required: >=1.2.0, installed: 2.5.0] - - wrapt [required: >=1.7.0, installed: 1.15.0] - - netaddr [required: >=0.7.18, 
installed: 0.8.0] - - oslo.i18n [required: >=3.15.3, installed: 6.0.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - PyYAML [required: >=5.1, installed: 5.3.1] - - requests [required: >=2.18.0, installed: 2.28.2] - - certifi [required: >=2017.4.17, installed: 2020.6.20] - - charset-normalizer [required: >=2,<4, installed: 3.1.0] - - idna [required: >=2.5,<4, installed: 2.10] - - urllib3 [required: >=1.21.1,<1.27, installed: 1.25.11] - - rfc3986 [required: >=1.2.0, installed: 2.0.0] - - stevedore [required: >=1.20.0, installed: 4.1.1] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - oslo.context [required: >=2.19.2, installed: 4.1.0] - - debtcollector [required: >=1.2.0, installed: 2.5.0] - - wrapt [required: >=1.7.0, installed: 1.15.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - oslo.i18n [required: >=3.15.3, installed: 6.0.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - oslo.log [required: >=3.36.0, installed: 4.6.0] - - debtcollector [required: >=1.19.0, installed: 2.5.0] - - wrapt [required: >=1.7.0, installed: 1.15.0] - - oslo.config [required: >=5.2.0, installed: 8.7.1] - - debtcollector [required: >=1.2.0, installed: 2.5.0] - - wrapt [required: >=1.7.0, installed: 1.15.0] - - netaddr [required: >=0.7.18, installed: 0.8.0] - - oslo.i18n [required: >=3.15.3, installed: 6.0.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - PyYAML [required: >=5.1, installed: 5.3.1] - - requests [required: >=2.18.0, installed: 2.28.2] - - certifi [required: >=2017.4.17, installed: 2020.6.20] - - charset-normalizer [required: >=2,<4, installed: 3.1.0] - - idna [required: >=2.5,<4, installed: 2.10] - - urllib3 [required: >=1.21.1,<1.27, installed: 1.25.11] - - rfc3986 [required: >=1.2.0, installed: 2.0.0] - - stevedore [required: >=1.20.0, installed: 4.1.1] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - oslo.context [required: >=2.20.0, installed: 4.1.0] - - debtcollector [required: >=1.2.0, installed: 2.5.0] - - wrapt [required: >=1.7.0, installed: 1.15.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - oslo.i18n [required: >=3.20.0, installed: 6.0.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - oslo.serialization [required: >=2.25.0, installed: 4.2.0] - - msgpack [required: >=0.5.2, installed: 1.0.5] - - oslo.utils [required: >=3.33.0, installed: 4.12.3] - - debtcollector [required: >=1.2.0, installed: 2.5.0] - - wrapt [required: >=1.7.0, installed: 1.15.0] - - iso8601 [required: >=0.1.11, installed: 1.1.0] - - netaddr [required: >=0.7.18, installed: 0.8.0] - - netifaces [required: >=0.10.4, installed: 0.11.0] - - oslo.i18n [required: >=3.15.3, installed: 6.0.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - packaging [required: >=20.4, installed: 23.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - pyparsing [required: >=2.1.0, installed: 2.4.7] - - pytz [required: >=2013.6, installed: 2022.1] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - pytz [required: >=2013.6, installed: 2022.1] - - oslo.utils [required: >=3.36.0, installed: 4.12.3] - - debtcollector [required: >=1.2.0, installed: 2.5.0] - - wrapt [required: >=1.7.0, installed: 1.15.0] - - iso8601 [required: >=0.1.11, installed: 1.1.0] - - netaddr [required: >=0.7.18, installed: 0.8.0] - - netifaces [required: >=0.10.4, installed: 0.11.0] - - oslo.i18n [required: >=3.15.3, installed: 6.0.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - packaging [required: >=20.4, installed: 23.0] - - pbr [required: 
>=2.0.0,!=2.1.0, installed: 5.5.1] - - pyparsing [required: >=2.1.0, installed: 2.4.7] - - pytz [required: >=2013.6, installed: 2022.1] - - pbr [required: >=3.1.1, installed: 5.5.1] - - pyinotify [required: >=0.9.6, installed: 0.9.6] - - python-dateutil [required: >=2.7.0, installed: 2.8.1] - - six [required: >=1.5, installed: 1.15.0] - - oslo.serialization [required: >=2.18.0,!=2.19.1, installed: 4.2.0] - - msgpack [required: >=0.5.2, installed: 1.0.5] - - oslo.utils [required: >=3.33.0, installed: 4.12.3] - - debtcollector [required: >=1.2.0, installed: 2.5.0] - - wrapt [required: >=1.7.0, installed: 1.15.0] - - iso8601 [required: >=0.1.11, installed: 1.1.0] - - netaddr [required: >=0.7.18, installed: 0.8.0] - - netifaces [required: >=0.10.4, installed: 0.11.0] - - oslo.i18n [required: >=3.15.3, installed: 6.0.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - packaging [required: >=20.4, installed: 23.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - pyparsing [required: >=2.1.0, installed: 2.4.7] - - pytz [required: >=2013.6, installed: 2022.1] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - pytz [required: >=2013.6, installed: 2022.1] - - oslo.utils [required: >=3.33.0, installed: 4.12.3] - - debtcollector [required: >=1.2.0, installed: 2.5.0] - - wrapt [required: >=1.7.0, installed: 1.15.0] - - iso8601 [required: >=0.1.11, installed: 1.1.0] - - netaddr [required: >=0.7.18, installed: 0.8.0] - - netifaces [required: >=0.10.4, installed: 0.11.0] - - oslo.i18n [required: >=3.15.3, installed: 6.0.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - packaging [required: >=20.4, installed: 23.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - pyparsing [required: >=2.1.0, installed: 2.4.7] - - pytz [required: >=2013.6, installed: 2022.1] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - pycadf [required: >=1.1.0,!=2.0.0, installed: 3.1.1] - - debtcollector [required: >=1.2.0, installed: 2.5.0] - - wrapt [required: >=1.7.0, installed: 1.15.0] - - oslo.config [required: >=5.2.0, installed: 8.7.1] - - debtcollector [required: >=1.2.0, installed: 2.5.0] - - wrapt [required: >=1.7.0, installed: 1.15.0] - - netaddr [required: >=0.7.18, installed: 0.8.0] - - oslo.i18n [required: >=3.15.3, installed: 6.0.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - PyYAML [required: >=5.1, installed: 5.3.1] - - requests [required: >=2.18.0, installed: 2.28.2] - - certifi [required: >=2017.4.17, installed: 2020.6.20] - - charset-normalizer [required: >=2,<4, installed: 3.1.0] - - idna [required: >=2.5,<4, installed: 2.10] - - urllib3 [required: >=1.21.1,<1.27, installed: 1.25.11] - - rfc3986 [required: >=1.2.0, installed: 2.0.0] - - stevedore [required: >=1.20.0, installed: 4.1.1] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - oslo.serialization [required: >=2.18.0,!=2.19.1, installed: 4.2.0] - - msgpack [required: >=0.5.2, installed: 1.0.5] - - oslo.utils [required: >=3.33.0, installed: 4.12.3] - - debtcollector [required: >=1.2.0, installed: 2.5.0] - - wrapt [required: >=1.7.0, installed: 1.15.0] - - iso8601 [required: >=0.1.11, installed: 1.1.0] - - netaddr [required: >=0.7.18, installed: 0.8.0] - - netifaces [required: >=0.10.4, installed: 0.11.0] - - oslo.i18n [required: >=3.15.3, installed: 6.0.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - packaging [required: >=20.4, installed: 23.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - pyparsing [required: >=2.1.0, installed: 2.4.7] - - pytz [required: 
>=2013.6, installed: 2022.1] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - pytz [required: >=2013.6, installed: 2022.1] - - pytz [required: >=2013.6, installed: 2022.1] - - six [required: >=1.10.0, installed: 1.15.0] - - python-keystoneclient [required: >=3.20.0, installed: 3.22.0] - - debtcollector [required: >=1.2.0, installed: 2.5.0] - - wrapt [required: >=1.7.0, installed: 1.15.0] - - keystoneauth1 [required: >=3.4.0, installed: 5.1.1] - - iso8601 [required: >=0.1.11, installed: 1.1.0] - - os-service-types [required: >=1.2.0, installed: 1.7.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - requests [required: >=2.14.2, installed: 2.28.2] - - certifi [required: >=2017.4.17, installed: 2020.6.20] - - charset-normalizer [required: >=2,<4, installed: 3.1.0] - - idna [required: >=2.5,<4, installed: 2.10] - - urllib3 [required: >=1.21.1,<1.27, installed: 1.25.11] - - six [required: >=1.10.0, installed: 1.15.0] - - stevedore [required: >=1.20.0, installed: 4.1.1] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - oslo.config [required: >=5.2.0, installed: 8.7.1] - - debtcollector [required: >=1.2.0, installed: 2.5.0] - - wrapt [required: >=1.7.0, installed: 1.15.0] - - netaddr [required: >=0.7.18, installed: 0.8.0] - - oslo.i18n [required: >=3.15.3, installed: 6.0.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - PyYAML [required: >=5.1, installed: 5.3.1] - - requests [required: >=2.18.0, installed: 2.28.2] - - certifi [required: >=2017.4.17, installed: 2020.6.20] - - charset-normalizer [required: >=2,<4, installed: 3.1.0] - - idna [required: >=2.5,<4, installed: 2.10] - - urllib3 [required: >=1.21.1,<1.27, installed: 1.25.11] - - rfc3986 [required: >=1.2.0, installed: 2.0.0] - - stevedore [required: >=1.20.0, installed: 4.1.1] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - oslo.i18n [required: >=3.15.3, installed: 6.0.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - oslo.serialization [required: >=2.18.0,!=2.19.1, installed: 4.2.0] - - msgpack [required: >=0.5.2, installed: 1.0.5] - - oslo.utils [required: >=3.33.0, installed: 4.12.3] - - debtcollector [required: >=1.2.0, installed: 2.5.0] - - wrapt [required: >=1.7.0, installed: 1.15.0] - - iso8601 [required: >=0.1.11, installed: 1.1.0] - - netaddr [required: >=0.7.18, installed: 0.8.0] - - netifaces [required: >=0.10.4, installed: 0.11.0] - - oslo.i18n [required: >=3.15.3, installed: 6.0.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - packaging [required: >=20.4, installed: 23.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - pyparsing [required: >=2.1.0, installed: 2.4.7] - - pytz [required: >=2013.6, installed: 2022.1] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - pytz [required: >=2013.6, installed: 2022.1] - - oslo.utils [required: >=3.33.0, installed: 4.12.3] - - debtcollector [required: >=1.2.0, installed: 2.5.0] - - wrapt [required: >=1.7.0, installed: 1.15.0] - - iso8601 [required: >=0.1.11, installed: 1.1.0] - - netaddr [required: >=0.7.18, installed: 0.8.0] - - netifaces [required: >=0.10.4, installed: 0.11.0] - - oslo.i18n [required: >=3.15.3, installed: 6.0.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - packaging [required: >=20.4, installed: 23.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - pyparsing [required: >=2.1.0, installed: 2.4.7] - - pytz [required: >=2013.6, installed: 2022.1] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - 
requests [required: >=2.14.2, installed: 2.28.2] - - certifi [required: >=2017.4.17, installed: 2020.6.20] - - charset-normalizer [required: >=2,<4, installed: 3.1.0] - - idna [required: >=2.5,<4, installed: 2.10] - - urllib3 [required: >=1.21.1,<1.27, installed: 1.25.11] - - six [required: >=1.10.0, installed: 1.15.0] - - stevedore [required: >=1.20.0, installed: 4.1.1] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - requests [required: >=2.14.2, installed: 2.28.2] - - certifi [required: >=2017.4.17, installed: 2020.6.20] - - charset-normalizer [required: >=2,<4, installed: 3.1.0] - - idna [required: >=2.5,<4, installed: 2.10] - - urllib3 [required: >=1.21.1,<1.27, installed: 1.25.11] - - six [required: >=1.10.0, installed: 1.15.0] - - WebOb [required: >=1.7.1, installed: 1.8.7] - - kombu [required: ==4.6.11, installed: 4.6.11] - - amqp [required: >=2.6.0,<2.7, installed: 2.6.1] - - vine [required: >=1.1.3,<5.0.0a1, installed: 1.3.0] - - networkx [required: ==2.5, installed: 2.5] - - decorator [required: >=4.3.0, installed: 5.1.1] - - oslo.cache [required: ==2.10.1, installed: 2.10.1] - - dogpile.cache [required: >=1.1.5, installed: 1.1.8] - - decorator [required: >=4.0.0, installed: 5.1.1] - - stevedore [required: >=3.0.0, installed: 4.1.1] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - oslo.config [required: >=8.1.0, installed: 8.7.1] - - debtcollector [required: >=1.2.0, installed: 2.5.0] - - wrapt [required: >=1.7.0, installed: 1.15.0] - - netaddr [required: >=0.7.18, installed: 0.8.0] - - oslo.i18n [required: >=3.15.3, installed: 6.0.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - PyYAML [required: >=5.1, installed: 5.3.1] - - requests [required: >=2.18.0, installed: 2.28.2] - - certifi [required: >=2017.4.17, installed: 2020.6.20] - - charset-normalizer [required: >=2,<4, installed: 3.1.0] - - idna [required: >=2.5,<4, installed: 2.10] - - urllib3 [required: >=1.21.1,<1.27, installed: 1.25.11] - - rfc3986 [required: >=1.2.0, installed: 2.0.0] - - stevedore [required: >=1.20.0, installed: 4.1.1] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - oslo.i18n [required: >=5.0.0, installed: 6.0.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - oslo.log [required: >=4.2.1, installed: 4.6.0] - - debtcollector [required: >=1.19.0, installed: 2.5.0] - - wrapt [required: >=1.7.0, installed: 1.15.0] - - oslo.config [required: >=5.2.0, installed: 8.7.1] - - debtcollector [required: >=1.2.0, installed: 2.5.0] - - wrapt [required: >=1.7.0, installed: 1.15.0] - - netaddr [required: >=0.7.18, installed: 0.8.0] - - oslo.i18n [required: >=3.15.3, installed: 6.0.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - PyYAML [required: >=5.1, installed: 5.3.1] - - requests [required: >=2.18.0, installed: 2.28.2] - - certifi [required: >=2017.4.17, installed: 2020.6.20] - - charset-normalizer [required: >=2,<4, installed: 3.1.0] - - idna [required: >=2.5,<4, installed: 2.10] - - urllib3 [required: >=1.21.1,<1.27, installed: 1.25.11] - - rfc3986 [required: >=1.2.0, installed: 2.0.0] - - stevedore [required: >=1.20.0, installed: 4.1.1] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - oslo.context [required: >=2.20.0, installed: 4.1.0] - - debtcollector [required: >=1.2.0, installed: 2.5.0] - - wrapt [required: >=1.7.0, installed: 1.15.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - oslo.i18n [required: >=3.20.0, installed: 6.0.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - oslo.serialization [required: >=2.25.0, 
installed: 4.2.0] - - msgpack [required: >=0.5.2, installed: 1.0.5] - - oslo.utils [required: >=3.33.0, installed: 4.12.3] - - debtcollector [required: >=1.2.0, installed: 2.5.0] - - wrapt [required: >=1.7.0, installed: 1.15.0] - - iso8601 [required: >=0.1.11, installed: 1.1.0] - - netaddr [required: >=0.7.18, installed: 0.8.0] - - netifaces [required: >=0.10.4, installed: 0.11.0] - - oslo.i18n [required: >=3.15.3, installed: 6.0.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - packaging [required: >=20.4, installed: 23.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - pyparsing [required: >=2.1.0, installed: 2.4.7] - - pytz [required: >=2013.6, installed: 2022.1] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - pytz [required: >=2013.6, installed: 2022.1] - - oslo.utils [required: >=3.36.0, installed: 4.12.3] - - debtcollector [required: >=1.2.0, installed: 2.5.0] - - wrapt [required: >=1.7.0, installed: 1.15.0] - - iso8601 [required: >=0.1.11, installed: 1.1.0] - - netaddr [required: >=0.7.18, installed: 0.8.0] - - netifaces [required: >=0.10.4, installed: 0.11.0] - - oslo.i18n [required: >=3.15.3, installed: 6.0.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - packaging [required: >=20.4, installed: 23.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - pyparsing [required: >=2.1.0, installed: 2.4.7] - - pytz [required: >=2013.6, installed: 2022.1] - - pbr [required: >=3.1.1, installed: 5.5.1] - - pyinotify [required: >=0.9.6, installed: 0.9.6] - - python-dateutil [required: >=2.7.0, installed: 2.8.1] - - six [required: >=1.5, installed: 1.15.0] - - oslo.utils [required: >=4.2.0, installed: 4.12.3] - - debtcollector [required: >=1.2.0, installed: 2.5.0] - - wrapt [required: >=1.7.0, installed: 1.15.0] - - iso8601 [required: >=0.1.11, installed: 1.1.0] - - netaddr [required: >=0.7.18, installed: 0.8.0] - - netifaces [required: >=0.10.4, installed: 0.11.0] - - oslo.i18n [required: >=3.15.3, installed: 6.0.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - packaging [required: >=20.4, installed: 23.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - pyparsing [required: >=2.1.0, installed: 2.4.7] - - pytz [required: >=2013.6, installed: 2022.1] - - oslo.concurrency [required: ==5.0.1, installed: 5.0.1] - - fasteners [required: >=0.7.0, installed: 0.18] - - oslo.config [required: >=5.2.0, installed: 8.7.1] - - debtcollector [required: >=1.2.0, installed: 2.5.0] - - wrapt [required: >=1.7.0, installed: 1.15.0] - - netaddr [required: >=0.7.18, installed: 0.8.0] - - oslo.i18n [required: >=3.15.3, installed: 6.0.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - PyYAML [required: >=5.1, installed: 5.3.1] - - requests [required: >=2.18.0, installed: 2.28.2] - - certifi [required: >=2017.4.17, installed: 2020.6.20] - - charset-normalizer [required: >=2,<4, installed: 3.1.0] - - idna [required: >=2.5,<4, installed: 2.10] - - urllib3 [required: >=1.21.1,<1.27, installed: 1.25.11] - - rfc3986 [required: >=1.2.0, installed: 2.0.0] - - stevedore [required: >=1.20.0, installed: 4.1.1] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - oslo.i18n [required: >=3.15.3, installed: 6.0.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - oslo.utils [required: >=3.33.0, installed: 4.12.3] - - debtcollector [required: >=1.2.0, installed: 2.5.0] - - wrapt [required: >=1.7.0, installed: 1.15.0] - - iso8601 [required: >=0.1.11, installed: 1.1.0] - - netaddr [required: >=0.7.18, installed: 0.8.0] - - netifaces 
[required: >=0.10.4, installed: 0.11.0] - - oslo.i18n [required: >=3.15.3, installed: 6.0.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - packaging [required: >=20.4, installed: 23.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - pyparsing [required: >=2.1.0, installed: 2.4.7] - - pytz [required: >=2013.6, installed: 2022.1] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - oslo.config [required: ==8.7.1, installed: 8.7.1] - - debtcollector [required: >=1.2.0, installed: 2.5.0] - - wrapt [required: >=1.7.0, installed: 1.15.0] - - netaddr [required: >=0.7.18, installed: 0.8.0] - - oslo.i18n [required: >=3.15.3, installed: 6.0.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - PyYAML [required: >=5.1, installed: 5.3.1] - - requests [required: >=2.18.0, installed: 2.28.2] - - certifi [required: >=2017.4.17, installed: 2020.6.20] - - charset-normalizer [required: >=2,<4, installed: 3.1.0] - - idna [required: >=2.5,<4, installed: 2.10] - - urllib3 [required: >=1.21.1,<1.27, installed: 1.25.11] - - rfc3986 [required: >=1.2.0, installed: 2.0.0] - - stevedore [required: >=1.20.0, installed: 4.1.1] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - oslo.context [required: ==4.1.0, installed: 4.1.0] - - debtcollector [required: >=1.2.0, installed: 2.5.0] - - wrapt [required: >=1.7.0, installed: 1.15.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - oslo.db [required: ==11.2.0, installed: 11.2.0] - - alembic [required: >=0.9.6, installed: 1.4.3] - - Mako [required: Any, installed: 1.2.4] - - MarkupSafe [required: >=0.9.2, installed: 2.0.1] - - python-dateutil [required: Any, installed: 2.8.1] - - six [required: >=1.5, installed: 1.15.0] - - python-editor [required: >=0.3, installed: 1.0.4] - - SQLAlchemy [required: >=1.1.0, installed: 1.4.0] - - greenlet [required: !=0.4.17, installed: 2.0.2] - - debtcollector [required: >=1.2.0, installed: 2.5.0] - - wrapt [required: >=1.7.0, installed: 1.15.0] - - oslo.config [required: >=5.2.0, installed: 8.7.1] - - debtcollector [required: >=1.2.0, installed: 2.5.0] - - wrapt [required: >=1.7.0, installed: 1.15.0] - - netaddr [required: >=0.7.18, installed: 0.8.0] - - oslo.i18n [required: >=3.15.3, installed: 6.0.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - PyYAML [required: >=5.1, installed: 5.3.1] - - requests [required: >=2.18.0, installed: 2.28.2] - - certifi [required: >=2017.4.17, installed: 2020.6.20] - - charset-normalizer [required: >=2,<4, installed: 3.1.0] - - idna [required: >=2.5,<4, installed: 2.10] - - urllib3 [required: >=1.21.1,<1.27, installed: 1.25.11] - - rfc3986 [required: >=1.2.0, installed: 2.0.0] - - stevedore [required: >=1.20.0, installed: 4.1.1] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - oslo.i18n [required: >=3.15.3, installed: 6.0.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - oslo.utils [required: >=3.33.0, installed: 4.12.3] - - debtcollector [required: >=1.2.0, installed: 2.5.0] - - wrapt [required: >=1.7.0, installed: 1.15.0] - - iso8601 [required: >=0.1.11, installed: 1.1.0] - - netaddr [required: >=0.7.18, installed: 0.8.0] - - netifaces [required: >=0.10.4, installed: 0.11.0] - - oslo.i18n [required: >=3.15.3, installed: 6.0.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - packaging [required: >=20.4, installed: 23.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - pyparsing [required: >=2.1.0, installed: 2.4.7] - - pytz [required: >=2013.6, installed: 2022.1] - - pbr [required: >=2.0.0,!=2.1.0, 
installed: 5.5.1] - - SQLAlchemy [required: >=1.4.0, installed: 1.4.0] - - greenlet [required: !=0.4.17, installed: 2.0.2] - - sqlalchemy-migrate [required: >=0.11.0, installed: 0.13.0] - - decorator [required: Any, installed: 5.1.1] - - pbr [required: >=1.8, installed: 5.5.1] - - six [required: >=1.7.0, installed: 1.15.0] - - SQLAlchemy [required: >=0.9.6, installed: 1.4.0] - - greenlet [required: !=0.4.17, installed: 2.0.2] - - sqlparse [required: Any, installed: 0.4.3] - - Tempita [required: >=0.4, installed: 0.5.2] - - stevedore [required: >=1.20.0, installed: 4.1.1] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - testresources [required: >=2.0.0, installed: 2.0.1] - - pbr [required: >=1.8, installed: 5.5.1] - - testscenarios [required: >=0.4, installed: 0.5.0] - - pbr [required: >=0.11, installed: 5.5.1] - - testtools [required: Any, installed: 2.5.0] - - extras [required: >=1.0.0, installed: 1.0.0] - - fixtures [required: >=1.3.0, installed: 3.0.0] - - pbr [required: >=0.11, installed: 5.5.1] - - six [required: Any, installed: 1.15.0] - - pbr [required: >=0.11, installed: 5.5.1] - - oslo.log [required: ==4.6.0, installed: 4.6.0] - - debtcollector [required: >=1.19.0, installed: 2.5.0] - - wrapt [required: >=1.7.0, installed: 1.15.0] - - oslo.config [required: >=5.2.0, installed: 8.7.1] - - debtcollector [required: >=1.2.0, installed: 2.5.0] - - wrapt [required: >=1.7.0, installed: 1.15.0] - - netaddr [required: >=0.7.18, installed: 0.8.0] - - oslo.i18n [required: >=3.15.3, installed: 6.0.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - PyYAML [required: >=5.1, installed: 5.3.1] - - requests [required: >=2.18.0, installed: 2.28.2] - - certifi [required: >=2017.4.17, installed: 2020.6.20] - - charset-normalizer [required: >=2,<4, installed: 3.1.0] - - idna [required: >=2.5,<4, installed: 2.10] - - urllib3 [required: >=1.21.1,<1.27, installed: 1.25.11] - - rfc3986 [required: >=1.2.0, installed: 2.0.0] - - stevedore [required: >=1.20.0, installed: 4.1.1] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - oslo.context [required: >=2.20.0, installed: 4.1.0] - - debtcollector [required: >=1.2.0, installed: 2.5.0] - - wrapt [required: >=1.7.0, installed: 1.15.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - oslo.i18n [required: >=3.20.0, installed: 6.0.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - oslo.serialization [required: >=2.25.0, installed: 4.2.0] - - msgpack [required: >=0.5.2, installed: 1.0.5] - - oslo.utils [required: >=3.33.0, installed: 4.12.3] - - debtcollector [required: >=1.2.0, installed: 2.5.0] - - wrapt [required: >=1.7.0, installed: 1.15.0] - - iso8601 [required: >=0.1.11, installed: 1.1.0] - - netaddr [required: >=0.7.18, installed: 0.8.0] - - netifaces [required: >=0.10.4, installed: 0.11.0] - - oslo.i18n [required: >=3.15.3, installed: 6.0.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - packaging [required: >=20.4, installed: 23.0] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - pyparsing [required: >=2.1.0, installed: 2.4.7] - - pytz [required: >=2013.6, installed: 2022.1] - - pbr [required: >=2.0.0,!=2.1.0, installed: 5.5.1] - - pytz [required: >=2013.6, installed: 2022.1] - - oslo.utils [required: >=3.36.0, installed: 4.12.3] - - debtcollector [required: >=1.2.0, installed: 2.5.0] - - wrapt [required: >=1.7.0, installed: 1.15.0] - - iso8601 [required: >=0.1.11, installed: 1.1.0] - - netaddr [required: >=0.7.18, installed: 0.8.0] - - netifaces [required: >=0.10.4, installed: 0.11.0] - - 
[... removed-file listing truncated here for readability: the "-" lines in this span continue the pipdeptree-style frozen-dependency dump that this patch deletes. They repeat the nested requirement trees for the oslo.* libraries (oslo.messaging, oslo.config, oslo.log, oslo.context, oslo.serialization, oslo.service, oslo.middleware, oslo.metrics, oslo.policy, oslo.utils), keystoneauth1, python-keystoneclient, python-barbicanclient, requests, stevedore, pbr and the sphinx/stestr/oslotest test tooling, each entry shown with its required range and installed version, and close with the remaining top-level environment pins (distro, gabbi, git-review, launchpadlib, libvirt-python, openstacksdk, os-testr, oslotest, pexpect, pifpaf, pip, pipdeptree, pipreqs, testrepository, tox, Twisted, uWSGI, Werkzeug, yq and others), ending with zipp==1.0.0 and zope.interface==5.4.0 ...]
diff --git a/python/requirements.txt b/python/requirements.txt
new file mode 100644
index 00000000..5d4c01fe
--- /dev/null
+++ b/python/requirements.txt
@@ -0,0 +1,3 @@
+# Warning: This file should be empty.
+# Specify direct dependencies in requirements-direct.txt instead.
+-r requirements-direct.txt
diff --git a/python/setup.py b/python/setup.py
index 16a894cc..fc93a180 100644
--- a/python/setup.py
+++ b/python/setup.py
@@ -36,7 +36,6 @@ setup(
         'drydock_provisioner = drydock_provisioner.config:list_opts',
         'oslo.policy.policies':
         'drydock_provisioner = drydock_provisioner.policy:list_policies',
-        'console_scripts':
-        'drydock = drydock_provisioner.cli.commands:drydock'
+        'console_scripts': 'drydock = drydock_provisioner.cli.commands:drydock'
     },
 )
diff --git a/python/requirements-test.txt b/python/test-requirements.txt
similarity index 86%
rename from python/requirements-test.txt
rename to python/test-requirements.txt
index 39c828d0..0ea77b9e 100644
--- a/python/requirements-test.txt
+++ b/python/test-requirements.txt
@@ -5,8 +5,8 @@ jsonschema==3.2.0
 mock==5.0.1
 pylibyaml==0.1.0
 pymongo==3.10.1
-pytest==6.2.5
-pytest-cov
+pytest >= 3.0
+pytest-cov==4.0.0
 pytest-mock
 responses==0.23.1
 setuptools==56.0.0
diff --git a/python/tests/conftest.py b/python/tests/conftest.py
index 4cda5a0e..29b44f4a 100644
--- a/python/tests/conftest.py
+++ b/python/tests/conftest.py
@@ -45,15 +45,15 @@ def yaml_ingester():
 
 
 @pytest.fixture()
 def deckhand_orchestrator(drydock_state, deckhand_ingester):
-    orchestrator = Orchestrator(
-        state_manager=drydock_state, ingester=deckhand_ingester)
+    orchestrator = Orchestrator(state_manager=drydock_state,
+                                ingester=deckhand_ingester)
     return orchestrator
 
 
 @pytest.fixture()
 def yaml_orchestrator(drydock_state, yaml_ingester):
-    orchestrator = Orchestrator(
-        state_manager=drydock_state, ingester=yaml_ingester)
+    orchestrator = Orchestrator(state_manager=drydock_state,
+                                ingester=yaml_ingester)
     return orchestrator
 
@@ -101,8 +101,8 @@ def setup(setup_logging):
         group="database",
         override="postgresql+psycopg2://drydock:drydock@localhost:5432/drydock"
     )
-    config.config_mgr.conf.set_override(
-        name="leader_grace_period", override=15)
+    config.config_mgr.conf.set_override(name="leader_grace_period",
+                                        override=15)
     config.config_mgr.conf.set_override(name="poll_interval", override=3)
     return
 
@@ -124,8 +124,7 @@ def setup_logging():
     logger.propagate = False
     formatter = logging.Formatter(
         "%(asctime)s - %(levelname)s - %(user)s - %(req_id)s"
-        " - %(external_ctx)s - %(end_user)s - %(message)s"
-    )
+        " - %(external_ctx)s - %(end_user)s - %(message)s")
 
     ch = logging.StreamHandler()
     ch.setFormatter(formatter)
@@ -134,13 +133,13 @@ def mock_get_build_data(drydock_state):
+
     def side_effect(**kwargs):
-        build_data = objects.BuildData(
-            node_name="test",
-            task_id="tid",
-            generator="lshw",
-            data_format="text/plain",
-            data_element="")
+        build_data = objects.BuildData(node_name="test",
+                                       task_id="tid",
+                                       generator="lshw",
+                                       data_format="text/plain",
+                                       data_element="")
         return [build_data]
 
     drydock_state.real_get_build_data = drydock_state.get_build_data
diff --git a/python/tests/integration/postgres/test_action_config_node_prov.py b/python/tests/integration/postgres/test_action_config_node_prov.py
index 1a22fba0..825a192f 100644
--- a/python/tests/integration/postgres/test_action_config_node_prov.py
+++ b/python/tests/integration/postgres/test_action_config_node_prov.py
@@ -17,6 +17,7 @@ from drydock_provisioner.drivers.node.maasdriver.actions.node import ConfigureNo
 
 
 class TestActionConfigureNodeProvisioner(object):
+
     def test_create_maas_repo(self, mocker):
         distribution_list = ['xenial', 'xenial-updates']
 
diff --git a/python/tests/integration/postgres/test_action_prepare_nodes.py b/python/tests/integration/postgres/test_action_prepare_nodes.py
index 7223c802..cb9cae2b 100644
--- a/python/tests/integration/postgres/test_action_prepare_nodes.py
+++ b/python/tests/integration/postgres/test_action_prepare_nodes.py
@@ -19,6 +19,7 @@ from drydock_provisioner.orchestrator.actions.orchestrator import PrepareNodes
 
 
 class TestActionPrepareNodes(object):
+
     def test_preparenodes(self, mocker, input_files, deckhand_ingester,
                           setup, drydock_state, mock_get_build_data):
         mock_images = mocker.patch(
@@ -42,10 +43,9 @@ class TestActionPrepareNodes(object):
         kubernetes_driver = 'drydock_provisioner.drivers.kubernetes.driver.KubernetesDriver'
         network_driver = None
 
-        orchestrator = orch.Orchestrator(
-            enabled_drivers=DummyConf(),
-            state_manager=drydock_state,
-            ingester=deckhand_ingester)
+        orchestrator = orch.Orchestrator(enabled_drivers=DummyConf(),
+                                         state_manager=drydock_state,
+                                         ingester=deckhand_ingester)
 
         task = orchestrator.create_task(
             design_ref=design_ref,
diff --git a/python/tests/integration/postgres/test_action_prepare_site.py b/python/tests/integration/postgres/test_action_prepare_site.py
index 81bf0a51..f54af4be 100644
--- a/python/tests/integration/postgres/test_action_prepare_site.py
+++ b/python/tests/integration/postgres/test_action_prepare_site.py
@@ -19,6 +19,7 @@ from drydock_provisioner.orchestrator.actions.orchestrator import PrepareSite
 
 
 class TestActionPrepareSite(object):
+
     def test_preparesite(self, input_files, deckhand_ingester, setup,
                          drydock_state):
         input_file = input_files.join("deckhand_fullsite.yaml")
@@ -33,10 +34,9 @@ class TestActionPrepareSite(object):
         kubernetes_driver = 'drydock_provisioner.drivers.kubernetes.driver.KubernetesDriver'
         network_driver = None
 
-        orchestrator = orch.Orchestrator(
-            enabled_drivers=DummyConf(),
-            state_manager=drydock_state,
-            ingester=deckhand_ingester)
+        orchestrator = orch.Orchestrator(enabled_drivers=DummyConf(),
+                                         state_manager=drydock_state,
+                                         ingester=deckhand_ingester)
 
         task = orchestrator.create_task(
             design_ref=design_ref,
diff --git a/python/tests/integration/postgres/test_api_bootaction.py b/python/tests/integration/postgres/test_api_bootaction.py
index 421897c0..df7fa063 100644
--- a/python/tests/integration/postgres/test_api_bootaction.py
+++ b/python/tests/integration/postgres/test_api_bootaction.py
@@ -24,6 +24,7 @@ from drydock_provisioner.control.api import start_api
 
 
 class TestClass(object):
+
     def test_bootaction_context(self, falcontest, seed_bootaction):
         """Test that the API will return a boot action context"""
         url = "/api/v1.0/bootactions/nodes/%s/units" % seed_bootaction[
@@ -77,10 +78,9 @@ class TestClass(object):
         id_key = yaml_orchestrator.create_bootaction_context(
             'compute01', test_task)
 
-        ba_ctx = dict(
-            nodename='compute01',
-            task_id=test_task.get_id(),
-            identity_key=id_key.hex())
+        ba_ctx = dict(nodename='compute01',
+                      task_id=test_task.get_id(),
+                      identity_key=id_key.hex())
         return ba_ctx
 
     @pytest.fixture()
@@ -88,7 +88,6 @@ class TestClass(object):
                    mock_get_build_data):
         """Create a test harness for the Falcon API framework."""
         return testing.TestClient(
-            start_api(
-                state_manager=drydock_state,
-                ingester=yaml_ingester,
-                orchestrator=yaml_orchestrator))
+            start_api(state_manager=drydock_state,
+                      ingester=yaml_ingester,
+                      orchestrator=yaml_orchestrator))
diff --git a/python/tests/integration/postgres/test_api_bootaction_status.py b/python/tests/integration/postgres/test_api_bootaction_status.py
index 60239aac..1d9b8e83 100644
--- a/python/tests/integration/postgres/test_api_bootaction_status.py
+++ b/python/tests/integration/postgres/test_api_bootaction_status.py
@@ -25,6 +25,7 @@ from drydock_provisioner.control.api import start_api
 
 
 class TestClass(object):
+
     def test_bootaction_detail(self, falcontest, seed_bootaction_status):
         """Test that the API allows boot action detail messages."""
         url = "/api/v1.0/bootactions/%s" % seed_bootaction_status['action_id']
@@ -42,8 +43,9 @@ class TestClass(object):
             ]
         }
 
-        result = falcontest.simulate_post(
-            url, headers=hdr, body=json.dumps(body))
+        result = falcontest.simulate_post(url,
+                                          headers=hdr,
+                                          body=json.dumps(body))
 
         assert result.status == falcon.HTTP_200
 
@@ -65,13 +67,15 @@ class TestClass(object):
             ]
         }
 
-        result = falcontest.simulate_post(
-            url, headers=hdr, body=json.dumps(body))
+        result = falcontest.simulate_post(url,
+                                          headers=hdr,
+                                          body=json.dumps(body))
 
         assert result.status == falcon.HTTP_200
 
-        result = falcontest.simulate_post(
-            url, headers=hdr, body=json.dumps(body))
+        result = falcontest.simulate_post(url,
+                                          headers=hdr,
+                                          body=json.dumps(body))
 
         assert result.status == falcon.HTTP_409
 
@@ -87,8 +91,9 @@ class TestClass(object):
             'foo': 'Success',
         }
 
-        result = falcontest.simulate_post(
-            url, headers=hdr, body=json.dumps(body))
+        result = falcontest.simulate_post(url,
+                                          headers=hdr,
+                                          body=json.dumps(body))
 
         assert result.status == falcon.HTTP_400
 
@@ -106,18 +111,16 @@ class TestClass(object):
         blank_state.post_boot_action('compute01', test_task.get_id(), id_key,
                                      action_id, 'helloworld')
 
-        ba = dict(
-            nodename='compute01',
-            task_id=test_task.get_id(),
-            identity_key=id_key.hex(),
-            action_id=ulid2.encode_ulid_base32(action_id))
+        ba = dict(nodename='compute01',
+                  task_id=test_task.get_id(),
+                  identity_key=id_key.hex(),
+                  action_id=ulid2.encode_ulid_base32(action_id))
         return ba
 
     @pytest.fixture()
     def falcontest(self, drydock_state, yaml_ingester, yaml_orchestrator):
         """Create a test harness for the Falcon API framework."""
         return testing.TestClient(
-            start_api(
-                state_manager=drydock_state,
-                ingester=yaml_ingester,
-                orchestrator=yaml_orchestrator))
+            start_api(state_manager=drydock_state,
+                      ingester=yaml_ingester,
+                      orchestrator=yaml_orchestrator))
diff --git a/python/tests/integration/postgres/test_api_builddata.py b/python/tests/integration/postgres/test_api_builddata.py
index b940f0ac..18341cbc 100644
--- a/python/tests/integration/postgres/test_api_builddata.py
+++ b/python/tests/integration/postgres/test_api_builddata.py
@@ -28,6 +28,7 @@ import falcon
 
 
 class TestNodeBuildDataApi():
+
     def test_read_builddata_all(self, falcontest, seeded_builddata):
         """Test that by default the API returns all build data for a node."""
         url = '/api/v1.0/nodes/foo/builddata'
@@ -68,14 +69,14 @@ class TestNodeBuildDataApi():
         nodelist = ['foo']
         generatorlist = ['hello', 'hello', 'bye']
         count = 3
-        seeded_builddata(
-            nodelist=nodelist,
-            generatorlist=generatorlist,
-            count=count,
-            random_dates=True)
+        seeded_builddata(nodelist=nodelist,
+                         generatorlist=generatorlist,
+                         count=count,
+                         random_dates=True)
 
-        resp = falcontest.simulate_get(
-            url, headers=req_hdr, query_string="latest=true")
query_string="latest=true") + resp = falcontest.simulate_get(url, + headers=req_hdr, + query_string="latest=true") assert resp.status == falcon.HTTP_200 @@ -121,13 +122,12 @@ class TestNodeBuildDataApi(): generator = generatorlist[i] else: generator = 'hello_world' - bd = objects.BuildData( - node_name=n, - task_id=task_id, - generator=generator, - data_format='text/plain', - collected_date=collected_date, - data_element='Hello World!') + bd = objects.BuildData(node_name=n, + task_id=task_id, + generator=generator, + data_format='text/plain', + collected_date=collected_date, + data_element='Hello World!') blank_state.post_build_data(bd) i = i + 1 @@ -142,10 +142,9 @@ class TestNodeBuildDataApi(): policy.policy_engine.register_policy() return testing.TestClient( - start_api( - state_manager=drydock_state, - ingester=deckhand_ingester, - orchestrator=deckhand_orchestrator)) + start_api(state_manager=drydock_state, + ingester=deckhand_ingester, + orchestrator=deckhand_orchestrator)) @policy.ApiEnforcer('physical_provisioner:read_task') def target_function(self, req, resp): diff --git a/python/tests/integration/postgres/test_api_health.py b/python/tests/integration/postgres/test_api_health.py index c4b38faf..ad4b283b 100644 --- a/python/tests/integration/postgres/test_api_health.py +++ b/python/tests/integration/postgres/test_api_health.py @@ -19,8 +19,8 @@ import falcon def test_get_health(mocker, deckhand_orchestrator, drydock_state): - api = HealthResource( - state_manager=drydock_state, orchestrator=deckhand_orchestrator) + api = HealthResource(state_manager=drydock_state, + orchestrator=deckhand_orchestrator) # Configure mocked request and response req = mocker.MagicMock(spec=falcon.Request) diff --git a/python/tests/integration/postgres/test_api_tasks.py b/python/tests/integration/postgres/test_api_tasks.py index 07f9cf72..b02aa130 100644 --- a/python/tests/integration/postgres/test_api_tasks.py +++ b/python/tests/integration/postgres/test_api_tasks.py @@ -28,6 +28,7 @@ import falcon class TestTasksApi(): + def test_read_tasks(self, falcontest, blank_state): """Test that the tasks API responds with list of tasks.""" url = '/api/v1.0/tasks' @@ -61,18 +62,18 @@ class TestTasksApi(): context=ctx) # Seed DB with build data for task - build_data = objects.BuildData( - node_name='foo', - task_id=task.get_id(), - generator='hello_world', - data_format='text/plain', - data_element='Hello World!') + build_data = objects.BuildData(node_name='foo', + task_id=task.get_id(), + generator='hello_world', + data_format='text/plain', + data_element='Hello World!') blank_state.post_build_data(build_data) url = '/api/v1.0/tasks/%s' % str(task.get_id()) - resp = falcontest.simulate_get( - url, headers=req_hdr, query_string="builddata=true") + resp = falcontest.simulate_get(url, + headers=req_hdr, + query_string="builddata=true") assert resp.status == falcon.HTTP_200 @@ -111,7 +112,6 @@ class TestTasksApi(): policy.policy_engine.register_policy() return testing.TestClient( - start_api( - state_manager=drydock_state, - ingester=deckhand_ingester, - orchestrator=deckhand_orchestrator)) + start_api(state_manager=drydock_state, + ingester=deckhand_ingester, + orchestrator=deckhand_orchestrator)) diff --git a/python/tests/integration/postgres/test_bootaction_context.py b/python/tests/integration/postgres/test_bootaction_context.py index aa618fbe..e68ed7ca 100644 --- a/python/tests/integration/postgres/test_bootaction_context.py +++ b/python/tests/integration/postgres/test_bootaction_context.py @@ -25,6 +25,7 @@ 
 
 
 class TestBootActionContext(object):
+
     def test_bootaction_context(self, falcontest, seed_bootaction_multinode):
         """Test that the API will return a boot action context"""
         for n, c in seed_bootaction_multinode.items():
@@ -56,8 +57,8 @@ class TestBootActionContext(object):
         for n in design_data.baremetal_nodes:
             id_key = deckhand_orchestrator.create_bootaction_context(
                 n.name, test_task)
-            node_ctx = dict(
-                task_id=test_task.get_id(), identity_key=id_key.hex())
+            node_ctx = dict(task_id=test_task.get_id(),
+                            identity_key=id_key.hex())
             ba_ctx[n.name] = node_ctx
 
         return ba_ctx
@@ -67,7 +68,6 @@ class TestBootActionContext(object):
                    deckhand_orchestrator, mock_get_build_data):
         """Create a test harness for the Falcon API framework."""
         return testing.TestClient(
-            start_api(
-                state_manager=drydock_state,
-                ingester=deckhand_ingester,
-                orchestrator=deckhand_orchestrator))
+            start_api(state_manager=drydock_state,
+                      ingester=deckhand_ingester,
+                      orchestrator=deckhand_orchestrator))
diff --git a/python/tests/integration/postgres/test_bootaction_signalling.py b/python/tests/integration/postgres/test_bootaction_signalling.py
index 18bce94f..a37cf933 100644
--- a/python/tests/integration/postgres/test_bootaction_signalling.py
+++ b/python/tests/integration/postgres/test_bootaction_signalling.py
@@ -17,6 +17,7 @@ from drydock_provisioner.objects import fields as hd_fields
 
 
 class TestBootActionSignal(object):
+
     def test_bootaction_signal_disable(self, deckhand_orchestrator,
                                        drydock_state, input_files,
                                        mock_get_build_data):
diff --git a/python/tests/integration/postgres/test_build_data_collection.py b/python/tests/integration/postgres/test_build_data_collection.py
index a9a26926..a01955a8 100644
--- a/python/tests/integration/postgres/test_build_data_collection.py
+++ b/python/tests/integration/postgres/test_build_data_collection.py
@@ -19,6 +19,7 @@ from drydock_provisioner.drivers.node.maasdriver.actions.node import ConfigureHa
 
 
 class TestBuildDataCollection(object):
+
     def test_build_data_collection(self, setup, blank_state, mocker,
                                    deckhand_orchestrator):
         """Test that the build data collection from MaaS works."""
diff --git a/python/tests/integration/postgres/test_noderesult_links.py b/python/tests/integration/postgres/test_noderesult_links.py
index 9238ec57..b66cfd31 100644
--- a/python/tests/integration/postgres/test_noderesult_links.py
+++ b/python/tests/integration/postgres/test_noderesult_links.py
@@ -18,6 +18,7 @@ from drydock_provisioner.drivers.node.maasdriver.models.machine import Machine
 
 
 class TestNodeResultLinks(object):
+
     def test_create_detail_log_links(self, setup, blank_state, mocker,
                                      input_files, deckhand_orchestrator):
         """Test that the detail log collection from MaaS works."""
@@ -28,25 +29,17 @@ class TestNodeResultLinks(object):
 
             def json(self):
                 resp_content = [{
-                    "id":
-                    3,
-                    "data":
-                    "SGVsbG8gV29ybGQh",
-                    "result_type":
-                    0,
-                    "script_result":
-                    0,
-                    "resource_uri":
-                    "/MAAS/api/2.0/commissioning-scripts/",
-                    "updated":
-                    "2018-07-06T14:32:20.129",
+                    "id": 3,
+                    "data": "SGVsbG8gV29ybGQh",
+                    "result_type": 0,
+                    "script_result": 0,
+                    "resource_uri": "/MAAS/api/2.0/commissioning-scripts/",
+                    "updated": "2018-07-06T14:32:20.129",
                     "node": {
                         "system_id": "r7mqnw"
                     },
-                    "created":
-                    "2018-07-06T14:37:12.632",
-                    "name":
-                    "hello_world"
+                    "created": "2018-07-06T14:37:12.632",
+                    "name": "hello_world"
                 }]
                 return resp_content
 
diff --git a/python/tests/integration/postgres/test_orch_generic.py b/python/tests/integration/postgres/test_orch_generic.py
b/python/tests/integration/postgres/test_orch_generic.py index ecafda06..82765a03 100644 --- a/python/tests/integration/postgres/test_orch_generic.py +++ b/python/tests/integration/postgres/test_orch_generic.py @@ -20,13 +20,14 @@ import drydock_provisioner.objects.fields as hd_fields class TestClass(object): + def test_task_complete(self, deckhand_ingester, input_files, setup, blank_state, mock_get_build_data): input_file = input_files.join("deckhand_fullsite.yaml") design_ref = "file://%s" % str(input_file) - orchestrator = orch.Orchestrator( - state_manager=blank_state, ingester=deckhand_ingester) + orchestrator = orch.Orchestrator(state_manager=blank_state, + ingester=deckhand_ingester) orch_task = orchestrator.create_task( action=hd_fields.OrchestratorAction.Noop, design_ref=design_ref) orch_task.set_status(hd_fields.TaskStatus.Queued) @@ -50,8 +51,8 @@ class TestClass(object): input_file = input_files.join("deckhand_fullsite.yaml") design_ref = "file://%s" % str(input_file) - orchestrator = orch.Orchestrator( - state_manager=blank_state, ingester=deckhand_ingester) + orchestrator = orch.Orchestrator(state_manager=blank_state, + ingester=deckhand_ingester) orch_task = orchestrator.create_task( action=hd_fields.OrchestratorAction.Noop, design_ref=design_ref) diff --git a/python/tests/integration/postgres/test_postgres_bootaction_status.py b/python/tests/integration/postgres/test_postgres_bootaction_status.py index 541bc806..59f8e18c 100644 --- a/python/tests/integration/postgres/test_postgres_bootaction_status.py +++ b/python/tests/integration/postgres/test_postgres_bootaction_status.py @@ -21,13 +21,15 @@ from drydock_provisioner import objects class TestPostgresBootAction(object): + def test_bootaction_post(self, populateddb, drydock_state): """Test that a boot action status can be added.""" id_key = os.urandom(32) action_id = ulid2.generate_binary_ulid() nodename = 'testnode' - result = drydock_state.post_boot_action( - nodename, populateddb.get_id(), id_key, action_id, 'helloworld') + result = drydock_state.post_boot_action(nodename, populateddb.get_id(), + id_key, action_id, + 'helloworld') assert result @@ -60,8 +62,8 @@ class TestPostgresBootAction(object): @pytest.fixture(scope='function') def populateddb(self, blank_state): """Add dummy task to test against.""" - task = objects.Task( - action='prepare_site', design_ref='http://test.com/design') + task = objects.Task(action='prepare_site', + design_ref='http://test.com/design') blank_state.post_task(task) diff --git a/python/tests/integration/postgres/test_postgres_builddata.py b/python/tests/integration/postgres/test_postgres_builddata.py index 6cf24eec..3fdec9c6 100644 --- a/python/tests/integration/postgres/test_postgres_builddata.py +++ b/python/tests/integration/postgres/test_postgres_builddata.py @@ -22,6 +22,7 @@ from drydock_provisioner import objects class TestBuildData(object): + def test_build_data_insert_no_collected_date(self, blank_state): """Test that build data can be inserted omitting collection date.""" build_data_fields = { diff --git a/python/tests/integration/postgres/test_postgres_leadership.py b/python/tests/integration/postgres/test_postgres_leadership.py index ea4e685e..928506c9 100644 --- a/python/tests/integration/postgres/test_postgres_leadership.py +++ b/python/tests/integration/postgres/test_postgres_leadership.py @@ -17,6 +17,7 @@ import time class TestPostgres(object): + def test_claim_leadership(self, blank_state): """Test that a node can claim leadership. 
@@ -41,8 +42,8 @@ class TestPostgres(object): time.sleep(20) - print( - "Claiming leadership for %s after 20s" % str(second_leader.bytes)) + print("Claiming leadership for %s after 20s" % + str(second_leader.bytes)) crown = ds.claim_leadership(second_leader) assert crown diff --git a/python/tests/integration/postgres/test_postgres_results.py b/python/tests/integration/postgres/test_postgres_results.py index f7317c79..f3ceace6 100644 --- a/python/tests/integration/postgres/test_postgres_results.py +++ b/python/tests/integration/postgres/test_postgres_results.py @@ -4,6 +4,7 @@ from drydock_provisioner import objects class TestPostgres(object): + def test_result_message_insert(self, populateddb, drydock_state): """Test that a result message for a task can be added.""" msg1 = objects.TaskStatusMessage('Error 1', True, 'node', 'node1') @@ -23,8 +24,8 @@ class TestPostgres(object): @pytest.fixture(scope='function') def populateddb(self, blank_state): """Add dummy task to test against.""" - task = objects.Task( - action='prepare_site', design_ref='http://test.com/design') + task = objects.Task(action='prepare_site', + design_ref='http://test.com/design') blank_state.post_task(task) diff --git a/python/tests/integration/postgres/test_postgres_tasks.py b/python/tests/integration/postgres/test_postgres_tasks.py index 9b02856b..9a6dfc75 100644 --- a/python/tests/integration/postgres/test_postgres_tasks.py +++ b/python/tests/integration/postgres/test_postgres_tasks.py @@ -23,16 +23,16 @@ from drydock_provisioner.control.base import DrydockRequestContext class TestPostgres(object): + def test_task_insert(self, blank_state): """Test that a task can be inserted into the database.""" ctx = DrydockRequestContext() ctx.user = 'sh8121' ctx.external_marker = str(uuid.uuid4()) - task = objects.Task( - action='deploy_node', - design_ref='http://foo.bar/design', - context=ctx) + task = objects.Task(action='deploy_node', + design_ref='http://foo.bar/design', + context=ctx) result = blank_state.post_task(task) @@ -51,11 +51,10 @@ class TestPostgres(object): 'filter_type': 'union' }] } - task = objects.Task( - action='deploy_node', - node_filter=node_filter, - design_ref='http://foo.bar/design', - context=ctx) + task = objects.Task(action='deploy_node', + node_filter=node_filter, + design_ref='http://foo.bar/design', + context=ctx) result = blank_state.post_task(task) @@ -68,12 +67,11 @@ class TestPostgres(object): def test_subtask_append(self, blank_state): """Test that the atomic subtask append method works.""" - task = objects.Task( - action='deploy_node', design_ref='http://foobar/design') - subtask = objects.Task( - action='deploy_node', - design_ref='http://foobar/design', - parent_task_id=task.task_id) + task = objects.Task(action='deploy_node', + design_ref='http://foobar/design') + subtask = objects.Task(action='deploy_node', + design_ref='http://foobar/design', + parent_task_id=task.task_id) blank_state.post_task(task) blank_state.post_task(subtask) @@ -100,8 +98,8 @@ class TestPostgres(object): @pytest.fixture(scope='function') def populateddb(self, blank_state): """Add dummy task to test against.""" - task = objects.Task( - action='prepare_site', design_ref='http://test.com/design') + task = objects.Task(action='prepare_site', + design_ref='http://test.com/design') blank_state.post_task(task) diff --git a/python/tests/integration/test_maasdriver_client.py b/python/tests/integration/test_maasdriver_client.py index 0436826b..f7628912 100644 --- a/python/tests/integration/test_maasdriver_client.py +++ 
b/python/tests/integration/test_maasdriver_client.py
@@ -16,14 +16,15 @@ import drydock_provisioner.drivers.node.maasdriver.api_client as client
 class TestClass(object):
+
     def test_client_authenticate(self):
         client_config = config.DrydockConfig.node_driver['maasdriver']
         maas_client = client.MaasRequestFactory(client_config['api_url'],
                                                 client_config['api_key'])
-        resp = maas_client.get(
-            'account/', params={'op': 'list_authorisation_tokens'})
+        resp = maas_client.get('account/',
+                               params={'op': 'list_authorisation_tokens'})
         parsed = resp.json()
diff --git a/python/tests/integration/test_maasdriver_network.py b/python/tests/integration/test_maasdriver_network.py
index 9dfa3700..f34a65b6 100644
--- a/python/tests/integration/test_maasdriver_network.py
+++ b/python/tests/integration/test_maasdriver_network.py
@@ -20,6 +20,7 @@ import drydock_provisioner.drivers.node.maasdriver.models.subnet as maas_subnet
 class TestClass(object):
+
     def test_maas_fabric(self):
         client_config = config.DrydockConfig.node_driver['maasdriver']
@@ -31,15 +32,16 @@ class TestClass(object):
         fabric_list = maas_fabric.Fabrics(maas_client)
         fabric_list.refresh()
-        test_fabric = maas_fabric.Fabric(
-            maas_client, name=fabric_name, description='Test Fabric')
+        test_fabric = maas_fabric.Fabric(maas_client,
+                                         name=fabric_name,
+                                         description='Test Fabric')
         test_fabric = fabric_list.add(test_fabric)
         assert test_fabric.name == fabric_name
         assert test_fabric.resource_id is not None
-        query_fabric = maas_fabric.Fabric(
-            maas_client, resource_id=test_fabric.resource_id)
+        query_fabric = maas_fabric.Fabric(maas_client,
+                                          resource_id=test_fabric.resource_id)
         query_fabric.refresh()
         assert query_fabric.name == test_fabric.name
diff --git a/python/tests/postgres/start_postgres.sh b/python/tests/postgres/start_postgres.sh
index 4b4fe884..3dfaeef7 100755
--- a/python/tests/postgres/start_postgres.sh
+++ b/python/tests/postgres/start_postgres.sh
@@ -1,20 +1,21 @@
 #!/bin/bash
 set -x
-IMAGE="${DOCKER_REGISTRY}/${IMAGE_PREFIX}/${IMAGE_NAME}:${IMAGE_TAG}"
+IMAGE="${DOCKER_REGISTRY}/${IMAGE_PREFIX}/${IMAGE_NAME}:${IMAGE_TAG}-${DISTRO}"
+env
 if [[ ! -z $(docker ps | grep 'psql_integration') ]]
 then
   sudo docker stop 'psql_integration'
 fi
-IMAGE=${IMAGE:-"drydock:latest"}
+IMAGE=${IMAGE:-"quay.io/airshipit/drydock:latest-ubuntu_focal"}
 if [[ ! -z $(docker ps | grep 'psql_integration') ]]
 then
   sudo docker stop 'psql_integration'
 fi
-sudo docker run --rm -dp 5432:5432 --name 'psql_integration' postgres:14.6
+sudo docker run --rm -d -p 5432:5432/tcp -e POSTGRES_HOST_AUTH_METHOD=trust -e POSTGRES_PASSWORD=postgres --name 'psql_integration' postgres:14.6
 sleep 15
 docker run --rm --net host postgres:14.6 psql -h localhost -c "create user drydock with password 'drydock';" postgres postgres
diff --git a/python/tests/unit/test_api_nodes_unit.py b/python/tests/unit/test_api_nodes_unit.py
index 76edf8ef..438fdada 100644
--- a/python/tests/unit/test_api_nodes_unit.py
+++ b/python/tests/unit/test_api_nodes_unit.py
@@ -28,6 +28,7 @@ LOG = logging.getLogger(__name__)
 class TestNodesApiUnit(object):
+
     def test_post_nodes_resp(self, input_files, falcontest,
                              mock_process_node_filter):
@@ -41,8 +42,9 @@ class TestNodesApiUnit(object):
             'design_ref': design_ref,
         }
-        result = falcontest.simulate_post(
-            url, headers=hdr, body=json.dumps(body))
+        result = falcontest.simulate_post(url,
+                                          headers=hdr,
+                                          body=json.dumps(body))
         LOG.debug(result.text)
         assert result.status == falcon.HTTP_200
@@ -54,8 +56,9 @@ class TestNodesApiUnit(object):
         hdr = self.get_standard_header()
         body = {}
-        result = falcontest.simulate_post(
-            url, headers=hdr, body=json.dumps(body))
+        result = falcontest.simulate_post(url,
+                                          headers=hdr,
+                                          body=json.dumps(body))
         LOG.debug(result.text)
         assert result.status == falcon.HTTP_400
@@ -68,10 +71,9 @@ class TestNodesApiUnit(object):
         policy.policy_engine.register_policy()
         return testing.TestClient(
-            start_api(
-                state_manager=drydock_state,
-                ingester=deckhand_ingester,
-                orchestrator=deckhand_orchestrator))
+            start_api(state_manager=drydock_state,
+                      ingester=deckhand_ingester,
+                      orchestrator=deckhand_orchestrator))
     def get_standard_header(self):
         hdr = {
@@ -93,6 +95,7 @@ def mock_process_node_filter(mocker, deckhand_orchestrator):
     n2.site = 'test2'
     mock_results = [n1, n2]
-    with mocker.patch('drydock_provisioner.orchestrator.orchestrator.Orchestrator.process_node_filter',
-                      mocker.MagicMock(return_value=mock_results)):
+    with mocker.patch(
+            'drydock_provisioner.orchestrator.orchestrator.Orchestrator.process_node_filter',
+            mocker.MagicMock(return_value=mock_results)):
         yield
diff --git a/python/tests/unit/test_api_tasks_unit.py b/python/tests/unit/test_api_tasks_unit.py
index 5c8209d2..900ef93e 100644
--- a/python/tests/unit/test_api_tasks_unit.py
+++ b/python/tests/unit/test_api_tasks_unit.py
@@ -30,6 +30,7 @@ LOG = logging.getLogger(__name__)
 class TestTasksApiUnit(object):
+
     def test_get_tasks_id_resp(self, falcontest):
         url = '/api/v1.0/tasks/11111111-1111-1111-1111-111111111111'
         hdr = self.get_standard_header()
@@ -57,8 +58,9 @@ class
TestTasksApiUnit(object): url = '/api/v1.0/tasks/11111111-1111-1111-1111-111111111111' hdr = self.get_standard_header() - result = falcontest.simulate_get( - url, headers=hdr, query_string='builddata=true') + result = falcontest.simulate_get(url, + headers=hdr, + query_string='builddata=true') LOG.debug(result.text) assert result.status == falcon.HTTP_200 @@ -116,8 +120,9 @@ class TestTasksApiUnit(object): url = '/api/v1.0/tasks/11111111-1111-1111-1111-111111111113' hdr = self.get_standard_header() - result = falcontest.simulate_get( - url, headers=hdr, query_string='layers=2') + result = falcontest.simulate_get(url, + headers=hdr, + query_string='layers=2') LOG.debug(result.text) assert result.status == falcon.HTTP_200 @@ -140,8 +145,9 @@ class TestTasksApiUnit(object): url = '/api/v1.0/tasks/11111111-1111-1111-1111-111111111113' hdr = self.get_standard_header() - result = falcontest.simulate_get( - url, headers=hdr, query_string='layers=-1') + result = falcontest.simulate_get(url, + headers=hdr, + query_string='layers=-1') LOG.debug(result.text) assert result.status == falcon.HTTP_200 @@ -210,10 +216,9 @@ class TestTasksApiUnit(object): policy.policy_engine.register_policy() return testing.TestClient( - start_api( - state_manager=drydock_state, - ingester=deckhand_ingester, - orchestrator=deckhand_orchestrator)) + start_api(state_manager=drydock_state, + ingester=deckhand_ingester, + orchestrator=deckhand_orchestrator)) def get_standard_header(self): hdr = { @@ -227,6 +232,7 @@ class TestTasksApiUnit(object): @pytest.fixture() def mock_get_task(drydock_state): + def side_effect(*args): task_id = str(args[0]) LOG.debug(task_id) @@ -236,8 +242,10 @@ def mock_get_task(drydock_state): new_task.task_id = '11111111-1111-1111-1111-111111111111' new_task.result = objects.TaskStatus() new_task.result.set_status(hd_fields.ActionResult.Failure) - new_task.result.add_status_msg( - msg='Test', error=True, ctx_type='N/A', ctx='N/A') + new_task.result.add_status_msg(msg='Test', + error=True, + ctx_type='N/A', + ctx='N/A') return new_task # Task not found if task_id == '11111111-1111-1111-1111-111111111112': @@ -268,8 +276,10 @@ def mock_get_task(drydock_state): new_task.task_id = '11111111-1111-1111-1111-111111111116' new_task.result = objects.TaskStatus() new_task.result.set_status(hd_fields.ActionResult.Failure) - new_task.result.add_status_msg( - msg='Test', error=True, ctx_type='N/A', ctx='N/A') + new_task.result.add_status_msg(msg='Test', + error=True, + ctx_type='N/A', + ctx='N/A') LOG.debug('error_count') LOG.debug(new_task.result.error_count) return new_task diff --git a/python/tests/unit/test_api_validation.py b/python/tests/unit/test_api_validation.py index f64d95eb..e9cf1e67 100644 --- a/python/tests/unit/test_api_validation.py +++ b/python/tests/unit/test_api_validation.py @@ -27,8 +27,9 @@ LOG = logging.getLogger(__name__) class TestValidationApi(object): - def test_post_validation_resp(self, setup_logging, input_files, falcontest, drydock_state, - mock_get_build_data): + + def test_post_validation_resp(self, setup_logging, input_files, falcontest, + drydock_state, mock_get_build_data): input_file = input_files.join("deckhand_fullsite.yaml") design_ref = "file://%s" % str(input_file) @@ -46,8 +47,9 @@ class TestValidationApi(object): 'type': "application/x-yaml", } - result = falcontest.simulate_post( - url, headers=hdr, body=json.dumps(body)) + result = falcontest.simulate_post(url, + headers=hdr, + body=json.dumps(body)) LOG.debug(result.text) assert result.status == falcon.HTTP_200 @@ 
-66,8 +68,9 @@ class TestValidationApi(object): 'type': "application/x-yaml", } - result = falcontest.simulate_post( - url, headers=hdr, body=json.dumps(body)) + result = falcontest.simulate_post(url, + headers=hdr, + body=json.dumps(body)) LOG.debug(result.text) assert result.status == falcon.HTTP_400 @@ -82,14 +85,15 @@ class TestValidationApi(object): } body = {} - result = falcontest.simulate_post( - url, headers=hdr, body=json.dumps(body)) + result = falcontest.simulate_post(url, + headers=hdr, + body=json.dumps(body)) LOG.debug(result.text) assert result.status == falcon.HTTP_400 - def test_invalid_post_resp(self, setup_logging, input_files, falcontest, drydock_state, - mock_get_build_data): + def test_invalid_post_resp(self, setup_logging, input_files, falcontest, + drydock_state, mock_get_build_data): input_file = input_files.join("invalid_validation.yaml") design_ref = "file://%s" % str(input_file) @@ -106,8 +110,9 @@ class TestValidationApi(object): 'type': "application/x-yaml", } - result = falcontest.simulate_post( - url, headers=hdr, body=json.dumps(body)) + result = falcontest.simulate_post(url, + headers=hdr, + body=json.dumps(body)) assert result.status == falcon.HTTP_400 @@ -119,7 +124,6 @@ class TestValidationApi(object): policy.policy_engine.register_policy() return testing.TestClient( - start_api( - state_manager=drydock_state, - ingester=deckhand_ingester, - orchestrator=deckhand_orchestrator)) + start_api(state_manager=drydock_state, + ingester=deckhand_ingester, + orchestrator=deckhand_orchestrator)) diff --git a/python/tests/unit/test_api_versions.py b/python/tests/unit/test_api_versions.py index 6fae06e1..655fdc8a 100644 --- a/python/tests/unit/test_api_versions.py +++ b/python/tests/unit/test_api_versions.py @@ -29,5 +29,5 @@ def test_get_versions(mocker): expected = api.to_json({'v1.0': {'path': '/api/v1.0', 'status': 'stable'}}) - assert resp.body == expected + assert resp.text == expected assert resp.status == falcon.HTTP_200 diff --git a/python/tests/unit/test_apienforcer.py b/python/tests/unit/test_apienforcer.py index a7dff692..5d002671 100644 --- a/python/tests/unit/test_apienforcer.py +++ b/python/tests/unit/test_apienforcer.py @@ -21,6 +21,7 @@ logging.basicConfig(level=logging.DEBUG) class TestEnforcerDecorator(): + def test_apienforcer_decorator(self, mocker): ''' DrydockPolicy.authorized() should correctly use oslo_policy to enforce RBAC policy based on a DrydockRequestContext instance. 
authorized() is diff --git a/python/tests/unit/test_bootaction_asset_render.py b/python/tests/unit/test_bootaction_asset_render.py index 036f0f2d..a5f6761d 100644 --- a/python/tests/unit/test_bootaction_asset_render.py +++ b/python/tests/unit/test_bootaction_asset_render.py @@ -22,6 +22,7 @@ from drydock_provisioner.objects import fields as hd_fields class TestBootactionRenderAction(object): + def test_bootaction_render_nodename(self, input_files, deckhand_ingester, setup): """Test the bootaction render routine provides expected output.""" @@ -142,8 +143,8 @@ class TestBootactionRenderAction(object): design_ref = "file://%s" % str(input_file) - test_task = Task( - action=hd_fields.OrchestratorAction.Noop, design_ref=design_ref) + test_task = Task(action=hd_fields.OrchestratorAction.Noop, + design_ref=design_ref) pkg_list = deckhand_orchestrator.find_node_package_lists( 'compute01', test_task) diff --git a/python/tests/unit/test_bootaction_pipeline.py b/python/tests/unit/test_bootaction_pipeline.py index b9858ad4..ab750028 100644 --- a/python/tests/unit/test_bootaction_pipeline.py +++ b/python/tests/unit/test_bootaction_pipeline.py @@ -18,6 +18,7 @@ import drydock_provisioner.objects as objects class TestClass(object): + def test_bootaction_pipeline_base64(self): objects.register_all() diff --git a/python/tests/unit/test_bootaction_scoping.py b/python/tests/unit/test_bootaction_scoping.py index 8cd2adf1..252989fe 100644 --- a/python/tests/unit/test_bootaction_scoping.py +++ b/python/tests/unit/test_bootaction_scoping.py @@ -16,9 +16,11 @@ import drydock_provisioner.objects as objects class TestClass(object): - def test_bootaction_scoping_blankfilter( - self, input_files, deckhand_orchestrator, drydock_state, - mock_get_build_data): + + def test_bootaction_scoping_blankfilter(self, input_files, + deckhand_orchestrator, + drydock_state, + mock_get_build_data): """Test a boot action with no node filter scopes correctly.""" input_file = input_files.join("deckhand_fullsite.yaml") @@ -36,9 +38,10 @@ class TestClass(object): assert 'compute01' in ba.target_nodes assert 'controller01' in ba.target_nodes - def test_bootaction_scoping_unionfilter( - self, input_files, deckhand_orchestrator, drydock_state, - mock_get_build_data): + def test_bootaction_scoping_unionfilter(self, input_files, + deckhand_orchestrator, + drydock_state, + mock_get_build_data): """Test a boot action with a union node filter scopes correctly.""" input_file = input_files.join("deckhand_fullsite.yaml") diff --git a/python/tests/unit/test_bootaction_tarbuilder.py b/python/tests/unit/test_bootaction_tarbuilder.py index 819da6ce..d9b3deeb 100644 --- a/python/tests/unit/test_bootaction_tarbuilder.py +++ b/python/tests/unit/test_bootaction_tarbuilder.py @@ -24,6 +24,7 @@ from drydock_provisioner.control.bootaction import BootactionUtils class TestClass(object): + def test_bootaction_tarbuilder(self, input_files, deckhand_ingester, setup): objects.register_all() diff --git a/python/tests/unit/test_cli_task.py b/python/tests/unit/test_cli_task.py index 736013f5..4f8b0600 100644 --- a/python/tests/unit/test_cli_task.py +++ b/python/tests/unit/test_cli_task.py @@ -34,8 +34,9 @@ def test_taskcli_blank_nodefilter(): dd_ses = dc_session.DrydockSession(host) dd_client = dc_client.DrydockClient(dd_ses) - action = TaskCreate( - dd_client, "http://foo.bar", action_name="deploy_nodes") + action = TaskCreate(dd_client, + "http://foo.bar", + action_name="deploy_nodes") assert action.node_filter is None @@ -77,8 +78,8 @@ def 
test_taskcli_builddata_command(mocker): api_client = mocker.MagicMock() api_client.get_task_build_data.return_value = build_data - mocker.patch( - 'drydock_provisioner.cli.commands.DrydockClient', new=api_client) + mocker.patch('drydock_provisioner.cli.commands.DrydockClient', + new=api_client) mocker.patch('drydock_provisioner.cli.commands.KeystoneClient') runner = CliRunner() @@ -88,6 +89,6 @@ def test_taskcli_builddata_command(mocker): print(result.exc_info) api_client.get_task_build_data.assert_called_with(task_id) - assert yaml.safe_dump( - build_data, allow_unicode=True, - default_flow_style=False) in result.output + assert yaml.safe_dump(build_data, + allow_unicode=True, + default_flow_style=False) in result.output diff --git a/python/tests/unit/test_design_inheritance.py b/python/tests/unit/test_design_inheritance.py index 1367d368..ca825683 100644 --- a/python/tests/unit/test_design_inheritance.py +++ b/python/tests/unit/test_design_inheritance.py @@ -18,6 +18,7 @@ from drydock_provisioner.orchestrator.orchestrator import Orchestrator class TestClass(object): + def test_design_inheritance(self, input_files, setup): input_file = input_files.join("fullsite.yaml") @@ -28,8 +29,8 @@ class TestClass(object): ingester.enable_plugin( 'drydock_provisioner.ingester.plugins.yaml.YamlIngester') - orchestrator = Orchestrator( - state_manager=design_state, ingester=ingester) + orchestrator = Orchestrator(state_manager=design_state, + ingester=ingester) design_status, design_data = orchestrator.get_effective_site( design_ref) diff --git a/python/tests/unit/test_drydock_client.py b/python/tests/unit/test_drydock_client.py index 51ec0053..9a106de4 100644 --- a/python/tests/unit/test_drydock_client.py +++ b/python/tests/unit/test_drydock_client.py @@ -43,11 +43,10 @@ def test_session_init_minimal_no_port(): @responses.activate def test_session_get(): - responses.add( - responses.GET, - 'http://foo.bar.baz/api/v1.0/test', - body='okay', - status=200) + responses.add(responses.GET, + 'http://foo.bar.baz/api/v1.0/test', + body='okay', + status=200) host = 'foo.bar.baz' token = '5f1e08b6-38ec-4a99-9d0f-00d29c4e325b' marker = '40c3eaf6-6a8a-11e7-a4bd-080027ef795a' @@ -65,16 +64,14 @@ def test_session_get(): @responses.activate -@mock.patch.object( - dc_session.KeystoneClient, - 'get_token', - return_value='5f1e08b6-38ec-4a99-9d0f-00d29c4e325b') +@mock.patch.object(dc_session.KeystoneClient, + 'get_token', + return_value='5f1e08b6-38ec-4a99-9d0f-00d29c4e325b') def test_session_get_returns_401(*args): - responses.add( - responses.GET, - 'http://foo.bar.baz/api/v1.0/test', - body='okay', - status=401) + responses.add(responses.GET, + 'http://foo.bar.baz/api/v1.0/test', + body='okay', + status=401) host = 'foo.bar.baz' token = '5f1e08b6-38ec-4a99-9d0f-00d29c4e325b' marker = '40c3eaf6-6a8a-11e7-a4bd-080027ef795a' @@ -132,11 +129,10 @@ def test_client_get_nodes_for_filter_post(): host = 'foo.bar.baz' - responses.add( - responses.POST, - "http://%s/api/v1.0/nodefilter" % (host), - json=node_list, - status=200) + responses.add(responses.POST, + "http://%s/api/v1.0/nodefilter" % (host), + json=node_list, + status=200) dd_ses = dc_session.DrydockSession(host) dd_client = dc_client.DrydockClient(dd_ses) @@ -154,11 +150,10 @@ def test_client_validate_design_post(): host = 'foo.bar.baz' - responses.add( - responses.POST, - "http://%s/api/v1.0/validatedesign" % (host), - json=validation, - status=200) + responses.add(responses.POST, + "http://%s/api/v1.0/validatedesign" % (host), + json=validation, + status=200) 
dd_ses = dc_session.DrydockSession(host) dd_client = dc_client.DrydockClient(dd_ses) diff --git a/python/tests/unit/test_drydock_client_session.py b/python/tests/unit/test_drydock_client_session.py index 15170f95..21fe51a6 100644 --- a/python/tests/unit/test_drydock_client_session.py +++ b/python/tests/unit/test_drydock_client_session.py @@ -17,6 +17,7 @@ from drydock_provisioner.drydock_client.session import DrydockSession class TestClientSession(object): + def test_create_session(self): """Tests setting up an Drydock client session""" sess = DrydockSession("testdrydock") diff --git a/python/tests/unit/test_ingester.py b/python/tests/unit/test_ingester.py index 9b9404a6..fb4bfdfe 100644 --- a/python/tests/unit/test_ingester.py +++ b/python/tests/unit/test_ingester.py @@ -18,6 +18,7 @@ import drydock_provisioner.objects as objects class TestClass(object): + def test_ingest_deckhand(self, input_files, setup, deckhand_ingester): input_file = input_files.join("deckhand_fullsite.yaml") diff --git a/python/tests/unit/test_ingester_bootaction.py b/python/tests/unit/test_ingester_bootaction.py index 2a252045..53f09b5f 100644 --- a/python/tests/unit/test_ingester_bootaction.py +++ b/python/tests/unit/test_ingester_bootaction.py @@ -18,6 +18,7 @@ import drydock_provisioner.objects as objects class TestBootAction(object): + def test_bootaction_parse(self, input_files, deckhand_ingester, setup): objects.register_all() diff --git a/python/tests/unit/test_ingester_invalidation.py b/python/tests/unit/test_ingester_invalidation.py index 8096693c..2983f939 100644 --- a/python/tests/unit/test_ingester_invalidation.py +++ b/python/tests/unit/test_ingester_invalidation.py @@ -21,6 +21,7 @@ LOG = logging.getLogger(__name__) class TestClass(object): + def test_bootaction_parse(self, input_files, deckhand_ingester, setup): design_status, design_data = self.parse_design( "invalid_bootaction.yaml", input_files, deckhand_ingester) diff --git a/python/tests/unit/test_ingester_rack_model.py b/python/tests/unit/test_ingester_rack_model.py index 6753734a..e1b04ec2 100644 --- a/python/tests/unit/test_ingester_rack_model.py +++ b/python/tests/unit/test_ingester_rack_model.py @@ -21,6 +21,7 @@ import pytest class TestClass(object): + def test_rack_parse(self, deckhand_ingester, input_files, setup): objects.register_all() diff --git a/python/tests/unit/test_k8sdriver_promenade_client.py b/python/tests/unit/test_k8sdriver_promenade_client.py index 72e63f41..5c42f5e2 100644 --- a/python/tests/unit/test_k8sdriver_promenade_client.py +++ b/python/tests/unit/test_k8sdriver_promenade_client.py @@ -38,15 +38,15 @@ def test_put(patch1, patch2): """ Test put functionality """ - responses.add( - responses.PUT, - 'http://promhost:80/api/v1.0/node-label/n1', - body='{"key1":"label1"}', - status=200) + responses.add(responses.PUT, + 'http://promhost:80/api/v1.0/node-label/n1', + body='{"key1":"label1"}', + status=200) prom_session = PromenadeSession() - result = prom_session.put( - 'v1.0/node-label/n1', body='{"key1":"label1"}', timeout=(60, 60)) + result = prom_session.put('v1.0/node-label/n1', + body='{"key1":"label1"}', + timeout=(60, 60)) assert PROM_HOST == prom_session.host assert result.status_code == 200 @@ -65,8 +65,9 @@ def test_get(patch1, patch2): """ Test get functionality """ - responses.add( - responses.GET, 'http://promhost:80/api/v1.0/node-label/n1', status=200) + responses.add(responses.GET, + 'http://promhost:80/api/v1.0/node-label/n1', + status=200) prom_session = PromenadeSession() result = 
prom_session.get('v1.0/node-label/n1', timeout=(60, 60)) @@ -87,15 +88,15 @@ def test_post(patch1, patch2): """ Test post functionality """ - responses.add( - responses.POST, - 'http://promhost:80/api/v1.0/node-label/n1', - body='{"key1":"label1"}', - status=200) + responses.add(responses.POST, + 'http://promhost:80/api/v1.0/node-label/n1', + body='{"key1":"label1"}', + status=200) prom_session = PromenadeSession() - result = prom_session.post( - 'v1.0/node-label/n1', body='{"key1":"label1"}', timeout=(60, 60)) + result = prom_session.post('v1.0/node-label/n1', + body='{"key1":"label1"}', + timeout=(60, 60)) assert PROM_HOST == prom_session.host assert result.status_code == 200 @@ -115,11 +116,10 @@ def test_relabel_node(patch1, patch2): Test relabel node call from Promenade Client """ - responses.add( - responses.PUT, - 'http://promhost:80/api/v1.0/node-labels/n1', - body='{"key1":"label1"}', - status=200) + responses.add(responses.PUT, + 'http://promhost:80/api/v1.0/node-labels/n1', + body='{"key1":"label1"}', + status=200) prom_client = PromenadeClient() @@ -141,11 +141,10 @@ def test_relabel_node_403_status(patch1, patch2): """ Test relabel node with 403 resp status """ - responses.add( - responses.PUT, - 'http://promhost:80/api/v1.0/node-labels/n1', - body='{"key1":"label1"}', - status=403) + responses.add(responses.PUT, + 'http://promhost:80/api/v1.0/node-labels/n1', + body='{"key1":"label1"}', + status=403) prom_client = PromenadeClient() @@ -166,11 +165,10 @@ def test_relabel_node_401_status(patch1, patch2): """ Test relabel node with 401 resp status """ - responses.add( - responses.PUT, - 'http://promhost:80/api/v1.0/node-labels/n1', - body='{"key1":"label1"}', - status=401) + responses.add(responses.PUT, + 'http://promhost:80/api/v1.0/node-labels/n1', + body='{"key1":"label1"}', + status=401) prom_client = PromenadeClient() diff --git a/python/tests/unit/test_libvirt_driver.py b/python/tests/unit/test_libvirt_driver.py index 95d4f673..47b60fb6 100644 --- a/python/tests/unit/test_libvirt_driver.py +++ b/python/tests/unit/test_libvirt_driver.py @@ -23,6 +23,7 @@ LOG = logging.getLogger(__name__) class TestLibvirtOobDriver(): + def test_libvirt_init_session(self, mocker, deckhand_orchestrator, input_files, setup): """Test session initialization.""" diff --git a/python/tests/unit/test_maasdriver_calculate_bytes.py b/python/tests/unit/test_maasdriver_calculate_bytes.py index 3f720d0d..4c3b6c65 100644 --- a/python/tests/unit/test_maasdriver_calculate_bytes.py +++ b/python/tests/unit/test_maasdriver_calculate_bytes.py @@ -24,14 +24,15 @@ from drydock_provisioner.drivers.node.maasdriver.models.volumegroup import Volum class TestCalculateBytes(): + def test_calculate_m_label(self): '''Convert megabyte labels to x * 10^6 bytes.''' size_str = '15m' drive_size = 20 * 10**6 drive = BlockDevice(None, size=drive_size, available_size=drive_size) - calc_size = ApplyNodeStorage.calculate_bytes( - size_str=size_str, context=drive) + calc_size = ApplyNodeStorage.calculate_bytes(size_str=size_str, + context=drive) assert calc_size == 15 * 10**6 @@ -41,8 +42,8 @@ class TestCalculateBytes(): drive_size = 20 * 10**6 drive = BlockDevice(None, size=drive_size, available_size=drive_size) - calc_size = ApplyNodeStorage.calculate_bytes( - size_str=size_str, context=drive) + calc_size = ApplyNodeStorage.calculate_bytes(size_str=size_str, + context=drive) assert calc_size == 15 * 10**6 @@ -52,8 +53,8 @@ class TestCalculateBytes(): drive_size = 20 * 10**6 drive = BlockDevice(None, size=drive_size, 
available_size=drive_size) - calc_size = ApplyNodeStorage.calculate_bytes( - size_str=size_str, context=drive) + calc_size = ApplyNodeStorage.calculate_bytes(size_str=size_str, + context=drive) assert calc_size == 15 * 10**6 @@ -63,8 +64,8 @@ class TestCalculateBytes(): drive_size = 20 * 10**6 drive = BlockDevice(None, size=drive_size, available_size=drive_size) - calc_size = ApplyNodeStorage.calculate_bytes( - size_str=size_str, context=drive) + calc_size = ApplyNodeStorage.calculate_bytes(size_str=size_str, + context=drive) assert calc_size == 15 * 10**6 @@ -74,8 +75,8 @@ class TestCalculateBytes(): drive_size = 20 * 10**9 drive = BlockDevice(None, size=drive_size, available_size=drive_size) - calc_size = ApplyNodeStorage.calculate_bytes( - size_str=size_str, context=drive) + calc_size = ApplyNodeStorage.calculate_bytes(size_str=size_str, + context=drive) assert calc_size == 15 * 10**9 @@ -85,8 +86,8 @@ class TestCalculateBytes(): drive_size = 20 * 10**9 drive = BlockDevice(None, size=drive_size, available_size=drive_size) - calc_size = ApplyNodeStorage.calculate_bytes( - size_str=size_str, context=drive) + calc_size = ApplyNodeStorage.calculate_bytes(size_str=size_str, + context=drive) assert calc_size == 15 * 10**9 @@ -96,8 +97,8 @@ class TestCalculateBytes(): drive_size = 20 * 10**9 drive = BlockDevice(None, size=drive_size, available_size=drive_size) - calc_size = ApplyNodeStorage.calculate_bytes( - size_str=size_str, context=drive) + calc_size = ApplyNodeStorage.calculate_bytes(size_str=size_str, + context=drive) assert calc_size == 15 * 10**9 @@ -107,8 +108,8 @@ class TestCalculateBytes(): drive_size = 20 * 10**9 drive = BlockDevice(None, size=drive_size, available_size=drive_size) - calc_size = ApplyNodeStorage.calculate_bytes( - size_str=size_str, context=drive) + calc_size = ApplyNodeStorage.calculate_bytes(size_str=size_str, + context=drive) assert calc_size == 15 * 10**9 @@ -118,8 +119,8 @@ class TestCalculateBytes(): drive_size = 20 * 10**12 drive = BlockDevice(None, size=drive_size, available_size=drive_size) - calc_size = ApplyNodeStorage.calculate_bytes( - size_str=size_str, context=drive) + calc_size = ApplyNodeStorage.calculate_bytes(size_str=size_str, + context=drive) assert calc_size == 15 * 10**12 @@ -129,8 +130,8 @@ class TestCalculateBytes(): drive_size = 20 * 10**12 drive = BlockDevice(None, size=drive_size, available_size=drive_size) - calc_size = ApplyNodeStorage.calculate_bytes( - size_str=size_str, context=drive) + calc_size = ApplyNodeStorage.calculate_bytes(size_str=size_str, + context=drive) assert calc_size == 15 * 10**12 @@ -140,8 +141,8 @@ class TestCalculateBytes(): drive_size = 20 * 10**12 drive = BlockDevice(None, size=drive_size, available_size=drive_size) - calc_size = ApplyNodeStorage.calculate_bytes( - size_str=size_str, context=drive) + calc_size = ApplyNodeStorage.calculate_bytes(size_str=size_str, + context=drive) assert calc_size == 15 * 10**12 @@ -151,8 +152,8 @@ class TestCalculateBytes(): drive_size = 20 * 10**12 drive = BlockDevice(None, size=drive_size, available_size=drive_size) - calc_size = ApplyNodeStorage.calculate_bytes( - size_str=size_str, context=drive) + calc_size = ApplyNodeStorage.calculate_bytes(size_str=size_str, + context=drive) assert calc_size == 15 * 10**12 @@ -162,8 +163,8 @@ class TestCalculateBytes(): drive_size = 20 * 2**20 drive = BlockDevice(None, size=drive_size, available_size=drive_size) - calc_size = ApplyNodeStorage.calculate_bytes( - size_str=size_str, context=drive) + calc_size = 
ApplyNodeStorage.calculate_bytes(size_str=size_str, + context=drive) assert calc_size == 15 * 2**20 @@ -173,8 +174,8 @@ class TestCalculateBytes(): drive_size = 20 * 2**20 drive = BlockDevice(None, size=drive_size, available_size=drive_size) - calc_size = ApplyNodeStorage.calculate_bytes( - size_str=size_str, context=drive) + calc_size = ApplyNodeStorage.calculate_bytes(size_str=size_str, + context=drive) assert calc_size == 15 * 2**20 @@ -184,8 +185,8 @@ class TestCalculateBytes(): drive_size = 20 * 2**20 drive = BlockDevice(None, size=drive_size, available_size=drive_size) - calc_size = ApplyNodeStorage.calculate_bytes( - size_str=size_str, context=drive) + calc_size = ApplyNodeStorage.calculate_bytes(size_str=size_str, + context=drive) assert calc_size == 15 * 2**20 @@ -195,8 +196,8 @@ class TestCalculateBytes(): drive_size = 20 * 2**20 drive = BlockDevice(None, size=drive_size, available_size=drive_size) - calc_size = ApplyNodeStorage.calculate_bytes( - size_str=size_str, context=drive) + calc_size = ApplyNodeStorage.calculate_bytes(size_str=size_str, + context=drive) assert calc_size == 15 * 2**20 @@ -206,8 +207,8 @@ class TestCalculateBytes(): drive_size = 20 * 2**30 drive = BlockDevice(None, size=drive_size, available_size=drive_size) - calc_size = ApplyNodeStorage.calculate_bytes( - size_str=size_str, context=drive) + calc_size = ApplyNodeStorage.calculate_bytes(size_str=size_str, + context=drive) assert calc_size == 15 * 2**30 @@ -217,8 +218,8 @@ class TestCalculateBytes(): drive_size = 20 * 2**30 drive = BlockDevice(None, size=drive_size, available_size=drive_size) - calc_size = ApplyNodeStorage.calculate_bytes( - size_str=size_str, context=drive) + calc_size = ApplyNodeStorage.calculate_bytes(size_str=size_str, + context=drive) assert calc_size == 15 * 2**30 @@ -228,8 +229,8 @@ class TestCalculateBytes(): drive_size = 20 * 2**30 drive = BlockDevice(None, size=drive_size, available_size=drive_size) - calc_size = ApplyNodeStorage.calculate_bytes( - size_str=size_str, context=drive) + calc_size = ApplyNodeStorage.calculate_bytes(size_str=size_str, + context=drive) assert calc_size == 15 * 2**30 @@ -239,8 +240,8 @@ class TestCalculateBytes(): drive_size = 20 * 2**30 drive = BlockDevice(None, size=drive_size, available_size=drive_size) - calc_size = ApplyNodeStorage.calculate_bytes( - size_str=size_str, context=drive) + calc_size = ApplyNodeStorage.calculate_bytes(size_str=size_str, + context=drive) assert calc_size == 15 * 2**30 @@ -250,8 +251,8 @@ class TestCalculateBytes(): drive_size = 20 * 2**40 drive = BlockDevice(None, size=drive_size, available_size=drive_size) - calc_size = ApplyNodeStorage.calculate_bytes( - size_str=size_str, context=drive) + calc_size = ApplyNodeStorage.calculate_bytes(size_str=size_str, + context=drive) assert calc_size == 15 * 2**40 @@ -261,8 +262,8 @@ class TestCalculateBytes(): drive_size = 20 * 2**40 drive = BlockDevice(None, size=drive_size, available_size=drive_size) - calc_size = ApplyNodeStorage.calculate_bytes( - size_str=size_str, context=drive) + calc_size = ApplyNodeStorage.calculate_bytes(size_str=size_str, + context=drive) assert calc_size == 15 * 2**40 @@ -272,8 +273,8 @@ class TestCalculateBytes(): drive_size = 20 * 2**40 drive = BlockDevice(None, size=drive_size, available_size=drive_size) - calc_size = ApplyNodeStorage.calculate_bytes( - size_str=size_str, context=drive) + calc_size = ApplyNodeStorage.calculate_bytes(size_str=size_str, + context=drive) assert calc_size == 15 * 2**40 @@ -283,8 +284,8 @@ class TestCalculateBytes(): 
drive_size = 20 * 2**40 drive = BlockDevice(None, size=drive_size, available_size=drive_size) - calc_size = ApplyNodeStorage.calculate_bytes( - size_str=size_str, context=drive) + calc_size = ApplyNodeStorage.calculate_bytes(size_str=size_str, + context=drive) assert calc_size == 15 * 2**40 @@ -296,8 +297,8 @@ class TestCalculateBytes(): drive = BlockDevice(None, size=drive_size, available_size=drive_size) - calc_size = ApplyNodeStorage.calculate_bytes( - size_str=size_str, context=drive) + calc_size = ApplyNodeStorage.calculate_bytes(size_str=size_str, + context=drive) assert calc_size == part_size @@ -309,8 +310,8 @@ class TestCalculateBytes(): vg = VolumeGroup(None, size=vg_size, available_size=vg_size) - calc_size = ApplyNodeStorage.calculate_bytes( - size_str=size_str, context=vg) + calc_size = ApplyNodeStorage.calculate_bytes(size_str=size_str, + context=vg) assert calc_size == lv_size @@ -333,7 +334,7 @@ class TestCalculateBytes(): vg = VolumeGroup(None, size=vg_size, available_size=vg_available) - calc_size = ApplyNodeStorage.calculate_bytes( - size_str=size_str, context=vg) + calc_size = ApplyNodeStorage.calculate_bytes(size_str=size_str, + context=vg) assert calc_size == vg_available - ApplyNodeStorage.PART_TABLE_RESERVATION diff --git a/python/tests/unit/test_maasdriver_noderesults.py b/python/tests/unit/test_maasdriver_noderesults.py index 4e6963b7..f8fdf96d 100644 --- a/python/tests/unit/test_maasdriver_noderesults.py +++ b/python/tests/unit/test_maasdriver_noderesults.py @@ -16,6 +16,7 @@ from drydock_provisioner.drivers.node.maasdriver.models.node_results import Node class TestMaasNodeResults(): + def test_get_noderesults(self, mocker): '''Test noderesults refresh call to load a list of NodeResults.''' @@ -27,25 +28,17 @@ class TestMaasNodeResults(): def json(self): resp_content = [{ - "id": - 3, - "data": - "SGVsbG8gV29ybGQh", - "result_type": - 0, - "script_result": - 0, - "resource_uri": - "/MAAS/api/2.0/commissioning-scripts/", - "updated": - "2018-07-06T14:32:20.129", + "id": 3, + "data": "SGVsbG8gV29ybGQh", + "result_type": 0, + "script_result": 0, + "resource_uri": "/MAAS/api/2.0/commissioning-scripts/", + "updated": "2018-07-06T14:32:20.129", "node": { "system_id": "r7mqnw" }, - "created": - "2018-07-06T14:37:12.632", - "name": - "hello_world" + "created": "2018-07-06T14:37:12.632", + "name": "hello_world" }] return resp_content diff --git a/python/tests/unit/test_maasdriver_vlan.py b/python/tests/unit/test_maasdriver_vlan.py index 6094efea..fb0252a6 100644 --- a/python/tests/unit/test_maasdriver_vlan.py +++ b/python/tests/unit/test_maasdriver_vlan.py @@ -19,6 +19,7 @@ from drydock_provisioner.drivers.node.maasdriver.errors import RackControllerCon class TestMaasVlan(): + def test_add_rack_controller(self, mocker): '''Test vlan model method for setting a managing rack controller.''' diff --git a/python/tests/unit/test_models.py b/python/tests/unit/test_models.py index 33200f91..6858b555 100644 --- a/python/tests/unit/test_models.py +++ b/python/tests/unit/test_models.py @@ -17,6 +17,7 @@ from drydock_provisioner.objects import fields class TestClass(object): + def test_hardwareprofile(self): objects.register_all() @@ -45,10 +46,8 @@ class TestClass(object): { 'versioned_object.namespace': 'drydock_provisioner.objects', - 'versioned_object.name': - 'HardwareDeviceAlias', - 'versioned_object.version': - '1.0', + 'versioned_object.name': 'HardwareDeviceAlias', + 'versioned_object.version': '1.0', 'versioned_object.data': { 'alias': 'nic', @@ -65,10 +64,8 @@ class 
TestClass(object): { 'versioned_object.namespace': 'drydock_provisioner.objects', - 'versioned_object.name': - 'HardwareDeviceAlias', - 'versioned_object.version': - '1.0', + 'versioned_object.name': 'HardwareDeviceAlias', + 'versioned_object.version': '1.0', 'versioned_object.data': { 'alias': 'bootdisk', 'source': fields.ModelSource.Designed, diff --git a/python/tests/unit/test_node_logicalnames.py b/python/tests/unit/test_node_logicalnames.py index 77c412ca..64c66d04 100644 --- a/python/tests/unit/test_node_logicalnames.py +++ b/python/tests/unit/test_node_logicalnames.py @@ -18,6 +18,7 @@ import drydock_provisioner.objects as objects class TestClass(object): + def test_apply_logicalnames_else(self, input_files, deckhand_orchestrator, drydock_state, mock_get_build_data): """Test node apply_logicalnames hits the else block""" @@ -129,12 +130,11 @@ class TestClass(object): xml_example = xml_example.replace('\n', '') def side_effect(**kwargs): - build_data = objects.BuildData( - node_name="controller01", - task_id="tid", - generator="lshw", - data_format="text/plain", - data_element=xml_example) + build_data = objects.BuildData(node_name="controller01", + task_id="tid", + generator="lshw", + data_format="text/plain", + data_element=xml_example) return [build_data] drydock_state.get_build_data = Mock(side_effect=side_effect) @@ -162,7 +162,8 @@ class TestClass(object): assert nodes[0].get_logicalname('prim_nic02') == 'prim_nic02' def test_apply_logicalnames_nic_autodetect_success(self, input_files, - deckhand_orchestrator, drydock_state, + deckhand_orchestrator, + drydock_state, mock_get_build_data): """Test node apply_logicalnames to get the proper dictionary""" input_file = input_files.join("deckhand_fullsite_nic_autodetect.yaml") @@ -265,12 +266,11 @@ class TestClass(object): xml_example = xml_example.replace('\n', '') def side_effect(**kwargs): - build_data = objects.BuildData( - node_name="controller01", - task_id="tid", - generator="lshw", - data_format="text/plain", - data_element=xml_example) + build_data = objects.BuildData(node_name="controller01", + task_id="tid", + generator="lshw", + data_format="text/plain", + data_element=xml_example) return [build_data] drydock_state.get_build_data = Mock(side_effect=side_effect) diff --git a/python/tests/unit/test_node_naming.py b/python/tests/unit/test_node_naming.py index 8f0a0a35..8e324923 100644 --- a/python/tests/unit/test_node_naming.py +++ b/python/tests/unit/test_node_naming.py @@ -17,6 +17,7 @@ import drydock_provisioner.objects as objects class TestNodeNaming(object): + def test_node_fqdn(self, deckhand_orchestrator, input_files, setup): """Test fqdn rendering.""" input_file = input_files.join("deckhand_fullsite.yaml") diff --git a/python/tests/unit/test_orch_node_filter.py b/python/tests/unit/test_orch_node_filter.py index 7cc236a3..f0371b76 100644 --- a/python/tests/unit/test_orch_node_filter.py +++ b/python/tests/unit/test_orch_node_filter.py @@ -18,6 +18,7 @@ import drydock_provisioner.objects as objects class TestClass(object): + def test_node_filter_obj(self, input_files, setup, deckhand_orchestrator, deckhand_ingester): input_file = input_files.join("deckhand_fullsite.yaml") @@ -31,8 +32,8 @@ class TestClass(object): nf = objects.NodeFilter() nf.filter_type = 'intersection' nf.node_names = ['compute01'] - nfs = objects.NodeFilterSet( - filter_set_type='intersection', filter_set=[nf]) + nfs = objects.NodeFilterSet(filter_set_type='intersection', + filter_set=[nf]) node_list = deckhand_orchestrator.process_node_filter(nfs, 
design_data) @@ -63,8 +64,8 @@ class TestClass(object): assert len(node_list) == 1 - def test_node_filter_by_rackname(self, input_files, setup, deckhand_orchestrator, - deckhand_ingester): + def test_node_filter_by_rackname(self, input_files, setup, + deckhand_orchestrator, deckhand_ingester): input_file = input_files.join("deckhand_fullsite.yaml") design_state = DrydockState() @@ -88,13 +89,14 @@ class TestClass(object): assert len(node_list) == 2 - def test_node_filter_by_nodetag(self, input_files, setup, deckhand_orchestrator, - deckhand_ingester): + def test_node_filter_by_nodetag(self, input_files, setup, + deckhand_orchestrator, deckhand_ingester): input_file = input_files.join("deckhand_fullsite.yaml") design_ref = "file://%s" % str(input_file) - design_status, design_data = deckhand_orchestrator.get_effective_site(design_ref) + design_status, design_data = deckhand_orchestrator.get_effective_site( + design_ref) nfs = { 'filter_set_type': @@ -111,7 +113,8 @@ class TestClass(object): assert len(node_list) == 3 - def test_node_filter_by_nodelabel(self, input_files, setup, deckhand_orchestrator, + def test_node_filter_by_nodelabel(self, input_files, setup, + deckhand_orchestrator, deckhand_ingester): input_file = input_files.join("deckhand_fullsite.yaml") @@ -127,7 +130,9 @@ class TestClass(object): 'filter_set': [ { 'filter_type': 'intersection', - 'node_labels': {'foo': 'baz'}, + 'node_labels': { + 'foo': 'baz' + }, }, ], } diff --git a/python/tests/unit/test_param_reference.py b/python/tests/unit/test_param_reference.py index 933cc272..78176915 100644 --- a/python/tests/unit/test_param_reference.py +++ b/python/tests/unit/test_param_reference.py @@ -21,6 +21,7 @@ LOG = logging.getLogger(__name__) class TestKernelParameterReferences(object): + def test_valid_param_reference(self, deckhand_ingester, input_files, setup): input_file = input_files.join("deckhand_fullsite.yaml") @@ -28,8 +29,8 @@ class TestKernelParameterReferences(object): design_state = DrydockState() design_ref = "file://%s" % str(input_file) - orchestrator = Orchestrator( - state_manager=design_state, ingester=deckhand_ingester) + orchestrator = Orchestrator(state_manager=design_state, + ingester=deckhand_ingester) design_status, design_data = orchestrator.get_effective_site( design_ref) diff --git a/python/tests/unit/test_policy_engine.py b/python/tests/unit/test_policy_engine.py index 96cdd336..f1e1e10f 100644 --- a/python/tests/unit/test_policy_engine.py +++ b/python/tests/unit/test_policy_engine.py @@ -17,6 +17,7 @@ from drydock_provisioner.control.base import DrydockRequestContext class TestDefaultRules(): + def test_register_policy(self, mocker): ''' DrydockPolicy.register_policy() should correctly register all default policy rules diff --git a/python/tests/unit/test_reference_resolver.py b/python/tests/unit/test_reference_resolver.py index c9803cde..2f9584ed 100644 --- a/python/tests/unit/test_reference_resolver.py +++ b/python/tests/unit/test_reference_resolver.py @@ -21,6 +21,7 @@ from drydock_provisioner.statemgmt.design.resolver import ReferenceResolver class TestClass(object): + def test_resolve_file_url(self, input_files): """Test that the resolver will resolve file URLs.""" input_file = input_files.join("fullsite.yaml") diff --git a/python/tests/unit/test_render_routedomain.py b/python/tests/unit/test_render_routedomain.py index 68fe46bb..4d63d44a 100644 --- a/python/tests/unit/test_render_routedomain.py +++ b/python/tests/unit/test_render_routedomain.py @@ -19,6 +19,7 @@ from 
drydock_provisioner.orchestrator.orchestrator import Orchestrator
 class TestRouteDomains(object):
+
     def test_routedomain_render(self, input_files, setup):
         input_file = input_files.join("deckhand_routedomain.yaml")
@@ -29,8 +30,8 @@ class TestRouteDomains(object):
         ingester.enable_plugin(
             'drydock_provisioner.ingester.plugins.deckhand.DeckhandIngester')
-        orchestrator = Orchestrator(
-            state_manager=design_state, ingester=ingester)
+        orchestrator = Orchestrator(state_manager=design_state,
+                                    ingester=ingester)
         design_status, design_data = orchestrator.get_effective_site(
             design_ref)
@@ -60,8 +61,8 @@ class TestRouteDomains(object):
         ingester.enable_plugin(
             'drydock_provisioner.ingester.plugins.deckhand.DeckhandIngester')
-        orchestrator = Orchestrator(
-            state_manager=design_state, ingester=ingester)
+        orchestrator = Orchestrator(state_manager=design_state,
+                                    ingester=ingester)
         design_status, design_data = orchestrator.get_effective_site(
             design_ref)
diff --git a/python/tests/unit/test_schema_validation.py b/python/tests/unit/test_schema_validation.py
index 2959a728..8a608d21 100644
--- a/python/tests/unit/test_schema_validation.py
+++ b/python/tests/unit/test_schema_validation.py
@@ -10,6 +10,7 @@ from jsonschema.exceptions import ValidationError
 class BaseSchemaValidationTest(object):
+
     def _test_validate(self, schema, expect_failure, input_files, input):
         """validates input yaml against schema.
@@ -35,6 +36,7 @@ class BaseSchemaValidationTest(object):
 class TestValidation(BaseSchemaValidationTest):
+
     def test_validate_baremetalNode(self, input_files):
         self._test_validate('baremetalNode.yaml', False, input_files,
                             "baremetalNode.yaml")
diff --git a/python/tests/unit/test_task_link.py b/python/tests/unit/test_task_link.py
index 2263e858..90c48c2c 100644
--- a/python/tests/unit/test_task_link.py
+++ b/python/tests/unit/test_task_link.py
@@ -16,6 +16,7 @@ from drydock_provisioner.objects import TaskStatus
 class TestTaskStatusLinks():
+
     def test_links_add(self):
         '''Add a link to a task status.'''
         ts = TaskStatus()
diff --git a/python/tests/unit/test_task_node_filter.py b/python/tests/unit/test_task_node_filter.py
index 7d8f666d..5cfcbf68 100644
--- a/python/tests/unit/test_task_node_filter.py
+++ b/python/tests/unit/test_task_node_filter.py
@@ -18,11 +18,11 @@ import drydock_provisioner.objects.fields as hd_fields
 class TestTaskFilterGeneration(object):
+
     def test_task_success_focus(self, setup):
         """Test that marking a task successful works correctly."""
-        task = objects.Task(
-            action=hd_fields.OrchestratorAction.Noop,
-            design_ref="http://foo.com")
+        task = objects.Task(action=hd_fields.OrchestratorAction.Noop,
+                            design_ref="http://foo.com")
         task.success(focus='foo')
@@ -31,9 +31,8 @@ class TestTaskFilterGeneration(object):
     def test_task_failure_focus(self, setup):
         """Test that marking a task failed works correctly."""
-        task = objects.Task(
-            action=hd_fields.OrchestratorAction.Noop,
-            design_ref="http://foo.com")
+        task = objects.Task(action=hd_fields.OrchestratorAction.Noop,
+                            design_ref="http://foo.com")
         task.failure(focus='foo')
@@ -42,9 +41,8 @@ class TestTaskFilterGeneration(object):
     def test_task_success_nf(self, setup):
         """Test that a task can generate a node filter based on its success."""
-        task = objects.Task(
-            action=hd_fields.OrchestratorAction.Noop,
-            design_ref="http://foo.com")
+        task = objects.Task(action=hd_fields.OrchestratorAction.Noop,
+                            design_ref="http://foo.com")
         expected_nf = {
             'filter_set_type': 'intersection',
@@ -62,9 +60,8 @@ class TestTaskFilterGeneration(object):
     def test_task_failure_nf(self, setup):
         """Test that a task can generate a node filter based on its failure."""
-        task = objects.Task(
-            action=hd_fields.OrchestratorAction.Noop,
-            design_ref="http://foo.com")
+        task = objects.Task(action=hd_fields.OrchestratorAction.Noop,
+                            design_ref="http://foo.com")
         expected_nf = {
             'filter_set_type': 'intersection',
diff --git a/python/tests/unit/test_validate_design.py b/python/tests/unit/test_validate_design.py
index c57b1e11..b83159b5 100644
--- a/python/tests/unit/test_validate_design.py
+++ b/python/tests/unit/test_validate_design.py
@@ -18,6 +18,7 @@ from drydock_provisioner.orchestrator.validations.validator import Validator
 class TestDesignValidator(object):
+
     def test_validate_design(self, deckhand_ingester, drydock_state,
                              input_files, mock_get_build_data):
         """Test the basic validation engine."""
@@ -25,8 +26,8 @@ class TestDesignValidator(object):
         input_file = input_files.join("deckhand_fullsite.yaml")
         design_ref = "file://%s" % str(input_file)
-        orch = Orchestrator(
-            state_manager=drydock_state, ingester=deckhand_ingester)
+        orch = Orchestrator(state_manager=drydock_state,
+                            ingester=deckhand_ingester)
         status, site_design = Orchestrator.get_effective_site(orch, design_ref)
@@ -42,8 +43,8 @@ class TestDesignValidator(object):
         input_file = input_files.join("deckhand_fullsite_no_nodes.yaml")
         design_ref = "file://%s" % str(input_file)
-        orch = Orchestrator(
-            state_manager=drydock_state, ingester=deckhand_ingester)
+        orch = Orchestrator(state_manager=drydock_state,
+                            ingester=deckhand_ingester)
         status, site_design = Orchestrator.get_effective_site(orch, design_ref)
diff --git a/python/tests/unit/test_validation_rule_boot_storage.py b/python/tests/unit/test_validation_rule_boot_storage.py
index 4da738d3..d17c8088 100644
--- a/python/tests/unit/test_validation_rule_boot_storage.py
+++ b/python/tests/unit/test_validation_rule_boot_storage.py
@@ -23,13 +23,14 @@ LOG = logging.getLogger(__name__)
 class TestRationalBootStorage(object):
+
     def test_boot_storage_rational(self, deckhand_ingester, drydock_state,
                                    input_files, mock_get_build_data):
         input_file = input_files.join("validation.yaml")
         design_ref = "file://%s" % str(input_file)
-        orch = Orchestrator(
-            state_manager=drydock_state, ingester=deckhand_ingester)
+        orch = Orchestrator(state_manager=drydock_state,
+                            ingester=deckhand_ingester)
         status, site_design = Orchestrator.get_effective_site(orch, design_ref)
@@ -45,8 +46,8 @@ class TestRationalBootStorage(object):
         input_file = input_files.join("invalid_boot_storage_small.yaml")
         design_ref = "file://%s" % str(input_file)
-        orch = Orchestrator(
-            state_manager=drydock_state, ingester=deckhand_ingester)
+        orch = Orchestrator(state_manager=drydock_state,
+                            ingester=deckhand_ingester)
         status, site_design = Orchestrator.get_effective_site(orch, design_ref)
@@ -69,8 +70,8 @@ class TestRationalBootStorage(object):
         input_file = input_files.join("invalid_validation.yaml")
         design_ref = "file://%s" % str(input_file)
-        orch = Orchestrator(
-            state_manager=drydock_state, ingester=deckhand_ingester)
+        orch = Orchestrator(state_manager=drydock_state,
+                            ingester=deckhand_ingester)
         status, site_design = Orchestrator.get_effective_site(orch, design_ref)
diff --git a/python/tests/unit/test_validation_rule_bootactions.py b/python/tests/unit/test_validation_rule_bootactions.py
index 9da5939c..9fabce67 100644
--- a/python/tests/unit/test_validation_rule_bootactions.py
+++ b/python/tests/unit/test_validation_rule_bootactions.py
@@ -24,13 +24,14 @@ LOG = logging.getLogger(__name__)
 class TestBootactionsValidity(object):
+
     def test_valid_bootaction(self, deckhand_ingester, drydock_state, setup,
                               input_files, mock_get_build_data):
         input_file = input_files.join("validation.yaml")
         design_ref = "file://%s" % str(input_file)
-        orch = Orchestrator(
-            state_manager=drydock_state, ingester=deckhand_ingester)
+        orch = Orchestrator(state_manager=drydock_state,
+                            ingester=deckhand_ingester)
         status, site_design = Orchestrator.get_effective_site(orch, design_ref)
@@ -51,8 +52,8 @@ class TestBootactionsValidity(object):
         input_file = input_files.join("absent_bootaction.yaml")
         design_ref = "file://%s" % str(input_file)
-        orch = Orchestrator(
-            state_manager=drydock_state, ingester=deckhand_ingester)
+        orch = Orchestrator(state_manager=drydock_state,
+                            ingester=deckhand_ingester)
         status, site_design = Orchestrator.get_effective_site(orch, design_ref)
@@ -70,8 +71,8 @@ class TestBootactionsValidity(object):
         input_file = input_files.join("invalid_bootaction_pkg.yaml")
         design_ref = "file://%s" % str(input_file)
-        orch = Orchestrator(
-            state_manager=drydock_state, ingester=deckhand_ingester)
+        orch = Orchestrator(state_manager=drydock_state,
+                            ingester=deckhand_ingester)
         status, site_design = Orchestrator.get_effective_site(orch, design_ref)
diff --git a/python/tests/unit/test_validation_rule_hostname_validity.py b/python/tests/unit/test_validation_rule_hostname_validity.py
index f05be8cc..fcd95642 100644
--- a/python/tests/unit/test_validation_rule_hostname_validity.py
+++ b/python/tests/unit/test_validation_rule_hostname_validity.py
@@ -22,13 +22,14 @@ LOG = logging.getLogger(__name__)
 class TestHostnameValidity(object):
+
     def test_hostname(self, mocker, deckhand_ingester, drydock_state,
                       input_files):
         input_file = input_files.join("validation.yaml")
         design_ref = "file://%s" % str(input_file)
-        orch = Orchestrator(
-            state_manager=drydock_state, ingester=deckhand_ingester)
+        orch = Orchestrator(state_manager=drydock_state,
+                            ingester=deckhand_ingester)
         status, site_design = Orchestrator.get_effective_site(orch, design_ref)
@@ -46,8 +47,8 @@ class TestHostnameValidity(object):
         input_file = input_files.join("invalid_validation.yaml")
         design_ref = "file://%s" % str(input_file)
-        orch = Orchestrator(
-            state_manager=drydock_state, ingester=deckhand_ingester)
+        orch = Orchestrator(state_manager=drydock_state,
+                            ingester=deckhand_ingester)
         status, site_design = Orchestrator.get_effective_site(orch, design_ref)
diff --git a/python/tests/unit/test_validation_rule_hugepages.py b/python/tests/unit/test_validation_rule_hugepages.py
index e8c01dc6..9d8ed722 100644
--- a/python/tests/unit/test_validation_rule_hugepages.py
+++ b/python/tests/unit/test_validation_rule_hugepages.py
@@ -11,15 +11,15 @@ LOG = logging.getLogger(__name__)
 class TestValidPlatform(object):
+
     def test_valid_platform(self, deckhand_ingester, drydock_state,
                             input_files):
         input_file = input_files.join("validation.yaml")
         design_ref = "file://%s" % str(input_file)
-        orch = Orchestrator(
-            state_manager=drydock_state,
-            ingester=deckhand_ingester,
-            enabled_drivers=config.config_mgr.conf.plugins)
+        orch = Orchestrator(state_manager=drydock_state,
+                            ingester=deckhand_ingester,
+                            enabled_drivers=config.config_mgr.conf.plugins)
         status, site_design = Orchestrator.get_effective_site(orch, design_ref)
@@ -41,10 +41,9 @@ class TestValidPlatform(object):
         input_file = input_files.join("invalid_hugepages.yaml")
         design_ref = "file://%s" % str(input_file)
-        orch = Orchestrator(
-            state_manager=drydock_state,
-            ingester=deckhand_ingester,
-            enabled_drivers=config.config_mgr.conf.plugins)
+        orch = Orchestrator(state_manager=drydock_state,
+                            ingester=deckhand_ingester,
+                            enabled_drivers=config.config_mgr.conf.plugins)
         status, site_design = Orchestrator.get_effective_site(orch, design_ref)
diff --git a/python/tests/unit/test_validation_rule_ip_locality.py b/python/tests/unit/test_validation_rule_ip_locality.py
index ad02d646..d933b323 100644
--- a/python/tests/unit/test_validation_rule_ip_locality.py
+++ b/python/tests/unit/test_validation_rule_ip_locality.py
@@ -22,12 +22,13 @@ LOG = logging.getLogger(__name__)
 class TestIPLocality(object):
+
     def test_ip_locality(self, input_files, drydock_state, deckhand_ingester):
         input_file = input_files.join("validation.yaml")
         design_ref = "file://%s" % str(input_file)
-        orch = Orchestrator(
-            state_manager=drydock_state, ingester=deckhand_ingester)
+        orch = Orchestrator(state_manager=drydock_state,
+                            ingester=deckhand_ingester)
         status, site_design = Orchestrator.get_effective_site(orch, design_ref)
@@ -42,8 +43,8 @@ class TestIPLocality(object):
         input_file = input_files.join("ip_locality_no_networks.yaml")
         design_ref = "file://%s" % str(input_file)
-        orch = Orchestrator(
-            state_manager=drydock_state, ingester=deckhand_ingester)
+        orch = Orchestrator(state_manager=drydock_state,
+                            ingester=deckhand_ingester)
         status, site_design = Orchestrator.get_effective_site(orch, design_ref)
@@ -59,8 +60,8 @@ class TestIPLocality(object):
         input_file = input_files.join("ip_locality_no_gateway.yaml")
         design_ref = "file://%s" % str(input_file)
-        orch = Orchestrator(
-            state_manager=drydock_state, ingester=deckhand_ingester)
+        orch = Orchestrator(state_manager=drydock_state,
+                            ingester=deckhand_ingester)
         status, site_design = Orchestrator.get_effective_site(orch, design_ref)
@@ -76,8 +77,8 @@ class TestIPLocality(object):
         input_file = input_files.join("no_baremetal_node.yaml")
         design_ref = "file://%s" % str(input_file)
-        orch = Orchestrator(
-            state_manager=drydock_state, ingester=deckhand_ingester)
+        orch = Orchestrator(state_manager=drydock_state,
+                            ingester=deckhand_ingester)
         status, site_design = Orchestrator.get_effective_site(orch, design_ref)
@@ -88,13 +89,14 @@ class TestIPLocality(object):
         assert 'No baremetal_nodes found' in msg.get('message')
         assert msg.get('error') is False
-    def test_invalid_ip_locality_invalid_network(
-            self, input_files, drydock_state, deckhand_ingester):
+    def test_invalid_ip_locality_invalid_network(self, input_files,
+                                                 drydock_state,
+                                                 deckhand_ingester):
         input_file = input_files.join("invalid_validation.yaml")
         design_ref = "file://%s" % str(input_file)
-        orch = Orchestrator(
-            state_manager=drydock_state, ingester=deckhand_ingester)
+        orch = Orchestrator(state_manager=drydock_state,
+                            ingester=deckhand_ingester)
         status, site_design = Orchestrator.get_effective_site(orch, design_ref)
diff --git a/python/tests/unit/test_validation_rule_mtu_rational.py b/python/tests/unit/test_validation_rule_mtu_rational.py
index a1b84159..5a89fb41 100644
--- a/python/tests/unit/test_validation_rule_mtu_rational.py
+++ b/python/tests/unit/test_validation_rule_mtu_rational.py
@@ -23,12 +23,13 @@ LOG = logging.getLogger(__name__)
 class TestMtu(object):
+
     def test_mtu(self, mocker, deckhand_ingester, drydock_state, input_files):
         input_file = input_files.join("validation.yaml")
         design_ref = "file://%s" % str(input_file)
-        orch = Orchestrator(
-            state_manager=drydock_state, ingester=deckhand_ingester)
+        orch = Orchestrator(state_manager=drydock_state,
+                            ingester=deckhand_ingester)
         status, site_design = Orchestrator.get_effective_site(orch, design_ref)
@@ -46,8 +47,8 @@ class TestMtu(object):
         input_file = input_files.join("invalid_validation.yaml")
         design_ref = "file://%s" % str(input_file)
-        orch = Orchestrator(
-            state_manager=drydock_state, ingester=deckhand_ingester)
+        orch = Orchestrator(state_manager=drydock_state,
+                            ingester=deckhand_ingester)
         status, site_design = Orchestrator.get_effective_site(orch, design_ref)
diff --git a/python/tests/unit/test_validation_rule_network_bond.py b/python/tests/unit/test_validation_rule_network_bond.py
index 32fc17b8..29e99872 100644
--- a/python/tests/unit/test_validation_rule_network_bond.py
+++ b/python/tests/unit/test_validation_rule_network_bond.py
@@ -23,13 +23,14 @@ LOG = logging.getLogger(__name__)
 class TestRationalNetworkLinkBond(object):
+
     def test_rational_network_bond(self, mocker, deckhand_ingester,
                                    drydock_state, input_files):
         input_file = input_files.join("validation.yaml")
         design_ref = "file://%s" % str(input_file)
-        orch = Orchestrator(
-            state_manager=drydock_state, ingester=deckhand_ingester)
+        orch = Orchestrator(state_manager=drydock_state,
+                            ingester=deckhand_ingester)
         status, site_design = Orchestrator.get_effective_site(orch, design_ref)
@@ -47,8 +48,8 @@ class TestRationalNetworkLinkBond(object):
         design_ref = "file://%s" % str(input_file)
-        orch = Orchestrator(
-            state_manager=drydock_state, ingester=deckhand_ingester)
+        orch = Orchestrator(state_manager=drydock_state,
+                            ingester=deckhand_ingester)
         status, site_design = Orchestrator.get_effective_site(orch, design_ref)
diff --git a/python/tests/unit/test_validation_rule_network_cidr.py b/python/tests/unit/test_validation_rule_network_cidr.py
index 6be2e7f2..c7c069c2 100644
--- a/python/tests/unit/test_validation_rule_network_cidr.py
+++ b/python/tests/unit/test_validation_rule_network_cidr.py
@@ -23,14 +23,15 @@ LOG = logging.getLogger(__name__)
 class TestNetworkCidr(object):
+
     def test_valid_network_cidr(self, mocker, deckhand_ingester,
                                 drydock_state, input_files):
         input_file = input_files.join("validation.yaml")
         design_ref = "file://%s" % str(input_file)
-        orch = Orchestrator(
-            state_manager=drydock_state, ingester=deckhand_ingester)
+        orch = Orchestrator(state_manager=drydock_state,
+                            ingester=deckhand_ingester)
         status, site_design = Orchestrator.get_effective_site(orch, design_ref)
@@ -47,8 +48,8 @@ class TestNetworkCidr(object):
         input_file = input_files.join("invalid_network_cidr.yaml")
         design_ref = "file://%s" % str(input_file)
-        orch = Orchestrator(
-            state_manager=drydock_state, ingester=deckhand_ingester)
+        orch = Orchestrator(state_manager=drydock_state,
+                            ingester=deckhand_ingester)
         status, site_design = Orchestrator.get_effective_site(orch, design_ref)
@@ -64,4 +65,4 @@ class TestNetworkCidr(object):
         assert any([
             regex_diagnostic.search(msg.get('diagnostic')),
             regex_message.search(msg.get('message'))
-        ])
+        ])
diff --git a/python/tests/unit/test_validation_rule_network_trunking.py b/python/tests/unit/test_validation_rule_network_trunking.py
index 4e75bd13..be3ca25e 100644
--- a/python/tests/unit/test_validation_rule_network_trunking.py
+++ b/python/tests/unit/test_validation_rule_network_trunking.py
@@ -23,13 +23,14 @@ LOG = logging.getLogger(__name__)
 class TestRationalNetworkTrunking(object):
+
     def test_rational_network_trunking(self, deckhand_ingester, drydock_state,
                                        input_files):
         input_file = input_files.join("validation.yaml")
         design_ref = "file://%s" % str(input_file)
-        orch = Orchestrator(
-            state_manager=drydock_state, ingester=deckhand_ingester)
+        orch = Orchestrator(state_manager=drydock_state,
+                            ingester=deckhand_ingester)
         status, site_design = Orchestrator.get_effective_site(orch, design_ref)
@@ -44,8 +45,8 @@ class TestRationalNetworkTrunking(object):
         input_file = input_files.join("invalid_rational_network_trunking.yaml")
         design_ref = "file://%s" % str(input_file)
-        orch = Orchestrator(
-            state_manager=drydock_state, ingester=deckhand_ingester)
+        orch = Orchestrator(state_manager=drydock_state,
+                            ingester=deckhand_ingester)
         status, site_design = Orchestrator.get_effective_site(orch, design_ref)
diff --git a/python/tests/unit/test_validation_rule_no_duplicate_IPs.py b/python/tests/unit/test_validation_rule_no_duplicate_IPs.py
index ba028d9b..4980602b 100644
--- a/python/tests/unit/test_validation_rule_no_duplicate_IPs.py
+++ b/python/tests/unit/test_validation_rule_no_duplicate_IPs.py
@@ -22,13 +22,14 @@ LOG = logging.getLogger(__name__)
 class TestDuplicateIPs(object):
+
     def test_no_duplicate_IPs(self, input_files, drydock_state,
                               deckhand_ingester):
         input_file = input_files.join("validation.yaml")
         design_ref = "file://%s" % str(input_file)
-        orch = Orchestrator(
-            state_manager=drydock_state, ingester=deckhand_ingester)
+        orch = Orchestrator(state_manager=drydock_state,
+                            ingester=deckhand_ingester)
         status, site_design = Orchestrator.get_effective_site(orch, design_ref)
@@ -38,13 +39,14 @@ class TestDuplicateIPs(object):
         assert msg.get('error') is False
-    def test_no_duplicate_IPs_no_baremetal_node(
-            self, input_files, drydock_state, deckhand_ingester):
+    def test_no_duplicate_IPs_no_baremetal_node(self, input_files,
+                                                drydock_state,
+                                                deckhand_ingester):
         input_file = input_files.join("no_baremetal_node.yaml")
         design_ref = "file://%s" % str(input_file)
-        orch = Orchestrator(
-            state_manager=drydock_state, ingester=deckhand_ingester)
+        orch = Orchestrator(state_manager=drydock_state,
+                            ingester=deckhand_ingester)
         status, site_design = Orchestrator.get_effective_site(orch, design_ref)
@@ -60,8 +62,8 @@ class TestDuplicateIPs(object):
         input_file = input_files.join("no_duplicate_IPs_no_addressing.yaml")
         design_ref = "file://%s" % str(input_file)
-        orch = Orchestrator(
-            state_manager=drydock_state, ingester=deckhand_ingester)
+        orch = Orchestrator(state_manager=drydock_state,
+                            ingester=deckhand_ingester)
         status, site_design = Orchestrator.get_effective_site(orch, design_ref)
@@ -77,8 +79,8 @@ class TestDuplicateIPs(object):
         input_file = input_files.join("invalid_validation.yaml")
         design_ref = "file://%s" % str(input_file)
-        orch = Orchestrator(
-            state_manager=drydock_state, ingester=deckhand_ingester)
+        orch = Orchestrator(state_manager=drydock_state,
+                            ingester=deckhand_ingester)
         status, site_design = Orchestrator.get_effective_site(orch, design_ref)
diff --git a/python/tests/unit/test_validation_rule_storage_mountpoint.py b/python/tests/unit/test_validation_rule_storage_mountpoint.py
index 67fba1ba..10deb763 100644
--- a/python/tests/unit/test_validation_rule_storage_mountpoint.py
+++ b/python/tests/unit/test_validation_rule_storage_mountpoint.py
@@ -22,15 +22,17 @@ from drydock_provisioner.orchestrator.validations.\
 LOG = logging.getLogger(__name__)
+
 class TestStorageMountpoints(object):
+
     def test_storage_mountpoints(self, deckhand_ingester, drydock_state,
                                  input_files):
         input_file = input_files.join("validation.yaml")
         design_ref = "file://%s" % str(input_file)
-        orch = Orchestrator(
-            state_manager=drydock_state, ingester=deckhand_ingester)
+        orch = Orchestrator(state_manager=drydock_state,
+                            ingester=deckhand_ingester)
         status, site_design = Orchestrator.get_effective_site(orch, design_ref)
         validator = StorageMountpoints()
@@ -47,8 +49,8 @@ class TestStorageMountpoints(object):
         input_file = input_files.join("invalid_validation.yaml")
         design_ref = "file://%s" % str(input_file)
-        orch = Orchestrator(
-            state_manager=drydock_state, ingester=deckhand_ingester)
+        orch = Orchestrator(state_manager=drydock_state,
+                            ingester=deckhand_ingester)
         status, site_design = Orchestrator.get_effective_site(orch, design_ref)
@@ -69,8 +71,8 @@ class TestStorageMountpoints(object):
         input_file = input_files.join("invalid_mountpoint.yaml")
         design_ref = "file://%s" % str(input_file)
-        orch = Orchestrator(
-            state_manager=drydock_state, ingester=deckhand_ingester)
+        orch = Orchestrator(state_manager=drydock_state,
+                            ingester=deckhand_ingester)
         status, site_design = Orchestrator.get_effective_site(orch, design_ref)
@@ -92,8 +94,8 @@ class TestStorageMountpoints(object):
         input_file = input_files.join("partitions_without_mountpoints.yaml")
         design_ref = "file://%s" % str(input_file)
-        orch = Orchestrator(
-            state_manager=drydock_state, ingester=deckhand_ingester)
+        orch = Orchestrator(state_manager=drydock_state,
+                            ingester=deckhand_ingester)
         status, site_design = Orchestrator.get_effective_site(orch, design_ref)
         validator = StorageMountpoints()
diff --git a/python/tests/unit/test_validation_rule_storage_partitioning.py b/python/tests/unit/test_validation_rule_storage_partitioning.py
index 642c65fa..ca26b9cf 100644
--- a/python/tests/unit/test_validation_rule_storage_partitioning.py
+++ b/python/tests/unit/test_validation_rule_storage_partitioning.py
@@ -23,13 +23,14 @@ LOG = logging.getLogger(__name__)
 class TestStoragePartitioning(object):
+
     def test_storage_partitioning(self, deckhand_ingester, drydock_state,
                                   input_files):
         input_file = input_files.join("validation.yaml")
         design_ref = "file://%s" % str(input_file)
-        orch = Orchestrator(
-            state_manager=drydock_state, ingester=deckhand_ingester)
+        orch = Orchestrator(state_manager=drydock_state,
+                            ingester=deckhand_ingester)
         status, site_design = Orchestrator.get_effective_site(orch, design_ref)
@@ -40,14 +41,15 @@ class TestStoragePartitioning(object):
         assert len(message_list) == 1
         assert msg.get('error') is False
-    def test_storage_partitioning_unassigned_partition(
-            self, deckhand_ingester, drydock_state, input_files):
+    def test_storage_partitioning_unassigned_partition(self, deckhand_ingester,
+                                                       drydock_state,
+                                                       input_files):
         input_file = input_files.join(
             "storage_partitioning_unassigned_partition.yaml")
         design_ref = "file://%s" % str(input_file)
-        orch = Orchestrator(
-            state_manager=drydock_state, ingester=deckhand_ingester)
+        orch = Orchestrator(state_manager=drydock_state,
+                            ingester=deckhand_ingester)
         status, site_design = Orchestrator.get_effective_site(orch, design_ref)
@@ -65,8 +67,8 @@ class TestStoragePartitioning(object):
         design_ref = "file://%s" % str(input_file)
-        orch = Orchestrator(
-            state_manager=drydock_state, ingester=deckhand_ingester)
+        orch = Orchestrator(state_manager=drydock_state,
+                            ingester=deckhand_ingester)
         status, site_design = Orchestrator.get_effective_site(orch, design_ref)
diff --git a/python/tests/unit/test_validation_rule_storage_sizing.py b/python/tests/unit/test_validation_rule_storage_sizing.py
index f3aafaf5..a11a2ce0 100644
--- a/python/tests/unit/test_validation_rule_storage_sizing.py
+++ b/python/tests/unit/test_validation_rule_storage_sizing.py
@@ -23,14 +23,15 @@ LOG = logging.getLogger(__name__)
 class TestStorageSizing(object):
+
     def test_storage_sizing(self, deckhand_ingester, drydock_state,
                             input_files):
         input_file = input_files.join("storage_sizing.yaml")
         design_ref = "file://%s" % str(input_file)
-        orch = Orchestrator(
-            state_manager=drydock_state, ingester=deckhand_ingester)
+        orch = Orchestrator(state_manager=drydock_state,
+                            ingester=deckhand_ingester)
         status, site_design = Orchestrator.get_effective_site(orch, design_ref)
@@ -47,8 +48,8 @@ class TestStorageSizing(object):
         input_file = input_files.join("invalid_validation.yaml")
         design_ref = "file://%s" % str(input_file)
-        orch = Orchestrator(
-            state_manager=drydock_state, ingester=deckhand_ingester)
+        orch = Orchestrator(state_manager=drydock_state,
+                            ingester=deckhand_ingester)
         status, site_design = Orchestrator.get_effective_site(orch, design_ref)
diff --git a/python/tests/unit/test_validation_rule_unique_network.py b/python/tests/unit/test_validation_rule_unique_network.py
index 26ea0dfa..04d5c28f 100644
--- a/python/tests/unit/test_validation_rule_unique_network.py
+++ b/python/tests/unit/test_validation_rule_unique_network.py
@@ -23,14 +23,15 @@ LOG = logging.getLogger(__name__)
 class TestUniqueNetwork(object):
+
     def test_unique_network(self, mocker, deckhand_ingester, drydock_state,
                             input_files):
         input_file = input_files.join("validation.yaml")
         design_ref = "file://%s" % str(input_file)
-        orch = Orchestrator(
-            state_manager=drydock_state, ingester=deckhand_ingester)
+        orch = Orchestrator(state_manager=drydock_state,
+                            ingester=deckhand_ingester)
         status, site_design = Orchestrator.get_effective_site(orch, design_ref)
@@ -47,8 +48,8 @@ class TestUniqueNetwork(object):
         input_file = input_files.join("invalid_unique_network.yaml")
         design_ref = "file://%s" % str(input_file)
-        orch = Orchestrator(
-            state_manager=drydock_state, ingester=deckhand_ingester)
+        orch = Orchestrator(state_manager=drydock_state,
+                            ingester=deckhand_ingester)
         status, site_design = Orchestrator.get_effective_site(orch, design_ref)
diff --git a/python/tests/unit/test_validation_rule_valid_platform.py b/python/tests/unit/test_validation_rule_valid_platform.py
index 18676f92..051a2cac 100644
--- a/python/tests/unit/test_validation_rule_valid_platform.py
+++ b/python/tests/unit/test_validation_rule_valid_platform.py
@@ -24,6 +24,7 @@ LOG = logging.getLogger(__name__)
 class TestValidPlatform(object):
+
     def test_valid_platform(self, mocker, deckhand_ingester, drydock_state,
                             input_files, mock_get_build_data):
         mock_images = mocker.patch(
@@ -38,10 +39,9 @@ class TestValidPlatform(object):
         input_file = input_files.join("validation.yaml")
         design_ref = "file://%s" % str(input_file)
-        orch = Orchestrator(
-            state_manager=drydock_state,
-            ingester=deckhand_ingester,
-            enabled_drivers=config.config_mgr.conf.plugins)
+        orch = Orchestrator(state_manager=drydock_state,
+                            ingester=deckhand_ingester,
+                            enabled_drivers=config.config_mgr.conf.plugins)
         status, site_design = Orchestrator.get_effective_site(orch, design_ref)
@@ -70,10 +70,9 @@ class TestValidPlatform(object):
         input_file = input_files.join("invalid_kernel.yaml")
         design_ref = "file://%s" % str(input_file)
-        orch = Orchestrator(
-            state_manager=drydock_state,
-            ingester=deckhand_ingester,
-            enabled_drivers=config.config_mgr.conf.plugins)
+        orch = Orchestrator(state_manager=drydock_state,
+                            ingester=deckhand_ingester,
+                            enabled_drivers=config.config_mgr.conf.plugins)
         status, site_design = Orchestrator.get_effective_site(orch, design_ref)
diff --git a/tools/helm_install.sh b/tools/helm_install.sh
index 14b08598..fa116acc 100755
--- a/tools/helm_install.sh
+++ b/tools/helm_install.sh
@@ -17,7 +17,7 @@ set -x
 HELM=$1
-HELM_ARTIFACT_URL=${HELM_ARTIFACT_URL:-"https://get.helm.sh/helm-v3.6.3-linux-amd64.tar.gz"}
+HELM_ARTIFACT_URL=${HELM_ARTIFACT_URL:-"https://get.helm.sh/helm-v3.9.4-linux-amd64.tar.gz"}
 function install_helm_binary {
diff --git a/tox.ini b/tox.ini
index 1f3544e7..a0f2e3cc 100644
--- a/tox.ini
+++ b/tox.ini
@@ -7,7 +7,7 @@ setupdir=python/
 setenv = YAMLDIR = {toxinidir}/python/tests/yaml_samples/
 passenv = http_proxy,HTTP_PROXY,https_proxy,HTTPS_PROXY,no_proxy,NO_PROXY
 deps=
-  -r{toxinidir}/python/requirements-lock.txt
+  -r{toxinidir}/python/requirements-frozen.txt
 [testenv:venv]
 commands = {posargs}
@@ -17,14 +17,12 @@ recreate = True
 allowlist_externals=
   rm
   sh
-  pipdeptree
 deps=
-  -rpython/requirements-direct.txt
-  -rpython/requirements-test.txt
+  -r{toxinidir}/python/requirements-direct.txt
+  -r{toxinidir}/python/test-requirements.txt
 commands=
-  rm -f python/requirements-lock.txt
-  sh -c "pip freeze --all | grep -vE 'drydock-provisioner|pyinotify|pkg-resources==0.0.0' > python/requirements-lock.txt"
-  sh -c "pipdeptree > python/requirements-tree.txt"
+  rm -f python/requirements-frozen.txt
+  sh -c "pip freeze --all | grep -vE 'drydock_provisioner|pyinotify|pkg-resources==0.0.0' > python/requirements-frozen.txt"
 [testenv:yapf]
 allowlist_externals=find
@@ -46,12 +44,16 @@ commands=
     {toxinidir}/python/tests/unit/{posargs}
 [testenv:integration]
-passenv=DOCKER_REGISTRY,IMAGE_NAME,IMAGE_PREFIX,IMAGE_TAG
+passenv=DOCKER_REGISTRY,IMAGE_NAME,IMAGE_PREFIX,IMAGE_TAG,DISTRO
 setenv=
   PYTHONWARNING=all
   YAMLDIR={toxinidir}/python/tests/yaml_samples/
+allowlist_externals=
+  sh
+  pip3
 commands=
-  {toxinidir}/python/tests/postgres/start_postgres.sh
+  pip3 install ./python/ --use-pep517
+  sh -c {toxinidir}/python/tests/postgres/start_postgres.sh
   py.test \
     {toxinidir}/python/tests/integration/postgres/{posargs}
@@ -60,8 +62,10 @@ usedevelop=True
 passenv=DOCKER_REGISTRY,IMAGE_NAME,IMAGE_PREFIX,IMAGE_TAG
 setenv=
   YAMLDIR={toxinidir}/python/tests/yaml_samples/
+allowlist_externals=
+  sh
 commands=
-  {toxinidir}/python/tests/postgres/start_postgres.sh
+  sh -c {toxinidir}/python/tests/postgres/start_postgres.sh
   py.test --cov=drydock_provisioner \
     {toxinidir}/python/tests/unit/ {toxinidir}/python/tests/integration/postgres