From 6ca7aa4bffe60986cd2d708fa045c126f12dec13 Mon Sep 17 00:00:00 2001 From: Scott Hussey Date: Wed, 26 Sep 2018 08:57:51 -0500 Subject: [PATCH] Catchup YAPF formatting Change-Id: Ic54f77b4b0bdd9199fbc10dfdfc43d3af8f0bfd1 --- docs/source/_static/drydock.conf.sample | 6 +- etc/drydock/drydock.conf.sample | 9 ++ etc/drydock/policy.yaml.sample | 4 + .../drydock_provisioner/cli/part/commands.py | 4 +- .../drydock_provisioner/cli/task/commands.py | 4 +- .../promenade_driver/actions/k8s_node.py | 3 +- .../kubernetes/promenade_driver/driver.py | 10 +- .../promenade_driver/promenade_client.py | 22 ++- .../drivers/node/maasdriver/actions/node.py | 55 ++++---- .../drivers/node/maasdriver/models/machine.py | 4 +- .../drydock_provisioner/objects/bootaction.py | 3 +- python/drydock_provisioner/objects/fields.py | 12 +- python/drydock_provisioner/objects/node.py | 1 + python/drydock_provisioner/objects/task.py | 6 +- .../orchestrator/actions/orchestrator.py | 10 +- .../orchestrator/orchestrator.py | 5 +- .../validations/network_trunking_rational.py | 10 +- python/drydock_provisioner/policy.py | 27 ++-- python/drydock_provisioner/statemgmt/state.py | 9 +- .../postgres/test_api_bootaction.py | 9 +- .../postgres/test_postgres_builddata.py | 5 +- .../unit/test_k8sdriver_promenade_client.py | 126 ++++++++---------- 22 files changed, 172 insertions(+), 172 deletions(-) diff --git a/docs/source/_static/drydock.conf.sample b/docs/source/_static/drydock.conf.sample index d0fc93d7..b0c17d54 100644 --- a/docs/source/_static/drydock.conf.sample +++ b/docs/source/_static/drydock.conf.sample @@ -404,12 +404,12 @@ # Timeout in minutes for deploying a node (integer value) #deploy_node = 45 -# Timeout in minutes for relabeling a node (integer value) -#relabel_node = 5 - # Timeout in minutes between deployment completion and the all boot actions # reporting status (integer value) #bootaction_final_status = 15 # Timeout in minutes for releasing a node (integer value) #destroy_node = 30 + +# Timeout in minutes for relabeling a node (integer value) +#relabel_node = 5 diff --git a/etc/drydock/drydock.conf.sample b/etc/drydock/drydock.conf.sample index 64f6fb84..b0c17d54 100644 --- a/etc/drydock/drydock.conf.sample +++ b/etc/drydock/drydock.conf.sample @@ -276,6 +276,9 @@ # Logger name for Node driver logging (string value) #nodedriver_logger_name = ${global_logger_name}.nodedriver +# Logger name for Kubernetes driver logging (string value) +#kubernetesdriver_logger_name = ${global_logger_name}.kubernetesdriver + # Logger name for API server logging (string value) #control_logger_name = ${global_logger_name}.control @@ -350,6 +353,9 @@ # Module path string of the Node driver to enable (string value) #node_driver = drydock_provisioner.drivers.node.maasdriver.driver.MaasNodeDriver +# Module path string of the Kubernetes driver to enable (string value) +#kubernetes_driver = drydock_provisioner.drivers.kubernetes.promenade_driver.driver.PromenadeDriver + # Module path string of the Network driver enable (string value) #network_driver = @@ -404,3 +410,6 @@ # Timeout in minutes for releasing a node (integer value) #destroy_node = 30 + +# Timeout in minutes for relabeling a node (integer value) +#relabel_node = 5 diff --git a/etc/drydock/policy.yaml.sample b/etc/drydock/policy.yaml.sample index 65706bf5..22b23659 100644 --- a/etc/drydock/policy.yaml.sample +++ b/etc/drydock/policy.yaml.sample @@ -38,6 +38,10 @@ # POST /api/v1.0/tasks #"physical_provisioner:destroy_nodes": "role:admin" +# Create relabel_nodes task +# POST 
/api/v1.0/tasks +#"physical_provisioner:relabel_nodes": "role:admin" + # Read build data for a node # GET /api/v1.0/nodes/{nodename}/builddata #"physical_provisioner:read_build_data": "role:admin" diff --git a/python/drydock_provisioner/cli/part/commands.py b/python/drydock_provisioner/cli/part/commands.py index 2a90a3a4..b4abe9df 100644 --- a/python/drydock_provisioner/cli/part/commands.py +++ b/python/drydock_provisioner/cli/part/commands.py @@ -64,8 +64,8 @@ def part_list(ctx): """List parts of a design.""" click.echo( json.dumps( - PartList(ctx.obj['CLIENT'], design_id=ctx.obj['DESIGN_ID']) - .invoke())) + PartList(ctx.obj['CLIENT'], + design_id=ctx.obj['DESIGN_ID']).invoke())) @part.command(name='show') diff --git a/python/drydock_provisioner/cli/task/commands.py b/python/drydock_provisioner/cli/task/commands.py index 1e592c93..d7f207a1 100644 --- a/python/drydock_provisioner/cli/task/commands.py +++ b/python/drydock_provisioner/cli/task/commands.py @@ -79,8 +79,8 @@ def task_create(ctx, if node_names else [], rack_names=[x.strip() for x in rack_names.split(',')] if rack_names else [], - node_tags=[x.strip() for x in node_tags.split(',')] - if node_tags else [], + node_tags=[x.strip() + for x in node_tags.split(',')] if node_tags else [], block=block, poll_interval=poll_interval).invoke())) diff --git a/python/drydock_provisioner/drivers/kubernetes/promenade_driver/actions/k8s_node.py b/python/drydock_provisioner/drivers/kubernetes/promenade_driver/actions/k8s_node.py index 359a6796..79307272 100644 --- a/python/drydock_provisioner/drivers/kubernetes/promenade_driver/actions/k8s_node.py +++ b/python/drydock_provisioner/drivers/kubernetes/promenade_driver/actions/k8s_node.py @@ -58,7 +58,8 @@ class RelabelNode(PromenadeAction): for n in nodes: # Relabel node through Promenade try: - self.logger.info("Relabeling node %s with node label data." % n.name) + self.logger.info( + "Relabeling node %s with node label data." 
% n.name) labels_dict = n.get_node_labels() msg = "Set labels %s for node %s" % (str(labels_dict), n.name) diff --git a/python/drydock_provisioner/drivers/kubernetes/promenade_driver/driver.py b/python/drydock_provisioner/drivers/kubernetes/promenade_driver/driver.py index 876d5fac..583d97c3 100644 --- a/python/drydock_provisioner/drivers/kubernetes/promenade_driver/driver.py +++ b/python/drydock_provisioner/drivers/kubernetes/promenade_driver/driver.py @@ -35,8 +35,7 @@ class PromenadeDriver(KubernetesDriver): driver_desc = 'Promenade Kubernetes Driver' action_class_map = { - hd_fields.OrchestratorAction.RelabelNode: - RelabelNode, + hd_fields.OrchestratorAction.RelabelNode: RelabelNode, } def __init__(self, **kwargs): @@ -103,8 +102,7 @@ class PromenadeDriver(KubernetesDriver): action.start) timeout = action_timeouts.get( - task.action, - config.config_mgr.conf.timeouts.relabel_node) + task.action, config.config_mgr.conf.timeouts.relabel_node) finished, running = concurrent.futures.wait( subtask_futures.values(), timeout=(timeout * 60)) @@ -118,8 +116,8 @@ class PromenadeDriver(KubernetesDriver): task.failure() else: if f.exception(): - msg = ("Subtask %s raised unexpected exception: %s" - % (str(uuid.UUID(bytes=t)), str(f.exception()))) + msg = ("Subtask %s raised unexpected exception: %s" % + (str(uuid.UUID(bytes=t)), str(f.exception()))) self.logger.error(msg, exc_info=f.exception()) task.add_status_msg( msg=msg, diff --git a/python/drydock_provisioner/drivers/kubernetes/promenade_driver/promenade_client.py b/python/drydock_provisioner/drivers/kubernetes/promenade_driver/promenade_client.py index 386c992f..c4b6ef91 100644 --- a/python/drydock_provisioner/drivers/kubernetes/promenade_driver/promenade_client.py +++ b/python/drydock_provisioner/drivers/kubernetes/promenade_driver/promenade_client.py @@ -22,6 +22,7 @@ from keystoneauth1 import exceptions as exc import drydock_provisioner.error as errors from drydock_provisioner.util import KeystoneUtils + # TODO: Remove this local implementation of Promenade Session and client once # Promenade api client is available as part of Promenade project. 
class PromenadeSession(object): @@ -35,10 +36,7 @@ class PromenadeSession(object): read timeout to use """ - def __init__(self, - scheme='http', - marker=None, - timeout=None): + def __init__(self, scheme='http', marker=None, timeout=None): self.logger = logging.getLogger(__name__) self.__session = requests.Session() @@ -63,8 +61,8 @@ class PromenadeSession(object): def set_auth(self): - auth_header = self._auth_gen() - self.__session.headers.update(auth_header) + auth_header = self._auth_gen() + self.__session.headers.update(auth_header) def get(self, route, query=None, timeout=None): """ @@ -220,11 +218,10 @@ class PromenadeSession(object): try: ks_session = KeystoneUtils.get_session() except exc.AuthorizationFailure as aferr: - self.logger.error( - 'Could not authorize against Keystone: %s', - str(aferr)) - raise errors.DriverError('Could not authorize against Keystone: %s', - str(aferr)) + self.logger.error('Could not authorize against Keystone: %s', + str(aferr)) + raise errors.DriverError( + 'Could not authorize against Keystone: %s', str(aferr)) return ks_session @@ -235,8 +232,7 @@ class PromenadeSession(object): try: prom_endpoint = ks_session.get_endpoint( - interface='internal', - service_type='kubernetesprovisioner') + interface='internal', service_type='kubernetesprovisioner') except exc.EndpointNotFound: self.logger.error("Could not find an internal interface" " defined in Keystone for Promenade") diff --git a/python/drydock_provisioner/drivers/node/maasdriver/actions/node.py b/python/drydock_provisioner/drivers/node/maasdriver/actions/node.py index 6a376b0d..732aec98 100644 --- a/python/drydock_provisioner/drivers/node/maasdriver/actions/node.py +++ b/python/drydock_provisioner/drivers/node/maasdriver/actions/node.py @@ -278,7 +278,8 @@ class DestroyNode(BaseMaasAction): site_design) for n in nodes: try: - machine = machine_list.identify_baremetal_node(n, update_name=False) + machine = machine_list.identify_baremetal_node( + n, update_name=False) if machine is None: msg = "Could not locate machine for node {}".format(n.name) @@ -297,7 +298,8 @@ class DestroyNode(BaseMaasAction): try: machine.release(erase_disk=True, quick_erase=True) except errors.DriverError: - msg = "Error Releasing node {}, skipping".format(n.name) + msg = "Error Releasing node {}, skipping".format( + n.name) self.logger.warning(msg) self.task.add_status_msg( msg=msg, error=True, ctx=n.name, ctx_type='node') @@ -306,25 +308,26 @@ class DestroyNode(BaseMaasAction): # node release with erase disk will take sometime monitor it attempts = 0 - max_attempts = (config.config_mgr.conf.timeouts.destroy_node - * 60) // config.config_mgr.conf.maasdriver.poll_interval + max_attempts = ( + config.config_mgr.conf.timeouts.destroy_node * + 60) // config.config_mgr.conf.maasdriver.poll_interval - while (attempts < max_attempts - and (not machine.status_name.startswith('Ready') - and not machine.status_name.startswith( - 'Failed'))): + while (attempts < max_attempts and + (not machine.status_name.startswith('Ready') + and not machine.status_name.startswith('Failed'))): attempts = attempts + 1 time.sleep( config.config_mgr.conf.maasdriver.poll_interval) try: machine.refresh() self.logger.debug( - "Polling node {} status attempt {:d} of {:d}: {}".format( - n.name, attempts, max_attempts, - machine.status_name)) + "Polling node {} status attempt {:d} of {:d}: {}" + .format(n.name, attempts, max_attempts, + machine.status_name)) except Exception: self.logger.warning( - "Error updating node {} status during release node, will 
re-attempt.".format(n.name)) + "Error updating node {} status during release node, will re-attempt." + .format(n.name)) if machine.status_name.startswith('Ready'): msg = "Node {} released and disk erased.".format( n.name) @@ -354,8 +357,8 @@ class DestroyNode(BaseMaasAction): try: if n.oob_type == 'libvirt': self.logger.info( - 'Resetting MaaS virsh power parameters for node {}.'.format( - n.name)) + 'Resetting MaaS virsh power parameters for node {}.' + .format(n.name)) # setting power type attibutes to empty string # will remove them from maas BMC table machine.reset_power_parameters() @@ -363,8 +366,8 @@ class DestroyNode(BaseMaasAction): pass machine.delete() - msg = "Deleted Node: {} in status: {}.".format(n.name, - machine.status_name) + msg = "Deleted Node: {} in status: {}.".format( + n.name, machine.status_name) self.logger.info(msg) self.task.add_status_msg( msg=msg, error=False, ctx=n.name, ctx_type='node') @@ -1147,16 +1150,17 @@ class ConfigureHardware(BaseMaasAction): # Poll machine status attempts = 0 - max_attempts = (config.config_mgr.conf.timeouts.configure_hardware - * 60) // config.config_mgr.conf.maasdriver.poll_interval + max_attempts = ( + config.config_mgr.conf.timeouts.configure_hardware + * 60 + ) // config.config_mgr.conf.maasdriver.poll_interval while (attempts < max_attempts and (machine.status_name != 'Ready' and not machine.status_name.startswith('Failed'))): attempts = attempts + 1 - time.sleep( - config.config_mgr.conf.maasdriver.poll_interval - ) + time.sleep(config.config_mgr.conf.maasdriver. + poll_interval) try: machine.refresh() self.logger.debug( @@ -1226,7 +1230,9 @@ class ConfigureHardware(BaseMaasAction): except Exception as ex: msg = "Error commissioning node %s: %s" % (n.name, str(ex)) self.logger.warning(msg) - self.logger.debug("Unhandled exception attempting to commission node.", exc_info=ex) + self.logger.debug( + "Unhandled exception attempting to commission node.", + exc_info=ex) self.task.add_status_msg( msg=msg, error=True, ctx=n.name, ctx_type='node') self.task.failure(focus=n.get_id()) @@ -2312,8 +2318,9 @@ class DeployNode(BaseMaasAction): continue attempts = 0 - max_attempts = (config.config_mgr.conf.timeouts.deploy_node - * 60) // config.config_mgr.conf.maasdriver.poll_interval + max_attempts = ( + config.config_mgr.conf.timeouts.deploy_node * + 60) // config.config_mgr.conf.maasdriver.poll_interval while (attempts < max_attempts and (not machine.status_name.startswith('Deployed') diff --git a/python/drydock_provisioner/drivers/node/maasdriver/models/machine.py b/python/drydock_provisioner/drivers/node/maasdriver/models/machine.py index ff6b5a5f..4add2899 100644 --- a/python/drydock_provisioner/drivers/node/maasdriver/models/machine.py +++ b/python/drydock_provisioner/drivers/node/maasdriver/models/machine.py @@ -423,8 +423,8 @@ class Machine(model_base.ResourceBase): return True raise errors.DriverError( - "Failed updating power parameters MAAS url {} - return code {}\n{}".format( - url, resp.status_code.resp.text)) + "Failed updating power parameters MAAS url {} - return code {}\n{}" + .format(url, resp.status_code.resp.text)) def to_dict(self): """Serialize this resource instance into a dict. 
diff --git a/python/drydock_provisioner/objects/bootaction.py b/python/drydock_provisioner/objects/bootaction.py index b55a962e..b04326b4 100644 --- a/python/drydock_provisioner/objects/bootaction.py +++ b/python/drydock_provisioner/objects/bootaction.py @@ -87,7 +87,8 @@ class BootAction(base.DrydockPersistentObject, base.DrydockObject): for a in self.asset_list: if type_filter is None or (type_filter is not None and a.type == type_filter): - a.render(nodename, site_design, action_id, action_key, design_ref) + a.render(nodename, site_design, action_id, action_key, + design_ref) assets.append(a) return assets diff --git a/python/drydock_provisioner/objects/fields.py b/python/drydock_provisioner/objects/fields.py index 71c6d487..87cf0f4d 100644 --- a/python/drydock_provisioner/objects/fields.py +++ b/python/drydock_provisioner/objects/fields.py @@ -70,12 +70,12 @@ class OrchestratorAction(BaseDrydockEnum): ALL = (Noop, ValidateDesign, VerifySite, PrepareSite, VerifyNodes, PrepareNodes, DeployNodes, BootactionReport, DestroyNodes, - RelabelNodes, ConfigNodePxe, SetNodeBoot, PowerOffNode, - PowerOnNode, PowerCycleNode, InterrogateOob, RelabelNode, - CreateNetworkTemplate, CreateStorageTemplate, CreateBootMedia, - PrepareHardwareConfig, ConfigureHardware, InterrogateNode, - ApplyNodeNetworking, ApplyNodeStorage, ApplyNodePlatform, - DeployNode, DestroyNode, ConfigureNodeProvisioner) + RelabelNodes, ConfigNodePxe, SetNodeBoot, PowerOffNode, PowerOnNode, + PowerCycleNode, InterrogateOob, RelabelNode, CreateNetworkTemplate, + CreateStorageTemplate, CreateBootMedia, PrepareHardwareConfig, + ConfigureHardware, InterrogateNode, ApplyNodeNetworking, + ApplyNodeStorage, ApplyNodePlatform, DeployNode, DestroyNode, + ConfigureNodeProvisioner) class OrchestratorActionField(fields.BaseEnumField): diff --git a/python/drydock_provisioner/objects/node.py b/python/drydock_provisioner/objects/node.py index a1ad5567..6e38f979 100644 --- a/python/drydock_provisioner/objects/node.py +++ b/python/drydock_provisioner/objects/node.py @@ -338,6 +338,7 @@ class BaremetalNode(drydock_provisioner.objects.hostprofile.HostProfile): return labels_dict + @base.DrydockObjectRegistry.register class BaremetalNodeList(base.DrydockObjectListBase, base.DrydockObject): diff --git a/python/drydock_provisioner/objects/task.py b/python/drydock_provisioner/objects/task.py index 8de7870f..5fc910c2 100644 --- a/python/drydock_provisioner/objects/task.py +++ b/python/drydock_provisioner/objects/task.py @@ -274,7 +274,8 @@ class Task(object): "Bubbling subtask success for entity %s." % se) self.result.add_success(se) else: - self.logger.debug("Skipping subtask success due to action filter.") + self.logger.debug( + "Skipping subtask success due to action filter.") # All failures are bubbled up. if self.retry == 0 or (self.retry == st.retry): for fe in st.result.failures: @@ -283,8 +284,7 @@ class Task(object): self.result.add_failure(fe) else: self.logger.debug( - "Skipping failures as they mismatch task retry sequence." - ) + "Skipping failures as they mismatch task retry sequence.") def align_result(self, action_filter=None, reset_status=True): """Align the result of this task with the combined results of all the subtasks. 
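Since every hunk in this patch is mechanical rewrapping, it may help to see how such a catchup pass is produced. A minimal sketch using YAPF's Python API, assuming YAPF is installed and using the stock `pep8` style as a stand-in; the repository's actual style configuration may set different knobs, so treat this as illustrative only:

```python
# Sketch of a YAPF catchup pass over the source tree (assumptions: yapf is
# installed; 'pep8' stands in for the repo's real style configuration).
from pathlib import Path

from yapf.yapflib.yapf_api import FormatFile

for path in Path('python/drydock_provisioner').rglob('*.py'):
    # in_place=True rewrites the file; FormatFile also reports whether
    # anything changed, which is handy for a CI formatting gate.
    _, _, changed = FormatFile(str(path), in_place=True, style_config='pep8')
    if changed:
        print('reformatted', path)
```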
diff --git a/python/drydock_provisioner/orchestrator/actions/orchestrator.py b/python/drydock_provisioner/orchestrator/actions/orchestrator.py index 90c6647f..26fbdd84 100644 --- a/python/drydock_provisioner/orchestrator/actions/orchestrator.py +++ b/python/drydock_provisioner/orchestrator/actions/orchestrator.py @@ -245,9 +245,8 @@ class DestroyNodes(BaseAction): node_filter=self.task.node_filter) self.task.register_subtask(node_release_task) - self.logger.info( - "Starting node driver task %s to Release nodes." % - (node_release_task.get_id())) + self.logger.info("Starting node driver task %s to Release nodes." % + (node_release_task.get_id())) node_driver.execute_task(node_release_task.get_id()) node_release_task = self.state_manager.get_task( @@ -1079,9 +1078,8 @@ class RelabelNodes(BaseAction): node_filter=nf) self.task.register_subtask(relabel_node_task) - self.logger.info( - "Starting kubernetes driver task %s to relabel nodes." % - (relabel_node_task.get_id())) + self.logger.info("Starting kubernetes driver task %s to relabel nodes." + % (relabel_node_task.get_id())) kubernetes_driver.execute_task(relabel_node_task.get_id()) relabel_node_task = self.state_manager.get_task( diff --git a/python/drydock_provisioner/orchestrator/orchestrator.py b/python/drydock_provisioner/orchestrator/orchestrator.py index 6ea9f66d..44f23e55 100644 --- a/python/drydock_provisioner/orchestrator/orchestrator.py +++ b/python/drydock_provisioner/orchestrator/orchestrator.py @@ -109,8 +109,9 @@ class Orchestrator(object): kubernetes_driver_class = getattr( importlib.import_module(m), c, None) if kubernetes_driver_class is not None: - self.enabled_drivers['kubernetes'] = kubernetes_driver_class( - state_manager=state_manager, orchestrator=self) + self.enabled_drivers[ + 'kubernetes'] = kubernetes_driver_class( + state_manager=state_manager, orchestrator=self) def watch_for_tasks(self): """Start polling the database watching for Queued tasks to execute.""" diff --git a/python/drydock_provisioner/orchestrator/validations/network_trunking_rational.py b/python/drydock_provisioner/orchestrator/validations/network_trunking_rational.py index eb30e956..bc2f30de 100644 --- a/python/drydock_provisioner/orchestrator/validations/network_trunking_rational.py +++ b/python/drydock_provisioner/orchestrator/validations/network_trunking_rational.py @@ -40,17 +40,15 @@ class NetworkTrunkingRational(Validators): ) # trunking mode is disabled, default_network must be defined - if (network_link.trunk_mode == - hd_fields.NetworkLinkTrunkingMode.Disabled - and network_link.native_network is None): + if (network_link.trunk_mode == hd_fields.NetworkLinkTrunkingMode. + Disabled and network_link.native_network is None): msg = 'Trunking mode is disabled, a trunking default_network must be defined' self.report_error( msg, [network_link.doc_ref], "Non-trunked links must have a native network defined.") - elif (network_link.trunk_mode == - hd_fields.NetworkLinkTrunkingMode.Disabled - and network_link.native_network is not None): + elif (network_link.trunk_mode == hd_fields.NetworkLinkTrunkingMode. + Disabled and network_link.native_network is not None): network = site_design.get_network(network_link.native_network) if network and network.vlan_id: msg = "Network link native network has a defined VLAN tag." 
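For context on the orchestrator.py hunk above: the newly wrapped assignment is part of the same dynamic-loading pattern used for the node driver, where the dotted path from configuration is split into a module and a class name and resolved at runtime. A condensed sketch of that pattern; `load_driver_class` is an illustrative helper name, not a real Drydock function:

```python
# Illustrative sketch of the dynamic driver resolution seen above;
# load_driver_class is a made-up name, not part of Drydock's API.
import importlib


def load_driver_class(dotted_path):
    """Resolve 'package.module.ClassName' to a class, or None if missing."""
    module_path, _, class_name = dotted_path.rpartition('.')
    module = importlib.import_module(module_path)
    return getattr(module, class_name, None)


# Works for any importable dotted path; with Drydock installed this resolves
# the same default shown in drydock.conf.sample earlier in this patch.
try:
    driver_cls = load_driver_class(
        'drydock_provisioner.drivers.kubernetes.promenade_driver'
        '.driver.PromenadeDriver')
except ImportError:
    driver_cls = None
# The orchestrator then instantiates the class with its own state manager,
# e.g. driver_cls(state_manager=state_manager, orchestrator=self).
print(driver_cls)
```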
diff --git a/python/drydock_provisioner/policy.py b/python/drydock_provisioner/policy.py index a4c182fd..8fa1fccc 100644 --- a/python/drydock_provisioner/policy.py +++ b/python/drydock_provisioner/policy.py @@ -38,15 +38,15 @@ class DrydockPolicy(object): # Orchestrator Policy task_rules = [ - policy.DocumentedRuleDefault('physical_provisioner:read_task', - 'role:admin', 'Get task status', - [{ - 'path': '/api/v1.0/tasks', - 'method': 'GET' - }, { - 'path': '/api/v1.0/tasks/{task_id}', - 'method': 'GET' - }]), + policy.DocumentedRuleDefault( + 'physical_provisioner:read_task', 'role:admin', 'Get task status', + [{ + 'path': '/api/v1.0/tasks', + 'method': 'GET' + }, { + 'path': '/api/v1.0/tasks/{task_id}', + 'method': 'GET' + }]), policy.DocumentedRuleDefault('physical_provisioner:create_task', 'role:admin', 'Create a task', [{ @@ -103,10 +103,11 @@ class DrydockPolicy(object): }]), policy.DocumentedRuleDefault( 'physical_provisioner:read_build_data', 'role:admin', - 'Read build data for a node', - [{ - 'path': '/api/v1.0/nodes/{nodename}/builddata', - 'method': 'GET', + 'Read build data for a node', [{ + 'path': + '/api/v1.0/nodes/{nodename}/builddata', + 'method': + 'GET', }]), ] diff --git a/python/drydock_provisioner/statemgmt/state.py b/python/drydock_provisioner/statemgmt/state.py index 2166b167..0055681f 100644 --- a/python/drydock_provisioner/statemgmt/state.py +++ b/python/drydock_provisioner/statemgmt/state.py @@ -169,9 +169,8 @@ class DrydockState(object): with self.db_engine.connect() as conn: if allowed_actions is None: query = self.tasks_tbl.select().where( - self.tasks_tbl.c.status == - hd_fields.TaskStatus.Queued).order_by( - self.tasks_tbl.c.created.asc()) + self.tasks_tbl.c.status == hd_fields.TaskStatus. + Queued).order_by(self.tasks_tbl.c.created.asc()) rs = conn.execute(query) else: query = sql.text("SELECT * FROM tasks WHERE " @@ -340,8 +339,8 @@ class DrydockState(object): try: with self.db_engine.connect() as conn: query = self.active_instance_tbl.update().where( - self.active_instance_tbl.c.identity == - leader_id.bytes).values(last_ping=datetime.utcnow()) + self.active_instance_tbl.c.identity == leader_id. 
+ bytes).values(last_ping=datetime.utcnow()) rs = conn.execute(query) rc = rs.rowcount diff --git a/python/tests/integration/postgres/test_api_bootaction.py b/python/tests/integration/postgres/test_api_bootaction.py index fa3bcc7b..421897c0 100644 --- a/python/tests/integration/postgres/test_api_bootaction.py +++ b/python/tests/integration/postgres/test_api_bootaction.py @@ -26,7 +26,8 @@ from drydock_provisioner.control.api import start_api class TestClass(object): def test_bootaction_context(self, falcontest, seed_bootaction): """Test that the API will return a boot action context""" - url = "/api/v1.0/bootactions/nodes/%s/units" % seed_bootaction['nodename'] + url = "/api/v1.0/bootactions/nodes/%s/units" % seed_bootaction[ + 'nodename'] auth_hdr = {'X-Bootaction-Key': "%s" % seed_bootaction['identity_key']} result = falcontest.simulate_get(url, headers=auth_hdr) @@ -47,7 +48,8 @@ class TestClass(object): def test_bootaction_context_noauth(self, falcontest, seed_bootaction): """Test that the API will return a boot action context""" - url = "/api/v1.0/bootactions/nodes/%s/units" % seed_bootaction['nodename'] + url = "/api/v1.0/bootactions/nodes/%s/units" % seed_bootaction[ + 'nodename'] result = falcontest.simulate_get(url) @@ -55,7 +57,8 @@ class TestClass(object): def test_bootaction_context_badauth(self, falcontest, seed_bootaction): """Test that the API will return a boot action context""" - url = "/api/v1.0/bootactions/nodes/%s/units" % seed_bootaction['nodename'] + url = "/api/v1.0/bootactions/nodes/%s/units" % seed_bootaction[ + 'nodename'] auth_hdr = {'X-Bootaction-Key': 'deadbeef'} result = falcontest.simulate_get(url, headers=auth_hdr) diff --git a/python/tests/integration/postgres/test_postgres_builddata.py b/python/tests/integration/postgres/test_postgres_builddata.py index 377ee62b..6cf24eec 100644 --- a/python/tests/integration/postgres/test_postgres_builddata.py +++ b/python/tests/integration/postgres/test_postgres_builddata.py @@ -90,9 +90,8 @@ class TestBuildData(object): } build_data_old = copy.deepcopy(build_data_latest) - build_data_old[ - 'collected_date'] = build_data_latest['collected_date'] - timedelta( - days=1) + build_data_old['collected_date'] = build_data_latest[ + 'collected_date'] - timedelta(days=1) build_data_old['task_id'] = uuid.uuid4() build_data1 = objects.BuildData(**build_data_latest) diff --git a/python/tests/unit/test_k8sdriver_promenade_client.py b/python/tests/unit/test_k8sdriver_promenade_client.py index 088bd6cd..72e63f41 100644 --- a/python/tests/unit/test_k8sdriver_promenade_client.py +++ b/python/tests/unit/test_k8sdriver_promenade_client.py @@ -25,16 +25,14 @@ PROM_URL = urlparse('http://promhost:80/api/v1.0') PROM_HOST = 'promhost' -@mock.patch( - 'drydock_provisioner.drivers.kubernetes' - '.promenade_driver.promenade_client' - '.PromenadeSession._get_prom_url', - return_value=PROM_URL) -@mock.patch( - 'drydock_provisioner.drivers.kubernetes' - '.promenade_driver.promenade_client' - '.PromenadeSession.set_auth', - return_value=None) +@mock.patch('drydock_provisioner.drivers.kubernetes' + '.promenade_driver.promenade_client' + '.PromenadeSession._get_prom_url', + return_value=PROM_URL) +@mock.patch('drydock_provisioner.drivers.kubernetes' + '.promenade_driver.promenade_client' + '.PromenadeSession.set_auth', + return_value=None) @responses.activate def test_put(patch1, patch2): """ @@ -47,51 +45,43 @@ def test_put(patch1, patch2): status=200) prom_session = PromenadeSession() - result = prom_session.put('v1.0/node-label/n1', - 
body='{"key1":"label1"}', - timeout=(60, 60)) + result = prom_session.put( + 'v1.0/node-label/n1', body='{"key1":"label1"}', timeout=(60, 60)) assert PROM_HOST == prom_session.host assert result.status_code == 200 -@mock.patch( - 'drydock_provisioner.drivers.kubernetes' - '.promenade_driver.promenade_client' - '.PromenadeSession._get_prom_url', - return_value=PROM_URL) -@mock.patch( - 'drydock_provisioner.drivers.kubernetes' - '.promenade_driver.promenade_client' - '.PromenadeSession.set_auth', - return_value=None) +@mock.patch('drydock_provisioner.drivers.kubernetes' + '.promenade_driver.promenade_client' + '.PromenadeSession._get_prom_url', + return_value=PROM_URL) +@mock.patch('drydock_provisioner.drivers.kubernetes' + '.promenade_driver.promenade_client' + '.PromenadeSession.set_auth', + return_value=None) @responses.activate def test_get(patch1, patch2): """ Test get functionality """ responses.add( - responses.GET, - 'http://promhost:80/api/v1.0/node-label/n1', - status=200) + responses.GET, 'http://promhost:80/api/v1.0/node-label/n1', status=200) prom_session = PromenadeSession() - result = prom_session.get('v1.0/node-label/n1', - timeout=(60, 60)) + result = prom_session.get('v1.0/node-label/n1', timeout=(60, 60)) assert result.status_code == 200 -@mock.patch( - 'drydock_provisioner.drivers.kubernetes' - '.promenade_driver.promenade_client' - '.PromenadeSession._get_prom_url', - return_value=PROM_URL) -@mock.patch( - 'drydock_provisioner.drivers.kubernetes' - '.promenade_driver.promenade_client' - '.PromenadeSession.set_auth', - return_value=None) +@mock.patch('drydock_provisioner.drivers.kubernetes' + '.promenade_driver.promenade_client' + '.PromenadeSession._get_prom_url', + return_value=PROM_URL) +@mock.patch('drydock_provisioner.drivers.kubernetes' + '.promenade_driver.promenade_client' + '.PromenadeSession.set_auth', + return_value=None) @responses.activate def test_post(patch1, patch2): """ @@ -104,24 +94,21 @@ def test_post(patch1, patch2): status=200) prom_session = PromenadeSession() - result = prom_session.post('v1.0/node-label/n1', - body='{"key1":"label1"}', - timeout=(60, 60)) + result = prom_session.post( + 'v1.0/node-label/n1', body='{"key1":"label1"}', timeout=(60, 60)) assert PROM_HOST == prom_session.host assert result.status_code == 200 -@mock.patch( - 'drydock_provisioner.drivers.kubernetes' - '.promenade_driver.promenade_client' - '.PromenadeSession._get_prom_url', - return_value=PROM_URL) -@mock.patch( - 'drydock_provisioner.drivers.kubernetes' - '.promenade_driver.promenade_client' - '.PromenadeSession.set_auth', - return_value=None) +@mock.patch('drydock_provisioner.drivers.kubernetes' + '.promenade_driver.promenade_client' + '.PromenadeSession._get_prom_url', + return_value=PROM_URL) +@mock.patch('drydock_provisioner.drivers.kubernetes' + '.promenade_driver.promenade_client' + '.PromenadeSession.set_auth', + return_value=None) @responses.activate def test_relabel_node(patch1, patch2): """ @@ -141,16 +128,14 @@ def test_relabel_node(patch1, patch2): assert result == {"key1": "label1"} -@mock.patch( - 'drydock_provisioner.drivers.kubernetes' - '.promenade_driver.promenade_client' - '.PromenadeSession._get_prom_url', - return_value=PROM_URL) -@mock.patch( - 'drydock_provisioner.drivers.kubernetes' - '.promenade_driver.promenade_client' - '.PromenadeSession.set_auth', - return_value=None) +@mock.patch('drydock_provisioner.drivers.kubernetes' + '.promenade_driver.promenade_client' + '.PromenadeSession._get_prom_url', + return_value=PROM_URL) 
+@mock.patch('drydock_provisioner.drivers.kubernetes' + '.promenade_driver.promenade_client' + '.PromenadeSession.set_auth', + return_value=None) @responses.activate def test_relabel_node_403_status(patch1, patch2): """ @@ -167,16 +152,15 @@ def test_relabel_node_403_status(patch1, patch2): with pytest.raises(errors.ClientForbiddenError): prom_client.relabel_node('n1', {"key1": "label1"}) -@mock.patch( - 'drydock_provisioner.drivers.kubernetes' - '.promenade_driver.promenade_client' - '.PromenadeSession._get_prom_url', - return_value=PROM_URL) -@mock.patch( - 'drydock_provisioner.drivers.kubernetes' - '.promenade_driver.promenade_client' - '.PromenadeSession.set_auth', - return_value=None) + +@mock.patch('drydock_provisioner.drivers.kubernetes' + '.promenade_driver.promenade_client' + '.PromenadeSession._get_prom_url', + return_value=PROM_URL) +@mock.patch('drydock_provisioner.drivers.kubernetes' + '.promenade_driver.promenade_client' + '.PromenadeSession.set_auth', + return_value=None) @responses.activate def test_relabel_node_401_status(patch1, patch2): """