Merge "Catchup YAPF formatting"

Zuul 2018-10-02 20:52:13 +00:00 committed by Gerrit Code Review
commit 43efbb0c63
22 changed files with 172 additions and 172 deletions
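For context, the commit applies YAPF's rewrapping rules across the tree without changing behaviour (the addition and deletion counts match). A minimal sketch of driving the same formatter from Python; the 'pep8' style name and the sample snippet are illustrative, since the repository's real style configuration is not shown in this diff:

# Illustrative only: reformat a snippet with YAPF's Python API.
from yapf.yapflib.yapf_api import FormatCode

source = (
    "def log_relabel(logger, node_name):\n"
    "    logger.info(\"Relabeling node %s with node label data from the design.\" % node_name)\n"
)

# changed is True when YAPF had to rewrap the input, as it does for the
# over-long logger call above; formatted holds the reflowed source.
formatted, changed = FormatCode(source, style_config='pep8')
print(changed)
print(formatted)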

@ -404,12 +404,12 @@
# Timeout in minutes for deploying a node (integer value)
#deploy_node = 45
# Timeout in minutes for relabeling a node (integer value)
#relabel_node = 5
# Timeout in minutes between deployment completion and the all boot actions
# reporting status (integer value)
#bootaction_final_status = 15
# Timeout in minutes for releasing a node (integer value)
#destroy_node = 30
# Timeout in minutes for relabeling a node (integer value)
#relabel_node = 5

@ -276,6 +276,9 @@
# Logger name for Node driver logging (string value)
#nodedriver_logger_name = ${global_logger_name}.nodedriver
# Logger name for Kubernetes driver logging (string value)
#kubernetesdriver_logger_name = ${global_logger_name}.kubernetesdriver
# Logger name for API server logging (string value)
#control_logger_name = ${global_logger_name}.control
@ -350,6 +353,9 @@
# Module path string of the Node driver to enable (string value)
#node_driver = drydock_provisioner.drivers.node.maasdriver.driver.MaasNodeDriver
# Module path string of the Kubernetes driver to enable (string value)
#kubernetes_driver = drydock_provisioner.drivers.kubernetes.promenade_driver.driver.PromenadeDriver
# Module path string of the Network driver enable (string value)
#network_driver = <None>
@ -404,3 +410,6 @@
# Timeout in minutes for releasing a node (integer value)
#destroy_node = 30
# Timeout in minutes for relabeling a node (integer value)
#relabel_node = 5
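The timeout options documented above are read elsewhere in this diff as config.config_mgr.conf.timeouts.<name>. A rough sketch of registering and reading such options with oslo.config; the defaults mirror the sample file, but this standalone registration (including the group name) is illustrative rather than Drydock's actual config bootstrap:

# Illustrative only: register the timeout options shown above and read them back.
from oslo_config import cfg

CONF = cfg.CONF
timeout_opts = [
    cfg.IntOpt('deploy_node', default=45,
               help='Timeout in minutes for deploying a node'),
    cfg.IntOpt('destroy_node', default=30,
               help='Timeout in minutes for releasing a node'),
    cfg.IntOpt('relabel_node', default=5,
               help='Timeout in minutes for relabeling a node'),
    cfg.IntOpt('bootaction_final_status', default=15,
               help='Timeout in minutes between deployment completion and '
                    'all boot actions reporting status'),
]
CONF.register_opts(timeout_opts, group='timeouts')

CONF([])  # parse an empty argv so the defaults become readable
print(CONF.timeouts.relabel_node)  # -> 5 unless overridden in drydock.conf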

@ -38,6 +38,10 @@
# POST /api/v1.0/tasks
#"physical_provisioner:destroy_nodes": "role:admin"
# Create relabel_nodes task
# POST /api/v1.0/tasks
#"physical_provisioner:relabel_nodes": "role:admin"
# Read build data for a node
# GET /api/v1.0/nodes/{nodename}/builddata
#"physical_provisioner:read_build_data": "role:admin"

@ -64,8 +64,8 @@ def part_list(ctx):
"""List parts of a design."""
click.echo(
json.dumps(
PartList(ctx.obj['CLIENT'], design_id=ctx.obj['DESIGN_ID'])
.invoke()))
PartList(ctx.obj['CLIENT'],
design_id=ctx.obj['DESIGN_ID']).invoke()))
@part.command(name='show')

@ -79,8 +79,8 @@ def task_create(ctx,
if node_names else [],
rack_names=[x.strip() for x in rack_names.split(',')]
if rack_names else [],
node_tags=[x.strip() for x in node_tags.split(',')]
if node_tags else [],
node_tags=[x.strip()
for x in node_tags.split(',')] if node_tags else [],
block=block,
poll_interval=poll_interval).invoke()))

@ -58,7 +58,8 @@ class RelabelNode(PromenadeAction):
for n in nodes:
# Relabel node through Promenade
try:
self.logger.info("Relabeling node %s with node label data." % n.name)
self.logger.info(
"Relabeling node %s with node label data." % n.name)
labels_dict = n.get_node_labels()
msg = "Set labels %s for node %s" % (str(labels_dict), n.name)

@ -35,8 +35,7 @@ class PromenadeDriver(KubernetesDriver):
driver_desc = 'Promenade Kubernetes Driver'
action_class_map = {
hd_fields.OrchestratorAction.RelabelNode:
RelabelNode,
hd_fields.OrchestratorAction.RelabelNode: RelabelNode,
}
def __init__(self, **kwargs):
@ -103,8 +102,7 @@ class PromenadeDriver(KubernetesDriver):
action.start)
timeout = action_timeouts.get(
task.action,
config.config_mgr.conf.timeouts.relabel_node)
task.action, config.config_mgr.conf.timeouts.relabel_node)
finished, running = concurrent.futures.wait(
subtask_futures.values(), timeout=(timeout * 60))
@ -118,8 +116,8 @@ class PromenadeDriver(KubernetesDriver):
task.failure()
else:
if f.exception():
msg = ("Subtask %s raised unexpected exception: %s"
% (str(uuid.UUID(bytes=t)), str(f.exception())))
msg = ("Subtask %s raised unexpected exception: %s" %
(str(uuid.UUID(bytes=t)), str(f.exception())))
self.logger.error(msg, exc_info=f.exception())
task.add_status_msg(
msg=msg,

@ -22,6 +22,7 @@ from keystoneauth1 import exceptions as exc
import drydock_provisioner.error as errors
from drydock_provisioner.util import KeystoneUtils
# TODO: Remove this local implementation of Promenade Session and client once
# Promenade api client is available as part of Promenade project.
class PromenadeSession(object):
@ -35,10 +36,7 @@ class PromenadeSession(object):
read timeout to use
"""
def __init__(self,
scheme='http',
marker=None,
timeout=None):
def __init__(self, scheme='http', marker=None, timeout=None):
self.logger = logging.getLogger(__name__)
self.__session = requests.Session()
@ -63,8 +61,8 @@ class PromenadeSession(object):
def set_auth(self):
auth_header = self._auth_gen()
self.__session.headers.update(auth_header)
auth_header = self._auth_gen()
self.__session.headers.update(auth_header)
def get(self, route, query=None, timeout=None):
"""
@ -220,11 +218,10 @@ class PromenadeSession(object):
try:
ks_session = KeystoneUtils.get_session()
except exc.AuthorizationFailure as aferr:
self.logger.error(
'Could not authorize against Keystone: %s',
str(aferr))
raise errors.DriverError('Could not authorize against Keystone: %s',
str(aferr))
self.logger.error('Could not authorize against Keystone: %s',
str(aferr))
raise errors.DriverError(
'Could not authorize against Keystone: %s', str(aferr))
return ks_session
@ -235,8 +232,7 @@ class PromenadeSession(object):
try:
prom_endpoint = ks_session.get_endpoint(
interface='internal',
service_type='kubernetesprovisioner')
interface='internal', service_type='kubernetesprovisioner')
except exc.EndpointNotFound:
self.logger.error("Could not find an internal interface"
" defined in Keystone for Promenade")

@ -278,7 +278,8 @@ class DestroyNode(BaseMaasAction):
site_design)
for n in nodes:
try:
machine = machine_list.identify_baremetal_node(n, update_name=False)
machine = machine_list.identify_baremetal_node(
n, update_name=False)
if machine is None:
msg = "Could not locate machine for node {}".format(n.name)
@ -297,7 +298,8 @@ class DestroyNode(BaseMaasAction):
try:
machine.release(erase_disk=True, quick_erase=True)
except errors.DriverError:
msg = "Error Releasing node {}, skipping".format(n.name)
msg = "Error Releasing node {}, skipping".format(
n.name)
self.logger.warning(msg)
self.task.add_status_msg(
msg=msg, error=True, ctx=n.name, ctx_type='node')
@ -306,25 +308,26 @@ class DestroyNode(BaseMaasAction):
# node release with erase disk will take sometime monitor it
attempts = 0
max_attempts = (config.config_mgr.conf.timeouts.destroy_node
* 60) // config.config_mgr.conf.maasdriver.poll_interval
max_attempts = (
config.config_mgr.conf.timeouts.destroy_node *
60) // config.config_mgr.conf.maasdriver.poll_interval
while (attempts < max_attempts
and (not machine.status_name.startswith('Ready')
and not machine.status_name.startswith(
'Failed'))):
while (attempts < max_attempts and
(not machine.status_name.startswith('Ready')
and not machine.status_name.startswith('Failed'))):
attempts = attempts + 1
time.sleep(
config.config_mgr.conf.maasdriver.poll_interval)
try:
machine.refresh()
self.logger.debug(
"Polling node {} status attempt {:d} of {:d}: {}".format(
n.name, attempts, max_attempts,
machine.status_name))
"Polling node {} status attempt {:d} of {:d}: {}"
.format(n.name, attempts, max_attempts,
machine.status_name))
except Exception:
self.logger.warning(
"Error updating node {} status during release node, will re-attempt.".format(n.name))
"Error updating node {} status during release node, will re-attempt."
.format(n.name))
if machine.status_name.startswith('Ready'):
msg = "Node {} released and disk erased.".format(
n.name)
@ -354,8 +357,8 @@ class DestroyNode(BaseMaasAction):
try:
if n.oob_type == 'libvirt':
self.logger.info(
'Resetting MaaS virsh power parameters for node {}.'.format(
n.name))
'Resetting MaaS virsh power parameters for node {}.'
.format(n.name))
# setting power type attibutes to empty string
# will remove them from maas BMC table
machine.reset_power_parameters()
@ -363,8 +366,8 @@ class DestroyNode(BaseMaasAction):
pass
machine.delete()
msg = "Deleted Node: {} in status: {}.".format(n.name,
machine.status_name)
msg = "Deleted Node: {} in status: {}.".format(
n.name, machine.status_name)
self.logger.info(msg)
self.task.add_status_msg(
msg=msg, error=False, ctx=n.name, ctx_type='node')
@ -1147,16 +1150,17 @@ class ConfigureHardware(BaseMaasAction):
# Poll machine status
attempts = 0
max_attempts = (config.config_mgr.conf.timeouts.configure_hardware
* 60) // config.config_mgr.conf.maasdriver.poll_interval
max_attempts = (
config.config_mgr.conf.timeouts.configure_hardware
* 60
) // config.config_mgr.conf.maasdriver.poll_interval
while (attempts < max_attempts and
(machine.status_name != 'Ready' and
not machine.status_name.startswith('Failed'))):
attempts = attempts + 1
time.sleep(
config.config_mgr.conf.maasdriver.poll_interval
)
time.sleep(config.config_mgr.conf.maasdriver.
poll_interval)
try:
machine.refresh()
self.logger.debug(
@ -1226,7 +1230,9 @@ class ConfigureHardware(BaseMaasAction):
except Exception as ex:
msg = "Error commissioning node %s: %s" % (n.name, str(ex))
self.logger.warning(msg)
self.logger.debug("Unhandled exception attempting to commission node.", exc_info=ex)
self.logger.debug(
"Unhandled exception attempting to commission node.",
exc_info=ex)
self.task.add_status_msg(
msg=msg, error=True, ctx=n.name, ctx_type='node')
self.task.failure(focus=n.get_id())
@ -2312,8 +2318,9 @@ class DeployNode(BaseMaasAction):
continue
attempts = 0
max_attempts = (config.config_mgr.conf.timeouts.deploy_node
* 60) // config.config_mgr.conf.maasdriver.poll_interval
max_attempts = (
config.config_mgr.conf.timeouts.deploy_node *
60) // config.config_mgr.conf.maasdriver.poll_interval
while (attempts < max_attempts
and (not machine.status_name.startswith('Deployed')
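The destroy and deploy paths above share one idiom: convert a minute-based timeout into a bounded number of poll attempts, then loop until the machine reaches a terminal status. A generic sketch of that pattern; the helper name and the duck-typed machine object are illustrative, not Drydock's code:

# Generic sketch of the timeout-bounded polling loop used above.
# timeout_minutes and poll_interval stand in for the oslo.config options;
# 'machine' stands in for the MaaS model object with refresh()/status_name.
import time


def wait_for_status(machine, timeout_minutes, poll_interval,
                    done_prefixes=('Ready', 'Failed')):
    max_attempts = (timeout_minutes * 60) // poll_interval
    attempts = 0
    while attempts < max_attempts:
        attempts += 1
        time.sleep(poll_interval)
        try:
            machine.refresh()
        except Exception:
            # Transient API error: keep polling, as the driver does.
            continue
        if machine.status_name.startswith(done_prefixes):
            return machine.status_name
    return None  # timed out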

@ -423,8 +423,8 @@ class Machine(model_base.ResourceBase):
return True
raise errors.DriverError(
"Failed updating power parameters MAAS url {} - return code {}\n{}".format(
url, resp.status_code.resp.text))
"Failed updating power parameters MAAS url {} - return code {}\n{}"
.format(url, resp.status_code.resp.text))
def to_dict(self):
"""Serialize this resource instance into a dict.

@ -87,7 +87,8 @@ class BootAction(base.DrydockPersistentObject, base.DrydockObject):
for a in self.asset_list:
if type_filter is None or (type_filter is not None
and a.type == type_filter):
a.render(nodename, site_design, action_id, action_key, design_ref)
a.render(nodename, site_design, action_id, action_key,
design_ref)
assets.append(a)
return assets

@ -70,12 +70,12 @@ class OrchestratorAction(BaseDrydockEnum):
ALL = (Noop, ValidateDesign, VerifySite, PrepareSite, VerifyNodes,
PrepareNodes, DeployNodes, BootactionReport, DestroyNodes,
RelabelNodes, ConfigNodePxe, SetNodeBoot, PowerOffNode,
PowerOnNode, PowerCycleNode, InterrogateOob, RelabelNode,
CreateNetworkTemplate, CreateStorageTemplate, CreateBootMedia,
PrepareHardwareConfig, ConfigureHardware, InterrogateNode,
ApplyNodeNetworking, ApplyNodeStorage, ApplyNodePlatform,
DeployNode, DestroyNode, ConfigureNodeProvisioner)
RelabelNodes, ConfigNodePxe, SetNodeBoot, PowerOffNode, PowerOnNode,
PowerCycleNode, InterrogateOob, RelabelNode, CreateNetworkTemplate,
CreateStorageTemplate, CreateBootMedia, PrepareHardwareConfig,
ConfigureHardware, InterrogateNode, ApplyNodeNetworking,
ApplyNodeStorage, ApplyNodePlatform, DeployNode, DestroyNode,
ConfigureNodeProvisioner)
class OrchestratorActionField(fields.BaseEnumField):

@ -338,6 +338,7 @@ class BaremetalNode(drydock_provisioner.objects.hostprofile.HostProfile):
return labels_dict
@base.DrydockObjectRegistry.register
class BaremetalNodeList(base.DrydockObjectListBase, base.DrydockObject):

@ -274,7 +274,8 @@ class Task(object):
"Bubbling subtask success for entity %s." % se)
self.result.add_success(se)
else:
self.logger.debug("Skipping subtask success due to action filter.")
self.logger.debug(
"Skipping subtask success due to action filter.")
# All failures are bubbled up.
if self.retry == 0 or (self.retry == st.retry):
for fe in st.result.failures:
@ -283,8 +284,7 @@ class Task(object):
self.result.add_failure(fe)
else:
self.logger.debug(
"Skipping failures as they mismatch task retry sequence."
)
"Skipping failures as they mismatch task retry sequence.")
def align_result(self, action_filter=None, reset_status=True):
"""Align the result of this task with the combined results of all the subtasks.

@ -245,9 +245,8 @@ class DestroyNodes(BaseAction):
node_filter=self.task.node_filter)
self.task.register_subtask(node_release_task)
self.logger.info(
"Starting node driver task %s to Release nodes." %
(node_release_task.get_id()))
self.logger.info("Starting node driver task %s to Release nodes." %
(node_release_task.get_id()))
node_driver.execute_task(node_release_task.get_id())
node_release_task = self.state_manager.get_task(
@ -1079,9 +1078,8 @@ class RelabelNodes(BaseAction):
node_filter=nf)
self.task.register_subtask(relabel_node_task)
self.logger.info(
"Starting kubernetes driver task %s to relabel nodes." %
(relabel_node_task.get_id()))
self.logger.info("Starting kubernetes driver task %s to relabel nodes."
% (relabel_node_task.get_id()))
kubernetes_driver.execute_task(relabel_node_task.get_id())
relabel_node_task = self.state_manager.get_task(

@ -109,8 +109,9 @@ class Orchestrator(object):
kubernetes_driver_class = getattr(
importlib.import_module(m), c, None)
if kubernetes_driver_class is not None:
self.enabled_drivers['kubernetes'] = kubernetes_driver_class(
state_manager=state_manager, orchestrator=self)
self.enabled_drivers[
'kubernetes'] = kubernetes_driver_class(
state_manager=state_manager, orchestrator=self)
def watch_for_tasks(self):
"""Start polling the database watching for Queued tasks to execute."""

@ -40,17 +40,15 @@ class NetworkTrunkingRational(Validators):
)
# trunking mode is disabled, default_network must be defined
if (network_link.trunk_mode ==
hd_fields.NetworkLinkTrunkingMode.Disabled
and network_link.native_network is None):
if (network_link.trunk_mode == hd_fields.NetworkLinkTrunkingMode.
Disabled and network_link.native_network is None):
msg = 'Trunking mode is disabled, a trunking default_network must be defined'
self.report_error(
msg, [network_link.doc_ref],
"Non-trunked links must have a native network defined.")
elif (network_link.trunk_mode ==
hd_fields.NetworkLinkTrunkingMode.Disabled
and network_link.native_network is not None):
elif (network_link.trunk_mode == hd_fields.NetworkLinkTrunkingMode.
Disabled and network_link.native_network is not None):
network = site_design.get_network(network_link.native_network)
if network and network.vlan_id:
msg = "Network link native network has a defined VLAN tag."

@ -38,15 +38,15 @@ class DrydockPolicy(object):
# Orchestrator Policy
task_rules = [
policy.DocumentedRuleDefault('physical_provisioner:read_task',
'role:admin', 'Get task status',
[{
'path': '/api/v1.0/tasks',
'method': 'GET'
}, {
'path': '/api/v1.0/tasks/{task_id}',
'method': 'GET'
}]),
policy.DocumentedRuleDefault(
'physical_provisioner:read_task', 'role:admin', 'Get task status',
[{
'path': '/api/v1.0/tasks',
'method': 'GET'
}, {
'path': '/api/v1.0/tasks/{task_id}',
'method': 'GET'
}]),
policy.DocumentedRuleDefault('physical_provisioner:create_task',
'role:admin', 'Create a task',
[{
@ -103,10 +103,11 @@ class DrydockPolicy(object):
}]),
policy.DocumentedRuleDefault(
'physical_provisioner:read_build_data', 'role:admin',
'Read build data for a node',
[{
'path': '/api/v1.0/nodes/{nodename}/builddata',
'method': 'GET',
'Read build data for a node', [{
'path':
'/api/v1.0/nodes/{nodename}/builddata',
'method':
'GET',
}]),
]
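The rules above use oslo.policy's DocumentedRuleDefault. A stripped-down sketch of declaring and checking one such rule; the enforcer wiring here is illustrative, since Drydock's real enforcement happens in its API middleware rather than inline like this:

# Illustrative oslo.policy usage for one of the rules above.
from oslo_config import cfg
from oslo_policy import policy

cfg.CONF([])  # parse an empty argv so policy config defaults are readable

rule = policy.DocumentedRuleDefault(
    'physical_provisioner:relabel_nodes', 'role:admin',
    'Create relabel_nodes task',
    [{'path': '/api/v1.0/tasks', 'method': 'POST'}])

enforcer = policy.Enforcer(cfg.CONF)
enforcer.register_defaults([rule])

# creds would normally come from the authenticated request context.
creds = {'roles': ['admin'], 'project_id': 'demo', 'user_id': 'demo'}
print(enforcer.authorize('physical_provisioner:relabel_nodes', {}, creds,
                         do_raise=False))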

@ -169,9 +169,8 @@ class DrydockState(object):
with self.db_engine.connect() as conn:
if allowed_actions is None:
query = self.tasks_tbl.select().where(
self.tasks_tbl.c.status ==
hd_fields.TaskStatus.Queued).order_by(
self.tasks_tbl.c.created.asc())
self.tasks_tbl.c.status == hd_fields.TaskStatus.
Queued).order_by(self.tasks_tbl.c.created.asc())
rs = conn.execute(query)
else:
query = sql.text("SELECT * FROM tasks WHERE "
@ -340,8 +339,8 @@ class DrydockState(object):
try:
with self.db_engine.connect() as conn:
query = self.active_instance_tbl.update().where(
self.active_instance_tbl.c.identity ==
leader_id.bytes).values(last_ping=datetime.utcnow())
self.active_instance_tbl.c.identity == leader_id.
bytes).values(last_ping=datetime.utcnow())
rs = conn.execute(query)
rc = rs.rowcount
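Both hunks above are pure rewrapping of SQLAlchemy Core expressions. For reference, a self-contained sketch of the same select()/update() chaining against a toy in-memory table; the table and column names are illustrative, not Drydock's schema:

# Self-contained sketch of the select()/update() chaining rewrapped above.
from datetime import datetime

import sqlalchemy as sa

engine = sa.create_engine('sqlite://')
meta = sa.MetaData()
tasks_tbl = sa.Table(
    'tasks', meta,
    sa.Column('task_id', sa.Integer, primary_key=True),
    sa.Column('status', sa.String(32)),
    sa.Column('created', sa.DateTime))
meta.create_all(engine)

with engine.begin() as conn:
    query = tasks_tbl.select().where(
        tasks_tbl.c.status == 'queued').order_by(tasks_tbl.c.created.asc())
    rows = conn.execute(query).fetchall()
    print(len(rows))

    update = tasks_tbl.update().where(
        tasks_tbl.c.task_id == 1).values(created=datetime.utcnow())
    conn.execute(update)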

@ -26,7 +26,8 @@ from drydock_provisioner.control.api import start_api
class TestClass(object):
def test_bootaction_context(self, falcontest, seed_bootaction):
"""Test that the API will return a boot action context"""
url = "/api/v1.0/bootactions/nodes/%s/units" % seed_bootaction['nodename']
url = "/api/v1.0/bootactions/nodes/%s/units" % seed_bootaction[
'nodename']
auth_hdr = {'X-Bootaction-Key': "%s" % seed_bootaction['identity_key']}
result = falcontest.simulate_get(url, headers=auth_hdr)
@ -47,7 +48,8 @@ class TestClass(object):
def test_bootaction_context_noauth(self, falcontest, seed_bootaction):
"""Test that the API will return a boot action context"""
url = "/api/v1.0/bootactions/nodes/%s/units" % seed_bootaction['nodename']
url = "/api/v1.0/bootactions/nodes/%s/units" % seed_bootaction[
'nodename']
result = falcontest.simulate_get(url)
@ -55,7 +57,8 @@ class TestClass(object):
def test_bootaction_context_badauth(self, falcontest, seed_bootaction):
"""Test that the API will return a boot action context"""
url = "/api/v1.0/bootactions/nodes/%s/units" % seed_bootaction['nodename']
url = "/api/v1.0/bootactions/nodes/%s/units" % seed_bootaction[
'nodename']
auth_hdr = {'X-Bootaction-Key': 'deadbeef'}
result = falcontest.simulate_get(url, headers=auth_hdr)

@ -90,9 +90,8 @@ class TestBuildData(object):
}
build_data_old = copy.deepcopy(build_data_latest)
build_data_old[
'collected_date'] = build_data_latest['collected_date'] - timedelta(
days=1)
build_data_old['collected_date'] = build_data_latest[
'collected_date'] - timedelta(days=1)
build_data_old['task_id'] = uuid.uuid4()
build_data1 = objects.BuildData(**build_data_latest)

@ -25,16 +25,14 @@ PROM_URL = urlparse('http://promhost:80/api/v1.0')
PROM_HOST = 'promhost'
@mock.patch(
'drydock_provisioner.drivers.kubernetes'
'.promenade_driver.promenade_client'
'.PromenadeSession._get_prom_url',
return_value=PROM_URL)
@mock.patch(
'drydock_provisioner.drivers.kubernetes'
'.promenade_driver.promenade_client'
'.PromenadeSession.set_auth',
return_value=None)
@mock.patch('drydock_provisioner.drivers.kubernetes'
'.promenade_driver.promenade_client'
'.PromenadeSession._get_prom_url',
return_value=PROM_URL)
@mock.patch('drydock_provisioner.drivers.kubernetes'
'.promenade_driver.promenade_client'
'.PromenadeSession.set_auth',
return_value=None)
@responses.activate
def test_put(patch1, patch2):
"""
@ -47,51 +45,43 @@ def test_put(patch1, patch2):
status=200)
prom_session = PromenadeSession()
result = prom_session.put('v1.0/node-label/n1',
body='{"key1":"label1"}',
timeout=(60, 60))
result = prom_session.put(
'v1.0/node-label/n1', body='{"key1":"label1"}', timeout=(60, 60))
assert PROM_HOST == prom_session.host
assert result.status_code == 200
@mock.patch(
'drydock_provisioner.drivers.kubernetes'
'.promenade_driver.promenade_client'
'.PromenadeSession._get_prom_url',
return_value=PROM_URL)
@mock.patch(
'drydock_provisioner.drivers.kubernetes'
'.promenade_driver.promenade_client'
'.PromenadeSession.set_auth',
return_value=None)
@mock.patch('drydock_provisioner.drivers.kubernetes'
'.promenade_driver.promenade_client'
'.PromenadeSession._get_prom_url',
return_value=PROM_URL)
@mock.patch('drydock_provisioner.drivers.kubernetes'
'.promenade_driver.promenade_client'
'.PromenadeSession.set_auth',
return_value=None)
@responses.activate
def test_get(patch1, patch2):
"""
Test get functionality
"""
responses.add(
responses.GET,
'http://promhost:80/api/v1.0/node-label/n1',
status=200)
responses.GET, 'http://promhost:80/api/v1.0/node-label/n1', status=200)
prom_session = PromenadeSession()
result = prom_session.get('v1.0/node-label/n1',
timeout=(60, 60))
result = prom_session.get('v1.0/node-label/n1', timeout=(60, 60))
assert result.status_code == 200
@mock.patch(
'drydock_provisioner.drivers.kubernetes'
'.promenade_driver.promenade_client'
'.PromenadeSession._get_prom_url',
return_value=PROM_URL)
@mock.patch(
'drydock_provisioner.drivers.kubernetes'
'.promenade_driver.promenade_client'
'.PromenadeSession.set_auth',
return_value=None)
@mock.patch('drydock_provisioner.drivers.kubernetes'
'.promenade_driver.promenade_client'
'.PromenadeSession._get_prom_url',
return_value=PROM_URL)
@mock.patch('drydock_provisioner.drivers.kubernetes'
'.promenade_driver.promenade_client'
'.PromenadeSession.set_auth',
return_value=None)
@responses.activate
def test_post(patch1, patch2):
"""
@ -104,24 +94,21 @@ def test_post(patch1, patch2):
status=200)
prom_session = PromenadeSession()
result = prom_session.post('v1.0/node-label/n1',
body='{"key1":"label1"}',
timeout=(60, 60))
result = prom_session.post(
'v1.0/node-label/n1', body='{"key1":"label1"}', timeout=(60, 60))
assert PROM_HOST == prom_session.host
assert result.status_code == 200
@mock.patch(
'drydock_provisioner.drivers.kubernetes'
'.promenade_driver.promenade_client'
'.PromenadeSession._get_prom_url',
return_value=PROM_URL)
@mock.patch(
'drydock_provisioner.drivers.kubernetes'
'.promenade_driver.promenade_client'
'.PromenadeSession.set_auth',
return_value=None)
@mock.patch('drydock_provisioner.drivers.kubernetes'
'.promenade_driver.promenade_client'
'.PromenadeSession._get_prom_url',
return_value=PROM_URL)
@mock.patch('drydock_provisioner.drivers.kubernetes'
'.promenade_driver.promenade_client'
'.PromenadeSession.set_auth',
return_value=None)
@responses.activate
def test_relabel_node(patch1, patch2):
"""
@ -141,16 +128,14 @@ def test_relabel_node(patch1, patch2):
assert result == {"key1": "label1"}
@mock.patch(
'drydock_provisioner.drivers.kubernetes'
'.promenade_driver.promenade_client'
'.PromenadeSession._get_prom_url',
return_value=PROM_URL)
@mock.patch(
'drydock_provisioner.drivers.kubernetes'
'.promenade_driver.promenade_client'
'.PromenadeSession.set_auth',
return_value=None)
@mock.patch('drydock_provisioner.drivers.kubernetes'
'.promenade_driver.promenade_client'
'.PromenadeSession._get_prom_url',
return_value=PROM_URL)
@mock.patch('drydock_provisioner.drivers.kubernetes'
'.promenade_driver.promenade_client'
'.PromenadeSession.set_auth',
return_value=None)
@responses.activate
def test_relabel_node_403_status(patch1, patch2):
"""
@ -167,16 +152,15 @@ def test_relabel_node_403_status(patch1, patch2):
with pytest.raises(errors.ClientForbiddenError):
prom_client.relabel_node('n1', {"key1": "label1"})
@mock.patch(
'drydock_provisioner.drivers.kubernetes'
'.promenade_driver.promenade_client'
'.PromenadeSession._get_prom_url',
return_value=PROM_URL)
@mock.patch(
'drydock_provisioner.drivers.kubernetes'
'.promenade_driver.promenade_client'
'.PromenadeSession.set_auth',
return_value=None)
@mock.patch('drydock_provisioner.drivers.kubernetes'
'.promenade_driver.promenade_client'
'.PromenadeSession._get_prom_url',
return_value=PROM_URL)
@mock.patch('drydock_provisioner.drivers.kubernetes'
'.promenade_driver.promenade_client'
'.PromenadeSession.set_auth',
return_value=None)
@responses.activate
def test_relabel_node_401_status(patch1, patch2):
"""