Add keystone integration test

* Adds "g2" gate for testing keystone integration
* Adds policy enforcement for join-scripts endpoint
* Updates ceph to luminous

Change-Id: Id52c1e51b567afc5a16d46c72145e21316c00a49
This commit is contained in:
Mark Burnett 2017-12-04 22:46:58 -06:00
parent 5b4eee16b8
commit 527b2ad096
25 changed files with 436 additions and 146 deletions

View File

@ -40,7 +40,7 @@ spec:
terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.api.timeout | default "30" }}
containers:
- name: promenade-api
image: {{ .Values.images.tags.api }}
image: {{ .Values.images.tags.promenade }}
imagePullPolicy: {{ .Values.images.pull_policy }}
{{ tuple $envAll $envAll.Values.pod.resources.api | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
command:

View File

@ -1073,8 +1073,8 @@ data:
pipeline: noauth promenade-api
images:
tags:
api: quay.io/attcomdev/promenade:latest
dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.2.1
promenade: quay.io/attcomdev/promenade:latest
manifests:
job_ks_endpoints: false
job_ks_service: false

View File

@ -16,6 +16,7 @@ data:
- ceph-mon=enabled
- ceph-osd=enabled
- ceph-rgw=enabled
- ceph-mgr=enabled
- kubernetes-apiserver=enabled
- kubernetes-controller-manager=enabled
- kubernetes-etcd=enabled

View File

@ -119,6 +119,7 @@ data:
- ucp-rabbitmq
- ucp-barbican
- ingress
- tiller
---
schema: armada/ChartGroup/v1
metadata:
@ -1173,16 +1174,21 @@ data:
rgw_keystone_user_and_endpoints: false
bootstrap:
enabled: true
conf:
ceph:
config:
global:
osd_pool_default_size: 1
images:
tags:
ks_user: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
ks_service: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
ks_endpoints: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
bootstrap: quay.io/attcomdev/ceph-daemon:tag-build-master-jewel-ubuntu-16.04
dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.2.1
daemon: quay.io/attcomdev/ceph-daemon:tag-build-master-jewel-ubuntu-16.04
ceph_bootstrap: docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04
ceph_config_helper: docker.io/port/ceph-config-helper:v1.7.5
rbd_provisioner: quay.io/external_storage/rbd-provisioner:v0.1.1
ceph_daemon: docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04
ceph_rbd_provisioner: quay.io/external_storage/rbd-provisioner:v0.1.1
dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.2.1
ks_endpoints: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
ks_service: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
ks_user: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
source:
type: git
location: https://git.openstack.org/openstack/openstack-helm
@ -1217,9 +1223,14 @@ data:
values:
images:
tags:
bootstrap: quay.io/attcomdev/ceph-daemon:tag-build-master-jewel-ubuntu-16.04
ceph_bootstrap: docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04
ceph_config_helper: docker.io/port/ceph-config-helper:v1.7.5
ceph_daemon: docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04
ceph_rbd_provisioner: quay.io/external_storage/rbd-provisioner:v0.1.1
dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.2.1
ks_endpoints: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
ks_service: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
ks_user: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
labels:
jobs:
node_selector_key: ucp-control-plane
@ -1342,16 +1353,16 @@ data:
images:
tags:
bootstrap: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
test: docker.io/kolla/ubuntu-source-rally:4.0.0
db_init: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
db_sync: docker.io/kolla/ubuntu-source-keystone:3.0.3
db_drop: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
fernet_setup: docker.io/kolla/ubuntu-source-keystone:3.0.3
fernet_rotate: docker.io/kolla/ubuntu-source-keystone:3.0.3
credential_setup: docker.io/kolla/ubuntu-source-keystone:3.0.3
credential_rotate: docker.io/kolla/ubuntu-source-keystone:3.0.3
api: docker.io/kolla/ubuntu-source-keystone:3.0.3
db_init: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.2.1
keystone_api: docker.io/kolla/ubuntu-source-keystone:3.0.3
keystone_credential_rotate: docker.io/kolla/ubuntu-source-keystone:3.0.3
keystone_credential_setup: docker.io/kolla/ubuntu-source-keystone:3.0.3
keystone_db_sync: docker.io/kolla/ubuntu-source-keystone:3.0.3
keystone_fernet_rotate: docker.io/kolla/ubuntu-source-keystone:3.0.3
keystone_fernet_setup: docker.io/kolla/ubuntu-source-keystone:3.0.3
test: docker.io/kolla/ubuntu-source-rally:4.0.0
pod:
replicas:
api: 1
@ -1389,6 +1400,9 @@ data:
labels:
node_selector_key: ucp-control-plane
node_selector_value: enabled
images:
tags:
postgresql: docker.io/postgres:9.5
source:
type: git
location: https://git.openstack.org/openstack/openstack-helm
@ -1414,6 +1428,7 @@ data:
tags:
db_init: docker.io/postgres:9.5
db_sync: quay.io/attcomdev/maas-region:master
maas_cache: docker.io/sthussey/maas-cache:cachetest
maas_rack: quay.io/attcomdev/maas-rack:master
maas_region: quay.io/attcomdev/maas-region:master
bootstrap: quay.io/attcomdev/maas-region:master
@ -1487,6 +1502,9 @@ data:
labels:
node_selector_key: ucp-control-plane
node_selector_value: enabled
images:
tags:
etcd: gcr.io/google_containers/etcd-amd64:2.2.5
source:
type: git
location: https://git.openstack.org/openstack/openstack-helm
@ -1512,6 +1530,10 @@ data:
delete: []
create: []
values:
images:
tags:
rabbitmq: quay.io/attcomdev/fuel-mcp-rabbitmq:ocata-unstable
dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.2.1
pod:
replicas:
server: 1
@ -1549,6 +1571,18 @@ data:
labels:
node_selector_key: ucp-control-plane
node_selector_value: enabled
images:
tags:
barbican_api: docker.io/kolla/ubuntu-source-barbican-api:3.0.3
barbican_db_sync: docker.io/kolla/ubuntu-source-barbican-api:3.0.3
bootstrap: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
db_drop: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
db_init: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.2.1
ks_endpoints: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
ks_service: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
ks_user: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
scripted_test: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
source:
type: git
location: https://git.openstack.org/openstack/openstack-helm
@ -1575,6 +1609,12 @@ data:
labels:
node_selector_key: ucp-control-plane
node_selector_value: enabled
images:
tags:
entrypoint: quay.io/stackanetes/kubernetes-entrypoint:v0.2.1
# https://github.com/kubernetes/ingress/blob/master/controllers/nginx/Changelog.md
ingress: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.8
error_pages: gcr.io/google_containers/defaultbackend:1.0
source:
type: git
location: https://github.com/openstack/openstack-helm
@ -1584,6 +1624,33 @@ data:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: tiller
data:
chart_name: tiller
release: tiller
namespace: ucp
install:
no_hooks: false
upgrade:
no_hooks: false
values:
images:
tags:
tiller: gcr.io/kubernetes-helm/tiller:v2.7.0
labels:
node_selector_key: ucp-control-plane
node_selector_value: enabled
source:
type: git
location: https://github.com/att-comdev/armada
subpath: charts/tiller
reference: master
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: deckhand
@ -1599,6 +1666,12 @@ data:
images:
tags:
deckhand: quay.io/attcomdev/deckhand:master
dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.2.1
db_init: docker.io/postgres:9.5
db_sync: docker.io/postgres:9.5
ks_endpoints: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
ks_service: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
ks_user: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
labels:
node_selector_key: ucp-control-plane
node_selector_value: enabled
@ -1633,13 +1706,13 @@ data:
values:
images:
tags:
drydock: quay.io/attcomdev/drydock:master
dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.2.1
ks_user: docker.io/kolla/ubuntu-source-kolla-toolbox:3.0.3
ks_service: docker.io/kolla/ubuntu-source-kolla-toolbox:3.0.3
ks_endpoints: docker.io/kolla/ubuntu-source-kolla-toolbox:3.0.3
drydock: quay.io/attcomdev/drydock:master
drydock_db_init: docker.io/postgres:9.5
drydock_db_sync: quay.io/attcomdev/drydock:master
ks_endpoints: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
ks_service: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
ks_user: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
labels:
node_selector_key: ucp-control-plane
node_selector_value: enabled
@ -1689,16 +1762,17 @@ data:
admin_password: password
admin_tenant_name: service
admin_user: promenade
delay_auth_decision: true
identity_uri: 'http://keystone-api.ucp.svc.cluster.local/'
service_token_roles_required: true
images:
tags:
api: quay.io/attcomdev/promenade:latest
dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.2.1
ks_user: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
ks_service: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
ks_endpoints: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
ks_service: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
ks_user: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
promenade: quay.io/attcomdev/promenade:latest
upgrade:
no_hooks: true
source:
@ -1721,6 +1795,9 @@ data:
upgrade:
no_hooks: false
values:
manifests:
deployment_tiller: false
service_tiller: false
images:
tags:
api: quay.io/attcomdev/armada:latest

View File

@ -16,6 +16,7 @@ data:
- ceph-mon=enabled
- ceph-osd=enabled
- ceph-rgw=enabled
- ceph-mgr=enabled
- ucp-control-plane=enabled
---
schema: promenade/KubernetesNode/v1
@ -36,6 +37,7 @@ data:
- ceph-mon=enabled
- ceph-osd=enabled
- ceph-rgw=enabled
- ceph-mgr=enabled
- kubernetes-apiserver=enabled
- kubernetes-controller-manager=enabled
- kubernetes-etcd=enabled
@ -60,6 +62,7 @@ data:
- ceph-mon=enabled
- ceph-osd=enabled
- ceph-rgw=enabled
- ceph-mgr=enabled
- kubernetes-apiserver=enabled
- kubernetes-controller-manager=enabled
- kubernetes-etcd=enabled
@ -84,6 +87,7 @@ data:
- ceph-mon=enabled
- ceph-osd=enabled
- ceph-rgw=enabled
- ceph-mgr=enabled
- kubernetes-apiserver=enabled
- kubernetes-controller-manager=enabled
- kubernetes-etcd=enabled

View File

@ -24,11 +24,10 @@ import falcon.routing as routing
from promenade import exceptions as exc
from promenade import logging
LOG = logging.getLogger(__name__)
class BaseResource(object):
def __init__(self):
self.logger = logging.getLogger('promenade.control')
def on_options(self, req, resp, **kwargs):
"""
Handle options requests
@ -88,45 +87,6 @@ class BaseResource(object):
"""
return json.dumps(body_dict, default=str)
def log_message(self, ctx, level, msg):
"""
Logs a message with context, and extra populated.
"""
extra = {'user': 'N/A', 'req_id': 'N/A', 'external_ctx': 'N/A'}
if ctx is not None:
extra = {
'user': ctx.user,
'req_id': ctx.request_id,
'external_ctx': ctx.external_marker,
}
self.logger.log(level, msg, extra=extra)
def debug(self, ctx, msg):
"""
Debug logger for resources, incorporating context.
"""
self.log_message(ctx, logging.DEBUG, msg)
def info(self, ctx, msg):
"""
Info logger for resources, incorporating context.
"""
self.log_message(ctx, logging.INFO, msg)
def warn(self, ctx, msg):
"""
Warn logger for resources, incorporating context.
"""
self.log_message(ctx, logging.WARN, msg)
def error(self, ctx, msg):
"""
Error logger for resources, incorporating context.
"""
self.log_message(ctx, logging.ERROR, msg)
class PromenadeRequestContext(context.RequestContext):
"""
@ -179,6 +139,15 @@ class PromenadeRequestContext(context.RequestContext):
return policy_dict
def to_log_context(self):
result = {}
result['request_id'] = self.request_id
result['external_id'] = self.external_marker
result['user'] = self.user
return result
class PromenadeRequest(request.Request):
context_type = PromenadeRequestContext

View File

@ -16,6 +16,7 @@ from promenade.control.base import BaseResource
from promenade.builder import Builder
from promenade.config import Configuration
from promenade import logging
from promenade import policy
import falcon
import kubernetes
import random
@ -28,6 +29,7 @@ class JoinScriptsResource(BaseResource):
Lists the versions supported by this API
"""
@policy.ApiEnforcer('kubernetes_provisioner:get_join_scripts')
def on_get(self, req, resp):
design_ref = req.get_param('design_ref', required=True)
ip = req.get_param('ip', required=True)

View File

@ -17,18 +17,17 @@ import uuid
from promenade import logging
from promenade import policy
LOG = logging.getLogger('promenade')
class AuthMiddleware(object):
def __init__(self):
self.logger = logging.getLogger('promenade')
# Authentication
def process_request(self, req, resp):
ctx = req.context
ctx.set_policy_engine(policy.policy_engine)
for k, v in req.headers.items():
self.logger.debug("Request with header %s: %s" % (k, v))
LOG.debug("Request with header %s: %s" % (k, v))
auth_status = req.get_header(
'X-SERVICE-IDENTITY-STATUS') # will be set to Confirmed or Invalid
@ -64,18 +63,19 @@ class AuthMiddleware(object):
'X-PROJECT-DOMAIN-NAME')
if service:
# comma-delimited list of case-sensitive role names
ctx.add_roles(req.get_header('X-SERVICE-ROLES').split(','))
if req.get_header('X-SERVICE-ROLES'):
ctx.add_roles(req.get_header('X-SERVICE-ROLES').split(','))
else:
ctx.add_roles(req.get_header('X-ROLES').split(','))
if req.get_header('X-ROLES'):
ctx.add_roles(req.get_header('X-ROLES').split(','))
if req.get_header('X-IS-ADMIN-PROJECT') == 'True':
ctx.is_admin_project = True
else:
ctx.is_admin_project = False
self.logger.debug(
'Request from authenticated user %s with roles %s', ctx.user,
','.join(ctx.roles))
LOG.debug('Request from authenticated user %s with roles %s',
ctx.user, ctx.roles)
else:
ctx.authenticated = False
@ -109,23 +109,11 @@ class ContextMiddleware(object):
class LoggingMiddleware(object):
def __init__(self):
self.logger = logging.getLogger('promenade.control')
def process_response(self, req, resp, resource, req_succeeded):
ctx = req.context
extra = {
'user': ctx.user,
'req_id': ctx.request_id,
'external_ctx': ctx.external_marker,
}
resp.append_header('X-Promenade-Req', ctx.request_id)
self.logger.info(
'%s %s - %s', req.method, req.uri, resp.status, extra=extra)
self.logger.debug('Response body:\n%s', resp.body, extra=extra)
LOG.info('%s %s - %s', req.method, req.uri, resp.status, ctx=ctx)
class NoAuthFilter(object):

View File

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from promenade import logging
import traceback
import falcon

View File

@ -1,14 +1,80 @@
import copy
import logging
from logging import getLogger
import logging.config
__all__ = ['getLogger', 'setup']
LOG_FORMAT = '%(asctime)s %(levelname)-8s %(name)s:%(funcName)s [%(lineno)3d] %(message)s' # noqa
LOG_FORMAT = '%(asctime)s %(levelname)-8s %(request_id)s %(external_id)s %(user)s %(name)s:%(funcName)s [%(lineno)3d] %(message)s' # noqa
BLANK_CONTEXT_VALUES = [
'external_id',
'request_id',
'user',
]
DEFAULT_CONFIG = {
'version': 1,
'disable_existing_loggers': True,
'filters': {
'blank_context': {
'()': 'promenade.logging.BlankContextFilter',
},
},
'formatters': {
'standard': {
'format': LOG_FORMAT,
},
},
'handlers': {
'default': {
'level': 'DEBUG',
'formatter': 'standard',
'class': 'logging.StreamHandler',
'filters': ['blank_context'],
},
},
'loggers': {
'promenade': {
'handlers': ['default'],
'level': 'INFO',
'propagate': False,
},
},
'root': {
'handlers': ['default'],
'level': 'INFO',
},
}
class BlankContextFilter(logging.Filter):
def filter(self, record):
for key in BLANK_CONTEXT_VALUES:
if getattr(record, key, None) is None:
setattr(record, key, '-')
return True
class Adapter(logging.LoggerAdapter):
def process(self, msg, kwargs):
extra = kwargs.get('extra', {})
ctx = kwargs.pop('ctx', None)
if ctx is not None:
extra.update(ctx.to_log_context())
kwargs['extra'] = extra
return msg, kwargs
def setup(*, verbose):
log_config = copy.deepcopy(DEFAULT_CONFIG)
if verbose:
level = logging.DEBUG
else:
level = logging.INFO
logging.basicConfig(format=LOG_FORMAT, level=level)
log_config['loggers']['promenade']['level'] = 'DEBUG'
logging.config.dictConfig(log_config)
def getLogger(*args, **kwargs):
return Adapter(logging.getLogger(*args, **kwargs), {})

5
promenade/options.py Normal file
View File

@ -0,0 +1,5 @@
# Central registry for promenade's oslo.config options.
from oslo_config import cfg

# No promenade-specific options defined yet; registering the empty list
# still ensures cfg.CONF is initialized consistently when this module is
# imported (see promenade startup, which imports this module for effect).
OPTIONS = []
cfg.CONF.register_opts(OPTIONS)

View File

@ -11,16 +11,45 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import functools
import falcon
import functools
import oslo_policy.policy as op
from oslo_config import cfg
from promenade import exceptions as ex
from promenade import logging
LOG = logging.getLogger(__name__)
# TODO: Add policy_engine
policy_engine = None
POLICIES = [
op.RuleDefault(
'admin_required',
'role:admin or is_admin:1',
description='Actions requiring admin authority'),
op.DocumentedRuleDefault('kubernetes_provisioner:get_join_scripts',
'role:admin', 'Get join script for node',
[{
'path': '/api/v1.0/join-scripts',
'method': 'GET'
}]),
]
class PromenadePolicy:
def __init__(self):
self.enforcer = op.Enforcer(cfg.CONF)
def register_policy(self):
self.enforcer.register_defaults(POLICIES)
self.enforcer.load_rules()
def authorize(self, action, ctx):
target = {'project_id': ctx.project_id, 'user_id': ctx.user_id}
return self.enforcer.authorize(action, target, ctx.to_policy_view())
class ApiEnforcer(object):
"""
@ -35,52 +64,55 @@ class ApiEnforcer(object):
def secure_handler(slf, req, resp, *args, **kwargs):
ctx = req.context
policy_eng = ctx.policy_engine
slf.info(ctx, "Policy Engine: %s" % policy_eng.__class__.__name__)
# perform auth
slf.info(ctx, "Enforcing policy %s on request %s" %
(self.action, ctx.request_id))
# policy engine must be configured
if policy_eng is None:
slf.error(
ctx,
"Error-Policy engine required-action: %s" % self.action)
if policy_eng is not None:
LOG.debug(
'Enforcing policy %s on request %s using engine %s',
self.action,
ctx.request_id,
policy_eng.__class__.__name__,
ctx=ctx)
else:
LOG.error('No policy engine configured', ctx=ctx)
raise ex.PromenadeException(
title="Auth is not being handled by any policy engine",
status=falcon.HTTP_500,
retry=False)
authorized = False
try:
if policy_eng.authorize(self.action, ctx):
# authorized
slf.info(ctx, "Request is authorized")
LOG.debug('Request is authorized', ctx=ctx)
authorized = True
except Exception:
# couldn't service the auth request
slf.error(
ctx,
"Error - Expectation Failed - action: %s" % self.action)
LOG.exception(
'Error authorizing request for action %s',
self.action,
ctx=ctx)
raise ex.ApiError(
title="Expectation Failed",
status=falcon.HTTP_417,
retry=False)
if authorized:
return f(slf, req, resp, *args, **kwargs)
else:
slf.error(
ctx,
"Auth check failed. Authenticated:%s" % ctx.authenticated)
# raise the appropriate response exception
if ctx.authenticated:
slf.error(
ctx,
"Error: Forbidden access - action: %s" % self.action)
LOG.error(
'Unauthorized access attempted for action %s',
self.action,
ctx=ctx)
raise ex.ApiError(
title="Forbidden",
status=falcon.HTTP_403,
description="Credentials do not permit access",
retry=False)
else:
slf.error(ctx, "Error - Unauthenticated access")
LOG.error(
'Unathenticated access attempted for action %s',
self.action,
ctx=ctx)
raise ex.ApiError(
title="Unauthenticated",
status=falcon.HTTP_401,

View File

@ -11,14 +11,22 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from promenade.control import api
from promenade import options # noqa
from promenade import logging
from promenade import policy
def start_promenade():
cfg.CONF()
# Setup root logger
logging.setup(verbose=False)
# TODO: Add policy engine to start
logging.setup(verbose=True)
# Setup policy
policy.policy_engine = policy.PromenadePolicy()
policy.policy_engine.register_policy()
# Start the API
return api.start_api()

View File

@ -6,6 +6,7 @@ jsonschema==2.6.0
keystonemiddleware==4.17.0
kubernetes==3.0.0
oslo.context==2.19.2
oslo.policy==1.22.1
pastedeploy==1.5.2
pbr==3.0.1
pyyaml==3.12

View File

@ -1,19 +1,19 @@
Babel==2.5.1
cachetools==2.0.1
certifi==2017.7.27.1
certifi==2017.11.5
chardet==3.0.4
click==6.7
debtcollector==1.18.0
debtcollector==1.19.0
decorator==4.1.2
falcon==1.2.0
google-auth==1.2.0
google-auth==1.2.1
idna==2.6
ipaddress==1.0.18
iso8601==0.1.12
Jinja2==2.9.6
jsonpath-ng==1.4.3
jsonschema==2.6.0
keystoneauth1==3.2.0
keystoneauth1==3.3.0
keystonemiddleware==4.17.0
kubernetes==3.0.0
MarkupSafe==1.0
@ -21,23 +21,24 @@ monotonic==1.4
msgpack-python==0.4.8
netaddr==0.7.19
netifaces==0.10.6
oslo.config==5.0.0
oslo.config==5.1.0
oslo.context==2.19.2
oslo.i18n==3.18.0
oslo.log==3.32.0
oslo.serialization==2.21.2
oslo.utils==3.31.0
oslo.i18n==3.19.0
oslo.log==3.35.0
oslo.policy==1.22.1
oslo.serialization==2.22.0
oslo.utils==3.33.0
PasteDeploy==1.5.2
pbr==3.0.1
ply==3.10
positional==1.2.1
pyasn1==0.3.7
pyasn1-modules==0.1.5
pyasn1==0.4.2
pyasn1-modules==0.2.1
pycadf==2.6.0
pyinotify==0.9.6
pyparsing==2.2.0
python-dateutil==2.6.1
python-keystoneclient==3.13.0
python-keystoneclient==3.14.0
python-mimeparse==1.6.0
pytz==2017.3
PyYAML==3.12
@ -45,9 +46,9 @@ requests==2.18.4
rfc3986==1.1.0
rsa==3.4.2
six==1.11.0
stevedore==1.27.1
stevedore==1.28.0
urllib3==1.22
uWSGI==2.0.15
WebOb==1.7.3
WebOb==1.7.4
websocket-client==0.40.0
wrapt==1.10.11

View File

@ -13,6 +13,7 @@ export PROMENADE_DEBUG=${PROMENADE_DEBUG:-1}
exec docker run \
--rm -it \
--publish 9000:9000 \
--env PROMENADE_DEBUG=${PROMENADE_DEBUG} \
--volume "${SOURCE_DIR}/etc/promenade/noauth-api-paste.ini":/etc/promenade/api-paste.ini:ro \
quay.io/attcomdev/promenade:latest \
server

View File

@ -9,6 +9,7 @@ source "$LIB_DIR"/etcd.sh
source "$LIB_DIR"/kube.sh
source "$LIB_DIR"/log.sh
source "$LIB_DIR"/nginx.sh
source "$LIB_DIR"/openstack.sh
source "$LIB_DIR"/promenade.sh
source "$LIB_DIR"/registry.sh
source "$LIB_DIR"/ssh.sh

View File

@ -22,6 +22,12 @@ log() {
echo -e "${d} ${*}" >> "${LOG_FILE}"
}
# Emit a warning message: a color-highlighted "WARN" line to stderr, and a
# plain (uncolored) copy appended to ${LOG_FILE}, both prefixed with the
# current UTC timestamp.
log_warn() {
d=$(date --utc)
echo -e "${C_MUTE}${d}${C_CLEAR} ${C_HILIGHT}WARN${C_CLEAR} ${*}" 1>&2
echo -e "${d} ${*}" >> "${LOG_FILE}"
}
log_stage_diagnostic_header() {
echo -e " ${C_ERROR}= Diagnostic Report =${C_CLEAR}"
}

32
tools/g2/lib/openstack.sh Normal file
View File

@ -0,0 +1,32 @@
# Obtain a Keystone auth token via v3 password authentication, issuing the
# HTTP request from a remote host reached through ssh_cmd.
#
# Arguments:
#   1: VIA          - ssh host alias from which curl is run (required)
#   2: KEYSTONE_URL - Keystone base URL (default: in-cluster service URL)
#   3: DOMAIN       - Keystone domain id (default: "default")
#   4: USERNAME     - user to authenticate as (default: "promenade")
#   5: PASSWORD     - that user's password (default: "password")
#
# Prints the issued token (the X-Subject-Token response header value) on
# stdout; prints nothing if the response carries no such header (e.g. on
# authentication failure).
# NOTE(review): assumes TEMP_DIR is set and rsync_cmd/ssh_cmd are sourced
# from the sibling gate libraries — confirm callers source common.sh first.
os_ks_get_token() {
VIA=${1}
KEYSTONE_URL=${2:-http://keystone-api.ucp.svc.cluster.local}
DOMAIN=${3:-default}
USERNAME=${4:-promenade}
PASSWORD=${5:-password}
REQUEST_BODY_PATH="ks-token-request.json"
# Render the Keystone v3 password-auth request body locally, then copy it
# to the remote host so curl can reference it with the @file syntax.
cat <<EOBODY > "${TEMP_DIR}/${REQUEST_BODY_PATH}"
{
"auth": {
"identity": {
"methods": ["password"],
"password": {
"user": {
"name": "${USERNAME}",
"domain": { "id": "${DOMAIN}" },
"password": "${PASSWORD}"
}
}
}
}
}
EOBODY
rsync_cmd "${TEMP_DIR}/${REQUEST_BODY_PATH}" "${VIA}:/root/${REQUEST_BODY_PATH}"
# POST the auth request; -i includes response headers so the token can be
# scraped from X-Subject-Token (surrounding quotes and the trailing CR are
# stripped by the sed filters; grep/awk/sed run locally on the ssh output).
ssh_cmd "${VIA}" curl -isS \
-H 'Content-Type: application/json' \
-d "@/root/${REQUEST_BODY_PATH}" \
"${KEYSTONE_URL}/v3/auth/tokens" | grep 'X-Subject-Token' | awk '{print $2}' | sed "s;';;g" | sed "s;\r;;g"
}

View File

@ -3,10 +3,13 @@ rsync_cmd() {
}
ssh_cmd() {
HOST=${1}
shift
args=$(shell-quote -- "${@}")
if [[ -v GATE_DEBUG && ${GATE_DEBUG} = "1" ]]; then
ssh -F "${SSH_CONFIG_DIR}/config" -v "${@}"
ssh -F "${SSH_CONFIG_DIR}/config" -v "${HOST}" "${args}"
else
ssh -F "${SSH_CONFIG_DIR}/config" "${@}"
ssh -F "${SSH_CONFIG_DIR}/config" "${HOST}" "${args}"
fi
}

View File

@ -0,0 +1,61 @@
{
"configuration": [
"examples/complete"
],
"stages": [
{
"name": "Gate Setup",
"script": "gate-setup.sh"
},
{
"name": "Build Image",
"script": "build-image.sh"
},
{
"name": "Generate Certificates",
"script": "generate-certificates.sh"
},
{
"name": "Build Scripts",
"script": "build-scripts.sh"
},
{
"name": "Create VMs",
"script": "create-vms.sh"
},
{
"name": "Genesis",
"script": "genesis.sh"
},
{
"name": "Join Master",
"script": "join-nodes.sh",
"arguments": [
"-v", "n0",
"-t",
"-n", "n1",
"-l", "calico-etcd=enabled",
"-l", "kubernetes-apiserver=enabled",
"-l", "kubernetes-controller-manager=enabled",
"-l", "kubernetes-etcd=enabled",
"-l", "kubernetes-scheduler=enabled",
"-l", "ceph-mds=enabled",
"-l", "ceph-mgr=enabled",
"-l", "ceph-mon=enabled",
"-l", "ceph-osd=enabled",
"-l", "ceph-rgw=enabled",
"-l", "ucp-control-plane=enabled",
"-e", "kubernetes n0 genesis n1",
"-e", "calico n0 n0 n1"
]
}
],
"vm": {
"memory": 12288,
"names": [
"n0",
"n1"
],
"vcpus": 4
}
}

View File

@ -6,5 +6,10 @@ source "${GATE_UTILS}"
rsync_cmd "${TEMP_DIR}/scripts"/*genesis* "${GENESIS_NAME}:/root/promenade/"
ssh_cmd "${GENESIS_NAME}" /root/promenade/genesis.sh
ssh_cmd "${GENESIS_NAME}" /root/promenade/validate-genesis.sh
ssh_cmd "${GENESIS_NAME}" /root/promenade/genesis.sh 2>&1 | tee -a "${LOG_FILE}"
ssh_cmd "${GENESIS_NAME}" /root/promenade/validate-genesis.sh 2>&1 | tee -a "${LOG_FILE}"
if ! ssh_cmd n0 docker images | tail -n +2 | grep -v registry:5000 ; then
log_warn "Using some non-cached docker images. This will slow testing."
ssh_cmd n0 docker images | tail -n +2 | grep -v registry:5000 | tee -a "${LOG_FILE}"
fi

View File

@ -8,7 +8,9 @@ declare -a ETCD_CLUSTERS
declare -a LABELS
declare -a NODES
while getopts "e:l:n:v:" opt; do
GET_KEYSTONE_TOKEN=0
while getopts "e:l:n:tv:" opt; do
case "${opt}" in
e)
ETCD_CLUSTERS+=("${OPTARG}")
@ -19,6 +21,9 @@ while getopts "e:l:n:v:" opt; do
n)
NODES+=("${OPTARG}")
;;
t)
GET_KEYSTONE_TOKEN=1
;;
v)
VIA=${OPTARG}
;;
@ -36,6 +41,7 @@ if [ $# -gt 0 ]; then
fi
SCRIPT_DIR="${TEMP_DIR}/curled-scripts"
BASE_PROM_URL="http://promenade-api.ucp.svc.cluster.local"
echo Etcd Clusters: "${ETCD_CLUSTERS[@]}"
echo Labels: "${LABELS[@]}"
@ -51,11 +57,11 @@ render_curl_url() {
LABEL_PARAMS+="&labels.dynamic=${label}"
done
BASE_URL="http://promenade-api.ucp.svc.cluster.local/api/v1.0/join-scripts"
BASE_URL="${BASE_PROM_URL}/api/v1.0/join-scripts"
DESIGN_REF="design_ref=http://192.168.77.1:7777/promenade.yaml"
HOST_PARAMS="hostname=${NAME}&ip=$(config_vm_ip "${NAME}")"
echo "'${BASE_URL}?${DESIGN_REF}&${HOST_PARAMS}${LABEL_PARAMS}'"
echo "${BASE_URL}?${DESIGN_REF}&${HOST_PARAMS}${LABEL_PARAMS}"
}
mkdir -p "${SCRIPT_DIR}"
@ -63,12 +69,32 @@ mkdir -p "${SCRIPT_DIR}"
for NAME in "${NODES[@]}"; do
log Building join script for node "${NAME}"
ssh_cmd "${VIA}" curl --max-time 300 --retry 16 --retry-delay 15 "$(render_curl_url "${NAME}" "${LABELS[@]}")" > "${SCRIPT_DIR}/join-${NAME}.sh"
CURL_ARGS=("--fail" "--max-time" "300" "--retry" "16" "--retry-delay" "15")
if [[ $GET_KEYSTONE_TOKEN == 1 ]]; then
TOKEN="$(os_ks_get_token "${VIA}")"
if [[ -z $TOKEN ]]; then
log Failed to get keystone token, exiting.
exit 1
fi
log "Got keystone token: ${TOKEN}"
CURL_ARGS+=("-H" "X-Auth-Token: ${TOKEN}")
fi
log "Checking Promenade API health"
ssh_cmd "${VIA}" curl -v "${CURL_ARGS[@]}" \
"${BASE_PROM_URL}/api/v1.0/health"
log "Promenade API healthy"
log "Fetching join script"
ssh_cmd "${VIA}" curl "${CURL_ARGS[@]}" \
"$(render_curl_url "${NAME}" "${LABELS[@]}")" > "${SCRIPT_DIR}/join-${NAME}.sh"
chmod 755 "${SCRIPT_DIR}/join-${NAME}.sh"
log "Join script received"
log Joining node "${NAME}"
rsync_cmd "${SCRIPT_DIR}/join-${NAME}.sh" "${NAME}:/root/promenade/"
ssh_cmd "${NAME}" "/root/promenade/join-${NAME}.sh"
ssh_cmd "${NAME}" "/root/promenade/join-${NAME}.sh" 2>&1 | tee -a "${LOG_FILE}"
done
for etcd_validation_string in "${ETCD_CLUSTERS[@]}"; do

View File

@ -1014,8 +1014,8 @@ data:
pipeline: promenade-api
images:
tags:
api: ${IMAGE_PROMENADE}
dep_check: ${IMAGE_DEP_CHECK}
promenade: ${IMAGE_PROMENADE}
manifests:
job_ks_endpoints: false
job_ks_service: false

View File

@ -24,6 +24,7 @@ sudo apt-get install -q -y --no-install-recommends \
docker.io \
genisoimage \
jq \
libstring-shellquote-perl \
libvirt-bin \
qemu-kvm \
qemu-utils \