Bucket deletion implementation

This commit implements the logic needed to realize bucket deletion. It also
adds logic for raising an exception when trying to create the same
(document.schema, document.metadata.name) in a different bucket than the one
in which it was originally created.

Included in this commit:

- Implementation of document deletion logic.
- Documents are always saved, even if they have been deleted or remain
  unchanged between revisions. This makes it easier to compute the diff
  between revisions.
- Associated unit tests.
- Unskip all remaining functional tests for
  'document-crud-success-single-bucket.yaml'.
- Raise a 409 exception when trying to create the same
  (document.schema, document.metadata.name) in a different bucket.
- Unskip functional tests for 'document-crud-error-bucket-conflict.yaml'.

Change-Id: I6693bbb918cb672de315a66bb087de547df302d1
parent 4d6cf7c261
commit e32a5a9319
@@ -15,8 +15,6 @@
 import yaml

 import falcon
-
-from oslo_db import exception as db_exc
 from oslo_log import log as logging

 from deckhand.control import base as api_base
@@ -51,7 +49,7 @@ class BucketsResource(api_base.BaseResource):
         try:
             validation_policies = document_validation.DocumentValidation(
                 documents).validate_all()
-        except (deckhand_errors.InvalidDocumentFormat) as e:
+        except deckhand_errors.InvalidDocumentFormat as e:
             raise falcon.HTTPBadRequest(description=e.format_message())

         for document in documents:
@@ -63,7 +61,7 @@ class BucketsResource(api_base.BaseResource):
         try:
-            documents.extend(validation_policies)
             created_documents = db_api.documents_create(bucket_name, documents)
-        except db_exc.DBDuplicateEntry as e:
+        except deckhand_errors.DocumentExists as e:
             raise falcon.HTTPConflict(description=e.format_message())
         except Exception as e:
             raise falcon.HTTPInternalServerError(description=e)
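The controller hunks above replace the low-level oslo.db DBDuplicateEntry with Deckhand's own DocumentExists error and translate it into an HTTP 409. A minimal sketch of that translation pattern outside of Deckhand (DocumentExists and documents_create mirror the diff; the stub bodies are illustrative only, not Deckhand code):

import falcon


class DocumentExists(Exception):
    """Stand-in for deckhand.errors.DocumentExists."""

    def format_message(self):
        return 'Document already exists in another bucket.'


def documents_create(bucket_name, documents):
    # Stand-in for db_api.documents_create, which performs the real check.
    raise DocumentExists()


class BucketsResource(object):
    def on_put(self, req, resp, bucket_name):
        try:
            documents_create(bucket_name, [])
        except DocumentExists as e:
            # A domain-level conflict surfaces as HTTP 409.
            raise falcon.HTTPConflict(description=e.format_message())
        except Exception as e:
            raise falcon.HTTPInternalServerError(description=str(e))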
@@ -13,6 +13,7 @@
 # limitations under the License.

 import falcon
+from oslo_log import log as logging

 from deckhand.control import base as api_base
 from deckhand.control import common
@@ -20,6 +21,8 @@ from deckhand.control.views import document as document_view
 from deckhand.db.sqlalchemy import api as db_api
 from deckhand import errors

+LOG = logging.getLogger(__name__)
+

 class RevisionDocumentsResource(api_base.BaseResource):
     """API resource for realizing CRUD endpoints for revision documents."""
@@ -16,23 +16,39 @@ from deckhand.control import common


 class ViewBuilder(common.ViewBuilder):
-    """Model document API responses as a python dictionary."""
+    """Model document API responses as a python dictionary.
+
+    There are 2 cases for rendering the response body below.
+
+    1. Treat the case where all documents in a bucket have been deleted as a
+       special case. The response body must still include the revision_id and
+       bucket_id. It is not meaningful to include other data about the deleted
+       documents as technically they don't exist.
+    2. Add all non-deleted documents to the response body.
+    """

     _collection_name = 'documents'

     def list(self, documents):
+        # Edge case for when all documents are deleted from a bucket. Still
+        # need to return bucket_id and revision_id.
+        if len(documents) == 1 and documents[0]['deleted']:
+            resp_obj = {'status': {}}
+            resp_obj['status']['bucket'] = documents[0]['bucket_id']
+            resp_obj['status']['revision'] = documents[0]['revision_id']
+            return [resp_obj]
+
         resp_list = []
-        attrs = ['id', 'metadata', 'data', 'schema']

         for document in documents:
+            attrs = ['id', 'metadata', 'data', 'schema']
+            if document['deleted']:
+                attrs.append('deleted')
+                continue
+
             resp_obj = {x: document[x] for x in attrs}
             resp_obj.setdefault('status', {})
             resp_obj['status']['bucket'] = document['bucket_id']
             resp_obj['status']['revision'] = document['revision_id']

             resp_list.append(resp_obj)

         return resp_list
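To make the two rendering cases concrete, here is a hedged, standalone restatement of the view logic with plain dicts (sample field values are invented):

def render(documents):
    # Case 1: every document in the bucket was deleted; report status only.
    if len(documents) == 1 and documents[0]['deleted']:
        doc = documents[0]
        return [{'status': {'bucket': doc['bucket_id'],
                            'revision': doc['revision_id']}}]

    # Case 2: render each non-deleted document plus its status stanza.
    resp_list = []
    for document in documents:
        if document['deleted']:
            continue
        resp_obj = {x: document[x]
                    for x in ('id', 'metadata', 'data', 'schema')}
        resp_obj['status'] = {'bucket': document['bucket_id'],
                              'revision': document['revision_id']}
        resp_list.append(resp_obj)
    return resp_list


print(render([{'deleted': True, 'bucket_id': 'mop', 'revision_id': 2}]))
# [{'status': {'bucket': 'mop', 'revision': 2}}]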
@@ -33,9 +33,7 @@ from deckhand import errors
 from deckhand import types
 from deckhand import utils

 sa_logger = None
 LOG = logging.getLogger(__name__)

 CONF = cfg.CONF

 options.set_defaults(CONF)
@@ -91,37 +89,85 @@ def setup_db():


 def documents_create(bucket_name, documents, session=None):
-    session = session or get_session()
-    documents_created = _documents_create(documents, session)
-
-    if documents_created:
-        bucket = bucket_get_or_create(bucket_name)
-        revision = revision_create()
-
-        for doc in documents_created:
-            with session.begin():
-                doc['bucket_id'] = bucket['name']
-                doc['revision_id'] = revision['id']
-                doc.save(session=session)
-
-    return [d.to_dict() for d in documents_created]
-
-
-def _documents_create(values_list, session=None):
-    """Create a set of documents and associated schema.
+    """Create a set of documents and associated bucket.

     If no changes are detected, a new revision will not be created. This
     allows services to periodically re-register their schemas without
     creating unnecessary revisions.

-    :param values_list: List of documents to be saved.
+    :param bucket_name: The name of the bucket with which to associate created
+        documents.
+    :param documents: List of documents to be created.
+    :param validation_policies: List of validation policies to be created.
+    :param session: Database session object.
+    :returns: List of created documents in dictionary format.
+    :raises DocumentExists: If the (document.schema, document.metadata.name)
+        already exists in another bucket.
     """
+    session = session or get_session()
+    documents_to_create = _documents_create(bucket_name, documents, session)
+
+    resp = []
+
+    # The documents to be deleted are computed by comparing the documents for
+    # the previous revision (if it exists) that belong to `bucket_name` with
+    # `documents`: the difference between the former and the latter.
+    document_history = [(d['schema'], d['name'])
+                        for d in revision_get_documents(
+                            bucket_id=bucket_name)]
+    documents_to_delete = [
+        h for h in document_history if h not in
+        [(d['schema'], d['metadata']['name']) for d in documents]]
+
+    # Only create a revision if any docs have been created, changed or deleted.
+    if any([documents_to_create, documents_to_delete]):
+        bucket = bucket_get_or_create(bucket_name)
+        revision = revision_create()
+
+    if documents_to_delete:
+        LOG.debug('Deleting documents: %s.', documents_to_delete)
+        deleted_documents = []
+
+        for d in documents_to_delete:
+            doc = models.Document()
+            with session.begin():
+                # Store bare minimum information about the document.
+                doc['schema'] = d[0]
+                doc['name'] = d[1]
+                doc['data'] = {}
+                doc['_metadata'] = {}
+                doc['bucket_id'] = bucket['name']
+                doc['revision_id'] = revision['id']
+
+                # Save and mark the document as `deleted` in the database.
+                doc.save(session=session)
+                doc.safe_delete(session=session)
+                deleted_documents.append(doc)
+
+        resp.extend([d.to_dict() for d in deleted_documents])
+
+    if documents_to_create:
+        LOG.debug('Creating documents: %s.',
+                  [(d['schema'], d['name']) for d in documents_to_create])
+        for doc in documents_to_create:
+            with session.begin():
+                doc['bucket_id'] = bucket['name']
+                doc['revision_id'] = revision['id']
+                doc.save(session=session)
+        # NOTE(fmontei): The orig_revision_id is not copied into the
+        # revision_id for each created document, because the revision_id here
+        # should reference the just-created revision. In case the user needs
+        # the original revision_id, that is returned as well.
+        resp.extend([d.to_dict() for d in documents_to_create])
+
+    return resp
+
+
+def _documents_create(bucket_name, values_list, session=None):
     values_list = copy.deepcopy(values_list)
     session = session or get_session()
-    filters = [c for c in models.Document.UNIQUE_CONSTRAINTS
-               if c != 'revision_id']
+    filters = ('name', 'schema')

     documents_to_change = []
     changed_documents = []

     def _document_changed(existing_document):
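Bucket deletion falls out of the set difference computed above: any (schema, metadata.name) pair that existed in the bucket's previous revision but is absent from the incoming PUT payload gets soft-deleted. A standalone restatement with made-up sample values:

previous_revision = [('example/Kind/v1', 'doc-1'),
                     ('example/Kind/v1', 'doc-2')]
incoming_payload = [{'schema': 'example/Kind/v1',
                     'metadata': {'name': 'doc-2'}}]

incoming_keys = [(d['schema'], d['metadata']['name'])
                 for d in incoming_payload]
documents_to_delete = [h for h in previous_revision
                       if h not in incoming_keys]
print(documents_to_delete)  # [('example/Kind/v1', 'doc-1')]

In particular, PUT-ing an empty document list deletes everything in the bucket, which is exactly the bucket-deletion behavior exercised by the new unit tests below.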
@@ -144,35 +190,65 @@ def _documents_create(values_list, session=None):

         try:
             existing_document = document_get(
-                raw_dict=True, **{c: values[c] for c in filters})
+                raw_dict=True, **{x: values[x] for x in filters})
         except errors.DocumentNotFound:
             # Ignore bad data at this point. Allow creation to bubble up the
             # error related to bad data.
             existing_document = None

-        if not existing_document:
-            documents_to_change.append(values)
-        elif existing_document and _document_changed(existing_document):
-            documents_to_change.append(values)
-
-    if documents_to_change:
-        for values in documents_to_change:
-            doc = _document_create(values)
-            changed_documents.append(doc)
+        if existing_document:
+            # If the document already exists in another bucket, raise an error.
+            # Ignore redundant validation policies as they are allowed to exist
+            # in multiple buckets.
+            if (existing_document['bucket_id'] != bucket_name and
+                existing_document['schema'] != types.VALIDATION_POLICY_SCHEMA):
+                raise errors.DocumentExists(
+                    schema=existing_document['schema'],
+                    name=existing_document['name'],
+                    bucket=existing_document['bucket_id'])
+
+            if not _document_changed(existing_document):
+                # Since the document has not changed, reference the original
+                # revision in which it was created. This is necessary so that
+                # the correct revision history is maintained.
+                if existing_document['orig_revision_id']:
+                    values['orig_revision_id'] = existing_document[
+                        'orig_revision_id']
+                else:
+                    values['orig_revision_id'] = existing_document[
+                        'revision_id']
+
+    # Create all documents, even unchanged ones, for the current revision. This
+    # makes the generation of the revision diff a lot easier.
+    for values in values_list:
+        doc = _document_create(values)
+        changed_documents.append(doc)

     return changed_documents


 def document_get(session=None, raw_dict=False, **filters):
-    session = session or get_session()
-    if 'document_id' in filters:
-        filters['id'] = filters.pop('document_id')
+    """Retrieve a document from the DB.

-    try:
-        document = session.query(models.Document)\
-            .filter_by(**filters)\
-            .one()
-    except sa_orm.exc.NoResultFound:
+    :param session: Database session object.
+    :param raw_dict: Whether to retrieve the exact way the data is stored in
+        DB if ``True``, else the way users expect the data.
+    :param filters: Dictionary attributes (including nested) used to filter
+        out revision documents.
+    :returns: Dictionary representation of retrieved document.
+    :raises: DocumentNotFound if the document wasn't found.
+    """
+    session = session or get_session()
+
+    # Retrieve the most recently created version of a document. Documents with
+    # the same metadata.name and schema can exist across different revisions,
+    # so it is necessary to use `first` instead of `one` to avoid errors.
+    document = session.query(models.Document)\
+        .filter_by(**filters)\
+        .order_by(models.Document.created_at.desc())\
+        .first()
+
+    if not document:
         raise errors.DocumentNotFound(document=filters)

     return document.to_dict(raw_dict=raw_dict)
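The switch from one() to an ordered first() matters because every revision now stores its own copy of each document. A self-contained sketch of the pattern in plain SQLAlchemy 1.4+ (not Deckhand's actual models): with duplicate names across revisions, one() would raise MultipleResultsFound, while ordering by created_at descending and taking first() returns the newest match.

import datetime

from sqlalchemy import Column, DateTime, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()


class Document(Base):
    __tablename__ = 'documents'
    id = Column(Integer, primary_key=True)
    name = Column(String)
    created_at = Column(DateTime)


engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

# The "same" document saved again for three successive revisions.
for day in (1, 2, 3):
    session.add(Document(name='global-1234',
                         created_at=datetime.datetime(2017, 9, day)))
session.commit()

latest = session.query(Document)\
    .filter_by(name='global-1234')\
    .order_by(Document.created_at.desc())\
    .first()
print(latest.created_at)  # 2017-09-03 00:00:00 -- the newest row wins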
@@ -182,6 +258,16 @@ def document_get(session=None, raw_dict=False, **filters):


 def bucket_get_or_create(bucket_name, session=None):
+    """Retrieve or create bucket.
+
+    Retrieve the ``Bucket`` DB object by ``bucket_name`` if it exists
+    or else create a new ``Bucket`` DB object by ``bucket_name``.
+
+    :param bucket_name: Unique identifier used for creating or retrieving
+        a bucket.
+    :param session: Database session object.
+    :returns: Dictionary representation of created/retrieved bucket.
+    """
     session = session or get_session()

     try:
@@ -200,6 +286,11 @@ def bucket_get_or_create(bucket_name, session=None):
 ####################


 def revision_create(session=None):
+    """Create a revision.
+
+    :param session: Database session object.
+    :returns: Dictionary representation of created revision.
+    """
     session = session or get_session()

     revision = models.Revision()
@@ -212,6 +303,8 @@
 def revision_get(revision_id, session=None):
     """Return the specified `revision_id`.

+    :param session: Database session object.
+    :returns: Dictionary representation of retrieved revision.
+    :raises: RevisionNotFound if the revision was not found.
+    """
     session = session or get_session()
@@ -219,11 +312,14 @@ def revision_get(revision_id, session=None):
     try:
         revision = session.query(models.Revision)\
             .filter_by(id=revision_id)\
-            .one()
+            .one()\
+            .to_dict()
     except sa_orm.exc.NoResultFound:
         raise errors.RevisionNotFound(revision=revision_id)

-    return revision.to_dict()
+    revision['documents'] = _update_revision_history(revision['documents'])
+
+    return revision


 def require_revision_exists(f):
@@ -239,53 +335,105 @@ def require_revision_exists(f):


 def revision_get_all(session=None):
-    """Return list of all revisions."""
+    """Return list of all revisions.
+
+    :param session: Database session object.
+    :returns: List of dictionary representations of retrieved revisions.
+    """
     session = session or get_session()
     revisions = session.query(models.Revision)\
         .all()
-    return [r.to_dict() for r in revisions]
+
+    revisions_dict = [r.to_dict() for r in revisions]
+    for revision in revisions_dict:
+        revision['documents'] = _update_revision_history(revision['documents'])
+
+    return revisions_dict


 def revision_delete_all(session=None):
-    """Delete all revisions."""
+    """Delete all revisions.
+
+    :param session: Database session object.
+    :returns: None
+    """
     session = session or get_session()
     session.query(models.Revision)\
         .delete(synchronize_session=False)


-def revision_get_documents(revision_id, session=None, **filters):
+def revision_get_documents(revision_id=None, include_history=True,
+                           unique_only=True, session=None, **filters):
     """Return the documents that match filters for the specified `revision_id`.
+
+    Deleted documents are not included unless deleted=True is provided in
+    ``filters``.
+
+    :param revision_id: The ID corresponding to the ``Revision`` object. If
+        the ID is ``None``, then retrieve the latest revision, if one exists.
+    :param include_history: Return all documents for revision history prior
+        and up to current revision, if ``True``. Default is ``True``.
+    :param unique_only: Return only unique documents if ``True``. Default is
+        ``True``.
+    :param filters: Dictionary attributes (including nested) used to filter
+        out revision documents.
+    :param session: Database session object.
+    :returns: All revision documents for ``revision_id`` that match the
+        ``filters``, including document revision history if applicable.
+    :raises: RevisionNotFound if the revision was not found.
+    """
     session = session or get_session()
+    revision_documents = []

     try:
-        revision = session.query(models.Revision)\
-            .filter_by(id=revision_id)\
-            .one()
-        older_revisions = session.query(models.Revision)\
-            .filter(models.Revision.created_at < revision.created_at)\
-            .order_by(models.Revision.created_at)\
-            .all()
+        if revision_id:
+            revision = session.query(models.Revision)\
+                .filter_by(id=revision_id)\
+                .one()
+        else:
+            # If no revision_id is specified, grab the newest one.
+            revision = session.query(models.Revision)\
+                .order_by(models.Revision.created_at.desc())\
+                .first()
+
+        revision_documents = (revision.to_dict()['documents']
+                              if revision else [])
+
+        if include_history and revision:
+            older_revisions = session.query(models.Revision)\
+                .filter(models.Revision.created_at < revision.created_at)\
+                .order_by(models.Revision.created_at)\
+                .all()
+
+            # Include documents from older revisions in response body.
+            for older_revision in older_revisions:
+                revision_documents.extend(
+                    older_revision.to_dict()['documents'])
     except sa_orm.exc.NoResultFound:
         raise errors.RevisionNotFound(revision=revision_id)

-    document_history = []
-    for rev in ([revision] + older_revisions):
-        document_history.extend(rev.to_dict()['documents'])
+    revision_documents = _update_revision_history(revision_documents)

     filtered_documents = _filter_revision_documents(
-        document_history, **filters)
+        revision_documents, unique_only, **filters)

     return filtered_documents


-def _filter_revision_documents(documents, **filters):
+def _update_revision_history(documents):
+    # Since documents that are unchanged across revisions need to be saved for
+    # each revision, we need to ensure that the original revision is shown
+    # for the document's `revision_id` to maintain the correct revision
+    # history.
+    for doc in documents:
+        if doc['orig_revision_id']:
+            doc['revision_id'] = doc['orig_revision_id']
+    return documents
+
+
+def _filter_revision_documents(documents, unique_only, **filters):
     """Return the list of documents that match filters.
+
+    :param unique_only: Return only unique documents if ``True``.
     :param filters: Dictionary attributes (including nested) used to filter
         out revision documents.
     :returns: List of documents that match specified filters.
     """
     # TODO(fmontei): Implement this as an sqlalchemy query.
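The _update_revision_history() helper added above is small enough to restate with plain dicts: documents carried over unchanged report the revision that originally created them.

docs = [
    {'name': 'a', 'revision_id': 2, 'orig_revision_id': 1},  # carried over
    {'name': 'b', 'revision_id': 2, 'orig_revision_id': None},  # new in rev 2
]

for doc in docs:
    if doc['orig_revision_id']:
        doc['revision_id'] = doc['orig_revision_id']

print([(d['name'], d['revision_id']) for d in docs])  # [('a', 1), ('b', 2)]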
@@ -296,7 +444,7 @@ def _filter_revision_documents(documents, **filters):
     for document in documents:
         # NOTE(fmontei): Only want to include non-validation policy documents
         # for this endpoint.
-        if document['schema'] in types.VALIDATION_POLICY_SCHEMA:
+        if document['schema'] == types.VALIDATION_POLICY_SCHEMA:
             continue
         match = True
@@ -318,7 +466,11 @@ def _filter_revision_documents(documents, **filters):
         if match:
             # Filter out redundant documents from previous revisions, i.e.
             # documents schema and metadata.name are repeated.
-            unique_key = tuple([document[filter] for filter in unique_filters])
+            if unique_only:
+                unique_key = tuple(
+                    [document[filter] for filter in unique_filters])
+            else:
+                unique_key = document['id']
             if unique_key not in filtered_documents:
                 filtered_documents[unique_key] = document
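A hedged restatement of the unique_only keying with plain dicts (unique_filters is defined elsewhere in the real module; here it is spelled out): keying the results map on (schema, name) collapses repeats carried over from older revisions, with the first match — the newest copy, given the newest-first ordering above — winning, while keying on id keeps every stored row.

documents = [
    {'id': 2, 'schema': 'example/Kind/v1', 'name': 'a'},  # current revision
    {'id': 1, 'schema': 'example/Kind/v1', 'name': 'a'},  # older revision
]
unique_filters = ('schema', 'name')


def filter_documents(documents, unique_only=True):
    filtered = {}
    for document in documents:
        key = (tuple(document[f] for f in unique_filters)
               if unique_only else document['id'])
        if key not in filtered:  # first (newest) match wins
            filtered[key] = document
    return list(filtered.values())


print(len(filter_documents(documents)))                     # 1
print(len(filter_documents(documents, unique_only=False)))  # 2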
@@ -332,8 +484,15 @@ def _filter_revision_documents(documents, **filters):
 def revision_tag_create(revision_id, tag, data=None, session=None):
     """Create a revision tag.

+    If a tag already exists by name ``tag``, the request is ignored.
+
+    :param revision_id: ID corresponding to ``Revision`` DB object.
+    :param tag: Name of the revision tag.
+    :param data: Dictionary of data to be associated with tag.
+    :param session: Database session object.
+    :returns: The tag that was created if not already present in the database,
+        else None.
+    :raises RevisionTagBadFormat: If data is neither None nor dictionary.
+    """
     session = session or get_session()
     tag_model = models.RevisionTag()
@@ -359,6 +518,9 @@ def revision_tag_create(revision_id, tag, data=None, session=None):
 def revision_tag_get(revision_id, tag, session=None):
     """Retrieve tag details.

+    :param revision_id: ID corresponding to ``Revision`` DB object.
+    :param tag: Name of the revision tag.
+    :param session: Database session object.
+    :returns: None
+    :raises RevisionTagNotFound: If ``tag`` for ``revision_id`` was not found.
+    """
@@ -378,6 +540,9 @@ def revision_tag_get(revision_id, tag, session=None):
 def revision_tag_get_all(revision_id, session=None):
     """Return list of tags for a revision.

+    :param revision_id: ID corresponding to ``Revision`` DB object.
+    :param tag: Name of the revision tag.
+    :param session: Database session object.
+    :returns: List of tags for ``revision_id``, ordered by the tag name by
+        default.
+    """
@@ -393,6 +558,9 @@ def revision_tag_get_all(revision_id, session=None):
 def revision_tag_delete(revision_id, tag, session=None):
     """Delete a specific tag for a revision.

+    :param revision_id: ID corresponding to ``Revision`` DB object.
+    :param tag: Name of the revision tag.
+    :param session: Database session object.
+    :returns: None
+    """
     session = session or get_session()
@@ -407,6 +575,8 @@ def revision_tag_delete(revision_id, tag, session=None):
 def revision_tag_delete_all(revision_id, session=None):
     """Delete all tags for a revision.

+    :param revision_id: ID corresponding to ``Revision`` DB object.
+    :param session: Database session object.
+    :returns: None
+    """
     session = session or get_session()
@@ -53,7 +53,7 @@ class DeckhandBase(models.ModelBase, models.TimestampMixin):
     def safe_delete(self, session=None):
         self.deleted = True
         self.deleted_at = timeutils.utcnow()
-        super(DeckhandBase, self).delete(session=session)
+        super(DeckhandBase, self).save(session=session)

     def keys(self):
         return self.__dict__.keys()
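The safe_delete change above is the crux of soft deletion: calling save() instead of delete() keeps the flagged row around, so deleted documents remain part of revision history. A hedged, dependency-free sketch of the semantics (only deleted, deleted_at, and safe_delete mirror the diff; the rest is illustrative):

import datetime


class SoftDeleteRecord(object):
    """Illustrative stand-in for a DeckhandBase-backed row."""

    def __init__(self):
        self.deleted = False
        self.deleted_at = None

    def save(self):
        # Stand-in for the oslo.db model save(); an UPDATE, not a DELETE.
        print('UPDATE row SET deleted=%s' % self.deleted)

    def safe_delete(self):
        self.deleted = True
        self.deleted_at = datetime.datetime.utcnow()
        self.save()  # persist the flag instead of removing the row


SoftDeleteRecord().safe_delete()  # UPDATE row SET deleted=True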
@@ -74,17 +74,12 @@ class DeckhandBase(models.ModelBase, models.TimestampMixin):
         # CircularReference.
         d.pop("_sa_instance_state")

-        for k in ["created_at", "updated_at", "deleted_at", "deleted"]:
+        for k in ["created_at", "updated_at", "deleted_at"]:
             if k in d and d[k]:
                 d[k] = d[k].isoformat()
             else:
                 d.setdefault(k, None)

-        # NOTE(fmontei): ``metadata`` is reserved by the DB, so ``_metadata``
-        # must be used to store document metadata information in the DB.
-        if not raw_dict and '_metadata' in self.keys():
-            d['metadata'] = d.pop('_metadata')
-
         return d
@@ -106,7 +101,10 @@ class Revision(BASE, DeckhandBase):
     __tablename__ = 'revisions'

     id = Column(Integer, primary_key=True)
-    documents = relationship("Document")
+    # `primaryjoin` used below for sqlalchemy to distinguish between
+    # `Document.revision_id` and `Document.orig_revision_id`.
+    documents = relationship("Document",
+                             primaryjoin="Revision.id==Document.revision_id")
     tags = relationship("RevisionTag")

     def to_dict(self):
@@ -146,10 +144,30 @@ class Document(BASE, DeckhandBase):

     bucket_id = Column(Integer, ForeignKey('buckets.name', ondelete='CASCADE'),
                        nullable=False)
-
     revision_id = Column(
         Integer, ForeignKey('revisions.id', ondelete='CASCADE'),
         nullable=False)
+    # Used for documents that haven't changed across revisions but still have
+    # been carried over into newer revisions. This is necessary in order to
+    # roll back to previous revisions or to generate a revision diff. Without
+    # recording all the documents that were PUT in a revision, this is rather
+    # difficult. By using `orig_revision_id` it is therefore possible to
+    # maintain the correct revision history -- that is, remembering the exact
+    # revision a document was created in -- while still being able to roll
+    # back to all the documents that exist in a specific revision or generate
+    # an accurate revision diff report.
+    orig_revision_id = Column(
+        Integer, ForeignKey('revisions.id', ondelete='CASCADE'),
+        nullable=True)
+
+    def to_dict(self, raw_dict=False):
+        d = super(Document, self).to_dict()
+        # NOTE(fmontei): ``metadata`` is reserved by the DB, so ``_metadata``
+        # must be used to store document metadata information in the DB.
+        if not raw_dict and '_metadata' in self.keys():
+            d['metadata'] = d.pop('_metadata')
+
+        return d


 def register_models(engine):
@@ -68,8 +68,8 @@ class InvalidFormat(ApiError):


 class DocumentExists(DeckhandException):
-    msg_fmt = ("Document with kind %(kind)s and schemaVersion "
-               "%(schema_version)s already exists.")
+    msg_fmt = ("Document with schema %(schema)s and metadata.name "
+               "%(name)s already exists in bucket %(bucket)s.")
     code = 409
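A minimal sketch of how the new DocumentExists message renders, mirroring the common OpenStack msg_fmt pattern (the real base class lives in deckhand.errors; this stand-in only interpolates keyword arguments):

class DeckhandException(Exception):
    msg_fmt = 'An unknown exception occurred.'
    code = 500

    def __init__(self, **kwargs):
        # Interpolate msg_fmt with the supplied keyword arguments.
        super().__init__(self.msg_fmt % kwargs)


class DocumentExists(DeckhandException):
    msg_fmt = ('Document with schema %(schema)s and metadata.name '
               '%(name)s already exists in bucket %(bucket)s.')
    code = 409


print(DocumentExists(schema='example/Kind/v1', name='doc-1', bucket='a'))
# Document with schema example/Kind/v1 and metadata.name doc-1 already
# exists in bucket a.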
@@ -16,18 +16,19 @@ tests:
     desc: Begin testing from known state.
     DELETE: /api/v1.0/revisions
     status: 204
-    skip: Not implemented.

   - name: create
     desc: Create initial documents
     PUT: /api/v1.0/bucket/a/documents
-    status: 201
-    data: <@resources/sample-document.yaml
-    skip: Not implemented.
+    status: 200
+    data: <@resources/sample-doc.yaml

   - name: error
     desc: Trigger error case
     PUT: /api/v1.0/bucket/b/documents
     status: 409
-    data: <@resources/sample-document.yaml
-    skip: Not implemented.
+    data: <@resources/sample-doc.yaml
+    # Deckhand exceptions return the following content-type header by
+    # default. TODO(fmontei): Override that later.
+    response_headers:
+      content-type: 'application/json; charset=UTF-8'
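The unskipped functional test above, expressed as a hedged requests sketch against a running Deckhand instance (the base URL is a placeholder, and sample-doc.yaml stands in for the test's resource file; the paths and status codes come from the test itself):

import requests

BASE = 'http://localhost:9000'  # placeholder for a running Deckhand
payload = open('sample-doc.yaml', 'rb').read()

# The first PUT registers the documents under bucket `a`.
resp = requests.put(BASE + '/api/v1.0/bucket/a/documents', data=payload)
assert resp.status_code == 200

# The same (schema, metadata.name) pair in bucket `b` now conflicts.
resp = requests.put(BASE + '/api/v1.0/bucket/b/documents', data=payload)
assert resp.status_code == 409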
@@ -151,19 +151,18 @@ tests:
   - name: delete_document
     desc: Delete a single document
     PUT: /api/v1.0/bucket/mop/documents
-    status: 201
+    status: 200
     data: <@resources/design-doc-layering-sample-with-delete.yaml
-    skip: Not implemented.

   - name: verify_delete
     desc: Verify document deletion
-    GET: /api/v1.0/revisions/$RESPONSE['$.[0].revision']/documents
+    GET: /api/v1.0/revisions/$RESPONSE['$.[0].status.revision']/documents
     status: 200
     response_multidoc_jsonpaths:
       $.[*].status.revision:
-        - "$HISTORY['initialize'].$RESPONSE['$.[0].revision']"
-        - "$HISTORY['initialize'].$RESPONSE['$.[0].revision']"
-        - "$HISTORY['update_with_ignore'].$RESPONSE['$.[0].revision']"
+        - "$HISTORY['initialize'].$RESPONSE['$.[0].status.revision']"
+        - "$HISTORY['initialize'].$RESPONSE['$.[0].status.revision']"
+        - "$HISTORY['update_single_document'].$RESPONSE['$.[0].status.revision']"
       $.[*].metadata.name:
         - layering-policy
         - global-1234
@@ -173,11 +172,10 @@ tests:
         - mop
         - mop
       $.[2].data.b: 5
-    skip: Not implemented.

   - name: verify_initial_documents_preserved_after_delete
     desc: Verify initial documents count and revisions
-    GET: /api/v1.0/revisions/$HISTORY['initialize'].$RESPONSE['$.[0].revision']/documents
+    GET: /api/v1.0/revisions/$HISTORY['initialize'].$RESPONSE['$.[0].status.revision']/documents
     status: 200
     response_multidoc_jsonpaths:
       $.[*].metadata.name:
@@ -186,21 +184,20 @@ tests:
         - region-1234
         - site-1234
       $.[*].status.revision:
-        - "$HISTORY['initialize'].$RESPONSE['$.[0].revision']"
-        - "$HISTORY['initialize'].$RESPONSE['$.[0].revision']"
-        - "$HISTORY['initialize'].$RESPONSE['$.[0].revision']"
-        - "$HISTORY['initialize'].$RESPONSE['$.[0].revision']"
+        - "$HISTORY['initialize'].$RESPONSE['$.[0].status.revision']"
+        - "$HISTORY['initialize'].$RESPONSE['$.[0].status.revision']"
+        - "$HISTORY['initialize'].$RESPONSE['$.[0].status.revision']"
+        - "$HISTORY['initialize'].$RESPONSE['$.[0].status.revision']"
       $.[*].status.bucket:
         - mop
         - mop
         - mop
         - mop
       $.[3].data.b: 4
-    skip: Not implemented.

   - name: verify_updated_documents_preserved_after_delete
     desc: Verify updated documents count and revisions preserved after delete
-    GET: /api/v1.0/revisions/$HISTORY['update_with_ignore'].$RESPONSE['$.[0].revision']/documents
+    GET: /api/v1.0/revisions/$HISTORY['update_single_document'].$RESPONSE['$.[0].status.revision']/documents
     status: 200
     response_multidoc_jsonpaths:
       $.[*].metadata.name:
@@ -209,14 +206,13 @@ tests:
         - region-1234
         - site-1234
       $.[*].status.revision:
-        - "$HISTORY['initialize'].$RESPONSE['$.[0].revision']"
-        - "$HISTORY['initialize'].$RESPONSE['$.[0].revision']"
-        - "$HISTORY['initialize'].$RESPONSE['$.[0].revision']"
-        - "$HISTORY['update_with_ignore'].$RESPONSE['$.[0].revision']"
+        - "$HISTORY['initialize'].$RESPONSE['$.[0].status.revision']"
+        - "$HISTORY['initialize'].$RESPONSE['$.[0].status.revision']"
+        - "$HISTORY['initialize'].$RESPONSE['$.[0].status.revision']"
+        - "$HISTORY['update_single_document'].$RESPONSE['$.[0].status.revision']"
       $.[*].status.bucket:
         - mop
         - mop
         - mop
         - mop
       $.[3].data.b: 5
-    skip: Not implemented.
@@ -69,8 +69,7 @@ tests:
     desc: Verify that the revision was deleted
    GET: /api/v1.0/revisions/$HISTORY['initialize'].$RESPONSE['$.[0].status.revision']
     status: 404
-
-    # Deckhand exceptions return the following content-type header by
-    # default. TODO(fmontei): Override that later.
     response_headers:
+      # Deckhand exceptions return the following content-type header by
+      # default. TODO(fmontei): Override that later.
       content-type: 'application/json; charset=UTF-8'
@@ -33,8 +33,7 @@ class TestDocuments(base.TestDbBase):
         }

     def test_create_and_show_bucket(self):
-        payload = self.documents_factory.gen_test(
-            self.document_mapping)
+        payload = self.documents_factory.gen_test(self.document_mapping)
         bucket_name = test_utils.rand_name('bucket')
         documents = self.create_documents(bucket_name, payload)
@@ -43,6 +42,7 @@ class TestDocuments(base.TestDbBase):

         for idx in range(len(documents)):
             retrieved_document = self.show_document(id=documents[idx]['id'])
+            self.assertIsNone(retrieved_document.pop('orig_revision_id'))
             self.assertEqual(documents[idx], retrieved_document)

     def test_create_and_get_multiple_document(self):
@@ -54,17 +54,6 @@ class TestDocuments(base.TestDbBase):
         self.assertIsInstance(created_documents, list)
         self.assertEqual(3, len(created_documents))

-    def test_create_document_conflict(self):
-        payload = self.documents_factory.gen_test(
-            self.document_mapping)
-        bucket_name = test_utils.rand_name('bucket')
-
-        self.create_documents(bucket_name, payload)
-        unchanged_documents = self.create_documents(bucket_name, payload)
-
-        self.assertIsInstance(unchanged_documents, list)
-        self.assertEmpty(unchanged_documents)
-
     def test_list_documents_by_revision_id(self):
         payload = self.documents_factory.gen_test(
             self.document_mapping)
@@ -72,6 +61,7 @@ class TestDocuments(base.TestDbBase):
         documents = self.create_documents(bucket_name, payload)

         revision = self.show_revision(documents[0]['revision_id'])
+        self.assertIsNone(revision['documents'][0].pop('orig_revision_id'))
         self.assertEqual(3, len(revision['documents']))
         self.assertEqual(documents[0], revision['documents'][0])
@@ -106,6 +96,7 @@ class TestDocuments(base.TestDbBase):
         documents = self.list_revision_documents(
             document['revision_id'], **filters)
         self.assertEqual(1, len(documents))
+        self.assertIsNone(documents[0].pop('orig_revision_id'))
         self.assertEqual(document, documents[0])

     def test_create_multiple_documents_and_get_revision(self):
@@ -123,6 +114,7 @@ class TestDocuments(base.TestDbBase):

         # Validate that the revision is valid.
         for document in created_documents:
+            document['orig_revision_id'] = None
             revision = self.show_revision(document['revision_id'])
             self.assertEqual(3, len(revision['documents']))
             self.assertIn(document, revision['documents'])
@@ -147,6 +139,7 @@ class TestDocuments(base.TestDbBase):
             document['revision_id'], **filters)

         self.assertEqual(1, len(filtered_documents))
+        self.assertIsNone(filtered_documents[0].pop('orig_revision_id'))
         self.assertEqual(document, filtered_documents[0])

     def test_create_certificate(self):
@@ -199,3 +192,93 @@ class TestDocuments(base.TestDbBase):
             'metadata']['storagePolicy'])
         self.assertTrue(created_documents[0]['is_secret'])
         self.assertEqual(rand_secret, created_documents[0]['data'])
+
+    def test_delete_document(self):
+        payload = base.DocumentFixture.get_minimal_fixture()
+        bucket_name = test_utils.rand_name('bucket')
+        self.create_documents(bucket_name, payload)
+
+        documents = self.create_documents(bucket_name, [], do_validation=False)
+        self.assertEqual(1, len(documents))
+        self.assertTrue(documents[0]['deleted'])
+        self.assertTrue(documents[0]['deleted_at'])
+        self.assertEqual(documents[0]['schema'], payload['schema'])
+        self.assertEqual(documents[0]['name'], payload['metadata']['name'])
+        self.assertEmpty(documents[0]['metadata'])
+        self.assertEmpty(documents[0]['data'])
+
+    def test_delete_all_documents(self):
+        payload = self.documents_factory.gen_test(self.document_mapping)
+        bucket_name = test_utils.rand_name('bucket')
+        documents = self.create_documents(bucket_name, payload)
+
+        self.assertIsInstance(documents, list)
+        self.assertEqual(3, len(documents))
+
+        documents = self.create_documents(bucket_name, [], do_validation=False)
+
+        for idx in range(3):
+            self.assertTrue(documents[idx]['deleted'])
+            self.assertTrue(documents[idx]['deleted_at'])
+            self.assertEqual(documents[idx]['schema'], payload[idx]['schema'])
+            self.assertEqual(documents[idx]['name'],
+                             payload[idx]['metadata']['name'])
+            self.assertEmpty(documents[idx]['metadata'])
+            self.assertEmpty(documents[idx]['data'])
+
+    def test_delete_and_create_document_in_same_payload(self):
+        payload = self.documents_factory.gen_test(self.document_mapping)
+        bucket_name = test_utils.rand_name('bucket')
+        # Create just 1 document.
+        documents = self.create_documents(bucket_name, payload[0])
+
+        # Delete the document in payload[0] but create a new document for
+        # payload[1].
+        documents = self.create_documents(bucket_name, payload[1],
+                                          do_validation=False)
+        # Information about the deleted and created document should've been
+        # returned. The 1st document is the deleted one and the 2nd document
+        # is the created one.
+        self.assertEqual(2, len(documents))
+        # Check that the deleted doc is formatted correctly.
+        self.assertTrue(documents[0]['deleted'])
+        self.assertTrue(documents[0]['deleted_at'])
+        self.assertEmpty(documents[0]['metadata'])
+        self.assertEmpty(documents[0]['data'])
+        # Check that the created doc isn't deleted.
+        self.assertFalse(documents[1]['deleted'])
+
+        for idx in range(2):
+            self.assertEqual(documents[idx]['schema'], payload[idx]['schema'])
+            self.assertEqual(documents[idx]['name'],
+                             payload[idx]['metadata']['name'])
+
+    def test_delete_and_create_many_documents_in_same_payload(self):
+        payload = self.documents_factory.gen_test(self.document_mapping)
+        bucket_name = test_utils.rand_name('bucket')
+        # Create just the last 2 documents.
+        documents = self.create_documents(bucket_name, payload[1:])
+
+        # Delete the documents in payload[1:] but create a new document for
+        # payload[0].
+        documents = self.create_documents(bucket_name, payload[0],
+                                          do_validation=False)
+        # After sorting, the created document comes first, followed by the two
+        # deleted docs.
+        documents = sorted(documents, key=lambda d: d['deleted'])
+        # Information about the created and deleted documents should've been
+        # returned: 1 created document followed by 2 deleted ones.
+        self.assertEqual(3, len(documents))
+        self.assertFalse(documents[0]['deleted'])
+        self.assertFalse(documents[0]['deleted_at'])
+        self.assertTrue(documents[1]['deleted'])
+        self.assertTrue(documents[2]['deleted'])
+        self.assertTrue(documents[1]['deleted_at'])
+        self.assertTrue(documents[2]['deleted_at'])
+
+        for idx in range(1, 3):
+            self.assertEqual(documents[idx]['schema'], payload[idx]['schema'])
+            self.assertEqual(documents[idx]['name'],
+                             payload[idx]['metadata']['name'])
+            self.assertEmpty(documents[idx]['metadata'])
+            self.assertEmpty(documents[idx]['data'])
@@ -45,4 +45,20 @@ class TestDocumentsNegative(base.TestDbBase):
         self.assertRaises(errors.DocumentNotFound,
                           self.show_document,
                           do_validation=False,
-                          document_id=test_utils.rand_uuid_hex())
+                          id=test_utils.rand_uuid_hex())
+
+    def test_create_bucket_conflict(self):
+        # Create the document in one bucket.
+        payload = base.DocumentFixture.get_minimal_fixture()
+        bucket_name = test_utils.rand_name('bucket')
+        self.create_documents(bucket_name, payload)
+
+        # Verify that the document cannot be created in another bucket.
+        alt_bucket_name = test_utils.rand_name('bucket')
+        error_re = ("Document with schema %s and metadata.name "
+                    "%s already exists in bucket %s." % (
+                        payload['schema'], payload['metadata']['name'],
+                        bucket_name))
+        self.assertRaisesRegex(
+            errors.DocumentExists, error_re, self.create_documents,
+            alt_bucket_name, payload)
@@ -37,13 +37,17 @@ class TestRevisions(base.TestDbBase):
                      for _ in range(4)]
         bucket_name = test_utils.rand_name('bucket')
         created_documents = self.create_documents(bucket_name, documents)
+        orig_revision_id = created_documents[0]['revision_id']

         # Update the last document.
         documents[-1]['data'] = {'foo': 'bar'}
         updated_documents = self.create_documents(
             bucket_name, documents, do_validation=False)
+        new_revision_id = updated_documents[0]['revision_id']

-        self.assertEqual(1, len(updated_documents))
+        # 4 documents should be returned: the updated doc along with the other
+        # 3 documents (unchanged) that accompanied the PUT request.
+        self.assertEqual(4, len(updated_documents))
         self.assertEqual(created_documents[-1]['bucket_id'],
                          updated_documents[0]['bucket_id'])
         self.assertNotEqual(created_documents[-1]['revision_id'],
@@ -52,8 +56,58 @@ class TestRevisions(base.TestDbBase):
         revision_documents = self.list_revision_documents(
             updated_documents[0]['revision_id'])
         self.assertEqual(4, len(revision_documents))
-        self.assertEqual(created_documents[:-1] + updated_documents,
-                         revision_documents)
+
+        self.assertEqual([orig_revision_id] * 3 + [new_revision_id],
+                         [d['revision_id'] for d in revision_documents])
+
+        self.assertEqual(
+            [(d['name'], d['schema'])
+             for d in (created_documents[:-1] + [updated_documents[-1]])],
+            [(d['name'], d['schema']) for d in revision_documents])
+
+    def test_recreate_with_no_changes(self):
+        # Verify that showing and listing revisions returns the revisions
+        # with the original revision ID. This is because nothing has changed
+        # so the "new" revision is just a carry-over from the original one.
+        documents = base.DocumentFixture.get_minimal_fixture()
+        bucket_name = test_utils.rand_name('bucket')
+        created_documents = self.create_documents(bucket_name, documents)
+        recreated_documents = self.create_documents(bucket_name, documents)
+
+        created_rev_id = created_documents[0].pop('revision_id')
+        recreated_rev_id = recreated_documents[0].pop('revision_id')
+        recreated_orig_rev_id = recreated_documents[0].pop('orig_revision_id')
+
+        for attr in ('data', 'metadata', 'name', 'schema'):
+            self.assertEqual(
+                created_documents[0][attr], recreated_documents[0][attr])
+        self.assertEqual(created_rev_id, recreated_orig_rev_id)
+        self.assertEqual(created_rev_id + 1, recreated_rev_id)
+
+        # Verify that the correct revision ID is returned for listing
+        # revision documents.
+        revision_documents = self.list_revision_documents(recreated_rev_id)
+        docs_rev_id = revision_documents[0].pop('revision_id')
+
+        for attr in ('data', 'metadata', 'name', 'schema'):
+            self.assertEqual(
+                revision_documents[0][attr], recreated_documents[0][attr])
+
+        self.assertEqual(recreated_orig_rev_id, docs_rev_id)
+        self.assertEqual(recreated_rev_id, docs_rev_id + 1)
+
+        # Verify that each doc in revision['documents'] for listing revisions
+        # has the correct revision_id.
+        retrieved_revisions = self.list_revisions()
+        for rev in retrieved_revisions:
+            self.assertEqual(created_rev_id,
+                             rev['documents'][0]['revision_id'])
+
+        # Verify that each doc in revision['documents'] for showing revision
+        # details has the correct revision_id.
+        retrieved_revision = self.show_revision(recreated_rev_id)
+        self.assertEqual(created_rev_id,
+                         retrieved_revision['documents'][0]['revision_id'])

     def test_list_with_validation_policies(self):
         documents = [base.DocumentFixture.get_minimal_fixture()
@@ -82,7 +136,7 @@ class TestRevisions(base.TestDbBase):
                      for _ in range(3)]
         bucket_name = test_utils.rand_name('bucket')
         created_documents = self.create_documents(
-            bucket_name, document_payload)
+            bucket_name, document_payload, do_validation=False)
         all_created_documents.extend(created_documents)
         revision_id = created_documents[0]['revision_id']
         all_revision_ids.append(revision_id)
@@ -101,3 +155,22 @@ class TestRevisions(base.TestDbBase):
         error_re = 'The requested document %s was not found.' % filters
         self.assertRaisesRegex(errors.DocumentNotFound, error_re,
                                self.show_document, **filters)
+
+    def test_revision_history_multiple_buckets(self):
+        documents = base.DocumentFixture.get_minimal_fixture()
+        alt_documents = base.DocumentFixture.get_minimal_fixture()
+        bucket_name = test_utils.rand_name('bucket')
+        alt_bucket_name = test_utils.rand_name('bucket')
+
+        created_documents = self.create_documents(bucket_name, documents)
+        alt_created_documents = self.create_documents(
+            alt_bucket_name, alt_documents, do_validation=False)
+
+        alt_revision_docs = self.list_revision_documents(
+            alt_created_documents[0]['revision_id'])
+        self.assertEqual(2, len(alt_revision_docs))
+
+        expected_doc_ids = [created_documents[0]['id'],
+                            alt_created_documents[0]['id']]
+        self.assertEqual(
+            expected_doc_ids, [d['id'] for d in alt_revision_docs])
@@ -18,10 +18,10 @@ from deckhand.tests import test_utils
 from deckhand.tests.unit.db import base


-class TestRevisionViews(base.TestDbBase):
+class TestDocumentViews(base.TestDbBase):

     def setUp(self):
-        super(TestRevisionViews, self).setUp()
+        super(TestDocumentViews, self).setUp()
         self.view_builder = document.ViewBuilder()
         self.factory = factories.ValidationPolicyFactory()
@@ -52,3 +52,15 @@ class TestRevisionViews(base.TestDbBase):

     def test_create_many_documents(self):
         self._test_document_creation_view(4)
+
+    def test_delete_all_documents(self):
+        payload = base.DocumentFixture.get_minimal_fixture()
+        bucket_name = test_utils.rand_name('bucket')
+        self.create_documents(bucket_name, payload)
+        deleted_documents = self.create_documents(
+            bucket_name, [], do_validation=False)
+
+        document_view = self.view_builder.list(deleted_documents)
+        self.assertEqual(1, len(document_view))
+        self.assertEqual({'status': {'bucket': bucket_name, 'revision': 2}},
+                         document_view[0])
@@ -49,7 +49,7 @@ class TestRevisionViews(base.TestDbBase):
         payload = [base.DocumentFixture.get_minimal_fixture()
                    for _ in range(doc_count)]
         bucket_name = test_utils.rand_name('bucket')
-        self.create_documents(bucket_name, payload)
+        self.create_documents(bucket_name, payload, do_validation=False)
         revisions = self.list_revisions()
         revisions_view = self.view_builder.list(revisions)