diff --git a/.zuul.yaml b/.zuul.yaml index e5d692d..0dc96f5 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -5,18 +5,25 @@ check: jobs: - cinder-tempest-plugin-lvm-lio-barbican + - cinder-tempest-plugin-lvm-lio-barbican-centos-8: + voting: false + - cinder-tempest-plugin-lvm-tgt-barbican - cinder-tempest-plugin-cbak-ceph + - cinder-tempest-plugin-basic-ussuri + - cinder-tempest-plugin-basic-train + - cinder-tempest-plugin-basic-stein gate: jobs: - cinder-tempest-plugin-lvm-lio-barbican + - cinder-tempest-plugin-lvm-tgt-barbican - cinder-tempest-plugin-cbak-ceph - job: - name: cinder-tempest-plugin-lvm-lio-barbican + name: cinder-tempest-plugin-lvm-barbican-base description: | - This jobs configures Cinder with LVM, LIO, barbican and - runs tempest tests and cinderlib tests. + This is a base job for lvm with lio & tgt targets parent: devstack-tempest + timeout: 10800 roles: - zuul: opendev.org/openstack/cinderlib required-projects: @@ -37,9 +44,13 @@ tempest_test_blacklist: '{{ ansible_user_dir }}/{{ zuul.projects["opendev.org/openstack/tempest"].src_dir }}/tools/tempest-integrated-gate-storage-blacklist.txt' tox_envlist: all devstack_localrc: - CINDER_ISCSI_HELPER: lioadm CINDER_LVM_TYPE: thin CINDER_COORDINATION_URL: 'file://\$state_path' + devstack_local_conf: + test-config: + $TEMPEST_CONFIG: + volume-feature-enabled: + volume_revert: True devstack_services: barbican: true tempest_plugins: @@ -58,5 +69,90 @@ Integration tests that runs with the ceph devstack plugin, py3 and enable the backup service. 
vars: + tempest_black_regex: '(VolumesBackupsTest.test_bootable_volume_backup_and_restore|TestVolumeBackupRestore.test_volume_backup_restore)' + devstack_local_conf: + test-config: + $TEMPEST_CONFIG: + volume-feature-enabled: + volume_revert: True devstack_services: c-bak: true + +# variant for pre-Ussuri branches (no volume revert for Ceph), +# should this job be used on those branches +- job: + name: cinder-tempest-plugin-cbak-ceph + branches: ^(?=stable/(ocata|pike|queens|rocky|stein|train)).*$ + vars: + tempest_black_regex: '' + devstack_local_conf: + test-config: + $TEMPEST_CONFIG: + volume-feature-enabled: + volume_revert: False + +- job: + name: cinder-tempest-plugin-lvm-lio-barbican + description: | + This job configures Cinder with LVM, LIO, barbican and + runs tempest tests and cinderlib tests. + parent: cinder-tempest-plugin-lvm-barbican-base + vars: + devstack_localrc: + CINDER_ISCSI_HELPER: lioadm + +- job: + name: cinder-tempest-plugin-lvm-lio-barbican-centos-8 + parent: cinder-tempest-plugin-lvm-lio-barbican + nodeset: devstack-single-node-centos-8 + description: | + This job configures Cinder with LVM, LIO, barbican and + runs tempest tests and cinderlib tests on CentOS 8. + +- job: + name: cinder-tempest-plugin-lvm-tgt-barbican + description: | + This job configures Cinder with LVM, tgt, barbican and + runs tempest tests and cinderlib tests. + parent: cinder-tempest-plugin-lvm-barbican-base + +- job: + name: cinder-tempest-plugin-basic + parent: devstack-tempest + description: | + Cinder tempest plugin tests job which runs only cinder-tempest-plugin + tests. 
+ required-projects: + - opendev.org/openstack/cinder-tempest-plugin + vars: + devstack_localrc: + TEMPEST_VOLUME_REVERT_TO_SNAPSHOT: True + tox_envlist: all + tempest_test_regex: cinder_tempest_plugin + tempest_plugins: + - cinder-tempest-plugin + irrelevant-files: + - ^.*\.rst$ + - ^doc/.*$ + - ^releasenotes/.*$ + +- job: + name: cinder-tempest-plugin-basic-ussuri + parent: cinder-tempest-plugin-basic + override-checkout: stable/ussuri + +- job: + name: cinder-tempest-plugin-basic-train + parent: cinder-tempest-plugin-basic + override-checkout: stable/train + vars: + devstack_localrc: + USE_PYTHON3: True + +- job: + name: cinder-tempest-plugin-basic-stein + parent: cinder-tempest-plugin-basic + override-checkout: stable/stein + vars: + devstack_localrc: + USE_PYTHON3: True diff --git a/cinder_tempest_plugin/api/volume/admin/test_volume_backup.py b/cinder_tempest_plugin/api/volume/admin/test_volume_backup.py new file mode 100644 index 0000000..d1fa730 --- /dev/null +++ b/cinder_tempest_plugin/api/volume/admin/test_volume_backup.py @@ -0,0 +1,140 @@ +# Copyright (C) 2020 Canonical Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from tempest.common import waiters +from tempest import config +from tempest.lib import decorators +from tempest.lib import exceptions + +from cinder_tempest_plugin.api.volume import base + +CONF = config.CONF + + +class VolumesBackupsTest(base.BaseVolumeAdminTest): + @classmethod + def setup_clients(cls): + super(VolumesBackupsTest, cls).setup_clients() + cls.admin_volume_client = cls.os_admin.volumes_client_latest + cls.backups_client = cls.os_primary.backups_client_latest + cls.volumes_client = cls.os_primary.volumes_client_latest + + @classmethod + def skip_checks(cls): + super(VolumesBackupsTest, cls).skip_checks() + if not CONF.volume_feature_enabled.backup: + raise cls.skipException("Cinder backup feature disabled") + + @decorators.idempotent_id('2daadb2e-409a-4ede-a6ce-6002ec324372') + def test_backup_crossproject_admin_negative(self): + + # create vol as user + volume = self.volumes_client.create_volume( + size=CONF.volume.volume_size)['volume'] + waiters.wait_for_volume_resource_status( + self.volumes_client, + volume['id'], 'available') + + # create backup as user + backup = self.backups_client.create_backup( + volume_id=volume['id'])['backup'] + waiters.wait_for_volume_resource_status( + self.backups_client, + backup['id'], 'available') + + # try to create incremental backup as admin + self.assertRaises( + exceptions.BadRequest, self.admin_backups_client.create_backup, + volume_id=volume['id'], incremental=True) + + @decorators.idempotent_id('b9feb593-5809-4207-90d3-28e627730f13') + def test_backup_crossproject_user_negative(self): + + # create vol as user + volume = self.volumes_client.create_volume( + size=CONF.volume.volume_size)['volume'] + waiters.wait_for_volume_resource_status( + self.volumes_client, + volume['id'], 'available') + + # create backup as admin + backup = self.admin_backups_client.create_backup( + volume_id=volume['id'])['backup'] + waiters.wait_for_volume_resource_status( + self.admin_backups_client, + backup['id'], 'available') 
+ + # try to create incremental backup as user + self.assertRaises( + exceptions.BadRequest, self.backups_client.create_backup, + volume_id=volume['id'], incremental=True) + + @decorators.idempotent_id('ce15f528-bfc1-492d-81db-b6168b631587') + def test_incremental_backup_respective_parents(self): + + # create vol as user + volume = self.volumes_client.create_volume( + size=CONF.volume.volume_size)['volume'] + waiters.wait_for_volume_resource_status( + self.volumes_client, + volume['id'], 'available') + + # create backup as admin + backup_adm = self.admin_backups_client.create_backup( + volume_id=volume['id'])['backup'] + waiters.wait_for_volume_resource_status( + self.admin_backups_client, + backup_adm['id'], 'available') + + # create backup as user + backup_usr = self.backups_client.create_backup( + volume_id=volume['id'])['backup'] + waiters.wait_for_volume_resource_status( + self.backups_client, + backup_usr['id'], 'available') + + # refresh admin backup and assert no child backups + backup_adm = self.admin_backups_client.show_backup( + backup_adm['id'])['backup'] + self.assertFalse(backup_adm['has_dependent_backups']) + + # create incremental backup as admin + backup_adm_inc = self.admin_backups_client.create_backup( + volume_id=volume['id'], incremental=True)['backup'] + waiters.wait_for_volume_resource_status( + self.admin_backups_client, + backup_adm_inc['id'], 'available') + + # refresh user backup and assert no child backups + backup_usr = self.backups_client.show_backup( + backup_usr['id'])['backup'] + self.assertFalse(backup_usr['has_dependent_backups']) + + # refresh admin backup and assert it has children + backup_adm = self.admin_backups_client.show_backup( + backup_adm['id'])['backup'] + self.assertTrue(backup_adm['has_dependent_backups']) + + # create incremental backup as user + backup_usr_inc = self.backups_client.create_backup( + volume_id=volume['id'], incremental=True)['backup'] + waiters.wait_for_volume_resource_status( + self.backups_client, + 
backup_usr_inc['id'], 'available') + + # refresh user backup and assert it has children + backup_usr = self.backups_client.show_backup( + backup_usr['id'])['backup'] + self.assertTrue(backup_usr['has_dependent_backups']) diff --git a/cinder_tempest_plugin/api/volume/base.py b/cinder_tempest_plugin/api/volume/base.py index 675d0bc..418fd33 100644 --- a/cinder_tempest_plugin/api/volume/base.py +++ b/cinder_tempest_plugin/api/volume/base.py @@ -164,3 +164,38 @@ super(BaseVolumeAdminTest, cls).setup_clients() cls.admin_volume_types_client = cls.os_admin.volume_types_client_latest + cls.admin_backups_client = cls.os_admin.backups_client_latest + cls.admin_volumes_client = cls.os_admin.volumes_client_latest + + @classmethod + def create_volume_type(cls, name=None, **kwargs): + """Create a test volume-type""" + + name = name or data_utils.rand_name(cls.__name__ + '-volume-type') + volume_type = cls.admin_volume_types_client.create_volume_type( + name=name, **kwargs)['volume_type'] + cls.addClassResourceCleanup(cls._clear_volume_type, volume_type) + return volume_type + + @classmethod + def _clear_volume_type(cls, volume_type): + # If image caching is enabled, we must delete the cached volume + # before cinder will allow us to delete the volume_type. This function + # solves that problem by taking the brute-force approach of deleting + # any volumes of this volume_type that exist *no matter what project + # they are in*. Since this won't happen until the teardown of the + # test class, that should be OK. 
+ type_id = volume_type['id'] + type_name = volume_type['name'] + + volumes = cls.admin_volumes_client.list_volumes( + detail=True, params={'all_tenants': 1})['volumes'] + for volume in [v for v in volumes if v['volume_type'] == type_name]: + test_utils.call_and_ignore_notfound_exc( + cls.admin_volumes_client.delete_volume, volume['id']) + cls.admin_volumes_client.wait_for_resource_deletion(volume['id']) + + test_utils.call_and_ignore_notfound_exc( + cls.admin_volume_types_client.delete_volume_type, type_id) + test_utils.call_and_ignore_notfound_exc( + cls.admin_volume_types_client.wait_for_resource_deletion, type_id) diff --git a/cinder_tempest_plugin/api/volume/test_create_from_image.py b/cinder_tempest_plugin/api/volume/test_create_from_image.py index 02fbd24..dc296c0 100644 --- a/cinder_tempest_plugin/api/volume/test_create_from_image.py +++ b/cinder_tempest_plugin/api/volume/test_create_from_image.py @@ -9,6 +9,8 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
+ +import io from tempest.common import waiters from tempest import config @@ -77,3 +79,84 @@ waiters.wait_for_volume_resource_status(self.volumes_client, v['id'], 'available') + + +class VolumeAndVolumeTypeFromImageTest(base.BaseVolumeAdminTest): + # needs AdminTest as superclass to manipulate volume_types + + @classmethod + def skip_checks(cls): + super(VolumeAndVolumeTypeFromImageTest, cls).skip_checks() + if not CONF.service_available.glance: + raise cls.skipException("Glance service is disabled") + + @classmethod + def create_image_with_data(cls, **kwargs): + # we do this as a class method so we can use the + # addClassResourceCleanup functionality of tempest.test.BaseTestCase + images_client = cls.os_primary.image_client_v2 + if 'min_disk' not in kwargs: + kwargs['min_disk'] = 1 + response = images_client.create_image(**kwargs) + image_id = response['id'] + cls.addClassResourceCleanup( + images_client.wait_for_resource_deletion, image_id) + cls.addClassResourceCleanup( + test_utils.call_and_ignore_notfound_exc, + images_client.delete_image, image_id) + + # upload "data" to image + image_file = io.BytesIO(data_utils.random_bytes(size=1024)) + images_client.store_image_file(image_id, image_file) + + waiters.wait_for_image_status(images_client, image_id, 'active') + image = images_client.show_image(image_id) + return image + + @decorators.idempotent_id('6e9266ff-a917-4dd5-aa4a-c36e59e7a2a6') + def test_create_from_image_with_volume_type_image_property(self): + """Verify that the cinder_img_volume_type image property works. + + When a volume is created from an image containing the + cinder_img_volume_type property and no volume_type is specified + in the volume-create request, the volume_type of the resulting + volume should be the one specified by the image property. 
+ """ + + volume_type_meta = 'cinder_img_volume_type' + volume_type_name = 'vol-type-for-6e9266ff-a917-4dd5-aa4a-c36e59e7a2a6' + description = ('Generic volume_type for test ' + '6e9266ff-a917-4dd5-aa4a-c36e59e7a2a6') + proto = CONF.volume.storage_protocol + vendor = CONF.volume.vendor_name + extra_specs = {"storage_protocol": proto, + "vendor_name": vendor} + kwargs = {'description': description, + 'extra_specs': extra_specs, + 'os-volume-type-access:is_public': True} + volume_type = self.create_volume_type(name=volume_type_name, + **kwargs) + # quick sanity check + self.assertEqual(volume_type_name, volume_type['name']) + + # create an image in glance + kwargs = {'disk_format': 'raw', + 'container_format': 'bare', + 'name': ('image-for-test-' + '6e9266ff-a917-4dd5-aa4a-c36e59e7a2a6'), + 'visibility': 'private', + volume_type_meta: volume_type_name} + image = self.create_image_with_data(**kwargs) + # quick sanity check + self.assertEqual(volume_type_name, image[volume_type_meta]) + + # create volume from image + kwargs = {'name': ('volume-for-test-' + '6e9266ff-a917-4dd5-aa4a-c36e59e7a2a6'), + 'imageRef': image['id']} + # this is the whole point of the test, so make sure this is true + self.assertNotIn('volume_type', kwargs) + volume = self.create_volume(**kwargs) + + found_volume_type = volume['volume_type'] + self.assertEqual(volume_type_name, found_volume_type) diff --git a/cinder_tempest_plugin/exceptions.py b/cinder_tempest_plugin/exceptions.py new file mode 100644 index 0000000..4825f19 --- /dev/null +++ b/cinder_tempest_plugin/exceptions.py @@ -0,0 +1,22 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from tempest.lib import exceptions + + +class ConsistencyGroupException(exceptions.TempestException): + message = "Consistency group %(cg_id)s failed and is in ERROR status" + + +class ConsistencyGroupSnapshotException(exceptions.TempestException): + message = ("Consistency group snapshot %(cgsnapshot_id)s failed and is " + "in ERROR status") diff --git a/cinder_tempest_plugin/services/consistencygroups_client.py b/cinder_tempest_plugin/services/consistencygroups_client.py index 10415d4..a29a90a 100644 --- a/cinder_tempest_plugin/services/consistencygroups_client.py +++ b/cinder_tempest_plugin/services/consistencygroups_client.py @@ -18,9 +18,10 @@ from oslo_serialization import jsonutils as json from six.moves import http_client -from tempest import exceptions from tempest.lib.common import rest_client from tempest.lib import exceptions as lib_exc + +from cinder_tempest_plugin import exceptions as volume_exc class ConsistencyGroupsClient(rest_client.RestClient): @@ -137,14 +138,14 @@ body = self.show_consistencygroup(cg_id)['consistencygroup'] cg_status = body['status'] if cg_status == 'error': - raise exceptions.ConsistencyGroupException(cg_id=cg_id) + raise volume_exc.ConsistencyGroupException(cg_id=cg_id) if int(time.time()) - start >= self.build_timeout: message = ('Consistency group %s failed to reach %s status ' '(current %s) within the required time (%s s).' 
% (cg_id, status, cg_status, self.build_timeout)) - raise exceptions.TimeoutException(message) + raise lib_exc.TimeoutException(message) def wait_for_consistencygroup_deletion(self, cg_id): """Waits for consistency group deletion""" @@ -155,7 +156,7 @@ except lib_exc.NotFound: return if int(time.time()) - start_time >= self.build_timeout: - raise exceptions.TimeoutException + raise lib_exc.TimeoutException time.sleep(self.build_interval) def wait_for_cgsnapshot_status(self, cgsnapshot_id, status): @@ -169,7 +170,7 @@ body = self.show_cgsnapshot(cgsnapshot_id)['cgsnapshot'] cgsnapshot_status = body['status'] if cgsnapshot_status == 'error': - raise exceptions.ConsistencyGroupSnapshotException( + raise volume_exc.ConsistencyGroupSnapshotException( cgsnapshot_id=cgsnapshot_id) if int(time.time()) - start >= self.build_timeout: @@ -178,7 +179,7 @@ '(%s s).' % (cgsnapshot_id, status, cgsnapshot_status, self.build_timeout)) - raise exceptions.TimeoutException(message) + raise lib_exc.TimeoutException(message) def wait_for_cgsnapshot_deletion(self, cgsnapshot_id): """Waits for consistency group snapshot deletion""" @@ -189,5 +190,5 @@ except lib_exc.NotFound: return if int(time.time()) - start_time >= self.build_timeout: - raise exceptions.TimeoutException + raise lib_exc.TimeoutException time.sleep(self.build_interval) diff --git a/playbooks/post-cinderlib.yaml b/playbooks/post-cinderlib.yaml index f61775c..6384bd1 100644 --- a/playbooks/post-cinderlib.yaml +++ b/playbooks/post-cinderlib.yaml @@ -1,4 +1,4 @@ -- hosts: all +- hosts: tempest vars: tox_envlist: functional zuul_work_dir: "{{ ansible_user_dir }}/{{ zuul.projects['opendev.org/openstack/cinderlib'].src_dir }}" diff --git a/playbooks/tempest-and-cinderlib-run.yaml b/playbooks/tempest-and-cinderlib-run.yaml index 5dddf7f..8e26913 100644 --- a/playbooks/tempest-and-cinderlib-run.yaml +++ b/playbooks/tempest-and-cinderlib-run.yaml @@ -19,10 +19,16 @@ - setup-tempest-data-dir - acl-devstack-files - role: run-tempest 
- # ignore the errors, so that run-cinderlib-tests is always executed + # ignore the errors for now, so that run-cinderlib-tests is always executed ignore_errors: yes - role: change-devstack-data-owner devstack_data_subdir_changed: cinder devstack_data_subdir_owner: zuul - role: run-cinderlib-tests + tox_install_siblings: false cinderlib_base_dir: "{{ ansible_user_dir }}/{{ zuul.projects['opendev.org/openstack/cinderlib'].src_dir }}" + post_tasks: + - name: Fail if the first tempest run did not work + fail: + msg: "tempest run returned with an error" + when: tempest_run_result is defined and tempest_run_result.rc != 0 diff --git a/setup.cfg b/setup.cfg index 3ee7468..9b05085 100644 --- a/setup.cfg +++ b/setup.cfg @@ -21,29 +21,6 @@ packages = cinder_tempest_plugin -[build_sphinx] -all-files = 1 -warning-is-error = 1 -source-dir = doc/source -build-dir = doc/build - -[upload_sphinx] -upload-dir = doc/build/html - -[compile_catalog] -directory = cinder_tempest_plugin/locale -domain = cinder_tempest_plugin - -[update_catalog] -domain = cinder_tempest_plugin -output_dir = cinder_tempest_plugin/locale -input_file = cinder_tempest_plugin/locale/cinder_tempest_plugin.pot - -[extract_messages] -keywords = _ gettext ngettext l_ lazy_gettext -mapping_file = babel.cfg -output_file = cinder_tempest_plugin/locale/cinder_tempest_plugin.pot - [entry_points] tempest.test_plugins = cinder_tests = cinder_tempest_plugin.plugin:CinderTempestPlugin diff --git a/setup.py b/setup.py index 566d844..cd35c3c 100644 --- a/setup.py +++ b/setup.py @@ -13,16 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT import setuptools - -# In python < 2.7.4, a lazy loading of package `pbr` will break -# setuptools if some other modules registered functions in `atexit`. 
-# solution from: http://bugs.python.org/issue15881#msg170215 -try: - import multiprocessing # noqa -except ImportError: - pass setuptools.setup( setup_requires=['pbr>=2.0.0'], diff --git a/test-requirements.txt b/test-requirements.txt index af89592..e0bd682 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -2,7 +2,7 @@ # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. -hacking!=0.13.0,<0.14,>=0.12.0 # Apache-2.0 +hacking>=3.0.1,<3.1 # Apache-2.0 coverage!=4.4,>=4.0 # Apache-2.0 python-subunit>=1.0.0 # Apache-2.0/BSD diff --git a/tox.ini b/tox.ini index 4c2edbc..be122b4 100644 --- a/tox.ini +++ b/tox.ini @@ -9,11 +9,10 @@ [testenv] basepython = python3 usedevelop = True -install_command = pip install {opts} {packages} setenv = VIRTUAL_ENV={envdir} PYTHONWARNINGS=default::DeprecationWarning -deps = -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} +deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -r{toxinidir}/test-requirements.txt commands = python setup.py test --slowest --testr-args='{posargs}' @@ -25,8 +24,9 @@ [flake8] # E123, E125 skipped as they are invalid PEP-8. - +# W503 line break before binary operator +# W504 line break after binary operator show-source = True -ignore = E123,E125 +ignore = E123,E125,W503,W504 builtins = _ exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build