Codebase list cinder-tempest-plugin / f5f6299
Merge tag '1.4.0' into debian/wallaby cinder-tempest-plugin 1.4.0 release meta:version: 1.4.0 meta:diff-start: - meta:series: wallaby meta:release-type: release meta:pypi: no meta:first: no meta:release:Author: Luigi Toscano <ltoscano@redhat.com> meta:release:Commit: Luigi Toscano <ltoscano@redhat.com> meta:release:Change-Id: Ib6efac12278c12aa1f320c12a86cbcd164faed90 meta:release:Code-Review+1: Brian Rosmaita <rosmaita.fossdev@gmail.com> meta:release:Code-Review+2: Elod Illes <elod.illes@est.tech> meta:release:Code-Review+2: Hervé Beraud <herveberaud.pro@gmail.com> meta:release:Code-Review+2: Thierry Carrez <thierry@openstack.org> meta:release:Workflow+1: Thierry Carrez <thierry@openstack.org> Thomas Goirand 5 years ago
13 changed file(s) with 1546 addition(s) and 94 deletion(s). Raw diff Collapse all Expand all
2626 !.coveragerc
2727 .tox
2828 nosetests.xml
29 .testrepository
29 .stestr
3030 .venv
3131
3232 # Translations
0 [DEFAULT]
1 test_path=${OS_TEST_PATH:-./cinder_tempest_plugin}
2 top_dir=./
+0
-7
.testr.conf less more
0 [DEFAULT]
1 test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
2 OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
3 OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \
4 ${PYTHON:-python} -m subunit.run discover -t ./ . $LISTOPT $IDOPTION
5 test_id_option=--load-list $IDFILE
6 test_list_option=--list
88 voting: false
99 - cinder-tempest-plugin-lvm-tgt-barbican
1010 - cinder-tempest-plugin-cbak-ceph
11 - cinder-tempest-plugin-basic-victoria
1112 - cinder-tempest-plugin-basic-ussuri
1213 - cinder-tempest-plugin-basic-train
13 - cinder-tempest-plugin-basic-stein
1414 gate:
1515 jobs:
1616 - cinder-tempest-plugin-lvm-lio-barbican
2222 description: |
2323 This is a base job for lvm with lio & tgt targets
2424 parent: devstack-tempest
25 # TODO(gmann): Remove the below nodeset setting to Bionic once
26 # https://storyboard.openstack.org/#!/story/2007732 is fixed
27 # Once nodeset is removed from here then devstack-tempest job
28 # will automatically run this job on Ubuntu Focal nodeset from
29 # Victoria gate onwards.
30 nodeset: openstack-single-node-bionic
3125 timeout: 10800
3226 roles:
3327 - zuul: opendev.org/openstack/cinderlib
5448 devstack_local_conf:
5549 test-config:
5650 $TEMPEST_CONFIG:
51 auth:
52 # FIXME: 'creator' should be re-added by the barbican devstack plugin
53 # but the value below overrides everything.
54 tempest_roles: member,creator
5755 volume-feature-enabled:
5856 volume_revert: True
5957 devstack_services:
142140 - ^releasenotes/.*$
143141
144142 - job:
143 name: cinder-tempest-plugin-basic-victoria
144 parent: cinder-tempest-plugin-basic
145 nodeset: openstack-single-node-focal
146 override-checkout: stable/victoria
147
148 - job:
145149 name: cinder-tempest-plugin-basic-ussuri
146150 parent: cinder-tempest-plugin-basic
147151 nodeset: openstack-single-node-bionic
155159 vars:
156160 devstack_localrc:
157161 USE_PYTHON3: True
158
159 - job:
160 name: cinder-tempest-plugin-basic-stein
161 parent: cinder-tempest-plugin-basic
162 nodeset: openstack-single-node-bionic
163 override-checkout: stable/stein
164 vars:
165 devstack_localrc:
166 USE_PYTHON3: True
7777 self.consistencygroups_adm_client.create_consistencygroup)
7878 cg = create_consistencygroup(volume_type['id'],
7979 name=cg_name)['consistencygroup']
80 vol_name = data_utils.rand_name("volume")
81 params = {'name': vol_name,
82 'volume_type': volume_type['id'],
83 'consistencygroup_id': cg['id'],
84 'size': CONF.volume.volume_size}
85
86 # Create volume
87 volume = self.admin_volume_client.create_volume(**params)['volume']
88
89 waiters.wait_for_volume_resource_status(self.admin_volume_client,
90 volume['id'], 'available')
91 self.consistencygroups_adm_client.wait_for_consistencygroup_status(
92 cg['id'], 'available')
93 self.assertEqual(cg_name, cg['name'])
80 self.consistencygroups_adm_client.wait_for_consistencygroup_status(
81 cg['id'], 'available')
82 self.assertEqual(cg_name, cg['name'])
83
84 # Create volume
85 vol_name = data_utils.rand_name("volume")
86 params = {'name': vol_name,
87 'volume_type': volume_type['id'],
88 'consistencygroup_id': cg['id'],
89 'size': CONF.volume.volume_size}
90
91 volume = self.admin_volume_client.create_volume(**params)['volume']
92 waiters.wait_for_volume_resource_status(self.admin_volume_client,
93 volume['id'], 'available')
9494
9595 # Get a given CG
9696 cg = self.consistencygroups_adm_client.show_consistencygroup(
121121 self.consistencygroups_adm_client.create_consistencygroup)
122122 cg = create_consistencygroup(volume_type['id'],
123123 name=cg_name)['consistencygroup']
124 vol_name = data_utils.rand_name("volume")
125 params = {'name': vol_name,
126 'volume_type': volume_type['id'],
127 'consistencygroup_id': cg['id'],
128 'size': CONF.volume.volume_size}
129
130 # Create volume
131 volume = self.admin_volume_client.create_volume(**params)['volume']
132 waiters.wait_for_volume_resource_status(self.admin_volume_client,
133 volume['id'], 'available')
134 self.consistencygroups_adm_client.wait_for_consistencygroup_status(
135 cg['id'], 'available')
136 self.assertEqual(cg_name, cg['name'])
124 self.consistencygroups_adm_client.wait_for_consistencygroup_status(
125 cg['id'], 'available')
126 self.assertEqual(cg_name, cg['name'])
127
128 # Create volume
129 vol_name = data_utils.rand_name("volume")
130 params = {'name': vol_name,
131 'volume_type': volume_type['id'],
132 'consistencygroup_id': cg['id'],
133 'size': CONF.volume.volume_size}
134 volume = self.admin_volume_client.create_volume(**params)['volume']
135 waiters.wait_for_volume_resource_status(self.admin_volume_client,
136 volume['id'], 'available')
137137
138138 # Create cgsnapshot
139139 cgsnapshot_name = data_utils.rand_name('cgsnapshot')
141141 self.consistencygroups_adm_client.create_cgsnapshot)
142142 cgsnapshot = create_cgsnapshot(cg['id'],
143143 name=cgsnapshot_name)['cgsnapshot']
144 self.consistencygroups_adm_client.wait_for_cgsnapshot_status(
145 cgsnapshot['id'], 'available')
146 self.assertEqual(cgsnapshot_name, cgsnapshot['name'])
144147 snapshots = self.os_admin.snapshots_v2_client.list_snapshots(
145148 detail=True)['snapshots']
146149 for snap in snapshots:
148151 waiters.wait_for_volume_resource_status(
149152 self.os_admin.snapshots_v2_client,
150153 snap['id'], 'available')
151 self.consistencygroups_adm_client.wait_for_cgsnapshot_status(
152 cgsnapshot['id'], 'available')
153 self.assertEqual(cgsnapshot_name, cgsnapshot['name'])
154154
155155 # Get a given CG snapshot
156156 cgsnapshot = self.consistencygroups_adm_client.show_cgsnapshot(
181181 self.consistencygroups_adm_client.create_consistencygroup)
182182 cg = create_consistencygroup(volume_type['id'],
183183 name=cg_name)['consistencygroup']
184 vol_name = data_utils.rand_name("volume")
185 params = {'name': vol_name,
186 'volume_type': volume_type['id'],
187 'consistencygroup_id': cg['id'],
188 'size': CONF.volume.volume_size}
189
190 # Create volume
191 volume = self.admin_volume_client.create_volume(**params)['volume']
192 waiters.wait_for_volume_resource_status(self.admin_volume_client,
193 volume['id'], 'available')
194 self.consistencygroups_adm_client.wait_for_consistencygroup_status(
195 cg['id'], 'available')
196 self.assertEqual(cg_name, cg['name'])
184 self.consistencygroups_adm_client.wait_for_consistencygroup_status(
185 cg['id'], 'available')
186 self.assertEqual(cg_name, cg['name'])
187
188 # Create volume
189 vol_name = data_utils.rand_name("volume")
190 params = {'name': vol_name,
191 'volume_type': volume_type['id'],
192 'consistencygroup_id': cg['id'],
193 'size': CONF.volume.volume_size}
194 volume = self.admin_volume_client.create_volume(**params)['volume']
195 waiters.wait_for_volume_resource_status(self.admin_volume_client,
196 volume['id'], 'available')
197197
198198 # Create cgsnapshot
199199 cgsnapshot_name = data_utils.rand_name('cgsnapshot')
201201 self.consistencygroups_adm_client.create_cgsnapshot)
202202 cgsnapshot = create_cgsnapshot(cg['id'],
203203 name=cgsnapshot_name)['cgsnapshot']
204 self.consistencygroups_adm_client.wait_for_cgsnapshot_status(
205 cgsnapshot['id'], 'available')
206 self.assertEqual(cgsnapshot_name, cgsnapshot['name'])
204207 snapshots = self.snapshots_client.list_snapshots(
205208 detail=True)['snapshots']
206209 for snap in snapshots:
207210 if volume['id'] == snap['volume_id']:
208211 waiters.wait_for_volume_resource_status(
209212 self.os_admin.snapshots_v2_client, snap['id'], 'available')
210 self.consistencygroups_adm_client.wait_for_cgsnapshot_status(
211 cgsnapshot['id'], 'available')
212 self.assertEqual(cgsnapshot_name, cgsnapshot['name'])
213213
214214 # Create CG from CG snapshot
215215 cg_name2 = data_utils.rand_name('CG_from_snap')
217217 self.consistencygroups_adm_client.create_consistencygroup_from_src)
218218 cg2 = create_consistencygroup2(cgsnapshot_id=cgsnapshot['id'],
219219 name=cg_name2)['consistencygroup']
220 self.consistencygroups_adm_client.wait_for_consistencygroup_status(
221 cg2['id'], 'available')
222 self.assertEqual(cg_name2, cg2['name'])
220223 vols = self.admin_volume_client.list_volumes(
221224 detail=True)['volumes']
222225 for vol in vols:
223226 if vol['consistencygroup_id'] == cg2['id']:
224227 waiters.wait_for_volume_resource_status(
225228 self.admin_volume_client, vol['id'], 'available')
226 self.consistencygroups_adm_client.wait_for_consistencygroup_status(
227 cg2['id'], 'available')
228 self.assertEqual(cg_name2, cg2['name'])
229229
230230 # Clean up
231231 self._delete_consistencygroup(cg2['id'])
246246 self.consistencygroups_adm_client.create_consistencygroup)
247247 cg = create_consistencygroup(volume_type['id'],
248248 name=cg_name)['consistencygroup']
249 vol_name = data_utils.rand_name("volume")
250 params = {'name': vol_name,
251 'volume_type': volume_type['id'],
252 'consistencygroup_id': cg['id'],
253 'size': CONF.volume.volume_size}
254
255 # Create volume
256 volume = self.admin_volume_client.create_volume(**params)['volume']
257 waiters.wait_for_volume_resource_status(self.admin_volume_client,
258 volume['id'], 'available')
259 self.consistencygroups_adm_client.wait_for_consistencygroup_status(
260 cg['id'], 'available')
261 self.assertEqual(cg_name, cg['name'])
249 self.consistencygroups_adm_client.wait_for_consistencygroup_status(
250 cg['id'], 'available')
251 self.assertEqual(cg_name, cg['name'])
252
253 # Create volume
254 vol_name = data_utils.rand_name("volume")
255 params = {'name': vol_name,
256 'volume_type': volume_type['id'],
257 'consistencygroup_id': cg['id'],
258 'size': CONF.volume.volume_size}
259 volume = self.admin_volume_client.create_volume(**params)['volume']
260 waiters.wait_for_volume_resource_status(self.admin_volume_client,
261 volume['id'], 'available')
262262
263263 # Create CG from CG
264264 cg_name2 = data_utils.rand_name('CG_from_cg')
266266 self.consistencygroups_adm_client.create_consistencygroup_from_src)
267267 cg2 = create_consistencygroup2(source_cgid=cg['id'],
268268 name=cg_name2)['consistencygroup']
269 self.consistencygroups_adm_client.wait_for_consistencygroup_status(
270 cg2['id'], 'available')
271 self.assertEqual(cg_name2, cg2['name'])
269272 vols = self.admin_volume_client.list_volumes(
270273 detail=True)['volumes']
271274 for vol in vols:
272275 if vol['consistencygroup_id'] == cg2['id']:
273276 waiters.wait_for_volume_resource_status(
274277 self.admin_volume_client, vol['id'], 'available')
275 self.consistencygroups_adm_client.wait_for_consistencygroup_status(
276 cg2['id'], 'available')
277 self.assertEqual(cg_name2, cg2['name'])
278278
279279 # Clean up
280280 self._delete_consistencygroup(cg2['id'])
5656
5757 return volume
5858
59 @decorators.idempotent_id('2d7e2e49-150e-4849-a18e-79f9777c9a96')
5960 def test_create_delete_unicode_volume_name(self):
6061 """Create a volume with a unicode name and view it."""
6162
6768 @testtools.skipUnless(CONF.volume_feature_enabled.snapshot,
6869 "Cinder volume snapshots are disabled")
6970 @decorators.related_bug('1393871')
71 @decorators.idempotent_id('332be44d-5418-4fb3-a8f0-a3587de6929f')
7072 def test_snapshot_create_volume_description_non_ascii_code(self):
7173 # Create a volume with non-ascii description
7274 description = u'\u05e7\u05d9\u05d9\u05e4\u05e9'
0 # TODO: Remove this file when tempest scenario manager becomes stable
1 # Copyright 2012 OpenStack Foundation
2 # Copyright 2013 IBM Corp.
3 # All Rights Reserved.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License"); you may
6 # not use this file except in compliance with the License. You may obtain
7 # a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
13 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
14 # License for the specific language governing permissions and limitations
15 # under the License.
16
17 import netaddr
18 from oslo_log import log
19 from oslo_serialization import jsonutils as json
20 from oslo_utils import netutils
21
22 from tempest.common import compute
23 from tempest.common import image as common_image
24 from tempest.common.utils.linux import remote_client
25 from tempest.common import waiters
26 from tempest import config
27 from tempest import exceptions
28 from tempest.lib.common import api_microversion_fixture
29 from tempest.lib.common import api_version_utils
30 from tempest.lib.common.utils import data_utils
31 from tempest.lib.common.utils import test_utils
32 from tempest.lib import exceptions as lib_exc
33 import tempest.test
34
35 CONF = config.CONF
36
37 LOG = log.getLogger(__name__)
38
39 LATEST_MICROVERSION = 'latest'
40
41
42 class ScenarioTest(tempest.test.BaseTestCase):
43 """Base class for scenario tests. Uses tempest's own clients. """
44
45 credentials = ['primary']
46
47 compute_min_microversion = None
48 compute_max_microversion = LATEST_MICROVERSION
49 volume_min_microversion = None
50 volume_max_microversion = LATEST_MICROVERSION
51 placement_min_microversion = None
52 placement_max_microversion = LATEST_MICROVERSION
53
54 @classmethod
55 def skip_checks(cls):
56 super(ScenarioTest, cls).skip_checks()
57 api_version_utils.check_skip_with_microversion(
58 cls.compute_min_microversion, cls.compute_max_microversion,
59 CONF.compute.min_microversion, CONF.compute.max_microversion)
60 api_version_utils.check_skip_with_microversion(
61 cls.volume_min_microversion, cls.volume_max_microversion,
62 CONF.volume.min_microversion, CONF.volume.max_microversion)
63 api_version_utils.check_skip_with_microversion(
64 cls.placement_min_microversion, cls.placement_max_microversion,
65 CONF.placement.min_microversion, CONF.placement.max_microversion)
66
67 @classmethod
68 def resource_setup(cls):
69 super(ScenarioTest, cls).resource_setup()
70 cls.compute_request_microversion = (
71 api_version_utils.select_request_microversion(
72 cls.compute_min_microversion,
73 CONF.compute.min_microversion))
74 cls.volume_request_microversion = (
75 api_version_utils.select_request_microversion(
76 cls.volume_min_microversion,
77 CONF.volume.min_microversion))
78 cls.placement_request_microversion = (
79 api_version_utils.select_request_microversion(
80 cls.placement_min_microversion,
81 CONF.placement.min_microversion))
82
83 def setUp(self):
84 super(ScenarioTest, self).setUp()
85 self.useFixture(api_microversion_fixture.APIMicroversionFixture(
86 compute_microversion=self.compute_request_microversion,
87 volume_microversion=self.volume_request_microversion,
88 placement_microversion=self.placement_request_microversion))
89
90 @classmethod
91 def setup_clients(cls):
92 super(ScenarioTest, cls).setup_clients()
93 # Clients (in alphabetical order)
94 cls.flavors_client = cls.os_primary.flavors_client
95 cls.compute_floating_ips_client = (
96 cls.os_primary.compute_floating_ips_client)
97 if CONF.service_available.glance:
98 # Check if glance v1 is available to determine which client to use.
99 if CONF.image_feature_enabled.api_v1:
100 cls.image_client = cls.os_primary.image_client
101 elif CONF.image_feature_enabled.api_v2:
102 cls.image_client = cls.os_primary.image_client_v2
103 else:
104 raise lib_exc.InvalidConfiguration(
105 'Either api_v1 or api_v2 must be True in '
106 '[image-feature-enabled].')
107 # Compute image client
108 cls.compute_images_client = cls.os_primary.compute_images_client
109 cls.keypairs_client = cls.os_primary.keypairs_client
110 # Nova security groups client
111 cls.compute_security_groups_client = (
112 cls.os_primary.compute_security_groups_client)
113 cls.compute_security_group_rules_client = (
114 cls.os_primary.compute_security_group_rules_client)
115 cls.servers_client = cls.os_primary.servers_client
116 cls.interface_client = cls.os_primary.interfaces_client
117 # Neutron network client
118 cls.networks_client = cls.os_primary.networks_client
119 cls.ports_client = cls.os_primary.ports_client
120 cls.routers_client = cls.os_primary.routers_client
121 cls.subnets_client = cls.os_primary.subnets_client
122 cls.floating_ips_client = cls.os_primary.floating_ips_client
123 cls.security_groups_client = cls.os_primary.security_groups_client
124 cls.security_group_rules_client = (
125 cls.os_primary.security_group_rules_client)
126 # Use the latest available volume clients
127 if CONF.service_available.cinder:
128 cls.volumes_client = cls.os_primary.volumes_client_latest
129 cls.snapshots_client = cls.os_primary.snapshots_client_latest
130 cls.backups_client = cls.os_primary.backups_client_latest
131
132 # ## Test functions library
133 #
134 # The create_[resource] functions only return body and discard the
135 # resp part which is not used in scenario tests
136
137 def create_keypair(self, client=None):
138 if not client:
139 client = self.keypairs_client
140 name = data_utils.rand_name(self.__class__.__name__)
141 # We don't need to create a keypair by pubkey in scenario
142 body = client.create_keypair(name=name)
143 self.addCleanup(client.delete_keypair, name)
144 return body['keypair']
145
146 def create_server(self, name=None, image_id=None, flavor=None,
147 validatable=False, wait_until='ACTIVE',
148 clients=None, **kwargs):
149 """Wrapper utility that returns a test server.
150
151 This wrapper utility calls the common create test server and
152 returns a test server. The purpose of this wrapper is to minimize
153 the impact on the code of the tests already using this
154 function.
155
156 :param **kwargs:
157 See extra parameters below
158
159 :Keyword Arguments:
160 * *vnic_type* (``string``) --
161 used when launching instances with pre-configured ports.
162 Examples:
163 normal: a traditional virtual port that is either attached
164 to a linux bridge or an openvswitch bridge on a
165 compute node.
166 direct: an SR-IOV port that is directly attached to a VM
167 macvtap: an SR-IOV port that is attached to a VM via a macvtap
168 device.
169 Defaults to ``CONF.network.port_vnic_type``.
170 * *port_profile* (``dict``) --
171 This attribute is a dictionary that can be used (with admin
172 credentials) to supply information influencing the binding of
173 the port.
174 example: port_profile = "capabilities:[switchdev]"
175 Defaults to ``CONF.network.port_profile``.
176 """
177
178 # NOTE(jlanoux): As a first step, ssh checks in the scenario
179 # tests need to be run regardless of the run_validation and
180 # validatable parameters and thus until the ssh validation job
181 # becomes voting in CI. The test resources management and IP
182 # association are taken care of in the scenario tests.
183 # Therefore, the validatable parameter is set to false in all
184 # those tests. In this way create_server just return a standard
185 # server and the scenario tests always perform ssh checks.
186
187 # Needed for the cross_tenant_traffic test:
188 if clients is None:
189 clients = self.os_primary
190
191 if name is None:
192 name = data_utils.rand_name(self.__class__.__name__ + "-server")
193
194 vnic_type = kwargs.pop('vnic_type', CONF.network.port_vnic_type)
195 profile = kwargs.pop('port_profile', CONF.network.port_profile)
196
197 # If vnic_type or profile are configured create port for
198 # every network
199 if vnic_type or profile:
200 ports = []
201 create_port_body = {}
202
203 if vnic_type:
204 create_port_body['binding:vnic_type'] = vnic_type
205
206 if profile:
207 create_port_body['binding:profile'] = profile
208
209 if kwargs:
210 # Convert security group names to security group ids
211 # to pass to create_port
212 if 'security_groups' in kwargs:
213 security_groups = \
214 clients.security_groups_client.list_security_groups(
215 ).get('security_groups')
216 sec_dict = dict([(s['name'], s['id'])
217 for s in security_groups])
218
219 sec_groups_names = [s['name'] for s in kwargs.pop(
220 'security_groups')]
221 security_groups_ids = [sec_dict[s]
222 for s in sec_groups_names]
223
224 if security_groups_ids:
225 create_port_body[
226 'security_groups'] = security_groups_ids
227 networks = kwargs.pop('networks', [])
228 else:
229 networks = []
230
231 # If there are no networks passed to us we look up
232 # for the project's private networks and create a port.
233 # The same behaviour as we would expect when passing
234 # the call to the clients with no networks
235 if not networks:
236 networks = clients.networks_client.list_networks(
237 **{'router:external': False, 'fields': 'id'})['networks']
238
239 # It's net['uuid'] if networks come from kwargs
240 # and net['id'] if they come from
241 # clients.networks_client.list_networks
242 for net in networks:
243 net_id = net.get('uuid', net.get('id'))
244 if 'port' not in net:
245 port = self.create_port(network_id=net_id,
246 client=clients.ports_client,
247 **create_port_body)
248 ports.append({'port': port['id']})
249 else:
250 ports.append({'port': net['port']})
251 if ports:
252 kwargs['networks'] = ports
253 self.ports = ports
254
255 tenant_network = self.get_tenant_network()
256
257 if CONF.compute.compute_volume_common_az:
258 kwargs.setdefault('availability_zone',
259 CONF.compute.compute_volume_common_az)
260
261 body, _ = compute.create_test_server(
262 clients,
263 tenant_network=tenant_network,
264 wait_until=wait_until,
265 name=name, flavor=flavor,
266 image_id=image_id, **kwargs)
267
268 self.addCleanup(waiters.wait_for_server_termination,
269 clients.servers_client, body['id'])
270 self.addCleanup(test_utils.call_and_ignore_notfound_exc,
271 clients.servers_client.delete_server, body['id'])
272 server = clients.servers_client.show_server(body['id'])['server']
273 return server
274
275 def create_volume(self, size=None, name=None, snapshot_id=None,
276 imageRef=None, volume_type=None):
277 if size is None:
278 size = CONF.volume.volume_size
279 if imageRef:
280 if CONF.image_feature_enabled.api_v1:
281 resp = self.image_client.check_image(imageRef)
282 image = common_image.get_image_meta_from_headers(resp)
283 else:
284 image = self.image_client.show_image(imageRef)
285 min_disk = image.get('min_disk')
286 size = max(size, min_disk)
287 if name is None:
288 name = data_utils.rand_name(self.__class__.__name__ + "-volume")
289 kwargs = {'display_name': name,
290 'snapshot_id': snapshot_id,
291 'imageRef': imageRef,
292 'volume_type': volume_type,
293 'size': size}
294
295 if CONF.compute.compute_volume_common_az:
296 kwargs.setdefault('availability_zone',
297 CONF.compute.compute_volume_common_az)
298
299 volume = self.volumes_client.create_volume(**kwargs)['volume']
300
301 self.addCleanup(self.volumes_client.wait_for_resource_deletion,
302 volume['id'])
303 self.addCleanup(test_utils.call_and_ignore_notfound_exc,
304 self.volumes_client.delete_volume, volume['id'])
305 self.assertEqual(name, volume['name'])
306 waiters.wait_for_volume_resource_status(self.volumes_client,
307 volume['id'], 'available')
308 # The volume retrieved on creation has a non-up-to-date status.
309 # Retrieval after it becomes active ensures correct details.
310 volume = self.volumes_client.show_volume(volume['id'])['volume']
311 return volume
312
313 def create_backup(self, volume_id, name=None, description=None,
314 force=False, snapshot_id=None, incremental=False,
315 container=None):
316
317 name = name or data_utils.rand_name(
318 self.__class__.__name__ + "-backup")
319 kwargs = {'name': name,
320 'description': description,
321 'force': force,
322 'snapshot_id': snapshot_id,
323 'incremental': incremental,
324 'container': container}
325 backup = self.backups_client.create_backup(volume_id=volume_id,
326 **kwargs)['backup']
327 self.addCleanup(self.backups_client.delete_backup, backup['id'])
328 waiters.wait_for_volume_resource_status(self.backups_client,
329 backup['id'], 'available')
330 return backup
331
332 def restore_backup(self, backup_id):
333 restore = self.backups_client.restore_backup(backup_id)['restore']
334 self.addCleanup(self.volumes_client.delete_volume,
335 restore['volume_id'])
336 waiters.wait_for_volume_resource_status(self.backups_client,
337 backup_id, 'available')
338 waiters.wait_for_volume_resource_status(self.volumes_client,
339 restore['volume_id'],
340 'available')
341 self.assertEqual(backup_id, restore['backup_id'])
342 return restore
343
344 def create_volume_snapshot(self, volume_id, name=None, description=None,
345 metadata=None, force=False):
346 name = name or data_utils.rand_name(
347 self.__class__.__name__ + '-snapshot')
348 snapshot = self.snapshots_client.create_snapshot(
349 volume_id=volume_id,
350 force=force,
351 display_name=name,
352 description=description,
353 metadata=metadata)['snapshot']
354 self.addCleanup(self.snapshots_client.wait_for_resource_deletion,
355 snapshot['id'])
356 self.addCleanup(self.snapshots_client.delete_snapshot, snapshot['id'])
357 waiters.wait_for_volume_resource_status(self.snapshots_client,
358 snapshot['id'], 'available')
359 snapshot = self.snapshots_client.show_snapshot(
360 snapshot['id'])['snapshot']
361 return snapshot
362
363 def _cleanup_volume_type(self, volume_type):
364 """Clean up a given volume type.
365
366 Ensuring all volumes associated to a type are first removed before
367 attempting to remove the type itself. This includes any image volume
368 cache volumes stored in a separate tenant to the original volumes
369 created from the type.
370 """
371 admin_volume_type_client = self.os_admin.volume_types_client_latest
372 admin_volumes_client = self.os_admin.volumes_client_latest
373 volumes = admin_volumes_client.list_volumes(
374 detail=True, params={'all_tenants': 1})['volumes']
375 type_name = volume_type['name']
376 for volume in [v for v in volumes if v['volume_type'] == type_name]:
377 test_utils.call_and_ignore_notfound_exc(
378 admin_volumes_client.delete_volume, volume['id'])
379 admin_volumes_client.wait_for_resource_deletion(volume['id'])
380 admin_volume_type_client.delete_volume_type(volume_type['id'])
381
382 def create_volume_type(self, client=None, name=None, backend_name=None):
383 if not client:
384 client = self.os_admin.volume_types_client_latest
385 if not name:
386 class_name = self.__class__.__name__
387 name = data_utils.rand_name(class_name + '-volume-type')
388 randomized_name = data_utils.rand_name('scenario-type-' + name)
389
390 LOG.debug("Creating a volume type: %s on backend %s",
391 randomized_name, backend_name)
392 extra_specs = {}
393 if backend_name:
394 extra_specs = {"volume_backend_name": backend_name}
395
396 volume_type = client.create_volume_type(
397 name=randomized_name, extra_specs=extra_specs)['volume_type']
398 self.addCleanup(self._cleanup_volume_type, volume_type)
399 return volume_type
400
401 def _create_loginable_secgroup_rule(self, secgroup_id=None):
402 _client = self.compute_security_groups_client
403 _client_rules = self.compute_security_group_rules_client
404 if secgroup_id is None:
405 sgs = _client.list_security_groups()['security_groups']
406 for sg in sgs:
407 if sg['name'] == 'default':
408 secgroup_id = sg['id']
409
410 # These rules are intended to permit inbound ssh and icmp
411 # traffic from all sources, so no group_id is provided.
412 # Setting a group_id would only permit traffic from ports
413 # belonging to the same security group.
414 rulesets = [
415 {
416 # ssh
417 'ip_protocol': 'tcp',
418 'from_port': 22,
419 'to_port': 22,
420 'cidr': '0.0.0.0/0',
421 },
422 {
423 # ping
424 'ip_protocol': 'icmp',
425 'from_port': -1,
426 'to_port': -1,
427 'cidr': '0.0.0.0/0',
428 }
429 ]
430 rules = list()
431 for ruleset in rulesets:
432 sg_rule = _client_rules.create_security_group_rule(
433 parent_group_id=secgroup_id, **ruleset)['security_group_rule']
434 rules.append(sg_rule)
435 return rules
436
437 def _create_security_group(self):
438 # Create security group
439 sg_name = data_utils.rand_name(self.__class__.__name__)
440 sg_desc = sg_name + " description"
441 secgroup = self.compute_security_groups_client.create_security_group(
442 name=sg_name, description=sg_desc)['security_group']
443 self.assertEqual(secgroup['name'], sg_name)
444 self.assertEqual(secgroup['description'], sg_desc)
445 self.addCleanup(
446 test_utils.call_and_ignore_notfound_exc,
447 self.compute_security_groups_client.delete_security_group,
448 secgroup['id'])
449
450 # Add rules to the security group
451 self._create_loginable_secgroup_rule(secgroup['id'])
452
453 return secgroup
454
455 def get_remote_client(self, ip_address, username=None, private_key=None,
456 server=None):
457 """Get a SSH client to a remote server
458
459 :param ip_address: the server floating or fixed IP address to use
460 for ssh validation
461 :param username: name of the Linux account on the remote server
462 :param private_key: the SSH private key to use
463 :param server: server dict, used for debugging purposes
464 :return: a RemoteClient object
465 """
466
467 if username is None:
468 username = CONF.validation.image_ssh_user
469 # Set this with 'keypair' or others to log in with keypair or
470 # username/password.
471 if CONF.validation.auth_method == 'keypair':
472 password = None
473 if private_key is None:
474 private_key = self.keypair['private_key']
475 else:
476 password = CONF.validation.image_ssh_password
477 private_key = None
478 linux_client = remote_client.RemoteClient(
479 ip_address, username, pkey=private_key, password=password,
480 server=server, servers_client=self.servers_client)
481 linux_client.validate_authentication()
482 return linux_client
483
484 def _log_net_info(self, exc):
485 # network debug is called as part of ssh init
486 if not isinstance(exc, lib_exc.SSHTimeout):
487 LOG.debug('Network information on a devstack host')
488
    def create_server_snapshot(self, server, name=None):
        """Snapshot a server and wait until the image is active.

        Creates a compute-API snapshot of ``server``, waits for the
        resulting glance image to become active, and registers cleanups
        for the image (and, for volume-backed servers, the underlying
        volume snapshot referenced in the image's block device mapping).

        :param server: server dict to snapshot (uses 'id' and 'name')
        :param name: image name; a random one is generated when omitted
        :returns: the snapshot image (glance metadata dict)
        """
        # Glance client
        _image_client = self.image_client
        # Compute client
        _images_client = self.compute_images_client
        if name is None:
            name = data_utils.rand_name(self.__class__.__name__ + 'snapshot')
        LOG.debug("Creating a snapshot image for server: %s", server['name'])
        image = _images_client.create_image(server['id'], name=name)
        # The image id is only exposed via the Location response header.
        image_id = image.response['location'].split('images/')[1]
        waiters.wait_for_image_status(_image_client, image_id, 'active')

        # Cleanups run in reverse order: delete first, then wait for it.
        self.addCleanup(_image_client.wait_for_resource_deletion,
                        image_id)
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        _image_client.delete_image, image_id)

        if CONF.image_feature_enabled.api_v1:
            # In glance v1 the additional properties are stored in the headers.
            resp = _image_client.check_image(image_id)
            snapshot_image = common_image.get_image_meta_from_headers(resp)
            image_props = snapshot_image.get('properties', {})
        else:
            # In glance v2 the additional properties are flattened.
            snapshot_image = _image_client.show_image(image_id)
            image_props = snapshot_image

        # For volume-backed servers the image carries a JSON-encoded
        # block device mapping pointing at a cinder snapshot; make sure
        # that snapshot is cleaned up and reaches 'available'.
        bdm = image_props.get('block_device_mapping')
        if bdm:
            bdm = json.loads(bdm)
            if bdm and 'snapshot_id' in bdm[0]:
                snapshot_id = bdm[0]['snapshot_id']
                self.addCleanup(
                    self.snapshots_client.wait_for_resource_deletion,
                    snapshot_id)
                self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                                self.snapshots_client.delete_snapshot,
                                snapshot_id)
                waiters.wait_for_volume_resource_status(self.snapshots_client,
                                                        snapshot_id,
                                                        'available')
        image_name = snapshot_image['name']
        self.assertEqual(name, image_name)
        LOG.debug("Created snapshot image %s for server %s",
                  image_name, server['name'])
        return snapshot_image
535
536 def nova_volume_attach(self, server, volume_to_attach):
537 volume = self.servers_client.attach_volume(
538 server['id'], volumeId=volume_to_attach['id'], device='/dev/%s'
539 % CONF.compute.volume_device_name)['volumeAttachment']
540 self.assertEqual(volume_to_attach['id'], volume['id'])
541 waiters.wait_for_volume_resource_status(self.volumes_client,
542 volume['id'], 'in-use')
543
544 # Return the updated volume after the attachment
545 return self.volumes_client.show_volume(volume['id'])['volume']
546
547 def nova_volume_detach(self, server, volume):
548 self.servers_client.detach_volume(server['id'], volume['id'])
549 waiters.wait_for_volume_resource_status(self.volumes_client,
550 volume['id'], 'available')
551
552 def check_vm_connectivity(self, ip_address,
553 username=None,
554 private_key=None,
555 should_connect=True,
556 extra_msg="",
557 server=None,
558 mtu=None):
559 """Check server connectivity
560
561 :param ip_address: server to test against
562 :param username: server's ssh username
563 :param private_key: server's ssh private key to be used
564 :param should_connect: True/False indicates positive/negative test
565 positive - attempt ping and ssh
566 negative - attempt ping and fail if succeed
567 :param extra_msg: Message to help with debugging if ``ping_ip_address``
568 fails
569 :param server: The server whose console to log for debugging
570 :param mtu: network MTU to use for connectivity validation
571
572 :raises: AssertError if the result of the connectivity check does
573 not match the value of the should_connect param
574 """
575 LOG.debug('checking network connections to IP %s with user: %s',
576 ip_address, username)
577 if should_connect:
578 msg = "Timed out waiting for %s to become reachable" % ip_address
579 else:
580 msg = "ip address %s is reachable" % ip_address
581 if extra_msg:
582 msg = "%s\n%s" % (extra_msg, msg)
583 self.assertTrue(self.ping_ip_address(ip_address,
584 should_succeed=should_connect,
585 mtu=mtu, server=server),
586 msg=msg)
587 if should_connect:
588 # no need to check ssh for negative connectivity
589 try:
590 self.get_remote_client(ip_address, username, private_key,
591 server=server)
592 except Exception:
593 if not extra_msg:
594 extra_msg = 'Failed to ssh to %s' % ip_address
595 LOG.exception(extra_msg)
596 raise
597
598 def create_floating_ip(self, thing, pool_name=None):
599 """Create a floating IP and associates to a server on Nova"""
600
601 if not pool_name:
602 pool_name = CONF.network.floating_network_name
603 floating_ip = (self.compute_floating_ips_client.
604 create_floating_ip(pool=pool_name)['floating_ip'])
605 self.addCleanup(test_utils.call_and_ignore_notfound_exc,
606 self.compute_floating_ips_client.delete_floating_ip,
607 floating_ip['id'])
608 self.compute_floating_ips_client.associate_floating_ip_to_server(
609 floating_ip['ip'], thing['id'])
610 return floating_ip
611
612 def create_timestamp(self, ip_address, dev_name=None, mount_path='/mnt',
613 private_key=None, server=None):
614 ssh_client = self.get_remote_client(ip_address,
615 private_key=private_key,
616 server=server)
617 if dev_name is not None:
618 ssh_client.make_fs(dev_name)
619 ssh_client.exec_command('sudo mount /dev/%s %s' % (dev_name,
620 mount_path))
621 cmd_timestamp = 'sudo sh -c "date > %s/timestamp; sync"' % mount_path
622 ssh_client.exec_command(cmd_timestamp)
623 timestamp = ssh_client.exec_command('sudo cat %s/timestamp'
624 % mount_path)
625 if dev_name is not None:
626 ssh_client.exec_command('sudo umount %s' % mount_path)
627 return timestamp
628
629 def get_timestamp(self, ip_address, dev_name=None, mount_path='/mnt',
630 private_key=None, server=None):
631 ssh_client = self.get_remote_client(ip_address,
632 private_key=private_key,
633 server=server)
634 if dev_name is not None:
635 ssh_client.mount(dev_name, mount_path)
636 timestamp = ssh_client.exec_command('sudo cat %s/timestamp'
637 % mount_path)
638 if dev_name is not None:
639 ssh_client.exec_command('sudo umount %s' % mount_path)
640 return timestamp
641
642 def get_server_ip(self, server):
643 """Get the server fixed or floating IP.
644
645 Based on the configuration we're in, return a correct ip
646 address for validating that a guest is up.
647 """
648 if CONF.validation.connect_method == 'floating':
649 # The tests calling this method don't have a floating IP
650 # and can't make use of the validation resources. So the
651 # method is creating the floating IP there.
652 return self.create_floating_ip(server)['ip']
653 elif CONF.validation.connect_method == 'fixed':
654 # Determine the network name to look for based on config or creds
655 # provider network resources.
656 if CONF.validation.network_for_ssh:
657 addresses = server['addresses'][
658 CONF.validation.network_for_ssh]
659 else:
660 network = self.get_tenant_network()
661 addresses = (server['addresses'][network['name']]
662 if network else [])
663 for address in addresses:
664 if (address['version'] == CONF.validation.ip_version_for_ssh and # noqa
665 address['OS-EXT-IPS:type'] == 'fixed'):
666 return address['addr']
667 raise exceptions.ServerUnreachable(server_id=server['id'])
668 else:
669 raise lib_exc.InvalidConfiguration()
670
671 @classmethod
672 def get_host_for_server(cls, server_id):
673 server_details = cls.os_admin.servers_client.show_server(server_id)
674 return server_details['server']['OS-EXT-SRV-ATTR:host']
675
676 def _get_bdm(self, source_id, source_type, delete_on_termination=False):
677 bd_map_v2 = [{
678 'uuid': source_id,
679 'source_type': source_type,
680 'destination_type': 'volume',
681 'boot_index': 0,
682 'delete_on_termination': delete_on_termination}]
683 return {'block_device_mapping_v2': bd_map_v2}
684
685 def boot_instance_from_resource(self, source_id,
686 source_type,
687 keypair=None,
688 security_group=None,
689 delete_on_termination=False,
690 name=None):
691 create_kwargs = dict()
692 if keypair:
693 create_kwargs['key_name'] = keypair['name']
694 if security_group:
695 create_kwargs['security_groups'] = [
696 {'name': security_group['name']}]
697 create_kwargs.update(self._get_bdm(
698 source_id,
699 source_type,
700 delete_on_termination=delete_on_termination))
701 if name:
702 create_kwargs['name'] = name
703
704 return self.create_server(image_id='', **create_kwargs)
705
706 def create_volume_from_image(self):
707 img_uuid = CONF.compute.image_ref
708 vol_name = data_utils.rand_name(
709 self.__class__.__name__ + '-volume-origin')
710 return self.create_volume(name=vol_name, imageRef=img_uuid)
711
712
class NetworkScenarioTest(ScenarioTest):
    """Base class for network scenario tests.

    This class provide helpers for network scenario tests, using the neutron
    API. Helpers from ancestor which use the nova network API are overridden
    with the neutron API.

    This Class also enforces using Neutron instead of novanetwork.
    Subclassed tests will be skipped if Neutron is not enabled

    """

    credentials = ['primary', 'admin']

    @classmethod
    def skip_checks(cls):
        super(NetworkScenarioTest, cls).skip_checks()
        if not CONF.service_available.neutron:
            raise cls.skipException('Neutron not available')

    def _create_network(self, networks_client=None,
                        tenant_id=None,
                        namestart='network-smoke-',
                        port_security_enabled=True, **net_dict):
        """Create a randomly named tenant network and register cleanup.

        :param networks_client: client to use; defaults to the primary one
        :param tenant_id: tenant owning the network; defaults to the client's
        :param namestart: prefix for the random network name
        :param port_security_enabled: applied only when the deployment
            advertises the port-security extension
        :param net_dict: extra attributes passed through to create_network
        :returns: the created network dict
        """
        if not networks_client:
            networks_client = self.networks_client
        if not tenant_id:
            tenant_id = networks_client.tenant_id
        name = data_utils.rand_name(namestart)
        network_kwargs = dict(name=name, tenant_id=tenant_id)
        if net_dict:
            network_kwargs.update(net_dict)
        # Neutron disables port security by default so we have to check the
        # config before trying to create the network with port_security_enabled
        if CONF.network_feature_enabled.port_security:
            network_kwargs['port_security_enabled'] = port_security_enabled
        result = networks_client.create_network(**network_kwargs)
        network = result['network']

        self.assertEqual(network['name'], name)
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        networks_client.delete_network,
                        network['id'])
        return network

    def create_subnet(self, network, subnets_client=None,
                      namestart='subnet-smoke', **kwargs):
        """Create a subnet for the given network

        within the cidr block configured for tenant networks.
        """
        if not subnets_client:
            subnets_client = self.subnets_client

        def cidr_in_use(cidr, tenant_id):
            """Check cidr existence

            :returns: True if subnet with cidr already exist in tenant
                  False else
            """
            # Admin credentials are required to list subnets across the
            # whole tenant, not just those owned by the primary creds.
            cidr_in_use = self.os_admin.subnets_client.list_subnets(
                tenant_id=tenant_id, cidr=cidr)['subnets']
            return len(cidr_in_use) != 0

        ip_version = kwargs.pop('ip_version', 4)

        if ip_version == 6:
            tenant_cidr = netaddr.IPNetwork(
                CONF.network.project_network_v6_cidr)
            num_bits = CONF.network.project_network_v6_mask_bits
        else:
            tenant_cidr = netaddr.IPNetwork(CONF.network.project_network_cidr)
            num_bits = CONF.network.project_network_mask_bits

        result = None
        str_cidr = None
        # Repeatedly attempt subnet creation with sequential cidr
        # blocks until an unallocated block is found.
        for subnet_cidr in tenant_cidr.subnet(num_bits):
            str_cidr = str(subnet_cidr)
            if cidr_in_use(str_cidr, tenant_id=network['tenant_id']):
                continue

            subnet = dict(
                name=data_utils.rand_name(namestart),
                network_id=network['id'],
                tenant_id=network['tenant_id'],
                cidr=str_cidr,
                ip_version=ip_version,
                **kwargs
            )
            try:
                result = subnets_client.create_subnet(**subnet)
                break
            except lib_exc.Conflict as e:
                # A concurrent allocation may have taken this cidr between
                # the in-use check and the create call; try the next block.
                is_overlapping_cidr = 'overlaps with another subnet' in str(e)
                if not is_overlapping_cidr:
                    raise
        self.assertIsNotNone(result, 'Unable to allocate tenant network')

        subnet = result['subnet']
        self.assertEqual(subnet['cidr'], str_cidr)

        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        subnets_client.delete_subnet, subnet['id'])

        return subnet

    def _get_server_port_id_and_ip4(self, server, ip_addr=None):
        """Return the (port_id, ipv4_address) pair of the server's port.

        Fails the test unless exactly one active IPv4 address is found.

        :param server: server dict whose ports are listed (admin view)
        :param ip_addr: optional fixed IP to filter the port listing by
        """
        if ip_addr:
            ports = self.os_admin.ports_client.list_ports(
                device_id=server['id'],
                fixed_ips='ip_address=%s' % ip_addr)['ports']
        else:
            ports = self.os_admin.ports_client.list_ports(
                device_id=server['id'])['ports']
        # A port can have more than one IP address in some cases.
        # If the network is dual-stack (IPv4 + IPv6), this port is associated
        # with 2 subnets
        p_status = ['ACTIVE']
        # NOTE(vsaienko) With Ironic, instances live on separate hardware
        # servers. Neutron does not bind ports for Ironic instances, as a
        # result the port remains in the DOWN state.
        # TODO(vsaienko) remove once bug: #1599836 is resolved.
        if getattr(CONF.service_available, 'ironic', False):
            p_status.append('DOWN')
        port_map = [(p["id"], fxip["ip_address"])
                    for p in ports
                    for fxip in p["fixed_ips"]
                    if (netutils.is_valid_ipv4(fxip["ip_address"]) and
                        p['status'] in p_status)]
        inactive = [p for p in ports if p['status'] != 'ACTIVE']
        if inactive:
            LOG.warning("Instance has ports that are not ACTIVE: %s", inactive)

        self.assertNotEmpty(port_map,
                            "No IPv4 addresses found in: %s" % ports)
        self.assertEqual(len(port_map), 1,
                         "Found multiple IPv4 addresses: %s. "
                         "Unable to determine which port to target."
                         % port_map)
        return port_map[0]

    def _get_network_by_name(self, network_name):
        """Look up a network by name via the admin client; fail if absent."""
        net = self.os_admin.networks_client.list_networks(
            name=network_name)['networks']
        self.assertNotEmpty(net,
                            "Unable to get network by name: %s" % network_name)
        return net[0]

    def create_floating_ip(self, thing, external_network_id=None,
                           port_id=None, client=None):
        """Create a floating IP and associates to a resource/port on Neutron"""
        if not external_network_id:
            external_network_id = CONF.network.public_network_id
        if not client:
            client = self.floating_ips_client
        if not port_id:
            # Derive both the port and its fixed IPv4 from the server.
            port_id, ip4 = self._get_server_port_id_and_ip4(thing)
        else:
            ip4 = None
        result = client.create_floatingip(
            floating_network_id=external_network_id,
            port_id=port_id,
            tenant_id=thing['tenant_id'],
            fixed_ip_address=ip4
        )
        floating_ip = result['floatingip']
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        client.delete_floatingip,
                        floating_ip['id'])
        return floating_ip

    def check_floating_ip_status(self, floating_ip, status):
        """Verifies floatingip reaches the given status

        :param dict floating_ip: floating IP dict to check status
        :param status: target status
        :raises: AssertionError if status doesn't match
        """
        floatingip_id = floating_ip['id']

        def refresh():
            # Poll helper: True once the floating IP reports the status.
            result = (self.floating_ips_client.
                      show_floatingip(floatingip_id)['floatingip'])
            return status == result['status']

        if not test_utils.call_until_true(refresh,
                                          CONF.network.build_timeout,
                                          CONF.network.build_interval):
            # Timed out: re-fetch once more to produce a helpful failure.
            floating_ip = self.floating_ips_client.show_floatingip(
                floatingip_id)['floatingip']
            self.assertEqual(status, floating_ip['status'],
                             message="FloatingIP: {fp} is at status: {cst}. "
                                     "failed to reach status: {st}"
                             .format(fp=floating_ip, cst=floating_ip['status'],
                                     st=status))
        LOG.info("FloatingIP: {fp} is at status: {st}"
                 .format(fp=floating_ip, st=status))

    def _create_security_group(self, security_group_rules_client=None,
                               tenant_id=None,
                               namestart='secgroup-smoke',
                               security_groups_client=None):
        """Create a security group with loginable (ssh/ping) rules.

        :returns: the created security group dict
        """
        if security_group_rules_client is None:
            security_group_rules_client = self.security_group_rules_client
        if security_groups_client is None:
            security_groups_client = self.security_groups_client
        if tenant_id is None:
            tenant_id = security_groups_client.tenant_id
        secgroup = self._create_empty_security_group(
            namestart=namestart, client=security_groups_client,
            tenant_id=tenant_id)

        # Add rules to the security group
        rules = self._create_loginable_secgroup_rule(
            security_group_rules_client=security_group_rules_client,
            secgroup=secgroup,
            security_groups_client=security_groups_client)
        for rule in rules:
            self.assertEqual(tenant_id, rule['tenant_id'])
            self.assertEqual(secgroup['id'], rule['security_group_id'])
        return secgroup

    def _create_empty_security_group(self, client=None, tenant_id=None,
                                     namestart='secgroup-smoke'):
        """Create a security group without rules.

        Default rules will be created:
         - IPv4 egress to any
         - IPv6 egress to any

        :param tenant_id: secgroup will be created in this tenant
        :returns: the created security group
        """
        if client is None:
            client = self.security_groups_client
        if not tenant_id:
            tenant_id = client.tenant_id
        sg_name = data_utils.rand_name(namestart)
        sg_desc = sg_name + " description"
        sg_dict = dict(name=sg_name,
                       description=sg_desc)
        sg_dict['tenant_id'] = tenant_id
        result = client.create_security_group(**sg_dict)

        secgroup = result['security_group']
        self.assertEqual(secgroup['name'], sg_name)
        self.assertEqual(tenant_id, secgroup['tenant_id'])
        self.assertEqual(secgroup['description'], sg_desc)

        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        client.delete_security_group, secgroup['id'])
        return secgroup

    def _create_security_group_rule(self, secgroup=None,
                                    sec_group_rules_client=None,
                                    tenant_id=None,
                                    security_groups_client=None, **kwargs):
        """Create a rule from a dictionary of rule parameters.

        Create a rule in a secgroup. if secgroup not defined will search for
        default secgroup in tenant_id.

        :param secgroup: the security group.
        :param tenant_id: if secgroup not passed -- the tenant in which to
            search for default secgroup
        :param kwargs: a dictionary containing rule parameters:
            for example, to allow incoming ssh:
            rule = {
                    direction: 'ingress'
                    protocol:'tcp',
                    port_range_min: 22,
                    port_range_max: 22
                    }
        """
        if sec_group_rules_client is None:
            sec_group_rules_client = self.security_group_rules_client
        if security_groups_client is None:
            security_groups_client = self.security_groups_client
        if not tenant_id:
            tenant_id = security_groups_client.tenant_id
        if secgroup is None:
            # Get default secgroup for tenant_id
            default_secgroups = security_groups_client.list_security_groups(
                name='default', tenant_id=tenant_id)['security_groups']
            msg = "No default security group for tenant %s." % (tenant_id)
            self.assertNotEmpty(default_secgroups, msg)
            secgroup = default_secgroups[0]

        ruleset = dict(security_group_id=secgroup['id'],
                       tenant_id=secgroup['tenant_id'])
        ruleset.update(kwargs)

        sg_rule = sec_group_rules_client.create_security_group_rule(**ruleset)
        sg_rule = sg_rule['security_group_rule']

        self.assertEqual(secgroup['tenant_id'], sg_rule['tenant_id'])
        self.assertEqual(secgroup['id'], sg_rule['security_group_id'])

        return sg_rule

    def _create_loginable_secgroup_rule(self, security_group_rules_client=None,
                                        secgroup=None,
                                        security_groups_client=None):
        """Create loginable security group rule

        This function will create:
        1. egress and ingress tcp port 22 allow rule in order to allow ssh
        access for ipv4.
        2. egress and ingress ipv6 icmp allow rule, in order to allow icmpv6.
        3. egress and ingress ipv4 icmp allow rule, in order to allow icmpv4.
        """

        if security_group_rules_client is None:
            security_group_rules_client = self.security_group_rules_client
        if security_groups_client is None:
            security_groups_client = self.security_groups_client
        rules = []
        rulesets = [
            dict(
                # ssh
                protocol='tcp',
                port_range_min=22,
                port_range_max=22,
            ),
            dict(
                # ping
                protocol='icmp',
            ),
            dict(
                # ipv6-icmp for ping6
                protocol='icmp',
                ethertype='IPv6',
            )
        ]
        sec_group_rules_client = security_group_rules_client
        for ruleset in rulesets:
            for r_direction in ['ingress', 'egress']:
                ruleset['direction'] = r_direction
                try:
                    sg_rule = self._create_security_group_rule(
                        sec_group_rules_client=sec_group_rules_client,
                        secgroup=secgroup,
                        security_groups_client=security_groups_client,
                        **ruleset)
                except lib_exc.Conflict as ex:
                    # if rule already exist - skip rule and continue
                    msg = 'Security group rule already exists'
                    if msg not in ex._error_string:
                        raise ex
                else:
                    self.assertEqual(r_direction, sg_rule['direction'])
                    rules.append(sg_rule)

        return rules
1069
1070
class EncryptionScenarioTest(ScenarioTest):
    """Base class for encryption scenario tests"""

    credentials = ['primary', 'admin']

    @classmethod
    def setup_clients(cls):
        """Set up admin volume-type and encryption-type clients."""
        super(EncryptionScenarioTest, cls).setup_clients()
        cls.admin_volume_types_client = cls.os_admin.volume_types_client_latest
        cls.admin_encryption_types_client =\
            cls.os_admin.encryption_types_client_latest

    def create_encryption_type(self, client=None, type_id=None, provider=None,
                               key_size=None, cipher=None,
                               control_location=None):
        """Create an encryption type for a volume type.

        :param client: encryption types client; defaults to the admin one
        :param type_id: volume type id; a new volume type is created
            when omitted
        :param provider: encryption provider for the type
        :param key_size: encryption key size in bits
        :param cipher: cipher to use, e.g. 'aes-xts-plain64'
        :param control_location: where encryption is handled,
            e.g. 'front-end'
        """
        if not client:
            client = self.admin_encryption_types_client
        if not type_id:
            volume_type = self.create_volume_type()
            type_id = volume_type['id']
        LOG.debug("Creating an encryption type for volume type: %s", type_id)
        client.create_encryption_type(
            type_id, provider=provider, key_size=key_size, cipher=cipher,
            control_location=control_location)

    def create_encrypted_volume(self, encryption_provider, volume_type,
                                key_size=256, cipher='aes-xts-plain64',
                                control_location='front-end'):
        """Create a new volume type with encryption and a volume of it.

        :param encryption_provider: encryption provider for the type
        :param volume_type: name for the new volume type
        :returns: the created encrypted volume
        """
        volume_type = self.create_volume_type(name=volume_type)
        self.create_encryption_type(type_id=volume_type['id'],
                                    provider=encryption_provider,
                                    key_size=key_size,
                                    cipher=cipher,
                                    control_location=control_location)
        return self.create_volume(volume_type=volume_type['name'])
0 # Copyright 2020 Red Hat, Inc.
1 # All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"); you may
4 # not use this file except in compliance with the License. You may obtain
5 # a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12 # License for the specific language governing permissions and limitations
13 # under the License.
14
15 from tempest.common import utils
16 from tempest.lib import decorators
17
18 from cinder_tempest_plugin.scenario import manager
19
20
class SnapshotDataIntegrityTests(manager.ScenarioTest):
    """Verify that volume snapshots faithfully preserve written data.

    A volume-booted instance writes files between snapshots; each
    snapshot is then restored into a new volume/instance and the file
    count plus md5 digests are compared against what was written.
    """

    def setUp(self):
        super(SnapshotDataIntegrityTests, self).setUp()
        self.keypair = self.create_keypair()
        self.security_group = self._create_security_group()

    def _get_file_md5(self, ip_address, filename, mount_path='/mnt',
                      private_key=None, server=None):
        """Return the md5 digest (first 32 chars) of a file on the guest."""
        ssh_client = self.get_remote_client(ip_address,
                                            private_key=private_key,
                                            server=server)

        md5_sum = ssh_client.exec_command(
            'sudo md5sum %s/%s|cut -c 1-32' % (mount_path, filename))
        return md5_sum

    def _count_files(self, ip_address, mount_path='/mnt', private_key=None,
                     server=None):
        """Count the files under mount_path (``ls -l`` header excluded)."""
        ssh_client = self.get_remote_client(ip_address,
                                            private_key=private_key,
                                            server=server)
        count = ssh_client.exec_command('sudo ls -l %s | wc -l' % mount_path)
        # 'ls -l' prints a 'total' line first, so subtract it.
        return int(count) - 1

    def _launch_instance_from_snapshot(self, snap):
        """Restore a snapshot into a new volume and boot a server from it."""
        volume_snap = self.create_volume(snapshot_id=snap['id'],
                                         size=snap['size'])

        server_snap = self.boot_instance_from_resource(
            source_id=volume_snap['id'],
            source_type='volume',
            keypair=self.keypair,
            security_group=self.security_group)

        return server_snap

    def create_md5_new_file(self, ip_address, filename, mount_path='/mnt',
                            private_key=None, server=None):
        """Write 100KiB of random data to a new file and return its md5."""
        ssh_client = self.get_remote_client(ip_address,
                                            private_key=private_key,
                                            server=server)

        ssh_client.exec_command(
            'sudo dd bs=1024 count=100 if=/dev/urandom of=/%s/%s' %
            (mount_path, filename))
        md5 = ssh_client.exec_command(
            'sudo md5sum -b %s/%s|cut -c 1-32' % (mount_path, filename))
        # Flush to disk so a subsequent snapshot captures the data.
        ssh_client.exec_command('sudo sync')
        return md5

    def get_md5_from_file(self, instance, filename):
        """Return (file count, md5 of ``filename``) read from ``instance``."""

        instance_ip = self.get_server_ip(instance)

        md5_sum = self._get_file_md5(instance_ip, filename=filename,
                                     private_key=self.keypair['private_key'],
                                     server=instance)
        count = self._count_files(instance_ip,
                                  private_key=self.keypair['private_key'],
                                  server=instance)
        return count, md5_sum

    @decorators.idempotent_id('ff10644e-5a70-4a9f-9801-8204bb81fb61')
    @utils.services('compute', 'volume', 'image', 'network')
    def test_snapshot_data_integrity(self):
        """This test checks the data integrity after creating and restoring

        snapshots. The procedure is as follows:

        1) create a volume from image
        2) Boot an instance from the volume
        3) create file on vm and write data into it
        4) create snapshot
        5) repeat 3 and 4 two more times (simply creating 3 snapshots)

        Now restore the snapshots one by one into volume, create instances
        from it and check the number of files and file content at each
        point when snapshot was created.
        """

        # Create a volume from image
        volume = self.create_volume_from_image()

        # create an instance from bootable volume
        server = self.boot_instance_from_resource(
            source_id=volume['id'],
            source_type='volume',
            keypair=self.keypair,
            security_group=self.security_group)

        instance_ip = self.get_server_ip(server)

        # Write data to volume
        # NOTE: pass the server dict (not its IP) so console logging on
        # ssh failure works; the original code passed server=instance_ip.
        file1_md5 = self.create_md5_new_file(
            instance_ip, filename="file1",
            private_key=self.keypair['private_key'],
            server=server)

        # Create first snapshot
        snapshot1 = self.create_volume_snapshot(volume['id'], force=True)

        # Write data to volume
        file2_md5 = self.create_md5_new_file(
            instance_ip, filename="file2",
            private_key=self.keypair['private_key'],
            server=server)

        # Create second snapshot
        snapshot2 = self.create_volume_snapshot(volume['id'], force=True)

        # Write data to volume
        file3_md5 = self.create_md5_new_file(
            instance_ip, filename="file3",
            private_key=self.keypair['private_key'],
            server=server)

        # Create third snapshot
        snapshot3 = self.create_volume_snapshot(volume['id'], force=True)

        # Create volume, instance and check file and contents for snap1
        instance_1 = self._launch_instance_from_snapshot(snapshot1)
        count_snap_1, md5_file_1 = self.get_md5_from_file(instance_1,
                                                          'file1')

        self.assertEqual(count_snap_1, 1)
        self.assertEqual(file1_md5, md5_file_1)

        # Create volume, instance and check file and contents for snap2
        instance_2 = self._launch_instance_from_snapshot(snapshot2)
        count_snap_2, md5_file_2 = self.get_md5_from_file(instance_2,
                                                          'file2')

        self.assertEqual(count_snap_2, 2)
        self.assertEqual(file2_md5, md5_file_2)

        # Create volume, instance and check file and contents for snap3
        instance_3 = self._launch_instance_from_snapshot(snapshot3)
        count_snap_3, md5_file_3 = self.get_md5_from_file(instance_3,
                                                          'file3')

        self.assertEqual(count_snap_3, 3)
        self.assertEqual(file3_md5, md5_file_3)
0 # Licensed under the Apache License, Version 2.0 (the "License"); you may
1 # not use this file except in compliance with the License. You may obtain
2 # a copy of the License at
3 #
4 # http://www.apache.org/licenses/LICENSE-2.0
5 #
6 # Unless required by applicable law or agreed to in writing, software
7 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
8 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
9 # License for the specific language governing permissions and limitations
10 # under the License.
11
12 from tempest.common import utils
13 from tempest.common import waiters
14 from tempest import config
15 from tempest.lib.common.utils import data_utils
16 from tempest.lib import decorators
17
18 from cinder_tempest_plugin.scenario import manager
19
20 CONF = config.CONF
21
22
class TestEncryptedCinderVolumes(manager.EncryptionScenarioTest,
                                 manager.ScenarioTest):
    """Scenario tests for cloned encrypted volumes (attach and boot)."""

    @classmethod
    def skip_checks(cls):
        super(TestEncryptedCinderVolumes, cls).skip_checks()
        if not CONF.compute_feature_enabled.attach_encrypted_volume:
            raise cls.skipException('Encrypted volume attach is not supported')

    # NOTE: the previous no-op resource_setup/resource_cleanup overrides
    # (which only called super) have been removed; the base class versions
    # are inherited unchanged.

    def launch_instance(self):
        """Boot a server with a freshly created keypair."""
        keypair = self.create_keypair()

        return self.create_server(key_name=keypair['name'])

    def attach_detach_volume(self, server, volume):
        """Attach a volume to a server, then detach it again."""
        attached_volume = self.nova_volume_attach(server, volume)
        self.nova_volume_detach(server, attached_volume)

    def _delete_server(self, server):
        """Delete a server and wait for its termination."""
        self.servers_client.delete_server(server['id'])
        waiters.wait_for_server_termination(self.servers_client, server['id'])

    def create_encrypted_volume_from_image(self, encryption_provider,
                                           volume_type='luks',
                                           key_size=256,
                                           cipher='aes-xts-plain64',
                                           control_location='front-end',
                                           **kwargs):
        """Create an encrypted volume from image.

        :param image_id: ID of the image to create volume from,
            CONF.compute.image_ref by default
        :param name: name of the volume,
            '$classname-volume-origin' by default
        :param **kwargs: additional parameters
        """
        volume_type = self.create_volume_type(name=volume_type)
        self.create_encryption_type(type_id=volume_type['id'],
                                    provider=encryption_provider,
                                    key_size=key_size,
                                    cipher=cipher,
                                    control_location=control_location)
        image_id = kwargs.pop('image_id', CONF.compute.image_ref)
        name = kwargs.pop('name', None)
        if not name:
            namestart = self.__class__.__name__ + '-volume-origin'
            name = data_utils.rand_name(namestart)
        return self.create_volume(volume_type=volume_type['name'],
                                  name=name, imageRef=image_id,
                                  **kwargs)

    @decorators.idempotent_id('5bb622ab-5060-48a8-8840-d589a548b9e4')
    @utils.services('compute', 'volume')
    def test_attach_cloned_encrypted_volume(self):

        """This test case attempts to reproduce the following steps:

        * Create an encrypted volume
        * Create clone from volume
        * Boot an instance and attach/detach cloned volume

        """

        volume = self.create_encrypted_volume('luks', volume_type='luks')
        kwargs = {
            'display_name': data_utils.rand_name(self.__class__.__name__),
            'source_volid': volume['id'],
            'volume_type': volume['volume_type'],
            'size': volume['size']
        }
        volume_s = self.volumes_client.create_volume(**kwargs)['volume']
        self.addCleanup(self.volumes_client.wait_for_resource_deletion,
                        volume_s['id'])
        self.addCleanup(self.volumes_client.delete_volume, volume_s['id'])
        waiters.wait_for_volume_resource_status(
            self.volumes_client, volume_s['id'], 'available')
        volume_source = self.volumes_client.show_volume(
            volume_s['id'])['volume']
        server = self.launch_instance()
        self.attach_detach_volume(server, volume_source)

    @decorators.idempotent_id('5bb622ab-5060-48a8-8840-d589a548b7e4')
    @utils.services('compute', 'volume', 'image')
    def test_boot_cloned_encrypted_volume(self):

        """This test case attempts to reproduce the following steps:

        * Create an encrypted volume from image
        * Boot an instance from the volume
        * Write data to the volume
        * Detach volume
        * Create a clone from the first volume
        * Create another encrypted volume from source_volumeid
        * Boot an instance from cloned volume
        * Verify the data
        """

        keypair = self.create_keypair()
        security_group = self._create_security_group()

        volume = self.create_encrypted_volume_from_image('luks')

        # create an instance from volume
        instance_1st = self.boot_instance_from_resource(
            source_id=volume['id'],
            source_type='volume',
            keypair=keypair,
            security_group=security_group)

        # write content to volume on instance
        ip_instance_1st = self.get_server_ip(instance_1st)
        timestamp = self.create_timestamp(ip_instance_1st,
                                          private_key=keypair['private_key'],
                                          server=instance_1st)
        # delete instance
        self._delete_server(instance_1st)

        # create clone
        kwargs = {
            'display_name': data_utils.rand_name(self.__class__.__name__),
            'source_volid': volume['id'],
            'volume_type': volume['volume_type'],
            'size': volume['size']
        }
        volume_s = self.volumes_client.create_volume(**kwargs)['volume']

        self.addCleanup(self.volumes_client.wait_for_resource_deletion,
                        volume_s['id'])
        self.addCleanup(self.volumes_client.delete_volume, volume_s['id'])
        waiters.wait_for_volume_resource_status(
            self.volumes_client, volume_s['id'], 'available')

        # create an instance from volume
        instance_2nd = self.boot_instance_from_resource(
            source_id=volume_s['id'],
            source_type='volume',
            keypair=keypair,
            security_group=security_group)

        # check the content of written file
        ip_instance_2nd = self.get_server_ip(instance_2nd)
        timestamp2 = self.get_timestamp(ip_instance_2nd,
                                        private_key=keypair['private_key'],
                                        server=instance_2nd)

        self.assertEqual(timestamp, timestamp2)

        # delete instance
        self._delete_server(instance_2nd)
0 sphinx!=1.6.6,!=1.6.7,>=1.6.2 # BSD
1 openstackdocstheme>=1.18.1 # Apache-2.0
1515 Programming Language :: Python :: 3
1616 Programming Language :: Python :: 3.6
1717 Programming Language :: Python :: 3.7
18 Programming Language :: Python :: 3.8
1819
1920 [files]
2021 packages =
55
66 coverage!=4.4,>=4.0 # Apache-2.0
77 python-subunit>=1.0.0 # Apache-2.0/BSD
8 sphinx!=1.6.6,!=1.6.7,>=1.6.2 # BSD
98 oslotest>=3.2.0 # Apache-2.0
10 testrepository>=0.0.18 # Apache-2.0/BSD
9 stestr>=1.0.0 # Apache-2.0
1110 testtools>=2.2.0 # MIT
12 openstackdocstheme>=1.18.1 # Apache-2.0
1111 setenv =
1212 VIRTUAL_ENV={envdir}
1313 PYTHONWARNINGS=default::DeprecationWarning
14 OS_LOG_CAPTURE={env:OS_LOG_CAPTURE:true}
15 OS_STDOUT_CAPTURE={env:OS_STDOUT_CAPTURE:true}
16 OS_STDERR_CAPTURE={env:OS_STDERR_CAPTURE:true}
1417 deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
1518 -r{toxinidir}/test-requirements.txt
16 commands = python setup.py test --slowest --testr-args='{posargs}'
19 commands = stestr run --slowest {posargs}
1720
1821 [testenv:pep8]
1922 commands = flake8 {posargs}
2528 # E123, E125 skipped as they are invalid PEP-8.
2629 # W503 line break before binary operator
2730 # W504 line break after binary operator
31 # H101 include name with TODO
32 # reason: no real benefit
2833 show-source = True
29 ignore = E123,E125,W503,W504
34 ignore = E123,E125,W503,W504,H101
3035 builtins = _
3136 exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build