# TODO: Remove this file when tempest scenario manager becomes stable
# Copyright 2012 OpenStack Foundation
# Copyright 2013 IBM Corp.
# Copyright 2021 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import netaddr
from oslo_log import log
from oslo_serialization import jsonutils as json
from oslo_utils import netutils

from tempest.common import compute
from tempest.common import image as common_image
from tempest.common.utils.linux import remote_client
from tempest.common import waiters
from tempest import config
from tempest import exceptions
from tempest.lib.common import api_microversion_fixture
from tempest.lib.common import api_version_utils
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import exceptions as lib_exc
import tempest.test

from tempest.scenario import manager

CONF = config.CONF

LOG = log.getLogger(__name__)

LATEST_MICROVERSION = 'latest'


class ScenarioTest(tempest.test.BaseTestCase):
"""Base class for scenario tests. Uses tempest own clients. """
|
| 44 | |
|
| 45 | |
    credentials = ['primary']

    compute_min_microversion = None
    compute_max_microversion = LATEST_MICROVERSION
    volume_min_microversion = None
    volume_max_microversion = LATEST_MICROVERSION
    placement_min_microversion = None
    placement_max_microversion = LATEST_MICROVERSION

    @classmethod
    def skip_checks(cls):
        super(ScenarioTest, cls).skip_checks()
        api_version_utils.check_skip_with_microversion(
            cls.compute_min_microversion, cls.compute_max_microversion,
            CONF.compute.min_microversion, CONF.compute.max_microversion)
        api_version_utils.check_skip_with_microversion(
            cls.volume_min_microversion, cls.volume_max_microversion,
            CONF.volume.min_microversion, CONF.volume.max_microversion)
        api_version_utils.check_skip_with_microversion(
            cls.placement_min_microversion, cls.placement_max_microversion,
            CONF.placement.min_microversion, CONF.placement.max_microversion)

    @classmethod
    def resource_setup(cls):
        super(ScenarioTest, cls).resource_setup()
        cls.compute_request_microversion = (
            api_version_utils.select_request_microversion(
                cls.compute_min_microversion,
                CONF.compute.min_microversion))
        cls.volume_request_microversion = (
            api_version_utils.select_request_microversion(
                cls.volume_min_microversion,
                CONF.volume.min_microversion))
        cls.placement_request_microversion = (
            api_version_utils.select_request_microversion(
                cls.placement_min_microversion,
                CONF.placement.min_microversion))

    def setUp(self):
        super(ScenarioTest, self).setUp()
        self.useFixture(api_microversion_fixture.APIMicroversionFixture(
            compute_microversion=self.compute_request_microversion,
            volume_microversion=self.volume_request_microversion,
            placement_microversion=self.placement_request_microversion))


class ScenarioTest(manager.ScenarioTest):

    credentials = ['primary', 'admin']

    @classmethod
    def setup_clients(cls):
        super(ScenarioTest, cls).setup_clients()
        # Clients (in alphabetical order)
        cls.flavors_client = cls.os_primary.flavors_client
        cls.compute_floating_ips_client = (
            cls.os_primary.compute_floating_ips_client)
        if CONF.service_available.glance:
            # Check if glance v1 is available to determine which client
            # to use.
            if CONF.image_feature_enabled.api_v1:
                cls.image_client = cls.os_primary.image_client
            elif CONF.image_feature_enabled.api_v2:
                cls.image_client = cls.os_primary.image_client_v2
            else:
                raise lib_exc.InvalidConfiguration(
                    'Either api_v1 or api_v2 must be True in '
                    '[image-feature-enabled].')
        # Compute image client
        cls.compute_images_client = cls.os_primary.compute_images_client
        cls.keypairs_client = cls.os_primary.keypairs_client
        # Nova security groups client
        cls.compute_security_groups_client = (
            cls.os_primary.compute_security_groups_client)
        cls.compute_security_group_rules_client = (
            cls.os_primary.compute_security_group_rules_client)
        cls.servers_client = cls.os_primary.servers_client
        cls.interface_client = cls.os_primary.interfaces_client
        # Neutron network client
        cls.networks_client = cls.os_primary.networks_client
        cls.ports_client = cls.os_primary.ports_client
        cls.routers_client = cls.os_primary.routers_client
        cls.subnets_client = cls.os_primary.subnets_client
        cls.floating_ips_client = cls.os_primary.floating_ips_client
        cls.security_groups_client = cls.os_primary.security_groups_client
        cls.security_group_rules_client = (
            cls.os_primary.security_group_rules_client)
        # Use the latest available volume clients
        if CONF.service_available.cinder:
            cls.volumes_client = cls.os_primary.volumes_client_latest
            cls.snapshots_client = cls.os_primary.snapshots_client_latest
            cls.backups_client = cls.os_primary.backups_client_latest
        cls.admin_volume_types_client = cls.os_admin.volume_types_client_latest

    # ## Test functions library
    #
    # The create_[resource] functions only return the response body and
    # discard the resp part, which is not used in scenario tests.

    def create_keypair(self, client=None):
        if not client:
            client = self.keypairs_client
        name = data_utils.rand_name(self.__class__.__name__)
        # We don't need to create a keypair from a public key in scenario
        # tests, so let Nova generate the key pair.
        body = client.create_keypair(name=name)
        self.addCleanup(client.delete_keypair, name)
        return body['keypair']

    def create_server(self, name=None, image_id=None, flavor=None,
                      validatable=False, wait_until='ACTIVE',
                      clients=None, **kwargs):
        """Wrapper utility that returns a test server.

        This wrapper utility calls the common create test server and
        returns a test server. The purpose of this wrapper is to minimize
        the impact on the code of the tests already using this
        function.

        :param **kwargs:
            See extra parameters below

        :Keyword Arguments:
            * *vnic_type* (``string``) --
              used when launching instances with pre-configured ports.
              Examples:
                normal: a traditional virtual port that is either attached
                        to a Linux bridge or an Open vSwitch bridge on a
                        compute node.
                direct: an SR-IOV port that is directly attached to a VM
                macvtap: an SR-IOV port that is attached to a VM via a macvtap
                         device.
              Defaults to ``CONF.network.port_vnic_type``.
            * *port_profile* (``dict``) --
              This attribute is a dictionary that can be used (with admin
              credentials) to supply information influencing the binding of
              the port.
              example: port_profile = "capabilities:[switchdev]"
              Defaults to ``CONF.network.port_profile``.
        """

        # NOTE(jlanoux): As a first step, ssh checks in the scenario
        # tests need to be run regardless of the run_validation and
        # validatable parameters and thus until the ssh validation job
        # becomes voting in CI. The test resources management and IP
        # association are taken care of in the scenario tests.
        # Therefore, the validatable parameter is set to false in all
        # those tests. In this way create_server just returns a standard
        # server and the scenario tests always perform ssh checks.

        # Needed for the cross_tenant_traffic test:
        if clients is None:
            clients = self.os_primary

        if name is None:
            name = data_utils.rand_name(self.__class__.__name__ + "-server")

        vnic_type = kwargs.pop('vnic_type', CONF.network.port_vnic_type)
        profile = kwargs.pop('port_profile', CONF.network.port_profile)

        # If vnic_type or profile are configured, create a port for
        # every network
        if vnic_type or profile:
            ports = []
            create_port_body = {}

            if vnic_type:
                create_port_body['binding:vnic_type'] = vnic_type

            if profile:
                create_port_body['binding:profile'] = profile

            if kwargs:
                # Convert security group names to security group ids
                # to pass to create_port
                if 'security_groups' in kwargs:
                    security_groups = \
                        clients.security_groups_client.list_security_groups(
                        ).get('security_groups')
                    sec_dict = dict([(s['name'], s['id'])
                                     for s in security_groups])

                    sec_groups_names = [s['name'] for s in kwargs.pop(
                        'security_groups')]
                    security_groups_ids = [sec_dict[s]
                                           for s in sec_groups_names]

                    if security_groups_ids:
                        create_port_body[
                            'security_groups'] = security_groups_ids
                networks = kwargs.pop('networks', [])
            else:
                networks = []

            # If no networks are passed to us, we look up the project's
            # private networks and create a port on each of them -- the
            # same behaviour we would expect when passing the call to the
            # clients with no networks.
            if not networks:
                networks = clients.networks_client.list_networks(
                    **{'router:external': False, 'fields': 'id'})['networks']

            # It's net['uuid'] if networks come from kwargs
            # and net['id'] if they come from
            # clients.networks_client.list_networks
            for net in networks:
                net_id = net.get('uuid', net.get('id'))
                if 'port' not in net:
                    port = self.create_port(network_id=net_id,
                                            client=clients.ports_client,
                                            **create_port_body)
                    ports.append({'port': port['id']})
                else:
                    ports.append({'port': net['port']})
            if ports:
                kwargs['networks'] = ports
            self.ports = ports

        tenant_network = self.get_tenant_network()

        if CONF.compute.compute_volume_common_az:
            kwargs.setdefault('availability_zone',
                              CONF.compute.compute_volume_common_az)

        body, _ = compute.create_test_server(
            clients,
            tenant_network=tenant_network,
            wait_until=wait_until,
            name=name, flavor=flavor,
            image_id=image_id, **kwargs)

        self.addCleanup(waiters.wait_for_server_termination,
                        clients.servers_client, body['id'])
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        clients.servers_client.delete_server, body['id'])
        server = clients.servers_client.show_server(body['id'])['server']
        return server
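
    # Illustrative usage (not part of the original helpers). Assuming the
    # calling test has already created a keypair and a security group,
    # passing ``vnic_type`` exercises the port pre-creation path above;
    # the values below are hypothetical:
    #
    #   keypair = self.create_keypair()
    #   secgroup = self._create_security_group()
    #   server = self.create_server(
    #       key_name=keypair['name'],
    #       security_groups=[{'name': secgroup['name']}],
    #       vnic_type='normal')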

    def create_volume(self, size=None, name=None, snapshot_id=None,
                      imageRef=None, volume_type=None):
        if size is None:
            size = CONF.volume.volume_size
        if imageRef:
            if CONF.image_feature_enabled.api_v1:
                resp = self.image_client.check_image(imageRef)
                image = common_image.get_image_meta_from_headers(resp)
            else:
                image = self.image_client.show_image(imageRef)
            min_disk = image.get('min_disk')
            size = max(size, min_disk)
        if name is None:
            name = data_utils.rand_name(self.__class__.__name__ + "-volume")
        kwargs = {'display_name': name,
                  'snapshot_id': snapshot_id,
                  'imageRef': imageRef,
                  'volume_type': volume_type,
                  'size': size}

        if CONF.compute.compute_volume_common_az:
            kwargs.setdefault('availability_zone',
                              CONF.compute.compute_volume_common_az)

        volume = self.volumes_client.create_volume(**kwargs)['volume']

        self.addCleanup(self.volumes_client.wait_for_resource_deletion,
                        volume['id'])
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.volumes_client.delete_volume, volume['id'])
        self.assertEqual(name, volume['name'])
        waiters.wait_for_volume_resource_status(self.volumes_client,
                                                volume['id'], 'available')
        # The volume retrieved on creation has a non-up-to-date status.
        # Retrieval after it becomes active ensures correct details.
        volume = self.volumes_client.show_volume(volume['id'])['volume']
        return volume
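
    # A minimal sketch of the common boot-volume pattern: create a volume
    # from the configured guest image. The min_disk handling above bumps
    # ``size`` automatically when the image requires a larger disk.
    #
    #   volume = self.create_volume(imageRef=CONF.compute.image_ref)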

    def create_backup(self, volume_id, name=None, description=None,
                      force=False, snapshot_id=None, incremental=False,
                      container=None):

        name = name or data_utils.rand_name(
            self.__class__.__name__ + "-backup")
        kwargs = {'name': name,
                  'description': description,
                  'force': force,
                  'snapshot_id': snapshot_id,
                  'incremental': incremental,
                  'container': container}
        backup = self.backups_client.create_backup(volume_id=volume_id,
                                                   **kwargs)['backup']
        self.addCleanup(self.backups_client.delete_backup, backup['id'])
        waiters.wait_for_volume_resource_status(self.backups_client,
                                                backup['id'], 'available')
        return backup

    def restore_backup(self, backup_id):
        restore = self.backups_client.restore_backup(backup_id)['restore']
        self.addCleanup(self.volumes_client.delete_volume,
                        restore['volume_id'])
        waiters.wait_for_volume_resource_status(self.backups_client,
                                                backup_id, 'available')
        waiters.wait_for_volume_resource_status(self.volumes_client,
                                                restore['volume_id'],
                                                'available')
        self.assertEqual(backup_id, restore['backup_id'])
        return restore
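
    # Illustrative backup round trip (hypothetical variable names): the
    # waiters above guarantee that both the backup and the restored volume
    # are 'available' before the test proceeds.
    #
    #   volume = self.create_volume()
    #   backup = self.create_backup(volume_id=volume['id'])
    #   restore = self.restore_backup(backup['id'])
    #   restored_volume = self.volumes_client.show_volume(
    #       restore['volume_id'])['volume']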

    def create_volume_snapshot(self, volume_id, name=None, description=None,
                               metadata=None, force=False):
        name = name or data_utils.rand_name(
            self.__class__.__name__ + '-snapshot')
        snapshot = self.snapshots_client.create_snapshot(
            volume_id=volume_id,
            force=force,
            display_name=name,
            description=description,
            metadata=metadata)['snapshot']
        self.addCleanup(self.snapshots_client.wait_for_resource_deletion,
                        snapshot['id'])
        self.addCleanup(self.snapshots_client.delete_snapshot, snapshot['id'])
        waiters.wait_for_volume_resource_status(self.snapshots_client,
                                                snapshot['id'], 'available')
        snapshot = self.snapshots_client.show_snapshot(
            snapshot['id'])['snapshot']
        return snapshot

    def _cleanup_volume_type(self, volume_type):
        """Clean up a given volume type.

        Ensures all volumes associated with a type are removed before
        attempting to remove the type itself. This includes any image volume
        cache volumes stored in a separate tenant to the original volumes
        created from the type.
        """
        admin_volume_type_client = self.os_admin.volume_types_client_latest
        admin_volumes_client = self.os_admin.volumes_client_latest
        volumes = admin_volumes_client.list_volumes(
            detail=True, params={'all_tenants': 1})['volumes']
        type_name = volume_type['name']
        for volume in [v for v in volumes if v['volume_type'] == type_name]:
            test_utils.call_and_ignore_notfound_exc(
                admin_volumes_client.delete_volume, volume['id'])
            admin_volumes_client.wait_for_resource_deletion(volume['id'])
        admin_volume_type_client.delete_volume_type(volume_type['id'])

    def create_volume_type(self, client=None, name=None, backend_name=None):
        if not client:
            client = self.os_admin.volume_types_client_latest
        if not name:
            class_name = self.__class__.__name__
            name = data_utils.rand_name(class_name + '-volume-type')
        randomized_name = data_utils.rand_name('scenario-type-' + name)

        LOG.debug("Creating a volume type: %s on backend %s",
                  randomized_name, backend_name)
        extra_specs = {}
        if backend_name:
            extra_specs = {"volume_backend_name": backend_name}

        volume_type = client.create_volume_type(
            name=randomized_name, extra_specs=extra_specs)['volume_type']
        self.addCleanup(self._cleanup_volume_type, volume_type)
        return volume_type

    def _attached_volume_name(
            self, disks_list_before_attach, ip_address, private_key):
        ssh = self.get_remote_client(ip_address, private_key=private_key)

        def _wait_for_volume_available_on_system():
            disks_list_after_attach = ssh.list_disks()
            return len(disks_list_after_attach) > \
                len(disks_list_before_attach)

        if not test_utils.call_until_true(
                _wait_for_volume_available_on_system,
                CONF.compute.build_timeout,
                CONF.compute.build_interval):
            raise lib_exc.TimeoutException

        disks_list_after_attach = ssh.list_disks()
        volume_name = [item for item in disks_list_after_attach
                       if item not in disks_list_before_attach][0]
        return volume_name

    def _get_file_md5(self, ip_address, filename, dev_name=None,
                      mount_path='/mnt', private_key=None, server=None):

        ssh_client = self.get_remote_client(ip_address,
                                            private_key=private_key,
                                            server=server)
        if dev_name is not None:
            ssh_client.exec_command('sudo mount /dev/%s %s' % (dev_name,
                                                               mount_path))

        md5_sum = ssh_client.exec_command(
            'sudo md5sum %s/%s|cut -c 1-32' % (mount_path, filename))
        if dev_name is not None:
            ssh_client.exec_command('sudo umount %s' % mount_path)
        return md5_sum

    def _count_files(self, ip_address, dev_name=None, mount_path='/mnt',
                     private_key=None, server=None):
        ssh_client = self.get_remote_client(ip_address,
                                            private_key=private_key,
                                            server=server)
        if dev_name is not None:
            ssh_client.exec_command('sudo mount /dev/%s %s' % (dev_name,
                                                               mount_path))
        count = ssh_client.exec_command('sudo ls -l %s | wc -l' % mount_path)
        if dev_name is not None:
            ssh_client.exec_command('sudo umount %s' % mount_path)
        # We subtract 2 from the count: `ls -l` prepends a "total" line to
        # its output, and creating the filesystem also adds a lost+found
        # directory, neither of which is a file we created.
        return int(count) - 2
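
    # For example, on a freshly formatted ext4 volume holding one test file,
    # `sudo ls -l /mnt | wc -l` would count three lines (illustrative
    # output):
    #
    #   total 20                            <- header line from `ls -l`
    #   drwx------ 2 root root  lost+found  <- created by mkfs
    #   -rw-r--r-- 1 root root  testfile
    #
    # so subtracting 2 leaves only the files written by the test.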

    def _make_fs(self, ip_address, private_key, server, dev_name, fs='ext4'):
        ssh_client = self.get_remote_client(ip_address,
                                            private_key=private_key,
                                            server=server)

        ssh_client.make_fs(dev_name, fs=fs)

    def create_md5_new_file(self, ip_address, filename, dev_name=None,
                            mount_path='/mnt', private_key=None, server=None):
        ssh_client = self.get_remote_client(ip_address,
                                            private_key=private_key,
                                            server=server)

        if dev_name is not None:
            ssh_client.exec_command('sudo mount /dev/%s %s' % (dev_name,
                                                               mount_path))
        ssh_client.exec_command(
            'sudo dd bs=1024 count=100 if=/dev/urandom of=/%s/%s' %
            (mount_path, filename))
        md5 = ssh_client.exec_command(
            'sudo md5sum -b %s/%s|cut -c 1-32' % (mount_path, filename))
        ssh_client.exec_command('sudo sync')
        if dev_name is not None:
            ssh_client.exec_command('sudo umount %s' % mount_path)
        return md5

    def get_md5_from_file(self, instance, instance_ip, filename,
                          dev_name=None):

        md5_sum = self._get_file_md5(instance_ip, filename=filename,
                                     dev_name=dev_name,
                                     private_key=self.keypair['private_key'],
                                     server=instance)
        count = self._count_files(instance_ip, dev_name=dev_name,
                                  private_key=self.keypair['private_key'],
                                  server=instance)
        return count, md5_sum
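
    # A sketch of how these helpers combine into a write-then-verify check
    # (hypothetical names; ``self.keypair`` is assumed to be set up by the
    # calling test):
    #
    #   md5 = self.create_md5_new_file(ip, 'testfile', dev_name=dev)
    #   # ... snapshot / backup / migrate the volume under test ...
    #   count, md5_after = self.get_md5_from_file(server, ip, 'testfile',
    #                                             dev_name=dev)
    #   self.assertEqual(md5, md5_after)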

    def _attach_and_get_volume_device_name(self, server, volume, instance_ip,
                                           private_key):
        ssh_client = self.get_remote_client(
            instance_ip, private_key=private_key,
            server=server)
        # List disks before volume attachment
        disks_list_before_attach = ssh_client.list_disks()
        # Attach volume
        attachment = self.attach_volume(server, volume)
        # Find the difference between the disks before and after attachment;
        # that gives us the volume device name
        volume_device_name = self._attached_volume_name(
            disks_list_before_attach, instance_ip, private_key)
        return volume_device_name, attachment

    def create_volume_type(self, client=None, name=None, extra_specs=None):
        if not client:
            client = self.os_admin.volume_types_client_latest
        if not name:
            class_name = self.__class__.__name__
            name = data_utils.rand_name(class_name + '-volume-type')
        randomized_name = data_utils.rand_name('scenario-type-' + name)

        LOG.debug("Creating a volume type: %s with extra_specs %s",
                  randomized_name, extra_specs)
        if extra_specs is None:
            extra_specs = {}
        volume_type = self.admin_volume_types_client.create_volume_type(
            name=randomized_name, extra_specs=extra_specs)['volume_type']
        self.addCleanup(self.cleanup_volume_type, volume_type)
        return volume_type

    def _create_loginable_secgroup_rule(self, secgroup_id=None):
        _client = self.compute_security_groups_client
        _client_rules = self.compute_security_group_rules_client
        if secgroup_id is None:
            sgs = _client.list_security_groups()['security_groups']
            for sg in sgs:
                if sg['name'] == 'default':
                    secgroup_id = sg['id']

        # These rules are intended to permit inbound ssh and icmp
        # traffic from all sources, so no group_id is provided.
        # Setting a group_id would only permit traffic from ports
        # belonging to the same security group.
        rulesets = [
            {
                # ssh
                'ip_protocol': 'tcp',
                'from_port': 22,
                'to_port': 22,
                'cidr': '0.0.0.0/0',
            },
            {
                # ping
                'ip_protocol': 'icmp',
                'from_port': -1,
                'to_port': -1,
                'cidr': '0.0.0.0/0',
            }
        ]
        rules = list()
        for ruleset in rulesets:
            sg_rule = _client_rules.create_security_group_rule(
                parent_group_id=secgroup_id, **ruleset)['security_group_rule']
            rules.append(sg_rule)
        return rules

    def _create_security_group(self):
        # Create security group
        sg_name = data_utils.rand_name(self.__class__.__name__)
        sg_desc = sg_name + " description"
        secgroup = self.compute_security_groups_client.create_security_group(
            name=sg_name, description=sg_desc)['security_group']
        self.assertEqual(secgroup['name'], sg_name)
        self.assertEqual(secgroup['description'], sg_desc)
        self.addCleanup(
            test_utils.call_and_ignore_notfound_exc,
            self.compute_security_groups_client.delete_security_group,
            secgroup['id'])

        # Add rules to the security group
        self._create_loginable_secgroup_rule(secgroup['id'])

        return secgroup

    def get_remote_client(self, ip_address, username=None, private_key=None,
                          server=None):
        """Get an SSH client to a remote server

        :param ip_address: the server floating or fixed IP address to use
                           for ssh validation
        :param username: name of the Linux account on the remote server
        :param private_key: the SSH private key to use
        :param server: server dict, used for debugging purposes
        :return: a RemoteClient object
        """

        if username is None:
            username = CONF.validation.image_ssh_user
        # Set this with 'keypair' or others to log in with keypair or
        # username/password.
        if CONF.validation.auth_method == 'keypair':
            password = None
            if private_key is None:
                private_key = self.keypair['private_key']
        else:
            password = CONF.validation.image_ssh_password
            private_key = None
        linux_client = remote_client.RemoteClient(
            ip_address, username, pkey=private_key, password=password,
            server=server, servers_client=self.servers_client)
        linux_client.validate_authentication()
        return linux_client

    def attach_volume(self, server, volume, device=None, tag=None):
        """Attaches volume to server and waits for 'in-use' volume status.

        The volume will be detached when the test tears down.

        :param server: The server to which the volume will be attached.
        :param volume: The volume to attach.
        :param device: Optional mountpoint for the attached volume. Note that
            this is not guaranteed for all hypervisors and is not recommended.
        :param tag: Optional device role tag to apply to the volume.
        """
        attach_kwargs = dict(volumeId=volume['id'])
        if device:
            attach_kwargs['device'] = device
        if tag:
            attach_kwargs['tag'] = tag

        attachment = self.servers_client.attach_volume(
            server['id'], **attach_kwargs)['volumeAttachment']
        # On teardown detach the volume and for multiattach volumes wait for
        # the attachment to be removed. For non-multiattach volumes wait for
        # the state of the volume to change to available. This is so we don't
        # error out when trying to delete the volume during teardown.
        if volume['multiattach']:
            att = waiters.wait_for_volume_attachment_create(
                self.volumes_client, volume['id'], server['id'])
            self.addCleanup(waiters.wait_for_volume_attachment_remove,
                            self.volumes_client, volume['id'],
                            att['attachment_id'])
        else:
            self.addCleanup(waiters.wait_for_volume_resource_status,
                            self.volumes_client, volume['id'], 'available')
        waiters.wait_for_volume_resource_status(self.volumes_client,
                                                volume['id'], 'in-use')
        # Ignore 404s on detach in case the server is deleted or the volume
        # is already detached.
        self.addCleanup(self._detach_volume, server, volume)
        return attachment

    def _detach_volume(self, server, volume):
        """Helper method to detach a volume.

        Ignores 404 responses if the volume or server do not exist, or the
        volume is already detached from the server.
        """
        try:
            volume = self.volumes_client.show_volume(volume['id'])['volume']
            # Check the status. You can only detach an in-use volume,
            # otherwise the compute API will return a 400 response.
            if volume['status'] == 'in-use':
                self.servers_client.detach_volume(server['id'], volume['id'])
        except lib_exc.NotFound:
            # Ignore 404s on detach in case the server is deleted or the
            # volume is already detached.
            pass
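
    # Illustrative attach flow (hypothetical names): attach_volume registers
    # _detach_volume as a cleanup, so a test only has to attach and use the
    # device; the detach and the 'available' wait happen automatically on
    # teardown.
    #
    #   volume = self.create_volume()
    #   attachment = self.attach_volume(server, volume)
    #   dev_name = self._attached_volume_name(disks_before, ip, private_key)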

    def _log_net_info(self, exc):
        # network debug is called as part of ssh init
        if not isinstance(exc, lib_exc.SSHTimeout):
            LOG.debug('Network information on a devstack host')

    def create_server_snapshot(self, server, name=None):
        # Glance client
        _image_client = self.image_client
        # Compute client
        _images_client = self.compute_images_client
        if name is None:
            name = data_utils.rand_name(self.__class__.__name__ + 'snapshot')
        LOG.debug("Creating a snapshot image for server: %s", server['name'])
        image = _images_client.create_image(server['id'], name=name)
        image_id = image.response['location'].split('images/')[1]
        waiters.wait_for_image_status(_image_client, image_id, 'active')

        self.addCleanup(_image_client.wait_for_resource_deletion,
                        image_id)
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        _image_client.delete_image, image_id)

        if CONF.image_feature_enabled.api_v1:
            # In glance v1 the additional properties are stored in the
            # headers.
            resp = _image_client.check_image(image_id)
            snapshot_image = common_image.get_image_meta_from_headers(resp)
            image_props = snapshot_image.get('properties', {})
        else:
            # In glance v2 the additional properties are flattened.
            snapshot_image = _image_client.show_image(image_id)
            image_props = snapshot_image

        bdm = image_props.get('block_device_mapping')
        if bdm:
            bdm = json.loads(bdm)
            if bdm and 'snapshot_id' in bdm[0]:
                snapshot_id = bdm[0]['snapshot_id']
                self.addCleanup(
                    self.snapshots_client.wait_for_resource_deletion,
                    snapshot_id)
                self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                                self.snapshots_client.delete_snapshot,
                                snapshot_id)
                waiters.wait_for_volume_resource_status(self.snapshots_client,
                                                        snapshot_id,
                                                        'available')
        image_name = snapshot_image['name']
        self.assertEqual(name, image_name)
        LOG.debug("Created snapshot image %s for server %s",
                  image_name, server['name'])
        return snapshot_image

    def nova_volume_attach(self, server, volume_to_attach):
        volume = self.servers_client.attach_volume(
            server['id'], volumeId=volume_to_attach['id'], device='/dev/%s'
            % CONF.compute.volume_device_name)['volumeAttachment']
        self.assertEqual(volume_to_attach['id'], volume['id'])
        waiters.wait_for_volume_resource_status(self.volumes_client,
                                                volume['id'], 'in-use')

        # Return the updated volume after the attachment
        return self.volumes_client.show_volume(volume['id'])['volume']

    def nova_volume_detach(self, server, volume):
        self.servers_client.detach_volume(server['id'], volume['id'])
        waiters.wait_for_volume_resource_status(self.volumes_client,
                                                volume['id'], 'available')

    def check_vm_connectivity(self, ip_address,
                              username=None,
                              private_key=None,
                              should_connect=True,
                              extra_msg="",
                              server=None,
                              mtu=None):
        """Check server connectivity

        :param ip_address: server to test against
        :param username: server's ssh username
        :param private_key: server's ssh private key to be used
        :param should_connect: True/False indicates positive/negative test
            positive - attempt ping and ssh
            negative - attempt ping and fail if succeed
        :param extra_msg: Message to help with debugging if
            ``ping_ip_address`` fails
        :param server: The server whose console to log for debugging
        :param mtu: network MTU to use for connectivity validation

        :raises: AssertionError if the result of the connectivity check does
            not match the value of the should_connect param
        """
        LOG.debug('checking network connections to IP %s with user: %s',
                  ip_address, username)
        if should_connect:
            msg = "Timed out waiting for %s to become reachable" % ip_address
        else:
            msg = "ip address %s is reachable" % ip_address
        if extra_msg:
            msg = "%s\n%s" % (extra_msg, msg)
        self.assertTrue(self.ping_ip_address(ip_address,
                                             should_succeed=should_connect,
                                             mtu=mtu, server=server),
                        msg=msg)
        if should_connect:
            # no need to check ssh for negative connectivity
            try:
                self.get_remote_client(ip_address, username, private_key,
                                       server=server)
            except Exception:
                if not extra_msg:
                    extra_msg = 'Failed to ssh to %s' % ip_address
                LOG.exception(extra_msg)
                raise

    def create_floating_ip(self, thing, pool_name=None):
        """Create a floating IP and associate it to a server on Nova"""

        if not pool_name:
            pool_name = CONF.network.floating_network_name
        floating_ip = (self.compute_floating_ips_client.
                       create_floating_ip(pool=pool_name)['floating_ip'])
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.compute_floating_ips_client.delete_floating_ip,
                        floating_ip['id'])
        self.compute_floating_ips_client.associate_floating_ip_to_server(
            floating_ip['ip'], thing['id'])
        return floating_ip

    def create_timestamp(self, ip_address, dev_name=None, mount_path='/mnt',
                         private_key=None, server=None):
        ssh_client = self.get_remote_client(ip_address,
                                            private_key=private_key,
                                            server=server)
        if dev_name is not None:
            ssh_client.make_fs(dev_name)
            ssh_client.exec_command('sudo mount /dev/%s %s' % (dev_name,
                                                               mount_path))
        cmd_timestamp = 'sudo sh -c "date > %s/timestamp; sync"' % mount_path
        ssh_client.exec_command(cmd_timestamp)
        timestamp = ssh_client.exec_command('sudo cat %s/timestamp'
                                            % mount_path)
        if dev_name is not None:
            ssh_client.exec_command('sudo umount %s' % mount_path)
        return timestamp

    def get_timestamp(self, ip_address, dev_name=None, mount_path='/mnt',
                      private_key=None, server=None):
        ssh_client = self.get_remote_client(ip_address,
                                            private_key=private_key,
                                            server=server)
        if dev_name is not None:
            ssh_client.mount(dev_name, mount_path)
        timestamp = ssh_client.exec_command('sudo cat %s/timestamp'
                                            % mount_path)
        if dev_name is not None:
            ssh_client.exec_command('sudo umount %s' % mount_path)
        return timestamp
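
    # The timestamp pair above implements a simple persistence probe
    # (hypothetical names): write a timestamp before the operation under
    # test, then confirm it is still readable afterwards.
    #
    #   ts = self.create_timestamp(ip, dev_name=dev, private_key=pkey)
    #   # ... reboot / migrate / restore the server or volume ...
    #   self.assertEqual(ts, self.get_timestamp(ip, dev_name=dev,
    #                                           private_key=pkey))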

    def get_server_ip(self, server):
        """Get the server fixed or floating IP.

        Based on the configuration we're in, return a correct IP
        address for validating that a guest is up.
        """
        if CONF.validation.connect_method == 'floating':
            # The tests calling this method don't have a floating IP
            # and can't make use of the validation resources. So the
            # method is creating the floating IP there.
            return self.create_floating_ip(server)['ip']
        elif CONF.validation.connect_method == 'fixed':
            # Determine the network name to look for based on config or creds
            # provider network resources.
            if CONF.validation.network_for_ssh:
                addresses = server['addresses'][
                    CONF.validation.network_for_ssh]
            else:
                network = self.get_tenant_network()
                addresses = (server['addresses'][network['name']]
                             if network else [])
            for address in addresses:
                if (address['version'] == CONF.validation.ip_version_for_ssh
                        and address['OS-EXT-IPS:type'] == 'fixed'):
                    return address['addr']
            raise exceptions.ServerUnreachable(server_id=server['id'])
        else:
            raise lib_exc.InvalidConfiguration()

    @classmethod
    def get_host_for_server(cls, server_id):
        server_details = cls.os_admin.servers_client.show_server(server_id)
        return server_details['server']['OS-EXT-SRV-ATTR:host']

    def _get_bdm(self, source_id, source_type, delete_on_termination=False):
        bd_map_v2 = [{
            'uuid': source_id,
            'source_type': source_type,
            'destination_type': 'volume',
            'boot_index': 0,
            'delete_on_termination': delete_on_termination}]
        return {'block_device_mapping_v2': bd_map_v2}
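
    # For a volume with ID 'vol-1' (hypothetical), _get_bdm returns a
    # mapping that tells Nova to boot from that volume:
    #
    #   {'block_device_mapping_v2': [{'uuid': 'vol-1',
    #                                 'source_type': 'volume',
    #                                 'destination_type': 'volume',
    #                                 'boot_index': 0,
    #                                 'delete_on_termination': False}]}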

    def boot_instance_from_resource(self, source_id,
                                    source_type,
                                    keypair=None,
                                    security_group=None,
                                    delete_on_termination=False,
                                    name=None):
        create_kwargs = dict()
        if keypair:
            create_kwargs['key_name'] = keypair['name']
        if security_group:
            create_kwargs['security_groups'] = [
                {'name': security_group['name']}]
        create_kwargs.update(self._get_bdm(
            source_id,
            source_type,
            delete_on_termination=delete_on_termination))
        if name:
            create_kwargs['name'] = name

        return self.create_server(image_id='', **create_kwargs)
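
    # Illustrative boot-from-volume call (hypothetical names): note that
    # ``image_id=''`` in create_server defers entirely to the block device
    # mapping built above.
    #
    #   volume = self.create_volume_from_image()
    #   server = self.boot_instance_from_resource(
    #       source_id=volume['id'], source_type='volume',
    #       keypair=keypair, security_group=secgroup)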

    def create_volume_from_image(self):
        img_uuid = CONF.compute.image_ref
        vol_name = data_utils.rand_name(
            self.__class__.__name__ + '-volume-origin')
        return self.create_volume(name=vol_name, imageRef=img_uuid)


class NetworkScenarioTest(ScenarioTest):
    """Base class for network scenario tests.

    This class provides helpers for network scenario tests using the Neutron
    API. Helpers from the ancestor class that use the nova-network API are
    overridden with Neutron API equivalents.

    This class also enforces the use of Neutron instead of nova-network.
    Subclassed tests will be skipped if Neutron is not enabled.
    """

    credentials = ['primary', 'admin']

    @classmethod
    def skip_checks(cls):
        super(NetworkScenarioTest, cls).skip_checks()
        if not CONF.service_available.neutron:
            raise cls.skipException('Neutron not available')

    def _create_network(self, networks_client=None,
                        tenant_id=None,
                        namestart='network-smoke-',
                        port_security_enabled=True, **net_dict):
        if not networks_client:
            networks_client = self.networks_client
        if not tenant_id:
            tenant_id = networks_client.tenant_id
        name = data_utils.rand_name(namestart)
        network_kwargs = dict(name=name, tenant_id=tenant_id)
        if net_dict:
            network_kwargs.update(net_dict)
        # Neutron disables port security by default so we have to check the
        # config before trying to create the network with
        # port_security_enabled
        if CONF.network_feature_enabled.port_security:
            network_kwargs['port_security_enabled'] = port_security_enabled
        result = networks_client.create_network(**network_kwargs)
        network = result['network']

        self.assertEqual(network['name'], name)
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        networks_client.delete_network,
                        network['id'])
        return network

    def create_subnet(self, network, subnets_client=None,
                      namestart='subnet-smoke', **kwargs):
        """Create a subnet for the given network

        within the cidr block configured for tenant networks.
        """
        if not subnets_client:
            subnets_client = self.subnets_client

        def cidr_in_use(cidr, tenant_id):
            """Check cidr existence

            :returns: True if a subnet with the given cidr already exists
                in the tenant, False otherwise
            """
            cidr_in_use = self.os_admin.subnets_client.list_subnets(
                tenant_id=tenant_id, cidr=cidr)['subnets']
            return len(cidr_in_use) != 0

        ip_version = kwargs.pop('ip_version', 4)

        if ip_version == 6:
            tenant_cidr = netaddr.IPNetwork(
                CONF.network.project_network_v6_cidr)
            num_bits = CONF.network.project_network_v6_mask_bits
        else:
            tenant_cidr = netaddr.IPNetwork(CONF.network.project_network_cidr)
            num_bits = CONF.network.project_network_mask_bits

        result = None
        str_cidr = None
        # Repeatedly attempt subnet creation with sequential cidr
        # blocks until an unallocated block is found.
        for subnet_cidr in tenant_cidr.subnet(num_bits):
            str_cidr = str(subnet_cidr)
            if cidr_in_use(str_cidr, tenant_id=network['tenant_id']):
                continue

            subnet = dict(
                name=data_utils.rand_name(namestart),
                network_id=network['id'],
                tenant_id=network['tenant_id'],
                cidr=str_cidr,
                ip_version=ip_version,
                **kwargs
            )
            try:
                result = subnets_client.create_subnet(**subnet)
                break
            except lib_exc.Conflict as e:
                is_overlapping_cidr = 'overlaps with another subnet' in str(e)
                if not is_overlapping_cidr:
                    raise
        self.assertIsNotNone(result, 'Unable to allocate tenant network')

        subnet = result['subnet']
        self.assertEqual(subnet['cidr'], str_cidr)

        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        subnets_client.delete_subnet, subnet['id'])

        return subnet

    def _get_server_port_id_and_ip4(self, server, ip_addr=None):
        if ip_addr:
            ports = self.os_admin.ports_client.list_ports(
                device_id=server['id'],
                fixed_ips='ip_address=%s' % ip_addr)['ports']
        else:
            ports = self.os_admin.ports_client.list_ports(
                device_id=server['id'])['ports']
        # A port can have more than one IP address in some cases.
        # If the network is dual-stack (IPv4 + IPv6), this port is associated
        # with 2 subnets
        p_status = ['ACTIVE']
        # NOTE(vsaienko) With Ironic, instances live on separate hardware
        # servers. Neutron does not bind ports for Ironic instances, as a
        # result the port remains in the DOWN state.
        # TODO(vsaienko) remove once bug: #1599836 is resolved.
        if getattr(CONF.service_available, 'ironic', False):
            p_status.append('DOWN')
        port_map = [(p["id"], fxip["ip_address"])
                    for p in ports
                    for fxip in p["fixed_ips"]
                    if (netutils.is_valid_ipv4(fxip["ip_address"]) and
                        p['status'] in p_status)]
        inactive = [p for p in ports if p['status'] != 'ACTIVE']
        if inactive:
            LOG.warning("Instance has ports that are not ACTIVE: %s",
                        inactive)

        self.assertNotEmpty(port_map,
                            "No IPv4 addresses found in: %s" % ports)
        self.assertEqual(len(port_map), 1,
                         "Found multiple IPv4 addresses: %s. "
                         "Unable to determine which port to target."
                         % port_map)
        return port_map[0]

    def _get_network_by_name(self, network_name):
        net = self.os_admin.networks_client.list_networks(
            name=network_name)['networks']
        self.assertNotEmpty(net,
                            "Unable to get network by name: %s"
                            % network_name)
        return net[0]

    def create_floating_ip(self, thing, external_network_id=None,
                           port_id=None, client=None):
        """Create a floating IP and associate it to a resource/port on
        Neutron.
        """
        if not external_network_id:
            external_network_id = CONF.network.public_network_id
        if not client:
            client = self.floating_ips_client
        if not port_id:
            port_id, ip4 = self._get_server_port_id_and_ip4(thing)
        else:
            ip4 = None
        result = client.create_floatingip(
            floating_network_id=external_network_id,
            port_id=port_id,
            tenant_id=thing['tenant_id'],
            fixed_ip_address=ip4
        )
        floating_ip = result['floatingip']
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        client.delete_floatingip,
                        floating_ip['id'])
        return floating_ip

    def check_floating_ip_status(self, floating_ip, status):
        """Verifies floatingip reaches the given status

        :param dict floating_ip: floating IP dict to check status
        :param status: target status
        :raises: AssertionError if status doesn't match
        """
        floatingip_id = floating_ip['id']

        def refresh():
            result = (self.floating_ips_client.
                      show_floatingip(floatingip_id)['floatingip'])
            return status == result['status']

        if not test_utils.call_until_true(refresh,
                                          CONF.network.build_timeout,
                                          CONF.network.build_interval):
            floating_ip = self.floating_ips_client.show_floatingip(
                floatingip_id)['floatingip']
        self.assertEqual(status, floating_ip['status'],
                         message="FloatingIP: {fp} is at status: {cst}. "
                                 "failed to reach status: {st}"
                         .format(fp=floating_ip, cst=floating_ip['status'],
                                 st=status))
        LOG.info("FloatingIP: {fp} is at status: {st}"
                 .format(fp=floating_ip, st=status))

    def _create_security_group(self, security_group_rules_client=None,
                               tenant_id=None,
                               namestart='secgroup-smoke',
                               security_groups_client=None):
        if security_group_rules_client is None:
            security_group_rules_client = self.security_group_rules_client
        if security_groups_client is None:
            security_groups_client = self.security_groups_client
        if tenant_id is None:
            tenant_id = security_groups_client.tenant_id
        secgroup = self._create_empty_security_group(
            namestart=namestart, client=security_groups_client,
            tenant_id=tenant_id)

        # Add rules to the security group
        rules = self._create_loginable_secgroup_rule(
            security_group_rules_client=security_group_rules_client,
            secgroup=secgroup,
            security_groups_client=security_groups_client)
        for rule in rules:
            self.assertEqual(tenant_id, rule['tenant_id'])
            self.assertEqual(secgroup['id'], rule['security_group_id'])
        return secgroup

    def _create_empty_security_group(self, client=None, tenant_id=None,
                                     namestart='secgroup-smoke'):
        """Create a security group without rules.

        Default rules will be created:
        - IPv4 egress to any
        - IPv6 egress to any

        :param tenant_id: secgroup will be created in this tenant
        :returns: the created security group
        """
        if client is None:
            client = self.security_groups_client
        if not tenant_id:
            tenant_id = client.tenant_id
        sg_name = data_utils.rand_name(namestart)
        sg_desc = sg_name + " description"
        sg_dict = dict(name=sg_name,
                       description=sg_desc)
        sg_dict['tenant_id'] = tenant_id
        result = client.create_security_group(**sg_dict)

        secgroup = result['security_group']
        self.assertEqual(secgroup['name'], sg_name)
        self.assertEqual(tenant_id, secgroup['tenant_id'])
        self.assertEqual(secgroup['description'], sg_desc)

        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        client.delete_security_group, secgroup['id'])
        return secgroup

    def _create_security_group_rule(self, secgroup=None,
                                    sec_group_rules_client=None,
                                    tenant_id=None,
                                    security_groups_client=None, **kwargs):
        """Create a rule from a dictionary of rule parameters.

        Create a rule in a secgroup. If secgroup is not defined, search for
        the default secgroup in tenant_id.

        :param secgroup: the security group.
        :param tenant_id: if secgroup not passed -- the tenant in which to
            search for the default secgroup
        :param kwargs: a dictionary containing rule parameters:
            for example, to allow incoming ssh:
            rule = {
                direction: 'ingress'
                protocol:'tcp',
                port_range_min: 22,
                port_range_max: 22
                }
        """
        if sec_group_rules_client is None:
            sec_group_rules_client = self.security_group_rules_client
        if security_groups_client is None:
            security_groups_client = self.security_groups_client
        if not tenant_id:
            tenant_id = security_groups_client.tenant_id
        if secgroup is None:
            # Get default secgroup for tenant_id
            default_secgroups = security_groups_client.list_security_groups(
                name='default', tenant_id=tenant_id)['security_groups']
            msg = "No default security group for tenant %s." % (tenant_id)
            self.assertNotEmpty(default_secgroups, msg)
            secgroup = default_secgroups[0]

        ruleset = dict(security_group_id=secgroup['id'],
                       tenant_id=secgroup['tenant_id'])
        ruleset.update(kwargs)

        sg_rule = sec_group_rules_client.create_security_group_rule(**ruleset)
        sg_rule = sg_rule['security_group_rule']

        self.assertEqual(secgroup['tenant_id'], sg_rule['tenant_id'])
        self.assertEqual(secgroup['id'], sg_rule['security_group_id'])

        return sg_rule

    def _create_loginable_secgroup_rule(self,
                                        security_group_rules_client=None,
                                        secgroup=None,
                                        security_groups_client=None):
        """Create loginable security group rule

        This function will create:
        1. egress and ingress tcp port 22 allow rule, in order to allow ssh
           access for ipv4.
        2. egress and ingress ipv6 icmp allow rule, in order to allow icmpv6.
        3. egress and ingress ipv4 icmp allow rule, in order to allow icmpv4.
        """

        if security_group_rules_client is None:
            security_group_rules_client = self.security_group_rules_client
        if security_groups_client is None:
            security_groups_client = self.security_groups_client
        rules = []
        rulesets = [
            dict(
                # ssh
                protocol='tcp',
                port_range_min=22,
                port_range_max=22,
            ),
            dict(
                # ping
                protocol='icmp',
            ),
            dict(
                # ipv6-icmp for ping6
                protocol='icmp',
                ethertype='IPv6',
            )
        ]
        sec_group_rules_client = security_group_rules_client
        for ruleset in rulesets:
            for r_direction in ['ingress', 'egress']:
                ruleset['direction'] = r_direction
                try:
                    sg_rule = self._create_security_group_rule(
                        sec_group_rules_client=sec_group_rules_client,
                        secgroup=secgroup,
                        security_groups_client=security_groups_client,
                        **ruleset)
                except lib_exc.Conflict as ex:
                    # if the rule already exists, skip it and continue
                    msg = 'Security group rule already exists'
                    if msg not in ex._error_string:
                        raise ex
                else:
                    self.assertEqual(r_direction, sg_rule['direction'])
                    rules.append(sg_rule)

        return rules


class EncryptionScenarioTest(ScenarioTest):
    """Base class for encryption scenario tests"""

    credentials = ['primary', 'admin']

    @classmethod
    def setup_clients(cls):
        super(EncryptionScenarioTest, cls).setup_clients()
        cls.admin_volume_types_client = cls.os_admin.volume_types_client_latest
        cls.admin_encryption_types_client = \
            cls.os_admin.encryption_types_client_latest

    def create_encryption_type(self, client=None, type_id=None, provider=None,
                               key_size=None, cipher=None,
                               control_location=None):
        if not client:
            client = self.admin_encryption_types_client
        if not type_id:
            volume_type = self.create_volume_type()
            type_id = volume_type['id']
        LOG.debug("Creating an encryption type for volume type: %s", type_id)
        client.create_encryption_type(
            type_id, provider=provider, key_size=key_size, cipher=cipher,
            control_location=control_location)

    def create_encrypted_volume(self, encryption_provider, volume_type,
                                key_size=256, cipher='aes-xts-plain64',
                                control_location='front-end'):
        volume_type = self.create_volume_type(name=volume_type)
        self.create_encryption_type(type_id=volume_type['id'],
                                    provider=encryption_provider,
                                    key_size=key_size,
                                    cipher=cipher,
                                    control_location=control_location)
        return self.create_volume(volume_type=volume_type['name'])
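
    # Illustrative use (the provider string is deployment-specific; 'luks'
    # is a commonly supported LUKS provider alias in recent Cinder
    # releases -- check your cloud's documentation):
    #
    #   volume = self.create_encrypted_volume('luks', volume_type='luks')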
|