summaryrefslogtreecommitdiff
path: root/tempest/stress/actions/volume_attach_verify.py
blob: 0d3cb2347515af828c66575fa3c5a2d36b7bc09e (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
#    Licensed under the Apache License, Version 2.0 (the "License");
#    you may not use this file except in compliance with the License.
#    You may obtain a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS,
#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#    See the License for the specific language governing permissions and
#    limitations under the License.

from tempest.common.utils import data_utils
from tempest.common.utils.linux import remote_client
from tempest import config
import tempest.stress.stressaction as stressaction
import tempest.test

import re
CONF = config.CONF


class VolumeVerifyStress(stressaction.StressAction):
    """Stress action: repeatedly attach/detach a volume to a server.

    Each ``run()`` iteration attaches a Cinder volume to a Nova server,
    optionally verifies over ssh that the guest actually sees the new
    block device, and detaches it again.  Depending on the configuration
    passed to ``setUp`` the server and/or the volume are either recreated
    on every iteration or created once and reused for the whole test.
    """

    def _create_keypair(self):
        """Create an ssh keypair and store it in ``self.key``.

        The private key is later used by the ssh verification step.
        """
        keyname = data_utils.rand_name("key")
        resp, self.key = self.manager.keypairs_client.create_keypair(keyname)
        assert resp.status == 200

    def _delete_keypair(self):
        """Delete the keypair created by ``_create_keypair``."""
        resp, _ = self.manager.keypairs_client.delete_keypair(self.key['name'])
        assert resp.status == 202

    def _create_vm(self):
        """Boot a server with the test keypair/security group; wait ACTIVE.

        Stores the server id in ``self.server_id``.
        """
        self.name = name = data_utils.rand_name("instance")
        servers_client = self.manager.servers_client
        self.logger.info("creating %s" % name)
        vm_args = self.vm_extra_args.copy()
        vm_args['security_groups'] = [self.sec_grp]
        vm_args['key_name'] = self.key['name']
        resp, server = servers_client.create_server(name, self.image,
                                                    self.flavor,
                                                    **vm_args)
        self.server_id = server['id']
        assert resp.status == 202
        self.manager.servers_client.wait_for_server_status(self.server_id,
                                                           'ACTIVE')

    def _destroy_vm(self):
        """Delete the server and wait until it is fully terminated."""
        self.logger.info("deleting server: %s" % self.server_id)
        resp, _ = self.manager.servers_client.delete_server(self.server_id)
        assert resp.status == 204  # It cannot be 204 if I had to wait..
        self.manager.servers_client.wait_for_server_termination(self.server_id)
        self.logger.info("deleted server: %s" % self.server_id)

    def _create_sec_group(self):
        """Create a security group allowing ssh (tcp/22) and ping (icmp)."""
        sec_grp_cli = self.manager.security_groups_client
        s_name = data_utils.rand_name('sec_grp-')
        s_description = data_utils.rand_name('desc-')
        _, self.sec_grp = sec_grp_cli.create_security_group(s_name,
                                                            s_description)
        create_rule = sec_grp_cli.create_security_group_rule
        create_rule(self.sec_grp['id'], 'tcp', 22, 22)
        create_rule(self.sec_grp['id'], 'icmp', -1, -1)

    def _destroy_sec_grp(self):
        """Delete the security group created by ``_create_sec_group``."""
        sec_grp_cli = self.manager.security_groups_client
        sec_grp_cli.delete_security_group(self.sec_grp['id'])

    def _create_floating_ip(self):
        """Allocate a floating IP from ``self.floating_pool``."""
        floating_cli = self.manager.floating_ips_client
        _, self.floating = floating_cli.create_floating_ip(self.floating_pool)

    def _destroy_floating_ip(self):
        """Release the floating IP and wait for its deletion."""
        cli = self.manager.floating_ips_client
        cli.delete_floating_ip(self.floating['id'])
        cli.wait_for_resource_deletion(self.floating['id'])
        self.logger.info("Deleted Floating IP %s", str(self.floating['ip']))

    def _create_volume(self):
        """Create a 1 GB volume and wait until it is 'available'."""
        name = data_utils.rand_name("volume")
        self.logger.info("creating volume: %s" % name)
        volumes_client = self.manager.volumes_client
        resp, self.volume = volumes_client.create_volume(
            size=1,
            display_name=name)
        assert resp.status == 200
        volumes_client.wait_for_volume_status(self.volume['id'],
                                              'available')
        self.logger.info("created volume: %s" % self.volume['id'])

    def _delete_volume(self):
        """Delete the volume and wait until it is fully gone."""
        self.logger.info("deleting volume: %s" % self.volume['id'])
        volumes_client = self.manager.volumes_client
        resp, _ = volumes_client.delete_volume(self.volume['id'])
        assert resp.status == 202
        volumes_client.wait_for_resource_deletion(self.volume['id'])
        self.logger.info("deleted volume: %s" % self.volume['id'])

    def _wait_disassociate(self):
        """Wait until the floating IP is no longer bound to any instance.

        :raises RuntimeError: if the IP is still associated after
            ``CONF.compute.build_timeout`` seconds.
        """
        cli = self.manager.floating_ips_client

        def func():
            _, floating = cli.get_floating_ip_details(self.floating['id'])
            return floating['instance_id'] is None

        if not tempest.test.call_until_true(func, CONF.compute.build_timeout,
                                            CONF.compute.build_interval):
            raise RuntimeError("IP disassociate timeout!")

    def new_server_ops(self):
        """Boot a fresh server, attach the floating IP and optionally
        verify the baseline partition count over ssh."""
        self._create_vm()
        cli = self.manager.floating_ips_client
        cli.associate_floating_ip_to_server(self.floating['ip'],
                                            self.server_id)
        if self.ssh_test_before_attach and self.enable_ssh_verify:
            self.logger.info("Scanning for block devices via ssh on %s"
                             % self.server_id)
            self.part_wait(self.detach_match_count)

    def setUp(self, **kwargs):
        """Prepare shared resources and read the action configuration.

        Notable configuration combinations:
            Closest options to the test_stamp_pattern:
                new_server = True
                new_volume = True
                enable_ssh_verify = True
                ssh_test_before_attach = False
            Just attaching:
                new_server = False
                new_volume = False
                enable_ssh_verify = True
                ssh_test_before_attach = True
            Mostly API load by repeated attachment:
                new_server = False
                new_volume = False
                enable_ssh_verify = False
                ssh_test_before_attach = False
            Minimal Nova load, but cinder load not decreased:
                new_server = False
                new_volume = True
                enable_ssh_verify = True
                ssh_test_before_attach = True
        """
        self.image = CONF.compute.image_ref
        self.flavor = CONF.compute.flavor_ref
        self.vm_extra_args = kwargs.get('vm_extra_args', {})
        self.floating_pool = kwargs.get('floating_pool', None)
        self.new_volume = kwargs.get('new_volume', True)
        self.new_server = kwargs.get('new_server', False)
        self.enable_ssh_verify = kwargs.get('enable_ssh_verify', True)
        self.ssh_test_before_attach = kwargs.get('ssh_test_before_attach',
                                                 False)
        # Pattern matching guest partition-table lines that belong to the
        # virtio disks we care about.
        self.part_line_re = re.compile(kwargs.get('part_line_re', '.*vd.*'))
        self.detach_match_count = kwargs.get('detach_match_count', 1)
        self.attach_match_count = kwargs.get('attach_match_count', 2)
        self.part_name = kwargs.get('part_name', '/dev/vdc')

        self._create_floating_ip()
        self._create_sec_group()
        self._create_keypair()
        private_key = self.key['private_key']
        username = CONF.compute.image_ssh_user
        self.remote_client = remote_client.RemoteClient(self.floating['ip'],
                                                        username,
                                                        pkey=private_key)
        # Persistent resources are created here only when run() will not
        # recreate them on every iteration.
        if not self.new_volume:
            self._create_volume()
        if not self.new_server:
            self.new_server_ops()

    def part_wait(self, num_match):
        """Wait until exactly ``num_match`` guest partitions match
        ``self.part_line_re`` (i.e. the partition count increased or
        decreased to the expected value).

        :raises RuntimeError: with the last observed partition list if the
            expected count is not reached within the build timeout.
        """
        def _part_state():
            self.partitions = self.remote_client.get_partitions().split('\n')
            matching = 0
            # Skip the header line of /proc/partitions style output.
            for part_line in self.partitions[1:]:
                if self.part_line_re.match(part_line):
                    matching += 1
            return matching == num_match
        if not tempest.test.call_until_true(_part_state,
                                            CONF.compute.build_timeout,
                                            CONF.compute.build_interval):
            # The message must be formatted here: RuntimeError does not
            # interpolate %-style arguments like the logging calls do.
            raise RuntimeError("Unexpected partitions: %s"
                               % str(self.partitions))

    def run(self):
        """One stress iteration: attach the volume, verify, detach."""
        if self.new_server:
            self.new_server_ops()
        if self.new_volume:
            self._create_volume()
        servers_client = self.manager.servers_client
        self.logger.info("attach volume (%s) to vm %s" %
                         (self.volume['id'], self.server_id))
        resp, body = servers_client.attach_volume(self.server_id,
                                                  self.volume['id'],
                                                  self.part_name)
        assert resp.status == 200
        self.manager.volumes_client.wait_for_volume_status(self.volume['id'],
                                                           'in-use')
        if self.enable_ssh_verify:
            self.logger.info("Scanning for new block device on %s"
                             % self.server_id)
            self.part_wait(self.attach_match_count)

        resp, body = servers_client.detach_volume(self.server_id,
                                                  self.volume['id'])
        assert resp.status == 202
        self.manager.volumes_client.wait_for_volume_status(self.volume['id'],
                                                           'available')
        if self.enable_ssh_verify:
            self.logger.info("Scanning for block device disappearance on %s"
                             % self.server_id)
            self.part_wait(self.detach_match_count)
        if self.new_volume:
            self._delete_volume()
        if self.new_server:
            self._destroy_vm()

    def tearDown(self):
        """Release every resource created in ``setUp``."""
        cli = self.manager.floating_ips_client
        cli.disassociate_floating_ip_from_server(self.floating['ip'],
                                                 self.server_id)
        self._wait_disassociate()
        # Persistent server/volume exist only when run() did not recreate
        # them each iteration.
        if not self.new_server:
            self._destroy_vm()
        self._delete_keypair()
        self._destroy_floating_ip()
        self._destroy_sec_grp()
        if not self.new_volume:
            self._delete_volume()