author     Mike Perez <thingee@gmail.com>    2015-04-05 22:13:03 -0700
committer  Mike Perez <thingee@gmail.com>    2015-04-06 16:45:20 +0000
commit     f274672870c86ac5eeb77a7d2dac5fe74dec8111 (patch)
tree       fb72f339b69d4584ab395206a89f46183bdf3780
parent     af79f87b9ee97f8ae56d97b6423c884889aec415 (diff)
download   cinder-f274672870c86ac5eeb77a7d2dac5fe74dec8111.tar.gz
Partial Revert "Removing ZFSSA driver"

This reverts commit c742566a6920f7260573fbd0dc71a5cc1e4a2311. However, the
ZFSSA iSCSI driver is still left out because there is no CI for it.

Change-Id: Ibf830f295043624ca9a5dc90f1e3a897cb64915f
-rw-r--r--  cinder/tests/test_zfssa.py                    312
-rw-r--r--  cinder/volume/drivers/zfssa/__init__.py         0
-rw-r--r--  cinder/volume/drivers/zfssa/restclient.py     360
-rw-r--r--  cinder/volume/drivers/zfssa/webdavclient.py   133
-rw-r--r--  cinder/volume/drivers/zfssa/zfssanfs.py       305
-rw-r--r--  cinder/volume/drivers/zfssa/zfssarest.py      894
6 files changed, 2004 insertions, 0 deletions
diff --git a/cinder/tests/test_zfssa.py b/cinder/tests/test_zfssa.py
new file mode 100644
index 000000000..d7e71997b
--- /dev/null
+++ b/cinder/tests/test_zfssa.py
@@ -0,0 +1,312 @@
+# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Unit tests for Oracle's ZFSSA Cinder volume driver
+"""
+
+import json
+
+import mock
+from oslo_log import log as logging
+from oslo_utils import units
+
+from cinder.volume.drivers.zfssa import restclient as client
+from cinder.volume.drivers.zfssa import zfssarest as rest
+
+
+LOG = logging.getLogger(__name__)
+
+nfs_logbias = 'latency'
+nfs_compression = 'off'
+
+
+class FakeZFSSA(object):
+ """Fake ZFS SA"""
+ def __init__(self):
+ self.user = None
+ self.host = None
+
+ def login(self, user):
+ self.user = user
+
+ def set_host(self, host, timeout=None):
+ self.host = host
+
+ def create_project(self, pool, project, compression, logbias):
+ out = {}
+ if not self.host or not self.user:
+ return out
+
+ out = {"status": "online",
+ "name": "pool",
+ "usage": {"available": 10,
+ "total": 10,
+ "dedupratio": 100,
+ "used": 1},
+ "peer": "00000000-0000-0000-0000-000000000000",
+ "owner": "host",
+ "asn": "11111111-2222-3333-4444-555555555555"}
+ return out
+
+ def create_initiator(self, init, initgrp, chapuser, chapsecret):
+ out = {}
+ if not self.host or not self.user:
+ return out
+ out = {"href": "fake_href",
+ "alias": "fake_alias",
+ "initiator": "fake_iqn.1993-08.org.fake:01:000000000000",
+ "chapuser": "",
+ "chapsecret": ""
+ }
+
+ return out
+
+ def add_to_initiatorgroup(self, init, initgrp):
+ r = rest.ZFSSAApi()
+ type(r).rclient = mock.PropertyMock(return_value=FakeAddIni2InitGrp())
+ r.add_to_initiatorgroup(init, initgrp)
+
+ def create_target(self, tgtalias, inter, tchapuser, tchapsecret):
+ out = {}
+ if not self.host or not self.user:
+ return out
+ out = {"href": "fake_href",
+ "alias": "fake_tgtgrp",
+ "iqn": "iqn.1986-03.com.sun:02:00000-aaaa-bbbb-cccc-ddddd",
+ "auth": "none",
+ "targetchapuser": "",
+ "targetchapsecret": "",
+ "interfaces": ["eth0"]
+ }
+
+ return out
+
+ def add_to_targetgroup(self, iqn, tgtgrp):
+ out = {}
+ if not self.host or not self.user:
+ return {}
+ out = {"href": "fake_href",
+ "name": "fake_tgtgrp",
+ "targets": ["iqn.1986-03.com.sun:02:00000-aaaa-bbbb-cccc-ddddd"]
+ }
+ return out
+
+ def get_lun(self, pool, project, lun):
+ ret = {
+ 'guid': '600144F0F8FBD5BD000053CE53AB0001',
+ 'number': 0,
+ 'initiatorgroup': 'fake_initgrp',
+ 'size': 1 * units.Gi
+ }
+ return ret
+
+ def get_target(self, target):
+ return 'iqn.1986-03.com.sun:02:00000-aaaa-bbbb-cccc-ddddd'
+
+ def create_lun(self, pool, project, lun, volsize, targetgroup, specs):
+ out = {}
+ if not self.host and not self.user:
+ return out
+
+ out = {"status": "online",
+ "lunguid": "600144F0F8FBD5BD000053CE53AB0001",
+ "initiatorgroup": ["fake_initgrp"],
+ "volsize": volsize,
+ "pool": pool,
+ "name": lun,
+ "project": project,
+ "targetgroup": targetgroup}
+ if specs:
+ out.update(specs)
+
+ return out
+
+ def delete_lun(self, pool, project, lun):
+ out = {}
+ if not self.host and not self.user:
+ return out
+ out = {"pool": pool,
+ "project": project,
+ "name": lun}
+
+ return out
+
+ def create_snapshot(self, pool, project, vol, snap):
+ out = {}
+ if not self.host and not self.user:
+ return {}
+ out = {"name": snap,
+ "numclones": 0,
+ "share": vol,
+ "project": project,
+ "pool": pool}
+
+ return out
+
+ def delete_snapshot(self, pool, project, vol, snap):
+ out = {}
+ if not self.host and not self.user:
+ return {}
+ out = {"name": snap,
+ "share": vol,
+ "project": project,
+ "pool": pool}
+
+ return out
+
+ def clone_snapshot(self, pool, project, pvol, snap, vol):
+ out = {}
+ if not self.host and not self.user:
+ return out
+ out = {"origin": {"project": project,
+ "snapshot": snap,
+ "share": pvol,
+ "pool": pool},
+ "logbias": "latency",
+ "assignednumber": 1,
+ "status": "online",
+ "lunguid": "600144F0F8FBD5BD000053CE67A50002",
+ "volsize": 1,
+ "pool": pool,
+ "name": vol,
+ "project": project}
+
+ return out
+
+ def set_lun_initiatorgroup(self, pool, project, vol, initgrp):
+ out = {}
+ if not self.host and not self.user:
+ return out
+ out = {"lunguid": "600144F0F8FBD5BD000053CE67A50002",
+ "pool": pool,
+ "name": vol,
+ "project": project,
+ "initiatorgroup": ["fake_initgrp"]}
+
+ return out
+
+ def has_clones(self, pool, project, vol, snapshot):
+ return False
+
+ def set_lun_props(self, pool, project, vol, **kargs):
+ out = {}
+ if not self.host and not self.user:
+ return out
+ out = {"pool": pool,
+ "name": vol,
+ "project": project,
+ "volsize": kargs['volsize']}
+
+ return out
+
+ def get_initiator_initiatorgroup(self, initiator):
+ ret = ['test-init-grp1']
+ return ret
+
+
+class FakeNFSZFSSA(FakeZFSSA):
+ """Fake ZFS SA for the NFS Driver
+ """
+ def set_webdav(self, https_path, auth_str):
+ self.webdavclient = https_path
+
+ def create_share(self, pool, project, share, args):
+ out = {}
+ if not self.host and not self.user:
+ return out
+
+ out = {"logbias": nfs_logbias,
+ "compression": nfs_compression,
+ "status": "online",
+ "pool": pool,
+ "name": share,
+ "project": project,
+ "mountpoint": '/export/nfs_share'}
+
+ return out
+
+ def get_share(self, pool, project, share):
+ out = {}
+ if not self.host and not self.user:
+ return out
+
+ out = {"logbias": nfs_logbias,
+ "compression": nfs_compression,
+ "encryption": "off",
+ "status": "online",
+ "pool": pool,
+ "name": share,
+ "project": project,
+ "mountpoint": '/export/nfs_share'}
+
+ return out
+
+ def create_snapshot_of_volume_file(self, src_file="", dst_file=""):
+ out = {}
+ if not self.host and not self.user:
+ return out
+ out = {"status": 201}
+
+ return out
+
+ def delete_snapshot_of_volume_file(self, src_file=""):
+ out = {}
+ if not self.host and not self.user:
+ return out
+ out = {"status": 204}
+
+ return out
+
+ def create_volume_from_snapshot_file(self, src_file="", dst_file="",
+ method='COPY'):
+ out = {}
+ if not self.host and not self.user:
+ return out
+ out = {"status": 202}
+
+ return out
+
+ def modify_service(self, service, args):
+ out = {}
+ if not self.host and not self.user:
+ return out
+ out = {"service": {"<status>": "online"}}
+ return out
+
+ def enable_service(self, service):
+ out = {}
+ if not self.host and not self.user:
+ return out
+ out = {"service": {"<status>": "online"}}
+ return out
+
+
+class FakeAddIni2InitGrp(object):
+ def get(self, path, **kwargs):
+ result = client.RestResult()
+ result.status = client.Status.OK
+ result.data = json.JSONEncoder().encode({'group':
+ {'initiators':
+ ['iqn.1-0.org.deb:01:d7']}})
+ return result
+
+ def put(self, path, body="", **kwargs):
+ result = client.RestResult()
+ result.status = client.Status.ACCEPTED
+ return result
+
+ def post(self, path, body="", **kwargs):
+ result = client.RestResult()
+ result.status = client.Status.CREATED
+ return result
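
The fakes above stand in for the appliance-facing classes added later in this commit; FakeAddIni2InitGrp, for example, backs the mocked rclient used by FakeZFSSA.add_to_initiatorgroup(). As a rough, illustrative sketch (not part of this changeset), its canned responses can be exercised directly:

    import json

    from cinder.tests.test_zfssa import FakeAddIni2InitGrp
    from cinder.volume.drivers.zfssa import restclient as client

    fake = FakeAddIni2InitGrp()
    # get() returns an OK RestResult whose data is the JSON-encoded group.
    result = fake.get('/api/san/v1/iscsi/initiator-groups/fake_initgrp')
    assert result.status == client.Status.OK
    assert json.loads(result.data)['group']['initiators'] == \
        ['iqn.1-0.org.deb:01:d7']
    # put() and post() return ACCEPTED and CREATED results respectively.
    assert fake.put('/fake/path').status == client.Status.ACCEPTED
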
diff --git a/cinder/volume/drivers/zfssa/__init__.py b/cinder/volume/drivers/zfssa/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/cinder/volume/drivers/zfssa/__init__.py
diff --git a/cinder/volume/drivers/zfssa/restclient.py b/cinder/volume/drivers/zfssa/restclient.py
new file mode 100644
index 000000000..19c29feab
--- /dev/null
+++ b/cinder/volume/drivers/zfssa/restclient.py
@@ -0,0 +1,360 @@
+# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+ZFS Storage Appliance REST API Client Programmatic Interface
+"""
+
+import httplib
+import json
+import StringIO
+import time
+import urllib2
+
+from oslo_log import log
+
+from cinder.i18n import _LE, _LI
+
+LOG = log.getLogger(__name__)
+
+
+class Status(object):
+ """Result HTTP Status"""
+
+ def __init__(self):
+ pass
+
+ #: Request return OK
+ OK = httplib.OK
+
+ #: New resource created successfully
+ CREATED = httplib.CREATED
+
+ #: Command accepted
+ ACCEPTED = httplib.ACCEPTED
+
+ #: Command returned OK but no data will be returned
+ NO_CONTENT = httplib.NO_CONTENT
+
+ #: Bad Request
+ BAD_REQUEST = httplib.BAD_REQUEST
+
+ #: User is not authorized
+ UNAUTHORIZED = httplib.UNAUTHORIZED
+
+ #: The request is not allowed
+ FORBIDDEN = httplib.FORBIDDEN
+
+ #: The requested resource was not found
+ NOT_FOUND = httplib.NOT_FOUND
+
+ #: The request is not allowed
+ NOT_ALLOWED = httplib.METHOD_NOT_ALLOWED
+
+ #: Request timed out
+ TIMEOUT = httplib.REQUEST_TIMEOUT
+
+ #: Invalid request
+ CONFLICT = httplib.CONFLICT
+
+ #: Service Unavailable
+ BUSY = httplib.SERVICE_UNAVAILABLE
+
+
+class RestResult(object):
+ """Result from a REST API operation"""
+ def __init__(self, response=None, err=None):
+ """Initialize a RestResult containing the results from a REST call
+ :param response: HTTP response
+ """
+ self.response = response
+ self.error = err
+ self.data = ""
+ self.status = 0
+ if self.response:
+ self.status = self.response.getcode()
+ result = self.response.read()
+ while result:
+ self.data += result
+ result = self.response.read()
+
+ if self.error:
+ self.status = self.error.code
+ self.data = httplib.responses[self.status]
+
+ LOG.debug('Response code: %s' % self.status)
+ LOG.debug('Response data: %s' % self.data)
+
+ def get_header(self, name):
+ """Get an HTTP header with the given name from the results
+
+ :param name: HTTP header name
+ :return: The header value or None if no value is found
+ """
+ if self.response is None:
+ return None
+ info = self.response.info()
+ return info.getheader(name)
+
+
+class RestClientError(Exception):
+ """Exception for ZFS REST API client errors"""
+ def __init__(self, status, name="ERR_INTERNAL", message=None):
+
+ """Create a REST Response exception
+
+ :param status: HTTP response status
+ :param name: The name of the REST API error type
+ :param message: Descriptive error message returned from REST call
+ """
+ super(RestClientError, self).__init__(message)
+ self.code = status
+ self.name = name
+ self.msg = message
+ if status in httplib.responses:
+ self.msg = httplib.responses[status]
+
+ def __str__(self):
+ return "%d %s %s" % (self.code, self.name, self.msg)
+
+
+class RestClientURL(object):
+ """ZFSSA urllib2 client"""
+ def __init__(self, url, **kwargs):
+ """Initialize a REST client.
+
+ :param url: The ZFSSA REST API URL
+ :key session: HTTP Cookie value of x-auth-session obtained from a
+ normal BUI login.
+ :key timeout: Time in seconds to wait for command to complete.
+ (Default is 60 seconds)
+ """
+ self.url = url
+ self.local = kwargs.get("local", False)
+ self.base_path = kwargs.get("base_path", "/api")
+ self.timeout = kwargs.get("timeout", 60)
+        self.headers = {"content-type": "application/json"}
+        if kwargs.get('session'):
+            self.headers['x-auth-session'] = kwargs.get('session')
+
+ self.do_logout = False
+ self.auth_str = None
+
+ def _path(self, path, base_path=None):
+ """build rest url path"""
+ if path.startswith("http://") or path.startswith("https://"):
+ return path
+ if base_path is None:
+ base_path = self.base_path
+ if not path.startswith(base_path) and not (
+ self.local and ("/api" + path).startswith(base_path)):
+ path = "%s%s" % (base_path, path)
+ if self.local and path.startswith("/api"):
+ path = path[4:]
+ return self.url + path
+
+ def _authorize(self):
+ """Performs authorization setting x-auth-session"""
+ self.headers['authorization'] = 'Basic %s' % self.auth_str
+ if 'x-auth-session' in self.headers:
+ del self.headers['x-auth-session']
+
+ try:
+ result = self.post("/access/v1")
+ del self.headers['authorization']
+ if result.status == httplib.CREATED:
+ self.headers['x-auth-session'] = \
+ result.get_header('x-auth-session')
+ self.do_logout = True
+ LOG.info(_LI('ZFSSA version: %s') %
+ result.get_header('x-zfssa-version'))
+
+ elif result.status == httplib.NOT_FOUND:
+ raise RestClientError(result.status, name="ERR_RESTError",
+ message="REST Not Available: \
+ Please Upgrade")
+
+ except RestClientError as err:
+ del self.headers['authorization']
+ raise err
+
+ def login(self, auth_str):
+ """Login to an appliance using a user name and password.
+
+ Start a session like what is done logging into the BUI. This is not a
+ requirement to run REST commands, since the protocol is stateless.
+        What it does is set up a cookie session so that some server-side
+        caching can be done. If login is used, remember to call logout when
+ finished.
+
+ :param auth_str: Authorization string (base64)
+ """
+ self.auth_str = auth_str
+ self._authorize()
+
+ def logout(self):
+ """Logout of an appliance"""
+ result = None
+ try:
+ result = self.delete("/access/v1", base_path="/api")
+ except RestClientError:
+ pass
+
+ self.headers.clear()
+ self.do_logout = False
+ return result
+
+ def islogin(self):
+ """return if client is login"""
+ return self.do_logout
+
+ @staticmethod
+ def mkpath(*args, **kwargs):
+ """Make a path?query string for making a REST request
+
+ :cmd_params args: The path part
+ :cmd_params kwargs: The query part
+ """
+ buf = StringIO.StringIO()
+ query = "?"
+ for arg in args:
+ buf.write("/")
+ buf.write(arg)
+ for k in kwargs:
+ buf.write(query)
+ if query == "?":
+ query = "&"
+ buf.write(k)
+ buf.write("=")
+ buf.write(kwargs[k])
+ return buf.getvalue()
+
+ def request(self, path, request, body=None, **kwargs):
+ """Make an HTTP request and return the results
+
+ :param path: Path used with the initialized URL to make a request
+ :param request: HTTP request type (GET, POST, PUT, DELETE)
+ :param body: HTTP body of request
+ :key accept: Set HTTP 'Accept' header with this value
+ :key base_path: Override the base_path for this request
+ :key content: Set HTTP 'Content-Type' header with this value
+ """
+ out_hdrs = dict.copy(self.headers)
+ if kwargs.get("accept"):
+ out_hdrs['accept'] = kwargs.get("accept")
+
+ if body:
+ if isinstance(body, dict):
+ body = str(json.dumps(body))
+
+ if body and len(body):
+ out_hdrs['content-length'] = len(body)
+
+ zfssaurl = self._path(path, kwargs.get("base_path"))
+ req = urllib2.Request(zfssaurl, body, out_hdrs)
+ req.get_method = lambda: request
+ maxreqretries = kwargs.get("maxreqretries", 10)
+ retry = 0
+ response = None
+
+ LOG.debug('Request: %s %s' % (request, zfssaurl))
+ LOG.debug('Out headers: %s' % out_hdrs)
+ if body and body != '':
+ LOG.debug('Body: %s' % body)
+
+ while retry < maxreqretries:
+ try:
+ response = urllib2.urlopen(req, timeout=self.timeout)
+ except urllib2.HTTPError as err:
+ if err.code == httplib.NOT_FOUND:
+ LOG.debug('REST Not Found: %s' % err.code)
+ else:
+ LOG.error(_LE('REST Not Available: %s') % err.code)
+
+ if err.code == httplib.SERVICE_UNAVAILABLE and \
+ retry < maxreqretries:
+ retry += 1
+ time.sleep(1)
+ LOG.error(_LE('Server Busy retry request: %s') % retry)
+ continue
+ if (err.code == httplib.UNAUTHORIZED or
+ err.code == httplib.INTERNAL_SERVER_ERROR) and \
+ '/access/v1' not in zfssaurl:
+ try:
+ LOG.error(_LE('Authorizing request: '
+ '%(zfssaurl)s'
+ 'retry: %(retry)d .')
+ % {'zfssaurl': zfssaurl,
+ 'retry': retry})
+ self._authorize()
+ req.add_header('x-auth-session',
+ self.headers['x-auth-session'])
+ except RestClientError:
+ pass
+ retry += 1
+ time.sleep(1)
+ continue
+
+ return RestResult(err=err)
+
+ except urllib2.URLError as err:
+ LOG.error(_LE('URLError: %s') % err.reason)
+ raise RestClientError(-1, name="ERR_URLError",
+ message=err.reason)
+
+ break
+
+ if response and response.getcode() == httplib.SERVICE_UNAVAILABLE and \
+ retry >= maxreqretries:
+ raise RestClientError(response.getcode(), name="ERR_HTTPError",
+ message="REST Not Available: Disabled")
+
+ return RestResult(response=response)
+
+ def get(self, path, **kwargs):
+ """Make an HTTP GET request
+
+ :param path: Path to resource.
+ """
+ return self.request(path, "GET", **kwargs)
+
+ def post(self, path, body="", **kwargs):
+ """Make an HTTP POST request
+
+ :param path: Path to resource.
+ :param body: Post data content
+ """
+ return self.request(path, "POST", body, **kwargs)
+
+ def put(self, path, body="", **kwargs):
+ """Make an HTTP PUT request
+
+ :param path: Path to resource.
+ :param body: Put data content
+ """
+ return self.request(path, "PUT", body, **kwargs)
+
+ def delete(self, path, **kwargs):
+ """Make an HTTP DELETE request
+
+ :param path: Path to resource that will be deleted.
+ """
+ return self.request(path, "DELETE", **kwargs)
+
+ def head(self, path, **kwargs):
+ """Make an HTTP HEAD request
+
+ :param path: Path to resource.
+ """
+ return self.request(path, "HEAD", **kwargs)
diff --git a/cinder/volume/drivers/zfssa/webdavclient.py b/cinder/volume/drivers/zfssa/webdavclient.py
new file mode 100644
index 000000000..c100bf45b
--- /dev/null
+++ b/cinder/volume/drivers/zfssa/webdavclient.py
@@ -0,0 +1,133 @@
+# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+ZFS Storage Appliance WebDAV Client
+"""
+
+import httplib
+import time
+import urllib2
+
+from oslo_log import log
+
+from cinder import exception
+from cinder.i18n import _, _LE
+
+LOG = log.getLogger(__name__)
+
+bad_gateway_err = _('Check the state of the http service. Also ensure that '
+ 'the https port number is the same as the one specified '
+ 'in cinder.conf.')
+
+WebDAVHTTPErrors = {
+ httplib.UNAUTHORIZED: _('User not authorized to perform WebDAV '
+ 'operations.'),
+ httplib.BAD_GATEWAY: bad_gateway_err,
+ httplib.FORBIDDEN: _('Check access permissions for the ZFS share assigned '
+ 'to this driver.'),
+    httplib.NOT_FOUND: _('The source volume for this WebDAV operation was '
+                         'not found.'),
+ httplib.INSUFFICIENT_STORAGE: _('Not enough storage space in the ZFS '
+ 'share to perform this operation.')
+}
+
+WebDAVErrors = {
+ 'BadStatusLine': _('http service may have been abruptly disabled or put '
+ 'to maintenance state in the middle of this '
+ 'operation.'),
+ 'Bad_Gateway': bad_gateway_err
+}
+
+
+class ZFSSAWebDAVClient(object):
+ def __init__(self, url, auth_str, **kwargs):
+ """Initialize WebDAV Client"""
+ self.https_path = url
+ self.auth_str = auth_str
+
+ def _lookup_error(self, error):
+ msg = ''
+ if error in httplib.responses:
+ msg = httplib.responses[error]
+
+ if error in WebDAVHTTPErrors:
+ msg = WebDAVHTTPErrors[error]
+ elif error in WebDAVErrors:
+ msg = WebDAVErrors[error]
+
+ return msg
+
+ def request(self, src_file="", dst_file="", method="", maxretries=10):
+ retry = 0
+ src_url = self.https_path + "/" + src_file
+ dst_url = self.https_path + "/" + dst_file
+ request = urllib2.Request(src_url)
+
+ if dst_file != "":
+ request.add_header('Destination', dst_url)
+
+ request.add_header("Authorization", "Basic %s" % self.auth_str)
+
+ request.get_method = lambda: method
+
+ LOG.debug('Sending WebDAV request:%s %s %s' % (method, src_url,
+ dst_url))
+
+ while retry < maxretries:
+ try:
+ response = urllib2.urlopen(request, timeout=None)
+ except urllib2.HTTPError as err:
+ LOG.error(_LE('WebDAV returned with %(code)s error during '
+ '%(method)s call.')
+ % {'code': err.code,
+ 'method': method})
+
+ if err.code == httplib.INTERNAL_SERVER_ERROR:
+ exception_msg = (_('WebDAV operation failed with '
+ 'error code: %(code)s '
+ 'reason: %(reason)s '
+ 'Retry attempt %(retry)s in progress.')
+ % {'code': err.code,
+ 'reason': err.reason,
+ 'retry': retry})
+ LOG.error(exception_msg)
+ if retry < maxretries:
+ retry += 1
+ time.sleep(1)
+ continue
+
+ msg = self._lookup_error(err.code)
+ raise exception.WebDAVClientError(msg=msg, code=err.code,
+ src=src_file, dst=dst_file,
+ method=method)
+
+ except httplib.BadStatusLine as err:
+ msg = self._lookup_error('BadStatusLine')
+ raise exception.WebDAVClientError(msg=msg,
+ code='httplib.BadStatusLine',
+ src=src_file, dst=dst_file,
+ method=method)
+
+ except urllib2.URLError as err:
+ reason = ''
+ if getattr(err, 'reason'):
+ reason = err.reason
+
+ msg = self._lookup_error('Bad_Gateway')
+ raise exception.WebDAVClientError(msg=msg,
+ code=reason, src=src_file,
+ dst=dst_file, method=method)
+
+ break
+ return response
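
Similarly, a brief sketch of the WebDAV client above (share URL, credentials and file names are placeholders): it issues a COPY of one file on the share to another name, retrying on 500s and mapping other failures to WebDAVClientError.

    import base64

    from cinder.volume.drivers.zfssa import webdavclient

    https_path = 'https://zfssa.example.com:443/shares/export/nfs_share'
    auth_str = base64.encodestring('admin:password')[:-1]
    dav = webdavclient.ZFSSAWebDAVClient(https_path, auth_str)

    # Copies <share>/volume-src to <share>/volume-dst over WebDAV.
    dav.request(src_file='volume-src', dst_file='volume-dst', method='COPY')
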
diff --git a/cinder/volume/drivers/zfssa/zfssanfs.py b/cinder/volume/drivers/zfssa/zfssanfs.py
new file mode 100644
index 000000000..721f11c0a
--- /dev/null
+++ b/cinder/volume/drivers/zfssa/zfssanfs.py
@@ -0,0 +1,305 @@
+# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+ZFS Storage Appliance NFS Cinder Volume Driver
+"""
+import base64
+import datetime as dt
+import errno
+
+from oslo_config import cfg
+from oslo_log import log
+from oslo_utils import excutils
+from oslo_utils import units
+
+from cinder import exception
+from cinder.i18n import _, _LE, _LI
+from cinder.volume.drivers import nfs
+from cinder.volume.drivers.san import san
+from cinder.volume.drivers.zfssa import zfssarest
+
+
+ZFSSA_OPTS = [
+ cfg.StrOpt('zfssa_data_ip',
+ help='Data path IP address'),
+ cfg.StrOpt('zfssa_https_port', default='443',
+ help='HTTPS port number'),
+ cfg.StrOpt('zfssa_nfs_mount_options', default='',
+ help='Options to be passed while mounting share over nfs'),
+ cfg.StrOpt('zfssa_nfs_pool', default='',
+ help='Storage pool name.'),
+ cfg.StrOpt('zfssa_nfs_project', default='NFSProject',
+ help='Project name.'),
+ cfg.StrOpt('zfssa_nfs_share', default='nfs_share',
+ help='Share name.'),
+ cfg.StrOpt('zfssa_nfs_share_compression', default='off',
+ choices=['off', 'lzjb', 'gzip-2', 'gzip', 'gzip-9'],
+ help='Data compression.'),
+ cfg.StrOpt('zfssa_nfs_share_logbias', default='latency',
+ choices=['latency', 'throughput'],
+ help='Synchronous write bias-latency, throughput.'),
+ cfg.IntOpt('zfssa_rest_timeout',
+ help='REST connection timeout. (seconds)')
+]
+
+LOG = log.getLogger(__name__)
+
+CONF = cfg.CONF
+CONF.register_opts(ZFSSA_OPTS)
+
+
+def factory_zfssa():
+ return zfssarest.ZFSSANfsApi()
+
+
+class ZFSSANFSDriver(nfs.NfsDriver):
+ VERSION = '1.0.0'
+ volume_backend_name = 'ZFSSA_NFS'
+ protocol = driver_prefix = driver_volume_type = 'nfs'
+
+ def __init__(self, *args, **kwargs):
+ super(ZFSSANFSDriver, self).__init__(*args, **kwargs)
+ self.configuration.append_config_values(ZFSSA_OPTS)
+ self.configuration.append_config_values(san.san_opts)
+ self.zfssa = None
+ self._stats = None
+
+ def do_setup(self, context):
+ if not self.configuration.nfs_oversub_ratio > 0:
+ msg = _("NFS config 'nfs_oversub_ratio' invalid. Must be > 0: "
+ "%s") % self.configuration.nfs_oversub_ratio
+ LOG.error(msg)
+ raise exception.NfsException(msg)
+
+        if not (0 < self.configuration.nfs_used_ratio <= 1):
+ msg = _("NFS config 'nfs_used_ratio' invalid. Must be > 0 "
+ "and <= 1.0: %s") % self.configuration.nfs_used_ratio
+ LOG.error(msg)
+ raise exception.NfsException(msg)
+
+ package = 'mount.nfs'
+ try:
+ self._execute(package, check_exit_code=False, run_as_root=True)
+ except OSError as exc:
+ if exc.errno == errno.ENOENT:
+ msg = _('%s is not installed') % package
+ raise exception.NfsException(msg)
+ else:
+ raise exc
+
+ lcfg = self.configuration
+ LOG.info(_LI('Connecting to host: %s.'), lcfg.san_ip)
+
+ host = lcfg.san_ip
+ user = lcfg.san_login
+ password = lcfg.san_password
+ https_port = lcfg.zfssa_https_port
+
+ credentials = ['san_ip', 'san_login', 'san_password', 'zfssa_data_ip']
+
+ for cred in credentials:
+ if not getattr(lcfg, cred, None):
+ exception_msg = _('%s not set in cinder.conf') % cred
+ LOG.error(exception_msg)
+ raise exception.CinderException(exception_msg)
+
+ self.zfssa = factory_zfssa()
+ self.zfssa.set_host(host, timeout=lcfg.zfssa_rest_timeout)
+
+ auth_str = base64.encodestring('%s:%s' % (user, password))[:-1]
+ self.zfssa.login(auth_str)
+
+ self.zfssa.create_project(lcfg.zfssa_nfs_pool, lcfg.zfssa_nfs_project,
+ compression=lcfg.zfssa_nfs_share_compression,
+ logbias=lcfg.zfssa_nfs_share_logbias)
+
+ share_args = {
+ 'sharedav': 'rw',
+ 'sharenfs': 'rw',
+ 'root_permissions': '777',
+ 'compression': lcfg.zfssa_nfs_share_compression,
+ 'logbias': lcfg.zfssa_nfs_share_logbias
+ }
+
+ self.zfssa.create_share(lcfg.zfssa_nfs_pool, lcfg.zfssa_nfs_project,
+ lcfg.zfssa_nfs_share, share_args)
+
+ share_details = self.zfssa.get_share(lcfg.zfssa_nfs_pool,
+ lcfg.zfssa_nfs_project,
+ lcfg.zfssa_nfs_share)
+
+ mountpoint = share_details['mountpoint']
+
+ self.mount_path = lcfg.zfssa_data_ip + ':' + mountpoint
+ https_path = 'https://' + lcfg.zfssa_data_ip + ':' + https_port + \
+ '/shares' + mountpoint
+
+ LOG.debug('NFS mount path: %s' % self.mount_path)
+ LOG.debug('WebDAV path to the share: %s' % https_path)
+
+ self.shares = {}
+ mnt_opts = self.configuration.zfssa_nfs_mount_options
+ self.shares[self.mount_path] = mnt_opts if len(mnt_opts) > 1 else None
+
+ # Initialize the WebDAV client
+ self.zfssa.set_webdav(https_path, auth_str)
+
+ # Edit http service so that WebDAV requests are always authenticated
+ args = {'https_port': https_port,
+ 'require_login': True}
+
+ self.zfssa.modify_service('http', args)
+ self.zfssa.enable_service('http')
+
+ def _ensure_shares_mounted(self):
+ try:
+ self._ensure_share_mounted(self.mount_path)
+ except Exception as exc:
+ LOG.error(_LE('Exception during mounting %s.') % exc)
+
+ self._mounted_shares = [self.mount_path]
+ LOG.debug('Available shares %s' % self._mounted_shares)
+
+ def check_for_setup_error(self):
+ """Check that driver can login.
+
+ Check also for properly configured pool, project and share
+ Check that the http and nfs services are enabled
+ """
+ lcfg = self.configuration
+
+ self.zfssa.verify_pool(lcfg.zfssa_nfs_pool)
+ self.zfssa.verify_project(lcfg.zfssa_nfs_pool, lcfg.zfssa_nfs_project)
+ self.zfssa.verify_share(lcfg.zfssa_nfs_pool, lcfg.zfssa_nfs_project,
+ lcfg.zfssa_nfs_share)
+ self.zfssa.verify_service('http')
+ self.zfssa.verify_service('nfs')
+
+ def create_snapshot(self, snapshot):
+ """Creates a snapshot of a volume."""
+ LOG.info(_LI('Creating snapshot: %s'), snapshot['name'])
+ lcfg = self.configuration
+ snap_name = self._create_snapshot_name()
+ self.zfssa.create_snapshot(lcfg.zfssa_nfs_pool, lcfg.zfssa_nfs_project,
+ lcfg.zfssa_nfs_share, snap_name)
+
+ src_file = snap_name + '/' + snapshot['volume_name']
+
+ try:
+            self.zfssa.create_snapshot_of_volume_file(
+                src_file=src_file, dst_file=snapshot['name'])
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ LOG.debug('Error thrown during snapshot: %s creation' %
+ snapshot['name'])
+ finally:
+ self.zfssa.delete_snapshot(lcfg.zfssa_nfs_pool,
+ lcfg.zfssa_nfs_project,
+ lcfg.zfssa_nfs_share, snap_name)
+
+ def delete_snapshot(self, snapshot):
+ """Deletes a snapshot."""
+ LOG.info(_LI('Deleting snapshot: %s'), snapshot['name'])
+ self.zfssa.delete_snapshot_of_volume_file(src_file=snapshot['name'])
+
+ def create_volume_from_snapshot(self, volume, snapshot, method='COPY'):
+        LOG.info(_LI('Creating volume from snapshot. volume: %s'),
+ volume['name'])
+ LOG.info(_LI('Source Snapshot: %s'), snapshot['name'])
+
+ self._ensure_shares_mounted()
+ self.zfssa.create_volume_from_snapshot_file(src_file=snapshot['name'],
+ dst_file=volume['name'],
+ method=method)
+
+ volume['provider_location'] = self.mount_path
+
+ if volume['size'] != snapshot['volume_size']:
+ try:
+ self.extend_volume(volume, volume['size'])
+ except Exception:
+ vol_path = self.local_path(volume)
+ exception_msg = (_('Error in extending volume size: '
+ 'Volume: %(volume)s '
+ 'Vol_Size: %(vol_size)d with '
+ 'Snapshot: %(snapshot)s '
+ 'Snap_Size: %(snap_size)d')
+ % {'volume': volume['name'],
+ 'vol_size': volume['size'],
+ 'snapshot': snapshot['name'],
+ 'snap_size': snapshot['volume_size']})
+ with excutils.save_and_reraise_exception():
+ LOG.error(exception_msg)
+ self._execute('rm', '-f', vol_path, run_as_root=True)
+
+ return {'provider_location': volume['provider_location']}
+
+ def create_cloned_volume(self, volume, src_vref):
+ """Creates a snapshot and then clones the snapshot into a volume."""
+ LOG.info(_LI('new cloned volume: %s'), volume['name'])
+ LOG.info(_LI('source volume for cloning: %s'), src_vref['name'])
+
+ snapshot = {'volume_name': src_vref['name'],
+ 'volume_id': src_vref['id'],
+ 'volume_size': src_vref['size'],
+ 'name': self._create_snapshot_name()}
+
+ self.create_snapshot(snapshot)
+ return self.create_volume_from_snapshot(volume, snapshot,
+ method='MOVE')
+
+ def _create_snapshot_name(self):
+ """Creates a snapshot name from the date and time."""
+
+ return ('cinder-zfssa-nfs-snapshot-%s' %
+ dt.datetime.utcnow().isoformat())
+
+ def _get_share_capacity_info(self):
+ """Get available and used capacity info for the NFS share."""
+ lcfg = self.configuration
+ share_details = self.zfssa.get_share(lcfg.zfssa_nfs_pool,
+ lcfg.zfssa_nfs_project,
+ lcfg.zfssa_nfs_share)
+
+ free = share_details['space_available']
+ used = share_details['space_total']
+ return free, used
+
+ def _update_volume_stats(self):
+ """Get volume stats from zfssa"""
+ self._ensure_shares_mounted()
+ data = {}
+ backend_name = self.configuration.safe_get('volume_backend_name')
+ data['volume_backend_name'] = backend_name or self.__class__.__name__
+ data['vendor_name'] = 'Oracle'
+ data['driver_version'] = self.VERSION
+ data['storage_protocol'] = self.protocol
+
+ free, used = self._get_share_capacity_info()
+ capacity = float(free) + float(used)
+ ratio_used = used / capacity
+
+ data['QoS_support'] = False
+ data['reserved_percentage'] = 0
+
+ if ratio_used > self.configuration.nfs_used_ratio or \
+ ratio_used >= self.configuration.nfs_oversub_ratio:
+ data['reserved_percentage'] = 100
+
+ data['total_capacity_gb'] = float(capacity) / units.Gi
+ data['free_capacity_gb'] = float(free) / units.Gi
+
+ self._stats = data
diff --git a/cinder/volume/drivers/zfssa/zfssarest.py b/cinder/volume/drivers/zfssa/zfssarest.py
new file mode 100644
index 000000000..f9ccf896a
--- /dev/null
+++ b/cinder/volume/drivers/zfssa/zfssarest.py
@@ -0,0 +1,894 @@
+# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+ZFS Storage Appliance Proxy
+"""
+import json
+
+from oslo_log import log
+
+from cinder import exception
+from cinder.i18n import _, _LE
+from cinder.volume.drivers.zfssa import restclient
+from cinder.volume.drivers.zfssa import webdavclient
+
+LOG = log.getLogger(__name__)
+
+
+class ZFSSAApi(object):
+ """ZFSSA API proxy class"""
+
+ def __init__(self):
+ self.host = None
+ self.url = None
+ self.rclient = None
+
+ def __del__(self):
+ if self.rclient and self.rclient.islogin():
+ self.rclient.logout()
+
+ def _is_pool_owned(self, pdata):
+ """returns True if the pool's owner is the
+ same as the host.
+ """
+ svc = '/api/system/v1/version'
+ ret = self.rclient.get(svc)
+ if ret.status != restclient.Status.OK:
+            exception_msg = (_('Error getting version: '
+                               'svc: %(svc)s. '
+ 'Return code: %(ret.status)d '
+ 'Message: %(ret.data)s.')
+ % {'svc': svc,
+ 'ret.status': ret.status,
+ 'ret.data': ret.data})
+ LOG.error(exception_msg)
+ raise exception.VolumeBackendAPIException(data=exception_msg)
+
+ vdata = json.loads(ret.data)
+ return vdata['version']['asn'] == pdata['pool']['asn'] and \
+ vdata['version']['nodename'] == pdata['pool']['owner']
+
+ def set_host(self, host, timeout=None):
+ self.host = host
+ self.url = "https://" + self.host + ":215"
+ self.rclient = restclient.RestClientURL(self.url, timeout=timeout)
+
+ def login(self, auth_str):
+ """Login to the appliance"""
+ if self.rclient and not self.rclient.islogin():
+ self.rclient.login(auth_str)
+
+ def get_pool_stats(self, pool):
+ """Get space available and total properties of a pool
+ returns (avail, total).
+ """
+ svc = '/api/storage/v1/pools/' + pool
+ ret = self.rclient.get(svc)
+ if ret.status != restclient.Status.OK:
+ exception_msg = (_('Error Getting Pool Stats: '
+ 'Pool: %(pool)s '
+ 'Return code: %(ret.status)d '
+ 'Message: %(ret.data)s.')
+ % {'pool': pool,
+ 'ret.status': ret.status,
+ 'ret.data': ret.data})
+ LOG.error(exception_msg)
+ raise exception.InvalidVolume(reason=exception_msg)
+
+ val = json.loads(ret.data)
+
+ if not self._is_pool_owned(val):
+ exception_msg = (_('Error Pool ownership: '
+ 'Pool %(pool)s is not owned '
+ 'by %(host)s.')
+ % {'pool': pool,
+ 'host': self.host})
+ LOG.error(exception_msg)
+ raise exception.InvalidInput(reason=pool)
+
+ avail = val['pool']['usage']['available']
+ total = val['pool']['usage']['total']
+
+ return avail, total
+
+ def create_project(self, pool, project, compression=None, logbias=None):
+ """Create a project on a pool
+ Check first whether the pool exists.
+ """
+ self.verify_pool(pool)
+ svc = '/api/storage/v1/pools/' + pool + '/projects/' + project
+ ret = self.rclient.get(svc)
+ if ret.status != restclient.Status.OK:
+ svc = '/api/storage/v1/pools/' + pool + '/projects'
+ arg = {
+ 'name': project
+ }
+ if compression and compression != '':
+ arg.update({'compression': compression})
+ if logbias and logbias != '':
+ arg.update({'logbias': logbias})
+
+ ret = self.rclient.post(svc, arg)
+ if ret.status != restclient.Status.CREATED:
+ exception_msg = (_('Error Creating Project: '
+ '%(project)s on '
+ 'Pool: %(pool)s '
+ 'Return code: %(ret.status)d '
+ 'Message: %(ret.data)s .')
+ % {'project': project,
+ 'pool': pool,
+ 'ret.status': ret.status,
+ 'ret.data': ret.data})
+ LOG.error(exception_msg)
+ raise exception.VolumeBackendAPIException(data=exception_msg)
+
+ def create_initiator(self, initiator, alias, chapuser=None,
+ chapsecret=None):
+ """Create an iSCSI initiator."""
+
+ svc = '/api/san/v1/iscsi/initiators/alias=' + alias
+ ret = self.rclient.get(svc)
+ if ret.status != restclient.Status.OK:
+ svc = '/api/san/v1/iscsi/initiators'
+ arg = {
+ 'initiator': initiator,
+ 'alias': alias
+ }
+ if chapuser and chapuser != '' and chapsecret and chapsecret != '':
+ arg.update({'chapuser': chapuser,
+ 'chapsecret': chapsecret})
+
+ ret = self.rclient.post(svc, arg)
+ if ret.status != restclient.Status.CREATED:
+ exception_msg = (_('Error Creating Initiator: '
+ '%(initiator)s on '
+ 'Alias: %(alias)s '
+ 'Return code: %(ret.status)d '
+ 'Message: %(ret.data)s .')
+ % {'initiator': initiator,
+ 'alias': alias,
+ 'ret.status': ret.status,
+ 'ret.data': ret.data})
+ LOG.error(exception_msg)
+ raise exception.VolumeBackendAPIException(data=exception_msg)
+
+ def add_to_initiatorgroup(self, initiator, initiatorgroup):
+ """Add an iSCSI initiator to initiatorgroup"""
+ svc = '/api/san/v1/iscsi/initiator-groups/' + initiatorgroup
+ ret = self.rclient.get(svc)
+ if ret.status != restclient.Status.OK:
+ svc = '/api/san/v1/iscsi/initiator-groups'
+ arg = {
+ 'name': initiatorgroup,
+ 'initiators': [initiator]
+ }
+ ret = self.rclient.post(svc, arg)
+ if ret.status != restclient.Status.CREATED:
+                exception_msg = (_('Error Adding Initiator: '
+                                   '%(initiator)s on group '
+ 'InitiatorGroup: %(initiatorgroup)s '
+ 'Return code: %(ret.status)d '
+ 'Message: %(ret.data)s .')
+ % {'initiator': initiator,
+ 'initiatorgroup': initiatorgroup,
+ 'ret.status': ret.status,
+ 'ret.data': ret.data})
+ LOG.error(exception_msg)
+ raise exception.VolumeBackendAPIException(data=exception_msg)
+ else:
+ val = json.loads(ret.data)
+ inits = val['group']['initiators']
+ if inits is None:
+ exception_msg = (_('Error Getting Initiators: '
+ 'InitiatorGroup: %(initiatorgroup)s '
+ 'Return code: %(ret.status)d '
+ 'Message: %(ret.data)s .')
+ % {'initiatorgroup': initiatorgroup,
+ 'ret.status': ret.status,
+ 'ret.data': ret.data})
+ LOG.error(exception_msg)
+ raise exception.VolumeBackendAPIException(data=exception_msg)
+
+ if initiator in inits:
+ return
+
+ inits.append(initiator)
+ svc = '/api/san/v1/iscsi/initiator-groups/' + initiatorgroup
+ arg = {
+ 'initiators': inits
+ }
+ ret = self.rclient.put(svc, arg)
+ if ret.status != restclient.Status.ACCEPTED:
+                exception_msg = (_('Error Adding Initiator: '
+                                   '%(initiator)s on group '
+ 'InitiatorGroup: %(initiatorgroup)s '
+ 'Return code: %(ret.status)d '
+ 'Message: %(ret.data)s .')
+ % {'initiator': initiator,
+ 'initiatorgroup': initiatorgroup,
+ 'ret.status': ret.status,
+ 'ret.data': ret.data})
+ LOG.error(exception_msg)
+ raise exception.VolumeBackendAPIException(data=exception_msg)
+
+ def create_target(self, alias, interfaces=None, tchapuser=None,
+ tchapsecret=None):
+ """Create an iSCSI target.
+ interfaces: an array with network interfaces
+ tchapuser, tchapsecret: target's chapuser and chapsecret
+ returns target iqn
+ """
+ svc = '/api/san/v1/iscsi/targets/alias=' + alias
+ ret = self.rclient.get(svc)
+ if ret.status != restclient.Status.OK:
+ svc = '/api/san/v1/iscsi/targets'
+ arg = {
+ 'alias': alias
+ }
+
+ if tchapuser and tchapuser != '' and tchapsecret and \
+ tchapsecret != '':
+ arg.update({'targetchapuser': tchapuser,
+ 'targetchapsecret': tchapsecret,
+ 'auth': 'chap'})
+
+ if interfaces is not None and len(interfaces) > 0:
+ arg.update({'interfaces': interfaces})
+
+ ret = self.rclient.post(svc, arg)
+ if ret.status != restclient.Status.CREATED:
+                exception_msg = (_('Error Creating Target: '
+                                   '%(alias)s '
+ 'Return code: %(ret.status)d '
+ 'Message: %(ret.data)s .')
+ % {'alias': alias,
+ 'ret.status': ret.status,
+ 'ret.data': ret.data})
+ LOG.error(exception_msg)
+ raise exception.VolumeBackendAPIException(data=exception_msg)
+
+ val = json.loads(ret.data)
+ return val['target']['iqn']
+
+ def get_target(self, alias):
+ """Get an iSCSI target iqn."""
+ svc = '/api/san/v1/iscsi/targets/alias=' + alias
+ ret = self.rclient.get(svc)
+ if ret.status != restclient.Status.OK:
+            exception_msg = (_('Error Getting Target: '
+                               '%(alias)s '
+ 'Return code: %(ret.status)d '
+ 'Message: %(ret.data)s .')
+ % {'alias': alias,
+ 'ret.status': ret.status,
+ 'ret.data': ret.data})
+ LOG.error(exception_msg)
+ raise exception.VolumeBackendAPIException(data=exception_msg)
+
+ val = json.loads(ret.data)
+ return val['target']['iqn']
+
+ def add_to_targetgroup(self, iqn, targetgroup):
+ """Add an iSCSI target to targetgroup."""
+ svc = '/api/san/v1/iscsi/target-groups/' + targetgroup
+ ret = self.rclient.get(svc)
+ if ret.status != restclient.Status.OK:
+ svccrt = '/api/san/v1/iscsi/target-groups'
+ arg = {
+ 'name': targetgroup,
+ 'targets': [iqn]
+ }
+
+ ret = self.rclient.post(svccrt, arg)
+ if ret.status != restclient.Status.CREATED:
+                exception_msg = (_('Error Creating TargetGroup: '
+                                   '%(targetgroup)s with '
+                                   'IQN: %(iqn)s '
+ 'Return code: %(ret.status)d '
+ 'Message: %(ret.data)s .')
+ % {'targetgroup': targetgroup,
+ 'iqn': iqn,
+ 'ret.status': ret.status,
+ 'ret.data': ret.data})
+ LOG.error(exception_msg)
+ raise exception.VolumeBackendAPIException(data=exception_msg)
+
+ return
+
+ arg = {
+ 'targets': [iqn]
+ }
+
+ ret = self.rclient.put(svc, arg)
+ if ret.status != restclient.Status.ACCEPTED:
+            exception_msg = (_('Error Adding to TargetGroup: '
+                               '%(targetgroup)s with '
+                               'IQN: %(iqn)s '
+ 'Return code: %(ret.status)d '
+ 'Message: %(ret.data)s.')
+ % {'targetgroup': targetgroup,
+ 'iqn': iqn,
+ 'ret.status': ret.status,
+ 'ret.data': ret.data})
+ LOG.error(exception_msg)
+ raise exception.VolumeBackendAPIException(data=exception_msg)
+
+ def verify_pool(self, pool):
+ """Checks whether pool exists."""
+ svc = '/api/storage/v1/pools/' + pool
+ ret = self.rclient.get(svc)
+ if ret.status != restclient.Status.OK:
+ exception_msg = (_('Error Verifying Pool: '
+ '%(pool)s '
+ 'Return code: %(ret.status)d '
+ 'Message: %(ret.data)s.')
+ % {'pool': pool,
+ 'ret.status': ret.status,
+ 'ret.data': ret.data})
+ LOG.error(exception_msg)
+ raise exception.VolumeBackendAPIException(data=exception_msg)
+
+ def verify_project(self, pool, project):
+ """Checks whether project exists."""
+ svc = '/api/storage/v1/pools/' + pool + '/projects/' + project
+ ret = self.rclient.get(svc)
+ if ret.status != restclient.Status.OK:
+ exception_msg = (_('Error Verifying '
+ 'Project: %(project)s on '
+ 'Pool: %(pool)s '
+ 'Return code: %(ret.status)d '
+ 'Message: %(ret.data)s.')
+ % {'project': project,
+ 'pool': pool,
+ 'ret.status': ret.status,
+ 'ret.data': ret.data})
+ LOG.error(exception_msg)
+ raise exception.VolumeBackendAPIException(data=exception_msg)
+
+ def verify_initiator(self, iqn):
+ """Check whether initiator iqn exists."""
+ svc = '/api/san/v1/iscsi/initiators/' + iqn
+ ret = self.rclient.get(svc)
+ if ret.status != restclient.Status.OK:
+            exception_msg = (_('Error Verifying '
+                               'Initiator: %(iqn)s '
+                               'Return code: %(ret.status)d '
+                               'Message: %(ret.data)s.')
+                             % {'iqn': iqn,
+ 'ret.status': ret.status,
+ 'ret.data': ret.data})
+ LOG.error(exception_msg)
+ raise exception.VolumeBackendAPIException(data=exception_msg)
+
+ def verify_target(self, alias):
+ """Check whether target alias exists."""
+ svc = '/api/san/v1/iscsi/targets/alias=' + alias
+ ret = self.rclient.get(svc)
+ if ret.status != restclient.Status.OK:
+ exception_msg = (_('Error Verifying '
+ 'Target: %(alias)s '
+ 'Return code: %(ret.status)d '
+ 'Message: %(ret.data)s.')
+ % {'alias': alias,
+ 'ret.status': ret.status,
+ 'ret.data': ret.data})
+ LOG.error(exception_msg)
+ raise exception.VolumeBackendAPIException(data=exception_msg)
+
+ def create_lun(self, pool, project, lun, volsize, targetgroup, specs):
+
+ """Create a LUN.
+ specs - contains volume properties (e.g blocksize, compression).
+ """
+ svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
+ project + '/luns'
+ arg = {
+ 'name': lun,
+ 'volsize': volsize,
+ 'targetgroup': targetgroup,
+ 'initiatorgroup': 'com.sun.ms.vss.hg.maskAll'
+ }
+ if specs:
+ arg.update(specs)
+
+ ret = self.rclient.post(svc, arg)
+ if ret.status != restclient.Status.CREATED:
+ exception_msg = (_('Error Creating '
+ 'Volume: %(lun)s '
+ 'Size: %(size)s '
+ 'Return code: %(ret.status)d '
+ 'Message: %(ret.data)s.')
+ % {'lun': lun,
+ 'size': volsize,
+ 'ret.status': ret.status,
+ 'ret.data': ret.data})
+ LOG.error(exception_msg)
+ raise exception.VolumeBackendAPIException(data=exception_msg)
+
+ def get_lun(self, pool, project, lun):
+ """return iscsi lun properties."""
+ svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
+ project + "/luns/" + lun
+ ret = self.rclient.get(svc)
+ if ret.status != restclient.Status.OK:
+ exception_msg = (_('Error Getting '
+ 'Volume: %(lun)s on '
+ 'Pool: %(pool)s '
+ 'Project: %(project)s '
+ 'Return code: %(ret.status)d '
+ 'Message: %(ret.data)s.')
+ % {'lun': lun,
+ 'pool': pool,
+ 'project': project,
+ 'ret.status': ret.status,
+ 'ret.data': ret.data})
+ LOG.error(exception_msg)
+ raise exception.VolumeBackendAPIException(data=exception_msg)
+
+ val = json.loads(ret.data)
+ ret = {
+ 'guid': val['lun']['lunguid'],
+ 'number': val['lun']['assignednumber'],
+ 'initiatorgroup': val['lun']['initiatorgroup'],
+ 'size': val['lun']['volsize'],
+ 'nodestroy': val['lun']['nodestroy']
+ }
+ if 'origin' in val['lun']:
+ ret.update({'origin': val['lun']['origin']})
+ if isinstance(ret['number'], list):
+ ret['number'] = ret['number'][0]
+
+ return ret
+
+ def set_lun_initiatorgroup(self, pool, project, lun, initiatorgroup):
+ """Set the initiatorgroup property of a LUN."""
+ if initiatorgroup == '':
+ initiatorgroup = 'com.sun.ms.vss.hg.maskAll'
+
+ svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
+ project + '/luns/' + lun
+ arg = {
+ 'initiatorgroup': initiatorgroup
+ }
+
+ ret = self.rclient.put(svc, arg)
+ if ret.status != restclient.Status.ACCEPTED:
+ exception_msg = (_('Error Setting '
+ 'Volume: %(lun)s to '
+ 'InitiatorGroup: %(initiatorgroup)s '
+ 'Pool: %(pool)s '
+ 'Project: %(project)s '
+ 'Return code: %(ret.status)d '
+ 'Message: %(ret.data)s.')
+ % {'lun': lun,
+ 'initiatorgroup': initiatorgroup,
+ 'pool': pool,
+ 'project': project,
+ 'ret.status': ret.status,
+ 'ret.data': ret.data})
+ LOG.error(exception_msg)
+
+ def delete_lun(self, pool, project, lun):
+ """delete iscsi lun."""
+ svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
+ project + '/luns/' + lun
+
+ ret = self.rclient.delete(svc)
+ if ret.status != restclient.Status.NO_CONTENT:
+ exception_msg = (_('Error Deleting '
+ 'Volume: %(lun)s to '
+ 'Pool: %(pool)s '
+ 'Project: %(project)s '
+ 'Return code: %(ret.status)d '
+ 'Message: %(ret.data)s.')
+ % {'lun': lun,
+ 'pool': pool,
+ 'project': project,
+ 'ret.status': ret.status,
+ 'ret.data': ret.data})
+ LOG.error(exception_msg)
+
+ def create_snapshot(self, pool, project, lun, snapshot):
+ """create snapshot."""
+ svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
+ project + '/luns/' + lun + '/snapshots'
+ arg = {
+ 'name': snapshot
+ }
+
+ ret = self.rclient.post(svc, arg)
+ if ret.status != restclient.Status.CREATED:
+            exception_msg = (_('Error Creating '
+                               'Snapshot: %(snapshot)s on '
+ 'Volume: %(lun)s to '
+ 'Pool: %(pool)s '
+ 'Project: %(project)s '
+ 'Return code: %(ret.status)d '
+ 'Message: %(ret.data)s.')
+ % {'snapshot': snapshot,
+ 'lun': lun,
+ 'pool': pool,
+ 'project': project,
+ 'ret.status': ret.status,
+ 'ret.data': ret.data})
+ LOG.error(exception_msg)
+ raise exception.VolumeBackendAPIException(data=exception_msg)
+
+ def delete_snapshot(self, pool, project, lun, snapshot):
+ """delete snapshot."""
+ svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
+ project + '/luns/' + lun + '/snapshots/' + snapshot
+
+ ret = self.rclient.delete(svc)
+ if ret.status != restclient.Status.NO_CONTENT:
+ exception_msg = (_('Error Deleting '
+ 'Snapshot: %(snapshot)s on '
+ 'Volume: %(lun)s to '
+ 'Pool: %(pool)s '
+ 'Project: %(project)s '
+ 'Return code: %(ret.status)d '
+ 'Message: %(ret.data)s.')
+ % {'snapshot': snapshot,
+ 'lun': lun,
+ 'pool': pool,
+ 'project': project,
+ 'ret.status': ret.status,
+ 'ret.data': ret.data})
+ LOG.error(exception_msg)
+ raise exception.VolumeBackendAPIException(data=exception_msg)
+
+ def clone_snapshot(self, pool, project, lun, snapshot, clone):
+ """clone snapshot."""
+ svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
+ project + '/luns/' + lun + '/snapshots/' + snapshot + '/clone'
+ arg = {
+ 'project': project,
+ 'share': clone,
+ 'nodestroy': True
+ }
+
+ ret = self.rclient.put(svc, arg)
+ if ret.status != restclient.Status.CREATED:
+ exception_msg = (_('Error Cloning '
+ 'Snapshot: %(snapshot)s on '
+ 'Volume: %(lun)s of '
+ 'Pool: %(pool)s '
+ 'Project: %(project)s '
+ 'Return code: %(ret.status)d '
+ 'Message: %(ret.data)s.')
+ % {'snapshot': snapshot,
+ 'lun': lun,
+ 'pool': pool,
+ 'project': project,
+ 'ret.status': ret.status,
+ 'ret.data': ret.data})
+ LOG.error(exception_msg)
+ raise exception.VolumeBackendAPIException(data=exception_msg)
+
+ def set_lun_props(self, pool, project, lun, **kargs):
+ """set lun properties."""
+ svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
+ project + '/luns/' + lun
+ if kargs is None:
+ return
+
+ ret = self.rclient.put(svc, kargs)
+ if ret.status != restclient.Status.ACCEPTED:
+ exception_msg = (_('Error Setting props '
+ 'Props: %(props)s on '
+ 'Volume: %(lun)s of '
+ 'Pool: %(pool)s '
+ 'Project: %(project)s '
+ 'Return code: %(ret.status)d '
+ 'Message: %(ret.data)s.')
+ % {'props': kargs,
+ 'lun': lun,
+ 'pool': pool,
+ 'project': project,
+ 'ret.status': ret.status,
+ 'ret.data': ret.data})
+ LOG.error(exception_msg)
+ raise exception.VolumeBackendAPIException(data=exception_msg)
+
+ def has_clones(self, pool, project, lun, snapshot):
+ """Checks whether snapshot has clones or not."""
+ svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
+ project + '/luns/' + lun + '/snapshots/' + snapshot
+
+ ret = self.rclient.get(svc)
+ if ret.status != restclient.Status.OK:
+ exception_msg = (_('Error Getting '
+ 'Snapshot: %(snapshot)s on '
+ 'Volume: %(lun)s to '
+ 'Pool: %(pool)s '
+ 'Project: %(project)s '
+ 'Return code: %(ret.status)d '
+ 'Message: %(ret.data)s.')
+ % {'snapshot': snapshot,
+ 'lun': lun,
+ 'pool': pool,
+ 'project': project,
+ 'ret.status': ret.status,
+ 'ret.data': ret.data})
+ LOG.error(exception_msg)
+ raise exception.VolumeBackendAPIException(data=exception_msg)
+
+ val = json.loads(ret.data)
+ return val['snapshot']['numclones'] != 0
+
+ def get_initiator_initiatorgroup(self, initiator):
+ """Returns the initiator group of the initiator."""
+ groups = []
+ svc = "/api/san/v1/iscsi/initiator-groups"
+ ret = self.rclient.get(svc)
+ if ret.status != restclient.Status.OK:
+ LOG.error(_LE('Error getting initiator groups.'))
+ exception_msg = (_('Error getting initiator groups.'))
+ raise exception.VolumeBackendAPIException(data=exception_msg)
+ val = json.loads(ret.data)
+ for initiator_group in val['groups']:
+ if initiator in initiator_group['initiators']:
+ groups.append(initiator_group["name"])
+ if len(groups) == 0:
+ LOG.debug("Initiator group not found. Attaching volume to "
+ "default initiator group.")
+ groups.append('default')
+ return groups
+
+
+class ZFSSANfsApi(ZFSSAApi):
+ """ZFSSA API proxy class for NFS driver"""
+ projects_path = '/api/storage/v1/pools/%s/projects'
+ project_path = projects_path + '/%s'
+
+ shares_path = project_path + '/filesystems'
+ share_path = shares_path + '/%s'
+ share_snapshots_path = share_path + '/snapshots'
+ share_snapshot_path = share_snapshots_path + '/%s'
+
+ services_path = '/api/service/v1/services/'
+
+ def __init__(self, *args, **kwargs):
+ super(ZFSSANfsApi, self).__init__(*args, **kwargs)
+ self.webdavclient = None
+
+ def set_webdav(self, https_path, auth_str):
+ self.webdavclient = webdavclient.ZFSSAWebDAVClient(https_path,
+ auth_str)
+
+ def verify_share(self, pool, project, share):
+ """Checks whether the share exists"""
+ svc = self.share_path % (pool, project, share)
+ ret = self.rclient.get(svc)
+ if ret.status != restclient.Status.OK:
+ exception_msg = (_('Error Verifying '
+ 'share: %(share)s on '
+ 'Project: %(project)s and '
+ 'Pool: %(pool)s '
+ 'Return code: %(ret.status)d '
+ 'Message: %(ret.data)s.')
+ % {'share': share,
+ 'project': project,
+ 'pool': pool,
+ 'ret.status': ret.status,
+ 'ret.data': ret.data})
+ LOG.error(exception_msg)
+ raise exception.VolumeBackendAPIException(data=exception_msg)
+
+ def create_snapshot(self, pool, project, share, snapshot):
+ """create snapshot of a share"""
+ svc = self.share_snapshots_path % (pool, project, share)
+
+ arg = {
+ 'name': snapshot
+ }
+
+ ret = self.rclient.post(svc, arg)
+ if ret.status != restclient.Status.CREATED:
+            exception_msg = (_('Error Creating '
+                               'Snapshot: %(snapshot)s on '
+ 'share: %(share)s to '
+ 'Pool: %(pool)s '
+ 'Project: %(project)s '
+ 'Return code: %(ret.status)d '
+ 'Message: %(ret.data)s.')
+ % {'snapshot': snapshot,
+ 'share': share,
+ 'pool': pool,
+ 'project': project,
+ 'ret.status': ret.status,
+ 'ret.data': ret.data})
+ LOG.error(exception_msg)
+ raise exception.VolumeBackendAPIException(data=exception_msg)
+
+ def delete_snapshot(self, pool, project, share, snapshot):
+ """delete snapshot of a share"""
+ svc = self.share_snapshot_path % (pool, project, share, snapshot)
+
+ ret = self.rclient.delete(svc)
+ if ret.status != restclient.Status.NO_CONTENT:
+ exception_msg = (_('Error Deleting '
+ 'Snapshot: %(snapshot)s on '
+ 'Share: %(share)s to '
+ 'Pool: %(pool)s '
+ 'Project: %(project)s '
+ 'Return code: %(ret.status)d '
+ 'Message: %(ret.data)s.')
+ % {'snapshot': snapshot,
+ 'share': share,
+ 'pool': pool,
+ 'project': project,
+ 'ret.status': ret.status,
+ 'ret.data': ret.data})
+ LOG.error(exception_msg)
+ raise exception.VolumeBackendAPIException(data=exception_msg)
+
+ def create_snapshot_of_volume_file(self, src_file="", dst_file=""):
+ src_file = '.zfs/snapshot/' + src_file
+ return self.webdavclient.request(src_file=src_file, dst_file=dst_file,
+ method='COPY')
+
+ def delete_snapshot_of_volume_file(self, src_file=""):
+ return self.webdavclient.request(src_file=src_file, method='DELETE')
+
+ def create_volume_from_snapshot_file(self, src_file="", dst_file="",
+ method='COPY'):
+ return self.webdavclient.request(src_file=src_file, dst_file=dst_file,
+ method=method)
+
+ def _change_service_state(self, service, state=''):
+ svc = self.services_path + service + '/' + state
+ ret = self.rclient.put(svc)
+ if ret.status != restclient.Status.ACCEPTED:
+ exception_msg = (_('Error Verifying '
+ 'Service: %(service)s '
+ 'Return code: %(ret.status)d '
+ 'Message: %(ret.data)s.')
+ % {'service': service,
+ 'ret.status': ret.status,
+ 'ret.data': ret.data})
+
+ LOG.error(exception_msg)
+ raise exception.VolumeBackendAPIException(data=exception_msg)
+ data = json.loads(ret.data)['service']
+ LOG.debug('%s service state: %s' % (service, data))
+
+ status = 'online' if state == 'enable' else 'disabled'
+
+ if data['<status>'] != status:
+ exception_msg = (_('%(service)s Service is not %(status)s '
+ 'on storage appliance: %(host)s')
+ % {'service': service,
+ 'status': status,
+ 'host': self.host})
+ LOG.error(exception_msg)
+ raise exception.VolumeBackendAPIException(data=exception_msg)
+
+ def enable_service(self, service):
+ self._change_service_state(service, state='enable')
+ self.verify_service(service)
+
+ def disable_service(self, service):
+ self._change_service_state(service, state='disable')
+ self.verify_service(service, status='offline')
+
+ def verify_service(self, service, status='online'):
+ """Checks whether a service is online or not"""
+ svc = self.services_path + service
+ ret = self.rclient.get(svc)
+
+ if ret.status != restclient.Status.OK:
+ exception_msg = (_('Error Verifying '
+ 'Service: %(service)s '
+ 'Return code: %(ret.status)d '
+ 'Message: %(ret.data)s.')
+ % {'service': service,
+ 'ret.status': ret.status,
+ 'ret.data': ret.data})
+
+ LOG.error(exception_msg)
+ raise exception.VolumeBackendAPIException(data=exception_msg)
+
+ data = json.loads(ret.data)['service']
+
+ if data['<status>'] != status:
+ exception_msg = (_('%(service)s Service is not %(status)s '
+ 'on storage appliance: %(host)s')
+ % {'service': service,
+ 'status': status,
+ 'host': self.host})
+ LOG.error(exception_msg)
+ raise exception.VolumeBackendAPIException(data=exception_msg)
+
+ def modify_service(self, service, edit_args=None):
+ """Edit service properties"""
+ if edit_args is None:
+ edit_args = {}
+
+ svc = self.services_path + service
+
+ ret = self.rclient.put(svc, edit_args)
+
+ if ret.status != restclient.Status.ACCEPTED:
+ exception_msg = (_('Error modifying '
+ 'Service: %(service)s '
+ 'Return code: %(ret.status)d '
+ 'Message: %(ret.data)s.')
+ % {'service': service,
+ 'ret.status': ret.status,
+ 'ret.data': ret.data})
+
+ LOG.error(exception_msg)
+ raise exception.VolumeBackendAPIException(data=exception_msg)
+ data = json.loads(ret.data)['service']
+ LOG.debug('Modify %(service)s service '
+ 'return data: %(data)s'
+ % {'service': service,
+ 'data': data})
+
+ def create_share(self, pool, project, share, args):
+ """Create a share in the specified pool and project"""
+ svc = self.share_path % (pool, project, share)
+ ret = self.rclient.get(svc)
+ if ret.status != restclient.Status.OK:
+ svc = self.shares_path % (pool, project)
+ args.update({'name': share})
+ ret = self.rclient.post(svc, args)
+ if ret.status != restclient.Status.CREATED:
+ exception_msg = (_('Error Creating '
+ 'Share: %(name)s '
+ 'Return code: %(ret.status)d '
+ 'Message: %(ret.data)s.')
+ % {'name': share,
+ 'ret.status': ret.status,
+ 'ret.data': ret.data})
+ LOG.error(exception_msg)
+ raise exception.VolumeBackendAPIException(data=exception_msg)
+ else:
+ LOG.debug('Editing properties of a pre-existing share')
+ ret = self.rclient.put(svc, args)
+ if ret.status != restclient.Status.ACCEPTED:
+ exception_msg = (_('Error editing share: '
+ '%(share)s on '
+ 'Pool: %(pool)s '
+ 'Return code: %(ret.status)d '
+ 'Message: %(ret.data)s .')
+ % {'share': share,
+ 'pool': pool,
+ 'ret.status': ret.status,
+ 'ret.data': ret.data})
+ LOG.error(exception_msg)
+ raise exception.VolumeBackendAPIException(data=exception_msg)
+
+ def get_share(self, pool, project, share):
+ """return share properties"""
+ svc = self.share_path % (pool, project, share)
+ ret = self.rclient.get(svc)
+ if ret.status != restclient.Status.OK:
+ exception_msg = (_('Error Getting '
+ 'Share: %(share)s on '
+ 'Pool: %(pool)s '
+ 'Project: %(project)s '
+ 'Return code: %(ret.status)d '
+ 'Message: %(ret.data)s.')
+ % {'share': share,
+ 'pool': pool,
+ 'project': project,
+ 'ret.status': ret.status,
+ 'ret.data': ret.data})
+ LOG.error(exception_msg)
+ raise exception.VolumeBackendAPIException(data=exception_msg)
+
+ val = json.loads(ret.data)
+ return val['filesystem']
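
Taken together, the NFS driver's do_setup() drives the proxy above roughly as in the following condensed sketch (host, credentials, pool and share names are placeholders; the defaults mirror the zfssa_nfs_* options in zfssanfs.py):

    import base64

    from cinder.volume.drivers.zfssa import zfssarest

    auth_str = base64.encodestring('admin:password')[:-1]

    zfssa = zfssarest.ZFSSANfsApi()
    zfssa.set_host('zfssa.example.com', timeout=60)
    zfssa.login(auth_str)

    zfssa.create_project('pool-0', 'NFSProject',
                         compression='off', logbias='latency')
    zfssa.create_share('pool-0', 'NFSProject', 'nfs_share',
                       {'sharedav': 'rw', 'sharenfs': 'rw',
                        'root_permissions': '777'})

    # The share's mountpoint is exported over NFS and, via WebDAV, over HTTPS.
    mountpoint = zfssa.get_share('pool-0', 'NFSProject',
                                 'nfs_share')['mountpoint']
    zfssa.set_webdav('https://zfssa.example.com:443/shares' + mountpoint,
                     auth_str)
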