author     David Disseldorp <ddiss@samba.org>    2018-05-09 16:51:34 +0200
committer  Karolin Seeger <kseeger@samba.org>    2018-05-14 09:58:19 +0200
commit     139743ec1ed9078be0891e416c536305b6dd1b33 (patch)
tree       3396e22a99433bf479d58c884cbd3cf4bd23c497 /source3
parent     bf466d26a9d48e19caa887c3c15781739c2d120d (diff)
download   samba-139743ec1ed9078be0891e416c536305b6dd1b33.tar.gz
vfs_ceph: add fake async pwrite/pread send/recv hooks
As found by Jeremy, VFS modules that don't provide pread_send() or
pwrite_send() hooks result in vfs_default fallback, which is catastrophic
for VFS modules with non-mounted filesystems such as vfs_ceph.

Bug: https://bugzilla.samba.org/show_bug.cgi?id=13425

Reported-by: Jeremy Allison <jra@samba.org>
Signed-off-by: David Disseldorp <ddiss@samba.org>
Reviewed-by: Jeremy Allison <jra@samba.org>

(cherry picked from commit f0e6453b0420fe9d062936d4ddc05f44b40cf2ba)
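For readers unfamiliar with the tevent "fake async" idiom the hunks below rely
on, here is a minimal standalone sketch of the same shape. It is not part of
the patch: it wraps plain pread(2) rather than libcephfs, the names
fake_pread_send()/fake_pread_recv() are made up, and it uses the core tevent
error helpers instead of Samba's vfs_aio_state plumbing.

#include <errno.h>
#include <stdint.h>
#include <unistd.h>
#include <talloc.h>
#include <tevent.h>

struct fake_pread_state {
	ssize_t nread;
};

/* "Send" does the I/O synchronously, then marks the request done. */
static struct tevent_req *fake_pread_send(TALLOC_CTX *mem_ctx,
					  struct tevent_context *ev,
					  int fd, void *buf,
					  size_t n, off_t offset)
{
	struct tevent_req *req = NULL;
	struct fake_pread_state *state = NULL;

	req = tevent_req_create(mem_ctx, &state, struct fake_pread_state);
	if (req == NULL) {
		return NULL;
	}

	state->nread = pread(fd, buf, n, offset);
	if (state->nread == -1) {
		/* pread(2) sets errno directly, unlike ceph_read(). */
		tevent_req_error(req, errno);
		return tevent_req_post(req, ev);
	}

	tevent_req_done(req);
	/* Post so the caller's callback fires from the event loop. */
	return tevent_req_post(req, ev);
}

static ssize_t fake_pread_recv(struct tevent_req *req, int *perrno)
{
	struct fake_pread_state *state =
		tevent_req_data(req, struct fake_pread_state);
	enum tevent_req_state req_state;
	uint64_t error;

	if (tevent_req_is_error(req, &req_state, &error)) {
		*perrno = (int)error;
		return -1;
	}
	return state->nread;
}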
Diffstat (limited to 'source3')
-rw-r--r--  source3/modules/vfs_ceph.c | 109
1 file changed, 108 insertions(+), 1 deletion(-)
diff --git a/source3/modules/vfs_ceph.c b/source3/modules/vfs_ceph.c
index 87d3c005c61..a27b76293f5 100644
--- a/source3/modules/vfs_ceph.c
+++ b/source3/modules/vfs_ceph.c
@@ -482,6 +482,57 @@ static ssize_t cephwrap_pread(struct vfs_handle_struct *handle, files_struct *fs
WRAP_RETURN(result);
}
+struct cephwrap_pread_state {
+ ssize_t bytes_read;
+ struct vfs_aio_state vfs_aio_state;
+};
+
+/*
+ * Fake up an async ceph read by calling the synchronous API.
+ */
+static struct tevent_req *cephwrap_pread_send(struct vfs_handle_struct *handle,
+ TALLOC_CTX *mem_ctx,
+ struct tevent_context *ev,
+ struct files_struct *fsp,
+ void *data,
+ size_t n, off_t offset)
+{
+ struct tevent_req *req = NULL;
+ struct cephwrap_pread_state *state = NULL;
+ int ret = -1;
+
+ DBG_DEBUG("[CEPH] %s\n", __func__);
+ req = tevent_req_create(mem_ctx, &state, struct cephwrap_pread_state);
+ if (req == NULL) {
+ return NULL;
+ }
+
+ ret = ceph_read(handle->data, fsp->fh->fd, data, n, offset);
+ if (ret < 0) {
+ /* ceph returns -errno on error. */
+ tevent_req_error(req, -ret);
+ return tevent_req_post(req, ev);
+ }
+
+ state->bytes_read = ret;
+ tevent_req_done(req);
+ /* Return and schedule the completion of the call. */
+ return tevent_req_post(req, ev);
+}
+
+static ssize_t cephwrap_pread_recv(struct tevent_req *req,
+ struct vfs_aio_state *vfs_aio_state)
+{
+ struct cephwrap_pread_state *state =
+ tevent_req_data(req, struct cephwrap_pread_state);
+
+ DBG_DEBUG("[CEPH] %s\n", __func__);
+ if (tevent_req_is_unix_error(req, &vfs_aio_state->error)) {
+ return -1;
+ }
+ *vfs_aio_state = state->vfs_aio_state;
+ return state->bytes_read;
+}
static ssize_t cephwrap_write(struct vfs_handle_struct *handle, files_struct *fsp, const void *data, size_t n)
{
@@ -510,6 +561,58 @@ static ssize_t cephwrap_pwrite(struct vfs_handle_struct *handle, files_struct *f
WRAP_RETURN(result);
}
+struct cephwrap_pwrite_state {
+ ssize_t bytes_written;
+ struct vfs_aio_state vfs_aio_state;
+};
+
+/*
+ * Fake up an async ceph write by calling the synchronous API.
+ */
+static struct tevent_req *cephwrap_pwrite_send(struct vfs_handle_struct *handle,
+ TALLOC_CTX *mem_ctx,
+ struct tevent_context *ev,
+ struct files_struct *fsp,
+ const void *data,
+ size_t n, off_t offset)
+{
+ struct tevent_req *req = NULL;
+ struct cephwrap_pwrite_state *state = NULL;
+ int ret = -1;
+
+ DBG_DEBUG("[CEPH] %s\n", __func__);
+ req = tevent_req_create(mem_ctx, &state, struct cephwrap_pwrite_state);
+ if (req == NULL) {
+ return NULL;
+ }
+
+ ret = ceph_write(handle->data, fsp->fh->fd, data, n, offset);
+ if (ret < 0) {
+ /* ceph returns -errno on error. */
+ tevent_req_error(req, -ret);
+ return tevent_req_post(req, ev);
+ }
+
+ state->bytes_written = ret;
+ tevent_req_done(req);
+ /* Return and schedule the completion of the call. */
+ return tevent_req_post(req, ev);
+}
+
+static ssize_t cephwrap_pwrite_recv(struct tevent_req *req,
+ struct vfs_aio_state *vfs_aio_state)
+{
+ struct cephwrap_pwrite_state *state =
+ tevent_req_data(req, struct cephwrap_pwrite_state);
+
+ DBG_DEBUG("[CEPH] %s\n", __func__);
+ if (tevent_req_is_unix_error(req, &vfs_aio_state->error)) {
+ return -1;
+ }
+ *vfs_aio_state = state->vfs_aio_state;
+ return state->bytes_written;
+}
+
static off_t cephwrap_lseek(struct vfs_handle_struct *handle, files_struct *fsp, off_t offset, int whence)
{
off_t result = 0;
@@ -571,7 +674,7 @@ static int cephwrap_fsync(struct vfs_handle_struct *handle, files_struct *fsp)
}
/*
- * Fake up an async ceph fsync by calling the sychronous API.
+ * Fake up an async ceph fsync by calling the synchronous API.
*/
static struct tevent_req *cephwrap_fsync_send(struct vfs_handle_struct *handle,
@@ -1484,8 +1587,12 @@ static struct vfs_fn_pointers ceph_fns = {
.close_fn = cephwrap_close,
.read_fn = cephwrap_read,
.pread_fn = cephwrap_pread,
+ .pread_send_fn = cephwrap_pread_send,
+ .pread_recv_fn = cephwrap_pread_recv,
.write_fn = cephwrap_write,
.pwrite_fn = cephwrap_pwrite,
+ .pwrite_send_fn = cephwrap_pwrite_send,
+ .pwrite_recv_fn = cephwrap_pwrite_recv,
.lseek_fn = cephwrap_lseek,
.sendfile_fn = cephwrap_sendfile,
.recvfile_fn = cephwrap_recvfile,
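Continuing the hypothetical sketch above (illustrative only; the test file
path and names are made up, and the driver would sit in the same file as the
sketch), a caller drives such a request through a tevent loop. This is why the
hooks end with tevent_req_post() instead of invoking the callback directly:
completion is delivered from the event loop on the next iteration, preserving
ordinary tevent semantics even though the I/O already happened inside _send().

#include <fcntl.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	TALLOC_CTX *mem_ctx = talloc_new(NULL);
	struct tevent_context *ev = tevent_context_init(mem_ctx);
	struct tevent_req *req = NULL;
	char buf[128];
	ssize_t nread;
	int fd, err = 0;

	fd = open("/etc/hostname", O_RDONLY);
	if (ev == NULL || fd == -1) {
		return 1;
	}

	/* The read happens synchronously inside fake_pread_send(). */
	req = fake_pread_send(mem_ctx, ev, fd, buf, sizeof(buf), 0);
	if (req == NULL) {
		return 1;
	}

	/* Run the loop until the posted completion fires. */
	if (!tevent_req_poll(req, ev)) {
		return 1;
	}

	nread = fake_pread_recv(req, &err);
	if (nread == -1) {
		fprintf(stderr, "fake pread failed: %s\n", strerror(err));
		return 1;
	}
	printf("read %zd bytes\n", nread);

	close(fd);
	talloc_free(mem_ctx);
	return 0;
}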