Diffstat (limited to 'fs'): 47 files changed, 752 insertions, 499 deletions
diff --git a/fs/afs/afs.h b/fs/afs/afs.h index 432cb4b23961..81815724db6c 100644 --- a/fs/afs/afs.h +++ b/fs/afs/afs.h @@ -19,8 +19,8 @@ #define AFSPATHMAX 1024 /* Maximum length of a pathname plus NUL */ #define AFSOPAQUEMAX 1024 /* Maximum length of an opaque field */ -#define AFS_VL_MAX_LIFESPAN (120 * HZ) -#define AFS_PROBE_MAX_LIFESPAN (30 * HZ) +#define AFS_VL_MAX_LIFESPAN 120 +#define AFS_PROBE_MAX_LIFESPAN 30 typedef u64 afs_volid_t; typedef u64 afs_vnodeid_t; diff --git a/fs/afs/internal.h b/fs/afs/internal.h index ad8523d0d038..68ae91d21b57 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -128,7 +128,7 @@ struct afs_call { spinlock_t state_lock; int error; /* error code */ u32 abort_code; /* Remote abort ID or 0 */ - unsigned int max_lifespan; /* Maximum lifespan to set if not 0 */ + unsigned int max_lifespan; /* Maximum lifespan in secs to set if not 0 */ unsigned request_size; /* size of request data */ unsigned reply_max; /* maximum size of reply */ unsigned count2; /* count used in unmarshalling */ diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index 7817e2b860e5..6862e3dde364 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -334,7 +334,9 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp) /* create a call */ rxcall = rxrpc_kernel_begin_call(call->net->socket, srx, call->key, (unsigned long)call, - tx_total_len, gfp, + tx_total_len, + call->max_lifespan, + gfp, (call->async ? afs_wake_up_async_call : afs_wake_up_call_waiter), @@ -349,10 +351,6 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp) } call->rxcall = rxcall; - - if (call->max_lifespan) - rxrpc_kernel_set_max_life(call->net->socket, rxcall, - call->max_lifespan); call->issue_time = ktime_get_real(); /* send the request */ diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index e54f0884802a..79336fa853db 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c @@ -45,7 +45,8 @@ static int check_extent_in_eb(struct btrfs_backref_walk_ctx *ctx, int root_count; bool cached; - if (!btrfs_file_extent_compression(eb, fi) && + if (!ctx->ignore_extent_item_pos && + !btrfs_file_extent_compression(eb, fi) && !btrfs_file_extent_encryption(eb, fi) && !btrfs_file_extent_other_encoding(eb, fi)) { u64 data_offset; @@ -552,7 +553,7 @@ static int add_all_parents(struct btrfs_backref_walk_ctx *ctx, count++; else goto next; - if (!ctx->ignore_extent_item_pos) { + if (!ctx->skip_inode_ref_list) { ret = check_extent_in_eb(ctx, &key, eb, fi, &eie); if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP || ret < 0) @@ -564,7 +565,7 @@ static int add_all_parents(struct btrfs_backref_walk_ctx *ctx, eie, (void **)&old, GFP_NOFS); if (ret < 0) break; - if (!ret && !ctx->ignore_extent_item_pos) { + if (!ret && !ctx->skip_inode_ref_list) { while (old->next) old = old->next; old->next = eie; @@ -1606,7 +1607,7 @@ again: goto out; } if (ref->count && ref->parent) { - if (!ctx->ignore_extent_item_pos && !ref->inode_list && + if (!ctx->skip_inode_ref_list && !ref->inode_list && ref->level == 0) { struct btrfs_tree_parent_check check = { 0 }; struct extent_buffer *eb; @@ -1647,7 +1648,7 @@ again: (void **)&eie, GFP_NOFS); if (ret < 0) goto out; - if (!ret && !ctx->ignore_extent_item_pos) { + if (!ret && !ctx->skip_inode_ref_list) { /* * We've recorded that parent, so we must extend * its inode list here. 
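A note on the fs/afs hunks above: AFS_VL_MAX_LIFESPAN and AFS_PROBE_MAX_LIFESPAN drop their "* HZ" factor because the limit is now expressed in seconds and handed to rxrpc_kernel_begin_call() when the call is created, rather than being converted from jiffies afterwards via rxrpc_kernel_set_max_life(). A minimal user-space sketch of the unit change follows; the HZ value is an assumption for illustration (the real tick rate is a kernel config option):

#include <stdio.h>

#define HZ 250	/* assumed tick rate, for illustration only */

/* old representation: a 120 second limit expressed in jiffies */
#define AFS_VL_MAX_LIFESPAN_OLD	(120 * HZ)
/* new representation: the same limit in seconds, as rxrpc now expects */
#define AFS_VL_MAX_LIFESPAN_NEW	120

int main(void)
{
	printf("old value (jiffies at HZ=%d): %d\n", HZ, AFS_VL_MAX_LIFESPAN_OLD);
	printf("new value (seconds): %d\n", AFS_VL_MAX_LIFESPAN_NEW);
	return 0;
}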
@@ -1743,7 +1744,7 @@ int btrfs_find_all_leafs(struct btrfs_backref_walk_ctx *ctx) static int btrfs_find_all_roots_safe(struct btrfs_backref_walk_ctx *ctx) { const u64 orig_bytenr = ctx->bytenr; - const bool orig_ignore_extent_item_pos = ctx->ignore_extent_item_pos; + const bool orig_skip_inode_ref_list = ctx->skip_inode_ref_list; bool roots_ulist_allocated = false; struct ulist_iterator uiter; int ret = 0; @@ -1764,7 +1765,7 @@ static int btrfs_find_all_roots_safe(struct btrfs_backref_walk_ctx *ctx) roots_ulist_allocated = true; } - ctx->ignore_extent_item_pos = true; + ctx->skip_inode_ref_list = true; ULIST_ITER_INIT(&uiter); while (1) { @@ -1789,7 +1790,7 @@ static int btrfs_find_all_roots_safe(struct btrfs_backref_walk_ctx *ctx) ulist_free(ctx->refs); ctx->refs = NULL; ctx->bytenr = orig_bytenr; - ctx->ignore_extent_item_pos = orig_ignore_extent_item_pos; + ctx->skip_inode_ref_list = orig_skip_inode_ref_list; return ret; } @@ -1912,7 +1913,7 @@ int btrfs_is_data_extent_shared(struct btrfs_inode *inode, u64 bytenr, goto out_trans; } - walk_ctx.ignore_extent_item_pos = true; + walk_ctx.skip_inode_ref_list = true; walk_ctx.trans = trans; walk_ctx.fs_info = fs_info; walk_ctx.refs = &ctx->refs; diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h index ef6bbea3f456..1616e3e3f1e4 100644 --- a/fs/btrfs/backref.h +++ b/fs/btrfs/backref.h @@ -60,6 +60,12 @@ struct btrfs_backref_walk_ctx { * @extent_item_pos is ignored. */ bool ignore_extent_item_pos; + /* + * If true and bytenr corresponds to a data extent, then the inode list + * (each member describing inode number, file offset and root) is not + * added to each reference added to the @refs ulist. + */ + bool skip_inode_ref_list; /* A valid transaction handle or NULL. */ struct btrfs_trans_handle *trans; /* diff --git a/fs/btrfs/block-rsv.c b/fs/btrfs/block-rsv.c index 5367a14d44d2..4bb4a4875872 100644 --- a/fs/btrfs/block-rsv.c +++ b/fs/btrfs/block-rsv.c @@ -124,7 +124,8 @@ static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info, } else { num_bytes = 0; } - if (block_rsv->qgroup_rsv_reserved >= block_rsv->qgroup_rsv_size) { + if (qgroup_to_release_ret && + block_rsv->qgroup_rsv_reserved >= block_rsv->qgroup_rsv_size) { qgroup_to_release = block_rsv->qgroup_rsv_reserved - block_rsv->qgroup_rsv_size; block_rsv->qgroup_rsv_reserved = block_rsv->qgroup_rsv_size; diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index a5b6bb54545f..26bb10b6ca85 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -4489,10 +4489,12 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path) { struct btrfs_key key; + struct btrfs_key orig_key; struct btrfs_disk_key found_key; int ret; btrfs_item_key_to_cpu(path->nodes[0], &key, 0); + orig_key = key; if (key.offset > 0) { key.offset--; @@ -4509,8 +4511,36 @@ int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path) btrfs_release_path(path); ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); - if (ret < 0) + if (ret <= 0) return ret; + + /* + * Previous key not found. Even if we were at slot 0 of the leaf we had + * before releasing the path and calling btrfs_search_slot(), we now may + * be in a slot pointing to the same original key - this can happen if + * after we released the path, one or more items were moved from a + * sibling leaf into the front of the leaf we had due to an insertion + * (see push_leaf_right()). 
+ * If we hit this case and our slot is > 0, just decrement the slot + * so that the caller does not process the same key again, which may or + * may not break the caller, depending on its logic. + */ + if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) { + btrfs_item_key(path->nodes[0], &found_key, path->slots[0]); + ret = comp_keys(&found_key, &orig_key); + if (ret == 0) { + if (path->slots[0] > 0) { + path->slots[0]--; + return 0; + } + /* + * At slot 0, same key as before, it means orig_key is + * the lowest, leftmost, key in the tree. We're done. + */ + return 1; + } + } + btrfs_item_key(path->nodes[0], &found_key, 0); ret = comp_keys(&found_key, &key); /* diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 9e1596bb208d..9892dae178b7 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -3123,23 +3123,34 @@ int btrfs_start_pre_rw_mount(struct btrfs_fs_info *fs_info) { int ret; const bool cache_opt = btrfs_test_opt(fs_info, SPACE_CACHE); - bool clear_free_space_tree = false; + bool rebuild_free_space_tree = false; if (btrfs_test_opt(fs_info, CLEAR_CACHE) && btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) { - clear_free_space_tree = true; + rebuild_free_space_tree = true; } else if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) && !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID)) { btrfs_warn(fs_info, "free space tree is invalid"); - clear_free_space_tree = true; + rebuild_free_space_tree = true; } - if (clear_free_space_tree) { - btrfs_info(fs_info, "clearing free space tree"); - ret = btrfs_clear_free_space_tree(fs_info); + if (rebuild_free_space_tree) { + btrfs_info(fs_info, "rebuilding free space tree"); + ret = btrfs_rebuild_free_space_tree(fs_info); if (ret) { btrfs_warn(fs_info, - "failed to clear free space tree: %d", ret); + "failed to rebuild free space tree: %d", ret); + goto out; + } + } + + if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) && + !btrfs_test_opt(fs_info, FREE_SPACE_TREE)) { + btrfs_info(fs_info, "disabling free space tree"); + ret = btrfs_delete_free_space_tree(fs_info); + if (ret) { + btrfs_warn(fs_info, + "failed to disable free space tree: %d", ret); goto out; } } diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index 41c77a100853..a4584c629ba3 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -52,13 +52,13 @@ void btrfs_inode_safe_disk_i_size_write(struct btrfs_inode *inode, u64 new_i_siz u64 start, end, i_size; int ret; + spin_lock(&inode->lock); i_size = new_i_size ?: i_size_read(&inode->vfs_inode); if (btrfs_fs_incompat(fs_info, NO_HOLES)) { inode->disk_i_size = i_size; - return; + goto out_unlock; } - spin_lock(&inode->lock); ret = find_contiguous_extent_bit(&inode->file_extent_tree, 0, &start, &end, EXTENT_DIRTY); if (!ret && start == 0) @@ -66,6 +66,7 @@ void btrfs_inode_safe_disk_i_size_write(struct btrfs_inode *inode, u64 new_i_siz else i_size = 0; inode->disk_i_size = i_size; +out_unlock: spin_unlock(&inode->lock); } diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index d84cef89cdff..cf98a3c05480 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -870,15 +870,16 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, } spin_lock(&ctl->tree_lock); ret = link_free_space(ctl, e); - ctl->total_bitmaps++; - recalculate_thresholds(ctl); - spin_unlock(&ctl->tree_lock); if (ret) { + spin_unlock(&ctl->tree_lock); btrfs_err(fs_info, "Duplicate entries in free space cache, dumping"); kmem_cache_free(btrfs_free_space_cachep, e); goto 
free_cache; } + ctl->total_bitmaps++; + recalculate_thresholds(ctl); + spin_unlock(&ctl->tree_lock); list_add_tail(&e->list, &bitmaps); } diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c index 4d155a48ec59..b21da1446f2a 100644 --- a/fs/btrfs/free-space-tree.c +++ b/fs/btrfs/free-space-tree.c @@ -1252,7 +1252,7 @@ out: return ret; } -int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info) +int btrfs_delete_free_space_tree(struct btrfs_fs_info *fs_info) { struct btrfs_trans_handle *trans; struct btrfs_root *tree_root = fs_info->tree_root; @@ -1298,6 +1298,54 @@ abort: return ret; } +int btrfs_rebuild_free_space_tree(struct btrfs_fs_info *fs_info) +{ + struct btrfs_trans_handle *trans; + struct btrfs_key key = { + .objectid = BTRFS_FREE_SPACE_TREE_OBJECTID, + .type = BTRFS_ROOT_ITEM_KEY, + .offset = 0, + }; + struct btrfs_root *free_space_root = btrfs_global_root(fs_info, &key); + struct rb_node *node; + int ret; + + trans = btrfs_start_transaction(free_space_root, 1); + if (IS_ERR(trans)) + return PTR_ERR(trans); + + set_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags); + set_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags); + + ret = clear_free_space_tree(trans, free_space_root); + if (ret) + goto abort; + + node = rb_first_cached(&fs_info->block_group_cache_tree); + while (node) { + struct btrfs_block_group *block_group; + + block_group = rb_entry(node, struct btrfs_block_group, + cache_node); + ret = populate_free_space_tree(trans, block_group); + if (ret) + goto abort; + node = rb_next(node); + } + + btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE); + btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID); + clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags); + + ret = btrfs_commit_transaction(trans); + clear_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags); + return ret; +abort: + btrfs_abort_transaction(trans, ret); + btrfs_end_transaction(trans); + return ret; +} + static int __add_block_group_free_space(struct btrfs_trans_handle *trans, struct btrfs_block_group *block_group, struct btrfs_path *path) diff --git a/fs/btrfs/free-space-tree.h b/fs/btrfs/free-space-tree.h index dc2463e4cfe3..6d5551d0ced8 100644 --- a/fs/btrfs/free-space-tree.h +++ b/fs/btrfs/free-space-tree.h @@ -18,7 +18,8 @@ struct btrfs_caching_control; void set_free_space_tree_thresholds(struct btrfs_block_group *block_group); int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info); -int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info); +int btrfs_delete_free_space_tree(struct btrfs_fs_info *fs_info); +int btrfs_rebuild_free_space_tree(struct btrfs_fs_info *fs_info); int load_free_space_tree(struct btrfs_caching_control *caching_ctl); int add_block_group_free_space(struct btrfs_trans_handle *trans, struct btrfs_block_group *block_group); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 957e4d76a7b6..b31bb3352477 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3167,6 +3167,9 @@ int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) btrfs_rewrite_logical_zoned(ordered_extent); btrfs_zone_finish_endio(fs_info, ordered_extent->disk_bytenr, ordered_extent->disk_num_bytes); + } else if (btrfs_is_data_reloc_root(inode->root)) { + btrfs_zone_finish_endio(fs_info, ordered_extent->disk_bytenr, + ordered_extent->disk_num_bytes); } if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) { diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 25833b4eeaf5..2fa36f694daa 100644 --- a/fs/btrfs/ioctl.c +++ 
b/fs/btrfs/ioctl.c @@ -454,7 +454,9 @@ void btrfs_exclop_balance(struct btrfs_fs_info *fs_info, case BTRFS_EXCLOP_BALANCE_PAUSED: spin_lock(&fs_info->super_lock); ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE || - fs_info->exclusive_operation == BTRFS_EXCLOP_DEV_ADD); + fs_info->exclusive_operation == BTRFS_EXCLOP_DEV_ADD || + fs_info->exclusive_operation == BTRFS_EXCLOP_NONE || + fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED); fs_info->exclusive_operation = BTRFS_EXCLOP_BALANCE_PAUSED; spin_unlock(&fs_info->super_lock); break; diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c index b93c96213304..497b9dbd8a13 100644 --- a/fs/btrfs/print-tree.c +++ b/fs/btrfs/print-tree.c @@ -151,10 +151,10 @@ static void print_extent_item(struct extent_buffer *eb, int slot, int type) pr_cont("shared data backref parent %llu count %u\n", offset, btrfs_shared_data_ref_count(eb, sref)); /* - * offset is supposed to be a tree block which - * must be aligned to nodesize. + * Offset is supposed to be a tree block which must be + * aligned to sectorsize. */ - if (!IS_ALIGNED(offset, eb->fs_info->nodesize)) + if (!IS_ALIGNED(offset, eb->fs_info->sectorsize)) pr_info( "\t\t\t(parent %llu not aligned to sectorsize %u)\n", offset, eb->fs_info->sectorsize); diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index ef13a9d4e370..c10670915f96 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -3422,7 +3422,7 @@ int add_data_references(struct reloc_control *rc, btrfs_release_path(path); ctx.bytenr = extent_key->objectid; - ctx.ignore_extent_item_pos = true; + ctx.skip_inode_ref_list = true; ctx.fs_info = rc->extent_root->fs_info; ret = btrfs_find_all_leafs(&ctx); diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 366fb4cde145..e19792d919c6 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -826,7 +826,11 @@ out: !btrfs_test_opt(info, CLEAR_CACHE)) { btrfs_err(info, "cannot disable free space tree"); ret = -EINVAL; - + } + if (btrfs_fs_compat_ro(info, BLOCK_GROUP_TREE) && + !btrfs_test_opt(info, FREE_SPACE_TREE)) { + btrfs_err(info, "cannot disable free space tree with block-group-tree feature"); + ret = -EINVAL; } if (!ret) ret = btrfs_check_mountopts_zoned(info); diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c index 45d04092f2f8..8db47f93007f 100644 --- a/fs/btrfs/zoned.c +++ b/fs/btrfs/zoned.c @@ -122,10 +122,9 @@ static int sb_write_pointer(struct block_device *bdev, struct blk_zone *zones, int i; for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) { - u64 bytenr; - - bytenr = ((zones[i].start + zones[i].len) - << SECTOR_SHIFT) - BTRFS_SUPER_INFO_SIZE; + u64 zone_end = (zones[i].start + zones[i].capacity) << SECTOR_SHIFT; + u64 bytenr = ALIGN_DOWN(zone_end, BTRFS_SUPER_INFO_SIZE) - + BTRFS_SUPER_INFO_SIZE; page[i] = read_cache_page_gfp(mapping, bytenr >> PAGE_SHIFT, GFP_NOFS); @@ -1168,12 +1167,12 @@ int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size) return -ERANGE; /* All the zones are conventional */ - if (find_next_bit(zinfo->seq_zones, begin, end) == end) + if (find_next_bit(zinfo->seq_zones, end, begin) == end) return 0; /* All the zones are sequential and empty */ - if (find_next_zero_bit(zinfo->seq_zones, begin, end) == end && - find_next_zero_bit(zinfo->empty_zones, begin, end) == end) + if (find_next_zero_bit(zinfo->seq_zones, end, begin) == end && + find_next_zero_bit(zinfo->empty_zones, end, begin) == end) return 0; for (pos = start; pos < start + size; pos += zinfo->zone_size) { @@ -1610,11 +1609,11 @@ void 
btrfs_redirty_list_add(struct btrfs_transaction *trans, !list_empty(&eb->release_list)) return; + memzero_extent_buffer(eb, 0, eb->len); + set_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags); set_extent_buffer_dirty(eb); set_extent_bits_nowait(&trans->dirty_pages, eb->start, eb->start + eb->len - 1, EXTENT_DIRTY); - memzero_extent_buffer(eb, 0, eb->len); - set_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags); spin_lock(&trans->releasing_ebs_lock); list_add_tail(&eb->release_list, &trans->releasing_ebs); diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index ac9034fce409..b30f8f768ac4 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -744,6 +744,7 @@ static void cifs_umount_begin(struct super_block *sb) spin_unlock(&tcon->tc_lock); spin_unlock(&cifs_tcp_ses_lock); + cifs_close_all_deferred_files(tcon); /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */ /* cancel_notify_requests(tcon); */ if (tcon->ses && tcon->ses->server) { @@ -759,6 +760,20 @@ static void cifs_umount_begin(struct super_block *sb) return; } +static int cifs_freeze(struct super_block *sb) +{ + struct cifs_sb_info *cifs_sb = CIFS_SB(sb); + struct cifs_tcon *tcon; + + if (cifs_sb == NULL) + return 0; + + tcon = cifs_sb_master_tcon(cifs_sb); + + cifs_close_all_deferred_files(tcon); + return 0; +} + #ifdef CONFIG_CIFS_STATS2 static int cifs_show_stats(struct seq_file *s, struct dentry *root) { @@ -797,6 +812,7 @@ static const struct super_operations cifs_super_ops = { as opens */ .show_options = cifs_show_options, .umount_begin = cifs_umount_begin, + .freeze_fs = cifs_freeze, #ifdef CONFIG_CIFS_STATS2 .show_stats = cifs_show_stats, #endif diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 2c573062ec87..59a10330e299 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -2705,6 +2705,13 @@ cifs_match_super(struct super_block *sb, void *data) spin_lock(&cifs_tcp_ses_lock); cifs_sb = CIFS_SB(sb); + + /* We do not want to use a superblock that has been shutdown */ + if (CIFS_MOUNT_SHUTDOWN & cifs_sb->mnt_cifs_flags) { + spin_unlock(&cifs_tcp_ses_lock); + return 0; + } + tlink = cifs_get_tlink(cifs_sb_master_tlink(cifs_sb)); if (tlink == NULL) { /* can not match superblock if tlink were ever null */ diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index a81758225fcd..a295e4c2d54e 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -1682,7 +1682,7 @@ smb2_copychunk_range(const unsigned int xid, pcchunk->SourceOffset = cpu_to_le64(src_off); pcchunk->TargetOffset = cpu_to_le64(dest_off); pcchunk->Length = - cpu_to_le32(min_t(u32, len, tcon->max_bytes_chunk)); + cpu_to_le32(min_t(u64, len, tcon->max_bytes_chunk)); /* Request server copy to target from src identified by key */ kfree(retbuf); diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index 8ff4b9192a9f..f2c415f31b75 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c @@ -303,6 +303,22 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb, return desc; } +static ext4_fsblk_t ext4_valid_block_bitmap_padding(struct super_block *sb, + ext4_group_t block_group, + struct buffer_head *bh) +{ + ext4_grpblk_t next_zero_bit; + unsigned long bitmap_size = sb->s_blocksize * 8; + unsigned int offset = num_clusters_in_group(sb, block_group); + + if (bitmap_size <= offset) + return 0; + + next_zero_bit = ext4_find_next_zero_bit(bh->b_data, bitmap_size, offset); + + return (next_zero_bit < bitmap_size ? next_zero_bit : 0); +} + /* * Return the block number which was discovered to be invalid, or 0 if * the block bitmap is valid. 
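The one-character type change in the fs/cifs/smb2ops.c hunk above (min_t(u32, ...) becoming min_t(u64, ...)) avoids a truncation bug: min_t casts both arguments to the named type before comparing, so a 64-bit remaining length is first wrapped modulo 2^32 and a 4 GiB remainder becomes a zero-length chunk. A standalone sketch of the failure mode, with min_t re-created here in simplified form (the kernel's version in include/linux/minmax.h also type-checks and avoids double evaluation):

#include <stdio.h>
#include <stdint.h>

/* simplified stand-in for the kernel's min_t(type, x, y) */
#define min_t(type, x, y) ((type)(x) < (type)(y) ? (type)(x) : (type)(y))

int main(void)
{
	uint64_t len = 1ULL << 32;		/* 4 GiB still left to copy */
	uint32_t max_bytes_chunk = 1048576;	/* 1 MiB per-chunk server limit */

	/* old: len is cast to u32 (wrapping to 0) before the comparison */
	printf("min_t(u32, ...) = %u\n",
	       (unsigned)min_t(uint32_t, len, max_bytes_chunk));
	/* new: compare as u64; the result always fits the 32-bit field anyway */
	printf("min_t(u64, ...) = %llu\n",
	       (unsigned long long)min_t(uint64_t, len, max_bytes_chunk));
	return 0;
}

This prints 0 for the old form and 1048576 for the new one.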
@@ -401,6 +417,15 @@ static int ext4_validate_block_bitmap(struct super_block *sb, EXT4_GROUP_INFO_BBITMAP_CORRUPT); return -EFSCORRUPTED; } + blk = ext4_valid_block_bitmap_padding(sb, block_group, bh); + if (unlikely(blk != 0)) { + ext4_unlock_group(sb, block_group); + ext4_error(sb, "bg %u: block %llu: padding at end of block bitmap is not set", + block_group, blk); + ext4_mark_group_bitmap_corrupted(sb, block_group, + EXT4_GROUP_INFO_BBITMAP_CORRUPT); + return -EFSCORRUPTED; + } set_buffer_verified(bh); verified: ext4_unlock_group(sb, block_group); diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 08b29c289da4..df0255b7d1fa 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -1774,6 +1774,30 @@ static inline struct ext4_inode_info *EXT4_I(struct inode *inode) return container_of(inode, struct ext4_inode_info, vfs_inode); } +static inline int ext4_writepages_down_read(struct super_block *sb) +{ + percpu_down_read(&EXT4_SB(sb)->s_writepages_rwsem); + return memalloc_nofs_save(); +} + +static inline void ext4_writepages_up_read(struct super_block *sb, int ctx) +{ + memalloc_nofs_restore(ctx); + percpu_up_read(&EXT4_SB(sb)->s_writepages_rwsem); +} + +static inline int ext4_writepages_down_write(struct super_block *sb) +{ + percpu_down_write(&EXT4_SB(sb)->s_writepages_rwsem); + return memalloc_nofs_save(); +} + +static inline void ext4_writepages_up_write(struct super_block *sb, int ctx) +{ + memalloc_nofs_restore(ctx); + percpu_up_write(&EXT4_SB(sb)->s_writepages_rwsem); +} + static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino) { return ino == EXT4_ROOT_INO || diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c index 7bc221038c6c..595abb9e7d74 100644 --- a/fs/ext4/extents_status.c +++ b/fs/ext4/extents_status.c @@ -267,14 +267,12 @@ static void __es_find_extent_range(struct inode *inode, /* see if the extent has been cached */ es->es_lblk = es->es_len = es->es_pblk = 0; - if (tree->cache_es) { - es1 = tree->cache_es; - if (in_range(lblk, es1->es_lblk, es1->es_len)) { - es_debug("%u cached by [%u/%u) %llu %x\n", - lblk, es1->es_lblk, es1->es_len, - ext4_es_pblock(es1), ext4_es_status(es1)); - goto out; - } + es1 = READ_ONCE(tree->cache_es); + if (es1 && in_range(lblk, es1->es_lblk, es1->es_len)) { + es_debug("%u cached by [%u/%u) %llu %x\n", + lblk, es1->es_lblk, es1->es_len, + ext4_es_pblock(es1), ext4_es_status(es1)); + goto out; } es1 = __es_tree_search(&tree->root, lblk); @@ -293,7 +291,7 @@ out: } if (es1 && matching_fn(es1)) { - tree->cache_es = es1; + WRITE_ONCE(tree->cache_es, es1); es->es_lblk = es1->es_lblk; es->es_len = es1->es_len; es->es_pblk = es1->es_pblk; @@ -931,14 +929,12 @@ int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk, /* find extent in cache firstly */ es->es_lblk = es->es_len = es->es_pblk = 0; - if (tree->cache_es) { - es1 = tree->cache_es; - if (in_range(lblk, es1->es_lblk, es1->es_len)) { - es_debug("%u cached by [%u/%u)\n", - lblk, es1->es_lblk, es1->es_len); - found = 1; - goto out; - } + es1 = READ_ONCE(tree->cache_es); + if (es1 && in_range(lblk, es1->es_lblk, es1->es_len)) { + es_debug("%u cached by [%u/%u)\n", + lblk, es1->es_lblk, es1->es_len); + found = 1; + goto out; } node = tree->root.rb_node; diff --git a/fs/ext4/hash.c b/fs/ext4/hash.c index 147b5241dd94..46c3423ddfa1 100644 --- a/fs/ext4/hash.c +++ b/fs/ext4/hash.c @@ -277,7 +277,11 @@ static int __ext4fs_dirhash(const struct inode *dir, const char *name, int len, } default: hinfo->hash = 0; - return -1; + hinfo->minor_hash = 0; + 
ext4_warning(dir->i_sb, + "invalid/unsupported hash tree version %u", + hinfo->hash_version); + return -EINVAL; } hash = hash & ~1; if (hash == (EXT4_HTREE_EOF_32BIT << 1)) diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index 1602d74b5eeb..cb36037f20fc 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c @@ -34,6 +34,7 @@ static int get_max_inline_xattr_value_size(struct inode *inode, struct ext4_xattr_ibody_header *header; struct ext4_xattr_entry *entry; struct ext4_inode *raw_inode; + void *end; int free, min_offs; if (!EXT4_INODE_HAS_XATTR_SPACE(inode)) @@ -57,14 +58,23 @@ static int get_max_inline_xattr_value_size(struct inode *inode, raw_inode = ext4_raw_inode(iloc); header = IHDR(inode, raw_inode); entry = IFIRST(header); + end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size; /* Compute min_offs. */ - for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) { + while (!IS_LAST_ENTRY(entry)) { + void *next = EXT4_XATTR_NEXT(entry); + + if (next >= end) { + EXT4_ERROR_INODE(inode, + "corrupt xattr in inline inode"); + return 0; + } if (!entry->e_value_inum && entry->e_value_size) { size_t offs = le16_to_cpu(entry->e_value_offs); if (offs < min_offs) min_offs = offs; } + entry = next; } free = min_offs - ((void *)entry - (void *)IFIRST(header)) - sizeof(__u32); @@ -350,7 +360,7 @@ static int ext4_update_inline_data(handle_t *handle, struct inode *inode, error = ext4_xattr_ibody_get(inode, i.name_index, i.name, value, len); - if (error == -ENODATA) + if (error < 0) goto out; BUFFER_TRACE(is.iloc.bh, "get_write_access"); @@ -1177,6 +1187,7 @@ static int ext4_finish_convert_inline_dir(handle_t *handle, ext4_initialize_dirent_tail(dir_block, inode->i_sb->s_blocksize); set_buffer_uptodate(dir_block); + unlock_buffer(dir_block); err = ext4_handle_dirty_dirblock(handle, inode, dir_block); if (err) return err; @@ -1251,6 +1262,7 @@ static int ext4_convert_inline_data_nolock(handle_t *handle, if (!S_ISDIR(inode->i_mode)) { memcpy(data_bh->b_data, buf, inline_size); set_buffer_uptodate(data_bh); + unlock_buffer(data_bh); error = ext4_handle_dirty_metadata(handle, inode, data_bh); } else { @@ -1258,7 +1270,6 @@ static int ext4_convert_inline_data_nolock(handle_t *handle, buf, inline_size); } - unlock_buffer(data_bh); out_restore: if (error) ext4_restore_inline_data(handle, inode, iloc, buf, inline_size); diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 41ba1c432844..145ea24d589b 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -2956,13 +2956,14 @@ static int ext4_writepages(struct address_space *mapping, .can_map = 1, }; int ret; + int alloc_ctx; if (unlikely(ext4_forced_shutdown(EXT4_SB(sb)))) return -EIO; - percpu_down_read(&EXT4_SB(sb)->s_writepages_rwsem); + alloc_ctx = ext4_writepages_down_read(sb); ret = ext4_do_writepages(&mpd); - percpu_up_read(&EXT4_SB(sb)->s_writepages_rwsem); + ext4_writepages_up_read(sb, alloc_ctx); return ret; } @@ -2990,17 +2991,18 @@ static int ext4_dax_writepages(struct address_space *mapping, long nr_to_write = wbc->nr_to_write; struct inode *inode = mapping->host; struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb); + int alloc_ctx; if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) return -EIO; - percpu_down_read(&sbi->s_writepages_rwsem); + alloc_ctx = ext4_writepages_down_read(inode->i_sb); trace_ext4_writepages(inode, wbc); ret = dax_writeback_mapping_range(mapping, sbi->s_daxdev, wbc); trace_ext4_writepages_result(inode, wbc, ret, nr_to_write - wbc->nr_to_write); - percpu_up_read(&sbi->s_writepages_rwsem); + 
ext4_writepages_up_read(inode->i_sb, alloc_ctx); return ret; } @@ -3574,7 +3576,7 @@ static int ext4_iomap_overwrite_begin(struct inode *inode, loff_t offset, */ flags &= ~IOMAP_WRITE; ret = ext4_iomap_begin(inode, offset, length, flags, iomap, srcmap); - WARN_ON_ONCE(iomap->type != IOMAP_MAPPED); + WARN_ON_ONCE(!ret && iomap->type != IOMAP_MAPPED); return ret; } @@ -6122,7 +6124,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val) journal_t *journal; handle_t *handle; int err; - struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); + int alloc_ctx; /* * We have to be very careful here: changing a data block's @@ -6160,7 +6162,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val) } } - percpu_down_write(&sbi->s_writepages_rwsem); + alloc_ctx = ext4_writepages_down_write(inode->i_sb); jbd2_journal_lock_updates(journal); /* @@ -6177,7 +6179,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val) err = jbd2_journal_flush(journal, 0); if (err < 0) { jbd2_journal_unlock_updates(journal); - percpu_up_write(&sbi->s_writepages_rwsem); + ext4_writepages_up_write(inode->i_sb, alloc_ctx); return err; } ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA); @@ -6185,7 +6187,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val) ext4_set_aops(inode); jbd2_journal_unlock_updates(journal); - percpu_up_write(&sbi->s_writepages_rwsem); + ext4_writepages_up_write(inode->i_sb, alloc_ctx); if (val) filemap_invalidate_unlock(inode->i_mapping); diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 5b2ae37a8b80..5639a4cf7ff9 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -4820,7 +4820,11 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b, trace_ext4_mb_release_group_pa(sb, pa); BUG_ON(pa->pa_deleted == 0); ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); - BUG_ON(group != e4b->bd_group && pa->pa_len != 0); + if (unlikely(group != e4b->bd_group && pa->pa_len != 0)) { + ext4_warning(sb, "bad group: expected %u, group %u, pa_start %llu", + e4b->bd_group, group, pa->pa_pstart); + return 0; + } mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len); atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded); trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len); diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c index a19a9661646e..d98ac2af8199 100644 --- a/fs/ext4/migrate.c +++ b/fs/ext4/migrate.c @@ -408,7 +408,6 @@ static int free_ext_block(handle_t *handle, struct inode *inode) int ext4_ext_migrate(struct inode *inode) { - struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); handle_t *handle; int retval = 0, i; __le32 *i_data; @@ -418,6 +417,7 @@ int ext4_ext_migrate(struct inode *inode) unsigned long max_entries; __u32 goal, tmp_csum_seed; uid_t owner[2]; + int alloc_ctx; /* * If the filesystem does not support extents, or the inode @@ -434,7 +434,7 @@ int ext4_ext_migrate(struct inode *inode) */ return retval; - percpu_down_write(&sbi->s_writepages_rwsem); + alloc_ctx = ext4_writepages_down_write(inode->i_sb); /* * Worst case we can touch the allocation bitmaps and a block @@ -586,7 +586,7 @@ out_tmp_inode: unlock_new_inode(tmp_inode); iput(tmp_inode); out_unlock: - percpu_up_write(&sbi->s_writepages_rwsem); + ext4_writepages_up_write(inode->i_sb, alloc_ctx); return retval; } @@ -605,6 +605,7 @@ int ext4_ind_migrate(struct inode *inode) ext4_fsblk_t blk; handle_t *handle; int ret, ret2 = 0; + int alloc_ctx; if (!ext4_has_feature_extents(inode->i_sb) || (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) @@ -621,7 +622,7 @@ int 
ext4_ind_migrate(struct inode *inode) if (test_opt(inode->i_sb, DELALLOC)) ext4_alloc_da_blocks(inode); - percpu_down_write(&sbi->s_writepages_rwsem); + alloc_ctx = ext4_writepages_down_write(inode->i_sb); handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1); if (IS_ERR(handle)) { @@ -665,6 +666,6 @@ errout: ext4_journal_stop(handle); up_write(&EXT4_I(inode)->i_data_sem); out_unlock: - percpu_up_write(&sbi->s_writepages_rwsem); + ext4_writepages_up_write(inode->i_sb, alloc_ctx); return ret; } diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c index 4681fff6665f..46735ce315b5 100644 --- a/fs/ext4/mmp.c +++ b/fs/ext4/mmp.c @@ -39,28 +39,36 @@ static void ext4_mmp_csum_set(struct super_block *sb, struct mmp_struct *mmp) * Write the MMP block using REQ_SYNC to try to get the block on-disk * faster. */ -static int write_mmp_block(struct super_block *sb, struct buffer_head *bh) +static int write_mmp_block_thawed(struct super_block *sb, + struct buffer_head *bh) { struct mmp_struct *mmp = (struct mmp_struct *)(bh->b_data); - /* - * We protect against freezing so that we don't create dirty buffers - * on frozen filesystem. - */ - sb_start_write(sb); ext4_mmp_csum_set(sb, mmp); lock_buffer(bh); bh->b_end_io = end_buffer_write_sync; get_bh(bh); submit_bh(REQ_OP_WRITE | REQ_SYNC | REQ_META | REQ_PRIO, bh); wait_on_buffer(bh); - sb_end_write(sb); if (unlikely(!buffer_uptodate(bh))) return -EIO; - return 0; } +static int write_mmp_block(struct super_block *sb, struct buffer_head *bh) +{ + int err; + + /* + * We protect against freezing so that we don't create dirty buffers + * on frozen filesystem. + */ + sb_start_write(sb); + err = write_mmp_block_thawed(sb, bh); + sb_end_write(sb); + return err; +} + /* * Read the MMP block. It _must_ be read from disk and hence we clear the * uptodate flag on the buffer. 
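The fs/ext4/mmp.c refactor above splits the MMP writer in two: write_mmp_block() still takes freeze protection around the raw write, while the new write_mmp_block_thawed() assumes the caller already excludes freezing, which the hunk that follows uses on mount/remount where the s_umount semaphore provides that exclusion and taking sb_start_write() would upset lockdep. A hypothetical user-space mock of the pattern (every name below ending in _mock is invented for illustration; only sb_start_write/sb_end_write are the real kernel API being mimicked):

#include <stdio.h>

static void sb_start_write_mock(void) { puts("freeze protection taken"); }
static void sb_end_write_mock(void)   { puts("freeze protection dropped"); }

/* raw write; the caller guarantees the fs cannot be frozen right now */
static int write_mmp_block_thawed_mock(void)
{
	puts("MMP block submitted and waited on");
	return 0;
}

/* normal path: no other exclusion held, so take freeze protection here */
static int write_mmp_block_mock(void)
{
	int err;

	sb_start_write_mock();
	err = write_mmp_block_thawed_mock();
	sb_end_write_mock();
	return err;
}

int main(void)
{
	write_mmp_block_mock();		/* e.g. the kmmpd kthread's periodic write */
	write_mmp_block_thawed_mock();	/* e.g. mount/remount, serialized by s_umount */
	return 0;
}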
@@ -340,7 +348,11 @@ skip: seq = mmp_new_seq(); mmp->mmp_seq = cpu_to_le32(seq); - retval = write_mmp_block(sb, bh); + /* + * On mount / remount we are protected against fs freezing (by s_umount + * semaphore) and grabbing freeze protection upsets lockdep + */ + retval = write_mmp_block_thawed(sb, bh); if (retval) goto failed; diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index a5010b5b8a8c..45b579805c95 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -674,7 +674,7 @@ static struct stats dx_show_leaf(struct inode *dir, len = de->name_len; if (!IS_ENCRYPTED(dir)) { /* Directory is not encrypted */ - ext4fs_dirhash(dir, de->name, + (void) ext4fs_dirhash(dir, de->name, de->name_len, &h); printk("%*.s:(U)%x.%u ", len, name, h.hash, @@ -709,8 +709,9 @@ static struct stats dx_show_leaf(struct inode *dir, if (IS_CASEFOLDED(dir)) h.hash = EXT4_DIRENT_HASH(de); else - ext4fs_dirhash(dir, de->name, - de->name_len, &h); + (void) ext4fs_dirhash(dir, + de->name, + de->name_len, &h); printk("%*.s:(E)%x.%u ", len, name, h.hash, (unsigned) ((char *) de - base)); @@ -720,7 +721,8 @@ static struct stats dx_show_leaf(struct inode *dir, #else int len = de->name_len; char *name = de->name; - ext4fs_dirhash(dir, de->name, de->name_len, &h); + (void) ext4fs_dirhash(dir, de->name, + de->name_len, &h); printk("%*.s:%x.%u ", len, name, h.hash, (unsigned) ((char *) de - base)); #endif @@ -849,8 +851,14 @@ dx_probe(struct ext4_filename *fname, struct inode *dir, hinfo->seed = EXT4_SB(dir->i_sb)->s_hash_seed; /* hash is already computed for encrypted casefolded directory */ if (fname && fname_name(fname) && - !(IS_ENCRYPTED(dir) && IS_CASEFOLDED(dir))) - ext4fs_dirhash(dir, fname_name(fname), fname_len(fname), hinfo); + !(IS_ENCRYPTED(dir) && IS_CASEFOLDED(dir))) { + int ret = ext4fs_dirhash(dir, fname_name(fname), + fname_len(fname), hinfo); + if (ret < 0) { + ret_err = ERR_PTR(ret); + goto fail; + } + } hash = hinfo->hash; if (root->info.unused_flags & 1) { @@ -1111,7 +1119,12 @@ static int htree_dirblock_to_tree(struct file *dir_file, hinfo->minor_hash = 0; } } else { - ext4fs_dirhash(dir, de->name, de->name_len, hinfo); + err = ext4fs_dirhash(dir, de->name, + de->name_len, hinfo); + if (err < 0) { + count = err; + goto errout; + } } if ((hinfo->hash < start_hash) || ((hinfo->hash == start_hash) && @@ -1313,8 +1326,12 @@ static int dx_make_map(struct inode *dir, struct buffer_head *bh, if (de->name_len && de->inode) { if (ext4_hash_in_dirent(dir)) h.hash = EXT4_DIRENT_HASH(de); - else - ext4fs_dirhash(dir, de->name, de->name_len, &h); + else { + int err = ext4fs_dirhash(dir, de->name, + de->name_len, &h); + if (err < 0) + return err; + } map_tail--; map_tail->hash = h.hash; map_tail->offs = ((char *) de - base)>>2; @@ -1452,10 +1469,9 @@ int ext4_fname_setup_ci_filename(struct inode *dir, const struct qstr *iname, hinfo->hash_version = DX_HASH_SIPHASH; hinfo->seed = NULL; if (cf_name->name) - ext4fs_dirhash(dir, cf_name->name, cf_name->len, hinfo); + return ext4fs_dirhash(dir, cf_name->name, cf_name->len, hinfo); else - ext4fs_dirhash(dir, iname->name, iname->len, hinfo); - return 0; + return ext4fs_dirhash(dir, iname->name, iname->len, hinfo); } #endif @@ -2298,10 +2314,15 @@ static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname, fname->hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed; /* casefolded encrypted hashes are computed on fname setup */ - if (!ext4_hash_in_dirent(dir)) - ext4fs_dirhash(dir, fname_name(fname), - fname_len(fname), &fname->hinfo); - + if (!ext4_hash_in_dirent(dir)) { 
+ int err = ext4fs_dirhash(dir, fname_name(fname), + fname_len(fname), &fname->hinfo); + if (err < 0) { + brelse(bh2); + brelse(bh); + return err; + } + } memset(frames, 0, sizeof(frames)); frame = frames; frame->entries = entries; diff --git a/fs/ext4/super.c b/fs/ext4/super.c index f43e526112ae..d6ac61f43ac3 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -3184,11 +3184,9 @@ static __le16 ext4_group_desc_csum(struct super_block *sb, __u32 block_group, crc = crc16(crc, (__u8 *)gdp, offset); offset += sizeof(gdp->bg_checksum); /* skip checksum */ /* for checksum of struct ext4_group_desc do the rest...*/ - if (ext4_has_feature_64bit(sb) && - offset < le16_to_cpu(sbi->s_es->s_desc_size)) + if (ext4_has_feature_64bit(sb) && offset < sbi->s_desc_size) crc = crc16(crc, (__u8 *)gdp + offset, - le16_to_cpu(sbi->s_es->s_desc_size) - - offset); + sbi->s_desc_size - offset); out: return cpu_to_le16(crc); @@ -6581,9 +6579,6 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb) } #ifdef CONFIG_QUOTA - /* Release old quota file names */ - for (i = 0; i < EXT4_MAXQUOTAS; i++) - kfree(old_opts.s_qf_names[i]); if (enable_quota) { if (sb_any_quota_suspended(sb)) dquot_resume(sb, -1); @@ -6593,6 +6588,9 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb) goto restore_opts; } } + /* Release old quota file names */ + for (i = 0; i < EXT4_MAXQUOTAS; i++) + kfree(old_opts.s_qf_names[i]); #endif if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks) ext4_release_system_zone(sb); @@ -6603,6 +6601,13 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb) return 0; restore_opts: + /* + * If there was a failing r/w to ro transition, we may need to + * re-enable quota + */ + if ((sb->s_flags & SB_RDONLY) && !(old_sb_flags & SB_RDONLY) && + sb_any_quota_suspended(sb)) + dquot_resume(sb, -1); sb->s_flags = old_sb_flags; sbi->s_mount_opt = old_opts.s_mount_opt; sbi->s_mount_opt2 = old_opts.s_mount_opt2; diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index 767454d74cd6..e33a323faf3c 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -2615,6 +2615,7 @@ static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode, .in_inode = !!entry->e_value_inum, }; struct ext4_xattr_ibody_header *header = IHDR(inode, raw_inode); + int needs_kvfree = 0; int error; is = kzalloc(sizeof(struct ext4_xattr_ibody_find), GFP_NOFS); @@ -2637,7 +2638,7 @@ static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode, error = -ENOMEM; goto out; } - + needs_kvfree = 1; error = ext4_xattr_inode_get(inode, entry, buffer, value_size); if (error) goto out; @@ -2676,7 +2677,7 @@ static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode, out: kfree(b_entry_name); - if (entry->e_value_inum && buffer) + if (needs_kvfree && buffer) kvfree(buffer); if (is) brelse(is->iloc.bh); diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c index 28b12553f2b3..9a8153895d20 100644 --- a/fs/f2fs/extent_cache.c +++ b/fs/f2fs/extent_cache.c @@ -161,118 +161,52 @@ static bool __is_front_mergeable(struct extent_info *cur, return __is_extent_mergeable(cur, front, type); } -static struct rb_entry *__lookup_rb_tree_fast(struct rb_entry *cached_re, - unsigned int ofs) -{ - if (cached_re) { - if (cached_re->ofs <= ofs && - cached_re->ofs + cached_re->len > ofs) { - return cached_re; - } - } - return NULL; -} - -static struct rb_entry *__lookup_rb_tree_slow(struct rb_root_cached *root, - unsigned int ofs) +static struct extent_node 
*__lookup_extent_node(struct rb_root_cached *root, + struct extent_node *cached_en, unsigned int fofs) { struct rb_node *node = root->rb_root.rb_node; - struct rb_entry *re; + struct extent_node *en; + /* check a cached entry */ + if (cached_en && cached_en->ei.fofs <= fofs && + cached_en->ei.fofs + cached_en->ei.len > fofs) + return cached_en; + + /* check rb_tree */ while (node) { - re = rb_entry(node, struct rb_entry, rb_node); + en = rb_entry(node, struct extent_node, rb_node); - if (ofs < re->ofs) + if (fofs < en->ei.fofs) node = node->rb_left; - else if (ofs >= re->ofs + re->len) + else if (fofs >= en->ei.fofs + en->ei.len) node = node->rb_right; else - return re; + return en; } return NULL; } -struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root, - struct rb_entry *cached_re, unsigned int ofs) -{ - struct rb_entry *re; - - re = __lookup_rb_tree_fast(cached_re, ofs); - if (!re) - return __lookup_rb_tree_slow(root, ofs); - - return re; -} - -struct rb_node **f2fs_lookup_rb_tree_ext(struct f2fs_sb_info *sbi, - struct rb_root_cached *root, - struct rb_node **parent, - unsigned long long key, bool *leftmost) -{ - struct rb_node **p = &root->rb_root.rb_node; - struct rb_entry *re; - - while (*p) { - *parent = *p; - re = rb_entry(*parent, struct rb_entry, rb_node); - - if (key < re->key) { - p = &(*p)->rb_left; - } else { - p = &(*p)->rb_right; - *leftmost = false; - } - } - - return p; -} - -struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi, - struct rb_root_cached *root, - struct rb_node **parent, - unsigned int ofs, bool *leftmost) -{ - struct rb_node **p = &root->rb_root.rb_node; - struct rb_entry *re; - - while (*p) { - *parent = *p; - re = rb_entry(*parent, struct rb_entry, rb_node); - - if (ofs < re->ofs) { - p = &(*p)->rb_left; - } else if (ofs >= re->ofs + re->len) { - p = &(*p)->rb_right; - *leftmost = false; - } else { - f2fs_bug_on(sbi, 1); - } - } - - return p; -} - /* - * lookup rb entry in position of @ofs in rb-tree, + * lookup rb entry in position of @fofs in rb-tree, * if hit, return the entry, otherwise, return NULL - * @prev_ex: extent before ofs - * @next_ex: extent after ofs - * @insert_p: insert point for new extent at ofs + * @prev_ex: extent before fofs + * @next_ex: extent after fofs + * @insert_p: insert point for new extent at fofs * in order to simplify the insertion after. * tree must stay unchanged between lookup and insertion. 
*/ -struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root, - struct rb_entry *cached_re, - unsigned int ofs, - struct rb_entry **prev_entry, - struct rb_entry **next_entry, +static struct extent_node *__lookup_extent_node_ret(struct rb_root_cached *root, + struct extent_node *cached_en, + unsigned int fofs, + struct extent_node **prev_entry, + struct extent_node **next_entry, struct rb_node ***insert_p, struct rb_node **insert_parent, - bool force, bool *leftmost) + bool *leftmost) { struct rb_node **pnode = &root->rb_root.rb_node; struct rb_node *parent = NULL, *tmp_node; - struct rb_entry *re = cached_re; + struct extent_node *en = cached_en; *insert_p = NULL; *insert_parent = NULL; @@ -282,24 +216,20 @@ struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root, if (RB_EMPTY_ROOT(&root->rb_root)) return NULL; - if (re) { - if (re->ofs <= ofs && re->ofs + re->len > ofs) - goto lookup_neighbors; - } + if (en && en->ei.fofs <= fofs && en->ei.fofs + en->ei.len > fofs) + goto lookup_neighbors; - if (leftmost) - *leftmost = true; + *leftmost = true; while (*pnode) { parent = *pnode; - re = rb_entry(*pnode, struct rb_entry, rb_node); + en = rb_entry(*pnode, struct extent_node, rb_node); - if (ofs < re->ofs) { + if (fofs < en->ei.fofs) { pnode = &(*pnode)->rb_left; - } else if (ofs >= re->ofs + re->len) { + } else if (fofs >= en->ei.fofs + en->ei.len) { pnode = &(*pnode)->rb_right; - if (leftmost) - *leftmost = false; + *leftmost = false; } else { goto lookup_neighbors; } @@ -308,71 +238,32 @@ struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root, *insert_p = pnode; *insert_parent = parent; - re = rb_entry(parent, struct rb_entry, rb_node); + en = rb_entry(parent, struct extent_node, rb_node); tmp_node = parent; - if (parent && ofs > re->ofs) + if (parent && fofs > en->ei.fofs) tmp_node = rb_next(parent); - *next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node); + *next_entry = rb_entry_safe(tmp_node, struct extent_node, rb_node); tmp_node = parent; - if (parent && ofs < re->ofs) + if (parent && fofs < en->ei.fofs) tmp_node = rb_prev(parent); - *prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node); + *prev_entry = rb_entry_safe(tmp_node, struct extent_node, rb_node); return NULL; lookup_neighbors: - if (ofs == re->ofs || force) { + if (fofs == en->ei.fofs) { /* lookup prev node for merging backward later */ - tmp_node = rb_prev(&re->rb_node); - *prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node); + tmp_node = rb_prev(&en->rb_node); + *prev_entry = rb_entry_safe(tmp_node, + struct extent_node, rb_node); } - if (ofs == re->ofs + re->len - 1 || force) { + if (fofs == en->ei.fofs + en->ei.len - 1) { /* lookup next node for merging frontward later */ - tmp_node = rb_next(&re->rb_node); - *next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node); + tmp_node = rb_next(&en->rb_node); + *next_entry = rb_entry_safe(tmp_node, + struct extent_node, rb_node); } - return re; -} - -bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi, - struct rb_root_cached *root, bool check_key) -{ -#ifdef CONFIG_F2FS_CHECK_FS - struct rb_node *cur = rb_first_cached(root), *next; - struct rb_entry *cur_re, *next_re; - - if (!cur) - return true; - - while (cur) { - next = rb_next(cur); - if (!next) - return true; - - cur_re = rb_entry(cur, struct rb_entry, rb_node); - next_re = rb_entry(next, struct rb_entry, rb_node); - - if (check_key) { - if (cur_re->key > next_re->key) { - f2fs_info(sbi, "inconsistent rbtree, " - "cur(%llu) next(%llu)", 
- cur_re->key, next_re->key); - return false; - } - goto next; - } - - if (cur_re->ofs + cur_re->len > next_re->ofs) { - f2fs_info(sbi, "inconsistent rbtree, cur(%u, %u) next(%u, %u)", - cur_re->ofs, cur_re->len, - next_re->ofs, next_re->len); - return false; - } -next: - cur = next; - } -#endif - return true; + return en; } static struct kmem_cache *extent_tree_slab; @@ -587,8 +478,7 @@ static bool __lookup_extent_tree(struct inode *inode, pgoff_t pgofs, goto out; } - en = (struct extent_node *)f2fs_lookup_rb_tree(&et->root, - (struct rb_entry *)et->cached_en, pgofs); + en = __lookup_extent_node(&et->root, et->cached_en, pgofs); if (!en) goto out; @@ -662,7 +552,7 @@ static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi, bool leftmost) { struct extent_tree_info *eti = &sbi->extent_tree[et->type]; - struct rb_node **p; + struct rb_node **p = &et->root.rb_root.rb_node; struct rb_node *parent = NULL; struct extent_node *en = NULL; @@ -674,8 +564,21 @@ static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi, leftmost = true; - p = f2fs_lookup_rb_tree_for_insert(sbi, &et->root, &parent, - ei->fofs, &leftmost); + /* look up extent_node in the rb tree */ + while (*p) { + parent = *p; + en = rb_entry(parent, struct extent_node, rb_node); + + if (ei->fofs < en->ei.fofs) { + p = &(*p)->rb_left; + } else if (ei->fofs >= en->ei.fofs + en->ei.len) { + p = &(*p)->rb_right; + leftmost = false; + } else { + f2fs_bug_on(sbi, 1); + } + } + do_insert: en = __attach_extent_node(sbi, et, ei, parent, p, leftmost); if (!en) @@ -734,11 +637,10 @@ static void __update_extent_tree_range(struct inode *inode, } /* 1. lookup first extent node in range [fofs, fofs + len - 1] */ - en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root, - (struct rb_entry *)et->cached_en, fofs, - (struct rb_entry **)&prev_en, - (struct rb_entry **)&next_en, - &insert_p, &insert_parent, false, + en = __lookup_extent_node_ret(&et->root, + et->cached_en, fofs, + &prev_en, &next_en, + &insert_p, &insert_parent, + &leftmost); if (!en) en = next_en; @@ -876,12 +778,11 @@ void f2fs_update_read_extent_tree_range_compressed(struct inode *inode, write_lock(&et->lock); - en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root, - (struct rb_entry *)et->cached_en, fofs, - (struct rb_entry **)&prev_en, - (struct rb_entry **)&next_en, - &insert_p, &insert_parent, false, - &leftmost); + en = __lookup_extent_node_ret(&et->root, + et->cached_en, fofs, + &prev_en, &next_en, + &insert_p, &insert_parent, + &leftmost); if (en) goto unlock_out; diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 620343c65ab6..d6f9d6e0f13b 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -353,15 +353,7 @@ struct discard_info { struct discard_cmd { struct rb_node rb_node; /* rb node located in rb-tree */ - union { - struct { - block_t lstart; /* logical start address */ - block_t len; /* length */ - block_t start; /* actual start address in dev */ - }; - struct discard_info di; /* discard info */ - - }; + struct discard_info di; /* discard info */ struct list_head list; /* command list */ struct completion wait; /* completion */ struct block_device *bdev; /* bdev */ @@ -628,17 +620,6 @@ enum extent_type { NR_EXTENT_CACHES, }; -struct rb_entry { - struct rb_node rb_node; /* rb node located in rb-tree */ - union { - struct { - unsigned int ofs; /* start offset of the entry */ - unsigned int len; /* length of the entry */ - }; - unsigned long long key; /* 64-bits key */ - } __packed; -}; - struct extent_info { unsigned int fofs; /* 
start offset in a file */ unsigned int len; /* length of the extent */ @@ -4139,23 +4120,6 @@ void f2fs_leave_shrinker(struct f2fs_sb_info *sbi); * extent_cache.c */ bool sanity_check_extent_cache(struct inode *inode); -struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root, - struct rb_entry *cached_re, unsigned int ofs); -struct rb_node **f2fs_lookup_rb_tree_ext(struct f2fs_sb_info *sbi, - struct rb_root_cached *root, - struct rb_node **parent, - unsigned long long key, bool *left_most); -struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi, - struct rb_root_cached *root, - struct rb_node **parent, - unsigned int ofs, bool *leftmost); -struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root, - struct rb_entry *cached_re, unsigned int ofs, - struct rb_entry **prev_entry, struct rb_entry **next_entry, - struct rb_node ***insert_p, struct rb_node **insert_parent, - bool force, bool *leftmost); -bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi, - struct rb_root_cached *root, bool check_key); void f2fs_init_extent_tree(struct inode *inode); void f2fs_drop_extent_tree(struct inode *inode); void f2fs_destroy_extent_node(struct inode *inode); diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index 292a17d62f56..2996d38aa89c 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -390,40 +390,95 @@ static unsigned int count_bits(const unsigned long *addr, return sum; } -static struct victim_entry *attach_victim_entry(struct f2fs_sb_info *sbi, - unsigned long long mtime, unsigned int segno, - struct rb_node *parent, struct rb_node **p, - bool left_most) +static bool f2fs_check_victim_tree(struct f2fs_sb_info *sbi, + struct rb_root_cached *root) +{ +#ifdef CONFIG_F2FS_CHECK_FS + struct rb_node *cur = rb_first_cached(root), *next; + struct victim_entry *cur_ve, *next_ve; + + while (cur) { + next = rb_next(cur); + if (!next) + return true; + + cur_ve = rb_entry(cur, struct victim_entry, rb_node); + next_ve = rb_entry(next, struct victim_entry, rb_node); + + if (cur_ve->mtime > next_ve->mtime) { + f2fs_info(sbi, "broken victim_rbtree, " + "cur_mtime(%llu) next_mtime(%llu)", + cur_ve->mtime, next_ve->mtime); + return false; + } + cur = next; + } +#endif + return true; +} + +static struct victim_entry *__lookup_victim_entry(struct f2fs_sb_info *sbi, + unsigned long long mtime) +{ + struct atgc_management *am = &sbi->am; + struct rb_node *node = am->root.rb_root.rb_node; + struct victim_entry *ve = NULL; + + while (node) { + ve = rb_entry(node, struct victim_entry, rb_node); + + if (mtime < ve->mtime) + node = node->rb_left; + else + node = node->rb_right; + } + return ve; +} + +static struct victim_entry *__create_victim_entry(struct f2fs_sb_info *sbi, + unsigned long long mtime, unsigned int segno) { struct atgc_management *am = &sbi->am; struct victim_entry *ve; - ve = f2fs_kmem_cache_alloc(victim_entry_slab, - GFP_NOFS, true, NULL); + ve = f2fs_kmem_cache_alloc(victim_entry_slab, GFP_NOFS, true, NULL); ve->mtime = mtime; ve->segno = segno; - rb_link_node(&ve->rb_node, parent, p); - rb_insert_color_cached(&ve->rb_node, &am->root, left_most); - list_add_tail(&ve->list, &am->victim_list); - am->victim_count++; return ve; } -static void insert_victim_entry(struct f2fs_sb_info *sbi, +static void __insert_victim_entry(struct f2fs_sb_info *sbi, unsigned long long mtime, unsigned int segno) { struct atgc_management *am = &sbi->am; - struct rb_node **p; + struct rb_root_cached *root = &am->root; + struct rb_node **p = &root->rb_root.rb_node; struct rb_node *parent = NULL; 
+ struct victim_entry *ve; bool left_most = true; - p = f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, mtime, &left_most); - attach_victim_entry(sbi, mtime, segno, parent, p, left_most); + /* look up rb tree to find parent node */ + while (*p) { + parent = *p; + ve = rb_entry(parent, struct victim_entry, rb_node); + + if (mtime < ve->mtime) { + p = &(*p)->rb_left; + } else { + p = &(*p)->rb_right; + left_most = false; + } + } + + ve = __create_victim_entry(sbi, mtime, segno); + + rb_link_node(&ve->rb_node, parent, p); + rb_insert_color_cached(&ve->rb_node, root, left_most); } static void add_victim_entry(struct f2fs_sb_info *sbi, @@ -459,19 +514,7 @@ static void add_victim_entry(struct f2fs_sb_info *sbi, if (sit_i->dirty_max_mtime - mtime < p->age_threshold) return; - insert_victim_entry(sbi, mtime, segno); -} - -static struct rb_node *lookup_central_victim(struct f2fs_sb_info *sbi, - struct victim_sel_policy *p) -{ - struct atgc_management *am = &sbi->am; - struct rb_node *parent = NULL; - bool left_most; - - f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, p->age, &left_most); - - return parent; + __insert_victim_entry(sbi, mtime, segno); } static void atgc_lookup_victim(struct f2fs_sb_info *sbi, @@ -481,7 +524,6 @@ static void atgc_lookup_victim(struct f2fs_sb_info *sbi, struct atgc_management *am = &sbi->am; struct rb_root_cached *root = &am->root; struct rb_node *node; - struct rb_entry *re; struct victim_entry *ve; unsigned long long total_time; unsigned long long age, u, accu; @@ -508,12 +550,10 @@ static void atgc_lookup_victim(struct f2fs_sb_info *sbi, node = rb_first_cached(root); next: - re = rb_entry_safe(node, struct rb_entry, rb_node); - if (!re) + ve = rb_entry_safe(node, struct victim_entry, rb_node); + if (!ve) return; - ve = (struct victim_entry *)re; - if (ve->mtime >= max_mtime || ve->mtime < min_mtime) goto skip; @@ -555,8 +595,6 @@ static void atssr_lookup_victim(struct f2fs_sb_info *sbi, { struct sit_info *sit_i = SIT_I(sbi); struct atgc_management *am = &sbi->am; - struct rb_node *node; - struct rb_entry *re; struct victim_entry *ve; unsigned long long age; unsigned long long max_mtime = sit_i->dirty_max_mtime; @@ -566,25 +604,22 @@ static void atssr_lookup_victim(struct f2fs_sb_info *sbi, unsigned int dirty_threshold = max(am->max_candidate_count, am->candidate_ratio * am->victim_count / 100); - unsigned int cost; - unsigned int iter = 0; + unsigned int cost, iter; int stage = 0; if (max_mtime < min_mtime) return; max_mtime += 1; next_stage: - node = lookup_central_victim(sbi, p); + iter = 0; + ve = __lookup_victim_entry(sbi, p->age); next_node: - re = rb_entry_safe(node, struct rb_entry, rb_node); - if (!re) { - if (stage == 0) - goto skip_stage; + if (!ve) { + if (stage++ == 0) + goto next_stage; return; } - ve = (struct victim_entry *)re; - if (ve->mtime >= max_mtime || ve->mtime < min_mtime) goto skip_node; @@ -610,24 +645,20 @@ next_node: } skip_node: if (iter < dirty_threshold) { - if (stage == 0) - node = rb_prev(node); - else if (stage == 1) - node = rb_next(node); + ve = rb_entry(stage == 0 ? 
rb_prev(&ve->rb_node) : + rb_next(&ve->rb_node), + struct victim_entry, rb_node); goto next_node; } -skip_stage: - if (stage < 1) { - stage++; - iter = 0; + + if (stage++ == 0) goto next_stage; - } } + static void lookup_victim_by_age(struct f2fs_sb_info *sbi, struct victim_sel_policy *p) { - f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi, - &sbi->am.root, true)); + f2fs_bug_on(sbi, !f2fs_check_victim_tree(sbi, &sbi->am.root)); if (p->gc_mode == GC_AT) atgc_lookup_victim(sbi, p); diff --git a/fs/f2fs/gc.h b/fs/f2fs/gc.h index 15bd1d680f67..5ad6ac63e13f 100644 --- a/fs/f2fs/gc.h +++ b/fs/f2fs/gc.h @@ -55,20 +55,10 @@ struct gc_inode_list { struct radix_tree_root iroot; }; -struct victim_info { - unsigned long long mtime; /* mtime of section */ - unsigned int segno; /* section No. */ -}; - struct victim_entry { struct rb_node rb_node; /* rb node located in rb-tree */ - union { - struct { - unsigned long long mtime; /* mtime of section */ - unsigned int segno; /* segment No. */ - }; - struct victim_info vi; /* victim info */ - }; + unsigned long long mtime; /* mtime of section */ + unsigned int segno; /* segment No. */ struct list_head list; }; diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c index 11fc4c8036a9..f97073c75d67 100644 --- a/fs/f2fs/namei.c +++ b/fs/f2fs/namei.c @@ -995,12 +995,20 @@ static int f2fs_rename(struct mnt_idmap *idmap, struct inode *old_dir, goto out; } + /* + * Copied from ext4_rename: we need to protect against old.inode + * directory getting converted from inline directory format into + * a normal one. + */ + if (S_ISDIR(old_inode->i_mode)) + inode_lock_nested(old_inode, I_MUTEX_NONDIR2); + err = -ENOENT; old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page); if (!old_entry) { if (IS_ERR(old_page)) err = PTR_ERR(old_page); - goto out; + goto out_unlock_old; } if (S_ISDIR(old_inode->i_mode)) { @@ -1108,6 +1116,9 @@ static int f2fs_rename(struct mnt_idmap *idmap, struct inode *old_dir, f2fs_unlock_op(sbi); + if (S_ISDIR(old_inode->i_mode)) + inode_unlock(old_inode); + if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir)) f2fs_sync_fs(sbi->sb, 1); @@ -1122,6 +1133,9 @@ out_dir: f2fs_put_page(old_dir_page, 0); out_old: f2fs_put_page(old_page, 0); +out_unlock_old: + if (S_ISDIR(old_inode->i_mode)) + inode_unlock(old_inode); out: iput(whiteout); return err; diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index b2a080c660c8..a705c1d42716 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -263,7 +263,7 @@ retry: f2fs_put_dnode(&dn); trace_f2fs_replace_atomic_write_block(inode, F2FS_I(inode)->cow_inode, - index, *old_addr, new_addr, recover); + index, old_addr ? 
*old_addr : 0, new_addr, recover); return 0; } @@ -939,9 +939,9 @@ static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi, dc = f2fs_kmem_cache_alloc(discard_cmd_slab, GFP_NOFS, true, NULL); INIT_LIST_HEAD(&dc->list); dc->bdev = bdev; - dc->lstart = lstart; - dc->start = start; - dc->len = len; + dc->di.lstart = lstart; + dc->di.start = start; + dc->di.len = len; dc->ref = 0; dc->state = D_PREP; dc->queued = 0; @@ -956,20 +956,108 @@ static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi, return dc; } -static struct discard_cmd *__attach_discard_cmd(struct f2fs_sb_info *sbi, - struct block_device *bdev, block_t lstart, - block_t start, block_t len, - struct rb_node *parent, struct rb_node **p, - bool leftmost) +static bool f2fs_check_discard_tree(struct f2fs_sb_info *sbi) +{ +#ifdef CONFIG_F2FS_CHECK_FS + struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; + struct rb_node *cur = rb_first_cached(&dcc->root), *next; + struct discard_cmd *cur_dc, *next_dc; + + while (cur) { + next = rb_next(cur); + if (!next) + return true; + + cur_dc = rb_entry(cur, struct discard_cmd, rb_node); + next_dc = rb_entry(next, struct discard_cmd, rb_node); + + if (cur_dc->di.lstart + cur_dc->di.len > next_dc->di.lstart) { + f2fs_info(sbi, "broken discard_rbtree, " + "cur(%u, %u) next(%u, %u)", + cur_dc->di.lstart, cur_dc->di.len, + next_dc->di.lstart, next_dc->di.len); + return false; + } + cur = next; + } +#endif + return true; +} + +static struct discard_cmd *__lookup_discard_cmd(struct f2fs_sb_info *sbi, + block_t blkaddr) { struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; + struct rb_node *node = dcc->root.rb_root.rb_node; struct discard_cmd *dc; - dc = __create_discard_cmd(sbi, bdev, lstart, start, len); + while (node) { + dc = rb_entry(node, struct discard_cmd, rb_node); - rb_link_node(&dc->rb_node, parent, p); - rb_insert_color_cached(&dc->rb_node, &dcc->root, leftmost); + if (blkaddr < dc->di.lstart) + node = node->rb_left; + else if (blkaddr >= dc->di.lstart + dc->di.len) + node = node->rb_right; + else + return dc; + } + return NULL; +} + +static struct discard_cmd *__lookup_discard_cmd_ret(struct rb_root_cached *root, + block_t blkaddr, + struct discard_cmd **prev_entry, + struct discard_cmd **next_entry, + struct rb_node ***insert_p, + struct rb_node **insert_parent) +{ + struct rb_node **pnode = &root->rb_root.rb_node; + struct rb_node *parent = NULL, *tmp_node; + struct discard_cmd *dc; + + *insert_p = NULL; + *insert_parent = NULL; + *prev_entry = NULL; + *next_entry = NULL; + + if (RB_EMPTY_ROOT(&root->rb_root)) + return NULL; + + while (*pnode) { + parent = *pnode; + dc = rb_entry(*pnode, struct discard_cmd, rb_node); + + if (blkaddr < dc->di.lstart) + pnode = &(*pnode)->rb_left; + else if (blkaddr >= dc->di.lstart + dc->di.len) + pnode = &(*pnode)->rb_right; + else + goto lookup_neighbors; + } + + *insert_p = pnode; + *insert_parent = parent; + + dc = rb_entry(parent, struct discard_cmd, rb_node); + tmp_node = parent; + if (parent && blkaddr > dc->di.lstart) + tmp_node = rb_next(parent); + *next_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node); + + tmp_node = parent; + if (parent && blkaddr < dc->di.lstart) + tmp_node = rb_prev(parent); + *prev_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node); + return NULL; + +lookup_neighbors: + /* lookup prev node for merging backward later */ + tmp_node = rb_prev(&dc->rb_node); + *prev_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node); + /* lookup next node for merging frontward 
later */ + tmp_node = rb_next(&dc->rb_node); + *next_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node); return dc; } @@ -981,7 +1069,7 @@ static void __detach_discard_cmd(struct discard_cmd_control *dcc, list_del(&dc->list); rb_erase_cached(&dc->rb_node, &dcc->root); - dcc->undiscard_blks -= dc->len; + dcc->undiscard_blks -= dc->di.len; kmem_cache_free(discard_cmd_slab, dc); @@ -994,7 +1082,7 @@ static void __remove_discard_cmd(struct f2fs_sb_info *sbi, struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; unsigned long flags; - trace_f2fs_remove_discard(dc->bdev, dc->start, dc->len); + trace_f2fs_remove_discard(dc->bdev, dc->di.start, dc->di.len); spin_lock_irqsave(&dc->lock, flags); if (dc->bio_ref) { @@ -1012,7 +1100,7 @@ static void __remove_discard_cmd(struct f2fs_sb_info *sbi, printk_ratelimited( "%sF2FS-fs (%s): Issue discard(%u, %u, %u) failed, ret: %d", KERN_INFO, sbi->sb->s_id, - dc->lstart, dc->start, dc->len, dc->error); + dc->di.lstart, dc->di.start, dc->di.len, dc->error); __detach_discard_cmd(dcc, dc); } @@ -1128,14 +1216,14 @@ static int __submit_discard_cmd(struct f2fs_sb_info *sbi, if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) return 0; - trace_f2fs_issue_discard(bdev, dc->start, dc->len); + trace_f2fs_issue_discard(bdev, dc->di.start, dc->di.len); - lstart = dc->lstart; - start = dc->start; - len = dc->len; + lstart = dc->di.lstart; + start = dc->di.start; + len = dc->di.len; total_len = len; - dc->len = 0; + dc->di.len = 0; while (total_len && *issued < dpolicy->max_requests && !err) { struct bio *bio = NULL; @@ -1151,7 +1239,7 @@ static int __submit_discard_cmd(struct f2fs_sb_info *sbi, if (*issued == dpolicy->max_requests) last = true; - dc->len += len; + dc->di.len += len; if (time_to_inject(sbi, FAULT_DISCARD)) { err = -EIO; @@ -1213,34 +1301,41 @@ static int __submit_discard_cmd(struct f2fs_sb_info *sbi, return err; } -static void __insert_discard_tree(struct f2fs_sb_info *sbi, +static void __insert_discard_cmd(struct f2fs_sb_info *sbi, struct block_device *bdev, block_t lstart, - block_t start, block_t len, - struct rb_node **insert_p, - struct rb_node *insert_parent) + block_t start, block_t len) { struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; - struct rb_node **p; + struct rb_node **p = &dcc->root.rb_root.rb_node; struct rb_node *parent = NULL; + struct discard_cmd *dc; bool leftmost = true; - if (insert_p && insert_parent) { - parent = insert_parent; - p = insert_p; - goto do_insert; + /* look up rb tree to find parent node */ + while (*p) { + parent = *p; + dc = rb_entry(parent, struct discard_cmd, rb_node); + + if (lstart < dc->di.lstart) { + p = &(*p)->rb_left; + } else if (lstart >= dc->di.lstart + dc->di.len) { + p = &(*p)->rb_right; + leftmost = false; + } else { + f2fs_bug_on(sbi, 1); + } } - p = f2fs_lookup_rb_tree_for_insert(sbi, &dcc->root, &parent, - lstart, &leftmost); -do_insert: - __attach_discard_cmd(sbi, bdev, lstart, start, len, parent, - p, leftmost); + dc = __create_discard_cmd(sbi, bdev, lstart, start, len); + + rb_link_node(&dc->rb_node, parent, p); + rb_insert_color_cached(&dc->rb_node, &dcc->root, leftmost); } static void __relocate_discard_cmd(struct discard_cmd_control *dcc, struct discard_cmd *dc) { - list_move_tail(&dc->list, &dcc->pend_list[plist_idx(dc->len)]); + list_move_tail(&dc->list, &dcc->pend_list[plist_idx(dc->di.len)]); } static void __punch_discard_cmd(struct f2fs_sb_info *sbi, @@ -1250,7 +1345,7 @@ static void __punch_discard_cmd(struct f2fs_sb_info *sbi, struct discard_info di = dc->di; bool modified = 
false; - if (dc->state == D_DONE || dc->len == 1) { + if (dc->state == D_DONE || dc->di.len == 1) { __remove_discard_cmd(sbi, dc); return; } @@ -1258,23 +1353,22 @@ static void __punch_discard_cmd(struct f2fs_sb_info *sbi, dcc->undiscard_blks -= di.len; if (blkaddr > di.lstart) { - dc->len = blkaddr - dc->lstart; - dcc->undiscard_blks += dc->len; + dc->di.len = blkaddr - dc->di.lstart; + dcc->undiscard_blks += dc->di.len; __relocate_discard_cmd(dcc, dc); modified = true; } if (blkaddr < di.lstart + di.len - 1) { if (modified) { - __insert_discard_tree(sbi, dc->bdev, blkaddr + 1, + __insert_discard_cmd(sbi, dc->bdev, blkaddr + 1, di.start + blkaddr + 1 - di.lstart, - di.lstart + di.len - 1 - blkaddr, - NULL, NULL); + di.lstart + di.len - 1 - blkaddr); } else { - dc->lstart++; - dc->len--; - dc->start++; - dcc->undiscard_blks += dc->len; + dc->di.lstart++; + dc->di.len--; + dc->di.start++; + dcc->undiscard_blks += dc->di.len; __relocate_discard_cmd(dcc, dc); } } @@ -1293,17 +1387,14 @@ static void __update_discard_tree_range(struct f2fs_sb_info *sbi, SECTOR_TO_BLOCK(bdev_max_discard_sectors(bdev)); block_t end = lstart + len; - dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root, - NULL, lstart, - (struct rb_entry **)&prev_dc, - (struct rb_entry **)&next_dc, - &insert_p, &insert_parent, true, NULL); + dc = __lookup_discard_cmd_ret(&dcc->root, lstart, + &prev_dc, &next_dc, &insert_p, &insert_parent); if (dc) prev_dc = dc; if (!prev_dc) { di.lstart = lstart; - di.len = next_dc ? next_dc->lstart - lstart : len; + di.len = next_dc ? next_dc->di.lstart - lstart : len; di.len = min(di.len, len); di.start = start; } @@ -1314,16 +1405,16 @@ static void __update_discard_tree_range(struct f2fs_sb_info *sbi, struct discard_cmd *tdc = NULL; if (prev_dc) { - di.lstart = prev_dc->lstart + prev_dc->len; + di.lstart = prev_dc->di.lstart + prev_dc->di.len; if (di.lstart < lstart) di.lstart = lstart; if (di.lstart >= end) break; - if (!next_dc || next_dc->lstart > end) + if (!next_dc || next_dc->di.lstart > end) di.len = end - di.lstart; else - di.len = next_dc->lstart - di.lstart; + di.len = next_dc->di.lstart - di.lstart; di.start = start + di.lstart - lstart; } @@ -1356,10 +1447,9 @@ static void __update_discard_tree_range(struct f2fs_sb_info *sbi, merged = true; } - if (!merged) { - __insert_discard_tree(sbi, bdev, di.lstart, di.start, - di.len, NULL, NULL); - } + if (!merged) + __insert_discard_cmd(sbi, bdev, + di.lstart, di.start, di.len); next: prev_dc = next_dc; if (!prev_dc) @@ -1398,15 +1488,11 @@ static void __issue_discard_cmd_orderly(struct f2fs_sb_info *sbi, struct rb_node **insert_p = NULL, *insert_parent = NULL; struct discard_cmd *dc; struct blk_plug plug; - unsigned int pos = dcc->next_pos; bool io_interrupted = false; mutex_lock(&dcc->cmd_lock); - dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root, - NULL, pos, - (struct rb_entry **)&prev_dc, - (struct rb_entry **)&next_dc, - &insert_p, &insert_parent, true, NULL); + dc = __lookup_discard_cmd_ret(&dcc->root, dcc->next_pos, + &prev_dc, &next_dc, &insert_p, &insert_parent); if (!dc) dc = next_dc; @@ -1424,7 +1510,7 @@ static void __issue_discard_cmd_orderly(struct f2fs_sb_info *sbi, break; } - dcc->next_pos = dc->lstart + dc->len; + dcc->next_pos = dc->di.lstart + dc->di.len; err = __submit_discard_cmd(sbi, dpolicy, dc, issued); if (*issued >= dpolicy->max_requests) @@ -1483,8 +1569,7 @@ retry: if (list_empty(pend_list)) goto next; if (unlikely(dcc->rbtree_check)) - f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi, - 
&dcc->root, false)); + f2fs_bug_on(sbi, !f2fs_check_discard_tree(sbi)); blk_start_plug(&plug); list_for_each_entry_safe(dc, tmp, pend_list, list) { f2fs_bug_on(sbi, dc->state != D_PREP); @@ -1562,7 +1647,7 @@ static unsigned int __wait_one_discard_bio(struct f2fs_sb_info *sbi, dc->ref--; if (!dc->ref) { if (!dc->error) - len = dc->len; + len = dc->di.len; __remove_discard_cmd(sbi, dc); } mutex_unlock(&dcc->cmd_lock); @@ -1585,14 +1670,15 @@ next: mutex_lock(&dcc->cmd_lock); list_for_each_entry_safe(iter, tmp, wait_list, list) { - if (iter->lstart + iter->len <= start || end <= iter->lstart) + if (iter->di.lstart + iter->di.len <= start || + end <= iter->di.lstart) continue; - if (iter->len < dpolicy->granularity) + if (iter->di.len < dpolicy->granularity) continue; if (iter->state == D_DONE && !iter->ref) { wait_for_completion_io(&iter->wait); if (!iter->error) - trimmed += iter->len; + trimmed += iter->di.len; __remove_discard_cmd(sbi, iter); } else { iter->ref++; @@ -1636,8 +1722,7 @@ static void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr) bool need_wait = false; mutex_lock(&dcc->cmd_lock); - dc = (struct discard_cmd *)f2fs_lookup_rb_tree(&dcc->root, - NULL, blkaddr); + dc = __lookup_discard_cmd(sbi, blkaddr); if (dc) { if (dc->state == D_PREP) { __punch_discard_cmd(sbi, dc, blkaddr); @@ -2970,24 +3055,20 @@ next: mutex_lock(&dcc->cmd_lock); if (unlikely(dcc->rbtree_check)) - f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi, - &dcc->root, false)); - - dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root, - NULL, start, - (struct rb_entry **)&prev_dc, - (struct rb_entry **)&next_dc, - &insert_p, &insert_parent, true, NULL); + f2fs_bug_on(sbi, !f2fs_check_discard_tree(sbi)); + + dc = __lookup_discard_cmd_ret(&dcc->root, start, + &prev_dc, &next_dc, &insert_p, &insert_parent); if (!dc) dc = next_dc; blk_start_plug(&plug); - while (dc && dc->lstart <= end) { + while (dc && dc->di.lstart <= end) { struct rb_node *node; int err = 0; - if (dc->len < dpolicy->granularity) + if (dc->di.len < dpolicy->granularity) goto skip; if (dc->state != D_PREP) { @@ -2998,7 +3079,7 @@ next: err = __submit_discard_cmd(sbi, dpolicy, dc, &issued); if (issued >= dpolicy->max_requests) { - start = dc->lstart + dc->len; + start = dc->di.lstart + dc->di.len; if (err) __remove_discard_cmd(sbi, dc); diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 1db3e3c24b43..ae4e51e91ee3 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -829,7 +829,7 @@ void wbc_detach_inode(struct writeback_control *wbc) * is okay. The main goal is avoiding keeping an inode on * the wrong wb for an extended period of time. */ - if (hweight32(history) > WB_FRN_HIST_THR_SLOTS) + if (hweight16(history) > WB_FRN_HIST_THR_SLOTS) inode_switch_wbs(inode, max_id); } diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c index 49cfe2ae6d23..993375f0db67 100644 --- a/fs/notify/inotify/inotify_fsnotify.c +++ b/fs/notify/inotify/inotify_fsnotify.c @@ -65,7 +65,7 @@ int inotify_handle_inode_event(struct fsnotify_mark *inode_mark, u32 mask, struct fsnotify_event *fsn_event; struct fsnotify_group *group = inode_mark->group; int ret; - int len = 0; + int len = 0, wd; int alloc_len = sizeof(struct inotify_event_info); struct mem_cgroup *old_memcg; @@ -81,6 +81,13 @@ int inotify_handle_inode_event(struct fsnotify_mark *inode_mark, u32 mask, fsn_mark); /* + * We can be racing with mark being detached. Don't report event with + * invalid wd. 
+ */ + wd = READ_ONCE(i_mark->wd); + if (wd == -1) + return 0; + /* * Whoever is interested in the event, pays for the allocation. Do not * trigger OOM killer in the target monitoring memcg as it may have * security repercussion. @@ -110,7 +117,7 @@ int inotify_handle_inode_event(struct fsnotify_mark *inode_mark, u32 mask, fsn_event = &event->fse; fsnotify_init_event(fsn_event); event->mask = mask; - event->wd = i_mark->wd; + event->wd = wd; event->sync_cookie = cookie; event->name_len = len; if (len) diff --git a/fs/ntfs3/bitmap.c b/fs/ntfs3/bitmap.c index 723fb64e6531..393c726ef17a 100644 --- a/fs/ntfs3/bitmap.c +++ b/fs/ntfs3/bitmap.c @@ -658,7 +658,8 @@ int wnd_init(struct wnd_bitmap *wnd, struct super_block *sb, size_t nbits) if (!wnd->bits_last) wnd->bits_last = wbits; - wnd->free_bits = kcalloc(wnd->nwnd, sizeof(u16), GFP_NOFS | __GFP_NOWARN); + wnd->free_bits = + kcalloc(wnd->nwnd, sizeof(u16), GFP_NOFS | __GFP_NOWARN); if (!wnd->free_bits) return -ENOMEM; diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c index f1df52dfab74..7d0473da12c3 100644 --- a/fs/ntfs3/frecord.c +++ b/fs/ntfs3/frecord.c @@ -1645,7 +1645,7 @@ struct ATTR_FILE_NAME *ni_fname_name(struct ntfs_inode *ni, { struct ATTRIB *attr = NULL; struct ATTR_FILE_NAME *fname; - struct le_str *fns; + struct le_str *fns; if (le) *le = NULL; diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c index 567563771bf8..24c9aeb5a49e 100644 --- a/fs/ntfs3/fsntfs.c +++ b/fs/ntfs3/fsntfs.c @@ -2594,8 +2594,10 @@ static inline bool is_reserved_name(struct ntfs_sb_info *sbi, if (len == 4 || (len > 4 && le16_to_cpu(name[4]) == '.')) { port_digit = le16_to_cpu(name[3]); if (port_digit >= '1' && port_digit <= '9') - if (!ntfs_cmp_names(name, 3, COM_NAME, 3, upcase, false) || - !ntfs_cmp_names(name, 3, LPT_NAME, 3, upcase, false)) + if (!ntfs_cmp_names(name, 3, COM_NAME, 3, upcase, + false) || + !ntfs_cmp_names(name, 3, LPT_NAME, 3, upcase, + false)) return true; } diff --git a/fs/ntfs3/namei.c b/fs/ntfs3/namei.c index 407fe92394e2..92bbc8ee83ca 100644 --- a/fs/ntfs3/namei.c +++ b/fs/ntfs3/namei.c @@ -88,6 +88,16 @@ static struct dentry *ntfs_lookup(struct inode *dir, struct dentry *dentry, __putname(uni); } + /* + * Check for a null pointer + * If the MFT record of ntfs inode is not a base record, inode->i_op can be NULL. + * This causes null pointer dereference in d_splice_alias(). + */ + if (!IS_ERR_OR_NULL(inode) && !inode->i_op) { + iput(inode); + inode = ERR_PTR(-EINVAL); + } + return d_splice_alias(inode, dentry); } diff --git a/fs/ntfs3/ntfs.h b/fs/ntfs3/ntfs.h index 86ea1826d099..90151e56c122 100644 --- a/fs/ntfs3/ntfs.h +++ b/fs/ntfs3/ntfs.h @@ -435,9 +435,6 @@ static inline u64 attr_svcn(const struct ATTRIB *attr) return attr->non_res ? le64_to_cpu(attr->nres.svcn) : 0; } -/* The size of resident attribute by its resident size. */ -#define BYTES_PER_RESIDENT(b) (0x18 + (b)) - static_assert(sizeof(struct ATTRIB) == 0x48); static_assert(sizeof(((struct ATTRIB *)NULL)->res) == 0x08); static_assert(sizeof(((struct ATTRIB *)NULL)->nres) == 0x38); diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 5851eb5bc726..faf32caef89b 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -1287,7 +1287,10 @@ out: * __register_sysctl_table - register a leaf sysctl table * @set: Sysctl tree to register on * @path: The path to the directory the sysctl table is in. - * @table: the top-level table structure + * @table: the top-level table structure without any child. This table + * should not be free'd after registration. 
So it should not be + used on stack. It can either be a global or dynamically allocated + by the caller and free'd later after sysctl unregistration. * * Register a sysctl table hierarchy. @table should be a filled in ctl_table * array. A completely 0 filled entry terminates the table. @@ -1308,9 +1311,12 @@ out: * proc_handler - the text handler routine (described below) * * extra1, extra2 - extra pointers usable by the proc handler routines + * XXX: we should eventually modify these to use long min / max [0] + * [0] https://lkml.kernel.org/87zgpte9o4.fsf@email.froward.int.ebiederm.org * * Leaf nodes in the sysctl tree will be represented by a single file - * under /proc; non-leaf nodes will be represented by directories. + * under /proc; non-leaf nodes (where child is not NULL) are not allowed, + * sysctl_check_table() verifies this. * * There must be a proc_handler routine for any terminal nodes. * Several default handlers are available to cover common cases - @@ -1352,7 +1358,7 @@ struct ctl_table_header *__register_sysctl_table( spin_lock(&sysctl_lock); dir = &set->dir; - /* Reference moved down the diretory tree get_subdir */ + /* Reference moved down the directory tree get_subdir */ dir->header.nreg++; spin_unlock(&sysctl_lock); @@ -1369,6 +1375,11 @@ struct ctl_table_header *__register_sysctl_table( if (namelen == 0) continue; + /* + * namelen ensures if name is "foo/bar/yay" only foo is + * registered first. We traverse as if using mkdir -p and + * return a ctl_dir for the last directory entry. + */ dir = get_subdir(dir, name, namelen); if (IS_ERR(dir)) goto fail; @@ -1394,8 +1405,15 @@ fail: /** * register_sysctl - register a sysctl table - * @path: The path to the directory the sysctl table is in. - * @table: the table structure + * @path: The path to the directory the sysctl table is in. If the path + * doesn't exist we will create it for you. + * @table: the table structure. The caller must ensure the life of the @table + * will be kept during the lifetime use of the sysctl. It must not be freed + * until unregister_sysctl_table() is called with the table returned from + * this registration. If your code is non-modular then you don't need + * to call unregister_sysctl_table() and can instead use something like + * register_sysctl_init() which does not care for the result of the sysctl + * registration. * * Register a sysctl table. @table should be a filled in ctl_table * array. A completely 0 filled entry terminates the table. @@ -1411,8 +1429,11 @@ EXPORT_SYMBOL(register_sysctl); /** * __register_sysctl_init() - register sysctl table to path - * @path: path name for sysctl base - * @table: This is the sysctl table that needs to be registered to the path + * @path: path name for sysctl base. If that path doesn't exist we will create + * it for you. + * @table: This is the sysctl table that needs to be registered to the path. + * The caller must ensure the life of the @table will be kept during the + * lifetime use of the sysctl. * @table_name: The name of sysctl table, only used for log printing when * registration fails * * @@ -1424,10 +1445,7 @@ EXPORT_SYMBOL(register_sysctl); * register_sysctl() failing on init are extremely low, and so for both reasons * this function does not return any error as it is used by initialization code. * - * Context: Can only be called after your respective sysctl base path has been
So for instance, most base directories are registered early on - * init before init levels are processed through proc_sys_init() and - * sysctl_init_bases(). + * Context: if your base directory does not exist it will be created for you. */ void __init __register_sysctl_init(const char *path, struct ctl_table *table, const char *table_name) @@ -1557,6 +1575,7 @@ out: * * Register a sysctl table hierarchy. @table should be a filled in ctl_table * array. A completely 0 filled entry terminates the table. + * We are slowly deprecating this call so avoid its use. * * See __register_sysctl_table for more details. */ @@ -1628,6 +1647,7 @@ err_register_leaves: * * Register a sysctl table hierarchy. @table should be a filled in ctl_table * array. A completely 0 filled entry terminates the table. + * We are slowly deprecating this caller so avoid future uses of it. * * See __register_sysctl_paths for more details. */ |