Diffstat (limited to 'mm')
 mm/Kconfig          |  3
 mm/cma.c            |  7
 mm/filemap.c        | 30
 mm/kasan/kasan.h    |  1
 mm/memcontrol.c     | 41
 mm/memory.c         | 40
 mm/memory_hotplug.c |  4
 mm/mmap.c           | 16
 mm/nommu.c          |  2
 mm/oom_kill.c       | 32
 mm/page_ext.c       |  4
 mm/rmap.c           |  2
 mm/truncate.c       | 62
 mm/zsmalloc.c       | 35
14 files changed, 154 insertions, 125 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index 2664c118b5d2..3e2daef3c946 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -648,7 +648,8 @@ config DEFERRED_STRUCT_PAGE_INIT
 	bool "Defer initialisation of struct pages to kthreads"
 	default n
 	depends on ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
-	depends on MEMORY_HOTPLUG
+	depends on NO_BOOTMEM && MEMORY_HOTPLUG
+	depends on !FLATMEM
 	help
 	  Ordinarily all struct pages are initialised during early boot in a
 	  single thread. On very large machines this can take a considerable
diff --git a/mm/cma.c b/mm/cma.c
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -183,7 +183,8 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
 		return -EINVAL;
 
 	/* ensure minimal alignment required by mm core */
-	alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
+	alignment = PAGE_SIZE <<
+			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);
 
 	/* alignment should be aligned with order_per_bit */
 	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
@@ -266,8 +267,8 @@ int __init cma_declare_contiguous(phys_addr_t base,
 	 * migratetype page by page allocator's buddy algorithm. In the case,
 	 * you couldn't get a contiguous memory, which is not what we want.
 	 */
-	alignment = max(alignment,
-		(phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
+	alignment = max(alignment, (phys_addr_t)PAGE_SIZE <<
+			max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
 	base = ALIGN(base, alignment);
 	size = ALIGN(size, alignment);
 	limit &= ~(alignment - 1);
diff --git a/mm/filemap.c b/mm/filemap.c
index 9665b1d4f318..00ae878b2a38 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -143,13 +143,15 @@ static void page_cache_tree_delete(struct address_space *mapping,
 		return;
 
 	/*
-	 * Track node that only contains shadow entries.
+	 * Track node that only contains shadow entries. DAX mappings contain
+	 * no shadow entries and may contain other exceptional entries so skip
+	 * those.
 	 *
 	 * Avoid acquiring the list_lru lock if already tracked. The
 	 * list_empty() test is safe as node->private_list is
 	 * protected by mapping->tree_lock.
 	 */
-	if (!workingset_node_pages(node) &&
+	if (!dax_mapping(mapping) && !workingset_node_pages(node) &&
 	    list_empty(&node->private_list)) {
 		node->private_data = mapping;
 		list_lru_add(&workingset_shadow_nodes, &node->private_list);
@@ -580,14 +582,24 @@ static int page_cache_tree_insert(struct address_space *mapping,
 		if (!radix_tree_exceptional_entry(p))
 			return -EEXIST;
 
-		if (WARN_ON(dax_mapping(mapping)))
-			return -EINVAL;
-
-		if (shadowp)
-			*shadowp = p;
 		mapping->nrexceptional--;
-		if (node)
-			workingset_node_shadows_dec(node);
+		if (!dax_mapping(mapping)) {
+			if (shadowp)
+				*shadowp = p;
+			if (node)
+				workingset_node_shadows_dec(node);
+		} else {
+			/* DAX can replace empty locked entry with a hole */
+			WARN_ON_ONCE(p !=
+				(void *)(RADIX_TREE_EXCEPTIONAL_ENTRY |
+					 RADIX_DAX_ENTRY_LOCK));
+			/* DAX accounts exceptional entries as normal pages */
+			if (node)
+				workingset_node_pages_dec(node);
+			/* Wakeup waiters for exceptional entry lock */
+			dax_wake_mapping_entry_waiter(mapping, page->index,
+						      false);
+		}
 	}
 	radix_tree_replace_slot(slot, page);
 	mapping->nrpages++;
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 7f7ac51d7faf..fb87923552ef 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -77,7 +77,6 @@ struct kasan_alloc_meta {
 	struct kasan_track track;
 	u32 state : 2;	/* enum kasan_state */
 	u32 alloc_size : 30;
-	u32 reserved;
 };
 
 struct qlist_node {
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index cf428d7b9a03..925b431f3f03 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1108,6 +1108,8 @@ static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
 		limit = READ_ONCE(memcg->memsw.limit);
 		if (count <= limit)
 			margin = min(margin, limit - count);
+		else
+			margin = 0;
 	}
 
 	return margin;
@@ -1302,6 +1304,8 @@ static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
 				mem_cgroup_iter_break(memcg, iter);
 				if (chosen)
 					put_task_struct(chosen);
+				/* Set a dummy value to return "true". */
+				chosen = (void *) 1;
 				goto unlock;
 			case OOM_SCAN_OK:
 				break;
@@ -4305,24 +4309,6 @@ static int mem_cgroup_do_precharge(unsigned long count)
 	return 0;
 }
 
-/**
- * get_mctgt_type - get target type of moving charge
- * @vma: the vma the pte to be checked belongs
- * @addr: the address corresponding to the pte to be checked
- * @ptent: the pte to be checked
- * @target: the pointer the target page or swap ent will be stored(can be NULL)
- *
- * Returns
- *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
- *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
- *     move charge. if @target is not NULL, the page is stored in target->page
- *     with extra refcnt got(Callers should handle it).
- *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
- *     target for charge migration. if @target is not NULL, the entry is stored
- *     in target->ent.
- *
- * Called with pte lock held.
- */
 union mc_target {
 	struct page *page;
 	swp_entry_t ent;
@@ -4511,6 +4497,25 @@ out:
 	return ret;
 }
 
+/**
+ * get_mctgt_type - get target type of moving charge
+ * @vma: the vma the pte to be checked belongs
+ * @addr: the address corresponding to the pte to be checked
+ * @ptent: the pte to be checked
+ * @target: the pointer the target page or swap ent will be stored(can be NULL)
+ *
+ * Returns
+ *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
+ *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
+ *     move charge. if @target is not NULL, the page is stored in target->page
+ *     with extra refcnt got(Callers should handle it).
+ *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
+ *     target for charge migration. if @target is not NULL, the entry is stored
+ *     in target->ent.
+ *
+ * Called with pte lock held.
+ */
+
 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
 		unsigned long addr, pte_t ptent, union mc_target *target)
 {
diff --git a/mm/memory.c b/mm/memory.c
index a1b93d9e4449..15322b73636b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -63,6 +63,7 @@
 #include <linux/dma-debug.h>
 #include <linux/debugfs.h>
 #include <linux/userfaultfd_k.h>
+#include <linux/dax.h>
 
 #include <asm/io.h>
 #include <asm/mmu_context.h>
@@ -2492,8 +2493,6 @@ void unmap_mapping_range(struct address_space *mapping,
 	if (details.last_index < details.first_index)
 		details.last_index = ULONG_MAX;
 
-
-	/* DAX uses i_mmap_lock to serialise file truncate vs page fault */
 	i_mmap_lock_write(mapping);
 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
 		unmap_mapping_range_tree(&mapping->i_mmap, &details);
@@ -2825,7 +2824,8 @@ oom:
  */
 static int __do_fault(struct vm_area_struct *vma, unsigned long address,
 			pgoff_t pgoff, unsigned int flags,
-			struct page *cow_page, struct page **page)
+			struct page *cow_page, struct page **page,
+			void **entry)
 {
 	struct vm_fault vmf;
 	int ret;
@@ -2840,8 +2840,10 @@ static int __do_fault(struct vm_area_struct *vma, unsigned long address,
 	ret = vma->vm_ops->fault(vma, &vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		return ret;
-	if (!vmf.page)
-		goto out;
+	if (ret & VM_FAULT_DAX_LOCKED) {
+		*entry = vmf.entry;
+		return ret;
+	}
 
 	if (unlikely(PageHWPoison(vmf.page))) {
 		if (ret & VM_FAULT_LOCKED)
@@ -2855,7 +2857,6 @@ static int __do_fault(struct vm_area_struct *vma, unsigned long address,
 	else
 		VM_BUG_ON_PAGE(!PageLocked(vmf.page), vmf.page);
 
- out:
 	*page = vmf.page;
 	return ret;
 }
@@ -3048,7 +3049,7 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		pte_unmap_unlock(pte, ptl);
 	}
 
-	ret = __do_fault(vma, address, pgoff, flags, NULL, &fault_page);
+	ret = __do_fault(vma, address, pgoff, flags, NULL, &fault_page, NULL);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		return ret;
 
@@ -3071,6 +3072,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
 {
 	struct page *fault_page, *new_page;
+	void *fault_entry;
 	struct mem_cgroup *memcg;
 	spinlock_t *ptl;
 	pte_t *pte;
@@ -3088,26 +3090,24 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		return VM_FAULT_OOM;
 	}
 
-	ret = __do_fault(vma, address, pgoff, flags, new_page, &fault_page);
+	ret = __do_fault(vma, address, pgoff, flags, new_page, &fault_page,
+			 &fault_entry);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		goto uncharge_out;
 
-	if (fault_page)
+	if (!(ret & VM_FAULT_DAX_LOCKED))
 		copy_user_highpage(new_page, fault_page, address, vma);
 	__SetPageUptodate(new_page);
 
 	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
 	if (unlikely(!pte_same(*pte, orig_pte))) {
 		pte_unmap_unlock(pte, ptl);
-		if (fault_page) {
+		if (!(ret & VM_FAULT_DAX_LOCKED)) {
 			unlock_page(fault_page);
 			put_page(fault_page);
 		} else {
-			/*
-			 * The fault handler has no page to lock, so it holds
-			 * i_mmap_lock for read to protect against truncate.
-			 */
-			i_mmap_unlock_read(vma->vm_file->f_mapping);
+			dax_unlock_mapping_entry(vma->vm_file->f_mapping,
+						 pgoff);
 		}
 		goto uncharge_out;
 	}
@@ -3115,15 +3115,11 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	mem_cgroup_commit_charge(new_page, memcg, false, false);
 	lru_cache_add_active_or_unevictable(new_page, vma);
 	pte_unmap_unlock(pte, ptl);
-	if (fault_page) {
+	if (!(ret & VM_FAULT_DAX_LOCKED)) {
 		unlock_page(fault_page);
 		put_page(fault_page);
 	} else {
-		/*
-		 * The fault handler has no page to lock, so it holds
-		 * i_mmap_lock for read to protect against truncate.
-		 */
-		i_mmap_unlock_read(vma->vm_file->f_mapping);
+		dax_unlock_mapping_entry(vma->vm_file->f_mapping, pgoff);
 	}
 	return ret;
 uncharge_out:
@@ -3143,7 +3139,7 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	int dirtied = 0;
 	int ret, tmp;
 
-	ret = __do_fault(vma, address, pgoff, flags, NULL, &fault_page);
+	ret = __do_fault(vma, address, pgoff, flags, NULL, &fault_page, NULL);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		return ret;
 
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index caf2a14c37ad..e3cbdcaff2a5 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -263,7 +263,7 @@ static void register_page_bootmem_info_section(unsigned long start_pfn)
 }
 #endif /* !CONFIG_SPARSEMEM_VMEMMAP */
 
-void register_page_bootmem_info_node(struct pglist_data *pgdat)
+void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
 {
 	unsigned long i, pfn, end_pfn, nr_pages;
 	int node = pgdat->node_id;
@@ -300,7 +300,7 @@ void register_page_bootmem_info_node(struct pglist_data *pgdat)
 		 * multiple nodes we check that this pfn does not already
 		 * reside in some other nodes.
 		 */
-		if (pfn_valid(pfn) && (pfn_to_nid(pfn) == node))
+		if (pfn_valid(pfn) && (early_pfn_to_nid(pfn) == node))
 			register_page_bootmem_info_section(pfn);
 	}
 }
diff --git a/mm/mmap.c b/mm/mmap.c
index d3d9a94ca031..de2c1769cc68 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -168,7 +168,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
 	return next;
 }
 
-static unsigned long do_brk(unsigned long addr, unsigned long len);
+static int do_brk(unsigned long addr, unsigned long len);
 
 SYSCALL_DEFINE1(brk, unsigned long, brk)
 {
@@ -224,7 +224,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
 		goto out;
 
 	/* Ok, looks good - let it rip. */
-	if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
+	if (do_brk(oldbrk, newbrk-oldbrk) < 0)
 		goto out;
 
 set_brk:
@@ -2625,7 +2625,7 @@ static inline void verify_mm_writelocked(struct mm_struct *mm)
 * anonymous maps. eventually we may be able to do some
 * brk-specific accounting here.
 */
-static unsigned long do_brk(unsigned long addr, unsigned long len)
+static int do_brk(unsigned long addr, unsigned long len)
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma, *prev;
@@ -2636,7 +2636,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
 
 	len = PAGE_ALIGN(len);
 	if (!len)
-		return addr;
+		return 0;
 
 	flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
 
@@ -2703,13 +2703,13 @@ out:
 	if (flags & VM_LOCKED)
 		mm->locked_vm += (len >> PAGE_SHIFT);
 	vma->vm_flags |= VM_SOFTDIRTY;
-	return addr;
+	return 0;
 }
 
-unsigned long vm_brk(unsigned long addr, unsigned long len)
+int vm_brk(unsigned long addr, unsigned long len)
 {
 	struct mm_struct *mm = current->mm;
-	unsigned long ret;
+	int ret;
 	bool populate;
 
 	if (down_write_killable(&mm->mmap_sem))
@@ -2718,7 +2718,7 @@ unsigned long vm_brk(unsigned long addr, unsigned long len)
 	ret = do_brk(addr, len);
 	populate = ((mm->def_flags & VM_LOCKED) != 0);
 	up_write(&mm->mmap_sem);
-	if (populate)
+	if (populate && !ret)
 		mm_populate(addr, len);
 	return ret;
 }
diff --git a/mm/nommu.c b/mm/nommu.c
index c8bd59a03c71..c2e58880207f 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1682,7 +1682,7 @@ void exit_mmap(struct mm_struct *mm)
 	}
 }
 
-unsigned long vm_brk(unsigned long addr, unsigned long len)
+int vm_brk(unsigned long addr, unsigned long len)
 {
 	return -ENOMEM;
 }
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 5bb2f7698ad7..dfb1ab61fb23 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -443,13 +443,29 @@ static bool __oom_reap_task(struct task_struct *tsk)
 {
 	struct mmu_gather tlb;
 	struct vm_area_struct *vma;
-	struct mm_struct *mm;
+	struct mm_struct *mm = NULL;
 	struct task_struct *p;
 	struct zap_details details = {.check_swap_entries = true,
 				      .ignore_dirty = true};
 	bool ret = true;
 
 	/*
+	 * We have to make sure to not race with the victim exit path
+	 * and cause premature new oom victim selection:
+	 * __oom_reap_task		exit_mm
+	 *   atomic_inc_not_zero
+	 *				  mmput
+	 *				    atomic_dec_and_test
+	 *				  exit_oom_victim
+	 *				[...]
+	 *				out_of_memory
+	 *				  select_bad_process
+	 *				    # no TIF_MEMDIE task selects new victim
+	 *  unmap_page_range # frees some memory
+	 */
+	mutex_lock(&oom_lock);
+
+	/*
 	 * Make sure we find the associated mm_struct even when the particular
 	 * thread has already terminated and cleared its mm.
 	 * We might have race with exit path so consider our work done if there
@@ -457,19 +473,19 @@ static bool __oom_reap_task(struct task_struct *tsk)
 	 */
 	p = find_lock_task_mm(tsk);
 	if (!p)
-		return true;
+		goto unlock_oom;
 
 	mm = p->mm;
 	if (!atomic_inc_not_zero(&mm->mm_users)) {
 		task_unlock(p);
-		return true;
+		goto unlock_oom;
 	}
 
 	task_unlock(p);
 
 	if (!down_read_trylock(&mm->mmap_sem)) {
 		ret = false;
-		goto out;
+		goto unlock_oom;
 	}
 
 	tlb_gather_mmu(&tlb, mm, 0, -1);
@@ -511,13 +527,15 @@ static bool __oom_reap_task(struct task_struct *tsk)
 	 * to release its memory.
 	 */
 	set_bit(MMF_OOM_REAPED, &mm->flags);
-out:
+unlock_oom:
+	mutex_unlock(&oom_lock);
 	/*
 	 * Drop our reference but make sure the mmput slow path is called from a
 	 * different context because we shouldn't risk we get stuck there and
 	 * put the oom_reaper out of the way.
 	 */
-	mmput_async(mm);
+	if (mm)
+		mmput_async(mm);
 	return ret;
 }
 
@@ -611,8 +629,6 @@ void try_oom_reaper(struct task_struct *tsk)
 
 			if (!process_shares_mm(p, mm))
 				continue;
-			if (same_thread_group(p, tsk))
-				continue;
 			if (fatal_signal_pending(p))
 				continue;
 
diff --git a/mm/page_ext.c b/mm/page_ext.c
index 2d864e64f7fe..44a4c029c8e7 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -390,8 +390,10 @@ void __init page_ext_init(void)
 		 * We know some arch can have a nodes layout such as
 		 * -------------pfn-------------->
 		 * N0 | N1 | N2 | N0 | N1 | N2|....
+		 *
+		 * Take into account DEFERRED_STRUCT_PAGE_INIT.
 		 */
-		if (pfn_to_nid(pfn) != nid)
+		if (early_pfn_to_nid(pfn) != nid)
 			continue;
 		if (init_section_page_ext(pfn, nid))
 			goto oom;
diff --git a/mm/rmap.c b/mm/rmap.c
index 8a839935b18c..0ea5d9071b32 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1098,6 +1098,8 @@ void page_move_anon_rmap(struct page *page,
 
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	VM_BUG_ON_VMA(!anon_vma, vma);
+	if (IS_ENABLED(CONFIG_DEBUG_VM) && PageTransHuge(page))
+		address &= HPAGE_PMD_MASK;
 	VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page);
 
 	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
diff --git a/mm/truncate.c b/mm/truncate.c
index b00272810871..4064f8f53daa 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -34,40 +34,38 @@ static void clear_exceptional_entry(struct address_space *mapping,
 	if (shmem_mapping(mapping))
 		return;
 
-	spin_lock_irq(&mapping->tree_lock);
-
 	if (dax_mapping(mapping)) {
-		if (radix_tree_delete_item(&mapping->page_tree, index, entry))
-			mapping->nrexceptional--;
-	} else {
-		/*
-		 * Regular page slots are stabilized by the page lock even
-		 * without the tree itself locked. These unlocked entries
-		 * need verification under the tree lock.
-		 */
-		if (!__radix_tree_lookup(&mapping->page_tree, index, &node,
-					&slot))
-			goto unlock;
-		if (*slot != entry)
-			goto unlock;
-		radix_tree_replace_slot(slot, NULL);
-		mapping->nrexceptional--;
-		if (!node)
-			goto unlock;
-		workingset_node_shadows_dec(node);
-		/*
-		 * Don't track node without shadow entries.
-		 *
-		 * Avoid acquiring the list_lru lock if already untracked.
-		 * The list_empty() test is safe as node->private_list is
-		 * protected by mapping->tree_lock.
-		 */
-		if (!workingset_node_shadows(node) &&
-		    !list_empty(&node->private_list))
-			list_lru_del(&workingset_shadow_nodes,
-					&node->private_list);
-		__radix_tree_delete_node(&mapping->page_tree, node);
+		dax_delete_mapping_entry(mapping, index);
+		return;
 	}
+	spin_lock_irq(&mapping->tree_lock);
+	/*
+	 * Regular page slots are stabilized by the page lock even
+	 * without the tree itself locked. These unlocked entries
+	 * need verification under the tree lock.
+	 */
+	if (!__radix_tree_lookup(&mapping->page_tree, index, &node,
+				&slot))
+		goto unlock;
+	if (*slot != entry)
+		goto unlock;
+	radix_tree_replace_slot(slot, NULL);
+	mapping->nrexceptional--;
+	if (!node)
+		goto unlock;
+	workingset_node_shadows_dec(node);
+	/*
+	 * Don't track node without shadow entries.
+	 *
+	 * Avoid acquiring the list_lru lock if already untracked.
+	 * The list_empty() test is safe as node->private_list is
+	 * protected by mapping->tree_lock.
+	 */
+	if (!workingset_node_shadows(node) &&
+	    !list_empty(&node->private_list))
+		list_lru_del(&workingset_shadow_nodes,
+				&node->private_list);
+	__radix_tree_delete_node(&mapping->page_tree, node);
 unlock:
 	spin_unlock_irq(&mapping->tree_lock);
 }
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 72698db958e7..b6d4f258cb53 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -45,6 +45,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
@@ -483,16 +485,16 @@ static inline unsigned long zs_stat_get(struct size_class *class,
 
 #ifdef CONFIG_ZSMALLOC_STAT
 
-static int __init zs_stat_init(void)
+static void __init zs_stat_init(void)
 {
-	if (!debugfs_initialized())
-		return -ENODEV;
+	if (!debugfs_initialized()) {
+		pr_warn("debugfs not available, stat dir not created\n");
+		return;
+	}
 
 	zs_stat_root = debugfs_create_dir("zsmalloc", NULL);
 	if (!zs_stat_root)
-		return -ENOMEM;
-
-	return 0;
+		pr_warn("debugfs 'zsmalloc' stat dir creation failed\n");
 }
 
 static void __exit zs_stat_exit(void)
@@ -577,8 +579,10 @@ static void zs_pool_stat_create(struct zs_pool *pool, const char *name)
 {
 	struct dentry *entry;
 
-	if (!zs_stat_root)
+	if (!zs_stat_root) {
+		pr_warn("no root stat dir, not creating <%s> stat dir\n", name);
 		return;
+	}
 
 	entry = debugfs_create_dir(name, zs_stat_root);
 	if (!entry) {
@@ -592,7 +596,8 @@ static void zs_pool_stat_create(struct zs_pool *pool, const char *name)
 	if (!entry) {
 		pr_warn("%s: debugfs file entry <%s> creation failed\n",
 				name, "classes");
-		return;
+		debugfs_remove_recursive(pool->stat_dentry);
+		pool->stat_dentry = NULL;
 	}
 }
 
@@ -602,9 +607,8 @@ static void zs_pool_stat_destroy(struct zs_pool *pool)
 }
 
 #else /* CONFIG_ZSMALLOC_STAT */
-static int __init zs_stat_init(void)
+static void __init zs_stat_init(void)
 {
-	return 0;
 }
 
 static void __exit zs_stat_exit(void)
@@ -2011,17 +2015,10 @@ static int __init zs_init(void)
 	zpool_register_driver(&zs_zpool_driver);
 #endif
 
-	ret = zs_stat_init();
-	if (ret) {
-		pr_err("zs stat initialization failed\n");
-		goto stat_fail;
-	}
+	zs_stat_init();
+
 	return 0;
 
-stat_fail:
-#ifdef CONFIG_ZPOOL
-	zpool_unregister_driver(&zs_zpool_driver);
-#endif
 notifier_fail:
 	zs_unregister_cpu_notifier();
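
Why the cma.c hunks trade max() for max_t(): the kernel's max() macro type-checks its operands and warns when a signed and an unsigned value meet, which is what happens when MAX_ORDER - 1 (an int expression) is compared against pageblock_order on configurations where the latter is unsigned; max_t() casts both sides to one explicit type before comparing. The standalone userspace sketch below mimics that behaviour with simplified macros modelled on include/linux/kernel.h — the MAX_ORDER and pageblock_order values are illustrative assumptions, not the kernel's configuration, and the macros are deliberately reduced.

/* cc -Wall max_t_demo.c — a sketch, not kernel code */
#include <stdio.h>

/* simplified type-checking max(), in the spirit of include/linux/kernel.h */
#define max(x, y) ({					\
	typeof(x) _max1 = (x);				\
	typeof(y) _max2 = (y);				\
	(void) (&_max1 == &_max2);	/* warns if the types differ */	\
	_max1 > _max2 ? _max1 : _max2; })

/* max_t() casts both arguments to the requested type before comparing */
#define max_t(type, x, y) ({				\
	type _max1 = (type)(x);				\
	type _max2 = (type)(y);				\
	_max1 > _max2 ? _max1 : _max2; })

#define MAX_ORDER 11				/* assumed value for the demo */
static unsigned int pageblock_order = 9;	/* assumed unsigned, as on some configs */

int main(void)
{
	unsigned long page_size = 4096;

	/*
	 * Mixes int and unsigned int: the pointer comparison inside max()
	 * triggers -Wcompare-distinct-pointer-types, the warning the patch
	 * silences.
	 */
	unsigned long noisy = page_size << max(MAX_ORDER - 1, pageblock_order);

	/* the patched form: both operands forced to unsigned long first */
	unsigned long quiet = page_size <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

	printf("alignment: %lu %lu\n", noisy, quiet);
	return 0;
}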
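
The mem_cgroup_margin() hunk adds an else branch so that a memsw counter already past its limit reports zero headroom instead of whatever the plain memory counter allowed. The following is a minimal userspace model of the two versions, not the kernel function: the page_counter plumbing and the do_memsw_account() gate are elided, and the numbers are made up purely to show the difference.

/* margin_demo.c — simplified model, not the kernel implementation */
#include <stdio.h>

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

/* pre-patch behaviour: no clamp when memory+swap is over its limit */
static unsigned long margin_old(unsigned long mem_count, unsigned long mem_limit,
				unsigned long memsw_count, unsigned long memsw_limit)
{
	unsigned long margin = 0;

	if (mem_count < mem_limit)
		margin = mem_limit - mem_count;

	if (memsw_count <= memsw_limit)
		margin = min_ul(margin, memsw_limit - memsw_count);

	return margin;		/* stale memory-only margin leaks through */
}

/* patched behaviour: an over-limit memsw counter means no headroom at all */
static unsigned long margin_new(unsigned long mem_count, unsigned long mem_limit,
				unsigned long memsw_count, unsigned long memsw_limit)
{
	unsigned long margin = 0;

	if (mem_count < mem_limit)
		margin = mem_limit - mem_count;

	if (memsw_count <= memsw_limit)
		margin = min_ul(margin, memsw_limit - memsw_count);
	else
		margin = 0;

	return margin;
}

int main(void)
{
	/* memory has 100 units of headroom, but memory+swap is 50 over limit */
	printf("old margin: %lu\n", margin_old(100, 200, 300, 250));	/* 100 */
	printf("new margin: %lu\n", margin_new(100, 200, 300, 250));	/* 0 */
	return 0;
}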