Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig | 139
-rw-r--r--  mm/Makefile | 9
-rw-r--r--  mm/backing-dev.c | 44
-rw-r--r--  mm/balloon_compaction.c | 4
-rw-r--r--  mm/bootmem.c | 47
-rw-r--r--  mm/bounce.c | 46
-rw-r--r--  mm/cleancache.c | 6
-rw-r--r--  mm/compaction.c | 223
-rw-r--r--  mm/early_ioremap.c | 245
-rw-r--r--  mm/filemap.c | 1058
-rw-r--r--  mm/filemap_xip.c | 2
-rw-r--r--  mm/fremap.c | 47
-rw-r--r--  mm/huge_memory.c | 781
-rw-r--r--  mm/hugetlb.c | 1193
-rw-r--r--  mm/hugetlb_cgroup.c | 92
-rw-r--r--  mm/hwpoison-inject.c | 11
-rw-r--r--  mm/internal.h | 45
-rw-r--r--  mm/iov_iter.c | 224
-rw-r--r--  mm/kmemleak.c | 146
-rw-r--r--  mm/ksm.c | 147
-rw-r--r--  mm/list_lru.c | 152
-rw-r--r--  mm/madvise.c | 40
-rw-r--r--  mm/memblock.c | 554
-rw-r--r--  mm/memcontrol.c | 2454
-rw-r--r--  mm/memory-failure.c | 291
-rw-r--r--  mm/memory.c | 1033
-rw-r--r--  mm/memory_hotplug.c | 329
-rw-r--r--  mm/mempolicy.c | 341
-rw-r--r--  mm/mempool.c | 6
-rw-r--r--  mm/migrate.c | 384
-rw-r--r--  mm/mincore.c | 27
-rw-r--r--  mm/mlock.c | 393
-rw-r--r--  mm/mm_init.c | 68
-rw-r--r--  mm/mmap.c | 271
-rw-r--r--  mm/mmu_context.c | 3
-rw-r--r--  mm/mmu_notifier.c | 5
-rw-r--r--  mm/mmzone.c | 14
-rw-r--r--  mm/mprotect.c | 161
-rw-r--r--  mm/mremap.c | 46
-rw-r--r--  mm/nobootmem.c | 97
-rw-r--r--  mm/nommu.c | 67
-rw-r--r--  mm/oom_kill.c | 84
-rw-r--r--  mm/page-writeback.c | 366
-rw-r--r--  mm/page_alloc.c | 924
-rw-r--r--  mm/page_cgroup.c | 19
-rw-r--r--  mm/page_io.c | 65
-rw-r--r--  mm/page_isolation.c | 22
-rw-r--r--  mm/pagewalk.c | 2
-rw-r--r--  mm/percpu.c | 257
-rw-r--r--  mm/pgtable-generic.c | 53
-rw-r--r--  mm/process_vm_access.c | 278
-rw-r--r--  mm/readahead.c | 64
-rw-r--r--  mm/rmap.c | 683
-rw-r--r--  mm/shmem.c | 342
-rw-r--r--  mm/slab.c | 801
-rw-r--r--  mm/slab.h | 59
-rw-r--r--  mm/slab_common.c | 286
-rw-r--r--  mm/slob.c | 42
-rw-r--r--  mm/slub.c | 474
-rw-r--r--  mm/sparse-vmemmap.c | 6
-rw-r--r--  mm/sparse.c | 227
-rw-r--r--  mm/swap.c | 477
-rw-r--r--  mm/swap_state.c | 83
-rw-r--r--  mm/swapfile.c | 648
-rw-r--r--  mm/truncate.c | 274
-rw-r--r--  mm/util.c | 119
-rw-r--r--  mm/vmacache.c | 114
-rw-r--r--  mm/vmalloc.c | 231
-rw-r--r--  mm/vmpressure.c | 60
-rw-r--r--  mm/vmscan.c | 1084
-rw-r--r--  mm/vmstat.c | 133
-rw-r--r--  mm/workingset.c | 414
-rw-r--r--  mm/zbud.c | 527
-rw-r--r--  mm/zsmalloc.c | 1117
-rw-r--r--  mm/zswap.c | 940
75 files changed, 15086 insertions, 7434 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index e742d06285b7..1b5a95f0fa01 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -20,7 +20,7 @@ config FLATMEM_MANUAL
Some users of more advanced features like NUMA and
memory hotplug may have different options here.
- DISCONTIGMEM is an more mature, better tested system,
+ DISCONTIGMEM is a more mature, better tested system,
but is incompatible with memory hotplug and may suffer
decreased performance over SPARSEMEM. If unsure between
"Sparse Memory" and "Discontiguous Memory", choose
@@ -153,11 +153,18 @@ config MOVABLE_NODE
help
Allow a node to have only movable memory. Pages used by the kernel,
such as direct mapping pages cannot be migrated. So the corresponding
- memory device cannot be hotplugged. This option allows users to
- online all the memory of a node as movable memory so that the whole
- node can be hotplugged. Users who don't use the memory hotplug
- feature are fine with this option on since they don't online memory
- as movable.
+ memory device cannot be hotplugged. This option allows the following
+ two things:
+ - When the system is booting, a node full of hotpluggable memory can
+ be arranged to have only movable memory so that the whole node can
+ be hot-removed (needs the movable_node boot option to be specified).
+ - After the system is up, the option allows users to online all the
+ memory of a node as movable memory so that the whole node can be
+ hot-removed.
+
+ Users who don't use the memory hotplug feature are fine with this
+ option on since they don't specify the movable_node boot option or they
+ don't online memory as movable.
Say Y here if you want to hotplug a whole node.
Say N here if you want kernel to use memory on all nodes evenly.
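
Purely as an illustration of the two usages described above (the memory block number below is an arbitrary example), a node can be kept hot-removable either by booting with the movable_node parameter or by onlining its memory blocks as movable afterwards:

    # at boot, append to the kernel command line:
    movable_node

    # after boot, online a memory block into ZONE_MOVABLE:
    echo online_movable > /sys/devices/system/memory/memory32/state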
@@ -173,7 +180,7 @@ config HAVE_BOOTMEM_INFO_NODE
config MEMORY_HOTPLUG
bool "Allow for memory hot-add"
depends on SPARSEMEM || X86_64_ACPI_NUMA
- depends on HOTPLUG && ARCH_ENABLE_MEMORY_HOTPLUG
+ depends on ARCH_ENABLE_MEMORY_HOTPLUG
depends on (IA64 || X86 || PPC_BOOK3S_64 || SUPERH || S390)
config MEMORY_HOTPLUG_SPARSE
@@ -183,7 +190,7 @@ config MEMORY_HOTPLUG_SPARSE
config MEMORY_HOTREMOVE
bool "Allow for memory hot remove"
select MEMORY_ISOLATION
- select HAVE_BOOTMEM_INFO_NODE if X86_64
+ select HAVE_BOOTMEM_INFO_NODE if (X86_64 || PPC64)
depends on MEMORY_HOTPLUG && ARCH_ENABLE_MEMORY_HOTREMOVE
depends on MIGRATION
@@ -209,11 +216,14 @@ config PAGEFLAGS_EXTENDED
#
config SPLIT_PTLOCK_CPUS
int
+ default "999999" if !MMU
default "999999" if ARM && !CPU_CACHE_VIPT
default "999999" if PARISC && !PA20
- default "999999" if DEBUG_SPINLOCK || DEBUG_LOCK_ALLOC
default "4"
+config ARCH_ENABLE_SPLIT_PMD_PTLOCK
+ boolean
+
#
# support for memory balloon compaction
config BALLOON_COMPACTION
@@ -245,7 +255,7 @@ config COMPACTION
config MIGRATION
bool "Page migration"
def_bool y
- depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION || CMA
+ depends on (NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION || CMA) && MMU
help
Allows the migration of the physical location of pages of processes
while the virtual addresses are not changed. This is useful in
@@ -477,3 +487,112 @@ config FRONTSWAP
and swap data is stored as normal on the matching swap device.
If unsure, say Y to enable frontswap.
+
+config CMA
+ bool "Contiguous Memory Allocator"
+ depends on HAVE_MEMBLOCK && MMU
+ select MIGRATION
+ select MEMORY_ISOLATION
+ help
+ This enables the Contiguous Memory Allocator which allows other
+ subsystems to allocate big physically-contiguous blocks of memory.
+ CMA reserves a region of memory and allows only movable pages to
+ be allocated from it. This way, the kernel can use the memory for
+ pagecache and, when a subsystem requests a contiguous area, the
+ allocated pages are migrated away to serve the contiguous request.
+
+ If unsure, say "n".
+
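
As a hedged sketch of how the reserved region typically gets used (hypothetical driver and buffer size; on most platforms CMA is reached through the DMA API rather than called directly):

    #include <linux/dma-mapping.h>
    #include <linux/sizes.h>

    /* Hypothetical driver: obtain a 4 MiB physically-contiguous buffer.
     * Where the DMA API is backed by CMA, movable pages are migrated out
     * of the reserved region to satisfy the request. */
    static int example_alloc_buffer(struct device *dev, dma_addr_t *dma)
    {
            void *cpu_addr;

            cpu_addr = dma_alloc_coherent(dev, SZ_4M, dma, GFP_KERNEL);
            if (!cpu_addr)
                    return -ENOMEM;

            /* ... program the hardware with the returned DMA address ... */
            return 0;
    }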
+config CMA_DEBUG
+ bool "CMA debug messages (DEVELOPMENT)"
+ depends on DEBUG_KERNEL && CMA
+ help
+ Turns on debug messages in CMA. This produces KERN_DEBUG
+ messages for every CMA call as well as various messages while
+ processing calls such as dma_alloc_from_contiguous().
+ This option does not affect warning and error messages.
+
+config ZBUD
+ tristate
+ default n
+ help
+ A special purpose allocator for storing compressed pages.
+ It is designed to store up to two compressed pages per physical
+ page. While this design limits storage density, it has simple and
+ deterministic reclaim properties that make it preferable to a higher
+ density approach when reclaim will be used.
+
+config ZSWAP
+ bool "Compressed cache for swap pages (EXPERIMENTAL)"
+ depends on FRONTSWAP && CRYPTO=y
+ select CRYPTO_LZO
+ select ZBUD
+ default n
+ help
+ A lightweight compressed cache for swap pages. It takes
+ pages that are in the process of being swapped out and attempts to
+ compress them into a dynamically allocated RAM-based memory pool.
+ This can result in a significant I/O reduction on swap device and,
+ in the case where decompressing from RAM is faster than swap device
+ reads, can also improve workload performance.
+
+ This is marked experimental because it is a new feature (as of
+ v3.11) that interacts heavily with memory reclaim. While these
+ interactions don't cause any known issues on simple memory setups,
+ they have not been fully explored on the large set of potential
+ configurations and workloads that exist.
+
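
For reference, once built in, zswap is enabled and tuned through module parameters on the kernel command line; a minimal invocation using the parameters this version introduces is:

    zswap.enabled=1 zswap.max_pool_percent=20 zswap.compressor=lzo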
+config MEM_SOFT_DIRTY
+ bool "Track memory changes"
+ depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY && PROC_FS
+ select PROC_PAGE_MONITOR
+ help
+ This option enables memory change tracking by introducing a
+ soft-dirty bit on pte-s. This bit is set when someone writes
+ into a page, just like the regular dirty bit, but unlike the
+ latter it can be cleared by hand.
+
+ See Documentation/vm/soft-dirty.txt for more details.
+
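
A small userspace sketch of the cycle described above, following Documentation/vm/soft-dirty.txt (error handling omitted): clear the soft-dirty bits through /proc/PID/clear_refs, let the task run, then test bit 55 of the page's /proc/PID/pagemap entry.

    #include <stdint.h>
    #include <fcntl.h>
    #include <unistd.h>

    /* Returns 1 if the page containing 'addr' was written to since the
     * last soft-dirty clear, 0 otherwise. */
    static int page_soft_dirty(unsigned long addr)
    {
            uint64_t entry;
            int fd;

            fd = open("/proc/self/clear_refs", O_WRONLY);
            write(fd, "4", 1);              /* "4" clears soft-dirty bits */
            close(fd);

            /* ... the workload writes to its memory here ... */

            fd = open("/proc/self/pagemap", O_RDONLY);
            pread(fd, &entry, sizeof(entry),
                  (addr / sysconf(_SC_PAGESIZE)) * sizeof(entry));
            close(fd);

            return (entry >> 55) & 1;       /* bit 55 is the soft-dirty bit */
    }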
+config ZSMALLOC
+ bool "Memory allocator for compressed pages"
+ depends on MMU
+ default n
+ help
+ zsmalloc is a slab-based memory allocator designed to store
+ compressed RAM pages. zsmalloc uses virtual memory mapping
+ in order to reduce fragmentation. However, this results in a
+ non-standard allocator interface where a handle, not a pointer, is
+ returned by an alloc(). This handle must be mapped in order to
+ access the allocated space.
+
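
A rough sketch of the handle-based interface described above (the function names belong to this allocator, but exact signatures differ between kernel versions, so treat this as an approximation; error handling omitted):

    #include <linux/zsmalloc.h>
    #include <linux/string.h>

    /* Store 'len' bytes from 'src' in a zsmalloc pool. The handle is
     * opaque: the object must be mapped before it can be touched. */
    static void zsmalloc_store_example(const void *src, size_t len)
    {
            struct zs_pool *pool = zs_create_pool(GFP_KERNEL);
            unsigned long handle = zs_malloc(pool, len);
            void *obj;

            obj = zs_map_object(pool, handle, ZS_MM_WO);
            memcpy(obj, src, len);
            zs_unmap_object(pool, handle);

            zs_free(pool, handle);
            zs_destroy_pool(pool);
    }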
+config PGTABLE_MAPPING
+ bool "Use page table mapping to access object in zsmalloc"
+ depends on ZSMALLOC
+ help
+ By default, zsmalloc uses a copy-based object mapping method to
+ access allocations that span two pages. However, if a particular
+ architecture (e.g. ARM) performs VM mapping faster than copying,
+ then you should select this. This causes zsmalloc to use page table
+ mapping rather than copying for object mapping.
+
+ You can check speed with zsmalloc benchmark:
+ https://github.com/spartacus06/zsmapbench
+
+config GENERIC_EARLY_IOREMAP
+ bool
+
+config MAX_STACK_SIZE_MB
+ int "Maximum user stack size for 32-bit processes (MB)"
+ default 80
+ range 8 256 if METAG
+ range 8 2048
+ depends on STACK_GROWSUP && (!64BIT || COMPAT)
+ help
+ This is the maximum stack size in Megabytes in the VM layout of 32-bit
+ user processes when the stack grows upwards (currently only on parisc
+ and metag arch). The stack will be located at the highest memory
+ address minus the given value, unless the RLIMIT_STACK hard limit is
+ changed to a smaller value in which case that is used.
+
+ A sane initial value is 80 MB.
diff --git a/mm/Makefile b/mm/Makefile
index 72c5acb9345f..b484452dac57 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -16,8 +16,9 @@ obj-y := filemap.o mempool.o oom_kill.o fadvise.o \
readahead.o swap.o truncate.o vmscan.o shmem.o \
util.o mmzone.o vmstat.o backing-dev.o \
mm_init.o mmu_context.o percpu.o slab_common.o \
- compaction.o balloon_compaction.o \
- interval_tree.o $(mmu-y)
+ compaction.o balloon_compaction.o vmacache.o \
+ interval_tree.o list_lru.o workingset.o \
+ iov_iter.o $(mmu-y)
obj-y += init-mm.o
@@ -32,6 +33,7 @@ obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o
obj-$(CONFIG_BOUNCE) += bounce.o
obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o
obj-$(CONFIG_FRONTSWAP) += frontswap.o
+obj-$(CONFIG_ZSWAP) += zswap.o
obj-$(CONFIG_HAS_DMA) += dmapool.o
obj-$(CONFIG_HUGETLBFS) += hugetlb.o
obj-$(CONFIG_NUMA) += mempolicy.o
@@ -58,3 +60,6 @@ obj-$(CONFIG_DEBUG_KMEMLEAK) += kmemleak.o
obj-$(CONFIG_DEBUG_KMEMLEAK_TEST) += kmemleak-test.o
obj-$(CONFIG_CLEANCACHE) += cleancache.o
obj-$(CONFIG_MEMORY_ISOLATION) += page_isolation.o
+obj-$(CONFIG_ZBUD) += zbud.o
+obj-$(CONFIG_ZSMALLOC) += zsmalloc.o
+obj-$(CONFIG_GENERIC_EARLY_IOREMAP) += early_ioremap.o
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 502517492258..09d9591b7708 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -180,7 +180,8 @@ static ssize_t name##_show(struct device *dev, \
struct backing_dev_info *bdi = dev_get_drvdata(dev); \
\
return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr); \
-}
+} \
+static DEVICE_ATTR_RW(name);
BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))
@@ -231,16 +232,16 @@ static ssize_t stable_pages_required_show(struct device *dev,
return snprintf(page, PAGE_SIZE-1, "%d\n",
bdi_cap_stable_pages_required(bdi) ? 1 : 0);
}
+static DEVICE_ATTR_RO(stable_pages_required);
-#define __ATTR_RW(attr) __ATTR(attr, 0644, attr##_show, attr##_store)
-
-static struct device_attribute bdi_dev_attrs[] = {
- __ATTR_RW(read_ahead_kb),
- __ATTR_RW(min_ratio),
- __ATTR_RW(max_ratio),
- __ATTR_RO(stable_pages_required),
- __ATTR_NULL,
+static struct attribute *bdi_dev_attrs[] = {
+ &dev_attr_read_ahead_kb.attr,
+ &dev_attr_min_ratio.attr,
+ &dev_attr_max_ratio.attr,
+ &dev_attr_stable_pages_required.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(bdi_dev);
static __init int bdi_class_init(void)
{
@@ -248,7 +249,7 @@ static __init int bdi_class_init(void)
if (IS_ERR(bdi_class))
return PTR_ERR(bdi_class);
- bdi_class->dev_attrs = bdi_dev_attrs;
+ bdi_class->dev_groups = bdi_dev_groups;
bdi_debug_init();
return 0;
}
@@ -287,13 +288,19 @@ int bdi_has_dirty_io(struct backing_dev_info *bdi)
* Note, we wouldn't bother setting up the timer, but this function is on the
* fast-path (used by '__mark_inode_dirty()'), so we save few context switches
* by delaying the wake-up.
+ *
+ * We have to be careful not to postpone flush work if it is scheduled for
+ * earlier. Thus we use queue_delayed_work().
*/
void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi)
{
unsigned long timeout;
timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
- mod_delayed_work(bdi_wq, &bdi->wb.dwork, timeout);
+ spin_lock_bh(&bdi->wb_lock);
+ if (test_bit(BDI_registered, &bdi->state))
+ queue_delayed_work(bdi_wq, &bdi->wb.dwork, timeout);
+ spin_unlock_bh(&bdi->wb_lock);
}
/*
@@ -306,9 +313,6 @@ static void bdi_remove_from_list(struct backing_dev_info *bdi)
spin_unlock_bh(&bdi_lock);
synchronize_rcu_expedited();
-
- /* bdi_list is now unused, clear it to mark @bdi dying */
- INIT_LIST_HEAD(&bdi->bdi_list);
}
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
@@ -359,6 +363,11 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
*/
bdi_remove_from_list(bdi);
+ /* Make sure nobody queues further work */
+ spin_lock_bh(&bdi->wb_lock);
+ clear_bit(BDI_registered, &bdi->state);
+ spin_unlock_bh(&bdi->wb_lock);
+
/*
* Drain work list and shutdown the delayed_work. At this point,
* @bdi->bdi_list is empty telling bdi_writeback_workfn() that @bdi
@@ -515,7 +524,6 @@ EXPORT_SYMBOL(bdi_destroy);
int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
unsigned int cap)
{
- char tmp[32];
int err;
bdi->name = name;
@@ -524,8 +532,8 @@ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
if (err)
return err;
- sprintf(tmp, "%.28s%s", name, "-%d");
- err = bdi_register(bdi, NULL, tmp, atomic_long_inc_return(&bdi_seq));
+ err = bdi_register(bdi, NULL, "%.28s-%ld", name,
+ atomic_long_inc_return(&bdi_seq));
if (err) {
bdi_destroy(bdi);
return err;
@@ -652,7 +660,7 @@ int pdflush_proc_obsolete(struct ctl_table *table, int write,
{
char kbuf[] = "0\n";
- if (*ppos) {
+ if (*ppos || *lenp < sizeof(kbuf)) {
*lenp = 0;
return 0;
}
diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c
index 07dbc8ec46cf..6e45a5074bf0 100644
--- a/mm/balloon_compaction.c
+++ b/mm/balloon_compaction.c
@@ -267,7 +267,7 @@ void balloon_page_putback(struct page *page)
put_page(page);
} else {
WARN_ON(1);
- dump_page(page);
+ dump_page(page, "not movable balloon page");
}
unlock_page(page);
}
@@ -287,7 +287,7 @@ int balloon_page_migrate(struct page *newpage,
BUG_ON(!trylock_page(newpage));
if (WARN_ON(!__is_movable_balloon_page(page))) {
- dump_page(page);
+ dump_page(page, "not movable balloon page");
unlock_page(newpage);
return rc;
}
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 2b0bcb019ec2..90bd3507b413 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -172,11 +172,12 @@ void __init free_bootmem_late(unsigned long physaddr, unsigned long size)
static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
{
struct page *page;
- unsigned long start, end, pages, count = 0;
+ unsigned long *map, start, end, pages, count = 0;
if (!bdata->node_bootmem_map)
return 0;
+ map = bdata->node_bootmem_map;
start = bdata->node_min_pfn;
end = bdata->node_low_pfn;
@@ -184,10 +185,9 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
bdata - bootmem_node_data, start, end);
while (start < end) {
- unsigned long *map, idx, vec;
+ unsigned long idx, vec;
unsigned shift;
- map = bdata->node_bootmem_map;
idx = start - bdata->node_min_pfn;
shift = idx & (BITS_PER_LONG - 1);
/*
@@ -241,33 +241,26 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
return count;
}
-static void reset_node_lowmem_managed_pages(pg_data_t *pgdat)
+static int reset_managed_pages_done __initdata;
+
+static inline void __init reset_node_managed_pages(pg_data_t *pgdat)
{
struct zone *z;
- /*
- * In free_area_init_core(), highmem zone's managed_pages is set to
- * present_pages, and bootmem allocator doesn't allocate from highmem
- * zones. So there's no need to recalculate managed_pages because all
- * highmem pages will be managed by the buddy system. Here highmem
- * zone also includes highmem movable zone.
- */
+ if (reset_managed_pages_done)
+ return;
+
for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
- if (!is_highmem(z))
- z->managed_pages = 0;
+ z->managed_pages = 0;
}
-/**
- * free_all_bootmem_node - release a node's free pages to the buddy allocator
- * @pgdat: node to be released
- *
- * Returns the number of pages actually released.
- */
-unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
+void __init reset_all_zones_managed_pages(void)
{
- register_page_bootmem_info_node(pgdat);
- reset_node_lowmem_managed_pages(pgdat);
- return free_all_bootmem_core(pgdat->bdata);
+ struct pglist_data *pgdat;
+
+ for_each_online_pgdat(pgdat)
+ reset_node_managed_pages(pgdat);
+ reset_managed_pages_done = 1;
}
/**
@@ -279,14 +272,14 @@ unsigned long __init free_all_bootmem(void)
{
unsigned long total_pages = 0;
bootmem_data_t *bdata;
- struct pglist_data *pgdat;
- for_each_online_pgdat(pgdat)
- reset_node_lowmem_managed_pages(pgdat);
+ reset_all_zones_managed_pages();
list_for_each_entry(bdata, &bdata_list, list)
total_pages += free_all_bootmem_core(bdata);
+ totalram_pages += total_pages;
+
return total_pages;
}
@@ -791,7 +784,7 @@ void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
/* update goal according ...MAX_DMA32_PFN */
- end_pfn = pgdat->node_start_pfn + pgdat->node_spanned_pages;
+ end_pfn = pgdat_end_pfn(pgdat);
if (end_pfn > MAX_DMA32_PFN + (128 >> (20 - PAGE_SHIFT)) &&
(goal >> PAGE_SHIFT) < MAX_DMA32_PFN) {
diff --git a/mm/bounce.c b/mm/bounce.c
index c9f0a4339a7d..523918b8c6dc 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -98,27 +98,24 @@ int init_emergency_isa_pool(void)
static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
{
unsigned char *vfrom;
- struct bio_vec *tovec, *fromvec;
- int i;
-
- bio_for_each_segment(tovec, to, i) {
- fromvec = from->bi_io_vec + i;
-
- /*
- * not bounced
- */
- if (tovec->bv_page == fromvec->bv_page)
- continue;
-
- /*
- * fromvec->bv_offset and fromvec->bv_len might have been
- * modified by the block layer, so use the original copy,
- * bounce_copy_vec already uses tovec->bv_len
- */
- vfrom = page_address(fromvec->bv_page) + tovec->bv_offset;
+ struct bio_vec tovec, *fromvec = from->bi_io_vec;
+ struct bvec_iter iter;
+
+ bio_for_each_segment(tovec, to, iter) {
+ if (tovec.bv_page != fromvec->bv_page) {
+ /*
+ * fromvec->bv_offset and fromvec->bv_len might have
+ * been modified by the block layer, so use the original
+ * copy, bounce_copy_vec already uses tovec->bv_len
+ */
+ vfrom = page_address(fromvec->bv_page) +
+ tovec.bv_offset;
+
+ bounce_copy_vec(&tovec, vfrom);
+ flush_dcache_page(tovec.bv_page);
+ }
- bounce_copy_vec(tovec, vfrom);
- flush_dcache_page(tovec->bv_page);
+ fromvec++;
}
}
@@ -201,11 +198,14 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
{
struct bio *bio;
int rw = bio_data_dir(*bio_orig);
- struct bio_vec *to, *from;
+ struct bio_vec *to, from;
+ struct bvec_iter iter;
unsigned i;
- bio_for_each_segment(from, *bio_orig, i)
- if (page_to_pfn(from->bv_page) > queue_bounce_pfn(q))
+ if (force)
+ goto bounce;
+ bio_for_each_segment(from, *bio_orig, iter)
+ if (page_to_pfn(from.bv_page) > queue_bounce_pfn(q))
goto bounce;
return;
diff --git a/mm/cleancache.c b/mm/cleancache.c
index 5875f48ce279..d0eac4350403 100644
--- a/mm/cleancache.c
+++ b/mm/cleancache.c
@@ -237,7 +237,7 @@ int __cleancache_get_page(struct page *page)
goto out;
}
- VM_BUG_ON(!PageLocked(page));
+ VM_BUG_ON_PAGE(!PageLocked(page), page);
fake_pool_id = page->mapping->host->i_sb->cleancache_poolid;
if (fake_pool_id < 0)
goto out;
@@ -279,7 +279,7 @@ void __cleancache_put_page(struct page *page)
return;
}
- VM_BUG_ON(!PageLocked(page));
+ VM_BUG_ON_PAGE(!PageLocked(page), page);
fake_pool_id = page->mapping->host->i_sb->cleancache_poolid;
if (fake_pool_id < 0)
return;
@@ -318,7 +318,7 @@ void __cleancache_invalidate_page(struct address_space *mapping,
if (pool_id < 0)
return;
- VM_BUG_ON(!PageLocked(page));
+ VM_BUG_ON_PAGE(!PageLocked(page), page);
if (cleancache_get_key(mapping->host, &key) >= 0) {
cleancache_ops->invalidate_page(pool_id,
key, page->index);
diff --git a/mm/compaction.c b/mm/compaction.c
index 05ccb4cc0bdb..627dc2e4320f 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -134,6 +134,10 @@ static void update_pageblock_skip(struct compact_control *cc,
bool migrate_scanner)
{
struct zone *zone = cc->zone;
+
+ if (cc->ignore_skip_hint)
+ return;
+
if (!page)
return;
@@ -213,21 +217,12 @@ static inline bool compact_trylock_irqsave(spinlock_t *lock,
/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct page *page)
{
- int migratetype = get_pageblock_migratetype(page);
-
- /* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
- if (migratetype == MIGRATE_RESERVE)
- return false;
-
- if (is_migrate_isolate(migratetype))
- return false;
-
- /* If the page is a large free page, then allow migration */
+ /* If the page is a large free page, then disallow migration */
if (PageBuddy(page) && page_order(page) >= pageblock_order)
- return true;
+ return false;
/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
- if (migrate_async_suitable(migratetype))
+ if (migrate_async_suitable(get_pageblock_migratetype(page)))
return true;
/* Otherwise skip the block */
@@ -235,10 +230,9 @@ static bool suitable_migration_target(struct page *page)
}
/*
- * Isolate free pages onto a private freelist. Caller must hold zone->lock.
- * If @strict is true, will abort returning 0 on any invalid PFNs or non-free
- * pages inside of the pageblock (even though it may still end up isolating
- * some pages).
+ * Isolate free pages onto a private freelist. If @strict is true, will abort
+ * returning 0 on any invalid PFNs or non-free pages inside of the pageblock
+ * (even though it may still end up isolating some pages).
*/
static unsigned long isolate_freepages_block(struct compact_control *cc,
unsigned long blockpfn,
@@ -248,9 +242,9 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
{
int nr_scanned = 0, total_isolated = 0;
struct page *cursor, *valid_page = NULL;
- unsigned long nr_strict_required = end_pfn - blockpfn;
unsigned long flags;
bool locked = false;
+ bool checked_pageblock = false;
cursor = pfn_to_page(blockpfn);
@@ -261,11 +255,12 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
nr_scanned++;
if (!pfn_valid_within(blockpfn))
- continue;
+ goto isolate_fail;
+
if (!valid_page)
valid_page = page;
if (!PageBuddy(page))
- continue;
+ goto isolate_fail;
/*
* The zone lock must be held to isolate freepages.
@@ -281,17 +276,23 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
break;
/* Recheck this is a suitable migration target under lock */
- if (!strict && !suitable_migration_target(page))
- break;
+ if (!strict && !checked_pageblock) {
+ /*
+ * We need to check suitability of pageblock only once
+ * and this isolate_freepages_block() is called with
+ * pageblock range, so just check once is sufficient.
+ */
+ checked_pageblock = true;
+ if (!suitable_migration_target(page))
+ break;
+ }
/* Recheck this is a buddy page under lock */
if (!PageBuddy(page))
- continue;
+ goto isolate_fail;
/* Found a free page, break it into order-0 pages */
isolated = split_free_page(page);
- if (!isolated && strict)
- break;
total_isolated += isolated;
for (i = 0; i < isolated; i++) {
list_add(&page->lru, freelist);
@@ -302,7 +303,15 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
if (isolated) {
blockpfn += isolated - 1;
cursor += isolated - 1;
+ continue;
}
+
+isolate_fail:
+ if (strict)
+ break;
+ else
+ continue;
+
}
trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
@@ -312,7 +321,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
* pages requested were isolated. If there were any failures, 0 is
* returned and CMA will fail.
*/
- if (strict && nr_strict_required > total_isolated)
+ if (strict && blockpfn < end_pfn)
total_isolated = 0;
if (locked)
@@ -451,11 +460,13 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
unsigned long last_pageblock_nr = 0, pageblock_nr;
unsigned long nr_scanned = 0, nr_isolated = 0;
struct list_head *migratelist = &cc->migratepages;
- isolate_mode_t mode = 0;
struct lruvec *lruvec;
unsigned long flags;
bool locked = false;
struct page *page = NULL, *valid_page = NULL;
+ bool skipped_async_unsuitable = false;
+ const isolate_mode_t mode = (!cc->sync ? ISOLATE_ASYNC_MIGRATE : 0) |
+ (unevictable ? ISOLATE_UNEVICTABLE : 0);
/*
* Ensure that there are not too many pages isolated from the LRU
@@ -477,7 +488,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
cond_resched();
for (; low_pfn < end_pfn; low_pfn++) {
/* give a chance to irqs before checking need_resched() */
- if (locked && !((low_pfn+1) % SWAP_CLUSTER_MAX)) {
+ if (locked && !(low_pfn % SWAP_CLUSTER_MAX)) {
if (should_release_lock(&zone->lru_lock)) {
spin_unlock_irqrestore(&zone->lru_lock, flags);
locked = false;
@@ -516,23 +527,32 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
/* If isolation recently failed, do not retry */
pageblock_nr = low_pfn >> pageblock_order;
- if (!isolation_suitable(cc, page))
- goto next_pageblock;
+ if (last_pageblock_nr != pageblock_nr) {
+ int mt;
- /* Skip if free */
- if (PageBuddy(page))
- continue;
+ last_pageblock_nr = pageblock_nr;
+ if (!isolation_suitable(cc, page))
+ goto next_pageblock;
+
+ /*
+ * For async migration, also only scan in MOVABLE
+ * blocks. Async migration is optimistic to see if
+ * the minimum amount of work satisfies the allocation
+ */
+ mt = get_pageblock_migratetype(page);
+ if (!cc->sync && !migrate_async_suitable(mt)) {
+ cc->finished_update_migrate = true;
+ skipped_async_unsuitable = true;
+ goto next_pageblock;
+ }
+ }
/*
- * For async migration, also only scan in MOVABLE blocks. Async
- * migration is optimistic to see if the minimum amount of work
- * satisfies the allocation
+ * Skip if free. page_order cannot be used without zone->lock
+ * as nothing prevents parallel allocations or buddy merging.
*/
- if (!cc->sync && last_pageblock_nr != pageblock_nr &&
- !migrate_async_suitable(get_pageblock_migratetype(page))) {
- cc->finished_update_migrate = true;
- goto next_pageblock;
- }
+ if (PageBuddy(page))
+ continue;
/*
* Check may be lockless but that's ok as we recheck later.
@@ -543,11 +563,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
if (unlikely(balloon_page_movable(page))) {
if (locked && balloon_page_isolate(page)) {
/* Successfully isolated */
- cc->finished_update_migrate = true;
- list_add(&page->lru, migratelist);
- cc->nr_migratepages++;
- nr_isolated++;
- goto check_compact_cluster;
+ goto isolate_success;
}
}
continue;
@@ -570,6 +586,15 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
continue;
}
+ /*
+ * Migration will fail if an anonymous page is pinned in memory,
+ * so avoid taking lru_lock and isolating it unnecessarily in an
+ * admittedly racy check.
+ */
+ if (!page_mapping(page) &&
+ page_count(page) > page_mapcount(page))
+ continue;
+
/* Check if it is ok to still hold the lock */
locked = compact_checklock_irqsave(&zone->lru_lock, &flags,
locked, cc);
@@ -584,28 +609,23 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
continue;
}
- if (!cc->sync)
- mode |= ISOLATE_ASYNC_MIGRATE;
-
- if (unevictable)
- mode |= ISOLATE_UNEVICTABLE;
-
lruvec = mem_cgroup_page_lruvec(page, zone);
/* Try isolate the page */
if (__isolate_lru_page(page, mode) != 0)
continue;
- VM_BUG_ON(PageTransCompound(page));
+ VM_BUG_ON_PAGE(PageTransCompound(page), page);
/* Successfully isolated */
- cc->finished_update_migrate = true;
del_page_from_lru_list(page, lruvec, page_lru(page));
+
+isolate_success:
+ cc->finished_update_migrate = true;
list_add(&page->lru, migratelist);
cc->nr_migratepages++;
nr_isolated++;
-check_compact_cluster:
/* Avoid isolating too much */
if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
++low_pfn;
@@ -616,7 +636,6 @@ check_compact_cluster:
next_pageblock:
low_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages) - 1;
- last_pageblock_nr = pageblock_nr;
}
acct_isolated(zone, locked, cc);
@@ -624,8 +643,13 @@ next_pageblock:
if (locked)
spin_unlock_irqrestore(&zone->lru_lock, flags);
- /* Update the pageblock-skip if the whole pageblock was scanned */
- if (low_pfn == end_pfn)
+ /*
+ * Update the pageblock-skip information and cached scanner pfn,
+ * if the whole pageblock was scanned without isolating any page.
+ * This is not done when pageblock was skipped due to being unsuitable
+ * for async compaction, so that eventual sync compaction can try.
+ */
+ if (low_pfn == end_pfn && !skipped_async_unsuitable)
update_pageblock_skip(cc, valid_page, nr_isolated, true);
trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
@@ -647,17 +671,21 @@ static void isolate_freepages(struct zone *zone,
struct compact_control *cc)
{
struct page *page;
- unsigned long high_pfn, low_pfn, pfn, z_end_pfn, end_pfn;
+ unsigned long high_pfn, low_pfn, pfn, z_end_pfn;
int nr_freepages = cc->nr_freepages;
struct list_head *freelist = &cc->freepages;
/*
* Initialise the free scanner. The starting point is where we last
- * scanned from (or the end of the zone if starting). The low point
- * is the end of the pageblock the migration scanner is using.
+ * successfully isolated from, zone-cached value, or the end of the
+ * zone when isolating for the first time. We need this aligned to
+ * the pageblock boundary, because we do pfn -= pageblock_nr_pages
+ * in the for loop.
+ * The low boundary is the end of the pageblock the migration scanner
+ * is using.
*/
- pfn = cc->free_pfn;
- low_pfn = cc->migrate_pfn + pageblock_nr_pages;
+ pfn = cc->free_pfn & ~(pageblock_nr_pages-1);
+ low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages);
/*
* Take care that if the migration scanner is at the end of the zone
@@ -673,9 +701,17 @@ static void isolate_freepages(struct zone *zone,
* pages on cc->migratepages. We stop searching if the migrate
* and free page scanners meet or enough free pages are isolated.
*/
- for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
+ for (; pfn >= low_pfn && cc->nr_migratepages > nr_freepages;
pfn -= pageblock_nr_pages) {
unsigned long isolated;
+ unsigned long end_pfn;
+
+ /*
+ * This can iterate a massively long zone without finding any
+ * suitable migration targets, so periodically check if we need
+ * to schedule.
+ */
+ cond_resched();
if (!pfn_valid(pfn))
continue;
@@ -703,13 +739,10 @@ static void isolate_freepages(struct zone *zone,
isolated = 0;
/*
- * As pfn may not start aligned, pfn+pageblock_nr_page
- * may cross a MAX_ORDER_NR_PAGES boundary and miss
- * a pfn_valid check. Ensure isolate_freepages_block()
- * only scans within a pageblock
+ * Take care when isolating in last pageblock of a zone which
+ * ends in the middle of a pageblock.
*/
- end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
- end_pfn = min(end_pfn, z_end_pfn);
+ end_pfn = min(pfn + pageblock_nr_pages, z_end_pfn);
isolated = isolate_freepages_block(cc, pfn, end_pfn,
freelist, false);
nr_freepages += isolated;
@@ -728,7 +761,14 @@ static void isolate_freepages(struct zone *zone,
/* split_free_page does not map the pages */
map_pages(freelist);
- cc->free_pfn = high_pfn;
+ /*
+ * If we crossed the migrate scanner, we want to keep it that way
+ * so that compact_finished() may detect this
+ */
+ if (pfn < low_pfn)
+ cc->free_pfn = max(pfn, zone->zone_start_pfn);
+ else
+ cc->free_pfn = high_pfn;
cc->nr_freepages = nr_freepages;
}
@@ -827,6 +867,10 @@ static int compact_finished(struct zone *zone,
/* Compaction run completes if the migrate and free scanner meet */
if (cc->free_pfn <= cc->migrate_pfn) {
+ /* Let the next compaction start anew. */
+ zone->compact_cached_migrate_pfn = zone->zone_start_pfn;
+ zone->compact_cached_free_pfn = zone_end_pfn(zone);
+
/*
* Mark that the PG_migrate_skip information should be cleared
* by kswapd when it goes to sleep. kswapd does not set the
@@ -937,6 +981,14 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
}
/*
+ * Clear pageblock skip if there were failures recently and compaction
+ * is about to be retried after being deferred. kswapd does not do
+ * this reset as it'll reset the cached information when going to sleep.
+ */
+ if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
+ __reset_isolation_suitable(zone);
+
+ /*
* Setup to move all movable pages to the end of the zone. Used cached
* information on where the scanners should start but check that it
* is initialised by ensuring the values are within zone boundaries.
@@ -952,13 +1004,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
zone->compact_cached_migrate_pfn = cc->migrate_pfn;
}
- /*
- * Clear pageblock skip if there were failures recently and compaction
- * is about to be retried after being deferred. kswapd does not do
- * this reset as it'll reset the cached information when going to sleep.
- */
- if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
- __reset_isolation_suitable(zone);
+ trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, cc->free_pfn, end_pfn);
migrate_prep_local();
@@ -993,7 +1039,11 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
if (err) {
putback_movable_pages(&cc->migratepages);
cc->nr_migratepages = 0;
- if (err == -ENOMEM) {
+ /*
+ * migrate_pages() may return -ENOMEM when scanners meet
+ * and we want compact_finished() to detect it
+ */
+ if (err == -ENOMEM && cc->free_pfn > cc->migrate_pfn) {
ret = COMPACT_PARTIAL;
goto out;
}
@@ -1005,6 +1055,8 @@ out:
cc->nr_freepages -= release_freepages(&cc->freepages);
VM_BUG_ON(cc->nr_freepages != 0);
+ trace_mm_compaction_end(ret);
+
return ret;
}
@@ -1110,12 +1162,11 @@ static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
compact_zone(zone, cc);
if (cc->order > 0) {
- int ok = zone_watermark_ok(zone, cc->order,
- low_wmark_pages(zone), 0, 0);
- if (ok && cc->order >= zone->compact_order_failed)
- zone->compact_order_failed = cc->order + 1;
+ if (zone_watermark_ok(zone, cc->order,
+ low_wmark_pages(zone), 0, 0))
+ compaction_defer_reset(zone, cc->order, false);
/* Currently async compaction is never deferred. */
- else if (!ok && cc->sync)
+ else if (cc->sync)
defer_compaction(zone, cc->order);
}
@@ -1131,6 +1182,9 @@ void compact_pgdat(pg_data_t *pgdat, int order)
.sync = false,
};
+ if (!order)
+ return;
+
__compact_pgdat(pgdat, &cc);
}
@@ -1139,6 +1193,7 @@ static void compact_node(int nid)
struct compact_control cc = {
.order = -1,
.sync = true,
+ .ignore_skip_hint = true,
};
__compact_pgdat(NODE_DATA(nid), &cc);
@@ -1178,7 +1233,7 @@ int sysctl_extfrag_handler(struct ctl_table *table, int write,
}
#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
-ssize_t sysfs_compact_node(struct device *dev,
+static ssize_t sysfs_compact_node(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
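
For reference, this store method backs the per-node manual compaction trigger; compaction can also be requested from userspace with either of:

    echo 1 > /sys/devices/system/node/node0/compact
    echo 1 > /proc/sys/vm/compact_memory

the former compacting a single node, the latter all zones.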
diff --git a/mm/early_ioremap.c b/mm/early_ioremap.c
new file mode 100644
index 000000000000..e10ccd299d66
--- /dev/null
+++ b/mm/early_ioremap.c
@@ -0,0 +1,245 @@
+/*
+ * Provide common bits of early_ioremap() support for architectures needing
+ * temporary mappings during boot before ioremap() is available.
+ *
+ * This is mostly a direct copy of the x86 early_ioremap implementation.
+ *
+ * (C) Copyright 1995 1996, 2014 Linus Torvalds
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <asm/fixmap.h>
+
+#ifdef CONFIG_MMU
+static int early_ioremap_debug __initdata;
+
+static int __init early_ioremap_debug_setup(char *str)
+{
+ early_ioremap_debug = 1;
+
+ return 0;
+}
+early_param("early_ioremap_debug", early_ioremap_debug_setup);
+
+static int after_paging_init __initdata;
+
+void __init __weak early_ioremap_shutdown(void)
+{
+}
+
+void __init early_ioremap_reset(void)
+{
+ early_ioremap_shutdown();
+ after_paging_init = 1;
+}
+
+/*
+ * Generally, ioremap() is available after paging_init() has been called.
+ * Architectures wanting to allow early_ioremap after paging_init() can
+ * define __late_set_fixmap and __late_clear_fixmap to do the right thing.
+ */
+#ifndef __late_set_fixmap
+static inline void __init __late_set_fixmap(enum fixed_addresses idx,
+ phys_addr_t phys, pgprot_t prot)
+{
+ BUG();
+}
+#endif
+
+#ifndef __late_clear_fixmap
+static inline void __init __late_clear_fixmap(enum fixed_addresses idx)
+{
+ BUG();
+}
+#endif
+
+static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
+static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
+static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;
+
+void __init early_ioremap_setup(void)
+{
+ int i;
+
+ for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
+ if (WARN_ON(prev_map[i]))
+ break;
+
+ for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
+ slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
+}
+
+static int __init check_early_ioremap_leak(void)
+{
+ int count = 0;
+ int i;
+
+ for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
+ if (prev_map[i])
+ count++;
+
+ if (WARN(count, KERN_WARNING
+ "Debug warning: early ioremap leak of %d areas detected.\n"
+ "please boot with early_ioremap_debug and report the dmesg.\n",
+ count))
+ return 1;
+ return 0;
+}
+late_initcall(check_early_ioremap_leak);
+
+static void __init __iomem *
+__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
+{
+ unsigned long offset;
+ resource_size_t last_addr;
+ unsigned int nrpages;
+ enum fixed_addresses idx;
+ int i, slot;
+
+ WARN_ON(system_state != SYSTEM_BOOTING);
+
+ slot = -1;
+ for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
+ if (!prev_map[i]) {
+ slot = i;
+ break;
+ }
+ }
+
+ if (WARN(slot < 0, "%s(%08llx, %08lx) not found slot\n",
+ __func__, (u64)phys_addr, size))
+ return NULL;
+
+ /* Don't allow wraparound or zero size */
+ last_addr = phys_addr + size - 1;
+ if (WARN_ON(!size || last_addr < phys_addr))
+ return NULL;
+
+ prev_size[slot] = size;
+ /*
+ * Mappings have to be page-aligned
+ */
+ offset = phys_addr & ~PAGE_MASK;
+ phys_addr &= PAGE_MASK;
+ size = PAGE_ALIGN(last_addr + 1) - phys_addr;
+
+ /*
+ * Mappings have to fit in the FIX_BTMAP area.
+ */
+ nrpages = size >> PAGE_SHIFT;
+ if (WARN_ON(nrpages > NR_FIX_BTMAPS))
+ return NULL;
+
+ /*
+ * Ok, go for it..
+ */
+ idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
+ while (nrpages > 0) {
+ if (after_paging_init)
+ __late_set_fixmap(idx, phys_addr, prot);
+ else
+ __early_set_fixmap(idx, phys_addr, prot);
+ phys_addr += PAGE_SIZE;
+ --idx;
+ --nrpages;
+ }
+ WARN(early_ioremap_debug, "%s(%08llx, %08lx) [%d] => %08lx + %08lx\n",
+ __func__, (u64)phys_addr, size, slot, offset, slot_virt[slot]);
+
+ prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
+ return prev_map[slot];
+}
+
+void __init early_iounmap(void __iomem *addr, unsigned long size)
+{
+ unsigned long virt_addr;
+ unsigned long offset;
+ unsigned int nrpages;
+ enum fixed_addresses idx;
+ int i, slot;
+
+ slot = -1;
+ for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
+ if (prev_map[i] == addr) {
+ slot = i;
+ break;
+ }
+ }
+
+ if (WARN(slot < 0, "early_iounmap(%p, %08lx) not found slot\n",
+ addr, size))
+ return;
+
+ if (WARN(prev_size[slot] != size,
+ "early_iounmap(%p, %08lx) [%d] size not consistent %08lx\n",
+ addr, size, slot, prev_size[slot]))
+ return;
+
+ WARN(early_ioremap_debug, "early_iounmap(%p, %08lx) [%d]\n",
+ addr, size, slot);
+
+ virt_addr = (unsigned long)addr;
+ if (WARN_ON(virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)))
+ return;
+
+ offset = virt_addr & ~PAGE_MASK;
+ nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;
+
+ idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
+ while (nrpages > 0) {
+ if (after_paging_init)
+ __late_clear_fixmap(idx);
+ else
+ __early_set_fixmap(idx, 0, FIXMAP_PAGE_CLEAR);
+ --idx;
+ --nrpages;
+ }
+ prev_map[slot] = NULL;
+}
+
+/* Remap an IO device */
+void __init __iomem *
+early_ioremap(resource_size_t phys_addr, unsigned long size)
+{
+ return __early_ioremap(phys_addr, size, FIXMAP_PAGE_IO);
+}
+
+/* Remap memory */
+void __init *
+early_memremap(resource_size_t phys_addr, unsigned long size)
+{
+ return (__force void *)__early_ioremap(phys_addr, size,
+ FIXMAP_PAGE_NORMAL);
+}
+#else /* CONFIG_MMU */
+
+void __init __iomem *
+early_ioremap(resource_size_t phys_addr, unsigned long size)
+{
+ return (__force void __iomem *)phys_addr;
+}
+
+/* Remap memory */
+void __init *
+early_memremap(resource_size_t phys_addr, unsigned long size)
+{
+ return (void *)phys_addr;
+}
+
+void __init early_iounmap(void __iomem *addr, unsigned long size)
+{
+}
+
+#endif /* CONFIG_MMU */
+
+
+void __init early_memunmap(void *addr, unsigned long size)
+{
+ early_iounmap((__force void __iomem *)addr, size);
+}
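
A hedged sketch of the intended use of this new generic helper during early boot, before ioremap() is available (the function and the physical address passed to it are hypothetical):

    /* Hypothetical early-boot consumer: read one word from a
     * firmware-provided physical address. */
    static u32 __init example_read_boot_word(phys_addr_t phys)
    {
            void __iomem *base;
            u32 val;

            base = early_ioremap(phys, PAGE_SIZE);
            if (!base)
                    return 0;
            val = readl(base);
            early_iounmap(base, PAGE_SIZE);
            return val;
    }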
diff --git a/mm/filemap.c b/mm/filemap.c
index 7905fe721aa8..088358c8006b 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -33,6 +33,7 @@
#include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
#include <linux/memcontrol.h>
#include <linux/cleancache.h>
+#include <linux/rmap.h>
#include "internal.h"
#define CREATE_TRACE_POINTS
@@ -76,7 +77,7 @@
* ->mmap_sem
* ->lock_page (access_process_vm)
*
- * ->i_mutex (generic_file_buffered_write)
+ * ->i_mutex (generic_perform_write)
* ->mmap_sem (fault_in_pages_readable->do_page_fault)
*
* bdi->wb.list_lock
@@ -107,12 +108,75 @@
* ->tasklist_lock (memory_failure, collect_procs_ao)
*/
+static void page_cache_tree_delete(struct address_space *mapping,
+ struct page *page, void *shadow)
+{
+ struct radix_tree_node *node;
+ unsigned long index;
+ unsigned int offset;
+ unsigned int tag;
+ void **slot;
+
+ VM_BUG_ON(!PageLocked(page));
+
+ __radix_tree_lookup(&mapping->page_tree, page->index, &node, &slot);
+
+ if (shadow) {
+ mapping->nrshadows++;
+ /*
+ * Make sure the nrshadows update is committed before
+ * the nrpages update so that final truncate racing
+ * with reclaim does not see both counters 0 at the
+ * same time and miss a shadow entry.
+ */
+ smp_wmb();
+ }
+ mapping->nrpages--;
+
+ if (!node) {
+ /* Clear direct pointer tags in root node */
+ mapping->page_tree.gfp_mask &= __GFP_BITS_MASK;
+ radix_tree_replace_slot(slot, shadow);
+ return;
+ }
+
+ /* Clear tree tags for the removed page */
+ index = page->index;
+ offset = index & RADIX_TREE_MAP_MASK;
+ for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
+ if (test_bit(offset, node->tags[tag]))
+ radix_tree_tag_clear(&mapping->page_tree, index, tag);
+ }
+
+ /* Delete page, swap shadow entry */
+ radix_tree_replace_slot(slot, shadow);
+ workingset_node_pages_dec(node);
+ if (shadow)
+ workingset_node_shadows_inc(node);
+ else
+ if (__radix_tree_delete_node(&mapping->page_tree, node))
+ return;
+
+ /*
+ * Track node that only contains shadow entries.
+ *
+ * Avoid acquiring the list_lru lock if already tracked. The
+ * list_empty() test is safe as node->private_list is
+ * protected by mapping->tree_lock.
+ */
+ if (!workingset_node_pages(node) &&
+ list_empty(&node->private_list)) {
+ node->private_data = mapping;
+ list_lru_add(&workingset_shadow_nodes, &node->private_list);
+ }
+}
+
/*
* Delete a page from the page cache and free it. Caller has to make
* sure the page is locked and that nobody else uses it - or that usage
* is safe. The caller must hold the mapping's tree_lock.
*/
-void __delete_from_page_cache(struct page *page)
+void __delete_from_page_cache(struct page *page, void *shadow)
{
struct address_space *mapping = page->mapping;
@@ -127,10 +191,11 @@ void __delete_from_page_cache(struct page *page)
else
cleancache_invalidate_page(mapping, page);
- radix_tree_delete(&mapping->page_tree, page->index);
+ page_cache_tree_delete(mapping, page, shadow);
+
page->mapping = NULL;
/* Leave page->index set: truncation lookup relies upon it */
- mapping->nrpages--;
+
__dec_zone_page_state(page, NR_FILE_PAGES);
if (PageSwapBacked(page))
__dec_zone_page_state(page, NR_SHMEM);
@@ -166,7 +231,7 @@ void delete_from_page_cache(struct page *page)
freepage = mapping->a_ops->freepage;
spin_lock_irq(&mapping->tree_lock);
- __delete_from_page_cache(page);
+ __delete_from_page_cache(page, NULL);
spin_unlock_irq(&mapping->tree_lock);
mem_cgroup_uncharge_cache_page(page);
@@ -192,9 +257,11 @@ static int filemap_check_errors(struct address_space *mapping)
{
int ret = 0;
/* Check for outstanding write errors */
- if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
+ if (test_bit(AS_ENOSPC, &mapping->flags) &&
+ test_and_clear_bit(AS_ENOSPC, &mapping->flags))
ret = -ENOSPC;
- if (test_and_clear_bit(AS_EIO, &mapping->flags))
+ if (test_bit(AS_EIO, &mapping->flags) &&
+ test_and_clear_bit(AS_EIO, &mapping->flags))
ret = -EIO;
return ret;
}
@@ -409,9 +476,9 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
{
int error;
- VM_BUG_ON(!PageLocked(old));
- VM_BUG_ON(!PageLocked(new));
- VM_BUG_ON(new->mapping);
+ VM_BUG_ON_PAGE(!PageLocked(old), old);
+ VM_BUG_ON_PAGE(!PageLocked(new), new);
+ VM_BUG_ON_PAGE(new->mapping, new);
error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
if (!error) {
@@ -426,7 +493,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
new->index = offset;
spin_lock_irq(&mapping->tree_lock);
- __delete_from_page_cache(old);
+ __delete_from_page_cache(old, NULL);
error = radix_tree_insert(&mapping->page_tree, offset, new);
BUG_ON(error);
mapping->nrpages++;
@@ -446,6 +513,91 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
}
EXPORT_SYMBOL_GPL(replace_page_cache_page);
+static int page_cache_tree_insert(struct address_space *mapping,
+ struct page *page, void **shadowp)
+{
+ struct radix_tree_node *node;
+ void **slot;
+ int error;
+
+ error = __radix_tree_create(&mapping->page_tree, page->index,
+ &node, &slot);
+ if (error)
+ return error;
+ if (*slot) {
+ void *p;
+
+ p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
+ if (!radix_tree_exceptional_entry(p))
+ return -EEXIST;
+ if (shadowp)
+ *shadowp = p;
+ mapping->nrshadows--;
+ if (node)
+ workingset_node_shadows_dec(node);
+ }
+ radix_tree_replace_slot(slot, page);
+ mapping->nrpages++;
+ if (node) {
+ workingset_node_pages_inc(node);
+ /*
+ * Don't track node that contains actual pages.
+ *
+ * Avoid acquiring the list_lru lock if already
+ * untracked. The list_empty() test is safe as
+ * node->private_list is protected by
+ * mapping->tree_lock.
+ */
+ if (!list_empty(&node->private_list))
+ list_lru_del(&workingset_shadow_nodes,
+ &node->private_list);
+ }
+ return 0;
+}
+
+static int __add_to_page_cache_locked(struct page *page,
+ struct address_space *mapping,
+ pgoff_t offset, gfp_t gfp_mask,
+ void **shadowp)
+{
+ int error;
+
+ VM_BUG_ON_PAGE(!PageLocked(page), page);
+ VM_BUG_ON_PAGE(PageSwapBacked(page), page);
+
+ error = mem_cgroup_charge_file(page, current->mm,
+ gfp_mask & GFP_RECLAIM_MASK);
+ if (error)
+ return error;
+
+ error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM);
+ if (error) {
+ mem_cgroup_uncharge_cache_page(page);
+ return error;
+ }
+
+ page_cache_get(page);
+ page->mapping = mapping;
+ page->index = offset;
+
+ spin_lock_irq(&mapping->tree_lock);
+ error = page_cache_tree_insert(mapping, page, shadowp);
+ radix_tree_preload_end();
+ if (unlikely(error))
+ goto err_insert;
+ __inc_zone_page_state(page, NR_FILE_PAGES);
+ spin_unlock_irq(&mapping->tree_lock);
+ trace_mm_filemap_add_to_page_cache(page);
+ return 0;
+err_insert:
+ page->mapping = NULL;
+ /* Leave page->index set: truncation relies upon it */
+ spin_unlock_irq(&mapping->tree_lock);
+ mem_cgroup_uncharge_cache_page(page);
+ page_cache_release(page);
+ return error;
+}
+
/**
* add_to_page_cache_locked - add a locked page to the pagecache
* @page: page to add
@@ -459,52 +611,35 @@ EXPORT_SYMBOL_GPL(replace_page_cache_page);
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
pgoff_t offset, gfp_t gfp_mask)
{
- int error;
-
- VM_BUG_ON(!PageLocked(page));
- VM_BUG_ON(PageSwapBacked(page));
-
- error = mem_cgroup_cache_charge(page, current->mm,
- gfp_mask & GFP_RECLAIM_MASK);
- if (error)
- goto out;
-
- error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
- if (error == 0) {
- page_cache_get(page);
- page->mapping = mapping;
- page->index = offset;
-
- spin_lock_irq(&mapping->tree_lock);
- error = radix_tree_insert(&mapping->page_tree, offset, page);
- if (likely(!error)) {
- mapping->nrpages++;
- __inc_zone_page_state(page, NR_FILE_PAGES);
- spin_unlock_irq(&mapping->tree_lock);
- trace_mm_filemap_add_to_page_cache(page);
- } else {
- page->mapping = NULL;
- /* Leave page->index set: truncation relies upon it */
- spin_unlock_irq(&mapping->tree_lock);
- mem_cgroup_uncharge_cache_page(page);
- page_cache_release(page);
- }
- radix_tree_preload_end();
- } else
- mem_cgroup_uncharge_cache_page(page);
-out:
- return error;
+ return __add_to_page_cache_locked(page, mapping, offset,
+ gfp_mask, NULL);
}
EXPORT_SYMBOL(add_to_page_cache_locked);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
pgoff_t offset, gfp_t gfp_mask)
{
+ void *shadow = NULL;
int ret;
- ret = add_to_page_cache(page, mapping, offset, gfp_mask);
- if (ret == 0)
- lru_cache_add_file(page);
+ __set_page_locked(page);
+ ret = __add_to_page_cache_locked(page, mapping, offset,
+ gfp_mask, &shadow);
+ if (unlikely(ret))
+ __clear_page_locked(page);
+ else {
+ /*
+ * The page might have been evicted from cache only
+ * recently, in which case it should be activated like
+ * any other repeatedly accessed page.
+ */
+ if (shadow && workingset_refault(shadow)) {
+ SetPageActive(page);
+ workingset_activation(page);
+ } else
+ ClearPageActive(page);
+ lru_cache_add(page);
+ }
return ret;
}
EXPORT_SYMBOL_GPL(add_to_page_cache_lru);
@@ -518,10 +653,10 @@ struct page *__page_cache_alloc(gfp_t gfp)
if (cpuset_do_page_mem_spread()) {
unsigned int cpuset_mems_cookie;
do {
- cpuset_mems_cookie = get_mems_allowed();
+ cpuset_mems_cookie = read_mems_allowed_begin();
n = cpuset_mem_spread_node();
page = alloc_pages_exact_node(n, gfp, 0);
- } while (!put_mems_allowed(cpuset_mems_cookie) && !page);
+ } while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
return page;
}
@@ -605,7 +740,7 @@ EXPORT_SYMBOL_GPL(add_page_wait_queue);
*/
void unlock_page(struct page *page)
{
- VM_BUG_ON(!PageLocked(page));
+ VM_BUG_ON_PAGE(!PageLocked(page), page);
clear_bit_unlock(PG_locked, &page->flags);
smp_mb__after_clear_bit();
wake_up_page(page, PG_locked);
@@ -684,14 +819,101 @@ int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
}
/**
- * find_get_page - find and get a page reference
+ * page_cache_next_hole - find the next hole (not-present entry)
+ * @mapping: mapping
+ * @index: index
+ * @max_scan: maximum range to search
+ *
+ * Search the set [index, min(index+max_scan-1, MAX_INDEX)] for the
+ * lowest indexed hole.
+ *
+ * Returns: the index of the hole if found, otherwise returns an index
+ * outside of the set specified (in which case 'return - index >=
+ * max_scan' will be true). In rare cases of index wrap-around, 0 will
+ * be returned.
+ *
+ * page_cache_next_hole may be called under rcu_read_lock. However,
+ * like radix_tree_gang_lookup, this will not atomically search a
+ * snapshot of the tree at a single point in time. For example, if a
+ * hole is created at index 5, then subsequently a hole is created at
+ * index 10, page_cache_next_hole covering both indexes may return 10
+ * if called under rcu_read_lock.
+ */
+pgoff_t page_cache_next_hole(struct address_space *mapping,
+ pgoff_t index, unsigned long max_scan)
+{
+ unsigned long i;
+
+ for (i = 0; i < max_scan; i++) {
+ struct page *page;
+
+ page = radix_tree_lookup(&mapping->page_tree, index);
+ if (!page || radix_tree_exceptional_entry(page))
+ break;
+ index++;
+ if (index == 0)
+ break;
+ }
+
+ return index;
+}
+EXPORT_SYMBOL(page_cache_next_hole);
+
+/**
+ * page_cache_prev_hole - find the prev hole (not-present entry)
+ * @mapping: mapping
+ * @index: index
+ * @max_scan: maximum range to search
+ *
+ * Search backwards in the range [max(index-max_scan+1, 0), index] for
+ * the first hole.
+ *
+ * Returns: the index of the hole if found, otherwise returns an index
+ * outside of the set specified (in which case 'index - return >=
+ * max_scan' will be true). In rare cases of wrap-around, ULONG_MAX
+ * will be returned.
+ *
+ * page_cache_prev_hole may be called under rcu_read_lock. However,
+ * like radix_tree_gang_lookup, this will not atomically search a
+ * snapshot of the tree at a single point in time. For example, if a
+ * hole is created at index 10, then subsequently a hole is created at
+ * index 5, page_cache_prev_hole covering both indexes may return 5 if
+ * called under rcu_read_lock.
+ */
+pgoff_t page_cache_prev_hole(struct address_space *mapping,
+ pgoff_t index, unsigned long max_scan)
+{
+ unsigned long i;
+
+ for (i = 0; i < max_scan; i++) {
+ struct page *page;
+
+ page = radix_tree_lookup(&mapping->page_tree, index);
+ if (!page || radix_tree_exceptional_entry(page))
+ break;
+ index--;
+ if (index == ULONG_MAX)
+ break;
+ }
+
+ return index;
+}
+EXPORT_SYMBOL(page_cache_prev_hole);
+
+/**
+ * find_get_entry - find and get a page cache entry
* @mapping: the address_space to search
- * @offset: the page index
+ * @offset: the page cache index
*
- * Is there a pagecache struct page at the given (mapping, offset) tuple?
- * If yes, increment its refcount and return it; if no, return NULL.
+ * Looks up the page cache slot at @mapping & @offset. If there is a
+ * page cache page, it is returned with an increased refcount.
+ *
+ * If the slot holds a shadow entry of a previously evicted page, or a
+ * swap entry from shmem/tmpfs, it is returned.
+ *
+ * Otherwise, %NULL is returned.
*/
-struct page *find_get_page(struct address_space *mapping, pgoff_t offset)
+struct page *find_get_entry(struct address_space *mapping, pgoff_t offset)
{
void **pagep;
struct page *page;
@@ -708,9 +930,9 @@ repeat:
if (radix_tree_deref_retry(page))
goto repeat;
/*
- * Otherwise, shmem/tmpfs must be storing a swap entry
- * here as an exceptional entry: so return it without
- * attempting to raise page count.
+ * A shadow entry of a recently evicted page,
+ * or a swap entry from shmem/tmpfs. Return
+ * it without attempting to raise page count.
*/
goto out;
}
@@ -732,24 +954,50 @@ out:
return page;
}
-EXPORT_SYMBOL(find_get_page);
+EXPORT_SYMBOL(find_get_entry);
/**
- * find_lock_page - locate, pin and lock a pagecache page
+ * find_get_page - find and get a page reference
* @mapping: the address_space to search
* @offset: the page index
*
- * Locates the desired pagecache page, locks it, increments its reference
- * count and returns its address.
+ * Looks up the page cache slot at @mapping & @offset. If there is a
+ * page cache page, it is returned with an increased refcount.
*
- * Returns zero if the page was not present. find_lock_page() may sleep.
+ * Otherwise, %NULL is returned.
*/
-struct page *find_lock_page(struct address_space *mapping, pgoff_t offset)
+struct page *find_get_page(struct address_space *mapping, pgoff_t offset)
+{
+ struct page *page = find_get_entry(mapping, offset);
+
+ if (radix_tree_exceptional_entry(page))
+ page = NULL;
+ return page;
+}
+EXPORT_SYMBOL(find_get_page);
+
+/**
+ * find_lock_entry - locate, pin and lock a page cache entry
+ * @mapping: the address_space to search
+ * @offset: the page cache index
+ *
+ * Looks up the page cache slot at @mapping & @offset. If there is a
+ * page cache page, it is returned locked and with an increased
+ * refcount.
+ *
+ * If the slot holds a shadow entry of a previously evicted page, or a
+ * swap entry from shmem/tmpfs, it is returned.
+ *
+ * Otherwise, %NULL is returned.
+ *
+ * find_lock_entry() may sleep.
+ */
+struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset)
{
struct page *page;
repeat:
- page = find_get_page(mapping, offset);
+ page = find_get_entry(mapping, offset);
if (page && !radix_tree_exception(page)) {
lock_page(page);
/* Has the page been truncated? */
@@ -758,10 +1006,33 @@ repeat:
page_cache_release(page);
goto repeat;
}
- VM_BUG_ON(page->index != offset);
+ VM_BUG_ON_PAGE(page->index != offset, page);
}
return page;
}
+EXPORT_SYMBOL(find_lock_entry);
+
+/**
+ * find_lock_page - locate, pin and lock a pagecache page
+ * @mapping: the address_space to search
+ * @offset: the page index
+ *
+ * Looks up the page cache slot at @mapping & @offset. If there is a
+ * page cache page, it is returned locked and with an increased
+ * refcount.
+ *
+ * Otherwise, %NULL is returned.
+ *
+ * find_lock_page() may sleep.
+ */
+struct page *find_lock_page(struct address_space *mapping, pgoff_t offset)
+{
+ struct page *page = find_lock_entry(mapping, offset);
+
+ if (radix_tree_exceptional_entry(page))
+ page = NULL;
+ return page;
+}
EXPORT_SYMBOL(find_lock_page);
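A sketch, not part of the patch: find_lock_page() keeps its old contract (a real page, locked and pinned, or NULL), while callers that care about shadow or swap entries switch to find_lock_entry(). The touch_cached_page() helper below is hypothetical.

#include <linux/pagemap.h>
#include <linux/errno.h>

static int touch_cached_page(struct address_space *mapping, pgoff_t index)
{
	struct page *page = find_lock_page(mapping, index);

	if (!page)
		return -ENOENT;	/* not resident; a shadow entry may still exist */

	/* page is locked and pinned here, so page->mapping and page->index are stable */
	unlock_page(page);
	page_cache_release(page);
	return 0;
}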
/**
@@ -770,16 +1041,18 @@ EXPORT_SYMBOL(find_lock_page);
* @index: the page's index into the mapping
* @gfp_mask: page allocation mode
*
- * Locates a page in the pagecache. If the page is not present, a new page
- * is allocated using @gfp_mask and is added to the pagecache and to the VM's
- * LRU list. The returned page is locked and has its reference count
- * incremented.
+ * Looks up the page cache slot at @mapping & @offset. If there is a
+ * page cache page, it is returned locked and with an increased
+ * refcount.
*
- * find_or_create_page() may sleep, even if @gfp_flags specifies an atomic
- * allocation!
+ * If the page is not present, a new page is allocated using @gfp_mask
+ * and added to the page cache and the VM's LRU list. The page is
+ * returned locked and with an increased refcount.
*
- * find_or_create_page() returns the desired page's address, or zero on
- * memory exhaustion.
+ * On memory exhaustion, %NULL is returned.
+ *
+ * find_or_create_page() may sleep, even if @gfp_flags specifies an
+ * atomic allocation!
*/
struct page *find_or_create_page(struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask)
@@ -812,6 +1085,76 @@ repeat:
EXPORT_SYMBOL(find_or_create_page);
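A sketch, not part of the patch, of the contract documented above: find_or_create_page() either returns a locked, pinned page (allocating and adding it to the LRU on a miss) or NULL on memory exhaustion. The get_locked_cache_page() wrapper and its ERR_PTR() convention are hypothetical.

#include <linux/pagemap.h>
#include <linux/err.h>

static struct page *get_locked_cache_page(struct address_space *mapping,
					  pgoff_t index)
{
	struct page *page;

	page = find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
	if (!page)
		return ERR_PTR(-ENOMEM);	/* memory exhaustion */

	/* returned locked, with an elevated refcount, and already on the LRU */
	return page;
}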
/**
+ * find_get_entries - gang pagecache lookup
+ * @mapping: The address_space to search
+ * @start: The starting page cache index
+ * @nr_entries: The maximum number of entries
+ * @entries: Where the resulting entries are placed
+ * @indices: The cache indices corresponding to the entries in @entries
+ *
+ * find_get_entries() will search for and return a group of up to
+ * @nr_entries entries in the mapping. The entries are placed at
+ * @entries. find_get_entries() takes a reference against any actual
+ * pages it returns.
+ *
+ * The search returns a group of mapping-contiguous page cache entries
+ * with ascending indices. There may be holes in the indices due to
+ * not-present pages.
+ *
+ * Any shadow entries of evicted pages, or swap entries from
+ * shmem/tmpfs, are included in the returned array.
+ *
+ * find_get_entries() returns the number of pages and shadow entries
+ * which were found.
+ */
+unsigned find_get_entries(struct address_space *mapping,
+ pgoff_t start, unsigned int nr_entries,
+ struct page **entries, pgoff_t *indices)
+{
+ void **slot;
+ unsigned int ret = 0;
+ struct radix_tree_iter iter;
+
+ if (!nr_entries)
+ return 0;
+
+ rcu_read_lock();
+restart:
+ radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
+ struct page *page;
+repeat:
+ page = radix_tree_deref_slot(slot);
+ if (unlikely(!page))
+ continue;
+ if (radix_tree_exception(page)) {
+ if (radix_tree_deref_retry(page))
+ goto restart;
+ /*
+ * A shadow entry of a recently evicted page,
+ * or a swap entry from shmem/tmpfs. Return
+ * it without attempting to raise page count.
+ */
+ goto export;
+ }
+ if (!page_cache_get_speculative(page))
+ goto repeat;
+
+ /* Has the page moved? */
+ if (unlikely(page != *slot)) {
+ page_cache_release(page);
+ goto repeat;
+ }
+export:
+ indices[ret] = iter.index;
+ entries[ret] = page;
+ if (++ret == nr_entries)
+ break;
+ }
+ rcu_read_unlock();
+ return ret;
+}
+
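A sketch, not part of the patch, of consuming a gang lookup that may contain exceptional entries: real pages come back with a reference that must be dropped, shadow and swap entries do not. The scan_entries() loop and its batch size are hypothetical; in this series the real consumers are the truncate and shmem paths.

#include <linux/pagemap.h>
#include <linux/radix-tree.h>

#define ENTRY_BATCH	16

static void scan_entries(struct address_space *mapping, pgoff_t start)
{
	struct page *entries[ENTRY_BATCH];
	pgoff_t indices[ENTRY_BATCH];
	unsigned int nr, i;

	while ((nr = find_get_entries(mapping, start, ENTRY_BATCH,
				      entries, indices))) {
		for (i = 0; i < nr; i++) {
			struct page *page = entries[i];

			if (radix_tree_exceptional_entry(page))
				continue;	/* no reference was taken on this one */
			page_cache_release(page);
		}
		start = indices[nr - 1] + 1;	/* resume after the last index seen */
	}
}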
+/**
* find_get_pages - gang pagecache lookup
* @mapping: The address_space to search
* @start: The starting page index
@@ -857,9 +1200,9 @@ repeat:
goto restart;
}
/*
- * Otherwise, shmem/tmpfs must be storing a swap entry
- * here as an exceptional entry: so skip over it -
- * we only reach this from invalidate_mapping_pages().
+ * A shadow entry of a recently evicted page,
+ * or a swap entry from shmem/tmpfs. Skip
+ * over it.
*/
continue;
}
@@ -924,9 +1267,9 @@ repeat:
goto restart;
}
/*
- * Otherwise, shmem/tmpfs must be storing a swap entry
- * here as an exceptional entry: so stop looking for
- * contiguous pages.
+ * A shadow entry of a recently evicted page,
+ * or a swap entry from shmem/tmpfs. Stop
+ * looking for contiguous pages.
*/
break;
}
@@ -1000,10 +1343,17 @@ repeat:
goto restart;
}
/*
- * This function is never used on a shmem/tmpfs
- * mapping, so a swap entry won't be found here.
+ * A shadow entry of a recently evicted page.
+ *
+ * Those entries should never be tagged, but
+ * this tree walk is lockless and the tags are
+ * looked up in bulk, one radix tree node at a
+ * time, so there is a sizable window for page
+ * reclaim to evict a page we saw tagged.
+ *
+ * Skip over it.
*/
- BUG();
+ continue;
}
if (!page_cache_get_speculative(page))
@@ -1087,8 +1437,8 @@ static void shrink_readahead_size_eio(struct file *filp,
* do_generic_file_read - generic file read routine
* @filp: the file to read
* @ppos: current file position
- * @desc: read_descriptor
- * @actor: read method
+ * @iter: data destination
+ * @written: already copied
*
* This is a generic file read routine, and uses the
* mapping->a_ops->readpage() function for the actual low-level stuff.
@@ -1096,8 +1446,8 @@ static void shrink_readahead_size_eio(struct file *filp,
* This is really ugly. But the goto's actually try to clarify some
* of the logic when it comes to error handling etc.
*/
-static void do_generic_file_read(struct file *filp, loff_t *ppos,
- read_descriptor_t *desc, read_actor_t actor)
+static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
+ struct iov_iter *iter, ssize_t written)
{
struct address_space *mapping = filp->f_mapping;
struct inode *inode = mapping->host;
@@ -1107,12 +1457,12 @@ static void do_generic_file_read(struct file *filp, loff_t *ppos,
pgoff_t prev_index;
unsigned long offset; /* offset into pagecache page */
unsigned int prev_offset;
- int error;
+ int error = 0;
index = *ppos >> PAGE_CACHE_SHIFT;
prev_index = ra->prev_pos >> PAGE_CACHE_SHIFT;
prev_offset = ra->prev_pos & (PAGE_CACHE_SIZE-1);
- last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
+ last_index = (*ppos + iter->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
offset = *ppos & ~PAGE_CACHE_MASK;
for (;;) {
@@ -1147,7 +1497,7 @@ find_page:
if (!page->mapping)
goto page_not_up_to_date_locked;
if (!mapping->a_ops->is_partially_uptodate(page,
- desc, offset))
+ offset, iter->count))
goto page_not_up_to_date_locked;
unlock_page(page);
}
@@ -1197,23 +1547,23 @@ page_ok:
/*
* Ok, we have the page, and it's up-to-date, so
* now we can copy it to user space...
- *
- * The actor routine returns how many bytes were actually used..
- * NOTE! This may not be the same as how much of a user buffer
- * we filled up (we may be padding etc), so we can only update
- * "pos" here (the actor routine has to update the user buffer
- * pointers and the remaining count).
*/
- ret = actor(desc, page, offset, nr);
+
+ ret = copy_page_to_iter(page, offset, nr, iter);
offset += ret;
index += offset >> PAGE_CACHE_SHIFT;
offset &= ~PAGE_CACHE_MASK;
prev_offset = offset;
page_cache_release(page);
- if (ret == nr && desc->count)
- continue;
- goto out;
+ written += ret;
+ if (!iov_iter_count(iter))
+ goto out;
+ if (ret < nr) {
+ error = -EFAULT;
+ goto out;
+ }
+ continue;
page_not_up_to_date:
/* Get exclusive access to the page ... */
@@ -1248,6 +1598,7 @@ readpage:
if (unlikely(error)) {
if (error == AOP_TRUNCATED_PAGE) {
page_cache_release(page);
+ error = 0;
goto find_page;
}
goto readpage_error;
@@ -1278,7 +1629,6 @@ readpage:
readpage_error:
/* UHHUH! A synchronous read error occurred. Report it */
- desc->error = error;
page_cache_release(page);
goto out;
@@ -1289,16 +1639,17 @@ no_cached_page:
*/
page = page_cache_alloc_cold(mapping);
if (!page) {
- desc->error = -ENOMEM;
+ error = -ENOMEM;
goto out;
}
error = add_to_page_cache_lru(page, mapping,
index, GFP_KERNEL);
if (error) {
page_cache_release(page);
- if (error == -EEXIST)
+ if (error == -EEXIST) {
+ error = 0;
goto find_page;
- desc->error = error;
+ }
goto out;
}
goto readpage;
@@ -1311,44 +1662,7 @@ out:
*ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
file_accessed(filp);
-}
-
-int file_read_actor(read_descriptor_t *desc, struct page *page,
- unsigned long offset, unsigned long size)
-{
- char *kaddr;
- unsigned long left, count = desc->count;
-
- if (size > count)
- size = count;
-
- /*
- * Faults on the destination of a read are common, so do it before
- * taking the kmap.
- */
- if (!fault_in_pages_writeable(desc->arg.buf, size)) {
- kaddr = kmap_atomic(page);
- left = __copy_to_user_inatomic(desc->arg.buf,
- kaddr + offset, size);
- kunmap_atomic(kaddr);
- if (left == 0)
- goto success;
- }
-
- /* Do it the slow way */
- kaddr = kmap(page);
- left = __copy_to_user(desc->arg.buf, kaddr + offset, size);
- kunmap(page);
-
- if (left) {
- size -= left;
- desc->error = -EFAULT;
- }
-success:
- desc->count = count - size;
- desc->written += size;
- desc->arg.buf += size;
- return size;
+ return written ? written : error;
}
/*
@@ -1406,14 +1720,15 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
{
struct file *filp = iocb->ki_filp;
ssize_t retval;
- unsigned long seg = 0;
size_t count;
loff_t *ppos = &iocb->ki_pos;
+ struct iov_iter i;
count = 0;
retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
if (retval)
return retval;
+ iov_iter_init(&i, iov, nr_segs, count, 0);
/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
if (filp->f_flags & O_DIRECT) {
@@ -1426,66 +1741,37 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
if (!count)
goto out; /* skip atime */
size = i_size_read(inode);
- if (pos < size) {
- retval = filemap_write_and_wait_range(mapping, pos,
+ retval = filemap_write_and_wait_range(mapping, pos,
pos + iov_length(iov, nr_segs) - 1);
- if (!retval) {
- retval = mapping->a_ops->direct_IO(READ, iocb,
- iov, pos, nr_segs);
- }
- if (retval > 0) {
- *ppos = pos + retval;
- count -= retval;
- }
-
+ if (!retval) {
+ retval = mapping->a_ops->direct_IO(READ, iocb,
+ iov, pos, nr_segs);
+ }
+ if (retval > 0) {
+ *ppos = pos + retval;
+ count -= retval;
/*
- * Btrfs can have a short DIO read if we encounter
- * compressed extents, so if there was an error, or if
- * we've already read everything we wanted to, or if
- * there was a short read because we hit EOF, go ahead
- * and return. Otherwise fallthrough to buffered io for
- * the rest of the read.
+ * If we did a short DIO read we need to skip the
+ * section of the iov that we've already read data into.
*/
- if (retval < 0 || !count || *ppos >= size) {
- file_accessed(filp);
- goto out;
- }
+ iov_iter_advance(&i, retval);
}
- }
-
- count = retval;
- for (seg = 0; seg < nr_segs; seg++) {
- read_descriptor_t desc;
- loff_t offset = 0;
/*
- * If we did a short DIO read we need to skip the section of the
- * iov that we've already read data into.
+ * Btrfs can have a short DIO read if we encounter
+ * compressed extents, so if there was an error, or if
+ * we've already read everything we wanted to, or if
+ * there was a short read because we hit EOF, go ahead
+ * and return. Otherwise fallthrough to buffered io for
+ * the rest of the read.
*/
- if (count) {
- if (count > iov[seg].iov_len) {
- count -= iov[seg].iov_len;
- continue;
- }
- offset = count;
- count = 0;
- }
-
- desc.written = 0;
- desc.arg.buf = iov[seg].iov_base + offset;
- desc.count = iov[seg].iov_len - offset;
- if (desc.count == 0)
- continue;
- desc.error = 0;
- do_generic_file_read(filp, ppos, &desc, file_read_actor);
- retval += desc.written;
- if (desc.error) {
- retval = retval ?: desc.error;
- break;
+ if (retval < 0 || !count || *ppos >= size) {
+ file_accessed(filp);
+ goto out;
}
- if (desc.count > 0)
- break;
}
+
+ retval = do_generic_file_read(filp, ppos, &i, retval);
out:
return retval;
}
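A sketch, not part of the patch, of the iov_iter pattern the rewritten generic_file_aio_read() relies on: initialise an iterator over the caller's iovec, advance it past whatever a short direct I/O read already transferred, and let the buffered path consume the rest. The function name and values are hypothetical; it assumes the iov_iter declarations visible in this series (linux/uio.h).

#include <linux/uio.h>

static void short_dio_fallback_example(const struct iovec *iov,
				       unsigned long nr_segs, size_t count,
				       ssize_t dio_done)
{
	struct iov_iter i;

	iov_iter_init(&i, iov, nr_segs, count, 0);
	if (dio_done > 0)
		iov_iter_advance(&i, dio_done);	/* skip what direct I/O already read */

	/* iov_iter_count(&i) is now the residual left for the page cache path */
}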
@@ -1539,12 +1825,12 @@ static void do_sync_mmap_readahead(struct vm_area_struct *vma,
struct address_space *mapping = file->f_mapping;
/* If we don't want any read-ahead, don't bother */
- if (VM_RandomReadHint(vma))
+ if (vma->vm_flags & VM_RAND_READ)
return;
if (!ra->ra_pages)
return;
- if (VM_SequentialReadHint(vma)) {
+ if (vma->vm_flags & VM_SEQ_READ) {
page_cache_sync_readahead(mapping, ra, file, offset,
ra->ra_pages);
return;
@@ -1584,7 +1870,7 @@ static void do_async_mmap_readahead(struct vm_area_struct *vma,
struct address_space *mapping = file->f_mapping;
/* If we don't want any read-ahead, don't bother */
- if (VM_RandomReadHint(vma))
+ if (vma->vm_flags & VM_RAND_READ)
return;
if (ra->mmap_miss > 0)
ra->mmap_miss--;
@@ -1614,11 +1900,11 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
struct inode *inode = mapping->host;
pgoff_t offset = vmf->pgoff;
struct page *page;
- pgoff_t size;
+ loff_t size;
int ret = 0;
- size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
- if (offset >= size)
+ size = round_up(i_size_read(inode), PAGE_CACHE_SIZE);
+ if (offset >= size >> PAGE_CACHE_SHIFT)
return VM_FAULT_SIGBUS;
/*
@@ -1654,7 +1940,7 @@ retry_find:
put_page(page);
goto retry_find;
}
- VM_BUG_ON(page->index != offset);
+ VM_BUG_ON_PAGE(page->index != offset, page);
/*
* We have a locked page in the page cache, now we need to check
@@ -1667,8 +1953,8 @@ retry_find:
* Found the page and have a reference on it.
* We must recheck i_size under page lock.
*/
- size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
- if (unlikely(offset >= size)) {
+ size = round_up(i_size_read(inode), PAGE_CACHE_SIZE);
+ if (unlikely(offset >= size >> PAGE_CACHE_SHIFT)) {
unlock_page(page);
page_cache_release(page);
return VM_FAULT_SIGBUS;
@@ -1726,6 +2012,78 @@ page_not_uptodate:
}
EXPORT_SYMBOL(filemap_fault);
+void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct radix_tree_iter iter;
+ void **slot;
+ struct file *file = vma->vm_file;
+ struct address_space *mapping = file->f_mapping;
+ loff_t size;
+ struct page *page;
+ unsigned long address = (unsigned long) vmf->virtual_address;
+ unsigned long addr;
+ pte_t *pte;
+
+ rcu_read_lock();
+ radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, vmf->pgoff) {
+ if (iter.index > vmf->max_pgoff)
+ break;
+repeat:
+ page = radix_tree_deref_slot(slot);
+ if (unlikely(!page))
+ goto next;
+ if (radix_tree_exception(page)) {
+ if (radix_tree_deref_retry(page))
+ break;
+ else
+ goto next;
+ }
+
+ if (!page_cache_get_speculative(page))
+ goto repeat;
+
+ /* Has the page moved? */
+ if (unlikely(page != *slot)) {
+ page_cache_release(page);
+ goto repeat;
+ }
+
+ if (!PageUptodate(page) ||
+ PageReadahead(page) ||
+ PageHWPoison(page))
+ goto skip;
+ if (!trylock_page(page))
+ goto skip;
+
+ if (page->mapping != mapping || !PageUptodate(page))
+ goto unlock;
+
+ size = round_up(i_size_read(mapping->host), PAGE_CACHE_SIZE);
+ if (page->index >= size >> PAGE_CACHE_SHIFT)
+ goto unlock;
+
+ pte = vmf->pte + page->index - vmf->pgoff;
+ if (!pte_none(*pte))
+ goto unlock;
+
+ if (file->f_ra.mmap_miss > 0)
+ file->f_ra.mmap_miss--;
+ addr = address + (page->index - vmf->pgoff) * PAGE_SIZE;
+ do_set_pte(vma, addr, page, pte, false, false);
+ unlock_page(page);
+ goto next;
+unlock:
+ unlock_page(page);
+skip:
+ page_cache_release(page);
+next:
+ if (iter.index == vmf->max_pgoff)
+ break;
+ }
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL(filemap_map_pages);
+
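For context, a sketch (not part of the patch) of how a filesystem that relies on the generic fault path could opt into fault-around by wiring up the new handler, exactly as generic_file_vm_ops does just below; the example_file_vm_ops table is hypothetical.

#include <linux/mm.h>
#include <linux/fs.h>

static const struct vm_operations_struct example_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,	/* map nearby uptodate pages around a fault */
	.page_mkwrite	= filemap_page_mkwrite,
	.remap_pages	= generic_file_remap_pages,
};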
int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct page *page = vmf->page;
@@ -1755,6 +2113,7 @@ EXPORT_SYMBOL(filemap_page_mkwrite);
const struct vm_operations_struct generic_file_vm_ops = {
.fault = filemap_fault,
+ .map_pages = filemap_map_pages,
.page_mkwrite = filemap_page_mkwrite,
.remap_pages = generic_file_remap_pages,
};
@@ -1795,6 +2154,18 @@ int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma)
EXPORT_SYMBOL(generic_file_mmap);
EXPORT_SYMBOL(generic_file_readonly_mmap);
+static struct page *wait_on_page_read(struct page *page)
+{
+ if (!IS_ERR(page)) {
+ wait_on_page_locked(page);
+ if (!PageUptodate(page)) {
+ page_cache_release(page);
+ page = ERR_PTR(-EIO);
+ }
+ }
+ return page;
+}
+
static struct page *__read_cache_page(struct address_space *mapping,
pgoff_t index,
int (*filler)(void *, struct page *),
@@ -1821,6 +2192,8 @@ repeat:
if (err < 0) {
page_cache_release(page);
page = ERR_PTR(err);
+ } else {
+ page = wait_on_page_read(page);
}
}
return page;
@@ -1857,6 +2230,10 @@ retry:
if (err < 0) {
page_cache_release(page);
return ERR_PTR(err);
+ } else {
+ page = wait_on_page_read(page);
+ if (IS_ERR(page))
+ return page;
}
out:
mark_page_accessed(page);
@@ -1864,40 +2241,25 @@ out:
}
/**
- * read_cache_page_async - read into page cache, fill it if needed
+ * read_cache_page - read into page cache, fill it if needed
* @mapping: the page's address_space
* @index: the page index
* @filler: function to perform the read
* @data: first arg to filler(data, page) function, often left as NULL
*
- * Same as read_cache_page, but don't wait for page to become unlocked
- * after submitting it to the filler.
- *
* Read into the page cache. If a page already exists, and PageUptodate() is
- * not set, try to fill the page but don't wait for it to become unlocked.
+ * not set, try to fill the page and wait for it to become unlocked.
*
* If the page does not get brought uptodate, return -EIO.
*/
-struct page *read_cache_page_async(struct address_space *mapping,
+struct page *read_cache_page(struct address_space *mapping,
pgoff_t index,
int (*filler)(void *, struct page *),
void *data)
{
return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping));
}
-EXPORT_SYMBOL(read_cache_page_async);
-
-static struct page *wait_on_page_read(struct page *page)
-{
- if (!IS_ERR(page)) {
- wait_on_page_locked(page);
- if (!PageUptodate(page)) {
- page_cache_release(page);
- page = ERR_PTR(-EIO);
- }
- }
- return page;
-}
+EXPORT_SYMBOL(read_cache_page);
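A sketch, not part of the patch, of the simplified contract: read_cache_page() now waits for the read to complete itself, so the caller gets either an uptodate page or an ERR_PTR(). The fill_one_page() helper is hypothetical; the filler cast mirrors what read_cache_page_gfp() does below.

#include <linux/pagemap.h>
#include <linux/err.h>

static int fill_one_page(struct address_space *mapping, pgoff_t index)
{
	struct page *page;

	page = read_cache_page(mapping, index,
			       (filler_t *)mapping->a_ops->readpage, NULL);
	if (IS_ERR(page))
		return PTR_ERR(page);	/* -EIO if the page never became uptodate */

	/* the page is uptodate and pinned here */
	page_cache_release(page);
	return 0;
}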
/**
* read_cache_page_gfp - read into page cache, using specified page allocation flags.
@@ -1916,175 +2278,10 @@ struct page *read_cache_page_gfp(struct address_space *mapping,
{
filler_t *filler = (filler_t *)mapping->a_ops->readpage;
- return wait_on_page_read(do_read_cache_page(mapping, index, filler, NULL, gfp));
+ return do_read_cache_page(mapping, index, filler, NULL, gfp);
}
EXPORT_SYMBOL(read_cache_page_gfp);
-/**
- * read_cache_page - read into page cache, fill it if needed
- * @mapping: the page's address_space
- * @index: the page index
- * @filler: function to perform the read
- * @data: first arg to filler(data, page) function, often left as NULL
- *
- * Read into the page cache. If a page already exists, and PageUptodate() is
- * not set, try to fill the page then wait for it to become unlocked.
- *
- * If the page does not get brought uptodate, return -EIO.
- */
-struct page *read_cache_page(struct address_space *mapping,
- pgoff_t index,
- int (*filler)(void *, struct page *),
- void *data)
-{
- return wait_on_page_read(read_cache_page_async(mapping, index, filler, data));
-}
-EXPORT_SYMBOL(read_cache_page);
-
-static size_t __iovec_copy_from_user_inatomic(char *vaddr,
- const struct iovec *iov, size_t base, size_t bytes)
-{
- size_t copied = 0, left = 0;
-
- while (bytes) {
- char __user *buf = iov->iov_base + base;
- int copy = min(bytes, iov->iov_len - base);
-
- base = 0;
- left = __copy_from_user_inatomic(vaddr, buf, copy);
- copied += copy;
- bytes -= copy;
- vaddr += copy;
- iov++;
-
- if (unlikely(left))
- break;
- }
- return copied - left;
-}
-
-/*
- * Copy as much as we can into the page and return the number of bytes which
- * were successfully copied. If a fault is encountered then return the number of
- * bytes which were copied.
- */
-size_t iov_iter_copy_from_user_atomic(struct page *page,
- struct iov_iter *i, unsigned long offset, size_t bytes)
-{
- char *kaddr;
- size_t copied;
-
- BUG_ON(!in_atomic());
- kaddr = kmap_atomic(page);
- if (likely(i->nr_segs == 1)) {
- int left;
- char __user *buf = i->iov->iov_base + i->iov_offset;
- left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
- copied = bytes - left;
- } else {
- copied = __iovec_copy_from_user_inatomic(kaddr + offset,
- i->iov, i->iov_offset, bytes);
- }
- kunmap_atomic(kaddr);
-
- return copied;
-}
-EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
-
-/*
- * This has the same sideeffects and return value as
- * iov_iter_copy_from_user_atomic().
- * The difference is that it attempts to resolve faults.
- * Page must not be locked.
- */
-size_t iov_iter_copy_from_user(struct page *page,
- struct iov_iter *i, unsigned long offset, size_t bytes)
-{
- char *kaddr;
- size_t copied;
-
- kaddr = kmap(page);
- if (likely(i->nr_segs == 1)) {
- int left;
- char __user *buf = i->iov->iov_base + i->iov_offset;
- left = __copy_from_user(kaddr + offset, buf, bytes);
- copied = bytes - left;
- } else {
- copied = __iovec_copy_from_user_inatomic(kaddr + offset,
- i->iov, i->iov_offset, bytes);
- }
- kunmap(page);
- return copied;
-}
-EXPORT_SYMBOL(iov_iter_copy_from_user);
-
-void iov_iter_advance(struct iov_iter *i, size_t bytes)
-{
- BUG_ON(i->count < bytes);
-
- if (likely(i->nr_segs == 1)) {
- i->iov_offset += bytes;
- i->count -= bytes;
- } else {
- const struct iovec *iov = i->iov;
- size_t base = i->iov_offset;
- unsigned long nr_segs = i->nr_segs;
-
- /*
- * The !iov->iov_len check ensures we skip over unlikely
- * zero-length segments (without overruning the iovec).
- */
- while (bytes || unlikely(i->count && !iov->iov_len)) {
- int copy;
-
- copy = min(bytes, iov->iov_len - base);
- BUG_ON(!i->count || i->count < copy);
- i->count -= copy;
- bytes -= copy;
- base += copy;
- if (iov->iov_len == base) {
- iov++;
- nr_segs--;
- base = 0;
- }
- }
- i->iov = iov;
- i->iov_offset = base;
- i->nr_segs = nr_segs;
- }
-}
-EXPORT_SYMBOL(iov_iter_advance);
-
-/*
- * Fault in the first iovec of the given iov_iter, to a maximum length
- * of bytes. Returns 0 on success, or non-zero if the memory could not be
- * accessed (ie. because it is an invalid address).
- *
- * writev-intensive code may want this to prefault several iovecs -- that
- * would be possible (callers must not rely on the fact that _only_ the
- * first iovec will be faulted with the current implementation).
- */
-int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
-{
- char __user *buf = i->iov->iov_base + i->iov_offset;
- bytes = min(bytes, i->iov->iov_len - i->iov_offset);
- return fault_in_pages_readable(buf, bytes);
-}
-EXPORT_SYMBOL(iov_iter_fault_in_readable);
-
-/*
- * Return the count of just the current iov_iter segment.
- */
-size_t iov_iter_single_seg_count(const struct iov_iter *i)
-{
- const struct iovec *iov = i->iov;
- if (i->nr_segs == 1)
- return i->count;
- else
- return min(i->count, iov->iov_len - i->iov_offset);
-}
-EXPORT_SYMBOL(iov_iter_single_seg_count);
-
/*
* Performs necessary checks before doing a write
*
@@ -2191,7 +2388,7 @@ EXPORT_SYMBOL(pagecache_write_end);
ssize_t
generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
- unsigned long *nr_segs, loff_t pos, loff_t *ppos,
+ unsigned long *nr_segs, loff_t pos,
size_t count, size_t ocount)
{
struct file *file = iocb->ki_filp;
@@ -2252,7 +2449,7 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
i_size_write(inode, pos);
mark_inode_dirty(inode);
}
- *ppos = pos;
+ iocb->ki_pos = pos;
}
out:
return written;
@@ -2298,7 +2495,7 @@ found:
}
EXPORT_SYMBOL(grab_cache_page_write_begin);
-static ssize_t generic_perform_write(struct file *file,
+ssize_t generic_perform_write(struct file *file,
struct iov_iter *i, loff_t pos)
{
struct address_space *mapping = file->f_mapping;
@@ -2348,9 +2545,7 @@ again:
if (mapping_writably_mapped(mapping))
flush_dcache_page(page);
- pagefault_disable();
copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
- pagefault_enable();
flush_dcache_page(page);
mark_page_accessed(page);
@@ -2388,34 +2583,13 @@ again:
return written ? written : status;
}
-
-ssize_t
-generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t pos, loff_t *ppos,
- size_t count, ssize_t written)
-{
- struct file *file = iocb->ki_filp;
- ssize_t status;
- struct iov_iter i;
-
- iov_iter_init(&i, iov, nr_segs, count, written);
- status = generic_perform_write(file, &i, pos);
-
- if (likely(status >= 0)) {
- written += status;
- *ppos = pos + status;
- }
-
- return written ? written : status;
-}
-EXPORT_SYMBOL(generic_file_buffered_write);
+EXPORT_SYMBOL(generic_perform_write);
/**
* __generic_file_aio_write - write data to a file
* @iocb: IO state structure (file, offset, etc.)
* @iov: vector with data to write
* @nr_segs: number of segments in the vector
- * @ppos: position where to write
*
* This function does all the work needed for actually writing data to a
* file. It does all basic checks, removes SUID from the file, updates
@@ -2430,16 +2604,18 @@ EXPORT_SYMBOL(generic_file_buffered_write);
* avoid syncing under i_mutex.
*/
ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t *ppos)
+ unsigned long nr_segs)
{
struct file *file = iocb->ki_filp;
struct address_space * mapping = file->f_mapping;
size_t ocount; /* original count */
size_t count; /* after file limit checks */
struct inode *inode = mapping->host;
- loff_t pos;
- ssize_t written;
+ loff_t pos = iocb->ki_pos;
+ ssize_t written = 0;
ssize_t err;
+ ssize_t status;
+ struct iov_iter from;
ocount = 0;
err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
@@ -2447,12 +2623,9 @@ ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
return err;
count = ocount;
- pos = *ppos;
/* We can write back this queue in page reclaim */
current->backing_dev_info = mapping->backing_dev_info;
- written = 0;
-
err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
if (err)
goto out;
@@ -2468,45 +2641,47 @@ ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
if (err)
goto out;
+ iov_iter_init(&from, iov, nr_segs, count, 0);
+
/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
if (unlikely(file->f_flags & O_DIRECT)) {
loff_t endbyte;
- ssize_t written_buffered;
- written = generic_file_direct_write(iocb, iov, &nr_segs, pos,
- ppos, count, ocount);
+ written = generic_file_direct_write(iocb, iov, &from.nr_segs, pos,
+ count, ocount);
if (written < 0 || written == count)
goto out;
+ iov_iter_advance(&from, written);
+
/*
* direct-io write to a hole: fall through to buffered I/O
* for completing the rest of the request.
*/
pos += written;
count -= written;
- written_buffered = generic_file_buffered_write(iocb, iov,
- nr_segs, pos, ppos, count,
- written);
+
+ status = generic_perform_write(file, &from, pos);
/*
- * If generic_file_buffered_write() retuned a synchronous error
+ * If generic_perform_write() returned a synchronous error
* then we want to return the number of bytes which were
* direct-written, or the error code if that was zero. Note
* that this differs from normal direct-io semantics, which
* will return -EFOO even if some bytes were written.
*/
- if (written_buffered < 0) {
- err = written_buffered;
+ if (unlikely(status < 0) && !written) {
+ err = status;
goto out;
}
-
+ iocb->ki_pos = pos + status;
/*
* We need to ensure that the page cache pages are written to
* disk and invalidated to preserve the expected O_DIRECT
* semantics.
*/
- endbyte = pos + written_buffered - written - 1;
+ endbyte = pos + status - 1;
err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
if (err == 0) {
- written = written_buffered;
+ written += status;
invalidate_mapping_pages(mapping,
pos >> PAGE_CACHE_SHIFT,
endbyte >> PAGE_CACHE_SHIFT);
@@ -2517,8 +2692,9 @@ ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
*/
}
} else {
- written = generic_file_buffered_write(iocb, iov, nr_segs,
- pos, ppos, count, written);
+ written = generic_perform_write(file, &from, pos);
+ if (likely(written >= 0))
+ iocb->ki_pos = pos + written;
}
out:
current->backing_dev_info = NULL;
@@ -2547,14 +2723,14 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
BUG_ON(iocb->ki_pos != pos);
mutex_lock(&inode->i_mutex);
- ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
+ ret = __generic_file_aio_write(iocb, iov, nr_segs);
mutex_unlock(&inode->i_mutex);
- if (ret > 0 || ret == -EIOCBQUEUED) {
+ if (ret > 0) {
ssize_t err;
- err = generic_write_sync(file, pos, ret);
- if (err < 0 && ret > 0)
+ err = generic_write_sync(file, iocb->ki_pos - ret, ret);
+ if (err < 0)
ret = err;
}
return ret;
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 28fe26b64f8a..d8d9fe3f685c 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -26,7 +26,7 @@
* of ZERO_PAGE(), such as /dev/zero
*/
static DEFINE_MUTEX(xip_sparse_mutex);
-static seqcount_t xip_sparse_seq = SEQCNT_ZERO;
+static seqcount_t xip_sparse_seq = SEQCNT_ZERO(xip_sparse_seq);
static struct page *__xip_sparse_page;
/* called under xip_sparse_mutex */
diff --git a/mm/fremap.c b/mm/fremap.c
index 87da3590c61e..34feba60a17e 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -23,28 +23,44 @@
#include "internal.h"
+static int mm_counter(struct page *page)
+{
+ return PageAnon(page) ? MM_ANONPAGES : MM_FILEPAGES;
+}
+
static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
pte_t pte = *ptep;
+ struct page *page;
+ swp_entry_t entry;
if (pte_present(pte)) {
- struct page *page;
-
flush_cache_page(vma, addr, pte_pfn(pte));
pte = ptep_clear_flush(vma, addr, ptep);
page = vm_normal_page(vma, addr, pte);
if (page) {
if (pte_dirty(pte))
set_page_dirty(page);
+ update_hiwater_rss(mm);
+ dec_mm_counter(mm, mm_counter(page));
page_remove_rmap(page);
page_cache_release(page);
+ }
+ } else { /* zap_pte() is not called when pte_none() */
+ if (!pte_file(pte)) {
update_hiwater_rss(mm);
- dec_mm_counter(mm, MM_FILEPAGES);
+ entry = pte_to_swp_entry(pte);
+ if (non_swap_entry(entry)) {
+ if (is_migration_entry(entry)) {
+ page = migration_entry_to_page(entry);
+ dec_mm_counter(mm, mm_counter(page));
+ }
+ } else {
+ free_swap_and_cache(entry);
+ dec_mm_counter(mm, MM_SWAPENTS);
+ }
}
- } else {
- if (!pte_file(pte))
- free_swap_and_cache(pte_to_swp_entry(pte));
pte_clear_not_present_full(mm, addr, ptep, 0);
}
}
@@ -57,17 +73,22 @@ static int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, unsigned long pgoff, pgprot_t prot)
{
int err = -ENOMEM;
- pte_t *pte;
+ pte_t *pte, ptfile;
spinlock_t *ptl;
pte = get_locked_pte(mm, addr, &ptl);
if (!pte)
goto out;
- if (!pte_none(*pte))
+ ptfile = pgoff_to_pte(pgoff);
+
+ if (!pte_none(*pte)) {
+ if (pte_present(*pte) && pte_soft_dirty(*pte))
+ pte_file_mksoft_dirty(ptfile);
zap_pte(mm, vma, addr, pte);
+ }
- set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
+ set_pte_at(mm, addr, pte, ptfile);
/*
* We don't need to run update_mmu_cache() here because the "file pte"
* being installed by install_file_pte() is not a real pte - it's a
@@ -203,9 +224,10 @@ get_write_lock:
if (mapping_cap_account_dirty(mapping)) {
unsigned long addr;
struct file *file = get_file(vma->vm_file);
+ /* mmap_region may free vma; grab the info now */
+ vm_flags = vma->vm_flags;
- addr = mmap_region(file, start, size,
- vma->vm_flags, pgoff);
+ addr = mmap_region(file, start, size, vm_flags, pgoff);
fput(file);
if (IS_ERR_VALUE(addr)) {
err = addr;
@@ -213,7 +235,7 @@ get_write_lock:
BUG_ON(addr != start);
err = 0;
}
- goto out;
+ goto out_freed;
}
mutex_lock(&mapping->i_mmap_mutex);
flush_dcache_mmap_lock(mapping);
@@ -248,6 +270,7 @@ get_write_lock:
out:
if (vma)
vm_flags = vma->vm_flags;
+out_freed:
if (likely(!has_write_lock))
up_read(&mm->mmap_sem);
else
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 362c329b83fe..b4b1feba6472 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -27,11 +27,12 @@
#include "internal.h"
/*
- * By default transparent hugepage support is enabled for all mappings
- * and khugepaged scans all mappings. Defrag is only invoked by
- * khugepaged hugepage allocations and by page faults inside
- * MADV_HUGEPAGE regions to avoid the risk of slowing down short lived
- * allocations.
+ * By default transparent hugepage support is disabled in order to avoid
+ * risking an increased memory footprint for applications without a guaranteed
+ * benefit. When transparent hugepage support is enabled, it applies to all
+ * mappings, and khugepaged scans all mappings.
+ * Defrag is invoked by khugepaged hugepage allocations and by page faults
+ * for all hugepage allocations.
*/
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
@@ -129,8 +130,14 @@ static int set_recommended_min_free_kbytes(void)
(unsigned long) nr_free_buffer_pages() / 20);
recommended_min <<= (PAGE_SHIFT-10);
- if (recommended_min > min_free_kbytes)
+ if (recommended_min > min_free_kbytes) {
+ if (user_min_free_kbytes >= 0)
+ pr_info("raising min_free_kbytes from %d to %lu "
+ "to help transparent hugepage allocations\n",
+ min_free_kbytes, recommended_min);
+
min_free_kbytes = recommended_min;
+ }
setup_per_zone_wmarks();
return 0;
}
@@ -211,24 +218,29 @@ static void put_huge_zero_page(void)
BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
}
-static int shrink_huge_zero_page(struct shrinker *shrink,
- struct shrink_control *sc)
+static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
+ struct shrink_control *sc)
{
- if (!sc->nr_to_scan)
- /* we can free zero page only if last reference remains */
- return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
+ /* we can free zero page only if last reference remains */
+ return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
+}
+static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
struct page *zero_page = xchg(&huge_zero_page, NULL);
BUG_ON(zero_page == NULL);
__free_page(zero_page);
+ return HPAGE_PMD_NR;
}
return 0;
}
static struct shrinker huge_zero_page_shrinker = {
- .shrink = shrink_huge_zero_page,
+ .count_objects = shrink_huge_zero_page_count,
+ .scan_objects = shrink_huge_zero_page_scan,
.seeks = DEFAULT_SEEKS,
};
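A sketch, not part of the patch, of the split shrinker interface the zero-page shrinker is converted to above: count_objects() only reports how many objects could be freed, while scan_objects() does the freeing and returns the number reclaimed (or SHRINK_STOP when it cannot make progress). The example_* names and the atomic counter are hypothetical.

#include <linux/shrinker.h>
#include <linux/atomic.h>

static atomic_long_t example_nr_cached;		/* hypothetical count of freeable objects */

static unsigned long example_count(struct shrinker *s, struct shrink_control *sc)
{
	return atomic_long_read(&example_nr_cached);
}

static unsigned long example_scan(struct shrinker *s, struct shrink_control *sc)
{
	unsigned long freed = 0;

	/* drop up to sc->nr_to_scan cached objects here, counting them in freed */
	return freed ? freed : SHRINK_STOP;
}

static struct shrinker example_shrinker = {
	.count_objects	= example_count,
	.scan_objects	= example_scan,
	.seeks		= DEFAULT_SEEKS,	/* registered via register_shrinker() at init */
};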
@@ -417,7 +429,7 @@ static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
unsigned long msecs;
int err;
- err = strict_strtoul(buf, 10, &msecs);
+ err = kstrtoul(buf, 10, &msecs);
if (err || msecs > UINT_MAX)
return -EINVAL;
@@ -444,7 +456,7 @@ static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
unsigned long msecs;
int err;
- err = strict_strtoul(buf, 10, &msecs);
+ err = kstrtoul(buf, 10, &msecs);
if (err || msecs > UINT_MAX)
return -EINVAL;
@@ -470,7 +482,7 @@ static ssize_t pages_to_scan_store(struct kobject *kobj,
int err;
unsigned long pages;
- err = strict_strtoul(buf, 10, &pages);
+ err = kstrtoul(buf, 10, &pages);
if (err || !pages || pages > UINT_MAX)
return -EINVAL;
@@ -538,7 +550,7 @@ static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
int err;
unsigned long max_ptes_none;
- err = strict_strtoul(buf, 10, &max_ptes_none);
+ err = kstrtoul(buf, 10, &max_ptes_none);
if (err || max_ptes_none > HPAGE_PMD_NR-1)
return -EINVAL;
@@ -649,7 +661,7 @@ out:
hugepage_exit_sysfs(hugepage_kobj);
return err;
}
-module_init(hugepage_init)
+subsys_initcall(hugepage_init);
static int __init setup_transparent_hugepage(char *str)
{
@@ -690,11 +702,10 @@ pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
return pmd;
}
-static inline pmd_t mk_huge_pmd(struct page *page, struct vm_area_struct *vma)
+static inline pmd_t mk_huge_pmd(struct page *page, pgprot_t prot)
{
pmd_t entry;
- entry = mk_pmd(page, vma->vm_page_prot);
- entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+ entry = mk_pmd(page, prot);
entry = pmd_mkhuge(entry);
return entry;
}
@@ -705,8 +716,9 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
struct page *page)
{
pgtable_t pgtable;
+ spinlock_t *ptl;
- VM_BUG_ON(!PageCompound(page));
+ VM_BUG_ON_PAGE(!PageCompound(page), page);
pgtable = pte_alloc_one(mm, haddr);
if (unlikely(!pgtable))
return VM_FAULT_OOM;
@@ -719,21 +731,22 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
*/
__SetPageUptodate(page);
- spin_lock(&mm->page_table_lock);
+ ptl = pmd_lock(mm, pmd);
if (unlikely(!pmd_none(*pmd))) {
- spin_unlock(&mm->page_table_lock);
+ spin_unlock(ptl);
mem_cgroup_uncharge_page(page);
put_page(page);
pte_free(mm, pgtable);
} else {
pmd_t entry;
- entry = mk_huge_pmd(page, vma);
+ entry = mk_huge_pmd(page, vma->vm_page_prot);
+ entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
page_add_new_anon_rmap(page, vma, haddr);
+ pgtable_trans_huge_deposit(mm, pmd, pgtable);
set_pmd_at(mm, haddr, pmd, entry);
- pgtable_trans_huge_deposit(mm, pgtable);
add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
- mm->nr_ptes++;
- spin_unlock(&mm->page_table_lock);
+ atomic_long_inc(&mm->nr_ptes);
+ spin_unlock(ptl);
}
return 0;
@@ -753,14 +766,7 @@ static inline struct page *alloc_hugepage_vma(int defrag,
HPAGE_PMD_ORDER, vma, haddr, nd);
}
-#ifndef CONFIG_NUMA
-static inline struct page *alloc_hugepage(int defrag)
-{
- return alloc_pages(alloc_hugepage_gfpmask(defrag, 0),
- HPAGE_PMD_ORDER);
-}
-#endif
-
+/* Caller must hold page table lock. */
static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
struct page *zero_page)
@@ -771,9 +777,9 @@ static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
entry = mk_pmd(zero_page, vma->vm_page_prot);
entry = pmd_wrprotect(entry);
entry = pmd_mkhuge(entry);
+ pgtable_trans_huge_deposit(mm, pmd, pgtable);
set_pmd_at(mm, haddr, pmd, entry);
- pgtable_trans_huge_deposit(mm, pgtable);
- mm->nr_ptes++;
+ atomic_long_inc(&mm->nr_ptes);
return true;
}
@@ -783,83 +789,65 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
{
struct page *page;
unsigned long haddr = address & HPAGE_PMD_MASK;
- pte_t *pte;
- if (haddr >= vma->vm_start && haddr + HPAGE_PMD_SIZE <= vma->vm_end) {
- if (unlikely(anon_vma_prepare(vma)))
- return VM_FAULT_OOM;
- if (unlikely(khugepaged_enter(vma)))
+ if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
+ return VM_FAULT_FALLBACK;
+ if (unlikely(anon_vma_prepare(vma)))
+ return VM_FAULT_OOM;
+ if (unlikely(khugepaged_enter(vma)))
+ return VM_FAULT_OOM;
+ if (!(flags & FAULT_FLAG_WRITE) &&
+ transparent_hugepage_use_zero_page()) {
+ spinlock_t *ptl;
+ pgtable_t pgtable;
+ struct page *zero_page;
+ bool set;
+ pgtable = pte_alloc_one(mm, haddr);
+ if (unlikely(!pgtable))
return VM_FAULT_OOM;
- if (!(flags & FAULT_FLAG_WRITE) &&
- transparent_hugepage_use_zero_page()) {
- pgtable_t pgtable;
- struct page *zero_page;
- bool set;
- pgtable = pte_alloc_one(mm, haddr);
- if (unlikely(!pgtable))
- return VM_FAULT_OOM;
- zero_page = get_huge_zero_page();
- if (unlikely(!zero_page)) {
- pte_free(mm, pgtable);
- count_vm_event(THP_FAULT_FALLBACK);
- goto out;
- }
- spin_lock(&mm->page_table_lock);
- set = set_huge_zero_page(pgtable, mm, vma, haddr, pmd,
- zero_page);
- spin_unlock(&mm->page_table_lock);
- if (!set) {
- pte_free(mm, pgtable);
- put_huge_zero_page();
- }
- return 0;
- }
- page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
- vma, haddr, numa_node_id(), 0);
- if (unlikely(!page)) {
+ zero_page = get_huge_zero_page();
+ if (unlikely(!zero_page)) {
+ pte_free(mm, pgtable);
count_vm_event(THP_FAULT_FALLBACK);
- goto out;
- }
- count_vm_event(THP_FAULT_ALLOC);
- if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
- put_page(page);
- goto out;
+ return VM_FAULT_FALLBACK;
}
- if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd,
- page))) {
- mem_cgroup_uncharge_page(page);
- put_page(page);
- goto out;
+ ptl = pmd_lock(mm, pmd);
+ set = set_huge_zero_page(pgtable, mm, vma, haddr, pmd,
+ zero_page);
+ spin_unlock(ptl);
+ if (!set) {
+ pte_free(mm, pgtable);
+ put_huge_zero_page();
}
-
return 0;
}
-out:
- /*
- * Use __pte_alloc instead of pte_alloc_map, because we can't
- * run pte_offset_map on the pmd, if an huge pmd could
- * materialize from under us from a different thread.
- */
- if (unlikely(pmd_none(*pmd)) &&
- unlikely(__pte_alloc(mm, vma, pmd, address)))
- return VM_FAULT_OOM;
- /* if an huge pmd materialized from under us just retry later */
- if (unlikely(pmd_trans_huge(*pmd)))
- return 0;
- /*
- * A regular pmd is established and it can't morph into a huge pmd
- * from under us anymore at this point because we hold the mmap_sem
- * read mode and khugepaged takes it in write mode. So now it's
- * safe to run pte_offset_map().
- */
- pte = pte_offset_map(pmd, address);
- return handle_pte_fault(mm, vma, address, pte, pmd, flags);
+ page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
+ vma, haddr, numa_node_id(), 0);
+ if (unlikely(!page)) {
+ count_vm_event(THP_FAULT_FALLBACK);
+ return VM_FAULT_FALLBACK;
+ }
+ if (unlikely(mem_cgroup_charge_anon(page, mm, GFP_KERNEL))) {
+ put_page(page);
+ count_vm_event(THP_FAULT_FALLBACK);
+ return VM_FAULT_FALLBACK;
+ }
+ if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page))) {
+ mem_cgroup_uncharge_page(page);
+ put_page(page);
+ count_vm_event(THP_FAULT_FALLBACK);
+ return VM_FAULT_FALLBACK;
+ }
+
+ count_vm_event(THP_FAULT_ALLOC);
+ return 0;
}
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
struct vm_area_struct *vma)
{
+ spinlock_t *dst_ptl, *src_ptl;
struct page *src_page;
pmd_t pmd;
pgtable_t pgtable;
@@ -870,8 +858,9 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
if (unlikely(!pgtable))
goto out;
- spin_lock(&dst_mm->page_table_lock);
- spin_lock_nested(&src_mm->page_table_lock, SINGLE_DEPTH_NESTING);
+ dst_ptl = pmd_lock(dst_mm, dst_pmd);
+ src_ptl = pmd_lockptr(src_mm, src_pmd);
+ spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
ret = -EAGAIN;
pmd = *src_pmd;
@@ -880,7 +869,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
goto out_unlock;
}
/*
- * mm->page_table_lock is enough to be sure that huge zero pmd is not
+ * When page table lock is held, the huge zero pmd should not be
* under splitting since we don't split the page itself, only pmd to
* a page table.
*/
@@ -899,31 +888,32 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
ret = 0;
goto out_unlock;
}
+
if (unlikely(pmd_trans_splitting(pmd))) {
/* split huge page running from under us */
- spin_unlock(&src_mm->page_table_lock);
- spin_unlock(&dst_mm->page_table_lock);
+ spin_unlock(src_ptl);
+ spin_unlock(dst_ptl);
pte_free(dst_mm, pgtable);
wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */
goto out;
}
src_page = pmd_page(pmd);
- VM_BUG_ON(!PageHead(src_page));
+ VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
get_page(src_page);
page_dup_rmap(src_page);
add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
pmdp_set_wrprotect(src_mm, addr, src_pmd);
pmd = pmd_mkold(pmd_wrprotect(pmd));
+ pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
set_pmd_at(dst_mm, addr, dst_pmd, pmd);
- pgtable_trans_huge_deposit(dst_mm, pgtable);
- dst_mm->nr_ptes++;
+ atomic_long_inc(&dst_mm->nr_ptes);
ret = 0;
out_unlock:
- spin_unlock(&src_mm->page_table_lock);
- spin_unlock(&dst_mm->page_table_lock);
+ spin_unlock(src_ptl);
+ spin_unlock(dst_ptl);
out:
return ret;
}
@@ -934,10 +924,11 @@ void huge_pmd_set_accessed(struct mm_struct *mm,
pmd_t *pmd, pmd_t orig_pmd,
int dirty)
{
+ spinlock_t *ptl;
pmd_t entry;
unsigned long haddr;
- spin_lock(&mm->page_table_lock);
+ ptl = pmd_lock(mm, pmd);
if (unlikely(!pmd_same(*pmd, orig_pmd)))
goto unlock;
@@ -947,81 +938,7 @@ void huge_pmd_set_accessed(struct mm_struct *mm,
update_mmu_cache_pmd(vma, address, pmd);
unlock:
- spin_unlock(&mm->page_table_lock);
-}
-
-static int do_huge_pmd_wp_zero_page_fallback(struct mm_struct *mm,
- struct vm_area_struct *vma, unsigned long address,
- pmd_t *pmd, pmd_t orig_pmd, unsigned long haddr)
-{
- pgtable_t pgtable;
- pmd_t _pmd;
- struct page *page;
- int i, ret = 0;
- unsigned long mmun_start; /* For mmu_notifiers */
- unsigned long mmun_end; /* For mmu_notifiers */
-
- page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
- if (!page) {
- ret |= VM_FAULT_OOM;
- goto out;
- }
-
- if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) {
- put_page(page);
- ret |= VM_FAULT_OOM;
- goto out;
- }
-
- clear_user_highpage(page, address);
- __SetPageUptodate(page);
-
- mmun_start = haddr;
- mmun_end = haddr + HPAGE_PMD_SIZE;
- mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
-
- spin_lock(&mm->page_table_lock);
- if (unlikely(!pmd_same(*pmd, orig_pmd)))
- goto out_free_page;
-
- pmdp_clear_flush(vma, haddr, pmd);
- /* leave pmd empty until pte is filled */
-
- pgtable = pgtable_trans_huge_withdraw(mm);
- pmd_populate(mm, &_pmd, pgtable);
-
- for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
- pte_t *pte, entry;
- if (haddr == (address & PAGE_MASK)) {
- entry = mk_pte(page, vma->vm_page_prot);
- entry = maybe_mkwrite(pte_mkdirty(entry), vma);
- page_add_new_anon_rmap(page, vma, haddr);
- } else {
- entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
- entry = pte_mkspecial(entry);
- }
- pte = pte_offset_map(&_pmd, haddr);
- VM_BUG_ON(!pte_none(*pte));
- set_pte_at(mm, haddr, pte, entry);
- pte_unmap(pte);
- }
- smp_wmb(); /* make pte visible before pmd */
- pmd_populate(mm, pmd, pgtable);
- spin_unlock(&mm->page_table_lock);
- put_huge_zero_page();
- inc_mm_counter(mm, MM_ANONPAGES);
-
- mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
-
- ret |= VM_FAULT_WRITE;
-out:
- return ret;
-out_free_page:
- spin_unlock(&mm->page_table_lock);
- mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
- mem_cgroup_uncharge_page(page);
- put_page(page);
- goto out;
+ spin_unlock(ptl);
}
static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
@@ -1031,6 +948,7 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
struct page *page,
unsigned long haddr)
{
+ spinlock_t *ptl;
pgtable_t pgtable;
pmd_t _pmd;
int ret = 0, i;
@@ -1050,7 +968,7 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
__GFP_OTHER_NODE,
vma, address, page_to_nid(page));
if (unlikely(!pages[i] ||
- mem_cgroup_newpage_charge(pages[i], mm,
+ mem_cgroup_charge_anon(pages[i], mm,
GFP_KERNEL))) {
if (pages[i])
put_page(pages[i]);
@@ -1077,15 +995,15 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
mmun_end = haddr + HPAGE_PMD_SIZE;
mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
- spin_lock(&mm->page_table_lock);
+ ptl = pmd_lock(mm, pmd);
if (unlikely(!pmd_same(*pmd, orig_pmd)))
goto out_free_pages;
- VM_BUG_ON(!PageHead(page));
+ VM_BUG_ON_PAGE(!PageHead(page), page);
pmdp_clear_flush(vma, haddr, pmd);
/* leave pmd empty until pte is filled */
- pgtable = pgtable_trans_huge_withdraw(mm);
+ pgtable = pgtable_trans_huge_withdraw(mm, pmd);
pmd_populate(mm, &_pmd, pgtable);
for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
@@ -1103,7 +1021,7 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
smp_wmb(); /* make pte visible before pmd */
pmd_populate(mm, pmd, pgtable);
page_remove_rmap(page);
- spin_unlock(&mm->page_table_lock);
+ spin_unlock(ptl);
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
@@ -1114,7 +1032,7 @@ out:
return ret;
out_free_pages:
- spin_unlock(&mm->page_table_lock);
+ spin_unlock(ptl);
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
mem_cgroup_uncharge_start();
for (i = 0; i < HPAGE_PMD_NR; i++) {
@@ -1129,22 +1047,24 @@ out_free_pages:
int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pmd_t *pmd, pmd_t orig_pmd)
{
+ spinlock_t *ptl;
int ret = 0;
struct page *page = NULL, *new_page;
unsigned long haddr;
unsigned long mmun_start; /* For mmu_notifiers */
unsigned long mmun_end; /* For mmu_notifiers */
+ ptl = pmd_lockptr(mm, pmd);
VM_BUG_ON(!vma->anon_vma);
haddr = address & HPAGE_PMD_MASK;
if (is_huge_zero_pmd(orig_pmd))
goto alloc;
- spin_lock(&mm->page_table_lock);
+ spin_lock(ptl);
if (unlikely(!pmd_same(*pmd, orig_pmd)))
goto out_unlock;
page = pmd_page(orig_pmd);
- VM_BUG_ON(!PageCompound(page) || !PageHead(page));
+ VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page);
if (page_mapcount(page) == 1) {
pmd_t entry;
entry = pmd_mkyoung(orig_pmd);
@@ -1155,7 +1075,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
goto out_unlock;
}
get_page(page);
- spin_unlock(&mm->page_table_lock);
+ spin_unlock(ptl);
alloc:
if (transparent_hugepage_enabled(vma) &&
!transparent_hugepage_debug_cow())
@@ -1165,32 +1085,37 @@ alloc:
new_page = NULL;
if (unlikely(!new_page)) {
- count_vm_event(THP_FAULT_FALLBACK);
- if (is_huge_zero_pmd(orig_pmd)) {
- ret = do_huge_pmd_wp_zero_page_fallback(mm, vma,
- address, pmd, orig_pmd, haddr);
+ if (!page) {
+ split_huge_page_pmd(vma, address, pmd);
+ ret |= VM_FAULT_FALLBACK;
} else {
ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
pmd, orig_pmd, page, haddr);
- if (ret & VM_FAULT_OOM)
+ if (ret & VM_FAULT_OOM) {
split_huge_page(page);
+ ret |= VM_FAULT_FALLBACK;
+ }
put_page(page);
}
+ count_vm_event(THP_FAULT_FALLBACK);
goto out;
}
- count_vm_event(THP_FAULT_ALLOC);
- if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
+ if (unlikely(mem_cgroup_charge_anon(new_page, mm, GFP_KERNEL))) {
put_page(new_page);
if (page) {
split_huge_page(page);
put_page(page);
- }
- ret |= VM_FAULT_OOM;
+ } else
+ split_huge_page_pmd(vma, address, pmd);
+ ret |= VM_FAULT_FALLBACK;
+ count_vm_event(THP_FAULT_FALLBACK);
goto out;
}
- if (is_huge_zero_pmd(orig_pmd))
+ count_vm_event(THP_FAULT_ALLOC);
+
+ if (!page)
clear_huge_page(new_page, haddr, HPAGE_PMD_NR);
else
copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
@@ -1200,38 +1125,39 @@ alloc:
mmun_end = haddr + HPAGE_PMD_SIZE;
mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
- spin_lock(&mm->page_table_lock);
+ spin_lock(ptl);
if (page)
put_page(page);
if (unlikely(!pmd_same(*pmd, orig_pmd))) {
- spin_unlock(&mm->page_table_lock);
+ spin_unlock(ptl);
mem_cgroup_uncharge_page(new_page);
put_page(new_page);
goto out_mn;
} else {
pmd_t entry;
- entry = mk_huge_pmd(new_page, vma);
+ entry = mk_huge_pmd(new_page, vma->vm_page_prot);
+ entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
pmdp_clear_flush(vma, haddr, pmd);
page_add_new_anon_rmap(new_page, vma, haddr);
set_pmd_at(mm, haddr, pmd, entry);
update_mmu_cache_pmd(vma, address, pmd);
- if (is_huge_zero_pmd(orig_pmd)) {
+ if (!page) {
add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
put_huge_zero_page();
} else {
- VM_BUG_ON(!PageHead(page));
+ VM_BUG_ON_PAGE(!PageHead(page), page);
page_remove_rmap(page);
put_page(page);
}
ret |= VM_FAULT_WRITE;
}
- spin_unlock(&mm->page_table_lock);
+ spin_unlock(ptl);
out_mn:
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
out:
return ret;
out_unlock:
- spin_unlock(&mm->page_table_lock);
+ spin_unlock(ptl);
return ret;
}
@@ -1243,7 +1169,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
struct mm_struct *mm = vma->vm_mm;
struct page *page = NULL;
- assert_spin_locked(&mm->page_table_lock);
+ assert_spin_locked(pmd_lockptr(mm, pmd));
if (flags & FOLL_WRITE && !pmd_write(*pmd))
goto out;
@@ -1252,8 +1178,12 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
return ERR_PTR(-EFAULT);
+ /* Full NUMA hinting faults to serialise migration in fault paths */
+ if ((flags & FOLL_NUMA) && pmd_numa(*pmd))
+ goto out;
+
page = pmd_page(*pmd);
- VM_BUG_ON(!PageHead(page));
+ VM_BUG_ON_PAGE(!PageHead(page), page);
if (flags & FOLL_TOUCH) {
pmd_t _pmd;
/*
@@ -1265,7 +1195,9 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
* young bit, instead of the current set_pmd_at.
*/
_pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
- set_pmd_at(mm, addr & HPAGE_PMD_MASK, pmd, _pmd);
+ if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
+ pmd, _pmd, 1))
+ update_mmu_cache_pmd(vma, addr, pmd);
}
if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
if (page->mapping && trylock_page(page)) {
@@ -1276,7 +1208,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
}
}
page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
- VM_BUG_ON(!PageCompound(page));
+ VM_BUG_ON_PAGE(!PageCompound(page), page);
if (flags & FOLL_GET)
get_page_foll(page);
@@ -1288,91 +1220,157 @@ out:
int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, pmd_t pmd, pmd_t *pmdp)
{
+ spinlock_t *ptl;
+ struct anon_vma *anon_vma = NULL;
struct page *page;
unsigned long haddr = addr & HPAGE_PMD_MASK;
- int target_nid;
- int current_nid = -1;
- bool migrated;
+ int page_nid = -1, this_nid = numa_node_id();
+ int target_nid, last_cpupid = -1;
+ bool page_locked;
+ bool migrated = false;
+ int flags = 0;
- spin_lock(&mm->page_table_lock);
+ ptl = pmd_lock(mm, pmdp);
if (unlikely(!pmd_same(pmd, *pmdp)))
goto out_unlock;
+ /*
+ * If there are potential migrations, wait for completion and retry
+ * without disrupting NUMA hinting information. Do not relock and
+ * check_same as the page may no longer be mapped.
+ */
+ if (unlikely(pmd_trans_migrating(*pmdp))) {
+ spin_unlock(ptl);
+ wait_migrate_huge_page(vma->anon_vma, pmdp);
+ goto out;
+ }
+
page = pmd_page(pmd);
- get_page(page);
- current_nid = page_to_nid(page);
+ BUG_ON(is_huge_zero_page(page));
+ page_nid = page_to_nid(page);
+ last_cpupid = page_cpupid_last(page);
count_vm_numa_event(NUMA_HINT_FAULTS);
- if (current_nid == numa_node_id())
+ if (page_nid == this_nid) {
count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
+ flags |= TNF_FAULT_LOCAL;
+ }
+ /*
+ * Avoid grouping on DSO/COW pages in particular and RO pages
+ * in general; RO pages shouldn't hurt as much anyway since
+ * they can be in shared cache state.
+ */
+ if (!pmd_write(pmd))
+ flags |= TNF_NO_GROUP;
+
+ /*
+ * Acquire the page lock to serialise THP migrations but avoid dropping
+ * page_table_lock if at all possible
+ */
+ page_locked = trylock_page(page);
target_nid = mpol_misplaced(page, vma, haddr);
if (target_nid == -1) {
- put_page(page);
- goto clear_pmdnuma;
+ /* If the page was locked, there are no parallel migrations */
+ if (page_locked)
+ goto clear_pmdnuma;
+ }
+
+ /* Migration could have started since the pmd_trans_migrating check */
+ if (!page_locked) {
+ spin_unlock(ptl);
+ wait_on_page_locked(page);
+ page_nid = -1;
+ goto out;
}
- /* Acquire the page lock to serialise THP migrations */
- spin_unlock(&mm->page_table_lock);
- lock_page(page);
+ /*
+ * Page is misplaced. Page lock serialises migrations. Acquire anon_vma
+ * to serialise splits
+ */
+ get_page(page);
+ spin_unlock(ptl);
+ anon_vma = page_lock_anon_vma_read(page);
- /* Confirm the PTE did not while locked */
- spin_lock(&mm->page_table_lock);
+ /* Confirm the PMD did not change while page_table_lock was released */
+ spin_lock(ptl);
if (unlikely(!pmd_same(pmd, *pmdp))) {
unlock_page(page);
put_page(page);
+ page_nid = -1;
goto out_unlock;
}
- spin_unlock(&mm->page_table_lock);
- /* Migrate the THP to the requested node */
+ /* Bail if we fail to protect against THP splits for any reason */
+ if (unlikely(!anon_vma)) {
+ put_page(page);
+ page_nid = -1;
+ goto clear_pmdnuma;
+ }
+
+ /*
+ * Migrate the THP to the requested node, returns with page unlocked
+ * and pmd_numa cleared.
+ */
+ spin_unlock(ptl);
migrated = migrate_misplaced_transhuge_page(mm, vma,
pmdp, pmd, addr, page, target_nid);
- if (!migrated)
- goto check_same;
-
- task_numa_fault(target_nid, HPAGE_PMD_NR, true);
- return 0;
+ if (migrated) {
+ flags |= TNF_MIGRATED;
+ page_nid = target_nid;
+ }
-check_same:
- spin_lock(&mm->page_table_lock);
- if (unlikely(!pmd_same(pmd, *pmdp)))
- goto out_unlock;
+ goto out;
clear_pmdnuma:
+ BUG_ON(!PageLocked(page));
pmd = pmd_mknonnuma(pmd);
set_pmd_at(mm, haddr, pmdp, pmd);
VM_BUG_ON(pmd_numa(*pmdp));
update_mmu_cache_pmd(vma, addr, pmdp);
+ unlock_page(page);
out_unlock:
- spin_unlock(&mm->page_table_lock);
- if (current_nid != -1)
- task_numa_fault(current_nid, HPAGE_PMD_NR, false);
+ spin_unlock(ptl);
+
+out:
+ if (anon_vma)
+ page_unlock_anon_vma_read(anon_vma);
+
+ if (page_nid != -1)
+ task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, flags);
+
return 0;
}
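Throughout this hunk the old spin_lock(&mm->page_table_lock) calls are replaced by pmd_lock()/pmd_lockptr(), which hand back the spinlock that actually covers the pmd being touched. A minimal caller sketch of the new contract, assuming the usual THP helpers are in scope (the function below is hypothetical and not part of the patch):

static int example_touch_huge_pmd(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl;

	ptl = pmd_lock(mm, pmd);	/* lock covering this pmd (split or global) */
	if (!pmd_trans_huge(*pmd)) {
		spin_unlock(ptl);	/* raced with a split: drop the lock and bail */
		return 0;
	}
	/* ... operate on the huge pmd while ptl is held ... */
	spin_unlock(ptl);
	return 1;
}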
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
pmd_t *pmd, unsigned long addr)
{
+ spinlock_t *ptl;
int ret = 0;
- if (__pmd_trans_huge_lock(pmd, vma) == 1) {
+ if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
struct page *page;
pgtable_t pgtable;
pmd_t orig_pmd;
- pgtable = pgtable_trans_huge_withdraw(tlb->mm);
+ /*
+ * For architectures like ppc64 we look at deposited pgtable
+ * when calling pmdp_get_and_clear. So do the
+ * pgtable_trans_huge_withdraw after finishing pmdp related
+ * operations.
+ */
orig_pmd = pmdp_get_and_clear(tlb->mm, addr, pmd);
tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
+ pgtable = pgtable_trans_huge_withdraw(tlb->mm, pmd);
if (is_huge_zero_pmd(orig_pmd)) {
- tlb->mm->nr_ptes--;
- spin_unlock(&tlb->mm->page_table_lock);
+ atomic_long_dec(&tlb->mm->nr_ptes);
+ spin_unlock(ptl);
put_huge_zero_page();
} else {
page = pmd_page(orig_pmd);
page_remove_rmap(page);
- VM_BUG_ON(page_mapcount(page) < 0);
+ VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
- VM_BUG_ON(!PageHead(page));
- tlb->mm->nr_ptes--;
- spin_unlock(&tlb->mm->page_table_lock);
+ VM_BUG_ON_PAGE(!PageHead(page), page);
+ atomic_long_dec(&tlb->mm->nr_ptes);
+ spin_unlock(ptl);
tlb_remove_page(tlb, page);
}
pte_free(tlb->mm, pgtable);
@@ -1385,14 +1383,15 @@ int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, unsigned long end,
unsigned char *vec)
{
+ spinlock_t *ptl;
int ret = 0;
- if (__pmd_trans_huge_lock(pmd, vma) == 1) {
+ if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
/*
* All logical pages in the range are present
* if backed by a huge page.
*/
- spin_unlock(&vma->vm_mm->page_table_lock);
+ spin_unlock(ptl);
memset(vec, 1, (end - addr) >> PAGE_SHIFT);
ret = 1;
}
@@ -1405,6 +1404,7 @@ int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
unsigned long new_addr, unsigned long old_end,
pmd_t *old_pmd, pmd_t *new_pmd)
{
+ spinlock_t *old_ptl, *new_ptl;
int ret = 0;
pmd_t pmd;
@@ -1425,41 +1425,72 @@ int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
goto out;
}
- ret = __pmd_trans_huge_lock(old_pmd, vma);
+ /*
+ * We don't have to worry about the ordering of src and dst
+ * ptlocks because exclusive mmap_sem prevents deadlock.
+ */
+ ret = __pmd_trans_huge_lock(old_pmd, vma, &old_ptl);
if (ret == 1) {
+ new_ptl = pmd_lockptr(mm, new_pmd);
+ if (new_ptl != old_ptl)
+ spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
pmd = pmdp_get_and_clear(mm, old_addr, old_pmd);
VM_BUG_ON(!pmd_none(*new_pmd));
- set_pmd_at(mm, new_addr, new_pmd, pmd);
- spin_unlock(&mm->page_table_lock);
+
+ if (pmd_move_must_withdraw(new_ptl, old_ptl)) {
+ pgtable_t pgtable;
+ pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
+ pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
+ }
+ set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd));
+ if (new_ptl != old_ptl)
+ spin_unlock(new_ptl);
+ spin_unlock(old_ptl);
}
out:
return ret;
}
+/*
+ * Returns
+ * - 0 if PMD could not be locked
+ * - 1 if PMD was locked but protections unchanged and TLB flush unnecessary
+ * - HPAGE_PMD_NR if protections changed and TLB flush necessary
+ */
int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, pgprot_t newprot, int prot_numa)
{
struct mm_struct *mm = vma->vm_mm;
+ spinlock_t *ptl;
int ret = 0;
- if (__pmd_trans_huge_lock(pmd, vma) == 1) {
+ if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
pmd_t entry;
- entry = pmdp_get_and_clear(mm, addr, pmd);
+ ret = 1;
if (!prot_numa) {
+ entry = pmdp_get_and_clear(mm, addr, pmd);
+ if (pmd_numa(entry))
+ entry = pmd_mknonnuma(entry);
entry = pmd_modify(entry, newprot);
+ ret = HPAGE_PMD_NR;
+ set_pmd_at(mm, addr, pmd, entry);
BUG_ON(pmd_write(entry));
} else {
struct page *page = pmd_page(*pmd);
- /* only check non-shared pages */
- if (page_mapcount(page) == 1 &&
+ /*
+ * Do not trap faults against the zero page. The
+ * read-only data is likely to be read-cached on the
+ * local CPU cache and it is less useful to know about
+ * local vs remote hits on the zero page.
+ */
+ if (!is_huge_zero_page(page) &&
!pmd_numa(*pmd)) {
- entry = pmd_mknuma(entry);
+ pmdp_set_numa(mm, addr, pmd);
+ ret = HPAGE_PMD_NR;
}
}
- set_pmd_at(mm, addr, pmd, entry);
- spin_unlock(&vma->vm_mm->page_table_lock);
- ret = 1;
+ spin_unlock(ptl);
}
return ret;
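change_huge_pmd() now reports three outcomes instead of a boolean so the caller can tell whether a TLB flush is actually required. A hedged sketch of how a caller might consume the return value (the helper below is an assumption; the real caller lives in mm/mprotect.c and is not part of this hunk):

/*
 * 0: not a huge pmd, fall back to the pte path.
 * 1: huge pmd handled, protections unchanged, no flush needed.
 * HPAGE_PMD_NR: protections changed on HPAGE_PMD_NR pages, flush later.
 */
static unsigned long example_change_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, pgprot_t newprot, int prot_numa)
{
	int nr = change_huge_pmd(vma, pmd, addr, newprot, prot_numa);

	if (nr == HPAGE_PMD_NR)
		return HPAGE_PMD_NR;	/* count towards the deferred TLB flush */
	return 0;			/* nothing changed for this pmd */
}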
@@ -1472,12 +1503,13 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
* Note that if it returns 1, this routine returns without unlocking page
* table locks. So callers must unlock them.
*/
-int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
+int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
+ spinlock_t **ptl)
{
- spin_lock(&vma->vm_mm->page_table_lock);
+ *ptl = pmd_lock(vma->vm_mm, pmd);
if (likely(pmd_trans_huge(*pmd))) {
if (unlikely(pmd_trans_splitting(*pmd))) {
- spin_unlock(&vma->vm_mm->page_table_lock);
+ spin_unlock(*ptl);
wait_split_huge_page(vma->anon_vma, pmd);
return -1;
} else {
@@ -1486,27 +1518,44 @@ int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
return 1;
}
}
- spin_unlock(&vma->vm_mm->page_table_lock);
+ spin_unlock(*ptl);
return 0;
}
+/*
+ * This function returns whether a given @page is mapped onto the @address
+ * in the virtual space of @mm.
+ *
+ * When it's true, this function returns the pmd with the page table lock
+ * held and passes the lock back to the caller via @ptl.
+ * If it's false, it returns NULL without holding the page table lock.
+ */
pmd_t *page_check_address_pmd(struct page *page,
struct mm_struct *mm,
unsigned long address,
- enum page_check_address_pmd_flag flag)
+ enum page_check_address_pmd_flag flag,
+ spinlock_t **ptl)
{
- pmd_t *pmd, *ret = NULL;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
if (address & ~HPAGE_PMD_MASK)
- goto out;
+ return NULL;
- pmd = mm_find_pmd(mm, address);
- if (!pmd)
- goto out;
- if (pmd_none(*pmd))
- goto out;
+ pgd = pgd_offset(mm, address);
+ if (!pgd_present(*pgd))
+ return NULL;
+ pud = pud_offset(pgd, address);
+ if (!pud_present(*pud))
+ return NULL;
+ pmd = pmd_offset(pud, address);
+
+ *ptl = pmd_lock(mm, pmd);
+ if (!pmd_present(*pmd))
+ goto unlock;
if (pmd_page(*pmd) != page)
- goto out;
+ goto unlock;
/*
* split_vma() may create temporary aliased mappings. There is
* no risk as long as all huge pmd are found and have their
@@ -1516,14 +1565,15 @@ pmd_t *page_check_address_pmd(struct page *page,
*/
if (flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG &&
pmd_trans_splitting(*pmd))
- goto out;
+ goto unlock;
if (pmd_trans_huge(*pmd)) {
VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG &&
!pmd_trans_splitting(*pmd));
- ret = pmd;
+ return pmd;
}
-out:
- return ret;
+unlock:
+ spin_unlock(*ptl);
+ return NULL;
}
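page_check_address_pmd() now returns with the relevant page table lock held on success and passes that lock back through @ptl, so the caller is responsible for unlocking. A minimal caller sketch of that contract (the wrapper below is hypothetical; the real callers are the split helpers that follow):

static int example_inspect_pmd(struct page *page, struct mm_struct *mm,
			       unsigned long address)
{
	spinlock_t *ptl;
	pmd_t *pmd;

	pmd = page_check_address_pmd(page, mm, address,
			PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG, &ptl);
	if (!pmd)
		return 0;		/* not mapped here; no lock is held */

	/* ... read or modify *pmd while ptl is held ... */
	spin_unlock(ptl);
	return 1;
}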
static int __split_huge_page_splitting(struct page *page,
@@ -1531,6 +1581,7 @@ static int __split_huge_page_splitting(struct page *page,
unsigned long address)
{
struct mm_struct *mm = vma->vm_mm;
+ spinlock_t *ptl;
pmd_t *pmd;
int ret = 0;
/* For mmu_notifiers */
@@ -1538,9 +1589,8 @@ static int __split_huge_page_splitting(struct page *page,
const unsigned long mmun_end = address + HPAGE_PMD_SIZE;
mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
- spin_lock(&mm->page_table_lock);
pmd = page_check_address_pmd(page, mm, address,
- PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG);
+ PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG, &ptl);
if (pmd) {
/*
* We can't temporarily set the pmd to null in order
@@ -1551,8 +1601,8 @@ static int __split_huge_page_splitting(struct page *page,
*/
pmdp_splitting_flush(vma, address, pmd);
ret = 1;
+ spin_unlock(ptl);
}
- spin_unlock(&mm->page_table_lock);
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
return ret;
@@ -1612,7 +1662,9 @@ static void __split_huge_page_refcount(struct page *page,
((1L << PG_referenced) |
(1L << PG_swapbacked) |
(1L << PG_mlocked) |
- (1L << PG_uptodate)));
+ (1L << PG_uptodate) |
+ (1L << PG_active) |
+ (1L << PG_unevictable)));
page_tail->flags |= (1L << PG_dirty);
/* clear PageTail before overwriting first_page */
@@ -1638,7 +1690,7 @@ static void __split_huge_page_refcount(struct page *page,
page_tail->mapping = page->mapping;
page_tail->index = page->index + i;
- page_nid_xchg_last(page_tail, page_nid_last(page));
+ page_cpupid_xchg_last(page_tail, page_cpupid_last(page));
BUG_ON(!PageAnon(page_tail));
BUG_ON(!PageUptodate(page_tail));
@@ -1651,7 +1703,6 @@ static void __split_huge_page_refcount(struct page *page,
BUG_ON(atomic_read(&page->_count) <= 0);
__mod_zone_page_state(zone, NR_ANON_TRANSPARENT_HUGEPAGES, -1);
- __mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);
ClearPageCompound(page);
compound_unlock(page);
@@ -1682,16 +1733,16 @@ static int __split_huge_page_map(struct page *page,
unsigned long address)
{
struct mm_struct *mm = vma->vm_mm;
+ spinlock_t *ptl;
pmd_t *pmd, _pmd;
int ret = 0, i;
pgtable_t pgtable;
unsigned long haddr;
- spin_lock(&mm->page_table_lock);
pmd = page_check_address_pmd(page, mm, address,
- PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG);
+ PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG, &ptl);
if (pmd) {
- pgtable = pgtable_trans_huge_withdraw(mm);
+ pgtable = pgtable_trans_huge_withdraw(mm, pmd);
pmd_populate(mm, &_pmd, pgtable);
haddr = address;
@@ -1744,8 +1795,8 @@ static int __split_huge_page_map(struct page *page,
pmdp_invalidate(vma, address, pmd);
pmd_populate(mm, pmd, pgtable);
ret = 1;
+ spin_unlock(ptl);
}
- spin_unlock(&mm->page_table_lock);
return ret;
}
@@ -1842,22 +1893,27 @@ out:
return ret;
}
-#define VM_NO_THP (VM_SPECIAL|VM_MIXEDMAP|VM_HUGETLB|VM_SHARED|VM_MAYSHARE)
+#define VM_NO_THP (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE)
int hugepage_madvise(struct vm_area_struct *vma,
unsigned long *vm_flags, int advice)
{
- struct mm_struct *mm = vma->vm_mm;
-
switch (advice) {
case MADV_HUGEPAGE:
+#ifdef CONFIG_S390
+ /*
+ * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
+ * can't handle this properly after s390_enable_sie, so we simply
+ * ignore the madvise to prevent qemu from causing a SIGSEGV.
+ */
+ if (mm_has_pgste(vma->vm_mm))
+ return 0;
+#endif
/*
* Be somewhat over-protective like KSM for now!
*/
if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP))
return -EINVAL;
- if (mm->def_flags & VM_NOHUGEPAGE)
- return -EINVAL;
*vm_flags &= ~VM_NOHUGEPAGE;
*vm_flags |= VM_HUGEPAGE;
/*
@@ -2057,9 +2113,9 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
if (unlikely(!page))
goto out;
- VM_BUG_ON(PageCompound(page));
- BUG_ON(!PageAnon(page));
- VM_BUG_ON(!PageSwapBacked(page));
+ VM_BUG_ON_PAGE(PageCompound(page), page);
+ VM_BUG_ON_PAGE(!PageAnon(page), page);
+ VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
/* cannot use mapcount: can't collapse if there's a gup pin */
if (page_count(page) != 1)
@@ -2082,8 +2138,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
}
/* 0 stands for page_is_file_cache(page) == false */
inc_zone_page_state(page, NR_ISOLATED_ANON + 0);
- VM_BUG_ON(!PageLocked(page));
- VM_BUG_ON(PageLRU(page));
+ VM_BUG_ON_PAGE(!PageLocked(page), page);
+ VM_BUG_ON_PAGE(PageLRU(page), page);
/* If there is no mapped pte young don't collapse the page */
if (pte_young(pteval) || PageReferenced(page) ||
@@ -2113,7 +2169,7 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
} else {
src_page = pte_page(pteval);
copy_user_highpage(page, src_page, address, vma);
- VM_BUG_ON(page_mapcount(src_page) != 1);
+ VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
release_pte_page(src_page);
/*
* ptl mostly unnecessary, but preempt has to
@@ -2142,7 +2198,34 @@ static void khugepaged_alloc_sleep(void)
msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
}
+static int khugepaged_node_load[MAX_NUMNODES];
+
#ifdef CONFIG_NUMA
+static int khugepaged_find_target_node(void)
+{
+ static int last_khugepaged_target_node = NUMA_NO_NODE;
+ int nid, target_node = 0, max_value = 0;
+
+ /* find first node with max normal pages hit */
+ for (nid = 0; nid < MAX_NUMNODES; nid++)
+ if (khugepaged_node_load[nid] > max_value) {
+ max_value = khugepaged_node_load[nid];
+ target_node = nid;
+ }
+
+ /* do some balancing if several nodes have the same hit count */
+ if (target_node <= last_khugepaged_target_node)
+ for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
+ nid++)
+ if (max_value == khugepaged_node_load[nid]) {
+ target_node = nid;
+ break;
+ }
+
+ last_khugepaged_target_node = target_node;
+ return target_node;
+}
+
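khugepaged_find_target_node() above picks the node that contributed the most base pages to the scanned range, and rotates between nodes that tie on that maximum so a single node is not always preferred. The same selection rule as a standalone sketch (plain C, not kernel code; the array size and the initial last value of -1 are assumptions):

#include <stddef.h>

static int pick_target_node(const int load[], size_t nr_nodes, int *last)
{
	int target = 0, max = 0;
	size_t nid;

	/* first node with the maximum hit count */
	for (nid = 0; nid < nr_nodes; nid++)
		if (load[nid] > max) {
			max = load[nid];
			target = (int)nid;
		}

	/* rotate among nodes sharing that maximum */
	if (target <= *last)
		for (nid = *last + 1; nid < nr_nodes; nid++)
			if (load[nid] == max) {
				target = (int)nid;
				break;
			}

	*last = target;
	return target;
}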
static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
if (IS_ERR(*hpage)) {
@@ -2165,7 +2248,7 @@ static struct page
struct vm_area_struct *vma, unsigned long address,
int node)
{
- VM_BUG_ON(*hpage);
+ VM_BUG_ON_PAGE(*hpage, *hpage);
/*
* Allocate the page while the vma is still valid and under
* the mmap_sem read mode so there is no memory allocation
@@ -2176,9 +2259,8 @@ static struct page
* mmap_sem in read mode is good idea also to allow greater
* scalability.
*/
- *hpage = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
- node, __GFP_OTHER_NODE);
-
+ *hpage = alloc_pages_exact_node(node, alloc_hugepage_gfpmask(
+ khugepaged_defrag(), __GFP_OTHER_NODE), HPAGE_PMD_ORDER);
/*
* After allocating the hugepage, release the mmap_sem read lock in
* preparation for taking it in write mode.
@@ -2194,6 +2276,17 @@ static struct page
return *hpage;
}
#else
+static int khugepaged_find_target_node(void)
+{
+ return 0;
+}
+
+static inline struct page *alloc_hugepage(int defrag)
+{
+ return alloc_pages(alloc_hugepage_gfpmask(defrag, 0),
+ HPAGE_PMD_ORDER);
+}
+
static struct page *khugepaged_alloc_hugepage(bool *wait)
{
struct page *hpage;
@@ -2260,7 +2353,7 @@ static void collapse_huge_page(struct mm_struct *mm,
pte_t *pte;
pgtable_t pgtable;
struct page *new_page;
- spinlock_t *ptl;
+ spinlock_t *pmd_ptl, *pte_ptl;
int isolated;
unsigned long hstart, hend;
unsigned long mmun_start; /* For mmu_notifiers */
@@ -2273,7 +2366,7 @@ static void collapse_huge_page(struct mm_struct *mm,
if (!new_page)
return;
- if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL)))
+ if (unlikely(mem_cgroup_charge_anon(new_page, mm, GFP_KERNEL)))
return;
/*
@@ -2286,6 +2379,8 @@ static void collapse_huge_page(struct mm_struct *mm,
goto out;
vma = find_vma(mm, address);
+ if (!vma)
+ goto out;
hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
hend = vma->vm_end & HPAGE_PMD_MASK;
if (address < hstart || address + HPAGE_PMD_SIZE > hend)
@@ -2301,12 +2396,12 @@ static void collapse_huge_page(struct mm_struct *mm,
anon_vma_lock_write(vma->anon_vma);
pte = pte_offset_map(pmd, address);
- ptl = pte_lockptr(mm, pmd);
+ pte_ptl = pte_lockptr(mm, pmd);
mmun_start = address;
mmun_end = address + HPAGE_PMD_SIZE;
mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
- spin_lock(&mm->page_table_lock); /* probably unnecessary */
+ pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
/*
* After this gup_fast can't run anymore. This also removes
* any huge TLB entry from the CPU so we won't allow
@@ -2314,16 +2409,16 @@ static void collapse_huge_page(struct mm_struct *mm,
* to avoid the risk of CPU bugs in that area.
*/
_pmd = pmdp_clear_flush(vma, address, pmd);
- spin_unlock(&mm->page_table_lock);
+ spin_unlock(pmd_ptl);
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
- spin_lock(ptl);
+ spin_lock(pte_ptl);
isolated = __collapse_huge_page_isolate(vma, address, pte);
- spin_unlock(ptl);
+ spin_unlock(pte_ptl);
if (unlikely(!isolated)) {
pte_unmap(pte);
- spin_lock(&mm->page_table_lock);
+ spin_lock(pmd_ptl);
BUG_ON(!pmd_none(*pmd));
/*
* We can only use set_pmd_at when establishing
@@ -2331,7 +2426,7 @@ static void collapse_huge_page(struct mm_struct *mm,
* points to regular pagetables. Use pmd_populate for that
*/
pmd_populate(mm, pmd, pmd_pgtable(_pmd));
- spin_unlock(&mm->page_table_lock);
+ spin_unlock(pmd_ptl);
anon_vma_unlock_write(vma->anon_vma);
goto out;
}
@@ -2342,12 +2437,13 @@ static void collapse_huge_page(struct mm_struct *mm,
*/
anon_vma_unlock_write(vma->anon_vma);
- __collapse_huge_page_copy(pte, new_page, vma, address, ptl);
+ __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
pte_unmap(pte);
__SetPageUptodate(new_page);
pgtable = pmd_pgtable(_pmd);
- _pmd = mk_huge_pmd(new_page, vma);
+ _pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
+ _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
/*
* spin_lock() below is not the equivalent of smp_wmb(), so
@@ -2356,13 +2452,13 @@ static void collapse_huge_page(struct mm_struct *mm,
*/
smp_wmb();
- spin_lock(&mm->page_table_lock);
+ spin_lock(pmd_ptl);
BUG_ON(!pmd_none(*pmd));
page_add_new_anon_rmap(new_page, vma, address);
+ pgtable_trans_huge_deposit(mm, pmd, pgtable);
set_pmd_at(mm, address, pmd, _pmd);
update_mmu_cache_pmd(vma, address, pmd);
- pgtable_trans_huge_deposit(mm, pgtable);
- spin_unlock(&mm->page_table_lock);
+ spin_unlock(pmd_ptl);
*hpage = NULL;
@@ -2397,6 +2493,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
if (pmd_trans_huge(*pmd))
goto out;
+ memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
pte = pte_offset_map_lock(mm, pmd, address, &ptl);
for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
_pte++, _address += PAGE_SIZE) {
@@ -2413,13 +2510,14 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
if (unlikely(!page))
goto out_unmap;
/*
- * Chose the node of the first page. This could
- * be more sophisticated and look at more pages,
- * but isn't for now.
+ * Record which node the original page is from and save this
+ * information to khugepaged_node_load[].
+ * Khugepaged will allocate a hugepage from the node that has the
+ * max hit count.
*/
- if (node == NUMA_NO_NODE)
- node = page_to_nid(page);
- VM_BUG_ON(PageCompound(page));
+ node = page_to_nid(page);
+ khugepaged_node_load[node]++;
+ VM_BUG_ON_PAGE(PageCompound(page), page);
if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
goto out_unmap;
/* cannot use mapcount: can't collapse if there's a gup pin */
@@ -2433,9 +2531,11 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
ret = 1;
out_unmap:
pte_unmap_unlock(pte, ptl);
- if (ret)
+ if (ret) {
+ node = khugepaged_find_target_node();
/* collapse_huge_page will return with the mmap_sem released */
collapse_huge_page(mm, address, hpage, vma, node);
+ }
out:
return ret;
}
@@ -2667,7 +2767,7 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
pmdp_clear_flush(vma, haddr, pmd);
/* leave pmd empty until pte is filled */
- pgtable = pgtable_trans_huge_withdraw(mm);
+ pgtable = pgtable_trans_huge_withdraw(mm, pmd);
pmd_populate(mm, &_pmd, pgtable);
for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
@@ -2687,6 +2787,7 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmd)
{
+ spinlock_t *ptl;
struct page *page;
struct mm_struct *mm = vma->vm_mm;
unsigned long haddr = address & HPAGE_PMD_MASK;
@@ -2697,29 +2798,37 @@ void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address,
mmun_start = haddr;
mmun_end = haddr + HPAGE_PMD_SIZE;
+again:
mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
- spin_lock(&mm->page_table_lock);
+ ptl = pmd_lock(mm, pmd);
if (unlikely(!pmd_trans_huge(*pmd))) {
- spin_unlock(&mm->page_table_lock);
+ spin_unlock(ptl);
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
return;
}
if (is_huge_zero_pmd(*pmd)) {
__split_huge_zero_page_pmd(vma, haddr, pmd);
- spin_unlock(&mm->page_table_lock);
+ spin_unlock(ptl);
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
return;
}
page = pmd_page(*pmd);
- VM_BUG_ON(!page_count(page));
+ VM_BUG_ON_PAGE(!page_count(page), page);
get_page(page);
- spin_unlock(&mm->page_table_lock);
+ spin_unlock(ptl);
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
split_huge_page(page);
put_page(page);
- BUG_ON(pmd_trans_huge(*pmd));
+
+ /*
+ * We don't always have down_write of mmap_sem here: a racing
+ * do_huge_pmd_wp_page() might have copied-on-write to another
+ * huge page before our split_huge_page() got the anon_vma lock.
+ */
+ if (unlikely(pmd_trans_huge(*pmd)))
+ goto again;
}
void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e2bfbf73a551..c82290b9c1fc 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -13,6 +13,7 @@
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
+#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
@@ -21,6 +22,8 @@
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
+#include <linux/page-isolation.h>
+#include <linux/jhash.h>
#include <asm/page.h>
#include <asm/pgtable.h>
@@ -33,7 +36,6 @@
#include "internal.h"
const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
-static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;
int hugetlb_max_hstate __read_mostly;
@@ -48,10 +50,18 @@ static unsigned long __initdata default_hstate_max_huge_pages;
static unsigned long __initdata default_hstate_size;
/*
- * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
+ * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
+ * free_huge_pages, and surplus_huge_pages.
*/
DEFINE_SPINLOCK(hugetlb_lock);
+/*
+ * Serializes faults on the same logical page. This is used to
+ * prevent spurious OOMs when the hugepage pool is fully utilized.
+ */
+static int num_fault_mutexes;
+static struct mutex *htlb_fault_mutex_table ____cacheline_aligned_in_smp;
+
static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
{
bool free = (spool->count == 0) && (spool->used_hpages == 0);
@@ -134,15 +144,8 @@ static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
* Region tracking -- allows tracking of reservations and instantiated pages
* across the pages in a mapping.
*
- * The region data structures are protected by a combination of the mmap_sem
- * and the hugetlb_instantion_mutex. To access or modify a region the caller
- * must either hold the mmap_sem for write, or the mmap_sem for read and
- * the hugetlb_instantiation mutex:
- *
- * down_write(&mm->mmap_sem);
- * or
- * down_read(&mm->mmap_sem);
- * mutex_lock(&hugetlb_instantiation_mutex);
+ * The region data structures are embedded into a resv_map and
+ * protected by a resv_map's lock
*/
struct file_region {
struct list_head link;
@@ -150,10 +153,12 @@ struct file_region {
long to;
};
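The region helpers below now operate on a struct resv_map instead of a bare list head, so each reservation map carries its own spinlock and refcount. The definition is removed from this file (presumably to a header) and is not shown in this diff; the layout below is an assumption for illustration, matching the fields initialised in resv_map_alloc():

struct resv_map {
	struct kref refs;		/* lifetime, dropped via resv_map_release() */
	spinlock_t lock;		/* protects the regions list */
	struct list_head regions;	/* list of struct file_region */
};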
-static long region_add(struct list_head *head, long f, long t)
+static long region_add(struct resv_map *resv, long f, long t)
{
+ struct list_head *head = &resv->regions;
struct file_region *rg, *nrg, *trg;
+ spin_lock(&resv->lock);
/* Locate the region we are either in or before. */
list_for_each_entry(rg, head, link)
if (f <= rg->to)
@@ -183,14 +188,18 @@ static long region_add(struct list_head *head, long f, long t)
}
nrg->from = f;
nrg->to = t;
+ spin_unlock(&resv->lock);
return 0;
}
-static long region_chg(struct list_head *head, long f, long t)
+static long region_chg(struct resv_map *resv, long f, long t)
{
- struct file_region *rg, *nrg;
+ struct list_head *head = &resv->regions;
+ struct file_region *rg, *nrg = NULL;
long chg = 0;
+retry:
+ spin_lock(&resv->lock);
/* Locate the region we are before or in. */
list_for_each_entry(rg, head, link)
if (f <= rg->to)
@@ -200,15 +209,21 @@ static long region_chg(struct list_head *head, long f, long t)
* Subtle, allocate a new region at the position but make it zero
* size such that we can guarantee to record the reservation. */
if (&rg->link == head || t < rg->from) {
- nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
- if (!nrg)
- return -ENOMEM;
- nrg->from = f;
- nrg->to = f;
- INIT_LIST_HEAD(&nrg->link);
- list_add(&nrg->link, rg->link.prev);
+ if (!nrg) {
+ spin_unlock(&resv->lock);
+ nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
+ if (!nrg)
+ return -ENOMEM;
+
+ nrg->from = f;
+ nrg->to = f;
+ INIT_LIST_HEAD(&nrg->link);
+ goto retry;
+ }
- return t - f;
+ list_add(&nrg->link, rg->link.prev);
+ chg = t - f;
+ goto out_nrg;
}
/* Round our left edge to the current segment if it encloses us. */
@@ -221,7 +236,7 @@ static long region_chg(struct list_head *head, long f, long t)
if (&rg->link == head)
break;
if (rg->from > t)
- return chg;
+ goto out;
/* We overlap with this area, if it extends further than
* us then we must extend ourselves. Account for its
@@ -232,20 +247,30 @@ static long region_chg(struct list_head *head, long f, long t)
}
chg -= rg->to - rg->from;
}
+
+out:
+ spin_unlock(&resv->lock);
+ /* We already know we raced and no longer need the new region */
+ kfree(nrg);
+ return chg;
+out_nrg:
+ spin_unlock(&resv->lock);
return chg;
}
-static long region_truncate(struct list_head *head, long end)
+static long region_truncate(struct resv_map *resv, long end)
{
+ struct list_head *head = &resv->regions;
struct file_region *rg, *trg;
long chg = 0;
+ spin_lock(&resv->lock);
/* Locate the region we are either in or before. */
list_for_each_entry(rg, head, link)
if (end <= rg->to)
break;
if (&rg->link == head)
- return 0;
+ goto out;
/* If we are in the middle of a region then adjust it. */
if (end > rg->from) {
@@ -262,14 +287,19 @@ static long region_truncate(struct list_head *head, long end)
list_del(&rg->link);
kfree(rg);
}
+
+out:
+ spin_unlock(&resv->lock);
return chg;
}
-static long region_count(struct list_head *head, long f, long t)
+static long region_count(struct resv_map *resv, long f, long t)
{
+ struct list_head *head = &resv->regions;
struct file_region *rg;
long chg = 0;
+ spin_lock(&resv->lock);
/* Locate each segment we overlap with, and count that overlap. */
list_for_each_entry(rg, head, link) {
long seg_from;
@@ -285,6 +315,7 @@ static long region_count(struct list_head *head, long f, long t)
chg += seg_to - seg_from;
}
+ spin_unlock(&resv->lock);
return chg;
}
@@ -319,7 +350,7 @@ unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
hstate = hstate_vma(vma);
- return 1UL << (hstate->order + PAGE_SHIFT);
+ return 1UL << huge_page_shift(hstate);
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
@@ -375,39 +406,46 @@ static void set_vma_private_data(struct vm_area_struct *vma,
vma->vm_private_data = (void *)value;
}
-struct resv_map {
- struct kref refs;
- struct list_head regions;
-};
-
-static struct resv_map *resv_map_alloc(void)
+struct resv_map *resv_map_alloc(void)
{
struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
if (!resv_map)
return NULL;
kref_init(&resv_map->refs);
+ spin_lock_init(&resv_map->lock);
INIT_LIST_HEAD(&resv_map->regions);
return resv_map;
}
-static void resv_map_release(struct kref *ref)
+void resv_map_release(struct kref *ref)
{
struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
/* Clear out any active regions before we release the map. */
- region_truncate(&resv_map->regions, 0);
+ region_truncate(resv_map, 0);
kfree(resv_map);
}
+static inline struct resv_map *inode_resv_map(struct inode *inode)
+{
+ return inode->i_mapping->private_data;
+}
+
static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
VM_BUG_ON(!is_vm_hugetlb_page(vma));
- if (!(vma->vm_flags & VM_MAYSHARE))
+ if (vma->vm_flags & VM_MAYSHARE) {
+ struct address_space *mapping = vma->vm_file->f_mapping;
+ struct inode *inode = mapping->host;
+
+ return inode_resv_map(inode);
+
+ } else {
return (struct resv_map *)(get_vma_private_data(vma) &
~HPAGE_RESV_MASK);
- return NULL;
+ }
}
static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
@@ -434,25 +472,6 @@ static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
return (get_vma_private_data(vma) & flag) != 0;
}
-/* Decrement the reserved pages in the hugepage pool by one */
-static void decrement_hugepage_resv_vma(struct hstate *h,
- struct vm_area_struct *vma)
-{
- if (vma->vm_flags & VM_NORESERVE)
- return;
-
- if (vma->vm_flags & VM_MAYSHARE) {
- /* Shared mappings always use reserves */
- h->resv_huge_pages--;
- } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
- /*
- * Only the process that called mmap() has reserves for
- * private mappings.
- */
- h->resv_huge_pages--;
- }
-}
-
/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
@@ -462,47 +481,36 @@ void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
}
/* Returns true if the VMA has associated reserve pages */
-static int vma_has_reserves(struct vm_area_struct *vma)
+static int vma_has_reserves(struct vm_area_struct *vma, long chg)
{
+ if (vma->vm_flags & VM_NORESERVE) {
+ /*
+ * This address is already reserved by another process (chg == 0),
+ * so we should decrement the reserved count. Without decrementing,
+ * the reserve count would remain after releasing the inode, because
+ * this allocated page will go into the page cache and be regarded
+ * as coming from the reserved pool when it is released. We don't
+ * currently have a better way to deal with this situation, so add
+ * a work-around here.
+ */
+ if (vma->vm_flags & VM_MAYSHARE && chg == 0)
+ return 1;
+ else
+ return 0;
+ }
+
+ /* Shared mappings always use reserves */
if (vma->vm_flags & VM_MAYSHARE)
return 1;
+
+ /*
+ * Only the process that called mmap() has reserves for
+ * private mappings.
+ */
if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
return 1;
- return 0;
-}
-static void copy_gigantic_page(struct page *dst, struct page *src)
-{
- int i;
- struct hstate *h = page_hstate(src);
- struct page *dst_base = dst;
- struct page *src_base = src;
-
- for (i = 0; i < pages_per_huge_page(h); ) {
- cond_resched();
- copy_highpage(dst, src);
-
- i++;
- dst = mem_map_next(dst, dst_base, i);
- src = mem_map_next(src, src_base, i);
- }
-}
-
-void copy_huge_page(struct page *dst, struct page *src)
-{
- int i;
- struct hstate *h = page_hstate(src);
-
- if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
- copy_gigantic_page(dst, src);
- return;
- }
-
- might_sleep();
- for (i = 0; i < pages_per_huge_page(h); i++) {
- cond_resched();
- copy_highpage(dst + i, src + i);
- }
+ return 0;
}
static void enqueue_huge_page(struct hstate *h, struct page *page)
@@ -517,9 +525,15 @@ static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
{
struct page *page;
- if (list_empty(&h->hugepage_freelists[nid]))
+ list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
+ if (!is_migrate_isolate_page(page))
+ break;
+ /*
+ * If no non-isolated free hugepage was found on the list,
+ * the allocation fails.
+ */
+ if (&h->hugepage_freelists[nid] == &page->lru)
return NULL;
- page = list_entry(h->hugepage_freelists[nid].next, struct page, lru);
list_move(&page->lru, &h->hugepage_activelist);
set_page_refcounted(page);
h->free_huge_pages--;
@@ -527,9 +541,19 @@ static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
return page;
}
+/* Movability of hugepages depends on migration support. */
+static inline gfp_t htlb_alloc_mask(struct hstate *h)
+{
+ if (hugepages_treat_as_movable || hugepage_migration_support(h))
+ return GFP_HIGHUSER_MOVABLE;
+ else
+ return GFP_HIGHUSER;
+}
+
static struct page *dequeue_huge_page_vma(struct hstate *h,
struct vm_area_struct *vma,
- unsigned long address, int avoid_reserve)
+ unsigned long address, int avoid_reserve,
+ long chg)
{
struct page *page = NULL;
struct mempolicy *mpol;
@@ -539,16 +563,12 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
struct zoneref *z;
unsigned int cpuset_mems_cookie;
-retry_cpuset:
- cpuset_mems_cookie = get_mems_allowed();
- zonelist = huge_zonelist(vma, address,
- htlb_alloc_mask, &mpol, &nodemask);
/*
* A child process with MAP_PRIVATE mappings created by their parent
* have no page reserves. This check ensures that reservations are
* not "stolen". The child may still get SIGKILLed
*/
- if (!vma_has_reserves(vma) &&
+ if (!vma_has_reserves(vma, chg) &&
h->free_huge_pages - h->resv_huge_pages == 0)
goto err;
@@ -556,25 +576,34 @@ retry_cpuset:
if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
goto err;
+retry_cpuset:
+ cpuset_mems_cookie = read_mems_allowed_begin();
+ zonelist = huge_zonelist(vma, address,
+ htlb_alloc_mask(h), &mpol, &nodemask);
+
for_each_zone_zonelist_nodemask(zone, z, zonelist,
MAX_NR_ZONES - 1, nodemask) {
- if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask)) {
+ if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask(h))) {
page = dequeue_huge_page_node(h, zone_to_nid(zone));
if (page) {
- if (!avoid_reserve)
- decrement_hugepage_resv_vma(h, vma);
+ if (avoid_reserve)
+ break;
+ if (!vma_has_reserves(vma, chg))
+ break;
+
+ SetPagePrivate(page);
+ h->resv_huge_pages--;
break;
}
}
}
mpol_cond_put(mpol);
- if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
+ if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
goto retry_cpuset;
return page;
err:
- mpol_cond_put(mpol);
return NULL;
}
@@ -592,7 +621,7 @@ static void update_and_free_page(struct hstate *h, struct page *page)
1 << PG_active | 1 << PG_reserved |
1 << PG_private | 1 << PG_writeback);
}
- VM_BUG_ON(hugetlb_cgroup_from_page(page));
+ VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
set_compound_page_dtor(page, NULL);
set_page_refcounted(page);
arch_release_hugepage(page);
@@ -620,15 +649,21 @@ static void free_huge_page(struct page *page)
int nid = page_to_nid(page);
struct hugepage_subpool *spool =
(struct hugepage_subpool *)page_private(page);
+ bool restore_reserve;
set_page_private(page, 0);
page->mapping = NULL;
BUG_ON(page_count(page));
BUG_ON(page_mapcount(page));
+ restore_reserve = PagePrivate(page);
+ ClearPagePrivate(page);
spin_lock(&hugetlb_lock);
hugetlb_cgroup_uncharge_page(hstate_index(h),
pages_per_huge_page(h), page);
+ if (restore_reserve)
+ h->resv_huge_pages++;
+
if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
/* remove the page from active list */
list_del(&page->lru);
@@ -655,7 +690,8 @@ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
put_page(page); /* free it into the hugepage allocator */
}
-static void prep_compound_gigantic_page(struct page *page, unsigned long order)
+static void __init prep_compound_gigantic_page(struct page *page,
+ unsigned long order)
{
int i;
int nr_pages = 1 << order;
@@ -664,8 +700,22 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order)
/* we rely on prep_new_huge_page to set the destructor */
set_compound_order(page, order);
__SetPageHead(page);
+ __ClearPageReserved(page);
for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
__SetPageTail(p);
+ /*
+ * For gigantic hugepages allocated through bootmem at
+ * boot, it's safer to be consistent with the not-gigantic
+ * hugepages and clear the PG_reserved bit from all tail pages
+ * too. Otherwise drivers using get_user_pages() to access tail
+ * pages may get the reference counting wrong if they see
+ * PG_reserved set on a tail page (despite the head page not
+ * having PG_reserved set). Enforcing this consistency between
+ * head and tail pages allows drivers to optimize away a check
+ * on the head page when they need to know if put_page() is needed
+ * after get_user_pages().
+ */
+ __ClearPageReserved(p);
set_page_count(p, 0);
p->first_page = page;
}
@@ -678,18 +728,43 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order)
*/
int PageHuge(struct page *page)
{
- compound_page_dtor *dtor;
-
if (!PageCompound(page))
return 0;
page = compound_head(page);
- dtor = get_compound_page_dtor(page);
-
- return dtor == free_huge_page;
+ return get_compound_page_dtor(page) == free_huge_page;
}
EXPORT_SYMBOL_GPL(PageHuge);
+/*
+ * PageHeadHuge() only returns true for hugetlbfs head page, but not for
+ * normal or transparent huge pages.
+ */
+int PageHeadHuge(struct page *page_head)
+{
+ if (!PageHead(page_head))
+ return 0;
+
+ return get_compound_page_dtor(page_head) == free_huge_page;
+}
+
+pgoff_t __basepage_index(struct page *page)
+{
+ struct page *page_head = compound_head(page);
+ pgoff_t index = page_index(page_head);
+ unsigned long compound_idx;
+
+ if (!PageHuge(page_head))
+ return page_index(page);
+
+ if (compound_order(page_head) >= MAX_ORDER)
+ compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
+ else
+ compound_idx = page - page_head;
+
+ return (index << compound_order(page_head)) + compound_idx;
+}
+
static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
{
struct page *page;
@@ -698,7 +773,7 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
return NULL;
page = alloc_pages_exact_node(nid,
- htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
+ htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
__GFP_REPEAT|__GFP_NOWARN,
huge_page_order(h));
if (page) {
@@ -755,33 +830,6 @@ static int hstate_next_node_to_alloc(struct hstate *h,
return nid;
}
-static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
-{
- struct page *page;
- int start_nid;
- int next_nid;
- int ret = 0;
-
- start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
- next_nid = start_nid;
-
- do {
- page = alloc_fresh_huge_page_node(h, next_nid);
- if (page) {
- ret = 1;
- break;
- }
- next_nid = hstate_next_node_to_alloc(h, nodes_allowed);
- } while (next_nid != start_nid);
-
- if (ret)
- count_vm_event(HTLB_BUDDY_PGALLOC);
- else
- count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
-
- return ret;
-}
-
/*
* helper for free_pool_huge_page() - return the previously saved
* node ["this node"] from which to free a huge page. Advance the
@@ -800,6 +848,40 @@ static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
return nid;
}
+#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask) \
+ for (nr_nodes = nodes_weight(*mask); \
+ nr_nodes > 0 && \
+ ((node = hstate_next_node_to_alloc(hs, mask)) || 1); \
+ nr_nodes--)
+
+#define for_each_node_mask_to_free(hs, nr_nodes, node, mask) \
+ for (nr_nodes = nodes_weight(*mask); \
+ nr_nodes > 0 && \
+ ((node = hstate_next_node_to_free(hs, mask)) || 1); \
+ nr_nodes--)
+
+static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
+{
+ struct page *page;
+ int nr_nodes, node;
+ int ret = 0;
+
+ for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
+ page = alloc_fresh_huge_page_node(h, node);
+ if (page) {
+ ret = 1;
+ break;
+ }
+ }
+
+ if (ret)
+ count_vm_event(HTLB_BUDDY_PGALLOC);
+ else
+ count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
+
+ return ret;
+}
+
/*
* Free huge page from pool from next node to free.
* Attempt to keep persistent huge pages more or less
@@ -809,40 +891,73 @@ static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
bool acct_surplus)
{
- int start_nid;
- int next_nid;
+ int nr_nodes, node;
int ret = 0;
- start_nid = hstate_next_node_to_free(h, nodes_allowed);
- next_nid = start_nid;
-
- do {
+ for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
/*
* If we're returning unused surplus pages, only examine
* nodes with surplus pages.
*/
- if ((!acct_surplus || h->surplus_huge_pages_node[next_nid]) &&
- !list_empty(&h->hugepage_freelists[next_nid])) {
+ if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
+ !list_empty(&h->hugepage_freelists[node])) {
struct page *page =
- list_entry(h->hugepage_freelists[next_nid].next,
+ list_entry(h->hugepage_freelists[node].next,
struct page, lru);
list_del(&page->lru);
h->free_huge_pages--;
- h->free_huge_pages_node[next_nid]--;
+ h->free_huge_pages_node[node]--;
if (acct_surplus) {
h->surplus_huge_pages--;
- h->surplus_huge_pages_node[next_nid]--;
+ h->surplus_huge_pages_node[node]--;
}
update_and_free_page(h, page);
ret = 1;
break;
}
- next_nid = hstate_next_node_to_free(h, nodes_allowed);
- } while (next_nid != start_nid);
+ }
return ret;
}
+/*
+ * Dissolve a given free hugepage into free buddy pages. This function does
+ * nothing for in-use (including surplus) hugepages.
+ */
+static void dissolve_free_huge_page(struct page *page)
+{
+ spin_lock(&hugetlb_lock);
+ if (PageHuge(page) && !page_count(page)) {
+ struct hstate *h = page_hstate(page);
+ int nid = page_to_nid(page);
+ list_del(&page->lru);
+ h->free_huge_pages--;
+ h->free_huge_pages_node[nid]--;
+ update_and_free_page(h, page);
+ }
+ spin_unlock(&hugetlb_lock);
+}
+
+/*
+ * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
+ * make specified memory blocks removable from the system.
+ * Note that start_pfn should be aligned with the (minimum) hugepage size.
+ */
+void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
+{
+ unsigned int order = 8 * sizeof(void *);
+ unsigned long pfn;
+ struct hstate *h;
+
+ /* Set scan step to minimum hugepage size */
+ for_each_hstate(h)
+ if (order > huge_page_order(h))
+ order = huge_page_order(h);
+ VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << order));
+ for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order)
+ dissolve_free_huge_page(pfn_to_page(pfn));
+}
+
static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
{
struct page *page;
@@ -885,12 +1000,12 @@ static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
spin_unlock(&hugetlb_lock);
if (nid == NUMA_NO_NODE)
- page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
+ page = alloc_pages(htlb_alloc_mask(h)|__GFP_COMP|
__GFP_REPEAT|__GFP_NOWARN,
huge_page_order(h));
else
page = alloc_pages_exact_node(nid,
- htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
+ htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
__GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));
if (page && arch_prepare_hugepage(page)) {
@@ -927,10 +1042,11 @@ static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
*/
struct page *alloc_huge_page_node(struct hstate *h, int nid)
{
- struct page *page;
+ struct page *page = NULL;
spin_lock(&hugetlb_lock);
- page = dequeue_huge_page_node(h, nid);
+ if (h->free_huge_pages - h->resv_huge_pages > 0)
+ page = dequeue_huge_page_node(h, nid);
spin_unlock(&hugetlb_lock);
if (!page)
@@ -1011,18 +1127,15 @@ retry:
* no users -- drop the buddy allocator's reference.
*/
put_page_testzero(page);
- VM_BUG_ON(page_count(page));
+ VM_BUG_ON_PAGE(page_count(page), page);
enqueue_huge_page(h, page);
}
free:
spin_unlock(&hugetlb_lock);
/* Free unnecessary surplus pages to the buddy allocator */
- if (!list_empty(&surplus_list)) {
- list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
- put_page(page);
- }
- }
+ list_for_each_entry_safe(page, tmp, &surplus_list, lru)
+ put_page(page);
spin_lock(&hugetlb_lock);
return ret;
@@ -1059,6 +1172,7 @@ static void return_unused_surplus_pages(struct hstate *h,
while (nr_pages--) {
if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
break;
+ cond_resched_lock(&hugetlb_lock);
}
}
@@ -1075,45 +1189,34 @@ static void return_unused_surplus_pages(struct hstate *h,
static long vma_needs_reservation(struct hstate *h,
struct vm_area_struct *vma, unsigned long addr)
{
- struct address_space *mapping = vma->vm_file->f_mapping;
- struct inode *inode = mapping->host;
-
- if (vma->vm_flags & VM_MAYSHARE) {
- pgoff_t idx = vma_hugecache_offset(h, vma, addr);
- return region_chg(&inode->i_mapping->private_list,
- idx, idx + 1);
+ struct resv_map *resv;
+ pgoff_t idx;
+ long chg;
- } else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
+ resv = vma_resv_map(vma);
+ if (!resv)
return 1;
- } else {
- long err;
- pgoff_t idx = vma_hugecache_offset(h, vma, addr);
- struct resv_map *reservations = vma_resv_map(vma);
+ idx = vma_hugecache_offset(h, vma, addr);
+ chg = region_chg(resv, idx, idx + 1);
- err = region_chg(&reservations->regions, idx, idx + 1);
- if (err < 0)
- return err;
- return 0;
- }
+ if (vma->vm_flags & VM_MAYSHARE)
+ return chg;
+ else
+ return chg < 0 ? chg : 0;
}
static void vma_commit_reservation(struct hstate *h,
struct vm_area_struct *vma, unsigned long addr)
{
- struct address_space *mapping = vma->vm_file->f_mapping;
- struct inode *inode = mapping->host;
-
- if (vma->vm_flags & VM_MAYSHARE) {
- pgoff_t idx = vma_hugecache_offset(h, vma, addr);
- region_add(&inode->i_mapping->private_list, idx, idx + 1);
+ struct resv_map *resv;
+ pgoff_t idx;
- } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
- pgoff_t idx = vma_hugecache_offset(h, vma, addr);
- struct resv_map *reservations = vma_resv_map(vma);
+ resv = vma_resv_map(vma);
+ if (!resv)
+ return;
- /* Mark this page used in the map. */
- region_add(&reservations->regions, idx, idx + 1);
- }
+ idx = vma_hugecache_offset(h, vma, addr);
+ region_add(resv, idx, idx + 1);
}
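Reservations remain a two-phase protocol: region_chg() computes how many new pages a range would need before the allocation, and region_add() commits the range once the page has actually been instantiated. A hedged sketch of the calling sequence mirrored by vma_needs_reservation()/vma_commit_reservation() above (illustrative only; subpool and cgroup accounting are omitted):

static int example_reserve_and_commit(struct resv_map *resv, pgoff_t idx)
{
	long chg;

	chg = region_chg(resv, idx, idx + 1);	/* phase 1: how much is new? */
	if (chg < 0)
		return -ENOMEM;			/* region bookkeeping failed */

	/* ... allocate and map the huge page here ... */

	region_add(resv, idx, idx + 1);		/* phase 2: record the range */
	return 0;
}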
static struct page *alloc_huge_page(struct vm_area_struct *vma,
@@ -1138,38 +1241,35 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
chg = vma_needs_reservation(h, vma, addr);
if (chg < 0)
return ERR_PTR(-ENOMEM);
- if (chg)
- if (hugepage_subpool_get_pages(spool, chg))
+ if (chg || avoid_reserve)
+ if (hugepage_subpool_get_pages(spool, 1))
return ERR_PTR(-ENOSPC);
ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
if (ret) {
- hugepage_subpool_put_pages(spool, chg);
+ if (chg || avoid_reserve)
+ hugepage_subpool_put_pages(spool, 1);
return ERR_PTR(-ENOSPC);
}
spin_lock(&hugetlb_lock);
- page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
- if (page) {
- /* update page cgroup details */
- hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h),
- h_cg, page);
- spin_unlock(&hugetlb_lock);
- } else {
+ page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, chg);
+ if (!page) {
spin_unlock(&hugetlb_lock);
page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
if (!page) {
hugetlb_cgroup_uncharge_cgroup(idx,
pages_per_huge_page(h),
h_cg);
- hugepage_subpool_put_pages(spool, chg);
+ if (chg || avoid_reserve)
+ hugepage_subpool_put_pages(spool, 1);
return ERR_PTR(-ENOSPC);
}
spin_lock(&hugetlb_lock);
- hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h),
- h_cg, page);
list_move(&page->lru, &h->hugepage_activelist);
- spin_unlock(&hugetlb_lock);
+ /* Fall through */
}
+ hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
+ spin_unlock(&hugetlb_lock);
set_page_private(page, (unsigned long)spool);
@@ -1177,19 +1277,31 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
return page;
}
+/*
+ * alloc_huge_page()'s wrapper which simply returns the page if allocation
+ * succeeds, otherwise NULL. This function is called from new_vma_page(),
+ * where no ERR_PTR() value is expected to be returned.
+ */
+struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
+ unsigned long addr, int avoid_reserve)
+{
+ struct page *page = alloc_huge_page(vma, addr, avoid_reserve);
+ if (IS_ERR(page))
+ page = NULL;
+ return page;
+}
+
int __weak alloc_bootmem_huge_page(struct hstate *h)
{
struct huge_bootmem_page *m;
- int nr_nodes = nodes_weight(node_states[N_MEMORY]);
+ int nr_nodes, node;
- while (nr_nodes) {
+ for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
void *addr;
- addr = __alloc_bootmem_node_nopanic(
- NODE_DATA(hstate_next_node_to_alloc(h,
- &node_states[N_MEMORY])),
- huge_page_size(h), huge_page_size(h), 0);
-
+ addr = memblock_virt_alloc_try_nid_nopanic(
+ huge_page_size(h), huge_page_size(h),
+ 0, BOOTMEM_ALLOC_ACCESSIBLE, node);
if (addr) {
/*
* Use the beginning of the huge page to store the
@@ -1199,7 +1311,6 @@ int __weak alloc_bootmem_huge_page(struct hstate *h)
m = addr;
goto found;
}
- nr_nodes--;
}
return 0;
@@ -1211,7 +1322,7 @@ found:
return 1;
}
-static void prep_compound_huge_page(struct page *page, int order)
+static void __init prep_compound_huge_page(struct page *page, int order)
{
if (unlikely(order > (MAX_ORDER - 1)))
prep_compound_gigantic_page(page, order);
@@ -1230,14 +1341,14 @@ static void __init gather_bootmem_prealloc(void)
#ifdef CONFIG_HIGHMEM
page = pfn_to_page(m->phys >> PAGE_SHIFT);
- free_bootmem_late((unsigned long)m,
- sizeof(struct huge_bootmem_page));
+ memblock_free_late(__pa(m),
+ sizeof(struct huge_bootmem_page));
#else
page = virt_to_page(m);
#endif
- __ClearPageReserved(page);
WARN_ON(page_count(page) != 1);
prep_compound_huge_page(page, h->order);
+ WARN_ON(PageReserved(page));
prep_new_huge_page(h, page, page_to_nid(page));
/*
* If we had gigantic hugepages allocated at boot time, we need
@@ -1246,7 +1357,7 @@ static void __init gather_bootmem_prealloc(void)
* side-effects, like CommitLimit going negative.
*/
if (h->order > (MAX_ORDER - 1))
- totalram_pages += 1 << h->order;
+ adjust_managed_page_count(page, 1 << h->order);
}
}
@@ -1338,48 +1449,28 @@ static inline void try_to_free_low(struct hstate *h, unsigned long count,
static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
int delta)
{
- int start_nid, next_nid;
- int ret = 0;
+ int nr_nodes, node;
VM_BUG_ON(delta != -1 && delta != 1);
- if (delta < 0)
- start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
- else
- start_nid = hstate_next_node_to_free(h, nodes_allowed);
- next_nid = start_nid;
-
- do {
- int nid = next_nid;
- if (delta < 0) {
- /*
- * To shrink on this node, there must be a surplus page
- */
- if (!h->surplus_huge_pages_node[nid]) {
- next_nid = hstate_next_node_to_alloc(h,
- nodes_allowed);
- continue;
- }
+ if (delta < 0) {
+ for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
+ if (h->surplus_huge_pages_node[node])
+ goto found;
}
- if (delta > 0) {
- /*
- * Surplus cannot exceed the total number of pages
- */
- if (h->surplus_huge_pages_node[nid] >=
- h->nr_huge_pages_node[nid]) {
- next_nid = hstate_next_node_to_free(h,
- nodes_allowed);
- continue;
- }
+ } else {
+ for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
+ if (h->surplus_huge_pages_node[node] <
+ h->nr_huge_pages_node[node])
+ goto found;
}
+ }
+ return 0;
- h->surplus_huge_pages += delta;
- h->surplus_huge_pages_node[nid] += delta;
- ret = 1;
- break;
- } while (next_nid != start_nid);
-
- return ret;
+found:
+ h->surplus_huge_pages += delta;
+ h->surplus_huge_pages_node[node] += delta;
+ return 1;
}
#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
@@ -1446,6 +1537,7 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
while (min_count < persistent_huge_pages(h)) {
if (!free_pool_huge_page(h, nodes_allowed, 0))
break;
+ cond_resched_lock(&hugetlb_lock);
}
while (count < persistent_huge_pages(h)) {
if (!adjust_pool_surplus(h, nodes_allowed, 1))
@@ -1509,7 +1601,7 @@ static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
struct hstate *h;
NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
- err = strict_strtoul(buf, 10, &count);
+ err = kstrtoul(buf, 10, &count);
if (err)
goto out;
@@ -1600,7 +1692,7 @@ static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
if (h->order >= MAX_ORDER)
return -EINVAL;
- err = strict_strtoul(buf, 10, &input);
+ err = kstrtoul(buf, 10, &input);
if (err)
return err;
@@ -1881,16 +1973,15 @@ static void __exit hugetlb_exit(void)
}
kobject_put(hugepages_kobj);
+ kfree(htlb_fault_mutex_table);
}
module_exit(hugetlb_exit);
static int __init hugetlb_init(void)
{
- /* Some platform decide whether they support huge pages at boot
- * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
- * there is no such support
- */
- if (HPAGE_SHIFT == 0)
+ int i;
+
+ if (!hugepages_supported())
return 0;
if (!size_to_hstate(default_hstate_size)) {
@@ -1910,6 +2001,17 @@ static int __init hugetlb_init(void)
hugetlb_register_all_nodes();
hugetlb_cgroup_file_init();
+#ifdef CONFIG_SMP
+ num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
+#else
+ num_fault_mutexes = 1;
+#endif
+ htlb_fault_mutex_table =
+ kmalloc(sizeof(struct mutex) * num_fault_mutexes, GFP_KERNEL);
+ BUG_ON(!htlb_fault_mutex_table);
+
+ for (i = 0; i < num_fault_mutexes; i++)
+ mutex_init(&htlb_fault_mutex_table[i]);
return 0;
}
module_init(hugetlb_init);
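The table of num_fault_mutexes mutexes serialises faults that hit the same logical hugepage. The index is derived from the identity of the faulting page; the hashing helper itself is outside this hunk, so the sketch below is only an assumption of how such an index could be computed with the jhash2() helper this patch starts including (num_fault_mutexes is 1 on UP and a power of two on SMP, so masking is safe):

static u32 example_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
{
	u32 key[2];

	key[0] = (u32)(unsigned long)mapping;	/* which file/inode */
	key[1] = (u32)idx;			/* which hugepage within it */

	return jhash2(key, ARRAY_SIZE(key), 0) & (num_fault_mutexes - 1);
}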
@@ -2006,6 +2108,9 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
unsigned long tmp;
int ret;
+ if (!hugepages_supported())
+ return -ENOTSUPP;
+
tmp = h->max_huge_pages;
if (write && h->order >= MAX_ORDER)
@@ -2051,18 +2156,6 @@ int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
}
#endif /* CONFIG_NUMA */
-int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
- void __user *buffer,
- size_t *length, loff_t *ppos)
-{
- proc_dointvec(table, write, buffer, length, ppos);
- if (hugepages_treat_as_movable)
- htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
- else
- htlb_alloc_mask = GFP_HIGHUSER;
- return 0;
-}
-
int hugetlb_overcommit_handler(struct ctl_table *table, int write,
void __user *buffer,
size_t *length, loff_t *ppos)
@@ -2071,6 +2164,9 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
unsigned long tmp;
int ret;
+ if (!hugepages_supported())
+ return -ENOTSUPP;
+
tmp = h->nr_overcommit_huge_pages;
if (write && h->order >= MAX_ORDER)
@@ -2096,6 +2192,8 @@ out:
void hugetlb_report_meminfo(struct seq_file *m)
{
struct hstate *h = &default_hstate;
+ if (!hugepages_supported())
+ return;
seq_printf(m,
"HugePages_Total: %5lu\n"
"HugePages_Free: %5lu\n"
@@ -2112,6 +2210,8 @@ void hugetlb_report_meminfo(struct seq_file *m)
int hugetlb_report_node_meminfo(int nid, char *buf)
{
struct hstate *h = &default_hstate;
+ if (!hugepages_supported())
+ return 0;
return sprintf(buf,
"Node %d HugePages_Total: %5u\n"
"Node %d HugePages_Free: %5u\n"
@@ -2126,6 +2226,9 @@ void hugetlb_show_meminfo(void)
struct hstate *h;
int nid;
+ if (!hugepages_supported())
+ return;
+
for_each_node_state(nid, N_MEMORY)
for_each_hstate(h)
pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
@@ -2190,7 +2293,7 @@ out:
static void hugetlb_vm_op_open(struct vm_area_struct *vma)
{
- struct resv_map *reservations = vma_resv_map(vma);
+ struct resv_map *resv = vma_resv_map(vma);
/*
* This new VMA should share its siblings reservation map if present.
@@ -2200,41 +2303,30 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma)
* after this open call completes. It is therefore safe to take a
* new reference here without additional locking.
*/
- if (reservations)
- kref_get(&reservations->refs);
-}
-
-static void resv_map_put(struct vm_area_struct *vma)
-{
- struct resv_map *reservations = vma_resv_map(vma);
-
- if (!reservations)
- return;
- kref_put(&reservations->refs, resv_map_release);
+ if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
+ kref_get(&resv->refs);
}
static void hugetlb_vm_op_close(struct vm_area_struct *vma)
{
struct hstate *h = hstate_vma(vma);
- struct resv_map *reservations = vma_resv_map(vma);
+ struct resv_map *resv = vma_resv_map(vma);
struct hugepage_subpool *spool = subpool_vma(vma);
- unsigned long reserve;
- unsigned long start;
- unsigned long end;
+ unsigned long reserve, start, end;
- if (reservations) {
- start = vma_hugecache_offset(h, vma, vma->vm_start);
- end = vma_hugecache_offset(h, vma, vma->vm_end);
+ if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
+ return;
- reserve = (end - start) -
- region_count(&reservations->regions, start, end);
+ start = vma_hugecache_offset(h, vma, vma->vm_start);
+ end = vma_hugecache_offset(h, vma, vma->vm_end);
- resv_map_put(vma);
+ reserve = (end - start) - region_count(resv, start, end);
- if (reserve) {
- hugetlb_acct_memory(h, -reserve);
- hugepage_subpool_put_pages(spool, reserve);
- }
+ kref_put(&resv->refs, resv_map_release);
+
+ if (reserve) {
+ hugetlb_acct_memory(h, -reserve);
+ hugepage_subpool_put_pages(spool, reserve);
}
}
@@ -2295,23 +2387,35 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
int cow;
struct hstate *h = hstate_vma(vma);
unsigned long sz = huge_page_size(h);
+ unsigned long mmun_start; /* For mmu_notifiers */
+ unsigned long mmun_end; /* For mmu_notifiers */
+ int ret = 0;
cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
+ mmun_start = vma->vm_start;
+ mmun_end = vma->vm_end;
+ if (cow)
+ mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end);
+
for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
+ spinlock_t *src_ptl, *dst_ptl;
src_pte = huge_pte_offset(src, addr);
if (!src_pte)
continue;
dst_pte = huge_pte_alloc(dst, addr, sz);
- if (!dst_pte)
- goto nomem;
+ if (!dst_pte) {
+ ret = -ENOMEM;
+ break;
+ }
/* If the pagetables are shared don't copy or take references */
if (dst_pte == src_pte)
continue;
- spin_lock(&dst->page_table_lock);
- spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
+ dst_ptl = huge_pte_lock(h, dst, dst_pte);
+ src_ptl = huge_pte_lockptr(h, src, src_pte);
+ spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
if (!huge_pte_none(huge_ptep_get(src_pte))) {
if (cow)
huge_ptep_set_wrprotect(src, addr, src_pte);
@@ -2321,13 +2425,14 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
page_dup_rmap(ptepage);
set_huge_pte_at(dst, addr, dst_pte, entry);
}
- spin_unlock(&src->page_table_lock);
- spin_unlock(&dst->page_table_lock);
+ spin_unlock(src_ptl);
+ spin_unlock(dst_ptl);
}
- return 0;
-nomem:
- return -ENOMEM;
+ if (cow)
+ mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end);
+
+ return ret;
}
static int is_hugetlb_entry_migration(pte_t pte)
@@ -2365,6 +2470,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
unsigned long address;
pte_t *ptep;
pte_t pte;
+ spinlock_t *ptl;
struct page *page;
struct hstate *h = hstate_vma(vma);
unsigned long sz = huge_page_size(h);
@@ -2378,25 +2484,25 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
tlb_start_vma(tlb, vma);
mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
again:
- spin_lock(&mm->page_table_lock);
for (address = start; address < end; address += sz) {
ptep = huge_pte_offset(mm, address);
if (!ptep)
continue;
+ ptl = huge_pte_lock(h, mm, ptep);
if (huge_pmd_unshare(mm, &address, ptep))
- continue;
+ goto unlock;
pte = huge_ptep_get(ptep);
if (huge_pte_none(pte))
- continue;
+ goto unlock;
/*
* HWPoisoned hugepage is already unmapped and dropped reference
*/
if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
huge_pte_clear(mm, address, ptep);
- continue;
+ goto unlock;
}
page = pte_page(pte);
@@ -2407,7 +2513,7 @@ again:
*/
if (ref_page) {
if (page != ref_page)
- continue;
+ goto unlock;
/*
* Mark the VMA as having unmapped its page so that
@@ -2424,13 +2530,18 @@ again:
page_remove_rmap(page);
force_flush = !__tlb_remove_page(tlb, page);
- if (force_flush)
+ if (force_flush) {
+ spin_unlock(ptl);
break;
+ }
/* Bail out after unmapping reference page if supplied */
- if (ref_page)
+ if (ref_page) {
+ spin_unlock(ptl);
break;
+ }
+unlock:
+ spin_unlock(ptl);
}
- spin_unlock(&mm->page_table_lock);
/*
* mmu_gather ran out of room to batch pages, we break out of
* the PTE lock to avoid doing the potential expensive TLB invalidate
@@ -2473,7 +2584,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
mm = vma->vm_mm;
- tlb_gather_mmu(&tlb, mm, 0);
+ tlb_gather_mmu(&tlb, mm, start, end);
__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
tlb_finish_mmu(&tlb, start, end);
}
@@ -2536,11 +2647,10 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
*/
static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *ptep, pte_t pte,
- struct page *pagecache_page)
+ struct page *pagecache_page, spinlock_t *ptl)
{
struct hstate *h = hstate_vma(vma);
struct page *old_page, *new_page;
- int avoidcopy;
int outside_reserve = 0;
unsigned long mmun_start; /* For mmu_notifiers */
unsigned long mmun_end; /* For mmu_notifiers */
@@ -2550,10 +2660,8 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
retry_avoidcopy:
/* If no-one else is actually using this page, avoid the copy
* and just make the page writable */
- avoidcopy = (page_mapcount(old_page) == 1);
- if (avoidcopy) {
- if (PageAnon(old_page))
- page_move_anon_rmap(old_page, vma, address);
+ if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
+ page_move_anon_rmap(old_page, vma, address);
set_huge_ptep_writable(vma, address, ptep);
return 0;
}
@@ -2567,15 +2675,14 @@ retry_avoidcopy:
* at the time of fork() could consume its reserves on COW instead
* of the full address range.
*/
- if (!(vma->vm_flags & VM_MAYSHARE) &&
- is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
+ if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
old_page != pagecache_page)
outside_reserve = 1;
page_cache_get(old_page);
- /* Drop page_table_lock as buddy allocator may be called */
- spin_unlock(&mm->page_table_lock);
+ /* Drop page table lock as buddy allocator may be called */
+ spin_unlock(ptl);
new_page = alloc_huge_page(vma, address, outside_reserve);
if (IS_ERR(new_page)) {
@@ -2593,13 +2700,14 @@ retry_avoidcopy:
BUG_ON(huge_pte_none(pte));
if (unmap_ref_private(mm, vma, old_page, address)) {
BUG_ON(huge_pte_none(pte));
- spin_lock(&mm->page_table_lock);
+ spin_lock(ptl);
ptep = huge_pte_offset(mm, address & huge_page_mask(h));
- if (likely(pte_same(huge_ptep_get(ptep), pte)))
+ if (likely(ptep &&
+ pte_same(huge_ptep_get(ptep), pte)))
goto retry_avoidcopy;
/*
- * race occurs while re-acquiring page_table_lock, and
- * our job is done.
+ * race occurs while re-acquiring page table
+ * lock, and our job is done.
*/
return 0;
}
@@ -2607,7 +2715,7 @@ retry_avoidcopy:
}
/* Caller expects lock to be held */
- spin_lock(&mm->page_table_lock);
+ spin_lock(ptl);
if (err == -ENOMEM)
return VM_FAULT_OOM;
else
@@ -2622,7 +2730,7 @@ retry_avoidcopy:
page_cache_release(new_page);
page_cache_release(old_page);
/* Caller expects lock to be held */
- spin_lock(&mm->page_table_lock);
+ spin_lock(ptl);
return VM_FAULT_OOM;
}
@@ -2634,12 +2742,14 @@ retry_avoidcopy:
mmun_end = mmun_start + huge_page_size(h);
mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
/*
- * Retake the page_table_lock to check for racing updates
+ * Retake the page table lock to check for racing updates
* before the page tables are altered
*/
- spin_lock(&mm->page_table_lock);
+ spin_lock(ptl);
ptep = huge_pte_offset(mm, address & huge_page_mask(h));
- if (likely(pte_same(huge_ptep_get(ptep), pte))) {
+ if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
+ ClearPagePrivate(new_page);
+
/* Break COW */
huge_ptep_clear_flush(vma, address, ptep);
set_huge_pte_at(mm, address, ptep,
@@ -2649,12 +2759,13 @@ retry_avoidcopy:
/* Make the old page be freed below */
new_page = old_page;
}
- spin_unlock(&mm->page_table_lock);
+ spin_unlock(ptl);
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
- /* Caller expects lock to be held */
- spin_lock(&mm->page_table_lock);
page_cache_release(new_page);
page_cache_release(old_page);
+
+ /* Caller expects lock to be held */
+ spin_lock(ptl);
return 0;
}
@@ -2692,16 +2803,16 @@ static bool hugetlbfs_pagecache_present(struct hstate *h,
}
static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long address, pte_t *ptep, unsigned int flags)
+ struct address_space *mapping, pgoff_t idx,
+ unsigned long address, pte_t *ptep, unsigned int flags)
{
struct hstate *h = hstate_vma(vma);
int ret = VM_FAULT_SIGBUS;
int anon_rmap = 0;
- pgoff_t idx;
unsigned long size;
struct page *page;
- struct address_space *mapping;
pte_t new_pte;
+ spinlock_t *ptl;
/*
* Currently, we are forced to kill the process in the event the
@@ -2714,9 +2825,6 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
return ret;
}
- mapping = vma->vm_file->f_mapping;
- idx = vma_hugecache_offset(h, vma, address);
-
/*
* Use page lock to guard against racing truncation
* before we get page_table_lock.
@@ -2750,6 +2858,7 @@ retry:
goto retry;
goto out;
}
+ ClearPagePrivate(page);
spin_lock(&inode->i_lock);
inode->i_blocks += blocks_per_huge_page(h);
@@ -2787,7 +2896,8 @@ retry:
goto backout_unlocked;
}
- spin_lock(&mm->page_table_lock);
+ ptl = huge_pte_lockptr(h, mm, ptep);
+ spin_lock(ptl);
size = i_size_read(mapping->host) >> huge_page_shift(h);
if (idx >= size)
goto backout;
@@ -2796,9 +2906,10 @@ retry:
if (!huge_pte_none(huge_ptep_get(ptep)))
goto backout;
- if (anon_rmap)
+ if (anon_rmap) {
+ ClearPagePrivate(page);
hugepage_add_new_anon_rmap(page, vma, address);
- else
+ } else
page_dup_rmap(page);
new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
&& (vma->vm_flags & VM_SHARED)));
@@ -2806,32 +2917,69 @@ retry:
if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
/* Optimization, do the COW without a second fault */
- ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
+ ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
}
- spin_unlock(&mm->page_table_lock);
+ spin_unlock(ptl);
unlock_page(page);
out:
return ret;
backout:
- spin_unlock(&mm->page_table_lock);
+ spin_unlock(ptl);
backout_unlocked:
unlock_page(page);
put_page(page);
goto out;
}
+#ifdef CONFIG_SMP
+static u32 fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ struct address_space *mapping,
+ pgoff_t idx, unsigned long address)
+{
+ unsigned long key[2];
+ u32 hash;
+
+ if (vma->vm_flags & VM_SHARED) {
+ key[0] = (unsigned long) mapping;
+ key[1] = idx;
+ } else {
+ key[0] = (unsigned long) mm;
+ key[1] = address >> huge_page_shift(h);
+ }
+
+ hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
+
+ return hash & (num_fault_mutexes - 1);
+}
+#else
+/*
+ * For uniprocessor systems we always use a single mutex, so just
+ * return 0 and avoid the hashing overhead.
+ */
+static u32 fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ struct address_space *mapping,
+ pgoff_t idx, unsigned long address)
+{
+ return 0;
+}
+#endif
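
Not part of the patch: a minimal user-space sketch of the table-indexing scheme above. The key is hashed and masked with (table size - 1), which only stays in range because the table size is a power of two; jhash2() is kernel-internal, so a trivial stand-in mix is used here, and the table size is an assumed example value.

#include <stdint.h>
#include <stdio.h>

#define NUM_FAULT_MUTEXES 8	/* must be a power of two for the mask to stay in range */

/* stand-in for the kernel's jhash2(); any 32-bit mix of the two key words will do here */
static uint32_t toy_hash(const unsigned long key[2])
{
	uint64_t k0 = key[0], k1 = key[1];
	uint32_t h = (uint32_t)(k0 ^ (k0 >> 32));

	h ^= (uint32_t)(k1 * 2654435761ULL);
	return h;
}

int main(void)
{
	/* shared mapping: key is (mapping pointer, page index), as in the patch */
	unsigned long key[2] = { 0x12345678UL, 42 };
	uint32_t slot = toy_hash(key) & (NUM_FAULT_MUTEXES - 1);

	printf("fault serialised on mutex slot %u of %d\n", (unsigned)slot, NUM_FAULT_MUTEXES);
	return 0;
}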
+
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, unsigned int flags)
{
- pte_t *ptep;
- pte_t entry;
+ pte_t *ptep, entry;
+ spinlock_t *ptl;
int ret;
+ u32 hash;
+ pgoff_t idx;
struct page *page = NULL;
struct page *pagecache_page = NULL;
- static DEFINE_MUTEX(hugetlb_instantiation_mutex);
struct hstate *h = hstate_vma(vma);
+ struct address_space *mapping;
address &= huge_page_mask(h);
@@ -2839,7 +2987,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
if (ptep) {
entry = huge_ptep_get(ptep);
if (unlikely(is_hugetlb_entry_migration(entry))) {
- migration_entry_wait_huge(mm, ptep);
+ migration_entry_wait_huge(vma, mm, ptep);
return 0;
} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
return VM_FAULT_HWPOISON_LARGE |
@@ -2850,15 +2998,20 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
if (!ptep)
return VM_FAULT_OOM;
+ mapping = vma->vm_file->f_mapping;
+ idx = vma_hugecache_offset(h, vma, address);
+
/*
* Serialize hugepage allocation and instantiation, so that we don't
* get spurious allocation failures if two CPUs race to instantiate
* the same page in the page cache.
*/
- mutex_lock(&hugetlb_instantiation_mutex);
+ hash = fault_mutex_hash(h, mm, vma, mapping, idx, address);
+ mutex_lock(&htlb_fault_mutex_table[hash]);
+
entry = huge_ptep_get(ptep);
if (huge_pte_none(entry)) {
- ret = hugetlb_no_page(mm, vma, address, ptep, flags);
+ ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
goto out_mutex;
}
@@ -2895,17 +3048,18 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
if (page != pagecache_page)
lock_page(page);
- spin_lock(&mm->page_table_lock);
+ ptl = huge_pte_lockptr(h, mm, ptep);
+ spin_lock(ptl);
/* Check for a racing update before calling hugetlb_cow */
if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
- goto out_page_table_lock;
+ goto out_ptl;
if (flags & FAULT_FLAG_WRITE) {
if (!huge_pte_write(entry)) {
ret = hugetlb_cow(mm, vma, address, ptep, entry,
- pagecache_page);
- goto out_page_table_lock;
+ pagecache_page, ptl);
+ goto out_ptl;
}
entry = huge_pte_mkdirty(entry);
}
@@ -2914,8 +3068,8 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
flags & FAULT_FLAG_WRITE))
update_mmu_cache(vma, address, ptep);
-out_page_table_lock:
- spin_unlock(&mm->page_table_lock);
+out_ptl:
+ spin_unlock(ptl);
if (pagecache_page) {
unlock_page(pagecache_page);
@@ -2926,20 +3080,10 @@ out_page_table_lock:
put_page(page);
out_mutex:
- mutex_unlock(&hugetlb_instantiation_mutex);
-
+ mutex_unlock(&htlb_fault_mutex_table[hash]);
return ret;
}
-/* Can be overriden by architectures */
-__attribute__((weak)) struct page *
-follow_huge_pud(struct mm_struct *mm, unsigned long address,
- pud_t *pud, int write)
-{
- BUG();
- return NULL;
-}
-
long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
struct page **pages, struct vm_area_struct **vmas,
unsigned long *position, unsigned long *nr_pages,
@@ -2950,9 +3094,9 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long remainder = *nr_pages;
struct hstate *h = hstate_vma(vma);
- spin_lock(&mm->page_table_lock);
while (vaddr < vma->vm_end && remainder) {
pte_t *pte;
+ spinlock_t *ptl = NULL;
int absent;
struct page *page;
@@ -2960,8 +3104,12 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
* Some archs (sparc64, sh*) have multiple pte_ts to
* each hugepage. We have to make sure we get the
* first, for the page indexing below to work.
+ *
+ * Note that page table lock is not held when pte is null.
*/
pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
+ if (pte)
+ ptl = huge_pte_lock(h, mm, pte);
absent = !pte || huge_pte_none(huge_ptep_get(pte));
/*
@@ -2973,6 +3121,8 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
*/
if (absent && (flags & FOLL_DUMP) &&
!hugetlbfs_pagecache_present(h, vma, vaddr)) {
+ if (pte)
+ spin_unlock(ptl);
remainder = 0;
break;
}
@@ -2992,10 +3142,10 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
!huge_pte_write(huge_ptep_get(pte)))) {
int ret;
- spin_unlock(&mm->page_table_lock);
+ if (pte)
+ spin_unlock(ptl);
ret = hugetlb_fault(mm, vma, vaddr,
(flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
- spin_lock(&mm->page_table_lock);
if (!(ret & VM_FAULT_ERROR))
continue;
@@ -3008,7 +3158,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
same_page:
if (pages) {
pages[i] = mem_map_offset(page, pfn_offset);
- get_page(pages[i]);
+ get_page_foll(pages[i]);
}
if (vmas)
@@ -3026,8 +3176,8 @@ same_page:
*/
goto same_page;
}
+ spin_unlock(ptl);
}
- spin_unlock(&mm->page_table_lock);
*nr_pages = remainder;
*position = vaddr;
@@ -3047,14 +3197,17 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
BUG_ON(address >= end);
flush_cache_range(vma, address, end);
+ mmu_notifier_invalidate_range_start(mm, start, end);
mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
- spin_lock(&mm->page_table_lock);
for (; address < end; address += huge_page_size(h)) {
+ spinlock_t *ptl;
ptep = huge_pte_offset(mm, address);
if (!ptep)
continue;
+ ptl = huge_pte_lock(h, mm, ptep);
if (huge_pmd_unshare(mm, &address, ptep)) {
pages++;
+ spin_unlock(ptl);
continue;
}
if (!huge_pte_none(huge_ptep_get(ptep))) {
@@ -3064,8 +3217,8 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
set_huge_pte_at(mm, address, ptep, pte);
pages++;
}
+ spin_unlock(ptl);
}
- spin_unlock(&mm->page_table_lock);
/*
* Must flush TLB before releasing i_mmap_mutex: x86's huge_pmd_unshare
* may have cleared our pud entry and done put_page on the page table:
@@ -3074,6 +3227,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
*/
flush_tlb_range(vma, start, end);
mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
+ mmu_notifier_invalidate_range_end(mm, start, end);
return pages << h->order;
}
@@ -3086,6 +3240,7 @@ int hugetlb_reserve_pages(struct inode *inode,
long ret, chg;
struct hstate *h = hstate_inode(inode);
struct hugepage_subpool *spool = subpool_inode(inode);
+ struct resv_map *resv_map;
/*
* Only apply hugepage reservation if asked. At fault time, an
@@ -3101,10 +3256,13 @@ int hugetlb_reserve_pages(struct inode *inode,
* to reserve the full area even if read-only as mprotect() may be
* called to make the mapping read-write. Assume !vma is a shm mapping
*/
- if (!vma || vma->vm_flags & VM_MAYSHARE)
- chg = region_chg(&inode->i_mapping->private_list, from, to);
- else {
- struct resv_map *resv_map = resv_map_alloc();
+ if (!vma || vma->vm_flags & VM_MAYSHARE) {
+ resv_map = inode_resv_map(inode);
+
+ chg = region_chg(resv_map, from, to);
+
+ } else {
+ resv_map = resv_map_alloc();
if (!resv_map)
return -ENOMEM;
@@ -3147,20 +3305,23 @@ int hugetlb_reserve_pages(struct inode *inode,
* else has to be done for private mappings here
*/
if (!vma || vma->vm_flags & VM_MAYSHARE)
- region_add(&inode->i_mapping->private_list, from, to);
+ region_add(resv_map, from, to);
return 0;
out_err:
- if (vma)
- resv_map_put(vma);
+ if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
+ kref_put(&resv_map->refs, resv_map_release);
return ret;
}
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
{
struct hstate *h = hstate_inode(inode);
- long chg = region_truncate(&inode->i_mapping->private_list, offset);
+ struct resv_map *resv_map = inode_resv_map(inode);
+ long chg = 0;
struct hugepage_subpool *spool = subpool_inode(inode);
+ if (resv_map)
+ chg = region_truncate(resv_map, offset);
spin_lock(&inode->i_lock);
inode->i_blocks -= (blocks_per_huge_page(h) * freed);
spin_unlock(&inode->i_lock);
@@ -3169,6 +3330,218 @@ void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
hugetlb_acct_memory(h, -(chg - freed));
}
+#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
+static unsigned long page_table_shareable(struct vm_area_struct *svma,
+ struct vm_area_struct *vma,
+ unsigned long addr, pgoff_t idx)
+{
+ unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
+ svma->vm_start;
+ unsigned long sbase = saddr & PUD_MASK;
+ unsigned long s_end = sbase + PUD_SIZE;
+
+ /* Allow segments to share if only one is marked locked */
+ unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED;
+ unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED;
+
+ /*
+ * match the virtual addresses, permission and the alignment of the
+ * page table page.
+ */
+ if (pmd_index(addr) != pmd_index(saddr) ||
+ vm_flags != svm_flags ||
+ sbase < svma->vm_start || svma->vm_end < s_end)
+ return 0;
+
+ return saddr;
+}
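
Not part of the patch: a user-space sketch of the address arithmetic behind page_table_shareable(), assuming x86_64 geometry (4 KiB base pages, 1 GiB per PUD entry) and omitting the vm_flags comparison. The names toy_vma and pud_shareable are made up for illustration, and the offset check is simplified to comparing the page offset within the PUD, which matches the pmd_index() comparison for huge-page-aligned addresses.

#include <stdbool.h>
#include <stdio.h>

/* assumed x86_64 geometry: 4 KiB pages, 1 GiB per PUD entry */
#define PAGE_SHIFT	12
#define PUD_SIZE	(1UL << 30)
#define PUD_MASK	(~(PUD_SIZE - 1))

struct toy_vma {
	unsigned long vm_start, vm_end;	/* virtual range */
	unsigned long vm_pgoff;		/* file offset of vm_start, in pages */
};

/*
 * Map the faulting page index back into the other VMA and require the
 * candidate PUD range to sit fully inside it at the same alignment.
 */
static bool pud_shareable(const struct toy_vma *svma, unsigned long addr,
			  unsigned long idx)
{
	unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) + svma->vm_start;
	unsigned long sbase = saddr & PUD_MASK;
	unsigned long s_end = sbase + PUD_SIZE;

	return (addr & ~PUD_MASK) == (saddr & ~PUD_MASK) &&
	       sbase >= svma->vm_start && s_end <= svma->vm_end;
}

int main(void)
{
	/* another 1 GiB-aligned mapping of the same file region, starting at pgoff 0 */
	struct toy_vma other = { 0x40000000UL, 0x80000000UL, 0 };
	unsigned long addr = 0x80200000UL;	/* fault address in "our" VMA at 0x80000000 */
	unsigned long idx = (addr - 0x80000000UL) >> PAGE_SHIFT;	/* our vm_pgoff == 0 */

	printf("shareable: %d\n", pud_shareable(&other, addr, idx));	/* prints 1 */
	return 0;
}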
+
+static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
+{
+ unsigned long base = addr & PUD_MASK;
+ unsigned long end = base + PUD_SIZE;
+
+ /*
+ * check on proper vm_flags and page table alignment
+ */
+ if (vma->vm_flags & VM_MAYSHARE &&
+ vma->vm_start <= base && end <= vma->vm_end)
+ return 1;
+ return 0;
+}
+
+/*
+ * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
+ * and returns the corresponding pte. While this is not necessary for the
+ * !shared pmd case because we can allocate the pmd later as well, it makes the
+ * code much cleaner. pmd allocation is essential for the shared case because
+ * pud has to be populated inside the same i_mmap_mutex section - otherwise
+ * racing tasks could either miss the sharing (see huge_pte_offset) or select a
+ * bad pmd for sharing.
+ */
+pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
+{
+ struct vm_area_struct *vma = find_vma(mm, addr);
+ struct address_space *mapping = vma->vm_file->f_mapping;
+ pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
+ vma->vm_pgoff;
+ struct vm_area_struct *svma;
+ unsigned long saddr;
+ pte_t *spte = NULL;
+ pte_t *pte;
+ spinlock_t *ptl;
+
+ if (!vma_shareable(vma, addr))
+ return (pte_t *)pmd_alloc(mm, pud, addr);
+
+ mutex_lock(&mapping->i_mmap_mutex);
+ vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
+ if (svma == vma)
+ continue;
+
+ saddr = page_table_shareable(svma, vma, addr, idx);
+ if (saddr) {
+ spte = huge_pte_offset(svma->vm_mm, saddr);
+ if (spte) {
+ get_page(virt_to_page(spte));
+ break;
+ }
+ }
+ }
+
+ if (!spte)
+ goto out;
+
+ ptl = huge_pte_lockptr(hstate_vma(vma), mm, spte);
+ spin_lock(ptl);
+ if (pud_none(*pud))
+ pud_populate(mm, pud,
+ (pmd_t *)((unsigned long)spte & PAGE_MASK));
+ else
+ put_page(virt_to_page(spte));
+ spin_unlock(ptl);
+out:
+ pte = (pte_t *)pmd_alloc(mm, pud, addr);
+ mutex_unlock(&mapping->i_mmap_mutex);
+ return pte;
+}
+
+/*
+ * unmap huge page backed by shared pte.
+ *
+ * Hugetlb pte page is ref counted at the time of mapping. If pte is shared
+ * indicated by page_count > 1, unmap is achieved by clearing pud and
+ * decrementing the ref count. If count == 1, the pte page is not shared.
+ *
+ * called with page table lock held.
+ *
+ * returns: 1 successfully unmapped a shared pte page
+ * 0 the underlying pte page is not shared, or it is the last user
+ */
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+ pgd_t *pgd = pgd_offset(mm, *addr);
+ pud_t *pud = pud_offset(pgd, *addr);
+
+ BUG_ON(page_count(virt_to_page(ptep)) == 0);
+ if (page_count(virt_to_page(ptep)) == 1)
+ return 0;
+
+ pud_clear(pud);
+ put_page(virt_to_page(ptep));
+ *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
+ return 1;
+}
+#define want_pmd_share() (1)
+#else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
+pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
+{
+ return NULL;
+}
+#define want_pmd_share() (0)
+#endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
+
+#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
+pte_t *huge_pte_alloc(struct mm_struct *mm,
+ unsigned long addr, unsigned long sz)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pte_t *pte = NULL;
+
+ pgd = pgd_offset(mm, addr);
+ pud = pud_alloc(mm, pgd, addr);
+ if (pud) {
+ if (sz == PUD_SIZE) {
+ pte = (pte_t *)pud;
+ } else {
+ BUG_ON(sz != PMD_SIZE);
+ if (want_pmd_share() && pud_none(*pud))
+ pte = huge_pmd_share(mm, addr, pud);
+ else
+ pte = (pte_t *)pmd_alloc(mm, pud, addr);
+ }
+ }
+ BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
+
+ return pte;
+}
+
+pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd = NULL;
+
+ pgd = pgd_offset(mm, addr);
+ if (pgd_present(*pgd)) {
+ pud = pud_offset(pgd, addr);
+ if (pud_present(*pud)) {
+ if (pud_huge(*pud))
+ return (pte_t *)pud;
+ pmd = pmd_offset(pud, addr);
+ }
+ }
+ return (pte_t *) pmd;
+}
+
+struct page *
+follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+ pmd_t *pmd, int write)
+{
+ struct page *page;
+
+ page = pte_page(*(pte_t *)pmd);
+ if (page)
+ page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
+ return page;
+}
+
+struct page *
+follow_huge_pud(struct mm_struct *mm, unsigned long address,
+ pud_t *pud, int write)
+{
+ struct page *page;
+
+ page = pte_page(*(pte_t *)pud);
+ if (page)
+ page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
+ return page;
+}
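
Not part of the patch: a small sketch of the subpage arithmetic used by follow_huge_pmd()/follow_huge_pud(). The leaf entry yields the head page, and the faulting address' offset within the huge mapping, expressed in base pages, is added to reach the exact subpage. The 2 MiB and 1 GiB sizes are the usual x86_64 values and are assumptions of the example.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SIZE	(1UL << 21)	/* 2 MiB, x86_64 */
#define PMD_MASK	(~(PMD_SIZE - 1))
#define PUD_SIZE	(1UL << 30)	/* 1 GiB, x86_64 */
#define PUD_MASK	(~(PUD_SIZE - 1))

int main(void)
{
	unsigned long address = 0x40345000UL;

	/* index of the 4 KiB subpage inside the surrounding huge page */
	unsigned long pmd_subpage = (address & ~PMD_MASK) >> PAGE_SHIFT;
	unsigned long pud_subpage = (address & ~PUD_MASK) >> PAGE_SHIFT;

	printf("2 MiB page: head + %lu\n", pmd_subpage);	/* 0x145 = 325 */
	printf("1 GiB page: head + %lu\n", pud_subpage);	/* 0x345 = 837 */
	return 0;
}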
+
+#else /* !CONFIG_ARCH_WANT_GENERAL_HUGETLB */
+
+/* Can be overridden by architectures */
+struct page * __weak
+follow_huge_pud(struct mm_struct *mm, unsigned long address,
+ pud_t *pud, int write)
+{
+ BUG();
+ return NULL;
+}
+
+#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
+
#ifdef CONFIG_MEMORY_FAILURE
/* Should be called in hugetlb_lock */
@@ -3213,3 +3586,45 @@ int dequeue_hwpoisoned_huge_page(struct page *hpage)
return ret;
}
#endif
+
+bool isolate_huge_page(struct page *page, struct list_head *list)
+{
+ VM_BUG_ON_PAGE(!PageHead(page), page);
+ if (!get_page_unless_zero(page))
+ return false;
+ spin_lock(&hugetlb_lock);
+ list_move_tail(&page->lru, list);
+ spin_unlock(&hugetlb_lock);
+ return true;
+}
+
+void putback_active_hugepage(struct page *page)
+{
+ VM_BUG_ON_PAGE(!PageHead(page), page);
+ spin_lock(&hugetlb_lock);
+ list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
+ spin_unlock(&hugetlb_lock);
+ put_page(page);
+}
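
Not part of the patch, and not a standalone program: a hedged kernel-context sketch of how a caller might pair the two helpers above, for example around hugepage migration. Only the two functions introduced here are assumed; the hand-off to the migration core is elided.

/*
 * Illustrative only: collect a movable hugepage on a private list, then
 * drop it back on the hstate active list if migration is not attempted.
 */
static void toy_isolate_and_putback(struct page *hpage)
{
	LIST_HEAD(pagelist);

	if (!isolate_huge_page(hpage, &pagelist))
		return;		/* refcount was zero, page is going away */

	/* ... hand &pagelist to the migration core here ... */

	/* undo the isolation: back on the active list, reference dropped */
	putback_active_hugepage(hpage);
}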
+
+bool is_hugepage_active(struct page *page)
+{
+ VM_BUG_ON_PAGE(!PageHuge(page), page);
+ /*
+ * This function can be called for a tail page because the caller,
+ * scan_movable_pages, scans through a given pfn-range which typically
+ * covers one memory block. In systems using gigantic hugepages (1GB
+ * for x86_64), a hugepage is larger than a memory block, and we don't
+ * support migrating such large hugepages for now, so return false
+ * when called for tail pages.
+ */
+ if (PageTail(page))
+ return false;
+ /*
+ * Refcount of a hwpoisoned hugepages is 1, but they are not active,
+ * so we should return false for them.
+ */
+ if (unlikely(PageHWPoison(page)))
+ return false;
+ return page_count(page) > 0;
+}
diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
index 9cea7de22ffb..595d7fd795e1 100644
--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -30,27 +30,18 @@ struct hugetlb_cgroup {
#define MEMFILE_IDX(val) (((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val) ((val) & 0xffff)
-struct cgroup_subsys hugetlb_subsys __read_mostly;
static struct hugetlb_cgroup *root_h_cgroup __read_mostly;
static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s)
{
- return container_of(s, struct hugetlb_cgroup, css);
-}
-
-static inline
-struct hugetlb_cgroup *hugetlb_cgroup_from_cgroup(struct cgroup *cgroup)
-{
- return hugetlb_cgroup_from_css(cgroup_subsys_state(cgroup,
- hugetlb_subsys_id));
+ return s ? container_of(s, struct hugetlb_cgroup, css) : NULL;
}
static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_task(struct task_struct *task)
{
- return hugetlb_cgroup_from_css(task_subsys_state(task,
- hugetlb_subsys_id));
+ return hugetlb_cgroup_from_css(task_css(task, hugetlb_cgrp_id));
}
static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg)
@@ -58,17 +49,15 @@ static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg)
return (h_cg == root_h_cgroup);
}
-static inline struct hugetlb_cgroup *parent_hugetlb_cgroup(struct cgroup *cg)
+static inline struct hugetlb_cgroup *
+parent_hugetlb_cgroup(struct hugetlb_cgroup *h_cg)
{
- if (!cg->parent)
- return NULL;
- return hugetlb_cgroup_from_cgroup(cg->parent);
+ return hugetlb_cgroup_from_css(css_parent(&h_cg->css));
}
-static inline bool hugetlb_cgroup_have_usage(struct cgroup *cg)
+static inline bool hugetlb_cgroup_have_usage(struct hugetlb_cgroup *h_cg)
{
int idx;
- struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_cgroup(cg);
for (idx = 0; idx < hugetlb_max_hstate; idx++) {
if ((res_counter_read_u64(&h_cg->hugepage[idx], RES_USAGE)) > 0)
@@ -77,19 +66,18 @@ static inline bool hugetlb_cgroup_have_usage(struct cgroup *cg)
return false;
}
-static struct cgroup_subsys_state *hugetlb_cgroup_css_alloc(struct cgroup *cgroup)
+static struct cgroup_subsys_state *
+hugetlb_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
+ struct hugetlb_cgroup *parent_h_cgroup = hugetlb_cgroup_from_css(parent_css);
+ struct hugetlb_cgroup *h_cgroup;
int idx;
- struct cgroup *parent_cgroup;
- struct hugetlb_cgroup *h_cgroup, *parent_h_cgroup;
h_cgroup = kzalloc(sizeof(*h_cgroup), GFP_KERNEL);
if (!h_cgroup)
return ERR_PTR(-ENOMEM);
- parent_cgroup = cgroup->parent;
- if (parent_cgroup) {
- parent_h_cgroup = hugetlb_cgroup_from_cgroup(parent_cgroup);
+ if (parent_h_cgroup) {
for (idx = 0; idx < HUGE_MAX_HSTATE; idx++)
res_counter_init(&h_cgroup->hugepage[idx],
&parent_h_cgroup->hugepage[idx]);
@@ -101,11 +89,11 @@ static struct cgroup_subsys_state *hugetlb_cgroup_css_alloc(struct cgroup *cgrou
return &h_cgroup->css;
}
-static void hugetlb_cgroup_css_free(struct cgroup *cgroup)
+static void hugetlb_cgroup_css_free(struct cgroup_subsys_state *css)
{
struct hugetlb_cgroup *h_cgroup;
- h_cgroup = hugetlb_cgroup_from_cgroup(cgroup);
+ h_cgroup = hugetlb_cgroup_from_css(css);
kfree(h_cgroup);
}
@@ -117,15 +105,14 @@ static void hugetlb_cgroup_css_free(struct cgroup *cgroup)
* page reference and test for page active here. This function
* cannot fail.
*/
-static void hugetlb_cgroup_move_parent(int idx, struct cgroup *cgroup,
+static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg,
struct page *page)
{
int csize;
struct res_counter *counter;
struct res_counter *fail_res;
struct hugetlb_cgroup *page_hcg;
- struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_cgroup(cgroup);
- struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(cgroup);
+ struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(h_cg);
page_hcg = hugetlb_cgroup_from_page(page);
/*
@@ -155,8 +142,9 @@ out:
* Force the hugetlb cgroup to empty the hugetlb resources by moving them to
* the parent cgroup.
*/
-static void hugetlb_cgroup_css_offline(struct cgroup *cgroup)
+static void hugetlb_cgroup_css_offline(struct cgroup_subsys_state *css)
{
+ struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
struct hstate *h;
struct page *page;
int idx = 0;
@@ -165,13 +153,13 @@ static void hugetlb_cgroup_css_offline(struct cgroup *cgroup)
for_each_hstate(h) {
spin_lock(&hugetlb_lock);
list_for_each_entry(page, &h->hugepage_activelist, lru)
- hugetlb_cgroup_move_parent(idx, cgroup, page);
+ hugetlb_cgroup_move_parent(idx, h_cg, page);
spin_unlock(&hugetlb_lock);
idx++;
}
cond_resched();
- } while (hugetlb_cgroup_have_usage(cgroup));
+ } while (hugetlb_cgroup_have_usage(h_cg));
}
int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
@@ -253,29 +241,24 @@ void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
return;
}
-static ssize_t hugetlb_cgroup_read(struct cgroup *cgroup, struct cftype *cft,
- struct file *file, char __user *buf,
- size_t nbytes, loff_t *ppos)
+static u64 hugetlb_cgroup_read_u64(struct cgroup_subsys_state *css,
+ struct cftype *cft)
{
- u64 val;
- char str[64];
- int idx, name, len;
- struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_cgroup(cgroup);
+ int idx, name;
+ struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
idx = MEMFILE_IDX(cft->private);
name = MEMFILE_ATTR(cft->private);
- val = res_counter_read_u64(&h_cg->hugepage[idx], name);
- len = scnprintf(str, sizeof(str), "%llu\n", (unsigned long long)val);
- return simple_read_from_buffer(buf, nbytes, ppos, str, len);
+ return res_counter_read_u64(&h_cg->hugepage[idx], name);
}
-static int hugetlb_cgroup_write(struct cgroup *cgroup, struct cftype *cft,
- const char *buffer)
+static int hugetlb_cgroup_write(struct cgroup_subsys_state *css,
+ struct cftype *cft, char *buffer)
{
int idx, name, ret;
unsigned long long val;
- struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_cgroup(cgroup);
+ struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
idx = MEMFILE_IDX(cft->private);
name = MEMFILE_ATTR(cft->private);
@@ -300,10 +283,11 @@ static int hugetlb_cgroup_write(struct cgroup *cgroup, struct cftype *cft,
return ret;
}
-static int hugetlb_cgroup_reset(struct cgroup *cgroup, unsigned int event)
+static int hugetlb_cgroup_reset(struct cgroup_subsys_state *css,
+ unsigned int event)
{
int idx, name, ret = 0;
- struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_cgroup(cgroup);
+ struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
idx = MEMFILE_IDX(event);
name = MEMFILE_ATTR(event);
@@ -346,34 +330,34 @@ static void __init __hugetlb_cgroup_file_init(int idx)
cft = &h->cgroup_files[0];
snprintf(cft->name, MAX_CFTYPE_NAME, "%s.limit_in_bytes", buf);
cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
- cft->read = hugetlb_cgroup_read;
+ cft->read_u64 = hugetlb_cgroup_read_u64;
cft->write_string = hugetlb_cgroup_write;
/* Add the usage file */
cft = &h->cgroup_files[1];
snprintf(cft->name, MAX_CFTYPE_NAME, "%s.usage_in_bytes", buf);
cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
- cft->read = hugetlb_cgroup_read;
+ cft->read_u64 = hugetlb_cgroup_read_u64;
/* Add the MAX usage file */
cft = &h->cgroup_files[2];
snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max_usage_in_bytes", buf);
cft->private = MEMFILE_PRIVATE(idx, RES_MAX_USAGE);
cft->trigger = hugetlb_cgroup_reset;
- cft->read = hugetlb_cgroup_read;
+ cft->read_u64 = hugetlb_cgroup_read_u64;
/* Add the failcntfile */
cft = &h->cgroup_files[3];
snprintf(cft->name, MAX_CFTYPE_NAME, "%s.failcnt", buf);
cft->private = MEMFILE_PRIVATE(idx, RES_FAILCNT);
cft->trigger = hugetlb_cgroup_reset;
- cft->read = hugetlb_cgroup_read;
+ cft->read_u64 = hugetlb_cgroup_read_u64;
/* NULL terminate the last cft */
cft = &h->cgroup_files[4];
memset(cft, 0, sizeof(*cft));
- WARN_ON(cgroup_add_cftypes(&hugetlb_subsys, h->cgroup_files));
+ WARN_ON(cgroup_add_cftypes(&hugetlb_cgrp_subsys, h->cgroup_files));
return;
}
@@ -405,7 +389,7 @@ void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
if (hugetlb_cgroup_disabled())
return;
- VM_BUG_ON(!PageHuge(oldhpage));
+ VM_BUG_ON_PAGE(!PageHuge(oldhpage), oldhpage);
spin_lock(&hugetlb_lock);
h_cg = hugetlb_cgroup_from_page(oldhpage);
set_hugetlb_cgroup(oldhpage, NULL);
@@ -417,10 +401,8 @@ void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
return;
}
-struct cgroup_subsys hugetlb_subsys = {
- .name = "hugetlb",
+struct cgroup_subsys hugetlb_cgrp_subsys = {
.css_alloc = hugetlb_cgroup_css_alloc,
.css_offline = hugetlb_cgroup_css_offline,
.css_free = hugetlb_cgroup_css_free,
- .subsys_id = hugetlb_subsys_id,
};
diff --git a/mm/hwpoison-inject.c b/mm/hwpoison-inject.c
index 3a61efc518d5..95487c71cad5 100644
--- a/mm/hwpoison-inject.c
+++ b/mm/hwpoison-inject.c
@@ -20,8 +20,6 @@ static int hwpoison_inject(void *data, u64 val)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (!hwpoison_filter_enable)
- goto inject;
if (!pfn_valid(pfn))
return -ENXIO;
@@ -33,6 +31,9 @@ static int hwpoison_inject(void *data, u64 val)
if (!get_page_unless_zero(hpage))
return 0;
+ if (!hwpoison_filter_enable)
+ goto inject;
+
if (!PageLRU(p) && !PageHuge(p))
shake_page(p, 0);
/*
@@ -54,7 +55,7 @@ static int hwpoison_inject(void *data, u64 val)
return 0;
inject:
- printk(KERN_INFO "Injecting memory failure at pfn %lx\n", pfn);
+ pr_info("Injecting memory failure at pfn %#lx\n", pfn);
return memory_failure(pfn, 18, MF_COUNT_INCREASED);
}
@@ -88,12 +89,12 @@ static int pfn_inject_init(void)
* hardware status change, hence do not require hardware support.
* They are mainly for testing hwpoison in software level.
*/
- dentry = debugfs_create_file("corrupt-pfn", 0600, hwpoison_dir,
+ dentry = debugfs_create_file("corrupt-pfn", 0200, hwpoison_dir,
NULL, &hwpoison_fops);
if (!dentry)
goto fail;
- dentry = debugfs_create_file("unpoison-pfn", 0600, hwpoison_dir,
+ dentry = debugfs_create_file("unpoison-pfn", 0200, hwpoison_dir,
NULL, &unpoison_fops);
if (!dentry)
goto fail;
diff --git a/mm/internal.h b/mm/internal.h
index 8562de0a5197..07b67361a40a 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -11,6 +11,7 @@
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H
+#include <linux/fs.h>
#include <linux/mm.h>
void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
@@ -21,22 +22,31 @@ static inline void set_page_count(struct page *page, int v)
atomic_set(&page->_count, v);
}
+extern int __do_page_cache_readahead(struct address_space *mapping,
+ struct file *filp, pgoff_t offset, unsigned long nr_to_read,
+ unsigned long lookahead_size);
+
+/*
+ * Submit IO for the read-ahead request in file_ra_state.
+ */
+static inline unsigned long ra_submit(struct file_ra_state *ra,
+ struct address_space *mapping, struct file *filp)
+{
+ return __do_page_cache_readahead(mapping, filp,
+ ra->start, ra->size, ra->async_size);
+}
+
/*
* Turn a non-refcounted page (->_count == 0) into refcounted with
* a count of one.
*/
static inline void set_page_refcounted(struct page *page)
{
- VM_BUG_ON(PageTail(page));
- VM_BUG_ON(atomic_read(&page->_count));
+ VM_BUG_ON_PAGE(PageTail(page), page);
+ VM_BUG_ON_PAGE(atomic_read(&page->_count), page);
set_page_count(page, 1);
}
-static inline void __put_page(struct page *page)
-{
- atomic_dec(&page->_count);
-}
-
static inline void __get_page_tail_foll(struct page *page,
bool get_page_head)
{
@@ -51,12 +61,10 @@ static inline void __get_page_tail_foll(struct page *page,
* speculative page access (like in
* page_cache_get_speculative()) on tail pages.
*/
- VM_BUG_ON(atomic_read(&page->first_page->_count) <= 0);
- VM_BUG_ON(atomic_read(&page->_count) != 0);
- VM_BUG_ON(page_mapcount(page) < 0);
+ VM_BUG_ON_PAGE(atomic_read(&page->first_page->_count) <= 0, page);
if (get_page_head)
atomic_inc(&page->first_page->_count);
- atomic_inc(&page->_mapcount);
+ get_huge_page_tail(page);
}
/*
@@ -78,7 +86,7 @@ static inline void get_page_foll(struct page *page)
* Getting a normal page or the head of a compound page
* requires to already have an elevated page->_count.
*/
- VM_BUG_ON(atomic_read(&page->_count) <= 0);
+ VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page);
atomic_inc(&page->_count);
}
}
@@ -90,6 +98,7 @@ extern unsigned long highest_memmap_pfn;
*/
extern int isolate_lru_page(struct page *page);
extern void putback_lru_page(struct page *page);
+extern bool zone_reclaimable(struct zone *zone);
/*
* in mm/rmap.c:
@@ -104,6 +113,7 @@ extern void prep_compound_page(struct page *page, unsigned long order);
#ifdef CONFIG_MEMORY_FAILURE
extern bool is_free_buddy_page(struct page *page);
#endif
+extern int user_min_free_kbytes;
#if defined CONFIG_COMPACTION || defined CONFIG_CMA
@@ -147,9 +157,11 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
#endif
/*
- * function for dealing with page's order in buddy system.
- * zone->lock is already acquired when we use these.
- * So, we don't need atomic page->flags operations here.
+ * This function returns the order of a free page in the buddy system. In
+ * general, page_zone(page)->lock must be held by the caller to prevent the
+ * page from being allocated in parallel and returning garbage as the order.
+ * If a caller does not hold page_zone(page)->lock, it must guarantee that the
+ * page cannot be allocated or merged in parallel.
*/
static inline unsigned long page_order(struct page *page)
{
@@ -178,7 +190,7 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
struct page *page)
{
- VM_BUG_ON(PageLRU(page));
+ VM_BUG_ON_PAGE(PageLRU(page), page);
if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
return 0;
@@ -373,5 +385,6 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
#define ALLOC_HIGH 0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET 0x40 /* check for correct cpuset */
#define ALLOC_CMA 0x80 /* allow allocations from CMA areas */
+#define ALLOC_FAIR 0x100 /* fair zone allocation */
#endif /* __MM_INTERNAL_H */
diff --git a/mm/iov_iter.c b/mm/iov_iter.c
new file mode 100644
index 000000000000..10e46cd721de
--- /dev/null
+++ b/mm/iov_iter.c
@@ -0,0 +1,224 @@
+#include <linux/export.h>
+#include <linux/uio.h>
+#include <linux/pagemap.h>
+
+size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
+ struct iov_iter *i)
+{
+ size_t skip, copy, left, wanted;
+ const struct iovec *iov;
+ char __user *buf;
+ void *kaddr, *from;
+
+ if (unlikely(bytes > i->count))
+ bytes = i->count;
+
+ if (unlikely(!bytes))
+ return 0;
+
+ wanted = bytes;
+ iov = i->iov;
+ skip = i->iov_offset;
+ buf = iov->iov_base + skip;
+ copy = min(bytes, iov->iov_len - skip);
+
+ if (!fault_in_pages_writeable(buf, copy)) {
+ kaddr = kmap_atomic(page);
+ from = kaddr + offset;
+
+ /* first chunk, usually the only one */
+ left = __copy_to_user_inatomic(buf, from, copy);
+ copy -= left;
+ skip += copy;
+ from += copy;
+ bytes -= copy;
+
+ while (unlikely(!left && bytes)) {
+ iov++;
+ buf = iov->iov_base;
+ copy = min(bytes, iov->iov_len);
+ left = __copy_to_user_inatomic(buf, from, copy);
+ copy -= left;
+ skip = copy;
+ from += copy;
+ bytes -= copy;
+ }
+ if (likely(!bytes)) {
+ kunmap_atomic(kaddr);
+ goto done;
+ }
+ offset = from - kaddr;
+ buf += copy;
+ kunmap_atomic(kaddr);
+ copy = min(bytes, iov->iov_len - skip);
+ }
+ /* Too bad - revert to non-atomic kmap */
+ kaddr = kmap(page);
+ from = kaddr + offset;
+ left = __copy_to_user(buf, from, copy);
+ copy -= left;
+ skip += copy;
+ from += copy;
+ bytes -= copy;
+ while (unlikely(!left && bytes)) {
+ iov++;
+ buf = iov->iov_base;
+ copy = min(bytes, iov->iov_len);
+ left = __copy_to_user(buf, from, copy);
+ copy -= left;
+ skip = copy;
+ from += copy;
+ bytes -= copy;
+ }
+ kunmap(page);
+done:
+ i->count -= wanted - bytes;
+ i->nr_segs -= iov - i->iov;
+ i->iov = iov;
+ i->iov_offset = skip;
+ return wanted - bytes;
+}
+EXPORT_SYMBOL(copy_page_to_iter);
+
+static size_t __iovec_copy_from_user_inatomic(char *vaddr,
+ const struct iovec *iov, size_t base, size_t bytes)
+{
+ size_t copied = 0, left = 0;
+
+ while (bytes) {
+ char __user *buf = iov->iov_base + base;
+ int copy = min(bytes, iov->iov_len - base);
+
+ base = 0;
+ left = __copy_from_user_inatomic(vaddr, buf, copy);
+ copied += copy;
+ bytes -= copy;
+ vaddr += copy;
+ iov++;
+
+ if (unlikely(left))
+ break;
+ }
+ return copied - left;
+}
+
+/*
+ * Copy as much as we can into the page and return the number of bytes which
+ * were successfully copied. If a fault is encountered then return the number of
+ * bytes which were copied.
+ */
+size_t iov_iter_copy_from_user_atomic(struct page *page,
+ struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+ char *kaddr;
+ size_t copied;
+
+ kaddr = kmap_atomic(page);
+ if (likely(i->nr_segs == 1)) {
+ int left;
+ char __user *buf = i->iov->iov_base + i->iov_offset;
+ left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
+ copied = bytes - left;
+ } else {
+ copied = __iovec_copy_from_user_inatomic(kaddr + offset,
+ i->iov, i->iov_offset, bytes);
+ }
+ kunmap_atomic(kaddr);
+
+ return copied;
+}
+EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
+
+/*
+ * This has the same side effects and return value as
+ * iov_iter_copy_from_user_atomic().
+ * The difference is that it attempts to resolve faults.
+ * Page must not be locked.
+ */
+size_t iov_iter_copy_from_user(struct page *page,
+ struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+ char *kaddr;
+ size_t copied;
+
+ kaddr = kmap(page);
+ if (likely(i->nr_segs == 1)) {
+ int left;
+ char __user *buf = i->iov->iov_base + i->iov_offset;
+ left = __copy_from_user(kaddr + offset, buf, bytes);
+ copied = bytes - left;
+ } else {
+ copied = __iovec_copy_from_user_inatomic(kaddr + offset,
+ i->iov, i->iov_offset, bytes);
+ }
+ kunmap(page);
+ return copied;
+}
+EXPORT_SYMBOL(iov_iter_copy_from_user);
+
+void iov_iter_advance(struct iov_iter *i, size_t bytes)
+{
+ BUG_ON(i->count < bytes);
+
+ if (likely(i->nr_segs == 1)) {
+ i->iov_offset += bytes;
+ i->count -= bytes;
+ } else {
+ const struct iovec *iov = i->iov;
+ size_t base = i->iov_offset;
+ unsigned long nr_segs = i->nr_segs;
+
+ /*
+ * The !iov->iov_len check ensures we skip over unlikely
+ * zero-length segments (without overrunning the iovec).
+ */
+ while (bytes || unlikely(i->count && !iov->iov_len)) {
+ int copy;
+
+ copy = min(bytes, iov->iov_len - base);
+ BUG_ON(!i->count || i->count < copy);
+ i->count -= copy;
+ bytes -= copy;
+ base += copy;
+ if (iov->iov_len == base) {
+ iov++;
+ nr_segs--;
+ base = 0;
+ }
+ }
+ i->iov = iov;
+ i->iov_offset = base;
+ i->nr_segs = nr_segs;
+ }
+}
+EXPORT_SYMBOL(iov_iter_advance);
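
Not part of the patch: a user-space model of the multi-segment branch of iov_iter_advance(), using struct iovec from <sys/uio.h> and a simplified stand-in for struct iov_iter. It consumes bytes from the current segment, steps to the next segment when one is exhausted, and skips zero-length segments, loosely mirroring the loop above.

#include <stdio.h>
#include <sys/uio.h>

struct toy_iter {
	const struct iovec *iov;	/* current segment */
	unsigned long nr_segs;		/* segments remaining */
	size_t iov_offset;		/* offset into current segment */
	size_t count;			/* total bytes left in the iterator */
};

static void toy_advance(struct toy_iter *i, size_t bytes)
{
	while (bytes || (i->count && i->iov->iov_len == i->iov_offset)) {
		size_t step = i->iov->iov_len - i->iov_offset;

		if (step > bytes)
			step = bytes;
		i->iov_offset += step;
		i->count -= step;
		bytes -= step;
		if (i->iov_offset == i->iov->iov_len) {	/* segment used up (or empty) */
			i->iov++;
			i->nr_segs--;
			i->iov_offset = 0;
		}
	}
}

int main(void)
{
	char a[8], c[16];
	struct iovec v[3] = { { a, sizeof(a) }, { a, 0 }, { c, sizeof(c) } };
	struct toy_iter it = { v, 3, 0, sizeof(a) + 0 + sizeof(c) };

	toy_advance(&it, 10);	/* crosses a[], skips the empty segment, 2 bytes into c[] */
	printf("segs left %lu, offset %zu, count %zu\n", it.nr_segs, it.iov_offset, it.count);
	return 0;
}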
+
+/*
+ * Fault in the first iovec of the given iov_iter, to a maximum length
+ * of bytes. Returns 0 on success, or non-zero if the memory could not be
+ * accessed (ie. because it is an invalid address).
+ *
+ * writev-intensive code may want this to prefault several iovecs -- that
+ * would be possible (callers must not rely on the fact that _only_ the
+ * first iovec will be faulted with the current implementation).
+ */
+int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
+{
+ char __user *buf = i->iov->iov_base + i->iov_offset;
+ bytes = min(bytes, i->iov->iov_len - i->iov_offset);
+ return fault_in_pages_readable(buf, bytes);
+}
+EXPORT_SYMBOL(iov_iter_fault_in_readable);
+
+/*
+ * Return the count of just the current iov_iter segment.
+ */
+size_t iov_iter_single_seg_count(const struct iov_iter *i)
+{
+ const struct iovec *iov = i->iov;
+ if (i->nr_segs == 1)
+ return i->count;
+ else
+ return min(i->count, iov->iov_len - i->iov_offset);
+}
+EXPORT_SYMBOL(iov_iter_single_seg_count);
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index c8d7f3110fd0..8d2fcdfeff7f 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -192,15 +192,15 @@ static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;
/* set if tracing memory operations is enabled */
-static atomic_t kmemleak_enabled = ATOMIC_INIT(0);
+static int kmemleak_enabled;
/* set in the late_initcall if there were no errors */
-static atomic_t kmemleak_initialized = ATOMIC_INIT(0);
+static int kmemleak_initialized;
/* enables or disables early logging of the memory operations */
-static atomic_t kmemleak_early_log = ATOMIC_INIT(1);
+static int kmemleak_early_log = 1;
/* set if a kmemleak warning was issued */
-static atomic_t kmemleak_warning = ATOMIC_INIT(0);
+static int kmemleak_warning;
/* set if a fatal kmemleak error has occurred */
-static atomic_t kmemleak_error = ATOMIC_INIT(0);
+static int kmemleak_error;
/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
@@ -218,7 +218,8 @@ static int kmemleak_stack_scan = 1;
static DEFINE_MUTEX(scan_mutex);
/* setting kmemleak=on, will set this var, skipping the disable */
static int kmemleak_skip_disable;
-
+/* If there are leaks that can be reported */
+static bool kmemleak_found_leaks;
/*
* Early object allocation/freeing logging. Kmemleak is initialized after the
@@ -267,7 +268,7 @@ static void kmemleak_disable(void);
#define kmemleak_warn(x...) do { \
pr_warning(x); \
dump_stack(); \
- atomic_set(&kmemleak_warning, 1); \
+ kmemleak_warning = 1; \
} while (0)
/*
@@ -753,7 +754,9 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
}
spin_lock_irqsave(&object->lock, flags);
- if (ptr + size > object->pointer + object->size) {
+ if (size == SIZE_MAX) {
+ size = object->pointer + object->size - ptr;
+ } else if (ptr + size > object->pointer + object->size) {
kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
dump_object_info(object);
kmem_cache_free(scan_area_cache, area);
@@ -803,7 +806,7 @@ static void __init log_early(int op_type, const void *ptr, size_t size,
unsigned long flags;
struct early_log *log;
- if (atomic_read(&kmemleak_error)) {
+ if (kmemleak_error) {
/* kmemleak stopped recording, just count the requests */
crt_early_log++;
return;
@@ -838,7 +841,7 @@ static void early_alloc(struct early_log *log)
unsigned long flags;
int i;
- if (!atomic_read(&kmemleak_enabled) || !log->ptr || IS_ERR(log->ptr))
+ if (!kmemleak_enabled || !log->ptr || IS_ERR(log->ptr))
return;
/*
@@ -891,9 +894,9 @@ void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
{
pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);
- if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+ if (kmemleak_enabled && ptr && !IS_ERR(ptr))
create_object((unsigned long)ptr, size, min_count, gfp);
- else if (atomic_read(&kmemleak_early_log))
+ else if (kmemleak_early_log)
log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);
@@ -917,11 +920,11 @@ void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size)
* Percpu allocations are only scanned and not reported as leaks
* (min_count is set to 0).
*/
- if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+ if (kmemleak_enabled && ptr && !IS_ERR(ptr))
for_each_possible_cpu(cpu)
create_object((unsigned long)per_cpu_ptr(ptr, cpu),
size, 0, GFP_KERNEL);
- else if (atomic_read(&kmemleak_early_log))
+ else if (kmemleak_early_log)
log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
@@ -937,9 +940,9 @@ void __ref kmemleak_free(const void *ptr)
{
pr_debug("%s(0x%p)\n", __func__, ptr);
- if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+ if (kmemleak_enabled && ptr && !IS_ERR(ptr))
delete_object_full((unsigned long)ptr);
- else if (atomic_read(&kmemleak_early_log))
+ else if (kmemleak_early_log)
log_early(KMEMLEAK_FREE, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);
@@ -957,9 +960,9 @@ void __ref kmemleak_free_part(const void *ptr, size_t size)
{
pr_debug("%s(0x%p)\n", __func__, ptr);
- if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+ if (kmemleak_enabled && ptr && !IS_ERR(ptr))
delete_object_part((unsigned long)ptr, size);
- else if (atomic_read(&kmemleak_early_log))
+ else if (kmemleak_early_log)
log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);
@@ -977,11 +980,11 @@ void __ref kmemleak_free_percpu(const void __percpu *ptr)
pr_debug("%s(0x%p)\n", __func__, ptr);
- if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+ if (kmemleak_enabled && ptr && !IS_ERR(ptr))
for_each_possible_cpu(cpu)
delete_object_full((unsigned long)per_cpu_ptr(ptr,
cpu));
- else if (atomic_read(&kmemleak_early_log))
+ else if (kmemleak_early_log)
log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
@@ -997,9 +1000,9 @@ void __ref kmemleak_not_leak(const void *ptr)
{
pr_debug("%s(0x%p)\n", __func__, ptr);
- if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+ if (kmemleak_enabled && ptr && !IS_ERR(ptr))
make_gray_object((unsigned long)ptr);
- else if (atomic_read(&kmemleak_early_log))
+ else if (kmemleak_early_log)
log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_not_leak);
@@ -1017,9 +1020,9 @@ void __ref kmemleak_ignore(const void *ptr)
{
pr_debug("%s(0x%p)\n", __func__, ptr);
- if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+ if (kmemleak_enabled && ptr && !IS_ERR(ptr))
make_black_object((unsigned long)ptr);
- else if (atomic_read(&kmemleak_early_log))
+ else if (kmemleak_early_log)
log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);
@@ -1039,9 +1042,9 @@ void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
pr_debug("%s(0x%p)\n", __func__, ptr);
- if (atomic_read(&kmemleak_enabled) && ptr && size && !IS_ERR(ptr))
+ if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
add_scan_area((unsigned long)ptr, size, gfp);
- else if (atomic_read(&kmemleak_early_log))
+ else if (kmemleak_early_log)
log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
}
EXPORT_SYMBOL(kmemleak_scan_area);
@@ -1059,9 +1062,9 @@ void __ref kmemleak_no_scan(const void *ptr)
{
pr_debug("%s(0x%p)\n", __func__, ptr);
- if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+ if (kmemleak_enabled && ptr && !IS_ERR(ptr))
object_no_scan((unsigned long)ptr);
- else if (atomic_read(&kmemleak_early_log))
+ else if (kmemleak_early_log)
log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_no_scan);
@@ -1086,7 +1089,7 @@ static bool update_checksum(struct kmemleak_object *object)
*/
static int scan_should_stop(void)
{
- if (!atomic_read(&kmemleak_enabled))
+ if (!kmemleak_enabled)
return 1;
/*
@@ -1380,9 +1383,12 @@ static void kmemleak_scan(void)
}
rcu_read_unlock();
- if (new_leaks)
+ if (new_leaks) {
+ kmemleak_found_leaks = true;
+
pr_info("%d new suspected memory leaks (see "
"/sys/kernel/debug/kmemleak)\n", new_leaks);
+ }
}
@@ -1543,11 +1549,6 @@ static int kmemleak_open(struct inode *inode, struct file *file)
return seq_open(file, &kmemleak_seq_ops);
}
-static int kmemleak_release(struct inode *inode, struct file *file)
-{
- return seq_release(inode, file);
-}
-
static int dump_str_object_info(const char *str)
{
unsigned long flags;
@@ -1590,8 +1591,12 @@ static void kmemleak_clear(void)
spin_unlock_irqrestore(&object->lock, flags);
}
rcu_read_unlock();
+
+ kmemleak_found_leaks = false;
}
+static void __kmemleak_do_cleanup(void);
+
/*
* File write operation to configure kmemleak at run-time. The following
* commands can be written to the /sys/kernel/debug/kmemleak file:
@@ -1604,7 +1609,8 @@ static void kmemleak_clear(void)
* disable it)
* scan - trigger a memory scan
* clear - mark all current reported unreferenced kmemleak objects as
- * grey to ignore printing them
+ * grey to ignore printing them, or free all kmemleak objects
+ * if kmemleak has been disabled.
* dump=... - dump information about the object found at the given address
*/
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
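
Not part of the patch: a minimal user-space sketch of driving the interface documented above, assuming debugfs is mounted at /sys/kernel/debug and the caller has the required privileges. Only the file path and the command strings come from the patch; everything else is illustrative.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int kmemleak_cmd(const char *cmd)
{
	int fd = open("/sys/kernel/debug/kmemleak", O_WRONLY);

	if (fd < 0) {
		perror("open kmemleak");
		return -1;
	}
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("write kmemleak");
	close(fd);
	return 0;
}

int main(void)
{
	kmemleak_cmd("scan");	/* trigger an immediate memory scan */
	kmemleak_cmd("clear");	/* grey out (or free, if disabled) reported objects */
	return 0;
}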
@@ -1614,9 +1620,6 @@ static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
int buf_size;
int ret;
- if (!atomic_read(&kmemleak_enabled))
- return -EBUSY;
-
buf_size = min(size, (sizeof(buf) - 1));
if (strncpy_from_user(buf, user_buf, buf_size) < 0)
return -EFAULT;
@@ -1626,6 +1629,19 @@ static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
if (ret < 0)
return ret;
+ if (strncmp(buf, "clear", 5) == 0) {
+ if (kmemleak_enabled)
+ kmemleak_clear();
+ else
+ __kmemleak_do_cleanup();
+ goto out;
+ }
+
+ if (!kmemleak_enabled) {
+ ret = -EBUSY;
+ goto out;
+ }
+
if (strncmp(buf, "off", 3) == 0)
kmemleak_disable();
else if (strncmp(buf, "stack=on", 8) == 0)
@@ -1639,7 +1655,7 @@ static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
else if (strncmp(buf, "scan=", 5) == 0) {
unsigned long secs;
- ret = strict_strtoul(buf + 5, 0, &secs);
+ ret = kstrtoul(buf + 5, 0, &secs);
if (ret < 0)
goto out;
stop_scan_thread();
@@ -1649,8 +1665,6 @@ static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
}
} else if (strncmp(buf, "scan", 4) == 0)
kmemleak_scan();
- else if (strncmp(buf, "clear", 5) == 0)
- kmemleak_clear();
else if (strncmp(buf, "dump=", 5) == 0)
ret = dump_str_object_info(buf + 5);
else
@@ -1672,9 +1686,19 @@ static const struct file_operations kmemleak_fops = {
.read = seq_read,
.write = kmemleak_write,
.llseek = seq_lseek,
- .release = kmemleak_release,
+ .release = seq_release,
};
+static void __kmemleak_do_cleanup(void)
+{
+ struct kmemleak_object *object;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(object, &object_list, object_list)
+ delete_object_full(object->pointer);
+ rcu_read_unlock();
+}
+
/*
* Stop the memory scanning thread and free the kmemleak internal objects if
* no previous scan thread (otherwise, kmemleak may still have some useful
@@ -1682,18 +1706,14 @@ static const struct file_operations kmemleak_fops = {
*/
static void kmemleak_do_cleanup(struct work_struct *work)
{
- struct kmemleak_object *object;
- bool cleanup = scan_thread == NULL;
-
mutex_lock(&scan_mutex);
stop_scan_thread();
- if (cleanup) {
- rcu_read_lock();
- list_for_each_entry_rcu(object, &object_list, object_list)
- delete_object_full(object->pointer);
- rcu_read_unlock();
- }
+ if (!kmemleak_found_leaks)
+ __kmemleak_do_cleanup();
+ else
+ pr_info("Kmemleak disabled without freeing internal data. "
+ "Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\"\n");
mutex_unlock(&scan_mutex);
}
@@ -1706,14 +1726,14 @@ static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);
static void kmemleak_disable(void)
{
/* atomically check whether it was already invoked */
- if (atomic_cmpxchg(&kmemleak_error, 0, 1))
+ if (cmpxchg(&kmemleak_error, 0, 1))
return;
/* stop any memory operation tracing */
- atomic_set(&kmemleak_enabled, 0);
+ kmemleak_enabled = 0;
/* check whether it is too early for a kernel thread */
- if (atomic_read(&kmemleak_initialized))
+ if (kmemleak_initialized)
schedule_work(&cleanup_work);
pr_info("Kernel memory leak detector disabled\n");
@@ -1757,7 +1777,7 @@ void __init kmemleak_init(void)
#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
if (!kmemleak_skip_disable) {
- atomic_set(&kmemleak_early_log, 0);
+ kmemleak_early_log = 0;
kmemleak_disable();
return;
}
@@ -1775,12 +1795,12 @@ void __init kmemleak_init(void)
/* the kernel is still in UP mode, so disabling the IRQs is enough */
local_irq_save(flags);
- atomic_set(&kmemleak_early_log, 0);
- if (atomic_read(&kmemleak_error)) {
+ kmemleak_early_log = 0;
+ if (kmemleak_error) {
local_irq_restore(flags);
return;
} else
- atomic_set(&kmemleak_enabled, 1);
+ kmemleak_enabled = 1;
local_irq_restore(flags);
/*
@@ -1824,9 +1844,9 @@ void __init kmemleak_init(void)
log->op_type);
}
- if (atomic_read(&kmemleak_warning)) {
+ if (kmemleak_warning) {
print_log_trace(log);
- atomic_set(&kmemleak_warning, 0);
+ kmemleak_warning = 0;
}
}
}
@@ -1838,9 +1858,9 @@ static int __init kmemleak_late_init(void)
{
struct dentry *dentry;
- atomic_set(&kmemleak_initialized, 1);
+ kmemleak_initialized = 1;
- if (atomic_read(&kmemleak_error)) {
+ if (kmemleak_error) {
/*
* Some error occurred and kmemleak was disabled. There is a
* small chance that kmemleak_disable() was called immediately
diff --git a/mm/ksm.c b/mm/ksm.c
index b6afe0c440d8..68710e80994a 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -444,7 +444,7 @@ static void break_cow(struct rmap_item *rmap_item)
static struct page *page_trans_compound_anon(struct page *page)
{
if (PageTransCompound(page)) {
- struct page *head = compound_trans_head(page);
+ struct page *head = compound_head(page);
/*
* head may actually be splitted and freed from under
* us but it's ok here.
@@ -1891,21 +1891,24 @@ struct page *ksm_might_need_to_copy(struct page *page,
return new_page;
}
-int page_referenced_ksm(struct page *page, struct mem_cgroup *memcg,
- unsigned long *vm_flags)
+int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
{
struct stable_node *stable_node;
struct rmap_item *rmap_item;
- unsigned int mapcount = page_mapcount(page);
- int referenced = 0;
+ int ret = SWAP_AGAIN;
int search_new_forks = 0;
- VM_BUG_ON(!PageKsm(page));
- VM_BUG_ON(!PageLocked(page));
+ VM_BUG_ON_PAGE(!PageKsm(page), page);
+
+ /*
+ * Rely on the page lock to protect against concurrent modifications
+ * to that page's node of the stable tree.
+ */
+ VM_BUG_ON_PAGE(!PageLocked(page), page);
stable_node = page_stable_node(page);
if (!stable_node)
- return 0;
+ return ret;
again:
hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
struct anon_vma *anon_vma = rmap_item->anon_vma;
@@ -1928,113 +1931,16 @@ again:
if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
continue;
- if (memcg && !mm_match_cgroup(vma->vm_mm, memcg))
- continue;
-
- referenced += page_referenced_one(page, vma,
- rmap_item->address, &mapcount, vm_flags);
- if (!search_new_forks || !mapcount)
- break;
- }
- anon_vma_unlock_read(anon_vma);
- if (!mapcount)
- goto out;
- }
- if (!search_new_forks++)
- goto again;
-out:
- return referenced;
-}
-
-int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
-{
- struct stable_node *stable_node;
- struct rmap_item *rmap_item;
- int ret = SWAP_AGAIN;
- int search_new_forks = 0;
-
- VM_BUG_ON(!PageKsm(page));
- VM_BUG_ON(!PageLocked(page));
-
- stable_node = page_stable_node(page);
- if (!stable_node)
- return SWAP_FAIL;
-again:
- hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
- struct anon_vma *anon_vma = rmap_item->anon_vma;
- struct anon_vma_chain *vmac;
- struct vm_area_struct *vma;
-
- anon_vma_lock_read(anon_vma);
- anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
- 0, ULONG_MAX) {
- vma = vmac->vma;
- if (rmap_item->address < vma->vm_start ||
- rmap_item->address >= vma->vm_end)
- continue;
- /*
- * Initially we examine only the vma which covers this
- * rmap_item; but later, if there is still work to do,
- * we examine covering vmas in other mms: in case they
- * were forked from the original since ksmd passed.
- */
- if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
+ if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
continue;
- ret = try_to_unmap_one(page, vma,
- rmap_item->address, flags);
- if (ret != SWAP_AGAIN || !page_mapped(page)) {
+ ret = rwc->rmap_one(page, vma,
+ rmap_item->address, rwc->arg);
+ if (ret != SWAP_AGAIN) {
anon_vma_unlock_read(anon_vma);
goto out;
}
- }
- anon_vma_unlock_read(anon_vma);
- }
- if (!search_new_forks++)
- goto again;
-out:
- return ret;
-}
-
-#ifdef CONFIG_MIGRATION
-int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
- struct vm_area_struct *, unsigned long, void *), void *arg)
-{
- struct stable_node *stable_node;
- struct rmap_item *rmap_item;
- int ret = SWAP_AGAIN;
- int search_new_forks = 0;
-
- VM_BUG_ON(!PageKsm(page));
- VM_BUG_ON(!PageLocked(page));
-
- stable_node = page_stable_node(page);
- if (!stable_node)
- return ret;
-again:
- hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
- struct anon_vma *anon_vma = rmap_item->anon_vma;
- struct anon_vma_chain *vmac;
- struct vm_area_struct *vma;
-
- anon_vma_lock_read(anon_vma);
- anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
- 0, ULONG_MAX) {
- vma = vmac->vma;
- if (rmap_item->address < vma->vm_start ||
- rmap_item->address >= vma->vm_end)
- continue;
- /*
- * Initially we examine only the vma which covers this
- * rmap_item; but later, if there is still work to do,
- * we examine covering vmas in other mms: in case they
- * were forked from the original since ksmd passed.
- */
- if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
- continue;
-
- ret = rmap_one(page, vma, rmap_item->address, arg);
- if (ret != SWAP_AGAIN) {
+ if (rwc->done && rwc->done(page)) {
anon_vma_unlock_read(anon_vma);
goto out;
}
@@ -2047,17 +1953,18 @@ out:
return ret;
}
+#ifdef CONFIG_MIGRATION
void ksm_migrate_page(struct page *newpage, struct page *oldpage)
{
struct stable_node *stable_node;
- VM_BUG_ON(!PageLocked(oldpage));
- VM_BUG_ON(!PageLocked(newpage));
- VM_BUG_ON(newpage->mapping != oldpage->mapping);
+ VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
+ VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
+ VM_BUG_ON_PAGE(newpage->mapping != oldpage->mapping, newpage);
stable_node = page_stable_node(newpage);
if (stable_node) {
- VM_BUG_ON(stable_node->kpfn != page_to_pfn(oldpage));
+ VM_BUG_ON_PAGE(stable_node->kpfn != page_to_pfn(oldpage), oldpage);
stable_node->kpfn = page_to_pfn(newpage);
/*
* newpage->mapping was set in advance; now we need smp_wmb()
@@ -2194,7 +2101,7 @@ static ssize_t sleep_millisecs_store(struct kobject *kobj,
unsigned long msecs;
int err;
- err = strict_strtoul(buf, 10, &msecs);
+ err = kstrtoul(buf, 10, &msecs);
if (err || msecs > UINT_MAX)
return -EINVAL;
@@ -2217,7 +2124,7 @@ static ssize_t pages_to_scan_store(struct kobject *kobj,
int err;
unsigned long nr_pages;
- err = strict_strtoul(buf, 10, &nr_pages);
+ err = kstrtoul(buf, 10, &nr_pages);
if (err || nr_pages > UINT_MAX)
return -EINVAL;
@@ -2239,7 +2146,7 @@ static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
int err;
unsigned long flags;
- err = strict_strtoul(buf, 10, &flags);
+ err = kstrtoul(buf, 10, &flags);
if (err || flags > UINT_MAX)
return -EINVAL;
if (flags > KSM_RUN_UNMERGE)
@@ -2309,8 +2216,8 @@ static ssize_t merge_across_nodes_store(struct kobject *kobj,
* Allocate stable and unstable together:
* MAXSMP NODES_SHIFT 10 will use 16kB.
*/
- buf = kcalloc(nr_node_ids + nr_node_ids,
- sizeof(*buf), GFP_KERNEL | __GFP_ZERO);
+ buf = kcalloc(nr_node_ids + nr_node_ids, sizeof(*buf),
+ GFP_KERNEL);
/* Let us assume that RB_ROOT is NULL is zero */
if (!buf)
err = -ENOMEM;
@@ -2438,4 +2345,4 @@ out_free:
out:
return err;
}
-module_init(ksm_init)
+subsys_initcall(ksm_init);
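With the hunk above, the special-purpose walkers page_referenced_ksm(), try_to_unmap_ksm() and the MIGRATION-only rmap_walk_ksm() collapse into a single rmap_walk_ksm() driven by a caller-supplied rmap_walk_control. A hedged sketch of the shape of that structure, reconstructed only from the callbacks the new code dereferences (the authoritative definition lives in include/linux/rmap.h):

/* sketch only: mirrors the fields rmap_walk_ksm() uses above */
struct rmap_walk_control {
        void *arg;              /* opaque cookie handed to every callback */
        /* per-VMA work; returning anything but SWAP_AGAIN stops the walk */
        int (*rmap_one)(struct page *page, struct vm_area_struct *vma,
                        unsigned long addr, void *arg);
        /* optional early-exit check, e.g. "page is no longer mapped" */
        int (*done)(struct page *page);
        /* optional filter for VMAs the caller is not interested in */
        bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
};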
diff --git a/mm/list_lru.c b/mm/list_lru.c
new file mode 100644
index 000000000000..f1a0db194173
--- /dev/null
+++ b/mm/list_lru.c
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
+ * Authors: David Chinner and Glauber Costa
+ *
+ * Generic LRU infrastructure
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/list_lru.h>
+#include <linux/slab.h>
+
+bool list_lru_add(struct list_lru *lru, struct list_head *item)
+{
+ int nid = page_to_nid(virt_to_page(item));
+ struct list_lru_node *nlru = &lru->node[nid];
+
+ spin_lock(&nlru->lock);
+ WARN_ON_ONCE(nlru->nr_items < 0);
+ if (list_empty(item)) {
+ list_add_tail(item, &nlru->list);
+ if (nlru->nr_items++ == 0)
+ node_set(nid, lru->active_nodes);
+ spin_unlock(&nlru->lock);
+ return true;
+ }
+ spin_unlock(&nlru->lock);
+ return false;
+}
+EXPORT_SYMBOL_GPL(list_lru_add);
+
+bool list_lru_del(struct list_lru *lru, struct list_head *item)
+{
+ int nid = page_to_nid(virt_to_page(item));
+ struct list_lru_node *nlru = &lru->node[nid];
+
+ spin_lock(&nlru->lock);
+ if (!list_empty(item)) {
+ list_del_init(item);
+ if (--nlru->nr_items == 0)
+ node_clear(nid, lru->active_nodes);
+ WARN_ON_ONCE(nlru->nr_items < 0);
+ spin_unlock(&nlru->lock);
+ return true;
+ }
+ spin_unlock(&nlru->lock);
+ return false;
+}
+EXPORT_SYMBOL_GPL(list_lru_del);
+
+unsigned long
+list_lru_count_node(struct list_lru *lru, int nid)
+{
+ unsigned long count = 0;
+ struct list_lru_node *nlru = &lru->node[nid];
+
+ spin_lock(&nlru->lock);
+ WARN_ON_ONCE(nlru->nr_items < 0);
+ count += nlru->nr_items;
+ spin_unlock(&nlru->lock);
+
+ return count;
+}
+EXPORT_SYMBOL_GPL(list_lru_count_node);
+
+unsigned long
+list_lru_walk_node(struct list_lru *lru, int nid, list_lru_walk_cb isolate,
+ void *cb_arg, unsigned long *nr_to_walk)
+{
+
+ struct list_lru_node *nlru = &lru->node[nid];
+ struct list_head *item, *n;
+ unsigned long isolated = 0;
+
+ spin_lock(&nlru->lock);
+restart:
+ list_for_each_safe(item, n, &nlru->list) {
+ enum lru_status ret;
+
+ /*
+ * decrement nr_to_walk first so that we don't livelock if we
+ * get stuck on large numbers of LRU_RETRY items
+ */
+ if (!*nr_to_walk)
+ break;
+ --*nr_to_walk;
+
+ ret = isolate(item, &nlru->lock, cb_arg);
+ switch (ret) {
+ case LRU_REMOVED_RETRY:
+ assert_spin_locked(&nlru->lock);
+ case LRU_REMOVED:
+ if (--nlru->nr_items == 0)
+ node_clear(nid, lru->active_nodes);
+ WARN_ON_ONCE(nlru->nr_items < 0);
+ isolated++;
+ /*
+ * If the lru lock has been dropped, our list
+ * traversal is now invalid and so we have to
+ * restart from scratch.
+ */
+ if (ret == LRU_REMOVED_RETRY)
+ goto restart;
+ break;
+ case LRU_ROTATE:
+ list_move_tail(item, &nlru->list);
+ break;
+ case LRU_SKIP:
+ break;
+ case LRU_RETRY:
+ /*
+ * The lru lock has been dropped, our list traversal is
+ * now invalid and so we have to restart from scratch.
+ */
+ assert_spin_locked(&nlru->lock);
+ goto restart;
+ default:
+ BUG();
+ }
+ }
+
+ spin_unlock(&nlru->lock);
+ return isolated;
+}
+EXPORT_SYMBOL_GPL(list_lru_walk_node);
+
+int list_lru_init_key(struct list_lru *lru, struct lock_class_key *key)
+{
+ int i;
+ size_t size = sizeof(*lru->node) * nr_node_ids;
+
+ lru->node = kzalloc(size, GFP_KERNEL);
+ if (!lru->node)
+ return -ENOMEM;
+
+ nodes_clear(lru->active_nodes);
+ for (i = 0; i < nr_node_ids; i++) {
+ spin_lock_init(&lru->node[i].lock);
+ if (key)
+ lockdep_set_class(&lru->node[i].lock, key);
+ INIT_LIST_HEAD(&lru->node[i].list);
+ lru->node[i].nr_items = 0;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(list_lru_init_key);
+
+void list_lru_destroy(struct list_lru *lru)
+{
+ kfree(lru->node);
+}
+EXPORT_SYMBOL_GPL(list_lru_destroy);
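For context, a hedged usage sketch of the list_lru API introduced above; struct my_object, my_lru and the helpers are illustrative only, and the type definitions come from include/linux/list_lru.h:

#include <linux/kernel.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_object {
        struct list_head lru;           /* linked into the per-node LRU */
        /* ... payload ... */
};

static struct list_lru my_lru;

/* called with the per-node lru lock held, see list_lru_walk_node() above */
static enum lru_status my_isolate(struct list_head *item, spinlock_t *lock,
                                  void *cb_arg)
{
        struct my_object *obj = container_of(item, struct my_object, lru);

        list_del_init(item);            /* the walker adjusts nr_items for us */
        kfree(obj);
        return LRU_REMOVED;
}

static int my_cache_init(void)
{
        /* NULL key: keep the default lock class for the per-node spinlocks */
        return list_lru_init_key(&my_lru, NULL);
}

static void my_cache_add(struct my_object *obj)
{
        list_lru_add(&my_lru, &obj->lru);       /* false if already on a list */
}

static void my_cache_shrink_node(int nid)
{
        unsigned long nr_to_walk = 32;          /* cap the work done per call */

        list_lru_walk_node(&my_lru, nid, my_isolate, NULL, &nr_to_walk);
}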
diff --git a/mm/madvise.c b/mm/madvise.c
index 7055883e6e25..a402f8fdc68e 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -42,11 +42,11 @@ static int madvise_need_mmap_write(int behavior)
* We can potentially split a vm area into separate
* areas, each area with its own behavior.
*/
-static long madvise_behavior(struct vm_area_struct * vma,
+static long madvise_behavior(struct vm_area_struct *vma,
struct vm_area_struct **prev,
unsigned long start, unsigned long end, int behavior)
{
- struct mm_struct * mm = vma->vm_mm;
+ struct mm_struct *mm = vma->vm_mm;
int error = 0;
pgoff_t pgoff;
unsigned long new_flags = vma->vm_flags;
@@ -195,7 +195,7 @@ static void force_shm_swapin_readahead(struct vm_area_struct *vma,
for (; start < end; start += PAGE_SIZE) {
index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
- page = find_get_page(mapping, index);
+ page = find_get_entry(mapping, index);
if (!radix_tree_exceptional_entry(page)) {
if (page)
page_cache_release(page);
@@ -215,8 +215,8 @@ static void force_shm_swapin_readahead(struct vm_area_struct *vma,
/*
* Schedule all required I/O operations. Do not wait for completion.
*/
-static long madvise_willneed(struct vm_area_struct * vma,
- struct vm_area_struct ** prev,
+static long madvise_willneed(struct vm_area_struct *vma,
+ struct vm_area_struct **prev,
unsigned long start, unsigned long end)
{
struct file *file = vma->vm_file;
@@ -270,8 +270,8 @@ static long madvise_willneed(struct vm_area_struct * vma,
* An interface that causes the system to free clean pages and flush
* dirty pages is already available as msync(MS_INVALIDATE).
*/
-static long madvise_dontneed(struct vm_area_struct * vma,
- struct vm_area_struct ** prev,
+static long madvise_dontneed(struct vm_area_struct *vma,
+ struct vm_area_struct **prev,
unsigned long start, unsigned long end)
{
*prev = vma;
@@ -343,29 +343,35 @@ static long madvise_remove(struct vm_area_struct *vma,
*/
static int madvise_hwpoison(int bhv, unsigned long start, unsigned long end)
{
- int ret = 0;
-
+ struct page *p;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- for (; start < end; start += PAGE_SIZE) {
- struct page *p;
- int ret = get_user_pages_fast(start, 1, 0, &p);
+ for (; start < end; start += PAGE_SIZE <<
+ compound_order(compound_head(p))) {
+ int ret;
+
+ ret = get_user_pages_fast(start, 1, 0, &p);
if (ret != 1)
return ret;
+
+ if (PageHWPoison(p)) {
+ put_page(p);
+ continue;
+ }
if (bhv == MADV_SOFT_OFFLINE) {
- printk(KERN_INFO "Soft offlining page %lx at %lx\n",
+ pr_info("Soft offlining page %#lx at %#lx\n",
page_to_pfn(p), start);
ret = soft_offline_page(p, MF_COUNT_INCREASED);
if (ret)
- break;
+ return ret;
continue;
}
- printk(KERN_INFO "Injecting memory failure for page %lx at %lx\n",
+ pr_info("Injecting memory failure for page %#lx at %#lx\n",
page_to_pfn(p), start);
/* Ignore return value for now */
memory_failure(page_to_pfn(p), 0, MF_COUNT_INCREASED);
}
- return ret;
+ return 0;
}
#endif
@@ -459,7 +465,7 @@ madvise_behavior_valid(int behavior)
SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
unsigned long end, tmp;
- struct vm_area_struct * vma, *prev;
+ struct vm_area_struct *vma, *prev;
int unmapped_error = 0;
int error = -EINVAL;
int write;
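The rewritten madvise_hwpoison() above advances by the size of the compound page it just handled and skips pages that are already poisoned. A hedged user-space sketch of exercising that path (requires CAP_SYS_ADMIN and a kernel built with CONFIG_MEMORY_FAILURE; error reporting trimmed):

#define _GNU_SOURCE
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long page = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED)
                return 1;
        memset(p, 0, page);             /* make sure the page is populated */
        /* inject a memory failure on the backing page (privileged) */
        if (madvise(p, page, MADV_HWPOISON))
                return 1;
        munmap(p, page);
        return 0;
}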
diff --git a/mm/memblock.c b/mm/memblock.c
index c5fad932fa51..e9d6ca9a01a9 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -20,6 +20,11 @@
#include <linux/seq_file.h>
#include <linux/memblock.h>
+#include <asm-generic/sections.h>
+#include <linux/io.h>
+
+#include "internal.h"
+
static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
@@ -32,10 +37,14 @@ struct memblock memblock __initdata_memblock = {
.reserved.cnt = 1, /* empty dummy entry */
.reserved.max = INIT_MEMBLOCK_REGIONS,
+ .bottom_up = false,
.current_limit = MEMBLOCK_ALLOC_ANYWHERE,
};
int memblock_debug __initdata_memblock;
+#ifdef CONFIG_MOVABLE_NODE
+bool movable_node_enabled __initdata_memblock = false;
+#endif
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;
@@ -82,33 +91,57 @@ static long __init_memblock memblock_overlaps_region(struct memblock_type *type,
return (i < type->cnt) ? i : -1;
}
-/**
- * memblock_find_in_range_node - find free area in given range and node
+/*
+ * __memblock_find_range_bottom_up - find free area utility in bottom-up
* @start: start of candidate range
* @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
* @size: size of free area to find
* @align: alignment of free area to find
- * @nid: nid of the free area to find, %MAX_NUMNODES for any node
+ * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
*
- * Find @size free area aligned to @align in the specified range and node.
+ * Utility called from memblock_find_in_range_node(), find free area bottom-up.
*
* RETURNS:
- * Found address on success, %0 on failure.
+ * Found address on success, 0 on failure.
*/
-phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
- phys_addr_t end, phys_addr_t size,
- phys_addr_t align, int nid)
+static phys_addr_t __init_memblock
+__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
+ phys_addr_t size, phys_addr_t align, int nid)
{
phys_addr_t this_start, this_end, cand;
u64 i;
- /* pump up @end */
- if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
- end = memblock.current_limit;
+ for_each_free_mem_range(i, nid, &this_start, &this_end, NULL) {
+ this_start = clamp(this_start, start, end);
+ this_end = clamp(this_end, start, end);
- /* avoid allocating the first page */
- start = max_t(phys_addr_t, start, PAGE_SIZE);
- end = max(start, end);
+ cand = round_up(this_start, align);
+ if (cand < this_end && this_end - cand >= size)
+ return cand;
+ }
+
+ return 0;
+}
+
+/**
+ * __memblock_find_range_top_down - find free area utility, in top-down
+ * @start: start of candidate range
+ * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
+ * @size: size of free area to find
+ * @align: alignment of free area to find
+ * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
+ *
+ * Utility called from memblock_find_in_range_node(), find free area top-down.
+ *
+ * RETURNS:
+ * Found address on success, 0 on failure.
+ */
+static phys_addr_t __init_memblock
+__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
+ phys_addr_t size, phys_addr_t align, int nid)
+{
+ phys_addr_t this_start, this_end, cand;
+ u64 i;
for_each_free_mem_range_reverse(i, nid, &this_start, &this_end, NULL) {
this_start = clamp(this_start, start, end);
@@ -121,10 +154,81 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
if (cand >= this_start)
return cand;
}
+
return 0;
}
/**
+ * memblock_find_in_range_node - find free area in given range and node
+ * @size: size of free area to find
+ * @align: alignment of free area to find
+ * @start: start of candidate range
+ * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
+ * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
+ *
+ * Find @size free area aligned to @align in the specified range and node.
+ *
+ * When allocation direction is bottom-up, the @start should be greater
+ * than the end of the kernel image. Otherwise, it will be trimmed. The
+ * reason is that we want the bottom-up allocation just near the kernel
+ * image so it is highly likely that the allocated memory and the kernel
+ * will reside in the same node.
+ *
+ * If bottom-up allocation fails, allocation will be retried top-down.
+ *
+ * RETURNS:
+ * Found address on success, 0 on failure.
+ */
+phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
+ phys_addr_t align, phys_addr_t start,
+ phys_addr_t end, int nid)
+{
+ int ret;
+ phys_addr_t kernel_end;
+
+ /* pump up @end */
+ if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
+ end = memblock.current_limit;
+
+ /* avoid allocating the first page */
+ start = max_t(phys_addr_t, start, PAGE_SIZE);
+ end = max(start, end);
+ kernel_end = __pa_symbol(_end);
+
+ /*
+ * try bottom-up allocation only when bottom-up mode
+ * is set and @end is above the kernel image.
+ */
+ if (memblock_bottom_up() && end > kernel_end) {
+ phys_addr_t bottom_up_start;
+
+ /* make sure we will allocate above the kernel */
+ bottom_up_start = max(start, kernel_end);
+
+ /* ok, try bottom-up allocation first */
+ ret = __memblock_find_range_bottom_up(bottom_up_start, end,
+ size, align, nid);
+ if (ret)
+ return ret;
+
+ /*
+ * we always limit bottom-up allocation above the kernel,
+ * but top-down allocation doesn't have the limit, so
+ * retrying top-down allocation may succeed when bottom-up
+ * allocation failed.
+ *
+ * bottom-up allocation is expected to fail very rarely,
+ * so we use WARN_ONCE() here to get the stack trace if
+ * a failure happens.
+ */
+ WARN_ONCE(1, "memblock: bottom-up allocation failed, "
+ "memory hotunplug may be affected\n");
+ }
+
+ return __memblock_find_range_top_down(start, end, size, align, nid);
+}
+
+/**
* memblock_find_in_range - find free area in given range
* @start: start of candidate range
* @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
@@ -134,14 +238,14 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
* Find @size free area aligned to @align in the specified range.
*
* RETURNS:
- * Found address on success, %0 on failure.
+ * Found address on success, 0 on failure.
*/
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
phys_addr_t end, phys_addr_t size,
phys_addr_t align)
{
- return memblock_find_in_range_node(start, end, size, align,
- MAX_NUMNODES);
+ return memblock_find_in_range_node(size, align, start, end,
+ NUMA_NO_NODE);
}
static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
@@ -157,10 +261,13 @@ static void __init_memblock memblock_remove_region(struct memblock_type *type, u
type->cnt = 1;
type->regions[0].base = 0;
type->regions[0].size = 0;
+ type->regions[0].flags = 0;
memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
}
}
+#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
+
phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
phys_addr_t *addr)
{
@@ -173,6 +280,20 @@ phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
memblock.reserved.max);
}
+phys_addr_t __init_memblock get_allocated_memblock_memory_regions_info(
+ phys_addr_t *addr)
+{
+ if (memblock.memory.regions == memblock_memory_init_regions)
+ return 0;
+
+ *addr = __pa(memblock.memory.regions);
+
+ return PAGE_ALIGN(sizeof(struct memblock_region) *
+ memblock.memory.max);
+}
+
+#endif
+
/**
* memblock_double_array - double the size of the memblock regions array
* @type: memblock type of the regions array being doubled
@@ -307,7 +428,8 @@ static void __init_memblock memblock_merge_regions(struct memblock_type *type)
if (this->base + this->size != next->base ||
memblock_get_region_node(this) !=
- memblock_get_region_node(next)) {
+ memblock_get_region_node(next) ||
+ this->flags != next->flags) {
BUG_ON(this->base + this->size > next->base);
i++;
continue;
@@ -327,13 +449,15 @@ static void __init_memblock memblock_merge_regions(struct memblock_type *type)
* @base: base address of the new region
* @size: size of the new region
* @nid: node id of the new region
+ * @flags: flags of the new region
*
* Insert new memblock region [@base,@base+@size) into @type at @idx.
* @type must already have extra room to accommodate the new region.
*/
static void __init_memblock memblock_insert_region(struct memblock_type *type,
int idx, phys_addr_t base,
- phys_addr_t size, int nid)
+ phys_addr_t size,
+ int nid, unsigned long flags)
{
struct memblock_region *rgn = &type->regions[idx];
@@ -341,6 +465,7 @@ static void __init_memblock memblock_insert_region(struct memblock_type *type,
memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
rgn->base = base;
rgn->size = size;
+ rgn->flags = flags;
memblock_set_region_node(rgn, nid);
type->cnt++;
type->total_size += size;
@@ -352,6 +477,7 @@ static void __init_memblock memblock_insert_region(struct memblock_type *type,
* @base: base address of the new region
* @size: size of the new region
* @nid: nid of the new region
+ * @flags: flags of the new region
*
* Add new memblock region [@base,@base+@size) into @type. The new region
* is allowed to overlap with existing ones - overlaps don't affect already
@@ -362,7 +488,8 @@ static void __init_memblock memblock_insert_region(struct memblock_type *type,
* 0 on success, -errno on failure.
*/
static int __init_memblock memblock_add_region(struct memblock_type *type,
- phys_addr_t base, phys_addr_t size, int nid)
+ phys_addr_t base, phys_addr_t size,
+ int nid, unsigned long flags)
{
bool insert = false;
phys_addr_t obase = base;
@@ -377,6 +504,7 @@ static int __init_memblock memblock_add_region(struct memblock_type *type,
WARN_ON(type->cnt != 1 || type->total_size);
type->regions[0].base = base;
type->regions[0].size = size;
+ type->regions[0].flags = flags;
memblock_set_region_node(&type->regions[0], nid);
type->total_size = size;
return 0;
@@ -407,7 +535,8 @@ repeat:
nr_new++;
if (insert)
memblock_insert_region(type, i++, base,
- rbase - base, nid);
+ rbase - base, nid,
+ flags);
}
/* area below @rend is dealt with, forget about it */
base = min(rend, end);
@@ -417,7 +546,8 @@ repeat:
if (base < end) {
nr_new++;
if (insert)
- memblock_insert_region(type, i, base, end - base, nid);
+ memblock_insert_region(type, i, base, end - base,
+ nid, flags);
}
/*
@@ -439,12 +569,13 @@ repeat:
int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
int nid)
{
- return memblock_add_region(&memblock.memory, base, size, nid);
+ return memblock_add_region(&memblock.memory, base, size, nid, 0);
}
int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
- return memblock_add_region(&memblock.memory, base, size, MAX_NUMNODES);
+ return memblock_add_region(&memblock.memory, base, size,
+ MAX_NUMNODES, 0);
}
/**
@@ -499,7 +630,8 @@ static int __init_memblock memblock_isolate_range(struct memblock_type *type,
rgn->size -= base - rbase;
type->total_size -= base - rbase;
memblock_insert_region(type, i, rbase, base - rbase,
- memblock_get_region_node(rgn));
+ memblock_get_region_node(rgn),
+ rgn->flags);
} else if (rend > end) {
/*
* @rgn intersects from above. Split and redo the
@@ -509,7 +641,8 @@ static int __init_memblock memblock_isolate_range(struct memblock_type *type,
rgn->size -= end - rbase;
type->total_size -= end - rbase;
memblock_insert_region(type, i--, rbase, end - rbase,
- memblock_get_region_node(rgn));
+ memblock_get_region_node(rgn),
+ rgn->flags);
} else {
/* @rgn is fully contained, record it */
if (!*end_rgn)
@@ -545,28 +678,89 @@ int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
memblock_dbg(" memblock_free: [%#016llx-%#016llx] %pF\n",
(unsigned long long)base,
- (unsigned long long)base + size,
+ (unsigned long long)base + size - 1,
(void *)_RET_IP_);
return __memblock_remove(&memblock.reserved, base, size);
}
-int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
+static int __init_memblock memblock_reserve_region(phys_addr_t base,
+ phys_addr_t size,
+ int nid,
+ unsigned long flags)
{
struct memblock_type *_rgn = &memblock.reserved;
- memblock_dbg("memblock_reserve: [%#016llx-%#016llx] %pF\n",
+ memblock_dbg("memblock_reserve: [%#016llx-%#016llx] flags %#02lx %pF\n",
(unsigned long long)base,
- (unsigned long long)base + size,
- (void *)_RET_IP_);
+ (unsigned long long)base + size - 1,
+ flags, (void *)_RET_IP_);
+
+ return memblock_add_region(_rgn, base, size, nid, flags);
+}
+
+int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
+{
+ return memblock_reserve_region(base, size, MAX_NUMNODES, 0);
+}
+
+/**
+ * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
+ * @base: the base phys addr of the region
+ * @size: the size of the region
+ *
+ * This function isolates the region [@base, @base + @size), and marks it with
+ * flag MEMBLOCK_HOTPLUG.
+ *
+ * Return 0 on success, -errno on failure.
+ */
+int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
+{
+ struct memblock_type *type = &memblock.memory;
+ int i, ret, start_rgn, end_rgn;
- return memblock_add_region(_rgn, base, size, MAX_NUMNODES);
+ ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
+ if (ret)
+ return ret;
+
+ for (i = start_rgn; i < end_rgn; i++)
+ memblock_set_region_flags(&type->regions[i], MEMBLOCK_HOTPLUG);
+
+ memblock_merge_regions(type);
+ return 0;
+}
+
+/**
+ * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
+ * @base: the base phys addr of the region
+ * @size: the size of the region
+ *
+ * This function isolates the region [@base, @base + @size), and clears flag
+ * MEMBLOCK_HOTPLUG for the isolated regions.
+ *
+ * Return 0 on success, -errno on failure.
+ */
+int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
+{
+ struct memblock_type *type = &memblock.memory;
+ int i, ret, start_rgn, end_rgn;
+
+ ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
+ if (ret)
+ return ret;
+
+ for (i = start_rgn; i < end_rgn; i++)
+ memblock_clear_region_flags(&type->regions[i],
+ MEMBLOCK_HOTPLUG);
+
+ memblock_merge_regions(type);
+ return 0;
}
/**
* __next_free_mem_range - next function for for_each_free_mem_range()
* @idx: pointer to u64 loop variable
- * @nid: nid: node selector, %MAX_NUMNODES for all nodes
+ * @nid: node selector, %NUMA_NO_NODE for all nodes
* @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
* @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
* @out_nid: ptr to int for nid of the range, can be %NULL
@@ -595,13 +789,16 @@ void __init_memblock __next_free_mem_range(u64 *idx, int nid,
int mi = *idx & 0xffffffff;
int ri = *idx >> 32;
+ if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
+ nid = NUMA_NO_NODE;
+
for ( ; mi < mem->cnt; mi++) {
struct memblock_region *m = &mem->regions[mi];
phys_addr_t m_start = m->base;
phys_addr_t m_end = m->base + m->size;
/* only memory regions are associated with nodes, check it */
- if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m))
+ if (nid != NUMA_NO_NODE && nid != memblock_get_region_node(m))
continue;
/* scan areas before each reservation for intersection */
@@ -642,12 +839,17 @@ void __init_memblock __next_free_mem_range(u64 *idx, int nid,
/**
* __next_free_mem_range_rev - next function for for_each_free_mem_range_reverse()
* @idx: pointer to u64 loop variable
- * @nid: nid: node selector, %MAX_NUMNODES for all nodes
+ * @nid: node selector, %NUMA_NO_NODE for all nodes
* @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
* @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
* @out_nid: ptr to int for nid of the range, can be %NULL
*
* Reverse of __next_free_mem_range().
+ *
+ * The Linux kernel cannot migrate pages used by itself. Memory hotplug users
+ * won't be able to hot-remove hotpluggable memory used by the kernel. So this
+ * function skips hotpluggable regions if needed when allocating memory for the
+ * kernel.
*/
void __init_memblock __next_free_mem_range_rev(u64 *idx, int nid,
phys_addr_t *out_start,
@@ -658,6 +860,9 @@ void __init_memblock __next_free_mem_range_rev(u64 *idx, int nid,
int mi = *idx & 0xffffffff;
int ri = *idx >> 32;
+ if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
+ nid = NUMA_NO_NODE;
+
if (*idx == (u64)ULLONG_MAX) {
mi = mem->cnt - 1;
ri = rsv->cnt;
@@ -669,7 +874,11 @@ void __init_memblock __next_free_mem_range_rev(u64 *idx, int nid,
phys_addr_t m_end = m->base + m->size;
/* only memory regions are associated with nodes, check it */
- if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m))
+ if (nid != NUMA_NO_NODE && nid != memblock_get_region_node(m))
+ continue;
+
+ /* skip hotpluggable memory regions if needed */
+ if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
continue;
/* scan areas before each reservation for intersection */
@@ -739,18 +948,18 @@ void __init_memblock __next_mem_pfn_range(int *idx, int nid,
* memblock_set_node - set node ID on memblock regions
* @base: base of area to set node ID for
* @size: size of area to set node ID for
+ * @type: memblock type to set node ID for
* @nid: node ID to set
*
- * Set the nid of memblock memory regions in [@base,@base+@size) to @nid.
+ * Set the nid of memblock @type regions in [@base,@base+@size) to @nid.
* Regions which cross the area boundaries are split as necessary.
*
* RETURNS:
* 0 on success, -errno on failure.
*/
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
- int nid)
+ struct memblock_type *type, int nid)
{
- struct memblock_type *type = &memblock.memory;
int start_rgn, end_rgn;
int i, ret;
@@ -772,13 +981,10 @@ static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
{
phys_addr_t found;
- if (WARN_ON(!align))
- align = __alignof__(long long);
-
- /* align @size to avoid excessive fragmentation on reserved array */
- size = round_up(size, align);
+ if (!align)
+ align = SMP_CACHE_BYTES;
- found = memblock_find_in_range_node(0, max_addr, size, align, nid);
+ found = memblock_find_in_range_node(size, align, 0, max_addr, nid);
if (found && !memblock_reserve(found, size))
return found;
@@ -792,7 +998,7 @@ phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int n
phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
- return memblock_alloc_base_nid(size, align, max_addr, MAX_NUMNODES);
+ return memblock_alloc_base_nid(size, align, max_addr, NUMA_NO_NODE);
}
phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
@@ -822,6 +1028,207 @@ phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, i
return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
+/**
+ * memblock_virt_alloc_internal - allocate boot memory block
+ * @size: size of memory block to be allocated in bytes
+ * @align: alignment of the region and block's size
+ * @min_addr: the lower bound of the memory region to allocate (phys address)
+ * @max_addr: the upper bound of the memory region to allocate (phys address)
+ * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
+ *
+ * The @min_addr limit is dropped if it cannot be satisfied and the allocation
+ * will fall back to memory below @min_addr. Also, the allocation may fall back
+ * to any node in the system if the specified node cannot
+ * hold the requested memory.
+ *
+ * The allocation is performed from memory region limited by
+ * memblock.current_limit if @max_addr == %BOOTMEM_ALLOC_ACCESSIBLE.
+ *
+ * The memory block is aligned on SMP_CACHE_BYTES if @align == 0.
+ *
+ * The phys address of the allocated boot memory block is converted to virtual
+ * and the allocated memory is reset to 0.
+ *
+ * In addition, the function sets min_count to 0 using kmemleak_alloc() for the
+ * allocated boot memory block, so that it is never reported as a leak.
+ *
+ * RETURNS:
+ * Virtual address of allocated memory block on success, NULL on failure.
+ */
+static void * __init memblock_virt_alloc_internal(
+ phys_addr_t size, phys_addr_t align,
+ phys_addr_t min_addr, phys_addr_t max_addr,
+ int nid)
+{
+ phys_addr_t alloc;
+ void *ptr;
+
+ if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
+ nid = NUMA_NO_NODE;
+
+ /*
+ * Detect any accidental use of these APIs after slab is ready, as at
+ * this moment memblock may be deinitialized already and its
+ * internal data may be destroyed (after execution of free_all_bootmem)
+ */
+ if (WARN_ON_ONCE(slab_is_available()))
+ return kzalloc_node(size, GFP_NOWAIT, nid);
+
+ if (!align)
+ align = SMP_CACHE_BYTES;
+
+ if (max_addr > memblock.current_limit)
+ max_addr = memblock.current_limit;
+
+again:
+ alloc = memblock_find_in_range_node(size, align, min_addr, max_addr,
+ nid);
+ if (alloc)
+ goto done;
+
+ if (nid != NUMA_NO_NODE) {
+ alloc = memblock_find_in_range_node(size, align, min_addr,
+ max_addr, NUMA_NO_NODE);
+ if (alloc)
+ goto done;
+ }
+
+ if (min_addr) {
+ min_addr = 0;
+ goto again;
+ } else {
+ goto error;
+ }
+
+done:
+ memblock_reserve(alloc, size);
+ ptr = phys_to_virt(alloc);
+ memset(ptr, 0, size);
+
+ /*
+ * The min_count is set to 0 so that bootmem allocated blocks
+ * are never reported as leaks. This is because many of these blocks
+ * are only referred to via the physical address which is not
+ * looked up by kmemleak.
+ */
+ kmemleak_alloc(ptr, size, 0, 0);
+
+ return ptr;
+
+error:
+ return NULL;
+}
+
+/**
+ * memblock_virt_alloc_try_nid_nopanic - allocate boot memory block
+ * @size: size of memory block to be allocated in bytes
+ * @align: alignment of the region and block's size
+ * @min_addr: the lower bound of the memory region from where the allocation
+ * is preferred (phys address)
+ * @max_addr: the upper bound of the memory region from where the allocation
+ * is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
+ * allocate only from memory limited by memblock.current_limit value
+ * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
+ *
+ * Public version of _memblock_virt_alloc_try_nid_nopanic() which provides
+ * additional debug information (including caller info), if enabled.
+ *
+ * RETURNS:
+ * Virtual address of allocated memory block on success, NULL on failure.
+ */
+void * __init memblock_virt_alloc_try_nid_nopanic(
+ phys_addr_t size, phys_addr_t align,
+ phys_addr_t min_addr, phys_addr_t max_addr,
+ int nid)
+{
+ memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
+ __func__, (u64)size, (u64)align, nid, (u64)min_addr,
+ (u64)max_addr, (void *)_RET_IP_);
+ return memblock_virt_alloc_internal(size, align, min_addr,
+ max_addr, nid);
+}
+
+/**
+ * memblock_virt_alloc_try_nid - allocate boot memory block with panicking
+ * @size: size of memory block to be allocated in bytes
+ * @align: alignment of the region and block's size
+ * @min_addr: the lower bound of the memory region from where the allocation
+ * is preferred (phys address)
+ * @max_addr: the upper bound of the memory region from where the allocation
+ * is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
+ * allocate only from memory limited by memblock.current_limit value
+ * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
+ *
+ * Public panicking version of _memblock_virt_alloc_try_nid_nopanic()
+ * which provides debug information (including caller info), if enabled,
+ * and panics if the request cannot be satisfied.
+ *
+ * RETURNS:
+ * Virtual address of allocated memory block on success, NULL on failure.
+ */
+void * __init memblock_virt_alloc_try_nid(
+ phys_addr_t size, phys_addr_t align,
+ phys_addr_t min_addr, phys_addr_t max_addr,
+ int nid)
+{
+ void *ptr;
+
+ memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
+ __func__, (u64)size, (u64)align, nid, (u64)min_addr,
+ (u64)max_addr, (void *)_RET_IP_);
+ ptr = memblock_virt_alloc_internal(size, align,
+ min_addr, max_addr, nid);
+ if (ptr)
+ return ptr;
+
+ panic("%s: Failed to allocate %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx\n",
+ __func__, (u64)size, (u64)align, nid, (u64)min_addr,
+ (u64)max_addr);
+ return NULL;
+}
+
+/**
+ * __memblock_free_early - free boot memory block
+ * @base: phys starting address of the boot memory block
+ * @size: size of the boot memory block in bytes
+ *
+ * Free boot memory block previously allocated by memblock_virt_alloc_xx() API.
+ * The freed memory will not be released to the buddy allocator.
+ */
+void __init __memblock_free_early(phys_addr_t base, phys_addr_t size)
+{
+ memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
+ __func__, (u64)base, (u64)base + size - 1,
+ (void *)_RET_IP_);
+ kmemleak_free_part(__va(base), size);
+ __memblock_remove(&memblock.reserved, base, size);
+}
+
+/*
+ * __memblock_free_late - free bootmem block pages directly to buddy allocator
+ * @base: phys starting address of the boot memory block
+ * @size: size of the boot memory block in bytes
+ *
+ * This is only useful when the bootmem allocator has already been torn
+ * down, but we are still initializing the system. Pages are released directly
+ * to the buddy allocator, no bootmem metadata is updated because it is gone.
+ */
+void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
+{
+ u64 cursor, end;
+
+ memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
+ __func__, (u64)base, (u64)base + size - 1,
+ (void *)_RET_IP_);
+ kmemleak_free_part(__va(base), size);
+ cursor = PFN_UP(base);
+ end = PFN_DOWN(base + size);
+
+ for (; cursor < end; cursor++) {
+ __free_pages_bootmem(pfn_to_page(cursor), 0);
+ totalram_pages++;
+ }
+}
/*
* Remaining API functions
@@ -846,7 +1253,7 @@ phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
pages += end_pfn - start_pfn;
}
- return (phys_addr_t)pages << PAGE_SHIFT;
+ return PFN_PHYS(pages);
}
/* lowest address */
@@ -864,16 +1271,14 @@ phys_addr_t __init_memblock memblock_end_of_DRAM(void)
void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
- unsigned long i;
phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;
+ struct memblock_region *r;
if (!limit)
return;
/* find out max address */
- for (i = 0; i < memblock.memory.cnt; i++) {
- struct memblock_region *r = &memblock.memory.regions[i];
-
+ for_each_memblock(memory, r) {
if (limit <= r->size) {
max_addr = r->base + limit;
break;
@@ -914,6 +1319,24 @@ int __init_memblock memblock_is_memory(phys_addr_t addr)
return memblock_search(&memblock.memory, addr) != -1;
}
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
+ unsigned long *start_pfn, unsigned long *end_pfn)
+{
+ struct memblock_type *type = &memblock.memory;
+ int mid = memblock_search(type, PFN_PHYS(pfn));
+
+ if (mid == -1)
+ return -1;
+
+ *start_pfn = type->regions[mid].base >> PAGE_SHIFT;
+ *end_pfn = (type->regions[mid].base + type->regions[mid].size)
+ >> PAGE_SHIFT;
+
+ return type->regions[mid].nid;
+}
+#endif
+
/**
* memblock_is_region_memory - check if a region is a subset of memory
* @base: base of region to check
@@ -954,13 +1377,12 @@ int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t si
void __init_memblock memblock_trim_memory(phys_addr_t align)
{
- int i;
phys_addr_t start, end, orig_start, orig_end;
- struct memblock_type *mem = &memblock.memory;
+ struct memblock_region *r;
- for (i = 0; i < mem->cnt; i++) {
- orig_start = mem->regions[i].base;
- orig_end = mem->regions[i].base + mem->regions[i].size;
+ for_each_memblock(memory, r) {
+ orig_start = r->base;
+ orig_end = r->base + r->size;
start = round_up(orig_start, align);
end = round_down(orig_end, align);
@@ -968,11 +1390,12 @@ void __init_memblock memblock_trim_memory(phys_addr_t align)
continue;
if (start < end) {
- mem->regions[i].base = start;
- mem->regions[i].size = end - start;
+ r->base = start;
+ r->size = end - start;
} else {
- memblock_remove_region(mem, i);
- i--;
+ memblock_remove_region(&memblock.memory,
+ r - memblock.memory.regions);
+ r--;
}
}
}
@@ -982,9 +1405,15 @@ void __init_memblock memblock_set_current_limit(phys_addr_t limit)
memblock.current_limit = limit;
}
+phys_addr_t __init_memblock memblock_get_current_limit(void)
+{
+ return memblock.current_limit;
+}
+
static void __init_memblock memblock_dump(struct memblock_type *type, char *name)
{
unsigned long long base, size;
+ unsigned long flags;
int i;
pr_info(" %s.cnt = 0x%lx\n", name, type->cnt);
@@ -995,13 +1424,14 @@ static void __init_memblock memblock_dump(struct memblock_type *type, char *name
base = rgn->base;
size = rgn->size;
+ flags = rgn->flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
if (memblock_get_region_node(rgn) != MAX_NUMNODES)
snprintf(nid_buf, sizeof(nid_buf), " on node %d",
memblock_get_region_node(rgn));
#endif
- pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes%s\n",
- name, i, base, base + size - 1, size, nid_buf);
+ pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes%s flags: %#lx\n",
+ name, i, base, base + size - 1, size, nid_buf, flags);
}
}
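Taken together, the memblock changes above add flag-carrying regions, MEMBLOCK_HOTPLUG marking and a zeroing, virtual-address boot allocator. A hedged sketch of an early, pre-slab allocation through the new interface; the helper name and call site are made up, and BOOTMEM_ALLOC_ACCESSIBLE limits the search to memblock.current_limit as the kerneldoc above states:

#include <linux/bootmem.h>
#include <linux/memblock.h>

static void * __init example_early_table(phys_addr_t size, int nid)
{
        void *table;

        /* nopanic variant: returns NULL instead of panicking on failure */
        table = memblock_virt_alloc_try_nid_nopanic(size, SMP_CACHE_BYTES,
                                                    0 /* min_addr */,
                                                    BOOTMEM_ALLOC_ACCESSIBLE,
                                                    nid);
        if (!table)
                return NULL;
        /* the block comes back zeroed and already memblock_reserve()d */
        return table;
}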
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 194721839cf5..5177c6d4a2dd 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -45,26 +45,29 @@
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
+#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
-#include <linux/vmalloc.h>
#include <linux/vmpressure.h>
#include <linux/mm_inline.h>
#include <linux/page_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
+#include <linux/lockdep.h>
+#include <linux/file.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include <net/tcp_memcontrol.h>
+#include "slab.h"
#include <asm/uaccess.h>
#include <trace/events/vmscan.h>
-struct cgroup_subsys mem_cgroup_subsys __read_mostly;
-EXPORT_SYMBOL(mem_cgroup_subsys);
+struct cgroup_subsys memory_cgrp_subsys __read_mostly;
+EXPORT_SYMBOL(memory_cgrp_subsys);
#define MEM_CGROUP_RECLAIM_RETRIES 5
static struct mem_cgroup *root_mem_cgroup __read_mostly;
@@ -85,26 +88,12 @@ static int really_do_swap_account __initdata = 0;
#endif
-/*
- * Statistics for memory cgroup.
- */
-enum mem_cgroup_stat_index {
- /*
- * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
- */
- MEM_CGROUP_STAT_CACHE, /* # of pages charged as cache */
- MEM_CGROUP_STAT_RSS, /* # of pages charged as anon rss */
- MEM_CGROUP_STAT_RSS_HUGE, /* # of pages charged as anon huge */
- MEM_CGROUP_STAT_FILE_MAPPED, /* # of pages charged as file rss */
- MEM_CGROUP_STAT_SWAP, /* # of pages, swapped out */
- MEM_CGROUP_STAT_NSTATS,
-};
-
static const char * const mem_cgroup_stat_names[] = {
"cache",
"rss",
"rss_huge",
"mapped_file",
+ "writeback",
"swap",
};
@@ -160,7 +149,7 @@ struct mem_cgroup_reclaim_iter {
* matches memcg->dead_count of the hierarchy root group.
*/
struct mem_cgroup *last_visited;
- unsigned long last_dead_count;
+ int last_dead_count;
/* scan generation, increased every round-trip */
unsigned int generation;
@@ -187,10 +176,6 @@ struct mem_cgroup_per_node {
struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};
-struct mem_cgroup_lru_info {
- struct mem_cgroup_per_node *nodeinfo[0];
-};
-
/*
* Cgroups above their limits are maintained in a RB-Tree, independent of
* their hierarchy representation
@@ -243,6 +228,46 @@ struct mem_cgroup_eventfd_list {
struct eventfd_ctx *eventfd;
};
+/*
+ * cgroup_event represents events which userspace wants to receive.
+ */
+struct mem_cgroup_event {
+ /*
+ * memcg which the event belongs to.
+ */
+ struct mem_cgroup *memcg;
+ /*
+ * eventfd to signal userspace about the event.
+ */
+ struct eventfd_ctx *eventfd;
+ /*
+ * Each of these stored in a list by the cgroup.
+ */
+ struct list_head list;
+ /*
+ * register_event() callback will be used to add a new userspace
+ * waiter for changes related to this event. Use eventfd_signal()
+ * on eventfd to send notification to userspace.
+ */
+ int (*register_event)(struct mem_cgroup *memcg,
+ struct eventfd_ctx *eventfd, const char *args);
+ /*
+ * unregister_event() callback will be called when userspace closes
+ * the eventfd or on cgroup removal. This callback must be set
+ * if you want to provide notification functionality.
+ */
+ void (*unregister_event)(struct mem_cgroup *memcg,
+ struct eventfd_ctx *eventfd);
+ /*
+ * All fields below needed to unregister event when
+ * userspace closes eventfd.
+ */
+ poll_table pt;
+ wait_queue_head_t *wqh;
+ wait_queue_t wait;
+ struct work_struct remove;
+};
+
static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
@@ -267,28 +292,10 @@ struct mem_cgroup {
/* vmpressure notifications */
struct vmpressure vmpressure;
- union {
- /*
- * the counter to account for mem+swap usage.
- */
- struct res_counter memsw;
-
- /*
- * rcu_freeing is used only when freeing struct mem_cgroup,
- * so put it into a union to avoid wasting more memory.
- * It must be disjoint from the css field. It could be
- * in a union with the res field, but res plays a much
- * larger part in mem_cgroup life than memsw, and might
- * be of interest, even at time of free, when debugging.
- * So share rcu_head with the less interesting memsw.
- */
- struct rcu_head rcu_freeing;
- /*
- * We also need some space for a worker in deferred freeing.
- * By the time we call it, rcu_freeing is no longer in use.
- */
- struct work_struct work_freeing;
- };
+ /*
+ * the counter to account for mem+swap usage.
+ */
+ struct res_counter memsw;
/*
* the counter to account for kernel memory usage.
@@ -302,8 +309,7 @@ struct mem_cgroup {
bool oom_lock;
atomic_t under_oom;
-
- atomic_t refcnt;
+ atomic_t oom_wakeups;
int swappiness;
/* OOM-Killer disable */
@@ -328,7 +334,7 @@ struct mem_cgroup {
* Should we move charges of a task when a task is moved into this
* mem_cgroup ? And what type of charges should we move ?
*/
- unsigned long move_charge_at_immigrate;
+ unsigned long move_charge_at_immigrate;
/*
* set > 0 if pages under this cgroup are moving to other cgroup.
*/
@@ -348,7 +354,7 @@ struct mem_cgroup {
atomic_t dead_count;
#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
- struct tcp_memcontrol tcp_mem;
+ struct cg_proto tcp_mem;
#endif
#if defined(CONFIG_MEMCG_KMEM)
/* analogous to slab_common's slab_caches list. per-memcg */
@@ -366,33 +372,20 @@ struct mem_cgroup {
atomic_t numainfo_updating;
#endif
- /*
- * Per cgroup active and inactive list, similar to the
- * per zone LRU lists.
- *
- * WARNING: This has to be the last element of the struct. Don't
- * add new fields after this point.
- */
- struct mem_cgroup_lru_info info;
-};
+ /* List of events which userspace wants to receive */
+ struct list_head event_list;
+ spinlock_t event_list_lock;
-static size_t memcg_size(void)
-{
- return sizeof(struct mem_cgroup) +
- nr_node_ids * sizeof(struct mem_cgroup_per_node);
-}
+ struct mem_cgroup_per_node *nodeinfo[0];
+ /* WARNING: nodeinfo must be the last member here */
+};
/* internal only representation about the status of kmem accounting. */
enum {
- KMEM_ACCOUNTED_ACTIVE = 0, /* accounted by this cgroup itself */
- KMEM_ACCOUNTED_ACTIVATED, /* static key enabled. */
+ KMEM_ACCOUNTED_ACTIVE, /* accounted by this cgroup itself */
KMEM_ACCOUNTED_DEAD, /* dead memcg with pending kmem charges */
};
-/* We account when limit is on, but only after call sites are patched */
-#define KMEM_ACCOUNTED_MASK \
- ((1 << KMEM_ACCOUNTED_ACTIVE) | (1 << KMEM_ACCOUNTED_ACTIVATED))
-
#ifdef CONFIG_MEMCG_KMEM
static inline void memcg_kmem_set_active(struct mem_cgroup *memcg)
{
@@ -404,18 +397,13 @@ static bool memcg_kmem_is_active(struct mem_cgroup *memcg)
return test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags);
}
-static void memcg_kmem_set_activated(struct mem_cgroup *memcg)
-{
- set_bit(KMEM_ACCOUNTED_ACTIVATED, &memcg->kmem_account_flags);
-}
-
-static void memcg_kmem_clear_activated(struct mem_cgroup *memcg)
-{
- clear_bit(KMEM_ACCOUNTED_ACTIVATED, &memcg->kmem_account_flags);
-}
-
static void memcg_kmem_mark_dead(struct mem_cgroup *memcg)
{
+ /*
+ * Our caller must use css_get() first, because memcg_uncharge_kmem()
+ * will call css_put() if it sees the memcg is dead.
+ */
+ smp_wmb();
if (test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags))
set_bit(KMEM_ACCOUNTED_DEAD, &memcg->kmem_account_flags);
}
@@ -508,13 +496,9 @@ enum res_type {
*/
static DEFINE_MUTEX(memcg_create_mutex);
-static void mem_cgroup_get(struct mem_cgroup *memcg);
-static void mem_cgroup_put(struct mem_cgroup *memcg);
-
-static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *s)
{
- return container_of(s, struct mem_cgroup, css);
+ return s ? container_of(s, struct mem_cgroup, css) : NULL;
}
/* Some nice accessors for the vmpressure. */
@@ -530,14 +514,32 @@ struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
}
-struct vmpressure *css_to_vmpressure(struct cgroup_subsys_state *css)
+static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
- return &mem_cgroup_from_css(css)->vmpressure;
+ return (memcg == root_mem_cgroup);
}
-static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
+/*
+ * We restrict the id to the range [1, 65535], so it can fit into
+ * an unsigned short.
+ */
+#define MEM_CGROUP_ID_MAX USHRT_MAX
+
+static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
- return (memcg == root_mem_cgroup);
+ /*
+ * The ID of the root cgroup is 0, but memcg treats 0 as an
+ * invalid ID, so we return (cgroup_id + 1).
+ */
+ return memcg->css.cgroup->id + 1;
+}
+
+static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
+{
+ struct cgroup_subsys_state *css;
+
+ css = css_from_id(id - 1, &memory_cgrp_subsys);
+ return mem_cgroup_from_css(css);
}
/* Writing them here to avoid exposing memcg's inner layout */
@@ -561,15 +563,15 @@ void sock_update_memcg(struct sock *sk)
*/
if (sk->sk_cgrp) {
BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg));
- mem_cgroup_get(sk->sk_cgrp->memcg);
+ css_get(&sk->sk_cgrp->memcg->css);
return;
}
rcu_read_lock();
memcg = mem_cgroup_from_task(current);
cg_proto = sk->sk_prot->proto_cgroup(memcg);
- if (!mem_cgroup_is_root(memcg) && memcg_proto_active(cg_proto)) {
- mem_cgroup_get(memcg);
+ if (!mem_cgroup_is_root(memcg) &&
+ memcg_proto_active(cg_proto) && css_tryget(&memcg->css)) {
sk->sk_cgrp = cg_proto;
}
rcu_read_unlock();
@@ -583,7 +585,7 @@ void sock_release_memcg(struct sock *sk)
struct mem_cgroup *memcg;
WARN_ON(!sk->sk_cgrp->memcg);
memcg = sk->sk_cgrp->memcg;
- mem_cgroup_put(memcg);
+ css_put(&sk->sk_cgrp->memcg->css);
}
}
@@ -592,13 +594,13 @@ struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
if (!memcg || mem_cgroup_is_root(memcg))
return NULL;
- return &memcg->tcp_mem.cg_proto;
+ return &memcg->tcp_mem;
}
EXPORT_SYMBOL(tcp_proto_cgroup);
static void disarm_sock_keys(struct mem_cgroup *memcg)
{
- if (!memcg_proto_activated(&memcg->tcp_mem.cg_proto))
+ if (!memcg_proto_activated(&memcg->tcp_mem))
return;
static_key_slow_dec(&memcg_socket_limit_enabled);
}
@@ -611,16 +613,11 @@ static void disarm_sock_keys(struct mem_cgroup *memcg)
#ifdef CONFIG_MEMCG_KMEM
/*
* This will be the memcg's index in each cache's ->memcg_params->memcg_caches.
- * There are two main reasons for not using the css_id for this:
- * 1) this works better in sparse environments, where we have a lot of memcgs,
- * but only a few kmem-limited. Or also, if we have, for instance, 200
- * memcgs, and none but the 200th is kmem-limited, we'd have to have a
- * 200 entry array for that.
- *
- * 2) In order not to violate the cgroup API, we would like to do all memory
- * allocation in ->create(). At that point, we haven't yet allocated the
- * css_id. Having a separate index prevents us from messing with the cgroup
- * core for this
+ * The main reason for not using cgroup id for this:
+ * this works better in sparse environments, where we have a lot of memcgs,
+ * but only a few kmem-limited. Or also, if we have, for instance, 200
+ * memcgs, and none but the 200th is kmem-limited, we'd have to have a
+ * 200 entry array for that.
*
* The current size of the caches array is stored in
* memcg_limited_groups_array_size. It will double each time we have to
@@ -635,14 +632,14 @@ int memcg_limited_groups_array_size;
* cgroups is a reasonable guess. In the future, it could be a parameter or
* tunable, but that is strictly not necessary.
*
- * MAX_SIZE should be as large as the number of css_ids. Ideally, we could get
+ * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
* this constant directly from cgroup, but it is understandable that this is
* better kept as an internal representation in cgroup.c. In any case, the
- * css_id space is not getting any smaller, and we don't have to necessarily
+ * cgrp_id space is not getting any smaller, and we don't have to necessarily
* increase ours as well if it increases.
*/
#define MEMCG_CACHES_MIN_SIZE 4
-#define MEMCG_CACHES_MAX_SIZE 65535
+#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX
/*
* A lot of the calls to the cache allocation functions are expected to be
@@ -683,7 +680,7 @@ static struct mem_cgroup_per_zone *
mem_cgroup_zoneinfo(struct mem_cgroup *memcg, int nid, int zid)
{
VM_BUG_ON((unsigned)nid >= nr_node_ids);
- return &memcg->info.nodeinfo[nid]->zoneinfo[zid];
+ return &memcg->nodeinfo[nid]->zoneinfo[zid];
}
struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg)
@@ -908,6 +905,7 @@ static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
unsigned long val = 0;
int cpu;
+ get_online_cpus();
for_each_online_cpu(cpu)
val += per_cpu(memcg->stat->events[idx], cpu);
#ifdef CONFIG_HOTPLUG_CPU
@@ -915,6 +913,7 @@ static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
val += memcg->nocpu_base.events[idx];
spin_unlock(&memcg->pcp_counter_lock);
#endif
+ put_online_cpus();
return val;
}
@@ -922,8 +921,6 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
struct page *page,
bool anon, int nr_pages)
{
- preempt_disable();
-
/*
* Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
* counted as CACHE even if it's on ANON LRU.
@@ -948,8 +945,6 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
}
__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
-
- preempt_enable();
}
unsigned long
@@ -1063,12 +1058,6 @@ static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
preempt_enable();
}
-struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
-{
- return mem_cgroup_from_css(
- cgroup_subsys_state(cont, mem_cgroup_subsys_id));
-}
-
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
/*
@@ -1079,25 +1068,27 @@ struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
if (unlikely(!p))
return NULL;
- return mem_cgroup_from_css(task_subsys_state(p, mem_cgroup_subsys_id));
+ return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
}
-struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
+static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
struct mem_cgroup *memcg = NULL;
- if (!mm)
- return NULL;
- /*
- * Because we have no locks, mm->owner's may be being moved to other
- * cgroup. We use css_tryget() here even if this looks
- * pessimistic (rather than adding locks here).
- */
rcu_read_lock();
do {
- memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
- if (unlikely(!memcg))
- break;
+ /*
+ * Page cache insertions can happen without an
+ * actual mm context, e.g. during disk probing
+ * on boot, loopback IO, acct() writes etc.
+ */
+ if (unlikely(!mm))
+ memcg = root_mem_cgroup;
+ else {
+ memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
+ if (unlikely(!memcg))
+ memcg = root_mem_cgroup;
+ }
} while (!css_tryget(&memcg->css));
rcu_read_unlock();
return memcg;
@@ -1112,20 +1103,11 @@ struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
static struct mem_cgroup *__mem_cgroup_iter_next(struct mem_cgroup *root,
struct mem_cgroup *last_visited)
{
- struct cgroup *prev_cgroup, *next_cgroup;
-
- /*
- * Root is not visited by cgroup iterators so it needs an
- * explicit visit.
- */
- if (!last_visited)
- return root;
+ struct cgroup_subsys_state *prev_css, *next_css;
- prev_cgroup = (last_visited == root) ? NULL
- : last_visited->css.cgroup;
+ prev_css = last_visited ? &last_visited->css : NULL;
skip_node:
- next_cgroup = cgroup_next_descendant_pre(
- prev_cgroup, root->css.cgroup);
+ next_css = css_next_descendant_pre(prev_css, &root->css);
/*
* Even if we found a group we have to make sure it is
@@ -1133,21 +1115,89 @@ skip_node:
* skipped and we should continue the tree walk.
* last_visited css is safe to use because it is
* protected by css_get and the tree walk is rcu safe.
+ *
+ * We do not take a reference on the root of the tree walk
+ * because we might race with root removal when it is the only
+ * node in the iterated hierarchy; mem_cgroup_iter would then
+ * end up in an endless loop because it expects at least one
+ * valid node to be returned. The root cannot disappear because
+ * the caller of the iterator must already hold it, so skipping
+ * the css reference is safe.
*/
- if (next_cgroup) {
- struct mem_cgroup *mem = mem_cgroup_from_cont(
- next_cgroup);
- if (css_tryget(&mem->css))
- return mem;
- else {
- prev_cgroup = next_cgroup;
- goto skip_node;
- }
+ if (next_css) {
+ if ((next_css == &root->css) ||
+ ((next_css->flags & CSS_ONLINE) && css_tryget(next_css)))
+ return mem_cgroup_from_css(next_css);
+
+ prev_css = next_css;
+ goto skip_node;
}
return NULL;
}
+static void mem_cgroup_iter_invalidate(struct mem_cgroup *root)
+{
+ /*
+ * When a group in the hierarchy below root is destroyed, the
+ * hierarchy iterator can no longer be trusted since it might
+ * have pointed to the destroyed group. Invalidate it.
+ */
+ atomic_inc(&root->dead_count);
+}
+
+static struct mem_cgroup *
+mem_cgroup_iter_load(struct mem_cgroup_reclaim_iter *iter,
+ struct mem_cgroup *root,
+ int *sequence)
+{
+ struct mem_cgroup *position = NULL;
+ /*
+ * A cgroup destruction happens in two stages: offlining and
+ * release. They are separated by a RCU grace period.
+ *
+ * If the iterator is valid, we may still race with an
+ * offlining. The RCU lock ensures the object won't be
+ * released, tryget will fail if we lost the race.
+ */
+ *sequence = atomic_read(&root->dead_count);
+ if (iter->last_dead_count == *sequence) {
+ smp_rmb();
+ position = iter->last_visited;
+
+ /*
+ * We cannot take a reference to root because we might race
+ * with root removal; returning NULL here would make the
+ * iterator hand back root over and over, leaving its user
+ * in an endless loop.
+ */
+ if (position && position != root &&
+ !css_tryget(&position->css))
+ position = NULL;
+ }
+ return position;
+}
+
+static void mem_cgroup_iter_update(struct mem_cgroup_reclaim_iter *iter,
+ struct mem_cgroup *last_visited,
+ struct mem_cgroup *new_position,
+ struct mem_cgroup *root,
+ int sequence)
+{
+ /* root reference counting symmetric to mem_cgroup_iter_load */
+ if (last_visited && last_visited != root)
+ css_put(&last_visited->css);
+ /*
+ * We store the sequence count from the time @last_visited was
+ * loaded successfully instead of rereading it here so that we
+ * don't lose destruction events in between. We could have
+ * raced with the destruction of @new_position after all.
+ */
+ iter->last_visited = new_position;
+ smp_wmb();
+ iter->last_dead_count = sequence;
+}
+
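
The load/update pair above is a small seqcount-style protocol: dead_count acts as the generation counter, the smp_wmb() in mem_cgroup_iter_update() publishes last_visited before the count, and the smp_rmb() in mem_cgroup_iter_load() orders the reads the other way around. A stripped-down sketch of the same ordering idea, with hypothetical names and not part of the patch:

/*
 * Illustrative sketch only -- not part of this patch.  A cached pointer
 * is validated against a generation counter: writers bump "gen" to
 * invalidate, and the barriers order "ptr" against the generation
 * exactly like last_visited/last_dead_count above.
 */
struct cached_ref {
	atomic_t	gen;		/* bumped on every invalidation */
	void		*ptr;		/* last cached object */
	int		last_gen;	/* generation seen when ptr was stored */
};

static void *cached_ref_load(struct cached_ref *c, int *seq)
{
	*seq = atomic_read(&c->gen);
	if (c->last_gen != *seq)
		return NULL;		/* stale: an invalidation happened */
	smp_rmb();			/* pairs with smp_wmb() in cached_ref_update() */
	return c->ptr;			/* caller still needs its own tryget */
}

static void cached_ref_update(struct cached_ref *c, void *newp, int seq)
{
	c->ptr = newp;
	smp_wmb();			/* publish ptr before the generation */
	c->last_gen = seq;
}
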
/**
* mem_cgroup_iter - iterate over memory cgroup hierarchy
* @root: hierarchy root
@@ -1171,7 +1221,6 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
{
struct mem_cgroup *memcg = NULL;
struct mem_cgroup *last_visited = NULL;
- unsigned long uninitialized_var(dead_count);
if (mem_cgroup_disabled())
return NULL;
@@ -1191,6 +1240,7 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
rcu_read_lock();
while (!memcg) {
struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
+ int uninitialized_var(seq);
if (reclaim) {
int nid = zone_to_nid(reclaim->zone);
@@ -1204,37 +1254,14 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
goto out_unlock;
}
- /*
- * If the dead_count mismatches, a destruction
- * has happened or is happening concurrently.
- * If the dead_count matches, a destruction
- * might still happen concurrently, but since
- * we checked under RCU, that destruction
- * won't free the object until we release the
- * RCU reader lock. Thus, the dead_count
- * check verifies the pointer is still valid,
- * css_tryget() verifies the cgroup pointed to
- * is alive.
- */
- dead_count = atomic_read(&root->dead_count);
- if (dead_count == iter->last_dead_count) {
- smp_rmb();
- last_visited = iter->last_visited;
- if (last_visited &&
- !css_tryget(&last_visited->css))
- last_visited = NULL;
- }
+ last_visited = mem_cgroup_iter_load(iter, root, &seq);
}
memcg = __mem_cgroup_iter_next(root, last_visited);
if (reclaim) {
- if (last_visited)
- css_put(&last_visited->css);
-
- iter->last_visited = memcg;
- smp_wmb();
- iter->last_dead_count = dead_count;
+ mem_cgroup_iter_update(iter, last_visited, memcg, root,
+ seq);
if (!memcg)
iter->generation++;
@@ -1434,7 +1461,7 @@ bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
return true;
if (!root_memcg->use_hierarchy || !memcg)
return false;
- return css_is_ancestor(&memcg->css, &root_memcg->css);
+ return cgroup_is_descendant(memcg->css.cgroup, root_memcg->css.cgroup);
}
static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
@@ -1448,15 +1475,16 @@ static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
return ret;
}
-int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg)
+bool task_in_mem_cgroup(struct task_struct *task,
+ const struct mem_cgroup *memcg)
{
- int ret;
struct mem_cgroup *curr = NULL;
struct task_struct *p;
+ bool ret;
p = find_lock_task_mm(task);
if (p) {
- curr = try_get_mem_cgroup_from_mm(p->mm);
+ curr = get_mem_cgroup_from_mm(p->mm);
task_unlock(p);
} else {
/*
@@ -1464,14 +1492,12 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg)
* killer still needs to detect if they have already been oom
* killed to prevent needlessly killing additional tasks.
*/
- task_lock(task);
+ rcu_read_lock();
curr = mem_cgroup_from_task(task);
if (curr)
css_get(&curr->css);
- task_unlock(task);
+ rcu_read_unlock();
}
- if (!curr)
- return 0;
/*
* We should check use_hierarchy of "memcg" not "curr". Because checking
* use_hierarchy of "curr" here make this function true if hierarchy is
@@ -1524,10 +1550,8 @@ static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
- struct cgroup *cgrp = memcg->css.cgroup;
-
/* root ? */
- if (cgrp->parent == NULL)
+ if (!css_parent(&memcg->css))
return vm_swappiness;
return memcg->swappiness;
@@ -1655,53 +1679,25 @@ static void move_unlock_mem_cgroup(struct mem_cgroup *memcg,
*/
void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
- struct cgroup *task_cgrp;
- struct cgroup *mem_cgrp;
- /*
- * Need a buffer in BSS, can't rely on allocations. The code relies
- * on the assumption that OOM is serialized for memory controller.
- * If this assumption is broken, revisit this code.
- */
- static char memcg_name[PATH_MAX];
- int ret;
+ /* oom_info_lock ensures that parallel ooms do not interleave */
+ static DEFINE_MUTEX(oom_info_lock);
struct mem_cgroup *iter;
unsigned int i;
if (!p)
return;
+ mutex_lock(&oom_info_lock);
rcu_read_lock();
- mem_cgrp = memcg->css.cgroup;
- task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);
+ pr_info("Task in ");
+ pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
+ pr_info(" killed as a result of limit of ");
+ pr_cont_cgroup_path(memcg->css.cgroup);
+ pr_info("\n");
- ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
- if (ret < 0) {
- /*
- * Unfortunately, we are unable to convert to a useful name
- * But we'll still print out the usage information
- */
- rcu_read_unlock();
- goto done;
- }
rcu_read_unlock();
- pr_info("Task in %s killed", memcg_name);
-
- rcu_read_lock();
- ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
- if (ret < 0) {
- rcu_read_unlock();
- goto done;
- }
- rcu_read_unlock();
-
- /*
- * Continues from above, so we don't need an KERN_ level
- */
- pr_cont(" as a result of limit of %s\n", memcg_name);
-done:
-
pr_info("memory: usage %llukB, limit %llukB, failcnt %llu\n",
res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
@@ -1716,13 +1712,8 @@ done:
res_counter_read_u64(&memcg->kmem, RES_FAILCNT));
for_each_mem_cgroup_tree(iter, memcg) {
- pr_info("Memory cgroup stats");
-
- rcu_read_lock();
- ret = cgroup_path(iter->css.cgroup, memcg_name, PATH_MAX);
- if (!ret)
- pr_cont(" for %s", memcg_name);
- rcu_read_unlock();
+ pr_info("Memory cgroup stats for ");
+ pr_cont_cgroup_path(iter->css.cgroup);
pr_cont(":");
for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
@@ -1738,6 +1729,7 @@ done:
pr_cont("\n");
}
+ mutex_unlock(&oom_info_lock);
}
/*
@@ -1804,12 +1796,11 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL);
totalpages = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT ? : 1;
for_each_mem_cgroup_tree(iter, memcg) {
- struct cgroup *cgroup = iter->css.cgroup;
- struct cgroup_iter it;
+ struct css_task_iter it;
struct task_struct *task;
- cgroup_iter_start(cgroup, &it);
- while ((task = cgroup_iter_next(cgroup, &it))) {
+ css_task_iter_start(&iter->css, &it);
+ while ((task = css_task_iter_next(&it))) {
switch (oom_scan_process_thread(task, totalpages, NULL,
false)) {
case OOM_SCAN_SELECT:
@@ -1822,7 +1813,7 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
case OOM_SCAN_CONTINUE:
continue;
case OOM_SCAN_ABORT:
- cgroup_iter_end(cgroup, &it);
+ css_task_iter_end(&it);
mem_cgroup_iter_break(memcg, iter);
if (chosen)
put_task_struct(chosen);
@@ -1831,15 +1822,20 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
break;
};
points = oom_badness(task, memcg, NULL, totalpages);
- if (points > chosen_points) {
- if (chosen)
- put_task_struct(chosen);
- chosen = task;
- chosen_points = points;
- get_task_struct(chosen);
- }
+ if (!points || points < chosen_points)
+ continue;
+ /* Prefer thread group leaders for display purposes */
+ if (points == chosen_points &&
+ thread_group_leader(chosen))
+ continue;
+
+ if (chosen)
+ put_task_struct(chosen);
+ chosen = task;
+ chosen_points = points;
+ get_task_struct(chosen);
}
- cgroup_iter_end(cgroup, &it);
+ css_task_iter_end(&it);
}
if (!chosen)
@@ -2075,15 +2071,24 @@ static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
return total;
}
+#ifdef CONFIG_LOCKDEP
+static struct lockdep_map memcg_oom_lock_dep_map = {
+ .name = "memcg_oom_lock",
+};
+#endif
+
+static DEFINE_SPINLOCK(memcg_oom_lock);
+
/*
* Check OOM-Killer is already running under our hierarchy.
* If someone is running, return false.
- * Has to be called with memcg_oom_lock
*/
-static bool mem_cgroup_oom_lock(struct mem_cgroup *memcg)
+static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
{
struct mem_cgroup *iter, *failed = NULL;
+ spin_lock(&memcg_oom_lock);
+
for_each_mem_cgroup_tree(iter, memcg) {
if (iter->oom_lock) {
/*
@@ -2097,33 +2102,35 @@ static bool mem_cgroup_oom_lock(struct mem_cgroup *memcg)
iter->oom_lock = true;
}
- if (!failed)
- return true;
-
- /*
- * OK, we failed to lock the whole subtree so we have to clean up
- * what we set up to the failing subtree
- */
- for_each_mem_cgroup_tree(iter, memcg) {
- if (iter == failed) {
- mem_cgroup_iter_break(memcg, iter);
- break;
+ if (failed) {
+ /*
+ * OK, we failed to lock the whole subtree so we have
+ * to clean up what we set up to the failing subtree
+ */
+ for_each_mem_cgroup_tree(iter, memcg) {
+ if (iter == failed) {
+ mem_cgroup_iter_break(memcg, iter);
+ break;
+ }
+ iter->oom_lock = false;
}
- iter->oom_lock = false;
- }
- return false;
+ } else
+ mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
+
+ spin_unlock(&memcg_oom_lock);
+
+ return !failed;
}
-/*
- * Has to be called with memcg_oom_lock
- */
-static int mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
+static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
{
struct mem_cgroup *iter;
+ spin_lock(&memcg_oom_lock);
+ mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
for_each_mem_cgroup_tree(iter, memcg)
iter->oom_lock = false;
- return 0;
+ spin_unlock(&memcg_oom_lock);
}
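
Since the hierarchical OOM lock is hand-rolled (per-memcg oom_lock bits taken under the memcg_oom_lock spinlock), lockdep cannot see it on its own; the static lockdep_map together with the mutex_acquire()/mutex_release() calls in the trylock/unlock paths above is what makes it visible. A generic sketch of that annotation pattern, with hypothetical names and not part of the patch:

/*
 * Illustrative sketch only -- not part of this patch.  A hand-rolled
 * lock (a flag guarded by a spinlock) annotated with a static
 * lockdep_map so lockdep can include it in its dependency graph.
 * All names are hypothetical.
 */
#ifdef CONFIG_LOCKDEP
static struct lockdep_map my_lock_dep_map = {
	.name = "my_lock",
};
#endif

static DEFINE_SPINLOCK(my_raw_lock);
static bool my_lock_held;

static bool my_lock_trylock(void)
{
	bool taken = false;

	spin_lock(&my_raw_lock);
	if (!my_lock_held)
		my_lock_held = taken = true;
	spin_unlock(&my_raw_lock);

	if (taken)	/* trylock succeeded: tell lockdep we now hold it */
		mutex_acquire(&my_lock_dep_map, 0, 1, _RET_IP_);
	return taken;
}

static void my_lock_unlock(void)
{
	mutex_release(&my_lock_dep_map, 1, _RET_IP_);	/* lockdep: released */
	spin_lock(&my_raw_lock);
	my_lock_held = false;
	spin_unlock(&my_raw_lock);
}
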
static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
@@ -2147,7 +2154,6 @@ static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
atomic_add_unless(&iter->under_oom, -1, 0);
}
-static DEFINE_SPINLOCK(memcg_oom_lock);
static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
struct oom_wait_info {
@@ -2177,6 +2183,7 @@ static int memcg_oom_wake_function(wait_queue_t *wait,
static void memcg_wakeup_oom(struct mem_cgroup *memcg)
{
+ atomic_inc(&memcg->oom_wakeups);
/* for filtering, pass "memcg" as argument. */
__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
}
@@ -2187,57 +2194,97 @@ static void memcg_oom_recover(struct mem_cgroup *memcg)
memcg_wakeup_oom(memcg);
}
-/*
- * try to call OOM killer. returns false if we should exit memory-reclaim loop.
+static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
+{
+ if (!current->memcg_oom.may_oom)
+ return;
+ /*
+ * We are in the middle of the charge context here, so we
+ * don't want to block when potentially sitting on a callstack
+ * that holds all kinds of filesystem and mm locks.
+ *
+ * Also, the caller may handle a failed allocation gracefully
+ * (like optional page cache readahead) and so an OOM killer
+ * invocation might not even be necessary.
+ *
+ * That's why we don't do anything here except remember the
+ * OOM context and then deal with it at the end of the page
+ * fault when the stack is unwound, the locks are released,
+ * and when we know whether the fault was overall successful.
+ */
+ css_get(&memcg->css);
+ current->memcg_oom.memcg = memcg;
+ current->memcg_oom.gfp_mask = mask;
+ current->memcg_oom.order = order;
+}
+
+/**
+ * mem_cgroup_oom_synchronize - complete memcg OOM handling
+ * @handle: actually kill/wait or just clean up the OOM state
+ *
+ * This has to be called at the end of a page fault if the memcg OOM
+ * handler was enabled.
+ *
+ * Memcg supports userspace OOM handling where failed allocations must
+ * sleep on a waitqueue until the userspace task resolves the
+ * situation. Sleeping directly in the charge context with all kinds
+ * of locks held is not a good idea; instead, we remember an OOM state
+ * in the task and mem_cgroup_oom_synchronize() has to be called at
+ * the end of the page fault to complete the OOM handling.
+ *
+ * Returns %true if an ongoing memcg OOM situation was detected and
+ * completed, %false otherwise.
*/
-static bool mem_cgroup_handle_oom(struct mem_cgroup *memcg, gfp_t mask,
- int order)
+bool mem_cgroup_oom_synchronize(bool handle)
{
+ struct mem_cgroup *memcg = current->memcg_oom.memcg;
struct oom_wait_info owait;
- bool locked, need_to_kill;
+ bool locked;
+
+ /* OOM is global, do not handle */
+ if (!memcg)
+ return false;
+
+ if (!handle)
+ goto cleanup;
owait.memcg = memcg;
owait.wait.flags = 0;
owait.wait.func = memcg_oom_wake_function;
owait.wait.private = current;
INIT_LIST_HEAD(&owait.wait.task_list);
- need_to_kill = true;
- mem_cgroup_mark_under_oom(memcg);
- /* At first, try to OOM lock hierarchy under memcg.*/
- spin_lock(&memcg_oom_lock);
- locked = mem_cgroup_oom_lock(memcg);
- /*
- * Even if signal_pending(), we can't quit charge() loop without
- * accounting. So, UNINTERRUPTIBLE is appropriate. But SIGKILL
- * under OOM is always welcomed, use TASK_KILLABLE here.
- */
prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
- if (!locked || memcg->oom_kill_disable)
- need_to_kill = false;
+ mem_cgroup_mark_under_oom(memcg);
+
+ locked = mem_cgroup_oom_trylock(memcg);
+
if (locked)
mem_cgroup_oom_notify(memcg);
- spin_unlock(&memcg_oom_lock);
- if (need_to_kill) {
+ if (locked && !memcg->oom_kill_disable) {
+ mem_cgroup_unmark_under_oom(memcg);
finish_wait(&memcg_oom_waitq, &owait.wait);
- mem_cgroup_out_of_memory(memcg, mask, order);
+ mem_cgroup_out_of_memory(memcg, current->memcg_oom.gfp_mask,
+ current->memcg_oom.order);
} else {
schedule();
+ mem_cgroup_unmark_under_oom(memcg);
finish_wait(&memcg_oom_waitq, &owait.wait);
}
- spin_lock(&memcg_oom_lock);
- if (locked)
- mem_cgroup_oom_unlock(memcg);
- memcg_wakeup_oom(memcg);
- spin_unlock(&memcg_oom_lock);
-
- mem_cgroup_unmark_under_oom(memcg);
- if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
- return false;
- /* Give chance to dying process */
- schedule_timeout_uninterruptible(1);
+ if (locked) {
+ mem_cgroup_oom_unlock(memcg);
+ /*
+ * There is no guarantee that an OOM-lock contender
+ * sees the wakeups triggered by the OOM kill
+ * uncharges. Wake any sleepers explicitly.
+ */
+ memcg_oom_recover(memcg);
+ }
+cleanup:
+ current->memcg_oom.memcg = NULL;
+ css_put(&memcg->css);
return true;
}
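
As an illustrative aside (not part of the patch; the helper and its placement are hypothetical), the calling convention documented above means that once a fault has fully unwound, the task's recorded memcg OOM state is either completed or merely discarded, depending on whether the fault ultimately failed:

/*
 * Illustrative sketch only -- not part of this patch.  After the page
 * fault has unwound (locks dropped, stack back at the fault handler),
 * the recorded memcg OOM state is either completed or cleaned up.
 */
static void example_fault_completed(int fault_ret)
{
	if (!task_in_memcg_oom(current))
		return;

	if (fault_ret & VM_FAULT_OOM)
		/* kill or wait for a userspace OOM handler, then clean up */
		mem_cgroup_oom_synchronize(true);
	else
		/* the fault succeeded after all: just drop the state */
		mem_cgroup_oom_synchronize(false);
}
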
@@ -2306,7 +2353,7 @@ void __mem_cgroup_end_update_page_stat(struct page *page, unsigned long *flags)
}
void mem_cgroup_update_page_stat(struct page *page,
- enum mem_cgroup_page_stat_item idx, int val)
+ enum mem_cgroup_stat_index idx, int val)
{
struct mem_cgroup *memcg;
struct page_cgroup *pc = lookup_page_cgroup(page);
@@ -2315,18 +2362,11 @@ void mem_cgroup_update_page_stat(struct page *page,
if (mem_cgroup_disabled())
return;
+ VM_BUG_ON(!rcu_read_lock_held());
memcg = pc->mem_cgroup;
if (unlikely(!memcg || !PageCgroupUsed(pc)))
return;
- switch (idx) {
- case MEMCG_NR_FILE_MAPPED:
- idx = MEM_CGROUP_STAT_FILE_MAPPED;
- break;
- default:
- BUG();
- }
-
this_cpu_add(memcg->stat->count[idx], val);
}
@@ -2468,7 +2508,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync)
flush_work(&stock->work);
}
out:
- put_online_cpus();
+ put_online_cpus();
}
/*
@@ -2521,7 +2561,7 @@ static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *memcg, int cpu)
spin_unlock(&memcg->pcp_counter_lock);
}
-static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb,
+static int memcg_cpu_hotplug_callback(struct notifier_block *nb,
unsigned long action,
void *hcpu)
{
@@ -2544,18 +2584,17 @@ static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb,
}
-/* See __mem_cgroup_try_charge() for details */
+/* See mem_cgroup_try_charge() for details */
enum {
CHARGE_OK, /* success */
CHARGE_RETRY, /* need to retry but retry is not bad */
CHARGE_NOMEM, /* we can't do more. return -ENOMEM */
CHARGE_WOULDBLOCK, /* GFP_WAIT wasn't set and no enough res. */
- CHARGE_OOM_DIE, /* the current is killed because of OOM */
};
static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
unsigned int nr_pages, unsigned int min_pages,
- bool oom_check)
+ bool invoke_oom)
{
unsigned long csize = nr_pages * PAGE_SIZE;
struct mem_cgroup *mem_over_limit;
@@ -2612,171 +2651,116 @@ static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
if (mem_cgroup_wait_acct_move(mem_over_limit))
return CHARGE_RETRY;
- /* If we don't need to call oom-killer at el, return immediately */
- if (!oom_check)
- return CHARGE_NOMEM;
- /* check OOM */
- if (!mem_cgroup_handle_oom(mem_over_limit, gfp_mask, get_order(csize)))
- return CHARGE_OOM_DIE;
+ if (invoke_oom)
+ mem_cgroup_oom(mem_over_limit, gfp_mask, get_order(csize));
- return CHARGE_RETRY;
+ return CHARGE_NOMEM;
}
-/*
- * __mem_cgroup_try_charge() does
- * 1. detect memcg to be charged against from passed *mm and *ptr,
- * 2. update res_counter
- * 3. call memory reclaim if necessary.
- *
- * In some special case, if the task is fatal, fatal_signal_pending() or
- * has TIF_MEMDIE, this function returns -EINTR while writing root_mem_cgroup
- * to *ptr. There are two reasons for this. 1: fatal threads should quit as soon
- * as possible without any hazards. 2: all pages should have a valid
- * pc->mem_cgroup. If mm is NULL and the caller doesn't pass a valid memcg
- * pointer, that is treated as a charge to root_mem_cgroup.
- *
- * So __mem_cgroup_try_charge() will return
- * 0 ... on success, filling *ptr with a valid memcg pointer.
- * -ENOMEM ... charge failure because of resource limits.
- * -EINTR ... if thread is fatal. *ptr is filled with root_mem_cgroup.
+/**
+ * mem_cgroup_try_charge - try charging a memcg
+ * @memcg: memcg to charge
+ * @nr_pages: number of pages to charge
+ * @oom: trigger OOM if reclaim fails
*
- * Unlike the exported interface, an "oom" parameter is added. if oom==true,
- * the oom-killer can be invoked.
+ * Returns 0 if @memcg was charged successfully, -EINTR if the charge
+ * was bypassed to root_mem_cgroup, and -ENOMEM if the charge failed.
*/
-static int __mem_cgroup_try_charge(struct mm_struct *mm,
- gfp_t gfp_mask,
- unsigned int nr_pages,
- struct mem_cgroup **ptr,
- bool oom)
+static int mem_cgroup_try_charge(struct mem_cgroup *memcg,
+ gfp_t gfp_mask,
+ unsigned int nr_pages,
+ bool oom)
{
unsigned int batch = max(CHARGE_BATCH, nr_pages);
int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
- struct mem_cgroup *memcg = NULL;
int ret;
+ if (mem_cgroup_is_root(memcg))
+ goto done;
/*
- * Unlike gloval-vm's OOM-kill, we're not in memory shortage
- * in system level. So, allow to go ahead dying process in addition to
- * MEMDIE process.
+ * Unlike in global OOM situations, memcg is not in a physical
+ * memory shortage. Allow dying and OOM-killed tasks to
+ * bypass the last charges so that they can exit quickly and
+ * free their memory.
*/
- if (unlikely(test_thread_flag(TIF_MEMDIE)
- || fatal_signal_pending(current)))
+ if (unlikely(test_thread_flag(TIF_MEMDIE) ||
+ fatal_signal_pending(current)))
goto bypass;
- /*
- * We always charge the cgroup the mm_struct belongs to.
- * The mm_struct's mem_cgroup changes on task migration if the
- * thread group leader migrates. It's possible that mm is not
- * set, if so charge the root memcg (happens for pagecache usage).
- */
- if (!*ptr && !mm)
- *ptr = root_mem_cgroup;
-again:
- if (*ptr) { /* css should be a valid one */
- memcg = *ptr;
- if (mem_cgroup_is_root(memcg))
- goto done;
- if (consume_stock(memcg, nr_pages))
- goto done;
- css_get(&memcg->css);
- } else {
- struct task_struct *p;
+ if (unlikely(task_in_memcg_oom(current)))
+ goto nomem;
- rcu_read_lock();
- p = rcu_dereference(mm->owner);
- /*
- * Because we don't have task_lock(), "p" can exit.
- * In that case, "memcg" can point to root or p can be NULL with
- * race with swapoff. Then, we have small risk of mis-accouning.
- * But such kind of mis-account by race always happens because
- * we don't have cgroup_mutex(). It's overkill and we allo that
- * small race, here.
- * (*) swapoff at el will charge against mm-struct not against
- * task-struct. So, mm->owner can be NULL.
- */
- memcg = mem_cgroup_from_task(p);
- if (!memcg)
- memcg = root_mem_cgroup;
- if (mem_cgroup_is_root(memcg)) {
- rcu_read_unlock();
- goto done;
- }
- if (consume_stock(memcg, nr_pages)) {
- /*
- * It seems dagerous to access memcg without css_get().
- * But considering how consume_stok works, it's not
- * necessary. If consume_stock success, some charges
- * from this memcg are cached on this cpu. So, we
- * don't need to call css_get()/css_tryget() before
- * calling consume_stock().
- */
- rcu_read_unlock();
- goto done;
- }
- /* after here, we may be blocked. we need to get refcnt */
- if (!css_tryget(&memcg->css)) {
- rcu_read_unlock();
- goto again;
- }
- rcu_read_unlock();
- }
+ if (gfp_mask & __GFP_NOFAIL)
+ oom = false;
+again:
+ if (consume_stock(memcg, nr_pages))
+ goto done;
do {
- bool oom_check;
+ bool invoke_oom = oom && !nr_oom_retries;
/* If killed, bypass charge */
- if (fatal_signal_pending(current)) {
- css_put(&memcg->css);
+ if (fatal_signal_pending(current))
goto bypass;
- }
- oom_check = false;
- if (oom && !nr_oom_retries) {
- oom_check = true;
- nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
- }
-
- ret = mem_cgroup_do_charge(memcg, gfp_mask, batch, nr_pages,
- oom_check);
+ ret = mem_cgroup_do_charge(memcg, gfp_mask, batch,
+ nr_pages, invoke_oom);
switch (ret) {
case CHARGE_OK:
break;
case CHARGE_RETRY: /* not in OOM situation but retry */
batch = nr_pages;
- css_put(&memcg->css);
- memcg = NULL;
goto again;
case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */
- css_put(&memcg->css);
goto nomem;
case CHARGE_NOMEM: /* OOM routine works */
- if (!oom) {
- css_put(&memcg->css);
+ if (!oom || invoke_oom)
goto nomem;
- }
- /* If oom, we never return -ENOMEM */
nr_oom_retries--;
break;
- case CHARGE_OOM_DIE: /* Killed by OOM Killer */
- css_put(&memcg->css);
- goto bypass;
}
} while (ret != CHARGE_OK);
if (batch > nr_pages)
refill_stock(memcg, batch - nr_pages);
- css_put(&memcg->css);
done:
- *ptr = memcg;
return 0;
nomem:
- *ptr = NULL;
- return -ENOMEM;
+ if (!(gfp_mask & __GFP_NOFAIL))
+ return -ENOMEM;
bypass:
- *ptr = root_mem_cgroup;
return -EINTR;
}
+/**
+ * mem_cgroup_try_charge_mm - try charging a mm
+ * @mm: mm_struct to charge
+ * @nr_pages: number of pages to charge
+ * @oom: trigger OOM if reclaim fails
+ *
+ * Returns the charged mem_cgroup associated with the given mm_struct or
+ * NULL if the charge failed.
+ */
+static struct mem_cgroup *mem_cgroup_try_charge_mm(struct mm_struct *mm,
+ gfp_t gfp_mask,
+ unsigned int nr_pages,
+ bool oom)
+
+{
+ struct mem_cgroup *memcg;
+ int ret;
+
+ memcg = get_mem_cgroup_from_mm(mm);
+ ret = mem_cgroup_try_charge(memcg, gfp_mask, nr_pages, oom);
+ css_put(&memcg->css);
+ if (ret == -EINTR)
+ memcg = root_mem_cgroup;
+ else if (ret)
+ memcg = NULL;
+
+ return memcg;
+}
+
/*
* Sometimes we have to undo a charge we got by try_charge().
* This function is for that: it does the uncharge and puts the css's refcnt.
@@ -2820,15 +2804,10 @@ static void __mem_cgroup_cancel_local_charge(struct mem_cgroup *memcg,
*/
static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
{
- struct cgroup_subsys_state *css;
-
/* ID 0 is unused ID */
if (!id)
return NULL;
- css = css_lookup(&mem_cgroup_subsys, id);
- if (!css)
- return NULL;
- return mem_cgroup_from_css(css);
+ return mem_cgroup_from_id(id);
}
struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
@@ -2838,7 +2817,7 @@ struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
unsigned short id;
swp_entry_t ent;
- VM_BUG_ON(!PageLocked(page));
+ VM_BUG_ON_PAGE(!PageLocked(page), page);
pc = lookup_page_cgroup(page);
lock_page_cgroup(pc);
@@ -2872,7 +2851,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
bool anon;
lock_page_cgroup(pc);
- VM_BUG_ON(PageCgroupUsed(pc));
+ VM_BUG_ON_PAGE(PageCgroupUsed(pc), page);
/*
* we don't need page_cgroup_lock about tail pages, becase they are not
* accessed by any other context at this point.
@@ -2900,14 +2879,14 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
* is accessed after testing USED bit. To make pc->mem_cgroup visible
* before USED bit, we need memory barrier here.
* See mem_cgroup_add_lru_list(), etc.
- */
+ */
smp_wmb();
SetPageCgroupUsed(pc);
if (lrucare) {
if (was_on_lru) {
lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
- VM_BUG_ON(PageLRU(page));
+ VM_BUG_ON_PAGE(PageLRU(page), page);
SetPageLRU(page);
add_page_to_lru_list(page, lruvec, page_lru(page));
}
@@ -2933,10 +2912,12 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
static DEFINE_MUTEX(set_limit_mutex);
#ifdef CONFIG_MEMCG_KMEM
+static DEFINE_MUTEX(activate_kmem_mutex);
+
static inline bool memcg_can_account_kmem(struct mem_cgroup *memcg)
{
return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg) &&
- (memcg->kmem_account_flags & KMEM_ACCOUNTED_MASK);
+ memcg_kmem_is_active(memcg);
}
/*
@@ -2949,14 +2930,13 @@ static struct kmem_cache *memcg_params_to_cache(struct memcg_cache_params *p)
VM_BUG_ON(p->is_root_cache);
cachep = p->root_cache;
- return cachep->memcg_params->memcg_caches[memcg_cache_id(p->memcg)];
+ return cache_from_memcg_idx(cachep, memcg_cache_id(p->memcg));
}
#ifdef CONFIG_SLABINFO
-static int mem_cgroup_slabinfo_read(struct cgroup *cont, struct cftype *cft,
- struct seq_file *m)
+static int mem_cgroup_slabinfo_read(struct seq_file *m, void *v)
{
- struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+ struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
struct memcg_cache_params *params;
if (!memcg_can_account_kmem(memcg))
@@ -2976,27 +2956,17 @@ static int mem_cgroup_slabinfo_read(struct cgroup *cont, struct cftype *cft,
static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size)
{
struct res_counter *fail_res;
- struct mem_cgroup *_memcg;
int ret = 0;
- bool may_oom;
ret = res_counter_charge(&memcg->kmem, size, &fail_res);
if (ret)
return ret;
- /*
- * Conditions under which we can wait for the oom_killer. Those are
- * the same conditions tested by the core page allocator
- */
- may_oom = (gfp & __GFP_FS) && !(gfp & __GFP_NORETRY);
-
- _memcg = memcg;
- ret = __mem_cgroup_try_charge(NULL, gfp, size >> PAGE_SHIFT,
- &_memcg, may_oom);
-
+ ret = mem_cgroup_try_charge(memcg, gfp, size >> PAGE_SHIFT,
+ oom_gfp_allowed(gfp));
if (ret == -EINTR) {
/*
- * __mem_cgroup_try_charge() chosed to bypass to root due to
+ * mem_cgroup_try_charge() chose to bypass to root due to
* OOM kill or fatal signal. Since our only options are to
* either fail the allocation or charge it to this cgroup, do
* it as a temporary condition. But we can't fail. From a
@@ -3006,7 +2976,7 @@ static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size)
*
* This condition will only trigger if the task entered
* memcg_charge_kmem in a sane state, but was OOM-killed during
- * __mem_cgroup_try_charge() above. Tasks that were already
+ * mem_cgroup_try_charge() above. Tasks that were already
* dying when the allocation triggers should have been already
* directed to the root cgroup in memcontrol.h
*/
@@ -3031,18 +3001,16 @@ static void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size)
if (res_counter_uncharge(&memcg->kmem, size))
return;
+ /*
+ * Releases a reference taken in kmem_cgroup_css_offline in case
+ * this last uncharge is racing with the offlining code or it is
+ * outliving the memcg existence.
+ *
+ * The memory barrier imposed by test&clear is paired with the
+ * explicit one in memcg_kmem_mark_dead().
+ */
if (memcg_kmem_test_and_clear_dead(memcg))
- mem_cgroup_put(memcg);
-}
-
-void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep)
-{
- if (!memcg)
- return;
-
- mutex_lock(&memcg->slab_caches_mutex);
- list_add(&cachep->memcg_params->list, &memcg->memcg_slab_caches);
- mutex_unlock(&memcg->slab_caches_mutex);
+ css_put(&memcg->css);
}
/*
@@ -3055,43 +3023,6 @@ int memcg_cache_id(struct mem_cgroup *memcg)
return memcg ? memcg->kmemcg_id : -1;
}
-/*
- * This ends up being protected by the set_limit mutex, during normal
- * operation, because that is its main call site.
- *
- * But when we create a new cache, we can call this as well if its parent
- * is kmem-limited. That will have to hold set_limit_mutex as well.
- */
-int memcg_update_cache_sizes(struct mem_cgroup *memcg)
-{
- int num, ret;
-
- num = ida_simple_get(&kmem_limited_groups,
- 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
- if (num < 0)
- return num;
- /*
- * After this point, kmem_accounted (that we test atomically in
- * the beginning of this conditional), is no longer 0. This
- * guarantees only one process will set the following boolean
- * to true. We don't need test_and_set because we're protected
- * by the set_limit_mutex anyway.
- */
- memcg_kmem_set_activated(memcg);
-
- ret = memcg_update_all_caches(num+1);
- if (ret) {
- ida_simple_remove(&kmem_limited_groups, num);
- memcg_kmem_clear_activated(memcg);
- return ret;
- }
-
- memcg->kmemcg_id = num;
- INIT_LIST_HEAD(&memcg->memcg_slab_caches);
- mutex_init(&memcg->slab_caches_mutex);
- return 0;
-}
-
static size_t memcg_caches_array_size(int num_groups)
{
ssize_t size;
@@ -3124,22 +3055,21 @@ int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
{
struct memcg_cache_params *cur_params = s->memcg_params;
- VM_BUG_ON(s->memcg_params && !s->memcg_params->is_root_cache);
+ VM_BUG_ON(!is_root_cache(s));
if (num_groups > memcg_limited_groups_array_size) {
int i;
+ struct memcg_cache_params *new_params;
ssize_t size = memcg_caches_array_size(num_groups);
size *= sizeof(void *);
- size += sizeof(struct memcg_cache_params);
+ size += offsetof(struct memcg_cache_params, memcg_caches);
- s->memcg_params = kzalloc(size, GFP_KERNEL);
- if (!s->memcg_params) {
- s->memcg_params = cur_params;
+ new_params = kzalloc(size, GFP_KERNEL);
+ if (!new_params)
return -ENOMEM;
- }
- s->memcg_params->is_root_cache = true;
+ new_params->is_root_cache = true;
/*
* There is the chance it will be bigger than
@@ -3153,7 +3083,7 @@ int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
for (i = 0; i < memcg_limited_groups_array_size; i++) {
if (!cur_params->memcg_caches[i])
continue;
- s->memcg_params->memcg_caches[i] =
+ new_params->memcg_caches[i] =
cur_params->memcg_caches[i];
}
@@ -3166,66 +3096,144 @@ int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
* bigger than the others. And all updates will reset this
* anyway.
*/
- kfree(cur_params);
+ rcu_assign_pointer(s->memcg_params, new_params);
+ if (cur_params)
+ kfree_rcu(cur_params, rcu_head);
}
return 0;
}
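
The resize above follows the usual RCU replace-and-publish recipe: build a larger copy, switch it in with rcu_assign_pointer(), and free the old array with kfree_rcu(), so that the lockless readers mentioned in the comments (cache_from_memcg_idx()) never observe a half-copied array. A minimal, generic reader-side sketch of that pattern, with hypothetical names and not part of the patch:

/*
 * Illustrative sketch only -- not part of this patch.  Generic shape of
 * an RCU-replaced, resizable pointer array: the writer publishes a new
 * copy with rcu_assign_pointer(); readers dereference it under
 * rcu_read_lock() and therefore see either the old or the new array,
 * never a partially copied one.
 */
struct idx_array {
	struct rcu_head rcu_head;	/* for kfree_rcu() on replacement */
	void *slots[];			/* flexible array of entries */
};

static void *idx_array_lookup(struct idx_array __rcu **arrp, int idx)
{
	struct idx_array *arr;
	void *entry = NULL;

	rcu_read_lock();
	arr = rcu_dereference(*arrp);	/* pairs with rcu_assign_pointer() */
	if (arr)
		entry = arr->slots[idx];
	rcu_read_unlock();

	return entry;
}
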
-int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
- struct kmem_cache *root_cache)
+char *memcg_create_cache_name(struct mem_cgroup *memcg,
+ struct kmem_cache *root_cache)
{
- size_t size = sizeof(struct memcg_cache_params);
+ static char *buf = NULL;
+
+ /*
+ * We need a mutex here to protect the shared buffer. Since this is
+ * expected to be called only on cache creation, we can employ the
+ * slab_mutex for that purpose.
+ */
+ lockdep_assert_held(&slab_mutex);
+
+ if (!buf) {
+ buf = kmalloc(NAME_MAX + 1, GFP_KERNEL);
+ if (!buf)
+ return NULL;
+ }
+
+ cgroup_name(memcg->css.cgroup, buf, NAME_MAX + 1);
+ return kasprintf(GFP_KERNEL, "%s(%d:%s)", root_cache->name,
+ memcg_cache_id(memcg), buf);
+}
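
For illustration (not part of the patch, values hypothetical): the helper above composes the child cache name from the root cache's name, the memcg cache id and the cgroup name, so a cgroup named "foo" with cache id 2 gets its copy of the "dentry" cache named "dentry(2:foo)". A sketch of a caller:

/*
 * Illustrative sketch only -- not part of this patch.  Shows the name
 * format produced by memcg_create_cache_name(); the caller and the
 * example id/cgroup name are hypothetical.
 */
static void example_print_cache_name(struct mem_cgroup *memcg,
				     struct kmem_cache *root_cachep)
{
	char *name;

	mutex_lock(&slab_mutex);	/* the helper asserts this is held */
	name = memcg_create_cache_name(memcg, root_cachep);
	mutex_unlock(&slab_mutex);

	if (!name)
		return;
	/* e.g. "dentry(2:foo)" for the "dentry" cache, id 2, cgroup "foo" */
	pr_info("memcg cache name: %s\n", name);
	kfree(name);			/* kasprintf() allocation */
}
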
+
+int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s,
+ struct kmem_cache *root_cache)
+{
+ size_t size;
if (!memcg_kmem_enabled())
return 0;
- if (!memcg)
+ if (!memcg) {
+ size = offsetof(struct memcg_cache_params, memcg_caches);
size += memcg_limited_groups_array_size * sizeof(void *);
+ } else
+ size = sizeof(struct memcg_cache_params);
s->memcg_params = kzalloc(size, GFP_KERNEL);
if (!s->memcg_params)
return -ENOMEM;
- INIT_WORK(&s->memcg_params->destroy,
- kmem_cache_destroy_work_func);
if (memcg) {
s->memcg_params->memcg = memcg;
s->memcg_params->root_cache = root_cache;
+ INIT_WORK(&s->memcg_params->destroy,
+ kmem_cache_destroy_work_func);
+ css_get(&memcg->css);
} else
s->memcg_params->is_root_cache = true;
return 0;
}
-void memcg_release_cache(struct kmem_cache *s)
+void memcg_free_cache_params(struct kmem_cache *s)
+{
+ if (!s->memcg_params)
+ return;
+ if (!s->memcg_params->is_root_cache)
+ css_put(&s->memcg_params->memcg->css);
+ kfree(s->memcg_params);
+}
+
+void memcg_register_cache(struct kmem_cache *s)
{
struct kmem_cache *root;
struct mem_cgroup *memcg;
int id;
- /*
- * This happens, for instance, when a root cache goes away before we
- * add any memcg.
- */
- if (!s->memcg_params)
+ if (is_root_cache(s))
return;
- if (s->memcg_params->is_root_cache)
- goto out;
+ /*
+ * Holding the slab_mutex assures nobody will touch the memcg_caches
+ * array while we are modifying it.
+ */
+ lockdep_assert_held(&slab_mutex);
+ root = s->memcg_params->root_cache;
memcg = s->memcg_params->memcg;
- id = memcg_cache_id(memcg);
+ id = memcg_cache_id(memcg);
+
+ /*
+ * Since readers won't lock (see cache_from_memcg_idx()), we need a
+ * barrier here to ensure nobody will see the kmem_cache partially
+ * initialized.
+ */
+ smp_wmb();
+
+ /*
+ * Initialize the pointer to this cache in its parent's memcg_params
+ * before adding it to the memcg_slab_caches list, otherwise we can
+ * fail to convert memcg_params_to_cache() while traversing the list.
+ */
+ VM_BUG_ON(root->memcg_params->memcg_caches[id]);
+ root->memcg_params->memcg_caches[id] = s;
+
+ mutex_lock(&memcg->slab_caches_mutex);
+ list_add(&s->memcg_params->list, &memcg->memcg_slab_caches);
+ mutex_unlock(&memcg->slab_caches_mutex);
+}
+
+void memcg_unregister_cache(struct kmem_cache *s)
+{
+ struct kmem_cache *root;
+ struct mem_cgroup *memcg;
+ int id;
+
+ if (is_root_cache(s))
+ return;
+
+ /*
+ * Holding the slab_mutex assures nobody will touch the memcg_caches
+ * array while we are modifying it.
+ */
+ lockdep_assert_held(&slab_mutex);
root = s->memcg_params->root_cache;
- root->memcg_params->memcg_caches[id] = NULL;
+ memcg = s->memcg_params->memcg;
+ id = memcg_cache_id(memcg);
mutex_lock(&memcg->slab_caches_mutex);
list_del(&s->memcg_params->list);
mutex_unlock(&memcg->slab_caches_mutex);
- mem_cgroup_put(memcg);
-out:
- kfree(s->memcg_params);
+ /*
+ * Clear the pointer to this cache in its parent's memcg_params only
+ * after removing it from the memcg_slab_caches list, otherwise we can
+ * fail to convert memcg_params_to_cache() while traversing the list.
+ */
+ VM_BUG_ON(root->memcg_params->memcg_caches[id] != s);
+ root->memcg_params->memcg_caches[id] = NULL;
}
/*
@@ -3284,11 +3292,9 @@ static void kmem_cache_destroy_work_func(struct work_struct *w)
* So if we aren't down to zero, we'll just schedule a worker and try
* again
*/
- if (atomic_read(&cachep->memcg_params->nr_pages) != 0) {
+ if (atomic_read(&cachep->memcg_params->nr_pages) != 0)
kmem_cache_shrink(cachep);
- if (atomic_read(&cachep->memcg_params->nr_pages) == 0)
- return;
- } else
+ else
kmem_cache_destroy(cachep);
}
@@ -3324,97 +3330,10 @@ void mem_cgroup_destroy_cache(struct kmem_cache *cachep)
schedule_work(&cachep->memcg_params->destroy);
}
-/*
- * This lock protects updaters, not readers. We want readers to be as fast as
- * they can, and they will either see NULL or a valid cache value. Our model
- * allow them to see NULL, in which case the root memcg will be selected.
- *
- * We need this lock because multiple allocations to the same cache from a non
- * will span more than one worker. Only one of them can create the cache.
- */
-static DEFINE_MUTEX(memcg_cache_mutex);
-
-/*
- * Called with memcg_cache_mutex held
- */
-static struct kmem_cache *kmem_cache_dup(struct mem_cgroup *memcg,
- struct kmem_cache *s)
-{
- struct kmem_cache *new;
- static char *tmp_name = NULL;
-
- lockdep_assert_held(&memcg_cache_mutex);
-
- /*
- * kmem_cache_create_memcg duplicates the given name and
- * cgroup_name for this name requires RCU context.
- * This static temporary buffer is used to prevent from
- * pointless shortliving allocation.
- */
- if (!tmp_name) {
- tmp_name = kmalloc(PATH_MAX, GFP_KERNEL);
- if (!tmp_name)
- return NULL;
- }
-
- rcu_read_lock();
- snprintf(tmp_name, PATH_MAX, "%s(%d:%s)", s->name,
- memcg_cache_id(memcg), cgroup_name(memcg->css.cgroup));
- rcu_read_unlock();
-
- new = kmem_cache_create_memcg(memcg, tmp_name, s->object_size, s->align,
- (s->flags & ~SLAB_PANIC), s->ctor, s);
-
- if (new)
- new->allocflags |= __GFP_KMEMCG;
-
- return new;
-}
-
-static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
- struct kmem_cache *cachep)
-{
- struct kmem_cache *new_cachep;
- int idx;
-
- BUG_ON(!memcg_can_account_kmem(memcg));
-
- idx = memcg_cache_id(memcg);
-
- mutex_lock(&memcg_cache_mutex);
- new_cachep = cachep->memcg_params->memcg_caches[idx];
- if (new_cachep)
- goto out;
-
- new_cachep = kmem_cache_dup(memcg, cachep);
- if (new_cachep == NULL) {
- new_cachep = cachep;
- goto out;
- }
-
- mem_cgroup_get(memcg);
- atomic_set(&new_cachep->memcg_params->nr_pages , 0);
-
- cachep->memcg_params->memcg_caches[idx] = new_cachep;
- /*
- * the readers won't lock, make sure everybody sees the updated value,
- * so they won't put stuff in the queue again for no reason
- */
- wmb();
-out:
- mutex_unlock(&memcg_cache_mutex);
- return new_cachep;
-}
-
-void kmem_cache_destroy_memcg_children(struct kmem_cache *s)
+int __kmem_cache_destroy_memcg_children(struct kmem_cache *s)
{
struct kmem_cache *c;
- int i;
-
- if (!s->memcg_params)
- return;
- if (!s->memcg_params->is_root_cache)
- return;
+ int i, failed = 0;
/*
* If the cache is being destroyed, we trust that there is no one else
@@ -3423,11 +3342,12 @@ void kmem_cache_destroy_memcg_children(struct kmem_cache *s)
*
* Still, we don't want anyone else freeing memcg_caches under our
* noses, which can happen if a new memcg comes to life. As usual,
- * we'll take the set_limit_mutex to protect ourselves against this.
+ * we'll take the activate_kmem_mutex to protect ourselves against
+ * this.
*/
- mutex_lock(&set_limit_mutex);
- for (i = 0; i < memcg_limited_groups_array_size; i++) {
- c = s->memcg_params->memcg_caches[i];
+ mutex_lock(&activate_kmem_mutex);
+ for_each_memcg_cache_index(i) {
+ c = cache_from_memcg_idx(s, i);
if (!c)
continue;
@@ -3447,16 +3367,14 @@ void kmem_cache_destroy_memcg_children(struct kmem_cache *s)
c->memcg_params->dead = false;
cancel_work_sync(&c->memcg_params->destroy);
kmem_cache_destroy(c);
+
+ if (cache_from_memcg_idx(s, i))
+ failed++;
}
- mutex_unlock(&set_limit_mutex);
+ mutex_unlock(&activate_kmem_mutex);
+ return failed;
}
-struct create_work {
- struct mem_cgroup *memcg;
- struct kmem_cache *cachep;
- struct work_struct work;
-};
-
static void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg)
{
struct kmem_cache *cachep;
@@ -3474,14 +3392,20 @@ static void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg)
mutex_unlock(&memcg->slab_caches_mutex);
}
+struct create_work {
+ struct mem_cgroup *memcg;
+ struct kmem_cache *cachep;
+ struct work_struct work;
+};
+
static void memcg_create_cache_work_func(struct work_struct *w)
{
- struct create_work *cw;
+ struct create_work *cw = container_of(w, struct create_work, work);
+ struct mem_cgroup *memcg = cw->memcg;
+ struct kmem_cache *cachep = cw->cachep;
- cw = container_of(w, struct create_work, work);
- memcg_create_kmem_cache(cw->memcg, cw->cachep);
- /* Drop the reference gotten when we enqueued. */
- css_put(&cw->memcg->css);
+ kmem_cache_create_memcg(memcg, cachep);
+ css_put(&memcg->css);
kfree(cw);
}
@@ -3541,7 +3465,7 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep,
gfp_t gfp)
{
struct mem_cgroup *memcg;
- int idx;
+ struct kmem_cache *memcg_cachep;
VM_BUG_ON(!cachep->memcg_params);
VM_BUG_ON(!cachep->memcg_params->is_root_cache);
@@ -3555,15 +3479,9 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep,
if (!memcg_can_account_kmem(memcg))
goto out;
- idx = memcg_cache_id(memcg);
-
- /*
- * barrier to mare sure we're always seeing the up to date value. The
- * code updating memcg_caches will issue a write barrier to match this.
- */
- read_barrier_depends();
- if (likely(cachep->memcg_params->memcg_caches[idx])) {
- cachep = cachep->memcg_params->memcg_caches[idx];
+ memcg_cachep = cache_from_memcg_idx(cachep, memcg_cache_id(memcg));
+ if (likely(memcg_cachep)) {
+ cachep = memcg_cachep;
goto out;
}
@@ -3618,16 +3536,36 @@ __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **_memcg, int order)
int ret;
*_memcg = NULL;
- memcg = try_get_mem_cgroup_from_mm(current->mm);
/*
- * very rare case described in mem_cgroup_from_task. Unfortunately there
- * isn't much we can do without complicating this too much, and it would
- * be gfp-dependent anyway. Just let it go
+ * Disabling accounting is only relevant for some specific memcg
+ * internal allocations. Therefore we would initially not have such a
+ * check here, since direct calls to the page allocator that are marked
+ * with GFP_KMEMCG only happen outside memcg core. We are mostly
+ * concerned with cache allocations, and by having this test at
+ * memcg_kmem_get_cache, we are already able to relay the allocation to
+ * the root cache and bypass the memcg cache altogether.
+ *
+ * There is one exception, though: the SLUB allocator does not create
+ * large order caches, but rather services large kmallocs directly from
+ * the page allocator. Therefore, the following sequence when backed by
+ * the SLUB allocator:
+ *
+ * memcg_stop_kmem_account();
+ * kmalloc(<large_number>)
+ * memcg_resume_kmem_account();
+ *
+ * would effectively ignore the fact that we should skip accounting,
+ * since it will drive us directly to this function without passing
+ * through the cache selector memcg_kmem_get_cache. Such large
+ * allocations are extremely rare but can happen, for instance, for the
+ * cache arrays. We bring this test here.
*/
- if (unlikely(!memcg))
+ if (!current->mm || current->memcg_kmem_skip_account)
return true;
+ memcg = get_mem_cgroup_from_mm(current->mm);
+
if (!memcg_can_account_kmem(memcg)) {
css_put(&memcg->css);
return true;
@@ -3689,7 +3627,7 @@ void __memcg_kmem_uncharge_pages(struct page *page, int order)
if (!memcg)
return;
- VM_BUG_ON(mem_cgroup_is_root(memcg));
+ VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
memcg_uncharge_kmem(memcg, PAGE_SIZE << order);
}
#else
@@ -3755,7 +3693,7 @@ static int mem_cgroup_move_account(struct page *page,
bool anon = PageAnon(page);
VM_BUG_ON(from == to);
- VM_BUG_ON(PageLRU(page));
+ VM_BUG_ON_PAGE(PageLRU(page), page);
/*
* The page is isolated from LRU. So, collapse function
* will not handle this page. But page splitting can happen.
@@ -3775,12 +3713,19 @@ static int mem_cgroup_move_account(struct page *page,
move_lock_mem_cgroup(from, &flags);
if (!anon && page_mapped(page)) {
- /* Update mapped_file data for mem_cgroup */
- preempt_disable();
- __this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
- __this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
- preempt_enable();
+ __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
+ nr_pages);
+ __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
+ nr_pages);
}
+
+ if (PageWriteback(page)) {
+ __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK],
+ nr_pages);
+ __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK],
+ nr_pages);
+ }
+
mem_cgroup_charge_statistics(from, page, anon, -nr_pages);
/* caller should have done css_get */
@@ -3847,7 +3792,7 @@ static int mem_cgroup_move_parent(struct page *page,
parent = root_mem_cgroup;
if (nr_pages > 1) {
- VM_BUG_ON(!PageTransHuge(page));
+ VM_BUG_ON_PAGE(!PageTransHuge(page), page);
flags = compound_lock_irqsave(page);
}
@@ -3865,23 +3810,23 @@ out:
return ret;
}
-/*
- * Charge the memory controller for page usage.
- * Return
- * 0 if the charge was successful
- * < 0 if the cgroup is over its limit
- */
-static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
- gfp_t gfp_mask, enum charge_type ctype)
+int mem_cgroup_charge_anon(struct page *page,
+ struct mm_struct *mm, gfp_t gfp_mask)
{
- struct mem_cgroup *memcg = NULL;
unsigned int nr_pages = 1;
+ struct mem_cgroup *memcg;
bool oom = true;
- int ret;
+
+ if (mem_cgroup_disabled())
+ return 0;
+
+ VM_BUG_ON_PAGE(page_mapped(page), page);
+ VM_BUG_ON_PAGE(page->mapping && !PageAnon(page), page);
+ VM_BUG_ON(!mm);
if (PageTransHuge(page)) {
nr_pages <<= compound_order(page);
- VM_BUG_ON(!PageTransHuge(page));
+ VM_BUG_ON_PAGE(!PageTransHuge(page), page);
/*
* Never OOM-kill a process for a huge page. The
* fault handler will fall back to regular pages.
@@ -3889,25 +3834,14 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
oom = false;
}
- ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom);
- if (ret == -ENOMEM)
- return ret;
- __mem_cgroup_commit_charge(memcg, page, nr_pages, ctype, false);
+ memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, nr_pages, oom);
+ if (!memcg)
+ return -ENOMEM;
+ __mem_cgroup_commit_charge(memcg, page, nr_pages,
+ MEM_CGROUP_CHARGE_TYPE_ANON, false);
return 0;
}
-int mem_cgroup_newpage_charge(struct page *page,
- struct mm_struct *mm, gfp_t gfp_mask)
-{
- if (mem_cgroup_disabled())
- return 0;
- VM_BUG_ON(page_mapped(page));
- VM_BUG_ON(page->mapping && !PageAnon(page));
- VM_BUG_ON(!mm);
- return mem_cgroup_charge_common(page, mm, gfp_mask,
- MEM_CGROUP_CHARGE_TYPE_ANON);
-}
-
/*
* While swap-in, try_charge -> commit or cancel, the page is locked.
* And when try_charge() successfully returns, one refcnt to memcg without
@@ -3919,7 +3853,7 @@ static int __mem_cgroup_try_charge_swapin(struct mm_struct *mm,
gfp_t mask,
struct mem_cgroup **memcgp)
{
- struct mem_cgroup *memcg;
+ struct mem_cgroup *memcg = NULL;
struct page_cgroup *pc;
int ret;
@@ -3932,31 +3866,29 @@ static int __mem_cgroup_try_charge_swapin(struct mm_struct *mm,
* in turn serializes uncharging.
*/
if (PageCgroupUsed(pc))
- return 0;
- if (!do_swap_account)
- goto charge_cur_mm;
- memcg = try_get_mem_cgroup_from_page(page);
+ goto out;
+ if (do_swap_account)
+ memcg = try_get_mem_cgroup_from_page(page);
if (!memcg)
- goto charge_cur_mm;
- *memcgp = memcg;
- ret = __mem_cgroup_try_charge(NULL, mask, 1, memcgp, true);
+ memcg = get_mem_cgroup_from_mm(mm);
+ ret = mem_cgroup_try_charge(memcg, mask, 1, true);
css_put(&memcg->css);
if (ret == -EINTR)
- ret = 0;
- return ret;
-charge_cur_mm:
- ret = __mem_cgroup_try_charge(mm, mask, 1, memcgp, true);
- if (ret == -EINTR)
- ret = 0;
- return ret;
+ memcg = root_mem_cgroup;
+ else if (ret)
+ return ret;
+out:
+ *memcgp = memcg;
+ return 0;
}
int mem_cgroup_try_charge_swapin(struct mm_struct *mm, struct page *page,
gfp_t gfp_mask, struct mem_cgroup **memcgp)
{
- *memcgp = NULL;
- if (mem_cgroup_disabled())
+ if (mem_cgroup_disabled()) {
+ *memcgp = NULL;
return 0;
+ }
/*
* A racing thread's fault, or swapoff, may have already
* updated the pte, and even removed page from swap cache: in
@@ -3964,12 +3896,13 @@ int mem_cgroup_try_charge_swapin(struct mm_struct *mm, struct page *page,
* there's also a KSM case which does need to charge the page.
*/
if (!PageSwapCache(page)) {
- int ret;
+ struct mem_cgroup *memcg;
- ret = __mem_cgroup_try_charge(mm, gfp_mask, 1, memcgp, true);
- if (ret == -EINTR)
- ret = 0;
- return ret;
+ memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, 1, true);
+ if (!memcg)
+ return -ENOMEM;
+ *memcgp = memcg;
+ return 0;
}
return __mem_cgroup_try_charge_swapin(mm, page, gfp_mask, memcgp);
}
@@ -4013,11 +3946,11 @@ void mem_cgroup_commit_charge_swapin(struct page *page,
MEM_CGROUP_CHARGE_TYPE_ANON);
}
-int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
+int mem_cgroup_charge_file(struct page *page, struct mm_struct *mm,
gfp_t gfp_mask)
{
- struct mem_cgroup *memcg = NULL;
enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
+ struct mem_cgroup *memcg;
int ret;
if (mem_cgroup_disabled())
@@ -4025,15 +3958,20 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
if (PageCompound(page))
return 0;
- if (!PageSwapCache(page))
- ret = mem_cgroup_charge_common(page, mm, gfp_mask, type);
- else { /* page is swapcache/shmem */
+ if (PageSwapCache(page)) { /* shmem */
ret = __mem_cgroup_try_charge_swapin(mm, page,
gfp_mask, &memcg);
- if (!ret)
- __mem_cgroup_commit_charge_swapin(page, memcg, type);
+ if (ret)
+ return ret;
+ __mem_cgroup_commit_charge_swapin(page, memcg, type);
+ return 0;
}
- return ret;
+
+ memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, 1, true);
+ if (!memcg)
+ return -ENOMEM;
+ __mem_cgroup_commit_charge(memcg, page, 1, type, false);
+ return 0;
}
static void mem_cgroup_do_uncharge(struct mem_cgroup *memcg,
@@ -4106,7 +4044,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype,
if (PageTransHuge(page)) {
nr_pages <<= compound_order(page);
- VM_BUG_ON(!PageTransHuge(page));
+ VM_BUG_ON_PAGE(!PageTransHuge(page), page);
}
/*
* Check if our page_cgroup is valid
@@ -4171,12 +4109,12 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype,
unlock_page_cgroup(pc);
/*
* even after unlock, we have memcg->res.usage here and this memcg
- * will never be freed.
+ * will never be freed, so it's safe to call css_get().
*/
memcg_check_events(memcg, page);
if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) {
mem_cgroup_swap_statistics(memcg, true);
- mem_cgroup_get(memcg);
+ css_get(&memcg->css);
}
/*
* Migration does not charge the res_counter for the
@@ -4198,7 +4136,7 @@ void mem_cgroup_uncharge_page(struct page *page)
/* early check. */
if (page_mapped(page))
return;
- VM_BUG_ON(page->mapping && !PageAnon(page));
+ VM_BUG_ON_PAGE(page->mapping && !PageAnon(page), page);
/*
* If the page is in swap cache, uncharge should be deferred
* to the swap path, which also properly accounts swap usage
@@ -4218,8 +4156,8 @@ void mem_cgroup_uncharge_page(struct page *page)
void mem_cgroup_uncharge_cache_page(struct page *page)
{
- VM_BUG_ON(page_mapped(page));
- VM_BUG_ON(page->mapping);
+ VM_BUG_ON_PAGE(page_mapped(page), page);
+ VM_BUG_ON_PAGE(page->mapping, page);
__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE, false);
}
@@ -4288,10 +4226,10 @@ mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
/*
* record memcg information, if swapout && memcg != NULL,
- * mem_cgroup_get() was called in uncharge().
+ * css_get() was called in uncharge().
*/
if (do_swap_account && swapout && memcg)
- swap_cgroup_record(ent, css_id(&memcg->css));
+ swap_cgroup_record(ent, mem_cgroup_id(memcg));
}
#endif
@@ -4319,7 +4257,7 @@ void mem_cgroup_uncharge_swap(swp_entry_t ent)
if (!mem_cgroup_is_root(memcg))
res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
mem_cgroup_swap_statistics(memcg, false);
- mem_cgroup_put(memcg);
+ css_put(&memcg->css);
}
rcu_read_unlock();
}
@@ -4343,8 +4281,8 @@ static int mem_cgroup_move_swap_account(swp_entry_t entry,
{
unsigned short old_id, new_id;
- old_id = css_id(&from->css);
- new_id = css_id(&to->css);
+ old_id = mem_cgroup_id(from);
+ new_id = mem_cgroup_id(to);
if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
mem_cgroup_swap_statistics(from, false);
@@ -4353,11 +4291,14 @@ static int mem_cgroup_move_swap_account(swp_entry_t entry,
* This function is only called from task migration context now.
* It postpones res_counter and refcount handling till the end
* of task migration(mem_cgroup_clear_mc()) for performance
- * improvement. But we cannot postpone mem_cgroup_get(to)
- * because if the process that has been moved to @to does
- * swap-in, the refcount of @to might be decreased to 0.
+ * improvement. But we cannot postpone css_get(to) because if
+ * the process that has been moved to @to does swap-in, the
+ * refcount of @to might be decreased to 0.
+ *
+ * We are in attach() phase, so the cgroup is guaranteed to be
+ * alive, so we can just call css_get().
*/
- mem_cgroup_get(to);
+ css_get(&to->css);
return 0;
}
return -EINVAL;
@@ -4633,7 +4574,7 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
MEM_CGROUP_RECLAIM_SHRINK);
curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
/* Usage is reduced ? */
- if (curusage >= oldusage)
+ if (curusage >= oldusage)
retry_count--;
else
oldusage = curusage;
@@ -4654,7 +4595,7 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
int enlarge = 0;
/* see mem_cgroup_resize_res_limit */
- retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
+ retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
while (retry_count) {
if (signal_pending(current)) {
@@ -4896,31 +4837,18 @@ static void mem_cgroup_reparent_charges(struct mem_cgroup *memcg)
} while (usage > 0);
}
-/*
- * This mainly exists for tests during the setting of set of use_hierarchy.
- * Since this is the very setting we are changing, the current hierarchy value
- * is meaningless
- */
-static inline bool __memcg_has_children(struct mem_cgroup *memcg)
-{
- struct cgroup *pos;
-
- /* bounce at first found */
- cgroup_for_each_child(pos, memcg->css.cgroup)
- return true;
- return false;
-}
-
-/*
- * Must be called with memcg_create_mutex held, unless the cgroup is guaranteed
- * to be already dead (as in mem_cgroup_force_empty, for instance). This is
- * from mem_cgroup_count_children(), in the sense that we don't really care how
- * many children we have; we only need to know if we have any. It also counts
- * any memcg without hierarchy as infertile.
- */
static inline bool memcg_has_children(struct mem_cgroup *memcg)
{
- return memcg->use_hierarchy && __memcg_has_children(memcg);
+ lockdep_assert_held(&memcg_create_mutex);
+ /*
+ * The lock does not prevent addition or deletion to the list
+ * of children, but it prevents a new child from being
+ * initialized based on this parent in css_online(), so it's
+ * enough to decide whether hierarchically inherited
+ * attributes can still be changed or not.
+ */
+ return memcg->use_hierarchy &&
+ !list_empty(&memcg->css.cgroup->children);
}
/*
@@ -4935,7 +4863,7 @@ static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
struct cgroup *cgrp = memcg->css.cgroup;
/* returns EBUSY if there is a task or if we come here twice. */
- if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
+ if (cgroup_has_tasks(cgrp) || !list_empty(&cgrp->children))
return -EBUSY;
/* we call try-to-free pages for make this cgroup empty */
@@ -4962,36 +4890,28 @@ static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
return 0;
}
-static int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
+static int mem_cgroup_force_empty_write(struct cgroup_subsys_state *css,
+ unsigned int event)
{
- struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
- int ret;
+ struct mem_cgroup *memcg = mem_cgroup_from_css(css);
if (mem_cgroup_is_root(memcg))
return -EINVAL;
- css_get(&memcg->css);
- ret = mem_cgroup_force_empty(memcg);
- css_put(&memcg->css);
-
- return ret;
+ return mem_cgroup_force_empty(memcg);
}
-
-static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
+static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
+ struct cftype *cft)
{
- return mem_cgroup_from_cont(cont)->use_hierarchy;
+ return mem_cgroup_from_css(css)->use_hierarchy;
}
-static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
- u64 val)
+static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
+ struct cftype *cft, u64 val)
{
int retval = 0;
- struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
- struct cgroup *parent = cont->parent;
- struct mem_cgroup *parent_memcg = NULL;
-
- if (parent)
- parent_memcg = mem_cgroup_from_cont(parent);
+ struct mem_cgroup *memcg = mem_cgroup_from_css(css);
+ struct mem_cgroup *parent_memcg = mem_cgroup_from_css(css_parent(&memcg->css));
mutex_lock(&memcg_create_mutex);
@@ -5008,7 +4928,7 @@ static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
*/
if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
(val == 1 || val == 0)) {
- if (!__memcg_has_children(memcg))
+ if (list_empty(&memcg->css.cgroup->children))
memcg->use_hierarchy = val;
else
retval = -EBUSY;
@@ -5061,14 +4981,12 @@ static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
return val << PAGE_SHIFT;
}
-static ssize_t mem_cgroup_read(struct cgroup *cont, struct cftype *cft,
- struct file *file, char __user *buf,
- size_t nbytes, loff_t *ppos)
+static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
+ struct cftype *cft)
{
- struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
- char str[64];
+ struct mem_cgroup *memcg = mem_cgroup_from_css(css);
u64 val;
- int name, len;
+ int name;
enum res_type type;
type = MEMFILE_TYPE(cft->private);
@@ -5094,15 +5012,26 @@ static ssize_t mem_cgroup_read(struct cgroup *cont, struct cftype *cft,
BUG();
}
- len = scnprintf(str, sizeof(str), "%llu\n", (unsigned long long)val);
- return simple_read_from_buffer(buf, nbytes, ppos, str, len);
+ return val;
}
-static int memcg_update_kmem_limit(struct cgroup *cont, u64 val)
-{
- int ret = -EINVAL;
#ifdef CONFIG_MEMCG_KMEM
- struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+/* should be called with activate_kmem_mutex held */
+static int __memcg_activate_kmem(struct mem_cgroup *memcg,
+ unsigned long long limit)
+{
+ int err = 0;
+ int memcg_id;
+
+ if (memcg_kmem_is_active(memcg))
+ return 0;
+
+ /*
+ * We are going to allocate memory for data shared by all memory
+ * cgroups so let's stop accounting here.
+ */
+ memcg_stop_kmem_account();
+
/*
* For simplicity, we won't allow this to be disabled. It also can't
* be changed if the cgroup has children already, or if tasks had
@@ -5116,90 +5045,111 @@ static int memcg_update_kmem_limit(struct cgroup *cont, u64 val)
* of course permitted.
*/
mutex_lock(&memcg_create_mutex);
- mutex_lock(&set_limit_mutex);
- if (!memcg->kmem_account_flags && val != RESOURCE_MAX) {
- if (cgroup_task_count(cont) || memcg_has_children(memcg)) {
- ret = -EBUSY;
- goto out;
- }
- ret = res_counter_set_limit(&memcg->kmem, val);
- VM_BUG_ON(ret);
+ if (cgroup_has_tasks(memcg->css.cgroup) || memcg_has_children(memcg))
+ err = -EBUSY;
+ mutex_unlock(&memcg_create_mutex);
+ if (err)
+ goto out;
- ret = memcg_update_cache_sizes(memcg);
- if (ret) {
- res_counter_set_limit(&memcg->kmem, RESOURCE_MAX);
- goto out;
- }
- static_key_slow_inc(&memcg_kmem_enabled_key);
- /*
- * setting the active bit after the inc will guarantee no one
- * starts accounting before all call sites are patched
- */
- memcg_kmem_set_active(memcg);
+ memcg_id = ida_simple_get(&kmem_limited_groups,
+ 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
+ if (memcg_id < 0) {
+ err = memcg_id;
+ goto out;
+ }
- /*
- * kmem charges can outlive the cgroup. In the case of slab
- * pages, for instance, a page contain objects from various
- * processes, so it is unfeasible to migrate them away. We
- * need to reference count the memcg because of that.
- */
- mem_cgroup_get(memcg);
- } else
- ret = res_counter_set_limit(&memcg->kmem, val);
+ /*
+ * Make sure we have enough space for this cgroup in each root cache's
+ * memcg_params.
+ */
+ err = memcg_update_all_caches(memcg_id + 1);
+ if (err)
+ goto out_rmid;
+
+ memcg->kmemcg_id = memcg_id;
+ INIT_LIST_HEAD(&memcg->memcg_slab_caches);
+ mutex_init(&memcg->slab_caches_mutex);
+
+ /*
+ * We couldn't have accounted to this cgroup, because it hasn't got the
+ * active bit set yet, so this should succeed.
+ */
+ err = res_counter_set_limit(&memcg->kmem, limit);
+ VM_BUG_ON(err);
+
+ static_key_slow_inc(&memcg_kmem_enabled_key);
+ /*
+ * Setting the active bit after enabling static branching will
+ * guarantee no one starts accounting before all call sites are
+ * patched.
+ */
+ memcg_kmem_set_active(memcg);
out:
- mutex_unlock(&set_limit_mutex);
- mutex_unlock(&memcg_create_mutex);
-#endif
+ memcg_resume_kmem_account();
+ return err;
+
+out_rmid:
+ ida_simple_remove(&kmem_limited_groups, memcg_id);
+ goto out;
+}
+
+static int memcg_activate_kmem(struct mem_cgroup *memcg,
+ unsigned long long limit)
+{
+ int ret;
+
+ mutex_lock(&activate_kmem_mutex);
+ ret = __memcg_activate_kmem(memcg, limit);
+ mutex_unlock(&activate_kmem_mutex);
+ return ret;
+}
+
+static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
+ unsigned long long val)
+{
+ int ret;
+
+ if (!memcg_kmem_is_active(memcg))
+ ret = memcg_activate_kmem(memcg, val);
+ else
+ ret = res_counter_set_limit(&memcg->kmem, val);
return ret;
}
-#ifdef CONFIG_MEMCG_KMEM
static int memcg_propagate_kmem(struct mem_cgroup *memcg)
{
int ret = 0;
struct mem_cgroup *parent = parent_mem_cgroup(memcg);
- if (!parent)
- goto out;
- memcg->kmem_account_flags = parent->kmem_account_flags;
- /*
- * When that happen, we need to disable the static branch only on those
- * memcgs that enabled it. To achieve this, we would be forced to
- * complicate the code by keeping track of which memcgs were the ones
- * that actually enabled limits, and which ones got it from its
- * parents.
- *
- * It is a lot simpler just to do static_key_slow_inc() on every child
- * that is accounted.
- */
- if (!memcg_kmem_is_active(memcg))
- goto out;
+ if (!parent)
+ return 0;
+ mutex_lock(&activate_kmem_mutex);
/*
- * destroy(), called if we fail, will issue static_key_slow_inc() and
- * mem_cgroup_put() if kmem is enabled. We have to either call them
- * unconditionally, or clear the KMEM_ACTIVE flag. I personally find
- * this more consistent, since it always leads to the same destroy path
+ * If the parent cgroup is not kmem-active now, it cannot be activated
+ * after this point, because it has at least one child already.
*/
- mem_cgroup_get(memcg);
- static_key_slow_inc(&memcg_kmem_enabled_key);
-
- mutex_lock(&set_limit_mutex);
- ret = memcg_update_cache_sizes(memcg);
- mutex_unlock(&set_limit_mutex);
-out:
+ if (memcg_kmem_is_active(parent))
+ ret = __memcg_activate_kmem(memcg, RES_COUNTER_MAX);
+ mutex_unlock(&activate_kmem_mutex);
return ret;
}
+#else
+static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
+ unsigned long long val)
+{
+ return -EINVAL;
+}
#endif /* CONFIG_MEMCG_KMEM */
/*
* The user of this function is...
* RES_LIMIT.
*/
-static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
- const char *buffer)
+static int mem_cgroup_write(struct cgroup_subsys_state *css, struct cftype *cft,
+ char *buffer)
{
- struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+ struct mem_cgroup *memcg = mem_cgroup_from_css(css);
enum res_type type;
int name;
unsigned long long val;
@@ -5223,7 +5173,7 @@ static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
else if (type == _MEMSWAP)
ret = mem_cgroup_resize_memsw_limit(memcg, val);
else if (type == _KMEM)
- ret = memcg_update_kmem_limit(cont, val);
+ ret = memcg_update_kmem_limit(memcg, val);
else
return -EINVAL;
break;
@@ -5251,18 +5201,15 @@ static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
unsigned long long *mem_limit, unsigned long long *memsw_limit)
{
- struct cgroup *cgroup;
unsigned long long min_limit, min_memsw_limit, tmp;
min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
- cgroup = memcg->css.cgroup;
if (!memcg->use_hierarchy)
goto out;
- while (cgroup->parent) {
- cgroup = cgroup->parent;
- memcg = mem_cgroup_from_cont(cgroup);
+ while (css_parent(&memcg->css)) {
+ memcg = mem_cgroup_from_css(css_parent(&memcg->css));
if (!memcg->use_hierarchy)
break;
tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
@@ -5275,9 +5222,9 @@ out:
*memsw_limit = min_memsw_limit;
}
-static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
+static int mem_cgroup_reset(struct cgroup_subsys_state *css, unsigned int event)
{
- struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+ struct mem_cgroup *memcg = mem_cgroup_from_css(css);
int name;
enum res_type type;
@@ -5310,17 +5257,17 @@ static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
return 0;
}
-static u64 mem_cgroup_move_charge_read(struct cgroup *cgrp,
+static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
struct cftype *cft)
{
- return mem_cgroup_from_cont(cgrp)->move_charge_at_immigrate;
+ return mem_cgroup_from_css(css)->move_charge_at_immigrate;
}
#ifdef CONFIG_MMU
-static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
+static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
struct cftype *cft, u64 val)
{
- struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
+ struct mem_cgroup *memcg = mem_cgroup_from_css(css);
if (val >= (1 << NR_MOVE_TYPE))
return -EINVAL;
@@ -5335,7 +5282,7 @@ static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
return 0;
}
#else
-static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
+static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
struct cftype *cft, u64 val)
{
return -ENOSYS;
@@ -5343,48 +5290,52 @@ static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
#endif
#ifdef CONFIG_NUMA
-static int memcg_numa_stat_show(struct cgroup *cont, struct cftype *cft,
- struct seq_file *m)
+static int memcg_numa_stat_show(struct seq_file *m, void *v)
{
- int nid;
- unsigned long total_nr, file_nr, anon_nr, unevictable_nr;
- unsigned long node_nr;
- struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
-
- total_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL);
- seq_printf(m, "total=%lu", total_nr);
- for_each_node_state(nid, N_MEMORY) {
- node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL);
- seq_printf(m, " N%d=%lu", nid, node_nr);
- }
- seq_putc(m, '\n');
-
- file_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL_FILE);
- seq_printf(m, "file=%lu", file_nr);
- for_each_node_state(nid, N_MEMORY) {
- node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
- LRU_ALL_FILE);
- seq_printf(m, " N%d=%lu", nid, node_nr);
- }
- seq_putc(m, '\n');
+ struct numa_stat {
+ const char *name;
+ unsigned int lru_mask;
+ };
- anon_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL_ANON);
- seq_printf(m, "anon=%lu", anon_nr);
- for_each_node_state(nid, N_MEMORY) {
- node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
- LRU_ALL_ANON);
- seq_printf(m, " N%d=%lu", nid, node_nr);
+ static const struct numa_stat stats[] = {
+ { "total", LRU_ALL },
+ { "file", LRU_ALL_FILE },
+ { "anon", LRU_ALL_ANON },
+ { "unevictable", BIT(LRU_UNEVICTABLE) },
+ };
+ const struct numa_stat *stat;
+ int nid;
+ unsigned long nr;
+ struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
+
+ for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
+ nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
+ seq_printf(m, "%s=%lu", stat->name, nr);
+ for_each_node_state(nid, N_MEMORY) {
+ nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
+ stat->lru_mask);
+ seq_printf(m, " N%d=%lu", nid, nr);
+ }
+ seq_putc(m, '\n');
+ }
+
+ for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
+ struct mem_cgroup *iter;
+
+ nr = 0;
+ for_each_mem_cgroup_tree(iter, memcg)
+ nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
+ seq_printf(m, "hierarchical_%s=%lu", stat->name, nr);
+ for_each_node_state(nid, N_MEMORY) {
+ nr = 0;
+ for_each_mem_cgroup_tree(iter, memcg)
+ nr += mem_cgroup_node_nr_lru_pages(
+ iter, nid, stat->lru_mask);
+ seq_printf(m, " N%d=%lu", nid, nr);
+ }
+ seq_putc(m, '\n');
}
- seq_putc(m, '\n');
- unevictable_nr = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_UNEVICTABLE));
- seq_printf(m, "unevictable=%lu", unevictable_nr);
- for_each_node_state(nid, N_MEMORY) {
- node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
- BIT(LRU_UNEVICTABLE));
- seq_printf(m, " N%d=%lu", nid, node_nr);
- }
- seq_putc(m, '\n');
return 0;
}
#endif /* CONFIG_NUMA */
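/*
 * Illustrative userspace sketch (not part of the patch): the rewritten
 * memcg_numa_stat_show() above replaces four copy-pasted print loops with a
 * single { name, lru_mask } table.  The mask values and the fake per-node
 * counter below are assumptions made up for the illustration only.
 */
#include <stdio.h>

#define NR_NODES 2

struct numa_stat {
	const char *name;
	unsigned int lru_mask;
};

/* Stand-in for mem_cgroup_node_nr_lru_pages(): returns fake counts. */
static unsigned long node_nr_pages(int nid, unsigned int lru_mask)
{
	return (nid + 1) * 100UL + lru_mask;
}

int main(void)
{
	static const struct numa_stat stats[] = {
		{ "total",       0x0f },
		{ "file",        0x03 },
		{ "anon",        0x0c },
		{ "unevictable", 0x10 },
	};
	const struct numa_stat *stat;
	int nid;

	/* One loop emits "name=<sum> N0=<n> N1=<n> ..." per table entry. */
	for (stat = stats; stat < stats + sizeof(stats) / sizeof(stats[0]); stat++) {
		unsigned long nr = 0;

		for (nid = 0; nid < NR_NODES; nid++)
			nr += node_nr_pages(nid, stat->lru_mask);
		printf("%s=%lu", stat->name, nr);
		for (nid = 0; nid < NR_NODES; nid++)
			printf(" N%d=%lu", nid, node_nr_pages(nid, stat->lru_mask));
		putchar('\n');
	}
	return 0;
}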
@@ -5394,10 +5345,9 @@ static inline void mem_cgroup_lru_names_not_uptodate(void)
BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
}
-static int memcg_stat_show(struct cgroup *cont, struct cftype *cft,
- struct seq_file *m)
+static int memcg_stat_show(struct seq_file *m, void *v)
{
- struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+ struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
struct mem_cgroup *mi;
unsigned int i;
@@ -5481,27 +5431,23 @@ static int memcg_stat_show(struct cgroup *cont, struct cftype *cft,
return 0;
}
-static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
+static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
+ struct cftype *cft)
{
- struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
+ struct mem_cgroup *memcg = mem_cgroup_from_css(css);
return mem_cgroup_swappiness(memcg);
}
-static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
- u64 val)
+static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
+ struct cftype *cft, u64 val)
{
- struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
- struct mem_cgroup *parent;
+ struct mem_cgroup *memcg = mem_cgroup_from_css(css);
+ struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(&memcg->css));
- if (val > 100)
+ if (val > 100 || !parent)
return -EINVAL;
- if (cgrp->parent == NULL)
- return -EINVAL;
-
- parent = mem_cgroup_from_cont(cgrp->parent);
-
mutex_lock(&memcg_create_mutex);
/* If under hierarchy, only empty-root can set this value */
@@ -5584,7 +5530,13 @@ static int compare_thresholds(const void *a, const void *b)
const struct mem_cgroup_threshold *_a = a;
const struct mem_cgroup_threshold *_b = b;
- return _a->threshold - _b->threshold;
+ if (_a->threshold > _b->threshold)
+ return 1;
+
+ if (_a->threshold < _b->threshold)
+ return -1;
+
+ return 0;
}
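/*
 * Illustrative userspace sketch (not part of the patch): the hunk above
 * replaces "return a - b" with an explicit three-way comparison because the
 * thresholds are 64-bit, and the subtraction result truncated to int can
 * flip sign for large values and misorder the array.  The demo assumes the
 * common case of a 32-bit int.
 */
#include <stdint.h>
#include <stdio.h>

struct threshold { uint64_t threshold; };

/* Old-style comparator: the u64 difference is silently truncated to int. */
static int cmp_by_subtraction(const void *a, const void *b)
{
	const struct threshold *_a = a;
	const struct threshold *_b = b;

	return _a->threshold - _b->threshold;
}

/* Comparator mirroring the patched compare_thresholds(). */
static int cmp_three_way(const void *a, const void *b)
{
	const struct threshold *_a = a;
	const struct threshold *_b = b;

	if (_a->threshold > _b->threshold)
		return 1;
	if (_a->threshold < _b->threshold)
		return -1;
	return 0;
}

int main(void)
{
	struct threshold small = { 1 }, big = { 1ULL << 32 };

	/* 2^32 - 1 = 0xffffffff truncates to -1: claims "big < small". */
	printf("subtraction comparator: %d\n", cmp_by_subtraction(&big, &small));
	/* The three-way version correctly reports big > small. */
	printf("three-way comparator:   %d\n", cmp_three_way(&big, &small));
	return 0;
}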
static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
@@ -5604,13 +5556,11 @@ static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
mem_cgroup_oom_notify_cb(iter);
}
-static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
- struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
+static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
+ struct eventfd_ctx *eventfd, const char *args, enum res_type type)
{
- struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
struct mem_cgroup_thresholds *thresholds;
struct mem_cgroup_threshold_ary *new;
- enum res_type type = MEMFILE_TYPE(cft->private);
u64 threshold, usage;
int i, size, ret;
@@ -5687,13 +5637,23 @@ unlock:
return ret;
}
-static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
- struct cftype *cft, struct eventfd_ctx *eventfd)
+static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
+ struct eventfd_ctx *eventfd, const char *args)
+{
+ return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
+}
+
+static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
+ struct eventfd_ctx *eventfd, const char *args)
+{
+ return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
+}
+
+static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
+ struct eventfd_ctx *eventfd, enum res_type type)
{
- struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
struct mem_cgroup_thresholds *thresholds;
struct mem_cgroup_threshold_ary *new;
- enum res_type type = MEMFILE_TYPE(cft->private);
u64 usage;
int i, j, size;
@@ -5766,14 +5726,23 @@ unlock:
mutex_unlock(&memcg->thresholds_lock);
}
-static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
- struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
+static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
+ struct eventfd_ctx *eventfd)
+{
+ return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
+}
+
+static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
+ struct eventfd_ctx *eventfd)
+{
+ return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
+}
+
+static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
+ struct eventfd_ctx *eventfd, const char *args)
{
- struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
struct mem_cgroup_eventfd_list *event;
- enum res_type type = MEMFILE_TYPE(cft->private);
- BUG_ON(type != _OOM_TYPE);
event = kmalloc(sizeof(*event), GFP_KERNEL);
if (!event)
return -ENOMEM;
@@ -5791,14 +5760,10 @@ static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
return 0;
}
-static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
- struct cftype *cft, struct eventfd_ctx *eventfd)
+static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
+ struct eventfd_ctx *eventfd)
{
- struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
struct mem_cgroup_eventfd_list *ev, *tmp;
- enum res_type type = MEMFILE_TYPE(cft->private);
-
- BUG_ON(type != _OOM_TYPE);
spin_lock(&memcg_oom_lock);
@@ -5812,32 +5777,25 @@ static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
spin_unlock(&memcg_oom_lock);
}
-static int mem_cgroup_oom_control_read(struct cgroup *cgrp,
- struct cftype *cft, struct cgroup_map_cb *cb)
+static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
{
- struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
+ struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
- cb->fill(cb, "oom_kill_disable", memcg->oom_kill_disable);
-
- if (atomic_read(&memcg->under_oom))
- cb->fill(cb, "under_oom", 1);
- else
- cb->fill(cb, "under_oom", 0);
+ seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
+ seq_printf(sf, "under_oom %d\n", (bool)atomic_read(&memcg->under_oom));
return 0;
}
-static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
+static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
struct cftype *cft, u64 val)
{
- struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
- struct mem_cgroup *parent;
+ struct mem_cgroup *memcg = mem_cgroup_from_css(css);
+ struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(&memcg->css));
/* cannot set to root cgroup and only 0 and 1 are allowed */
- if (!cgrp->parent || !((val == 0) || (val == 1)))
+ if (!parent || !((val == 0) || (val == 1)))
return -EINVAL;
- parent = mem_cgroup_from_cont(cgrp->parent);
-
mutex_lock(&memcg_create_mutex);
/* oom-kill-disable is a flag for subhierarchy. */
if ((parent->use_hierarchy) || memcg_has_children(memcg)) {
@@ -5864,23 +5822,43 @@ static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
return mem_cgroup_sockets_init(memcg, ss);
}
-static void kmem_cgroup_destroy(struct mem_cgroup *memcg)
+static void memcg_destroy_kmem(struct mem_cgroup *memcg)
{
mem_cgroup_sockets_destroy(memcg);
+}
+
+static void kmem_cgroup_css_offline(struct mem_cgroup *memcg)
+{
+ if (!memcg_kmem_is_active(memcg))
+ return;
+
+ /*
+ * kmem charges can outlive the cgroup. In the case of slab
+	 * pages, for instance, a page can contain objects from various
+	 * processes. As we do not take a reference for every such
+	 * allocation, we have to be careful when doing uncharge
+ * (see memcg_uncharge_kmem) and here during offlining.
+ *
+	 * The idea is that only the _last_ uncharge which sees
+ * the dead memcg will drop the last reference. An additional
+ * reference is taken here before the group is marked dead
+ * which is then paired with css_put during uncharge resp. here.
+ *
+ * Although this might sound strange as this path is called from
+	 * css_offline() when the reference might have dropped down to 0
+ * and shouldn't be incremented anymore (css_tryget would fail)
+ * we do not have other options because of the kmem allocations
+ * lifetime.
+ */
+ css_get(&memcg->css);
memcg_kmem_mark_dead(memcg);
if (res_counter_read_u64(&memcg->kmem, RES_USAGE) != 0)
return;
- /*
- * Charges already down to 0, undo mem_cgroup_get() done in the charge
- * path here, being careful not to race with memcg_uncharge_kmem: it is
- * possible that the charges went down to 0 between mark_dead and the
- * res_counter read, so in that case, we don't need the put
- */
if (memcg_kmem_test_and_clear_dead(memcg))
- mem_cgroup_put(memcg);
+ css_put(&memcg->css);
}
#else
static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
@@ -5888,46 +5866,268 @@ static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
return 0;
}
-static void kmem_cgroup_destroy(struct mem_cgroup *memcg)
+static void memcg_destroy_kmem(struct mem_cgroup *memcg)
+{
+}
+
+static void kmem_cgroup_css_offline(struct mem_cgroup *memcg)
{
}
#endif
+/*
+ * DO NOT USE IN NEW FILES.
+ *
+ * "cgroup.event_control" implementation.
+ *
+ * This is way over-engineered. It tries to support fully configurable
+ * events for each user. Such level of flexibility is completely
+ * unnecessary especially in the light of the planned unified hierarchy.
+ *
+ * Please deprecate this and replace with something simpler if at all
+ * possible.
+ */
+
+/*
+ * Unregister event and free resources.
+ *
+ * Gets called from workqueue.
+ */
+static void memcg_event_remove(struct work_struct *work)
+{
+ struct mem_cgroup_event *event =
+ container_of(work, struct mem_cgroup_event, remove);
+ struct mem_cgroup *memcg = event->memcg;
+
+ remove_wait_queue(event->wqh, &event->wait);
+
+ event->unregister_event(memcg, event->eventfd);
+
+ /* Notify userspace the event is going away. */
+ eventfd_signal(event->eventfd, 1);
+
+ eventfd_ctx_put(event->eventfd);
+ kfree(event);
+ css_put(&memcg->css);
+}
+
+/*
+ * Gets called on POLLHUP on eventfd when user closes it.
+ *
+ * Called with wqh->lock held and interrupts disabled.
+ */
+static int memcg_event_wake(wait_queue_t *wait, unsigned mode,
+ int sync, void *key)
+{
+ struct mem_cgroup_event *event =
+ container_of(wait, struct mem_cgroup_event, wait);
+ struct mem_cgroup *memcg = event->memcg;
+ unsigned long flags = (unsigned long)key;
+
+ if (flags & POLLHUP) {
+ /*
+ * If the event has been detached at cgroup removal, we
+ * can simply return knowing the other side will cleanup
+ * for us.
+ *
+ * We can't race against event freeing since the other
+ * side will require wqh->lock via remove_wait_queue(),
+ * which we hold.
+ */
+ spin_lock(&memcg->event_list_lock);
+ if (!list_empty(&event->list)) {
+ list_del_init(&event->list);
+ /*
+ * We are in atomic context, but cgroup_event_remove()
+ * may sleep, so we have to call it in workqueue.
+ */
+ schedule_work(&event->remove);
+ }
+ spin_unlock(&memcg->event_list_lock);
+ }
+
+ return 0;
+}
+
+static void memcg_event_ptable_queue_proc(struct file *file,
+ wait_queue_head_t *wqh, poll_table *pt)
+{
+ struct mem_cgroup_event *event =
+ container_of(pt, struct mem_cgroup_event, pt);
+
+ event->wqh = wqh;
+ add_wait_queue(wqh, &event->wait);
+}
+
+/*
+ * DO NOT USE IN NEW FILES.
+ *
+ * Parse input and register new cgroup event handler.
+ *
+ * Input must be in format '<event_fd> <control_fd> <args>'.
+ * Interpretation of args is defined by control file implementation.
+ */
+static int memcg_write_event_control(struct cgroup_subsys_state *css,
+ struct cftype *cft, char *buffer)
+{
+ struct mem_cgroup *memcg = mem_cgroup_from_css(css);
+ struct mem_cgroup_event *event;
+ struct cgroup_subsys_state *cfile_css;
+ unsigned int efd, cfd;
+ struct fd efile;
+ struct fd cfile;
+ const char *name;
+ char *endp;
+ int ret;
+
+ efd = simple_strtoul(buffer, &endp, 10);
+ if (*endp != ' ')
+ return -EINVAL;
+ buffer = endp + 1;
+
+ cfd = simple_strtoul(buffer, &endp, 10);
+ if ((*endp != ' ') && (*endp != '\0'))
+ return -EINVAL;
+ buffer = endp + 1;
+
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+ if (!event)
+ return -ENOMEM;
+
+ event->memcg = memcg;
+ INIT_LIST_HEAD(&event->list);
+ init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
+ init_waitqueue_func_entry(&event->wait, memcg_event_wake);
+ INIT_WORK(&event->remove, memcg_event_remove);
+
+ efile = fdget(efd);
+ if (!efile.file) {
+ ret = -EBADF;
+ goto out_kfree;
+ }
+
+ event->eventfd = eventfd_ctx_fileget(efile.file);
+ if (IS_ERR(event->eventfd)) {
+ ret = PTR_ERR(event->eventfd);
+ goto out_put_efile;
+ }
+
+ cfile = fdget(cfd);
+ if (!cfile.file) {
+ ret = -EBADF;
+ goto out_put_eventfd;
+ }
+
+	/* the process needs read permission on the control file */
+ /* AV: shouldn't we check that it's been opened for read instead? */
+ ret = inode_permission(file_inode(cfile.file), MAY_READ);
+ if (ret < 0)
+ goto out_put_cfile;
+
+ /*
+ * Determine the event callbacks and set them in @event. This used
+ * to be done via struct cftype but cgroup core no longer knows
+ * about these events. The following is crude but the whole thing
+ * is for compatibility anyway.
+ *
+ * DO NOT ADD NEW FILES.
+ */
+ name = cfile.file->f_dentry->d_name.name;
+
+ if (!strcmp(name, "memory.usage_in_bytes")) {
+ event->register_event = mem_cgroup_usage_register_event;
+ event->unregister_event = mem_cgroup_usage_unregister_event;
+ } else if (!strcmp(name, "memory.oom_control")) {
+ event->register_event = mem_cgroup_oom_register_event;
+ event->unregister_event = mem_cgroup_oom_unregister_event;
+ } else if (!strcmp(name, "memory.pressure_level")) {
+ event->register_event = vmpressure_register_event;
+ event->unregister_event = vmpressure_unregister_event;
+ } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
+ event->register_event = memsw_cgroup_usage_register_event;
+ event->unregister_event = memsw_cgroup_usage_unregister_event;
+ } else {
+ ret = -EINVAL;
+ goto out_put_cfile;
+ }
+
+ /*
+	 * Verify that @cfile belongs to @css. Also, remaining events are
+ * automatically removed on cgroup destruction but the removal is
+ * asynchronous, so take an extra ref on @css.
+ */
+ cfile_css = css_tryget_from_dir(cfile.file->f_dentry->d_parent,
+ &memory_cgrp_subsys);
+ ret = -EINVAL;
+ if (IS_ERR(cfile_css))
+ goto out_put_cfile;
+ if (cfile_css != css) {
+ css_put(cfile_css);
+ goto out_put_cfile;
+ }
+
+ ret = event->register_event(memcg, event->eventfd, buffer);
+ if (ret)
+ goto out_put_css;
+
+ efile.file->f_op->poll(efile.file, &event->pt);
+
+ spin_lock(&memcg->event_list_lock);
+ list_add(&event->list, &memcg->event_list);
+ spin_unlock(&memcg->event_list_lock);
+
+ fdput(cfile);
+ fdput(efile);
+
+ return 0;
+
+out_put_css:
+ css_put(css);
+out_put_cfile:
+ fdput(cfile);
+out_put_eventfd:
+ eventfd_ctx_put(event->eventfd);
+out_put_efile:
+ fdput(efile);
+out_kfree:
+ kfree(event);
+
+ return ret;
+}
+
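/*
 * Illustrative userspace sketch (not part of the patch) of the legacy
 * interface memcg_write_event_control() parses: a program writes
 * "<event_fd> <control_fd> <args>" to cgroup.event_control and then waits
 * on the eventfd.  The cgroup path and the 64 MiB threshold below are
 * assumptions for the example; as the comment above notes, this is the v1
 * compatibility interface and should not be extended.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/eventfd.h>
#include <unistd.h>

int main(void)
{
	const char *dir = "/sys/fs/cgroup/memory/demo";	/* assumed path */
	char path[256], cmd[128];
	uint64_t count;
	int efd, cfd, ecfd;

	efd = eventfd(0, 0);

	snprintf(path, sizeof(path), "%s/memory.usage_in_bytes", dir);
	cfd = open(path, O_RDONLY);

	snprintf(path, sizeof(path), "%s/cgroup.event_control", dir);
	ecfd = open(path, O_WRONLY);

	if (efd < 0 || cfd < 0 || ecfd < 0) {
		perror("setup");
		return 1;
	}

	/* "<event_fd> <control_fd> <args>": arm a 64 MiB usage threshold. */
	snprintf(cmd, sizeof(cmd), "%d %d %llu", efd, cfd, 64ULL << 20);
	if (write(ecfd, cmd, strlen(cmd)) < 0) {
		perror("cgroup.event_control");
		return 1;
	}

	/* Blocks until usage crosses the threshold or the cgroup is removed. */
	read(efd, &count, sizeof(count));
	printf("memory.usage_in_bytes crossed the threshold\n");
	return 0;
}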
static struct cftype mem_cgroup_files[] = {
{
.name = "usage_in_bytes",
.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
- .read = mem_cgroup_read,
- .register_event = mem_cgroup_usage_register_event,
- .unregister_event = mem_cgroup_usage_unregister_event,
+ .read_u64 = mem_cgroup_read_u64,
},
{
.name = "max_usage_in_bytes",
.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
.trigger = mem_cgroup_reset,
- .read = mem_cgroup_read,
+ .read_u64 = mem_cgroup_read_u64,
},
{
.name = "limit_in_bytes",
.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
.write_string = mem_cgroup_write,
- .read = mem_cgroup_read,
+ .read_u64 = mem_cgroup_read_u64,
},
{
.name = "soft_limit_in_bytes",
.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
.write_string = mem_cgroup_write,
- .read = mem_cgroup_read,
+ .read_u64 = mem_cgroup_read_u64,
},
{
.name = "failcnt",
.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
.trigger = mem_cgroup_reset,
- .read = mem_cgroup_read,
+ .read_u64 = mem_cgroup_read_u64,
},
{
.name = "stat",
- .read_seq_string = memcg_stat_show,
+ .seq_show = memcg_stat_show,
},
{
.name = "force_empty",
@@ -5940,6 +6140,12 @@ static struct cftype mem_cgroup_files[] = {
.read_u64 = mem_cgroup_hierarchy_read,
},
{
+ .name = "cgroup.event_control", /* XXX: for compat */
+ .write_string = memcg_write_event_control,
+ .flags = CFTYPE_NO_PREFIX,
+ .mode = S_IWUGO,
+ },
+ {
.name = "swappiness",
.read_u64 = mem_cgroup_swappiness_read,
.write_u64 = mem_cgroup_swappiness_write,
@@ -5951,21 +6157,17 @@ static struct cftype mem_cgroup_files[] = {
},
{
.name = "oom_control",
- .read_map = mem_cgroup_oom_control_read,
+ .seq_show = mem_cgroup_oom_control_read,
.write_u64 = mem_cgroup_oom_control_write,
- .register_event = mem_cgroup_oom_register_event,
- .unregister_event = mem_cgroup_oom_unregister_event,
.private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
},
{
.name = "pressure_level",
- .register_event = vmpressure_register_event,
- .unregister_event = vmpressure_unregister_event,
},
#ifdef CONFIG_NUMA
{
.name = "numa_stat",
- .read_seq_string = memcg_numa_stat_show,
+ .seq_show = memcg_numa_stat_show,
},
#endif
#ifdef CONFIG_MEMCG_KMEM
@@ -5973,29 +6175,29 @@ static struct cftype mem_cgroup_files[] = {
.name = "kmem.limit_in_bytes",
.private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
.write_string = mem_cgroup_write,
- .read = mem_cgroup_read,
+ .read_u64 = mem_cgroup_read_u64,
},
{
.name = "kmem.usage_in_bytes",
.private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
- .read = mem_cgroup_read,
+ .read_u64 = mem_cgroup_read_u64,
},
{
.name = "kmem.failcnt",
.private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
.trigger = mem_cgroup_reset,
- .read = mem_cgroup_read,
+ .read_u64 = mem_cgroup_read_u64,
},
{
.name = "kmem.max_usage_in_bytes",
.private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
.trigger = mem_cgroup_reset,
- .read = mem_cgroup_read,
+ .read_u64 = mem_cgroup_read_u64,
},
#ifdef CONFIG_SLABINFO
{
.name = "kmem.slabinfo",
- .read_seq_string = mem_cgroup_slabinfo_read,
+ .seq_show = mem_cgroup_slabinfo_read,
},
#endif
#endif
@@ -6007,27 +6209,25 @@ static struct cftype memsw_cgroup_files[] = {
{
.name = "memsw.usage_in_bytes",
.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
- .read = mem_cgroup_read,
- .register_event = mem_cgroup_usage_register_event,
- .unregister_event = mem_cgroup_usage_unregister_event,
+ .read_u64 = mem_cgroup_read_u64,
},
{
.name = "memsw.max_usage_in_bytes",
.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
.trigger = mem_cgroup_reset,
- .read = mem_cgroup_read,
+ .read_u64 = mem_cgroup_read_u64,
},
{
.name = "memsw.limit_in_bytes",
.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
.write_string = mem_cgroup_write,
- .read = mem_cgroup_read,
+ .read_u64 = mem_cgroup_read_u64,
},
{
.name = "memsw.failcnt",
.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
.trigger = mem_cgroup_reset,
- .read = mem_cgroup_read,
+ .read_u64 = mem_cgroup_read_u64,
},
{ }, /* terminate */
};
@@ -6058,26 +6258,24 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
mz->on_tree = false;
mz->memcg = memcg;
}
- memcg->info.nodeinfo[node] = pn;
+ memcg->nodeinfo[node] = pn;
return 0;
}
static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
{
- kfree(memcg->info.nodeinfo[node]);
+ kfree(memcg->nodeinfo[node]);
}
static struct mem_cgroup *mem_cgroup_alloc(void)
{
struct mem_cgroup *memcg;
- size_t size = memcg_size();
+ size_t size;
- /* Can be very big if nr_node_ids is very big */
- if (size < PAGE_SIZE)
- memcg = kzalloc(size, GFP_KERNEL);
- else
- memcg = vzalloc(size);
+ size = sizeof(struct mem_cgroup);
+ size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
+ memcg = kzalloc(size, GFP_KERNEL);
if (!memcg)
return NULL;
@@ -6088,10 +6286,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
return memcg;
out_free:
- if (size < PAGE_SIZE)
- kfree(memcg);
- else
- vfree(memcg);
+ kfree(memcg);
return NULL;
}
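/*
 * Illustrative userspace sketch (hypothetical types, not part of the patch)
 * of the allocation pattern mem_cgroup_alloc() switches to above: one zeroed
 * allocation sized for the fixed struct plus a trailing pointer slot per
 * node, instead of choosing between kzalloc() and vzalloc() by size.
 */
#include <stdlib.h>

struct per_node_info { long counters[4]; };	/* stand-in for per-node data */

struct group {
	int flags;
	/* Trailing per-node pointer array, sized at allocation time. */
	struct per_node_info *nodeinfo[];
};

static struct group *group_alloc(int nr_node_ids)
{
	size_t size = sizeof(struct group) +
		      nr_node_ids * sizeof(struct per_node_info *);

	return calloc(1, size);		/* calloc() plays the role of kzalloc() */
}

int main(void)
{
	struct group *g = group_alloc(4);

	if (!g)
		return 1;
	g->nodeinfo[3] = calloc(1, sizeof(*g->nodeinfo[3]));
	free(g->nodeinfo[3]);
	free(g);
	return 0;
}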
@@ -6109,10 +6304,8 @@ out_free:
static void __mem_cgroup_free(struct mem_cgroup *memcg)
{
int node;
- size_t size = memcg_size();
mem_cgroup_remove_from_trees(memcg);
- free_css_id(&mem_cgroup_subsys, &memcg->css);
for_each_node(node)
free_mem_cgroup_per_zone_info(memcg, node);
@@ -6131,53 +6324,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
* the cgroup_lock.
*/
disarm_static_keys(memcg);
- if (size < PAGE_SIZE)
- kfree(memcg);
- else
- vfree(memcg);
-}
-
-
-/*
- * Helpers for freeing a kmalloc()ed/vzalloc()ed mem_cgroup by RCU,
- * but in process context. The work_freeing structure is overlaid
- * on the rcu_freeing structure, which itself is overlaid on memsw.
- */
-static void free_work(struct work_struct *work)
-{
- struct mem_cgroup *memcg;
-
- memcg = container_of(work, struct mem_cgroup, work_freeing);
- __mem_cgroup_free(memcg);
-}
-
-static void free_rcu(struct rcu_head *rcu_head)
-{
- struct mem_cgroup *memcg;
-
- memcg = container_of(rcu_head, struct mem_cgroup, rcu_freeing);
- INIT_WORK(&memcg->work_freeing, free_work);
- schedule_work(&memcg->work_freeing);
-}
-
-static void mem_cgroup_get(struct mem_cgroup *memcg)
-{
- atomic_inc(&memcg->refcnt);
-}
-
-static void __mem_cgroup_put(struct mem_cgroup *memcg, int count)
-{
- if (atomic_sub_and_test(count, &memcg->refcnt)) {
- struct mem_cgroup *parent = parent_mem_cgroup(memcg);
- call_rcu(&memcg->rcu_freeing, free_rcu);
- if (parent)
- mem_cgroup_put(parent);
- }
-}
-
-static void mem_cgroup_put(struct mem_cgroup *memcg)
-{
- __mem_cgroup_put(memcg, 1);
+ kfree(memcg);
}
/*
@@ -6215,7 +6362,7 @@ static void __init mem_cgroup_soft_limit_tree_init(void)
}
static struct cgroup_subsys_state * __ref
-mem_cgroup_css_alloc(struct cgroup *cont)
+mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
struct mem_cgroup *memcg;
long error = -ENOMEM;
@@ -6230,7 +6377,7 @@ mem_cgroup_css_alloc(struct cgroup *cont)
goto free_out;
/* root ? */
- if (cont->parent == NULL) {
+ if (parent_css == NULL) {
root_mem_cgroup = memcg;
res_counter_init(&memcg->res, NULL);
res_counter_init(&memcg->memsw, NULL);
@@ -6239,11 +6386,12 @@ mem_cgroup_css_alloc(struct cgroup *cont)
memcg->last_scanned_node = MAX_NUMNODES;
INIT_LIST_HEAD(&memcg->oom_notify);
- atomic_set(&memcg->refcnt, 1);
memcg->move_charge_at_immigrate = 0;
mutex_init(&memcg->thresholds_lock);
spin_lock_init(&memcg->move_lock);
vmpressure_init(&memcg->vmpressure);
+ INIT_LIST_HEAD(&memcg->event_list);
+ spin_lock_init(&memcg->event_list_lock);
return &memcg->css;
@@ -6253,17 +6401,18 @@ free_out:
}
static int
-mem_cgroup_css_online(struct cgroup *cont)
+mem_cgroup_css_online(struct cgroup_subsys_state *css)
{
- struct mem_cgroup *memcg, *parent;
- int error = 0;
+ struct mem_cgroup *memcg = mem_cgroup_from_css(css);
+ struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(css));
- if (!cont->parent)
+ if (css->cgroup->id > MEM_CGROUP_ID_MAX)
+ return -ENOSPC;
+
+ if (!parent)
return 0;
mutex_lock(&memcg_create_mutex);
- memcg = mem_cgroup_from_cont(cont);
- parent = mem_cgroup_from_cont(cont->parent);
memcg->use_hierarchy = parent->use_hierarchy;
memcg->oom_kill_disable = parent->oom_kill_disable;
@@ -6275,12 +6424,9 @@ mem_cgroup_css_online(struct cgroup *cont)
res_counter_init(&memcg->kmem, &parent->kmem);
/*
- * We increment refcnt of the parent to ensure that we can
- * safely access it on res_counter_charge/uncharge.
- * This refcnt will be decremented when freeing this
- * mem_cgroup(see mem_cgroup_put).
+ * No need to take a reference to the parent because cgroup
+ * core guarantees its existence.
*/
- mem_cgroup_get(parent);
} else {
res_counter_init(&memcg->res, NULL);
res_counter_init(&memcg->memsw, NULL);
@@ -6291,22 +6437,11 @@ mem_cgroup_css_online(struct cgroup *cont)
* unfortunate state in our controller.
*/
if (parent != root_mem_cgroup)
- mem_cgroup_subsys.broken_hierarchy = true;
+ memory_cgrp_subsys.broken_hierarchy = true;
}
-
- error = memcg_init_kmem(memcg, &mem_cgroup_subsys);
mutex_unlock(&memcg_create_mutex);
- if (error) {
- /*
- * We call put now because our (and parent's) refcnts
- * are already in place. mem_cgroup_put() will internally
- * call __mem_cgroup_free, so return directly
- */
- mem_cgroup_put(memcg);
- if (parent->use_hierarchy)
- mem_cgroup_put(parent);
- }
- return error;
+
+ return memcg_init_kmem(memcg, &memory_cgrp_subsys);
}
/*
@@ -6317,32 +6452,91 @@ static void mem_cgroup_invalidate_reclaim_iterators(struct mem_cgroup *memcg)
struct mem_cgroup *parent = memcg;
while ((parent = parent_mem_cgroup(parent)))
- atomic_inc(&parent->dead_count);
+ mem_cgroup_iter_invalidate(parent);
/*
* if the root memcg is not hierarchical we have to check it
* explicitely.
*/
if (!root_mem_cgroup->use_hierarchy)
- atomic_inc(&root_mem_cgroup->dead_count);
+ mem_cgroup_iter_invalidate(root_mem_cgroup);
}
-static void mem_cgroup_css_offline(struct cgroup *cont)
+static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
{
- struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+ struct mem_cgroup *memcg = mem_cgroup_from_css(css);
+ struct mem_cgroup_event *event, *tmp;
+ struct cgroup_subsys_state *iter;
+
+ /*
+ * Unregister events and notify userspace.
+	 * Notify userspace about cgroup removal only after rmdir of the cgroup
+	 * directory to avoid a race between userspace and kernelspace.
+ */
+ spin_lock(&memcg->event_list_lock);
+ list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
+ list_del_init(&event->list);
+ schedule_work(&event->remove);
+ }
+ spin_unlock(&memcg->event_list_lock);
+
+ kmem_cgroup_css_offline(memcg);
mem_cgroup_invalidate_reclaim_iterators(memcg);
- mem_cgroup_reparent_charges(memcg);
+
+ /*
+ * This requires that offlining is serialized. Right now that is
+ * guaranteed because css_killed_work_fn() holds the cgroup_mutex.
+ */
+ css_for_each_descendant_post(iter, css)
+ mem_cgroup_reparent_charges(mem_cgroup_from_css(iter));
+
mem_cgroup_destroy_all_caches(memcg);
+ vmpressure_cleanup(&memcg->vmpressure);
}
-static void mem_cgroup_css_free(struct cgroup *cont)
+static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
{
- struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
-
- kmem_cgroup_destroy(memcg);
+ struct mem_cgroup *memcg = mem_cgroup_from_css(css);
+ /*
+ * XXX: css_offline() would be where we should reparent all
+ * memory to prepare the cgroup for destruction. However,
+ * memcg does not do css_tryget() and res_counter charging
+ * under the same RCU lock region, which means that charging
+ * could race with offlining. Offlining only happens to
+ * cgroups with no tasks in them but charges can show up
+ * without any tasks from the swapin path when the target
+ * memcg is looked up from the swapout record and not from the
+ * current task as it usually is. A race like this can leak
+ * charges and put pages with stale cgroup pointers into
+ * circulation:
+ *
+ * #0 #1
+ * lookup_swap_cgroup_id()
+ * rcu_read_lock()
+ * mem_cgroup_lookup()
+ * css_tryget()
+ * rcu_read_unlock()
+ * disable css_tryget()
+ * call_rcu()
+ * offline_css()
+ * reparent_charges()
+ * res_counter_charge()
+ * css_put()
+ * css_free()
+ * pc->mem_cgroup = dead memcg
+ * add page to lru
+ *
+ * The bulk of the charges are still moved in offline_css() to
+ * avoid pinning a lot of pages in case a long-term reference
+ * like a swapout record is deferring the css_free() to long
+ * after offlining. But this makes sure we catch any charges
+ * made after offlining:
+ */
+ mem_cgroup_reparent_charges(memcg);
- mem_cgroup_put(memcg);
+ memcg_destroy_kmem(memcg);
+ __mem_cgroup_free(memcg);
}
#ifdef CONFIG_MMU
@@ -6389,8 +6583,7 @@ one_by_one:
batch_count = PRECHARGE_COUNT_AT_ONCE;
cond_resched();
}
- ret = __mem_cgroup_try_charge(NULL,
- GFP_KERNEL, 1, &memcg, false);
+ ret = mem_cgroup_try_charge(memcg, GFP_KERNEL, 1, false);
if (ret)
/* mem_cgroup_clear_mc() will do uncharge later */
return ret;
@@ -6494,16 +6687,20 @@ static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
pgoff = pte_to_pgoff(ptent);
/* page is moved even if it's not RSS of this task(page-faulted). */
- page = find_get_page(mapping, pgoff);
-
#ifdef CONFIG_SWAP
/* shmem/tmpfs may report page out on swap: account for that too. */
- if (radix_tree_exceptional_entry(page)) {
- swp_entry_t swap = radix_to_swp_entry(page);
- if (do_swap_account)
- *entry = swap;
- page = find_get_page(swap_address_space(swap), swap.val);
- }
+ if (shmem_mapping(mapping)) {
+ page = find_get_entry(mapping, pgoff);
+ if (radix_tree_exceptional_entry(page)) {
+ swp_entry_t swp = radix_to_swp_entry(page);
+ if (do_swap_account)
+ *entry = swp;
+ page = find_get_page(swap_address_space(swp), swp.val);
+ }
+ } else
+ page = find_get_page(mapping, pgoff);
+#else
+ page = find_get_page(mapping, pgoff);
#endif
return page;
}
@@ -6542,7 +6739,7 @@ static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
}
/* There is a swap entry and a page doesn't exist or isn't charged */
if (ent.val && !ret &&
- css_id(&mc.from->css) == lookup_swap_cgroup_id(ent)) {
+ mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
ret = MC_TARGET_SWAP;
if (target)
target->ent = ent;
@@ -6564,7 +6761,7 @@ static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
enum mc_target_type ret = MC_TARGET_NONE;
page = pmd_page(pmd);
- VM_BUG_ON(!page || !PageHead(page));
+ VM_BUG_ON_PAGE(!page || !PageHead(page), page);
if (!move_anon())
return ret;
pc = lookup_page_cgroup(page);
@@ -6593,10 +6790,10 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
pte_t *pte;
spinlock_t *ptl;
- if (pmd_trans_huge_lock(pmd, vma) == 1) {
+ if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
mc.precharge += HPAGE_PMD_NR;
- spin_unlock(&vma->vm_mm->page_table_lock);
+ spin_unlock(ptl);
return 0;
}
@@ -6651,6 +6848,7 @@ static void __mem_cgroup_clear_mc(void)
{
struct mem_cgroup *from = mc.from;
struct mem_cgroup *to = mc.to;
+ int i;
/* we must uncharge all the leftover precharges from mc.to */
if (mc.precharge) {
@@ -6671,7 +6869,9 @@ static void __mem_cgroup_clear_mc(void)
if (!mem_cgroup_is_root(mc.from))
res_counter_uncharge(&mc.from->memsw,
PAGE_SIZE * mc.moved_swap);
- __mem_cgroup_put(mc.from, mc.moved_swap);
+
+ for (i = 0; i < mc.moved_swap; i++)
+ css_put(&mc.from->css);
if (!mem_cgroup_is_root(mc.to)) {
/*
@@ -6681,7 +6881,7 @@ static void __mem_cgroup_clear_mc(void)
res_counter_uncharge(&mc.to->res,
PAGE_SIZE * mc.moved_swap);
}
- /* we've already done mem_cgroup_get(mc.to) */
+ /* we've already done css_get(mc.to) */
mc.moved_swap = 0;
}
memcg_oom_recover(from);
@@ -6706,12 +6906,12 @@ static void mem_cgroup_clear_mc(void)
mem_cgroup_end_move(from);
}
-static int mem_cgroup_can_attach(struct cgroup *cgroup,
+static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset)
{
struct task_struct *p = cgroup_taskset_first(tset);
int ret = 0;
- struct mem_cgroup *memcg = mem_cgroup_from_cont(cgroup);
+ struct mem_cgroup *memcg = mem_cgroup_from_css(css);
unsigned long move_charge_at_immigrate;
/*
@@ -6753,7 +6953,7 @@ static int mem_cgroup_can_attach(struct cgroup *cgroup,
return ret;
}
-static void mem_cgroup_cancel_attach(struct cgroup *cgroup,
+static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset)
{
mem_cgroup_clear_mc();
@@ -6782,9 +6982,9 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
* to be unlocked in __split_huge_page_splitting(), where the main
* part of thp split is not executed yet.
*/
- if (pmd_trans_huge_lock(pmd, vma) == 1) {
+ if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
if (mc.precharge < HPAGE_PMD_NR) {
- spin_unlock(&vma->vm_mm->page_table_lock);
+ spin_unlock(ptl);
return 0;
}
target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
@@ -6801,7 +7001,7 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
}
put_page(page);
}
- spin_unlock(&vma->vm_mm->page_table_lock);
+ spin_unlock(ptl);
return 0;
}
@@ -6901,7 +7101,7 @@ retry:
up_read(&mm->mmap_sem);
}
-static void mem_cgroup_move_task(struct cgroup *cont,
+static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset)
{
struct task_struct *p = cgroup_taskset_first(tset);
@@ -6916,16 +7116,16 @@ static void mem_cgroup_move_task(struct cgroup *cont,
mem_cgroup_clear_mc();
}
#else /* !CONFIG_MMU */
-static int mem_cgroup_can_attach(struct cgroup *cgroup,
+static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset)
{
return 0;
}
-static void mem_cgroup_cancel_attach(struct cgroup *cgroup,
+static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset)
{
}
-static void mem_cgroup_move_task(struct cgroup *cont,
+static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset)
{
}
@@ -6935,20 +7135,18 @@ static void mem_cgroup_move_task(struct cgroup *cont,
* Cgroup retains root cgroups across [un]mount cycles making it necessary
* to verify sane_behavior flag on each mount attempt.
*/
-static void mem_cgroup_bind(struct cgroup *root)
+static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
{
/*
* use_hierarchy is forced with sane_behavior. cgroup core
* guarantees that @root doesn't have any children, so turning it
* on for the root memcg is enough.
*/
- if (cgroup_sane_behavior(root))
- mem_cgroup_from_cont(root)->use_hierarchy = true;
+ if (cgroup_sane_behavior(root_css->cgroup))
+ mem_cgroup_from_css(root_css)->use_hierarchy = true;
}
-struct cgroup_subsys mem_cgroup_subsys = {
- .name = "memory",
- .subsys_id = mem_cgroup_subsys_id,
+struct cgroup_subsys memory_cgrp_subsys = {
.css_alloc = mem_cgroup_css_alloc,
.css_online = mem_cgroup_css_online,
.css_offline = mem_cgroup_css_offline,
@@ -6959,13 +7157,11 @@ struct cgroup_subsys mem_cgroup_subsys = {
.bind = mem_cgroup_bind,
.base_cftypes = mem_cgroup_files,
.early_init = 0,
- .use_id = 1,
};
#ifdef CONFIG_MEMCG_SWAP
static int __init enable_swap_account(char *s)
{
- /* consider enabled if no parameter or 1 is given */
if (!strcmp(s, "1"))
really_do_swap_account = 1;
else if (!strcmp(s, "0"))
@@ -6976,7 +7172,7 @@ __setup("swapaccount=", enable_swap_account);
static void __init memsw_file_init(void)
{
- WARN_ON(cgroup_add_cftypes(&mem_cgroup_subsys, memsw_cgroup_files));
+ WARN_ON(cgroup_add_cftypes(&memory_cgrp_subsys, memsw_cgroup_files));
}
static void __init enable_swap_cgroup(void)
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index ceb0c7f1932f..9ccef39a9de2 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -145,14 +145,10 @@ static int hwpoison_filter_task(struct page *p)
return -EINVAL;
css = mem_cgroup_css(mem);
- /* root_mem_cgroup has NULL dentries */
- if (!css->cgroup->dentry)
- return -EINVAL;
-
- ino = css->cgroup->dentry->d_inode->i_ino;
+ ino = cgroup_ino(css->cgroup);
css_put(css);
- if (ino != hwpoison_filter_memcg)
+ if (!ino || ino != hwpoison_filter_memcg)
return -EINVAL;
return 0;
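/*
 * Hedged userspace sketch (not part of the patch) of how the filter above
 * is typically configured: the memcg filter value is the inode number of
 * the target cgroup directory, which the hunk now obtains via cgroup_ino().
 * The cgroup v1 "memory" mount point and the debugfs location below are
 * assumptions about the local setup, not something this patch defines.
 */
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	struct stat st;
	FILE *f;

	if (stat("/sys/fs/cgroup/memory/hwpoison-test", &st) != 0) {
		perror("stat");
		return 1;
	}

	f = fopen("/sys/kernel/debug/hwpoison/corrupt-filter-memcg", "w");
	if (!f) {
		perror("fopen");
		return 1;
	}
	/* Restrict hwpoison injection to pages charged to that cgroup. */
	fprintf(f, "%lu\n", (unsigned long)st.st_ino);
	fclose(f);
	return 0;
}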
@@ -206,7 +202,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
#ifdef __ARCH_SI_TRAPNO
si.si_trapno = trapno;
#endif
- si.si_addr_lsb = compound_trans_order(compound_head(page)) + PAGE_SHIFT;
+ si.si_addr_lsb = compound_order(compound_head(page)) + PAGE_SHIFT;
if ((flags & MF_ACTION_REQUIRED) && t == current) {
si.si_code = BUS_MCEERR_AR;
@@ -248,10 +244,12 @@ void shake_page(struct page *p, int access)
*/
if (access) {
int nr;
+ int nid = page_to_nid(p);
do {
struct shrink_control shrink = {
.gfp_mask = GFP_KERNEL,
};
+ node_set(nid, shrink.nodes_to_scan);
nr = shrink_slab(&shrink, 1000, 1000);
if (page_count(p) == 1)
@@ -609,7 +607,7 @@ static int me_pagecache_clean(struct page *p, unsigned long pfn)
}
/*
- * Dirty cache page page
+ * Dirty pagecache page
* Issues: when the error hit a hole page the error is not properly
* propagated.
*/
@@ -854,14 +852,14 @@ static int page_action(struct page_state *ps, struct page *p,
* the pages and send SIGBUS to the processes if the data was dirty.
*/
static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
- int trapno, int flags)
+ int trapno, int flags, struct page **hpagep)
{
enum ttu_flags ttu = TTU_UNMAP | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
struct address_space *mapping;
LIST_HEAD(tokill);
int ret;
int kill = 1, forcekill;
- struct page *hpage = compound_head(p);
+ struct page *hpage = *hpagep;
struct page *ppage;
if (PageReserved(p) || PageSlab(p))
@@ -936,6 +934,21 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
BUG_ON(!PageHWPoison(p));
return SWAP_FAIL;
}
+ /*
+ * We pinned the head page for hwpoison handling,
+ * now we split the thp and we are interested in
+ * the hwpoisoned raw page, so move the refcount
+ * to it. Similarly, page lock is shifted.
+ */
+ if (hpage != p) {
+ if (!(flags & MF_COUNT_INCREASED)) {
+ put_page(hpage);
+ get_page(p);
+ }
+ lock_page(p);
+ unlock_page(hpage);
+ *hpagep = p;
+ }
/* THP is split, so ppage should be the real poisoned page. */
ppage = p;
}
@@ -952,17 +965,11 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
if (kill)
collect_procs(ppage, &tokill);
- if (hpage != ppage)
- lock_page(ppage);
-
ret = try_to_unmap(ppage, ttu);
if (ret != SWAP_SUCCESS)
printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n",
pfn, page_mapcount(ppage));
- if (hpage != ppage)
- unlock_page(ppage);
-
/*
* Now that the dirty bit has been propagated to the
* struct page and all unmaps done we can decide if
@@ -983,7 +990,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
static void set_page_hwpoison_huge_page(struct page *hpage)
{
int i;
- int nr_pages = 1 << compound_trans_order(hpage);
+ int nr_pages = 1 << compound_order(hpage);
for (i = 0; i < nr_pages; i++)
SetPageHWPoison(hpage + i);
}
@@ -991,7 +998,7 @@ static void set_page_hwpoison_huge_page(struct page *hpage)
static void clear_page_hwpoison_huge_page(struct page *hpage)
{
int i;
- int nr_pages = 1 << compound_trans_order(hpage);
+ int nr_pages = 1 << compound_order(hpage);
for (i = 0; i < nr_pages; i++)
ClearPageHWPoison(hpage + i);
}
@@ -1074,15 +1081,16 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
return 0;
} else if (PageHuge(hpage)) {
/*
- * Check "just unpoisoned", "filter hit", and
- * "race with other subpage."
+ * Check "filter hit" and "race with other subpage."
*/
lock_page(hpage);
- if (!PageHWPoison(hpage)
- || (hwpoison_filter(p) && TestClearPageHWPoison(p))
- || (p != hpage && TestSetPageHWPoison(hpage))) {
- atomic_long_sub(nr_pages, &num_poisoned_pages);
- return 0;
+ if (PageHWPoison(hpage)) {
+ if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
+ || (p != hpage && TestSetPageHWPoison(hpage))) {
+ atomic_long_sub(nr_pages, &num_poisoned_pages);
+ unlock_page(hpage);
+ return 0;
+ }
}
set_page_hwpoison_huge_page(hpage);
res = dequeue_hwpoisoned_huge_page(hpage);
@@ -1112,8 +1120,10 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
* shake_page could have turned it free.
*/
if (is_free_buddy_page(p)) {
- action_result(pfn, "free buddy, 2nd try",
- DELAYED);
+ if (flags & MF_COUNT_INCREASED)
+ action_result(pfn, "free buddy", DELAYED);
+ else
+ action_result(pfn, "free buddy, 2nd try", DELAYED);
return 0;
}
action_result(pfn, "non LRU", IGNORED);
@@ -1143,6 +1153,8 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
*/
if (!PageHWPoison(p)) {
printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
+ atomic_long_sub(nr_pages, &num_poisoned_pages);
+ put_page(hpage);
res = 0;
goto out;
}
@@ -1179,8 +1191,12 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
/*
* Now take care of user space mappings.
* Abort on fail: __delete_from_page_cache() assumes unmapped page.
+ *
+	 * When the raw error page is a thp tail page, hpage points to the raw
+ * page after thp split.
*/
- if (hwpoison_user_mappings(p, pfn, trapno, flags) != SWAP_SUCCESS) {
+ if (hwpoison_user_mappings(p, pfn, trapno, flags, &hpage)
+ != SWAP_SUCCESS) {
printk(KERN_ERR "MCE %#lx: cannot unmap page, give up\n", pfn);
res = -EBUSY;
goto out;
@@ -1204,6 +1220,9 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
for (ps = error_states;; ps++)
if ((p->flags & ps->mask) == ps->res)
break;
+
+ page_flags |= (p->flags & (1UL << PG_dirty));
+
if (!ps->mask)
for (ps = error_states;; ps++)
if ((page_flags & ps->mask) == ps->res)
@@ -1262,10 +1281,10 @@ void memory_failure_queue(unsigned long pfn, int trapno, int flags)
mf_cpu = &get_cpu_var(memory_failure_cpu);
spin_lock_irqsave(&mf_cpu->lock, proc_flags);
- if (kfifo_put(&mf_cpu->fifo, &entry))
+ if (kfifo_put(&mf_cpu->fifo, entry))
schedule_work_on(smp_processor_id(), &mf_cpu->work);
else
- pr_err("Memory failure: buffer overflow when queuing memory failure at 0x%#lx\n",
+ pr_err("Memory failure: buffer overflow when queuing memory failure at %#lx\n",
pfn);
spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
put_cpu_var(memory_failure_cpu);
@@ -1286,7 +1305,10 @@ static void memory_failure_work_func(struct work_struct *work)
spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
if (!gotten)
break;
- memory_failure(entry.pfn, entry.trapno, entry.flags);
+ if (entry.flags & MF_SOFT_OFFLINE)
+ soft_offline_page(pfn_to_page(entry.pfn), entry.flags);
+ else
+ memory_failure(entry.pfn, entry.trapno, entry.flags);
}
}
@@ -1336,7 +1358,17 @@ int unpoison_memory(unsigned long pfn)
return 0;
}
- nr_pages = 1 << compound_trans_order(page);
+ /*
+ * unpoison_memory() can encounter thp only when the thp is being
+	 * worked on by memory_failure() and the page lock is not held yet.
+ * In such case, we yield to memory_failure() and make unpoison fail.
+ */
+ if (!PageHuge(page) && PageTransHuge(page)) {
+ pr_info("MCE: Memory failure is now running on %#lx\n", pfn);
+ return 0;
+ }
+
+ nr_pages = 1 << compound_order(page);
if (!get_page_unless_zero(page)) {
/*
@@ -1350,7 +1382,7 @@ int unpoison_memory(unsigned long pfn)
return 0;
}
if (TestClearPageHWPoison(p))
- atomic_long_sub(nr_pages, &num_poisoned_pages);
+ atomic_long_dec(&num_poisoned_pages);
pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
return 0;
}
@@ -1372,7 +1404,7 @@ int unpoison_memory(unsigned long pfn)
unlock_page(page);
put_page(page);
- if (freeit)
+ if (freeit && !(pfn == my_zero_pfn(0) && page_count(p) == 1))
put_page(page);
return 0;
@@ -1403,17 +1435,6 @@ static int __get_any_page(struct page *p, unsigned long pfn, int flags)
return 1;
/*
- * The lock_memory_hotplug prevents a race with memory hotplug.
- * This is a big hammer, a better would be nicer.
- */
- lock_memory_hotplug();
-
- /*
- * Isolate the page, so that it doesn't get reallocated if it
- * was free.
- */
- set_migratetype_isolate(p, true);
- /*
* When the target page is a free hugepage, just remove it
* from free hugepage list.
*/
@@ -1433,8 +1454,6 @@ static int __get_any_page(struct page *p, unsigned long pfn, int flags)
/* Not a free page */
ret = 1;
}
- unset_migratetype_isolate(p, MIGRATE_MOVABLE);
- unlock_memory_hotplug();
return ret;
}
@@ -1467,6 +1486,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
int ret;
unsigned long pfn = page_to_pfn(page);
struct page *hpage = compound_head(page);
+ LIST_HEAD(pagelist);
/*
* This double-check of PageHWPoison is to avoid the race with
@@ -1482,84 +1502,32 @@ static int soft_offline_huge_page(struct page *page, int flags)
unlock_page(hpage);
/* Keep page count to indicate a given hugepage is isolated. */
- ret = migrate_huge_page(hpage, new_page, MPOL_MF_MOVE_ALL,
- MIGRATE_SYNC);
- put_page(hpage);
+ list_move(&hpage->lru, &pagelist);
+ ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
+ MIGRATE_SYNC, MR_MEMORY_FAILURE);
if (ret) {
pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
pfn, ret, page->flags);
+ /*
+ * We know that soft_offline_huge_page() tries to migrate
+ * only one hugepage pointed to by hpage, so we need not
+ * run through the pagelist here.
+ */
+ putback_active_hugepage(hpage);
+ if (ret > 0)
+ ret = -EIO;
} else {
- set_page_hwpoison_huge_page(hpage);
- dequeue_hwpoisoned_huge_page(hpage);
- atomic_long_add(1 << compound_trans_order(hpage),
- &num_poisoned_pages);
- }
- /* keep elevated page count for bad page */
- return ret;
-}
-
-static int __soft_offline_page(struct page *page, int flags);
-
-/**
- * soft_offline_page - Soft offline a page.
- * @page: page to offline
- * @flags: flags. Same as memory_failure().
- *
- * Returns 0 on success, otherwise negated errno.
- *
- * Soft offline a page, by migration or invalidation,
- * without killing anything. This is for the case when
- * a page is not corrupted yet (so it's still valid to access),
- * but has had a number of corrected errors and is better taken
- * out.
- *
- * The actual policy on when to do that is maintained by
- * user space.
- *
- * This should never impact any application or cause data loss,
- * however it might take some time.
- *
- * This is not a 100% solution for all memory, but tries to be
- * ``good enough'' for the majority of memory.
- */
-int soft_offline_page(struct page *page, int flags)
-{
- int ret;
- unsigned long pfn = page_to_pfn(page);
- struct page *hpage = compound_trans_head(page);
-
- if (PageHWPoison(page)) {
- pr_info("soft offline: %#lx page already poisoned\n", pfn);
- return -EBUSY;
- }
- if (!PageHuge(page) && PageTransHuge(hpage)) {
- if (PageAnon(hpage) && unlikely(split_huge_page(hpage))) {
- pr_info("soft offline: %#lx: failed to split THP\n",
- pfn);
- return -EBUSY;
- }
- }
-
- ret = get_any_page(page, pfn, flags);
- if (ret < 0)
- return ret;
- if (ret) { /* for in-use pages */
- if (PageHuge(page))
- ret = soft_offline_huge_page(page, flags);
- else
- ret = __soft_offline_page(page, flags);
- } else { /* for free pages */
+ /* overcommit hugetlb page will be freed to buddy */
if (PageHuge(page)) {
set_page_hwpoison_huge_page(hpage);
dequeue_hwpoisoned_huge_page(hpage);
- atomic_long_add(1 << compound_trans_order(hpage),
+ atomic_long_add(1 << compound_order(hpage),
&num_poisoned_pages);
} else {
SetPageHWPoison(page);
atomic_long_inc(&num_poisoned_pages);
}
}
- /* keep elevated page count for bad page */
return ret;
}
@@ -1619,13 +1587,34 @@ static int __soft_offline_page(struct page *page, int flags)
ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
MIGRATE_SYNC, MR_MEMORY_FAILURE);
if (ret) {
- putback_lru_pages(&pagelist);
+ if (!list_empty(&pagelist)) {
+ list_del(&page->lru);
+ dec_zone_page_state(page, NR_ISOLATED_ANON +
+ page_is_file_cache(page));
+ putback_lru_page(page);
+ }
+
pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
pfn, ret, page->flags);
if (ret > 0)
ret = -EIO;
} else {
+ /*
+ * After page migration succeeds, the source page can
+ * be trapped in pagevec and actual freeing is delayed.
+ * Freeing code works differently based on PG_hwpoison,
+ * so there's a race. We need to make sure that the
+ * source page should be freed back to buddy before
+ * setting PG_hwpoison.
+ */
+ if (!is_free_buddy_page(page))
+ lru_add_drain_all();
+ if (!is_free_buddy_page(page))
+ drain_all_pages();
SetPageHWPoison(page);
+ if (!is_free_buddy_page(page))
+ pr_info("soft offline: %#lx: page leaked\n",
+ pfn);
atomic_long_inc(&num_poisoned_pages);
}
} else {
@@ -1634,3 +1623,79 @@ static int __soft_offline_page(struct page *page, int flags)
}
return ret;
}
+
+/**
+ * soft_offline_page - Soft offline a page.
+ * @page: page to offline
+ * @flags: flags. Same as memory_failure().
+ *
+ * Returns 0 on success, otherwise negated errno.
+ *
+ * Soft offline a page, by migration or invalidation,
+ * without killing anything. This is for the case when
+ * a page is not corrupted yet (so it's still valid to access),
+ * but has had a number of corrected errors and is better taken
+ * out.
+ *
+ * The actual policy on when to do that is maintained by
+ * user space.
+ *
+ * This should never impact any application or cause data loss,
+ * however it might take some time.
+ *
+ * This is not a 100% solution for all memory, but tries to be
+ * ``good enough'' for the majority of memory.
+ */
+int soft_offline_page(struct page *page, int flags)
+{
+ int ret;
+ unsigned long pfn = page_to_pfn(page);
+ struct page *hpage = compound_head(page);
+
+ if (PageHWPoison(page)) {
+ pr_info("soft offline: %#lx page already poisoned\n", pfn);
+ return -EBUSY;
+ }
+ if (!PageHuge(page) && PageTransHuge(hpage)) {
+ if (PageAnon(hpage) && unlikely(split_huge_page(hpage))) {
+ pr_info("soft offline: %#lx: failed to split THP\n",
+ pfn);
+ return -EBUSY;
+ }
+ }
+
+ /*
+ * The lock_memory_hotplug prevents a race with memory hotplug.
+ * This is a big hammer, a better fix would be nicer.
+ */
+ lock_memory_hotplug();
+
+ /*
+ * Isolate the page, so that it doesn't get reallocated if it
+ * was free. This flag should be kept set until the source page
+ * is freed and PG_hwpoison on it is set.
+ */
+ if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
+ set_migratetype_isolate(page, true);
+
+ ret = get_any_page(page, pfn, flags);
+ unlock_memory_hotplug();
+ if (ret > 0) { /* for in-use pages */
+ if (PageHuge(page))
+ ret = soft_offline_huge_page(page, flags);
+ else
+ ret = __soft_offline_page(page, flags);
+ } else if (ret == 0) { /* for free pages */
+ if (PageHuge(page)) {
+ set_page_hwpoison_huge_page(hpage);
+ dequeue_hwpoisoned_huge_page(hpage);
+ atomic_long_add(1 << compound_order(hpage),
+ &num_poisoned_pages);
+ } else {
+ SetPageHWPoison(page);
+ atomic_long_inc(&num_poisoned_pages);
+ }
+ }
+ unset_migratetype_isolate(page, MIGRATE_MOVABLE);
+ return ret;
+}
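/*
 * Illustrative sketch, not part of this patch: soft_offline_page() can be
 * exercised from user space through madvise(MADV_SOFT_OFFLINE), which needs
 * CONFIG_MEMORY_FAILURE and CAP_SYS_ADMIN. An in-use page is migrated and
 * the old physical page is left PG_hwpoison, so the buffer contents stay
 * intact. The fallback #define matches <asm-generic/mman-common.h>.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef MADV_SOFT_OFFLINE
#define MADV_SOFT_OFFLINE 101
#endif

int main(void)
{
	long pagesize = sysconf(_SC_PAGESIZE);
	char *buf;

	if (posix_memalign((void **)&buf, pagesize, pagesize))
		return 1;
	memset(buf, 0xaa, pagesize);		/* fault the page in */

	if (madvise(buf, pagesize, MADV_SOFT_OFFLINE))
		perror("madvise(MADV_SOFT_OFFLINE)");
	else
		printf("soft-offlined; data still readable: 0x%x\n",
		       (unsigned char)buf[0]);
	free(buf);
	return 0;
}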
diff --git a/mm/memory.c b/mm/memory.c
index 61a262b08e53..037b812a9531 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -59,6 +59,8 @@
#include <linux/gfp.h>
#include <linux/migrate.h>
#include <linux/string.h>
+#include <linux/dma-debug.h>
+#include <linux/debugfs.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
@@ -69,8 +71,8 @@
#include "internal.h"
-#ifdef LAST_NID_NOT_IN_PAGE_FLAGS
-#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_nid.
+#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
+#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
#endif
#ifndef CONFIG_NEED_MULTIPLE_NODES
@@ -82,7 +84,6 @@ EXPORT_SYMBOL(max_mapnr);
EXPORT_SYMBOL(mem_map);
#endif
-unsigned long num_physpages;
/*
* A number of key systems in x86 including ioremap() rely on the assumption
* that high_memory defines the upper bound on direct map memory, then end
@@ -92,7 +93,6 @@ unsigned long num_physpages;
*/
void * high_memory;
-EXPORT_SYMBOL(num_physpages);
EXPORT_SYMBOL(high_memory);
/*
@@ -211,14 +211,15 @@ static int tlb_next_batch(struct mmu_gather *tlb)
* tear-down from @mm. The @fullmm argument is used when @mm is without
* users and we're going to destroy the full address space (exit/execve).
*/
-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
tlb->mm = mm;
- tlb->fullmm = fullmm;
+ /* Is it from 0 to ~0? */
+ tlb->fullmm = !(start | (end+1));
tlb->need_flush_all = 0;
- tlb->start = -1UL;
- tlb->end = 0;
+ tlb->start = start;
+ tlb->end = end;
tlb->need_flush = 0;
tlb->local.next = NULL;
tlb->local.nr = 0;
@@ -231,17 +232,18 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
#endif
}
-void tlb_flush_mmu(struct mmu_gather *tlb)
+static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
- struct mmu_gather_batch *batch;
-
- if (!tlb->need_flush)
- return;
tlb->need_flush = 0;
tlb_flush(tlb);
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
tlb_table_flush(tlb);
#endif
+}
+
+static void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+ struct mmu_gather_batch *batch;
for (batch = &tlb->local; batch; batch = batch->next) {
free_pages_and_swap_cache(batch->pages, batch->nr);
@@ -250,6 +252,14 @@ void tlb_flush_mmu(struct mmu_gather *tlb)
tlb->active = &tlb->local;
}
+void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+ if (!tlb->need_flush)
+ return;
+ tlb_flush_mmu_tlbonly(tlb);
+ tlb_flush_mmu_free(tlb);
+}
+
/* tlb_finish_mmu
* Called at the end of the shootdown operation to free up any resources
* that were required.
@@ -258,8 +268,6 @@ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long e
{
struct mmu_gather_batch *batch, *next;
- tlb->start = start;
- tlb->end = end;
tlb_flush_mmu(tlb);
/* keep the page table cache within bounds */
@@ -291,7 +299,7 @@ int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
return 0;
batch = tlb->active;
}
- VM_BUG_ON(batch->nr > batch->max);
+ VM_BUG_ON_PAGE(batch->nr > batch->max, page);
return batch->max - batch->nr;
}
@@ -376,30 +384,6 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
/*
- * If a p?d_bad entry is found while walking page tables, report
- * the error, before resetting entry to p?d_none. Usually (but
- * very seldom) called out from the p?d_none_or_clear_bad macros.
- */
-
-void pgd_clear_bad(pgd_t *pgd)
-{
- pgd_ERROR(*pgd);
- pgd_clear(pgd);
-}
-
-void pud_clear_bad(pud_t *pud)
-{
- pud_ERROR(*pud);
- pud_clear(pud);
-}
-
-void pmd_clear_bad(pmd_t *pmd)
-{
- pmd_ERROR(*pmd);
- pmd_clear(pmd);
-}
-
-/*
* Note: this doesn't free the actual pages themselves. That
* has been handled earlier when unmapping all the memory regions.
*/
@@ -409,7 +393,7 @@ static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
pgtable_t token = pmd_pgtable(*pmd);
pmd_clear(pmd);
pte_free_tlb(tlb, token, addr);
- tlb->mm->nr_ptes--;
+ atomic_long_dec(&tlb->mm->nr_ptes);
}
static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
@@ -480,8 +464,6 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
/*
* This function frees user-level page tables of a process.
- *
- * Must be called with pagetable lock held.
*/
void free_pgd_range(struct mmu_gather *tlb,
unsigned long addr, unsigned long end,
@@ -579,6 +561,7 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
pmd_t *pmd, unsigned long address)
{
+ spinlock_t *ptl;
pgtable_t new = pte_alloc_one(mm, address);
int wait_split_huge_page;
if (!new)
@@ -599,15 +582,15 @@ int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
*/
smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
- spin_lock(&mm->page_table_lock);
+ ptl = pmd_lock(mm, pmd);
wait_split_huge_page = 0;
if (likely(pmd_none(*pmd))) { /* Has another populated it ? */
- mm->nr_ptes++;
+ atomic_long_inc(&mm->nr_ptes);
pmd_populate(mm, pmd, new);
new = NULL;
} else if (unlikely(pmd_trans_splitting(*pmd)))
wait_split_huge_page = 1;
- spin_unlock(&mm->page_table_lock);
+ spin_unlock(ptl);
if (new)
pte_free(mm, new);
if (wait_split_huge_page)
@@ -698,7 +681,7 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
current->comm,
(long long)pte_val(pte), (long long)pmd_val(*pmd));
if (page)
- dump_page(page);
+ dump_page(page, "bad pte");
printk(KERN_ALERT
"addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n",
(void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
@@ -708,7 +691,7 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
if (vma->vm_ops)
printk(KERN_ALERT "vma->vm_ops->fault: %pSR\n",
vma->vm_ops->fault);
- if (vma->vm_file && vma->vm_file->f_op)
+ if (vma->vm_file)
printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pSR\n",
vma->vm_file->f_op->mmap);
dump_stack();
@@ -864,6 +847,8 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
*/
make_migration_entry_read(&entry);
pte = swp_entry_to_pte(entry);
+ if (pte_swp_soft_dirty(*src_pte))
+ pte = pte_swp_mksoft_dirty(pte);
set_pte_at(src_mm, addr, src_pte, pte);
}
}
@@ -1142,25 +1127,31 @@ again:
continue;
if (unlikely(details) && details->nonlinear_vma
&& linear_page_index(details->nonlinear_vma,
- addr) != page->index)
- set_pte_at(mm, addr, pte,
- pgoff_to_pte(page->index));
+ addr) != page->index) {
+ pte_t ptfile = pgoff_to_pte(page->index);
+ if (pte_soft_dirty(ptent))
+ pte_file_mksoft_dirty(ptfile);
+ set_pte_at(mm, addr, pte, ptfile);
+ }
if (PageAnon(page))
rss[MM_ANONPAGES]--;
else {
- if (pte_dirty(ptent))
+ if (pte_dirty(ptent)) {
+ force_flush = 1;
set_page_dirty(page);
+ }
if (pte_young(ptent) &&
- likely(!VM_SequentialReadHint(vma)))
+ likely(!(vma->vm_flags & VM_SEQ_READ)))
mark_page_accessed(page);
rss[MM_FILEPAGES]--;
}
page_remove_rmap(page);
if (unlikely(page_mapcount(page) < 0))
print_bad_pte(vma, addr, ptent, page);
- force_flush = !__tlb_remove_page(tlb, page);
- if (force_flush)
+ if (unlikely(!__tlb_remove_page(tlb, page))) {
+ force_flush = 1;
break;
+ }
continue;
}
/*
@@ -1195,21 +1186,34 @@ again:
add_mm_rss_vec(mm, rss);
arch_leave_lazy_mmu_mode();
+
+ /* Do the actual TLB flush before dropping ptl */
+ if (force_flush) {
+ unsigned long old_end;
+
+ /*
+ * Flush the TLB just for the previous segment,
+ * then update the range to be the remaining
+ * TLB range.
+ */
+ old_end = tlb->end;
+ tlb->end = addr;
+ tlb_flush_mmu_tlbonly(tlb);
+ tlb->start = addr;
+ tlb->end = old_end;
+ }
pte_unmap_unlock(start_pte, ptl);
/*
- * mmu_gather ran out of room to batch pages, we break out of
- * the PTE lock to avoid doing the potential expensive TLB invalidate
- * and page-free while holding it.
+ * If we forced a TLB flush (either due to running out of
+ * batch buffers or because we needed to flush dirty TLB
+ * entries before releasing the ptl), free the batched
+ * memory too. Restart if we didn't do everything.
*/
if (force_flush) {
force_flush = 0;
+ tlb_flush_mmu_free(tlb);
-#ifdef HAVE_GENERIC_MMU_GATHER
- tlb->start = addr;
- tlb->end = end;
-#endif
- tlb_flush_mmu(tlb);
if (addr != end)
goto again;
}
@@ -1332,9 +1336,9 @@ static void unmap_single_vma(struct mmu_gather *tlb,
* It is undesirable to test vma->vm_file as it
* should be non-null for valid hugetlb area.
* However, vm_file will be NULL in the error
- * cleanup path of do_mmap_pgoff. When
+ * cleanup path of mmap_region. When
* hugetlbfs ->mmap method fails,
- * do_mmap_pgoff() nullifies vma->vm_file
+ * mmap_region() nullifies vma->vm_file
* before calling this function to clean up.
* Since no pte has actually been setup, it is
* safe to do nothing in this case.
@@ -1396,7 +1400,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end = start + size;
lru_add_drain();
- tlb_gather_mmu(&tlb, mm, 0);
+ tlb_gather_mmu(&tlb, mm, start, end);
update_hiwater_rss(mm);
mmu_notifier_invalidate_range_start(mm, start, end);
for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
@@ -1422,7 +1426,7 @@ static void zap_page_range_single(struct vm_area_struct *vma, unsigned long addr
unsigned long end = address + size;
lru_add_drain();
- tlb_gather_mmu(&tlb, mm, 0);
+ tlb_gather_mmu(&tlb, mm, address, end);
update_hiwater_rss(mm);
mmu_notifier_invalidate_range_start(mm, address, end);
unmap_single_vma(&tlb, vma, address, end, details);
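/*
 * Illustrative sketch, not part of this patch: the mmu_gather API change
 * above means callers now pass the range being torn down instead of a
 * fullmm flag. This mirrors zap_page_range() as updated in this diff;
 * only the wrapper name is invented.
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/mmu_notifier.h>
#include <asm/tlb.h>

static void example_unmap_range(struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, start, end);	/* was: tlb_gather_mmu(&tlb, mm, 0) */
	update_hiwater_rss(mm);
	mmu_notifier_invalidate_range_start(mm, start, end);
	unmap_vmas(&tlb, vma, start, end);
	mmu_notifier_invalidate_range_end(mm, start, end);
	tlb_finish_mmu(&tlb, start, end);
}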
@@ -1495,7 +1499,8 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
if (pud_none(*pud))
goto no_page_table;
if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
- BUG_ON(flags & FOLL_GET);
+ if (flags & FOLL_GET)
+ goto out;
page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE);
goto out;
}
@@ -1506,8 +1511,20 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
if (pmd_none(*pmd))
goto no_page_table;
if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
- BUG_ON(flags & FOLL_GET);
page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
+ if (flags & FOLL_GET) {
+ /*
+ * Refcounts on tail pages are not well-defined and
+ * shouldn't be taken. The caller should handle a NULL
+ * return when trying to follow tail pages.
+ */
+ if (PageHead(page))
+ get_page(page);
+ else {
+ page = NULL;
+ goto out;
+ }
+ }
goto out;
}
if ((flags & FOLL_NUMA) && pmd_numa(*pmd))
@@ -1517,20 +1534,20 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
split_huge_page_pmd(vma, address, pmd);
goto split_fallthrough;
}
- spin_lock(&mm->page_table_lock);
+ ptl = pmd_lock(mm, pmd);
if (likely(pmd_trans_huge(*pmd))) {
if (unlikely(pmd_trans_splitting(*pmd))) {
- spin_unlock(&mm->page_table_lock);
+ spin_unlock(ptl);
wait_split_huge_page(vma->anon_vma, pmd);
} else {
page = follow_trans_huge_pmd(vma, address,
pmd, flags);
- spin_unlock(&mm->page_table_lock);
+ spin_unlock(ptl);
*page_mask = HPAGE_PMD_NR - 1;
goto out;
}
} else
- spin_unlock(&mm->page_table_lock);
+ spin_unlock(ptl);
/* fall through */
}
split_fallthrough:
@@ -1704,15 +1721,6 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
- /*
- * Require read or write permissions.
- * If FOLL_FORCE is set, we only require the "MAY" flags.
- */
- vm_flags = (gup_flags & FOLL_WRITE) ?
- (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
- vm_flags &= (gup_flags & FOLL_FORCE) ?
- (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
-
/*
* If FOLL_FORCE and FOLL_NUMA are both set, handle_mm_fault
* would be called on PROT_NONE ranges. We must never invoke
@@ -1740,7 +1748,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
/* user gate pages are read-only */
if (gup_flags & FOLL_WRITE)
- return i ? : -EFAULT;
+ goto efault;
if (pg > TASK_SIZE)
pgd = pgd_offset_k(pg);
else
@@ -1750,12 +1758,12 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
BUG_ON(pud_none(*pud));
pmd = pmd_offset(pud, pg);
if (pmd_none(*pmd))
- return i ? : -EFAULT;
+ goto efault;
VM_BUG_ON(pmd_trans_huge(*pmd));
pte = pte_offset_map(pmd, pg);
if (pte_none(*pte)) {
pte_unmap(pte);
- return i ? : -EFAULT;
+ goto efault;
}
vma = get_gate_vma(mm);
if (pages) {
@@ -1768,7 +1776,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
page = pte_page(*pte);
else {
pte_unmap(pte);
- return i ? : -EFAULT;
+ goto efault;
}
}
pages[i] = page;
@@ -1779,10 +1787,42 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
goto next_page;
}
- if (!vma ||
- (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
- !(vm_flags & vma->vm_flags))
- return i ? : -EFAULT;
+ if (!vma)
+ goto efault;
+ vm_flags = vma->vm_flags;
+ if (vm_flags & (VM_IO | VM_PFNMAP))
+ goto efault;
+
+ if (gup_flags & FOLL_WRITE) {
+ if (!(vm_flags & VM_WRITE)) {
+ if (!(gup_flags & FOLL_FORCE))
+ goto efault;
+ /*
+ * We used to let the write,force case do COW
+ * in a VM_MAYWRITE VM_SHARED !VM_WRITE vma, so
+ * ptrace could set a breakpoint in a read-only
+ * mapping of an executable, without corrupting
+ * the file (yet only when that file had been
+ * opened for writing!). Anon pages in shared
+ * mappings are surprising: now just reject it.
+ */
+ if (!is_cow_mapping(vm_flags)) {
+ WARN_ON_ONCE(vm_flags & VM_MAYWRITE);
+ goto efault;
+ }
+ }
+ } else {
+ if (!(vm_flags & VM_READ)) {
+ if (!(gup_flags & FOLL_FORCE))
+ goto efault;
+ /*
+ * Is there actually any vma we can reach here
+ * which does not have VM_MAYREAD set?
+ */
+ if (!(vm_flags & VM_MAYREAD))
+ goto efault;
+ }
+ }
if (is_vm_hugetlb_page(vma)) {
i = follow_hugetlb_page(mm, vma, pages, vmas,
@@ -1836,7 +1876,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
return -EFAULT;
}
if (ret & VM_FAULT_SIGBUS)
- return i ? i : -EFAULT;
+ goto efault;
BUG();
}
@@ -1894,6 +1934,8 @@ next_page:
} while (nr_pages && start < vma->vm_end);
} while (nr_pages);
return i;
+efault:
+ return i ? : -EFAULT;
}
EXPORT_SYMBOL(__get_user_pages);
@@ -1928,12 +1970,17 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
unsigned long address, unsigned int fault_flags)
{
struct vm_area_struct *vma;
+ vm_flags_t vm_flags;
int ret;
vma = find_extend_vma(mm, address);
if (!vma || address < vma->vm_start)
return -EFAULT;
+ vm_flags = (fault_flags & FAULT_FLAG_WRITE) ? VM_WRITE : VM_READ;
+ if (!(vm_flags & vma->vm_flags))
+ return -EFAULT;
+
ret = handle_mm_fault(mm, vma, address, fault_flags);
if (ret & VM_FAULT_ERROR) {
if (ret & VM_FAULT_OOM)
@@ -1961,9 +2008,8 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
* @start: starting user address
* @nr_pages: number of pages from start to pin
* @write: whether pages will be written to by the caller
- * @force: whether to force write access even if user mapping is
- * readonly. This will result in the page being COWed even
- * in MAP_SHARED mappings. You do not want this.
+ * @force: whether to force access even when user mapping is currently
+ * protected (but never forces write access to shared mapping).
* @pages: array that receives pointers to the pages pinned.
* Should be at least nr_pages long. Or NULL, if caller
* only intends to ensure the pages are faulted in.
@@ -2559,6 +2605,8 @@ static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
{
+ debug_dma_assert_idle(src);
+
/*
* If the source page was a PFN mapping, we don't have
* a "struct page" for it. We do a best-effort copy by
@@ -2584,6 +2632,38 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
}
/*
+ * Notify the address space that the page is about to become writable so that
+ * it can prohibit this or wait for the page to get into an appropriate state.
+ *
+ * We do this without the lock held, so that it can sleep if it needs to.
+ */
+static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
+ unsigned long address)
+{
+ struct vm_fault vmf;
+ int ret;
+
+ vmf.virtual_address = (void __user *)(address & PAGE_MASK);
+ vmf.pgoff = page->index;
+ vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
+ vmf.page = page;
+
+ ret = vma->vm_ops->page_mkwrite(vma, &vmf);
+ if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
+ return ret;
+ if (unlikely(!(ret & VM_FAULT_LOCKED))) {
+ lock_page(page);
+ if (!page->mapping) {
+ unlock_page(page);
+ return 0; /* retry */
+ }
+ ret |= VM_FAULT_LOCKED;
+ } else
+ VM_BUG_ON_PAGE(!PageLocked(page), page);
+ return ret;
+}
+
+/*
* This routine handles present pages, when users try to write
* to a shared page. It is done by copying the page to a new address
* and decrementing the shared-page counter for the old page.
@@ -2665,42 +2745,15 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
* get_user_pages(.write=1, .force=1).
*/
if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
- struct vm_fault vmf;
int tmp;
-
- vmf.virtual_address = (void __user *)(address &
- PAGE_MASK);
- vmf.pgoff = old_page->index;
- vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
- vmf.page = old_page;
-
- /*
- * Notify the address space that the page is about to
- * become writable so that it can prohibit this or wait
- * for the page to get into an appropriate state.
- *
- * We do this without the lock held, so that it can
- * sleep if it needs to.
- */
page_cache_get(old_page);
pte_unmap_unlock(page_table, ptl);
-
- tmp = vma->vm_ops->page_mkwrite(vma, &vmf);
- if (unlikely(tmp &
- (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
- ret = tmp;
- goto unwritable_page;
+ tmp = do_page_mkwrite(vma, old_page, address);
+ if (unlikely(!tmp || (tmp &
+ (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
+ page_cache_release(old_page);
+ return tmp;
}
- if (unlikely(!(tmp & VM_FAULT_LOCKED))) {
- lock_page(old_page);
- if (!old_page->mapping) {
- ret = 0; /* retry the fault */
- unlock_page(old_page);
- goto unwritable_page;
- }
- } else
- VM_BUG_ON(!PageLocked(old_page));
-
/*
* Since we dropped the lock we need to revalidate
* the PTE as someone else may have changed it. If
@@ -2720,6 +2773,14 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
get_page(dirty_page);
reuse:
+ /*
+ * Clear the pages cpupid information as the existing
+ * information potentially belongs to a now completely
+ * unrelated process.
+ */
+ if (old_page)
+ page_cpupid_xchg_last(old_page, (1 << LAST_CPUPID_SHIFT) - 1);
+
flush_cache_page(vma, address, pte_pfn(orig_pte));
entry = pte_mkyoung(orig_pte);
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
@@ -2737,11 +2798,11 @@ reuse:
* bit after it clear all dirty ptes, but before a racing
* do_wp_page installs a dirty pte.
*
- * __do_fault is protected similarly.
+ * do_shared_fault is protected similarly.
*/
if (!page_mkwrite) {
wait_on_page_locked(dirty_page);
- set_page_dirty_balance(dirty_page, page_mkwrite);
+ set_page_dirty_balance(dirty_page);
/* file_update_time outside page_lock */
if (vma->vm_file)
file_update_time(vma->vm_file);
@@ -2787,7 +2848,7 @@ gotten:
}
__SetPageUptodate(new_page);
- if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))
+ if (mem_cgroup_charge_anon(new_page, mm, GFP_KERNEL))
goto oom_free_new;
mmun_start = address & PAGE_MASK;
@@ -2881,10 +2942,6 @@ oom:
if (old_page)
page_cache_release(old_page);
return VM_FAULT_OOM;
-
-unwritable_page:
- page_cache_release(old_page);
- return ret;
}
static void unmap_mapping_range_vma(struct vm_area_struct *vma,
@@ -2904,7 +2961,7 @@ static inline void unmap_mapping_range_tree(struct rb_root *root,
details->first_index, details->last_index) {
vba = vma->vm_pgoff;
- vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1;
+ vea = vba + vma_pages(vma) - 1;
/* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
zba = details->first_index;
if (zba < vba)
@@ -3114,6 +3171,8 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
exclusive = 1;
}
flush_icache_page(vma, page);
+ if (pte_swp_soft_dirty(orig_pte))
+ pte = pte_mksoft_dirty(pte);
set_pte_at(mm, address, page_table, pte);
if (page == swapcache)
do_page_add_anon_rmap(page, vma, address, exclusive);
@@ -3242,7 +3301,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
*/
__SetPageUptodate(page);
- if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
+ if (mem_cgroup_charge_anon(page, mm, GFP_KERNEL))
goto oom_free_page;
entry = mk_pte(page, vma->vm_page_prot);
@@ -3273,53 +3332,11 @@ oom:
return VM_FAULT_OOM;
}
-/*
- * __do_fault() tries to create a new page mapping. It aggressively
- * tries to share with existing pages, but makes a separate copy if
- * the FAULT_FLAG_WRITE is set in the flags parameter in order to avoid
- * the next page fault.
- *
- * As this is called only for pages that do not currently exist, we
- * do not need to flush old virtual caches or the TLB.
- *
- * We enter with non-exclusive mmap_sem (to exclude vma changes,
- * but allow concurrent faults), and pte neither mapped nor locked.
- * We return with mmap_sem still held, but pte unmapped and unlocked.
- */
-static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long address, pmd_t *pmd,
- pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
+static int __do_fault(struct vm_area_struct *vma, unsigned long address,
+ pgoff_t pgoff, unsigned int flags, struct page **page)
{
- pte_t *page_table;
- spinlock_t *ptl;
- struct page *page;
- struct page *cow_page;
- pte_t entry;
- int anon = 0;
- struct page *dirty_page = NULL;
struct vm_fault vmf;
int ret;
- int page_mkwrite = 0;
-
- /*
- * If we do COW later, allocate page befor taking lock_page()
- * on the file cache page. This will reduce lock holding time.
- */
- if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
-
- if (unlikely(anon_vma_prepare(vma)))
- return VM_FAULT_OOM;
-
- cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
- if (!cow_page)
- return VM_FAULT_OOM;
-
- if (mem_cgroup_newpage_charge(cow_page, mm, GFP_KERNEL)) {
- page_cache_release(cow_page);
- return VM_FAULT_OOM;
- }
- } else
- cow_page = NULL;
vmf.virtual_address = (void __user *)(address & PAGE_MASK);
vmf.pgoff = pgoff;
@@ -3327,148 +3344,304 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
vmf.page = NULL;
ret = vma->vm_ops->fault(vma, &vmf);
- if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
- VM_FAULT_RETRY)))
- goto uncharge_out;
+ if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
+ return ret;
if (unlikely(PageHWPoison(vmf.page))) {
if (ret & VM_FAULT_LOCKED)
unlock_page(vmf.page);
- ret = VM_FAULT_HWPOISON;
- goto uncharge_out;
+ page_cache_release(vmf.page);
+ return VM_FAULT_HWPOISON;
}
- /*
- * For consistency in subsequent calls, make the faulted page always
- * locked.
- */
if (unlikely(!(ret & VM_FAULT_LOCKED)))
lock_page(vmf.page);
else
- VM_BUG_ON(!PageLocked(vmf.page));
+ VM_BUG_ON_PAGE(!PageLocked(vmf.page), vmf.page);
+
+ *page = vmf.page;
+ return ret;
+}
+
+/**
+ * do_set_pte - setup new PTE entry for given page and add reverse page mapping.
+ *
+ * @vma: virtual memory area
+ * @address: user virtual address
+ * @page: page to map
+ * @pte: pointer to target page table entry
+ * @write: true, if new entry is writable
+ * @anon: true, if it's anonymous page
+ *
+ * Caller must hold page table lock relevant for @pte.
+ *
+ * Target users are page handler itself and implementations of
+ * vm_ops->map_pages.
+ */
+void do_set_pte(struct vm_area_struct *vma, unsigned long address,
+ struct page *page, pte_t *pte, bool write, bool anon)
+{
+ pte_t entry;
+
+ flush_icache_page(vma, page);
+ entry = mk_pte(page, vma->vm_page_prot);
+ if (write)
+ entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+ else if (pte_file(*pte) && pte_file_soft_dirty(*pte))
+ pte_mksoft_dirty(entry);
+ if (anon) {
+ inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
+ page_add_new_anon_rmap(page, vma, address);
+ } else {
+ inc_mm_counter_fast(vma->vm_mm, MM_FILEPAGES);
+ page_add_file_rmap(page);
+ }
+ set_pte_at(vma->vm_mm, address, pte, entry);
+
+ /* no need to invalidate: a not-present page won't be cached */
+ update_mmu_cache(vma, address, pte);
+}
+
+#define FAULT_AROUND_ORDER 4
+
+#ifdef CONFIG_DEBUG_FS
+static unsigned int fault_around_order = FAULT_AROUND_ORDER;
+
+static int fault_around_order_get(void *data, u64 *val)
+{
+ *val = fault_around_order;
+ return 0;
+}
+
+static int fault_around_order_set(void *data, u64 val)
+{
+ BUILD_BUG_ON((1UL << FAULT_AROUND_ORDER) > PTRS_PER_PTE);
+ if (1UL << val > PTRS_PER_PTE)
+ return -EINVAL;
+ fault_around_order = val;
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(fault_around_order_fops,
+ fault_around_order_get, fault_around_order_set, "%llu\n");
+
+static int __init fault_around_debugfs(void)
+{
+ void *ret;
+
+ ret = debugfs_create_file("fault_around_order", 0644, NULL, NULL,
+ &fault_around_order_fops);
+ if (!ret)
+ pr_warn("Failed to create fault_around_order in debugfs");
+ return 0;
+}
+late_initcall(fault_around_debugfs);
+
+static inline unsigned long fault_around_pages(void)
+{
+ return 1UL << fault_around_order;
+}
+
+static inline unsigned long fault_around_mask(void)
+{
+ return ~((1UL << (PAGE_SHIFT + fault_around_order)) - 1);
+}
+#else
+static inline unsigned long fault_around_pages(void)
+{
+ unsigned long nr_pages;
+
+ nr_pages = 1UL << FAULT_AROUND_ORDER;
+ BUILD_BUG_ON(nr_pages > PTRS_PER_PTE);
+ return nr_pages;
+}
+
+static inline unsigned long fault_around_mask(void)
+{
+ return ~((1UL << (PAGE_SHIFT + FAULT_AROUND_ORDER)) - 1);
+}
+#endif
+
+static void do_fault_around(struct vm_area_struct *vma, unsigned long address,
+ pte_t *pte, pgoff_t pgoff, unsigned int flags)
+{
+ unsigned long start_addr;
+ pgoff_t max_pgoff;
+ struct vm_fault vmf;
+ int off;
+
+ start_addr = max(address & fault_around_mask(), vma->vm_start);
+ off = ((address - start_addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
+ pte -= off;
+ pgoff -= off;
/*
- * Should we do an early C-O-W break?
+ * max_pgoff is either the end of the page table, the end of the vma,
+ * or fault_around_pages() from pgoff, whichever is nearest.
*/
- page = vmf.page;
- if (flags & FAULT_FLAG_WRITE) {
- if (!(vma->vm_flags & VM_SHARED)) {
- page = cow_page;
- anon = 1;
- copy_user_highpage(page, vmf.page, address, vma);
- __SetPageUptodate(page);
- } else {
- /*
- * If the page will be shareable, see if the backing
- * address space wants to know that the page is about
- * to become writable
- */
- if (vma->vm_ops->page_mkwrite) {
- int tmp;
-
- unlock_page(page);
- vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
- tmp = vma->vm_ops->page_mkwrite(vma, &vmf);
- if (unlikely(tmp &
- (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
- ret = tmp;
- goto unwritable_page;
- }
- if (unlikely(!(tmp & VM_FAULT_LOCKED))) {
- lock_page(page);
- if (!page->mapping) {
- ret = 0; /* retry the fault */
- unlock_page(page);
- goto unwritable_page;
- }
- } else
- VM_BUG_ON(!PageLocked(page));
- page_mkwrite = 1;
- }
- }
-
+ max_pgoff = pgoff - ((start_addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +
+ PTRS_PER_PTE - 1;
+ max_pgoff = min3(max_pgoff, vma_pages(vma) + vma->vm_pgoff - 1,
+ pgoff + fault_around_pages() - 1);
+
+ /* Check if it makes any sense to call ->map_pages */
+ while (!pte_none(*pte)) {
+ if (++pgoff > max_pgoff)
+ return;
+ start_addr += PAGE_SIZE;
+ if (start_addr >= vma->vm_end)
+ return;
+ pte++;
}
- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
+ vmf.virtual_address = (void __user *) start_addr;
+ vmf.pte = pte;
+ vmf.pgoff = pgoff;
+ vmf.max_pgoff = max_pgoff;
+ vmf.flags = flags;
+ vma->vm_ops->map_pages(vma, &vmf);
+}
+
+static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmd,
+ pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
+{
+ struct page *fault_page;
+ spinlock_t *ptl;
+ pte_t *pte;
+ int ret = 0;
/*
- * This silly early PAGE_DIRTY setting removes a race
- * due to the bad i386 page protection. But it's valid
- * for other architectures too.
- *
- * Note that if FAULT_FLAG_WRITE is set, we either now have
- * an exclusive copy of the page, or this is a shared mapping,
- * so we can make it writable and dirty to avoid having to
- * handle that later.
+ * Let's call ->map_pages() first and use ->fault() as fallback
+ * if the page at the offset is not ready to be mapped (cold cache or
+ * something).
*/
- /* Only go through if we didn't race with anybody else... */
- if (likely(pte_same(*page_table, orig_pte))) {
- flush_icache_page(vma, page);
- entry = mk_pte(page, vma->vm_page_prot);
- if (flags & FAULT_FLAG_WRITE)
- entry = maybe_mkwrite(pte_mkdirty(entry), vma);
- if (anon) {
- inc_mm_counter_fast(mm, MM_ANONPAGES);
- page_add_new_anon_rmap(page, vma, address);
- } else {
- inc_mm_counter_fast(mm, MM_FILEPAGES);
- page_add_file_rmap(page);
- if (flags & FAULT_FLAG_WRITE) {
- dirty_page = page;
- get_page(dirty_page);
- }
- }
- set_pte_at(mm, address, page_table, entry);
+ if (vma->vm_ops->map_pages) {
+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
+ do_fault_around(vma, address, pte, pgoff, flags);
+ if (!pte_same(*pte, orig_pte))
+ goto unlock_out;
+ pte_unmap_unlock(pte, ptl);
+ }
- /* no need to invalidate: a not-present page won't be cached */
- update_mmu_cache(vma, address, page_table);
- } else {
- if (cow_page)
- mem_cgroup_uncharge_page(cow_page);
- if (anon)
- page_cache_release(page);
- else
- anon = 1; /* no anon but release faulted_page */
+ ret = __do_fault(vma, address, pgoff, flags, &fault_page);
+ if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
+ return ret;
+
+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
+ if (unlikely(!pte_same(*pte, orig_pte))) {
+ pte_unmap_unlock(pte, ptl);
+ unlock_page(fault_page);
+ page_cache_release(fault_page);
+ return ret;
}
+ do_set_pte(vma, address, fault_page, pte, false, false);
+ unlock_page(fault_page);
+unlock_out:
+ pte_unmap_unlock(pte, ptl);
+ return ret;
+}
- pte_unmap_unlock(page_table, ptl);
+static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmd,
+ pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
+{
+ struct page *fault_page, *new_page;
+ spinlock_t *ptl;
+ pte_t *pte;
+ int ret;
- if (dirty_page) {
- struct address_space *mapping = page->mapping;
- int dirtied = 0;
+ if (unlikely(anon_vma_prepare(vma)))
+ return VM_FAULT_OOM;
- if (set_page_dirty(dirty_page))
- dirtied = 1;
- unlock_page(dirty_page);
- put_page(dirty_page);
- if ((dirtied || page_mkwrite) && mapping) {
- /*
- * Some device drivers do not set page.mapping but still
- * dirty their pages
- */
- balance_dirty_pages_ratelimited(mapping);
- }
+ new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
+ if (!new_page)
+ return VM_FAULT_OOM;
- /* file_update_time outside page_lock */
- if (vma->vm_file && !page_mkwrite)
- file_update_time(vma->vm_file);
- } else {
- unlock_page(vmf.page);
- if (anon)
- page_cache_release(vmf.page);
+ if (mem_cgroup_charge_anon(new_page, mm, GFP_KERNEL)) {
+ page_cache_release(new_page);
+ return VM_FAULT_OOM;
}
- return ret;
+ ret = __do_fault(vma, address, pgoff, flags, &fault_page);
+ if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
+ goto uncharge_out;
-unwritable_page:
- page_cache_release(page);
+ copy_user_highpage(new_page, fault_page, address, vma);
+ __SetPageUptodate(new_page);
+
+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
+ if (unlikely(!pte_same(*pte, orig_pte))) {
+ pte_unmap_unlock(pte, ptl);
+ unlock_page(fault_page);
+ page_cache_release(fault_page);
+ goto uncharge_out;
+ }
+ do_set_pte(vma, address, new_page, pte, true, true);
+ pte_unmap_unlock(pte, ptl);
+ unlock_page(fault_page);
+ page_cache_release(fault_page);
return ret;
uncharge_out:
- /* fs's fault handler get error */
- if (cow_page) {
- mem_cgroup_uncharge_page(cow_page);
- page_cache_release(cow_page);
+ mem_cgroup_uncharge_page(new_page);
+ page_cache_release(new_page);
+ return ret;
+}
+
+static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmd,
+ pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
+{
+ struct page *fault_page;
+ struct address_space *mapping;
+ spinlock_t *ptl;
+ pte_t *pte;
+ int dirtied = 0;
+ int ret, tmp;
+
+ ret = __do_fault(vma, address, pgoff, flags, &fault_page);
+ if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
+ return ret;
+
+ /*
+ * Check if the backing address space wants to know that the page is
+ * about to become writable
+ */
+ if (vma->vm_ops->page_mkwrite) {
+ unlock_page(fault_page);
+ tmp = do_page_mkwrite(vma, fault_page, address);
+ if (unlikely(!tmp ||
+ (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
+ page_cache_release(fault_page);
+ return tmp;
+ }
}
+
+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
+ if (unlikely(!pte_same(*pte, orig_pte))) {
+ pte_unmap_unlock(pte, ptl);
+ unlock_page(fault_page);
+ page_cache_release(fault_page);
+ return ret;
+ }
+ do_set_pte(vma, address, fault_page, pte, true, false);
+ pte_unmap_unlock(pte, ptl);
+
+ if (set_page_dirty(fault_page))
+ dirtied = 1;
+ mapping = fault_page->mapping;
+ unlock_page(fault_page);
+ if ((dirtied || vma->vm_ops->page_mkwrite) && mapping) {
+ /*
+ * Some device drivers do not set page.mapping but still
+ * dirty their pages
+ */
+ balance_dirty_pages_ratelimited(mapping);
+ }
+
+ /* file_update_time outside page_lock */
+ if (vma->vm_file && !vma->vm_ops->page_mkwrite)
+ file_update_time(vma->vm_file);
+
return ret;
}
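/*
 * Illustrative sketch, not part of this patch: the debugfs_create_file()
 * call above registers the knob at the debugfs root, so with debugfs
 * mounted in the usual location it appears as
 * /sys/kernel/debug/fault_around_order. Reading shows the current order;
 * with the default of 4 a single read fault pre-maps up to 16 pages via
 * ->map_pages(). Writing needs root and a value whose (1 << order) does
 * not exceed PTRS_PER_PTE.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/fault_around_order", "r+");
	unsigned long long order;

	if (!f) {
		perror("fault_around_order");
		return 1;
	}
	if (fscanf(f, "%llu", &order) == 1)
		printf("fault_around_order = %llu (%llu pages)\n",
		       order, 1ULL << order);

	rewind(f);
	fprintf(f, "3\n");	/* shrink the fault-around window to 8 pages */
	fclose(f);
	return 0;
}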
@@ -3480,7 +3653,13 @@ static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
- vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
pte_unmap(page_table);
- return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
+ if (!(flags & FAULT_FLAG_WRITE))
+ return do_read_fault(mm, vma, address, pmd, pgoff, flags,
+ orig_pte);
+ if (!(vma->vm_flags & VM_SHARED))
+ return do_cow_fault(mm, vma, address, pmd, pgoff, flags,
+ orig_pte);
+ return do_shared_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
}
/*
@@ -3512,29 +3691,40 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
}
pgoff = pte_to_pgoff(orig_pte);
- return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
+ if (!(flags & FAULT_FLAG_WRITE))
+ return do_read_fault(mm, vma, address, pmd, pgoff, flags,
+ orig_pte);
+ if (!(vma->vm_flags & VM_SHARED))
+ return do_cow_fault(mm, vma, address, pmd, pgoff, flags,
+ orig_pte);
+ return do_shared_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
}
-int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
- unsigned long addr, int current_nid)
+static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
+ unsigned long addr, int page_nid,
+ int *flags)
{
get_page(page);
count_vm_numa_event(NUMA_HINT_FAULTS);
- if (current_nid == numa_node_id())
+ if (page_nid == numa_node_id()) {
count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
+ *flags |= TNF_FAULT_LOCAL;
+ }
return mpol_misplaced(page, vma, addr);
}
-int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
+static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, pte_t pte, pte_t *ptep, pmd_t *pmd)
{
struct page *page = NULL;
spinlock_t *ptl;
- int current_nid = -1;
+ int page_nid = -1;
+ int last_cpupid;
int target_nid;
bool migrated = false;
+ int flags = 0;
/*
* The "pte" at this point cannot be used safely without
@@ -3561,123 +3751,44 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
pte_unmap_unlock(ptep, ptl);
return 0;
}
+ BUG_ON(is_zero_pfn(page_to_pfn(page)));
- current_nid = page_to_nid(page);
- target_nid = numa_migrate_prep(page, vma, addr, current_nid);
+ /*
+ * Avoid grouping on DSO/COW pages in particular and RO pages
+ * in general, RO pages shouldn't hurt as much anyway since
+ * they can be in shared cache state.
+ */
+ if (!pte_write(pte))
+ flags |= TNF_NO_GROUP;
+
+ /*
+ * Flag if the page is shared between multiple address spaces. This
+ * is later used when determining whether to group tasks together
+ */
+ if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED))
+ flags |= TNF_SHARED;
+
+ last_cpupid = page_cpupid_last(page);
+ page_nid = page_to_nid(page);
+ target_nid = numa_migrate_prep(page, vma, addr, page_nid, &flags);
pte_unmap_unlock(ptep, ptl);
if (target_nid == -1) {
- /*
- * Account for the fault against the current node if it not
- * being replaced regardless of where the page is located.
- */
- current_nid = numa_node_id();
put_page(page);
goto out;
}
/* Migrate to the requested node */
- migrated = migrate_misplaced_page(page, target_nid);
- if (migrated)
- current_nid = target_nid;
-
-out:
- if (current_nid != -1)
- task_numa_fault(current_nid, 1, migrated);
- return 0;
-}
-
-/* NUMA hinting page fault entry point for regular pmds */
-#ifdef CONFIG_NUMA_BALANCING
-static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long addr, pmd_t *pmdp)
-{
- pmd_t pmd;
- pte_t *pte, *orig_pte;
- unsigned long _addr = addr & PMD_MASK;
- unsigned long offset;
- spinlock_t *ptl;
- bool numa = false;
- int local_nid = numa_node_id();
-
- spin_lock(&mm->page_table_lock);
- pmd = *pmdp;
- if (pmd_numa(pmd)) {
- set_pmd_at(mm, _addr, pmdp, pmd_mknonnuma(pmd));
- numa = true;
+ migrated = migrate_misplaced_page(page, vma, target_nid);
+ if (migrated) {
+ page_nid = target_nid;
+ flags |= TNF_MIGRATED;
}
- spin_unlock(&mm->page_table_lock);
-
- if (!numa)
- return 0;
- /* we're in a page fault so some vma must be in the range */
- BUG_ON(!vma);
- BUG_ON(vma->vm_start >= _addr + PMD_SIZE);
- offset = max(_addr, vma->vm_start) & ~PMD_MASK;
- VM_BUG_ON(offset >= PMD_SIZE);
- orig_pte = pte = pte_offset_map_lock(mm, pmdp, _addr, &ptl);
- pte += offset >> PAGE_SHIFT;
- for (addr = _addr + offset; addr < _addr + PMD_SIZE; pte++, addr += PAGE_SIZE) {
- pte_t pteval = *pte;
- struct page *page;
- int curr_nid = local_nid;
- int target_nid;
- bool migrated;
- if (!pte_present(pteval))
- continue;
- if (!pte_numa(pteval))
- continue;
- if (addr >= vma->vm_end) {
- vma = find_vma(mm, addr);
- /* there's a pte present so there must be a vma */
- BUG_ON(!vma);
- BUG_ON(addr < vma->vm_start);
- }
- if (pte_numa(pteval)) {
- pteval = pte_mknonnuma(pteval);
- set_pte_at(mm, addr, pte, pteval);
- }
- page = vm_normal_page(vma, addr, pteval);
- if (unlikely(!page))
- continue;
- /* only check non-shared pages */
- if (unlikely(page_mapcount(page) != 1))
- continue;
-
- /*
- * Note that the NUMA fault is later accounted to either
- * the node that is currently running or where the page is
- * migrated to.
- */
- curr_nid = local_nid;
- target_nid = numa_migrate_prep(page, vma, addr,
- page_to_nid(page));
- if (target_nid == -1) {
- put_page(page);
- continue;
- }
-
- /* Migrate to the requested node */
- pte_unmap_unlock(pte, ptl);
- migrated = migrate_misplaced_page(page, target_nid);
- if (migrated)
- curr_nid = target_nid;
- task_numa_fault(curr_nid, 1, migrated);
-
- pte = pte_offset_map_lock(mm, pmdp, addr, &ptl);
- }
- pte_unmap_unlock(orig_pte, ptl);
-
- return 0;
-}
-#else
-static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long addr, pmd_t *pmdp)
-{
- BUG();
+out:
+ if (page_nid != -1)
+ task_numa_fault(last_cpupid, page_nid, 1, flags);
return 0;
}
-#endif /* CONFIG_NUMA_BALANCING */
/*
* These routines also need to handle stuff like marking pages dirty
@@ -3692,7 +3803,7 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
* but allow concurrent faults), and pte mapped but not yet locked.
* We return with mmap_sem still held, but pte unmapped and unlocked.
*/
-int handle_pte_fault(struct mm_struct *mm,
+static int handle_pte_fault(struct mm_struct *mm,
struct vm_area_struct *vma, unsigned long address,
pte_t *pte, pmd_t *pmd, unsigned int flags)
{
@@ -3751,26 +3862,17 @@ unlock:
/*
* By the time we get here, we already hold the mm semaphore
*/
-int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long address, unsigned int flags)
+static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long address, unsigned int flags)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
- __set_current_state(TASK_RUNNING);
-
- count_vm_event(PGFAULT);
- mem_cgroup_count_vm_event(mm, PGFAULT);
-
- /* do counter updates before entering really critical section. */
- check_sync_rss_stat(current);
-
if (unlikely(is_vm_hugetlb_page(vma)))
return hugetlb_fault(mm, vma, address, flags);
-retry:
pgd = pgd_offset(mm, address);
pud = pud_alloc(mm, pgd, address);
if (!pud)
@@ -3779,9 +3881,12 @@ retry:
if (!pmd)
return VM_FAULT_OOM;
if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) {
+ int ret = VM_FAULT_FALLBACK;
if (!vma->vm_ops)
- return do_huge_pmd_anonymous_page(mm, vma, address,
- pmd, flags);
+ ret = do_huge_pmd_anonymous_page(mm, vma, address,
+ pmd, flags);
+ if (!(ret & VM_FAULT_FALLBACK))
+ return ret;
} else {
pmd_t orig_pmd = *pmd;
int ret;
@@ -3805,25 +3910,18 @@ retry:
if (dirty && !pmd_write(orig_pmd)) {
ret = do_huge_pmd_wp_page(mm, vma, address, pmd,
orig_pmd);
- /*
- * If COW results in an oom, the huge pmd will
- * have been split, so retry the fault on the
- * pte for a smaller charge.
- */
- if (unlikely(ret & VM_FAULT_OOM))
- goto retry;
- return ret;
+ if (!(ret & VM_FAULT_FALLBACK))
+ return ret;
} else {
huge_pmd_set_accessed(mm, vma, address, pmd,
orig_pmd, dirty);
+ return 0;
}
-
- return 0;
}
}
- if (pmd_numa(*pmd))
- return do_pmd_numa_page(mm, vma, address, pmd);
+ /* THP should already have been handled */
+ BUG_ON(pmd_numa(*pmd));
/*
* Use __pte_alloc instead of pte_alloc_map, because we can't
@@ -3847,6 +3945,43 @@ retry:
return handle_pte_fault(mm, vma, address, pte, pmd, flags);
}
+int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long address, unsigned int flags)
+{
+ int ret;
+
+ __set_current_state(TASK_RUNNING);
+
+ count_vm_event(PGFAULT);
+ mem_cgroup_count_vm_event(mm, PGFAULT);
+
+ /* do counter updates before entering really critical section. */
+ check_sync_rss_stat(current);
+
+ /*
+ * Enable the memcg OOM handling for faults triggered in user
+ * space. Kernel faults are handled more gracefully.
+ */
+ if (flags & FAULT_FLAG_USER)
+ mem_cgroup_oom_enable();
+
+ ret = __handle_mm_fault(mm, vma, address, flags);
+
+ if (flags & FAULT_FLAG_USER) {
+ mem_cgroup_oom_disable();
+ /*
+ * The task may have entered a memcg OOM situation but
+ * if the allocation error was handled gracefully (no
+ * VM_FAULT_OOM), there is no need to kill anything.
+ * Just clean up the OOM state peacefully.
+ */
+ if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
+ mem_cgroup_oom_synchronize(false);
+ }
+
+ return ret;
+}
+
#ifndef __PAGETABLE_PUD_FOLDED
/*
* Allocate page upper directory.
@@ -4065,6 +4200,7 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
return len;
}
+EXPORT_SYMBOL_GPL(generic_access_phys);
#endif
/*
@@ -4201,7 +4337,7 @@ void print_vma_addr(char *prefix, unsigned long ip)
up_read(&mm->mmap_sem);
}
-#ifdef CONFIG_PROVE_LOCKING
+#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
void might_fault(void)
{
/*
@@ -4213,13 +4349,17 @@ void might_fault(void)
if (segment_eq(get_fs(), KERNEL_DS))
return;
- might_sleep();
/*
* it would be nicer only to annotate paths which are not under
* pagefault_disable, however that requires a larger audit and
* providing helpers like get_user_atomic.
*/
- if (!in_atomic() && current->mm)
+ if (in_atomic())
+ return;
+
+ __might_sleep(__FILE__, __LINE__, 0);
+
+ if (current->mm)
might_lock_read(&current->mm->mmap_sem);
}
EXPORT_SYMBOL(might_fault);
@@ -4295,3 +4435,30 @@ void copy_user_huge_page(struct page *dst, struct page *src,
}
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
+
+#if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS
+
+static struct kmem_cache *page_ptl_cachep;
+
+void __init ptlock_cache_init(void)
+{
+ page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
+ SLAB_PANIC, NULL);
+}
+
+bool ptlock_alloc(struct page *page)
+{
+ spinlock_t *ptl;
+
+ ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
+ if (!ptl)
+ return false;
+ page->ptl = ptl;
+ return true;
+}
+
+void ptlock_free(struct page *page)
+{
+ kmem_cache_free(page_ptl_cachep, page->ptl);
+}
+#endif
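/*
 * Illustrative sketch, not part of this patch: the EXPORT_SYMBOL_GPL added
 * above lets a module route ptrace()/access_process_vm() accesses to a
 * VM_IO mapping through generic_access_phys(). The mmap handler and the
 * example_mmio_base variable are invented for the example; the ->access
 * hook and helpers are the in-tree interfaces.
 */
#include <linux/fs.h>
#include <linux/mm.h>

static phys_addr_t example_mmio_base;	/* device MMIO base, illustrative */

static const struct vm_operations_struct example_phys_vm_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys,
#endif
};

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_ops = &example_phys_vm_ops;
	return io_remap_pfn_range(vma, vma->vm_start,
				  example_mmio_base >> PAGE_SHIFT,
				  vma->vm_end - vma->vm_start,
				  vma->vm_page_prot);
}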
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 1ad92b46753e..a650db29606f 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -9,7 +9,6 @@
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
-#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/pagevec.h>
@@ -30,6 +29,8 @@
#include <linux/mm_inline.h>
#include <linux/firmware-map.h>
#include <linux/stop_machine.h>
+#include <linux/hugetlb.h>
+#include <linux/memblock.h>
#include <asm/tlbflush.h>
@@ -51,14 +52,10 @@ DEFINE_MUTEX(mem_hotplug_mutex);
void lock_memory_hotplug(void)
{
mutex_lock(&mem_hotplug_mutex);
-
- /* for exclusive hibernation if CONFIG_HIBERNATION=y */
- lock_system_sleep();
}
void unlock_memory_hotplug(void)
{
- unlock_system_sleep();
mutex_unlock(&mem_hotplug_mutex);
}
@@ -75,7 +72,7 @@ static struct resource *register_memory_resource(u64 start, u64 size)
res->end = start + size - 1;
res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
if (request_resource(&iomem_resource, res) < 0) {
- printk("System RAM resource %pR cannot be added\n", res);
+ pr_debug("System RAM resource %pR cannot be added\n", res);
kfree(res);
res = NULL;
}
@@ -101,12 +98,9 @@ void get_page_bootmem(unsigned long info, struct page *page,
atomic_inc(&page->_count);
}
-/* reference to __meminit __free_pages_bootmem is valid
- * so use __ref to tell modpost not to generate a warning */
-void __ref put_page_bootmem(struct page *page)
+void put_page_bootmem(struct page *page)
{
unsigned long type;
- static DEFINE_MUTEX(ppb_lock);
type = (unsigned long) page->lru.next;
BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
@@ -116,17 +110,8 @@ void __ref put_page_bootmem(struct page *page)
ClearPagePrivate(page);
set_page_private(page, 0);
INIT_LIST_HEAD(&page->lru);
-
- /*
- * Please refer to comment for __free_pages_bootmem()
- * for why we serialize here.
- */
- mutex_lock(&ppb_lock);
- __free_pages_bootmem(page, 0);
- mutex_unlock(&ppb_lock);
- totalram_pages++;
+ free_reserved_page(page);
}
-
}
#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
@@ -206,7 +191,7 @@ void register_page_bootmem_info_node(struct pglist_data *pgdat)
zone = &pgdat->node_zones[0];
for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) {
- if (zone->wait_table) {
+ if (zone_is_initialized(zone)) {
nr_pages = zone->wait_table_hash_nr_entries
* sizeof(wait_queue_head_t);
nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT;
@@ -220,13 +205,13 @@ void register_page_bootmem_info_node(struct pglist_data *pgdat)
pfn = pgdat->node_start_pfn;
end_pfn = pgdat_end_pfn(pgdat);
- /* register_section info */
+ /* register section info */
for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
/*
* Some platforms can assign the same pfn to multiple nodes - on
* node0 as well as nodeN. To avoid registering a pfn against
* multiple nodes we check that this pfn does not already
- * reside in some other node.
+ * reside in some other nodes.
*/
if (pfn_valid(pfn) && (pfn_to_nid(pfn) == node))
register_page_bootmem_info_section(pfn);
@@ -241,8 +226,8 @@ static void grow_zone_span(struct zone *zone, unsigned long start_pfn,
zone_span_writelock(zone);
- old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
- if (!zone->spanned_pages || start_pfn < zone->zone_start_pfn)
+ old_zone_end_pfn = zone_end_pfn(zone);
+ if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)
zone->zone_start_pfn = start_pfn;
zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
@@ -283,7 +268,7 @@ static void fix_zone_id(struct zone *zone, unsigned long start_pfn,
}
/* Can fail with -ENOMEM from allocating a wait table with vmalloc() or
- * alloc_bootmem_node_nopanic() */
+ * alloc_bootmem_node_nopanic()/memblock_virt_alloc_node_nopanic() */
static int __ref ensure_zone_is_initialized(struct zone *zone,
unsigned long start_pfn, unsigned long num_pages)
{
@@ -309,7 +294,7 @@ static int __meminit move_pfn_range_left(struct zone *z1, struct zone *z2,
/* can't move pfns which are higher than @z2 */
if (end_pfn > zone_end_pfn(z2))
goto out_fail;
- /* the move out part mast at the left most of @z2 */
+ /* the move out part must be at the left most of @z2 */
if (start_pfn > z2->zone_start_pfn)
goto out_fail;
/* must included/overlap */
@@ -317,7 +302,7 @@ static int __meminit move_pfn_range_left(struct zone *z1, struct zone *z2,
goto out_fail;
/* use start_pfn for z1's start_pfn if z1 is empty */
- if (z1->spanned_pages)
+ if (!zone_is_empty(z1))
z1_start_pfn = z1->zone_start_pfn;
else
z1_start_pfn = start_pfn;
@@ -359,7 +344,7 @@ static int __meminit move_pfn_range_right(struct zone *z1, struct zone *z2,
goto out_fail;
/* use end_pfn for z2's end_pfn if z2 is empty */
- if (z2->spanned_pages)
+ if (!zone_is_empty(z2))
z2_end_pfn = zone_end_pfn(z2);
else
z2_end_pfn = end_pfn;
@@ -380,8 +365,7 @@ out_fail:
static void grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn,
unsigned long end_pfn)
{
- unsigned long old_pgdat_end_pfn =
- pgdat->node_start_pfn + pgdat->node_spanned_pages;
+ unsigned long old_pgdat_end_pfn = pgdat_end_pfn(pgdat);
if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
pgdat->node_start_pfn = start_pfn;
@@ -417,13 +401,12 @@ static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
static int __meminit __add_section(int nid, struct zone *zone,
unsigned long phys_start_pfn)
{
- int nr_pages = PAGES_PER_SECTION;
int ret;
if (pfn_valid(phys_start_pfn))
return -EEXIST;
- ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);
+ ret = sparse_add_one_section(zone, phys_start_pfn);
if (ret < 0)
return ret;
@@ -526,8 +509,9 @@ static int find_biggest_section_pfn(int nid, struct zone *zone,
static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
unsigned long end_pfn)
{
- unsigned long zone_start_pfn = zone->zone_start_pfn;
- unsigned long zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
+ unsigned long zone_start_pfn = zone->zone_start_pfn;
+ unsigned long z = zone_end_pfn(zone); /* zone_end_pfn namespace clash */
+ unsigned long zone_end_pfn = z;
unsigned long pfn;
struct mem_section *ms;
int nid = zone_to_nid(zone);
@@ -593,9 +577,9 @@ static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
static void shrink_pgdat_span(struct pglist_data *pgdat,
unsigned long start_pfn, unsigned long end_pfn)
{
- unsigned long pgdat_start_pfn = pgdat->node_start_pfn;
- unsigned long pgdat_end_pfn =
- pgdat->node_start_pfn + pgdat->node_spanned_pages;
+ unsigned long pgdat_start_pfn = pgdat->node_start_pfn;
+ unsigned long p = pgdat_end_pfn(pgdat); /* pgdat_end_pfn namespace clash */
+ unsigned long pgdat_end_pfn = p;
unsigned long pfn;
struct mem_section *ms;
int nid = pgdat->node_id;
@@ -775,29 +759,18 @@ EXPORT_SYMBOL_GPL(restore_online_page_callback);
void __online_page_set_limits(struct page *page)
{
- unsigned long pfn = page_to_pfn(page);
-
- if (pfn >= num_physpages)
- num_physpages = pfn + 1;
}
EXPORT_SYMBOL_GPL(__online_page_set_limits);
void __online_page_increment_counters(struct page *page)
{
- totalram_pages++;
-
-#ifdef CONFIG_HIGHMEM
- if (PageHighMem(page))
- totalhigh_pages++;
-#endif
+ adjust_managed_page_count(page, 1);
}
EXPORT_SYMBOL_GPL(__online_page_increment_counters);
void __online_page_free(struct page *page)
{
- ClearPageReserved(page);
- init_page_count(page);
- __free_page(page);
+ __free_reserved_page(page);
}
EXPORT_SYMBOL_GPL(__online_page_free);
@@ -918,6 +891,7 @@ static void node_states_set_node(int node, struct memory_notify *arg)
int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type)
{
+ unsigned long flags;
unsigned long onlined_pages = 0;
struct zone *zone;
int need_zonelists_rebuild = 0;
@@ -936,19 +910,19 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
if ((zone_idx(zone) > ZONE_NORMAL || online_type == ONLINE_MOVABLE) &&
!can_online_high_movable(zone)) {
unlock_memory_hotplug();
- return -1;
+ return -EINVAL;
}
if (online_type == ONLINE_KERNEL && zone_idx(zone) == ZONE_MOVABLE) {
if (move_pfn_range_left(zone - 1, zone, pfn, pfn + nr_pages)) {
unlock_memory_hotplug();
- return -1;
+ return -EINVAL;
}
}
if (online_type == ONLINE_MOVABLE && zone_idx(zone) == ZONE_MOVABLE - 1) {
if (move_pfn_range_right(zone, zone + 1, pfn, pfn + nr_pages)) {
unlock_memory_hotplug();
- return -1;
+ return -EINVAL;
}
}
@@ -959,7 +933,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
arg.nr_pages = nr_pages;
node_states_check_changes_online(nr_pages, zone, &arg);
- nid = page_to_nid(pfn_to_page(pfn));
+ nid = pfn_to_nid(pfn);
ret = memory_notify(MEM_GOING_ONLINE, &arg);
ret = notifier_to_errno(ret);
@@ -994,9 +968,12 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
return ret;
}
- zone->managed_pages += onlined_pages;
zone->present_pages += onlined_pages;
+
+ pgdat_resize_lock(zone->zone_pgdat, &flags);
zone->zone_pgdat->node_present_pages += onlined_pages;
+ pgdat_resize_unlock(zone->zone_pgdat, &flags);
+
if (onlined_pages) {
node_states_set_node(zone_to_nid(zone), &arg);
if (need_zonelists_rebuild)
@@ -1065,17 +1042,23 @@ static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
}
-/*
+/**
+ * try_online_node - online a node if offlined
+ *
* called by cpu_up() to online a node without onlined memory.
*/
-int mem_online_node(int nid)
+int try_online_node(int nid)
{
pg_data_t *pgdat;
int ret;
+ if (node_online(nid))
+ return 0;
+
lock_memory_hotplug();
pgdat = hotadd_new_pgdat(nid, 0);
if (!pgdat) {
+ pr_err("Cannot online node %d due to NULL pgdat\n", nid);
ret = -ENOMEM;
goto out;
}
@@ -1083,11 +1066,34 @@ int mem_online_node(int nid)
ret = register_one_node(nid);
BUG_ON(ret);
+ if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
+ mutex_lock(&zonelists_mutex);
+ build_all_zonelists(NULL, NULL);
+ mutex_unlock(&zonelists_mutex);
+ }
+
out:
unlock_memory_hotplug();
return ret;
}
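
A hedged sketch of the intended caller pattern (the function below is hypothetical; in mainline the equivalent logic sits in the cpu_up() path):

static int online_cpu_node_first(unsigned int cpu)
{
	int nid = cpu_to_node(cpu);
	int err;

	err = try_online_node(nid);	/* returns 0 right away if nid is already online */
	if (err)
		return err;		/* e.g. -ENOMEM if the pgdat cannot be allocated */

	/* ... continue with the architecture CPU bring-up ... */
	return 0;
}
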
+static int check_hotplug_memory_range(u64 start, u64 size)
+{
+ u64 start_pfn = start >> PAGE_SHIFT;
+ u64 nr_pages = size >> PAGE_SHIFT;
+
+ /* Memory range must be aligned with section */
+ if ((start_pfn & ~PAGE_SECTION_MASK) ||
+ (nr_pages % PAGES_PER_SECTION) || (!nr_pages)) {
+ pr_err("Section-unaligned hotplug range: start 0x%llx, size 0x%llx\n",
+ (unsigned long long)start,
+ (unsigned long long)size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
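
A hedged worked example of the alignment rule above; the constants assume x86_64 defaults of 4 KiB pages and 128 MiB sections (PAGE_SHIFT == 12, PAGES_PER_SECTION == 0x8000) and are illustrative, not part of the patch:

	u64 good_start = 0x100000000ULL; /* 4 GiB: pfn 0x100000 is a multiple of 0x8000 -> ok */
	u64 good_size  = 0x8000000ULL;   /* 128 MiB: nr_pages == PAGES_PER_SECTION -> ok      */
	u64 bad_start  = 0x100200000ULL; /* 4 GiB + 2 MiB: pfn 0x100200 is misaligned, so
					  * check_hotplug_memory_range() returns -EINVAL      */
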
+
/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
int __ref add_memory(int nid, u64 start, u64 size)
{
@@ -1097,17 +1103,22 @@ int __ref add_memory(int nid, u64 start, u64 size)
struct resource *res;
int ret;
- lock_memory_hotplug();
+ ret = check_hotplug_memory_range(start, size);
+ if (ret)
+ return ret;
res = register_memory_resource(start, size);
ret = -EEXIST;
if (!res)
- goto out;
+ return ret;
{ /* Stupid hack to suppress address-never-null warning */
void *p = NODE_DATA(nid);
new_pgdat = !p;
}
+
+ lock_memory_hotplug();
+
new_node = !node_online(nid);
if (new_node) {
pgdat = hotadd_new_pgdat(nid, start);
@@ -1227,10 +1238,12 @@ static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
}
/*
- * Scanning pfn is much easier than scanning lru list.
- * Scan pfn from start to end and Find LRU page.
+ * Scan pfn range [start,end) to find movable/migratable pages (LRU pages
+ * and hugepages). We scan by pfn because it's much easier than scanning
+ * the linked lists. This function returns the pfn of the first movable
+ * page found, or 0 if no movable page is found.
*/
-static unsigned long scan_lru_pages(unsigned long start, unsigned long end)
+static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
{
unsigned long pfn;
struct page *page;
@@ -1239,6 +1252,13 @@ static unsigned long scan_lru_pages(unsigned long start, unsigned long end)
page = pfn_to_page(pfn);
if (PageLRU(page))
return pfn;
+ if (PageHuge(page)) {
+ if (is_hugepage_active(page))
+ return pfn;
+ else
+ pfn = round_up(pfn + 1,
+ 1 << compound_order(page)) - 1;
+ }
}
}
return 0;
@@ -1259,6 +1279,19 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
if (!pfn_valid(pfn))
continue;
page = pfn_to_page(pfn);
+
+ if (PageHuge(page)) {
+ struct page *head = compound_head(page);
+ pfn = page_to_pfn(head) + (1<<compound_order(head)) - 1;
+ if (compound_order(head) > PFN_SECTION_SHIFT) {
+ ret = -EBUSY;
+ break;
+ }
+ if (isolate_huge_page(page, &source))
+ move_pages -= 1 << compound_order(head);
+ continue;
+ }
+
if (!get_page_unless_zero(page))
continue;
/*
@@ -1277,7 +1310,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
#ifdef CONFIG_DEBUG_VM
printk(KERN_ALERT "removing pfn %lx from LRU failed\n",
pfn);
- dump_page(page);
+ dump_page(page, "failed to remove from LRU");
#endif
put_page(page);
/* Because we don't have big zone->lock. we should
@@ -1291,7 +1324,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
}
if (!list_empty(&source)) {
if (not_managed) {
- putback_lru_pages(&source);
+ putback_movable_pages(&source);
goto out;
}
@@ -1302,7 +1335,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
ret = migrate_pages(&source, alloc_migrate_target, 0,
MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
if (ret)
- putback_lru_pages(&source);
+ putback_movable_pages(&source);
}
out:
return ret;
@@ -1390,6 +1423,37 @@ static bool can_offline_normal(struct zone *zone, unsigned long nr_pages)
}
#endif /* CONFIG_MOVABLE_NODE */
+static int __init cmdline_parse_movable_node(char *p)
+{
+#ifdef CONFIG_MOVABLE_NODE
+ /*
+ * Memory used by the kernel cannot be hot-removed because Linux
+ * cannot migrate the kernel pages. When memory hotplug is
+ * enabled, we should prevent memblock from allocating memory
+ * for the kernel.
+ *
+ * ACPI SRAT records all hotpluggable memory ranges. But before
+ * SRAT is parsed, we don't know about it.
+ *
+ * The kernel image is loaded into memory very early. We cannot
+ * prevent this anyway. So on NUMA systems, we set any node the
+ * kernel resides in as un-hotpluggable.
+ *
+ * Since on modern servers one node can have double-digit
+ * gigabytes of memory, we can assume the memory around the kernel
+ * image is also un-hotpluggable. So before SRAT is parsed, just
+ * allocate memory near the kernel image to do our best to keep
+ * the kernel away from hotpluggable memory.
+ */
+ memblock_set_bottom_up(true);
+ movable_node_enabled = true;
+#else
+ pr_warn("movable_node option not supported\n");
+#endif
+ return 0;
+}
+early_param("movable_node", cmdline_parse_movable_node);
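
A minimal hedged sketch of how early boot code can observe the direction set here, assuming the memblock_bottom_up() accessor that pairs with memblock_set_bottom_up(); the function below is illustrative and not part of the patch:

static void __init report_memblock_direction(void)
{
	if (memblock_bottom_up())	/* true once "movable_node" has been parsed */
		pr_info("memblock: allocating bottom-up, near the kernel image\n");
	else
		pr_info("memblock: allocating top-down\n");
}
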
+
/* check which state of node_states will be changed when offline memory */
static void node_states_check_changes_offline(unsigned long nr_pages,
struct zone *zone, struct memory_notify *arg)
@@ -1487,10 +1551,10 @@ static int __ref __offline_pages(unsigned long start_pfn,
unsigned long pfn, nr_pages, expire;
long offlined_pages;
int ret, drain, retry_max, node;
+ unsigned long flags;
struct zone *zone;
struct memory_notify arg;
- BUG_ON(start_pfn >= end_pfn);
/* at least, alignment against pageblock is necessary */
if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))
return -EINVAL;
@@ -1545,8 +1609,8 @@ repeat:
drain_all_pages();
}
- pfn = scan_lru_pages(start_pfn, end_pfn);
- if (pfn) { /* We have page on LRU */
+ pfn = scan_movable_pages(start_pfn, end_pfn);
+ if (pfn) { /* We have movable pages */
ret = do_migrate_range(pfn, end_pfn);
if (!ret) {
drain = 1;
@@ -1565,6 +1629,11 @@ repeat:
yield();
/* drain pcp pages, this is synchronous. */
drain_all_pages();
+ /*
+ * Dissolve free hugepages in the memory block before actually
+ * offlining it, in order to keep hugetlbfs's object counting consistent.
+ */
+ dissolve_free_huge_pages(start_pfn, end_pfn);
/* check again */
offlined_pages = check_pages_isolated(start_pfn, end_pfn);
if (offlined_pages < 0) {
@@ -1578,10 +1647,12 @@ repeat:
/* reset pagetype flags and makes migrate type to be MOVABLE */
undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
/* removal success */
- zone->managed_pages -= offlined_pages;
+ adjust_managed_page_count(pfn_to_page(start_pfn), -offlined_pages);
zone->present_pages -= offlined_pages;
+
+ pgdat_resize_lock(zone->zone_pgdat, &flags);
zone->zone_pgdat->node_present_pages -= offlined_pages;
- totalram_pages -= offlined_pages;
+ pgdat_resize_unlock(zone->zone_pgdat, &flags);
init_per_zone_wmark_min();
@@ -1621,6 +1692,7 @@ int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
{
return __offline_pages(start_pfn, start_pfn + nr_pages, 120 * HZ);
}
+#endif /* CONFIG_MEMORY_HOTREMOVE */
/**
* walk_memory_range - walks through all mem sections in [start_pfn, end_pfn)
@@ -1634,7 +1706,7 @@ int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
*
* Returns the return value of func.
*/
-static int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
+int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
void *arg, int (*func)(struct memory_block *, void *))
{
struct memory_block *mem = NULL;
@@ -1671,25 +1743,8 @@ static int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
return 0;
}
-/**
- * offline_memory_block_cb - callback function for offlining memory block
- * @mem: the memory block to be offlined
- * @arg: buffer to hold error msg
- *
- * Always return 0, and put the error msg in arg if any.
- */
-static int offline_memory_block_cb(struct memory_block *mem, void *arg)
-{
- int *ret = arg;
- int error = offline_memory_block(mem);
-
- if (error != 0 && *ret == 0)
- *ret = error;
-
- return 0;
-}
-
-static int is_memblock_offlined_cb(struct memory_block *mem, void *arg)
+#ifdef CONFIG_MEMORY_HOTREMOVE
+static int check_memblock_offlined_cb(struct memory_block *mem, void *arg)
{
int ret = !is_memblock_offlined(mem);
@@ -1706,9 +1761,8 @@ static int is_memblock_offlined_cb(struct memory_block *mem, void *arg)
return ret;
}
-static int check_cpu_on_node(void *data)
+static int check_cpu_on_node(pg_data_t *pgdat)
{
- struct pglist_data *pgdat = data;
int cpu;
for_each_present_cpu(cpu) {
@@ -1723,10 +1777,9 @@ static int check_cpu_on_node(void *data)
return 0;
}
-static void unmap_cpu_on_node(void *data)
+static void unmap_cpu_on_node(pg_data_t *pgdat)
{
#ifdef CONFIG_ACPI_NUMA
- struct pglist_data *pgdat = data;
int cpu;
for_each_possible_cpu(cpu)
@@ -1735,10 +1788,11 @@ static void unmap_cpu_on_node(void *data)
#endif
}
-static int check_and_unmap_cpu_on_node(void *data)
+static int check_and_unmap_cpu_on_node(pg_data_t *pgdat)
{
- int ret = check_cpu_on_node(data);
+ int ret;
+ ret = check_cpu_on_node(pgdat);
if (ret)
return ret;
@@ -1747,11 +1801,18 @@ static int check_and_unmap_cpu_on_node(void *data)
* the cpu_to_node() now.
*/
- unmap_cpu_on_node(data);
+ unmap_cpu_on_node(pgdat);
return 0;
}
-/* offline the node if all memory sections of this node are removed */
+/**
+ * try_offline_node
+ *
+ * Offline a node if all memory sections and cpus of the node are removed.
+ *
+ * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
+ * and online/offline operations before this call.
+ */
void try_offline_node(int nid)
{
pg_data_t *pgdat = NODE_DATA(nid);
@@ -1777,7 +1838,7 @@ void try_offline_node(int nid)
return;
}
- if (stop_machine(check_and_unmap_cpu_on_node, pgdat, NULL))
+ if (check_and_unmap_cpu_on_node(pgdat))
return;
/*
@@ -1814,54 +1875,31 @@ void try_offline_node(int nid)
}
EXPORT_SYMBOL(try_offline_node);
-int __ref remove_memory(int nid, u64 start, u64 size)
+/**
+ * remove_memory
+ *
+ * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
+ * and online/offline operations before this call, as required by
+ * try_offline_node().
+ */
+void __ref remove_memory(int nid, u64 start, u64 size)
{
- unsigned long start_pfn, end_pfn;
- int ret = 0;
- int retry = 1;
-
- start_pfn = PFN_DOWN(start);
- end_pfn = PFN_UP(start + size - 1);
-
- /*
- * When CONFIG_MEMCG is on, one memory block may be used by other
- * blocks to store page cgroup when onlining pages. But we don't know
- * in what order pages are onlined. So we iterate twice to offline
- * memory:
- * 1st iterate: offline every non primary memory block.
- * 2nd iterate: offline primary (i.e. first added) memory block.
- */
-repeat:
- walk_memory_range(start_pfn, end_pfn, &ret,
- offline_memory_block_cb);
- if (ret) {
- if (!retry)
- return ret;
+ int ret;
- retry = 0;
- ret = 0;
- goto repeat;
- }
+ BUG_ON(check_hotplug_memory_range(start, size));
lock_memory_hotplug();
/*
- * we have offlined all memory blocks like this:
- * 1. lock memory hotplug
- * 2. offline a memory block
- * 3. unlock memory hotplug
- *
- * repeat step1-3 to offline the memory block. All memory blocks
- * must be offlined before removing memory. But we don't hold the
- * lock in the whole operation. So we should check whether all
- * memory blocks are offlined.
+ * All memory blocks must be offlined before removing memory. Check
+ * whether all memory blocks in question are offline and trigger a BUG()
+ * if this is not the case.
*/
-
- ret = walk_memory_range(start_pfn, end_pfn, NULL,
- is_memblock_offlined_cb);
+ ret = walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1), NULL,
+ check_memblock_offlined_cb);
if (ret) {
unlock_memory_hotplug();
- return ret;
+ BUG();
}
/* remove memmap entry */
@@ -1872,17 +1910,6 @@ repeat:
try_offline_node(nid);
unlock_memory_hotplug();
-
- return 0;
}
-#else
-int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
-{
- return -EINVAL;
-}
-int remove_memory(int nid, u64 start, u64 size)
-{
- return -EINVAL;
-}
-#endif /* CONFIG_MEMORY_HOTREMOVE */
EXPORT_SYMBOL_GPL(remove_memory);
+#endif /* CONFIG_MEMORY_HOTREMOVE */
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 74310017296e..30cc47f8ffa0 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -123,16 +123,19 @@ static struct mempolicy preferred_node_policy[MAX_NUMNODES];
static struct mempolicy *get_task_policy(struct task_struct *p)
{
struct mempolicy *pol = p->mempolicy;
- int node;
if (!pol) {
- node = numa_node_id();
- if (node != NUMA_NO_NODE)
- pol = &preferred_node_policy[node];
+ int node = numa_node_id();
- /* preferred_node_policy is not initialised early in boot */
- if (!pol->mode)
- pol = NULL;
+ if (node != NUMA_NO_NODE) {
+ pol = &preferred_node_policy[node];
+ /*
+ * preferred_node_policy is not initialised early in
+ * boot
+ */
+ if (!pol->mode)
+ pol = NULL;
+ }
}
return pol;
@@ -473,8 +476,11 @@ static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
static void migrate_page_add(struct page *page, struct list_head *pagelist,
unsigned long flags);
-/* Scan through pages checking if pages follow certain conditions. */
-static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
+/*
+ * Scan through pages, checking whether they satisfy the given conditions,
+ * and move them to the pagelist if they do.
+ */
+static int queue_pages_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, unsigned long end,
const nodemask_t *nodes, unsigned long flags,
void *private)
@@ -512,7 +518,36 @@ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
return addr != end;
}
-static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
+static void queue_pages_hugetlb_pmd_range(struct vm_area_struct *vma,
+ pmd_t *pmd, const nodemask_t *nodes, unsigned long flags,
+ void *private)
+{
+#ifdef CONFIG_HUGETLB_PAGE
+ int nid;
+ struct page *page;
+ spinlock_t *ptl;
+ pte_t entry;
+
+ ptl = huge_pte_lock(hstate_vma(vma), vma->vm_mm, (pte_t *)pmd);
+ entry = huge_ptep_get((pte_t *)pmd);
+ if (!pte_present(entry))
+ goto unlock;
+ page = pte_page(entry);
+ nid = page_to_nid(page);
+ if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
+ goto unlock;
+ /* With MPOL_MF_MOVE, we migrate only unshared hugepages. */
+ if (flags & (MPOL_MF_MOVE_ALL) ||
+ (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
+ isolate_huge_page(page, private);
+unlock:
+ spin_unlock(ptl);
+#else
+ BUG();
+#endif
+}
+
+static inline int queue_pages_pmd_range(struct vm_area_struct *vma, pud_t *pud,
unsigned long addr, unsigned long end,
const nodemask_t *nodes, unsigned long flags,
void *private)
@@ -523,17 +558,24 @@ static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
+ if (!pmd_present(*pmd))
+ continue;
+ if (pmd_huge(*pmd) && is_vm_hugetlb_page(vma)) {
+ queue_pages_hugetlb_pmd_range(vma, pmd, nodes,
+ flags, private);
+ continue;
+ }
split_huge_page_pmd(vma, addr, pmd);
if (pmd_none_or_trans_huge_or_clear_bad(pmd))
continue;
- if (check_pte_range(vma, pmd, addr, next, nodes,
+ if (queue_pages_pte_range(vma, pmd, addr, next, nodes,
flags, private))
return -EIO;
} while (pmd++, addr = next, addr != end);
return 0;
}
-static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
+static inline int queue_pages_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
unsigned long addr, unsigned long end,
const nodemask_t *nodes, unsigned long flags,
void *private)
@@ -544,16 +586,18 @@ static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
pud = pud_offset(pgd, addr);
do {
next = pud_addr_end(addr, end);
+ if (pud_huge(*pud) && is_vm_hugetlb_page(vma))
+ continue;
if (pud_none_or_clear_bad(pud))
continue;
- if (check_pmd_range(vma, pud, addr, next, nodes,
+ if (queue_pages_pmd_range(vma, pud, addr, next, nodes,
flags, private))
return -EIO;
} while (pud++, addr = next, addr != end);
return 0;
}
-static inline int check_pgd_range(struct vm_area_struct *vma,
+static inline int queue_pages_pgd_range(struct vm_area_struct *vma,
unsigned long addr, unsigned long end,
const nodemask_t *nodes, unsigned long flags,
void *private)
@@ -566,14 +610,14 @@ static inline int check_pgd_range(struct vm_area_struct *vma,
next = pgd_addr_end(addr, end);
if (pgd_none_or_clear_bad(pgd))
continue;
- if (check_pud_range(vma, pgd, addr, next, nodes,
+ if (queue_pages_pud_range(vma, pgd, addr, next, nodes,
flags, private))
return -EIO;
} while (pgd++, addr = next, addr != end);
return 0;
}
-#ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
+#ifdef CONFIG_NUMA_BALANCING
/*
* This is used to mark a range of virtual addresses to be inaccessible.
* These are later cleared by a NUMA hinting fault. Depending on these
@@ -587,7 +631,6 @@ unsigned long change_prot_numa(struct vm_area_struct *vma,
unsigned long addr, unsigned long end)
{
int nr_updated;
- BUILD_BUG_ON(_PAGE_NUMA != _PAGE_PROTNONE);
nr_updated = change_protection(vma, addr, end, vma->vm_page_prot, 0, 1);
if (nr_updated)
@@ -601,15 +644,17 @@ static unsigned long change_prot_numa(struct vm_area_struct *vma,
{
return 0;
}
-#endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */
+#endif /* CONFIG_NUMA_BALANCING */
/*
- * Check if all pages in a range are on a set of nodes.
- * If pagelist != NULL then isolate pages from the LRU and
- * put them on the pagelist.
+ * Walk through page tables and collect pages to be migrated.
+ *
+ * If pages found in a given range are on a set of nodes (determined by
+ * @nodes and @flags), they are isolated and queued to the pagelist,
+ * which is passed via @private.
*/
static struct vm_area_struct *
-check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
+queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
const nodemask_t *nodes, unsigned long flags, void *private)
{
int err;
@@ -635,9 +680,6 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
return ERR_PTR(-EFAULT);
}
- if (is_vm_hugetlb_page(vma))
- goto next;
-
if (flags & MPOL_MF_LAZY) {
change_prot_numa(vma, start, endvma);
goto next;
@@ -647,7 +689,7 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
vma_migratable(vma))) {
- err = check_pgd_range(vma, start, endvma, nodes,
+ err = queue_pages_pgd_range(vma, start, endvma, nodes,
flags, private);
if (err) {
first = ERR_PTR(err);
@@ -732,7 +774,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
if (prev) {
vma = prev;
next = vma->vm_next;
- continue;
+ if (mpol_equal(vma_policy(vma), new_pol))
+ continue;
+ /* vma_merge() joined vma && vma->next, case 8 */
+ goto replace;
}
if (vma->vm_start != vmstart) {
err = split_vma(vma->vm_mm, vma, vmstart, 1);
@@ -744,6 +789,7 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
if (err)
goto out;
}
+ replace:
err = vma_replace_policy(vma, new_pol);
if (err)
goto out;
@@ -753,36 +799,6 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
return err;
}
-/*
- * Update task->flags PF_MEMPOLICY bit: set iff non-default
- * mempolicy. Allows more rapid checking of this (combined perhaps
- * with other PF_* flag bits) on memory allocation hot code paths.
- *
- * If called from outside this file, the task 'p' should -only- be
- * a newly forked child not yet visible on the task list, because
- * manipulating the task flags of a visible task is not safe.
- *
- * The above limitation is why this routine has the funny name
- * mpol_fix_fork_child_flag().
- *
- * It is also safe to call this with a task pointer of current,
- * which the static wrapper mpol_set_task_struct_flag() does,
- * for use within this file.
- */
-
-void mpol_fix_fork_child_flag(struct task_struct *p)
-{
- if (p->mempolicy)
- p->flags |= PF_MEMPOLICY;
- else
- p->flags &= ~PF_MEMPOLICY;
-}
-
-static void mpol_set_task_struct_flag(void)
-{
- mpol_fix_fork_child_flag(current);
-}
-
/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
nodemask_t *nodes)
@@ -819,7 +835,6 @@ static long do_set_mempolicy(unsigned short mode, unsigned short flags,
}
old = current->mempolicy;
current->mempolicy = new;
- mpol_set_task_struct_flag();
if (new && new->mode == MPOL_INTERLEAVE &&
nodes_weight(new->v.nodes))
current->il_next = first_node(new->v.nodes);
@@ -986,7 +1001,11 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
- return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
+ if (PageHuge(page))
+ return alloc_huge_page_node(page_hstate(compound_head(page)),
+ node);
+ else
+ return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
}
/*
@@ -1009,14 +1028,14 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
* space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
*/
VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
- check_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
+ queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
flags | MPOL_MF_DISCONTIG_OK, &pagelist);
if (!list_empty(&pagelist)) {
err = migrate_pages(&pagelist, new_node_page, dest,
MIGRATE_SYNC, MR_SYSCALL);
if (err)
- putback_lru_pages(&pagelist);
+ putback_movable_pages(&pagelist);
}
return err;
@@ -1079,7 +1098,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
tmp = *from;
while (!nodes_empty(tmp)) {
int s,d;
- int source = -1;
+ int source = NUMA_NO_NODE;
int dest = 0;
for_each_node_mask(s, tmp) {
@@ -1114,7 +1133,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
if (!node_isset(dest, tmp))
break;
}
- if (source == -1)
+ if (source == NUMA_NO_NODE)
break;
node_clear(source, tmp);
@@ -1151,6 +1170,10 @@ static struct page *new_vma_page(struct page *page, unsigned long private, int *
vma = vma->vm_next;
}
+ if (PageHuge(page)) {
+ BUG_ON(!vma);
+ return alloc_huge_page_noerr(vma, address, 1);
+ }
/*
* if !vma, alloc_page_vma() will use task or system default policy
*/
@@ -1245,7 +1268,7 @@ static long do_mbind(unsigned long start, unsigned long len,
if (err)
goto mpol_out;
- vma = check_range(mm, start, end, nmask,
+ vma = queue_pages_range(mm, start, end, nmask,
flags | MPOL_MF_INVERT, &pagelist);
err = PTR_ERR(vma); /* maybe ... */
@@ -1261,13 +1284,13 @@ static long do_mbind(unsigned long start, unsigned long len,
(unsigned long)vma,
MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
if (nr_failed)
- putback_lru_pages(&pagelist);
+ putback_movable_pages(&pagelist);
}
if (nr_failed && (flags & MPOL_MF_STRICT))
err = -EIO;
} else
- putback_lru_pages(&pagelist);
+ putback_movable_pages(&pagelist);
up_write(&mm->mmap_sem);
mpol_out:
@@ -1506,10 +1529,10 @@ SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
#ifdef CONFIG_COMPAT
-asmlinkage long compat_sys_get_mempolicy(int __user *policy,
- compat_ulong_t __user *nmask,
- compat_ulong_t maxnode,
- compat_ulong_t addr, compat_ulong_t flags)
+COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
+ compat_ulong_t __user *, nmask,
+ compat_ulong_t, maxnode,
+ compat_ulong_t, addr, compat_ulong_t, flags)
{
long err;
unsigned long __user *nm = NULL;
@@ -1536,8 +1559,8 @@ asmlinkage long compat_sys_get_mempolicy(int __user *policy,
return err;
}
-asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
- compat_ulong_t maxnode)
+COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
+ compat_ulong_t, maxnode)
{
long err = 0;
unsigned long __user *nm = NULL;
@@ -1559,9 +1582,9 @@ asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
return sys_set_mempolicy(mode, nm, nr_bits+1);
}
-asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
- compat_ulong_t mode, compat_ulong_t __user *nmask,
- compat_ulong_t maxnode, compat_ulong_t flags)
+COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
+ compat_ulong_t, mode, compat_ulong_t __user *, nmask,
+ compat_ulong_t, maxnode, compat_ulong_t, flags)
{
long err = 0;
unsigned long __user *nm = NULL;
@@ -1629,6 +1652,30 @@ struct mempolicy *get_vma_policy(struct task_struct *task,
return pol;
}
+bool vma_policy_mof(struct task_struct *task, struct vm_area_struct *vma)
+{
+ struct mempolicy *pol = get_task_policy(task);
+ if (vma) {
+ if (vma->vm_ops && vma->vm_ops->get_policy) {
+ bool ret = false;
+
+ pol = vma->vm_ops->get_policy(vma, vma->vm_start);
+ if (pol && (pol->flags & MPOL_F_MOF))
+ ret = true;
+ mpol_cond_put(pol);
+
+ return ret;
+ } else if (vma->vm_policy) {
+ pol = vma->vm_policy;
+ }
+ }
+
+ if (!pol)
+ return default_policy.flags & MPOL_F_MOF;
+
+ return pol->flags & MPOL_F_MOF;
+}
+
static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
{
enum zone_type dynamic_policy_zone = policy_zone;
@@ -1708,21 +1755,18 @@ static unsigned interleave_nodes(struct mempolicy *policy)
/*
* Depending on the memory policy provide a node from which to allocate the
* next slab entry.
- * @policy must be protected by freeing by the caller. If @policy is
- * the current task's mempolicy, this protection is implicit, as only the
- * task can change it's policy. The system default policy requires no
- * such protection.
*/
-unsigned slab_node(void)
+unsigned int mempolicy_slab_node(void)
{
struct mempolicy *policy;
+ int node = numa_mem_id();
if (in_interrupt())
- return numa_node_id();
+ return node;
policy = current->mempolicy;
if (!policy || policy->flags & MPOL_F_LOCAL)
- return numa_node_id();
+ return node;
switch (policy->mode) {
case MPOL_PREFERRED:
@@ -1742,11 +1786,11 @@ unsigned slab_node(void)
struct zonelist *zonelist;
struct zone *zone;
enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
- zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
+ zonelist = &NODE_DATA(node)->node_zonelists[0];
(void)first_zones_zonelist(zonelist, highest_zoneidx,
&policy->v.nodes,
&zone);
- return zone ? zone->node : numa_node_id();
+ return zone ? zone->node : node;
}
default:
@@ -1761,7 +1805,7 @@ static unsigned offset_il_node(struct mempolicy *pol,
unsigned nnodes = nodes_weight(pol->v.nodes);
unsigned target;
int c;
- int nid = -1;
+ int nid = NUMA_NO_NODE;
if (!nnodes)
return numa_node_id();
@@ -1798,11 +1842,11 @@ static inline unsigned interleave_nid(struct mempolicy *pol,
/*
* Return the bit number of a random bit set in the nodemask.
- * (returns -1 if nodemask is empty)
+ * (returns NUMA_NO_NODE if nodemask is empty)
*/
int node_random(const nodemask_t *maskp)
{
- int w, bit = -1;
+ int w, bit = NUMA_NO_NODE;
w = nodes_weight(*maskp);
if (w)
@@ -1825,7 +1869,7 @@ int node_random(const nodemask_t *maskp)
* If the effective policy is 'BIND, returns a pointer to the mempolicy's
* @nodemask for filtering the zonelist.
*
- * Must be protected by get_mems_allowed()
+ * Must be protected by read_mems_allowed_begin()
*/
struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
gfp_t gfp_flags, struct mempolicy **mpol,
@@ -1989,7 +2033,7 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
retry_cpuset:
pol = get_vma_policy(current, vma, addr);
- cpuset_mems_cookie = get_mems_allowed();
+ cpuset_mems_cookie = read_mems_allowed_begin();
if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
unsigned nid;
@@ -1997,7 +2041,7 @@ retry_cpuset:
nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
mpol_cond_put(pol);
page = alloc_page_interleave(gfp, order, nid);
- if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
+ if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
goto retry_cpuset;
return page;
@@ -2007,7 +2051,7 @@ retry_cpuset:
policy_nodemask(gfp, pol));
if (unlikely(mpol_needs_cond_ref(pol)))
__mpol_put(pol);
- if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
+ if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
goto retry_cpuset;
return page;
}
@@ -2041,7 +2085,7 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
pol = &default_policy;
retry_cpuset:
- cpuset_mems_cookie = get_mems_allowed();
+ cpuset_mems_cookie = read_mems_allowed_begin();
/*
* No reference counting needed for current->mempolicy
@@ -2054,13 +2098,23 @@ retry_cpuset:
policy_zonelist(gfp, pol, numa_node_id()),
policy_nodemask(gfp, pol));
- if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
+ if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
goto retry_cpuset;
return page;
}
EXPORT_SYMBOL(alloc_pages_current);
+int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
+{
+ struct mempolicy *pol = mpol_dup(vma_policy(src));
+
+ if (IS_ERR(pol))
+ return PTR_ERR(pol);
+ dst->vm_policy = pol;
+ return 0;
+}
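
vma_dup_policy() wraps the mpol_dup()-and-assign pattern used when a vma is copied or split; a hedged caller sketch follows (the function is hypothetical, not from this patch):

static int dup_vma_policy_sketch(struct vm_area_struct *new,
				 struct vm_area_struct *old)
{
	int err = vma_dup_policy(old, new);	/* takes its own reference via mpol_dup() */

	if (err)
		return err;			/* typically -ENOMEM */
	/* if a later step fails, the caller drops the reference again:
	 *	mpol_put(vma_policy(new));
	 */
	return 0;
}
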
+
/*
* If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
* rebinds the mempolicy its copying by calling mpol_rebind_policy()
@@ -2240,6 +2294,8 @@ int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long
struct zone *zone;
int curnid = page_to_nid(page);
unsigned long pgoff;
+ int thiscpu = raw_smp_processor_id();
+ int thisnid = cpu_to_node(thiscpu);
int polnid = -1;
int ret = -1;
@@ -2288,33 +2344,9 @@ int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long
/* Migrate the page towards the node whose CPU is referencing it */
if (pol->flags & MPOL_F_MORON) {
- int last_nid;
-
- polnid = numa_node_id();
+ polnid = thisnid;
- /*
- * Multi-stage node selection is used in conjunction
- * with a periodic migration fault to build a temporal
- * task<->page relation. By using a two-stage filter we
- * remove short/unlikely relations.
- *
- * Using P(p) ~ n_p / n_t as per frequentist
- * probability, we can equate a task's usage of a
- * particular page (n_p) per total usage of this
- * page (n_t) (in a given time-span) to a probability.
- *
- * Our periodic faults will sample this probability and
- * getting the same result twice in a row, given these
- * samples are fully independent, is then given by
- * P(n)^2, provided our sample period is sufficiently
- * short compared to the usage pattern.
- *
- * This quadric squishes small probabilities, making
- * it less likely we act on an unlikely task<->page
- * relation.
- */
- last_nid = page_nid_xchg_last(page, polnid);
- if (last_nid != polnid)
+ if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
goto out;
}
@@ -2520,7 +2552,7 @@ void mpol_free_shared_policy(struct shared_policy *p)
}
#ifdef CONFIG_NUMA_BALANCING
-static bool __initdata numabalancing_override;
+static int __initdata numabalancing_override;
static void __init check_numabalancing_enable(void)
{
@@ -2529,9 +2561,15 @@ static void __init check_numabalancing_enable(void)
if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
numabalancing_default = true;
+ /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
+ if (numabalancing_override)
+ set_numabalancing_state(numabalancing_override == 1);
+
if (nr_node_ids > 1 && !numabalancing_override) {
- printk(KERN_INFO "Enabling automatic NUMA balancing. "
- "Configure with numa_balancing= or sysctl");
+ pr_info("%s automatic NUMA balancing. "
+ "Configure with numa_balancing= or the "
+ "kernel.numa_balancing sysctl",
+ numabalancing_default ? "Enabling" : "Disabling");
set_numabalancing_state(numabalancing_default);
}
}
@@ -2541,18 +2579,17 @@ static int __init setup_numabalancing(char *str)
int ret = 0;
if (!str)
goto out;
- numabalancing_override = true;
if (!strcmp(str, "enable")) {
- set_numabalancing_state(true);
+ numabalancing_override = 1;
ret = 1;
} else if (!strcmp(str, "disable")) {
- set_numabalancing_state(false);
+ numabalancing_override = -1;
ret = 1;
}
out:
if (!ret)
- printk(KERN_WARNING "Unable to parse numa_balancing=\n");
+ pr_warn("Unable to parse numa_balancing=\n");
return ret;
}
@@ -2780,62 +2817,45 @@ out:
* @maxlen: length of @buffer
* @pol: pointer to mempolicy to be formatted
*
- * Convert a mempolicy into a string.
- * Returns the number of characters in buffer (if positive)
- * or an error (negative)
+ * Convert @pol into a string. If @buffer is too short, truncate the string.
+ * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
+ * longest flag, "relative", and to display at least a few node ids.
*/
-int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
+void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
{
char *p = buffer;
- int l;
- nodemask_t nodes;
- unsigned short mode;
- unsigned short flags = pol ? pol->flags : 0;
+ nodemask_t nodes = NODE_MASK_NONE;
+ unsigned short mode = MPOL_DEFAULT;
+ unsigned short flags = 0;
- /*
- * Sanity check: room for longest mode, flag and some nodes
- */
- VM_BUG_ON(maxlen < strlen("interleave") + strlen("relative") + 16);
-
- if (!pol || pol == &default_policy)
- mode = MPOL_DEFAULT;
- else
+ if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
mode = pol->mode;
+ flags = pol->flags;
+ }
switch (mode) {
case MPOL_DEFAULT:
- nodes_clear(nodes);
break;
-
case MPOL_PREFERRED:
- nodes_clear(nodes);
if (flags & MPOL_F_LOCAL)
mode = MPOL_LOCAL;
else
node_set(pol->v.preferred_node, nodes);
break;
-
case MPOL_BIND:
- /* Fall through */
case MPOL_INTERLEAVE:
nodes = pol->v.nodes;
break;
-
default:
- return -EINVAL;
+ WARN_ON_ONCE(1);
+ snprintf(p, maxlen, "unknown");
+ return;
}
- l = strlen(policy_modes[mode]);
- if (buffer + maxlen < p + l + 1)
- return -ENOSPC;
-
- strcpy(p, policy_modes[mode]);
- p += l;
+ p += snprintf(p, maxlen, "%s", policy_modes[mode]);
if (flags & MPOL_MODE_FLAGS) {
- if (buffer + maxlen < p + 2)
- return -ENOSPC;
- *p++ = '=';
+ p += snprintf(p, buffer + maxlen - p, "=");
/*
* Currently, the only defined flags are mutually exclusive
@@ -2847,10 +2867,7 @@ int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
}
if (!nodes_empty(nodes)) {
- if (buffer + maxlen < p + 2)
- return -ENOSPC;
- *p++ = ':';
+ p += snprintf(p, buffer + maxlen - p, ":");
p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
}
- return p - buffer;
}
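
Since mpol_to_str() now returns void and truncates instead of failing, callers only need a reasonably sized buffer. A hedged usage sketch, with a hypothetical caller modelled on the /proc numa_maps style:

static void show_policy_sketch(struct seq_file *m, struct mempolicy *pol)
{
	char buf[64];	/* comfortably above the recommended 32-byte minimum */

	mpol_to_str(buf, sizeof(buf), pol);
	seq_printf(m, "policy=%s\n", buf);
}
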
diff --git a/mm/mempool.c b/mm/mempool.c
index 54990476c049..905434f18c97 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -73,7 +73,7 @@ mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
gfp_t gfp_mask, int node_id)
{
mempool_t *pool;
- pool = kmalloc_node(sizeof(*pool), gfp_mask | __GFP_ZERO, node_id);
+ pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
if (!pool)
return NULL;
pool->elements = kmalloc_node(min_nr * sizeof(void *),
@@ -304,9 +304,9 @@ void mempool_free(void *element, mempool_t *pool)
* ensures that there will be frees which return elements to the
* pool waking up the waiters.
*/
- if (pool->curr_nr < pool->min_nr) {
+ if (unlikely(pool->curr_nr < pool->min_nr)) {
spin_lock_irqsave(&pool->lock, flags);
- if (pool->curr_nr < pool->min_nr) {
+ if (likely(pool->curr_nr < pool->min_nr)) {
add_element(pool, element);
spin_unlock_irqrestore(&pool->lock, flags);
wake_up(&pool->wait);
diff --git a/mm/migrate.c b/mm/migrate.c
index 6f0c24438bba..bed48809e5d0 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -36,6 +36,7 @@
#include <linux/hugetlb_cgroup.h>
#include <linux/gfp.h>
#include <linux/balloon_compaction.h>
+#include <linux/mmu_notifier.h>
#include <asm/tlbflush.h>
@@ -71,28 +72,12 @@ int migrate_prep_local(void)
}
/*
- * Add isolated pages on the list back to the LRU under page lock
- * to avoid leaking evictable pages back onto unevictable list.
- */
-void putback_lru_pages(struct list_head *l)
-{
- struct page *page;
- struct page *page2;
-
- list_for_each_entry_safe(page, page2, l, lru) {
- list_del(&page->lru);
- dec_zone_page_state(page, NR_ISOLATED_ANON +
- page_is_file_cache(page));
- putback_lru_page(page);
- }
-}
-
-/*
* Put previously isolated pages back onto the appropriate lists
* from where they were once taken off for compaction/migration.
*
- * This function shall be used instead of putback_lru_pages(),
- * whenever the isolated pageset has been built by isolate_migratepages_range()
+ * This function shall be used whenever the isolated pageset has been
+ * built from LRU, balloon or hugetlbfs pages. See isolate_migratepages_range()
+ * and isolate_huge_page().
*/
void putback_movable_pages(struct list_head *l)
{
@@ -100,10 +85,14 @@ void putback_movable_pages(struct list_head *l)
struct page *page2;
list_for_each_entry_safe(page, page2, l, lru) {
+ if (unlikely(PageHuge(page))) {
+ putback_active_hugepage(page);
+ continue;
+ }
list_del(&page->lru);
dec_zone_page_state(page, NR_ISOLATED_ANON +
page_is_file_cache(page));
- if (unlikely(balloon_page_movable(page)))
+ if (unlikely(isolated_balloon_page(page)))
balloon_page_putback(page);
else
putback_lru_page(page);
@@ -126,7 +115,7 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
ptep = huge_pte_offset(mm, addr);
if (!ptep)
goto out;
- ptl = &mm->page_table_lock;
+ ptl = huge_pte_lockptr(hstate_vma(vma), mm, ptep);
} else {
pmd = mm_find_pmd(mm, addr);
if (!pmd)
@@ -157,6 +146,8 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
get_page(new);
pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
+ if (pte_swp_soft_dirty(*ptep))
+ pte = pte_mksoft_dirty(pte);
if (is_write_migration_entry(entry))
pte = pte_mkwrite(pte);
#ifdef CONFIG_HUGETLB_PAGE
@@ -187,12 +178,49 @@ out:
}
/*
+ * Congratulations to trinity for discovering this bug.
+ * mm/fremap.c's remap_file_pages() accepts any range within a single vma to
+ * convert that vma to VM_NONLINEAR; and generic_file_remap_pages() will then
+ * replace the specified range by file ptes throughout (maybe populated after).
+ * If page migration finds a page within that range, while it's still located
+ * by vma_interval_tree rather than lost to i_mmap_nonlinear list, no problem:
+ * zap_pte() clears the temporary migration entry before mmap_sem is dropped.
+ * But if the migrating page is in a part of the vma outside the range to be
+ * remapped, then it will not be cleared, and remove_migration_ptes() needs to
+ * deal with it. Fortunately, this part of the vma is of course still linear,
+ * so we just need to use linear location on the nonlinear list.
+ */
+static int remove_linear_migration_ptes_from_nonlinear(struct page *page,
+ struct address_space *mapping, void *arg)
+{
+ struct vm_area_struct *vma;
+ /* hugetlbfs does not support remap_pages, so no huge pgoff worries */
+ pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+ unsigned long addr;
+
+ list_for_each_entry(vma,
+ &mapping->i_mmap_nonlinear, shared.nonlinear) {
+
+ addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+ if (addr >= vma->vm_start && addr < vma->vm_end)
+ remove_migration_pte(page, vma, addr, arg);
+ }
+ return SWAP_AGAIN;
+}
+
+/*
* Get rid of all migration entries and replace them by
* references to the indicated page.
*/
static void remove_migration_ptes(struct page *old, struct page *new)
{
- rmap_walk(new, remove_migration_pte, old);
+ struct rmap_walk_control rwc = {
+ .rmap_one = remove_migration_pte,
+ .arg = old,
+ .file_nonlinear = remove_linear_migration_ptes_from_nonlinear,
+ };
+
+ rmap_walk(new, &rwc);
}
/*
@@ -243,9 +271,10 @@ void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
__migration_entry_wait(mm, ptep, ptl);
}
-void migration_entry_wait_huge(struct mm_struct *mm, pte_t *pte)
+void migration_entry_wait_huge(struct vm_area_struct *vma,
+ struct mm_struct *mm, pte_t *pte)
{
- spinlock_t *ptl = &(mm)->page_table_lock;
+ spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);
__migration_entry_wait(mm, pte, ptl);
}
@@ -307,16 +336,17 @@ static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
* 2 for pages with a mapping
* 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
*/
-static int migrate_page_move_mapping(struct address_space *mapping,
+int migrate_page_move_mapping(struct address_space *mapping,
struct page *newpage, struct page *page,
- struct buffer_head *head, enum migrate_mode mode)
+ struct buffer_head *head, enum migrate_mode mode,
+ int extra_count)
{
- int expected_count = 0;
+ int expected_count = 1 + extra_count;
void **pslot;
if (!mapping) {
/* Anonymous page without mapping */
- if (page_count(page) != 1)
+ if (page_count(page) != expected_count)
return -EAGAIN;
return MIGRATEPAGE_SUCCESS;
}
@@ -326,7 +356,7 @@ static int migrate_page_move_mapping(struct address_space *mapping,
pslot = radix_tree_lookup_slot(&mapping->page_tree,
page_index(page));
- expected_count = 2 + page_has_private(page);
+ expected_count += 1 + page_has_private(page);
if (page_count(page) != expected_count ||
radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
spin_unlock_irq(&mapping->tree_lock);
@@ -435,10 +465,60 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
}
/*
+ * Gigantic pages are so large that we do not guarantee that page++ pointer
+ * arithmetic will work across the entire page. We need something more
+ * specialized.
+ */
+static void __copy_gigantic_page(struct page *dst, struct page *src,
+ int nr_pages)
+{
+ int i;
+ struct page *dst_base = dst;
+ struct page *src_base = src;
+
+ for (i = 0; i < nr_pages; ) {
+ cond_resched();
+ copy_highpage(dst, src);
+
+ i++;
+ dst = mem_map_next(dst, dst_base, i);
+ src = mem_map_next(src, src_base, i);
+ }
+}
+
+static void copy_huge_page(struct page *dst, struct page *src)
+{
+ int i;
+ int nr_pages;
+
+ if (PageHuge(src)) {
+ /* hugetlbfs page */
+ struct hstate *h = page_hstate(src);
+ nr_pages = pages_per_huge_page(h);
+
+ if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {
+ __copy_gigantic_page(dst, src, nr_pages);
+ return;
+ }
+ } else {
+ /* thp page */
+ BUG_ON(!PageTransHuge(src));
+ nr_pages = hpage_nr_pages(src);
+ }
+
+ for (i = 0; i < nr_pages; i++) {
+ cond_resched();
+ copy_highpage(dst + i, src + i);
+ }
+}
+
+/*
* Copy the page to its new location
*/
void migrate_page_copy(struct page *newpage, struct page *page)
{
+ int cpupid;
+
if (PageHuge(page) || PageTransHuge(page))
copy_huge_page(newpage, page);
else
@@ -451,7 +531,7 @@ void migrate_page_copy(struct page *newpage, struct page *page)
if (PageUptodate(page))
SetPageUptodate(newpage);
if (TestClearPageActive(page)) {
- VM_BUG_ON(PageUnevictable(page));
+ VM_BUG_ON_PAGE(PageUnevictable(page), page);
SetPageActive(newpage);
} else if (TestClearPageUnevictable(page))
SetPageUnevictable(newpage);
@@ -475,6 +555,13 @@ void migrate_page_copy(struct page *newpage, struct page *page)
__set_page_dirty_nobuffers(newpage);
}
+ /*
+ * Copy NUMA information to the new page, to prevent over-eager
+ * future migrations of this same page.
+ */
+ cpupid = page_cpupid_xchg_last(page, -1);
+ page_cpupid_xchg_last(newpage, cpupid);
+
mlock_migrate_page(newpage, page);
ksm_migrate_page(newpage, page);
/*
@@ -497,14 +584,6 @@ void migrate_page_copy(struct page *newpage, struct page *page)
* Migration functions
***********************************************************/
-/* Always fail migration. Used for mappings that are not movable */
-int fail_migrate_page(struct address_space *mapping,
- struct page *newpage, struct page *page)
-{
- return -EIO;
-}
-EXPORT_SYMBOL(fail_migrate_page);
-
/*
* Common logic to directly migrate a single page suitable for
* pages that do not use PagePrivate/PagePrivate2.
@@ -519,7 +598,7 @@ int migrate_page(struct address_space *mapping,
BUG_ON(PageWriteback(page)); /* Writeback must be complete */
- rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode);
+ rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
if (rc != MIGRATEPAGE_SUCCESS)
return rc;
@@ -546,7 +625,7 @@ int buffer_migrate_page(struct address_space *mapping,
head = page_buffers(page);
- rc = migrate_page_move_mapping(mapping, newpage, page, head, mode);
+ rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0);
if (rc != MIGRATEPAGE_SUCCESS)
return rc;
@@ -824,7 +903,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
* free the metadata, so the page can be freed.
*/
if (!page->mapping) {
- VM_BUG_ON(PageAnon(page));
+ VM_BUG_ON_PAGE(PageAnon(page), page);
if (page_has_private(page)) {
try_to_free_buffers(page);
goto uncharge;
@@ -942,9 +1021,22 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
{
int rc = 0;
int *result = NULL;
- struct page *new_hpage = get_new_page(hpage, private, &result);
+ struct page *new_hpage;
struct anon_vma *anon_vma = NULL;
+ /*
+ * Movability of hugepages depends on the architecture and the hugepage size.
+ * This check is necessary because some callers of hugepage migration
+ * like soft offline and memory hotremove don't walk through page
+ * tables or check whether the hugepage is pmd-based or not before
+ * kicking migration.
+ */
+ if (!hugepage_migration_support(page_hstate(hpage))) {
+ putback_active_hugepage(hpage);
+ return -ENOSYS;
+ }
+
+ new_hpage = get_new_page(hpage, private, &result);
if (!new_hpage)
return -ENOMEM;
@@ -975,6 +1067,8 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
unlock_page(hpage);
out:
+ if (rc != -EAGAIN)
+ putback_active_hugepage(hpage);
put_page(new_hpage);
if (result) {
if (rc)
@@ -1025,7 +1119,11 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
list_for_each_entry_safe(page, page2, from, lru) {
cond_resched();
- rc = unmap_and_move(get_new_page, private,
+ if (PageHuge(page))
+ rc = unmap_and_move_huge_page(get_new_page,
+ private, page, pass > 2, mode);
+ else
+ rc = unmap_and_move(get_new_page, private,
page, pass > 2, mode);
switch(rc) {
@@ -1038,7 +1136,12 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
nr_succeeded++;
break;
default:
- /* Permanent failure */
+ /*
+ * Permanent failure (-EBUSY, -ENOSYS, etc.):
+ * unlike the -EAGAIN case, the failed page is
+ * removed from the migration page list and not
+ * retried in the next outer loop.
+ */
nr_failed++;
break;
}
@@ -1058,32 +1161,6 @@ out:
return rc;
}
-int migrate_huge_page(struct page *hpage, new_page_t get_new_page,
- unsigned long private, enum migrate_mode mode)
-{
- int pass, rc;
-
- for (pass = 0; pass < 10; pass++) {
- rc = unmap_and_move_huge_page(get_new_page, private,
- hpage, pass > 2, mode);
- switch (rc) {
- case -ENOMEM:
- goto out;
- case -EAGAIN:
- /* try again */
- cond_resched();
- break;
- case MIGRATEPAGE_SUCCESS:
- goto out;
- default:
- rc = -EIO;
- goto out;
- }
- }
-out:
- return rc;
-}
-
#ifdef CONFIG_NUMA
/*
* Move a list of individual pages
@@ -1108,8 +1185,12 @@ static struct page *new_page_node(struct page *p, unsigned long private,
*result = &pm->status;
- return alloc_pages_exact_node(pm->node,
- GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
+ if (PageHuge(p))
+ return alloc_huge_page_node(page_hstate(compound_head(p)),
+ pm->node);
+ else
+ return alloc_pages_exact_node(pm->node,
+ GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0);
}
/*
@@ -1168,6 +1249,11 @@ static int do_move_page_to_node_array(struct mm_struct *mm,
!migrate_all)
goto put_and_set;
+ if (PageHuge(page)) {
+ isolate_huge_page(page, &pagelist);
+ goto put_and_set;
+ }
+
err = isolate_lru_page(page);
if (!err) {
list_add_tail(&page->lru, &pagelist);
@@ -1190,7 +1276,7 @@ set_status:
err = migrate_pages(&pagelist, new_page_node,
(unsigned long)pm, MIGRATE_SYNC, MR_SYSCALL);
if (err)
- putback_lru_pages(&pagelist);
+ putback_movable_pages(&pagelist);
}
up_read(&mm->mmap_sem);
@@ -1468,7 +1554,7 @@ static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
if (!populated_zone(zone))
continue;
- if (zone->all_unreclaimable)
+ if (!zone_reclaimable(zone))
continue;
/* Avoid waking kswapd by allocating pages_to_migrate pages. */
@@ -1490,12 +1576,10 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
struct page *newpage;
newpage = alloc_pages_exact_node(nid,
- (GFP_HIGHUSER_MOVABLE | GFP_THISNODE |
- __GFP_NOMEMALLOC | __GFP_NORETRY |
- __GFP_NOWARN) &
+ (GFP_HIGHUSER_MOVABLE |
+ __GFP_THISNODE | __GFP_NOMEMALLOC |
+ __GFP_NORETRY | __GFP_NOWARN) &
~GFP_IOFS, 0);
- if (newpage)
- page_nid_xchg_last(newpage, page_nid_last(page));
return newpage;
}
@@ -1529,35 +1613,42 @@ bool migrate_ratelimited(int node)
}
/* Returns true if the node is migrate rate-limited after the update */
-bool numamigrate_update_ratelimit(pg_data_t *pgdat, unsigned long nr_pages)
+static bool numamigrate_update_ratelimit(pg_data_t *pgdat,
+ unsigned long nr_pages)
{
- bool rate_limited = false;
-
/*
* Rate-limit the amount of data that is being migrated to a node.
* Optimal placement is no good if the memory bus is saturated and
* all the time is being spent migrating!
*/
- spin_lock(&pgdat->numabalancing_migrate_lock);
if (time_after(jiffies, pgdat->numabalancing_migrate_next_window)) {
+ spin_lock(&pgdat->numabalancing_migrate_lock);
pgdat->numabalancing_migrate_nr_pages = 0;
pgdat->numabalancing_migrate_next_window = jiffies +
msecs_to_jiffies(migrate_interval_millisecs);
+ spin_unlock(&pgdat->numabalancing_migrate_lock);
}
- if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages)
- rate_limited = true;
- else
- pgdat->numabalancing_migrate_nr_pages += nr_pages;
- spin_unlock(&pgdat->numabalancing_migrate_lock);
-
- return rate_limited;
+ if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages) {
+ trace_mm_numa_migrate_ratelimit(current, pgdat->node_id,
+ nr_pages);
+ return true;
+ }
+
+ /*
+ * This is an unlocked non-atomic update so errors are possible.
+ * The consequences are failing to migrate when we potentially should
+ * have, which is not severe enough to warrant locking. If it is ever
+ * a problem, it can be converted to a per-cpu counter.
+ */
+ pgdat->numabalancing_migrate_nr_pages += nr_pages;
+ return false;
}
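
If the unlocked update above ever became a problem, the per-cpu counter the comment mentions could look roughly like this (hedged sketch, not part of the patch; names are illustrative):

static DEFINE_PER_CPU(unsigned long, numab_migrate_pages_sketch);

static void numab_account_pages_sketch(unsigned long nr_pages)
{
	/* per-cpu add: no lock and no shared cacheline bouncing */
	this_cpu_add(numab_migrate_pages_sketch, nr_pages);
}
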
-int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
+static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
{
int page_lru;
- VM_BUG_ON(compound_order(page) && !PageTransHuge(page));
+ VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);
/* Avoid migrating to a node that is nearly full */
if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page)))
@@ -1591,12 +1682,25 @@ int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
return 1;
}
+bool pmd_trans_migrating(pmd_t pmd)
+{
+ struct page *page = pmd_page(pmd);
+ return PageLocked(page);
+}
+
+void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd)
+{
+ struct page *page = pmd_page(*pmd);
+ wait_on_page_locked(page);
+}
+
/*
* Attempt to migrate a misplaced page to the specified destination
* node. Caller is expected to have an elevated reference count on
* the page that will be dropped by this function before returning.
*/
-int migrate_misplaced_page(struct page *page, int node)
+int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
+ int node)
{
pg_data_t *pgdat = NODE_DATA(node);
int isolated;
@@ -1604,10 +1708,11 @@ int migrate_misplaced_page(struct page *page, int node)
LIST_HEAD(migratepages);
/*
- * Don't migrate pages that are mapped in multiple processes.
- * TODO: Handle false sharing detection instead of this hammer
+ * Don't migrate file pages that are mapped in multiple processes
+ * with execute permissions as they are probably shared libraries.
*/
- if (page_mapcount(page) != 1)
+ if (page_mapcount(page) != 1 && page_is_file_cache(page) &&
+ (vma->vm_flags & VM_EXEC))
goto out;
/*
@@ -1626,7 +1731,12 @@ int migrate_misplaced_page(struct page *page, int node)
nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
node, MIGRATE_ASYNC, MR_NUMA_MISPLACED);
if (nr_remaining) {
- putback_lru_pages(&migratepages);
+ if (!list_empty(&migratepages)) {
+ list_del(&page->lru);
+ dec_zone_page_state(page, NR_ISOLATED_ANON +
+ page_is_file_cache(page));
+ putback_lru_page(page);
+ }
isolated = 0;
} else
count_vm_numa_event(NUMA_PAGE_MIGRATE);
@@ -1650,19 +1760,15 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
unsigned long address,
struct page *page, int node)
{
- unsigned long haddr = address & HPAGE_PMD_MASK;
+ spinlock_t *ptl;
pg_data_t *pgdat = NODE_DATA(node);
int isolated = 0;
struct page *new_page = NULL;
struct mem_cgroup *memcg = NULL;
int page_lru = page_is_file_cache(page);
-
- /*
- * Don't migrate pages that are mapped in multiple processes.
- * TODO: Handle false sharing detection instead of this hammer
- */
- if (page_mapcount(page) != 1)
- goto out_dropref;
+ unsigned long mmun_start = address & HPAGE_PMD_MASK;
+ unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE;
+ pmd_t orig_entry;
/*
* Rate-limit the amount of data that is being migrated to a node.
@@ -1673,18 +1779,20 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
goto out_dropref;
new_page = alloc_pages_node(node,
- (GFP_TRANSHUGE | GFP_THISNODE) & ~__GFP_WAIT, HPAGE_PMD_ORDER);
+ (GFP_TRANSHUGE | __GFP_THISNODE) & ~__GFP_WAIT,
+ HPAGE_PMD_ORDER);
if (!new_page)
goto out_fail;
- page_nid_xchg_last(new_page, page_nid_last(page));
-
isolated = numamigrate_isolate_page(pgdat, page);
if (!isolated) {
put_page(new_page);
goto out_fail;
}
+ if (mm_tlb_flush_pending(mm))
+ flush_tlb_range(vma, mmun_start, mmun_end);
+
/* Prepare a page as a migration target */
__set_page_locked(new_page);
SetPageSwapBacked(new_page);
@@ -1696,9 +1804,12 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
WARN_ON(PageLRU(new_page));
/* Recheck the target PMD */
- spin_lock(&mm->page_table_lock);
- if (unlikely(!pmd_same(*pmd, entry))) {
- spin_unlock(&mm->page_table_lock);
+ mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
+ ptl = pmd_lock(mm, pmd);
+ if (unlikely(!pmd_same(*pmd, entry) || page_count(page) != 2)) {
+fail_putback:
+ spin_unlock(ptl);
+ mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
/* Reverse changes made by migrate_page_copy() */
if (TestClearPageActive(new_page))
@@ -1710,12 +1821,13 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
unlock_page(new_page);
put_page(new_page); /* Free it */
- unlock_page(page);
+ /* Retake the callers reference and putback on LRU */
+ get_page(page);
putback_lru_page(page);
+ mod_zone_page_state(page_zone(page),
+ NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);
- count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
- isolated = 0;
- goto out;
+ goto out_unlock;
}
/*
@@ -1727,23 +1839,43 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
*/
mem_cgroup_prepare_migration(page, new_page, &memcg);
+ orig_entry = *pmd;
entry = mk_pmd(new_page, vma->vm_page_prot);
- entry = pmd_mknonnuma(entry);
- entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
entry = pmd_mkhuge(entry);
+ entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
- page_add_new_anon_rmap(new_page, vma, haddr);
-
- set_pmd_at(mm, haddr, pmd, entry);
+ /*
+ * Clear the old entry under pagetable lock and establish the new PTE.
+ * Any parallel GUP will either observe the old page blocking on the
+ * page lock, block on the page table lock or observe the new page.
+ * The SetPageUptodate on the new page and page_add_new_anon_rmap
+ * guarantee the copy is visible before the pagetable update.
+ */
+ flush_cache_range(vma, mmun_start, mmun_end);
+ page_add_new_anon_rmap(new_page, vma, mmun_start);
+ pmdp_clear_flush(vma, mmun_start, pmd);
+ set_pmd_at(mm, mmun_start, pmd, entry);
+ flush_tlb_range(vma, mmun_start, mmun_end);
update_mmu_cache_pmd(vma, address, &entry);
+
+ if (page_count(page) != 2) {
+ set_pmd_at(mm, mmun_start, pmd, orig_entry);
+ flush_tlb_range(vma, mmun_start, mmun_end);
+ update_mmu_cache_pmd(vma, address, &entry);
+ page_remove_rmap(new_page);
+ goto fail_putback;
+ }
+
page_remove_rmap(page);
+
/*
* Finish the charge transaction under the page table lock to
* prevent split_huge_page() from dividing up the charge
* before it's fully transferred to the new page.
*/
mem_cgroup_end_migration(memcg, page, new_page, true);
- spin_unlock(&mm->page_table_lock);
+ spin_unlock(ptl);
+ mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
unlock_page(new_page);
unlock_page(page);
@@ -1753,7 +1885,6 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);
count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);
-out:
mod_zone_page_state(page_zone(page),
NR_ISOLATED_ANON + page_lru,
-HPAGE_PMD_NR);
@@ -1762,6 +1893,15 @@ out:
out_fail:
count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
out_dropref:
+ ptl = pmd_lock(mm, pmd);
+ if (pmd_same(*pmd, entry)) {
+ entry = pmd_mknonnuma(entry);
+ set_pmd_at(mm, mmun_start, pmd, entry);
+ update_mmu_cache_pmd(vma, address, &entry);
+ }
+ spin_unlock(ptl);
+
+out_unlock:
unlock_page(page);
put_page(page);
return 0;
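
NUMA hinting faults drive the misplaced-page path above from inside the kernel, but the same migration machinery can be poked from userspace through move_pages(2). A minimal sketch, assuming a NUMA machine with nodes 0 and 1 online and libnuma's <numaif.h> available (link with -lnuma); the target node is only an example:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <numaif.h>

int main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);
	void *buf;

	if (posix_memalign(&buf, page_size, page_size))
		return 1;
	memset(buf, 0, page_size);		/* fault the page in */

	void *pages[1] = { buf };
	int status[1] = { -1 };

	/* nodes == NULL: only report which node the page currently lives on */
	if (move_pages(0, 1, pages, NULL, status, 0) == 0)
		printf("page is on node %d\n", status[0]);

	/* ask the kernel to migrate the page to node 1 (hypothetical target) */
	int target[1] = { 1 };
	if (move_pages(0, 1, pages, target, status, MPOL_MF_MOVE) == 0)
		printf("after move request, status: %d\n", status[0]);

	free(buf);
	return 0;
}
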
diff --git a/mm/mincore.c b/mm/mincore.c
index da2be56a7b8f..725c80961048 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -70,13 +70,21 @@ static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
* any other file mapping (ie. marked !present and faulted in with
* tmpfs's .fault). So swapped out tmpfs mappings are tested here.
*/
- page = find_get_page(mapping, pgoff);
#ifdef CONFIG_SWAP
- /* shmem/tmpfs may return swap: account for swapcache page too. */
- if (radix_tree_exceptional_entry(page)) {
- swp_entry_t swap = radix_to_swp_entry(page);
- page = find_get_page(swap_address_space(swap), swap.val);
- }
+ if (shmem_mapping(mapping)) {
+ page = find_get_entry(mapping, pgoff);
+ /*
+ * shmem/tmpfs may return swap: account for swapcache
+ * page too.
+ */
+ if (radix_tree_exceptional_entry(page)) {
+ swp_entry_t swp = radix_to_swp_entry(page);
+ page = find_get_page(swap_address_space(swp), swp.val);
+ }
+ } else
+ page = find_get_page(mapping, pgoff);
+#else
+ page = find_get_page(mapping, pgoff);
#endif
if (page) {
present = PageUptodate(page);
@@ -225,13 +233,6 @@ static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *v
end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));
- if (is_vm_hugetlb_page(vma)) {
- mincore_hugetlb_page_range(vma, addr, end, vec);
- return (end - addr) >> PAGE_SHIFT;
- }
-
- end = pmd_addr_end(addr, end);
-
if (is_vm_hugetlb_page(vma))
mincore_hugetlb_page_range(vma, addr, end, vec);
else
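
The mincore_page() change above only affects how shmem-backed pages are looked up; the mincore(2) contract seen from userspace stays the same. A small sketch of that contract, using an anonymous shared mapping so the shmem path is the one consulted:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	size_t len = 4 * page;
	unsigned char vec[4];

	/* anonymous shared mapping: backed by shmem, like tmpfs */
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return 1;

	memset(buf, 0xaa, 2 * page);	/* fault in the first two pages only */

	if (mincore(buf, len, vec) == 0) {
		for (int i = 0; i < 4; i++)
			printf("page %d: %s\n", i,
			       (vec[i] & 1) ? "resident" : "not resident");
	}

	munmap(buf, len);
	return 0;
}
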
diff --git a/mm/mlock.c b/mm/mlock.c
index 79b7cf7d1bca..b1eb53634005 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -11,6 +11,7 @@
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
+#include <linux/pagevec.h>
#include <linux/mempolicy.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
@@ -18,6 +19,8 @@
#include <linux/rmap.h>
#include <linux/mmzone.h>
#include <linux/hugetlb.h>
+#include <linux/memcontrol.h>
+#include <linux/mm_inline.h>
#include "internal.h"
@@ -76,6 +79,7 @@ void clear_page_mlock(struct page *page)
*/
void mlock_vma_page(struct page *page)
{
+ /* Serialize with page migration */
BUG_ON(!PageLocked(page));
if (!TestSetPageMlocked(page)) {
@@ -87,9 +91,73 @@ void mlock_vma_page(struct page *page)
}
}
+/*
+ * Isolate a page from LRU with optional get_page() pin.
+ * Assumes lru_lock already held and page already pinned.
+ */
+static bool __munlock_isolate_lru_page(struct page *page, bool getpage)
+{
+ if (PageLRU(page)) {
+ struct lruvec *lruvec;
+
+ lruvec = mem_cgroup_page_lruvec(page, page_zone(page));
+ if (getpage)
+ get_page(page);
+ ClearPageLRU(page);
+ del_page_from_lru_list(page, lruvec, page_lru(page));
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Finish munlock after successful page isolation
+ *
+ * Page must be locked. This is a wrapper for try_to_munlock()
+ * and putback_lru_page() with munlock accounting.
+ */
+static void __munlock_isolated_page(struct page *page)
+{
+ int ret = SWAP_AGAIN;
+
+ /*
+ * Optimization: if the page was mapped just once, that's our mapping
+ * and we don't need to check all the other vmas.
+ */
+ if (page_mapcount(page) > 1)
+ ret = try_to_munlock(page);
+
+ /* Did try_to_munlock() succeed or punt? */
+ if (ret != SWAP_MLOCK)
+ count_vm_event(UNEVICTABLE_PGMUNLOCKED);
+
+ putback_lru_page(page);
+}
+
+/*
+ * Accounting for page isolation failure during munlock
+ *
+ * Performs accounting when page isolation fails in munlock. There is nothing
+ * else to do because it means some other task has already removed the page
+ * from the LRU. putback_lru_page() will take care of removing the page from
+ * the unevictable list, if necessary. vmscan [page_referenced()] will move
+ * the page back to the unevictable list if some other vma has it mlocked.
+ */
+static void __munlock_isolation_failed(struct page *page)
+{
+ if (PageUnevictable(page))
+ __count_vm_event(UNEVICTABLE_PGSTRANDED);
+ else
+ __count_vm_event(UNEVICTABLE_PGMUNLOCKED);
+}
+
/**
* munlock_vma_page - munlock a vma page
- * @page - page to be unlocked
+ * @page - page to be unlocked, either a normal page or THP page head
+ *
+ * returns the size of the page as a page mask (0 for normal page,
+ * HPAGE_PMD_NR - 1 for THP head page)
*
* called from munlock()/munmap() path with page supposedly on the LRU.
* When we munlock a page, because the vma where we found the page is being
@@ -104,48 +172,37 @@ void mlock_vma_page(struct page *page)
*/
unsigned int munlock_vma_page(struct page *page)
{
- unsigned int page_mask = 0;
+ unsigned int nr_pages;
+ struct zone *zone = page_zone(page);
+ /* For try_to_munlock() and to serialize with page migration */
BUG_ON(!PageLocked(page));
- if (TestClearPageMlocked(page)) {
- unsigned int nr_pages = hpage_nr_pages(page);
- mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
- page_mask = nr_pages - 1;
- if (!isolate_lru_page(page)) {
- int ret = SWAP_AGAIN;
+ /*
+ * Serialize with any parallel __split_huge_page_refcount() which
+ * might otherwise copy PageMlocked to part of the tail pages before
+ * we clear it in the head page. It also stabilizes hpage_nr_pages().
+ */
+ spin_lock_irq(&zone->lru_lock);
- /*
- * Optimization: if the page was mapped just once,
- * that's our mapping and we don't need to check all the
- * other vmas.
- */
- if (page_mapcount(page) > 1)
- ret = try_to_munlock(page);
- /*
- * did try_to_unlock() succeed or punt?
- */
- if (ret != SWAP_MLOCK)
- count_vm_event(UNEVICTABLE_PGMUNLOCKED);
+ nr_pages = hpage_nr_pages(page);
+ if (!TestClearPageMlocked(page))
+ goto unlock_out;
- putback_lru_page(page);
- } else {
- /*
- * Some other task has removed the page from the LRU.
- * putback_lru_page() will take care of removing the
- * page from the unevictable list, if necessary.
- * vmscan [page_referenced()] will move the page back
- * to the unevictable list if some other vma has it
- * mlocked.
- */
- if (PageUnevictable(page))
- count_vm_event(UNEVICTABLE_PGSTRANDED);
- else
- count_vm_event(UNEVICTABLE_PGMUNLOCKED);
- }
+ __mod_zone_page_state(zone, NR_MLOCK, -nr_pages);
+
+ if (__munlock_isolate_lru_page(page, true)) {
+ spin_unlock_irq(&zone->lru_lock);
+ __munlock_isolated_page(page);
+ goto out;
}
+ __munlock_isolation_failed(page);
- return page_mask;
+unlock_out:
+ spin_unlock_irq(&zone->lru_lock);
+
+out:
+ return nr_pages - 1;
}
/**
@@ -210,6 +267,190 @@ static int __mlock_posix_error_return(long retval)
}
/*
+ * Prepare page for fast batched LRU putback via __putback_lru_fast()
+ *
+ * The fast path is available only for evictable pages with a single mapping.
+ * Then we can bypass the per-cpu pvec and get better performance.
+ * When mapcount > 1 we need try_to_munlock(), which can fail.
+ * When !page_evictable(), we need the full redo logic of putback_lru_page to
+ * avoid leaving an evictable page on the unevictable list.
+ *
+ * On success, @page is added to @pvec and @pgrescued is incremented
+ * if the page was previously unevictable. @page is also unlocked.
+ */
+static bool __putback_lru_fast_prepare(struct page *page, struct pagevec *pvec,
+ int *pgrescued)
+{
+ VM_BUG_ON_PAGE(PageLRU(page), page);
+ VM_BUG_ON_PAGE(!PageLocked(page), page);
+
+ if (page_mapcount(page) <= 1 && page_evictable(page)) {
+ pagevec_add(pvec, page);
+ if (TestClearPageUnevictable(page))
+ (*pgrescued)++;
+ unlock_page(page);
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Putback multiple evictable pages to the LRU
+ *
+ * Batched putback of evictable pages that bypasses the per-cpu pvec. Some of
+ * the pages might have meanwhile become unevictable but that is OK.
+ */
+static void __putback_lru_fast(struct pagevec *pvec, int pgrescued)
+{
+ count_vm_events(UNEVICTABLE_PGMUNLOCKED, pagevec_count(pvec));
+ /*
+ *__pagevec_lru_add() calls release_pages() so we don't call
+ * put_page() explicitly
+ */
+ __pagevec_lru_add(pvec);
+ count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
+}
+
+/*
+ * Munlock a batch of pages from the same zone
+ *
+ * The work is split to two main phases. First phase clears the Mlocked flag
+ * and attempts to isolate the pages, all under a single zone lru lock.
+ * The second phase finishes the munlock only for pages where isolation
+ * succeeded.
+ *
+ * Note that the pagevec may be modified during the process.
+ */
+static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
+{
+ int i;
+ int nr = pagevec_count(pvec);
+ int delta_munlocked;
+ struct pagevec pvec_putback;
+ int pgrescued = 0;
+
+ pagevec_init(&pvec_putback, 0);
+
+ /* Phase 1: page isolation */
+ spin_lock_irq(&zone->lru_lock);
+ for (i = 0; i < nr; i++) {
+ struct page *page = pvec->pages[i];
+
+ if (TestClearPageMlocked(page)) {
+ /*
+ * We already have pin from follow_page_mask()
+ * so we can spare the get_page() here.
+ */
+ if (__munlock_isolate_lru_page(page, false))
+ continue;
+ else
+ __munlock_isolation_failed(page);
+ }
+
+ /*
+ * We won't be munlocking this page in the next phase
+ * but we still need to release the follow_page_mask()
+ * pin. We cannot do it under lru_lock however. If it's
+ * the last pin, __page_cache_release() would deadlock.
+ */
+ pagevec_add(&pvec_putback, pvec->pages[i]);
+ pvec->pages[i] = NULL;
+ }
+ delta_munlocked = -nr + pagevec_count(&pvec_putback);
+ __mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
+ spin_unlock_irq(&zone->lru_lock);
+
+ /* Now we can release pins of pages that we are not munlocking */
+ pagevec_release(&pvec_putback);
+
+ /* Phase 2: page munlock */
+ for (i = 0; i < nr; i++) {
+ struct page *page = pvec->pages[i];
+
+ if (page) {
+ lock_page(page);
+ if (!__putback_lru_fast_prepare(page, &pvec_putback,
+ &pgrescued)) {
+ /*
+ * Slow path. We don't want to lose the last
+ * pin before unlock_page()
+ */
+ get_page(page); /* for putback_lru_page() */
+ __munlock_isolated_page(page);
+ unlock_page(page);
+ put_page(page); /* from follow_page_mask() */
+ }
+ }
+ }
+
+ /*
+ * Phase 3: page putback for pages that qualified for the fast path
+ * This will also call put_page() to return pin from follow_page_mask()
+ */
+ if (pagevec_count(&pvec_putback))
+ __putback_lru_fast(&pvec_putback, pgrescued);
+}
+
+/*
+ * Fill up pagevec for __munlock_pagevec using pte walk
+ *
+ * The function expects that the struct page corresponding to @start address is
+ * a non-THP page already pinned and in the @pvec, and that it belongs to @zone.
+ *
+ * The rest of @pvec is filled by subsequent pages within the same pmd and same
+ * zone, as long as the pte's are present and vm_normal_page() succeeds. These
+ * pages also get pinned.
+ *
+ * Returns the address of the next page that should be scanned. This equals
+ * @start + PAGE_SIZE when no page could be added by the pte walk.
+ */
+static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
+ struct vm_area_struct *vma, int zoneid, unsigned long start,
+ unsigned long end)
+{
+ pte_t *pte;
+ spinlock_t *ptl;
+
+ /*
+ * Initialize pte walk starting at the already pinned page where we
+ * are sure that there is a pte, as it was pinned under the same
+ * mmap_sem write op.
+ */
+ pte = get_locked_pte(vma->vm_mm, start, &ptl);
+ /* Make sure we do not cross the page table boundary */
+ end = pgd_addr_end(start, end);
+ end = pud_addr_end(start, end);
+ end = pmd_addr_end(start, end);
+
+ /* The page next to the pinned page is the first we will try to get */
+ start += PAGE_SIZE;
+ while (start < end) {
+ struct page *page = NULL;
+ pte++;
+ if (pte_present(*pte))
+ page = vm_normal_page(vma, start, *pte);
+ /*
+ * Break if page could not be obtained or the page's node+zone does not
+ * match
+ */
+ if (!page || page_zone_id(page) != zoneid)
+ break;
+
+ get_page(page);
+ /*
+ * Increase the address that will be returned *before* the
+ * eventual break that happens when adding the page fills up the pvec
+ */
+ start += PAGE_SIZE;
+ if (pagevec_add(pvec, page) == 0)
+ break;
+ }
+ pte_unmap_unlock(pte, ptl);
+ return start;
+}
+
+/*
* munlock_vma_pages_range() - munlock all pages in the vma range.
* @vma - vma containing range to be munlock()ed.
* @start - start address in @vma of the range
@@ -233,9 +474,14 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
vma->vm_flags &= ~VM_LOCKED;
while (start < end) {
- struct page *page;
- unsigned int page_mask, page_increm;
-
+ struct page *page = NULL;
+ unsigned int page_mask;
+ unsigned long page_increm;
+ struct pagevec pvec;
+ struct zone *zone;
+ int zoneid;
+
+ pagevec_init(&pvec, 0);
/*
* Although FOLL_DUMP is intended for get_dump_page(),
* it just so happens that its special treatment of the
@@ -244,21 +490,47 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
* has sneaked into the range, we won't oops here: great).
*/
page = follow_page_mask(vma, start, FOLL_GET | FOLL_DUMP,
- &page_mask);
+ &page_mask);
+
if (page && !IS_ERR(page)) {
- lock_page(page);
- lru_add_drain();
- /*
- * Any THP page found by follow_page_mask() may have
- * gotten split before reaching munlock_vma_page(),
- * so we need to recompute the page_mask here.
- */
- page_mask = munlock_vma_page(page);
- unlock_page(page);
- put_page(page);
+ if (PageTransHuge(page)) {
+ lock_page(page);
+ /*
+ * Any THP page found by follow_page_mask() may
+ * have gotten split before reaching
+ * munlock_vma_page(), so we need to recompute
+ * the page_mask here.
+ */
+ page_mask = munlock_vma_page(page);
+ unlock_page(page);
+ put_page(page); /* follow_page_mask() */
+ } else {
+ /*
+ * Non-huge pages are handled in batches via
+ * pagevec. The pin from follow_page_mask()
+ * prevents them from being collapsed by THP.
+ */
+ pagevec_add(&pvec, page);
+ zone = page_zone(page);
+ zoneid = page_zone_id(page);
+
+ /*
+ * Try to fill the rest of pagevec using fast
+ * pte walk. This will also update start to
+ * the next page to process. Then munlock the
+ * pagevec.
+ */
+ start = __munlock_pagevec_fill(&pvec, vma,
+ zoneid, start, end);
+ __munlock_pagevec(&pvec, zone);
+ goto next;
+ }
}
- page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
+ /* It's a bug to munlock in the middle of a THP page */
+ VM_BUG_ON((start >> PAGE_SHIFT) & page_mask);
+ page_increm = 1 + page_mask;
start += page_increm * PAGE_SIZE;
+next:
cond_resched();
}
}
@@ -455,19 +727,21 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
lru_add_drain_all(); /* flush pagevec */
- down_write(&current->mm->mmap_sem);
len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
start &= PAGE_MASK;
- locked = len >> PAGE_SHIFT;
- locked += current->mm->locked_vm;
-
lock_limit = rlimit(RLIMIT_MEMLOCK);
lock_limit >>= PAGE_SHIFT;
+ locked = len >> PAGE_SHIFT;
+
+ down_write(&current->mm->mmap_sem);
+
+ locked += current->mm->locked_vm;
/* check against resource limits */
if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
error = do_mlock(start, len, 1);
+
up_write(&current->mm->mmap_sem);
if (!error)
error = __mm_populate(start, len, 0);
@@ -478,11 +752,13 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
{
int ret;
- down_write(&current->mm->mmap_sem);
len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
start &= PAGE_MASK;
+
+ down_write(&current->mm->mmap_sem);
ret = do_mlock(start, len, 0);
up_write(&current->mm->mmap_sem);
+
return ret;
}
@@ -506,6 +782,7 @@ static int do_mlockall(int flags)
/* Ignore errors */
mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
+ cond_resched();
}
out:
return 0;
@@ -526,12 +803,12 @@ SYSCALL_DEFINE1(mlockall, int, flags)
if (flags & MCL_CURRENT)
lru_add_drain_all(); /* flush pagevec */
- down_write(&current->mm->mmap_sem);
-
lock_limit = rlimit(RLIMIT_MEMLOCK);
lock_limit >>= PAGE_SHIFT;
ret = -ENOMEM;
+ down_write(&current->mm->mmap_sem);
+
if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
capable(CAP_IPC_LOCK))
ret = do_mlockall(flags);
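
The reordered mlock() path above still performs the same RLIMIT_MEMLOCK bookkeeping before taking mmap_sem; the arithmetic can be replayed standalone. A sketch, with the existing locked_vm count made up:

#include <stdio.h>
#include <unistd.h>
#include <sys/resource.h>

int main(void)
{
	unsigned long page_size = sysconf(_SC_PAGESIZE);
	unsigned long page_mask = ~(page_size - 1);
	unsigned long start = 0x1234;		/* deliberately unaligned */
	unsigned long len = 3 * page_size + 100;
	unsigned long already_locked = 16;	/* stand-in for mm->locked_vm, in pages */
	struct rlimit rlim;

	/* len = PAGE_ALIGN(len + (start & ~PAGE_MASK)); start &= PAGE_MASK; */
	len = (len + (start & ~page_mask) + page_size - 1) & page_mask;
	start &= page_mask;

	getrlimit(RLIMIT_MEMLOCK, &rlim);
	unsigned long lock_limit = rlim.rlim_cur / page_size;	/* lock_limit >>= PAGE_SHIFT */
	unsigned long locked = len / page_size + already_locked;

	printf("request %lu pages, %lu already locked, limit %lu -> %s\n",
	       len / page_size, already_locked, lock_limit,
	       locked <= lock_limit ? "do_mlock()" : "-ENOMEM unless CAP_IPC_LOCK");
	return 0;
}
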
diff --git a/mm/mm_init.c b/mm/mm_init.c
index c280a02ea11e..4074caf9936b 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -9,6 +9,8 @@
#include <linux/init.h>
#include <linux/kobject.h>
#include <linux/export.h>
+#include <linux/memory.h>
+#include <linux/notifier.h>
#include "internal.h"
#ifdef CONFIG_DEBUG_MEMORY_INIT
@@ -69,26 +71,26 @@ void __init mminit_verify_pageflags_layout(void)
unsigned long or_mask, add_mask;
shift = 8 * sizeof(unsigned long);
- width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH - LAST_NID_SHIFT;
+ width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH - LAST_CPUPID_SHIFT;
mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
- "Section %d Node %d Zone %d Lastnid %d Flags %d\n",
+ "Section %d Node %d Zone %d Lastcpupid %d Flags %d\n",
SECTIONS_WIDTH,
NODES_WIDTH,
ZONES_WIDTH,
- LAST_NID_WIDTH,
+ LAST_CPUPID_WIDTH,
NR_PAGEFLAGS);
mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts",
- "Section %d Node %d Zone %d Lastnid %d\n",
+ "Section %d Node %d Zone %d Lastcpupid %d\n",
SECTIONS_SHIFT,
NODES_SHIFT,
ZONES_SHIFT,
- LAST_NID_SHIFT);
+ LAST_CPUPID_SHIFT);
mminit_dprintk(MMINIT_TRACE, "pageflags_layout_pgshifts",
- "Section %lu Node %lu Zone %lu Lastnid %lu\n",
+ "Section %lu Node %lu Zone %lu Lastcpupid %lu\n",
(unsigned long)SECTIONS_PGSHIFT,
(unsigned long)NODES_PGSHIFT,
(unsigned long)ZONES_PGSHIFT,
- (unsigned long)LAST_NID_PGSHIFT);
+ (unsigned long)LAST_CPUPID_PGSHIFT);
mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodezoneid",
"Node/Zone ID: %lu -> %lu\n",
(unsigned long)(ZONEID_PGOFF + ZONEID_SHIFT),
@@ -100,9 +102,9 @@ void __init mminit_verify_pageflags_layout(void)
mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
"Node not in page flags");
#endif
-#ifdef LAST_NID_NOT_IN_PAGE_FLAGS
+#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
- "Last nid not in page flags");
+ "Last cpupid not in page flags");
#endif
if (SECTIONS_WIDTH) {
@@ -147,6 +149,51 @@ early_param("mminit_loglevel", set_mminit_loglevel);
struct kobject *mm_kobj;
EXPORT_SYMBOL_GPL(mm_kobj);
+#ifdef CONFIG_SMP
+s32 vm_committed_as_batch = 32;
+
+static void __meminit mm_compute_batch(void)
+{
+ u64 memsized_batch;
+ s32 nr = num_present_cpus();
+ s32 batch = max_t(s32, nr*2, 32);
+
+ /* batch size set to 0.4% of (total memory/#cpus), or max int32 */
+ memsized_batch = min_t(u64, (totalram_pages/nr)/256, 0x7fffffff);
+
+ vm_committed_as_batch = max_t(s32, memsized_batch, batch);
+}
+
+static int __meminit mm_compute_batch_notifier(struct notifier_block *self,
+ unsigned long action, void *arg)
+{
+ switch (action) {
+ case MEM_ONLINE:
+ case MEM_OFFLINE:
+ mm_compute_batch();
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block compute_batch_nb __meminitdata = {
+ .notifier_call = mm_compute_batch_notifier,
+ .priority = IPC_CALLBACK_PRI, /* use lowest priority */
+};
+
+static int __init mm_compute_batch_init(void)
+{
+ mm_compute_batch();
+ register_hotmemory_notifier(&compute_batch_nb);
+
+ return 0;
+}
+
+__initcall(mm_compute_batch_init);
+
+#endif
+
static int __init mm_sysfs_init(void)
{
mm_kobj = kobject_create_and_add("mm", kernel_kobj);
@@ -155,5 +202,4 @@ static int __init mm_sysfs_init(void)
return 0;
}
-
-__initcall(mm_sysfs_init);
+postcore_initcall(mm_sysfs_init);
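
The batch heuristic added above is plain arithmetic on totalram_pages and the CPU count. A standalone sketch with hypothetical machine parameters (16 CPUs, 64 GiB of 4 KiB pages):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int32_t nr_cpus = 16;				/* hypothetical */
	uint64_t totalram_pages = (64ULL << 30) / 4096;	/* 64 GiB of 4 KiB pages */

	/* lower bound: max(2 * nr_cpus, 32) */
	int32_t batch = nr_cpus * 2 > 32 ? nr_cpus * 2 : 32;

	/* and up to 0.4% of (total memory / #cpus), capped at INT32_MAX */
	uint64_t memsized_batch = (totalram_pages / nr_cpus) / 256;
	if (memsized_batch > 0x7fffffff)
		memsized_batch = 0x7fffffff;

	int32_t vm_committed_as_batch =
		(int64_t)memsized_batch > batch ? (int32_t)memsized_batch : batch;

	printf("vm_committed_as_batch = %d pages\n", vm_committed_as_batch);
	return 0;
}
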
diff --git a/mm/mmap.c b/mm/mmap.c
index f681e1842fad..b1202cf81f4b 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -10,6 +10,7 @@
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/mm.h>
+#include <linux/vmacache.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
@@ -86,6 +87,7 @@ EXPORT_SYMBOL(vm_get_page_prot);
int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
+unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
@@ -179,14 +181,12 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
goto error;
}
- allowed = (totalram_pages - hugetlb_total_pages())
- * sysctl_overcommit_ratio / 100;
+ allowed = vm_commit_limit();
/*
* Reserve some for root
*/
if (!cap_sys_admin)
allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);
- allowed += total_swap_pages;
/*
* Don't let a single process grow so big a user can't recover
@@ -406,7 +406,7 @@ static void validate_mm_rb(struct rb_root *root, struct vm_area_struct *ignore)
}
}
-void validate_mm(struct mm_struct *mm)
+static void validate_mm(struct mm_struct *mm)
{
int bug = 0;
int i = 0;
@@ -682,8 +682,9 @@ __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
prev->vm_next = next = vma->vm_next;
if (next)
next->vm_prev = prev;
- if (mm->mmap_cache == vma)
- mm->mmap_cache = prev;
+
+ /* Kill the cache */
+ vmacache_invalidate(mm);
}
/*
@@ -865,7 +866,7 @@ again: remove_next = 1 + (end > next->vm_end);
if (next->anon_vma)
anon_vma_merge(vma, next);
mm->map_count--;
- vma_set_policy(vma, vma_policy(next));
+ mpol_put(vma_policy(next));
kmem_cache_free(vm_area_cachep, next);
/*
* In mprotect's case 6 (see comments on vma_merge),
@@ -895,7 +896,15 @@ again: remove_next = 1 + (end > next->vm_end);
static inline int is_mergeable_vma(struct vm_area_struct *vma,
struct file *file, unsigned long vm_flags)
{
- if (vma->vm_flags ^ vm_flags)
+ /*
+ * VM_SOFTDIRTY should not prevent VMA merging: if the flags
+ * match except for the dirty bit, the caller should mark the
+ * merged VMA as dirty. If the dirty bit were included in the
+ * comparison, we would increase pressure on the memory system,
+ * forcing the kernel to generate new VMAs when old ones could
+ * be extended instead.
+ */
+ if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY)
return 0;
if (vma->vm_file != file)
return 0;
@@ -955,7 +964,7 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
if (is_mergeable_vma(vma, file, vm_flags) &&
is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
pgoff_t vm_pglen;
- vm_pglen = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ vm_pglen = vma_pages(vma);
if (vma->vm_pgoff + vm_pglen == vm_pgoff)
return 1;
}
@@ -1084,7 +1093,7 @@ static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *
return a->vm_end == b->vm_start &&
mpol_equal(vma_policy(a), vma_policy(b)) &&
a->vm_file == b->vm_file &&
- !((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC)) &&
+ !((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC|VM_SOFTDIRTY)) &&
b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
}
@@ -1192,6 +1201,24 @@ static inline unsigned long round_hint_to_min(unsigned long hint)
return hint;
}
+static inline int mlock_future_check(struct mm_struct *mm,
+ unsigned long flags,
+ unsigned long len)
+{
+ unsigned long locked, lock_limit;
+
+ /* mlock MCL_FUTURE? */
+ if (flags & VM_LOCKED) {
+ locked = len >> PAGE_SHIFT;
+ locked += mm->locked_vm;
+ lock_limit = rlimit(RLIMIT_MEMLOCK);
+ lock_limit >>= PAGE_SHIFT;
+ if (locked > lock_limit && !capable(CAP_IPC_LOCK))
+ return -EAGAIN;
+ }
+ return 0;
+}
+
/*
* The caller must hold down_write(&current->mm->mmap_sem).
*/
@@ -1202,7 +1229,6 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
unsigned long *populate)
{
struct mm_struct * mm = current->mm;
- struct inode *inode;
vm_flags_t vm_flags;
*populate = 0;
@@ -1254,20 +1280,12 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
if (!can_do_mlock())
return -EPERM;
- /* mlock MCL_FUTURE? */
- if (vm_flags & VM_LOCKED) {
- unsigned long locked, lock_limit;
- locked = len >> PAGE_SHIFT;
- locked += mm->locked_vm;
- lock_limit = rlimit(RLIMIT_MEMLOCK);
- lock_limit >>= PAGE_SHIFT;
- if (locked > lock_limit && !capable(CAP_IPC_LOCK))
- return -EAGAIN;
- }
-
- inode = file ? file_inode(file) : NULL;
+ if (mlock_future_check(mm, vm_flags, len))
+ return -EAGAIN;
if (file) {
+ struct inode *inode = file_inode(file);
+
switch (flags & MAP_TYPE) {
case MAP_SHARED:
if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE))
@@ -1283,7 +1301,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
/*
* Make sure there are no mandatory locks on the file.
*/
- if (locks_verify_locked(inode))
+ if (locks_verify_locked(file))
return -EAGAIN;
vm_flags |= VM_SHARED | VM_MAYSHARE;
@@ -1300,8 +1318,10 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
vm_flags &= ~VM_MAYEXEC;
}
- if (!file->f_op || !file->f_op->mmap)
+ if (!file->f_op->mmap)
return -ENODEV;
+ if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
+ return -EINVAL;
break;
default:
@@ -1310,6 +1330,8 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
} else {
switch (flags & MAP_TYPE) {
case MAP_SHARED:
+ if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
+ return -EINVAL;
/*
* Ignore pgoff.
*/
@@ -1358,18 +1380,19 @@ SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
if (!(flags & MAP_ANONYMOUS)) {
audit_mmap_fd(fd, flags);
- if (unlikely(flags & MAP_HUGETLB))
- return -EINVAL;
file = fget(fd);
if (!file)
goto out;
if (is_file_hugepages(file))
len = ALIGN(len, huge_page_size(hstate_file(file)));
+ retval = -EINVAL;
+ if (unlikely(flags & MAP_HUGETLB && !is_file_hugepages(file)))
+ goto out_fput;
} else if (flags & MAP_HUGETLB) {
struct user_struct *user = NULL;
- struct hstate *hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) &
- SHM_HUGE_MASK);
+ struct hstate *hs;
+ hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & SHM_HUGE_MASK);
if (!hs)
return -EINVAL;
@@ -1391,6 +1414,7 @@ SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+out_fput:
if (file)
fput(file);
out:
@@ -1474,11 +1498,9 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma, *prev;
- int correct_wcount = 0;
int error;
struct rb_node **rb_link, *rb_parent;
unsigned long charged = 0;
- struct inode *inode = file ? file_inode(file) : NULL;
/* Check against address space limit. */
if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
@@ -1542,16 +1564,11 @@ munmap_back:
vma->vm_pgoff = pgoff;
INIT_LIST_HEAD(&vma->anon_vma_chain);
- error = -EINVAL; /* when rejecting VM_GROWSDOWN|VM_GROWSUP */
-
if (file) {
- if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
- goto free_vma;
if (vm_flags & VM_DENYWRITE) {
error = deny_write_access(file);
if (error)
goto free_vma;
- correct_wcount = 1;
}
vma->vm_file = get_file(file);
error = file->f_op->mmap(file, vma);
@@ -1568,11 +1585,8 @@ munmap_back:
WARN_ON_ONCE(addr != vma->vm_start);
addr = vma->vm_start;
- pgoff = vma->vm_pgoff;
vm_flags = vma->vm_flags;
} else if (vm_flags & VM_SHARED) {
- if (unlikely(vm_flags & (VM_GROWSDOWN|VM_GROWSUP)))
- goto free_vma;
error = shmem_zero_setup(vma);
if (error)
goto free_vma;
@@ -1594,11 +1608,10 @@ munmap_back:
}
vma_link(mm, vma, prev, rb_link, rb_parent);
- file = vma->vm_file;
-
/* Once vma denies write, undo our temporary denial count */
- if (correct_wcount)
- atomic_inc(&inode->i_writecount);
+ if (vm_flags & VM_DENYWRITE)
+ allow_write_access(file);
+ file = vma->vm_file;
out:
perf_event_mmap(vma);
@@ -1614,11 +1627,20 @@ out:
if (file)
uprobe_mmap(vma);
+ /*
+ * A new (or expanded) vma always gets the soft dirty status.
+ * Otherwise the user-space soft-dirty page tracker could not
+ * distinguish the case where a vma area is unmapped and a new
+ * one is then mapped in its place (which must be treated as a
+ * completely new data area).
+ */
+ vma->vm_flags |= VM_SOFTDIRTY;
+
return addr;
unmap_and_free_vma:
- if (correct_wcount)
- atomic_inc(&inode->i_writecount);
+ if (vm_flags & VM_DENYWRITE)
+ allow_write_access(file);
vma->vm_file = NULL;
fput(file);
@@ -1853,7 +1875,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
struct vm_area_struct *vma;
struct vm_unmapped_area_info info;
- if (len > TASK_SIZE)
+ if (len > TASK_SIZE - mmap_min_addr)
return -ENOMEM;
if (flags & MAP_FIXED)
@@ -1862,29 +1884,20 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
if (addr) {
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
- if (TASK_SIZE - len >= addr &&
+ if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
(!vma || addr + len <= vma->vm_start))
return addr;
}
info.flags = 0;
info.length = len;
- info.low_limit = TASK_UNMAPPED_BASE;
+ info.low_limit = mm->mmap_base;
info.high_limit = TASK_SIZE;
info.align_mask = 0;
return vm_unmapped_area(&info);
}
#endif
-void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
-{
- /*
- * Is this a new hole at the lowest possible address?
- */
- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
- mm->free_area_cache = addr;
-}
-
/*
* This mmap-allocator allocates new areas top-down from below the
* stack's low limit (the base):
@@ -1901,7 +1914,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
struct vm_unmapped_area_info info;
/* requested length too big for entire address space */
- if (len > TASK_SIZE)
+ if (len > TASK_SIZE - mmap_min_addr)
return -ENOMEM;
if (flags & MAP_FIXED)
@@ -1911,14 +1924,14 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
if (addr) {
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
- if (TASK_SIZE - len >= addr &&
+ if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
(!vma || addr + len <= vma->vm_start))
return addr;
}
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.length = len;
- info.low_limit = PAGE_SIZE;
+ info.low_limit = max(PAGE_SIZE, mmap_min_addr);
info.high_limit = mm->mmap_base;
info.align_mask = 0;
addr = vm_unmapped_area(&info);
@@ -1941,19 +1954,6 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
}
#endif
-void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
-{
- /*
- * Is this a new hole at the highest possible address?
- */
- if (addr > mm->free_area_cache)
- mm->free_area_cache = addr;
-
- /* dont allow allocations above current base */
- if (mm->free_area_cache > mm->mmap_base)
- mm->free_area_cache = mm->mmap_base;
-}
-
unsigned long
get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags)
@@ -1970,7 +1970,7 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
return -ENOMEM;
get_area = current->mm->get_unmapped_area;
- if (file && file->f_op && file->f_op->get_unmapped_area)
+ if (file && file->f_op->get_unmapped_area)
get_area = file->f_op->get_unmapped_area;
addr = get_area(file, addr, len, pgoff, flags);
if (IS_ERR_VALUE(addr))
@@ -1991,34 +1991,33 @@ EXPORT_SYMBOL(get_unmapped_area);
/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
- struct vm_area_struct *vma = NULL;
+ struct rb_node *rb_node;
+ struct vm_area_struct *vma;
/* Check the cache first. */
- /* (Cache hit rate is typically around 35%.) */
- vma = ACCESS_ONCE(mm->mmap_cache);
- if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
- struct rb_node *rb_node;
+ vma = vmacache_find(mm, addr);
+ if (likely(vma))
+ return vma;
- rb_node = mm->mm_rb.rb_node;
- vma = NULL;
+ rb_node = mm->mm_rb.rb_node;
+ vma = NULL;
- while (rb_node) {
- struct vm_area_struct *vma_tmp;
-
- vma_tmp = rb_entry(rb_node,
- struct vm_area_struct, vm_rb);
-
- if (vma_tmp->vm_end > addr) {
- vma = vma_tmp;
- if (vma_tmp->vm_start <= addr)
- break;
- rb_node = rb_node->rb_left;
- } else
- rb_node = rb_node->rb_right;
- }
- if (vma)
- mm->mmap_cache = vma;
+ while (rb_node) {
+ struct vm_area_struct *tmp;
+
+ tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
+
+ if (tmp->vm_end > addr) {
+ vma = tmp;
+ if (tmp->vm_start <= addr)
+ break;
+ rb_node = rb_node->rb_left;
+ } else
+ rb_node = rb_node->rb_right;
}
+
+ if (vma)
+ vmacache_update(addr, vma);
return vma;
}
@@ -2356,7 +2355,7 @@ static void unmap_region(struct mm_struct *mm,
struct mmu_gather tlb;
lru_add_drain();
- tlb_gather_mmu(&tlb, mm, 0);
+ tlb_gather_mmu(&tlb, mm, start, end);
update_hiwater_rss(mm);
unmap_vmas(&tlb, vma, start, end);
free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
@@ -2374,7 +2373,6 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
{
struct vm_area_struct **insertion_point;
struct vm_area_struct *tail_vma = NULL;
- unsigned long addr;
insertion_point = (prev ? &prev->vm_next : &mm->mmap);
vma->vm_prev = NULL;
@@ -2391,12 +2389,9 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
} else
mm->highest_vm_end = prev ? prev->vm_end : 0;
tail_vma->vm_next = NULL;
- if (mm->unmap_area == arch_unmap_area)
- addr = prev ? prev->vm_end : mm->mmap_base;
- else
- addr = vma ? vma->vm_start : mm->mmap_base;
- mm->unmap_area(mm, addr);
- mm->mmap_cache = NULL; /* Kill the cache. */
+
+ /* Kill the cache */
+ vmacache_invalidate(mm);
}
/*
@@ -2406,7 +2401,6 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
unsigned long addr, int new_below)
{
- struct mempolicy *pol;
struct vm_area_struct *new;
int err = -ENOMEM;
@@ -2430,12 +2424,9 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
}
- pol = mpol_dup(vma_policy(vma));
- if (IS_ERR(pol)) {
- err = PTR_ERR(pol);
+ err = vma_dup_policy(vma, new);
+ if (err)
goto out_free_vma;
- }
- vma_set_policy(new, pol);
if (anon_vma_clone(new, vma))
goto out_free_mpol;
@@ -2463,7 +2454,7 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
fput(new->vm_file);
unlink_anon_vmas(new);
out_free_mpol:
- mpol_put(pol);
+ mpol_put(vma_policy(new));
out_free_vma:
kmem_cache_free(vm_area_cachep, new);
out_err:
@@ -2622,18 +2613,9 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
if (error & ~PAGE_MASK)
return error;
- /*
- * mlock MCL_FUTURE?
- */
- if (mm->def_flags & VM_LOCKED) {
- unsigned long locked, lock_limit;
- locked = len >> PAGE_SHIFT;
- locked += mm->locked_vm;
- lock_limit = rlimit(RLIMIT_MEMLOCK);
- lock_limit >>= PAGE_SHIFT;
- if (locked > lock_limit && !capable(CAP_IPC_LOCK))
- return -EAGAIN;
- }
+ error = mlock_future_check(mm, mm->def_flags, len);
+ if (error)
+ return error;
/*
* mm->mmap_sem is required to protect against another thread
@@ -2689,6 +2671,7 @@ out:
mm->total_vm += len >> PAGE_SHIFT;
if (flags & VM_LOCKED)
mm->locked_vm += (len >> PAGE_SHIFT);
+ vma->vm_flags |= VM_SOFTDIRTY;
return addr;
}
@@ -2735,7 +2718,7 @@ void exit_mmap(struct mm_struct *mm)
lru_add_drain();
flush_cache_mm(mm);
- tlb_gather_mmu(&tlb, mm, 1);
+ tlb_gather_mmu(&tlb, mm, 0, -1);
/* update_hiwater_rss(mm) here? but nobody should be looking */
/* Use -1 here to ensure all VMAs in the mm are unmapped */
unmap_vmas(&tlb, vma, 0, -1);
@@ -2754,7 +2737,8 @@ void exit_mmap(struct mm_struct *mm)
}
vm_unacct_memory(nr_accounted);
- WARN_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
+ WARN_ON(atomic_long_read(&mm->nr_ptes) >
+ (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
}
/* Insert vm structure into process list sorted by address
@@ -2806,7 +2790,6 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
struct mm_struct *mm = vma->vm_mm;
struct vm_area_struct *new_vma, *prev;
struct rb_node **rb_link, *rb_parent;
- struct mempolicy *pol;
bool faulted_in_anon_vma = true;
/*
@@ -2851,10 +2834,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
new_vma->vm_start = addr;
new_vma->vm_end = addr + len;
new_vma->vm_pgoff = pgoff;
- pol = mpol_dup(vma_policy(vma));
- if (IS_ERR(pol))
+ if (vma_dup_policy(vma, new_vma))
goto out_free_vma;
- vma_set_policy(new_vma, pol);
INIT_LIST_HEAD(&new_vma->anon_vma_chain);
if (anon_vma_clone(new_vma, vma))
goto out_free_mempol;
@@ -2869,7 +2850,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
return new_vma;
out_free_mempol:
- mpol_put(pol);
+ mpol_put(vma_policy(new_vma));
out_free_vma:
kmem_cache_free(vm_area_cachep, new_vma);
return NULL;
@@ -2940,7 +2921,7 @@ static const struct vm_operations_struct special_mapping_vmops = {
* The array pointer and the pages it points to are assumed to stay alive
* for as long as this mapping might exist.
*/
-int install_special_mapping(struct mm_struct *mm,
+struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
unsigned long addr, unsigned long len,
unsigned long vm_flags, struct page **pages)
{
@@ -2949,14 +2930,14 @@ int install_special_mapping(struct mm_struct *mm,
vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
if (unlikely(vma == NULL))
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&vma->anon_vma_chain);
vma->vm_mm = mm;
vma->vm_start = addr;
vma->vm_end = addr + len;
- vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
+ vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
vma->vm_ops = &special_mapping_vmops;
@@ -2970,11 +2951,23 @@ int install_special_mapping(struct mm_struct *mm,
perf_event_mmap(vma);
- return 0;
+ return vma;
out:
kmem_cache_free(vm_area_cachep, vma);
- return ret;
+ return ERR_PTR(ret);
+}
+
+int install_special_mapping(struct mm_struct *mm,
+ unsigned long addr, unsigned long len,
+ unsigned long vm_flags, struct page **pages)
+{
+ struct vm_area_struct *vma = _install_special_mapping(mm,
+ addr, len, vm_flags, pages);
+
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+ return 0;
}
static DEFINE_MUTEX(mm_all_locks_mutex);
@@ -3172,7 +3165,7 @@ static int init_user_reserve(void)
sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
return 0;
}
-module_init(init_user_reserve)
+subsys_initcall(init_user_reserve);
/*
* Initialise sysctl_admin_reserve_kbytes.
@@ -3193,7 +3186,7 @@ static int init_admin_reserve(void)
sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
return 0;
}
-module_init(init_admin_reserve)
+subsys_initcall(init_admin_reserve);
/*
* Reinitialise user and admin reserves if memory is added or removed.
@@ -3263,4 +3256,4 @@ static int __meminit init_reserve_notifier(void)
return 0;
}
-module_init(init_reserve_notifier)
+subsys_initcall(init_reserve_notifier);
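
The vm_commit_limit() call that replaces the open-coded limit in __vm_enough_memory() is assumed here to combine the removed ratio/swap terms with the new sysctl_overcommit_kbytes knob; a sketch of that computation with hypothetical sizes:

#include <stdio.h>

int main(void)
{
	unsigned long page_shift = 12;			/* 4 KiB pages */
	unsigned long totalram_pages = 4UL << 20;	/* 16 GiB */
	unsigned long hugetlb_pages = 0;
	unsigned long total_swap_pages = 1UL << 20;	/* 4 GiB of swap */
	unsigned long overcommit_ratio = 50;		/* percent */
	unsigned long overcommit_kbytes = 0;		/* 0 means "use the ratio" */
	unsigned long allowed;

	if (overcommit_kbytes)
		allowed = overcommit_kbytes >> (page_shift - 10);
	else
		allowed = (totalram_pages - hugetlb_pages) * overcommit_ratio / 100;
	allowed += total_swap_pages;

	printf("commit limit: %lu pages\n", allowed);
	return 0;
}
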
diff --git a/mm/mmu_context.c b/mm/mmu_context.c
index 8a8cd0265e52..f802c2d216a7 100644
--- a/mm/mmu_context.c
+++ b/mm/mmu_context.c
@@ -31,6 +31,9 @@ void use_mm(struct mm_struct *mm)
tsk->mm = mm;
switch_mm(active_mm, mm, tsk);
task_unlock(tsk);
+#ifdef finish_arch_post_lock_switch
+ finish_arch_post_lock_switch();
+#endif
if (active_mm != mm)
mmdrop(active_mm);
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 6725ff183374..41cefdf0aadd 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -315,7 +315,7 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
/*
* Wait for any running method to finish, of course including
- * ->release if it was run by mmu_notifier_relase instead of us.
+ * ->release if it was run by mmu_notifier_release instead of us.
*/
synchronize_srcu(&srcu);
@@ -329,5 +329,4 @@ static int __init mmu_notifier_init(void)
{
return init_srcu_struct(&srcu);
}
-
-module_init(mmu_notifier_init);
+subsys_initcall(mmu_notifier_init);
diff --git a/mm/mmzone.c b/mm/mmzone.c
index 2ac0afbd68f3..bf34fb8556db 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -97,20 +97,20 @@ void lruvec_init(struct lruvec *lruvec)
INIT_LIST_HEAD(&lruvec->lists[lru]);
}
-#if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_NID_NOT_IN_PAGE_FLAGS)
-int page_nid_xchg_last(struct page *page, int nid)
+#if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS)
+int page_cpupid_xchg_last(struct page *page, int cpupid)
{
unsigned long old_flags, flags;
- int last_nid;
+ int last_cpupid;
do {
old_flags = flags = page->flags;
- last_nid = page_nid_last(page);
+ last_cpupid = page_cpupid_last(page);
- flags &= ~(LAST_NID_MASK << LAST_NID_PGSHIFT);
- flags |= (nid & LAST_NID_MASK) << LAST_NID_PGSHIFT;
+ flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
+ flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
} while (unlikely(cmpxchg(&page->flags, old_flags, flags) != old_flags));
- return last_nid;
+ return last_cpupid;
}
#endif
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 94722a4d6b43..c43d557941f8 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -23,6 +23,7 @@
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
+#include <linux/ksm.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
@@ -35,18 +36,47 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
}
#endif
+/*
+ * For a prot_numa update we only hold mmap_sem for read so there is a
+ * potential race with faulting where a pmd was temporarily none. This
+ * function checks for a transhuge pmd under the appropriate lock. It
+ * returns a pte if it was successfully locked or NULL if it raced with
+ * a transhuge insertion.
+ */
+static pte_t *lock_pte_protection(struct vm_area_struct *vma, pmd_t *pmd,
+ unsigned long addr, int prot_numa, spinlock_t **ptl)
+{
+ pte_t *pte;
+ spinlock_t *pmdl;
+
+ /* !prot_numa is protected by mmap_sem held for write */
+ if (!prot_numa)
+ return pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);
+
+ pmdl = pmd_lock(vma->vm_mm, pmd);
+ if (unlikely(pmd_trans_huge(*pmd) || pmd_none(*pmd))) {
+ spin_unlock(pmdl);
+ return NULL;
+ }
+
+ pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);
+ spin_unlock(pmdl);
+ return pte;
+}
+
static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, unsigned long end, pgprot_t newprot,
- int dirty_accountable, int prot_numa, bool *ret_all_same_node)
+ int dirty_accountable, int prot_numa)
{
struct mm_struct *mm = vma->vm_mm;
pte_t *pte, oldpte;
spinlock_t *ptl;
unsigned long pages = 0;
- bool all_same_node = true;
- int last_nid = -1;
- pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+ pte = lock_pte_protection(vma, pmd, addr, prot_numa, &ptl);
+ if (!pte)
+ return 0;
+
arch_enter_lazy_mmu_mode();
do {
oldpte = *pte;
@@ -54,117 +84,111 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
pte_t ptent;
bool updated = false;
- ptent = ptep_modify_prot_start(mm, addr, pte);
if (!prot_numa) {
+ ptent = ptep_modify_prot_start(mm, addr, pte);
+ if (pte_numa(ptent))
+ ptent = pte_mknonnuma(ptent);
ptent = pte_modify(ptent, newprot);
+ /*
+ * Avoid taking write faults for pages we
+ * know to be dirty.
+ */
+ if (dirty_accountable && pte_dirty(ptent))
+ ptent = pte_mkwrite(ptent);
+ ptep_modify_prot_commit(mm, addr, pte, ptent);
updated = true;
} else {
struct page *page;
page = vm_normal_page(vma, addr, oldpte);
- if (page) {
- int this_nid = page_to_nid(page);
- if (last_nid == -1)
- last_nid = this_nid;
- if (last_nid != this_nid)
- all_same_node = false;
-
- /* only check non-shared pages */
- if (!pte_numa(oldpte) &&
- page_mapcount(page) == 1) {
- ptent = pte_mknuma(ptent);
+ if (page && !PageKsm(page)) {
+ if (!pte_numa(oldpte)) {
+ ptep_set_numa(mm, addr, pte);
updated = true;
}
}
}
-
- /*
- * Avoid taking write faults for pages we know to be
- * dirty.
- */
- if (dirty_accountable && pte_dirty(ptent)) {
- ptent = pte_mkwrite(ptent);
- updated = true;
- }
-
if (updated)
pages++;
- ptep_modify_prot_commit(mm, addr, pte, ptent);
} else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) {
swp_entry_t entry = pte_to_swp_entry(oldpte);
if (is_write_migration_entry(entry)) {
+ pte_t newpte;
/*
* A protection check is difficult so
* just be safe and disable write
*/
make_migration_entry_read(&entry);
- set_pte_at(mm, addr, pte,
- swp_entry_to_pte(entry));
+ newpte = swp_entry_to_pte(entry);
+ if (pte_swp_soft_dirty(oldpte))
+ newpte = pte_swp_mksoft_dirty(newpte);
+ set_pte_at(mm, addr, pte, newpte);
+
+ pages++;
}
- pages++;
}
} while (pte++, addr += PAGE_SIZE, addr != end);
arch_leave_lazy_mmu_mode();
pte_unmap_unlock(pte - 1, ptl);
- *ret_all_same_node = all_same_node;
return pages;
}
-#ifdef CONFIG_NUMA_BALANCING
-static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
- pmd_t *pmd)
-{
- spin_lock(&mm->page_table_lock);
- set_pmd_at(mm, addr & PMD_MASK, pmd, pmd_mknuma(*pmd));
- spin_unlock(&mm->page_table_lock);
-}
-#else
-static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
- pmd_t *pmd)
-{
- BUG();
-}
-#endif /* CONFIG_NUMA_BALANCING */
-
static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
pud_t *pud, unsigned long addr, unsigned long end,
pgprot_t newprot, int dirty_accountable, int prot_numa)
{
pmd_t *pmd;
+ struct mm_struct *mm = vma->vm_mm;
unsigned long next;
unsigned long pages = 0;
- bool all_same_node;
+ unsigned long nr_huge_updates = 0;
+ unsigned long mni_start = 0;
pmd = pmd_offset(pud, addr);
do {
+ unsigned long this_pages;
+
next = pmd_addr_end(addr, end);
+ if (!pmd_trans_huge(*pmd) && pmd_none_or_clear_bad(pmd))
+ continue;
+
+ /* invoke the mmu notifier if the pmd is populated */
+ if (!mni_start) {
+ mni_start = addr;
+ mmu_notifier_invalidate_range_start(mm, mni_start, end);
+ }
+
if (pmd_trans_huge(*pmd)) {
if (next - addr != HPAGE_PMD_SIZE)
split_huge_page_pmd(vma, addr, pmd);
- else if (change_huge_pmd(vma, pmd, addr, newprot,
- prot_numa)) {
- pages += HPAGE_PMD_NR;
- continue;
+ else {
+ int nr_ptes = change_huge_pmd(vma, pmd, addr,
+ newprot, prot_numa);
+
+ if (nr_ptes) {
+ if (nr_ptes == HPAGE_PMD_NR) {
+ pages += HPAGE_PMD_NR;
+ nr_huge_updates++;
+ }
+
+ /* huge pmd was handled */
+ continue;
+ }
}
- /* fall through */
+ /* fall through, the trans huge pmd just split */
}
- if (pmd_none_or_clear_bad(pmd))
- continue;
- pages += change_pte_range(vma, pmd, addr, next, newprot,
- dirty_accountable, prot_numa, &all_same_node);
-
- /*
- * If we are changing protections for NUMA hinting faults then
- * set pmd_numa if the examined pages were all on the same
- * node. This allows a regular PMD to be handled as one fault
- * and effectively batches the taking of the PTL
- */
- if (prot_numa && all_same_node)
- change_pmd_protnuma(vma->vm_mm, addr, pmd);
+ this_pages = change_pte_range(vma, pmd, addr, next, newprot,
+ dirty_accountable, prot_numa);
+ pages += this_pages;
} while (pmd++, addr = next, addr != end);
+ if (mni_start)
+ mmu_notifier_invalidate_range_end(mm, mni_start, end);
+
+ if (nr_huge_updates)
+ count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
return pages;
}
@@ -201,6 +225,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
BUG_ON(addr >= end);
pgd = pgd_offset(mm, addr);
flush_cache_range(vma, addr, end);
+ set_tlb_flush_pending(mm);
do {
next = pgd_addr_end(addr, end);
if (pgd_none_or_clear_bad(pgd))
@@ -212,6 +237,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
/* Only flush the TLB if we actually modified any entries: */
if (pages)
flush_tlb_range(vma, start, end);
+ clear_tlb_flush_pending(mm);
return pages;
}
@@ -220,15 +246,12 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
unsigned long end, pgprot_t newprot,
int dirty_accountable, int prot_numa)
{
- struct mm_struct *mm = vma->vm_mm;
unsigned long pages;
- mmu_notifier_invalidate_range_start(mm, start, end);
if (is_vm_hugetlb_page(vma))
pages = hugetlb_change_protection(vma, start, end, newprot);
else
pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa);
- mmu_notifier_invalidate_range_end(mm, start, end);
return pages;
}
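
Every mprotect(2) call ends up in the change_protection_range() path reworked above. A trivial userspace driver for the two directions of a permission change on one anonymous page:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, psz, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	strcpy(p, "hello");			/* the page is now present and dirty */

	mprotect(p, psz, PROT_READ);		/* write-protect: ptes updated, TLB flushed */
	printf("read-only: %s\n", p);

	mprotect(p, psz, PROT_READ | PROT_WRITE);
	p[0] = 'H';				/* writable again */
	printf("writable: %s\n", p);

	munmap(p, psz);
	return 0;
}
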
diff --git a/mm/mremap.c b/mm/mremap.c
index 463a25705ac6..05f1180e9f21 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -15,6 +15,7 @@
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
+#include <linux/swapops.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
@@ -69,6 +70,23 @@ static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
return pmd;
}
+static pte_t move_soft_dirty_pte(pte_t pte)
+{
+ /*
+ * Set the soft dirty bit so userspace can notice
+ * that the ptes were moved.
+ */
+#ifdef CONFIG_MEM_SOFT_DIRTY
+ if (pte_present(pte))
+ pte = pte_mksoft_dirty(pte);
+ else if (is_swap_pte(pte))
+ pte = pte_swp_mksoft_dirty(pte);
+ else if (pte_file(pte))
+ pte = pte_file_mksoft_dirty(pte);
+#endif
+ return pte;
+}
+
static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
unsigned long old_addr, unsigned long old_end,
struct vm_area_struct *new_vma, pmd_t *new_pmd,
@@ -126,6 +144,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
continue;
pte = ptep_get_and_clear(mm, old_addr, old_pte);
pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
+ pte = move_soft_dirty_pte(pte);
set_pte_at(mm, new_addr, new_pte, pte);
}
@@ -175,10 +194,17 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
break;
if (pmd_trans_huge(*old_pmd)) {
int err = 0;
- if (extent == HPAGE_PMD_SIZE)
+ if (extent == HPAGE_PMD_SIZE) {
+ VM_BUG_ON(vma->vm_file || !vma->anon_vma);
+ /* See comment in move_ptes() */
+ if (need_rmap_locks)
+ anon_vma_lock_write(vma->anon_vma);
err = move_huge_pmd(vma, new_vma, old_addr,
new_addr, old_end,
old_pmd, new_pmd);
+ if (need_rmap_locks)
+ anon_vma_unlock_write(vma->anon_vma);
+ }
if (err > 0) {
need_flush = true;
continue;
@@ -456,13 +482,14 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
unsigned long charged = 0;
bool locked = false;
- down_write(&current->mm->mmap_sem);
-
if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
- goto out;
+ return ret;
+
+ if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
+ return ret;
if (addr & ~PAGE_MASK)
- goto out;
+ return ret;
old_len = PAGE_ALIGN(old_len);
new_len = PAGE_ALIGN(new_len);
@@ -473,12 +500,13 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
* a zero new-len is nonsensical.
*/
if (!new_len)
- goto out;
+ return ret;
+
+ down_write(&current->mm->mmap_sem);
if (flags & MREMAP_FIXED) {
- if (flags & MREMAP_MAYMOVE)
- ret = mremap_to(addr, old_len, new_addr, new_len,
- &locked);
+ ret = mremap_to(addr, old_len, new_addr, new_len,
+ &locked);
goto out;
}
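
move_soft_dirty_pte() above keeps the soft-dirty bit set on ptes that mremap() relocates. A sketch that watches the bit from userspace, assuming a kernel built with CONFIG_MEM_SOFT_DIRTY and the documented /proc interfaces (writing "4" to clear_refs clears the bits, pagemap bit 55 reports them):

#define _GNU_SOURCE
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/mman.h>

static int soft_dirty(void *addr)
{
	uint64_t entry = 0;
	off_t off = ((uintptr_t)addr / sysconf(_SC_PAGESIZE)) * sizeof(entry);
	int fd = open("/proc/self/pagemap", O_RDONLY);

	if (fd < 0)
		return -1;
	pread(fd, &entry, sizeof(entry), off);
	close(fd);
	return (entry >> 55) & 1;		/* pagemap bit 55: soft-dirty */
}

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, psz, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	int fd = open("/proc/self/clear_refs", O_WRONLY);

	write(fd, "4", 1);			/* "4": clear all soft-dirty bits */
	close(fd);

	p[0] = 1;				/* dirty the page again */
	printf("before mremap: soft-dirty = %d\n", soft_dirty(p));

	/* grow the mapping; the kernel may or may not relocate the ptes */
	p = mremap(p, psz, 2 * psz, MREMAP_MAYMOVE);
	printf("after mremap:  soft-dirty = %d\n", soft_dirty(p));
	return 0;
}
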
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index bdd3fa2fc73b..04a9d94333a5 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -41,11 +41,13 @@ static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
if (limit > memblock.current_limit)
limit = memblock.current_limit;
- addr = memblock_find_in_range_node(goal, limit, size, align, nid);
+ addr = memblock_find_in_range_node(size, align, goal, limit, nid);
if (!addr)
return NULL;
- memblock_reserve(addr, size);
+ if (memblock_reserve(addr, size))
+ return NULL;
+
ptr = phys_to_virt(addr);
memset(ptr, 0, size);
/*
@@ -82,27 +84,18 @@ void __init free_bootmem_late(unsigned long addr, unsigned long size)
static void __init __free_pages_memory(unsigned long start, unsigned long end)
{
- unsigned long i, start_aligned, end_aligned;
- int order = ilog2(BITS_PER_LONG);
-
- start_aligned = (start + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
- end_aligned = end & ~(BITS_PER_LONG - 1);
+ int order;
- if (end_aligned <= start_aligned) {
- for (i = start; i < end; i++)
- __free_pages_bootmem(pfn_to_page(i), 0);
-
- return;
- }
+ while (start < end) {
+ order = min(MAX_ORDER - 1UL, __ffs(start));
- for (i = start; i < start_aligned; i++)
- __free_pages_bootmem(pfn_to_page(i), 0);
+ while (start + (1UL << order) > end)
+ order--;
- for (i = start_aligned; i < end_aligned; i += BITS_PER_LONG)
- __free_pages_bootmem(pfn_to_page(i), order);
+ __free_pages_bootmem(pfn_to_page(start), order);
- for (i = end_aligned; i < end; i++)
- __free_pages_bootmem(pfn_to_page(i), 0);
+ start += (1UL << order);
+ }
}
static unsigned long __init __free_memory_core(phys_addr_t start,
@@ -123,34 +116,50 @@ static unsigned long __init __free_memory_core(phys_addr_t start,
static unsigned long __init free_low_memory_core_early(void)
{
unsigned long count = 0;
- phys_addr_t start, end, size;
+ phys_addr_t start, end;
u64 i;
- for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL)
+ for_each_free_mem_range(i, NUMA_NO_NODE, &start, &end, NULL)
count += __free_memory_core(start, end);
- /* free range that is used for reserved array if we allocate it */
- size = get_allocated_memblock_reserved_regions_info(&start);
- if (size)
- count += __free_memory_core(start, start + size);
+#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
+ {
+ phys_addr_t size;
+
+ /* Free memblock.reserved array if it was allocated */
+ size = get_allocated_memblock_reserved_regions_info(&start);
+ if (size)
+ count += __free_memory_core(start, start + size);
+
+ /* Free memblock.memory array if it was allocated */
+ size = get_allocated_memblock_memory_regions_info(&start);
+ if (size)
+ count += __free_memory_core(start, start + size);
+ }
+#endif
return count;
}
-static void reset_node_lowmem_managed_pages(pg_data_t *pgdat)
+static int reset_managed_pages_done __initdata;
+
+static inline void __init reset_node_managed_pages(pg_data_t *pgdat)
{
struct zone *z;
- /*
- * In free_area_init_core(), highmem zone's managed_pages is set to
- * present_pages, and bootmem allocator doesn't allocate from highmem
- * zones. So there's no need to recalculate managed_pages because all
- * highmem pages will be managed by the buddy system. Here highmem
- * zone also includes highmem movable zone.
- */
+ if (reset_managed_pages_done)
+ return;
for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
- if (!is_highmem(z))
- z->managed_pages = 0;
+ z->managed_pages = 0;
+}
+
+void __init reset_all_zones_managed_pages(void)
+{
+ struct pglist_data *pgdat;
+
+ for_each_online_pgdat(pgdat)
+ reset_node_managed_pages(pgdat);
+ reset_managed_pages_done = 1;
}
/**
@@ -160,17 +169,19 @@ static void reset_node_lowmem_managed_pages(pg_data_t *pgdat)
*/
unsigned long __init free_all_bootmem(void)
{
- struct pglist_data *pgdat;
+ unsigned long pages;
- for_each_online_pgdat(pgdat)
- reset_node_lowmem_managed_pages(pgdat);
+ reset_all_zones_managed_pages();
/*
- * We need to use MAX_NUMNODES instead of NODE_DATA(0)->node_id
+ * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
	 * because in some cases, like when Node0 doesn't have RAM installed,
	 * low RAM will be on Node1
*/
- return free_low_memory_core_early();
+ pages = free_low_memory_core_early();
+ totalram_pages += pages;
+
+ return pages;
}
/**
@@ -217,7 +228,7 @@ static void * __init ___alloc_bootmem_nopanic(unsigned long size,
restart:
- ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align, goal, limit);
+ ptr = __alloc_memory_core_early(NUMA_NO_NODE, size, align, goal, limit);
if (ptr)
return ptr;
@@ -301,7 +312,7 @@ again:
if (ptr)
return ptr;
- ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align,
+ ptr = __alloc_memory_core_early(NUMA_NO_NODE, size, align,
goal, limit);
if (ptr)
return ptr;
@@ -323,7 +334,7 @@ void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
return ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
}
-void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
+static void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
unsigned long align, unsigned long goal,
unsigned long limit)
{
diff --git a/mm/nommu.c b/mm/nommu.c
index 298884dcd6e7..85f8d6698d48 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -15,6 +15,7 @@
#include <linux/export.h>
#include <linux/mm.h>
+#include <linux/vmacache.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/file.h>
@@ -24,6 +25,7 @@
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
+#include <linux/compiler.h>
#include <linux/mount.h>
#include <linux/personality.h>
#include <linux/security.h>
@@ -56,11 +58,11 @@
void *high_memory;
struct page *mem_map;
unsigned long max_mapnr;
-unsigned long num_physpages;
unsigned long highest_memmap_pfn;
struct percpu_counter vm_committed_as;
int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
int sysctl_overcommit_ratio = 50; /* default is 50% */
+unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
@@ -85,7 +87,6 @@ unsigned long vm_memory_committed(void)
EXPORT_SYMBOL_GPL(vm_memory_committed);
EXPORT_SYMBOL(mem_map);
-EXPORT_SYMBOL(num_physpages);
/* list of mapped, potentially shareable regions */
static struct kmem_cache *vm_region_jar;
@@ -282,6 +283,10 @@ EXPORT_SYMBOL(vmalloc_to_pfn);
long vread(char *buf, char *addr, unsigned long count)
{
+ /* Don't allow overflow */
+ if ((unsigned long) buf + count < count)
+ count = -(unsigned long) buf;
+
memcpy(buf, addr, count);
return count;
}
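
The added check clamps 'count' so that 'buf + count' cannot wrap around the top of the address space before the memcpy(). A small hedged illustration of the same wraparound clamp in plain C (values are hypothetical):

#include <stdio.h>

/* Clamp 'count' so that 'base + count' cannot wrap past ULONG_MAX. */
static unsigned long clamp_count(unsigned long base, unsigned long count)
{
	if (base + count < count)	/* unsigned addition wrapped around */
		count = -base;		/* bytes left up to the address-space top */
	return count;
}

int main(void)
{
	unsigned long base = ~0UL - 10;			/* 10 bytes below the top */
	printf("%lu\n", clamp_count(base, 100));	/* prints 11 */
	return 0;
}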
@@ -293,7 +298,7 @@ long vwrite(char *buf, char *addr, unsigned long count)
count = -(unsigned long) addr;
memcpy(addr, buf, count);
- return(count);
+ return count;
}
/*
@@ -456,7 +461,7 @@ EXPORT_SYMBOL_GPL(vm_unmap_aliases);
* Implement a stub for vmalloc_sync_all() if the architecture chose not to
* have one.
*/
-void __attribute__((weak)) vmalloc_sync_all(void)
+void __weak vmalloc_sync_all(void)
{
}
@@ -765,16 +770,23 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
*/
static void delete_vma_from_mm(struct vm_area_struct *vma)
{
+ int i;
struct address_space *mapping;
struct mm_struct *mm = vma->vm_mm;
+ struct task_struct *curr = current;
kenter("%p", vma);
protect_vma(vma, 0);
mm->map_count--;
- if (mm->mmap_cache == vma)
- mm->mmap_cache = NULL;
+ for (i = 0; i < VMACACHE_SIZE; i++) {
+ /* if the vma is cached, invalidate the entire cache */
+ if (curr->vmacache[i] == vma) {
+ vmacache_invalidate(curr->mm);
+ break;
+ }
+ }
/* remove the VMA from the mapping */
if (vma->vm_file) {
@@ -822,8 +834,8 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
struct vm_area_struct *vma;
/* check the cache first */
- vma = ACCESS_ONCE(mm->mmap_cache);
- if (vma && vma->vm_start <= addr && vma->vm_end > addr)
+ vma = vmacache_find(mm, addr);
+ if (likely(vma))
return vma;
/* trawl the list (there may be multiple mappings in which addr
@@ -832,7 +844,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
if (vma->vm_start > addr)
return NULL;
if (vma->vm_end > addr) {
- mm->mmap_cache = vma;
+ vmacache_update(addr, vma);
return vma;
}
}
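
find_vma() in the nommu build now consults the per-task VMA cache (vmacache_find()/vmacache_update()) rather than the single mm->mmap_cache pointer removed above. A toy userspace model of such a lookup cache, purely illustrative (the slot count, hash and struct layout are stand-ins, not the kernel's):

#include <stddef.h>
#include <stdio.h>

#define CACHE_SIZE 4			/* assumption: small, like VMACACHE_SIZE */

struct vma { unsigned long start, end; };

static struct vma *cache[CACHE_SIZE];

static size_t slot_for(unsigned long addr)
{
	return (addr >> 12) % CACHE_SIZE;	/* hash on the page number */
}

static struct vma *cache_find(unsigned long addr)
{
	struct vma *v = cache[slot_for(addr)];

	if (v && v->start <= addr && v->end > addr)
		return v;	/* hit: half-open [start, end) contains addr */
	return NULL;		/* miss: caller falls back to the full list walk */
}

static void cache_update(unsigned long addr, struct vma *v)
{
	cache[slot_for(addr)] = v;	/* refill after a slow-path lookup */
}

int main(void)
{
	static struct vma a = { 0x1000, 0x3000 };

	cache_update(0x1800, &a);
	printf("%d\n", cache_find(0x1fff) != NULL);	/* 1: cached and in range */
	return 0;
}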
@@ -871,8 +883,8 @@ static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
unsigned long end = addr + len;
/* check the cache first */
- vma = mm->mmap_cache;
- if (vma && vma->vm_start == addr && vma->vm_end == end)
+ vma = vmacache_find_exact(mm, addr, end);
+ if (vma)
return vma;
/* trawl the list (there may be multiple mappings in which addr
@@ -883,7 +895,7 @@ static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
if (vma->vm_start > addr)
return NULL;
if (vma->vm_end == end) {
- mm->mmap_cache = vma;
+ vmacache_update(addr, vma);
return vma;
}
}
@@ -935,7 +947,7 @@ static int validate_mmap_request(struct file *file,
struct address_space *mapping;
/* files must support mmap */
- if (!file->f_op || !file->f_op->mmap)
+ if (!file->f_op->mmap)
return -ENODEV;
/* work out if what we've got could possibly be shared
@@ -992,7 +1004,7 @@ static int validate_mmap_request(struct file *file,
(file->f_mode & FMODE_WRITE))
return -EACCES;
- if (locks_verify_locked(file_inode(file)))
+ if (locks_verify_locked(file))
return -EAGAIN;
if (!(capabilities & BDI_CAP_MAP_DIRECT))
@@ -1000,8 +1012,7 @@ static int validate_mmap_request(struct file *file,
/* we mustn't privatise shared mappings */
capabilities &= ~BDI_CAP_MAP_COPY;
- }
- else {
+ } else {
/* we're going to read the file into private memory we
* allocate */
if (!(capabilities & BDI_CAP_MAP_COPY))
@@ -1032,23 +1043,20 @@ static int validate_mmap_request(struct file *file,
if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
if (prot & PROT_EXEC)
return -EPERM;
- }
- else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
+ } else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
/* handle implication of PROT_EXEC by PROT_READ */
if (current->personality & READ_IMPLIES_EXEC) {
if (capabilities & BDI_CAP_EXEC_MAP)
prot |= PROT_EXEC;
}
- }
- else if ((prot & PROT_READ) &&
+ } else if ((prot & PROT_READ) &&
(prot & PROT_EXEC) &&
!(capabilities & BDI_CAP_EXEC_MAP)
) {
/* backing file is not executable, try to copy */
capabilities &= ~BDI_CAP_MAP_DIRECT;
}
- }
- else {
+ } else {
/* anonymous mappings are always memory backed and can be
* privately mapped
*/
@@ -1656,7 +1664,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
/* find the first potentially overlapping VMA */
vma = find_vma(mm, start);
if (!vma) {
- static int limit = 0;
+ static int limit;
if (limit < 5) {
printk(KERN_WARNING
"munmap of memory not mmapped by process %d"
@@ -1869,10 +1877,6 @@ unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
return -ENOMEM;
}
-void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
-{
-}
-
void unmap_mapping_range(struct address_space *mapping,
loff_t const holebegin, loff_t const holelen,
int even_cows)
@@ -1950,13 +1954,12 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
goto error;
}
- allowed = totalram_pages * sysctl_overcommit_ratio / 100;
+ allowed = vm_commit_limit();
/*
* Reserve some 3% for root
*/
if (!cap_sys_admin)
allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);
- allowed += total_swap_pages;
/*
* Don't let a single process grow so big a user can't recover
@@ -1987,6 +1990,12 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
EXPORT_SYMBOL(filemap_fault);
+void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ BUG();
+}
+EXPORT_SYMBOL(filemap_map_pages);
+
int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
unsigned long size, pgoff_t pgoff)
{
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 79e451a78c9e..3291e82d4352 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -47,19 +47,21 @@ static DEFINE_SPINLOCK(zone_scan_lock);
#ifdef CONFIG_NUMA
/**
 * has_intersects_mems_allowed() - check task eligibility for kill
- * @tsk: task struct of which task to consider
+ * @start: task struct of which task to consider
* @mask: nodemask passed to page allocator for mempolicy ooms
*
* Task eligibility is determined by whether or not a candidate task, @tsk,
* shares the same mempolicy nodes as current if it is bound by such a policy
* and whether or not it has the same set of allowed cpuset nodes.
*/
-static bool has_intersects_mems_allowed(struct task_struct *tsk,
+static bool has_intersects_mems_allowed(struct task_struct *start,
const nodemask_t *mask)
{
- struct task_struct *start = tsk;
+ struct task_struct *tsk;
+ bool ret = false;
- do {
+ rcu_read_lock();
+ for_each_thread(start, tsk) {
if (mask) {
/*
* If this is a mempolicy constrained oom, tsk's
@@ -67,19 +69,20 @@ static bool has_intersects_mems_allowed(struct task_struct *tsk,
* mempolicy intersects current, otherwise it may be
* needlessly killed.
*/
- if (mempolicy_nodemask_intersects(tsk, mask))
- return true;
+ ret = mempolicy_nodemask_intersects(tsk, mask);
} else {
/*
* This is not a mempolicy constrained oom, so only
* check the mems of tsk's cpuset.
*/
- if (cpuset_mems_allowed_intersects(current, tsk))
- return true;
+ ret = cpuset_mems_allowed_intersects(current, tsk);
}
- } while_each_thread(start, tsk);
+ if (ret)
+ break;
+ }
+ rcu_read_unlock();
- return false;
+ return ret;
}
#else
static bool has_intersects_mems_allowed(struct task_struct *tsk,
@@ -97,16 +100,21 @@ static bool has_intersects_mems_allowed(struct task_struct *tsk,
*/
struct task_struct *find_lock_task_mm(struct task_struct *p)
{
- struct task_struct *t = p;
+ struct task_struct *t;
+
+ rcu_read_lock();
- do {
+ for_each_thread(p, t) {
task_lock(t);
if (likely(t->mm))
- return t;
+ goto found;
task_unlock(t);
- } while_each_thread(p, t);
+ }
+ t = NULL;
+found:
+ rcu_read_unlock();
- return NULL;
+ return t;
}
/* return true if the task is not adequate as candidate victim task. */
@@ -161,7 +169,7 @@ unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
* The baseline for the badness score is the proportion of RAM that each
* task's rss, pagetable and swap space use.
*/
- points = get_mm_rss(p->mm) + p->mm->nr_ptes +
+ points = get_mm_rss(p->mm) + atomic_long_read(&p->mm->nr_ptes) +
get_mm_counter(p->mm, MM_SWAPENTS);
task_unlock(p);
@@ -170,7 +178,7 @@ unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
* implementation used by LSMs.
*/
if (has_capability_noaudit(p, CAP_SYS_ADMIN))
- adj -= 30;
+ points -= (points * 3) / 100;
/* Normalize to oom_score_adj units */
adj *= totalpages / 1000;
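
The root bonus changes from a flat 'adj -= 30' (whose weight depended on the later 'adj *= totalpages / 1000' scaling) to a 3% discount applied directly to the task's own footprint. A hedged arithmetic illustration with made-up numbers:

#include <stdio.h>

int main(void)
{
	/* Hypothetical badness base: rss + page tables + swap entries, in pages. */
	unsigned long points = 200000;

	/* CAP_SYS_ADMIN tasks now get a bonus proportional to their own size... */
	points -= (points * 3) / 100;	/* 200000 -> 194000 */

	/* ...instead of a fixed -30 on the oom_score_adj scale, which was worth
	 * 3% of totalpages regardless of how big the task actually was. */
	printf("%lu\n", points);
	return 0;
}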
@@ -288,7 +296,7 @@ enum oom_scan_t oom_scan_process_thread(struct task_struct *task,
/*
 * Simple selection loop. We choose the process with the highest
- * number of 'points'.
+ * number of 'points'. Returns -1 on scan abort.
*
* (not docbooked, we don't want this one cluttering up the manual)
*/
@@ -301,7 +309,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
unsigned long chosen_points = 0;
rcu_read_lock();
- do_each_thread(g, p) {
+ for_each_process_thread(g, p) {
unsigned int points;
switch (oom_scan_process_thread(p, totalpages, nodemask,
@@ -314,16 +322,20 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
continue;
case OOM_SCAN_ABORT:
rcu_read_unlock();
- return ERR_PTR(-1UL);
+ return (struct task_struct *)(-1UL);
case OOM_SCAN_OK:
break;
};
points = oom_badness(p, NULL, nodemask, totalpages);
- if (points > chosen_points) {
- chosen = p;
- chosen_points = points;
- }
- } while_each_thread(g, p);
+ if (!points || points < chosen_points)
+ continue;
+ /* Prefer thread group leaders for display purposes */
+ if (points == chosen_points && thread_group_leader(chosen))
+ continue;
+
+ chosen = p;
+ chosen_points = points;
+ }
if (chosen)
get_task_struct(chosen);
rcu_read_unlock();
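
The rewritten scan breaks ties in favour of the task already chosen when that task is a thread group leader, so the leader is what gets reported; a strictly higher score still always wins. A toy sketch of that tie-break (names and scores are invented):

#include <stdio.h>

struct cand { const char *name; unsigned long points; int is_leader; };

int main(void)
{
	/* Invented candidates: a thread group leader, a sibling thread with
	 * the same score, and a larger process. */
	struct cand tasks[] = {
		{ "leader",  500, 1 },
		{ "worker",  500, 0 },	/* tie: the already-chosen leader is kept */
		{ "hog",    1200, 0 },
	};
	struct cand *chosen = NULL;
	unsigned long chosen_points = 0;

	for (int i = 0; i < 3; i++) {
		unsigned long points = tasks[i].points;

		if (!points || points < chosen_points)
			continue;
		if (points == chosen_points && chosen && chosen->is_leader)
			continue;	/* prefer thread group leaders on a tie */
		chosen = &tasks[i];
		chosen_points = points;
	}
	printf("%s\n", chosen->name);	/* "hog" */
	return 0;
}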
@@ -364,10 +376,10 @@ static void dump_tasks(const struct mem_cgroup *memcg, const nodemask_t *nodemas
continue;
}
- pr_info("[%5d] %5d %5d %8lu %8lu %7lu %8lu %5hd %s\n",
+ pr_info("[%5d] %5d %5d %8lu %8lu %7ld %8lu %5hd %s\n",
task->pid, from_kuid(&init_user_ns, task_uid(task)),
task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
- task->mm->nr_ptes,
+ atomic_long_read(&task->mm->nr_ptes),
get_mm_counter(task->mm, MM_SWAPENTS),
task->signal->oom_score_adj, task->comm);
task_unlock(task);
@@ -406,7 +418,7 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
{
struct task_struct *victim = p;
struct task_struct *child;
- struct task_struct *t = p;
+ struct task_struct *t;
struct mm_struct *mm;
unsigned int victim_points = 0;
static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
@@ -437,7 +449,7 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
* still freeing memory.
*/
read_lock(&tasklist_lock);
- do {
+ for_each_thread(p, t) {
list_for_each_entry(child, &t->children, sibling) {
unsigned int child_points;
@@ -455,13 +467,11 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
get_task_struct(victim);
}
}
- } while_each_thread(p, t);
+ }
read_unlock(&tasklist_lock);
- rcu_read_lock();
p = find_lock_task_mm(victim);
if (!p) {
- rcu_read_unlock();
put_task_struct(victim);
return;
} else if (victim != p) {
@@ -487,6 +497,7 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
* That thread will now get access to memory reserves since it has a
* pending fatal signal.
*/
+ rcu_read_lock();
for_each_process(p)
if (p->mm == mm && !same_thread_group(p, victim) &&
!(p->flags & PF_KTHREAD)) {
@@ -657,7 +668,7 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
dump_header(NULL, gfp_mask, order, NULL, mpol_mask);
panic("Out of memory and no killable processes...\n");
}
- if (PTR_ERR(p) != -1UL) {
+ if (p != (void *)-1UL) {
oom_kill_process(p, gfp_mask, order, points, totalpages, NULL,
nodemask, "Out of memory");
killed = 1;
@@ -678,9 +689,12 @@ out:
*/
void pagefault_out_of_memory(void)
{
- struct zonelist *zonelist = node_zonelist(first_online_node,
- GFP_KERNEL);
+ struct zonelist *zonelist;
+
+ if (mem_cgroup_oom_synchronize(true))
+ return;
+ zonelist = node_zonelist(first_online_node, GFP_KERNEL);
if (try_set_zonelist_oom(zonelist, GFP_KERNEL)) {
out_of_memory(NULL, 0, 0, NULL, false);
clear_zonelist_oom(zonelist, GFP_KERNEL);
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 4514ad7415c3..a4317da60532 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -36,8 +36,11 @@
#include <linux/pagevec.h>
#include <linux/timer.h>
#include <linux/sched/rt.h>
+#include <linux/mm_inline.h>
#include <trace/events/writeback.h>
+#include "internal.h"
+
/*
* Sleep at most 200ms at a time in balance_dirty_pages().
*/
@@ -188,6 +191,26 @@ static unsigned long writeout_period_time = 0;
* global dirtyable memory first.
*/
+/**
+ * zone_dirtyable_memory - number of dirtyable pages in a zone
+ * @zone: the zone
+ *
+ * Returns the zone's number of pages potentially available for dirty
+ * page cache. This is the base value for the per-zone dirty limits.
+ */
+static unsigned long zone_dirtyable_memory(struct zone *zone)
+{
+ unsigned long nr_pages;
+
+ nr_pages = zone_page_state(zone, NR_FREE_PAGES);
+ nr_pages -= min(nr_pages, zone->dirty_balance_reserve);
+
+ nr_pages += zone_page_state(zone, NR_INACTIVE_FILE);
+ nr_pages += zone_page_state(zone, NR_ACTIVE_FILE);
+
+ return nr_pages;
+}
+
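
zone_dirtyable_memory() now derives the per-zone dirtyable base from free pages (above the dirty_balance_reserve) plus the file LRU, instead of free plus all reclaimable pages, so anonymous memory no longer inflates the dirty limits. A hedged back-of-the-envelope version with invented counter values:

#include <stdio.h>

int main(void)
{
	/* Invented per-zone counters, in pages. */
	unsigned long nr_free       = 50000;
	unsigned long reserve       = 60000;	/* zone->dirty_balance_reserve */
	unsigned long inactive_file = 30000;
	unsigned long active_file   = 20000;

	unsigned long nr_pages = nr_free;
	nr_pages -= nr_pages < reserve ? nr_pages : reserve;	/* clamp at 0 */
	nr_pages += inactive_file + active_file;

	/* Free pages above the reserve plus the file LRU are dirtyable;
	 * anonymous pages no longer count toward the base. */
	printf("%lu dirtyable pages\n", nr_pages);		/* 50000 */
	return 0;
}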
static unsigned long highmem_dirtyable_memory(unsigned long total)
{
#ifdef CONFIG_HIGHMEM
@@ -195,11 +218,9 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
unsigned long x = 0;
for_each_node_state(node, N_HIGH_MEMORY) {
- struct zone *z =
- &NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
+ struct zone *z = &NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
- x += zone_page_state(z, NR_FREE_PAGES) +
- zone_reclaimable_pages(z) - z->dirty_balance_reserve;
+ x += zone_dirtyable_memory(z);
}
/*
* Unreclaimable memory (kernel memory or anonymous memory
@@ -235,15 +256,15 @@ static unsigned long global_dirtyable_memory(void)
{
unsigned long x;
- x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
+ x = global_page_state(NR_FREE_PAGES);
x -= min(x, dirty_balance_reserve);
+ x += global_page_state(NR_INACTIVE_FILE);
+ x += global_page_state(NR_ACTIVE_FILE);
+
if (!vm_highmem_is_dirtyable)
x -= highmem_dirtyable_memory(x);
- /* Subtract min_free_kbytes */
- x -= min_t(unsigned long, x, min_free_kbytes >> (PAGE_SHIFT - 10));
-
return x + 1; /* Ensure that we never return 0 */
}
@@ -289,32 +310,6 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
}
/**
- * zone_dirtyable_memory - number of dirtyable pages in a zone
- * @zone: the zone
- *
- * Returns the zone's number of pages potentially available for dirty
- * page cache. This is the base value for the per-zone dirty limits.
- */
-static unsigned long zone_dirtyable_memory(struct zone *zone)
-{
- /*
- * The effective global number of dirtyable pages may exclude
- * highmem as a big-picture measure to keep the ratio between
- * dirty memory and lowmem reasonable.
- *
- * But this function is purely about the individual zone and a
- * highmem zone can hold its share of dirty pages, so we don't
- * care about vm_highmem_is_dirtyable here.
- */
- unsigned long nr_pages = zone_page_state(zone, NR_FREE_PAGES) +
- zone_reclaimable_pages(zone);
-
- /* don't allow this to underflow */
- nr_pages -= min(nr_pages, zone->dirty_balance_reserve);
- return nr_pages;
-}
-
-/**
* zone_dirty_limit - maximum number of dirty pages allowed in a zone
* @zone: the zone
*
@@ -585,6 +580,37 @@ unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
}
/*
+ * setpoint - dirty 3
+ * f(dirty) := 1.0 + (----------------)
+ * limit - setpoint
+ *
+ * it's a 3rd order polynomial that subjects to
+ *
+ * (1) f(freerun)  = 2.0 => ramp up dirty_ratelimit reasonably fast
+ * (2) f(setpoint) = 1.0 => the balance point
+ * (3) f(limit) = 0 => the hard limit
+ * (4) df/dx <= 0 => negative feedback control
+ * (5) the closer to setpoint, the smaller |df/dx| (and the reverse)
+ * => fast response on large errors; small oscillation near setpoint
+ */
+static long long pos_ratio_polynom(unsigned long setpoint,
+ unsigned long dirty,
+ unsigned long limit)
+{
+ long long pos_ratio;
+ long x;
+
+ x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
+ limit - setpoint + 1);
+ pos_ratio = x;
+ pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
+ pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
+ pos_ratio += 1 << RATELIMIT_CALC_SHIFT;
+
+ return clamp(pos_ratio, 0LL, 2LL << RATELIMIT_CALC_SHIFT);
+}
+
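The new pos_ratio_polynom() evaluates f(dirty) = 1 + ((setpoint - dirty) / (limit - setpoint))^3 in fixed point (RATELIMIT_CALC_SHIFT fractional bits) and clamps the result to [0, 2]. A floating-point sanity check of the three anchor points listed in the comment above (a sketch, not the kernel's fixed-point math):

#include <stdio.h>

/* Floating-point model of pos_ratio_polynom(); the kernel uses
 * fixed point with RATELIMIT_CALC_SHIFT fractional bits instead. */
static double pos_ratio(double setpoint, double dirty, double limit)
{
	double x = (setpoint - dirty) / (limit - setpoint);
	double r = 1.0 + x * x * x;

	if (r < 0.0) r = 0.0;
	if (r > 2.0) r = 2.0;
	return r;
}

int main(void)
{
	double freerun = 100, limit = 300;
	double setpoint = (freerun + limit) / 2;	/* 200 */

	printf("%.1f\n", pos_ratio(setpoint, freerun,  limit));	/* 2.0 */
	printf("%.1f\n", pos_ratio(setpoint, setpoint, limit));	/* 1.0 */
	printf("%.1f\n", pos_ratio(setpoint, limit,    limit));	/* 0.0 */
	return 0;
}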
+/*
* Dirty position control.
*
* (o) global/bdi setpoints
@@ -682,26 +708,80 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
/*
* global setpoint
*
- * setpoint - dirty 3
- * f(dirty) := 1.0 + (----------------)
- * limit - setpoint
+ * See comment for pos_ratio_polynom().
+ */
+ setpoint = (freerun + limit) / 2;
+ pos_ratio = pos_ratio_polynom(setpoint, dirty, limit);
+
+ /*
+ * The strictlimit feature is a tool preventing mistrusted filesystems
+ * from growing a large number of dirty pages before throttling. For
+ * such filesystems balance_dirty_pages always checks bdi counters
+	 * against bdi limits, even if the global "nr_dirty" is under "freerun".
+ * This is especially important for fuse which sets bdi->max_ratio to
+ * 1% by default. Without strictlimit feature, fuse writeback may
+	 * consume an arbitrary amount of RAM because it is accounted in
+ * NR_WRITEBACK_TEMP which is not involved in calculating "nr_dirty".
*
- * it's a 3rd order polynomial that subjects to
+ * Here, in bdi_position_ratio(), we calculate pos_ratio based on
+ * two values: bdi_dirty and bdi_thresh. Let's consider an example:
+ * total amount of RAM is 16GB, bdi->max_ratio is equal to 1%, global
+ * limits are set by default to 10% and 20% (background and throttle).
+ * Then bdi_thresh is 1% of 20% of 16GB. This amounts to ~8K pages.
+ * bdi_dirty_limit(bdi, bg_thresh) is about ~4K pages. bdi_setpoint is
+ * about ~6K pages (as the average of background and throttle bdi
+ * limits). The 3rd order polynomial will provide positive feedback if
+ * bdi_dirty is under bdi_setpoint and vice versa.
*
- * (1) f(freerun) = 2.0 => rampup dirty_ratelimit reasonably fast
- * (2) f(setpoint) = 1.0 => the balance point
- * (3) f(limit) = 0 => the hard limit
- * (4) df/dx <= 0 => negative feedback control
- * (5) the closer to setpoint, the smaller |df/dx| (and the reverse)
- * => fast response on large errors; small oscillation near setpoint
+ * Note, that we cannot use global counters in these calculations
+ * because we want to throttle process writing to a strictlimit BDI
+ * much earlier than global "freerun" is reached (~23MB vs. ~2.3GB
+ * in the example above).
*/
- setpoint = (freerun + limit) / 2;
- x = div_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
- limit - setpoint + 1);
- pos_ratio = x;
- pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
- pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
- pos_ratio += 1 << RATELIMIT_CALC_SHIFT;
+ if (unlikely(bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
+ long long bdi_pos_ratio;
+ unsigned long bdi_bg_thresh;
+
+ if (bdi_dirty < 8)
+ return min_t(long long, pos_ratio * 2,
+ 2 << RATELIMIT_CALC_SHIFT);
+
+ if (bdi_dirty >= bdi_thresh)
+ return 0;
+
+ bdi_bg_thresh = div_u64((u64)bdi_thresh * bg_thresh, thresh);
+ bdi_setpoint = dirty_freerun_ceiling(bdi_thresh,
+ bdi_bg_thresh);
+
+ if (bdi_setpoint == 0 || bdi_setpoint == bdi_thresh)
+ return 0;
+
+ bdi_pos_ratio = pos_ratio_polynom(bdi_setpoint, bdi_dirty,
+ bdi_thresh);
+
+ /*
+ * Typically, for strictlimit case, bdi_setpoint << setpoint
+	 * and pos_ratio >> bdi_pos_ratio. In other words, the global
+	 * state ("dirty") is not the limiting factor and we have to
+ * make decision based on bdi counters. But there is an
+ * important case when global pos_ratio should get precedence:
+ * global limits are exceeded (e.g. due to activities on other
+ * BDIs) while given strictlimit BDI is below limit.
+ *
+ * "pos_ratio * bdi_pos_ratio" would work for the case above,
+ * but it would look too non-natural for the case of all
+ * activity in the system coming from a single strictlimit BDI
+ * with bdi->max_ratio == 100%.
+ *
+ * Note that min() below somewhat changes the dynamics of the
+ * control system. Normally, pos_ratio value can be well over 3
+ * (when globally we are at freerun and bdi is well below bdi
+ * setpoint). Now the maximum pos_ratio in the same situation
+ * is 2. We might want to tweak this if we observe the control
+ * system is too slow to adapt.
+ */
+ return min(pos_ratio, bdi_pos_ratio);
+ }
/*
* We have computed basic pos_ratio above based on global situation. If
@@ -762,7 +842,7 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
x_intercept = bdi_setpoint + span;
if (bdi_dirty < x_intercept - span / 4) {
- pos_ratio = div_u64(pos_ratio * (x_intercept - bdi_dirty),
+ pos_ratio = div64_u64(pos_ratio * (x_intercept - bdi_dirty),
x_intercept - bdi_setpoint + 1);
} else
pos_ratio /= 4;
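
The 16GB / 1% figures in the strictlimit comment above work out as follows; a hedged back-of-the-envelope check (4K pages and the default 10%/20% limits are assumed):

#include <stdio.h>

int main(void)
{
	/* Assumptions from the comment: 16GB of RAM, 4K pages,
	 * background/throttle limits of 10%/20%, bdi->max_ratio = 1%. */
	unsigned long long total      = (16ULL << 30) >> 12;	/* ~4.2M pages  */
	unsigned long long thresh     = total * 20 / 100;	/* global limit */
	unsigned long long bg_thresh  = total * 10 / 100;	/* background   */
	unsigned long long bdi_thresh = thresh / 100;		/* ~8K pages    */
	unsigned long long bdi_bg     = bdi_thresh * bg_thresh / thresh;  /* ~4K */
	unsigned long long bdi_setpoint = (bdi_thresh + bdi_bg) / 2;	   /* ~6K */

	printf("bdi_thresh=%llu bdi_bg=%llu bdi_setpoint=%llu\n",
	       bdi_thresh, bdi_bg, bdi_setpoint);
	return 0;
}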
@@ -994,6 +1074,27 @@ static void bdi_update_dirty_ratelimit(struct backing_dev_info *bdi,
* keep that period small to reduce time lags).
*/
step = 0;
+
+ /*
+ * For strictlimit case, calculations above were based on bdi counters
+ * and limits (starting from pos_ratio = bdi_position_ratio() and up to
+ * balanced_dirty_ratelimit = task_ratelimit * write_bw / dirty_rate).
+ * Hence, to calculate "step" properly, we have to use bdi_dirty as
+ * "dirty" and bdi_setpoint as "setpoint".
+ *
+	 * We ramp up dirty_ratelimit forcibly if bdi_dirty is low because
+ * it's possible that bdi_thresh is close to zero due to inactivity
+ * of backing device (see the implementation of bdi_dirty_limit()).
+ */
+ if (unlikely(bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
+ dirty = bdi_dirty;
+ if (bdi_dirty < 8)
+ setpoint = bdi_dirty + 1;
+ else
+ setpoint = (bdi_thresh +
+ bdi_dirty_limit(bdi, bg_thresh)) / 2;
+ }
+
if (dirty < setpoint) {
x = min(bdi->balanced_dirty_ratelimit,
min(balanced_dirty_ratelimit, task_ratelimit));
@@ -1104,11 +1205,11 @@ static unsigned long dirty_poll_interval(unsigned long dirty,
return 1;
}
-static long bdi_max_pause(struct backing_dev_info *bdi,
- unsigned long bdi_dirty)
+static unsigned long bdi_max_pause(struct backing_dev_info *bdi,
+ unsigned long bdi_dirty)
{
- long bw = bdi->avg_write_bandwidth;
- long t;
+ unsigned long bw = bdi->avg_write_bandwidth;
+ unsigned long t;
/*
* Limit pause time for small memory systems. If sleeping for too long
@@ -1120,7 +1221,7 @@ static long bdi_max_pause(struct backing_dev_info *bdi,
t = bdi_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8));
t++;
- return min_t(long, t, MAX_PAUSE);
+ return min_t(unsigned long, t, MAX_PAUSE);
}
static long bdi_min_pause(struct backing_dev_info *bdi,
@@ -1198,6 +1299,56 @@ static long bdi_min_pause(struct backing_dev_info *bdi,
return pages >= DIRTY_POLL_THRESH ? 1 + t / 2 : t;
}
+static inline void bdi_dirty_limits(struct backing_dev_info *bdi,
+ unsigned long dirty_thresh,
+ unsigned long background_thresh,
+ unsigned long *bdi_dirty,
+ unsigned long *bdi_thresh,
+ unsigned long *bdi_bg_thresh)
+{
+ unsigned long bdi_reclaimable;
+
+ /*
+ * bdi_thresh is not treated as some limiting factor as
+ * dirty_thresh, due to reasons
+ * - in JBOD setup, bdi_thresh can fluctuate a lot
+ * - in a system with HDD and USB key, the USB key may somehow
+ * go into state (bdi_dirty >> bdi_thresh) either because
+ * bdi_dirty starts high, or because bdi_thresh drops low.
+ * In this case we don't want to hard throttle the USB key
+ * dirtiers for 100 seconds until bdi_dirty drops under
+ * bdi_thresh. Instead the auxiliary bdi control line in
+ * bdi_position_ratio() will let the dirtier task progress
+ * at some rate <= (write_bw / 2) for bringing down bdi_dirty.
+ */
+ *bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
+
+ if (bdi_bg_thresh)
+ *bdi_bg_thresh = div_u64((u64)*bdi_thresh *
+ background_thresh,
+ dirty_thresh);
+
+ /*
+ * In order to avoid the stacked BDI deadlock we need
+ * to ensure we accurately count the 'dirty' pages when
+ * the threshold is low.
+ *
+ * Otherwise it would be possible to get thresh+n pages
+ * reported dirty, even though there are thresh-m pages
+ * actually dirty; with m+n sitting in the percpu
+ * deltas.
+ */
+ if (*bdi_thresh < 2 * bdi_stat_error(bdi)) {
+ bdi_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
+ *bdi_dirty = bdi_reclaimable +
+ bdi_stat_sum(bdi, BDI_WRITEBACK);
+ } else {
+ bdi_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
+ *bdi_dirty = bdi_reclaimable +
+ bdi_stat(bdi, BDI_WRITEBACK);
+ }
+}
+
/*
* balance_dirty_pages() must be called by processes which are generating dirty
* data. It looks at the number of dirty pages in the machine and will force
@@ -1209,13 +1360,9 @@ static void balance_dirty_pages(struct address_space *mapping,
unsigned long pages_dirtied)
{
unsigned long nr_reclaimable; /* = file_dirty + unstable_nfs */
- unsigned long bdi_reclaimable;
unsigned long nr_dirty; /* = file_dirty + writeback + unstable_nfs */
- unsigned long bdi_dirty;
- unsigned long freerun;
unsigned long background_thresh;
unsigned long dirty_thresh;
- unsigned long bdi_thresh;
long period;
long pause;
long max_pause;
@@ -1226,10 +1373,16 @@ static void balance_dirty_pages(struct address_space *mapping,
unsigned long dirty_ratelimit;
unsigned long pos_ratio;
struct backing_dev_info *bdi = mapping->backing_dev_info;
+ bool strictlimit = bdi->capabilities & BDI_CAP_STRICTLIMIT;
unsigned long start_time = jiffies;
for (;;) {
unsigned long now = jiffies;
+ unsigned long uninitialized_var(bdi_thresh);
+ unsigned long thresh;
+ unsigned long uninitialized_var(bdi_dirty);
+ unsigned long dirty;
+ unsigned long bg_thresh;
/*
* Unstable writes are a feature of certain networked
@@ -1243,61 +1396,44 @@ static void balance_dirty_pages(struct address_space *mapping,
global_dirty_limits(&background_thresh, &dirty_thresh);
+ if (unlikely(strictlimit)) {
+ bdi_dirty_limits(bdi, dirty_thresh, background_thresh,
+ &bdi_dirty, &bdi_thresh, &bg_thresh);
+
+ dirty = bdi_dirty;
+ thresh = bdi_thresh;
+ } else {
+ dirty = nr_dirty;
+ thresh = dirty_thresh;
+ bg_thresh = background_thresh;
+ }
+
/*
* Throttle it only when the background writeback cannot
* catch-up. This avoids (excessively) small writeouts
- * when the bdi limits are ramping up.
+ * when the bdi limits are ramping up in case of !strictlimit.
+ *
+ * In strictlimit case make decision based on the bdi counters
+ * and limits. Small writeouts when the bdi limits are ramping
+ * up are the price we consciously pay for strictlimit-ing.
*/
- freerun = dirty_freerun_ceiling(dirty_thresh,
- background_thresh);
- if (nr_dirty <= freerun) {
+ if (dirty <= dirty_freerun_ceiling(thresh, bg_thresh)) {
current->dirty_paused_when = now;
current->nr_dirtied = 0;
current->nr_dirtied_pause =
- dirty_poll_interval(nr_dirty, dirty_thresh);
+ dirty_poll_interval(dirty, thresh);
break;
}
if (unlikely(!writeback_in_progress(bdi)))
bdi_start_background_writeback(bdi);
- /*
- * bdi_thresh is not treated as some limiting factor as
- * dirty_thresh, due to reasons
- * - in JBOD setup, bdi_thresh can fluctuate a lot
- * - in a system with HDD and USB key, the USB key may somehow
- * go into state (bdi_dirty >> bdi_thresh) either because
- * bdi_dirty starts high, or because bdi_thresh drops low.
- * In this case we don't want to hard throttle the USB key
- * dirtiers for 100 seconds until bdi_dirty drops under
- * bdi_thresh. Instead the auxiliary bdi control line in
- * bdi_position_ratio() will let the dirtier task progress
- * at some rate <= (write_bw / 2) for bringing down bdi_dirty.
- */
- bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
-
- /*
- * In order to avoid the stacked BDI deadlock we need
- * to ensure we accurately count the 'dirty' pages when
- * the threshold is low.
- *
- * Otherwise it would be possible to get thresh+n pages
- * reported dirty, even though there are thresh-m pages
- * actually dirty; with m+n sitting in the percpu
- * deltas.
- */
- if (bdi_thresh < 2 * bdi_stat_error(bdi)) {
- bdi_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
- bdi_dirty = bdi_reclaimable +
- bdi_stat_sum(bdi, BDI_WRITEBACK);
- } else {
- bdi_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
- bdi_dirty = bdi_reclaimable +
- bdi_stat(bdi, BDI_WRITEBACK);
- }
+ if (!strictlimit)
+ bdi_dirty_limits(bdi, dirty_thresh, background_thresh,
+ &bdi_dirty, &bdi_thresh, NULL);
dirty_exceeded = (bdi_dirty > bdi_thresh) &&
- (nr_dirty > dirty_thresh);
+ ((nr_dirty > dirty_thresh) || strictlimit);
if (dirty_exceeded && !bdi->dirty_exceeded)
bdi->dirty_exceeded = 1;
@@ -1426,9 +1562,9 @@ pause:
bdi_start_background_writeback(bdi);
}
-void set_page_dirty_balance(struct page *page, int page_mkwrite)
+void set_page_dirty_balance(struct page *page)
{
- if (set_page_dirty(page) || page_mkwrite) {
+ if (set_page_dirty(page)) {
struct address_space *mapping = page_mapping(page);
if (mapping)
@@ -1619,7 +1755,7 @@ void writeback_set_ratelimit(void)
ratelimit_pages = 16;
}
-static int __cpuinit
+static int
ratelimit_handler(struct notifier_block *self, unsigned long action,
void *hcpu)
{
@@ -1634,7 +1770,7 @@ ratelimit_handler(struct notifier_block *self, unsigned long action,
}
}
-static struct notifier_block __cpuinitdata ratelimit_nb = {
+static struct notifier_block ratelimit_nb = {
.notifier_call = ratelimit_handler,
.next = NULL,
};
@@ -2002,11 +2138,17 @@ EXPORT_SYMBOL(account_page_dirtied);
/*
* Helper function for set_page_writeback family.
+ *
+ * The caller must hold mem_cgroup_begin/end_update_page_stat() lock
+ * while calling this function.
+ * See test_set_page_writeback for example.
+ *
* NOTE: Unlike account_page_dirtied this does not rely on being atomic
* wrt interrupts.
*/
void account_page_writeback(struct page *page)
{
+ mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
inc_zone_page_state(page, NR_WRITEBACK);
}
EXPORT_SYMBOL(account_page_writeback);
@@ -2031,11 +2173,12 @@ int __set_page_dirty_nobuffers(struct page *page)
if (!TestSetPageDirty(page)) {
struct address_space *mapping = page_mapping(page);
struct address_space *mapping2;
+ unsigned long flags;
if (!mapping)
return 1;
- spin_lock_irq(&mapping->tree_lock);
+ spin_lock_irqsave(&mapping->tree_lock, flags);
mapping2 = page_mapping(page);
if (mapping2) { /* Race with truncate? */
BUG_ON(mapping2 != mapping);
@@ -2044,7 +2187,7 @@ int __set_page_dirty_nobuffers(struct page *page)
radix_tree_tag_set(&mapping->page_tree,
page_index(page), PAGECACHE_TAG_DIRTY);
}
- spin_unlock_irq(&mapping->tree_lock);
+ spin_unlock_irqrestore(&mapping->tree_lock, flags);
if (mapping->host) {
/* !PageAnon && !swapper_space */
__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
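
Switching from spin_lock_irq() to spin_lock_irqsave() lets __set_page_dirty_nobuffers() be called with interrupts already disabled: the saved flags restore the caller's interrupt state on unlock instead of unconditionally re-enabling IRQs. A toy model of the difference (userspace stand-ins; the real primitives disable interrupts around the spinlock):

#include <stdio.h>
#include <stdbool.h>

/* Toy model: 'irqs_on' stands for the CPU interrupt-enable state. */
static bool irqs_on;

static void lock_irq(void)			{ irqs_on = false; }
static void unlock_irq(void)			{ irqs_on = true; }
static void lock_irqsave(bool *flags)		{ *flags = irqs_on; irqs_on = false; }
static void unlock_irqrestore(bool flags)	{ irqs_on = flags; }

int main(void)
{
	bool flags;

	irqs_on = false;		/* caller already runs with IRQs off */
	lock_irq();
	unlock_irq();
	printf("irq variant:     irqs_on=%d (caller's state lost)\n", irqs_on);

	irqs_on = false;
	lock_irqsave(&flags);
	unlock_irqrestore(flags);
	printf("irqsave variant: irqs_on=%d (caller's state restored)\n", irqs_on);
	return 0;
}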
@@ -2223,7 +2366,10 @@ int test_clear_page_writeback(struct page *page)
{
struct address_space *mapping = page_mapping(page);
int ret;
+ bool locked;
+ unsigned long memcg_flags;
+ mem_cgroup_begin_update_page_stat(page, &locked, &memcg_flags);
if (mapping) {
struct backing_dev_info *bdi = mapping->backing_dev_info;
unsigned long flags;
@@ -2244,9 +2390,11 @@ int test_clear_page_writeback(struct page *page)
ret = TestClearPageWriteback(page);
}
if (ret) {
+ mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
dec_zone_page_state(page, NR_WRITEBACK);
inc_zone_page_state(page, NR_WRITTEN);
}
+ mem_cgroup_end_update_page_stat(page, &locked, &memcg_flags);
return ret;
}
@@ -2254,7 +2402,10 @@ int test_set_page_writeback(struct page *page)
{
struct address_space *mapping = page_mapping(page);
int ret;
+ bool locked;
+ unsigned long memcg_flags;
+ mem_cgroup_begin_update_page_stat(page, &locked, &memcg_flags);
if (mapping) {
struct backing_dev_info *bdi = mapping->backing_dev_info;
unsigned long flags;
@@ -2281,6 +2432,7 @@ int test_set_page_writeback(struct page *page)
}
if (!ret)
account_page_writeback(page);
+ mem_cgroup_end_update_page_stat(page, &locked, &memcg_flags);
return ret;
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c3edb624fccf..5dba2933c9c0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -56,15 +56,20 @@
#include <linux/ftrace_event.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>
+#include <linux/mm_inline.h>
#include <linux/migrate.h>
#include <linux/page-debug-flags.h>
#include <linux/hugetlb.h>
#include <linux/sched/rt.h>
+#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"
+/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
+static DEFINE_MUTEX(pcp_batch_high_lock);
+
#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
@@ -100,6 +105,9 @@ nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
};
EXPORT_SYMBOL(node_states);
+/* Protect totalram_pages and zone->managed_pages */
+static DEFINE_SPINLOCK(managed_page_count_lock);
+
unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
/*
@@ -197,6 +205,7 @@ static char * const zone_names[MAX_NR_ZONES] = {
};
int min_free_kbytes = 1024;
+int user_min_free_kbytes = -1;
static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
@@ -225,8 +234,8 @@ int page_group_by_mobility_disabled __read_mostly;
void set_pageblock_migratetype(struct page *page, int migratetype)
{
-
- if (unlikely(page_group_by_mobility_disabled))
+ if (unlikely(page_group_by_mobility_disabled &&
+ migratetype < MIGRATE_PCPTYPES))
migratetype = MIGRATE_UNMOVABLE;
set_pageblock_flags_group(page, (unsigned long)migratetype,
@@ -286,7 +295,8 @@ static inline int bad_range(struct zone *zone, struct page *page)
}
#endif
-static void bad_page(struct page *page)
+static void bad_page(struct page *page, const char *reason,
+ unsigned long bad_flags)
{
static unsigned long resume;
static unsigned long nr_shown;
@@ -320,7 +330,7 @@ static void bad_page(struct page *page)
printk(KERN_ALERT "BUG: Bad page state in process %s pfn:%05lx\n",
current->comm, page_to_pfn(page));
- dump_page(page);
+ dump_page_badflags(page, reason, bad_flags);
print_modules();
dump_stack();
@@ -360,9 +370,11 @@ void prep_compound_page(struct page *page, unsigned long order)
__SetPageHead(page);
for (i = 1; i < nr_pages; i++) {
struct page *p = page + i;
- __SetPageTail(p);
set_page_count(p, 0);
p->first_page = page;
+ /* Make sure p->first_page is always valid for PageTail() */
+ smp_wmb();
+ __SetPageTail(p);
}
}
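
The smp_wmb() guarantees that p->first_page is visible before the tail flag, so a racing PageTail() reader never follows a stale first_page. A hedged userspace analogue of the publish ordering using C11 release/acquire (the kernel relies on smp_wmb() plus reader-side ordering instead):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct page_model {
	struct page_model *first_page;	/* payload written first          */
	atomic_bool tail;		/* "PageTail" flag published last */
};

static struct page_model head, tail;	/* zero-initialized statics */

static void make_tail(struct page_model *p, struct page_model *h)
{
	p->first_page = h;
	/* Release ordering plays the role of smp_wmb(): the first_page
	 * store cannot be reordered after the flag store. */
	atomic_store_explicit(&p->tail, true, memory_order_release);
}

static struct page_model *read_head(struct page_model *p)
{
	if (atomic_load_explicit(&p->tail, memory_order_acquire))
		return p->first_page;	/* guaranteed to be valid here */
	return NULL;
}

int main(void)
{
	make_tail(&tail, &head);
	printf("%d\n", read_head(&tail) == &head);	/* 1 */
	return 0;
}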
@@ -374,7 +386,7 @@ static int destroy_compound_page(struct page *page, unsigned long order)
int bad = 0;
if (unlikely(compound_order(page) != order)) {
- bad_page(page);
+ bad_page(page, "wrong compound order", 0);
bad++;
}
@@ -383,8 +395,11 @@ static int destroy_compound_page(struct page *page, unsigned long order)
for (i = 1; i < nr_pages; i++) {
struct page *p = page + i;
- if (unlikely(!PageTail(p) || (p->first_page != page))) {
- bad_page(page);
+ if (unlikely(!PageTail(p))) {
+ bad_page(page, "PageTail not set", 0);
+ bad++;
+ } else if (unlikely(p->first_page != page)) {
+ bad_page(page, "first_page not consistent", 0);
bad++;
}
__ClearPageTail(p);
@@ -480,8 +495,10 @@ __find_buddy_index(unsigned long page_idx, unsigned int order)
* (c) a page and its buddy have the same order &&
* (d) a page and its buddy are in the same zone.
*
- * For recording whether a page is in the buddy system, we set ->_mapcount -2.
- * Setting, clearing, and testing _mapcount -2 is serialized by zone->lock.
+ * For recording whether a page is in the buddy system, we set ->_mapcount
+ * PAGE_BUDDY_MAPCOUNT_VALUE.
+ * Setting, clearing, and testing _mapcount PAGE_BUDDY_MAPCOUNT_VALUE is
+ * serialized by zone->lock.
*
* For recording page's order, we use page_private(page).
*/
@@ -495,12 +512,12 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
return 0;
if (page_is_guard(buddy) && page_order(buddy) == order) {
- VM_BUG_ON(page_count(buddy) != 0);
+ VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
return 1;
}
if (PageBuddy(buddy) && page_order(buddy) == order) {
- VM_BUG_ON(page_count(buddy) != 0);
+ VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
return 1;
}
return 0;
@@ -519,8 +536,9 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
* as necessary, plus some accounting needed to play nicely with other
* parts of the VM system.
* At each level, we keep a list of pages, which are heads of continuous
- * free pages of length of (1 << order) and marked with _mapcount -2. Page's
- * order is recorded in page_private(page) field.
+ * free pages of length of (1 << order) and marked with _mapcount
+ * PAGE_BUDDY_MAPCOUNT_VALUE. Page's order is recorded in page_private(page)
+ * field.
* So when we are allocating or freeing one, we can derive the state of the
* other. That is, if we allocate a small block, and both were
* free, the remainder of the region must be split into blocks.
@@ -549,8 +567,8 @@ static inline void __free_one_page(struct page *page,
page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
- VM_BUG_ON(page_idx & ((1 << order) - 1));
- VM_BUG_ON(bad_range(zone, page));
+ VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
+ VM_BUG_ON_PAGE(bad_range(zone, page), page);
while (order < MAX_ORDER-1) {
buddy_idx = __find_buddy_index(page_idx, order);
@@ -606,15 +624,26 @@ out:
static inline int free_pages_check(struct page *page)
{
- if (unlikely(page_mapcount(page) |
- (page->mapping != NULL) |
- (atomic_read(&page->_count) != 0) |
- (page->flags & PAGE_FLAGS_CHECK_AT_FREE) |
- (mem_cgroup_bad_page_check(page)))) {
- bad_page(page);
+ const char *bad_reason = NULL;
+ unsigned long bad_flags = 0;
+
+ if (unlikely(page_mapcount(page)))
+ bad_reason = "nonzero mapcount";
+ if (unlikely(page->mapping != NULL))
+ bad_reason = "non-NULL mapping";
+ if (unlikely(atomic_read(&page->_count) != 0))
+ bad_reason = "nonzero _count";
+ if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
+ bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
+ bad_flags = PAGE_FLAGS_CHECK_AT_FREE;
+ }
+ if (unlikely(mem_cgroup_bad_page_check(page)))
+ bad_reason = "cgroup check failed";
+ if (unlikely(bad_reason)) {
+ bad_page(page, bad_reason, bad_flags);
return 1;
}
- page_nid_reset_last(page);
+ page_cpupid_reset_last(page);
if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
return 0;
@@ -639,7 +668,6 @@ static void free_pcppages_bulk(struct zone *zone, int count,
int to_free = count;
spin_lock(&zone->lock);
- zone->all_unreclaimable = 0;
zone->pages_scanned = 0;
while (to_free) {
@@ -688,7 +716,6 @@ static void free_one_page(struct zone *zone, struct page *page, int order,
int migratetype)
{
spin_lock(&zone->lock);
- zone->all_unreclaimable = 0;
zone->pages_scanned = 0;
__free_one_page(page, zone, order, migratetype);
@@ -713,7 +740,8 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
return false;
if (!PageHighMem(page)) {
- debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
+ debug_check_no_locks_freed(page_address(page),
+ PAGE_SIZE << order);
debug_check_no_obj_freed(page_address(page),
PAGE_SIZE << order);
}
@@ -739,35 +767,28 @@ static void __free_pages_ok(struct page *page, unsigned int order)
local_irq_restore(flags);
}
-/*
- * Read access to zone->managed_pages is safe because it's unsigned long,
- * but we still need to serialize writers. Currently all callers of
- * __free_pages_bootmem() except put_page_bootmem() should only be used
- * at boot time. So for shorter boot time, we shift the burden to
- * put_page_bootmem() to serialize writers.
- */
-void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
+void __init __free_pages_bootmem(struct page *page, unsigned int order)
{
unsigned int nr_pages = 1 << order;
+ struct page *p = page;
unsigned int loop;
- prefetchw(page);
- for (loop = 0; loop < nr_pages; loop++) {
- struct page *p = &page[loop];
-
- if (loop + 1 < nr_pages)
- prefetchw(p + 1);
+ prefetchw(p);
+ for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
+ prefetchw(p + 1);
__ClearPageReserved(p);
set_page_count(p, 0);
}
+ __ClearPageReserved(p);
+ set_page_count(p, 0);
- page_zone(page)->managed_pages += 1 << order;
+ page_zone(page)->managed_pages += nr_pages;
set_page_refcounted(page);
__free_pages(page, order);
}
#ifdef CONFIG_CMA
-/* Free whole pageblock and set it's migration type to MIGRATE_CMA. */
+/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
void __init init_cma_reserved_pageblock(struct page *page)
{
unsigned i = pageblock_nr_pages;
@@ -781,11 +802,7 @@ void __init init_cma_reserved_pageblock(struct page *page)
set_page_refcounted(page);
set_pageblock_migratetype(page, MIGRATE_CMA);
__free_pages(page, pageblock_order);
- totalram_pages += pageblock_nr_pages;
-#ifdef CONFIG_HIGHMEM
- if (PageHighMem(page))
- totalhigh_pages += pageblock_nr_pages;
-#endif
+ adjust_managed_page_count(page, pageblock_nr_pages);
}
#endif
@@ -813,7 +830,7 @@ static inline void expand(struct zone *zone, struct page *page,
area--;
high--;
size >>= 1;
- VM_BUG_ON(bad_range(zone, &page[size]));
+ VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
#ifdef CONFIG_DEBUG_PAGEALLOC
if (high < debug_guardpage_minorder()) {
@@ -843,12 +860,23 @@ static inline void expand(struct zone *zone, struct page *page,
*/
static inline int check_new_page(struct page *page)
{
- if (unlikely(page_mapcount(page) |
- (page->mapping != NULL) |
- (atomic_read(&page->_count) != 0) |
- (page->flags & PAGE_FLAGS_CHECK_AT_PREP) |
- (mem_cgroup_bad_page_check(page)))) {
- bad_page(page);
+ const char *bad_reason = NULL;
+ unsigned long bad_flags = 0;
+
+ if (unlikely(page_mapcount(page)))
+ bad_reason = "nonzero mapcount";
+ if (unlikely(page->mapping != NULL))
+ bad_reason = "non-NULL mapping";
+ if (unlikely(atomic_read(&page->_count) != 0))
+ bad_reason = "nonzero _count";
+ if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
+ bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
+ bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
+ }
+ if (unlikely(mem_cgroup_bad_page_check(page)))
+ bad_reason = "cgroup check failed";
+ if (unlikely(bad_reason)) {
+ bad_page(page, bad_reason, bad_flags);
return 1;
}
return 0;
@@ -888,7 +916,7 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
int migratetype)
{
unsigned int current_order;
- struct free_area * area;
+ struct free_area *area;
struct page *page;
/* Find a page of the appropriate size in the preferred list */
@@ -955,7 +983,7 @@ int move_freepages(struct zone *zone,
for (page = start_page; page <= end_page;) {
/* Make sure we are not inadvertently changing nodes */
- VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));
+ VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
if (!pfn_valid_within(page_to_pfn(page))) {
page++;
@@ -1010,14 +1038,64 @@ static void change_pageblock_range(struct page *pageblock_page,
}
}
+/*
+ * If breaking a large block of pages, move all free pages to the preferred
+ * allocation list. If falling back for a reclaimable kernel allocation, be
+ * more aggressive about taking ownership of free pages.
+ *
+ * On the other hand, never change migration type of MIGRATE_CMA pageblocks
+ * nor move CMA pages to different free lists. We don't want unmovable pages
+ * to be allocated from MIGRATE_CMA areas.
+ *
+ * Returns the new migratetype of the pageblock (or the same old migratetype
+ * if it was unchanged).
+ */
+static int try_to_steal_freepages(struct zone *zone, struct page *page,
+ int start_type, int fallback_type)
+{
+ int current_order = page_order(page);
+
+ /*
+ * When borrowing from MIGRATE_CMA, we need to release the excess
+ * buddy pages to CMA itself.
+ */
+ if (is_migrate_cma(fallback_type))
+ return fallback_type;
+
+ /* Take ownership for orders >= pageblock_order */
+ if (current_order >= pageblock_order) {
+ change_pageblock_range(page, current_order, start_type);
+ return start_type;
+ }
+
+ if (current_order >= pageblock_order / 2 ||
+ start_type == MIGRATE_RECLAIMABLE ||
+ page_group_by_mobility_disabled) {
+ int pages;
+
+ pages = move_freepages_block(zone, page, start_type);
+
+ /* Claim the whole block if over half of it is free */
+ if (pages >= (1 << (pageblock_order-1)) ||
+ page_group_by_mobility_disabled) {
+
+ set_pageblock_migratetype(page, start_type);
+ return start_type;
+ }
+
+ }
+
+ return fallback_type;
+}
+
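The fallback policy is now concentrated in one helper: never repurpose CMA blocks, take ownership of whole pageblocks for large orders, and claim a block for the requested migratetype when more than half of it turns out to be free. A simplified decision sketch (the constants and the moved-pages count are illustrative; move_freepages_block() side effects and page_group_by_mobility_disabled are left out):

#include <stdio.h>
#include <stdbool.h>

/* Illustrative stand-ins for the kernel's constants. */
#define PAGEBLOCK_ORDER		9
#define MIGRATE_RECLAIMABLE	1

/* Returns the migratetype the stolen block ends up with. */
static int steal_decision(int current_order, int start_type,
			  int fallback_type, bool fallback_is_cma,
			  int free_pages_moved)
{
	if (fallback_is_cma)
		return fallback_type;		/* never repurpose CMA blocks */

	if (current_order >= PAGEBLOCK_ORDER)
		return start_type;		/* whole pageblock: take it */

	if (current_order >= PAGEBLOCK_ORDER / 2 ||
	    start_type == MIGRATE_RECLAIMABLE) {
		/* Claim the block only if over half of it turned out free. */
		if (free_pages_moved >= (1 << (PAGEBLOCK_ORDER - 1)))
			return start_type;
	}
	return fallback_type;
}

int main(void)
{
	/* order-5 request, 300 of 512 pages free: block claimed (prints 1). */
	printf("%d\n", steal_decision(5, MIGRATE_RECLAIMABLE, 3, false, 300));
	return 0;
}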
/* Remove an element from the buddy allocator from the fallback list */
static inline struct page *
__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
{
- struct free_area * area;
+ struct free_area *area;
int current_order;
struct page *page;
- int migratetype, i;
+ int migratetype, new_type, i;
/* Find the largest possible block of pages in the other list */
for (current_order = MAX_ORDER-1; current_order >= order;
@@ -1037,51 +1115,19 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
struct page, lru);
area->nr_free--;
- /*
- * If breaking a large block of pages, move all free
- * pages to the preferred allocation list. If falling
- * back for a reclaimable kernel allocation, be more
- * aggressive about taking ownership of free pages
- *
- * On the other hand, never change migration
- * type of MIGRATE_CMA pageblocks nor move CMA
- * pages on different free lists. We don't
- * want unmovable pages to be allocated from
- * MIGRATE_CMA areas.
- */
- if (!is_migrate_cma(migratetype) &&
- (unlikely(current_order >= pageblock_order / 2) ||
- start_migratetype == MIGRATE_RECLAIMABLE ||
- page_group_by_mobility_disabled)) {
- int pages;
- pages = move_freepages_block(zone, page,
- start_migratetype);
-
- /* Claim the whole block if over half of it is free */
- if (pages >= (1 << (pageblock_order-1)) ||
- page_group_by_mobility_disabled)
- set_pageblock_migratetype(page,
- start_migratetype);
-
- migratetype = start_migratetype;
- }
+ new_type = try_to_steal_freepages(zone, page,
+ start_migratetype,
+ migratetype);
/* Remove the page from the freelists */
list_del(&page->lru);
rmv_page_order(page);
- /* Take ownership for orders >= pageblock_order */
- if (current_order >= pageblock_order &&
- !is_migrate_cma(migratetype))
- change_pageblock_range(page, current_order,
- start_migratetype);
-
expand(zone, page, order, current_order, area,
- is_migrate_cma(migratetype)
- ? migratetype : start_migratetype);
+ new_type);
trace_mm_page_alloc_extfrag(page, order, current_order,
- start_migratetype, migratetype);
+ start_migratetype, migratetype, new_type);
return page;
}
@@ -1179,10 +1225,12 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
unsigned long flags;
int to_drain;
+ unsigned long batch;
local_irq_save(flags);
- if (pcp->count >= pcp->batch)
- to_drain = pcp->batch;
+ batch = ACCESS_ONCE(pcp->batch);
+ if (pcp->count >= batch)
+ to_drain = batch;
else
to_drain = pcp->count;
if (to_drain > 0) {
@@ -1282,7 +1330,7 @@ void mark_free_pages(struct zone *zone)
int order, t;
struct list_head *curr;
- if (!zone->spanned_pages)
+ if (zone_is_empty(zone))
return;
spin_lock_irqsave(&zone->lock, flags);
@@ -1350,8 +1398,9 @@ void free_hot_cold_page(struct page *page, int cold)
list_add(&page->lru, &pcp->lists[migratetype]);
pcp->count++;
if (pcp->count >= pcp->high) {
- free_pcppages_bulk(zone, pcp->batch, pcp);
- pcp->count -= pcp->batch;
+ unsigned long batch = ACCESS_ONCE(pcp->batch);
+ free_pcppages_bulk(zone, batch, pcp);
+ pcp->count -= batch;
}
out:
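
pcp->batch is read once via ACCESS_ONCE() so the bulk free and the counter adjustment use the same value even if another CPU retunes the batch size concurrently (the new pcp_batch_high_lock only serializes updaters, not readers). A hedged sketch of the snapshot pattern, with a C11 relaxed atomic standing in for ACCESS_ONCE():

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long pcp_batch = 31;	/* tunable, may change at runtime */

static void drain(unsigned long *count)
{
	/* Snapshot once; reading 'pcp_batch' twice could observe two
	 * different values if a concurrent updater changes it in between. */
	unsigned long batch = atomic_load_explicit(&pcp_batch,
						   memory_order_relaxed);

	if (*count >= batch) {
		/* free_pcppages_bulk(zone, batch, pcp) would go here */
		*count -= batch;	/* same 'batch' as the free above */
	}
}

int main(void)
{
	unsigned long count = 100;

	drain(&count);
	printf("%lu\n", count);		/* 69 with the initial batch of 31 */
	return 0;
}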
@@ -1383,8 +1432,8 @@ void split_page(struct page *page, unsigned int order)
{
int i;
- VM_BUG_ON(PageCompound(page));
- VM_BUG_ON(!page_count(page));
+ VM_BUG_ON_PAGE(PageCompound(page), page);
+ VM_BUG_ON_PAGE(!page_count(page), page);
#ifdef CONFIG_KMEMCHECK
/*
@@ -1526,11 +1575,13 @@ again:
get_pageblock_migratetype(page));
}
+ __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
+
__count_zone_vm_events(PGALLOC, zone, 1 << order);
zone_statistics(preferred_zone, zone, gfp_flags);
local_irq_restore(flags);
- VM_BUG_ON(bad_range(zone, page));
+ VM_BUG_ON_PAGE(bad_range(zone, page), page);
if (prep_new_page(page, order, gfp_flags))
goto again;
return page;
@@ -1683,7 +1734,7 @@ bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
* comments in mmzone.h. Reduces cache footprint of zonelist scans
* that have to skip over a lot of full or unallowed zones.
*
- * If the zonelist cache is present in the passed in zonelist, then
+ * If the zonelist cache is present in the passed zonelist, then
* returns a pointer to the allowed node mask (either the current
 * task's mems_allowed, or node_states[N_MEMORY].)
*
@@ -1792,6 +1843,11 @@ static void zlc_clear_zones_full(struct zonelist *zonelist)
bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
}
+static bool zone_local(struct zone *local_zone, struct zone *zone)
+{
+ return local_zone->node == zone->node;
+}
+
static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
{
return node_isset(local_zone->node, zone->zone_pgdat->reclaim_nodes);
@@ -1801,7 +1857,7 @@ static void __paginginit init_zone_allows_reclaim(int nid)
{
int i;
- for_each_online_node(i)
+ for_each_node_state(i, N_MEMORY)
if (node_distance(nid, i) <= RECLAIM_DISTANCE)
node_set(i, NODE_DATA(nid)->reclaim_nodes);
else
@@ -1829,6 +1885,11 @@ static void zlc_clear_zones_full(struct zonelist *zonelist)
{
}
+static bool zone_local(struct zone *local_zone, struct zone *zone)
+{
+ return true;
+}
+
static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
{
return true;
@@ -1860,16 +1921,33 @@ get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
zonelist_scan:
/*
* Scan zonelist, looking for a zone with enough free.
- * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
+ * See also __cpuset_node_allowed_softwall() comment in kernel/cpuset.c.
*/
for_each_zone_zonelist_nodemask(zone, z, zonelist,
high_zoneidx, nodemask) {
+ unsigned long mark;
+
if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
!zlc_zone_worth_trying(zonelist, z, allowednodes))
continue;
if ((alloc_flags & ALLOC_CPUSET) &&
!cpuset_zone_allowed_softwall(zone, gfp_mask))
continue;
+ BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
+ if (unlikely(alloc_flags & ALLOC_NO_WATERMARKS))
+ goto try_this_zone;
+ /*
+ * Distribute pages in proportion to the individual
+ * zone size to ensure fair page aging. The zone a
+ * page was allocated in should have no effect on the
+ * time the page has in memory before being reclaimed.
+ */
+ if (alloc_flags & ALLOC_FAIR) {
+ if (!zone_local(preferred_zone, zone))
+ continue;
+ if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0)
+ continue;
+ }
/*
* When allocating a page cache page for writing, we
* want to get it from a zone that is within its dirty
@@ -1900,16 +1978,11 @@ zonelist_scan:
(gfp_mask & __GFP_WRITE) && !zone_dirty_ok(zone))
goto this_zone_full;
- BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
- if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
- unsigned long mark;
+ mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
+ if (!zone_watermark_ok(zone, order, mark,
+ classzone_idx, alloc_flags)) {
int ret;
- mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
- if (zone_watermark_ok(zone, order, mark,
- classzone_idx, alloc_flags))
- goto try_this_zone;
-
if (IS_ENABLED(CONFIG_NUMA) &&
!did_zlc_setup && nr_online_nodes > 1) {
/*
@@ -2021,13 +2094,6 @@ void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
return;
/*
- * Walking all memory to count page types is very expensive and should
- * be inhibited in non-blockable contexts.
- */
- if (!(gfp_mask & __GFP_WAIT))
- filter |= SHOW_MEM_FILTER_PAGE_COUNT;
-
- /*
* This documents exceptions given to allocations in certain
* contexts that are allowed to allocate outside current's set
* of allowed nodes.
@@ -2191,10 +2257,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
preferred_zone, migratetype);
if (page) {
preferred_zone->compact_blockskip_flush = false;
- preferred_zone->compact_considered = 0;
- preferred_zone->compact_defer_shift = 0;
- if (order >= preferred_zone->compact_order_failed)
- preferred_zone->compact_order_failed = order + 1;
+ compaction_defer_reset(preferred_zone, order, true);
count_vm_event(COMPACTSUCCESS);
return page;
}
@@ -2321,16 +2384,38 @@ __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
return page;
}
-static inline
-void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
- enum zone_type high_zoneidx,
- enum zone_type classzone_idx)
+static void reset_alloc_batches(struct zonelist *zonelist,
+ enum zone_type high_zoneidx,
+ struct zone *preferred_zone)
+{
+ struct zoneref *z;
+ struct zone *zone;
+
+ for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
+ /*
+ * Only reset the batches of zones that were actually
+ * considered in the fairness pass, we don't want to
+ * trash fairness information for zones that are not
+ * actually part of this zonelist's round-robin cycle.
+ */
+ if (!zone_local(preferred_zone, zone))
+ continue;
+ mod_zone_page_state(zone, NR_ALLOC_BATCH,
+ high_wmark_pages(zone) - low_wmark_pages(zone) -
+ atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
+ }
+}
+
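Together with the ALLOC_FAIR flag added to the fast path, reset_alloc_batches() implements round-robin page aging: each local zone gets a batch roughly equal to its high-minus-low watermark gap, the fair pass skips zones whose batch is exhausted, and once every local batch is drained the batches are refilled and the allocation retries without fairness. A toy userspace model of the accounting (zone counts and batch sizes are invented):

#include <stdio.h>

#define NR_ZONES 2

/* Toy zones: 'batch' plays the role of NR_ALLOC_BATCH. */
static long batch[NR_ZONES]      = { 3, 2 };
static long batch_size[NR_ZONES] = { 3, 2 };	/* ~ high_wmark - low_wmark */
static long allocated[NR_ZONES];

static int alloc_page(void)
{
	for (int pass = 0; pass < 2; pass++) {
		int fair = (pass == 0);

		for (int z = 0; z < NR_ZONES; z++) {
			if (fair && batch[z] <= 0)
				continue;	/* fair pass: zone used up its share */
			batch[z]--;
			allocated[z]++;
			return z;
		}
		/* All local batches exhausted: reset them and retry unfairly. */
		for (int z = 0; z < NR_ZONES; z++)
			batch[z] = batch_size[z];
	}
	return -1;
}

int main(void)
{
	for (int i = 0; i < 10; i++)
		alloc_page();
	/* Allocations spread in proportion to each zone's batch: 6 and 4. */
	printf("%ld %ld\n", allocated[0], allocated[1]);
	return 0;
}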
+static void wake_all_kswapds(unsigned int order,
+ struct zonelist *zonelist,
+ enum zone_type high_zoneidx,
+ struct zone *preferred_zone)
{
struct zoneref *z;
struct zone *zone;
for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
- wakeup_kswapd(zone, order, classzone_idx);
+ wakeup_kswapd(zone, order, zone_idx(preferred_zone));
}
static inline int
@@ -2422,13 +2507,12 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
* over allocated.
*/
if (IS_ENABLED(CONFIG_NUMA) &&
- (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
+ (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
goto nopage;
restart:
if (!(gfp_mask & __GFP_NO_KSWAPD))
- wake_all_kswapd(order, zonelist, high_zoneidx,
- zone_idx(preferred_zone));
+ wake_all_kswapds(order, zonelist, high_zoneidx, preferred_zone);
/*
* OK, we're below the kswapd watermark and have kicked background
@@ -2471,8 +2555,15 @@ rebalance:
}
/* Atomic allocations - we can't balance anything */
- if (!wait)
+ if (!wait) {
+ /*
+ * All existing users of the deprecated __GFP_NOFAIL are
+ * blockable, so warn of any new users that actually allow this
+ * type of allocation to fail.
+ */
+ WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL);
goto nopage;
+ }
/* Avoid recursion of direct reclaim */
if (current->flags & PF_MEMALLOC)
@@ -2522,7 +2613,7 @@ rebalance:
* running out of options and have to consider going OOM
*/
if (!did_some_progress) {
- if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
+ if (oom_gfp_allowed(gfp_mask)) {
if (oom_killer_disabled)
goto nopage;
/* Coredumps can quickly deplete all memory reserves */
@@ -2605,7 +2696,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
struct page *page = NULL;
int migratetype = allocflags_to_migratetype(gfp_mask);
unsigned int cpuset_mems_cookie;
- int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET;
+ int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR;
struct mem_cgroup *memcg = NULL;
gfp_mask &= gfp_allowed_mask;
@@ -2633,7 +2724,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
return NULL;
retry_cpuset:
- cpuset_mems_cookie = get_mems_allowed();
+ cpuset_mems_cookie = read_mems_allowed_begin();
/* The preferred zone is used for statistics later */
first_zones_zonelist(zonelist, high_zoneidx,
@@ -2646,12 +2737,29 @@ retry_cpuset:
if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
alloc_flags |= ALLOC_CMA;
#endif
+retry:
/* First allocation attempt */
page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
zonelist, high_zoneidx, alloc_flags,
preferred_zone, migratetype);
if (unlikely(!page)) {
/*
+ * The first pass makes sure allocations are spread
+ * fairly within the local node. However, the local
+ * node might have free pages left after the fairness
+ * batches are exhausted, and remote zones haven't
+ * even been considered yet. Try once more without
+ * fairness, and include remote zones now, before
+ * entering the slowpath and waking kswapd: prefer
+ * spilling to a remote zone over swapping locally.
+ */
+ if (alloc_flags & ALLOC_FAIR) {
+ reset_alloc_batches(zonelist, high_zoneidx,
+ preferred_zone);
+ alloc_flags &= ~ALLOC_FAIR;
+ goto retry;
+ }
+ /*
* Runtime PM, block IO and its error handling path
* can deadlock because I/O on the device might not
* complete.
@@ -2671,7 +2779,7 @@ out:
* the mask is being updated. If a page allocation is about to fail,
* check if the cpuset changed during allocation and if so, retry.
*/
- if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
+ if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
goto retry_cpuset;
memcg_kmem_commit_charge(page, memcg, order);
@@ -2839,7 +2947,7 @@ EXPORT_SYMBOL(free_pages_exact);
 * nr_free_zone_pages() counts the number of pages which are beyond the
* high watermark within all zones at or below a given zone index. For each
* zone, the number of pages is calculated as:
- * present_pages - high_pages
+ * managed_pages - high_pages
*/
static unsigned long nr_free_zone_pages(int offset)
{
@@ -2906,9 +3014,13 @@ EXPORT_SYMBOL(si_meminfo);
#ifdef CONFIG_NUMA
void si_meminfo_node(struct sysinfo *val, int nid)
{
+ int zone_type; /* needs to be signed */
+ unsigned long managed_pages = 0;
pg_data_t *pgdat = NODE_DATA(nid);
- val->totalram = pgdat->node_present_pages;
+ for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
+ managed_pages += pgdat->node_zones[zone_type].managed_pages;
+ val->totalram = managed_pages;
val->freeram = node_page_state(nid, NR_FREE_PAGES);
#ifdef CONFIG_HIGHMEM
val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].managed_pages;
@@ -2935,9 +3047,9 @@ bool skip_free_areas_node(unsigned int flags, int nid)
goto out;
do {
- cpuset_mems_cookie = get_mems_allowed();
+ cpuset_mems_cookie = read_mems_allowed_begin();
ret = !node_isset(nid, cpuset_current_mems_allowed);
- } while (!put_mems_allowed(cpuset_mems_cookie));
+ } while (read_mems_allowed_retry(cpuset_mems_cookie));
out:
return ret;
}
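read_mems_allowed_begin()/read_mems_allowed_retry() replace the old get/put pair with a seqcount-style snapshot-and-retry; a hedged sketch of the caller-side pattern using the generic seqcount API (mems_seq and allowed_mask are illustrative stand-ins for the cpuset internals):

	#include <linux/seqlock.h>
	#include <linux/nodemask.h>

	static seqcount_t mems_seq;	/* seqcount_init() during setup */
	static nodemask_t allowed_mask;

	static bool toy_node_allowed(int nid)
	{
		unsigned int seq;
		bool ret;

		do {
			seq = read_seqcount_begin(&mems_seq);	/* snapshot */
			ret = node_isset(nid, allowed_mask);	/* lockless read */
		} while (read_seqcount_retry(&mems_seq, seq));	/* redo on change */

		return ret;
	}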
@@ -3091,7 +3203,7 @@ void show_free_areas(unsigned int filter)
K(zone_page_state(zone, NR_FREE_CMA_PAGES)),
K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
zone->pages_scanned,
- (zone->all_unreclaimable ? "yes" : "no")
+ (!zone_reclaimable(zone) ? "yes" : "no")
);
printk("lowmem_reserve[]:");
for (i = 0; i < MAX_NR_ZONES; i++)
@@ -3100,7 +3212,7 @@ void show_free_areas(unsigned int filter)
}
for_each_populated_zone(zone) {
- unsigned long nr[MAX_ORDER], flags, order, total = 0;
+ unsigned long nr[MAX_ORDER], flags, order, total = 0;
unsigned char types[MAX_ORDER];
if (skip_free_areas_node(filter, zone_to_nid(zone)))
@@ -3150,12 +3262,10 @@ static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
* Add all populated zones of a node to the zonelist.
*/
static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
- int nr_zones, enum zone_type zone_type)
+ int nr_zones)
{
struct zone *zone;
-
- BUG_ON(zone_type >= MAX_NR_ZONES);
- zone_type++;
+ enum zone_type zone_type = MAX_NR_ZONES;
do {
zone_type--;
@@ -3165,8 +3275,8 @@ static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
&zonelist->_zonerefs[nr_zones++]);
check_highest_zone(zone_type);
}
-
} while (zone_type);
+
return nr_zones;
}
@@ -3250,18 +3360,25 @@ int numa_zonelist_order_handler(ctl_table *table, int write,
static DEFINE_MUTEX(zl_order_mutex);
mutex_lock(&zl_order_mutex);
- if (write)
- strcpy(saved_string, (char*)table->data);
+ if (write) {
+ if (strlen((char *)table->data) >= NUMA_ZONELIST_ORDER_LEN) {
+ ret = -EINVAL;
+ goto out;
+ }
+ strcpy(saved_string, (char *)table->data);
+ }
ret = proc_dostring(table, write, buffer, length, ppos);
if (ret)
goto out;
if (write) {
int oldval = user_zonelist_order;
- if (__parse_numa_zonelist_order((char*)table->data)) {
+
+ ret = __parse_numa_zonelist_order((char *)table->data);
+ if (ret) {
/*
* bogus value. restore saved string
*/
- strncpy((char*)table->data, saved_string,
+ strncpy((char *)table->data, saved_string,
NUMA_ZONELIST_ORDER_LEN);
user_zonelist_order = oldval;
} else if (oldval != user_zonelist_order) {
@@ -3353,8 +3470,7 @@ static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
zonelist = &pgdat->node_zonelists[0];
for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
;
- j = build_zonelists_node(NODE_DATA(node), zonelist, j,
- MAX_NR_ZONES - 1);
+ j = build_zonelists_node(NODE_DATA(node), zonelist, j);
zonelist->_zonerefs[j].zone = NULL;
zonelist->_zonerefs[j].zone_idx = 0;
}
@@ -3368,7 +3484,7 @@ static void build_thisnode_zonelists(pg_data_t *pgdat)
struct zonelist *zonelist;
zonelist = &pgdat->node_zonelists[1];
- j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
+ j = build_zonelists_node(pgdat, zonelist, 0);
zonelist->_zonerefs[j].zone = NULL;
zonelist->_zonerefs[j].zone_idx = 0;
}
@@ -3408,11 +3524,11 @@ static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
static int default_zonelist_order(void)
{
int nid, zone_type;
- unsigned long low_kmem_size,total_size;
+ unsigned long low_kmem_size, total_size;
struct zone *z;
int average_size;
/*
- * ZONE_DMA and ZONE_DMA32 can be very small area in the system.
+ * ZONE_DMA and ZONE_DMA32 can be very small area in the system.
* If they are really small and used heavily, the system can fall
* into OOM very easily.
 * This function detects ZONE_DMA/DMA32 size and configures zone order.
@@ -3425,8 +3541,8 @@ static int default_zonelist_order(void)
z = &NODE_DATA(nid)->node_zones[zone_type];
if (populated_zone(z)) {
if (zone_type < ZONE_NORMAL)
- low_kmem_size += z->present_pages;
- total_size += z->present_pages;
+ low_kmem_size += z->managed_pages;
+ total_size += z->managed_pages;
} else if (zone_type == ZONE_NORMAL) {
/*
* If any node has only lowmem, then node order
@@ -3444,9 +3560,9 @@ static int default_zonelist_order(void)
return ZONELIST_ORDER_NODE;
/*
* look into each node's config.
- * If there is a node whose DMA/DMA32 memory is very big area on
- * local memory, NODE_ORDER may be suitable.
- */
+ * If there is a node whose DMA/DMA32 memory is very big area on
+ * local memory, NODE_ORDER may be suitable.
+ */
average_size = total_size /
(nodes_weight(node_states[N_MEMORY]) + 1);
for_each_online_node(nid) {
@@ -3576,7 +3692,7 @@ static void build_zonelists(pg_data_t *pgdat)
local_node = pgdat->node_id;
zonelist = &pgdat->node_zonelists[0];
- j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
+ j = build_zonelists_node(pgdat, zonelist, 0);
/*
* Now we build the zonelist so that it contains the zones
@@ -3589,14 +3705,12 @@ static void build_zonelists(pg_data_t *pgdat)
for (node = local_node + 1; node < MAX_NUMNODES; node++) {
if (!node_online(node))
continue;
- j = build_zonelists_node(NODE_DATA(node), zonelist, j,
- MAX_NR_ZONES - 1);
+ j = build_zonelists_node(NODE_DATA(node), zonelist, j);
}
for (node = 0; node < local_node; node++) {
if (!node_online(node))
continue;
- j = build_zonelists_node(NODE_DATA(node), zonelist, j,
- MAX_NR_ZONES - 1);
+ j = build_zonelists_node(NODE_DATA(node), zonelist, j);
}
zonelist->_zonerefs[j].zone = NULL;
@@ -3705,12 +3819,12 @@ void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone)
mminit_verify_zonelist();
cpuset_init_current_mems_allowed();
} else {
- /* we have to stop all cpus to guarantee there is no user
- of zonelist */
#ifdef CONFIG_MEMORY_HOTPLUG
if (zone)
setup_zone_pageset(zone);
#endif
+ /* we have to stop all cpus to guarantee there is no user
+ of zonelist */
stop_machine(__build_all_zonelists, pgdat, NULL);
/* cpuset refresh routine should be here */
}
@@ -3804,8 +3918,6 @@ static inline unsigned long wait_table_bits(unsigned long size)
return ffz(~size);
}
-#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
-
/*
* Check if a pageblock contains reserved pages
*/
@@ -3833,6 +3945,7 @@ static void setup_zone_migrate_reserve(struct zone *zone)
struct page *page;
unsigned long block_migratetype;
int reserve;
+ int old_reserve;
/*
* Get the start pfn, end pfn and the number of blocks to reserve
@@ -3854,6 +3967,12 @@ static void setup_zone_migrate_reserve(struct zone *zone)
* future allocation of hugepages at runtime.
*/
reserve = min(2, reserve);
+ old_reserve = zone->nr_migrate_reserve_block;
+
+	/* On memory hot-add, we almost always need to do nothing */
+ if (reserve == old_reserve)
+ return;
+ zone->nr_migrate_reserve_block = reserve;
for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
if (!pfn_valid(pfn))
@@ -3891,6 +4010,12 @@ static void setup_zone_migrate_reserve(struct zone *zone)
reserve--;
continue;
}
+ } else if (!old_reserve) {
+ /*
+ * At boot time we don't need to scan the whole zone
+ * for turning off MIGRATE_RESERVE.
+ */
+ break;
}
/*
@@ -3938,7 +4063,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
mminit_verify_page_links(page, zone, nid, pfn);
init_page_count(page);
page_mapcount_reset(page);
- page_nid_reset_last(page);
+ page_cpupid_reset_last(page);
SetPageReserved(page);
/*
* Mark the block movable so that blocks are reserved for
@@ -4032,7 +4157,40 @@ static int __meminit zone_batchsize(struct zone *zone)
#endif
}
-static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
+/*
+ * pcp->high and pcp->batch values are related and dependent on one another:
+ * ->batch must never be higher then ->high.
+ * The following function updates them in a safe manner without read side
+ * locking.
+ *
+ * Any new users of pcp->batch and pcp->high should ensure they can cope with
+ * those fields changing asynchronously (according to the above rule).
+ *
+ * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
+ * outside of boot time (or some other assurance that no concurrent updaters
+ * exist).
+ */
+static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
+ unsigned long batch)
+{
+ /* start with a fail safe value for batch */
+ pcp->batch = 1;
+ smp_wmb();
+
+ /* Update high, then batch, in order */
+ pcp->high = high;
+ smp_wmb();
+
+ pcp->batch = batch;
+}
+
+/* a companion to pageset_set_high() */
+static void pageset_set_batch(struct per_cpu_pageset *p, unsigned long batch)
+{
+ pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch));
+}
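pageset_update() deliberately drops ->batch to 1, publishes the new ->high, and only then installs the real ->batch, so a racing allocator never sees a batch larger than high; a sketch of a reader written to tolerate the asynchronous update (illustrative only, using the era's ACCESS_ONCE() for the loads):

	#include <linux/compiler.h>
	#include <linux/kernel.h>
	#include <linux/mmzone.h>

	/* snapshot both fields and clamp instead of assuming the pair
	 * is consistent; matches the update rule documented above.
	 */
	static int toy_pcp_batch(struct per_cpu_pages *pcp)
	{
		int high  = ACCESS_ONCE(pcp->high);
		int batch = ACCESS_ONCE(pcp->batch);

		return min(batch, high);
	}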
+
+static void pageset_init(struct per_cpu_pageset *p)
{
struct per_cpu_pages *pcp;
int migratetype;
@@ -4041,45 +4199,55 @@ static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
pcp = &p->pcp;
pcp->count = 0;
- pcp->high = 6 * batch;
- pcp->batch = max(1UL, 1 * batch);
for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
INIT_LIST_HEAD(&pcp->lists[migratetype]);
}
+static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
+{
+ pageset_init(p);
+ pageset_set_batch(p, batch);
+}
+
/*
- * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist
+ * pageset_set_high() sets the high water mark for hot per_cpu_pagelist
* to the value high for the pageset p.
*/
-
-static void setup_pagelist_highmark(struct per_cpu_pageset *p,
+static void pageset_set_high(struct per_cpu_pageset *p,
unsigned long high)
{
- struct per_cpu_pages *pcp;
+ unsigned long batch = max(1UL, high / 4);
+ if ((high / 4) > (PAGE_SHIFT * 8))
+ batch = PAGE_SHIFT * 8;
- pcp = &p->pcp;
- pcp->high = high;
- pcp->batch = max(1UL, high/4);
- if ((high/4) > (PAGE_SHIFT * 8))
- pcp->batch = PAGE_SHIFT * 8;
+ pageset_update(&p->pcp, high, batch);
}
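For illustration: if percpu_pagelist_fraction drives high to 1024 pages, high / 4 is 256, which exceeds the PAGE_SHIFT * 8 cap (96 on a 4 KiB-page build), so batch is clamped to 96; for high = 200 the batch would simply be 50.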
-static void __meminit setup_zone_pageset(struct zone *zone)
+static void __meminit pageset_set_high_and_batch(struct zone *zone,
+ struct per_cpu_pageset *pcp)
{
- int cpu;
-
- zone->pageset = alloc_percpu(struct per_cpu_pageset);
+ if (percpu_pagelist_fraction)
+ pageset_set_high(pcp,
+ (zone->managed_pages /
+ percpu_pagelist_fraction));
+ else
+ pageset_set_batch(pcp, zone_batchsize(zone));
+}
- for_each_possible_cpu(cpu) {
- struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
+static void __meminit zone_pageset_init(struct zone *zone, int cpu)
+{
+ struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
- setup_pageset(pcp, zone_batchsize(zone));
+ pageset_init(pcp);
+ pageset_set_high_and_batch(zone, pcp);
+}
- if (percpu_pagelist_fraction)
- setup_pagelist_highmark(pcp,
- (zone->managed_pages /
- percpu_pagelist_fraction));
- }
+static void __meminit setup_zone_pageset(struct zone *zone)
+{
+ int cpu;
+ zone->pageset = alloc_percpu(struct per_cpu_pageset);
+ for_each_possible_cpu(cpu)
+ zone_pageset_init(zone, cpu);
}
/*
@@ -4098,7 +4266,6 @@ static noinline __init_refok
int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
{
int i;
- struct pglist_data *pgdat = zone->zone_pgdat;
size_t alloc_size;
/*
@@ -4114,7 +4281,8 @@ int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
if (!slab_is_available()) {
zone->wait_table = (wait_queue_head_t *)
- alloc_bootmem_node_nopanic(pgdat, alloc_size);
+ memblock_virt_alloc_node_nopanic(
+ alloc_size, zone->zone_pgdat->node_id);
} else {
/*
* This case means that a zone whose size was 0 gets new memory
@@ -4131,7 +4299,7 @@ int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
if (!zone->wait_table)
return -ENOMEM;
- for(i = 0; i < zone->wait_table_hash_nr_entries; ++i)
+ for (i = 0; i < zone->wait_table_hash_nr_entries; ++i)
init_waitqueue_head(zone->wait_table + i);
return 0;
@@ -4146,7 +4314,7 @@ static __meminit void zone_pcp_init(struct zone *zone)
*/
zone->pageset = &boot_pageset;
- if (zone->present_pages)
+ if (populated_zone(zone))
printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n",
zone->name, zone->present_pages,
zone_batchsize(zone));
@@ -4188,7 +4356,7 @@ int __meminit init_currently_empty_zone(struct zone *zone,
int __meminit __early_pfn_to_nid(unsigned long pfn)
{
unsigned long start_pfn, end_pfn;
- int i, nid;
+ int nid;
/*
* NOTE: The following SMP-unsafe globals are only used early in boot
* when the kernel is running single-threaded.
@@ -4199,15 +4367,14 @@ int __meminit __early_pfn_to_nid(unsigned long pfn)
if (last_start_pfn <= pfn && pfn < last_end_pfn)
return last_nid;
- for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
- if (start_pfn <= pfn && pfn < end_pfn) {
- last_start_pfn = start_pfn;
- last_end_pfn = end_pfn;
- last_nid = nid;
- return nid;
- }
- /* This is a memory hole */
- return -1;
+ nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
+ if (nid != -1) {
+ last_start_pfn = start_pfn;
+ last_end_pfn = end_pfn;
+ last_nid = nid;
+ }
+
+ return nid;
}
#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
@@ -4235,13 +4402,14 @@ bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
#endif
/**
- * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
+ * free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range
* @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
- * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
+ * @max_low_pfn: The highest PFN that will be passed to memblock_free_early_nid
*
* If an architecture guarantees that all ranges registered with
 * add_active_ranges() contain no holes and may be freed,
- * this function may be used instead of calling free_bootmem() manually.
+ * this function may be used instead of calling memblock_free_early_nid()
+ * manually.
*/
void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
{
@@ -4253,9 +4421,9 @@ void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
end_pfn = min(end_pfn, max_low_pfn);
if (start_pfn < end_pfn)
- free_bootmem_node(NODE_DATA(this_nid),
- PFN_PHYS(start_pfn),
- (end_pfn - start_pfn) << PAGE_SHIFT);
+ memblock_free_early_nid(PFN_PHYS(start_pfn),
+ (end_pfn - start_pfn) << PAGE_SHIFT,
+ this_nid);
}
}
@@ -4368,13 +4536,13 @@ static void __meminit adjust_zone_range_for_zone_movable(int nid,
*/
static unsigned long __meminit zone_spanned_pages_in_node(int nid,
unsigned long zone_type,
+ unsigned long node_start_pfn,
+ unsigned long node_end_pfn,
unsigned long *ignored)
{
- unsigned long node_start_pfn, node_end_pfn;
unsigned long zone_start_pfn, zone_end_pfn;
- /* Get the start and end of the node and zone */
- get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
+ /* Get the start and end of the zone */
zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
adjust_zone_range_for_zone_movable(nid, zone_type,
@@ -4429,14 +4597,14 @@ unsigned long __init absent_pages_in_range(unsigned long start_pfn,
/* Return the number of page frames in holes in a zone on a node */
static unsigned long __meminit zone_absent_pages_in_node(int nid,
unsigned long zone_type,
+ unsigned long node_start_pfn,
+ unsigned long node_end_pfn,
unsigned long *ignored)
{
unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
- unsigned long node_start_pfn, node_end_pfn;
unsigned long zone_start_pfn, zone_end_pfn;
- get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
@@ -4449,6 +4617,8 @@ static unsigned long __meminit zone_absent_pages_in_node(int nid,
#else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
unsigned long zone_type,
+ unsigned long node_start_pfn,
+ unsigned long node_end_pfn,
unsigned long *zones_size)
{
return zones_size[zone_type];
@@ -4456,6 +4626,8 @@ static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
unsigned long zone_type,
+ unsigned long node_start_pfn,
+ unsigned long node_end_pfn,
unsigned long *zholes_size)
{
if (!zholes_size)
@@ -4467,21 +4639,27 @@ static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
- unsigned long *zones_size, unsigned long *zholes_size)
+ unsigned long node_start_pfn,
+ unsigned long node_end_pfn,
+ unsigned long *zones_size,
+ unsigned long *zholes_size)
{
unsigned long realtotalpages, totalpages = 0;
enum zone_type i;
for (i = 0; i < MAX_NR_ZONES; i++)
totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
- zones_size);
+ node_start_pfn,
+ node_end_pfn,
+ zones_size);
pgdat->node_spanned_pages = totalpages;
realtotalpages = totalpages;
for (i = 0; i < MAX_NR_ZONES; i++)
realtotalpages -=
zone_absent_pages_in_node(pgdat->node_id, i,
- zholes_size);
+ node_start_pfn, node_end_pfn,
+ zholes_size);
pgdat->node_present_pages = realtotalpages;
printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
realtotalpages);
@@ -4516,8 +4694,9 @@ static void __init setup_usemap(struct pglist_data *pgdat,
unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
zone->pageblock_flags = NULL;
if (usemapsize)
- zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat,
- usemapsize);
+ zone->pageblock_flags =
+ memblock_virt_alloc_node_nopanic(usemapsize,
+ pgdat->node_id);
}
#else
static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
@@ -4527,7 +4706,7 @@ static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
-void __init set_pageblock_order(void)
+void __paginginit set_pageblock_order(void)
{
unsigned int order;
@@ -4555,7 +4734,7 @@ void __init set_pageblock_order(void)
* include/linux/pageblock-flags.h for the values of pageblock_order based on
* the kernel config
*/
-void __init set_pageblock_order(void)
+void __paginginit set_pageblock_order(void)
{
}
@@ -4590,6 +4769,7 @@ static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages,
* NOTE: pgdat should get zeroed by caller.
*/
static void __paginginit free_area_init_core(struct pglist_data *pgdat,
+ unsigned long node_start_pfn, unsigned long node_end_pfn,
unsigned long *zones_size, unsigned long *zholes_size)
{
enum zone_type j;
@@ -4611,8 +4791,11 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
struct zone *zone = pgdat->node_zones + j;
unsigned long size, realsize, freesize, memmap_pages;
- size = zone_spanned_pages_in_node(nid, j, zones_size);
+ size = zone_spanned_pages_in_node(nid, j, node_start_pfn,
+ node_end_pfn, zones_size);
realsize = freesize = size - zone_absent_pages_in_node(nid, j,
+ node_start_pfn,
+ node_end_pfn,
zholes_size);
/*
@@ -4665,8 +4848,11 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
spin_lock_init(&zone->lru_lock);
zone_seqlock_init(zone);
zone->zone_pgdat = pgdat;
-
zone_pcp_init(zone);
+
+ /* For bootup, initialized properly in watermark setup */
+ mod_zone_page_state(zone, NR_ALLOC_BATCH, zone->managed_pages);
+
lruvec_init(&zone->lruvec);
if (!size)
continue;
@@ -4704,7 +4890,8 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
size = (end - start) * sizeof(struct page);
map = alloc_remap(pgdat->node_id, size);
if (!map)
- map = alloc_bootmem_node_nopanic(pgdat, size);
+ map = memblock_virt_alloc_node_nopanic(size,
+ pgdat->node_id);
pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
}
#ifndef CONFIG_NEED_MULTIPLE_NODES
@@ -4726,14 +4913,21 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
unsigned long node_start_pfn, unsigned long *zholes_size)
{
pg_data_t *pgdat = NODE_DATA(nid);
+ unsigned long start_pfn = 0;
+ unsigned long end_pfn = 0;
/* pg_data_t should be reset to zero when it's allocated */
WARN_ON(pgdat->nr_zones || pgdat->classzone_idx);
pgdat->node_id = nid;
pgdat->node_start_pfn = node_start_pfn;
- init_zone_allows_reclaim(nid);
- calculate_node_totalpages(pgdat, zones_size, zholes_size);
+ if (node_state(nid, N_MEMORY))
+ init_zone_allows_reclaim(nid);
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+ get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
+#endif
+ calculate_node_totalpages(pgdat, start_pfn, end_pfn,
+ zones_size, zholes_size);
alloc_node_mem_map(pgdat);
#ifdef CONFIG_FLAT_NODE_MEM_MAP
@@ -4742,7 +4936,8 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
(unsigned long)pgdat->node_mem_map);
#endif
- free_area_init_core(pgdat, zones_size, zholes_size);
+ free_area_init_core(pgdat, start_pfn, end_pfn,
+ zones_size, zholes_size);
}
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
@@ -4860,7 +5055,7 @@ static unsigned long __init early_calculate_totalpages(void)
if (pages)
node_set_state(nid, N_MEMORY);
}
- return totalpages;
+ return totalpages;
}
/*
@@ -4878,9 +5073,33 @@ static void __init find_zone_movable_pfns_for_nodes(void)
nodemask_t saved_node_state = node_states[N_MEMORY];
unsigned long totalpages = early_calculate_totalpages();
int usable_nodes = nodes_weight(node_states[N_MEMORY]);
+ struct memblock_region *r;
+
+ /* Need to find movable_zone earlier when movable_node is specified. */
+ find_usable_zone_for_movable();
/*
- * If movablecore was specified, calculate what size of
+ * If movable_node is specified, ignore kernelcore and movablecore
+ * options.
+ */
+ if (movable_node_is_enabled()) {
+ for_each_memblock(memory, r) {
+ if (!memblock_is_hotpluggable(r))
+ continue;
+
+ nid = r->nid;
+
+ usable_startpfn = PFN_DOWN(r->base);
+ zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
+ min(usable_startpfn, zone_movable_pfn[nid]) :
+ usable_startpfn;
+ }
+
+ goto out2;
+ }
+
+ /*
+ * If movablecore=nn[KMG] was specified, calculate what size of
* kernelcore that corresponds so that memory usable for
* any allocation type is evenly spread. If both kernelcore
* and movablecore are specified, then the value of kernelcore
@@ -4906,7 +5125,6 @@ static void __init find_zone_movable_pfns_for_nodes(void)
goto out;
/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
- find_usable_zone_for_movable();
usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
restart:
@@ -4977,7 +5195,7 @@ restart:
/*
* Some kernelcore has been met, update counts and
* break if the kernelcore for this node has been
- * satisified
+ * satisfied
*/
required_kernelcore -= min(required_kernelcore,
size_pages);
@@ -4991,12 +5209,13 @@ restart:
* If there is still required_kernelcore, we do another pass with one
* less node in the count. This will push zone_movable_pfn[nid] further
* along on the nodes that still have memory until kernelcore is
- * satisified
+ * satisfied
*/
usable_nodes--;
if (usable_nodes && required_kernelcore > usable_nodes)
goto restart;
+out2:
/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
for (nid = 0; nid < MAX_NUMNODES; nid++)
zone_movable_pfn[nid] =
@@ -5017,7 +5236,7 @@ static void check_for_memory(pg_data_t *pgdat, int nid)
for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
struct zone *zone = &pgdat->node_zones[zone_type];
- if (zone->present_pages) {
+ if (populated_zone(zone)) {
node_set_state(nid, N_HIGH_MEMORY);
if (N_NORMAL_MEMORY != N_HIGH_MEMORY &&
zone_type <= ZONE_NORMAL)
@@ -5150,35 +5369,103 @@ early_param("movablecore", cmdline_parse_movablecore);
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
-unsigned long free_reserved_area(unsigned long start, unsigned long end,
- int poison, char *s)
+void adjust_managed_page_count(struct page *page, long count)
+{
+ spin_lock(&managed_page_count_lock);
+ page_zone(page)->managed_pages += count;
+ totalram_pages += count;
+#ifdef CONFIG_HIGHMEM
+ if (PageHighMem(page))
+ totalhigh_pages += count;
+#endif
+ spin_unlock(&managed_page_count_lock);
+}
+EXPORT_SYMBOL(adjust_managed_page_count);
+
+unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
{
- unsigned long pages, pos;
+ void *pos;
+ unsigned long pages = 0;
- pos = start = PAGE_ALIGN(start);
- end &= PAGE_MASK;
- for (pages = 0; pos < end; pos += PAGE_SIZE, pages++) {
- if (poison)
- memset((void *)pos, poison, PAGE_SIZE);
- free_reserved_page(virt_to_page((void *)pos));
+ start = (void *)PAGE_ALIGN((unsigned long)start);
+ end = (void *)((unsigned long)end & PAGE_MASK);
+ for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
+ if ((unsigned int)poison <= 0xFF)
+ memset(pos, poison, PAGE_SIZE);
+ free_reserved_page(virt_to_page(pos));
}
if (pages && s)
- pr_info("Freeing %s memory: %ldK (%lx - %lx)\n",
+ pr_info("Freeing %s memory: %ldK (%p - %p)\n",
s, pages << (PAGE_SHIFT - 10), start, end);
return pages;
}
+EXPORT_SYMBOL(free_reserved_area);
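With the new void-pointer signature, a typical caller hands free_reserved_area() the bounds of a reserved section plus a poison byte; a sketch along the lines of what the generic free_initmem_default() helper does (the wrapper name here is illustrative):

	#include <linux/mm.h>
	#include <linux/poison.h>
	#include <asm/sections.h>

	/* sketch: return the kernel's __init sections to the buddy
	 * allocator, scribbling POISON_FREE_INITMEM over them first.
	 */
	static void toy_free_initmem(void)
	{
		free_reserved_area(&__init_begin, &__init_end,
				   POISON_FREE_INITMEM, "unused kernel");
	}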
#ifdef CONFIG_HIGHMEM
void free_highmem_page(struct page *page)
{
__free_reserved_page(page);
totalram_pages++;
+ page_zone(page)->managed_pages++;
totalhigh_pages++;
}
#endif
+
+void __init mem_init_print_info(const char *str)
+{
+ unsigned long physpages, codesize, datasize, rosize, bss_size;
+ unsigned long init_code_size, init_data_size;
+
+ physpages = get_num_physpages();
+ codesize = _etext - _stext;
+ datasize = _edata - _sdata;
+ rosize = __end_rodata - __start_rodata;
+ bss_size = __bss_stop - __bss_start;
+ init_data_size = __init_end - __init_begin;
+ init_code_size = _einittext - _sinittext;
+
+ /*
+ * Detect special cases and adjust section sizes accordingly:
+ * 1) .init.* may be embedded into .data sections
+ * 2) .init.text.* may be out of [__init_begin, __init_end],
+ * please refer to arch/tile/kernel/vmlinux.lds.S.
+ * 3) .rodata.* may be embedded into .text or .data sections.
+ */
+#define adj_init_size(start, end, size, pos, adj) \
+ do { \
+ if (start <= pos && pos < end && size > adj) \
+ size -= adj; \
+ } while (0)
+
+ adj_init_size(__init_begin, __init_end, init_data_size,
+ _sinittext, init_code_size);
+ adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
+ adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
+ adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
+ adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
+
+#undef adj_init_size
+
+ printk("Memory: %luK/%luK available "
+ "(%luK kernel code, %luK rwdata, %luK rodata, "
+ "%luK init, %luK bss, %luK reserved"
+#ifdef CONFIG_HIGHMEM
+ ", %luK highmem"
+#endif
+ "%s%s)\n",
+ nr_free_pages() << (PAGE_SHIFT-10), physpages << (PAGE_SHIFT-10),
+ codesize >> 10, datasize >> 10, rosize >> 10,
+ (init_data_size + init_code_size) >> 10, bss_size >> 10,
+ (physpages - totalram_pages) << (PAGE_SHIFT-10),
+#ifdef CONFIG_HIGHMEM
+ totalhigh_pages << (PAGE_SHIFT-10),
+#endif
+ str ? ", " : "", str ? str : "");
+}
+
/**
* set_dma_reserve - set the specified number of pages reserved in the first zone
* @new_dma_reserve: The number of pages to mark reserved
@@ -5225,7 +5512,7 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
* This is only okay since the processor is dead and cannot
* race with what we are doing.
*/
- refresh_cpu_vm_stats(cpu);
+ cpu_vm_stats_fold(cpu);
}
return NOTIFY_OK;
}
@@ -5362,6 +5649,11 @@ static void __setup_per_zone_wmarks(void)
zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + (tmp >> 2);
zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
+ __mod_zone_page_state(zone, NR_ALLOC_BATCH,
+ high_wmark_pages(zone) -
+ low_wmark_pages(zone) -
+ zone_page_state(zone, NR_ALLOC_BATCH));
+
setup_zone_migrate_reserve(zone);
spin_unlock_irqrestore(&zone->lock, flags);
}
@@ -5434,7 +5726,7 @@ static void __meminit setup_per_zone_inactive_ratio(void)
* we want it large (64MB max). But it is not linear, because network
* bandwidth does not increase linearly with machine size. We use
*
- * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
+ * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
* min_free_kbytes = sqrt(lowmem_kbytes * 16)
*
* which yields
@@ -5454,14 +5746,21 @@ static void __meminit setup_per_zone_inactive_ratio(void)
int __meminit init_per_zone_wmark_min(void)
{
unsigned long lowmem_kbytes;
+ int new_min_free_kbytes;
lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
-
- min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
- if (min_free_kbytes < 128)
- min_free_kbytes = 128;
- if (min_free_kbytes > 65536)
- min_free_kbytes = 65536;
+ new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
+
+ if (new_min_free_kbytes > user_min_free_kbytes) {
+ min_free_kbytes = new_min_free_kbytes;
+ if (min_free_kbytes < 128)
+ min_free_kbytes = 128;
+ if (min_free_kbytes > 65536)
+ min_free_kbytes = 65536;
+ } else {
+ pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
+ new_min_free_kbytes, user_min_free_kbytes);
+ }
setup_per_zone_wmarks();
refresh_zone_stat_thresholds();
setup_per_zone_lowmem_reserve();
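Worked example (not in the source): with 4 GiB of lowmem, lowmem_kbytes is 4194304 and int_sqrt(4194304 * 16) = 8192, so min_free_kbytes defaults to 8 MiB; the 128 KiB floor and 64 MiB ceiling clamp the extremes, and a larger administrator-set value now survives because of the user_min_free_kbytes check above.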
@@ -5471,16 +5770,23 @@ int __meminit init_per_zone_wmark_min(void)
module_init(init_per_zone_wmark_min)
/*
- * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
+ * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
* that we can call two helper functions whenever min_free_kbytes
* changes.
*/
-int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
+int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
void __user *buffer, size_t *length, loff_t *ppos)
{
- proc_dointvec(table, write, buffer, length, ppos);
- if (write)
+ int rc;
+
+ rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
+ if (rc)
+ return rc;
+
+ if (write) {
+ user_min_free_kbytes = min_free_kbytes;
setup_per_zone_wmarks();
+ }
return 0;
}
@@ -5537,10 +5843,9 @@ int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
/*
* percpu_pagelist_fraction - changes the pcp->high for each zone on each
- * cpu. It is the fraction of total pages in each zone that a hot per cpu pagelist
- * can have before it gets flushed back to buddy allocator.
+ * cpu. It is the fraction of total pages in each zone that a hot per cpu
+ * pagelist can have before it gets flushed back to buddy allocator.
*/
-
int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
void __user *buffer, size_t *length, loff_t *ppos)
{
@@ -5551,14 +5856,16 @@ int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
if (!write || (ret < 0))
return ret;
+
+ mutex_lock(&pcp_batch_high_lock);
for_each_populated_zone(zone) {
- for_each_possible_cpu(cpu) {
- unsigned long high;
- high = zone->managed_pages / percpu_pagelist_fraction;
- setup_pagelist_highmark(
- per_cpu_ptr(zone->pageset, cpu), high);
- }
+ unsigned long high;
+ high = zone->managed_pages / percpu_pagelist_fraction;
+ for_each_possible_cpu(cpu)
+ pageset_set_high(per_cpu_ptr(zone->pageset, cpu),
+ high);
}
+ mutex_unlock(&pcp_batch_high_lock);
return 0;
}
@@ -5599,9 +5906,10 @@ void *__init alloc_large_system_hash(const char *tablename,
if (!numentries) {
/* round applicable memory size up to nearest megabyte */
numentries = nr_kernel_pages;
- numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
- numentries >>= 20 - PAGE_SHIFT;
- numentries <<= 20 - PAGE_SHIFT;
+
+ /* It isn't necessary when PAGE_SIZE >= 1MB */
+ if (PAGE_SHIFT < 20)
+ numentries = round_up(numentries, (1<<20)/PAGE_SIZE);
/* limit to 1 bucket per 2^scale bytes of low memory */
if (scale > PAGE_SHIFT)
@@ -5639,7 +5947,7 @@ void *__init alloc_large_system_hash(const char *tablename,
do {
size = bucketsize << log2qty;
if (flags & HASH_EARLY)
- table = alloc_bootmem_nopanic(size);
+ table = memblock_virt_alloc_nopanic(size, 0);
else if (hashdist)
table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
else {
@@ -5741,7 +6049,7 @@ void set_pageblock_flags_group(struct page *page, unsigned long flags,
pfn = page_to_pfn(page);
bitmap = get_pageblock_bitmap(zone, pfn);
bitidx = pfn_to_bitidx(zone, pfn);
- VM_BUG_ON(!zone_spans_pfn(zone, pfn));
+ VM_BUG_ON_PAGE(!zone_spans_pfn(zone, pfn), page);
for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
if (flags & value)
@@ -5754,7 +6062,7 @@ void set_pageblock_flags_group(struct page *page, unsigned long flags,
* This function checks whether pageblock includes unmovable pages or not.
* If @count is not zero, it is okay to include less @count unmovable pages
*
- * PageLRU check wihtout isolation or lru_lock could race so that
+ * PageLRU check without isolation or lru_lock could race so that
* MIGRATE_MOVABLE block might include unmovable pages. It means you can't
* expect this function should be exact.
*/
@@ -5782,6 +6090,17 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
continue;
page = pfn_to_page(check);
+
+ /*
+ * Hugepages are not in LRU lists, but they're movable.
+		 * We need not scan over tail pages because we don't
+ * handle each tail page individually in migration.
+ */
+ if (PageHuge(page)) {
+ iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
+ continue;
+ }
+
/*
		 * We can't use page_count without pinning the page
		 * because another CPU can free the compound page.
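A quick illustration of the PageHuge() skip above: for a 2 MiB hugepage on a 4 KiB-page build, compound_order(page) is 9, so iter = round_up(iter + 1, 512) - 1; with iter at the head page (iter == 0) that yields 511, and the loop increment moves the scan to the first pfn past the hugepage instead of visiting all 512 tail pages.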
@@ -6047,32 +6366,18 @@ void free_contig_range(unsigned long pfn, unsigned nr_pages)
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
-static int __meminit __zone_pcp_update(void *data)
-{
- struct zone *zone = data;
- int cpu;
- unsigned long batch = zone_batchsize(zone), flags;
-
- for_each_possible_cpu(cpu) {
- struct per_cpu_pageset *pset;
- struct per_cpu_pages *pcp;
-
- pset = per_cpu_ptr(zone->pageset, cpu);
- pcp = &pset->pcp;
-
- local_irq_save(flags);
- if (pcp->count > 0)
- free_pcppages_bulk(zone, pcp->count, pcp);
- drain_zonestat(zone, pset);
- setup_pageset(pset, batch);
- local_irq_restore(flags);
- }
- return 0;
-}
-
+/*
+ * The zone indicated has a new number of managed_pages; batch sizes and percpu
+ * page high values need to be recalculated.
+ */
void __meminit zone_pcp_update(struct zone *zone)
{
- stop_machine(__zone_pcp_update, zone, NULL);
+ unsigned cpu;
+ mutex_lock(&pcp_batch_high_lock);
+ for_each_possible_cpu(cpu)
+ pageset_set_high_and_batch(zone,
+ per_cpu_ptr(zone->pageset, cpu));
+ mutex_unlock(&pcp_batch_high_lock);
}
#endif
@@ -6242,12 +6547,25 @@ static void dump_page_flags(unsigned long flags)
printk(")\n");
}
-void dump_page(struct page *page)
+void dump_page_badflags(struct page *page, const char *reason,
+ unsigned long badflags)
{
printk(KERN_ALERT
"page:%p count:%d mapcount:%d mapping:%p index:%#lx\n",
page, atomic_read(&page->_count), page_mapcount(page),
page->mapping, page->index);
dump_page_flags(page->flags);
+ if (reason)
+ pr_alert("page dumped because: %s\n", reason);
+ if (page->flags & badflags) {
+ pr_alert("bad because of flags:\n");
+ dump_page_flags(page->flags & badflags);
+ }
mem_cgroup_print_bad_page(page);
}
+
+void dump_page(struct page *page, const char *reason)
+{
+ dump_page_badflags(page, reason, 0);
+}
+EXPORT_SYMBOL(dump_page);
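The new reason string gives call sites a way to say why a page is being dumped; a minimal sketch of a converted caller (the check itself is invented for illustration; VM_BUG_ON_PAGE() now does the same internally when its assertion fires):

	/* sketch: dump with a human-readable reason */
	static void toy_sanity_check(struct page *page)
	{
		if (unlikely(PageTail(page)))
			dump_page(page, "unexpected tail page");
	}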
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index 6d757e3a872a..3708264d2833 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -54,8 +54,9 @@ static int __init alloc_node_page_cgroup(int nid)
table_size = sizeof(struct page_cgroup) * nr_pages;
- base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
- table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+ base = memblock_virt_alloc_try_nid_nopanic(
+ table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
+ BOOTMEM_ALLOC_ACCESSIBLE, nid);
if (!base)
return -ENOMEM;
NODE_DATA(nid)->node_page_cgroup = base;
@@ -174,7 +175,7 @@ static void free_page_cgroup(void *addr)
}
}
-void __free_page_cgroup(unsigned long pfn)
+static void __free_page_cgroup(unsigned long pfn)
{
struct mem_section *ms;
struct page_cgroup *base;
@@ -187,9 +188,9 @@ void __free_page_cgroup(unsigned long pfn)
ms->page_cgroup = NULL;
}
-int __meminit online_page_cgroup(unsigned long start_pfn,
- unsigned long nr_pages,
- int nid)
+static int __meminit online_page_cgroup(unsigned long start_pfn,
+ unsigned long nr_pages,
+ int nid)
{
unsigned long start, end, pfn;
int fail = 0;
@@ -222,8 +223,8 @@ int __meminit online_page_cgroup(unsigned long start_pfn,
return -ENOMEM;
}
-int __meminit offline_page_cgroup(unsigned long start_pfn,
- unsigned long nr_pages, int nid)
+static int __meminit offline_page_cgroup(unsigned long start_pfn,
+ unsigned long nr_pages, int nid)
{
unsigned long start, end, pfn;
@@ -451,7 +452,7 @@ unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
* lookup_swap_cgroup_id - lookup mem_cgroup id tied to swap entry
* @ent: swap entry to be looked up.
*
- * Returns CSS ID of mem_cgroup at success. 0 at failure. (0 is invalid ID)
+ * Returns ID of mem_cgroup at success. 0 at failure. (0 is invalid ID)
*/
unsigned short lookup_swap_cgroup_id(swp_entry_t ent)
{
diff --git a/mm/page_io.c b/mm/page_io.c
index a8a3ef45fed7..7c59ef681381 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -21,6 +21,7 @@
#include <linux/writeback.h>
#include <linux/frontswap.h>
#include <linux/aio.h>
+#include <linux/blkdev.h>
#include <asm/pgtable.h>
static struct bio *get_swap_bio(gfp_t gfp_flags,
@@ -30,13 +31,13 @@ static struct bio *get_swap_bio(gfp_t gfp_flags,
bio = bio_alloc(gfp_flags, 1);
if (bio) {
- bio->bi_sector = map_swap_page(page, &bio->bi_bdev);
- bio->bi_sector <<= PAGE_SHIFT - 9;
+ bio->bi_iter.bi_sector = map_swap_page(page, &bio->bi_bdev);
+ bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9;
bio->bi_io_vec[0].bv_page = page;
bio->bi_io_vec[0].bv_len = PAGE_SIZE;
bio->bi_io_vec[0].bv_offset = 0;
bio->bi_vcnt = 1;
- bio->bi_size = PAGE_SIZE;
+ bio->bi_iter.bi_size = PAGE_SIZE;
bio->bi_end_io = end_io;
}
return bio;
@@ -61,7 +62,7 @@ void end_swap_bio_write(struct bio *bio, int err)
printk(KERN_ALERT "Write-error on swap-device (%u:%u:%Lu)\n",
imajor(bio->bi_bdev->bd_inode),
iminor(bio->bi_bdev->bd_inode),
- (unsigned long long)bio->bi_sector);
+ (unsigned long long)bio->bi_iter.bi_sector);
ClearPageReclaim(page);
}
end_page_writeback(page);
@@ -79,10 +80,55 @@ void end_swap_bio_read(struct bio *bio, int err)
printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
imajor(bio->bi_bdev->bd_inode),
iminor(bio->bi_bdev->bd_inode),
- (unsigned long long)bio->bi_sector);
- } else {
- SetPageUptodate(page);
+ (unsigned long long)bio->bi_iter.bi_sector);
+ goto out;
}
+
+ SetPageUptodate(page);
+
+ /*
+ * There is no guarantee that the page is in swap cache - the software
+ * suspend code (at least) uses end_swap_bio_read() against a non-
+ * swapcache page. So we must check PG_swapcache before proceeding with
+ * this optimization.
+ */
+ if (likely(PageSwapCache(page))) {
+ struct swap_info_struct *sis;
+
+ sis = page_swap_info(page);
+ if (sis->flags & SWP_BLKDEV) {
+ /*
+ * The swap subsystem performs lazy swap slot freeing,
+ * expecting that the page will be swapped out again.
+ * So we can avoid an unnecessary write if the page
+ * isn't redirtied.
+ * This is good for real swap storage because we can
+ * reduce unnecessary I/O and enhance wear-leveling
+		 * if an SSD is used as the swap device.
+		 * But if an in-memory swap device (e.g. zram) is used,
+ * this causes a duplicated copy between uncompressed
+ * data in VM-owned memory and compressed data in
+ * zram-owned memory. So let's free zram-owned memory
+ * and make the VM-owned decompressed page *dirty*,
+ * so the page should be swapped out somewhere again if
+ * we again wish to reclaim it.
+ */
+ struct gendisk *disk = sis->bdev->bd_disk;
+ if (disk->fops->swap_slot_free_notify) {
+ swp_entry_t entry;
+ unsigned long offset;
+
+ entry.val = page_private(page);
+ offset = swp_offset(entry);
+
+ SetPageDirty(page);
+ disk->fops->swap_slot_free_notify(sis->bdev,
+ offset);
+ }
+ }
+ }
+
+out:
unlock_page(page);
bio_put(bio);
}
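The early-free optimisation above only fires when the backing driver implements the swap_slot_free_notify hook (zram is the in-tree user); a minimal stub of the block_device_operations hook an in-memory swap device would provide (the toy_* names are invented, only the callback signature is assumed from the real interface):

	#include <linux/blkdev.h>
	#include <linux/module.h>

	static void toy_swap_slot_free_notify(struct block_device *bdev,
					      unsigned long offset)
	{
		/* an in-memory swap device would drop its copy of this
		 * slot here; zram, for instance, frees the compressed page.
		 */
		pr_debug("%s: swap slot %lu freed\n",
			 bdev->bd_disk->disk_name, offset);
	}

	static const struct block_device_operations toy_fops = {
		.owner			= THIS_MODULE,
		.swap_slot_free_notify	= toy_swap_slot_free_notify,
	};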
@@ -220,7 +266,6 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
init_sync_kiocb(&kiocb, swap_file);
kiocb.ki_pos = page_file_offset(page);
- kiocb.ki_left = PAGE_SIZE;
kiocb.ki_nbytes = PAGE_SIZE;
set_page_writeback(page);
@@ -275,8 +320,8 @@ int swap_readpage(struct page *page)
int ret = 0;
struct swap_info_struct *sis = page_swap_info(page);
- VM_BUG_ON(!PageLocked(page));
- VM_BUG_ON(PageUptodate(page));
+ VM_BUG_ON_PAGE(!PageLocked(page), page);
+ VM_BUG_ON_PAGE(PageUptodate(page), page);
if (frontswap_load(page) == 0) {
SetPageUptodate(page);
unlock_page(page);
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 383bdbb98b04..d1473b2e9481 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -6,6 +6,7 @@
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
+#include <linux/hugetlb.h>
#include "internal.h"
int set_migratetype_isolate(struct page *page, bool skip_hwpoisoned_pages)
@@ -226,9 +227,9 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
int ret;
/*
- * Note: pageblock_nr_page != MAX_ORDER. Then, chunks of free page
- * is not aligned to pageblock_nr_pages.
- * Then we just check pagetype fist.
+ * Note: pageblock_nr_pages != MAX_ORDER. Then, chunks of free pages
+ * are not aligned to pageblock_nr_pages.
+ * Then we just check migratetype first.
*/
for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
page = __first_valid_page(pfn, pageblock_nr_pages);
@@ -238,7 +239,7 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
page = __first_valid_page(start_pfn, end_pfn - start_pfn);
if ((pfn < end_pfn) || !page)
return -EBUSY;
- /* Check all pages are free or Marked as ISOLATED */
+ /* Check all pages are free or marked as ISOLATED */
zone = page_zone(page);
spin_lock_irqsave(&zone->lock, flags);
ret = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
@@ -252,6 +253,19 @@ struct page *alloc_migrate_target(struct page *page, unsigned long private,
{
gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;
+ /*
+ * TODO: allocate a destination hugepage from a nearest neighbor node,
+ * accordance with memory policy of the user process if possible. For
+ * now as a simple work-around, we use the next node for destination.
+ */
+ if (PageHuge(page)) {
+ nodemask_t src = nodemask_of_node(page_to_nid(page));
+ nodemask_t dst;
+ nodes_complement(dst, src);
+ return alloc_huge_page_node(page_hstate(compound_head(page)),
+ next_node(page_to_nid(page), dst));
+ }
+
if (PageHighMem(page))
gfp_mask |= __GFP_HIGHMEM;
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 5da2cbcfdbb5..2beeabf502c5 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -242,7 +242,7 @@ int walk_page_range(unsigned long addr, unsigned long end,
if (err)
break;
pgd++;
- } while (addr = next, addr != end);
+ } while (addr = next, addr < end);
return err;
}
diff --git a/mm/percpu.c b/mm/percpu.c
index 8c8e08f3a692..2ddf9a990dbd 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -102,10 +102,11 @@ struct pcpu_chunk {
int free_size; /* free bytes in the chunk */
int contig_hint; /* max contiguous size hint */
void *base_addr; /* base address of this chunk */
- int map_used; /* # of map entries used */
+ int map_used; /* # of map entries used before the sentry */
int map_alloc; /* # of map entries allocated */
int *map; /* allocation map */
void *data; /* chunk data */
+ int first_free; /* no free below this */
bool immutable; /* no [de]population allowed */
unsigned long populated[]; /* populated bitmap */
};
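Under the new layout each map[] entry is a byte offset whose low bit marks the area as allocated, and area i ends where area i + 1 begins; small decode helpers, purely illustrative (the kernel open-codes these tests):

	/* decode helpers for the <offset | in-use> encoding in chunk->map */
	static inline int toy_area_is_free(int ent)
	{
		return !(ent & 1);		/* low bit set => allocated */
	}

	static inline int toy_area_offset(int ent)
	{
		return ent & ~1;		/* clear the flag, keep the offset */
	}

	static inline int toy_area_size(const int *map, int i)
	{
		return toy_area_offset(map[i + 1]) - toy_area_offset(map[i]);
	}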
@@ -356,11 +357,11 @@ static int pcpu_need_to_extend(struct pcpu_chunk *chunk)
{
int new_alloc;
- if (chunk->map_alloc >= chunk->map_used + 2)
+ if (chunk->map_alloc >= chunk->map_used + 3)
return 0;
new_alloc = PCPU_DFL_MAP_ALLOC;
- while (new_alloc < chunk->map_used + 2)
+ while (new_alloc < chunk->map_used + 3)
new_alloc *= 2;
return new_alloc;
@@ -418,48 +419,6 @@ out_unlock:
}
/**
- * pcpu_split_block - split a map block
- * @chunk: chunk of interest
- * @i: index of map block to split
- * @head: head size in bytes (can be 0)
- * @tail: tail size in bytes (can be 0)
- *
- * Split the @i'th map block into two or three blocks. If @head is
- * non-zero, @head bytes block is inserted before block @i moving it
- * to @i+1 and reducing its size by @head bytes.
- *
- * If @tail is non-zero, the target block, which can be @i or @i+1
- * depending on @head, is reduced by @tail bytes and @tail byte block
- * is inserted after the target block.
- *
- * @chunk->map must have enough free slots to accommodate the split.
- *
- * CONTEXT:
- * pcpu_lock.
- */
-static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
- int head, int tail)
-{
- int nr_extra = !!head + !!tail;
-
- BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);
-
- /* insert new subblocks */
- memmove(&chunk->map[i + nr_extra], &chunk->map[i],
- sizeof(chunk->map[0]) * (chunk->map_used - i));
- chunk->map_used += nr_extra;
-
- if (head) {
- chunk->map[i + 1] = chunk->map[i] - head;
- chunk->map[i++] = head;
- }
- if (tail) {
- chunk->map[i++] -= tail;
- chunk->map[i] = tail;
- }
-}
-
-/**
* pcpu_alloc_area - allocate area from a pcpu_chunk
* @chunk: chunk of interest
* @size: wanted size in bytes
@@ -483,19 +442,27 @@ static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
int oslot = pcpu_chunk_slot(chunk);
int max_contig = 0;
int i, off;
+ bool seen_free = false;
+ int *p;
- for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
- bool is_last = i + 1 == chunk->map_used;
+ for (i = chunk->first_free, p = chunk->map + i; i < chunk->map_used; i++, p++) {
int head, tail;
+ int this_size;
+
+ off = *p;
+ if (off & 1)
+ continue;
/* extra for alignment requirement */
head = ALIGN(off, align) - off;
- BUG_ON(i == 0 && head != 0);
- if (chunk->map[i] < 0)
- continue;
- if (chunk->map[i] < head + size) {
- max_contig = max(chunk->map[i], max_contig);
+ this_size = (p[1] & ~1) - off;
+ if (this_size < head + size) {
+ if (!seen_free) {
+ chunk->first_free = i;
+ seen_free = true;
+ }
+ max_contig = max(this_size, max_contig);
continue;
}
@@ -505,44 +472,59 @@ static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
* than sizeof(int), which is very small but isn't too
* uncommon for percpu allocations.
*/
- if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
- if (chunk->map[i - 1] > 0)
- chunk->map[i - 1] += head;
- else {
- chunk->map[i - 1] -= head;
+ if (head && (head < sizeof(int) || !(p[-1] & 1))) {
+ *p = off += head;
+ if (p[-1] & 1)
chunk->free_size -= head;
- }
- chunk->map[i] -= head;
- off += head;
+ else
+ max_contig = max(*p - p[-1], max_contig);
+ this_size -= head;
head = 0;
}
/* if tail is small, just keep it around */
- tail = chunk->map[i] - head - size;
- if (tail < sizeof(int))
+ tail = this_size - head - size;
+ if (tail < sizeof(int)) {
tail = 0;
+ size = this_size - head;
+ }
/* split if warranted */
if (head || tail) {
- pcpu_split_block(chunk, i, head, tail);
+ int nr_extra = !!head + !!tail;
+
+ /* insert new subblocks */
+ memmove(p + nr_extra + 1, p + 1,
+ sizeof(chunk->map[0]) * (chunk->map_used - i));
+ chunk->map_used += nr_extra;
+
if (head) {
- i++;
- off += head;
- max_contig = max(chunk->map[i - 1], max_contig);
+ if (!seen_free) {
+ chunk->first_free = i;
+ seen_free = true;
+ }
+ *++p = off += head;
+ ++i;
+ max_contig = max(head, max_contig);
+ }
+ if (tail) {
+ p[1] = off + size;
+ max_contig = max(tail, max_contig);
}
- if (tail)
- max_contig = max(chunk->map[i + 1], max_contig);
}
+ if (!seen_free)
+ chunk->first_free = i + 1;
+
/* update hint and mark allocated */
- if (is_last)
+ if (i + 1 == chunk->map_used)
chunk->contig_hint = max_contig; /* fully scanned */
else
chunk->contig_hint = max(chunk->contig_hint,
max_contig);
- chunk->free_size -= chunk->map[i];
- chunk->map[i] = -chunk->map[i];
+ chunk->free_size -= size;
+ *p |= 1;
pcpu_chunk_relocate(chunk, oslot);
return off;
@@ -570,34 +552,50 @@ static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
{
int oslot = pcpu_chunk_slot(chunk);
- int i, off;
-
- for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
- if (off == freeme)
- break;
+ int off = 0;
+ unsigned i, j;
+ int to_free = 0;
+ int *p;
+
+ freeme |= 1; /* we are searching for <given offset, in use> pair */
+
+ i = 0;
+ j = chunk->map_used;
+ while (i != j) {
+ unsigned k = (i + j) / 2;
+ off = chunk->map[k];
+ if (off < freeme)
+ i = k + 1;
+ else if (off > freeme)
+ j = k;
+ else
+ i = j = k;
+ }
BUG_ON(off != freeme);
- BUG_ON(chunk->map[i] > 0);
- chunk->map[i] = -chunk->map[i];
- chunk->free_size += chunk->map[i];
+ if (i < chunk->first_free)
+ chunk->first_free = i;
+ p = chunk->map + i;
+ *p = off &= ~1;
+ chunk->free_size += (p[1] & ~1) - off;
+
+ /* merge with next? */
+ if (!(p[1] & 1))
+ to_free++;
/* merge with previous? */
- if (i > 0 && chunk->map[i - 1] >= 0) {
- chunk->map[i - 1] += chunk->map[i];
- chunk->map_used--;
- memmove(&chunk->map[i], &chunk->map[i + 1],
- (chunk->map_used - i) * sizeof(chunk->map[0]));
+ if (i > 0 && !(p[-1] & 1)) {
+ to_free++;
i--;
+ p--;
}
- /* merge with next? */
- if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
- chunk->map[i] += chunk->map[i + 1];
- chunk->map_used--;
- memmove(&chunk->map[i + 1], &chunk->map[i + 2],
- (chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
+ if (to_free) {
+ chunk->map_used -= to_free;
+ memmove(p + 1, p + 1 + to_free,
+ (chunk->map_used - i) * sizeof(chunk->map[0]));
}
- chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
+ chunk->contig_hint = max(chunk->map[i + 1] - chunk->map[i] - 1, chunk->contig_hint);
pcpu_chunk_relocate(chunk, oslot);
}
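Because the map entries are kept sorted by offset and an allocated area stores offset | 1, pcpu_free_area() can OR the in-use bit into the requested offset and binary-search for an exact match: freeing the area that starts at byte 512, for example, searches for the stored value 513.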
@@ -612,12 +610,14 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void)
chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC *
sizeof(chunk->map[0]));
if (!chunk->map) {
- kfree(chunk);
+ pcpu_mem_free(chunk, pcpu_chunk_struct_size);
return NULL;
}
chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
- chunk->map[chunk->map_used++] = pcpu_unit_size;
+ chunk->map[0] = 0;
+ chunk->map[1] = pcpu_unit_size | 1;
+ chunk->map_used = 1;
INIT_LIST_HEAD(&chunk->list);
chunk->free_size = pcpu_unit_size;
@@ -713,6 +713,16 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
unsigned long flags;
void __percpu *ptr;
+ /*
+ * We want the lowest bit of offset available for in-use/free
+ * indicator, so force >= 16bit alignment and make size even.
+ */
+ if (unlikely(align < 2))
+ align = 2;
+
+ if (unlikely(size & 1))
+ size++;
+
if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
WARN(true, "illegal size (%zu) or align (%zu) for "
"percpu allocation\n", size, align);
@@ -1063,7 +1073,7 @@ struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
__alignof__(ai->groups[0].cpu_map[0]));
ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
- ptr = alloc_bootmem_nopanic(PFN_ALIGN(ai_size));
+ ptr = memblock_virt_alloc_nopanic(PFN_ALIGN(ai_size), 0);
if (!ptr)
return NULL;
ai = ptr;
@@ -1088,7 +1098,7 @@ struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
*/
void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
{
- free_bootmem(__pa(ai), ai->__ai_size);
+ memblock_free_early(__pa(ai), ai->__ai_size);
}
/**
@@ -1246,10 +1256,12 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
/* process group information and build config tables accordingly */
- group_offsets = alloc_bootmem(ai->nr_groups * sizeof(group_offsets[0]));
- group_sizes = alloc_bootmem(ai->nr_groups * sizeof(group_sizes[0]));
- unit_map = alloc_bootmem(nr_cpu_ids * sizeof(unit_map[0]));
- unit_off = alloc_bootmem(nr_cpu_ids * sizeof(unit_off[0]));
+ group_offsets = memblock_virt_alloc(ai->nr_groups *
+ sizeof(group_offsets[0]), 0);
+ group_sizes = memblock_virt_alloc(ai->nr_groups *
+ sizeof(group_sizes[0]), 0);
+ unit_map = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_map[0]), 0);
+ unit_off = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_off[0]), 0);
for (cpu = 0; cpu < nr_cpu_ids; cpu++)
unit_map[cpu] = UINT_MAX;
@@ -1311,7 +1323,8 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
* empty chunks.
*/
pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
- pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0]));
+ pcpu_slot = memblock_virt_alloc(
+ pcpu_nr_slots * sizeof(pcpu_slot[0]), 0);
for (i = 0; i < pcpu_nr_slots; i++)
INIT_LIST_HEAD(&pcpu_slot[i]);
@@ -1322,7 +1335,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
* covers static area + reserved area (mostly used for module
* static percpu allocation).
*/
- schunk = alloc_bootmem(pcpu_chunk_struct_size);
+ schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
INIT_LIST_HEAD(&schunk->list);
schunk->base_addr = base_addr;
schunk->map = smap;
@@ -1340,13 +1353,17 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
}
schunk->contig_hint = schunk->free_size;
- schunk->map[schunk->map_used++] = -ai->static_size;
+ schunk->map[0] = 1;
+ schunk->map[1] = ai->static_size;
+ schunk->map_used = 1;
if (schunk->free_size)
- schunk->map[schunk->map_used++] = schunk->free_size;
+ schunk->map[++schunk->map_used] = 1 | (ai->static_size + schunk->free_size);
+ else
+ schunk->map[1] |= 1;
/* init dynamic chunk if necessary */
if (dyn_size) {
- dchunk = alloc_bootmem(pcpu_chunk_struct_size);
+ dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
INIT_LIST_HEAD(&dchunk->list);
dchunk->base_addr = base_addr;
dchunk->map = dmap;
@@ -1355,8 +1372,10 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
bitmap_fill(dchunk->populated, pcpu_unit_pages);
dchunk->contig_hint = dchunk->free_size = dyn_size;
- dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit;
- dchunk->map[dchunk->map_used++] = dchunk->free_size;
+ dchunk->map[0] = 1;
+ dchunk->map[1] = pcpu_reserved_chunk_limit;
+ dchunk->map[2] = (pcpu_reserved_chunk_limit + dchunk->free_size) | 1;
+ dchunk->map_used = 2;
}
/* link the first chunk in */
@@ -1626,7 +1645,7 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
- areas = alloc_bootmem_nopanic(areas_size);
+ areas = memblock_virt_alloc_nopanic(areas_size, 0);
if (!areas) {
rc = -ENOMEM;
goto out_free;
@@ -1686,10 +1705,10 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
max_distance += ai->unit_size;
/* warn if maximum distance is further than 75% of vmalloc space */
- if (max_distance > (VMALLOC_END - VMALLOC_START) * 3 / 4) {
+ if (max_distance > VMALLOC_TOTAL * 3 / 4) {
pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc "
"space 0x%lx\n", max_distance,
- (unsigned long)(VMALLOC_END - VMALLOC_START));
+ VMALLOC_TOTAL);
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
/* and fail if we have fallback */
rc = -EINVAL;
@@ -1706,12 +1725,13 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
out_free_areas:
for (group = 0; group < ai->nr_groups; group++)
- free_fn(areas[group],
- ai->groups[group].nr_units * ai->unit_size);
+ if (areas[group])
+ free_fn(areas[group],
+ ai->groups[group].nr_units * ai->unit_size);
out_free:
pcpu_free_alloc_info(ai);
if (areas)
- free_bootmem(__pa(areas), areas_size);
+ memblock_free_early(__pa(areas), areas_size);
return rc;
}
#endif /* BUILD_EMBED_FIRST_CHUNK */
@@ -1759,7 +1779,7 @@ int __init pcpu_page_first_chunk(size_t reserved_size,
/* unaligned allocations can't be freed, round up to page size */
pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
sizeof(pages[0]));
- pages = alloc_bootmem(pages_size);
+ pages = memblock_virt_alloc(pages_size, 0);
/* allocate pages */
j = 0;
@@ -1822,7 +1842,7 @@ enomem:
free_fn(page_address(pages[j]), PAGE_SIZE);
rc = -ENOMEM;
out_free_ar:
- free_bootmem(__pa(pages), pages_size);
+ memblock_free_early(__pa(pages), pages_size);
pcpu_free_alloc_info(ai);
return rc;
}
@@ -1847,12 +1867,13 @@ EXPORT_SYMBOL(__per_cpu_offset);
static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
size_t align)
{
- return __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS));
+ return memblock_virt_alloc_from_nopanic(
+ size, align, __pa(MAX_DMA_ADDRESS));
}
static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
{
- free_bootmem(__pa(ptr), size);
+ memblock_free_early(__pa(ptr), size);
}
void __init setup_per_cpu_areas(void)
@@ -1895,7 +1916,9 @@ void __init setup_per_cpu_areas(void)
void *fc;
ai = pcpu_alloc_alloc_info(1, 1);
- fc = __alloc_bootmem(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+ fc = memblock_virt_alloc_from_nopanic(unit_size,
+ PAGE_SIZE,
+ __pa(MAX_DMA_ADDRESS));
if (!ai || !fc)
panic("Failed to allocate memory for percpu areas.");
/* kmemleak tracks the percpu allocations separately */
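The percpu hunks above switch chunk->map[] from signed lengths to byte offsets whose low bit flags the area starting there as in use (hence pcpu_alloc() forcing at least 2-byte alignment and an even size, and the unit-size sentinel entry). A minimal userspace sketch of that encoding, illustrative only and using made-up names rather than the kernel's structures:

#include <stdio.h>

/*
 * Illustration only (hypothetical names, not kernel code): each map[]
 * entry holds the byte offset of an area and bit 0 marks the area that
 * starts there as in use; a final sentinel entry holds the unit size.
 */
#define IN_USE 1

static int area_size(const int *map, int i)
{
	/* size = next offset minus this offset, with the flag bit masked off */
	return (map[i + 1] & ~IN_USE) - (map[i] & ~IN_USE);
}

int main(void)
{
	/* one 64-byte unit: [0,16) allocated, [16,64) free */
	int map[] = { 0 | IN_USE, 16, 64 | IN_USE };	/* last entry is the sentinel */
	int nr = 2;

	for (int i = 0; i < nr; i++)
		printf("area at %2d, size %2d, %s\n",
		       map[i] & ~IN_USE, area_size(map, i),
		       (map[i] & IN_USE) ? "in use" : "free");
	return 0;
}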
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index 0c8323fe6c8f..a8b919925934 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -10,6 +10,30 @@
#include <asm/tlb.h>
#include <asm-generic/pgtable.h>
+/*
+ * If a p?d_bad entry is found while walking page tables, report
+ * the error, before resetting entry to p?d_none. Usually (but
+ * very seldom) called out from the p?d_none_or_clear_bad macros.
+ */
+
+void pgd_clear_bad(pgd_t *pgd)
+{
+ pgd_ERROR(*pgd);
+ pgd_clear(pgd);
+}
+
+void pud_clear_bad(pud_t *pud)
+{
+ pud_ERROR(*pud);
+ pud_clear(pud);
+}
+
+void pmd_clear_bad(pmd_t *pmd)
+{
+ pmd_ERROR(*pmd);
+ pmd_clear(pmd);
+}
+
#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
* Only sets the access flags (dirty, accessed), as well as write
@@ -86,9 +110,10 @@ int pmdp_clear_flush_young(struct vm_area_struct *vma,
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
pte_t *ptep)
{
+ struct mm_struct *mm = (vma)->vm_mm;
pte_t pte;
- pte = ptep_get_and_clear((vma)->vm_mm, address, ptep);
- if (pte_accessible(pte))
+ pte = ptep_get_and_clear(mm, address, ptep);
+ if (pte_accessible(mm, pte))
flush_tlb_page(vma, address);
return pte;
}
@@ -124,16 +149,17 @@ void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable)
+void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
+ pgtable_t pgtable)
{
- assert_spin_locked(&mm->page_table_lock);
+ assert_spin_locked(pmd_lockptr(mm, pmdp));
/* FIFO */
- if (!mm->pmd_huge_pte)
+ if (!pmd_huge_pte(mm, pmdp))
INIT_LIST_HEAD(&pgtable->lru);
else
- list_add(&pgtable->lru, &mm->pmd_huge_pte->lru);
- mm->pmd_huge_pte = pgtable;
+ list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
+ pmd_huge_pte(mm, pmdp) = pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
@@ -141,18 +167,18 @@ void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable)
#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* no "address" argument so destroys page coloring of some arch */
-pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm)
+pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
pgtable_t pgtable;
- assert_spin_locked(&mm->page_table_lock);
+ assert_spin_locked(pmd_lockptr(mm, pmdp));
/* FIFO */
- pgtable = mm->pmd_huge_pte;
+ pgtable = pmd_huge_pte(mm, pmdp);
if (list_empty(&pgtable->lru))
- mm->pmd_huge_pte = NULL;
+ pmd_huge_pte(mm, pmdp) = NULL;
else {
- mm->pmd_huge_pte = list_entry(pgtable->lru.next,
+ pmd_huge_pte(mm, pmdp) = list_entry(pgtable->lru.next,
struct page, lru);
list_del(&pgtable->lru);
}
@@ -166,6 +192,9 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm)
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp)
{
+ pmd_t entry = *pmdp;
+ if (pmd_numa(entry))
+ entry = pmd_mknonnuma(entry);
- set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(*pmdp));
+ set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry));
flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
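The p?d_clear_bad() helpers moved into this file implement a simple pattern: when a page-table walker trips over a corrupt entry, report it once and reset the entry to the empty state so the walk can continue. A hypothetical userspace illustration of that report-then-clear pattern (not kernel code):

#include <stdio.h>
#include <stdbool.h>

struct entry { unsigned long val; };	/* stand-in for a pgd/pud/pmd entry */

static bool entry_bad(const struct entry *e)
{
	return e->val == 0xdeadbeef;	/* stand-in for p?d_bad() */
}

static void entry_clear_bad(struct entry *e)
{
	fprintf(stderr, "bad entry %#lx, clearing\n", e->val);	/* p?d_ERROR() */
	e->val = 0;						/* p?d_clear() */
}

int main(void)
{
	struct entry e = { 0xdeadbeef };

	/* mirrors the p?d_none_or_clear_bad() usage pattern */
	if (entry_bad(&e))
		entry_clear_bad(&e);
	printf("entry is now %#lx\n", e.val);
	return 0;
}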
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index fd26d0433509..8505c9262b35 100644
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -23,129 +23,44 @@
/**
* process_vm_rw_pages - read/write pages from task specified
- * @task: task to read/write from
- * @mm: mm for task
- * @process_pages: struct pages area that can store at least
- * nr_pages_to_copy struct page pointers
- * @pa: address of page in task to start copying from/to
+ * @pages: array of pointers to pages we want to copy
* @start_offset: offset in page to start copying from/to
* @len: number of bytes to copy
- * @lvec: iovec array specifying where to copy to/from
- * @lvec_cnt: number of elements in iovec array
- * @lvec_current: index in iovec array we are up to
- * @lvec_offset: offset in bytes from current iovec iov_base we are up to
+ * @iter: where to copy to/from locally
* @vm_write: 0 means copy from, 1 means copy to
- * @nr_pages_to_copy: number of pages to copy
- * @bytes_copied: returns number of bytes successfully copied
* Returns 0 on success, error code otherwise
*/
-static int process_vm_rw_pages(struct task_struct *task,
- struct mm_struct *mm,
- struct page **process_pages,
- unsigned long pa,
- unsigned long start_offset,
- unsigned long len,
- const struct iovec *lvec,
- unsigned long lvec_cnt,
- unsigned long *lvec_current,
- size_t *lvec_offset,
- int vm_write,
- unsigned int nr_pages_to_copy,
- ssize_t *bytes_copied)
+static int process_vm_rw_pages(struct page **pages,
+ unsigned offset,
+ size_t len,
+ struct iov_iter *iter,
+ int vm_write)
{
- int pages_pinned;
- void *target_kaddr;
- int pgs_copied = 0;
- int j;
- int ret;
- ssize_t bytes_to_copy;
- ssize_t rc = 0;
-
- *bytes_copied = 0;
-
- /* Get the pages we're interested in */
- down_read(&mm->mmap_sem);
- pages_pinned = get_user_pages(task, mm, pa,
- nr_pages_to_copy,
- vm_write, 0, process_pages, NULL);
- up_read(&mm->mmap_sem);
-
- if (pages_pinned != nr_pages_to_copy) {
- rc = -EFAULT;
- goto end;
- }
-
/* Do the copy for each page */
- for (pgs_copied = 0;
- (pgs_copied < nr_pages_to_copy) && (*lvec_current < lvec_cnt);
- pgs_copied++) {
- /* Make sure we have a non zero length iovec */
- while (*lvec_current < lvec_cnt
- && lvec[*lvec_current].iov_len == 0)
- (*lvec_current)++;
- if (*lvec_current == lvec_cnt)
- break;
-
- /*
- * Will copy smallest of:
- * - bytes remaining in page
- * - bytes remaining in destination iovec
- */
- bytes_to_copy = min_t(ssize_t, PAGE_SIZE - start_offset,
- len - *bytes_copied);
- bytes_to_copy = min_t(ssize_t, bytes_to_copy,
- lvec[*lvec_current].iov_len
- - *lvec_offset);
-
- target_kaddr = kmap(process_pages[pgs_copied]) + start_offset;
-
- if (vm_write)
- ret = copy_from_user(target_kaddr,
- lvec[*lvec_current].iov_base
- + *lvec_offset,
- bytes_to_copy);
- else
- ret = copy_to_user(lvec[*lvec_current].iov_base
- + *lvec_offset,
- target_kaddr, bytes_to_copy);
- kunmap(process_pages[pgs_copied]);
- if (ret) {
- *bytes_copied += bytes_to_copy - ret;
- pgs_copied++;
- rc = -EFAULT;
- goto end;
- }
- *bytes_copied += bytes_to_copy;
- *lvec_offset += bytes_to_copy;
- if (*lvec_offset == lvec[*lvec_current].iov_len) {
- /*
- * Need to copy remaining part of page into the
- * next iovec if there are any bytes left in page
- */
- (*lvec_current)++;
- *lvec_offset = 0;
- start_offset = (start_offset + bytes_to_copy)
- % PAGE_SIZE;
- if (start_offset)
- pgs_copied--;
+ while (len && iov_iter_count(iter)) {
+ struct page *page = *pages++;
+ size_t copy = PAGE_SIZE - offset;
+ size_t copied;
+
+ if (copy > len)
+ copy = len;
+
+ if (vm_write) {
+ if (copy > iov_iter_count(iter))
+ copy = iov_iter_count(iter);
+ copied = iov_iter_copy_from_user(page, iter,
+ offset, copy);
+ iov_iter_advance(iter, copied);
+ set_page_dirty_lock(page);
} else {
- start_offset = 0;
+ copied = copy_page_to_iter(page, offset, copy, iter);
}
+ len -= copied;
+ if (copied < copy && iov_iter_count(iter))
+ return -EFAULT;
+ offset = 0;
}
-
-end:
- if (vm_write) {
- for (j = 0; j < pages_pinned; j++) {
- if (j < pgs_copied)
- set_page_dirty_lock(process_pages[j]);
- put_page(process_pages[j]);
- }
- } else {
- for (j = 0; j < pages_pinned; j++)
- put_page(process_pages[j]);
- }
-
- return rc;
+ return 0;
}
/* Maximum number of pages kmalloc'd to hold struct page's during copy */
@@ -155,67 +70,60 @@ end:
* process_vm_rw_single_vec - read/write pages from task specified
* @addr: start memory address of target process
* @len: size of area to copy to/from
- * @lvec: iovec array specifying where to copy to/from locally
- * @lvec_cnt: number of elements in iovec array
- * @lvec_current: index in iovec array we are up to
- * @lvec_offset: offset in bytes from current iovec iov_base we are up to
+ * @iter: where to copy to/from locally
* @process_pages: struct pages area that can store at least
* nr_pages_to_copy struct page pointers
* @mm: mm for task
* @task: task to read/write from
* @vm_write: 0 means copy from, 1 means copy to
- * @bytes_copied: returns number of bytes successfully copied
* Returns 0 on success or on failure error code
*/
static int process_vm_rw_single_vec(unsigned long addr,
unsigned long len,
- const struct iovec *lvec,
- unsigned long lvec_cnt,
- unsigned long *lvec_current,
- size_t *lvec_offset,
+ struct iov_iter *iter,
struct page **process_pages,
struct mm_struct *mm,
struct task_struct *task,
- int vm_write,
- ssize_t *bytes_copied)
+ int vm_write)
{
unsigned long pa = addr & PAGE_MASK;
unsigned long start_offset = addr - pa;
unsigned long nr_pages;
- ssize_t bytes_copied_loop;
ssize_t rc = 0;
- unsigned long nr_pages_copied = 0;
- unsigned long nr_pages_to_copy;
unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
/ sizeof(struct pages *);
- *bytes_copied = 0;
-
/* Work out address and page range required */
if (len == 0)
return 0;
nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;
- while ((nr_pages_copied < nr_pages) && (*lvec_current < lvec_cnt)) {
- nr_pages_to_copy = min(nr_pages - nr_pages_copied,
- max_pages_per_loop);
+ while (!rc && nr_pages && iov_iter_count(iter)) {
+ int pages = min(nr_pages, max_pages_per_loop);
+ size_t bytes;
- rc = process_vm_rw_pages(task, mm, process_pages, pa,
- start_offset, len,
- lvec, lvec_cnt,
- lvec_current, lvec_offset,
- vm_write, nr_pages_to_copy,
- &bytes_copied_loop);
- start_offset = 0;
- *bytes_copied += bytes_copied_loop;
+ /* Get the pages we're interested in */
+ down_read(&mm->mmap_sem);
+ pages = get_user_pages(task, mm, pa, pages,
+ vm_write, 0, process_pages, NULL);
+ up_read(&mm->mmap_sem);
- if (rc < 0) {
- return rc;
- } else {
- len -= bytes_copied_loop;
- nr_pages_copied += nr_pages_to_copy;
- pa += nr_pages_to_copy * PAGE_SIZE;
- }
+ if (pages <= 0)
+ return -EFAULT;
+
+ bytes = pages * PAGE_SIZE - start_offset;
+ if (bytes > len)
+ bytes = len;
+
+ rc = process_vm_rw_pages(process_pages,
+ start_offset, bytes, iter,
+ vm_write);
+ len -= bytes;
+ start_offset = 0;
+ nr_pages -= pages;
+ pa += pages * PAGE_SIZE;
+ while (pages)
+ put_page(process_pages[--pages]);
}
return rc;
@@ -228,8 +136,7 @@ static int process_vm_rw_single_vec(unsigned long addr,
/**
* process_vm_rw_core - core of reading/writing pages from task specified
* @pid: PID of process to read/write from/to
- * @lvec: iovec array specifying where to copy to/from locally
- * @liovcnt: size of lvec array
+ * @iter: where to copy to/from locally
* @rvec: iovec array specifying where to copy to/from in the other process
* @riovcnt: size of rvec array
* @flags: currently unused
@@ -238,8 +145,7 @@ static int process_vm_rw_single_vec(unsigned long addr,
* return less bytes than expected if an error occurs during the copying
* process.
*/
-static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
- unsigned long liovcnt,
+static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
const struct iovec *rvec,
unsigned long riovcnt,
unsigned long flags, int vm_write)
@@ -250,13 +156,10 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
struct mm_struct *mm;
unsigned long i;
ssize_t rc = 0;
- ssize_t bytes_copied_loop;
- ssize_t bytes_copied = 0;
unsigned long nr_pages = 0;
unsigned long nr_pages_iov;
- unsigned long iov_l_curr_idx = 0;
- size_t iov_l_curr_offset = 0;
ssize_t iov_len;
+ size_t total_len = iov_iter_count(iter);
/*
* Work out how many pages of struct pages we're going to need
@@ -310,24 +213,20 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
goto put_task_struct;
}
- for (i = 0; i < riovcnt && iov_l_curr_idx < liovcnt; i++) {
+ for (i = 0; i < riovcnt && iov_iter_count(iter) && !rc; i++)
rc = process_vm_rw_single_vec(
(unsigned long)rvec[i].iov_base, rvec[i].iov_len,
- lvec, liovcnt, &iov_l_curr_idx, &iov_l_curr_offset,
- process_pages, mm, task, vm_write, &bytes_copied_loop);
- bytes_copied += bytes_copied_loop;
- if (rc != 0) {
- /* If we have managed to copy any data at all then
- we return the number of bytes copied. Otherwise
- we return the error code */
- if (bytes_copied)
- rc = bytes_copied;
- goto put_mm;
- }
- }
+ iter, process_pages, mm, task, vm_write);
+
+ /* copied = space before - space after */
+ total_len -= iov_iter_count(iter);
+
+ /* If we have managed to copy any data at all then
+ we return the number of bytes copied. Otherwise
+ we return the error code */
+ if (total_len)
+ rc = total_len;
- rc = bytes_copied;
-put_mm:
mmput(mm);
put_task_struct:
@@ -363,6 +262,7 @@ static ssize_t process_vm_rw(pid_t pid,
struct iovec iovstack_r[UIO_FASTIOV];
struct iovec *iov_l = iovstack_l;
struct iovec *iov_r = iovstack_r;
+ struct iov_iter iter;
ssize_t rc;
if (flags != 0)
@@ -378,13 +278,14 @@ static ssize_t process_vm_rw(pid_t pid,
if (rc <= 0)
goto free_iovecs;
+ iov_iter_init(&iter, iov_l, liovcnt, rc, 0);
+
rc = rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt, UIO_FASTIOV,
iovstack_r, &iov_r);
if (rc <= 0)
goto free_iovecs;
- rc = process_vm_rw_core(pid, iov_l, liovcnt, iov_r, riovcnt, flags,
- vm_write);
+ rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write);
free_iovecs:
if (iov_r != iovstack_r)
@@ -412,7 +313,7 @@ SYSCALL_DEFINE6(process_vm_writev, pid_t, pid,
#ifdef CONFIG_COMPAT
-asmlinkage ssize_t
+static ssize_t
compat_process_vm_rw(compat_pid_t pid,
const struct compat_iovec __user *lvec,
unsigned long liovcnt,
@@ -424,6 +325,7 @@ compat_process_vm_rw(compat_pid_t pid,
struct iovec iovstack_r[UIO_FASTIOV];
struct iovec *iov_l = iovstack_l;
struct iovec *iov_r = iovstack_r;
+ struct iov_iter iter;
ssize_t rc = -EFAULT;
if (flags != 0)
@@ -439,14 +341,14 @@ compat_process_vm_rw(compat_pid_t pid,
&iov_l);
if (rc <= 0)
goto free_iovecs;
+ iov_iter_init(&iter, iov_l, liovcnt, rc, 0);
rc = compat_rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt,
UIO_FASTIOV, iovstack_r,
&iov_r);
if (rc <= 0)
goto free_iovecs;
- rc = process_vm_rw_core(pid, iov_l, liovcnt, iov_r, riovcnt, flags,
- vm_write);
+ rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write);
free_iovecs:
if (iov_r != iovstack_r)
@@ -456,25 +358,23 @@ free_iovecs:
return rc;
}
-asmlinkage ssize_t
-compat_sys_process_vm_readv(compat_pid_t pid,
- const struct compat_iovec __user *lvec,
- unsigned long liovcnt,
- const struct compat_iovec __user *rvec,
- unsigned long riovcnt,
- unsigned long flags)
+COMPAT_SYSCALL_DEFINE6(process_vm_readv, compat_pid_t, pid,
+ const struct compat_iovec __user *, lvec,
+ compat_ulong_t, liovcnt,
+ const struct compat_iovec __user *, rvec,
+ compat_ulong_t, riovcnt,
+ compat_ulong_t, flags)
{
return compat_process_vm_rw(pid, lvec, liovcnt, rvec,
riovcnt, flags, 0);
}
-asmlinkage ssize_t
-compat_sys_process_vm_writev(compat_pid_t pid,
- const struct compat_iovec __user *lvec,
- unsigned long liovcnt,
- const struct compat_iovec __user *rvec,
- unsigned long riovcnt,
- unsigned long flags)
+COMPAT_SYSCALL_DEFINE6(process_vm_writev, compat_pid_t, pid,
+ const struct compat_iovec __user *, lvec,
+ compat_ulong_t, liovcnt,
+ const struct compat_iovec __user *, rvec,
+ compat_ulong_t, riovcnt,
+ compat_ulong_t, flags)
{
return compat_process_vm_rw(pid, lvec, liovcnt, rvec,
riovcnt, flags, 1);
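The rewrite above drops the lvec/lvec_current/lvec_offset bookkeeping in favour of an iov_iter, so process_vm_rw_pages() only asks how many bytes the iterator still wants and advances it as each pinned page is copied. A rough userspace analogue of that consumption pattern, with hypothetical types rather than the kernel iov_iter API:

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

/*
 * Hypothetical sketch: the iterator tracks the current iovec, the offset
 * into it and the total bytes remaining, so the copy loop never touches
 * iovec indices directly.
 */
struct iter {
	const struct iovec *iov;	/* current iovec */
	size_t nr;			/* iovecs remaining */
	size_t off;			/* offset into current iovec */
	size_t count;			/* total bytes remaining */
};

static size_t iter_copy_to(struct iter *it, const char *src, size_t len)
{
	size_t done = 0;

	if (len > it->count)
		len = it->count;
	while (len) {
		size_t chunk = it->iov->iov_len - it->off;

		if (chunk > len)
			chunk = len;
		memcpy((char *)it->iov->iov_base + it->off, src + done, chunk);
		done += chunk;
		len -= chunk;
		it->count -= chunk;
		it->off += chunk;
		if (it->off == it->iov->iov_len) {	/* step to the next iovec */
			it->iov++;
			it->nr--;
			it->off = 0;
		}
	}
	return done;
}

int main(void)
{
	char a[4], b[8];
	struct iovec vec[2] = { { a, sizeof(a) }, { b, sizeof(b) } };
	struct iter it = { vec, 2, 0, sizeof(a) + sizeof(b) };
	const char page[] = "0123456789AB";	/* stands in for one pinned page */
	size_t n = iter_copy_to(&it, page, sizeof(page) - 1);

	printf("copied %zu bytes, %zu left in the iterator\n", n, it.count);
	return 0;
}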
diff --git a/mm/readahead.c b/mm/readahead.c
index daed28dd5830..0ca36a7770b1 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -8,9 +8,7 @@
*/
#include <linux/kernel.h>
-#include <linux/fs.h>
#include <linux/gfp.h>
-#include <linux/mm.h>
#include <linux/export.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
@@ -20,6 +18,8 @@
#include <linux/syscalls.h>
#include <linux/file.h>
+#include "internal.h"
+
/*
* Initialise a struct file's readahead state. Assumes that the caller has
* memset *ra to zero.
@@ -48,7 +48,7 @@ static void read_cache_pages_invalidate_page(struct address_space *mapping,
if (!trylock_page(page))
BUG();
page->mapping = mapping;
- do_invalidatepage(page, 0);
+ do_invalidatepage(page, 0, PAGE_CACHE_SIZE);
page->mapping = NULL;
unlock_page(page);
}
@@ -149,8 +149,7 @@ out:
*
* Returns the number of pages requested, or the maximum amount of I/O allowed.
*/
-static int
-__do_page_cache_readahead(struct address_space *mapping, struct file *filp,
+int __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
pgoff_t offset, unsigned long nr_to_read,
unsigned long lookahead_size)
{
@@ -179,7 +178,7 @@ __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
rcu_read_lock();
page = radix_tree_lookup(&mapping->page_tree, page_offset);
rcu_read_unlock();
- if (page)
+ if (page && !radix_tree_exceptional_entry(page))
continue;
page = page_cache_alloc_readahead(mapping);
@@ -211,8 +210,6 @@ out:
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
pgoff_t offset, unsigned long nr_to_read)
{
- int ret = 0;
-
if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
return -EINVAL;
@@ -226,39 +223,23 @@ int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
this_chunk = nr_to_read;
err = __do_page_cache_readahead(mapping, filp,
offset, this_chunk, 0);
- if (err < 0) {
- ret = err;
- break;
- }
- ret += err;
+ if (err < 0)
+ return err;
+
offset += this_chunk;
nr_to_read -= this_chunk;
}
- return ret;
+ return 0;
}
+#define MAX_READAHEAD ((512*4096)/PAGE_CACHE_SIZE)
/*
* Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
* sensible upper limit.
*/
unsigned long max_sane_readahead(unsigned long nr)
{
- return min(nr, (node_page_state(numa_node_id(), NR_INACTIVE_FILE)
- + node_page_state(numa_node_id(), NR_FREE_PAGES)) / 2);
-}
-
-/*
- * Submit IO for the read-ahead request in file_ra_state.
- */
-unsigned long ra_submit(struct file_ra_state *ra,
- struct address_space *mapping, struct file *filp)
-{
- int actual;
-
- actual = __do_page_cache_readahead(mapping, filp,
- ra->start, ra->size, ra->async_size);
-
- return actual;
+ return min(nr, MAX_READAHEAD);
}
/*
@@ -351,7 +332,7 @@ static pgoff_t count_history_pages(struct address_space *mapping,
pgoff_t head;
rcu_read_lock();
- head = radix_tree_prev_hole(&mapping->page_tree, offset - 1, max);
+ head = page_cache_prev_hole(mapping, offset - 1, max);
rcu_read_unlock();
return offset - 1 - head;
@@ -371,10 +352,10 @@ static int try_context_readahead(struct address_space *mapping,
size = count_history_pages(mapping, ra, offset, max);
/*
- * no history pages:
+ * not enough history pages:
* it could be a random read
*/
- if (!size)
+ if (size <= req_size)
return 0;
/*
@@ -385,8 +366,8 @@ static int try_context_readahead(struct address_space *mapping,
size *= 2;
ra->start = offset;
- ra->size = get_init_ra_size(size + req_size, max);
- ra->async_size = ra->size;
+ ra->size = min(size + req_size, max);
+ ra->async_size = 1;
return 1;
}
@@ -401,6 +382,7 @@ ondemand_readahead(struct address_space *mapping,
unsigned long req_size)
{
unsigned long max = max_sane_readahead(ra->ra_pages);
+ pgoff_t prev_offset;
/*
* start of file
@@ -430,7 +412,7 @@ ondemand_readahead(struct address_space *mapping,
pgoff_t start;
rcu_read_lock();
- start = radix_tree_next_hole(&mapping->page_tree, offset+1,max);
+ start = page_cache_next_hole(mapping, offset + 1, max);
rcu_read_unlock();
if (!start || start - offset > max)
@@ -452,8 +434,11 @@ ondemand_readahead(struct address_space *mapping,
/*
* sequential cache miss
+ * trivial case: (offset - prev_offset) == 1
+ * unaligned reads: (offset - prev_offset) == 0
*/
- if (offset - (ra->prev_pos >> PAGE_CACHE_SHIFT) <= 1UL)
+ prev_offset = (unsigned long long)ra->prev_pos >> PAGE_CACHE_SHIFT;
+ if (offset - prev_offset <= 1UL)
goto initial_readahead;
/*
@@ -569,11 +554,10 @@ static ssize_t
do_readahead(struct address_space *mapping, struct file *filp,
pgoff_t index, unsigned long nr)
{
- if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
+ if (!mapping || !mapping->a_ops)
return -EINVAL;
- force_page_cache_readahead(mapping, filp, index, nr);
- return 0;
+ return force_page_cache_readahead(mapping, filp, index, nr);
}
SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count)
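max_sane_readahead() above no longer scales with per-node free and inactive page counts; it simply clamps the request to MAX_READAHEAD, defined as (512*4096)/PAGE_CACHE_SIZE pages. Assuming the common 4096-byte page size, the cap works out to 512 pages (2 MiB), as this throwaway calculation shows:

#include <stdio.h>

int main(void)
{
	long page_cache_size = 4096;	/* assumed; matches most configurations */
	long max_readahead = (512 * 4096) / page_cache_size;

	printf("readahead cap: %ld pages (%ld KiB)\n",
	       max_readahead, max_readahead * page_cache_size / 1024);
	return 0;
}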
diff --git a/mm/rmap.c b/mm/rmap.c
index 6280da86b5d6..83bfafabb47b 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -600,8 +600,12 @@ pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
spinlock_t *ptl;
if (unlikely(PageHuge(page))) {
+ /* when pud is not present, pte will be NULL */
pte = huge_pte_offset(mm, address);
- ptl = &mm->page_table_lock;
+ if (!pte)
+ return NULL;
+
+ ptl = huge_pte_lockptr(page_hstate(page), mm, pte);
goto check;
}
@@ -656,46 +660,47 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
return 1;
}
+struct page_referenced_arg {
+ int mapcount;
+ int referenced;
+ unsigned long vm_flags;
+ struct mem_cgroup *memcg;
+};
/*
- * Subfunctions of page_referenced: page_referenced_one called
- * repeatedly from either page_referenced_anon or page_referenced_file.
+ * arg: page_referenced_arg will be passed
*/
int page_referenced_one(struct page *page, struct vm_area_struct *vma,
- unsigned long address, unsigned int *mapcount,
- unsigned long *vm_flags)
+ unsigned long address, void *arg)
{
struct mm_struct *mm = vma->vm_mm;
+ spinlock_t *ptl;
int referenced = 0;
+ struct page_referenced_arg *pra = arg;
if (unlikely(PageTransHuge(page))) {
pmd_t *pmd;
- spin_lock(&mm->page_table_lock);
/*
* rmap might return false positives; we must filter
* these out using page_check_address_pmd().
*/
pmd = page_check_address_pmd(page, mm, address,
- PAGE_CHECK_ADDRESS_PMD_FLAG);
- if (!pmd) {
- spin_unlock(&mm->page_table_lock);
- goto out;
- }
+ PAGE_CHECK_ADDRESS_PMD_FLAG, &ptl);
+ if (!pmd)
+ return SWAP_AGAIN;
if (vma->vm_flags & VM_LOCKED) {
- spin_unlock(&mm->page_table_lock);
- *mapcount = 0; /* break early from loop */
- *vm_flags |= VM_LOCKED;
- goto out;
+ spin_unlock(ptl);
+ pra->vm_flags |= VM_LOCKED;
+ return SWAP_FAIL; /* To break the loop */
}
/* go ahead even if the pmd is pmd_trans_splitting() */
if (pmdp_clear_flush_young_notify(vma, address, pmd))
referenced++;
- spin_unlock(&mm->page_table_lock);
+ spin_unlock(ptl);
} else {
pte_t *pte;
- spinlock_t *ptl;
/*
* rmap might return false positives; we must filter
@@ -703,13 +708,12 @@ int page_referenced_one(struct page *page, struct vm_area_struct *vma,
*/
pte = page_check_address(page, mm, address, &ptl, 0);
if (!pte)
- goto out;
+ return SWAP_AGAIN;
if (vma->vm_flags & VM_LOCKED) {
pte_unmap_unlock(pte, ptl);
- *mapcount = 0; /* break early from loop */
- *vm_flags |= VM_LOCKED;
- goto out;
+ pra->vm_flags |= VM_LOCKED;
+ return SWAP_FAIL; /* To break the loop */
}
if (ptep_clear_flush_young_notify(vma, address, pte)) {
@@ -720,119 +724,33 @@ int page_referenced_one(struct page *page, struct vm_area_struct *vma,
* mapping is already gone, the unmap path will have
* set PG_referenced or activated the page.
*/
- if (likely(!VM_SequentialReadHint(vma)))
+ if (likely(!(vma->vm_flags & VM_SEQ_READ)))
referenced++;
}
pte_unmap_unlock(pte, ptl);
}
- (*mapcount)--;
-
- if (referenced)
- *vm_flags |= vma->vm_flags;
-out:
- return referenced;
-}
-
-static int page_referenced_anon(struct page *page,
- struct mem_cgroup *memcg,
- unsigned long *vm_flags)
-{
- unsigned int mapcount;
- struct anon_vma *anon_vma;
- pgoff_t pgoff;
- struct anon_vma_chain *avc;
- int referenced = 0;
-
- anon_vma = page_lock_anon_vma_read(page);
- if (!anon_vma)
- return referenced;
-
- mapcount = page_mapcount(page);
- pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
- anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
- struct vm_area_struct *vma = avc->vma;
- unsigned long address = vma_address(page, vma);
- /*
- * If we are reclaiming on behalf of a cgroup, skip
- * counting on behalf of references from different
- * cgroups
- */
- if (memcg && !mm_match_cgroup(vma->vm_mm, memcg))
- continue;
- referenced += page_referenced_one(page, vma, address,
- &mapcount, vm_flags);
- if (!mapcount)
- break;
+ if (referenced) {
+ pra->referenced++;
+ pra->vm_flags |= vma->vm_flags;
}
- page_unlock_anon_vma_read(anon_vma);
- return referenced;
+ pra->mapcount--;
+ if (!pra->mapcount)
+ return SWAP_SUCCESS; /* To break the loop */
+
+ return SWAP_AGAIN;
}
-/**
- * page_referenced_file - referenced check for object-based rmap
- * @page: the page we're checking references on.
- * @memcg: target memory control group
- * @vm_flags: collect encountered vma->vm_flags who actually referenced the page
- *
- * For an object-based mapped page, find all the places it is mapped and
- * check/clear the referenced flag. This is done by following the page->mapping
- * pointer, then walking the chain of vmas it holds. It returns the number
- * of references it found.
- *
- * This function is only called from page_referenced for object-based pages.
- */
-static int page_referenced_file(struct page *page,
- struct mem_cgroup *memcg,
- unsigned long *vm_flags)
+static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg)
{
- unsigned int mapcount;
- struct address_space *mapping = page->mapping;
- pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
- struct vm_area_struct *vma;
- int referenced = 0;
-
- /*
- * The caller's checks on page->mapping and !PageAnon have made
- * sure that this is a file page: the check for page->mapping
- * excludes the case just before it gets set on an anon page.
- */
- BUG_ON(PageAnon(page));
-
- /*
- * The page lock not only makes sure that page->mapping cannot
- * suddenly be NULLified by truncation, it makes sure that the
- * structure at mapping cannot be freed and reused yet,
- * so we can safely take mapping->i_mmap_mutex.
- */
- BUG_ON(!PageLocked(page));
+ struct page_referenced_arg *pra = arg;
+ struct mem_cgroup *memcg = pra->memcg;
- mutex_lock(&mapping->i_mmap_mutex);
-
- /*
- * i_mmap_mutex does not stabilize mapcount at all, but mapcount
- * is more likely to be accurate if we note it after spinning.
- */
- mapcount = page_mapcount(page);
-
- vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
- unsigned long address = vma_address(page, vma);
- /*
- * If we are reclaiming on behalf of a cgroup, skip
- * counting on behalf of references from different
- * cgroups
- */
- if (memcg && !mm_match_cgroup(vma->vm_mm, memcg))
- continue;
- referenced += page_referenced_one(page, vma, address,
- &mapcount, vm_flags);
- if (!mapcount)
- break;
- }
+ if (!mm_match_cgroup(vma->vm_mm, memcg))
+ return true;
- mutex_unlock(&mapping->i_mmap_mutex);
- return referenced;
+ return false;
}
/**
@@ -850,44 +768,57 @@ int page_referenced(struct page *page,
struct mem_cgroup *memcg,
unsigned long *vm_flags)
{
- int referenced = 0;
+ int ret;
int we_locked = 0;
+ struct page_referenced_arg pra = {
+ .mapcount = page_mapcount(page),
+ .memcg = memcg,
+ };
+ struct rmap_walk_control rwc = {
+ .rmap_one = page_referenced_one,
+ .arg = (void *)&pra,
+ .anon_lock = page_lock_anon_vma_read,
+ };
*vm_flags = 0;
- if (page_mapped(page) && page_rmapping(page)) {
- if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
- we_locked = trylock_page(page);
- if (!we_locked) {
- referenced++;
- goto out;
- }
- }
- if (unlikely(PageKsm(page)))
- referenced += page_referenced_ksm(page, memcg,
- vm_flags);
- else if (PageAnon(page))
- referenced += page_referenced_anon(page, memcg,
- vm_flags);
- else if (page->mapping)
- referenced += page_referenced_file(page, memcg,
- vm_flags);
- if (we_locked)
- unlock_page(page);
-
- if (page_test_and_clear_young(page_to_pfn(page)))
- referenced++;
+ if (!page_mapped(page))
+ return 0;
+
+ if (!page_rmapping(page))
+ return 0;
+
+ if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
+ we_locked = trylock_page(page);
+ if (!we_locked)
+ return 1;
}
-out:
- return referenced;
+
+ /*
+ * If we are reclaiming on behalf of a cgroup, skip
+ * counting on behalf of references from different
+ * cgroups
+ */
+ if (memcg) {
+ rwc.invalid_vma = invalid_page_referenced_vma;
+ }
+
+ ret = rmap_walk(page, &rwc);
+ *vm_flags = pra.vm_flags;
+
+ if (we_locked)
+ unlock_page(page);
+
+ return pra.referenced;
}
static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
- unsigned long address)
+ unsigned long address, void *arg)
{
struct mm_struct *mm = vma->vm_mm;
pte_t *pte;
spinlock_t *ptl;
int ret = 0;
+ int *cleaned = arg;
pte = page_check_address(page, mm, address, &ptl, 1);
if (!pte)
@@ -906,44 +837,44 @@ static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
pte_unmap_unlock(pte, ptl);
- if (ret)
+ if (ret) {
mmu_notifier_invalidate_page(mm, address);
+ (*cleaned)++;
+ }
out:
- return ret;
+ return SWAP_AGAIN;
}
-static int page_mkclean_file(struct address_space *mapping, struct page *page)
+static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
{
- pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
- struct vm_area_struct *vma;
- int ret = 0;
-
- BUG_ON(PageAnon(page));
+ if (vma->vm_flags & VM_SHARED)
+ return false;
- mutex_lock(&mapping->i_mmap_mutex);
- vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
- if (vma->vm_flags & VM_SHARED) {
- unsigned long address = vma_address(page, vma);
- ret += page_mkclean_one(page, vma, address);
- }
- }
- mutex_unlock(&mapping->i_mmap_mutex);
- return ret;
+ return true;
}
int page_mkclean(struct page *page)
{
- int ret = 0;
+ int cleaned = 0;
+ struct address_space *mapping;
+ struct rmap_walk_control rwc = {
+ .arg = (void *)&cleaned,
+ .rmap_one = page_mkclean_one,
+ .invalid_vma = invalid_mkclean_vma,
+ };
BUG_ON(!PageLocked(page));
- if (page_mapped(page)) {
- struct address_space *mapping = page_mapping(page);
- if (mapping)
- ret = page_mkclean_file(mapping, page);
- }
+ if (!page_mapped(page))
+ return 0;
- return ret;
+ mapping = page_mapping(page);
+ if (!mapping)
+ return 0;
+
+ rmap_walk(page, &rwc);
+
+ return cleaned;
}
EXPORT_SYMBOL_GPL(page_mkclean);
@@ -963,9 +894,9 @@ void page_move_anon_rmap(struct page *page,
{
struct anon_vma *anon_vma = vma->anon_vma;
- VM_BUG_ON(!PageLocked(page));
+ VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON(!anon_vma);
- VM_BUG_ON(page->index != linear_page_index(vma, address));
+ VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page);
anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
page->mapping = (struct address_space *) anon_vma;
@@ -1055,16 +986,16 @@ void do_page_add_anon_rmap(struct page *page,
{
int first = atomic_inc_and_test(&page->_mapcount);
if (first) {
- if (!PageTransHuge(page))
- __inc_zone_page_state(page, NR_ANON_PAGES);
- else
+ if (PageTransHuge(page))
__inc_zone_page_state(page,
NR_ANON_TRANSPARENT_HUGEPAGES);
+ __mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
+ hpage_nr_pages(page));
}
if (unlikely(PageKsm(page)))
return;
- VM_BUG_ON(!PageLocked(page));
+ VM_BUG_ON_PAGE(!PageLocked(page), page);
/* address might be in next vma when migration races vma_adjust */
if (first)
__page_set_anon_rmap(page, vma, address, exclusive);
@@ -1088,14 +1019,15 @@ void page_add_new_anon_rmap(struct page *page,
VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
SetPageSwapBacked(page);
atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
- if (!PageTransHuge(page))
- __inc_zone_page_state(page, NR_ANON_PAGES);
- else
+ if (PageTransHuge(page))
__inc_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
+ __mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
+ hpage_nr_pages(page));
__page_set_anon_rmap(page, vma, address, 1);
- if (!mlocked_vma_newpage(vma, page))
- lru_cache_add_lru(page, LRU_ACTIVE_ANON);
- else
+ if (!mlocked_vma_newpage(vma, page)) {
+ SetPageActive(page);
+ lru_cache_add(page);
+ } else
add_page_to_unevictable_list(page);
}
@@ -1113,7 +1045,7 @@ void page_add_file_rmap(struct page *page)
mem_cgroup_begin_update_page_stat(page, &locked, &flags);
if (atomic_inc_and_test(&page->_mapcount)) {
__inc_zone_page_state(page, NR_FILE_MAPPED);
- mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
+ mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
}
mem_cgroup_end_update_page_stat(page, &locked, &flags);
}
@@ -1150,14 +1082,14 @@ void page_remove_rmap(struct page *page)
goto out;
if (anon) {
mem_cgroup_uncharge_page(page);
- if (!PageTransHuge(page))
- __dec_zone_page_state(page, NR_ANON_PAGES);
- else
+ if (PageTransHuge(page))
__dec_zone_page_state(page,
NR_ANON_TRANSPARENT_HUGEPAGES);
+ __mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
+ -hpage_nr_pages(page));
} else {
__dec_zone_page_state(page, NR_FILE_MAPPED);
- mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_MAPPED);
+ mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
mem_cgroup_end_update_page_stat(page, &locked, &flags);
}
if (unlikely(PageMlocked(page)))
@@ -1178,17 +1110,17 @@ out:
}
/*
- * Subfunctions of try_to_unmap: try_to_unmap_one called
- * repeatedly from try_to_unmap_ksm, try_to_unmap_anon or try_to_unmap_file.
+ * @arg: enum ttu_flags will be passed to this argument
*/
int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
- unsigned long address, enum ttu_flags flags)
+ unsigned long address, void *arg)
{
struct mm_struct *mm = vma->vm_mm;
pte_t *pte;
pte_t pteval;
spinlock_t *ptl;
int ret = SWAP_AGAIN;
+ enum ttu_flags flags = (enum ttu_flags)arg;
pte = page_check_address(page, mm, address, &ptl, 0);
if (!pte)
@@ -1233,8 +1165,19 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
}
set_pte_at(mm, address, pte,
swp_entry_to_pte(make_hwpoison_entry(page)));
+ } else if (pte_unused(pteval)) {
+ /*
+ * The guest indicated that the page content is of no
+ * interest anymore. Simply discard the pte, vmscan
+ * will take care of the rest.
+ */
+ if (PageAnon(page))
+ dec_mm_counter(mm, MM_ANONPAGES);
+ else
+ dec_mm_counter(mm, MM_FILEPAGES);
} else if (PageAnon(page)) {
swp_entry_t entry = { .val = page_private(page) };
+ pte_t swp_pte;
if (PageSwapCache(page)) {
/*
@@ -1263,7 +1206,10 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION);
entry = make_migration_entry(page, pte_write(pteval));
}
- set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
+ swp_pte = swp_entry_to_pte(entry);
+ if (pte_soft_dirty(pteval))
+ swp_pte = pte_swp_mksoft_dirty(swp_pte);
+ set_pte_at(mm, address, pte, swp_pte);
BUG_ON(pte_file(*pte));
} else if (IS_ENABLED(CONFIG_MIGRATION) &&
(TTU_ACTION(flags) == TTU_MIGRATION)) {
@@ -1386,9 +1332,19 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
BUG_ON(!page || PageAnon(page));
if (locked_vma) {
- mlock_vma_page(page); /* no-op if already mlocked */
- if (page == check_page)
+ if (page == check_page) {
+ /* we know we have check_page locked */
+ mlock_vma_page(page);
ret = SWAP_MLOCK;
+ } else if (trylock_page(page)) {
+ /*
+ * If we can lock the page, perform mlock.
+ * Otherwise leave the page alone, it will be
+ * eventually encountered again later.
+ */
+ mlock_vma_page(page);
+ unlock_page(page);
+ }
continue; /* don't unmap */
}
@@ -1400,8 +1356,12 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
pteval = ptep_clear_flush(vma, address, pte);
/* If nonlinear, store the file page offset in the pte. */
- if (page->index != linear_page_index(vma, address))
- set_pte_at(mm, address, pte, pgoff_to_pte(page->index));
+ if (page->index != linear_page_index(vma, address)) {
+ pte_t ptfile = pgoff_to_pte(page->index);
+ if (pte_soft_dirty(pteval))
+ ptfile = pte_file_mksoft_dirty(ptfile);
+ set_pte_at(mm, address, pte, ptfile);
+ }
/* Move the dirty bit to the physical page now the pte is gone. */
if (pte_dirty(pteval))
@@ -1419,93 +1379,9 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
return ret;
}
-bool is_vma_temporary_stack(struct vm_area_struct *vma)
-{
- int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
-
- if (!maybe_stack)
- return false;
-
- if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
- VM_STACK_INCOMPLETE_SETUP)
- return true;
-
- return false;
-}
-
-/**
- * try_to_unmap_anon - unmap or unlock anonymous page using the object-based
- * rmap method
- * @page: the page to unmap/unlock
- * @flags: action and flags
- *
- * Find all the mappings of a page using the mapping pointer and the vma chains
- * contained in the anon_vma struct it points to.
- *
- * This function is only called from try_to_unmap/try_to_munlock for
- * anonymous pages.
- * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
- * where the page was found will be held for write. So, we won't recheck
- * vm_flags for that VMA. That should be OK, because that vma shouldn't be
- * 'LOCKED.
- */
-static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
-{
- struct anon_vma *anon_vma;
- pgoff_t pgoff;
- struct anon_vma_chain *avc;
- int ret = SWAP_AGAIN;
-
- anon_vma = page_lock_anon_vma_read(page);
- if (!anon_vma)
- return ret;
-
- pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
- anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
- struct vm_area_struct *vma = avc->vma;
- unsigned long address;
-
- /*
- * During exec, a temporary VMA is setup and later moved.
- * The VMA is moved under the anon_vma lock but not the
- * page tables leading to a race where migration cannot
- * find the migration ptes. Rather than increasing the
- * locking requirements of exec(), migration skips
- * temporary VMAs until after exec() completes.
- */
- if (IS_ENABLED(CONFIG_MIGRATION) && (flags & TTU_MIGRATION) &&
- is_vma_temporary_stack(vma))
- continue;
-
- address = vma_address(page, vma);
- ret = try_to_unmap_one(page, vma, address, flags);
- if (ret != SWAP_AGAIN || !page_mapped(page))
- break;
- }
-
- page_unlock_anon_vma_read(anon_vma);
- return ret;
-}
-
-/**
- * try_to_unmap_file - unmap/unlock file page using the object-based rmap method
- * @page: the page to unmap/unlock
- * @flags: action and flags
- *
- * Find all the mappings of a page using the mapping pointer and the vma chains
- * contained in the address_space struct it points to.
- *
- * This function is only called from try_to_unmap/try_to_munlock for
- * object-based pages.
- * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
- * where the page was found will be held for write. So, we won't recheck
- * vm_flags for that VMA. That should be OK, because that vma shouldn't be
- * 'LOCKED.
- */
-static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
+static int try_to_unmap_nonlinear(struct page *page,
+ struct address_space *mapping, void *arg)
{
- struct address_space *mapping = page->mapping;
- pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
struct vm_area_struct *vma;
int ret = SWAP_AGAIN;
unsigned long cursor;
@@ -1513,30 +1389,9 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
unsigned long max_nl_size = 0;
unsigned int mapcount;
- if (PageHuge(page))
- pgoff = page->index << compound_order(page);
-
- mutex_lock(&mapping->i_mmap_mutex);
- vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
- unsigned long address = vma_address(page, vma);
- ret = try_to_unmap_one(page, vma, address, flags);
- if (ret != SWAP_AGAIN || !page_mapped(page))
- goto out;
- }
-
- if (list_empty(&mapping->i_mmap_nonlinear))
- goto out;
-
- /*
- * We don't bother to try to find the munlocked page in nonlinears.
- * It's costly. Instead, later, page reclaim logic may call
- * try_to_unmap(TTU_MUNLOCK) and recover PG_mlocked lazily.
- */
- if (TTU_ACTION(flags) == TTU_MUNLOCK)
- goto out;
+ list_for_each_entry(vma,
+ &mapping->i_mmap_nonlinear, shared.nonlinear) {
- list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
- shared.nonlinear) {
cursor = (unsigned long) vma->vm_private_data;
if (cursor > max_nl_cursor)
max_nl_cursor = cursor;
@@ -1546,8 +1401,7 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
}
if (max_nl_size == 0) { /* all nonlinears locked or reserved ? */
- ret = SWAP_FAIL;
- goto out;
+ return SWAP_FAIL;
}
/*
@@ -1559,7 +1413,8 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
*/
mapcount = page_mapcount(page);
if (!mapcount)
- goto out;
+ return ret;
+
cond_resched();
max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
@@ -1567,10 +1422,11 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
max_nl_cursor = CLUSTER_SIZE;
do {
- list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
- shared.nonlinear) {
+ list_for_each_entry(vma,
+ &mapping->i_mmap_nonlinear, shared.nonlinear) {
+
cursor = (unsigned long) vma->vm_private_data;
- while ( cursor < max_nl_cursor &&
+ while (cursor < max_nl_cursor &&
cursor < vma->vm_end - vma->vm_start) {
if (try_to_unmap_cluster(cursor, &mapcount,
vma, page) == SWAP_MLOCK)
@@ -1578,7 +1434,7 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
cursor += CLUSTER_SIZE;
vma->vm_private_data = (void *) cursor;
if ((int)mapcount <= 0)
- goto out;
+ return ret;
}
vma->vm_private_data = (void *) max_nl_cursor;
}
@@ -1593,11 +1449,34 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
*/
list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.nonlinear)
vma->vm_private_data = NULL;
-out:
- mutex_unlock(&mapping->i_mmap_mutex);
+
return ret;
}
+bool is_vma_temporary_stack(struct vm_area_struct *vma)
+{
+ int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
+
+ if (!maybe_stack)
+ return false;
+
+ if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
+ VM_STACK_INCOMPLETE_SETUP)
+ return true;
+
+ return false;
+}
+
+static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
+{
+ return is_vma_temporary_stack(vma);
+}
+
+static int page_not_mapped(struct page *page)
+{
+ return !page_mapped(page);
+};
+
/**
* try_to_unmap - try to remove all page table mappings to a page
* @page: the page to get unmapped
@@ -1615,16 +1494,29 @@ out:
int try_to_unmap(struct page *page, enum ttu_flags flags)
{
int ret;
+ struct rmap_walk_control rwc = {
+ .rmap_one = try_to_unmap_one,
+ .arg = (void *)flags,
+ .done = page_not_mapped,
+ .file_nonlinear = try_to_unmap_nonlinear,
+ .anon_lock = page_lock_anon_vma_read,
+ };
- BUG_ON(!PageLocked(page));
- VM_BUG_ON(!PageHuge(page) && PageTransHuge(page));
+ VM_BUG_ON_PAGE(!PageHuge(page) && PageTransHuge(page), page);
+
+ /*
+ * During exec, a temporary VMA is setup and later moved.
+ * The VMA is moved under the anon_vma lock but not the
+ * page tables leading to a race where migration cannot
+ * find the migration ptes. Rather than increasing the
+ * locking requirements of exec(), migration skips
+ * temporary VMAs until after exec() completes.
+ */
+ if (flags & TTU_MIGRATION && !PageKsm(page) && PageAnon(page))
+ rwc.invalid_vma = invalid_migration_vma;
+
+ ret = rmap_walk(page, &rwc);
- if (unlikely(PageKsm(page)))
- ret = try_to_unmap_ksm(page, flags);
- else if (PageAnon(page))
- ret = try_to_unmap_anon(page, flags);
- else
- ret = try_to_unmap_file(page, flags);
if (ret != SWAP_MLOCK && !page_mapped(page))
ret = SWAP_SUCCESS;
return ret;
@@ -1647,38 +1539,43 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
*/
int try_to_munlock(struct page *page)
{
- VM_BUG_ON(!PageLocked(page) || PageLRU(page));
+ int ret;
+ struct rmap_walk_control rwc = {
+ .rmap_one = try_to_unmap_one,
+ .arg = (void *)TTU_MUNLOCK,
+ .done = page_not_mapped,
+ /*
+ * We don't bother to try to find the munlocked page in
+ * nonlinears. It's costly. Instead, later, page reclaim logic
+ * may call try_to_unmap() and recover PG_mlocked lazily.
+ */
+ .file_nonlinear = NULL,
+ .anon_lock = page_lock_anon_vma_read,
- if (unlikely(PageKsm(page)))
- return try_to_unmap_ksm(page, TTU_MUNLOCK);
- else if (PageAnon(page))
- return try_to_unmap_anon(page, TTU_MUNLOCK);
- else
- return try_to_unmap_file(page, TTU_MUNLOCK);
+ };
+
+ VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page);
+
+ ret = rmap_walk(page, &rwc);
+ return ret;
}
void __put_anon_vma(struct anon_vma *anon_vma)
{
struct anon_vma *root = anon_vma->root;
+ anon_vma_free(anon_vma);
if (root != anon_vma && atomic_dec_and_test(&root->refcount))
anon_vma_free(root);
-
- anon_vma_free(anon_vma);
}
-#ifdef CONFIG_MIGRATION
-/*
- * rmap_walk() and its helpers rmap_walk_anon() and rmap_walk_file():
- * Called by migrate.c to remove migration ptes, but might be used more later.
- */
-static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
- struct vm_area_struct *, unsigned long, void *), void *arg)
+static struct anon_vma *rmap_walk_anon_lock(struct page *page,
+ struct rmap_walk_control *rwc)
{
struct anon_vma *anon_vma;
- pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
- struct anon_vma_chain *avc;
- int ret = SWAP_AGAIN;
+
+ if (rwc->anon_lock)
+ return rwc->anon_lock(page);
/*
* Note: remove_migration_ptes() cannot use page_lock_anon_vma_read()
@@ -1688,58 +1585,120 @@ static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
*/
anon_vma = page_anon_vma(page);
if (!anon_vma)
- return ret;
+ return NULL;
+
anon_vma_lock_read(anon_vma);
+ return anon_vma;
+}
+
+/*
+ * rmap_walk_anon - do something to anonymous page using the object-based
+ * rmap method
+ * @page: the page to be handled
+ * @rwc: control variable according to each walk type
+ *
+ * Find all the mappings of a page using the mapping pointer and the vma chains
+ * contained in the anon_vma struct it points to.
+ *
+ * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
+ * where the page was found will be held for write. So, we won't recheck
+ * vm_flags for that VMA. That should be OK, because that vma shouldn't be
+ * LOCKED.
+ */
+static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
+{
+ struct anon_vma *anon_vma;
+ pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+ struct anon_vma_chain *avc;
+ int ret = SWAP_AGAIN;
+
+ anon_vma = rmap_walk_anon_lock(page, rwc);
+ if (!anon_vma)
+ return ret;
+
anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
struct vm_area_struct *vma = avc->vma;
unsigned long address = vma_address(page, vma);
- ret = rmap_one(page, vma, address, arg);
+
+ if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
+ continue;
+
+ ret = rwc->rmap_one(page, vma, address, rwc->arg);
if (ret != SWAP_AGAIN)
break;
+ if (rwc->done && rwc->done(page))
+ break;
}
anon_vma_unlock_read(anon_vma);
return ret;
}
-static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
- struct vm_area_struct *, unsigned long, void *), void *arg)
+/*
+ * rmap_walk_file - do something to file page using the object-based rmap method
+ * @page: the page to be handled
+ * @rwc: control variable according to each walk type
+ *
+ * Find all the mappings of a page using the mapping pointer and the vma chains
+ * contained in the address_space struct it points to.
+ *
+ * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
+ * where the page was found will be held for write. So, we won't recheck
+ * vm_flags for that VMA. That should be OK, because that vma shouldn't be
+ * LOCKED.
+ */
+static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
{
struct address_space *mapping = page->mapping;
- pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+ pgoff_t pgoff = page->index << compound_order(page);
struct vm_area_struct *vma;
int ret = SWAP_AGAIN;
+ /*
+ * The page lock not only makes sure that page->mapping cannot
+ * suddenly be NULLified by truncation, it makes sure that the
+ * structure at mapping cannot be freed and reused yet,
+ * so we can safely take mapping->i_mmap_mutex.
+ */
+ VM_BUG_ON(!PageLocked(page));
+
if (!mapping)
return ret;
mutex_lock(&mapping->i_mmap_mutex);
vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
unsigned long address = vma_address(page, vma);
- ret = rmap_one(page, vma, address, arg);
+
+ if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
+ continue;
+
+ ret = rwc->rmap_one(page, vma, address, rwc->arg);
if (ret != SWAP_AGAIN)
- break;
+ goto done;
+ if (rwc->done && rwc->done(page))
+ goto done;
}
- /*
- * No nonlinear handling: being always shared, nonlinear vmas
- * never contain migration ptes. Decide what to do about this
- * limitation to linear when we need rmap_walk() on nonlinear.
- */
+
+ if (!rwc->file_nonlinear)
+ goto done;
+
+ if (list_empty(&mapping->i_mmap_nonlinear))
+ goto done;
+
+ ret = rwc->file_nonlinear(page, mapping, rwc->arg);
+
+done:
mutex_unlock(&mapping->i_mmap_mutex);
return ret;
}
-int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
- struct vm_area_struct *, unsigned long, void *), void *arg)
+int rmap_walk(struct page *page, struct rmap_walk_control *rwc)
{
- VM_BUG_ON(!PageLocked(page));
-
if (unlikely(PageKsm(page)))
- return rmap_walk_ksm(page, rmap_one, arg);
+ return rmap_walk_ksm(page, rwc);
else if (PageAnon(page))
- return rmap_walk_anon(page, rmap_one, arg);
+ return rmap_walk_anon(page, rwc);
else
- return rmap_walk_file(page, rmap_one, arg);
+ return rmap_walk_file(page, rwc);
}
-#endif /* CONFIG_MIGRATION */
#ifdef CONFIG_HUGETLB_PAGE
/*
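The rmap.c changes above collapse the separate anon, file and nonlinear walkers into a single rmap_walk() driven by an rmap_walk_control of callbacks (rmap_one, done, invalid_vma, anon_lock, file_nonlinear), so page_referenced(), page_mkclean(), try_to_unmap() and try_to_munlock() just fill in the control structure. A stripped-down userspace analogue of that callback-walk pattern, all names hypothetical:

#include <stdio.h>
#include <stdbool.h>

struct mapping { int id; };		/* stands in for one VMA mapping a page */

struct walk_control {
	int  (*visit_one)(struct mapping *m, void *arg);	/* like rmap_one */
	bool (*invalid)(struct mapping *m, void *arg);		/* skip this one? */
	bool (*done)(void *arg);				/* finished early? */
	void *arg;
};

/* one generic walk instead of a duplicated loop per caller */
static int walk(struct mapping *maps, int n, struct walk_control *wc)
{
	int ret = 0;

	for (int i = 0; i < n; i++) {
		if (wc->invalid && wc->invalid(&maps[i], wc->arg))
			continue;
		ret = wc->visit_one(&maps[i], wc->arg);
		if (ret)				/* callback asked to stop */
			break;
		if (wc->done && wc->done(wc->arg))	/* nothing left to do */
			break;
	}
	return ret;
}

/* example user in the style of page_referenced(): count visits, stop after two */
struct counter { int seen; };

static int count_one(struct mapping *m, void *arg)
{
	struct counter *c = arg;

	c->seen++;
	printf("visited mapping %d\n", m->id);
	return 0;
}

static bool counted_enough(void *arg)
{
	return ((struct counter *)arg)->seen >= 2;
}

int main(void)
{
	struct mapping maps[] = { {1}, {2}, {3} };
	struct counter c = { 0 };
	struct walk_control wc = {
		.visit_one = count_one,
		.done = counted_enough,
		.arg = &c,
	};

	walk(maps, 3, &wc);
	printf("stopped after %d of 3 mappings\n", c.seen);
	return 0;
}

The point of the pattern is that early termination and cgroup or VMA filtering live in the callbacks, not in three near-identical loops.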
diff --git a/mm/shmem.c b/mm/shmem.c
index 5e6a8422658b..9f70e02111c6 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -45,7 +45,7 @@ static struct vfsmount *shm_mnt;
#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
-#include <linux/generic_acl.h>
+#include <linux/posix_acl_xattr.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
@@ -242,19 +242,17 @@ static int shmem_radix_tree_replace(struct address_space *mapping,
pgoff_t index, void *expected, void *replacement)
{
void **pslot;
- void *item = NULL;
+ void *item;
VM_BUG_ON(!expected);
+ VM_BUG_ON(!replacement);
pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
- if (pslot)
- item = radix_tree_deref_slot_protected(pslot,
- &mapping->tree_lock);
+ if (!pslot)
+ return -ENOENT;
+ item = radix_tree_deref_slot_protected(pslot, &mapping->tree_lock);
if (item != expected)
return -ENOENT;
- if (replacement)
- radix_tree_replace_slot(pslot, replacement);
- else
- radix_tree_delete(&mapping->page_tree, index);
+ radix_tree_replace_slot(pslot, replacement);
return 0;
}
@@ -285,8 +283,8 @@ static int shmem_add_to_page_cache(struct page *page,
{
int error;
- VM_BUG_ON(!PageLocked(page));
- VM_BUG_ON(!PageSwapBacked(page));
+ VM_BUG_ON_PAGE(!PageLocked(page), page);
+ VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
page_cache_get(page);
page->mapping = mapping;
@@ -331,84 +329,20 @@ static void shmem_delete_from_page_cache(struct page *page, void *radswap)
}
/*
- * Like find_get_pages, but collecting swap entries as well as pages.
- */
-static unsigned shmem_find_get_pages_and_swap(struct address_space *mapping,
- pgoff_t start, unsigned int nr_pages,
- struct page **pages, pgoff_t *indices)
-{
- void **slot;
- unsigned int ret = 0;
- struct radix_tree_iter iter;
-
- if (!nr_pages)
- return 0;
-
- rcu_read_lock();
-restart:
- radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
- struct page *page;
-repeat:
- page = radix_tree_deref_slot(slot);
- if (unlikely(!page))
- continue;
- if (radix_tree_exception(page)) {
- if (radix_tree_deref_retry(page))
- goto restart;
- /*
- * Otherwise, we must be storing a swap entry
- * here as an exceptional entry: so return it
- * without attempting to raise page count.
- */
- goto export;
- }
- if (!page_cache_get_speculative(page))
- goto repeat;
-
- /* Has the page moved? */
- if (unlikely(page != *slot)) {
- page_cache_release(page);
- goto repeat;
- }
-export:
- indices[ret] = iter.index;
- pages[ret] = page;
- if (++ret == nr_pages)
- break;
- }
- rcu_read_unlock();
- return ret;
-}
-
-/*
* Remove swap entry from radix tree, free the swap and its page cache.
*/
static int shmem_free_swap(struct address_space *mapping,
pgoff_t index, void *radswap)
{
- int error;
+ void *old;
spin_lock_irq(&mapping->tree_lock);
- error = shmem_radix_tree_replace(mapping, index, radswap, NULL);
+ old = radix_tree_delete_item(&mapping->page_tree, index, radswap);
spin_unlock_irq(&mapping->tree_lock);
- if (!error)
- free_swap_and_cache(radix_to_swp_entry(radswap));
- return error;
-}
-
-/*
- * Pagevec may contain swap entries, so shuffle up pages before releasing.
- */
-static void shmem_deswap_pagevec(struct pagevec *pvec)
-{
- int i, j;
-
- for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
- struct page *page = pvec->pages[i];
- if (!radix_tree_exceptional_entry(page))
- pvec->pages[j++] = page;
- }
- pvec->nr = j;
+ if (old != radswap)
+ return -ENOENT;
+ free_swap_and_cache(radix_to_swp_entry(radswap));
+ return 0;
}
/*
@@ -429,12 +363,12 @@ void shmem_unlock_mapping(struct address_space *mapping)
* Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
* has finished, if it hits a row of PAGEVEC_SIZE swap entries.
*/
- pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
- PAGEVEC_SIZE, pvec.pages, indices);
+ pvec.nr = find_get_entries(mapping, index,
+ PAGEVEC_SIZE, pvec.pages, indices);
if (!pvec.nr)
break;
index = indices[pvec.nr - 1] + 1;
- shmem_deswap_pagevec(&pvec);
+ pagevec_remove_exceptionals(&pvec);
check_move_unevictable_pages(pvec.pages, pvec.nr);
pagevec_release(&pvec);
cond_resched();
@@ -466,9 +400,9 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
pagevec_init(&pvec, 0);
index = start;
while (index < end) {
- pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
- min(end - index, (pgoff_t)PAGEVEC_SIZE),
- pvec.pages, indices);
+ pvec.nr = find_get_entries(mapping, index,
+ min(end - index, (pgoff_t)PAGEVEC_SIZE),
+ pvec.pages, indices);
if (!pvec.nr)
break;
mem_cgroup_uncharge_start();
@@ -491,13 +425,13 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
continue;
if (!unfalloc || !PageUptodate(page)) {
if (page->mapping == mapping) {
- VM_BUG_ON(PageWriteback(page));
+ VM_BUG_ON_PAGE(PageWriteback(page), page);
truncate_inode_page(mapping, page);
}
}
unlock_page(page);
}
- shmem_deswap_pagevec(&pvec);
+ pagevec_remove_exceptionals(&pvec);
pagevec_release(&pvec);
mem_cgroup_uncharge_end();
cond_resched();
@@ -535,9 +469,10 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
index = start;
for ( ; ; ) {
cond_resched();
- pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
+
+ pvec.nr = find_get_entries(mapping, index,
min(end - index, (pgoff_t)PAGEVEC_SIZE),
- pvec.pages, indices);
+ pvec.pages, indices);
if (!pvec.nr) {
if (index == start || unfalloc)
break;
@@ -545,7 +480,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
continue;
}
if ((index == start || unfalloc) && indices[0] >= end) {
- shmem_deswap_pagevec(&pvec);
+ pagevec_remove_exceptionals(&pvec);
pagevec_release(&pvec);
break;
}
@@ -568,13 +503,13 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
lock_page(page);
if (!unfalloc || !PageUptodate(page)) {
if (page->mapping == mapping) {
- VM_BUG_ON(PageWriteback(page));
+ VM_BUG_ON_PAGE(PageWriteback(page), page);
truncate_inode_page(mapping, page);
}
}
unlock_page(page);
}
- shmem_deswap_pagevec(&pvec);
+ pagevec_remove_exceptionals(&pvec);
pagevec_release(&pvec);
mem_cgroup_uncharge_end();
index++;
@@ -620,10 +555,8 @@ static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
}
setattr_copy(inode, attr);
-#ifdef CONFIG_TMPFS_POSIX_ACL
if (attr->ia_valid & ATTR_MODE)
- error = generic_acl_chmod(inode);
-#endif
+ error = posix_acl_chmod(inode, inode->i_mode);
return error;
}
@@ -750,7 +683,7 @@ int shmem_unuse(swp_entry_t swap, struct page *page)
* the shmem_swaplist_mutex which might hold up shmem_writepage().
* Charged back to the user (not to caller) when swap account is used.
*/
- error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
+ error = mem_cgroup_charge_file(page, current->mm, GFP_KERNEL);
if (error)
goto out;
/* No radix_tree_preload: swap entry keeps a place for page in tree */
@@ -1082,7 +1015,7 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
return -EFBIG;
repeat:
swap.val = 0;
- page = find_lock_page(mapping, index);
+ page = find_lock_entry(mapping, index);
if (radix_tree_exceptional_entry(page)) {
swap = radix_to_swp_entry(page);
page = NULL;
@@ -1147,7 +1080,7 @@ repeat:
goto failed;
}
- error = mem_cgroup_cache_charge(page, current->mm,
+ error = mem_cgroup_charge_file(page, current->mm,
gfp & GFP_RECLAIM_MASK);
if (!error) {
error = shmem_add_to_page_cache(page, mapping, index,
@@ -1201,11 +1134,11 @@ repeat:
SetPageSwapBacked(page);
__set_page_locked(page);
- error = mem_cgroup_cache_charge(page, current->mm,
+ error = mem_cgroup_charge_file(page, current->mm,
gfp & GFP_RECLAIM_MASK);
if (error)
goto decused;
- error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
+ error = radix_tree_maybe_preload(gfp & GFP_RECLAIM_MASK);
if (!error) {
error = shmem_add_to_page_cache(page, mapping, index,
gfp, NULL);
@@ -1419,6 +1352,11 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
return inode;
}
+bool shmem_mapping(struct address_space *mapping)
+{
+ return mapping->backing_dev_info == &shmem_backing_dev_info;
+}
+
#ifdef CONFIG_TMPFS
static const struct inode_operations shmem_symlink_inode_operations;
static const struct inode_operations shmem_short_symlink_operations;
@@ -1464,13 +1402,25 @@ shmem_write_end(struct file *file, struct address_space *mapping,
return copied;
}
-static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
+static ssize_t shmem_file_aio_read(struct kiocb *iocb,
+ const struct iovec *iov, unsigned long nr_segs, loff_t pos)
{
- struct inode *inode = file_inode(filp);
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file_inode(file);
struct address_space *mapping = inode->i_mapping;
pgoff_t index;
unsigned long offset;
enum sgp_type sgp = SGP_READ;
+ int error = 0;
+ ssize_t retval;
+ size_t count;
+ loff_t *ppos = &iocb->ki_pos;
+ struct iov_iter iter;
+
+ retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
+ if (retval)
+ return retval;
+ iov_iter_init(&iter, iov, nr_segs, count, 0);
/*
* Might this read be for a stacking filesystem? Then when reading
@@ -1498,10 +1448,10 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_
break;
}
- desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
- if (desc->error) {
- if (desc->error == -EINVAL)
- desc->error = 0;
+ error = shmem_getpage(inode, index, &page, sgp, NULL);
+ if (error) {
+ if (error == -EINVAL)
+ error = 0;
break;
}
if (page)
@@ -1545,61 +1495,26 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_
/*
* Ok, we have the page, and it's up-to-date, so
* now we can copy it to user space...
- *
- * The actor routine returns how many bytes were actually used..
- * NOTE! This may not be the same as how much of a user buffer
- * we filled up (we may be padding etc), so we can only update
- * "pos" here (the actor routine has to update the user buffer
- * pointers and the remaining count).
*/
- ret = actor(desc, page, offset, nr);
+ ret = copy_page_to_iter(page, offset, nr, &iter);
+ retval += ret;
offset += ret;
index += offset >> PAGE_CACHE_SHIFT;
offset &= ~PAGE_CACHE_MASK;
page_cache_release(page);
- if (ret != nr || !desc->count)
+ if (!iov_iter_count(&iter))
break;
-
+ if (ret < nr) {
+ error = -EFAULT;
+ break;
+ }
cond_resched();
}
*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
- file_accessed(filp);
-}
-
-static ssize_t shmem_file_aio_read(struct kiocb *iocb,
- const struct iovec *iov, unsigned long nr_segs, loff_t pos)
-{
- struct file *filp = iocb->ki_filp;
- ssize_t retval;
- unsigned long seg;
- size_t count;
- loff_t *ppos = &iocb->ki_pos;
-
- retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
- if (retval)
- return retval;
-
- for (seg = 0; seg < nr_segs; seg++) {
- read_descriptor_t desc;
-
- desc.written = 0;
- desc.arg.buf = iov[seg].iov_base;
- desc.count = iov[seg].iov_len;
- if (desc.count == 0)
- continue;
- desc.error = 0;
- do_shmem_file_read(filp, ppos, &desc, file_read_actor);
- retval += desc.written;
- if (desc.error) {
- retval = retval ?: desc.error;
- break;
- }
- if (desc.count > 0)
- break;
- }
- return retval;
+ file_accessed(file);
+ return retval ? retval : error;
}
static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
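The conversion above replaces the read_descriptor_t actor with copy_page_to_iter(), so each iteration only folds the copied byte count back into the page index and the in-page offset. A self-contained sketch of that bookkeeping, assuming 4 KiB pages and made-up values:

	#include <assert.h>

	int main(void)
	{
		unsigned long index = 7, offset = 3000, ret = 2000;	/* bytes copied */

		offset += ret;			/* 5000 */
		index += offset >> 12;		/* crossed into page 8 */
		offset &= 4096 - 1;		/* 904 bytes into that page */
		assert(index == 8 && offset == 904);
		return 0;
	}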
@@ -1638,7 +1553,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
index = *ppos >> PAGE_CACHE_SHIFT;
loff = *ppos & ~PAGE_CACHE_MASK;
req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
- nr_pages = min(req_pages, pipe->buffers);
+ nr_pages = min(req_pages, spd.nr_pages_max);
spd.nr_pages = find_get_pages_contig(mapping, index,
nr_pages, spd.pages);
@@ -1731,7 +1646,7 @@ static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
pagevec_init(&pvec, 0);
pvec.nr = 1; /* start small: we may be there already */
while (!done) {
- pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
+ pvec.nr = find_get_entries(mapping, index,
pvec.nr, pvec.pages, indices);
if (!pvec.nr) {
if (whence == SEEK_DATA)
@@ -1758,7 +1673,7 @@ static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
break;
}
}
- shmem_deswap_pagevec(&pvec);
+ pagevec_remove_exceptionals(&pvec);
pagevec_release(&pvec);
pvec.nr = PAGEVEC_SIZE;
cond_resched();
@@ -1798,10 +1713,8 @@ static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
}
}
- if (offset >= 0 && offset != file->f_pos) {
- file->f_pos = offset;
- file->f_version = 0;
- }
+ if (offset >= 0)
+ offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
mutex_unlock(&inode->i_mutex);
return offset;
}
@@ -1939,30 +1852,49 @@ shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
if (inode) {
+ error = simple_acl_create(dir, inode);
+ if (error)
+ goto out_iput;
error = security_inode_init_security(inode, dir,
&dentry->d_name,
shmem_initxattrs, NULL);
- if (error) {
- if (error != -EOPNOTSUPP) {
- iput(inode);
- return error;
- }
- }
-#ifdef CONFIG_TMPFS_POSIX_ACL
- error = generic_acl_init(inode, dir);
- if (error) {
- iput(inode);
- return error;
- }
-#else
+ if (error && error != -EOPNOTSUPP)
+ goto out_iput;
+
error = 0;
-#endif
dir->i_size += BOGO_DIRENT_SIZE;
dir->i_ctime = dir->i_mtime = CURRENT_TIME;
d_instantiate(dentry, inode);
dget(dentry); /* Extra count - pin the dentry in core */
}
return error;
+out_iput:
+ iput(inode);
+ return error;
+}
+
+static int
+shmem_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
+{
+ struct inode *inode;
+ int error = -ENOSPC;
+
+ inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE);
+ if (inode) {
+ error = security_inode_init_security(inode, dir,
+ NULL,
+ shmem_initxattrs, NULL);
+ if (error && error != -EOPNOTSUPP)
+ goto out_iput;
+ error = simple_acl_create(dir, inode);
+ if (error)
+ goto out_iput;
+ d_tmpfile(dentry, inode);
+ }
+ return error;
+out_iput:
+ iput(inode);
+ return error;
}
static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
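The new ->tmpfile hook is what the VFS invokes for O_TMPFILE opens on tmpfs: it creates an unnamed, already-unlinked inode. A hedged userspace illustration; the mount point is an assumption:

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		/* /dev/shm is tmpfs on most systems; adjust as needed. */
		int fd = open("/dev/shm", O_TMPFILE | O_RDWR, 0600);

		if (fd < 0)
			return 1;
		write(fd, "scratch", 7);	/* inode exists but has no name */
		close(fd);			/* ...and is freed here */
		return 0;
	}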
@@ -2194,8 +2126,8 @@ static int shmem_initxattrs(struct inode *inode,
static const struct xattr_handler *shmem_xattr_handlers[] = {
#ifdef CONFIG_TMPFS_POSIX_ACL
- &generic_acl_access_handler,
- &generic_acl_default_handler,
+ &posix_acl_access_xattr_handler,
+ &posix_acl_default_xattr_handler,
#endif
NULL
};
@@ -2586,13 +2518,15 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
* tmpfs instance, limiting inodes to one per page of lowmem;
* but the internal instance is left unlimited.
*/
- if (!(sb->s_flags & MS_NOUSER)) {
+ if (!(sb->s_flags & MS_KERNMOUNT)) {
sbinfo->max_blocks = shmem_default_max_blocks();
sbinfo->max_inodes = shmem_default_max_inodes();
if (shmem_parse_options(data, sbinfo, false)) {
err = -EINVAL;
goto failed;
}
+ } else {
+ sb->s_flags |= MS_NOUSER;
}
sb->s_export_op = &shmem_export_ops;
sb->s_flags |= MS_NOSEC;
@@ -2709,6 +2643,7 @@ static const struct inode_operations shmem_inode_operations = {
.getxattr = shmem_getxattr,
.listxattr = shmem_listxattr,
.removexattr = shmem_removexattr,
+ .set_acl = simple_set_acl,
#endif
};
@@ -2723,6 +2658,7 @@ static const struct inode_operations shmem_dir_inode_operations = {
.rmdir = shmem_rmdir,
.mknod = shmem_mknod,
.rename = shmem_rename,
+ .tmpfile = shmem_tmpfile,
#endif
#ifdef CONFIG_TMPFS_XATTR
.setxattr = shmem_setxattr,
@@ -2732,6 +2668,7 @@ static const struct inode_operations shmem_dir_inode_operations = {
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
.setattr = shmem_setattr,
+ .set_acl = simple_set_acl,
#endif
};
@@ -2744,6 +2681,7 @@ static const struct inode_operations shmem_special_inode_operations = {
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
.setattr = shmem_setattr,
+ .set_acl = simple_set_acl,
#endif
};
@@ -2762,6 +2700,7 @@ static const struct super_operations shmem_ops = {
static const struct vm_operations_struct shmem_vm_ops = {
.fault = shmem_fault,
+ .map_pages = filemap_map_pages,
#ifdef CONFIG_NUMA
.set_policy = shmem_set_policy,
.get_policy = shmem_get_policy,
@@ -2787,6 +2726,10 @@ int __init shmem_init(void)
{
int error;
+ /* If rootfs called this, don't re-init */
+ if (shmem_inode_cachep)
+ return 0;
+
error = bdi_init(&shmem_backing_dev_info);
if (error)
goto out4;
@@ -2801,8 +2744,7 @@ int __init shmem_init(void)
goto out2;
}
- shm_mnt = vfs_kern_mount(&shmem_fs_type, MS_NOUSER,
- shmem_fs_type.name, NULL);
+ shm_mnt = kern_mount(&shmem_fs_type);
if (IS_ERR(shm_mnt)) {
error = PTR_ERR(shm_mnt);
printk(KERN_ERR "Could not kern_mount tmpfs\n");
@@ -2879,23 +2821,12 @@ EXPORT_SYMBOL_GPL(shmem_truncate_range);
/* common code */
-static char *shmem_dname(struct dentry *dentry, char *buffer, int buflen)
-{
- return dynamic_dname(dentry, buffer, buflen, "/%s (deleted)",
- dentry->d_name.name);
-}
-
static struct dentry_operations anon_ops = {
- .d_dname = shmem_dname
+ .d_dname = simple_dname
};
-/**
- * shmem_file_setup - get an unlinked file living in tmpfs
- * @name: name for dentry (to be seen in /proc/<pid>/maps
- * @size: size to be set for the file
- * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
- */
-struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
+static struct file *__shmem_file_setup(const char *name, loff_t size,
+ unsigned long flags, unsigned int i_flags)
{
struct file *res;
struct inode *inode;
@@ -2928,6 +2859,7 @@ struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags
if (!inode)
goto put_dentry;
+ inode->i_flags |= i_flags;
d_instantiate(path.dentry, inode);
inode->i_size = size;
clear_nlink(inode); /* It is unlinked */
@@ -2948,6 +2880,32 @@ put_memory:
shmem_unacct_size(flags, size);
return res;
}
+
+/**
+ * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
+ * kernel internal. There will be NO LSM permission checks against the
+ * underlying inode. So users of this interface must do LSM checks at a
+ * higher layer. The one user is the big_key implementation. LSM checks
+ * are provided at the key level rather than the inode level.
+ * @name: name for dentry (to be seen in /proc/<pid>/maps)
+ * @size: size to be set for the file
+ * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
+ */
+struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
+{
+ return __shmem_file_setup(name, size, flags, S_PRIVATE);
+}
+
+/**
+ * shmem_file_setup - get an unlinked file living in tmpfs
+ * @name: name for dentry (to be seen in /proc/<pid>/maps)
+ * @size: size to be set for the file
+ * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
+ */
+struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
+{
+ return __shmem_file_setup(name, size, flags, 0);
+}
EXPORT_SYMBOL_GPL(shmem_file_setup);
/**
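The split above leaves two entry points: shmem_file_setup() keeps the old behaviour, while shmem_kernel_file_setup() marks the inode S_PRIVATE so LSMs skip it and the caller must do its own checks. A minimal in-kernel usage sketch, with a made-up name and size and error handling trimmed:

	struct file *f = shmem_kernel_file_setup("example", PAGE_SIZE, 0);

	if (IS_ERR(f))
		return PTR_ERR(f);
	/* ... read/write through f as with any shmem file ... */
	fput(f);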
diff --git a/mm/slab.c b/mm/slab.c
index 8ccd296c6d9c..19d92181ce24 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -157,77 +157,22 @@
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif
-/*
- * true if a page was allocated from pfmemalloc reserves for network-based
- * swap
- */
-static bool pfmemalloc_active __read_mostly;
-
-/*
- * kmem_bufctl_t:
- *
- * Bufctl's are used for linking objs within a slab
- * linked offsets.
- *
- * This implementation relies on "struct page" for locating the cache &
- * slab an object belongs to.
- * This allows the bufctl structure to be small (one int), but limits
- * the number of objects a slab (not a cache) can contain when off-slab
- * bufctls are used. The limit is the size of the largest general cache
- * that does not use off-slab slabs.
- * For 32bit archs with 4 kB pages, is this 56.
- * This is not serious, as it is only for large objects, when it is unwise
- * to have too many per slab.
- * Note: This limit can be raised by introducing a general cache whose size
- * is less than 512 (PAGE_SIZE<<3), but greater than 256.
- */
+#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
+ <= SLAB_OBJ_MIN_SIZE) ? 1 : 0)
-typedef unsigned int kmem_bufctl_t;
-#define BUFCTL_END (((kmem_bufctl_t)(~0U))-0)
-#define BUFCTL_FREE (((kmem_bufctl_t)(~0U))-1)
-#define BUFCTL_ACTIVE (((kmem_bufctl_t)(~0U))-2)
-#define SLAB_LIMIT (((kmem_bufctl_t)(~0U))-3)
+#if FREELIST_BYTE_INDEX
+typedef unsigned char freelist_idx_t;
+#else
+typedef unsigned short freelist_idx_t;
+#endif
-/*
- * struct slab_rcu
- *
- * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to
- * arrange for kmem_freepages to be called via RCU. This is useful if
- * we need to approach a kernel structure obliquely, from its address
- * obtained without the usual locking. We can lock the structure to
- * stabilize it and check it's still at the given address, only if we
- * can be sure that the memory has not been meanwhile reused for some
- * other kind of object (which our subsystem's lock might corrupt).
- *
- * rcu_read_lock before reading the address, then rcu_read_unlock after
- * taking the spinlock within the structure expected at that address.
- */
-struct slab_rcu {
- struct rcu_head head;
- struct kmem_cache *cachep;
- void *addr;
-};
+#define SLAB_OBJ_MAX_NUM ((1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) - 1)
/*
- * struct slab
- *
- * Manages the objs in a slab. Placed either at the beginning of mem allocated
- * for a slab, or allocated from an general cache.
- * Slabs are chained into three list: fully used, partial, fully free slabs.
+ * true if a page was allocated from pfmemalloc reserves for network-based
+ * swap
*/
-struct slab {
- union {
- struct {
- struct list_head list;
- unsigned long colouroff;
- void *s_mem; /* including colour offset */
- unsigned int inuse; /* num of objs active in slab */
- kmem_bufctl_t free;
- unsigned short nodeid;
- };
- struct slab_rcu __slab_cover_slab_rcu;
- };
-};
+static bool pfmemalloc_active __read_mostly;
/*
* struct array_cache
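The freelist index type above is picked at compile time: a byte when the minimum object size guarantees the per-slab object count fits in 8 bits, otherwise a short. A standalone model of that choice, assuming 4 KiB pages and an illustrative stand-in for SLAB_OBJ_MIN_SIZE:

	#include <stdio.h>

	#define MODEL_PAGE_SIZE		4096u
	#define MODEL_BITS_PER_BYTE	8u
	#define MODEL_OBJ_MIN_SIZE	16u	/* stand-in for SLAB_OBJ_MIN_SIZE */

	#if ((MODEL_PAGE_SIZE >> MODEL_BITS_PER_BYTE) <= MODEL_OBJ_MIN_SIZE)
	typedef unsigned char model_idx_t;	/* at most 255 objects per slab */
	#else
	typedef unsigned short model_idx_t;
	#endif

	int main(void)
	{
		printf("index width %zu, max objects %u\n", sizeof(model_idx_t),
		       (1u << (sizeof(model_idx_t) * MODEL_BITS_PER_BYTE)) - 1);
		return 0;
	}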
@@ -343,8 +288,8 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
* OTOH the cpuarrays can contain lots of objects,
* which could lock up otherwise freeable slabs.
*/
-#define REAPTIMEOUT_CPUC (2*HZ)
-#define REAPTIMEOUT_LIST3 (4*HZ)
+#define REAPTIMEOUT_AC (2*HZ)
+#define REAPTIMEOUT_NODE (4*HZ)
#if STATS
#define STATS_INC_ACTIVE(x) ((x)->num_active++)
@@ -456,18 +401,10 @@ static inline struct kmem_cache *virt_to_cache(const void *obj)
return page->slab_cache;
}
-static inline struct slab *virt_to_slab(const void *obj)
-{
- struct page *page = virt_to_head_page(obj);
-
- VM_BUG_ON(!PageSlab(page));
- return page->slab_page;
-}
-
-static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
+static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
unsigned int idx)
{
- return slab->s_mem + cache->size * idx;
+ return page->s_mem + cache->size * idx;
}
/*
@@ -477,9 +414,9 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
* reciprocal_divide(offset, cache->reciprocal_buffer_size)
*/
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
- const struct slab *slab, void *obj)
+ const struct page *page, void *obj)
{
- u32 offset = (obj - slab->s_mem);
+ u32 offset = (obj - page->s_mem);
return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}
@@ -565,7 +502,7 @@ static void init_node_lock_keys(int q)
if (slab_state < UP)
return;
- for (i = 1; i < PAGE_SHIFT + MAX_ORDER; i++) {
+ for (i = 1; i <= KMALLOC_SHIFT_HIGH; i++) {
struct kmem_cache_node *n;
struct kmem_cache *cache = kmalloc_caches[i];
@@ -639,9 +576,31 @@ static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
return cachep->array[smp_processor_id()];
}
-static size_t slab_mgmt_size(size_t nr_objs, size_t align)
+static int calculate_nr_objs(size_t slab_size, size_t buffer_size,
+ size_t idx_size, size_t align)
{
- return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
+ int nr_objs;
+ size_t freelist_size;
+
+ /*
+ * Ignore padding for the initial guess. The padding
+ * is at most @align-1 bytes, and @buffer_size is at
+ * least @align. In the worst case, this result will
+ * be one greater than the number of objects that fit
+ * into the memory allocation when taking the padding
+ * into account.
+ */
+ nr_objs = slab_size / (buffer_size + idx_size);
+
+ /*
+ * This calculated number will be either the right
+ * amount, or one greater than what we want.
+ */
+ freelist_size = slab_size - nr_objs * buffer_size;
+ if (freelist_size < ALIGN(nr_objs * idx_size, align))
+ nr_objs--;
+
+ return nr_objs;
}
/*
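calculate_nr_objs() above first ignores alignment padding for its guess and then backs off by one object if the leftover bytes cannot hold the aligned index array. A standalone re-run with made-up numbers (4096-byte slab, 450-byte objects, 2-byte indices, 64-byte alignment):

	#include <stdio.h>

	#define MODEL_ALIGN(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

	int main(void)
	{
		unsigned long slab = 4096, obj = 450, idx = 2, align = 64;
		unsigned long nr = slab / (obj + idx);			/* 9 */
		unsigned long freelist = slab - nr * obj;		/* 46 */

		if (freelist < MODEL_ALIGN(nr * idx, align))		/* 46 < 64 */
			nr--;						/* 8 fit */
		printf("%lu objects per slab\n", nr);
		return 0;
	}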
@@ -660,8 +619,7 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
* on it. For the latter case, the memory allocated for a
* slab is used for:
*
- * - The struct slab
- * - One kmem_bufctl_t for each object
+ * - One unsigned int for each object
* - Padding to respect alignment of @align
* - @buffer_size bytes for each object
*
@@ -674,32 +632,10 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
mgmt_size = 0;
nr_objs = slab_size / buffer_size;
- if (nr_objs > SLAB_LIMIT)
- nr_objs = SLAB_LIMIT;
} else {
- /*
- * Ignore padding for the initial guess. The padding
- * is at most @align-1 bytes, and @buffer_size is at
- * least @align. In the worst case, this result will
- * be one greater than the number of objects that fit
- * into the memory allocation when taking the padding
- * into account.
- */
- nr_objs = (slab_size - sizeof(struct slab)) /
- (buffer_size + sizeof(kmem_bufctl_t));
-
- /*
- * This calculated number will be either the right
- * amount, or one greater than what we want.
- */
- if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
- > slab_size)
- nr_objs--;
-
- if (nr_objs > SLAB_LIMIT)
- nr_objs = SLAB_LIMIT;
-
- mgmt_size = slab_mgmt_size(nr_objs, align);
+ nr_objs = calculate_nr_objs(slab_size, buffer_size,
+ sizeof(freelist_idx_t), align);
+ mgmt_size = ALIGN(nr_objs * sizeof(freelist_idx_t), align);
}
*num = nr_objs;
*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
@@ -787,7 +723,7 @@ static void next_reap_node(void)
* the CPUs getting into lockstep and contending for the global cache chain
* lock.
*/
-static void __cpuinit start_cpu_timer(int cpu)
+static void start_cpu_timer(int cpu)
{
struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
@@ -829,10 +765,8 @@ static struct array_cache *alloc_arraycache(int node, int entries,
return nc;
}
-static inline bool is_slab_pfmemalloc(struct slab *slabp)
+static inline bool is_slab_pfmemalloc(struct page *page)
{
- struct page *page = virt_to_page(slabp->s_mem);
-
return PageSlabPfmemalloc(page);
}
@@ -841,23 +775,23 @@ static void recheck_pfmemalloc_active(struct kmem_cache *cachep,
struct array_cache *ac)
{
struct kmem_cache_node *n = cachep->node[numa_mem_id()];
- struct slab *slabp;
+ struct page *page;
unsigned long flags;
if (!pfmemalloc_active)
return;
spin_lock_irqsave(&n->list_lock, flags);
- list_for_each_entry(slabp, &n->slabs_full, list)
- if (is_slab_pfmemalloc(slabp))
+ list_for_each_entry(page, &n->slabs_full, lru)
+ if (is_slab_pfmemalloc(page))
goto out;
- list_for_each_entry(slabp, &n->slabs_partial, list)
- if (is_slab_pfmemalloc(slabp))
+ list_for_each_entry(page, &n->slabs_partial, lru)
+ if (is_slab_pfmemalloc(page))
goto out;
- list_for_each_entry(slabp, &n->slabs_free, list)
- if (is_slab_pfmemalloc(slabp))
+ list_for_each_entry(page, &n->slabs_free, lru)
+ if (is_slab_pfmemalloc(page))
goto out;
pfmemalloc_active = false;
@@ -897,8 +831,8 @@ static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
*/
n = cachep->node[numa_mem_id()];
if (!list_empty(&n->slabs_free) && force_refill) {
- struct slab *slabp = virt_to_slab(objp);
- ClearPageSlabPfmemalloc(virt_to_head_page(slabp->s_mem));
+ struct page *page = virt_to_head_page(objp);
+ ClearPageSlabPfmemalloc(page);
clear_obj_pfmemalloc(&objp);
recheck_pfmemalloc_active(cachep, ac);
return objp;
@@ -1099,8 +1033,7 @@ static void drain_alien_cache(struct kmem_cache *cachep,
static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
- struct slab *slabp = virt_to_slab(objp);
- int nodeid = slabp->nodeid;
+ int nodeid = page_to_nid(virt_to_page(objp));
struct kmem_cache_node *n;
struct array_cache *alien = NULL;
int node;
@@ -1111,7 +1044,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
* Make sure we are not freeing a object from another node to the array
* cache on this cpu.
*/
- if (likely(slabp->nodeid == node))
+ if (likely(nodeid == node))
return 0;
n = cachep->node[node];
@@ -1151,7 +1084,7 @@ static int init_cache_node_node(int node)
list_for_each_entry(cachep, &slab_caches, list) {
/*
- * Set up the size64 kmemlist for cpu before we can
+ * Set up the kmem_cache_node for cpu before we can
* begin anything. Make sure some other cpu on this
* node has not already allocated this
*/
@@ -1160,12 +1093,12 @@ static int init_cache_node_node(int node)
if (!n)
return -ENOMEM;
kmem_cache_node_init(n);
- n->next_reap = jiffies + REAPTIMEOUT_LIST3 +
- ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
+ n->next_reap = jiffies + REAPTIMEOUT_NODE +
+ ((unsigned long)cachep) % REAPTIMEOUT_NODE;
/*
- * The l3s don't come and go as CPUs come and
- * go. slab_mutex is sufficient
+ * The kmem_cache_nodes don't come and go as CPUs
+ * come and go. slab_mutex is sufficient
* protection here.
*/
cachep->node[node] = n;
@@ -1180,7 +1113,13 @@ static int init_cache_node_node(int node)
return 0;
}
-static void __cpuinit cpuup_canceled(long cpu)
+static inline int slabs_tofree(struct kmem_cache *cachep,
+ struct kmem_cache_node *n)
+{
+ return (n->free_objects + cachep->num - 1) / cachep->num;
+}
+
+static void cpuup_canceled(long cpu)
{
struct kmem_cache *cachep;
struct kmem_cache_node *n = NULL;
@@ -1241,11 +1180,11 @@ free_array_cache:
n = cachep->node[node];
if (!n)
continue;
- drain_freelist(cachep, n, n->free_objects);
+ drain_freelist(cachep, n, slabs_tofree(cachep, n));
}
}
-static int __cpuinit cpuup_prepare(long cpu)
+static int cpuup_prepare(long cpu)
{
struct kmem_cache *cachep;
struct kmem_cache_node *n = NULL;
@@ -1328,7 +1267,7 @@ bad:
return -ENOMEM;
}
-static int __cpuinit cpuup_callback(struct notifier_block *nfb,
+static int cpuup_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
long cpu = (long)hcpu;
@@ -1384,7 +1323,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
return notifier_from_errno(err);
}
-static struct notifier_block __cpuinitdata cpucache_notifier = {
+static struct notifier_block cpucache_notifier = {
&cpuup_callback, NULL, 0
};
@@ -1408,7 +1347,7 @@ static int __meminit drain_cache_node_node(int node)
if (!n)
continue;
- drain_freelist(cachep, n, n->free_objects);
+ drain_freelist(cachep, n, slabs_tofree(cachep, n));
if (!list_empty(&n->slabs_full) ||
!list_empty(&n->slabs_partial)) {
@@ -1484,8 +1423,8 @@ static void __init set_up_node(struct kmem_cache *cachep, int index)
for_each_online_node(node) {
cachep->node[node] = &init_kmem_cache_node[index + node];
cachep->node[node]->next_reap = jiffies +
- REAPTIMEOUT_LIST3 +
- ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
+ REAPTIMEOUT_NODE +
+ ((unsigned long)cachep) % REAPTIMEOUT_NODE;
}
}
@@ -1506,6 +1445,8 @@ void __init kmem_cache_init(void)
{
int i;
+ BUILD_BUG_ON(sizeof(((struct page *)NULL)->lru) <
+ sizeof(struct rcu_head));
kmem_cache = &kmem_cache_boot;
setup_node_pointer(kmem_cache);
@@ -1681,7 +1622,7 @@ static noinline void
slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
{
struct kmem_cache_node *n;
- struct slab *slabp;
+ struct page *page;
unsigned long flags;
int node;
@@ -1700,15 +1641,15 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
continue;
spin_lock_irqsave(&n->list_lock, flags);
- list_for_each_entry(slabp, &n->slabs_full, list) {
+ list_for_each_entry(page, &n->slabs_full, lru) {
active_objs += cachep->num;
active_slabs++;
}
- list_for_each_entry(slabp, &n->slabs_partial, list) {
- active_objs += slabp->inuse;
+ list_for_each_entry(page, &n->slabs_partial, lru) {
+ active_objs += page->active;
active_slabs++;
}
- list_for_each_entry(slabp, &n->slabs_free, list)
+ list_for_each_entry(page, &n->slabs_free, lru)
num_slabs++;
free_objects += n->free_objects;
@@ -1730,19 +1671,11 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
* did not request dmaable memory, we might get it, but that
* would be relatively rare and ignorable.
*/
-static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
+static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
+ int nodeid)
{
struct page *page;
int nr_pages;
- int i;
-
-#ifndef CONFIG_MMU
- /*
- * Nommu uses slab's for process anonymous memory allocations, and thus
- * requires __GFP_COMP to properly refcount higher order allocations
- */
- flags |= __GFP_COMP;
-#endif
flags |= cachep->allocflags;
if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
@@ -1766,12 +1699,9 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
else
add_zone_page_state(page_zone(page),
NR_SLAB_UNRECLAIMABLE, nr_pages);
- for (i = 0; i < nr_pages; i++) {
- __SetPageSlab(page + i);
-
- if (page->pfmemalloc)
- SetPageSlabPfmemalloc(page + i);
- }
+ __SetPageSlab(page);
+ if (page->pfmemalloc)
+ SetPageSlabPfmemalloc(page);
memcg_bind_pages(cachep, cachep->gfporder);
if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
@@ -1783,17 +1713,15 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
kmemcheck_mark_unallocated_pages(page, nr_pages);
}
- return page_address(page);
+ return page;
}
/*
* Interface to system's page release.
*/
-static void kmem_freepages(struct kmem_cache *cachep, void *addr)
+static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
{
- unsigned long i = (1 << cachep->gfporder);
- struct page *page = virt_to_page(addr);
- const unsigned long nr_freed = i;
+ const unsigned long nr_freed = (1 << cachep->gfporder);
kmemcheck_free_shadow(page, cachep->gfporder);
@@ -1803,27 +1731,28 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
else
sub_zone_page_state(page_zone(page),
NR_SLAB_UNRECLAIMABLE, nr_freed);
- while (i--) {
- BUG_ON(!PageSlab(page));
- __ClearPageSlabPfmemalloc(page);
- __ClearPageSlab(page);
- page++;
- }
+
+ BUG_ON(!PageSlab(page));
+ __ClearPageSlabPfmemalloc(page);
+ __ClearPageSlab(page);
+ page_mapcount_reset(page);
+ page->mapping = NULL;
memcg_release_pages(cachep, cachep->gfporder);
if (current->reclaim_state)
current->reclaim_state->reclaimed_slab += nr_freed;
- free_memcg_kmem_pages((unsigned long)addr, cachep->gfporder);
+ __free_memcg_kmem_pages(page, cachep->gfporder);
}
static void kmem_rcu_free(struct rcu_head *head)
{
- struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
- struct kmem_cache *cachep = slab_rcu->cachep;
+ struct kmem_cache *cachep;
+ struct page *page;
- kmem_freepages(cachep, slab_rcu->addr);
- if (OFF_SLAB(cachep))
- kmem_cache_free(cachep->slabp_cache, slab_rcu);
+ page = container_of(head, struct page, rcu_head);
+ cachep = page->slab_cache;
+
+ kmem_freepages(cachep, page);
}
#if DEBUG
@@ -1972,19 +1901,19 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp)
/* Print some data about the neighboring objects, if they
* exist:
*/
- struct slab *slabp = virt_to_slab(objp);
+ struct page *page = virt_to_head_page(objp);
unsigned int objnr;
- objnr = obj_to_index(cachep, slabp, objp);
+ objnr = obj_to_index(cachep, page, objp);
if (objnr) {
- objp = index_to_obj(cachep, slabp, objnr - 1);
+ objp = index_to_obj(cachep, page, objnr - 1);
realobj = (char *)objp + obj_offset(cachep);
printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
realobj, size);
print_objinfo(cachep, objp, 2);
}
if (objnr + 1 < cachep->num) {
- objp = index_to_obj(cachep, slabp, objnr + 1);
+ objp = index_to_obj(cachep, page, objnr + 1);
realobj = (char *)objp + obj_offset(cachep);
printk(KERN_ERR "Next obj: start=%p, len=%d\n",
realobj, size);
@@ -1995,11 +1924,12 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp)
#endif
#if DEBUG
-static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
+static void slab_destroy_debugcheck(struct kmem_cache *cachep,
+ struct page *page)
{
int i;
for (i = 0; i < cachep->num; i++) {
- void *objp = index_to_obj(cachep, slabp, i);
+ void *objp = index_to_obj(cachep, page, i);
if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
@@ -2024,7 +1954,8 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slab
}
}
#else
-static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
+static void slab_destroy_debugcheck(struct kmem_cache *cachep,
+ struct page *page)
{
}
#endif
@@ -2032,29 +1963,40 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slab
/**
* slab_destroy - destroy and release all objects in a slab
* @cachep: cache pointer being destroyed
- * @slabp: slab pointer being destroyed
+ * @page: page pointer being destroyed
*
* Destroy all the objs in a slab, and release the mem back to the system.
* Before calling the slab must have been unlinked from the cache. The
* cache-lock is not held/needed.
*/
-static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
+static void slab_destroy(struct kmem_cache *cachep, struct page *page)
{
- void *addr = slabp->s_mem - slabp->colouroff;
+ void *freelist;
- slab_destroy_debugcheck(cachep, slabp);
+ freelist = page->freelist;
+ slab_destroy_debugcheck(cachep, page);
if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
- struct slab_rcu *slab_rcu;
+ struct rcu_head *head;
+
+ /*
+ * RCU free overloads the RCU head over the LRU.
+ * slab_page has been overloaded over the LRU,
+ * however it is not used from now on so that
+ * we can use it safely.
+ */
+ head = (void *)&page->rcu_head;
+ call_rcu(head, kmem_rcu_free);
- slab_rcu = (struct slab_rcu *)slabp;
- slab_rcu->cachep = cachep;
- slab_rcu->addr = addr;
- call_rcu(&slab_rcu->head, kmem_rcu_free);
} else {
- kmem_freepages(cachep, addr);
- if (OFF_SLAB(cachep))
- kmem_cache_free(cachep->slabp_cache, slabp);
+ kmem_freepages(cachep, page);
}
+
+ /*
+ * From now on, we don't use freelist
+ * although actual page can be freed in rcu context
+ */
+ if (OFF_SLAB(cachep))
+ kmem_cache_free(cachep->freelist_cache, freelist);
}
/**
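With struct slab gone, the deferred free above parks an rcu_head in the page itself and recovers the page with container_of() in the callback. A standalone model of that pattern; the types are illustrative, not the kernel's:

	#include <stdio.h>
	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct model_rcu_head { int pending; };

	struct model_page {
		int id;
		struct model_rcu_head rcu_head;	/* shares space with lru upstream */
	};

	static void model_rcu_free(struct model_rcu_head *head)
	{
		struct model_page *page =
			container_of(head, struct model_page, rcu_head);

		printf("freeing page %d after the grace period\n", page->id);
	}

	int main(void)
	{
		struct model_page p = { .id = 42 };

		model_rcu_free(&p.rcu_head);	/* stands in for call_rcu() */
		return 0;
	}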
@@ -2085,14 +2027,18 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
if (!num)
continue;
+ /* Can't handle number of objects more than SLAB_OBJ_MAX_NUM */
+ if (num > SLAB_OBJ_MAX_NUM)
+ break;
+
if (flags & CFLGS_OFF_SLAB) {
/*
* Max number of objs-per-slab for caches which
* use off-slab slabs. Needed to avoid a possible
* looping condition in cache_grow().
*/
- offslab_limit = size - sizeof(struct slab);
- offslab_limit /= sizeof(kmem_bufctl_t);
+ offslab_limit = size;
+ offslab_limit /= sizeof(freelist_idx_t);
if (num > offslab_limit)
break;
@@ -2178,8 +2124,8 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
}
}
cachep->node[numa_mem_id()]->next_reap =
- jiffies + REAPTIMEOUT_LIST3 +
- ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
+ jiffies + REAPTIMEOUT_NODE +
+ ((unsigned long)cachep) % REAPTIMEOUT_NODE;
cpu_cache_get(cachep)->avail = 0;
cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
@@ -2214,7 +2160,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
int
__kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
{
- size_t left_over, slab_size, ralign;
+ size_t left_over, freelist_size, ralign;
gfp_t gfp;
int err;
size_t size = cachep->size;
@@ -2318,7 +2264,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
* it too early on. Always use on-slab management when
* SLAB_NOLEAKTRACE to avoid recursive calls into kmemleak)
*/
- if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init &&
+ if ((size >= (PAGE_SIZE >> 5)) && !slab_early_init &&
!(flags & SLAB_NOLEAKTRACE))
/*
* Size is large, assume best to place the slab management obj
@@ -2327,28 +2273,33 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
flags |= CFLGS_OFF_SLAB;
size = ALIGN(size, cachep->align);
+ /*
+ * We should restrict the number of objects in a slab to implement
+ * byte sized index. Refer comment on SLAB_OBJ_MIN_SIZE definition.
+ */
+ if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE)
+ size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align);
left_over = calculate_slab_order(cachep, size, cachep->align, flags);
if (!cachep->num)
return -E2BIG;
- slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
- + sizeof(struct slab), cachep->align);
+ freelist_size =
+ ALIGN(cachep->num * sizeof(freelist_idx_t), cachep->align);
/*
* If the slab has been placed off-slab, and we have enough space then
* move it on-slab. This is at the expense of any extra colouring.
*/
- if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) {
+ if (flags & CFLGS_OFF_SLAB && left_over >= freelist_size) {
flags &= ~CFLGS_OFF_SLAB;
- left_over -= slab_size;
+ left_over -= freelist_size;
}
if (flags & CFLGS_OFF_SLAB) {
/* really off slab. No need for manual alignment */
- slab_size =
- cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);
+ freelist_size = cachep->num * sizeof(freelist_idx_t);
#ifdef CONFIG_PAGE_POISONING
/* If we're going to use the generic kernel_map_pages()
@@ -2365,24 +2316,24 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
if (cachep->colour_off < cachep->align)
cachep->colour_off = cachep->align;
cachep->colour = left_over / cachep->colour_off;
- cachep->slab_size = slab_size;
+ cachep->freelist_size = freelist_size;
cachep->flags = flags;
- cachep->allocflags = 0;
+ cachep->allocflags = __GFP_COMP;
if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
cachep->allocflags |= GFP_DMA;
cachep->size = size;
cachep->reciprocal_buffer_size = reciprocal_value(size);
if (flags & CFLGS_OFF_SLAB) {
- cachep->slabp_cache = kmalloc_slab(slab_size, 0u);
+ cachep->freelist_cache = kmalloc_slab(freelist_size, 0u);
/*
- * This is a possibility for one of the malloc_sizes caches.
+ * This is a possibility for one of the kmalloc_{dma,}_caches.
* But since we go off slab only for object size greater than
- * PAGE_SIZE/8, and malloc_sizes gets created in ascending order,
- * this should not happen at all.
+ * PAGE_SIZE/8, and kmalloc_{dma,}_caches get created
+ * in ascending order, this should not happen at all.
* But leave a BUG_ON for some lucky dude.
*/
- BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
+ BUG_ON(ZERO_OR_NULL_PTR(cachep->freelist_cache));
}
err = setup_cpu_cache(cachep, gfp);
@@ -2488,7 +2439,7 @@ static int drain_freelist(struct kmem_cache *cache,
{
struct list_head *p;
int nr_freed;
- struct slab *slabp;
+ struct page *page;
nr_freed = 0;
while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
@@ -2500,18 +2451,18 @@ static int drain_freelist(struct kmem_cache *cache,
goto out;
}
- slabp = list_entry(p, struct slab, list);
+ page = list_entry(p, struct page, lru);
#if DEBUG
- BUG_ON(slabp->inuse);
+ BUG_ON(page->active);
#endif
- list_del(&slabp->list);
+ list_del(&page->lru);
/*
* Safe to drop the lock. The slab is no longer linked
* to the cache.
*/
n->free_objects -= cache->num;
spin_unlock_irq(&n->list_lock);
- slab_destroy(cache, slabp);
+ slab_destroy(cache, page);
nr_freed++;
}
out:
@@ -2532,7 +2483,7 @@ static int __cache_shrink(struct kmem_cache *cachep)
if (!n)
continue;
- drain_freelist(cachep, n, n->free_objects);
+ drain_freelist(cachep, n, slabs_tofree(cachep, n));
ret += !list_empty(&n->slabs_full) ||
!list_empty(&n->slabs_partial);
@@ -2587,59 +2538,58 @@ int __kmem_cache_shutdown(struct kmem_cache *cachep)
/*
* Get the memory for a slab management obj.
- * For a slab cache when the slab descriptor is off-slab, slab descriptors
- * always come from malloc_sizes caches. The slab descriptor cannot
- * come from the same cache which is getting created because,
- * when we are searching for an appropriate cache for these
- * descriptors in kmem_cache_create, we search through the malloc_sizes array.
- * If we are creating a malloc_sizes cache here it would not be visible to
- * kmem_find_general_cachep till the initialization is complete.
- * Hence we cannot have slabp_cache same as the original cache.
+ *
+ * For a slab cache when the slab descriptor is off-slab, the
+ * slab descriptor can't come from the same cache which is being created.
+ * If it did, that would mean we defer the creation of
+ * the kmalloc_{dma,}_cache of size sizeof(slab descriptor) to this point,
+ * and we would eventually call down to __kmem_cache_create(), which
+ * in turn looks up in the kmalloc_{dma,}_caches for the desired-size one.
+ * This is a "chicken-and-egg" problem.
+ *
+ * So the off-slab slab descriptor shall come from the kmalloc_{dma,}_caches,
+ * which are all initialized during kmem_cache_init().
*/
-static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
- int colour_off, gfp_t local_flags,
- int nodeid)
+static void *alloc_slabmgmt(struct kmem_cache *cachep,
+ struct page *page, int colour_off,
+ gfp_t local_flags, int nodeid)
{
- struct slab *slabp;
+ void *freelist;
+ void *addr = page_address(page);
if (OFF_SLAB(cachep)) {
/* Slab management obj is off-slab. */
- slabp = kmem_cache_alloc_node(cachep->slabp_cache,
+ freelist = kmem_cache_alloc_node(cachep->freelist_cache,
local_flags, nodeid);
- /*
- * If the first object in the slab is leaked (it's allocated
- * but no one has a reference to it), we want to make sure
- * kmemleak does not treat the ->s_mem pointer as a reference
- * to the object. Otherwise we will not report the leak.
- */
- kmemleak_scan_area(&slabp->list, sizeof(struct list_head),
- local_flags);
- if (!slabp)
+ if (!freelist)
return NULL;
} else {
- slabp = objp + colour_off;
- colour_off += cachep->slab_size;
+ freelist = addr + colour_off;
+ colour_off += cachep->freelist_size;
}
- slabp->inuse = 0;
- slabp->colouroff = colour_off;
- slabp->s_mem = objp + colour_off;
- slabp->nodeid = nodeid;
- slabp->free = 0;
- return slabp;
+ page->active = 0;
+ page->s_mem = addr + colour_off;
+ return freelist;
}
-static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
+static inline freelist_idx_t get_free_obj(struct page *page, unsigned int idx)
{
- return (kmem_bufctl_t *) (slabp + 1);
+ return ((freelist_idx_t *)page->freelist)[idx];
+}
+
+static inline void set_free_obj(struct page *page,
+ unsigned int idx, freelist_idx_t val)
+{
+ ((freelist_idx_t *)(page->freelist))[idx] = val;
}
static void cache_init_objs(struct kmem_cache *cachep,
- struct slab *slabp)
+ struct page *page)
{
int i;
for (i = 0; i < cachep->num; i++) {
- void *objp = index_to_obj(cachep, slabp, i);
+ void *objp = index_to_obj(cachep, page, i);
#if DEBUG
/* need to poison the objs? */
if (cachep->flags & SLAB_POISON)
@@ -2675,9 +2625,8 @@ static void cache_init_objs(struct kmem_cache *cachep,
if (cachep->ctor)
cachep->ctor(objp);
#endif
- slab_bufctl(slabp)[i] = i + 1;
+ set_free_obj(page, i, i);
}
- slab_bufctl(slabp)[i - 1] = BUFCTL_END;
}
static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
@@ -2690,41 +2639,41 @@ static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
}
}
-static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp,
+static void *slab_get_obj(struct kmem_cache *cachep, struct page *page,
int nodeid)
{
- void *objp = index_to_obj(cachep, slabp, slabp->free);
- kmem_bufctl_t next;
+ void *objp;
- slabp->inuse++;
- next = slab_bufctl(slabp)[slabp->free];
+ objp = index_to_obj(cachep, page, get_free_obj(page, page->active));
+ page->active++;
#if DEBUG
- slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
- WARN_ON(slabp->nodeid != nodeid);
+ WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid);
#endif
- slabp->free = next;
return objp;
}
-static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
+static void slab_put_obj(struct kmem_cache *cachep, struct page *page,
void *objp, int nodeid)
{
- unsigned int objnr = obj_to_index(cachep, slabp, objp);
-
+ unsigned int objnr = obj_to_index(cachep, page, objp);
#if DEBUG
+ unsigned int i;
+
/* Verify that the slab belongs to the intended node */
- WARN_ON(slabp->nodeid != nodeid);
+ WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid);
- if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) {
- printk(KERN_ERR "slab: double free detected in cache "
- "'%s', objp %p\n", cachep->name, objp);
- BUG();
+ /* Verify double free bug */
+ for (i = page->active; i < cachep->num; i++) {
+ if (get_free_obj(page, i) == objnr) {
+ printk(KERN_ERR "slab: double free detected in cache "
+ "'%s', objp %p\n", cachep->name, objp);
+ BUG();
+ }
}
#endif
- slab_bufctl(slabp)[objnr] = slabp->free;
- slabp->free = objnr;
- slabp->inuse--;
+ page->active--;
+ set_free_obj(page, page->active, objnr);
}
/*
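slab_get_obj()/slab_put_obj() above treat the freelist as a stack of object indices with page->active as the cursor: allocation reads the entry at active and advances, free backs up and overwrites that entry. A standalone model:

	#include <assert.h>

	static unsigned char freelist[4] = { 0, 1, 2, 3 };
	static unsigned int active;		/* page->active */

	static unsigned int get_obj(void)
	{
		return freelist[active++];	/* like slab_get_obj() */
	}

	static void put_obj(unsigned int objnr)
	{
		freelist[--active] = objnr;	/* like slab_put_obj() */
	}

	int main(void)
	{
		unsigned int a = get_obj();	/* index 0 */
		unsigned int b = get_obj();	/* index 1 */

		put_obj(a);
		assert(active == 1 && get_obj() == a && b == 1);
		return 0;
	}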
@@ -2732,23 +2681,11 @@ static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
* for the slab allocator to be able to lookup the cache and slab of a
* virtual address for kfree, ksize, and slab debugging.
*/
-static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
- void *addr)
+static void slab_map_pages(struct kmem_cache *cache, struct page *page,
+ void *freelist)
{
- int nr_pages;
- struct page *page;
-
- page = virt_to_page(addr);
-
- nr_pages = 1;
- if (likely(!PageCompound(page)))
- nr_pages <<= cache->gfporder;
-
- do {
- page->slab_cache = cache;
- page->slab_page = slab;
- page++;
- } while (--nr_pages);
+ page->slab_cache = cache;
+ page->freelist = freelist;
}
/*
@@ -2756,9 +2693,9 @@ static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
* kmem_cache_alloc() when there are no active objs left in a cache.
*/
static int cache_grow(struct kmem_cache *cachep,
- gfp_t flags, int nodeid, void *objp)
+ gfp_t flags, int nodeid, struct page *page)
{
- struct slab *slabp;
+ void *freelist;
size_t offset;
gfp_t local_flags;
struct kmem_cache_node *n;
@@ -2799,20 +2736,20 @@ static int cache_grow(struct kmem_cache *cachep,
* Get mem for the objs. Attempt to allocate a physical page from
* 'nodeid'.
*/
- if (!objp)
- objp = kmem_getpages(cachep, local_flags, nodeid);
- if (!objp)
+ if (!page)
+ page = kmem_getpages(cachep, local_flags, nodeid);
+ if (!page)
goto failed;
/* Get slab management. */
- slabp = alloc_slabmgmt(cachep, objp, offset,
+ freelist = alloc_slabmgmt(cachep, page, offset,
local_flags & ~GFP_CONSTRAINT_MASK, nodeid);
- if (!slabp)
+ if (!freelist)
goto opps1;
- slab_map_pages(cachep, slabp, objp);
+ slab_map_pages(cachep, page, freelist);
- cache_init_objs(cachep, slabp);
+ cache_init_objs(cachep, page);
if (local_flags & __GFP_WAIT)
local_irq_disable();
@@ -2820,13 +2757,13 @@ static int cache_grow(struct kmem_cache *cachep,
spin_lock(&n->list_lock);
/* Make slab active. */
- list_add_tail(&slabp->list, &(n->slabs_free));
+ list_add_tail(&page->lru, &(n->slabs_free));
STATS_INC_GROWN(cachep);
n->free_objects += cachep->num;
spin_unlock(&n->list_lock);
return 1;
opps1:
- kmem_freepages(cachep, objp);
+ kmem_freepages(cachep, page);
failed:
if (local_flags & __GFP_WAIT)
local_irq_disable();
@@ -2874,9 +2811,8 @@ static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
unsigned long caller)
{
- struct page *page;
unsigned int objnr;
- struct slab *slabp;
+ struct page *page;
BUG_ON(virt_to_cache(objp) != cachep);
@@ -2884,8 +2820,6 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
kfree_debugcheck(objp);
page = virt_to_head_page(objp);
- slabp = page->slab_page;
-
if (cachep->flags & SLAB_RED_ZONE) {
verify_redzone_free(cachep, objp);
*dbg_redzone1(cachep, objp) = RED_INACTIVE;
@@ -2894,14 +2828,11 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
if (cachep->flags & SLAB_STORE_USER)
*dbg_userword(cachep, objp) = (void *)caller;
- objnr = obj_to_index(cachep, slabp, objp);
+ objnr = obj_to_index(cachep, page, objp);
BUG_ON(objnr >= cachep->num);
- BUG_ON(objp != index_to_obj(cachep, slabp, objnr));
+ BUG_ON(objp != index_to_obj(cachep, page, objnr));
-#ifdef CONFIG_DEBUG_SLAB_LEAK
- slab_bufctl(slabp)[objnr] = BUFCTL_FREE;
-#endif
if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
@@ -2918,33 +2849,9 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
return objp;
}
-static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
-{
- kmem_bufctl_t i;
- int entries = 0;
-
- /* Check slab's freelist to see if this obj is there. */
- for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) {
- entries++;
- if (entries > cachep->num || i >= cachep->num)
- goto bad;
- }
- if (entries != cachep->num - slabp->inuse) {
-bad:
- printk(KERN_ERR "slab: Internal list corruption detected in "
- "cache '%s'(%d), slabp %p(%d). Tainted(%s). Hexdump:\n",
- cachep->name, cachep->num, slabp, slabp->inuse,
- print_tainted());
- print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, slabp,
- sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t),
- 1);
- BUG();
- }
-}
#else
#define kfree_debugcheck(x) do { } while(0)
#define cache_free_debugcheck(x,objp,z) (objp)
-#define check_slabp(x,y) do { } while(0)
#endif
static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags,
@@ -2983,7 +2890,7 @@ retry:
while (batchcount > 0) {
struct list_head *entry;
- struct slab *slabp;
+ struct page *page;
/* Get slab alloc is to come from. */
entry = n->slabs_partial.next;
if (entry == &n->slabs_partial) {
@@ -2993,8 +2900,7 @@ retry:
goto must_grow;
}
- slabp = list_entry(entry, struct slab, list);
- check_slabp(cachep, slabp);
+ page = list_entry(entry, struct page, lru);
check_spinlock_acquired(cachep);
/*
@@ -3002,24 +2908,23 @@ retry:
* there must be at least one object available for
* allocation.
*/
- BUG_ON(slabp->inuse >= cachep->num);
+ BUG_ON(page->active >= cachep->num);
- while (slabp->inuse < cachep->num && batchcount--) {
+ while (page->active < cachep->num && batchcount--) {
STATS_INC_ALLOCED(cachep);
STATS_INC_ACTIVE(cachep);
STATS_SET_HIGH(cachep);
- ac_put_obj(cachep, ac, slab_get_obj(cachep, slabp,
+ ac_put_obj(cachep, ac, slab_get_obj(cachep, page,
node));
}
- check_slabp(cachep, slabp);
/* move slabp to correct slabp list: */
- list_del(&slabp->list);
- if (slabp->free == BUFCTL_END)
- list_add(&slabp->list, &n->slabs_full);
+ list_del(&page->lru);
+ if (page->active == cachep->num)
+ list_add(&page->lru, &n->slabs_full);
else
- list_add(&slabp->list, &n->slabs_partial);
+ list_add(&page->lru, &n->slabs_partial);
}
must_grow:
@@ -3091,16 +2996,6 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
*dbg_redzone1(cachep, objp) = RED_ACTIVE;
*dbg_redzone2(cachep, objp) = RED_ACTIVE;
}
-#ifdef CONFIG_DEBUG_SLAB_LEAK
- {
- struct slab *slabp;
- unsigned objnr;
-
- slabp = virt_to_head_page(objp)->slab_page;
- objnr = (unsigned)(objp - slabp->s_mem) / cachep->size;
- slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
- }
-#endif
objp += obj_offset(cachep);
if (cachep->ctor && cachep->flags & SLAB_POISON)
cachep->ctor(objp);
@@ -3168,7 +3063,7 @@ out:
#ifdef CONFIG_NUMA
/*
- * Try allocating on another node if PF_SPREAD_SLAB|PF_MEMPOLICY.
+ * Try allocating on another node if PF_SPREAD_SLAB or a mempolicy is set.
*
* If we are in_interrupt, then process context, including cpusets and
* mempolicy, may not apply and should not be used for allocation policy.
@@ -3183,7 +3078,7 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
nid_alloc = cpuset_slab_spread_node();
else if (current->mempolicy)
- nid_alloc = slab_node();
+ nid_alloc = mempolicy_slab_node();
if (nid_alloc != nid_here)
return ____cache_alloc_node(cachep, flags, nid_alloc);
return NULL;
@@ -3214,8 +3109,8 @@ static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
retry_cpuset:
- cpuset_mems_cookie = get_mems_allowed();
- zonelist = node_zonelist(slab_node(), flags);
+ cpuset_mems_cookie = read_mems_allowed_begin();
+ zonelist = node_zonelist(mempolicy_slab_node(), flags);
retry:
/*
@@ -3242,18 +3137,20 @@ retry:
* We may trigger various forms of reclaim on the allowed
* set and go into memory reserves if necessary.
*/
+ struct page *page;
+
if (local_flags & __GFP_WAIT)
local_irq_enable();
kmem_flagcheck(cache, flags);
- obj = kmem_getpages(cache, local_flags, numa_mem_id());
+ page = kmem_getpages(cache, local_flags, numa_mem_id());
if (local_flags & __GFP_WAIT)
local_irq_disable();
- if (obj) {
+ if (page) {
/*
* Insert into the appropriate per node queues
*/
- nid = page_to_nid(virt_to_page(obj));
- if (cache_grow(cache, flags, nid, obj)) {
+ nid = page_to_nid(page);
+ if (cache_grow(cache, flags, nid, page)) {
obj = ____cache_alloc_node(cache,
flags | GFP_THISNODE, nid);
if (!obj)
@@ -3270,7 +3167,7 @@ retry:
}
}
- if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !obj))
+ if (unlikely(!obj && read_mems_allowed_retry(cpuset_mems_cookie)))
goto retry_cpuset;
return obj;
}
@@ -3282,7 +3179,7 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
int nodeid)
{
struct list_head *entry;
- struct slab *slabp;
+ struct page *page;
struct kmem_cache_node *n;
void *obj;
int x;
@@ -3302,26 +3199,24 @@ retry:
goto must_grow;
}
- slabp = list_entry(entry, struct slab, list);
+ page = list_entry(entry, struct page, lru);
check_spinlock_acquired_node(cachep, nodeid);
- check_slabp(cachep, slabp);
STATS_INC_NODEALLOCS(cachep);
STATS_INC_ACTIVE(cachep);
STATS_SET_HIGH(cachep);
- BUG_ON(slabp->inuse == cachep->num);
+ BUG_ON(page->active == cachep->num);
- obj = slab_get_obj(cachep, slabp, nodeid);
- check_slabp(cachep, slabp);
+ obj = slab_get_obj(cachep, page, nodeid);
n->free_objects--;
/* move slabp to correct slabp list: */
- list_del(&slabp->list);
+ list_del(&page->lru);
- if (slabp->free == BUFCTL_END)
- list_add(&slabp->list, &n->slabs_full);
+ if (page->active == cachep->num)
+ list_add(&page->lru, &n->slabs_full);
else
- list_add(&slabp->list, &n->slabs_partial);
+ list_add(&page->lru, &n->slabs_partial);
spin_unlock(&n->list_lock);
goto done;
@@ -3338,18 +3233,6 @@ done:
return obj;
}
-/**
- * kmem_cache_alloc_node - Allocate an object on the specified node
- * @cachep: The cache to allocate from.
- * @flags: See kmalloc().
- * @nodeid: node number of the target node.
- * @caller: return address of caller, used for debug information
- *
- * Identical to kmem_cache_alloc but it will allocate memory on the given
- * node, which can improve the performance for cpu bound structures.
- *
- * Fallback to other node is possible if __GFP_THISNODE is not set.
- */
static __always_inline void *
slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
unsigned long caller)
@@ -3398,11 +3281,11 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
kmemleak_alloc_recursive(ptr, cachep->object_size, 1, cachep->flags,
flags);
- if (likely(ptr))
+ if (likely(ptr)) {
kmemcheck_slab_alloc(cachep, flags, ptr, cachep->object_size);
-
- if (unlikely((flags & __GFP_ZERO) && ptr))
- memset(ptr, 0, cachep->object_size);
+ if (unlikely(flags & __GFP_ZERO))
+ memset(ptr, 0, cachep->object_size);
+ }
return ptr;
}
@@ -3412,7 +3295,7 @@ __do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
{
void *objp;
- if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
+ if (current->mempolicy || unlikely(current->flags & PF_SPREAD_SLAB)) {
objp = alternate_node_alloc(cache, flags);
if (objp)
goto out;
@@ -3463,17 +3346,17 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
flags);
prefetchw(objp);
- if (likely(objp))
+ if (likely(objp)) {
kmemcheck_slab_alloc(cachep, flags, objp, cachep->object_size);
-
- if (unlikely((flags & __GFP_ZERO) && objp))
- memset(objp, 0, cachep->object_size);
+ if (unlikely(flags & __GFP_ZERO))
+ memset(objp, 0, cachep->object_size);
+ }
return objp;
}
/*
- * Caller needs to acquire correct kmem_list's list_lock
+ * Caller needs to acquire correct kmem_cache_node's list_lock
*/
static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
int node)
@@ -3483,23 +3366,21 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
for (i = 0; i < nr_objects; i++) {
void *objp;
- struct slab *slabp;
+ struct page *page;
clear_obj_pfmemalloc(&objpp[i]);
objp = objpp[i];
- slabp = virt_to_slab(objp);
+ page = virt_to_head_page(objp);
n = cachep->node[node];
- list_del(&slabp->list);
+ list_del(&page->lru);
check_spinlock_acquired_node(cachep, node);
- check_slabp(cachep, slabp);
- slab_put_obj(cachep, slabp, objp, node);
+ slab_put_obj(cachep, page, objp, node);
STATS_DEC_ACTIVE(cachep);
n->free_objects++;
- check_slabp(cachep, slabp);
/* fixup slab chains */
- if (slabp->inuse == 0) {
+ if (page->active == 0) {
if (n->free_objects > n->free_limit) {
n->free_objects -= cachep->num;
/* No need to drop any previously held
@@ -3508,16 +3389,16 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
* a different cache, refer to comments before
* alloc_slabmgmt.
*/
- slab_destroy(cachep, slabp);
+ slab_destroy(cachep, page);
} else {
- list_add(&slabp->list, &n->slabs_free);
+ list_add(&page->lru, &n->slabs_free);
}
} else {
/* Unconditionally move a slab to the end of the
* partial list on free - maximum time for the
* other objects to be freed, too.
*/
- list_add_tail(&slabp->list, &n->slabs_partial);
+ list_add_tail(&page->lru, &n->slabs_partial);
}
}
}
@@ -3557,10 +3438,10 @@ free_done:
p = n->slabs_free.next;
while (p != &(n->slabs_free)) {
- struct slab *slabp;
+ struct page *page;
- slabp = list_entry(p, struct slab, list);
- BUG_ON(slabp->inuse);
+ page = list_entry(p, struct page, lru);
+ BUG_ON(page->active);
i++;
p = p->next;
@@ -3643,6 +3524,17 @@ EXPORT_SYMBOL(kmem_cache_alloc_trace);
#endif
#ifdef CONFIG_NUMA
+/**
+ * kmem_cache_alloc_node - Allocate an object on the specified node
+ * @cachep: The cache to allocate from.
+ * @flags: See kmalloc().
+ * @nodeid: node number of the target node.
+ *
+ * Identical to kmem_cache_alloc but it will allocate memory on the given
+ * node, which can improve the performance for cpu bound structures.
+ *
+ * Fallback to other node is possible if __GFP_THISNODE is not set.
+ */
void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
@@ -3718,11 +3610,6 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
struct kmem_cache *cachep;
void *ret;
- /* If you want to save a few bytes .text space: replace
- * __ with kmem_.
- * Then kmalloc uses the uninlined functions instead of the inline
- * functions.
- */
cachep = kmalloc_slab(size, flags);
if (unlikely(ZERO_OR_NULL_PTR(cachep)))
return cachep;
@@ -3814,7 +3701,7 @@ EXPORT_SYMBOL(kfree);
/*
* This initializes kmem_cache_node or resizes various caches for all nodes.
*/
-static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
+static int alloc_kmem_cache_node(struct kmem_cache *cachep, gfp_t gfp)
{
int node;
struct kmem_cache_node *n;
@@ -3870,8 +3757,8 @@ static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
}
kmem_cache_node_init(n);
- n->next_reap = jiffies + REAPTIMEOUT_LIST3 +
- ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
+ n->next_reap = jiffies + REAPTIMEOUT_NODE +
+ ((unsigned long)cachep) % REAPTIMEOUT_NODE;
n->shared = new_shared;
n->alien = new_alien;
n->free_limit = (1 + nr_cpus_node(node)) *
@@ -3957,7 +3844,7 @@ static int __do_tune_cpucache(struct kmem_cache *cachep, int limit,
kfree(ccold);
}
kfree(new);
- return alloc_kmemlist(cachep, gfp);
+ return alloc_kmem_cache_node(cachep, gfp);
}
static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
@@ -3977,7 +3864,7 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
VM_BUG_ON(!mutex_is_locked(&slab_mutex));
for_each_memcg_cache_index(i) {
- c = cache_from_memcg(cachep, i);
+ c = cache_from_memcg_idx(cachep, i);
if (c)
/* return value determined by the parent cache only */
__do_tune_cpucache(c, limit, batchcount, shared, gfp);
@@ -4126,7 +4013,7 @@ static void cache_reap(struct work_struct *w)
if (time_after(n->next_reap, jiffies))
goto next;
- n->next_reap = jiffies + REAPTIMEOUT_LIST3;
+ n->next_reap = jiffies + REAPTIMEOUT_NODE;
drain_array(searchp, n, n->shared, 0, node);
@@ -4147,13 +4034,13 @@ next:
next_reap_node();
out:
/* Set up the next iteration */
- schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_CPUC));
+ schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_AC));
}
#ifdef CONFIG_SLABINFO
void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
{
- struct slab *slabp;
+ struct page *page;
unsigned long active_objs;
unsigned long num_objs;
unsigned long active_slabs = 0;
@@ -4173,23 +4060,23 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
check_irq_on();
spin_lock_irq(&n->list_lock);
- list_for_each_entry(slabp, &n->slabs_full, list) {
- if (slabp->inuse != cachep->num && !error)
+ list_for_each_entry(page, &n->slabs_full, lru) {
+ if (page->active != cachep->num && !error)
error = "slabs_full accounting error";
active_objs += cachep->num;
active_slabs++;
}
- list_for_each_entry(slabp, &n->slabs_partial, list) {
- if (slabp->inuse == cachep->num && !error)
- error = "slabs_partial inuse accounting error";
- if (!slabp->inuse && !error)
- error = "slabs_partial/inuse accounting error";
- active_objs += slabp->inuse;
+ list_for_each_entry(page, &n->slabs_partial, lru) {
+ if (page->active == cachep->num && !error)
+ error = "slabs_partial accounting error";
+ if (!page->active && !error)
+ error = "slabs_partial accounting error";
+ active_objs += page->active;
active_slabs++;
}
- list_for_each_entry(slabp, &n->slabs_free, list) {
- if (slabp->inuse && !error)
- error = "slabs_free/inuse accounting error";
+ list_for_each_entry(page, &n->slabs_free, lru) {
+ if (page->active && !error)
+ error = "slabs_free accounting error";
num_slabs++;
}
free_objects += n->free_objects;
@@ -4341,15 +4228,27 @@ static inline int add_caller(unsigned long *n, unsigned long v)
return 1;
}
-static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s)
+static void handle_slab(unsigned long *n, struct kmem_cache *c,
+ struct page *page)
{
void *p;
- int i;
+ int i, j;
+
if (n[0] == n[1])
return;
- for (i = 0, p = s->s_mem; i < c->num; i++, p += c->size) {
- if (slab_bufctl(s)[i] != BUFCTL_ACTIVE)
+ for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) {
+ bool active = true;
+
+ for (j = page->active; j < c->num; j++) {
+ /* Skip freed item */
+ if (get_free_obj(page, j) == i) {
+ active = false;
+ break;
+ }
+ }
+ if (!active)
continue;
+
if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
return;
}
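
With struct slab gone, the leak scanner above walks the free-index array kept in the page itself: indices page->active .. num-1 name the objects that are still free, so an object counts as allocated only if its index does not appear in that tail. A minimal user-space sketch of that membership test (array contents and names are illustrative, not the kernel's get_free_obj() layout):

#include <stdbool.h>
#include <stdio.h>

#define NUM_OBJS 8

/* indices [active, NUM_OBJS) name the objects that are still free */
static bool obj_is_allocated(const unsigned char *freelist,
			     unsigned int active, unsigned int i)
{
	for (unsigned int j = active; j < NUM_OBJS; j++)
		if (freelist[j] == i)
			return false;	/* found in the free tail: skip it */
	return true;
}

int main(void)
{
	/* objects 1, 3 and 4 are handed out; 5, 2, 6, 0, 7 are still free */
	unsigned char freelist[NUM_OBJS] = { 1, 3, 4, 5, 2, 6, 0, 7 };
	unsigned int active = 3;

	for (unsigned int i = 0; i < NUM_OBJS; i++)
		printf("obj %u: %s\n", i,
		       obj_is_allocated(freelist, active, i) ? "allocated" : "free");
	return 0;
}
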
@@ -4374,7 +4273,7 @@ static void show_symbol(struct seq_file *m, unsigned long address)
static int leaks_show(struct seq_file *m, void *p)
{
struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
- struct slab *slabp;
+ struct page *page;
struct kmem_cache_node *n;
const char *name;
unsigned long *x = m->private;
@@ -4398,10 +4297,10 @@ static int leaks_show(struct seq_file *m, void *p)
check_irq_on();
spin_lock_irq(&n->list_lock);
- list_for_each_entry(slabp, &n->slabs_full, list)
- handle_slab(x, cachep, slabp);
- list_for_each_entry(slabp, &n->slabs_partial, list)
- handle_slab(x, cachep, slabp);
+ list_for_each_entry(page, &n->slabs_full, lru)
+ handle_slab(x, cachep, page);
+ list_for_each_entry(page, &n->slabs_partial, lru)
+ handle_slab(x, cachep, page);
spin_unlock_irq(&n->list_lock);
}
name = cachep->name;
@@ -4431,20 +4330,10 @@ static int leaks_show(struct seq_file *m, void *p)
return 0;
}
-static void *s_next(struct seq_file *m, void *p, loff_t *pos)
-{
- return seq_list_next(p, &slab_caches, pos);
-}
-
-static void s_stop(struct seq_file *m, void *p)
-{
- mutex_unlock(&slab_mutex);
-}
-
static const struct seq_operations slabstats_op = {
.start = leaks_start,
- .next = s_next,
- .stop = s_stop,
+ .next = slab_next,
+ .stop = slab_stop,
.show = leaks_show,
};
diff --git a/mm/slab.h b/mm/slab.h
index f96b49e4704e..6bd4c353704f 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -55,12 +55,12 @@ extern void create_boot_cache(struct kmem_cache *, const char *name,
struct mem_cgroup;
#ifdef CONFIG_SLUB
struct kmem_cache *
-__kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
- size_t align, unsigned long flags, void (*ctor)(void *));
+__kmem_cache_alias(const char *name, size_t size, size_t align,
+ unsigned long flags, void (*ctor)(void *));
#else
static inline struct kmem_cache *
-__kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
- size_t align, unsigned long flags, void (*ctor)(void *))
+__kmem_cache_alias(const char *name, size_t size, size_t align,
+ unsigned long flags, void (*ctor)(void *))
{ return NULL; }
#endif
@@ -91,6 +91,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
int __kmem_cache_shutdown(struct kmem_cache *);
+void slab_kmem_cache_release(struct kmem_cache *);
struct seq_file;
struct file;
@@ -119,13 +120,6 @@ static inline bool is_root_cache(struct kmem_cache *s)
return !s->memcg_params || s->memcg_params->is_root_cache;
}
-static inline bool cache_match_memcg(struct kmem_cache *cachep,
- struct mem_cgroup *memcg)
-{
- return (is_root_cache(cachep) && !memcg) ||
- (cachep->memcg_params->memcg == memcg);
-}
-
static inline void memcg_bind_pages(struct kmem_cache *s, int order)
{
if (!is_root_cache(s))
@@ -160,9 +154,36 @@ static inline const char *cache_name(struct kmem_cache *s)
return s->name;
}
-static inline struct kmem_cache *cache_from_memcg(struct kmem_cache *s, int idx)
+/*
+ * Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
+ * That said the caller must assure the memcg's cache won't go away. Since once
+ * created a memcg's cache is destroyed only along with the root cache, it is
+ * true if we are going to allocate from the cache or hold a reference to the
+ * root cache by other means. Otherwise, we should hold either the slab_mutex
+ * or the memcg's slab_caches_mutex while calling this function and accessing
+ * the returned value.
+ */
+static inline struct kmem_cache *
+cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
- return s->memcg_params->memcg_caches[idx];
+ struct kmem_cache *cachep;
+ struct memcg_cache_params *params;
+
+ if (!s->memcg_params)
+ return NULL;
+
+ rcu_read_lock();
+ params = rcu_dereference(s->memcg_params);
+ cachep = params->memcg_caches[idx];
+ rcu_read_unlock();
+
+ /*
+ * Make sure we will access the up-to-date value. The code updating
+ * memcg_caches issues a write barrier to match this (see
+ * memcg_register_cache()).
+ */
+ smp_read_barrier_depends();
+ return cachep;
}
static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
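
The lookup above pairs with a write barrier on the update side: memcg_register_cache() is expected to fully initialize the cache before publishing the pointer into memcg_caches[], and the reader's smp_read_barrier_depends() orders the dependent accesses. A rough user-space analogue of that publish/consume pairing, using C11 atomics in place of the kernel barriers (illustrative only):

#include <stdatomic.h>
#include <stdlib.h>

struct cache { int object_size; };

static _Atomic(struct cache *) memcg_caches[16];

/* updater: initialize fully, then publish with release semantics */
void publish_cache(int idx, int object_size)
{
	struct cache *c = malloc(sizeof(*c));

	if (!c)
		return;
	c->object_size = object_size;
	atomic_store_explicit(&memcg_caches[idx], c, memory_order_release);
}

/* reader: consume ordering lets dependent loads see the initialization */
struct cache *lookup_cache(int idx)
{
	return atomic_load_explicit(&memcg_caches[idx], memory_order_consume);
}
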
@@ -177,12 +198,6 @@ static inline bool is_root_cache(struct kmem_cache *s)
return true;
}
-static inline bool cache_match_memcg(struct kmem_cache *cachep,
- struct mem_cgroup *memcg)
-{
- return true;
-}
-
static inline void memcg_bind_pages(struct kmem_cache *s, int order)
{
}
@@ -202,7 +217,8 @@ static inline const char *cache_name(struct kmem_cache *s)
return s->name;
}
-static inline struct kmem_cache *cache_from_memcg(struct kmem_cache *s, int idx)
+static inline struct kmem_cache *
+cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
return NULL;
}
@@ -271,3 +287,6 @@ struct kmem_cache_node {
#endif
};
+
+void *slab_next(struct seq_file *m, void *p, loff_t *pos);
+void slab_stop(struct seq_file *m, void *p);
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 2d414508e9ec..102cc6fca3d3 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -19,6 +19,7 @@
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <linux/memcontrol.h>
+#include <trace/events/kmem.h>
#include "slab.h"
@@ -28,8 +29,7 @@ DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;
#ifdef CONFIG_DEBUG_VM
-static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
- size_t size)
+static int kmem_cache_sanity_check(const char *name, size_t size)
{
struct kmem_cache *s = NULL;
@@ -55,27 +55,22 @@ static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
continue;
}
- /*
- * For simplicity, we won't check this in the list of memcg
- * caches. We have control over memcg naming, and if there
- * aren't duplicates in the global list, there won't be any
- * duplicates in the memcg lists as well.
- */
- if (!memcg && !strcmp(s->name, name)) {
+#if !defined(CONFIG_SLUB) || !defined(CONFIG_SLUB_DEBUG_ON)
+ if (!strcmp(s->name, name)) {
pr_err("%s (%s): Cache name already exists.\n",
__func__, name);
dump_stack();
s = NULL;
return -EINVAL;
}
+#endif
}
WARN_ON(strchr(name, ' ')); /* It confuses parsers */
return 0;
}
#else
-static inline int kmem_cache_sanity_check(struct mem_cgroup *memcg,
- const char *name, size_t size)
+static inline int kmem_cache_sanity_check(const char *name, size_t size)
{
return 0;
}
@@ -136,6 +131,46 @@ unsigned long calculate_alignment(unsigned long flags,
return ALIGN(align, sizeof(void *));
}
+static struct kmem_cache *
+do_kmem_cache_create(char *name, size_t object_size, size_t size, size_t align,
+ unsigned long flags, void (*ctor)(void *),
+ struct mem_cgroup *memcg, struct kmem_cache *root_cache)
+{
+ struct kmem_cache *s;
+ int err;
+
+ err = -ENOMEM;
+ s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
+ if (!s)
+ goto out;
+
+ s->name = name;
+ s->object_size = object_size;
+ s->size = size;
+ s->align = align;
+ s->ctor = ctor;
+
+ err = memcg_alloc_cache_params(memcg, s, root_cache);
+ if (err)
+ goto out_free_cache;
+
+ err = __kmem_cache_create(s, flags);
+ if (err)
+ goto out_free_cache;
+
+ s->refcount = 1;
+ list_add(&s->list, &slab_caches);
+ memcg_register_cache(s);
+out:
+ if (err)
+ return ERR_PTR(err);
+ return s;
+
+out_free_cache:
+ memcg_free_cache_params(s);
+ kfree(s);
+ goto out;
+}
/*
* kmem_cache_create - Create a cache.
@@ -161,20 +196,20 @@ unsigned long calculate_alignment(unsigned long flags,
* cacheline. This can be beneficial if you're counting cycles as closely
* as davem.
*/
-
struct kmem_cache *
-kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
- size_t align, unsigned long flags, void (*ctor)(void *),
- struct kmem_cache *parent_cache)
+kmem_cache_create(const char *name, size_t size, size_t align,
+ unsigned long flags, void (*ctor)(void *))
{
- struct kmem_cache *s = NULL;
- int err = 0;
+ struct kmem_cache *s;
+ char *cache_name;
+ int err;
get_online_cpus();
mutex_lock(&slab_mutex);
- if (!kmem_cache_sanity_check(memcg, name, size) == 0)
- goto out_locked;
+ err = kmem_cache_sanity_check(name, size);
+ if (err)
+ goto out_unlock;
/*
* Some allocators will constraint the set of valid flags to a subset
@@ -184,47 +219,29 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
*/
flags &= CACHE_CREATE_MASK;
- s = __kmem_cache_alias(memcg, name, size, align, flags, ctor);
+ s = __kmem_cache_alias(name, size, align, flags, ctor);
if (s)
- goto out_locked;
+ goto out_unlock;
- s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
- if (s) {
- s->object_size = s->size = size;
- s->align = calculate_alignment(flags, align, size);
- s->ctor = ctor;
-
- if (memcg_register_cache(memcg, s, parent_cache)) {
- kmem_cache_free(kmem_cache, s);
- err = -ENOMEM;
- goto out_locked;
- }
-
- s->name = kstrdup(name, GFP_KERNEL);
- if (!s->name) {
- kmem_cache_free(kmem_cache, s);
- err = -ENOMEM;
- goto out_locked;
- }
-
- err = __kmem_cache_create(s, flags);
- if (!err) {
- s->refcount = 1;
- list_add(&s->list, &slab_caches);
- memcg_cache_list_add(memcg, s);
- } else {
- kfree(s->name);
- kmem_cache_free(kmem_cache, s);
- }
- } else
+ cache_name = kstrdup(name, GFP_KERNEL);
+ if (!cache_name) {
err = -ENOMEM;
+ goto out_unlock;
+ }
+
+ s = do_kmem_cache_create(cache_name, size, size,
+ calculate_alignment(flags, align, size),
+ flags, ctor, NULL, NULL);
+ if (IS_ERR(s)) {
+ err = PTR_ERR(s);
+ kfree(cache_name);
+ }
-out_locked:
+out_unlock:
mutex_unlock(&slab_mutex);
put_online_cpus();
if (err) {
-
if (flags & SLAB_PANIC)
panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
name, err);
@@ -233,50 +250,124 @@ out_locked:
name, err);
dump_stack();
}
-
return NULL;
}
-
return s;
}
+EXPORT_SYMBOL(kmem_cache_create);
-struct kmem_cache *
-kmem_cache_create(const char *name, size_t size, size_t align,
- unsigned long flags, void (*ctor)(void *))
+#ifdef CONFIG_MEMCG_KMEM
+/*
+ * kmem_cache_create_memcg - Create a cache for a memory cgroup.
+ * @memcg: The memory cgroup the new cache is for.
+ * @root_cache: The parent of the new cache.
+ *
+ * This function attempts to create a kmem cache that will serve allocation
+ * requests going from @memcg to @root_cache. The new cache inherits properties
+ * from its parent.
+ */
+void kmem_cache_create_memcg(struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
- return kmem_cache_create_memcg(NULL, name, size, align, flags, ctor, NULL);
+ struct kmem_cache *s;
+ char *cache_name;
+
+ get_online_cpus();
+ mutex_lock(&slab_mutex);
+
+ /*
+ * Since per-memcg caches are created asynchronously on first
+ * allocation (see memcg_kmem_get_cache()), several threads can try to
+ * create the same cache, but only one of them may succeed.
+ */
+ if (cache_from_memcg_idx(root_cache, memcg_cache_id(memcg)))
+ goto out_unlock;
+
+ cache_name = memcg_create_cache_name(memcg, root_cache);
+ if (!cache_name)
+ goto out_unlock;
+
+ s = do_kmem_cache_create(cache_name, root_cache->object_size,
+ root_cache->size, root_cache->align,
+ root_cache->flags, root_cache->ctor,
+ memcg, root_cache);
+ if (IS_ERR(s)) {
+ kfree(cache_name);
+ goto out_unlock;
+ }
+
+ s->allocflags |= __GFP_KMEMCG;
+
+out_unlock:
+ mutex_unlock(&slab_mutex);
+ put_online_cpus();
}
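
As the comment notes, several tasks can race to create the same per-memcg cache; re-checking cache_from_memcg_idx() under slab_mutex is what lets the first creator win while the others bail out. A compressed user-space sketch of that check-under-lock shape (names hypothetical):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t slab_mutex = PTHREAD_MUTEX_INITIALIZER;
static void *memcg_caches[16];

void create_cache_for(int idx)
{
	pthread_mutex_lock(&slab_mutex);
	if (memcg_caches[idx])		/* another task already created it */
		goto out_unlock;
	memcg_caches[idx] = malloc(64);	/* stands in for do_kmem_cache_create() */
out_unlock:
	pthread_mutex_unlock(&slab_mutex);
}
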
-EXPORT_SYMBOL(kmem_cache_create);
-void kmem_cache_destroy(struct kmem_cache *s)
+static int kmem_cache_destroy_memcg_children(struct kmem_cache *s)
+{
+ int rc;
+
+ if (!s->memcg_params ||
+ !s->memcg_params->is_root_cache)
+ return 0;
+
+ mutex_unlock(&slab_mutex);
+ rc = __kmem_cache_destroy_memcg_children(s);
+ mutex_lock(&slab_mutex);
+
+ return rc;
+}
+#else
+static int kmem_cache_destroy_memcg_children(struct kmem_cache *s)
{
- /* Destroy all the children caches if we aren't a memcg cache */
- kmem_cache_destroy_memcg_children(s);
+ return 0;
+}
+#endif /* CONFIG_MEMCG_KMEM */
+void slab_kmem_cache_release(struct kmem_cache *s)
+{
+ kfree(s->name);
+ kmem_cache_free(kmem_cache, s);
+}
+
+void kmem_cache_destroy(struct kmem_cache *s)
+{
get_online_cpus();
mutex_lock(&slab_mutex);
+
s->refcount--;
- if (!s->refcount) {
- list_del(&s->list);
-
- if (!__kmem_cache_shutdown(s)) {
- mutex_unlock(&slab_mutex);
- if (s->flags & SLAB_DESTROY_BY_RCU)
- rcu_barrier();
-
- memcg_release_cache(s);
- kfree(s->name);
- kmem_cache_free(kmem_cache, s);
- } else {
- list_add(&s->list, &slab_caches);
- mutex_unlock(&slab_mutex);
- printk(KERN_ERR "kmem_cache_destroy %s: Slab cache still has objects\n",
- s->name);
- dump_stack();
- }
- } else {
- mutex_unlock(&slab_mutex);
+ if (s->refcount)
+ goto out_unlock;
+
+ if (kmem_cache_destroy_memcg_children(s) != 0)
+ goto out_unlock;
+
+ list_del(&s->list);
+ memcg_unregister_cache(s);
+
+ if (__kmem_cache_shutdown(s) != 0) {
+ list_add(&s->list, &slab_caches);
+ memcg_register_cache(s);
+ printk(KERN_ERR "kmem_cache_destroy %s: "
+ "Slab cache still has objects\n", s->name);
+ dump_stack();
+ goto out_unlock;
}
+
+ mutex_unlock(&slab_mutex);
+ if (s->flags & SLAB_DESTROY_BY_RCU)
+ rcu_barrier();
+
+ memcg_free_cache_params(s);
+#ifdef SLAB_SUPPORTS_SYSFS
+ sysfs_slab_remove(s);
+#else
+ slab_kmem_cache_release(s);
+#endif
+ goto out_put_cpus;
+
+out_unlock:
+ mutex_unlock(&slab_mutex);
+out_put_cpus:
put_online_cpus();
}
EXPORT_SYMBOL(kmem_cache_destroy);
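
Note the asymmetry at the end of kmem_cache_destroy(): when sysfs is in use, the struct kmem_cache is not freed here but from the kobject's ->release() callback (kmem_cache_release(), added later in this series, calls slab_kmem_cache_release()), so a late sysfs reference can never touch freed memory. A bare-bones user-space analogue of freeing an object only from its release hook (refcount handling and names are illustrative):

#include <stdlib.h>

struct obj {
	int refcount;
	void (*release)(struct obj *);
};

void obj_put(struct obj *o)
{
	if (--o->refcount == 0)
		o->release(o);		/* freed only once the last user is gone */
}

void cache_release(struct obj *o)
{
	free(o);			/* analogue of slab_kmem_cache_release() */
}
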
@@ -373,7 +464,7 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
{
int index;
- if (size > KMALLOC_MAX_SIZE) {
+ if (unlikely(size > KMALLOC_MAX_SIZE)) {
WARN_ON_ONCE(!(flags & __GFP_NOWARN));
return NULL;
}
@@ -495,8 +586,24 @@ void __init create_kmalloc_caches(unsigned long flags)
}
#endif /* !CONFIG_SLOB */
+#ifdef CONFIG_TRACING
+void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+{
+ void *ret = kmalloc_order(size, flags, order);
+ trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
+ return ret;
+}
+EXPORT_SYMBOL(kmalloc_order_trace);
+#endif
#ifdef CONFIG_SLABINFO
+
+#ifdef CONFIG_SLAB
+#define SLABINFO_RIGHTS (S_IWUSR | S_IRUSR)
+#else
+#define SLABINFO_RIGHTS S_IRUSR
+#endif
+
void print_slabinfo_header(struct seq_file *m)
{
/*
@@ -531,12 +638,12 @@ static void *s_start(struct seq_file *m, loff_t *pos)
return seq_list_start(&slab_caches, *pos);
}
-static void *s_next(struct seq_file *m, void *p, loff_t *pos)
+void *slab_next(struct seq_file *m, void *p, loff_t *pos)
{
return seq_list_next(p, &slab_caches, pos);
}
-static void s_stop(struct seq_file *m, void *p)
+void slab_stop(struct seq_file *m, void *p)
{
mutex_unlock(&slab_mutex);
}
@@ -552,7 +659,7 @@ memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
return;
for_each_memcg_cache_index(i) {
- c = cache_from_memcg(s, i);
+ c = cache_from_memcg_idx(s, i);
if (!c)
continue;
@@ -613,8 +720,8 @@ static int s_show(struct seq_file *m, void *p)
*/
static const struct seq_operations slabinfo_op = {
.start = s_start,
- .next = s_next,
- .stop = s_stop,
+ .next = slab_next,
+ .stop = slab_stop,
.show = s_show,
};
@@ -633,7 +740,8 @@ static const struct file_operations proc_slabinfo_operations = {
static int __init slab_proc_init(void)
{
- proc_create("slabinfo", S_IRUSR, NULL, &proc_slabinfo_operations);
+ proc_create("slabinfo", SLABINFO_RIGHTS, NULL,
+ &proc_slabinfo_operations);
return 0;
}
module_init(slab_proc_init);
diff --git a/mm/slob.c b/mm/slob.c
index eeed4a05a2ef..730cad45d4be 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -111,18 +111,18 @@ static inline int slob_page_free(struct page *sp)
static void set_slob_page_free(struct page *sp, struct list_head *list)
{
- list_add(&sp->list, list);
+ list_add(&sp->lru, list);
__SetPageSlobFree(sp);
}
static inline void clear_slob_page_free(struct page *sp)
{
- list_del(&sp->list);
+ list_del(&sp->lru);
__ClearPageSlobFree(sp);
}
#define SLOB_UNIT sizeof(slob_t)
-#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
+#define SLOB_UNITS(size) DIV_ROUND_UP(size, SLOB_UNIT)
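
DIV_ROUND_UP(n, d) expands to ((n) + (d) - 1) / (d), so the new definition computes the same value as the open-coded one; for example, taking SLOB_UNIT as 2 bytes purely for illustration:

	SLOB_UNITS(1) -> (1 + 1) / 2 == 1
	SLOB_UNITS(2) -> (2 + 1) / 2 == 1
	SLOB_UNITS(3) -> (3 + 1) / 2 == 2
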
/*
* struct slob_rcu is inserted at the tail of allocated slob blocks, which
@@ -282,7 +282,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
spin_lock_irqsave(&slob_lock, flags);
/* Iterate through each partially free page, try to find room */
- list_for_each_entry(sp, slob_list, list) {
+ list_for_each_entry(sp, slob_list, lru) {
#ifdef CONFIG_NUMA
/*
* If there's a node specification, search for a partial
@@ -296,7 +296,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
continue;
/* Attempt to alloc */
- prev = sp->list.prev;
+ prev = sp->lru.prev;
b = slob_page_alloc(sp, size, align);
if (!b)
continue;
@@ -322,7 +322,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
spin_lock_irqsave(&slob_lock, flags);
sp->units = SLOB_UNITS(PAGE_SIZE);
sp->freelist = b;
- INIT_LIST_HEAD(&sp->list);
+ INIT_LIST_HEAD(&sp->lru);
set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
set_slob_page_free(sp, slob_list);
b = slob_page_alloc(sp, size, align);
@@ -462,11 +462,11 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
return ret;
}
-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
+void *__kmalloc(size_t size, gfp_t gfp)
{
- return __do_kmalloc_node(size, gfp, node, _RET_IP_);
+ return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);
}
-EXPORT_SYMBOL(__kmalloc_node);
+EXPORT_SYMBOL(__kmalloc);
#ifdef CONFIG_TRACING
void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller)
@@ -534,7 +534,7 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
return 0;
}
-void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
+void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
{
void *b;
@@ -554,13 +554,33 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
flags, node);
}
- if (c->ctor)
+ if (b && c->ctor)
c->ctor(b);
kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
return b;
}
+EXPORT_SYMBOL(slob_alloc_node);
+
+void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
+{
+ return slob_alloc_node(cachep, flags, NUMA_NO_NODE);
+}
+EXPORT_SYMBOL(kmem_cache_alloc);
+
+#ifdef CONFIG_NUMA
+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
+{
+ return __do_kmalloc_node(size, gfp, node, _RET_IP_);
+}
+EXPORT_SYMBOL(__kmalloc_node);
+
+void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
+{
+ return slob_alloc_node(cachep, gfp, node);
+}
EXPORT_SYMBOL(kmem_cache_alloc_node);
+#endif
static void __kmem_cache_free(void *b, int size)
{
diff --git a/mm/slub.c b/mm/slub.c
index 57707f01bcfb..2b1ce697fc4b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -123,6 +123,15 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
#endif
}
+static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
+{
+#ifdef CONFIG_SLUB_CPU_PARTIAL
+ return !kmem_cache_debug(s);
+#else
+ return false;
+#endif
+}
+
/*
* Issues still to be resolved:
*
@@ -146,7 +155,7 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
/*
* Maximum number of desirable partial slabs.
* The existence of more partial slabs makes kmem_cache_shrink
- * sort the partial list by the number of objects in the.
+ * sort the partial list by the number of objects in use.
*/
#define MAX_PARTIAL 10
@@ -201,21 +210,22 @@ enum track_item { TRACK_ALLOC, TRACK_FREE };
#ifdef CONFIG_SYSFS
static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *);
-static void sysfs_slab_remove(struct kmem_cache *);
static void memcg_propagate_slab_attrs(struct kmem_cache *s);
#else
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
{ return 0; }
-static inline void sysfs_slab_remove(struct kmem_cache *s) { }
-
static inline void memcg_propagate_slab_attrs(struct kmem_cache *s) { }
#endif
static inline void stat(const struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
- __this_cpu_inc(s->cpu_slab->stat[si]);
+ /*
+ * The rmw is racy on a preemptible kernel but this is acceptable, so
+ * avoid this_cpu_add()'s irq-disable overhead.
+ */
+ raw_cpu_inc(s->cpu_slab->stat[si]);
#endif
}
@@ -346,6 +356,21 @@ static __always_inline void slab_unlock(struct page *page)
__bit_spin_unlock(PG_locked, &page->flags);
}
+static inline void set_page_slub_counters(struct page *page, unsigned long counters_new)
+{
+ struct page tmp;
+ tmp.counters = counters_new;
+ /*
+ * page->counters can cover frozen/inuse/objects as well
+ * as page->_count. If we assign to ->counters directly
+ * we run the risk of losing updates to page->_count, so
+ * be careful and only assign to the fields we need.
+ */
+ page->frozen = tmp.frozen;
+ page->inuse = tmp.inuse;
+ page->objects = tmp.objects;
+}
+
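
The field-by-field copy exists because in struct page the counters word overlays several fields, including the atomically updated _count; storing the whole word would silently discard a concurrent refcount change. A user-space sketch of the overlay hazard (field widths invented for illustration):

#include <stdio.h>

struct fake_page {
	union {
		unsigned long counters;
		struct {
			unsigned inuse:16;
			unsigned objects:15;
			unsigned frozen:1;
			int _count;	/* shares the word on 64-bit */
		};
	};
};

int main(void)
{
	struct fake_page page = { 0 };
	struct fake_page tmp;

	page._count = 3;		/* e.g. a racing refcount update */
	tmp.counters = 0x10001;		/* new inuse/objects/frozen values */

	/* safe: copy only the fields we own, leaving _count untouched */
	page.frozen  = tmp.frozen;
	page.inuse   = tmp.inuse;
	page.objects = tmp.objects;

	printf("_count preserved: %d\n", page._count);
	return 0;
}
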
/* Interrupts must be disabled (for the fallback code to work right) */
static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
void *freelist_old, unsigned long counters_old,
@@ -364,9 +389,10 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page
#endif
{
slab_lock(page);
- if (page->freelist == freelist_old && page->counters == counters_old) {
+ if (page->freelist == freelist_old &&
+ page->counters == counters_old) {
page->freelist = freelist_new;
- page->counters = counters_new;
+ set_page_slub_counters(page, counters_new);
slab_unlock(page);
return 1;
}
@@ -402,9 +428,10 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
local_irq_save(flags);
slab_lock(page);
- if (page->freelist == freelist_old && page->counters == counters_old) {
+ if (page->freelist == freelist_old &&
+ page->counters == counters_old) {
page->freelist = freelist_new;
- page->counters = counters_new;
+ set_page_slub_counters(page, counters_new);
slab_unlock(page);
local_irq_restore(flags);
return 1;
@@ -544,8 +571,9 @@ static void print_tracking(struct kmem_cache *s, void *object)
static void print_page_info(struct page *page)
{
- printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
- page, page->objects, page->inuse, page->freelist, page->flags);
+ printk(KERN_ERR
+ "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
+ page, page->objects, page->inuse, page->freelist, page->flags);
}
@@ -620,7 +648,8 @@ static void object_err(struct kmem_cache *s, struct page *page,
print_trailer(s, page, object);
}
-static void slab_err(struct kmem_cache *s, struct page *page, const char *fmt, ...)
+static void slab_err(struct kmem_cache *s, struct page *page,
+ const char *fmt, ...)
{
va_list args;
char buf[100];
@@ -779,7 +808,8 @@ static int check_object(struct kmem_cache *s, struct page *page,
} else {
if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
check_bytes_and_report(s, page, p, "Alignment padding",
- endobject, POISON_INUSE, s->inuse - s->object_size);
+ endobject, POISON_INUSE,
+ s->inuse - s->object_size);
}
}
@@ -864,7 +894,6 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
object_err(s, page, object,
"Freechain corrupt");
set_freepointer(s, object, NULL);
- break;
} else {
slab_err(s, page, "Freepointer corrupt");
page->freelist = NULL;
@@ -909,7 +938,8 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
page->freelist);
if (!alloc)
- print_section("Object ", (void *)object, s->object_size);
+ print_section("Object ", (void *)object,
+ s->object_size);
dump_stack();
}
@@ -919,6 +949,16 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
* Hooks for other subsystems that check memory allocations. In a typical
* production configuration these hooks all should produce no code at all.
*/
+static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
+{
+ kmemleak_alloc(ptr, size, 1, flags);
+}
+
+static inline void kfree_hook(const void *x)
+{
+ kmemleak_free(x);
+}
+
static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
{
flags &= gfp_allowed_mask;
@@ -928,7 +968,8 @@ static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
return should_failslab(s->object_size, flags, s->flags);
}
-static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
+static inline void slab_post_alloc_hook(struct kmem_cache *s,
+ gfp_t flags, void *object)
{
flags &= gfp_allowed_mask;
kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
@@ -940,7 +981,7 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
kmemleak_free_recursive(x, s->flags);
/*
- * Trouble is that we may no longer disable interupts in the fast path
+ * Trouble is that we may no longer disable interrupts in the fast path
* So in order to make the debug calls that expect irqs to be
* disabled we need to disable interrupts temporarily.
*/
@@ -960,8 +1001,6 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
/*
* Tracking of fully allocated slabs for debugging purposes.
- *
- * list_lock must be held.
*/
static void add_full(struct kmem_cache *s,
struct kmem_cache_node *n, struct page *page)
@@ -969,17 +1008,16 @@ static void add_full(struct kmem_cache *s,
if (!(s->flags & SLAB_STORE_USER))
return;
+ lockdep_assert_held(&n->list_lock);
list_add(&page->lru, &n->full);
}
-/*
- * list_lock must be held.
- */
-static void remove_full(struct kmem_cache *s, struct page *page)
+static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
{
if (!(s->flags & SLAB_STORE_USER))
return;
+ lockdep_assert_held(&n->list_lock);
list_del(&page->lru);
}
@@ -1030,7 +1068,8 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
init_tracking(s, object);
}
-static noinline int alloc_debug_processing(struct kmem_cache *s, struct page *page,
+static noinline int alloc_debug_processing(struct kmem_cache *s,
+ struct page *page,
void *object, unsigned long addr)
{
if (!check_slab(s, page))
@@ -1201,8 +1240,8 @@ static unsigned long kmem_cache_flags(unsigned long object_size,
/*
* Enable debugging if selected on the kernel commandline.
*/
- if (slub_debug && (!slub_debug_slabs ||
- !strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs))))
+ if (slub_debug && (!slub_debug_slabs || (name &&
+ !strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)))))
flags |= slub_debug;
return flags;
@@ -1224,7 +1263,8 @@ static inline int check_object(struct kmem_cache *s, struct page *page,
void *object, u8 val) { return 1; }
static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
struct page *page) {}
-static inline void remove_full(struct kmem_cache *s, struct page *page) {}
+static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
+ struct page *page) {}
static inline unsigned long kmem_cache_flags(unsigned long object_size,
unsigned long flags, const char *name,
void (*ctor)(void *))
@@ -1244,13 +1284,30 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node,
static inline void dec_slabs_node(struct kmem_cache *s, int node,
int objects) {}
+static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
+{
+ kmemleak_alloc(ptr, size, 1, flags);
+}
+
+static inline void kfree_hook(const void *x)
+{
+ kmemleak_free(x);
+}
+
static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
{ return 0; }
static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
- void *object) {}
+ void *object)
+{
+ kmemleak_alloc_recursive(object, s->object_size, 1, s->flags,
+ flags & gfp_allowed_mask);
+}
-static inline void slab_free_hook(struct kmem_cache *s, void *x) {}
+static inline void slab_free_hook(struct kmem_cache *s, void *x)
+{
+ kmemleak_free_recursive(x, s->flags);
+}
#endif /* CONFIG_SLUB_DEBUG */
@@ -1292,11 +1349,12 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
page = alloc_slab_page(alloc_gfp, node, oo);
if (unlikely(!page)) {
oo = s->min;
+ alloc_gfp = flags;
/*
* Allocation may have failed due to fragmentation.
* Try a lower order alloc if possible
*/
- page = alloc_slab_page(flags, node, oo);
+ page = alloc_slab_page(alloc_gfp, node, oo);
if (page)
stat(s, ORDER_FALLBACK);
@@ -1306,7 +1364,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
int pages = 1 << oo_order(oo);
- kmemcheck_alloc_shadow(page, oo_order(oo), flags, node);
+ kmemcheck_alloc_shadow(page, oo_order(oo), alloc_gfp, node);
/*
* Objects from caches that have a constructor don't get
@@ -1461,11 +1519,9 @@ static void discard_slab(struct kmem_cache *s, struct page *page)
/*
* Management of partially allocated slabs.
- *
- * list_lock must be held.
*/
-static inline void add_partial(struct kmem_cache_node *n,
- struct page *page, int tail)
+static inline void
+__add_partial(struct kmem_cache_node *n, struct page *page, int tail)
{
n->nr_partial++;
if (tail == DEACTIVATE_TO_TAIL)
@@ -1474,23 +1530,32 @@ static inline void add_partial(struct kmem_cache_node *n,
list_add(&page->lru, &n->partial);
}
-/*
- * list_lock must be held.
- */
-static inline void remove_partial(struct kmem_cache_node *n,
- struct page *page)
+static inline void add_partial(struct kmem_cache_node *n,
+ struct page *page, int tail)
+{
+ lockdep_assert_held(&n->list_lock);
+ __add_partial(n, page, tail);
+}
+
+static inline void
+__remove_partial(struct kmem_cache_node *n, struct page *page)
{
list_del(&page->lru);
n->nr_partial--;
}
+static inline void remove_partial(struct kmem_cache_node *n,
+ struct page *page)
+{
+ lockdep_assert_held(&n->list_lock);
+ __remove_partial(n, page);
+}
+
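
The split into __add_partial()/__remove_partial() plus locked wrappers turns the old "list_lock must be held" comments into a checked invariant: normal callers go through the wrapper and trip lockdep_assert_held() if they forgot the lock, while init-time paths with provably no concurrency (see early_kmem_cache_node_alloc() later in this patch) call the bare __ variant. A tiny user-space analogue of the same shape, with a pthread flag standing in for lockdep:

#include <assert.h>
#include <stdbool.h>
#include <pthread.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static bool list_lock_held;		/* crude stand-in for lockdep state */

static void __add_partial(void)
{
	/* ... list_add(&page->lru, &n->partial) ... */
}

static void add_partial(void)
{
	assert(list_lock_held);		/* analogue of lockdep_assert_held() */
	__add_partial();
}

void locked_caller(void)
{
	pthread_mutex_lock(&list_lock);
	list_lock_held = true;
	add_partial();			/* normal path: lock already taken */
	list_lock_held = false;
	pthread_mutex_unlock(&list_lock);
}

void early_init_caller(void)
{
	__add_partial();		/* no concurrency yet, skip the check */
}
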
/*
* Remove slab from the partial list, freeze it and
* return the pointer to the freelist.
*
* Returns a list of objects or NULL if it fails.
- *
- * Must hold list_lock since we modify the partial list.
*/
static inline void *acquire_slab(struct kmem_cache *s,
struct kmem_cache_node *n, struct page *page,
@@ -1500,6 +1565,8 @@ static inline void *acquire_slab(struct kmem_cache *s,
unsigned long counters;
struct page new;
+ lockdep_assert_held(&n->list_lock);
+
/*
* Zap the freelist and set the frozen bit.
* The old freelist is the list of objects for the
@@ -1573,7 +1640,8 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
put_cpu_partial(s, page, 0);
stat(s, CPU_PARTIAL_NODE);
}
- if (kmem_cache_debug(s) || available > s->cpu_partial / 2)
+ if (!kmem_cache_has_cpu_partial(s)
+ || available > s->cpu_partial / 2)
break;
}
@@ -1618,8 +1686,8 @@ static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
return NULL;
do {
- cpuset_mems_cookie = get_mems_allowed();
- zonelist = node_zonelist(slab_node(), flags);
+ cpuset_mems_cookie = read_mems_allowed_begin();
+ zonelist = node_zonelist(mempolicy_slab_node(), flags);
for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
struct kmem_cache_node *n;
@@ -1630,19 +1698,17 @@ static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
object = get_partial_node(s, n, c, flags);
if (object) {
/*
- * Return the object even if
- * put_mems_allowed indicated that
- * the cpuset mems_allowed was
- * updated in parallel. It's a
- * harmless race between the alloc
- * and the cpuset update.
+ * Don't check read_mems_allowed_retry()
+ * here - if mems_allowed was updated in
+ * parallel, that was a harmless race
+ * between allocation and the cpuset
+ * update
*/
- put_mems_allowed(cpuset_mems_cookie);
return object;
}
}
}
- } while (!put_mems_allowed(cpuset_mems_cookie));
+ } while (read_mems_allowed_retry(cpuset_mems_cookie));
#endif
return NULL;
}
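
read_mems_allowed_begin()/read_mems_allowed_retry() behave like a seqcount read section: sample a generation value, do the lockless walk, and retry only if the cpuset's mems_allowed changed underneath — and, as the comment says, only when nothing was found. A simplified user-space rendering of that retry loop with C11 atomics (hypothetical names, not the kernel API):

#include <stdatomic.h>
#include <stddef.h>

static atomic_uint mems_generation;	/* bumped whenever mems_allowed changes */

static unsigned int read_begin(void)
{
	return atomic_load_explicit(&mems_generation, memory_order_acquire);
}

static int read_retry(unsigned int seq)
{
	return atomic_load_explicit(&mems_generation, memory_order_acquire) != seq;
}

void *alloc_with_retry(void *(*try_alloc)(void))
{
	void *obj;
	unsigned int seq;

	do {
		seq = read_begin();
		obj = try_alloc();	/* lockless scan over the allowed nodes */
	} while (!obj && read_retry(seq));

	return obj;
}
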
@@ -1733,7 +1799,8 @@ static void init_kmem_cache_cpus(struct kmem_cache *s)
/*
* Remove the cpu slab
*/
-static void deactivate_slab(struct kmem_cache *s, struct page *page, void *freelist)
+static void deactivate_slab(struct kmem_cache *s, struct page *page,
+ void *freelist)
{
enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
struct kmem_cache_node *n = get_node(s, page_to_nid(page));
@@ -1842,7 +1909,7 @@ redo:
else if (l == M_FULL)
- remove_full(s, page);
+ remove_full(s, n, page);
if (m == M_PARTIAL) {
@@ -1884,6 +1951,7 @@ redo:
static void unfreeze_partials(struct kmem_cache *s,
struct kmem_cache_cpu *c)
{
+#ifdef CONFIG_SLUB_CPU_PARTIAL
struct kmem_cache_node *n = NULL, *n2 = NULL;
struct page *page, *discard_page = NULL;
@@ -1938,6 +2006,7 @@ static void unfreeze_partials(struct kmem_cache *s,
discard_slab(s, page);
stat(s, FREE_SLAB);
}
+#endif
}
/*
@@ -1951,6 +2020,7 @@ static void unfreeze_partials(struct kmem_cache *s,
*/
static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
{
+#ifdef CONFIG_SLUB_CPU_PARTIAL
struct page *oldpage;
int pages;
int pobjects;
@@ -1986,7 +2056,9 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
page->pobjects = pobjects;
page->next = oldpage;
- } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
+ } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
+ != oldpage);
+#endif
}
static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
@@ -2155,8 +2227,8 @@ static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
}
/*
- * Check the page->freelist of a page and either transfer the freelist to the per cpu freelist
- * or deactivate the page.
+ * Check the page->freelist of a page and either transfer the freelist to the
+ * per cpu freelist or deactivate the page.
*
* The page is still frozen if the return value is not NULL.
*
@@ -2300,7 +2372,8 @@ new_slab:
goto load_freelist;
/* Only entered in the debug case */
- if (kmem_cache_debug(s) && !alloc_debug_processing(s, page, freelist, addr))
+ if (kmem_cache_debug(s) &&
+ !alloc_debug_processing(s, page, freelist, addr))
goto new_slab; /* Slab failed checks. Next slab needed */
deactivate_slab(s, page, get_freepointer(s, freelist));
@@ -2368,13 +2441,15 @@ redo:
* The cmpxchg will only match if there was no additional
* operation and if we are on the right processor.
*
- * The cmpxchg does the following atomically (without lock semantics!)
+ * The cmpxchg does the following atomically (without lock
+ * semantics!)
* 1. Relocate first pointer to the current per cpu area.
* 2. Verify that tid and freelist have not been changed
* 3. If they were not changed replace tid and freelist
*
- * Since this is without lock semantics the protection is only against
- * code executing on this cpu *not* from access by other cpus.
+ * Since this is without lock semantics the protection is only
+ * against code executing on this cpu *not* from access by
+ * other cpus.
*/
if (unlikely(!this_cpu_cmpxchg_double(
s->cpu_slab->freelist, s->cpu_slab->tid,
@@ -2406,7 +2481,8 @@ void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
{
void *ret = slab_alloc(s, gfpflags, _RET_IP_);
- trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, s->size, gfpflags);
+ trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
+ s->size, gfpflags);
return ret;
}
@@ -2420,14 +2496,6 @@ void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_trace);
-
-void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
-{
- void *ret = kmalloc_order(size, flags, order);
- trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
- return ret;
-}
-EXPORT_SYMBOL(kmalloc_order_trace);
#endif
#ifdef CONFIG_NUMA
@@ -2495,15 +2563,17 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
new.inuse--;
if ((!new.inuse || !prior) && !was_frozen) {
- if (!kmem_cache_debug(s) && !prior)
+ if (kmem_cache_has_cpu_partial(s) && !prior) {
/*
- * Slab was on no list before and will be partially empty
- * We can defer the list move and instead freeze it.
+ * Slab was on no list before and will be
+ * partially empty
+ * We can defer the list move and instead
+ * freeze it.
*/
new.frozen = 1;
- else { /* Needs to be taken off a list */
+ } else { /* Needs to be taken off a list */
n = get_node(s, page_to_nid(page));
/*
@@ -2550,8 +2620,9 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
* Objects left in the slab. If it was not on the partial list before
* then add it.
*/
- if (kmem_cache_debug(s) && unlikely(!prior)) {
- remove_full(s, page);
+ if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
+ if (kmem_cache_debug(s))
+ remove_full(s, n, page);
add_partial(n, page, DEACTIVATE_TO_TAIL);
stat(s, FREE_ADD_PARTIAL);
}
@@ -2565,9 +2636,10 @@ slab_empty:
*/
remove_partial(n, page);
stat(s, FREE_REMOVE_PARTIAL);
- } else
+ } else {
/* Slab must be on the full list */
- remove_full(s, page);
+ remove_full(s, n, page);
+ }
spin_unlock_irqrestore(&n->list_lock, flags);
stat(s, FREE_SLAB);
@@ -2807,8 +2879,8 @@ static struct kmem_cache *kmem_cache_node;
* slab on the node for this slabcache. There are no concurrent accesses
* possible.
*
- * Note that this function only works on the kmalloc_node_cache
- * when allocating for the kmalloc_node_cache. This is used for bootstrapping
+ * Note that this function only works on the kmem_cache_node
+ * when allocating for the kmem_cache_node. This is used for bootstrapping
* memory on a fresh node that has no slab structures yet.
*/
static void early_kmem_cache_node_alloc(int node)
@@ -2841,7 +2913,11 @@ static void early_kmem_cache_node_alloc(int node)
init_kmem_cache_node(n);
inc_slabs_node(kmem_cache_node, node, page->objects);
- add_partial(n, page, DEACTIVATE_TO_HEAD);
+ /*
+ * No locks need to be taken here as it has just been
+ * initialized and there is no concurrent access.
+ */
+ __add_partial(n, page, DEACTIVATE_TO_HEAD);
}
static void free_kmem_cache_nodes(struct kmem_cache *s)
@@ -3056,10 +3132,10 @@ static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
* A) The number of objects from per cpu partial slabs dumped to the
* per node list when we reach the limit.
* B) The number of objects in cpu partial slabs to extract from the
- * per node list when we run out of per cpu objects. We only fetch 50%
- * to keep some capacity around for frees.
+ * per node list when we run out of per cpu objects. We only fetch
+ * 50% to keep some capacity around for frees.
*/
- if (kmem_cache_debug(s))
+ if (!kmem_cache_has_cpu_partial(s))
s->cpu_partial = 0;
else if (s->size >= PAGE_SIZE)
s->cpu_partial = 2;
@@ -3084,8 +3160,8 @@ error:
if (flags & SLAB_PANIC)
panic("Cannot create slab %s size=%lu realsize=%u "
"order=%u offset=%u flags=%lx\n",
- s->name, (unsigned long)s->size, s->size, oo_order(s->oo),
- s->offset, flags);
+ s->name, (unsigned long)s->size, s->size,
+ oo_order(s->oo), s->offset, flags);
return -EINVAL;
}
@@ -3127,7 +3203,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
list_for_each_entry_safe(page, h, &n->partial, lru) {
if (!page->inuse) {
- remove_partial(n, page);
+ __remove_partial(n, page);
discard_slab(s, page);
} else {
list_slab_objects(s, page,
@@ -3159,23 +3235,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
int __kmem_cache_shutdown(struct kmem_cache *s)
{
- int rc = kmem_cache_close(s);
-
- if (!rc) {
- /*
- * We do the same lock strategy around sysfs_slab_add, see
- * __kmem_cache_create. Because this is pretty much the last
- * operation we do and the lock will be released shortly after
- * that in slab_common.c, we could just move sysfs_slab_remove
- * to a later point in common code. We should do that when we
- * have a common sysfs framework for all allocators.
- */
- mutex_unlock(&slab_mutex);
- sysfs_slab_remove(s);
- mutex_lock(&slab_mutex);
- }
-
- return rc;
+ return kmem_cache_close(s);
}
/********************************************************************
@@ -3250,7 +3310,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
if (page)
ptr = page_address(page);
- kmemleak_alloc(ptr, size, 1, flags);
+ kmalloc_large_node_hook(ptr, size, flags);
return ptr;
}
@@ -3301,42 +3361,6 @@ size_t ksize(const void *object)
}
EXPORT_SYMBOL(ksize);
-#ifdef CONFIG_SLUB_DEBUG
-bool verify_mem_not_deleted(const void *x)
-{
- struct page *page;
- void *object = (void *)x;
- unsigned long flags;
- bool rv;
-
- if (unlikely(ZERO_OR_NULL_PTR(x)))
- return false;
-
- local_irq_save(flags);
-
- page = virt_to_head_page(x);
- if (unlikely(!PageSlab(page))) {
- /* maybe it was from stack? */
- rv = true;
- goto out_unlock;
- }
-
- slab_lock(page);
- if (on_freelist(page->slab_cache, page, object)) {
- object_err(page->slab_cache, page, object, "Object is on free-list");
- rv = false;
- } else {
- rv = true;
- }
- slab_unlock(page);
-
-out_unlock:
- local_irq_restore(flags);
- return rv;
-}
-EXPORT_SYMBOL(verify_mem_not_deleted);
-#endif
-
void kfree(const void *x)
{
struct page *page;
@@ -3350,7 +3374,7 @@ void kfree(const void *x)
page = virt_to_head_page(x);
if (unlikely(!PageSlab(page))) {
BUG_ON(!PageCompound(page));
- kmemleak_free(x);
+ kfree_hook(x);
__free_memcg_kmem_pages(page, compound_order(page));
return;
}
@@ -3646,6 +3670,9 @@ static int slab_unmergeable(struct kmem_cache *s)
if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
return 1;
+ if (!is_root_cache(s))
+ return 1;
+
if (s->ctor)
return 1;
@@ -3658,9 +3685,8 @@ static int slab_unmergeable(struct kmem_cache *s)
return 0;
}
-static struct kmem_cache *find_mergeable(struct mem_cgroup *memcg, size_t size,
- size_t align, unsigned long flags, const char *name,
- void (*ctor)(void *))
+static struct kmem_cache *find_mergeable(size_t size, size_t align,
+ unsigned long flags, const char *name, void (*ctor)(void *))
{
struct kmem_cache *s;
@@ -3683,7 +3709,7 @@ static struct kmem_cache *find_mergeable(struct mem_cgroup *memcg, size_t size,
continue;
if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME))
- continue;
+ continue;
/*
* Check if alignment is compatible.
* Courtesy of Adrian Drzewiecki
@@ -3694,23 +3720,24 @@ static struct kmem_cache *find_mergeable(struct mem_cgroup *memcg, size_t size,
if (s->size - size >= sizeof(void *))
continue;
- if (!cache_match_memcg(s, memcg))
- continue;
-
return s;
}
return NULL;
}
struct kmem_cache *
-__kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
- size_t align, unsigned long flags, void (*ctor)(void *))
+__kmem_cache_alias(const char *name, size_t size, size_t align,
+ unsigned long flags, void (*ctor)(void *))
{
struct kmem_cache *s;
- s = find_mergeable(memcg, size, align, flags, name, ctor);
+ s = find_mergeable(size, align, flags, name, ctor);
if (s) {
+ int i;
+ struct kmem_cache *c;
+
s->refcount++;
+
/*
* Adjust the object sizes so that we clear
* the complete object on kzalloc.
@@ -3718,6 +3745,15 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
s->object_size = max(s->object_size, (int)size);
s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
+ for_each_memcg_cache_index(i) {
+ c = cache_from_memcg_idx(s, i);
+ if (!c)
+ continue;
+ c->object_size = s->object_size;
+ c->inuse = max_t(int, c->inuse,
+ ALIGN(size, sizeof(void *)));
+ }
+
if (sysfs_slab_alias(s, name)) {
s->refcount--;
s = NULL;
@@ -3740,10 +3776,7 @@ int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
return 0;
memcg_propagate_slab_attrs(s);
- mutex_unlock(&slab_mutex);
err = sysfs_slab_add(s);
- mutex_lock(&slab_mutex);
-
if (err)
kmem_cache_close(s);
@@ -3755,7 +3788,7 @@ int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
* Use the cpu notifier to insure that the cpu slabs are flushed when
* necessary.
*/
-static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
+static int slab_cpuup_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
long cpu = (long)hcpu;
@@ -3781,7 +3814,7 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
return NOTIFY_OK;
}
-static struct notifier_block __cpuinitdata slab_notifier = {
+static struct notifier_block slab_notifier = {
.notifier_call = slab_cpuup_callback
};
@@ -4147,15 +4180,17 @@ static int list_locations(struct kmem_cache *s, char *buf,
!cpumask_empty(to_cpumask(l->cpus)) &&
len < PAGE_SIZE - 60) {
len += sprintf(buf + len, " cpus=");
- len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
+ len += cpulist_scnprintf(buf + len,
+ PAGE_SIZE - len - 50,
to_cpumask(l->cpus));
}
if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
len < PAGE_SIZE - 60) {
len += sprintf(buf + len, " nodes=");
- len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
- l->nodes);
+ len += nodelist_scnprintf(buf + len,
+ PAGE_SIZE - len - 50,
+ l->nodes);
}
len += sprintf(buf + len, "\n");
@@ -4253,18 +4288,17 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
int node;
int x;
unsigned long *nodes;
- unsigned long *per_cpu;
- nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
+ nodes = kzalloc(sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
if (!nodes)
return -ENOMEM;
- per_cpu = nodes + nr_node_ids;
if (flags & SO_CPU) {
int cpu;
for_each_possible_cpu(cpu) {
- struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
+ struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab,
+ cpu);
int node;
struct page *page;
@@ -4285,12 +4319,16 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
page = ACCESS_ONCE(c->partial);
if (page) {
- x = page->pobjects;
+ node = page_to_nid(page);
+ if (flags & SO_TOTAL)
+ WARN_ON_ONCE(1);
+ else if (flags & SO_OBJECTS)
+ WARN_ON_ONCE(1);
+ else
+ x = page->pages;
total += x;
nodes[node] += x;
}
-
- per_cpu[node]++;
}
}
@@ -4300,12 +4338,11 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
for_each_node_state(node, N_NORMAL_MEMORY) {
struct kmem_cache_node *n = get_node(s, node);
- if (flags & SO_TOTAL)
- x = atomic_long_read(&n->total_objects);
- else if (flags & SO_OBJECTS)
- x = atomic_long_read(&n->total_objects) -
- count_partial(n, count_free);
-
+ if (flags & SO_TOTAL)
+ x = atomic_long_read(&n->total_objects);
+ else if (flags & SO_OBJECTS)
+ x = atomic_long_read(&n->total_objects) -
+ count_partial(n, count_free);
else
x = atomic_long_read(&n->nr_slabs);
total += x;
@@ -4405,7 +4442,7 @@ static ssize_t order_store(struct kmem_cache *s,
unsigned long order;
int err;
- err = strict_strtoul(buf, 10, &order);
+ err = kstrtoul(buf, 10, &order);
if (err)
return err;
@@ -4433,7 +4470,7 @@ static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
unsigned long min;
int err;
- err = strict_strtoul(buf, 10, &min);
+ err = kstrtoul(buf, 10, &min);
if (err)
return err;
@@ -4453,10 +4490,10 @@ static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
unsigned long objects;
int err;
- err = strict_strtoul(buf, 10, &objects);
+ err = kstrtoul(buf, 10, &objects);
if (err)
return err;
- if (objects && kmem_cache_debug(s))
+ if (objects && !kmem_cache_has_cpu_partial(s))
return -EINVAL;
s->cpu_partial = objects;
@@ -4769,7 +4806,7 @@ static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
unsigned long ratio;
int err;
- err = strict_strtoul(buf, 10, &ratio);
+ err = kstrtoul(buf, 10, &ratio);
if (err)
return err;
@@ -4999,7 +5036,7 @@ static ssize_t slab_attr_store(struct kobject *kobj,
* through the descendants with best-effort propagation.
*/
for_each_memcg_cache_index(i) {
- struct kmem_cache *c = cache_from_memcg(s, i);
+ struct kmem_cache *c = cache_from_memcg_idx(s, i);
if (c)
attribute->store(c, buf, len);
}
@@ -5014,15 +5051,18 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s)
#ifdef CONFIG_MEMCG_KMEM
int i;
char *buffer = NULL;
+ struct kmem_cache *root_cache;
- if (!is_root_cache(s))
+ if (is_root_cache(s))
return;
+ root_cache = s->memcg_params->root_cache;
+
/*
* This mean this cache had no attribute written. Therefore, no point
* in copying default values around
*/
- if (!s->max_attr_size)
+ if (!root_cache->max_attr_size)
return;
for (i = 0; i < ARRAY_SIZE(slab_attrs); i++) {
@@ -5044,7 +5084,7 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s)
*/
if (buffer)
buf = buffer;
- else if (s->max_attr_size < ARRAY_SIZE(mbuf))
+ else if (root_cache->max_attr_size < ARRAY_SIZE(mbuf))
buf = mbuf;
else {
buffer = (char *) get_zeroed_page(GFP_KERNEL);
@@ -5053,7 +5093,7 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s)
buf = buffer;
}
- attr->show(s->memcg_params->root_cache, buf);
+ attr->show(root_cache, buf);
attr->store(s, buf, strlen(buf));
}
@@ -5062,6 +5102,11 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s)
#endif
}
+static void kmem_cache_release(struct kobject *k)
+{
+ slab_kmem_cache_release(to_slab(k));
+}
+
static const struct sysfs_ops slab_sysfs_ops = {
.show = slab_attr_show,
.store = slab_attr_store,
@@ -5069,6 +5114,7 @@ static const struct sysfs_ops slab_sysfs_ops = {
static struct kobj_type slab_ktype = {
.sysfs_ops = &slab_sysfs_ops,
+ .release = kmem_cache_release,
};
static int uevent_filter(struct kset *kset, struct kobject *kobj)
@@ -5086,6 +5132,15 @@ static const struct kset_uevent_ops slab_uevent_ops = {
static struct kset *slab_kset;
+static inline struct kset *cache_kset(struct kmem_cache *s)
+{
+#ifdef CONFIG_MEMCG_KMEM
+ if (!is_root_cache(s))
+ return s->memcg_params->root_cache->memcg_kset;
+#endif
+ return slab_kset;
+}
+
#define ID_STR_LENGTH 64
/* Create a unique string id for a slab cache:
@@ -5121,7 +5176,8 @@ static char *create_unique_id(struct kmem_cache *s)
#ifdef CONFIG_MEMCG_KMEM
if (!is_root_cache(s))
- p += sprintf(p, "-%08d", memcg_cache_id(s->memcg_params->memcg));
+ p += sprintf(p, "-%08d",
+ memcg_cache_id(s->memcg_params->memcg));
#endif
BUG_ON(p > name + ID_STR_LENGTH - 1);
@@ -5150,29 +5206,42 @@ static int sysfs_slab_add(struct kmem_cache *s)
name = create_unique_id(s);
}
- s->kobj.kset = slab_kset;
- err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
- if (err) {
- kobject_put(&s->kobj);
- return err;
- }
+ s->kobj.kset = cache_kset(s);
+ err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
+ if (err)
+ goto out_put_kobj;
err = sysfs_create_group(&s->kobj, &slab_attr_group);
- if (err) {
- kobject_del(&s->kobj);
- kobject_put(&s->kobj);
- return err;
+ if (err)
+ goto out_del_kobj;
+
+#ifdef CONFIG_MEMCG_KMEM
+ if (is_root_cache(s)) {
+ s->memcg_kset = kset_create_and_add("cgroup", NULL, &s->kobj);
+ if (!s->memcg_kset) {
+ err = -ENOMEM;
+ goto out_del_kobj;
+ }
}
+#endif
+
kobject_uevent(&s->kobj, KOBJ_ADD);
if (!unmergeable) {
/* Setup first alias */
sysfs_slab_alias(s, s->name);
- kfree(name);
}
- return 0;
+out:
+ if (!unmergeable)
+ kfree(name);
+ return err;
+out_del_kobj:
+ kobject_del(&s->kobj);
+out_put_kobj:
+ kobject_put(&s->kobj);
+ goto out;
}
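The reworked sysfs_slab_add() above funnels all failure paths through goto labels, so each path undoes exactly what had been set up before it, while the success path shares the final kfree(name) via the out label. A minimal userspace sketch of that unwinding shape, with malloc/free standing in for the kobject and sysfs-group setup (names invented), might look like this:

#include <stdlib.h>
#include <string.h>

static int register_cache(const char *id)
{
	int err;
	char *name, *kobj, *group;

	name = strdup(id);		/* like create_unique_id() */
	if (!name)
		return -1;

	kobj = malloc(32);		/* step 1: stands in for kobject_init_and_add() */
	if (!kobj) {
		err = -1;
		goto out;		/* nothing beyond name to undo yet */
	}

	group = malloc(32);		/* step 2: stands in for sysfs_create_group() */
	if (!group) {
		err = -1;
		goto out_put_kobj;	/* undo step 1, then share the common exit */
	}

	err = 0;
	free(group);			/* a real registration keeps these alive */
	free(kobj);
out:
	free(name);			/* common to success and failure paths */
	return err;
out_put_kobj:
	free(kobj);
	goto out;
}

int main(void)
{
	return register_cache("slab-demo") ? 1 : 0;
}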
-static void sysfs_slab_remove(struct kmem_cache *s)
+void sysfs_slab_remove(struct kmem_cache *s)
{
if (slab_state < FULL)
/*
@@ -5181,6 +5250,9 @@ static void sysfs_slab_remove(struct kmem_cache *s)
*/
return;
+#ifdef CONFIG_MEMCG_KMEM
+ kset_unregister(s->memcg_kset);
+#endif
kobject_uevent(&s->kobj, KOBJ_REMOVE);
kobject_del(&s->kobj);
kobject_put(&s->kobj);
@@ -5269,7 +5341,6 @@ __initcall(slab_sysfs_init);
#ifdef CONFIG_SLABINFO
void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
{
- unsigned long nr_partials = 0;
unsigned long nr_slabs = 0;
unsigned long nr_objs = 0;
unsigned long nr_free = 0;
@@ -5281,9 +5352,8 @@ void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
if (!n)
continue;
- nr_partials += n->nr_partial;
- nr_slabs += atomic_long_read(&n->nr_slabs);
- nr_objs += atomic_long_read(&n->total_objects);
+ nr_slabs += node_nr_slabs(n);
+ nr_objs += node_nr_objs(n);
nr_free += count_partial(n, count_free);
}
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 27eeab3be757..4cba9c2783a1 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -40,7 +40,8 @@ static void * __init_refok __earlyonly_bootmem_alloc(int node,
unsigned long align,
unsigned long goal)
{
- return __alloc_bootmem_node_high(NODE_DATA(node), size, align, goal);
+ return memblock_virt_alloc_try_nid(size, align, goal,
+ BOOTMEM_ALLOC_ACCESSIBLE, node);
}
static void *vmemmap_buf;
@@ -226,7 +227,8 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
if (vmemmap_buf_start) {
/* need to free left buf */
- free_bootmem(__pa(vmemmap_buf), vmemmap_buf_end - vmemmap_buf);
+ memblock_free_early(__pa(vmemmap_buf),
+ vmemmap_buf_end - vmemmap_buf);
vmemmap_buf = NULL;
vmemmap_buf_end = NULL;
}
diff --git a/mm/sparse.c b/mm/sparse.c
index 1c91f0d3f6ab..d1b48b691ac8 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -5,10 +5,12 @@
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
+#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
+
#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
@@ -69,7 +71,7 @@ static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
else
section = kzalloc(array_size, GFP_KERNEL);
} else {
- section = alloc_bootmem_node(NODE_DATA(nid), array_size);
+ section = memblock_virt_alloc_node(array_size, nid);
}
return section;
@@ -79,7 +81,6 @@ static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
unsigned long root = SECTION_NR_TO_ROOT(section_nr);
struct mem_section *section;
- int ret = 0;
if (mem_section[root])
return -EEXIST;
@@ -90,7 +91,7 @@ static int __meminit sparse_index_init(unsigned long section_nr, int nid)
mem_section[root] = section;
- return ret;
+ return 0;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
@@ -269,7 +270,7 @@ sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
/*
* A page may contain usemaps for other sections preventing the
* page being freed and making a section unremovable while
- * other sections referencing the usemap retmain active. Similarly,
+ * other sections referencing the usemap remain active. Similarly,
* a pgdat can prevent a section being removed. If section A
* contains a pgdat and section B contains the usemap, both
* sections become inter-dependent. This allocates usemaps
@@ -280,8 +281,9 @@ sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
limit = goal + (1UL << PA_SECTION_SHIFT);
nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
- p = ___alloc_bootmem_node_nopanic(NODE_DATA(nid), size,
- SMP_CACHE_BYTES, goal, limit);
+ p = memblock_virt_alloc_try_nid_nopanic(size,
+ SMP_CACHE_BYTES, goal, limit,
+ nid);
if (!p && limit) {
limit = 0;
goto again;
@@ -332,7 +334,7 @@ static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
unsigned long size)
{
- return alloc_bootmem_node_nopanic(pgdat, size);
+ return memblock_virt_alloc_node_nopanic(size, pgdat->node_id);
}
static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
@@ -340,13 +342,14 @@ static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
-static void __init sparse_early_usemaps_alloc_node(unsigned long**usemap_map,
+static void __init sparse_early_usemaps_alloc_node(void *data,
unsigned long pnum_begin,
unsigned long pnum_end,
unsigned long usemap_count, int nodeid)
{
void *usemap;
unsigned long pnum;
+ unsigned long **usemap_map = (unsigned long **)data;
int size = usemap_size();
usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
@@ -376,8 +379,9 @@ struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
return map;
size = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
- map = __alloc_bootmem_node_high(NODE_DATA(nid), size,
- PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+ map = memblock_virt_alloc_try_nid(size,
+ PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
+ BOOTMEM_ALLOC_ACCESSIBLE, nid);
return map;
}
void __init sparse_mem_maps_populate_node(struct page **map_map,
@@ -401,8 +405,9 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
}
size = PAGE_ALIGN(size);
- map = __alloc_bootmem_node_high(NODE_DATA(nodeid), size * map_count,
- PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+ map = memblock_virt_alloc_try_nid(size * map_count,
+ PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
+ BOOTMEM_ALLOC_ACCESSIBLE, nodeid);
if (map) {
for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
if (!present_section_nr(pnum))
@@ -431,11 +436,12 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
-static void __init sparse_early_mem_maps_alloc_node(struct page **map_map,
+static void __init sparse_early_mem_maps_alloc_node(void *data,
unsigned long pnum_begin,
unsigned long pnum_end,
unsigned long map_count, int nodeid)
{
+ struct page **map_map = (struct page **)data;
sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
map_count, nodeid);
}
@@ -457,48 +463,22 @@ static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
}
#endif
-void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
+void __weak __meminit vmemmap_populate_print_last(void)
{
}
-/*
- * Allocate the accumulated non-linear sections, allocate a mem_map
- * for each and record the physical to section mapping.
+/**
+ * alloc_usemap_and_memmap - memory allocation for pageblock flags and vmemmap
+ * @alloc_func: function to allocate usemap or memmap for a range of sections
+ * @data: usemap_map for pageblock flags or map_map for vmemmap
*/
-void __init sparse_init(void)
+static void __init alloc_usemap_and_memmap(void (*alloc_func)
+ (void *, unsigned long, unsigned long,
+ unsigned long, int), void *data)
{
unsigned long pnum;
- struct page *map;
- unsigned long *usemap;
- unsigned long **usemap_map;
- int size;
+ unsigned long map_count;
int nodeid_begin = 0;
unsigned long pnum_begin = 0;
- unsigned long usemap_count;
-#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
- unsigned long map_count;
- int size2;
- struct page **map_map;
-#endif
-
- /* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
- set_pageblock_order();
-
- /*
- * map is using big page (aka 2M in x86 64 bit)
- * usemap is less one page (aka 24 bytes)
- * so alloc 2M (with 2M align) and 24 bytes in turn will
- * make next 2M slip to one more 2M later.
- * then in big system, the memory will have a lot of holes...
- * here try to allocate 2M pages continuously.
- *
- * powerpc need to call sparse_init_one_section right after each
- * sparse_early_mem_map_alloc, so allocate usemap_map at first.
- */
- size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
- usemap_map = alloc_bootmem(size);
- if (!usemap_map)
- panic("can not allocate usemap_map\n");
for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
struct mem_section *ms;
@@ -510,7 +490,7 @@ void __init sparse_init(void)
pnum_begin = pnum;
break;
}
- usemap_count = 1;
+ map_count = 1;
for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
struct mem_section *ms;
int nodeid;
@@ -520,61 +500,69 @@ void __init sparse_init(void)
ms = __nr_to_section(pnum);
nodeid = sparse_early_nid(ms);
if (nodeid == nodeid_begin) {
- usemap_count++;
+ map_count++;
continue;
}
/* ok, we need to take care of sections from pnum_begin to pnum - 1 */
- sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, pnum,
- usemap_count, nodeid_begin);
+ alloc_func(data, pnum_begin, pnum,
+ map_count, nodeid_begin);
/* new start, update count etc*/
nodeid_begin = nodeid;
pnum_begin = pnum;
- usemap_count = 1;
+ map_count = 1;
}
/* ok, last chunk */
- sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, NR_MEM_SECTIONS,
- usemap_count, nodeid_begin);
+ alloc_func(data, pnum_begin, NR_MEM_SECTIONS,
+ map_count, nodeid_begin);
+}
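The new alloc_usemap_and_memmap() factors out the walk that sparse_init() used to duplicate: scan every section once and call the supplied allocator once per run of present sections belonging to the same node. A rough userspace model of that grouping, with made-up section-to-node data and a callback that only prints, could be:

#include <stdio.h>

#define NR_SECTIONS 12

/* -1 means the section is not present; otherwise the owning node id. */
static const int section_nid[NR_SECTIONS] = {
	-1, 0, 0, -1, 0, 1, 1, -1, -1, 2, 2, 2
};

static void alloc_for_node(void *data, unsigned long begin, unsigned long end,
			   unsigned long count, int nid)
{
	printf("nid %d: sections [%lu, %lu), %lu present\n",
	       nid, begin, end, count);
}

static void for_each_node_run(void (*fn)(void *, unsigned long, unsigned long,
					 unsigned long, int), void *data)
{
	unsigned long pnum, pnum_begin = 0, count;
	int nid_begin = -1;

	/* Find the first present section to seed the first run. */
	for (pnum = 0; pnum < NR_SECTIONS; pnum++) {
		if (section_nid[pnum] < 0)
			continue;
		nid_begin = section_nid[pnum];
		pnum_begin = pnum;
		break;
	}
	if (nid_begin < 0)
		return;

	/* Extend the run while the node stays the same; flush on a change. */
	count = 1;
	for (pnum = pnum_begin + 1; pnum < NR_SECTIONS; pnum++) {
		if (section_nid[pnum] < 0)
			continue;
		if (section_nid[pnum] == nid_begin) {
			count++;
			continue;
		}
		fn(data, pnum_begin, pnum, count, nid_begin);
		nid_begin = section_nid[pnum];
		pnum_begin = pnum;
		count = 1;
	}
	fn(data, pnum_begin, NR_SECTIONS, count, nid_begin);
}

int main(void)
{
	for_each_node_run(alloc_for_node, NULL);
	return 0;
}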
+/*
+ * Allocate the accumulated non-linear sections, allocate a mem_map
+ * for each and record the physical to section mapping.
+ */
+void __init sparse_init(void)
+{
+ unsigned long pnum;
+ struct page *map;
+ unsigned long *usemap;
+ unsigned long **usemap_map;
+ int size;
#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
- size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
- map_map = alloc_bootmem(size2);
- if (!map_map)
- panic("can not allocate map_map\n");
+ int size2;
+ struct page **map_map;
+#endif
- for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
- struct mem_section *ms;
+ /* see include/linux/mmzone.h 'struct mem_section' definition */
+ BUILD_BUG_ON(!is_power_of_2(sizeof(struct mem_section)));
- if (!present_section_nr(pnum))
- continue;
- ms = __nr_to_section(pnum);
- nodeid_begin = sparse_early_nid(ms);
- pnum_begin = pnum;
- break;
- }
- map_count = 1;
- for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
- struct mem_section *ms;
- int nodeid;
+ /* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
+ set_pageblock_order();
- if (!present_section_nr(pnum))
- continue;
- ms = __nr_to_section(pnum);
- nodeid = sparse_early_nid(ms);
- if (nodeid == nodeid_begin) {
- map_count++;
- continue;
- }
- /* ok, we need to take cake of from pnum_begin to pnum - 1*/
- sparse_early_mem_maps_alloc_node(map_map, pnum_begin, pnum,
- map_count, nodeid_begin);
- /* new start, update count etc*/
- nodeid_begin = nodeid;
- pnum_begin = pnum;
- map_count = 1;
- }
- /* ok, last chunk */
- sparse_early_mem_maps_alloc_node(map_map, pnum_begin, NR_MEM_SECTIONS,
- map_count, nodeid_begin);
+ /*
+	 * The mem_map uses big pages (2M on 64-bit x86) while each usemap is
+	 * much smaller than a page (about 24 bytes). Allocating a 2M-aligned
+	 * 2M block and then 24 bytes, in turn, pushes the next 2M block one
+	 * more 2M further out, so on a big system memory ends up with a lot
+	 * of holes. Try to allocate the 2M pages contiguously instead.
+	 *
+	 * powerpc needs to call sparse_init_one_section right after each
+	 * sparse_early_mem_map_alloc, so allocate usemap_map first.
+ */
+ size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
+ usemap_map = memblock_virt_alloc(size, 0);
+ if (!usemap_map)
+ panic("can not allocate usemap_map\n");
+ alloc_usemap_and_memmap(sparse_early_usemaps_alloc_node,
+ (void *)usemap_map);
+
+#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
+ size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
+ map_map = memblock_virt_alloc(size2, 0);
+ if (!map_map)
+ panic("can not allocate map_map\n");
+ alloc_usemap_and_memmap(sparse_early_mem_maps_alloc_node,
+ (void *)map_map);
#endif
for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
@@ -600,40 +588,39 @@ void __init sparse_init(void)
vmemmap_populate_print_last();
#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
- free_bootmem(__pa(map_map), size2);
+ memblock_free_early(__pa(map_map), size2);
#endif
- free_bootmem(__pa(usemap_map), size);
+ memblock_free_early(__pa(usemap_map), size);
}
#ifdef CONFIG_MEMORY_HOTPLUG
#ifdef CONFIG_SPARSEMEM_VMEMMAP
-static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
- unsigned long nr_pages)
+static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid)
{
/* This will make the necessary allocations eventually. */
return sparse_mem_map_populate(pnum, nid);
}
-static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
+static void __kfree_section_memmap(struct page *memmap)
{
unsigned long start = (unsigned long)memmap;
- unsigned long end = (unsigned long)(memmap + nr_pages);
+ unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);
vmemmap_free(start, end);
}
#ifdef CONFIG_MEMORY_HOTREMOVE
-static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
+static void free_map_bootmem(struct page *memmap)
{
unsigned long start = (unsigned long)memmap;
- unsigned long end = (unsigned long)(memmap + nr_pages);
+ unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);
vmemmap_free(start, end);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#else
-static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
+static struct page *__kmalloc_section_memmap(void)
{
struct page *page, *ret;
- unsigned long memmap_size = sizeof(struct page) * nr_pages;
+ unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION;
page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
if (page)
@@ -651,28 +638,30 @@ got_map_ptr:
return ret;
}
-static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
- unsigned long nr_pages)
+static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid)
{
- return __kmalloc_section_memmap(nr_pages);
+ return __kmalloc_section_memmap();
}
-static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
+static void __kfree_section_memmap(struct page *memmap)
{
if (is_vmalloc_addr(memmap))
vfree(memmap);
else
free_pages((unsigned long)memmap,
- get_order(sizeof(struct page) * nr_pages));
+ get_order(sizeof(struct page) * PAGES_PER_SECTION));
}
#ifdef CONFIG_MEMORY_HOTREMOVE
-static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
+static void free_map_bootmem(struct page *memmap)
{
unsigned long maps_section_nr, removing_section_nr, i;
- unsigned long magic;
+ unsigned long magic, nr_pages;
struct page *page = virt_to_page(memmap);
+ nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
+ >> PAGE_SHIFT;
+
for (i = 0; i < nr_pages; i++, page++) {
magic = (unsigned long) page->lru.next;
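free_map_bootmem() now derives the number of memmap pages from PAGES_PER_SECTION instead of trusting a caller-supplied count. For a feel of the arithmetic, a standalone calculation under assumed, configuration-dependent values (128MB sections, 4KB pages, a 64-byte struct page):

#include <stdio.h>

int main(void)
{
	unsigned long pages_per_section = 1UL << 15;	/* assumed: 128MB / 4KB */
	unsigned long page_size = 4096;
	unsigned long struct_page_size = 64;		/* assumed */
	unsigned long memmap_bytes = pages_per_section * struct_page_size;
	unsigned long nr_pages = (memmap_bytes + page_size - 1) / page_size;

	/* Prints: memmap per section: 2097152 bytes = 512 pages */
	printf("memmap per section: %lu bytes = %lu pages\n",
	       memmap_bytes, nr_pages);
	return 0;
}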
@@ -701,8 +690,7 @@ static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
* set. If this is <=0, then that means that the passed-in
* map was not consumed and must be freed.
*/
-int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
- int nr_pages)
+int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn)
{
unsigned long section_nr = pfn_to_section_nr(start_pfn);
struct pglist_data *pgdat = zone->zone_pgdat;
@@ -719,12 +707,12 @@ int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
ret = sparse_index_init(section_nr, pgdat->node_id);
if (ret < 0 && ret != -EEXIST)
return ret;
- memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages);
+ memmap = kmalloc_section_memmap(section_nr, pgdat->node_id);
if (!memmap)
return -ENOMEM;
usemap = __kmalloc_section_usemap();
if (!usemap) {
- __kfree_section_memmap(memmap, nr_pages);
+ __kfree_section_memmap(memmap);
return -ENOMEM;
}
@@ -736,7 +724,7 @@ int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
goto out;
}
- memset(memmap, 0, sizeof(struct page) * nr_pages);
+ memset(memmap, 0, sizeof(struct page) * PAGES_PER_SECTION);
ms->section_mem_map |= SECTION_MARKED_PRESENT;
@@ -746,11 +734,12 @@ out:
pgdat_resize_unlock(pgdat, &flags);
if (ret <= 0) {
kfree(usemap);
- __kfree_section_memmap(memmap, nr_pages);
+ __kfree_section_memmap(memmap);
}
return ret;
}
+#ifdef CONFIG_MEMORY_HOTREMOVE
#ifdef CONFIG_MEMORY_FAILURE
static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
@@ -772,11 +761,9 @@ static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
}
#endif
-#ifdef CONFIG_MEMORY_HOTREMOVE
static void free_section_usemap(struct page *memmap, unsigned long *usemap)
{
struct page *usemap_page;
- unsigned long nr_pages;
if (!usemap)
return;
@@ -788,7 +775,7 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap)
if (PageSlab(usemap_page) || PageCompound(usemap_page)) {
kfree(usemap);
if (memmap)
- __kfree_section_memmap(memmap, PAGES_PER_SECTION);
+ __kfree_section_memmap(memmap);
return;
}
@@ -797,12 +784,8 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap)
* on the section which has pgdat at boot time. Just keep it as is now.
*/
- if (memmap) {
- nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
- >> PAGE_SHIFT;
-
- free_map_bootmem(memmap, nr_pages);
- }
+ if (memmap)
+ free_map_bootmem(memmap);
}
void sparse_remove_one_section(struct zone *zone, struct mem_section *ms)
diff --git a/mm/swap.c b/mm/swap.c
index dfd7d71d6841..9ce43ba4498b 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -34,10 +34,13 @@
#include "internal.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/pagemap.h>
+
/* How many pages do we try to swap or page in/out together? */
int page_cluster;
-static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
+static DEFINE_PER_CPU(struct pagevec, lru_add_pvec);
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
@@ -54,7 +57,7 @@ static void __page_cache_release(struct page *page)
spin_lock_irqsave(&zone->lru_lock, flags);
lruvec = mem_cgroup_page_lruvec(page, zone);
- VM_BUG_ON(!PageLRU(page));
+ VM_BUG_ON_PAGE(!PageLRU(page), page);
__ClearPageLRU(page);
del_page_from_lru_list(page, lruvec, page_off_lru(page));
spin_unlock_irqrestore(&zone->lru_lock, flags);
@@ -78,83 +81,150 @@ static void __put_compound_page(struct page *page)
static void put_compound_page(struct page *page)
{
- if (unlikely(PageTail(page))) {
- /* __split_huge_page_refcount can run under us */
- struct page *page_head = compound_trans_head(page);
-
- if (likely(page != page_head &&
- get_page_unless_zero(page_head))) {
- unsigned long flags;
+ struct page *page_head;
+ if (likely(!PageTail(page))) {
+ if (put_page_testzero(page)) {
/*
- * THP can not break up slab pages so avoid taking
- * compound_lock(). Slab performs non-atomic bit ops
- * on page->flags for better performance. In particular
- * slab_unlock() in slub used to be a hot path. It is
- * still hot on arches that do not support
- * this_cpu_cmpxchg_double().
+ * By the time all refcounts have been released
+ * split_huge_page cannot run anymore from under us.
*/
- if (PageSlab(page_head)) {
- if (PageTail(page)) {
- if (put_page_testzero(page_head))
- VM_BUG_ON(1);
-
- atomic_dec(&page->_mapcount);
- goto skip_lock_tail;
- } else
- goto skip_lock;
- }
+ if (PageHead(page))
+ __put_compound_page(page);
+ else
+ __put_single_page(page);
+ }
+ return;
+ }
+
+ /* __split_huge_page_refcount can run under us */
+ page_head = compound_head(page);
+
+ /*
+ * THP can not break up slab pages so avoid taking
+ * compound_lock() and skip the tail page refcounting (in
+ * _mapcount) too. Slab performs non-atomic bit ops on
+ * page->flags for better performance. In particular
+ * slab_unlock() in slub used to be a hot path. It is still
+ * hot on arches that do not support
+ * this_cpu_cmpxchg_double().
+ *
+ * If "page" is part of a slab or hugetlbfs page it cannot be
+	 * split and the head page cannot change from under us. And
+ * if "page" is part of a THP page under splitting, if the
+ * head page pointed by the THP tail isn't a THP head anymore,
+ * we'll find PageTail clear after smp_rmb() and we'll treat
+ * it as a single page.
+ */
+ if (!__compound_tail_refcounted(page_head)) {
+ /*
+ * If "page" is a THP tail, we must read the tail page
+ * flags after the head page flags. The
+ * split_huge_page side enforces write memory barriers
+ * between clearing PageTail and before the head page
+ * can be freed and reallocated.
+ */
+ smp_rmb();
+ if (likely(PageTail(page))) {
/*
- * page_head wasn't a dangling pointer but it
- * may not be a head page anymore by the time
- * we obtain the lock. That is ok as long as it
- * can't be freed from under us.
+ * __split_huge_page_refcount cannot race
+ * here.
*/
- flags = compound_lock_irqsave(page_head);
- if (unlikely(!PageTail(page))) {
- /* __split_huge_page_refcount run before us */
- compound_unlock_irqrestore(page_head, flags);
-skip_lock:
- if (put_page_testzero(page_head))
- __put_single_page(page_head);
-out_put_single:
- if (put_page_testzero(page))
- __put_single_page(page);
- return;
+ VM_BUG_ON_PAGE(!PageHead(page_head), page_head);
+ VM_BUG_ON_PAGE(page_mapcount(page) != 0, page);
+ if (put_page_testzero(page_head)) {
+ /*
+ * If this is the tail of a slab
+ * compound page, the tail pin must
+ * not be the last reference held on
+ * the page, because the PG_slab
+ * cannot be cleared before all tail
+ * pins (which skips the _mapcount
+ * tail refcounting) have been
+ * released. For hugetlbfs the tail
+ * pin may be the last reference on
+ * the page instead, because
+ * PageHeadHuge will not go away until
+ * the compound page enters the buddy
+ * allocator.
+ */
+ VM_BUG_ON_PAGE(PageSlab(page_head), page_head);
+ __put_compound_page(page_head);
}
- VM_BUG_ON(page_head != page->first_page);
+ return;
+ } else
/*
- * We can release the refcount taken by
- * get_page_unless_zero() now that
- * __split_huge_page_refcount() is blocked on
- * the compound_lock.
+			 * __split_huge_page_refcount ran before us,
+ * "page" was a THP tail. The split page_head
+ * has been freed and reallocated as slab or
+ * hugetlbfs page of smaller order (only
+ * possible if reallocated as slab on x86).
*/
- if (put_page_testzero(page_head))
- VM_BUG_ON(1);
- /* __split_huge_page_refcount will wait now */
- VM_BUG_ON(page_mapcount(page) <= 0);
- atomic_dec(&page->_mapcount);
- VM_BUG_ON(atomic_read(&page_head->_count) <= 0);
- VM_BUG_ON(atomic_read(&page->_count) != 0);
- compound_unlock_irqrestore(page_head, flags);
+ goto out_put_single;
+ }
+
+ if (likely(page != page_head && get_page_unless_zero(page_head))) {
+ unsigned long flags;
-skip_lock_tail:
+ /*
+ * page_head wasn't a dangling pointer but it may not
+ * be a head page anymore by the time we obtain the
+ * lock. That is ok as long as it can't be freed from
+ * under us.
+ */
+ flags = compound_lock_irqsave(page_head);
+ if (unlikely(!PageTail(page))) {
+			/* __split_huge_page_refcount ran before us */
+ compound_unlock_irqrestore(page_head, flags);
if (put_page_testzero(page_head)) {
+ /*
+ * The head page may have been freed
+ * and reallocated as a compound page
+ * of smaller order and then freed
+ * again. All we know is that it
+ * cannot have become: a THP page, a
+ * compound page of higher order, a
+ * tail page. That is because we
+ * still hold the refcount of the
+ * split THP tail and page_head was
+ * the THP head before the split.
+ */
if (PageHead(page_head))
__put_compound_page(page_head);
else
__put_single_page(page_head);
}
- } else {
- /* page_head is a dangling pointer */
- VM_BUG_ON(PageTail(page));
- goto out_put_single;
+out_put_single:
+ if (put_page_testzero(page))
+ __put_single_page(page);
+ return;
}
- } else if (put_page_testzero(page)) {
- if (PageHead(page))
- __put_compound_page(page);
- else
- __put_single_page(page);
+ VM_BUG_ON_PAGE(page_head != page->first_page, page);
+ /*
+ * We can release the refcount taken by
+ * get_page_unless_zero() now that
+ * __split_huge_page_refcount() is blocked on the
+ * compound_lock.
+ */
+ if (put_page_testzero(page_head))
+ VM_BUG_ON_PAGE(1, page_head);
+ /* __split_huge_page_refcount will wait now */
+ VM_BUG_ON_PAGE(page_mapcount(page) <= 0, page);
+ atomic_dec(&page->_mapcount);
+ VM_BUG_ON_PAGE(atomic_read(&page_head->_count) <= 0, page_head);
+ VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page);
+ compound_unlock_irqrestore(page_head, flags);
+
+ if (put_page_testzero(page_head)) {
+ if (PageHead(page_head))
+ __put_compound_page(page_head);
+ else
+ __put_single_page(page_head);
+ }
+ } else {
+ /* page_head is a dangling pointer */
+ VM_BUG_ON_PAGE(PageTail(page), page);
+ goto out_put_single;
}
}
@@ -182,22 +252,37 @@ bool __get_page_tail(struct page *page)
* split_huge_page().
*/
unsigned long flags;
- bool got = false;
- struct page *page_head = compound_trans_head(page);
-
- if (likely(page != page_head && get_page_unless_zero(page_head))) {
+ bool got;
+ struct page *page_head = compound_head(page);
- /* Ref to put_compound_page() comment. */
- if (PageSlab(page_head)) {
- if (likely(PageTail(page))) {
- __get_page_tail_foll(page, false);
- return true;
- } else {
- put_page(page_head);
- return false;
- }
+ /* Ref to put_compound_page() comment. */
+ if (!__compound_tail_refcounted(page_head)) {
+ smp_rmb();
+ if (likely(PageTail(page))) {
+ /*
+ * This is a hugetlbfs page or a slab
+ * page. __split_huge_page_refcount
+ * cannot race here.
+ */
+ VM_BUG_ON_PAGE(!PageHead(page_head), page_head);
+ __get_page_tail_foll(page, true);
+ return true;
+ } else {
+ /*
+				 * __split_huge_page_refcount ran
+ * before us, "page" was a THP
+ * tail. The split page_head has been
+ * freed and reallocated as slab or
+ * hugetlbfs page of smaller order
+ * (only possible if reallocated as
+ * slab on x86).
+ */
+ return false;
}
+ }
+ got = false;
+ if (likely(page != page_head && get_page_unless_zero(page_head))) {
/*
* page_head wasn't a dangling pointer but it
* may not be a head page anymore by the time
@@ -384,6 +469,7 @@ static void __activate_page(struct page *page, struct lruvec *lruvec,
SetPageActive(page);
lru += LRU_ACTIVE;
add_page_to_lru_list(page, lruvec, lru);
+ trace_mm_lru_activate(page, page_to_pfn(page));
__count_vm_event(PGACTIVATE);
update_page_reclaim_stat(lruvec, file, 1);
@@ -401,6 +487,11 @@ static void activate_page_drain(int cpu)
pagevec_lru_move_fn(pvec, __activate_page, NULL);
}
+static bool need_activate_page_drain(int cpu)
+{
+ return pagevec_count(&per_cpu(activate_page_pvecs, cpu)) != 0;
+}
+
void activate_page(struct page *page)
{
if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
@@ -418,6 +509,11 @@ static inline void activate_page_drain(int cpu)
{
}
+static bool need_activate_page_drain(int cpu)
+{
+ return false;
+}
+
void activate_page(struct page *page)
{
struct zone *zone = page_zone(page);
@@ -428,6 +524,33 @@ void activate_page(struct page *page)
}
#endif
+static void __lru_cache_activate_page(struct page *page)
+{
+ struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
+ int i;
+
+ /*
+ * Search backwards on the optimistic assumption that the page being
+ * activated has just been added to this pagevec. Note that only
+ * the local pagevec is examined as a !PageLRU page could be in the
+ * process of being released, reclaimed, migrated or on a remote
+ * pagevec that is currently being drained. Furthermore, marking
+ * a remote pagevec's page PageActive potentially hits a race where
+ * a page is marked PageActive just after it is added to the inactive
+ * list causing accounting errors and BUG_ON checks to trigger.
+ */
+ for (i = pagevec_count(pvec) - 1; i >= 0; i--) {
+ struct page *pagevec_page = pvec->pages[i];
+
+ if (pagevec_page == page) {
+ SetPageActive(page);
+ break;
+ }
+ }
+
+ put_cpu_var(lru_add_pvec);
+}
+
/*
* Mark a page as having seen activity.
*
@@ -438,9 +561,21 @@ void activate_page(struct page *page)
void mark_page_accessed(struct page *page)
{
if (!PageActive(page) && !PageUnevictable(page) &&
- PageReferenced(page) && PageLRU(page)) {
- activate_page(page);
+ PageReferenced(page)) {
+
+ /*
+ * If the page is on the LRU, queue it for activation via
+ * activate_page_pvecs. Otherwise, assume the page is on a
+ * pagevec, mark it active and it'll be moved to the active
+ * LRU on the next drain.
+ */
+ if (PageLRU(page))
+ activate_page(page);
+ else
+ __lru_cache_activate_page(page);
ClearPageReferenced(page);
+ if (page_is_file_cache(page))
+ workingset_activation(page);
} else if (!PageReferenced(page)) {
SetPageReferenced(page);
}
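Because pages sitting in lru_add_pvec are not yet PageLRU, mark_page_accessed() can no longer rely on activate_page() alone: if the page is still in the local pagevec it is marked PageActive directly (via __lru_cache_activate_page()) and moved to the active list at drain time. A simplified userspace model of that two-touch promotion, with invented structures and without the unevictable and workingset details:

#include <stdbool.h>
#include <stdio.h>

struct page { bool lru, referenced, active; };

#define PVEC_SIZE 14
static struct page *local_pvec[PVEC_SIZE];
static int pvec_nr;

/* Queue a page the way __lru_cache_add() does: not yet on the LRU. */
static void lru_cache_add(struct page *page)
{
	if (pvec_nr < PVEC_SIZE)
		local_pvec[pvec_nr++] = page;
}

/* Search the local pagevec backwards, like __lru_cache_activate_page(). */
static void activate_in_pagevec(struct page *page)
{
	for (int i = pvec_nr - 1; i >= 0; i--) {
		if (local_pvec[i] == page) {
			page->active = true;
			break;
		}
	}
}

/* First touch marks the page referenced; the second promotes it to active,
 * through the LRU path or by patching it up in the pagevec. */
static void mark_page_accessed(struct page *page)
{
	if (!page->active && page->referenced) {
		if (page->lru)
			page->active = true;	/* stands in for activate_page() */
		else
			activate_in_pagevec(page);
		page->referenced = false;
	} else if (!page->referenced) {
		page->referenced = true;
	}
}

int main(void)
{
	struct page p = { 0 };

	lru_cache_add(&p);
	mark_page_accessed(&p);	/* first touch: referenced */
	mark_page_accessed(&p);	/* second touch: active, while not yet PageLRU */
	printf("referenced=%d active=%d\n", p.referenced, p.active);
	return 0;
}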
@@ -448,42 +583,32 @@ void mark_page_accessed(struct page *page)
EXPORT_SYMBOL(mark_page_accessed);
/*
- * Order of operations is important: flush the pagevec when it's already
- * full, not when adding the last page, to make sure that last page is
- * not added to the LRU directly when passed to this function. Because
- * mark_page_accessed() (called after this when writing) only activates
- * pages that are on the LRU, linear writes in subpage chunks would see
- * every PAGEVEC_SIZE page activated, which is unexpected.
+ * Queue the page for addition to the LRU via pagevec. The decision on whether
+ * to add the page to the [in]active [file|anon] list is deferred until the
+ * pagevec is drained. This gives the caller of __lru_cache_add() a chance to
+ * have the page added to the active list using mark_page_accessed().
*/
-void __lru_cache_add(struct page *page, enum lru_list lru)
+void __lru_cache_add(struct page *page)
{
- struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];
+ struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
page_cache_get(page);
if (!pagevec_space(pvec))
- __pagevec_lru_add(pvec, lru);
+ __pagevec_lru_add(pvec);
pagevec_add(pvec, page);
- put_cpu_var(lru_add_pvecs);
+ put_cpu_var(lru_add_pvec);
}
EXPORT_SYMBOL(__lru_cache_add);
/**
- * lru_cache_add_lru - add a page to a page list
+ * lru_cache_add - add a page to a page list
* @page: the page to be added to the LRU.
- * @lru: the LRU list to which the page is added.
*/
-void lru_cache_add_lru(struct page *page, enum lru_list lru)
+void lru_cache_add(struct page *page)
{
- if (PageActive(page)) {
- VM_BUG_ON(PageUnevictable(page));
- ClearPageActive(page);
- } else if (PageUnevictable(page)) {
- VM_BUG_ON(PageActive(page));
- ClearPageUnevictable(page);
- }
-
- VM_BUG_ON(PageLRU(page) || PageActive(page) || PageUnevictable(page));
- __lru_cache_add(page, lru);
+ VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
+ VM_BUG_ON_PAGE(PageLRU(page), page);
+ __lru_cache_add(page);
}
/**
@@ -503,6 +628,7 @@ void add_page_to_unevictable_list(struct page *page)
spin_lock_irq(&zone->lru_lock);
lruvec = mem_cgroup_page_lruvec(page, zone);
+ ClearPageActive(page);
SetPageUnevictable(page);
SetPageLRU(page);
add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
@@ -583,15 +709,10 @@ static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
*/
void lru_add_drain_cpu(int cpu)
{
- struct pagevec *pvecs = per_cpu(lru_add_pvecs, cpu);
- struct pagevec *pvec;
- int lru;
+ struct pagevec *pvec = &per_cpu(lru_add_pvec, cpu);
- for_each_lru(lru) {
- pvec = &pvecs[lru - LRU_BASE];
- if (pagevec_count(pvec))
- __pagevec_lru_add(pvec, lru);
- }
+ if (pagevec_count(pvec))
+ __pagevec_lru_add(pvec);
pvec = &per_cpu(lru_rotate_pvecs, cpu);
if (pagevec_count(pvec)) {
@@ -647,12 +768,36 @@ static void lru_add_drain_per_cpu(struct work_struct *dummy)
lru_add_drain();
}
-/*
- * Returns 0 for success
- */
-int lru_add_drain_all(void)
+static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
+
+void lru_add_drain_all(void)
{
- return schedule_on_each_cpu(lru_add_drain_per_cpu);
+ static DEFINE_MUTEX(lock);
+ static struct cpumask has_work;
+ int cpu;
+
+ mutex_lock(&lock);
+ get_online_cpus();
+ cpumask_clear(&has_work);
+
+ for_each_online_cpu(cpu) {
+ struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
+
+ if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
+ pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
+ pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
+ need_activate_page_drain(cpu)) {
+ INIT_WORK(work, lru_add_drain_per_cpu);
+ schedule_work_on(cpu, work);
+ cpumask_set_cpu(cpu, &has_work);
+ }
+ }
+
+ for_each_cpu(cpu, &has_work)
+ flush_work(&per_cpu(lru_add_drain_work, cpu));
+
+ put_online_cpus();
+ mutex_unlock(&lock);
}
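The rewritten lru_add_drain_all() above queues the drain work only on CPUs that actually have something pending in one of their pagevecs, and then waits for just those. A threaded userspace sketch of that "poke only the busy CPUs" idea, with pthreads standing in for schedule_work_on()/flush_work() and fake per-CPU counters (build with -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

/* Fake per-CPU pending-page counts, standing in for pagevec_count(). */
static int pending[NR_CPUS] = { 3, 0, 7, 0 };

static void *drain_cpu(void *arg)
{
	int cpu = (int)(long)arg;

	printf("draining cpu %d (%d pages)\n", cpu, pending[cpu]);
	pending[cpu] = 0;
	return NULL;
}

int main(void)
{
	pthread_t worker[NR_CPUS];
	bool has_work[NR_CPUS] = { false };

	/* Queue a worker only where there is something to drain... */
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		if (pending[cpu]) {
			pthread_create(&worker[cpu], NULL, drain_cpu,
				       (void *)(long)cpu);
			has_work[cpu] = true;
		}
	}

	/* ...and wait only for those, like the flush_work() loop. */
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (has_work[cpu])
			pthread_join(worker[cpu], NULL);

	return 0;
}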
/*
@@ -703,11 +848,14 @@ void release_pages(struct page **pages, int nr, int cold)
}
lruvec = mem_cgroup_page_lruvec(page, zone);
- VM_BUG_ON(!PageLRU(page));
+ VM_BUG_ON_PAGE(!PageLRU(page), page);
__ClearPageLRU(page);
del_page_from_lru_list(page, lruvec, page_off_lru(page));
}
+ /* Clear Active bit in case of parallel mark_page_accessed */
+ ClearPageActive(page);
+
list_add(&page->lru, &pages_to_free);
}
if (zone)
@@ -740,33 +888,17 @@ EXPORT_SYMBOL(__pagevec_release);
void lru_add_page_tail(struct page *page, struct page *page_tail,
struct lruvec *lruvec, struct list_head *list)
{
- int uninitialized_var(active);
- enum lru_list lru;
const int file = 0;
- VM_BUG_ON(!PageHead(page));
- VM_BUG_ON(PageCompound(page_tail));
- VM_BUG_ON(PageLRU(page_tail));
+ VM_BUG_ON_PAGE(!PageHead(page), page);
+ VM_BUG_ON_PAGE(PageCompound(page_tail), page);
+ VM_BUG_ON_PAGE(PageLRU(page_tail), page);
VM_BUG_ON(NR_CPUS != 1 &&
!spin_is_locked(&lruvec_zone(lruvec)->lru_lock));
if (!list)
SetPageLRU(page_tail);
- if (page_evictable(page_tail)) {
- if (PageActive(page)) {
- SetPageActive(page_tail);
- active = 1;
- lru = LRU_ACTIVE_ANON;
- } else {
- active = 0;
- lru = LRU_INACTIVE_ANON;
- }
- } else {
- SetPageUnevictable(page_tail);
- lru = LRU_UNEVICTABLE;
- }
-
if (likely(PageLRU(page)))
list_add_tail(&page_tail->lru, &page->lru);
else if (list) {
@@ -782,47 +914,93 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
* Use the standard add function to put page_tail on the list,
* but then correct its position so they all end up in order.
*/
- add_page_to_lru_list(page_tail, lruvec, lru);
+ add_page_to_lru_list(page_tail, lruvec, page_lru(page_tail));
list_head = page_tail->lru.prev;
list_move_tail(&page_tail->lru, list_head);
}
if (!PageUnevictable(page))
- update_page_reclaim_stat(lruvec, file, active);
+ update_page_reclaim_stat(lruvec, file, PageActive(page_tail));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
void *arg)
{
- enum lru_list lru = (enum lru_list)arg;
- int file = is_file_lru(lru);
- int active = is_active_lru(lru);
+ int file = page_is_file_cache(page);
+ int active = PageActive(page);
+ enum lru_list lru = page_lru(page);
- VM_BUG_ON(PageActive(page));
- VM_BUG_ON(PageUnevictable(page));
- VM_BUG_ON(PageLRU(page));
+ VM_BUG_ON_PAGE(PageLRU(page), page);
SetPageLRU(page);
- if (active)
- SetPageActive(page);
add_page_to_lru_list(page, lruvec, lru);
update_page_reclaim_stat(lruvec, file, active);
+ trace_mm_lru_insertion(page, page_to_pfn(page), lru, trace_pagemap_flags(page));
}
/*
* Add the passed pages to the LRU, then drop the caller's refcount
* on them. Reinitialises the caller's pagevec.
*/
-void __pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
+void __pagevec_lru_add(struct pagevec *pvec)
{
- VM_BUG_ON(is_unevictable_lru(lru));
-
- pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, (void *)lru);
+ pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, NULL);
}
EXPORT_SYMBOL(__pagevec_lru_add);
/**
+ * pagevec_lookup_entries - gang pagecache lookup
+ * @pvec: Where the resulting entries are placed
+ * @mapping: The address_space to search
+ * @start: The starting entry index
+ * @nr_entries: The maximum number of entries
+ * @indices: The cache indices corresponding to the entries in @pvec
+ *
+ * pagevec_lookup_entries() will search for and return a group of up
+ * to @nr_entries pages and shadow entries in the mapping. All
+ * entries are placed in @pvec. pagevec_lookup_entries() takes a
+ * reference against actual pages in @pvec.
+ *
+ * The search returns a group of mapping-contiguous entries with
+ * ascending indexes. There may be holes in the indices due to
+ * not-present entries.
+ *
+ * pagevec_lookup_entries() returns the number of entries which were
+ * found.
+ */
+unsigned pagevec_lookup_entries(struct pagevec *pvec,
+ struct address_space *mapping,
+ pgoff_t start, unsigned nr_pages,
+ pgoff_t *indices)
+{
+ pvec->nr = find_get_entries(mapping, start, nr_pages,
+ pvec->pages, indices);
+ return pagevec_count(pvec);
+}
+
+/**
+ * pagevec_remove_exceptionals - pagevec exceptionals pruning
+ * @pvec: The pagevec to prune
+ *
+ * pagevec_lookup_entries() fills both pages and exceptional radix
+ * tree entries into the pagevec. This function prunes all
+ * exceptionals from @pvec without leaving holes, so that it can be
+ * passed on to page-only pagevec operations.
+ */
+void pagevec_remove_exceptionals(struct pagevec *pvec)
+{
+ int i, j;
+
+ for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
+ struct page *page = pvec->pages[i];
+ if (!radix_tree_exceptional_entry(page))
+ pvec->pages[j++] = page;
+ }
+ pvec->nr = j;
+}
+
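pagevec_remove_exceptionals() compacts the pagevec in place, keeping the real page pointers and squeezing out the shadow entries so the result can be handed to page-only helpers. The same in-place filter in standalone form (here an odd value plays the role of an exceptional radix-tree entry; the real tag bits differ):

#include <stdint.h>
#include <stdio.h>

static int is_exceptional(uintptr_t entry)
{
	return entry & 1;	/* model only: odd values are "shadow" entries */
}

int main(void)
{
	uintptr_t entries[8] = { 0x1000, 0x21, 0x2000, 0x3000, 0x41, 0x4000 };
	unsigned nr = 6, i, j;

	/* Shift the keepers down over the pruned slots, then trim the count. */
	for (i = 0, j = 0; i < nr; i++)
		if (!is_exceptional(entries[i]))
			entries[j++] = entries[i];
	nr = j;

	for (i = 0; i < nr; i++)
		printf("entry %u: %#lx\n", i, (unsigned long)entries[i]);
	return 0;
}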
+/**
* pagevec_lookup - gang pagecache lookup
* @pvec: Where the resulting pages are placed
* @mapping: The address_space to search
@@ -864,7 +1042,8 @@ void __init swap_setup(void)
#ifdef CONFIG_SWAP
int i;
- bdi_init(swapper_spaces[0].backing_dev_info);
+ if (bdi_init(swapper_spaces[0].backing_dev_info))
+ panic("Failed to init swap bdi");
for (i = 0; i < MAX_SWAPFILES; i++) {
spin_lock_init(&swapper_spaces[i].tree_lock);
INIT_LIST_HEAD(&swapper_spaces[i].i_mmap_nonlinear);
diff --git a/mm/swap_state.c b/mm/swap_state.c
index f24ab0dff554..e76ace30d436 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -63,6 +63,8 @@ unsigned long total_swapcache_pages(void)
return ret;
}
+static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);
+
void show_swap_cache_info(void)
{
printk("%lu pages in swap cache\n", total_swapcache_pages());
@@ -83,9 +85,9 @@ int __add_to_swap_cache(struct page *page, swp_entry_t entry)
int error;
struct address_space *address_space;
- VM_BUG_ON(!PageLocked(page));
- VM_BUG_ON(PageSwapCache(page));
- VM_BUG_ON(!PageSwapBacked(page));
+ VM_BUG_ON_PAGE(!PageLocked(page), page);
+ VM_BUG_ON_PAGE(PageSwapCache(page), page);
+ VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
page_cache_get(page);
SetPageSwapCache(page);
@@ -122,7 +124,7 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
int error;
- error = radix_tree_preload(gfp_mask);
+ error = radix_tree_maybe_preload(gfp_mask);
if (!error) {
error = __add_to_swap_cache(page, entry);
radix_tree_preload_end();
@@ -139,9 +141,9 @@ void __delete_from_swap_cache(struct page *page)
swp_entry_t entry;
struct address_space *address_space;
- VM_BUG_ON(!PageLocked(page));
- VM_BUG_ON(!PageSwapCache(page));
- VM_BUG_ON(PageWriteback(page));
+ VM_BUG_ON_PAGE(!PageLocked(page), page);
+ VM_BUG_ON_PAGE(!PageSwapCache(page), page);
+ VM_BUG_ON_PAGE(PageWriteback(page), page);
entry.val = page_private(page);
address_space = swap_address_space(entry);
@@ -165,8 +167,8 @@ int add_to_swap(struct page *page, struct list_head *list)
swp_entry_t entry;
int err;
- VM_BUG_ON(!PageLocked(page));
- VM_BUG_ON(!PageUptodate(page));
+ VM_BUG_ON_PAGE(!PageLocked(page), page);
+ VM_BUG_ON_PAGE(!PageUptodate(page), page);
entry = get_swap_page();
if (!entry.val)
@@ -286,8 +288,11 @@ struct page * lookup_swap_cache(swp_entry_t entry)
page = find_get_page(swap_address_space(entry), entry.val);
- if (page)
+ if (page) {
INC_CACHE_INFO(find_success);
+ if (TestClearPageReadahead(page))
+ atomic_inc(&swapin_readahead_hits);
+ }
INC_CACHE_INFO(find_total);
return page;
@@ -328,7 +333,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
/*
* call radix_tree_preload() while we can wait.
*/
- err = radix_tree_preload(gfp_mask & GFP_KERNEL);
+ err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
if (err)
break;
@@ -389,6 +394,50 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
return found_page;
}
+static unsigned long swapin_nr_pages(unsigned long offset)
+{
+ static unsigned long prev_offset;
+ unsigned int pages, max_pages, last_ra;
+ static atomic_t last_readahead_pages;
+
+ max_pages = 1 << ACCESS_ONCE(page_cluster);
+ if (max_pages <= 1)
+ return 1;
+
+ /*
+ * This heuristic has been found to work well on both sequential and
+ * random loads, swapping to hard disk or to SSD: please don't ask
+ * what the "+ 2" means, it just happens to work well, that's all.
+ */
+ pages = atomic_xchg(&swapin_readahead_hits, 0) + 2;
+ if (pages == 2) {
+ /*
+ * We can have no readahead hits to judge by: but must not get
+ * stuck here forever, so check for an adjacent offset instead
+ * (and don't even bother to check whether swap type is same).
+ */
+ if (offset != prev_offset + 1 && offset != prev_offset - 1)
+ pages = 1;
+ prev_offset = offset;
+ } else {
+ unsigned int roundup = 4;
+ while (roundup < pages)
+ roundup <<= 1;
+ pages = roundup;
+ }
+
+ if (pages > max_pages)
+ pages = max_pages;
+
+ /* Don't shrink readahead too fast */
+ last_ra = atomic_read(&last_readahead_pages) / 2;
+ if (pages < last_ra)
+ pages = last_ra;
+ atomic_set(&last_readahead_pages, pages);
+
+ return pages;
+}
+
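swapin_nr_pages() above turns recent readahead hits into the next window size: hits + 2, rounded up to a power of two (minimum 4), capped at 1 << page_cluster, and never dropping below half of the previous window; with no hits it only keeps reading ahead when the fault is adjacent to the previous one. The same heuristic in standalone form, with the kernel's static state passed in as parameters:

#include <stdio.h>

static unsigned int model_swapin_nr_pages(unsigned int hits,
					  unsigned int last_ra,
					  unsigned long prev_offset,
					  unsigned long offset,
					  unsigned int page_cluster)
{
	unsigned int pages, max_pages = 1u << page_cluster;

	if (max_pages <= 1)
		return 1;

	pages = hits + 2;
	if (pages == 2) {
		/* No hits to judge by: only read ahead next to the last fault. */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;
	if (pages < last_ra / 2)	/* don't shrink readahead too fast */
		pages = last_ra / 2;
	return pages;
}

int main(void)
{
	/* page_cluster = 3 caps the window at 8 pages in all three cases. */
	printf("%u\n", model_swapin_nr_pages(0, 8, 100, 101, 3)); /* sequential, no hits */
	printf("%u\n", model_swapin_nr_pages(5, 2, 100, 500, 3)); /* 5 readahead hits */
	printf("%u\n", model_swapin_nr_pages(0, 8, 100, 500, 3)); /* random, no hits */
	return 0;
}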
/**
* swapin_readahead - swap in pages in hope we need them soon
* @entry: swap entry of this memory
@@ -412,11 +461,16 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
struct vm_area_struct *vma, unsigned long addr)
{
struct page *page;
- unsigned long offset = swp_offset(entry);
+ unsigned long entry_offset = swp_offset(entry);
+ unsigned long offset = entry_offset;
unsigned long start_offset, end_offset;
- unsigned long mask = (1UL << page_cluster) - 1;
+ unsigned long mask;
struct blk_plug plug;
+ mask = swapin_nr_pages(offset) - 1;
+ if (!mask)
+ goto skip;
+
/* Read a page_cluster sized and aligned cluster around offset. */
start_offset = offset & ~mask;
end_offset = offset | mask;
@@ -430,10 +484,13 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
gfp_mask, vma, addr);
if (!page)
continue;
+ if (offset != entry_offset)
+ SetPageReadahead(page);
page_cache_release(page);
}
blk_finish_plug(&plug);
lru_add_drain(); /* Push any new pages onto the LRU now */
+skip:
return read_swap_cache_async(entry, gfp_mask, vma, addr);
}
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 746af55b8455..4a7f7e6992b6 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -175,14 +175,296 @@ static void discard_swap_cluster(struct swap_info_struct *si,
}
}
-static int wait_for_discard(void *word)
+#define SWAPFILE_CLUSTER 256
+#define LATENCY_LIMIT 256
+
+static inline void cluster_set_flag(struct swap_cluster_info *info,
+ unsigned int flag)
{
- schedule();
- return 0;
+ info->flags = flag;
}
-#define SWAPFILE_CLUSTER 256
-#define LATENCY_LIMIT 256
+static inline unsigned int cluster_count(struct swap_cluster_info *info)
+{
+ return info->data;
+}
+
+static inline void cluster_set_count(struct swap_cluster_info *info,
+ unsigned int c)
+{
+ info->data = c;
+}
+
+static inline void cluster_set_count_flag(struct swap_cluster_info *info,
+ unsigned int c, unsigned int f)
+{
+ info->flags = f;
+ info->data = c;
+}
+
+static inline unsigned int cluster_next(struct swap_cluster_info *info)
+{
+ return info->data;
+}
+
+static inline void cluster_set_next(struct swap_cluster_info *info,
+ unsigned int n)
+{
+ info->data = n;
+}
+
+static inline void cluster_set_next_flag(struct swap_cluster_info *info,
+ unsigned int n, unsigned int f)
+{
+ info->flags = f;
+ info->data = n;
+}
+
+static inline bool cluster_is_free(struct swap_cluster_info *info)
+{
+ return info->flags & CLUSTER_FLAG_FREE;
+}
+
+static inline bool cluster_is_null(struct swap_cluster_info *info)
+{
+ return info->flags & CLUSTER_FLAG_NEXT_NULL;
+}
+
+static inline void cluster_set_null(struct swap_cluster_info *info)
+{
+ info->flags = CLUSTER_FLAG_NEXT_NULL;
+ info->data = 0;
+}
+
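The accessors above exist because swap_cluster_info packs two things together: a flags field and a data field that holds either the cluster's usage count or, while the cluster sits on the free or discard list, the index of the next cluster on that list. A userspace mock of the encoding and of threading a list through the array by index (field widths and names simplified):

#include <stdbool.h>
#include <stdio.h>

#define FLAG_FREE	1u
#define FLAG_NEXT_NULL	2u
#define NR_CLUSTERS	8

/* Same shape as swap_cluster_info: flags plus a dual-purpose data field. */
struct cluster {
	unsigned int flags;
	unsigned int data;	/* usage count, or next index while on a list */
};

static void cluster_set_next_flag(struct cluster *c, unsigned int n, unsigned int f)
{
	c->flags = f;
	c->data = n;
}

static bool cluster_is_null(const struct cluster *c)
{
	return c->flags & FLAG_NEXT_NULL;
}

static void cluster_set_null(struct cluster *c)
{
	c->flags = FLAG_NEXT_NULL;
	c->data = 0;
}

int main(void)
{
	struct cluster info[NR_CLUSTERS] = { { 0 } };
	struct cluster head, tail;
	unsigned int to_free[] = { 2, 5, 7 };

	cluster_set_null(&head);
	cluster_set_null(&tail);

	/* Push clusters onto the "free" list: the same head/tail dance as
	 * above, with next pointers threaded through info[] by index. */
	for (int i = 0; i < 3; i++) {
		unsigned int idx = to_free[i];

		info[idx].flags = FLAG_FREE;
		if (cluster_is_null(&head)) {
			cluster_set_next_flag(&head, idx, 0);
			cluster_set_next_flag(&tail, idx, 0);
		} else {
			info[tail.data].data = idx;	/* cluster_set_next() */
			cluster_set_next_flag(&tail, idx, 0);
		}
	}

	/* Walk the list from the head. */
	for (unsigned int idx = head.data; ; idx = info[idx].data) {
		printf("free cluster %u\n", idx);
		if (idx == tail.data)
			break;
	}
	return 0;
}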
+/* Add a cluster to discard list and schedule it to do discard */
+static void swap_cluster_schedule_discard(struct swap_info_struct *si,
+ unsigned int idx)
+{
+ /*
+ * If scan_swap_map() can't find a free cluster, it will check
+ * si->swap_map directly. To make sure the discarding cluster isn't
+ * taken by scan_swap_map(), mark the swap entries bad (occupied). They
+ * will be cleared after the discard completes.
+ */
+ memset(si->swap_map + idx * SWAPFILE_CLUSTER,
+ SWAP_MAP_BAD, SWAPFILE_CLUSTER);
+
+ if (cluster_is_null(&si->discard_cluster_head)) {
+ cluster_set_next_flag(&si->discard_cluster_head,
+ idx, 0);
+ cluster_set_next_flag(&si->discard_cluster_tail,
+ idx, 0);
+ } else {
+ unsigned int tail = cluster_next(&si->discard_cluster_tail);
+ cluster_set_next(&si->cluster_info[tail], idx);
+ cluster_set_next_flag(&si->discard_cluster_tail,
+ idx, 0);
+ }
+
+ schedule_work(&si->discard_work);
+}
+
+/*
+ * Do the scheduled discards. After a cluster discard is finished, the cluster
+ * will be added to the free cluster list. The caller should hold si->lock.
+ */
+static void swap_do_scheduled_discard(struct swap_info_struct *si)
+{
+ struct swap_cluster_info *info;
+ unsigned int idx;
+
+ info = si->cluster_info;
+
+ while (!cluster_is_null(&si->discard_cluster_head)) {
+ idx = cluster_next(&si->discard_cluster_head);
+
+ cluster_set_next_flag(&si->discard_cluster_head,
+ cluster_next(&info[idx]), 0);
+ if (cluster_next(&si->discard_cluster_tail) == idx) {
+ cluster_set_null(&si->discard_cluster_head);
+ cluster_set_null(&si->discard_cluster_tail);
+ }
+ spin_unlock(&si->lock);
+
+ discard_swap_cluster(si, idx * SWAPFILE_CLUSTER,
+ SWAPFILE_CLUSTER);
+
+ spin_lock(&si->lock);
+ cluster_set_flag(&info[idx], CLUSTER_FLAG_FREE);
+ if (cluster_is_null(&si->free_cluster_head)) {
+ cluster_set_next_flag(&si->free_cluster_head,
+ idx, 0);
+ cluster_set_next_flag(&si->free_cluster_tail,
+ idx, 0);
+ } else {
+ unsigned int tail;
+
+ tail = cluster_next(&si->free_cluster_tail);
+ cluster_set_next(&info[tail], idx);
+ cluster_set_next_flag(&si->free_cluster_tail,
+ idx, 0);
+ }
+ memset(si->swap_map + idx * SWAPFILE_CLUSTER,
+ 0, SWAPFILE_CLUSTER);
+ }
+}
+
+static void swap_discard_work(struct work_struct *work)
+{
+ struct swap_info_struct *si;
+
+ si = container_of(work, struct swap_info_struct, discard_work);
+
+ spin_lock(&si->lock);
+ swap_do_scheduled_discard(si);
+ spin_unlock(&si->lock);
+}
+
+/*
+ * The cluster corresponding to page_nr will be used. The cluster will be
+ * removed from the free cluster list and its usage counter will be increased.
+ */
+static void inc_cluster_info_page(struct swap_info_struct *p,
+ struct swap_cluster_info *cluster_info, unsigned long page_nr)
+{
+ unsigned long idx = page_nr / SWAPFILE_CLUSTER;
+
+ if (!cluster_info)
+ return;
+ if (cluster_is_free(&cluster_info[idx])) {
+ VM_BUG_ON(cluster_next(&p->free_cluster_head) != idx);
+ cluster_set_next_flag(&p->free_cluster_head,
+ cluster_next(&cluster_info[idx]), 0);
+ if (cluster_next(&p->free_cluster_tail) == idx) {
+ cluster_set_null(&p->free_cluster_tail);
+ cluster_set_null(&p->free_cluster_head);
+ }
+ cluster_set_count_flag(&cluster_info[idx], 0, 0);
+ }
+
+ VM_BUG_ON(cluster_count(&cluster_info[idx]) >= SWAPFILE_CLUSTER);
+ cluster_set_count(&cluster_info[idx],
+ cluster_count(&cluster_info[idx]) + 1);
+}
+
+/*
+ * The cluster corresponding to page_nr drops one usage. If the usage
+ * counter becomes 0, meaning no page in the cluster is in use, we can
+ * optionally discard the cluster and add it to the free cluster list.
+ */
+static void dec_cluster_info_page(struct swap_info_struct *p,
+ struct swap_cluster_info *cluster_info, unsigned long page_nr)
+{
+ unsigned long idx = page_nr / SWAPFILE_CLUSTER;
+
+ if (!cluster_info)
+ return;
+
+ VM_BUG_ON(cluster_count(&cluster_info[idx]) == 0);
+ cluster_set_count(&cluster_info[idx],
+ cluster_count(&cluster_info[idx]) - 1);
+
+ if (cluster_count(&cluster_info[idx]) == 0) {
+ /*
+		 * If the swap is discardable, schedule a discard of the cluster
+		 * instead of freeing it immediately. The cluster will be freed
+ * after discard.
+ */
+ if ((p->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) ==
+ (SWP_WRITEOK | SWP_PAGE_DISCARD)) {
+ swap_cluster_schedule_discard(p, idx);
+ return;
+ }
+
+ cluster_set_flag(&cluster_info[idx], CLUSTER_FLAG_FREE);
+ if (cluster_is_null(&p->free_cluster_head)) {
+ cluster_set_next_flag(&p->free_cluster_head, idx, 0);
+ cluster_set_next_flag(&p->free_cluster_tail, idx, 0);
+ } else {
+ unsigned int tail = cluster_next(&p->free_cluster_tail);
+ cluster_set_next(&cluster_info[tail], idx);
+ cluster_set_next_flag(&p->free_cluster_tail, idx, 0);
+ }
+ }
+}
+
+/*
+ * It's possible for scan_swap_map() to use a free cluster in the middle of the
+ * free cluster list. Avoid such abuse to prevent list corruption.
+ */
+static bool
+scan_swap_map_ssd_cluster_conflict(struct swap_info_struct *si,
+ unsigned long offset)
+{
+ struct percpu_cluster *percpu_cluster;
+ bool conflict;
+
+ offset /= SWAPFILE_CLUSTER;
+ conflict = !cluster_is_null(&si->free_cluster_head) &&
+ offset != cluster_next(&si->free_cluster_head) &&
+ cluster_is_free(&si->cluster_info[offset]);
+
+ if (!conflict)
+ return false;
+
+ percpu_cluster = this_cpu_ptr(si->percpu_cluster);
+ cluster_set_null(&percpu_cluster->index);
+ return true;
+}
+
+/*
+ * Try to get a swap entry from the current cpu's swap entry pool (a cluster).
+ * This might involve allocating a new cluster for the current CPU too.
+ */
+static void scan_swap_map_try_ssd_cluster(struct swap_info_struct *si,
+ unsigned long *offset, unsigned long *scan_base)
+{
+ struct percpu_cluster *cluster;
+ bool found_free;
+ unsigned long tmp;
+
+new_cluster:
+ cluster = this_cpu_ptr(si->percpu_cluster);
+ if (cluster_is_null(&cluster->index)) {
+ if (!cluster_is_null(&si->free_cluster_head)) {
+ cluster->index = si->free_cluster_head;
+ cluster->next = cluster_next(&cluster->index) *
+ SWAPFILE_CLUSTER;
+ } else if (!cluster_is_null(&si->discard_cluster_head)) {
+ /*
+			 * We have no free clusters, but some clusters are
+			 * waiting to be discarded; do the discard now and
+			 * reclaim them.
+ */
+ swap_do_scheduled_discard(si);
+ *scan_base = *offset = si->cluster_next;
+ goto new_cluster;
+ } else
+ return;
+ }
+
+ found_free = false;
+
+ /*
+ * Other CPUs can use our cluster if they can't find a free cluster,
+	 * so check whether there is still a free entry in the cluster.
+ */
+ tmp = cluster->next;
+ while (tmp < si->max && tmp < (cluster_next(&cluster->index) + 1) *
+ SWAPFILE_CLUSTER) {
+ if (!si->swap_map[tmp]) {
+ found_free = true;
+ break;
+ }
+ tmp++;
+ }
+ if (!found_free) {
+ cluster_set_null(&cluster->index);
+ goto new_cluster;
+ }
+ cluster->next = tmp + 1;
+ *offset = tmp;
+ *scan_base = tmp;
+}
static unsigned long scan_swap_map(struct swap_info_struct *si,
unsigned char usage)
@@ -191,7 +473,6 @@ static unsigned long scan_swap_map(struct swap_info_struct *si,
unsigned long scan_base;
unsigned long last_in_cluster = 0;
int latency_ration = LATENCY_LIMIT;
- int found_free_cluster = 0;
/*
* We try to cluster swap pages by allocating them sequentially
@@ -207,24 +488,18 @@ static unsigned long scan_swap_map(struct swap_info_struct *si,
si->flags += SWP_SCANNING;
scan_base = offset = si->cluster_next;
+ /* SSD algorithm */
+ if (si->cluster_info) {
+ scan_swap_map_try_ssd_cluster(si, &offset, &scan_base);
+ goto checks;
+ }
+
if (unlikely(!si->cluster_nr--)) {
if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) {
si->cluster_nr = SWAPFILE_CLUSTER - 1;
goto checks;
}
- if (si->flags & SWP_DISCARDABLE) {
- /*
- * Start range check on racing allocations, in case
- * they overlap the cluster we eventually decide on
- * (we scan without swap_lock to allow preemption).
- * It's hardly conceivable that cluster_nr could be
- * wrapped during our scan, but don't depend on it.
- */
- if (si->lowest_alloc)
- goto checks;
- si->lowest_alloc = si->max;
- si->highest_alloc = 0;
- }
+
spin_unlock(&si->lock);
/*
@@ -248,7 +523,6 @@ static unsigned long scan_swap_map(struct swap_info_struct *si,
offset -= SWAPFILE_CLUSTER - 1;
si->cluster_next = offset;
si->cluster_nr = SWAPFILE_CLUSTER - 1;
- found_free_cluster = 1;
goto checks;
}
if (unlikely(--latency_ration < 0)) {
@@ -269,7 +543,6 @@ static unsigned long scan_swap_map(struct swap_info_struct *si,
offset -= SWAPFILE_CLUSTER - 1;
si->cluster_next = offset;
si->cluster_nr = SWAPFILE_CLUSTER - 1;
- found_free_cluster = 1;
goto checks;
}
if (unlikely(--latency_ration < 0)) {
@@ -281,10 +554,13 @@ static unsigned long scan_swap_map(struct swap_info_struct *si,
offset = scan_base;
spin_lock(&si->lock);
si->cluster_nr = SWAPFILE_CLUSTER - 1;
- si->lowest_alloc = 0;
}
checks:
+ if (si->cluster_info) {
+ while (scan_swap_map_ssd_cluster_conflict(si, offset))
+ scan_swap_map_try_ssd_cluster(si, &offset, &scan_base);
+ }
if (!(si->flags & SWP_WRITEOK))
goto no_page;
if (!si->highest_bit)
@@ -317,62 +593,10 @@ checks:
si->highest_bit = 0;
}
si->swap_map[offset] = usage;
+ inc_cluster_info_page(si, si->cluster_info, offset);
si->cluster_next = offset + 1;
si->flags -= SWP_SCANNING;
- if (si->lowest_alloc) {
- /*
- * Only set when SWP_DISCARDABLE, and there's a scan
- * for a free cluster in progress or just completed.
- */
- if (found_free_cluster) {
- /*
- * To optimize wear-levelling, discard the
- * old data of the cluster, taking care not to
- * discard any of its pages that have already
- * been allocated by racing tasks (offset has
- * already stepped over any at the beginning).
- */
- if (offset < si->highest_alloc &&
- si->lowest_alloc <= last_in_cluster)
- last_in_cluster = si->lowest_alloc - 1;
- si->flags |= SWP_DISCARDING;
- spin_unlock(&si->lock);
-
- if (offset < last_in_cluster)
- discard_swap_cluster(si, offset,
- last_in_cluster - offset + 1);
-
- spin_lock(&si->lock);
- si->lowest_alloc = 0;
- si->flags &= ~SWP_DISCARDING;
-
- smp_mb(); /* wake_up_bit advises this */
- wake_up_bit(&si->flags, ilog2(SWP_DISCARDING));
-
- } else if (si->flags & SWP_DISCARDING) {
- /*
- * Delay using pages allocated by racing tasks
- * until the whole discard has been issued. We
- * could defer that delay until swap_writepage,
- * but it's easier to keep this self-contained.
- */
- spin_unlock(&si->lock);
- wait_on_bit(&si->flags, ilog2(SWP_DISCARDING),
- wait_for_discard, TASK_UNINTERRUPTIBLE);
- spin_lock(&si->lock);
- } else {
- /*
- * Note pages allocated by racing tasks while
- * scan for a free cluster is in progress, so
- * that its final discard can exclude them.
- */
- if (offset < si->lowest_alloc)
- si->lowest_alloc = offset;
- if (offset > si->highest_alloc)
- si->highest_alloc = offset;
- }
- }
return offset;
scan:
@@ -392,7 +616,7 @@ scan:
}
}
offset = si->lowest_bit;
- while (++offset < scan_base) {
+ while (offset < scan_base) {
if (!si->swap_map[offset]) {
spin_lock(&si->lock);
goto checks;
@@ -405,6 +629,7 @@ scan:
cond_resched();
latency_ration = LATENCY_LIMIT;
}
+ offset++;
}
spin_lock(&si->lock);
@@ -483,7 +708,7 @@ noswap:
return (swp_entry_t) {0};
}
-/* The only caller of this function is now susupend routine */
+/* The only caller of this function is now suspend routine */
swp_entry_t get_swap_page_of_type(int type)
{
struct swap_info_struct *si;
@@ -527,16 +752,16 @@ static struct swap_info_struct *swap_info_get(swp_entry_t entry)
return p;
bad_free:
- printk(KERN_ERR "swap_free: %s%08lx\n", Unused_offset, entry.val);
+ pr_err("swap_free: %s%08lx\n", Unused_offset, entry.val);
goto out;
bad_offset:
- printk(KERN_ERR "swap_free: %s%08lx\n", Bad_offset, entry.val);
+ pr_err("swap_free: %s%08lx\n", Bad_offset, entry.val);
goto out;
bad_device:
- printk(KERN_ERR "swap_free: %s%08lx\n", Unused_file, entry.val);
+ pr_err("swap_free: %s%08lx\n", Unused_file, entry.val);
goto out;
bad_nofile:
- printk(KERN_ERR "swap_free: %s%08lx\n", Bad_file, entry.val);
+ pr_err("swap_free: %s%08lx\n", Bad_file, entry.val);
out:
return NULL;
}
@@ -600,6 +825,7 @@ static unsigned char swap_entry_free(struct swap_info_struct *p,
/* free if no reference */
if (!usage) {
+ dec_cluster_info_page(p, p->cluster_info, offset);
if (offset < p->lowest_bit)
p->lowest_bit = offset;
if (offset > p->highest_bit)
@@ -620,7 +846,7 @@ static unsigned char swap_entry_free(struct swap_info_struct *p,
}
/*
- * Caller has made sure that the swapdevice corresponding to entry
+ * Caller has made sure that the swap device corresponding to entry
* is still around or has not been recycled.
*/
void swap_free(swp_entry_t entry)
@@ -681,7 +907,7 @@ int reuse_swap_page(struct page *page)
{
int count;
- VM_BUG_ON(!PageLocked(page));
+ VM_BUG_ON_PAGE(!PageLocked(page), page);
if (unlikely(PageKsm(page)))
return 0;
count = page_mapcount(page);
@@ -701,7 +927,7 @@ int reuse_swap_page(struct page *page)
*/
int try_to_free_swap(struct page *page)
{
- VM_BUG_ON(!PageLocked(page));
+ VM_BUG_ON_PAGE(!PageLocked(page), page);
if (!PageSwapCache(page))
return 0;
@@ -722,7 +948,7 @@ int try_to_free_swap(struct page *page)
* original page might be freed under memory pressure, then
* later read back in from swap, now with the wrong data.
*
- * Hibration suspends storage while it is writing the image
+ * Hibernation suspends storage while it is writing the image
* to disk so check that here.
*/
if (pm_suspended_storage())
@@ -866,6 +1092,21 @@ unsigned int count_swap_pages(int type, int free)
}
#endif /* CONFIG_HIBERNATION */
+static inline int maybe_same_pte(pte_t pte, pte_t swp_pte)
+{
+#ifdef CONFIG_MEM_SOFT_DIRTY
+ /*
+	 * When the pte keeps the soft dirty bit, the pte generated
+	 * from the swap entry does not have it; still it is the same
+	 * pte from a logical point of view.
+ */
+ pte_t swp_pte_dirty = pte_swp_mksoft_dirty(swp_pte);
+ return pte_same(pte, swp_pte) || pte_same(pte, swp_pte_dirty);
+#else
+ return pte_same(pte, swp_pte);
+#endif
+}
+
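The helper just added treats two ptes as equal when they differ only in the soft-dirty bit. As a rough standalone illustration of that "equal up to one flag bit" comparison (the bit position below is made up; the real one is arch-specific):

#include <assert.h>
#include <stdint.h>

/* Hypothetical stand-in for the arch-specific soft-dirty swap bit. */
#define SWP_SOFT_DIRTY	(1ULL << 58)

/* Equal if the values match outright, or match once the soft-dirty
 * bit has been folded into the swap-derived value. */
static int maybe_same(uint64_t pte, uint64_t swp_pte)
{
	return pte == swp_pte || pte == (swp_pte | SWP_SOFT_DIRTY);
}

int main(void)
{
	uint64_t swp = 0x1234;

	assert(maybe_same(swp, swp));			/* identical */
	assert(maybe_same(swp | SWP_SOFT_DIRTY, swp));	/* differs only in soft-dirty */
	assert(!maybe_same(swp ^ 1, swp));		/* genuinely different */
	return 0;
}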
/*
* No need to decide whether this PTE shares the swap entry with others,
* just let do_wp_page work it out if a write is requested later - to
@@ -892,7 +1133,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
}
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
- if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) {
+ if (unlikely(!maybe_same_pte(*pte, swp_entry_to_pte(entry)))) {
mem_cgroup_cancel_charge_swapin(memcg);
ret = 0;
goto out;
@@ -939,7 +1180,7 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
* some architectures (e.g. x86_32 with PAE) we might catch a glimpse
* of unmatched parts which look like swp_pte, so unuse_pte must
* recheck under pte lock. Scanning without pte lock lets it be
- * preemptible whenever CONFIG_PREEMPT but not CONFIG_HIGHPTE.
+ * preemptable whenever CONFIG_PREEMPT but not CONFIG_HIGHPTE.
*/
pte = pte_offset_map(pmd, addr);
do {
@@ -947,7 +1188,7 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
* swapoff spends a _lot_ of time in this loop!
* Test inline before going to call unuse_pte.
*/
- if (unlikely(pte_same(*pte, swp_pte))) {
+ if (unlikely(maybe_same_pte(*pte, swp_pte))) {
pte_unmap(pte);
ret = unuse_pte(vma, pmd, addr, entry, page);
if (ret)
@@ -1092,7 +1333,7 @@ static unsigned int find_next_to_unuse(struct swap_info_struct *si,
else
continue;
}
- count = si->swap_map[i];
+ count = ACCESS_ONCE(si->swap_map[i]);
if (count && swap_count(count) != SWAP_MAP_BAD)
break;
}
@@ -1112,7 +1353,11 @@ int try_to_unuse(unsigned int type, bool frontswap,
{
struct swap_info_struct *si = swap_info[type];
struct mm_struct *start_mm;
- unsigned char *swap_map;
+ volatile unsigned char *swap_map; /* swap_map is accessed without
+ * locking. Mark it as volatile
+					   * to prevent the compiler from
+					   * doing something odd.
+ */
unsigned char swcount;
struct page *page;
swp_entry_t entry;
@@ -1163,7 +1408,15 @@ int try_to_unuse(unsigned int type, bool frontswap,
* reused since sys_swapoff() already disabled
* allocation from here, or alloc_page() failed.
*/
- if (!*swap_map)
+ swcount = *swap_map;
+ /*
+		 * We don't hold the lock here, so the swap entry could be
+		 * SWAP_MAP_BAD (when the cluster is discarding).
+		 * Instead of failing out, we can just skip the swap
+		 * entry because swapoff will wait for the discard to
+		 * finish anyway.
+ */
+ if (!swcount || swcount == SWAP_MAP_BAD)
continue;
retval = -ENOMEM;
break;
@@ -1509,7 +1762,8 @@ static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
}
static void _enable_swap_info(struct swap_info_struct *p, int prio,
- unsigned char *swap_map)
+ unsigned char *swap_map,
+ struct swap_cluster_info *cluster_info)
{
int i, prev;
@@ -1518,6 +1772,7 @@ static void _enable_swap_info(struct swap_info_struct *p, int prio,
else
p->prio = --least_priority;
p->swap_map = swap_map;
+ p->cluster_info = cluster_info;
p->flags |= SWP_WRITEOK;
atomic_long_add(p->pages, &nr_swap_pages);
total_swap_pages += p->pages;
@@ -1538,12 +1793,13 @@ static void _enable_swap_info(struct swap_info_struct *p, int prio,
static void enable_swap_info(struct swap_info_struct *p, int prio,
unsigned char *swap_map,
+ struct swap_cluster_info *cluster_info,
unsigned long *frontswap_map)
{
frontswap_init(p->type, frontswap_map);
spin_lock(&swap_lock);
spin_lock(&p->lock);
- _enable_swap_info(p, prio, swap_map);
+ _enable_swap_info(p, prio, swap_map, cluster_info);
spin_unlock(&p->lock);
spin_unlock(&swap_lock);
}
@@ -1552,7 +1808,7 @@ static void reinsert_swap_info(struct swap_info_struct *p)
{
spin_lock(&swap_lock);
spin_lock(&p->lock);
- _enable_swap_info(p, p->prio, p->swap_map);
+ _enable_swap_info(p, p->prio, p->swap_map, p->cluster_info);
spin_unlock(&p->lock);
spin_unlock(&swap_lock);
}
@@ -1561,6 +1817,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
{
struct swap_info_struct *p = NULL;
unsigned char *swap_map;
+ struct swap_cluster_info *cluster_info;
unsigned long *frontswap_map;
struct file *swap_file, *victim;
struct address_space *mapping;
@@ -1568,6 +1825,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
struct filename *pathname;
int i, type, prev;
int err;
+ unsigned int old_block_size;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -1636,6 +1894,8 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
goto out_dput;
}
+ flush_work(&p->discard_work);
+
destroy_swap_extents(p);
if (p->flags & SWP_CONTINUED)
free_swap_count_continuations(p);
@@ -1656,26 +1916,31 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
}
swap_file = p->swap_file;
+ old_block_size = p->old_block_size;
p->swap_file = NULL;
p->max = 0;
swap_map = p->swap_map;
p->swap_map = NULL;
- p->flags = 0;
+ cluster_info = p->cluster_info;
+ p->cluster_info = NULL;
frontswap_map = frontswap_map_get(p);
- frontswap_map_set(p, NULL);
spin_unlock(&p->lock);
spin_unlock(&swap_lock);
frontswap_invalidate_area(type);
+ frontswap_map_set(p, NULL);
mutex_unlock(&swapon_mutex);
+ free_percpu(p->percpu_cluster);
+ p->percpu_cluster = NULL;
vfree(swap_map);
+ vfree(cluster_info);
vfree(frontswap_map);
- /* Destroy swap account informatin */
+ /* Destroy swap account information */
swap_cgroup_swapoff(type);
inode = mapping->host;
if (S_ISBLK(inode->i_mode)) {
struct block_device *bdev = I_BDEV(inode);
- set_blocksize(bdev, p->old_block_size);
+ set_blocksize(bdev, old_block_size);
blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
} else {
mutex_lock(&inode->i_mutex);
@@ -1683,6 +1948,16 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
mutex_unlock(&inode->i_mutex);
}
filp_close(swap_file, NULL);
+
+ /*
+ * Clear the SWP_USED flag after all resources are freed so that swapon
+ * can reuse this swap_info in alloc_swap_info() safely. It is ok to
+ * not hold p->lock after we cleared its SWP_WRITEOK.
+ */
+ spin_lock(&swap_lock);
+ p->flags = 0;
+ spin_unlock(&swap_lock);
+
err = 0;
atomic_inc(&proc_poll_event);
wake_up_interruptible(&proc_poll_wait);
@@ -1911,9 +2186,10 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
int i;
unsigned long maxpages;
unsigned long swapfilepages;
+ unsigned long last_page;
if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) {
- printk(KERN_ERR "Unable to find swap-space signature\n");
+ pr_err("Unable to find swap-space signature\n");
return 0;
}
@@ -1927,9 +2203,8 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
}
/* Check the swap header's sub-version */
if (swap_header->info.version != 1) {
- printk(KERN_WARNING
- "Unable to handle swap header version %d\n",
- swap_header->info.version);
+ pr_warn("Unable to handle swap header version %d\n",
+ swap_header->info.version);
return 0;
}
@@ -1953,8 +2228,14 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
*/
maxpages = swp_offset(pte_to_swp_entry(
swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
- if (maxpages > swap_header->info.last_page) {
- maxpages = swap_header->info.last_page + 1;
+ last_page = swap_header->info.last_page;
+ if (last_page > maxpages) {
+ pr_warn("Truncating oversized swap area, only using %luk out of %luk\n",
+ maxpages << (PAGE_SHIFT - 10),
+ last_page << (PAGE_SHIFT - 10));
+ }
+ if (maxpages > last_page) {
+ maxpages = last_page + 1;
/* p->max is an unsigned int: don't overflow it */
if ((unsigned int)maxpages == 0)
maxpages = UINT_MAX;
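The new warning reports sizes in kilobytes by shifting page counts by PAGE_SHIFT - 10. A small standalone sketch of that conversion and of the truncation decision, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed: 4 KiB pages */

int main(void)
{
	unsigned long maxpages  = 1UL << 20;		/* what the swap entry format can address */
	unsigned long last_page = (1UL << 20) + 4096;	/* what the swap header claims */

	if (last_page > maxpages)
		printf("Truncating oversized swap area, only using %luk out of %luk\n",
		       maxpages << (PAGE_SHIFT - 10),
		       last_page << (PAGE_SHIFT - 10));

	if (maxpages > last_page)
		maxpages = last_page + 1;

	printf("usable pages: %lu\n", maxpages);
	return 0;
}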
@@ -1965,8 +2246,7 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
return 0;
swapfilepages = i_size_read(inode) >> PAGE_SHIFT;
if (swapfilepages && maxpages > swapfilepages) {
- printk(KERN_WARNING
- "Swap area shorter than signature indicates\n");
+ pr_warn("Swap area shorter than signature indicates\n");
return 0;
}
if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
@@ -1980,15 +2260,23 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
static int setup_swap_map_and_extents(struct swap_info_struct *p,
union swap_header *swap_header,
unsigned char *swap_map,
+ struct swap_cluster_info *cluster_info,
unsigned long maxpages,
sector_t *span)
{
int i;
unsigned int nr_good_pages;
int nr_extents;
+ unsigned long nr_clusters = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
+ unsigned long idx = p->cluster_next / SWAPFILE_CLUSTER;
nr_good_pages = maxpages - 1; /* omit header page */
+ cluster_set_null(&p->free_cluster_head);
+ cluster_set_null(&p->free_cluster_tail);
+ cluster_set_null(&p->discard_cluster_head);
+ cluster_set_null(&p->discard_cluster_tail);
+
for (i = 0; i < swap_header->info.nr_badpages; i++) {
unsigned int page_nr = swap_header->info.badpages[i];
if (page_nr == 0 || page_nr > swap_header->info.last_page)
@@ -1996,11 +2284,25 @@ static int setup_swap_map_and_extents(struct swap_info_struct *p,
if (page_nr < maxpages) {
swap_map[page_nr] = SWAP_MAP_BAD;
nr_good_pages--;
+ /*
+ * Haven't marked the cluster free yet, no list
+ * operation involved
+ */
+ inc_cluster_info_page(p, cluster_info, page_nr);
}
}
+ /* Haven't marked the cluster free yet, no list operation involved */
+ for (i = maxpages; i < round_up(maxpages, SWAPFILE_CLUSTER); i++)
+ inc_cluster_info_page(p, cluster_info, i);
+
if (nr_good_pages) {
swap_map[0] = SWAP_MAP_BAD;
+ /*
+		 * Haven't marked the cluster free yet, no list
+		 * operation involved
+ */
+ inc_cluster_info_page(p, cluster_info, 0);
p->max = maxpages;
p->pages = nr_good_pages;
nr_extents = setup_swap_extents(p, span);
@@ -2009,13 +2311,51 @@ static int setup_swap_map_and_extents(struct swap_info_struct *p,
nr_good_pages = p->pages;
}
if (!nr_good_pages) {
- printk(KERN_WARNING "Empty swap-file\n");
+ pr_warn("Empty swap-file\n");
return -EINVAL;
}
+ if (!cluster_info)
+ return nr_extents;
+
+ for (i = 0; i < nr_clusters; i++) {
+ if (!cluster_count(&cluster_info[idx])) {
+ cluster_set_flag(&cluster_info[idx], CLUSTER_FLAG_FREE);
+ if (cluster_is_null(&p->free_cluster_head)) {
+ cluster_set_next_flag(&p->free_cluster_head,
+ idx, 0);
+ cluster_set_next_flag(&p->free_cluster_tail,
+ idx, 0);
+ } else {
+ unsigned int tail;
+
+ tail = cluster_next(&p->free_cluster_tail);
+ cluster_set_next(&cluster_info[tail], idx);
+ cluster_set_next_flag(&p->free_cluster_tail,
+ idx, 0);
+ }
+ }
+ idx++;
+ if (idx == nr_clusters)
+ idx = 0;
+ }
return nr_extents;
}
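The loop above threads every empty cluster onto the free list, starting at the cluster holding cluster_next and wrapping around. A simplified sketch of the same wrap-around list construction over a plain array, with integer next-indices standing in for the kernel's swap_cluster_info encoding:

#include <stdio.h>

#define NR_CLUSTERS 8
#define NIL -1

int main(void)
{
	int count[NR_CLUSTERS] = { 0, 3, 0, 0, 1, 0, 0, 0 }; /* pages used per cluster */
	int next[NR_CLUSTERS];
	int head = NIL, tail = NIL;
	int idx = 5;	/* start at the cluster holding cluster_next, then wrap */

	for (int i = 0; i < NR_CLUSTERS; i++) {
		if (count[idx] == 0) {		/* cluster is completely free */
			next[idx] = NIL;
			if (head == NIL)
				head = tail = idx;
			else {
				next[tail] = idx;
				tail = idx;
			}
		}
		if (++idx == NR_CLUSTERS)
			idx = 0;
	}

	for (int i = head; i != NIL; i = next[i])
		printf("free cluster %d\n", i);	/* prints 5 6 7 0 2 3 */
	return 0;
}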
+/*
+ * Helper for sys_swapon() to determine whether a given swap
+ * backing device's queue supports DISCARD operations.
+ */
+static bool swap_discardable(struct swap_info_struct *si)
+{
+ struct request_queue *q = bdev_get_queue(si->bdev);
+
+ if (!q || !blk_queue_discard(q))
+ return false;
+
+ return true;
+}
+
SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
{
struct swap_info_struct *p;
@@ -2030,6 +2370,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
sector_t span;
unsigned long maxpages;
unsigned char *swap_map = NULL;
+ struct swap_cluster_info *cluster_info = NULL;
unsigned long *frontswap_map = NULL;
struct page *page = NULL;
struct inode *inode = NULL;
@@ -2044,6 +2385,8 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
if (IS_ERR(p))
return PTR_ERR(p);
+ INIT_WORK(&p->discard_work, swap_discard_work);
+
name = getname(specialfile);
if (IS_ERR(name)) {
error = PTR_ERR(name);
@@ -2103,13 +2446,38 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
error = -ENOMEM;
goto bad_swap;
}
+ if (p->bdev && blk_queue_nonrot(bdev_get_queue(p->bdev))) {
+ p->flags |= SWP_SOLIDSTATE;
+ /*
+		 * select a random position to start with to help wear
+		 * leveling of the SSD
+ */
+ p->cluster_next = 1 + (prandom_u32() % p->highest_bit);
+
+ cluster_info = vzalloc(DIV_ROUND_UP(maxpages,
+ SWAPFILE_CLUSTER) * sizeof(*cluster_info));
+ if (!cluster_info) {
+ error = -ENOMEM;
+ goto bad_swap;
+ }
+ p->percpu_cluster = alloc_percpu(struct percpu_cluster);
+ if (!p->percpu_cluster) {
+ error = -ENOMEM;
+ goto bad_swap;
+ }
+ for_each_possible_cpu(i) {
+ struct percpu_cluster *cluster;
+ cluster = per_cpu_ptr(p->percpu_cluster, i);
+ cluster_set_null(&cluster->index);
+ }
+ }
error = swap_cgroup_swapon(p->type, maxpages);
if (error)
goto bad_swap;
nr_extents = setup_swap_map_and_extents(p, swap_header, swap_map,
- maxpages, &span);
+ cluster_info, maxpages, &span);
if (unlikely(nr_extents < 0)) {
error = nr_extents;
goto bad_swap;
@@ -2118,13 +2486,34 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
if (frontswap_enabled)
frontswap_map = vzalloc(BITS_TO_LONGS(maxpages) * sizeof(long));
- if (p->bdev) {
- if (blk_queue_nonrot(bdev_get_queue(p->bdev))) {
- p->flags |= SWP_SOLIDSTATE;
- p->cluster_next = 1 + (prandom_u32() % p->highest_bit);
+	if (p->bdev && (swap_flags & SWAP_FLAG_DISCARD) && swap_discardable(p)) {
+ /*
+ * When discard is enabled for swap with no particular
+ * policy flagged, we set all swap discard flags here in
+ * order to sustain backward compatibility with older
+ * swapon(8) releases.
+ */
+ p->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD |
+ SWP_PAGE_DISCARD);
+
+ /*
+ * By flagging sys_swapon, a sysadmin can tell us to
+ * either do single-time area discards only, or to just
+ * perform discards for released swap page-clusters.
+ * Now it's time to adjust the p->flags accordingly.
+ */
+ if (swap_flags & SWAP_FLAG_DISCARD_ONCE)
+ p->flags &= ~SWP_PAGE_DISCARD;
+ else if (swap_flags & SWAP_FLAG_DISCARD_PAGES)
+ p->flags &= ~SWP_AREA_DISCARD;
+
+ /* issue a swapon-time discard if it's still required */
+ if (p->flags & SWP_AREA_DISCARD) {
+ int err = discard_swap(p);
+ if (unlikely(err))
+ pr_err("swapon: discard_swap(%p): %d\n",
+ p, err);
}
- if ((swap_flags & SWAP_FLAG_DISCARD) && discard_swap(p) == 0)
- p->flags |= SWP_DISCARDABLE;
}
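In the block above both discard modes are enabled by default and the optional swapon flags then switch one of them off. A compact sketch of that policy selection; the flag values below are local stand-ins, not the real <linux/swap.h> constants:

#include <stdio.h>

/* local stand-ins; not the kernel's values */
#define SWP_AREA_DISCARD	0x1
#define SWP_PAGE_DISCARD	0x2
#define FLAG_DISCARD_ONCE	0x4
#define FLAG_DISCARD_PAGES	0x8

static unsigned int discard_policy(unsigned int swap_flags)
{
	unsigned int p = SWP_AREA_DISCARD | SWP_PAGE_DISCARD; /* default: both */

	if (swap_flags & FLAG_DISCARD_ONCE)
		p &= ~SWP_PAGE_DISCARD;		/* one-shot area discard only */
	else if (swap_flags & FLAG_DISCARD_PAGES)
		p &= ~SWP_AREA_DISCARD;		/* per-cluster discards only */
	return p;
}

int main(void)
{
	printf("default:       %#x\n", discard_policy(0));
	printf("DISCARD_ONCE:  %#x\n", discard_policy(FLAG_DISCARD_ONCE));
	printf("DISCARD_PAGES: %#x\n", discard_policy(FLAG_DISCARD_PAGES));
	return 0;
}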
mutex_lock(&swapon_mutex);
@@ -2132,14 +2521,16 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
if (swap_flags & SWAP_FLAG_PREFER)
prio =
(swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
- enable_swap_info(p, prio, swap_map, frontswap_map);
+ enable_swap_info(p, prio, swap_map, cluster_info, frontswap_map);
- printk(KERN_INFO "Adding %uk swap on %s. "
- "Priority:%d extents:%d across:%lluk %s%s%s\n",
+ pr_info("Adding %uk swap on %s. "
+ "Priority:%d extents:%d across:%lluk %s%s%s%s%s\n",
p->pages<<(PAGE_SHIFT-10), name->name, p->prio,
nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10),
(p->flags & SWP_SOLIDSTATE) ? "SS" : "",
(p->flags & SWP_DISCARDABLE) ? "D" : "",
+ (p->flags & SWP_AREA_DISCARD) ? "s" : "",
+ (p->flags & SWP_PAGE_DISCARD) ? "c" : "",
(frontswap_map) ? "FS" : "");
mutex_unlock(&swapon_mutex);
@@ -2151,6 +2542,8 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
error = 0;
goto out;
bad_swap:
+ free_percpu(p->percpu_cluster);
+ p->percpu_cluster = NULL;
if (inode && S_ISBLK(inode->i_mode) && p->bdev) {
set_blocksize(p->bdev, p->old_block_size);
blkdev_put(p->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
@@ -2162,6 +2555,7 @@ bad_swap:
p->flags = 0;
spin_unlock(&swap_lock);
vfree(swap_map);
+ vfree(cluster_info);
if (swap_file) {
if (inode && S_ISREG(inode->i_mode)) {
mutex_unlock(&inode->i_mutex);
@@ -2231,6 +2625,16 @@ static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
goto unlock_out;
count = p->swap_map[offset];
+
+ /*
+ * swapin_readahead() doesn't check if a swap entry is valid, so the
+ * swap entry could be SWAP_MAP_BAD. Check here with lock held.
+ */
+ if (unlikely(swap_count(count) == SWAP_MAP_BAD)) {
+ err = -ENOENT;
+ goto unlock_out;
+ }
+
has_cache = count & SWAP_HAS_CACHE;
count &= ~SWAP_HAS_CACHE;
err = 0;
@@ -2266,7 +2670,7 @@ out:
return err;
bad_file:
- printk(KERN_ERR "swap_dup: %s%08lx\n", Bad_file, entry.val);
+ pr_err("swap_dup: %s%08lx\n", Bad_file, entry.val);
goto out;
}
@@ -2320,7 +2724,7 @@ struct swap_info_struct *page_swap_info(struct page *page)
*/
struct address_space *__page_file_mapping(struct page *page)
{
- VM_BUG_ON(!PageSwapCache(page));
+ VM_BUG_ON_PAGE(!PageSwapCache(page), page);
return page_swap_info(page)->swap_file->f_mapping;
}
EXPORT_SYMBOL_GPL(__page_file_mapping);
@@ -2328,7 +2732,7 @@ EXPORT_SYMBOL_GPL(__page_file_mapping);
pgoff_t __page_file_index(struct page *page)
{
swp_entry_t swap = { .val = page_private(page) };
- VM_BUG_ON(!PageSwapCache(page));
+ VM_BUG_ON_PAGE(!PageSwapCache(page), page);
return swp_offset(swap);
}
EXPORT_SYMBOL_GPL(__page_file_index);
@@ -2392,8 +2796,8 @@ int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
/*
* We are fortunate that although vmalloc_to_page uses pte_offset_map,
- * no architecture is using highmem pages for kernel pagetables: so it
- * will not corrupt the GFP_ATOMIC caller's atomic pagetable kmaps.
+ * no architecture is using highmem pages for kernel page tables: so it
+ * will not corrupt the GFP_ATOMIC caller's atomic page table kmaps.
*/
head = vmalloc_to_page(si->swap_map + offset);
offset &= ~PAGE_MASK;
diff --git a/mm/truncate.c b/mm/truncate.c
index c75b736e54b7..6a78c814bebf 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -22,11 +22,51 @@
#include <linux/cleancache.h>
#include "internal.h"
+static void clear_exceptional_entry(struct address_space *mapping,
+ pgoff_t index, void *entry)
+{
+ struct radix_tree_node *node;
+ void **slot;
+
+ /* Handled by shmem itself */
+ if (shmem_mapping(mapping))
+ return;
+
+ spin_lock_irq(&mapping->tree_lock);
+ /*
+ * Regular page slots are stabilized by the page lock even
+ * without the tree itself locked. These unlocked entries
+ * need verification under the tree lock.
+ */
+ if (!__radix_tree_lookup(&mapping->page_tree, index, &node, &slot))
+ goto unlock;
+ if (*slot != entry)
+ goto unlock;
+ radix_tree_replace_slot(slot, NULL);
+ mapping->nrshadows--;
+ if (!node)
+ goto unlock;
+ workingset_node_shadows_dec(node);
+ /*
+ * Don't track node without shadow entries.
+ *
+ * Avoid acquiring the list_lru lock if already untracked.
+ * The list_empty() test is safe as node->private_list is
+ * protected by mapping->tree_lock.
+ */
+ if (!workingset_node_shadows(node) &&
+ !list_empty(&node->private_list))
+ list_lru_del(&workingset_shadow_nodes, &node->private_list);
+ __radix_tree_delete_node(&mapping->page_tree, node);
+unlock:
+ spin_unlock_irq(&mapping->tree_lock);
+}
/**
* do_invalidatepage - invalidate part or all of a page
* @page: the page which is affected
- * @offset: the index of the truncation point
+ * @offset: start of the range to invalidate
+ * @length: length of the range to invalidate
*
* do_invalidatepage() is called when all or part of the page has become
* invalidated by a truncate operation.
@@ -37,24 +77,18 @@
* point. Because the caller is about to free (and possibly reuse) those
* blocks on-disk.
*/
-void do_invalidatepage(struct page *page, unsigned long offset)
+void do_invalidatepage(struct page *page, unsigned int offset,
+ unsigned int length)
{
- void (*invalidatepage)(struct page *, unsigned long);
+ void (*invalidatepage)(struct page *, unsigned int, unsigned int);
+
invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
if (!invalidatepage)
invalidatepage = block_invalidatepage;
#endif
if (invalidatepage)
- (*invalidatepage)(page, offset);
-}
-
-static inline void truncate_partial_page(struct page *page, unsigned partial)
-{
- zero_user_segment(page, partial, PAGE_CACHE_SIZE);
- cleancache_invalidate_page(page->mapping, page);
- if (page_has_private(page))
- do_invalidatepage(page, partial);
+ (*invalidatepage)(page, offset, length);
}
/*
@@ -103,7 +137,7 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
return -EIO;
if (page_has_private(page))
- do_invalidatepage(page, 0);
+ do_invalidatepage(page, 0, PAGE_CACHE_SIZE);
cancel_dirty_page(page, PAGE_CACHE_SIZE);
@@ -185,11 +219,11 @@ int invalidate_inode_page(struct page *page)
* truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
* @mapping: mapping to truncate
* @lstart: offset from which to truncate
- * @lend: offset to which to truncate
+ * @lend: offset to which to truncate (inclusive)
*
* Truncate the page cache, removing the pages that are between
- * specified offsets (and zeroing out partial page
- * (if lstart is not page aligned)).
+ * specified offsets (and zeroing out partial pages
+ * if lstart or lend + 1 is not page aligned).
*
* Truncate takes two passes - the first pass is nonblocking. It will not
* block on page locks and it will not block on writeback. The second pass
@@ -200,37 +234,67 @@ int invalidate_inode_page(struct page *page)
* We pass down the cache-hot hint to the page freeing code. Even if the
* mapping is large, it is probably the case that the final pages are the most
* recently touched, and freeing happens in ascending file offset order.
+ *
+ * Note that since ->invalidatepage() accepts a range to invalidate,
+ * truncate_inode_pages_range is able to handle cases where lend + 1 is not
+ * properly page aligned.
*/
void truncate_inode_pages_range(struct address_space *mapping,
loff_t lstart, loff_t lend)
{
- const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
- const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
- struct pagevec pvec;
- pgoff_t index;
- pgoff_t end;
- int i;
+ pgoff_t start; /* inclusive */
+ pgoff_t end; /* exclusive */
+ unsigned int partial_start; /* inclusive */
+ unsigned int partial_end; /* exclusive */
+ struct pagevec pvec;
+ pgoff_t indices[PAGEVEC_SIZE];
+ pgoff_t index;
+ int i;
cleancache_invalidate_inode(mapping);
- if (mapping->nrpages == 0)
+ if (mapping->nrpages == 0 && mapping->nrshadows == 0)
return;
- BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
- end = (lend >> PAGE_CACHE_SHIFT);
+ /* Offsets within partial pages */
+ partial_start = lstart & (PAGE_CACHE_SIZE - 1);
+ partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1);
+
+ /*
+	 * 'start' and 'end' always cover the range of pages to be fully
+ * truncated. Partial pages are covered with 'partial_start' at the
+ * start of the range and 'partial_end' at the end of the range.
+ * Note that 'end' is exclusive while 'lend' is inclusive.
+ */
+ start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ if (lend == -1)
+ /*
+ * lend == -1 indicates end-of-file so we have to set 'end'
+ * to the highest possible pgoff_t and since the type is
+ * unsigned we're using -1.
+ */
+ end = -1;
+ else
+ end = (lend + 1) >> PAGE_CACHE_SHIFT;
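For concreteness, the start/end/partial arithmetic above worked through in a small standalone program, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_CACHE_SHIFT 12	/* assumed 4 KiB pages */
#define PAGE_CACHE_SIZE  (1UL << PAGE_CACHE_SHIFT)

int main(void)
{
	long long lstart = 5000, lend = 20479;	/* inclusive byte range to truncate */

	unsigned long partial_start = lstart & (PAGE_CACHE_SIZE - 1);
	unsigned long partial_end   = (lend + 1) & (PAGE_CACHE_SIZE - 1);
	unsigned long start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	unsigned long end   = (lend == -1) ? -1UL : (lend + 1) >> PAGE_CACHE_SHIFT;

	/* pages [start, end) are dropped whole; partial_start is zeroed
	 * inside page start - 1, partial_end inside page end */
	printf("start=%lu end=%lu partial_start=%lu partial_end=%lu\n",
	       start, end, partial_start, partial_end);
	return 0;	/* prints start=2 end=5 partial_start=904 partial_end=0 */
}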
pagevec_init(&pvec, 0);
index = start;
- while (index <= end && pagevec_lookup(&pvec, mapping, index,
- min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
+ while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
+ min(end - index, (pgoff_t)PAGEVEC_SIZE),
+ indices)) {
mem_cgroup_uncharge_start();
for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *page = pvec.pages[i];
/* We rely upon deletion not changing page->index */
- index = page->index;
- if (index > end)
+ index = indices[i];
+ if (index >= end)
break;
+ if (radix_tree_exceptional_entry(page)) {
+ clear_exceptional_entry(mapping, index, page);
+ continue;
+ }
+
if (!trylock_page(page))
continue;
WARN_ON(page->index != index);
@@ -241,33 +305,65 @@ void truncate_inode_pages_range(struct address_space *mapping,
truncate_inode_page(mapping, page);
unlock_page(page);
}
+ pagevec_remove_exceptionals(&pvec);
pagevec_release(&pvec);
mem_cgroup_uncharge_end();
cond_resched();
index++;
}
- if (partial) {
+ if (partial_start) {
struct page *page = find_lock_page(mapping, start - 1);
if (page) {
+ unsigned int top = PAGE_CACHE_SIZE;
+ if (start > end) {
+ /* Truncation within a single page */
+ top = partial_end;
+ partial_end = 0;
+ }
+ wait_on_page_writeback(page);
+ zero_user_segment(page, partial_start, top);
+ cleancache_invalidate_page(mapping, page);
+ if (page_has_private(page))
+ do_invalidatepage(page, partial_start,
+ top - partial_start);
+ unlock_page(page);
+ page_cache_release(page);
+ }
+ }
+ if (partial_end) {
+ struct page *page = find_lock_page(mapping, end);
+ if (page) {
wait_on_page_writeback(page);
- truncate_partial_page(page, partial);
+ zero_user_segment(page, 0, partial_end);
+ cleancache_invalidate_page(mapping, page);
+ if (page_has_private(page))
+ do_invalidatepage(page, 0,
+ partial_end);
unlock_page(page);
page_cache_release(page);
}
}
+ /*
+ * If the truncation happened within a single page no pages
+ * will be released, just zeroed, so we can bail out now.
+ */
+ if (start >= end)
+ return;
index = start;
for ( ; ; ) {
cond_resched();
- if (!pagevec_lookup(&pvec, mapping, index,
- min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
+ if (!pagevec_lookup_entries(&pvec, mapping, index,
+ min(end - index, (pgoff_t)PAGEVEC_SIZE),
+ indices)) {
if (index == start)
break;
index = start;
continue;
}
- if (index == start && pvec.pages[0]->index > end) {
+ if (index == start && indices[0] >= end) {
+ pagevec_remove_exceptionals(&pvec);
pagevec_release(&pvec);
break;
}
@@ -276,16 +372,22 @@ void truncate_inode_pages_range(struct address_space *mapping,
struct page *page = pvec.pages[i];
/* We rely upon deletion not changing page->index */
- index = page->index;
- if (index > end)
+ index = indices[i];
+ if (index >= end)
break;
+ if (radix_tree_exceptional_entry(page)) {
+ clear_exceptional_entry(mapping, index, page);
+ continue;
+ }
+
lock_page(page);
WARN_ON(page->index != index);
wait_on_page_writeback(page);
truncate_inode_page(mapping, page);
unlock_page(page);
}
+ pagevec_remove_exceptionals(&pvec);
pagevec_release(&pvec);
mem_cgroup_uncharge_end();
index++;
@@ -313,6 +415,53 @@ void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
EXPORT_SYMBOL(truncate_inode_pages);
/**
+ * truncate_inode_pages_final - truncate *all* pages before inode dies
+ * @mapping: mapping to truncate
+ *
+ * Called under (and serialized by) inode->i_mutex.
+ *
+ * Filesystems have to use this in the .evict_inode path to inform the
+ * VM that this is the final truncate and the inode is going away.
+ */
+void truncate_inode_pages_final(struct address_space *mapping)
+{
+ unsigned long nrshadows;
+ unsigned long nrpages;
+
+ /*
+ * Page reclaim can not participate in regular inode lifetime
+ * management (can't call iput()) and thus can race with the
+ * inode teardown. Tell it when the address space is exiting,
+ * so that it does not install eviction information after the
+ * final truncate has begun.
+ */
+ mapping_set_exiting(mapping);
+
+ /*
+ * When reclaim installs eviction entries, it increases
+ * nrshadows first, then decreases nrpages. Make sure we see
+ * this in the right order or we might miss an entry.
+ */
+ nrpages = mapping->nrpages;
+ smp_rmb();
+ nrshadows = mapping->nrshadows;
+
+ if (nrpages || nrshadows) {
+ /*
+ * As truncation uses a lockless tree lookup, cycle
+ * the tree lock to make sure any ongoing tree
+ * modification that does not see AS_EXITING is
+ * completed before starting the final truncate.
+ */
+ spin_lock_irq(&mapping->tree_lock);
+ spin_unlock_irq(&mapping->tree_lock);
+
+ truncate_inode_pages(mapping, 0);
+ }
+}
+EXPORT_SYMBOL(truncate_inode_pages_final);
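The barrier pairing described above (reclaim publishes nrshadows before dropping nrpages; the reader looks at nrpages first and nrshadows second) can be sketched generically with C11 atomics standing in for the kernel barriers:

#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong nrpages   = 1;
static atomic_ulong nrshadows = 0;

/* reclaim side: install the shadow entry before dropping the page count */
static void evict_one_page(void)
{
	atomic_fetch_add_explicit(&nrshadows, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);	/* ~ smp_wmb() */
	atomic_fetch_sub_explicit(&nrpages, 1, memory_order_relaxed);
}

/* truncate side: read in the opposite order so no entry can be missed */
static int mapping_is_empty(void)
{
	unsigned long pages = atomic_load_explicit(&nrpages, memory_order_relaxed);
	atomic_thread_fence(memory_order_acquire);	/* ~ smp_rmb() */
	unsigned long shadows = atomic_load_explicit(&nrshadows, memory_order_relaxed);

	return pages == 0 && shadows == 0;
}

int main(void)
{
	evict_one_page();
	printf("empty? %d\n", mapping_is_empty());	/* 0: the shadow is seen */
	return 0;
}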
+
+/**
* invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
* @mapping: the address_space which holds the pages to invalidate
* @start: the offset 'from' which to invalidate
@@ -328,32 +477,31 @@ EXPORT_SYMBOL(truncate_inode_pages);
unsigned long invalidate_mapping_pages(struct address_space *mapping,
pgoff_t start, pgoff_t end)
{
+ pgoff_t indices[PAGEVEC_SIZE];
struct pagevec pvec;
pgoff_t index = start;
unsigned long ret;
unsigned long count = 0;
int i;
- /*
- * Note: this function may get called on a shmem/tmpfs mapping:
- * pagevec_lookup() might then return 0 prematurely (because it
- * got a gangful of swap entries); but it's hardly worth worrying
- * about - it can rarely have anything to free from such a mapping
- * (most pages are dirty), and already skips over any difficulties.
- */
-
pagevec_init(&pvec, 0);
- while (index <= end && pagevec_lookup(&pvec, mapping, index,
- min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
+ while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
+ min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
+ indices)) {
mem_cgroup_uncharge_start();
for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *page = pvec.pages[i];
/* We rely upon deletion not changing page->index */
- index = page->index;
+ index = indices[i];
if (index > end)
break;
+ if (radix_tree_exceptional_entry(page)) {
+ clear_exceptional_entry(mapping, index, page);
+ continue;
+ }
+
if (!trylock_page(page))
continue;
WARN_ON(page->index != index);
@@ -367,6 +515,7 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
deactivate_page(page);
count += ret;
}
+ pagevec_remove_exceptionals(&pvec);
pagevec_release(&pvec);
mem_cgroup_uncharge_end();
cond_resched();
@@ -397,7 +546,7 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
goto failed;
BUG_ON(page_has_private(page));
- __delete_from_page_cache(page);
+ __delete_from_page_cache(page, NULL);
spin_unlock_irq(&mapping->tree_lock);
mem_cgroup_uncharge_cache_page(page);
@@ -434,6 +583,7 @@ static int do_launder_page(struct address_space *mapping, struct page *page)
int invalidate_inode_pages2_range(struct address_space *mapping,
pgoff_t start, pgoff_t end)
{
+ pgoff_t indices[PAGEVEC_SIZE];
struct pagevec pvec;
pgoff_t index;
int i;
@@ -444,17 +594,23 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
cleancache_invalidate_inode(mapping);
pagevec_init(&pvec, 0);
index = start;
- while (index <= end && pagevec_lookup(&pvec, mapping, index,
- min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
+ while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
+ min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
+ indices)) {
mem_cgroup_uncharge_start();
for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *page = pvec.pages[i];
/* We rely upon deletion not changing page->index */
- index = page->index;
+ index = indices[i];
if (index > end)
break;
+ if (radix_tree_exceptional_entry(page)) {
+ clear_exceptional_entry(mapping, index, page);
+ continue;
+ }
+
lock_page(page);
WARN_ON(page->index != index);
if (page->mapping != mapping) {
@@ -492,6 +648,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
ret = ret2;
unlock_page(page);
}
+ pagevec_remove_exceptionals(&pvec);
pagevec_release(&pvec);
mem_cgroup_uncharge_end();
cond_resched();
@@ -520,7 +677,6 @@ EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
/**
* truncate_pagecache - unmap and remove pagecache that has been truncated
* @inode: inode
- * @oldsize: old file size
* @newsize: new file size
*
* inode's new i_size must already be written before truncate_pagecache
@@ -533,7 +689,7 @@ EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
* situations such as writepage being called for a page that has already
* had its underlying blocks deallocated.
*/
-void truncate_pagecache(struct inode *inode, loff_t oldsize, loff_t newsize)
+void truncate_pagecache(struct inode *inode, loff_t newsize)
{
struct address_space *mapping = inode->i_mapping;
loff_t holebegin = round_up(newsize, PAGE_SIZE);
@@ -567,12 +723,8 @@ EXPORT_SYMBOL(truncate_pagecache);
*/
void truncate_setsize(struct inode *inode, loff_t newsize)
{
- loff_t oldsize;
-
- oldsize = inode->i_size;
i_size_write(inode, newsize);
-
- truncate_pagecache(inode, oldsize, newsize);
+ truncate_pagecache(inode, newsize);
}
EXPORT_SYMBOL(truncate_setsize);
@@ -598,10 +750,8 @@ void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
* This rounding is currently just for example: unmap_mapping_range
* expands its hole outwards, whereas we want it to contract the hole
* inwards. However, existing callers of truncate_pagecache_range are
- * doing their own page rounding first; and truncate_inode_pages_range
- * currently BUGs if lend is not pagealigned-1 (it handles partial
- * page at start of hole, but not partial page at end of hole). Note
- * unmap_mapping_range allows holelen 0 for all, and we allow lend -1.
+ * doing their own page rounding first. Note that unmap_mapping_range
+ * allows holelen 0 for all, and we allow lend -1 for end of file.
*/
/*
diff --git a/mm/util.c b/mm/util.c
index ab1424dbe2e6..d5ea733c5082 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -1,12 +1,17 @@
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
+#include <linux/mman.h>
+#include <linux/hugetlb.h>
+#include <linux/vmalloc.h>
+
#include <asm/uaccess.h>
#include "internal.h"
@@ -295,7 +300,6 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
{
mm->mmap_base = TASK_UNMAPPED_BASE;
mm->get_unmapped_area = arch_get_unmapped_area;
- mm->unmap_area = arch_unmap_area;
}
#endif
@@ -305,7 +309,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
* If the architecture not support this function, simply return with no
* page pinned
*/
-int __attribute__((weak)) __get_user_pages_fast(unsigned long start,
+int __weak __get_user_pages_fast(unsigned long start,
int nr_pages, int write, struct page **pages)
{
return 0;
@@ -336,7 +340,7 @@ EXPORT_SYMBOL_GPL(__get_user_pages_fast);
* callers need to carefully consider what to use. On many architectures,
* get_user_pages_fast simply falls back to get_user_pages.
*/
-int __attribute__((weak)) get_user_pages_fast(unsigned long start,
+int __weak get_user_pages_fast(unsigned long start,
int nr_pages, int write, struct page **pages)
{
struct mm_struct *mm = current->mm;
@@ -384,24 +388,123 @@ unsigned long vm_mmap(struct file *file, unsigned long addr,
}
EXPORT_SYMBOL(vm_mmap);
+void kvfree(const void *addr)
+{
+ if (is_vmalloc_addr(addr))
+ vfree(addr);
+ else
+ kfree(addr);
+}
+EXPORT_SYMBOL(kvfree);
+
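kvfree() accepts a pointer that may have come from either kmalloc() or vmalloc(). A minimal sketch (not part of the patch) of the allocation pattern it typically pairs with:

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

/* illustrative helper: try the slab allocator first and fall back to
 * vmalloc for large or fragmented allocations; either result can later
 * be released with kvfree(). */
static void *alloc_maybe_vmalloc(size_t size)
{
	void *p = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);

	if (!p)
		p = vmalloc(size);
	return p;
}

A caller can then release the buffer with kvfree(p) without remembering which allocator satisfied the request.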
struct address_space *page_mapping(struct page *page)
{
struct address_space *mapping = page->mapping;
- VM_BUG_ON(PageSlab(page));
-#ifdef CONFIG_SWAP
+ /* This happens if someone calls flush_dcache_page on slab page */
+ if (unlikely(PageSlab(page)))
+ return NULL;
+
if (unlikely(PageSwapCache(page))) {
swp_entry_t entry;
entry.val = page_private(page);
mapping = swap_address_space(entry);
- } else
-#endif
- if ((unsigned long)mapping & PAGE_MAPPING_ANON)
+ } else if ((unsigned long)mapping & PAGE_MAPPING_ANON)
mapping = NULL;
return mapping;
}
+int overcommit_ratio_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ int ret;
+
+ ret = proc_dointvec(table, write, buffer, lenp, ppos);
+ if (ret == 0 && write)
+ sysctl_overcommit_kbytes = 0;
+ return ret;
+}
+
+int overcommit_kbytes_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ int ret;
+
+ ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
+ if (ret == 0 && write)
+ sysctl_overcommit_ratio = 0;
+ return ret;
+}
+
+/*
+ * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
+ */
+unsigned long vm_commit_limit(void)
+{
+ unsigned long allowed;
+
+ if (sysctl_overcommit_kbytes)
+ allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
+ else
+ allowed = ((totalram_pages - hugetlb_total_pages())
+ * sysctl_overcommit_ratio / 100);
+ allowed += total_swap_pages;
+
+ return allowed;
+}
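Under OVERCOMMIT_NEVER the limit is either overcommit_kbytes converted to pages or a percentage of non-hugetlb RAM, plus swap. A worked example of the arithmetic, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages */

int main(void)
{
	unsigned long totalram_pages    = 2097152;	/* 8 GiB of RAM */
	unsigned long hugetlb_pages     = 0;
	unsigned long total_swap_pages  = 524288;	/* 2 GiB of swap */
	unsigned long overcommit_kbytes = 0;		/* 0 => use the ratio instead */
	unsigned long overcommit_ratio  = 50;		/* percent */
	unsigned long allowed;

	if (overcommit_kbytes)
		allowed = overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = (totalram_pages - hugetlb_pages) * overcommit_ratio / 100;
	allowed += total_swap_pages;

	/* 50% of 8 GiB plus 2 GiB of swap = 6 GiB worth of pages */
	printf("commit limit: %lu pages (%lu MiB)\n",
	       allowed, allowed >> (20 - PAGE_SHIFT));
	return 0;
}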
+
+/**
+ * get_cmdline() - copy the cmdline value to a buffer.
+ * @task: the task whose cmdline value to copy.
+ * @buffer: the buffer to copy to.
+ * @buflen: the length of the buffer. Larger cmdline values are truncated
+ * to this length.
+ * Returns the size of the cmdline field copied. Note that the copy does
+ * not guarantee an ending NULL byte.
+ */
+int get_cmdline(struct task_struct *task, char *buffer, int buflen)
+{
+ int res = 0;
+ unsigned int len;
+ struct mm_struct *mm = get_task_mm(task);
+ if (!mm)
+ goto out;
+ if (!mm->arg_end)
+ goto out_mm; /* Shh! No looking before we're done */
+
+ len = mm->arg_end - mm->arg_start;
+
+ if (len > buflen)
+ len = buflen;
+
+ res = access_process_vm(task, mm->arg_start, buffer, len, 0);
+
+ /*
+ * If the nul at the end of args has been overwritten, then
+ * assume application is using setproctitle(3).
+ */
+ if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
+ len = strnlen(buffer, res);
+ if (len < res) {
+ res = len;
+ } else {
+ len = mm->env_end - mm->env_start;
+ if (len > buflen - res)
+ len = buflen - res;
+ res += access_process_vm(task, mm->env_start,
+ buffer+res, len, 0);
+ res = strnlen(buffer, res);
+ }
+ }
+out_mm:
+ mmput(mm);
+out:
+ return res;
+}
+
/* Tracepoints definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
diff --git a/mm/vmacache.c b/mm/vmacache.c
new file mode 100644
index 000000000000..1037a3bab505
--- /dev/null
+++ b/mm/vmacache.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2014 Davidlohr Bueso.
+ */
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/vmacache.h>
+
+/*
+ * Flush vma caches for threads that share a given mm.
+ *
+ * The operation is safe because the caller holds the mmap_sem
+ * exclusively and other threads accessing the vma cache will
+ * have mmap_sem held at least for read, so no extra locking
+ * is required to maintain the vma cache.
+ */
+void vmacache_flush_all(struct mm_struct *mm)
+{
+ struct task_struct *g, *p;
+
+ rcu_read_lock();
+ for_each_process_thread(g, p) {
+ /*
+ * Only flush the vmacache pointers as the
+ * mm seqnum is already set and curr's will
+ * be set upon invalidation when the next
+ * lookup is done.
+ */
+ if (mm == p->mm)
+ vmacache_flush(p);
+ }
+ rcu_read_unlock();
+}
+
+/*
+ * This task may be accessing a foreign mm via (for example)
+ * get_user_pages()->find_vma(). The vmacache is task-local and this
+ * task's vmacache pertains to a different mm (ie, its own). There is
+ * nothing we can do here.
+ *
+ * Also handle the case where a kernel thread has adopted this mm via use_mm().
+ * That kernel thread's vmacache is not applicable to this mm.
+ */
+static bool vmacache_valid_mm(struct mm_struct *mm)
+{
+ return current->mm == mm && !(current->flags & PF_KTHREAD);
+}
+
+void vmacache_update(unsigned long addr, struct vm_area_struct *newvma)
+{
+ if (vmacache_valid_mm(newvma->vm_mm))
+ current->vmacache[VMACACHE_HASH(addr)] = newvma;
+}
+
+static bool vmacache_valid(struct mm_struct *mm)
+{
+ struct task_struct *curr;
+
+ if (!vmacache_valid_mm(mm))
+ return false;
+
+ curr = current;
+ if (mm->vmacache_seqnum != curr->vmacache_seqnum) {
+ /*
+ * First attempt will always be invalid, initialize
+ * the new cache for this task here.
+ */
+ curr->vmacache_seqnum = mm->vmacache_seqnum;
+ vmacache_flush(curr);
+ return false;
+ }
+ return true;
+}
+
+struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr)
+{
+ int i;
+
+ if (!vmacache_valid(mm))
+ return NULL;
+
+ for (i = 0; i < VMACACHE_SIZE; i++) {
+ struct vm_area_struct *vma = current->vmacache[i];
+
+ if (!vma)
+ continue;
+ if (WARN_ON_ONCE(vma->vm_mm != mm))
+ break;
+ if (vma->vm_start <= addr && vma->vm_end > addr)
+ return vma;
+ }
+
+ return NULL;
+}
+
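The per-task cache is a tiny array indexed by a hash of the faulting address. A simplified userspace sketch of the same lookup and update scheme; the size and hash below are illustrative, not the kernel's exact definitions:

#include <stdio.h>
#include <stddef.h>

#define CACHE_SIZE 4	/* the kernel uses a similarly tiny per-task array */
#define PAGE_SHIFT 12

struct vma { unsigned long start, end; };

static struct vma *cache[CACHE_SIZE];

/* hash on the page number so nearby addresses map to the same slot */
static unsigned int slot(unsigned long addr)
{
	return (addr >> PAGE_SHIFT) & (CACHE_SIZE - 1);
}

static void cache_update(unsigned long addr, struct vma *v)
{
	cache[slot(addr)] = v;
}

/* the lookup scans every slot, mirroring vmacache_find() */
static struct vma *cache_find(unsigned long addr)
{
	for (int i = 0; i < CACHE_SIZE; i++) {
		struct vma *v = cache[i];

		if (v && v->start <= addr && addr < v->end)
			return v;
	}
	return NULL;
}

int main(void)
{
	struct vma v = { 0x400000, 0x600000 };

	cache_update(0x401000, &v);
	printf("hit: %d\n", cache_find(0x450000) != NULL);
	printf("miss: %d\n", cache_find(0x700000) != NULL);
	return 0;
}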
+#ifndef CONFIG_MMU
+struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
+{
+ int i;
+
+ if (!vmacache_valid(mm))
+ return NULL;
+
+ for (i = 0; i < VMACACHE_SIZE; i++) {
+ struct vm_area_struct *vma = current->vmacache[i];
+
+ if (vma && vma->vm_start == start && vma->vm_end == end)
+ return vma;
+ }
+
+ return NULL;
+}
+#endif
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index d365724feb05..bf233b283319 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -27,7 +27,9 @@
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
+#include <linux/compiler.h>
#include <linux/llist.h>
+
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>
@@ -292,7 +294,7 @@ static struct vmap_area *__find_vmap_area(unsigned long addr)
va = rb_entry(n, struct vmap_area, rb_node);
if (addr < va->va_start)
n = n->rb_left;
- else if (addr > va->va_start)
+ else if (addr >= va->va_end)
n = n->rb_right;
else
return va;
@@ -359,6 +361,12 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
if (unlikely(!va))
return ERR_PTR(-ENOMEM);
+ /*
+ * Only scan the relevant parts containing pointers to other objects
+ * to avoid false negatives.
+ */
+ kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);
+
retry:
spin_lock(&vmap_area_lock);
/*
@@ -388,12 +396,12 @@ nocache:
addr = ALIGN(first->va_end, align);
if (addr < vstart)
goto nocache;
- if (addr + size - 1 < addr)
+ if (addr + size < addr)
goto overflow;
} else {
addr = ALIGN(vstart, align);
- if (addr + size - 1 < addr)
+ if (addr + size < addr)
goto overflow;
n = vmap_area_root.rb_node;
@@ -420,7 +428,7 @@ nocache:
if (addr + cached_hole_size < first->va_start)
cached_hole_size = first->va_start - addr;
addr = ALIGN(first->va_end, align);
- if (addr + size - 1 < addr)
+ if (addr + size < addr)
goto overflow;
if (list_is_last(&first->list, &vmap_area_list))
@@ -752,9 +760,7 @@ struct vmap_block_queue {
struct vmap_block {
spinlock_t lock;
struct vmap_area *va;
- struct vmap_block_queue *vbq;
unsigned long free, dirty;
- DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS);
DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
struct list_head free_list;
struct rcu_head rcu_head;
@@ -820,7 +826,6 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
vb->va = va;
vb->free = VMAP_BBMAP_BITS;
vb->dirty = 0;
- bitmap_zero(vb->alloc_map, VMAP_BBMAP_BITS);
bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
INIT_LIST_HEAD(&vb->free_list);
@@ -832,7 +837,6 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
radix_tree_preload_end();
vbq = &get_cpu_var(vmap_block_queue);
- vb->vbq = vbq;
spin_lock(&vbq->lock);
list_add_rcu(&vb->free_list, &vbq->free);
spin_unlock(&vbq->lock);
@@ -873,7 +877,6 @@ static void purge_fragmented_blocks(int cpu)
if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
vb->free = 0; /* prevent further allocs after releasing lock */
vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
- bitmap_fill(vb->alloc_map, VMAP_BBMAP_BITS);
bitmap_fill(vb->dirty_map, VMAP_BBMAP_BITS);
spin_lock(&vbq->lock);
list_del_rcu(&vb->free_list);
@@ -891,11 +894,6 @@ static void purge_fragmented_blocks(int cpu)
}
}
-static void purge_fragmented_blocks_thiscpu(void)
-{
- purge_fragmented_blocks(smp_processor_id());
-}
-
static void purge_fragmented_blocks_allcpus(void)
{
int cpu;
@@ -910,7 +908,6 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
struct vmap_block *vb;
unsigned long addr = 0;
unsigned int order;
- int purge = 0;
BUG_ON(size & ~PAGE_MASK);
BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
@@ -934,17 +931,7 @@ again:
if (vb->free < 1UL << order)
goto next;
- i = bitmap_find_free_region(vb->alloc_map,
- VMAP_BBMAP_BITS, order);
-
- if (i < 0) {
- if (vb->free + vb->dirty == VMAP_BBMAP_BITS) {
- /* fragmented and no outstanding allocations */
- BUG_ON(vb->dirty != VMAP_BBMAP_BITS);
- purge = 1;
- }
- goto next;
- }
+ i = VMAP_BBMAP_BITS - vb->free;
addr = vb->va->va_start + (i << PAGE_SHIFT);
BUG_ON(addr_to_vb_idx(addr) !=
addr_to_vb_idx(vb->va->va_start));
@@ -960,9 +947,6 @@ next:
spin_unlock(&vb->lock);
}
- if (purge)
- purge_fragmented_blocks_thiscpu();
-
put_cpu_var(vmap_block_queue);
rcu_read_unlock();
@@ -1040,15 +1024,16 @@ void vm_unmap_aliases(void)
rcu_read_lock();
list_for_each_entry_rcu(vb, &vbq->free, free_list) {
- int i;
+ int i, j;
spin_lock(&vb->lock);
i = find_first_bit(vb->dirty_map, VMAP_BBMAP_BITS);
- while (i < VMAP_BBMAP_BITS) {
+ if (i < VMAP_BBMAP_BITS) {
unsigned long s, e;
- int j;
- j = find_next_zero_bit(vb->dirty_map,
- VMAP_BBMAP_BITS, i);
+
+ j = find_last_bit(vb->dirty_map,
+ VMAP_BBMAP_BITS);
+ j = j + 1; /* need exclusive index */
s = vb->va->va_start + (i << PAGE_SHIFT);
e = vb->va->va_start + (j << PAGE_SHIFT);
@@ -1058,10 +1043,6 @@ void vm_unmap_aliases(void)
start = s;
if (e > end)
end = e;
-
- i = j;
- i = find_next_bit(vb->dirty_map,
- VMAP_BBMAP_BITS, i);
}
spin_unlock(&vb->lock);
}
@@ -1104,6 +1085,12 @@ EXPORT_SYMBOL(vm_unmap_ram);
* @node: prefer to allocate data structures on this node
* @prot: memory protection to use. PAGE_KERNEL for regular RAM
*
+ * If you use this function for fewer than VMAP_MAX_ALLOC pages, it can be
+ * faster than vmap(). But if you mix long-lived and short-lived objects
+ * with vm_map_ram(), the address space can become badly fragmented
+ * (especially on a 32-bit machine) and mappings may eventually fail.
+ * Please use this function for short-lived objects only.
+ *
* Returns: a pointer to the address that has been mapped, or %NULL on failure
*/
void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
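As the note above says, vm_map_ram() is intended for short-lived mappings. A minimal in-kernel sketch of the usual pairing with vm_unmap_ram() (illustrative only, error handling trimmed):

#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mm.h>

/* illustrative only: map a caller-provided page array briefly,
 * touch it, and tear the mapping down again */
static int touch_pages(struct page **pages, unsigned int count)
{
	void *addr = vm_map_ram(pages, count, NUMA_NO_NODE, PAGE_KERNEL);

	if (!addr)
		return -ENOMEM;
	memset(addr, 0, count * PAGE_SIZE);
	vm_unmap_ram(addr, count);
	return 0;
}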
@@ -1285,7 +1272,7 @@ void unmap_kernel_range(unsigned long addr, unsigned long size)
int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
unsigned long addr = (unsigned long)area->addr;
- unsigned long end = addr + area->size - PAGE_SIZE;
+ unsigned long end = addr + get_vm_area_size(area);
int err;
err = vmap_page_range(addr, end, prot, *pages);
@@ -1311,22 +1298,15 @@ static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
spin_unlock(&vmap_area_lock);
}
-static void clear_vm_unlist(struct vm_struct *vm)
+static void clear_vm_uninitialized_flag(struct vm_struct *vm)
{
/*
- * Before removing VM_UNLIST,
+ * Before removing VM_UNINITIALIZED,
* we should make sure that vm has proper values.
* Pair with smp_rmb() in show_numa_info().
*/
smp_wmb();
- vm->flags &= ~VM_UNLIST;
-}
-
-static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
- unsigned long flags, const void *caller)
-{
- setup_vmalloc_vm(vm, va, flags, caller);
- clear_vm_unlist(vm);
+ vm->flags &= ~VM_UNINITIALIZED;
}
static struct vm_struct *__get_vm_area_node(unsigned long size,
@@ -1337,16 +1317,8 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
struct vm_struct *area;
BUG_ON(in_interrupt());
- if (flags & VM_IOREMAP) {
- int bit = fls(size);
-
- if (bit > IOREMAP_MAX_ORDER)
- bit = IOREMAP_MAX_ORDER;
- else if (bit < PAGE_SHIFT)
- bit = PAGE_SHIFT;
-
- align = 1ul << bit;
- }
+ if (flags & VM_IOREMAP)
+ align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER);
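The one-liner above computes the ioremap alignment as 2^fls(size) clamped between PAGE_SHIFT and IOREMAP_MAX_ORDER. A quick userspace check of that arithmetic; fls() is approximated with a builtin and IOREMAP_MAX_ORDER is assumed to be its common default of PAGE_SHIFT + 7:

#include <stdio.h>

#define PAGE_SHIFT        12
#define IOREMAP_MAX_ORDER (PAGE_SHIFT + 7)	/* assumed default: 128 pages */

static int fls(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

static int clamp(int v, int lo, int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	unsigned int sizes[] = { 4096, 3 * 4096, 4 << 20 };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("size %u -> align %lu\n", sizes[i],
		       1UL << clamp(fls(sizes[i]), PAGE_SHIFT, IOREMAP_MAX_ORDER));
	return 0;	/* 4096 -> 8192, 12288 -> 16384, 4 MiB -> 524288 */
}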
size = PAGE_ALIGN(size);
if (unlikely(!size))
@@ -1367,16 +1339,7 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
return NULL;
}
- /*
- * When this function is called from __vmalloc_node_range,
- * we add VM_UNLIST flag to avoid accessing uninitialized
- * members of vm_struct such as pages and nr_pages fields.
- * They will be set later.
- */
- if (flags & VM_UNLIST)
- setup_vmalloc_vm(area, va, flags, caller);
- else
- insert_vmalloc_vm(area, va, flags, caller);
+ setup_vmalloc_vm(area, va, flags, caller);
return area;
}
@@ -1476,10 +1439,9 @@ static void __vunmap(const void *addr, int deallocate_pages)
if (!addr)
return;
- if ((PAGE_SIZE-1) & (unsigned long)addr) {
- WARN(1, KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
+ if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
+ addr))
return;
- }
area = remove_vm_area(addr);
if (unlikely(!area)) {
@@ -1524,7 +1486,6 @@ static void __vunmap(const void *addr, int deallocate_pages)
* conventions for vfree() arch-depenedent would be a really bad idea)
*
* NOTE: assumes that the object at *addr has a size >= sizeof(llist_node)
- *
*/
void vfree(const void *addr)
{
@@ -1536,8 +1497,8 @@ void vfree(const void *addr)
return;
if (unlikely(in_interrupt())) {
struct vfree_deferred *p = &__get_cpu_var(vfree_deferred);
- llist_add((struct llist_node *)addr, &p->list);
- schedule_work(&p->wq);
+ if (llist_add((struct llist_node *)addr, &p->list))
+ schedule_work(&p->wq);
} else
__vunmap(addr, 1);
}
@@ -1599,27 +1560,26 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
gfp_t gfp_mask, pgprot_t prot,
int node, const void *caller);
static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
- pgprot_t prot, int node, const void *caller)
+ pgprot_t prot, int node)
{
const int order = 0;
struct page **pages;
unsigned int nr_pages, array_size, i;
gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
- nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
+ nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
array_size = (nr_pages * sizeof(struct page *));
area->nr_pages = nr_pages;
/* Please note that the recursion is strictly bounded. */
if (array_size > PAGE_SIZE) {
pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM,
- PAGE_KERNEL, node, caller);
+ PAGE_KERNEL, node, area->caller);
area->flags |= VM_VPAGES;
} else {
pages = kmalloc_node(array_size, nested_gfp, node);
}
area->pages = pages;
- area->caller = caller;
if (!area->pages) {
remove_vm_area(area->addr);
kfree(area);
@@ -1630,7 +1590,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
struct page *page;
gfp_t tmp_mask = gfp_mask | __GFP_NOWARN;
- if (node < 0)
+ if (node == NUMA_NO_NODE)
page = alloc_page(tmp_mask);
else
page = alloc_pages_node(node, tmp_mask, order);
@@ -1682,28 +1642,28 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
if (!size || (size >> PAGE_SHIFT) > totalram_pages)
goto fail;
- area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED,
start, end, node, gfp_mask, caller);
if (!area)
goto fail;
- addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller);
+ addr = __vmalloc_area_node(area, gfp_mask, prot, node);
if (!addr)
return NULL;
/*
- * In this function, newly allocated vm_struct has VM_UNLIST flag.
- * It means that vm_struct is not fully initialized.
+ * In this function, newly allocated vm_struct has VM_UNINITIALIZED
+ * flag. It means that vm_struct is not fully initialized.
* Now, it is fully initialized, so remove this flag here.
*/
- clear_vm_unlist(area);
+ clear_vm_uninitialized_flag(area);
/*
- * A ref_count = 3 is needed because the vm_struct and vmap_area
- * structures allocated in the __get_vm_area_node() function contain
- * references to the virtual address of the vmalloc'ed block.
+ * A ref_count = 2 is needed because vm_struct allocated in
+ * __get_vm_area_node() contains a reference to the virtual address of
+ * the vmalloc'ed block.
*/
- kmemleak_alloc(addr, real_size, 3, gfp_mask);
+ kmemleak_alloc(addr, real_size, 2, gfp_mask);
return addr;
@@ -2038,7 +1998,7 @@ long vread(char *buf, char *addr, unsigned long count)
vm = va->vm;
vaddr = (char *) vm->addr;
- if (addr >= vaddr + vm->size - PAGE_SIZE)
+ if (addr >= vaddr + get_vm_area_size(vm))
continue;
while (addr < vaddr) {
if (count == 0)
@@ -2048,7 +2008,7 @@ long vread(char *buf, char *addr, unsigned long count)
addr++;
count--;
}
- n = vaddr + vm->size - PAGE_SIZE - addr;
+ n = vaddr + get_vm_area_size(vm) - addr;
if (n > count)
n = count;
if (!(vm->flags & VM_IOREMAP))
@@ -2120,7 +2080,7 @@ long vwrite(char *buf, char *addr, unsigned long count)
vm = va->vm;
vaddr = (char *) vm->addr;
- if (addr >= vaddr + vm->size - PAGE_SIZE)
+ if (addr >= vaddr + get_vm_area_size(vm))
continue;
while (addr < vaddr) {
if (count == 0)
@@ -2129,7 +2089,7 @@ long vwrite(char *buf, char *addr, unsigned long count)
addr++;
count--;
}
- n = vaddr + vm->size - PAGE_SIZE - addr;
+ n = vaddr + get_vm_area_size(vm) - addr;
if (n > count)
n = count;
if (!(vm->flags & VM_IOREMAP)) {
@@ -2148,42 +2108,43 @@ finished:
}
/**
- * remap_vmalloc_range - map vmalloc pages to userspace
- * @vma: vma to cover (map full range of vma)
- * @addr: vmalloc memory
- * @pgoff: number of pages into addr before first page to map
+ * remap_vmalloc_range_partial - map vmalloc pages to userspace
+ * @vma: vma to cover
+ * @uaddr: target user address to start at
+ * @kaddr: virtual address of vmalloc kernel memory
+ * @size: size of map area
*
* Returns: 0 for success, -Exxx on failure
*
- * This function checks that addr is a valid vmalloc'ed area, and
- * that it is big enough to cover the vma. Will return failure if
- * that criteria isn't met.
+ * This function checks that @kaddr is a valid vmalloc'ed area,
+ * and that it is big enough to cover the range starting at
+ * @uaddr in @vma. Will return failure if that criteria isn't
+ * met.
*
* Similar to remap_pfn_range() (see mm/memory.c)
*/
-int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
- unsigned long pgoff)
+int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
+ void *kaddr, unsigned long size)
{
struct vm_struct *area;
- unsigned long uaddr = vma->vm_start;
- unsigned long usize = vma->vm_end - vma->vm_start;
- if ((PAGE_SIZE-1) & (unsigned long)addr)
+ size = PAGE_ALIGN(size);
+
+ if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
return -EINVAL;
- area = find_vm_area(addr);
+ area = find_vm_area(kaddr);
if (!area)
return -EINVAL;
if (!(area->flags & VM_USERMAP))
return -EINVAL;
- if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
+ if (kaddr + size > area->addr + area->size)
return -EINVAL;
- addr += pgoff << PAGE_SHIFT;
do {
- struct page *page = vmalloc_to_page(addr);
+ struct page *page = vmalloc_to_page(kaddr);
int ret;
ret = vm_insert_page(vma, uaddr, page);
@@ -2191,21 +2152,44 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
return ret;
uaddr += PAGE_SIZE;
- addr += PAGE_SIZE;
- usize -= PAGE_SIZE;
- } while (usize > 0);
+ kaddr += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ } while (size > 0);
vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
return 0;
}
+EXPORT_SYMBOL(remap_vmalloc_range_partial);
+
+/**
+ * remap_vmalloc_range - map vmalloc pages to userspace
+ * @vma: vma to cover (map full range of vma)
+ * @addr: vmalloc memory
+ * @pgoff: number of pages into addr before first page to map
+ *
+ * Returns: 0 for success, -Exxx on failure
+ *
+ * This function checks that addr is a valid vmalloc'ed area, and
+ * that it is big enough to cover the vma. Will return failure if
+ * that criteria isn't met.
+ *
+ * Similar to remap_pfn_range() (see mm/memory.c)
+ */
+int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
+ unsigned long pgoff)
+{
+ return remap_vmalloc_range_partial(vma, vma->vm_start,
+ addr + (pgoff << PAGE_SHIFT),
+ vma->vm_end - vma->vm_start);
+}
EXPORT_SYMBOL(remap_vmalloc_range);
/*
* Implement a stub for vmalloc_sync_all() if the architecture chose not to
* have one.
*/
-void __attribute__((weak)) vmalloc_sync_all(void)
+void __weak vmalloc_sync_all(void)
{
}
@@ -2512,8 +2496,8 @@ found:
/* insert all vm's */
for (area = 0; area < nr_vms; area++)
- insert_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
- pcpu_get_vm_areas);
+ setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
+ pcpu_get_vm_areas);
kfree(vas);
return vms;
@@ -2592,9 +2576,9 @@ static void show_numa_info(struct seq_file *m, struct vm_struct *v)
if (!counters)
return;
- /* Pair with smp_wmb() in clear_vm_unlist() */
+ /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
smp_rmb();
- if (v->flags & VM_UNLIST)
+ if (v->flags & VM_UNINITIALIZED)
return;
memset(counters, 0, nr_node_ids * sizeof(unsigned int));
@@ -2613,15 +2597,12 @@ static int s_show(struct seq_file *m, void *p)
struct vmap_area *va = p;
struct vm_struct *v;
- if (va->flags & (VM_LAZY_FREE | VM_LAZY_FREEING))
- return 0;
-
- if (!(va->flags & VM_VM_AREA)) {
- seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
- (void *)va->va_start, (void *)va->va_end,
- va->va_end - va->va_start);
+ /*
+	 * s_show can race with remove_vm_area(): !VM_VM_AREA means the
+	 * vmap area is being torn down or belongs to a vm_map_ram allocation.
+ */
+ if (!(va->flags & VM_VM_AREA))
return 0;
- }
v = va->vm;
diff --git a/mm/vmpressure.c b/mm/vmpressure.c
index 736a6011c2c8..d4042e75f7c7 100644
--- a/mm/vmpressure.c
+++ b/mm/vmpressure.c
@@ -19,6 +19,7 @@
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/eventfd.h>
+#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/printk.h>
#include <linux/vmpressure.h>
@@ -74,15 +75,10 @@ static struct vmpressure *work_to_vmpressure(struct work_struct *work)
return container_of(work, struct vmpressure, work);
}
-static struct vmpressure *cg_to_vmpressure(struct cgroup *cg)
-{
- return css_to_vmpressure(cgroup_subsys_state(cg, mem_cgroup_subsys_id));
-}
-
static struct vmpressure *vmpressure_parent(struct vmpressure *vmpr)
{
- struct cgroup *cg = vmpressure_to_css(vmpr)->cgroup;
- struct mem_cgroup *memcg = mem_cgroup_from_cont(cg);
+ struct cgroup_subsys_state *css = vmpressure_to_css(vmpr);
+ struct mem_cgroup *memcg = mem_cgroup_from_css(css);
memcg = parent_mem_cgroup(memcg);
if (!memcg)
@@ -180,12 +176,12 @@ static void vmpressure_work_fn(struct work_struct *work)
if (!vmpr->scanned)
return;
- mutex_lock(&vmpr->sr_lock);
+ spin_lock(&vmpr->sr_lock);
scanned = vmpr->scanned;
reclaimed = vmpr->reclaimed;
vmpr->scanned = 0;
vmpr->reclaimed = 0;
- mutex_unlock(&vmpr->sr_lock);
+ spin_unlock(&vmpr->sr_lock);
do {
if (vmpressure_event(vmpr, scanned, reclaimed))
@@ -240,13 +236,13 @@ void vmpressure(gfp_t gfp, struct mem_cgroup *memcg,
if (!scanned)
return;
- mutex_lock(&vmpr->sr_lock);
+ spin_lock(&vmpr->sr_lock);
vmpr->scanned += scanned;
vmpr->reclaimed += reclaimed;
scanned = vmpr->scanned;
- mutex_unlock(&vmpr->sr_lock);
+ spin_unlock(&vmpr->sr_lock);
- if (scanned < vmpressure_win || work_pending(&vmpr->work))
+ if (scanned < vmpressure_win)
return;
schedule_work(&vmpr->work);
}
@@ -283,8 +279,7 @@ void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio)
/**
* vmpressure_register_event() - Bind vmpressure notifications to an eventfd
- * @cg: cgroup that is interested in vmpressure notifications
- * @cft: cgroup control files handle
+ * @memcg: memcg that is interested in vmpressure notifications
* @eventfd: eventfd context to link notifications with
* @args: event arguments (used to set up a pressure level threshold)
*
@@ -294,14 +289,12 @@ void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio)
* threshold (one of vmpressure_str_levels, i.e. "low", "medium", or
* "critical").
*
- * This function should not be used directly, just pass it to (struct
- * cftype).register_event, and then cgroup core will handle everything by
- * itself.
+ * To be used as memcg event method.
*/
-int vmpressure_register_event(struct cgroup *cg, struct cftype *cft,
+int vmpressure_register_event(struct mem_cgroup *memcg,
struct eventfd_ctx *eventfd, const char *args)
{
- struct vmpressure *vmpr = cg_to_vmpressure(cg);
+ struct vmpressure *vmpr = memcg_to_vmpressure(memcg);
struct vmpressure_event *ev;
int level;
@@ -329,22 +322,19 @@ int vmpressure_register_event(struct cgroup *cg, struct cftype *cft,
/**
* vmpressure_unregister_event() - Unbind eventfd from vmpressure
- * @cg: cgroup handle
- * @cft: cgroup control files handle
+ * @memcg: memcg handle
* @eventfd: eventfd context that was used to link vmpressure with the @cg
*
* This function does internal manipulations to detach the @eventfd from
* the vmpressure notifications, and then frees internal resources
* associated with the @eventfd (but the @eventfd itself is not freed).
*
- * This function should not be used directly, just pass it to (struct
- * cftype).unregister_event, and then cgroup core will handle everything
- * by itself.
+ * To be used as memcg event method.
*/
-void vmpressure_unregister_event(struct cgroup *cg, struct cftype *cft,
+void vmpressure_unregister_event(struct mem_cgroup *memcg,
struct eventfd_ctx *eventfd)
{
- struct vmpressure *vmpr = cg_to_vmpressure(cg);
+ struct vmpressure *vmpr = memcg_to_vmpressure(memcg);
struct vmpressure_event *ev;
mutex_lock(&vmpr->events_lock);
@@ -367,8 +357,24 @@ void vmpressure_unregister_event(struct cgroup *cg, struct cftype *cft,
*/
void vmpressure_init(struct vmpressure *vmpr)
{
- mutex_init(&vmpr->sr_lock);
+ spin_lock_init(&vmpr->sr_lock);
mutex_init(&vmpr->events_lock);
INIT_LIST_HEAD(&vmpr->events);
INIT_WORK(&vmpr->work, vmpressure_work_fn);
}
+
+/**
+ * vmpressure_cleanup() - shuts down vmpressure control structure
+ * @vmpr: Structure to be cleaned up
+ *
+ * This function should be called before the structure in which it is
+ * embedded is cleaned up.
+ */
+void vmpressure_cleanup(struct vmpressure *vmpr)
+{
+ /*
+ * Make sure there is no pending work before eventfd infrastructure
+ * goes away.
+ */
+ flush_work(&vmpr->work);
+}
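The scanned/reclaimed counters above are accumulated under the (now spin) sr_lock and only reported once a full window has been scanned. A standalone C model of that windowing, with an arbitrary window size standing in for vmpressure_win, might be:

#include <stdio.h>

#define WINDOW 512	/* arbitrary stand-in for vmpressure_win */

static unsigned long acc_scanned, acc_reclaimed;

/* Model of vmpressure(): accumulate per-pass counts, report once per window. */
static void vmpressure_model(unsigned long scanned, unsigned long reclaimed)
{
	acc_scanned += scanned;		/* done under vmpr->sr_lock in the kernel */
	acc_reclaimed += reclaimed;
	if (acc_scanned < WINDOW)
		return;
	/* the work item snapshots and zeroes the counters before notifying */
	printf("window: scanned=%lu reclaimed=%lu\n", acc_scanned, acc_reclaimed);
	acc_scanned = acc_reclaimed = 0;
}

int main(void)
{
	for (int i = 0; i < 12; i++)
		vmpressure_model(100, 60);
	return 0;
}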
diff --git a/mm/vmscan.c b/mm/vmscan.c
index fa6a85378ee4..32c661d66a45 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -48,6 +48,7 @@
#include <asm/div64.h>
#include <linux/swapops.h>
+#include <linux/balloon_compaction.h>
#include "internal.h"
@@ -146,6 +147,25 @@ static bool global_reclaim(struct scan_control *sc)
}
#endif
+static unsigned long zone_reclaimable_pages(struct zone *zone)
+{
+ int nr;
+
+ nr = zone_page_state(zone, NR_ACTIVE_FILE) +
+ zone_page_state(zone, NR_INACTIVE_FILE);
+
+ if (get_nr_swap_pages() > 0)
+ nr += zone_page_state(zone, NR_ACTIVE_ANON) +
+ zone_page_state(zone, NR_INACTIVE_ANON);
+
+ return nr;
+}
+
+bool zone_reclaimable(struct zone *zone)
+{
+ return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
+}
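The new zone_reclaimable() test above is a simple ratio check: give up on a zone once it has been scanned six times over without progress. A standalone model of the heuristic (not kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Model of zone_reclaimable(): keep reclaiming while fewer than six times the
 * reclaimable pages have been scanned. */
static bool zone_reclaimable_model(unsigned long pages_scanned,
				   unsigned long reclaimable_pages)
{
	return pages_scanned < reclaimable_pages * 6;
}

int main(void)
{
	printf("%d\n", zone_reclaimable_model(5000, 1000));	/* 1: keep trying */
	printf("%d\n", zone_reclaimable_model(6000, 1000));	/* 0: treat as unreclaimable */
	return 0;
}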
+
static unsigned long get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
if (!mem_cgroup_disabled())
@@ -155,14 +175,31 @@ static unsigned long get_lru_size(struct lruvec *lruvec, enum lru_list lru)
}
/*
- * Add a shrinker callback to be called from the vm
+ * Add a shrinker callback to be called from the vm.
*/
-void register_shrinker(struct shrinker *shrinker)
+int register_shrinker(struct shrinker *shrinker)
{
- atomic_long_set(&shrinker->nr_in_batch, 0);
+ size_t size = sizeof(*shrinker->nr_deferred);
+
+ /*
+ * If we only have one possible node in the system anyway, save
+ * ourselves the trouble and disable NUMA aware behavior. This way we
+	 * will save memory and a little loop overhead later.
+ */
+ if (nr_node_ids == 1)
+ shrinker->flags &= ~SHRINKER_NUMA_AWARE;
+
+ if (shrinker->flags & SHRINKER_NUMA_AWARE)
+ size *= nr_node_ids;
+
+ shrinker->nr_deferred = kzalloc(size, GFP_KERNEL);
+ if (!shrinker->nr_deferred)
+ return -ENOMEM;
+
down_write(&shrinker_rwsem);
list_add_tail(&shrinker->list, &shrinker_list);
up_write(&shrinker_rwsem);
+ return 0;
}
EXPORT_SYMBOL(register_shrinker);
@@ -174,18 +211,123 @@ void unregister_shrinker(struct shrinker *shrinker)
down_write(&shrinker_rwsem);
list_del(&shrinker->list);
up_write(&shrinker_rwsem);
+ kfree(shrinker->nr_deferred);
}
EXPORT_SYMBOL(unregister_shrinker);
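With register_shrinker() now able to fail and shrinkers split into count_objects()/scan_objects() callbacks, a caller of this API would look roughly like the hypothetical sketch below; the demo_cache_* helpers are placeholders for whatever cache the shrinker owns, not real kernel functions.

/* Hypothetical NUMA-aware shrinker; demo_cache_count()/demo_cache_trim() are
 * assumed helpers belonging to the cache being shrunk. */
static unsigned long demo_count(struct shrinker *s, struct shrink_control *sc)
{
	return demo_cache_count(sc->nid);		/* 0 means nothing to do */
}

static unsigned long demo_scan(struct shrinker *s, struct shrink_control *sc)
{
	/* return the number freed, or SHRINK_STOP to end this pass */
	return demo_cache_trim(sc->nid, sc->nr_to_scan);
}

static struct shrinker demo_shrinker = {
	.count_objects	= demo_count,
	.scan_objects	= demo_scan,
	.seeks		= DEFAULT_SEEKS,
	.flags		= SHRINKER_NUMA_AWARE,
};

static int __init demo_init(void)
{
	/* register_shrinker() can now fail while allocating nr_deferred */
	return register_shrinker(&demo_shrinker);
}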
-static inline int do_shrinker_shrink(struct shrinker *shrinker,
- struct shrink_control *sc,
- unsigned long nr_to_scan)
-{
- sc->nr_to_scan = nr_to_scan;
- return (*shrinker->shrink)(shrinker, sc);
+#define SHRINK_BATCH 128
+
+static unsigned long
+shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker,
+ unsigned long nr_pages_scanned, unsigned long lru_pages)
+{
+ unsigned long freed = 0;
+ unsigned long long delta;
+ long total_scan;
+ long freeable;
+ long nr;
+ long new_nr;
+ int nid = shrinkctl->nid;
+ long batch_size = shrinker->batch ? shrinker->batch
+ : SHRINK_BATCH;
+
+ freeable = shrinker->count_objects(shrinker, shrinkctl);
+ if (freeable == 0)
+ return 0;
+
+ /*
+ * copy the current shrinker scan count into a local variable
+ * and zero it so that other concurrent shrinker invocations
+ * don't also do this scanning work.
+ */
+ nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
+
+ total_scan = nr;
+ delta = (4 * nr_pages_scanned) / shrinker->seeks;
+ delta *= freeable;
+ do_div(delta, lru_pages + 1);
+ total_scan += delta;
+ if (total_scan < 0) {
+ printk(KERN_ERR
+ "shrink_slab: %pF negative objects to delete nr=%ld\n",
+ shrinker->scan_objects, total_scan);
+ total_scan = freeable;
+ }
+
+ /*
+ * We need to avoid excessive windup on filesystem shrinkers
+ * due to large numbers of GFP_NOFS allocations causing the
+ * shrinkers to return -1 all the time. This results in a large
+ * nr being built up so when a shrink that can do some work
+ * comes along it empties the entire cache due to nr >>>
+ * freeable. This is bad for sustaining a working set in
+ * memory.
+ *
+ * Hence only allow the shrinker to scan the entire cache when
+ * a large delta change is calculated directly.
+ */
+ if (delta < freeable / 4)
+ total_scan = min(total_scan, freeable / 2);
+
+ /*
+	 * Avoid the risk of looping forever due to an overly large nr value:
+	 * never try to free more than twice the estimated number of
+ * freeable entries.
+ */
+ if (total_scan > freeable * 2)
+ total_scan = freeable * 2;
+
+ trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
+ nr_pages_scanned, lru_pages,
+ freeable, delta, total_scan);
+
+ /*
+ * Normally, we should not scan less than batch_size objects in one
+ * pass to avoid too frequent shrinker calls, but if the slab has less
+ * than batch_size objects in total and we are really tight on memory,
+ * we will try to reclaim all available objects, otherwise we can end
+ * up failing allocations although there are plenty of reclaimable
+ * objects spread over several slabs with usage less than the
+ * batch_size.
+ *
+ * We detect the "tight on memory" situations by looking at the total
+ * number of objects we want to scan (total_scan). If it is greater
+ * than the total number of objects on slab (freeable), we must be
+ * scanning at high prio and therefore should try to reclaim as much as
+ * possible.
+ */
+ while (total_scan >= batch_size ||
+ total_scan >= freeable) {
+ unsigned long ret;
+ unsigned long nr_to_scan = min(batch_size, total_scan);
+
+ shrinkctl->nr_to_scan = nr_to_scan;
+ ret = shrinker->scan_objects(shrinker, shrinkctl);
+ if (ret == SHRINK_STOP)
+ break;
+ freed += ret;
+
+ count_vm_events(SLABS_SCANNED, nr_to_scan);
+ total_scan -= nr_to_scan;
+
+ cond_resched();
+ }
+
+ /*
+ * move the unused scan count back into the shrinker in a
+ * manner that handles concurrent updates. If we exhausted the
+ * scan, there is no need to do an update.
+ */
+ if (total_scan > 0)
+ new_nr = atomic_long_add_return(total_scan,
+ &shrinker->nr_deferred[nid]);
+ else
+ new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);
+
+ trace_mm_shrink_slab_end(shrinker, freed, nr, new_nr);
+ return freed;
}
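The scan target above is built from the deferred count plus a delta proportional to page-reclaim effort, then clamped against windup. A small standalone program modelling just that arithmetic (DEFAULT_SEEKS is 2 in the kernel; the sample numbers are purely illustrative):

#include <stdio.h>

/* Model of the scan-target arithmetic in shrink_slab_node(). */
static long scan_target(long deferred, unsigned long nr_pages_scanned,
			unsigned long lru_pages, long freeable, int seeks)
{
	unsigned long long delta;
	long total_scan;

	delta = (4ULL * nr_pages_scanned) / seeks;
	delta *= freeable;
	delta /= lru_pages + 1;
	total_scan = deferred + delta;

	/* anti-windup: only empty the whole cache on a genuinely large delta */
	if (delta < (unsigned long long)(freeable / 4) &&
	    total_scan > freeable / 2)
		total_scan = freeable / 2;
	/* never try to free more than twice the freeable estimate */
	if (total_scan > freeable * 2)
		total_scan = freeable * 2;
	return total_scan;
}

int main(void)
{
	printf("%ld\n", scan_target(0, 1024, 100000, 5000, 2));	/* small delta */
	printf("%ld\n", scan_target(20000, 32, 100000, 5000, 2));	/* windup capped */
	return 0;
}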
-#define SHRINK_BATCH 128
/*
* Call the shrink functions to age shrinkable caches
*
@@ -205,115 +347,46 @@ static inline int do_shrinker_shrink(struct shrinker *shrinker,
*
* Returns the number of slab objects which we shrunk.
*/
-unsigned long shrink_slab(struct shrink_control *shrink,
+unsigned long shrink_slab(struct shrink_control *shrinkctl,
unsigned long nr_pages_scanned,
unsigned long lru_pages)
{
struct shrinker *shrinker;
- unsigned long ret = 0;
+ unsigned long freed = 0;
if (nr_pages_scanned == 0)
nr_pages_scanned = SWAP_CLUSTER_MAX;
if (!down_read_trylock(&shrinker_rwsem)) {
- /* Assume we'll be able to shrink next time */
- ret = 1;
+ /*
+ * If we would return 0, our callers would understand that we
+ * have nothing else to shrink and give up trying. By returning
+ * 1 we keep it going and assume we'll be able to shrink next
+ * time.
+ */
+ freed = 1;
goto out;
}
list_for_each_entry(shrinker, &shrinker_list, list) {
- unsigned long long delta;
- long total_scan;
- long max_pass;
- int shrink_ret = 0;
- long nr;
- long new_nr;
- long batch_size = shrinker->batch ? shrinker->batch
- : SHRINK_BATCH;
-
- max_pass = do_shrinker_shrink(shrinker, shrink, 0);
- if (max_pass <= 0)
+ if (!(shrinker->flags & SHRINKER_NUMA_AWARE)) {
+ shrinkctl->nid = 0;
+ freed += shrink_slab_node(shrinkctl, shrinker,
+ nr_pages_scanned, lru_pages);
continue;
-
- /*
- * copy the current shrinker scan count into a local variable
- * and zero it so that other concurrent shrinker invocations
- * don't also do this scanning work.
- */
- nr = atomic_long_xchg(&shrinker->nr_in_batch, 0);
-
- total_scan = nr;
- delta = (4 * nr_pages_scanned) / shrinker->seeks;
- delta *= max_pass;
- do_div(delta, lru_pages + 1);
- total_scan += delta;
- if (total_scan < 0) {
- printk(KERN_ERR "shrink_slab: %pF negative objects to "
- "delete nr=%ld\n",
- shrinker->shrink, total_scan);
- total_scan = max_pass;
}
- /*
- * We need to avoid excessive windup on filesystem shrinkers
- * due to large numbers of GFP_NOFS allocations causing the
- * shrinkers to return -1 all the time. This results in a large
- * nr being built up so when a shrink that can do some work
- * comes along it empties the entire cache due to nr >>>
- * max_pass. This is bad for sustaining a working set in
- * memory.
- *
- * Hence only allow the shrinker to scan the entire cache when
- * a large delta change is calculated directly.
- */
- if (delta < max_pass / 4)
- total_scan = min(total_scan, max_pass / 2);
-
- /*
- * Avoid risking looping forever due to too large nr value:
- * never try to free more than twice the estimate number of
- * freeable entries.
- */
- if (total_scan > max_pass * 2)
- total_scan = max_pass * 2;
-
- trace_mm_shrink_slab_start(shrinker, shrink, nr,
- nr_pages_scanned, lru_pages,
- max_pass, delta, total_scan);
+ for_each_node_mask(shrinkctl->nid, shrinkctl->nodes_to_scan) {
+ if (node_online(shrinkctl->nid))
+ freed += shrink_slab_node(shrinkctl, shrinker,
+ nr_pages_scanned, lru_pages);
- while (total_scan >= batch_size) {
- int nr_before;
-
- nr_before = do_shrinker_shrink(shrinker, shrink, 0);
- shrink_ret = do_shrinker_shrink(shrinker, shrink,
- batch_size);
- if (shrink_ret == -1)
- break;
- if (shrink_ret < nr_before)
- ret += nr_before - shrink_ret;
- count_vm_events(SLABS_SCANNED, batch_size);
- total_scan -= batch_size;
-
- cond_resched();
}
-
- /*
- * move the unused scan count back into the shrinker in a
- * manner that handles concurrent updates. If we exhausted the
- * scan, there is no need to do an update.
- */
- if (total_scan > 0)
- new_nr = atomic_long_add_return(total_scan,
- &shrinker->nr_in_batch);
- else
- new_nr = atomic_long_read(&shrinker->nr_in_batch);
-
- trace_mm_shrink_slab_end(shrinker, shrink_ret, nr, new_nr);
}
up_read(&shrinker_rwsem);
out:
cond_resched();
- return ret;
+ return freed;
}
static inline int is_page_cache_freeable(struct page *page)
@@ -450,7 +523,8 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
* Same as remove_mapping, but if the page is removed from the mapping, it
* gets returned with a refcount of 0.
*/
-static int __remove_mapping(struct address_space *mapping, struct page *page)
+static int __remove_mapping(struct address_space *mapping, struct page *page,
+ bool reclaimed)
{
BUG_ON(!PageLocked(page));
BUG_ON(mapping != page_mapping(page));
@@ -496,10 +570,23 @@ static int __remove_mapping(struct address_space *mapping, struct page *page)
swapcache_free(swap, page);
} else {
void (*freepage)(struct page *);
+ void *shadow = NULL;
freepage = mapping->a_ops->freepage;
-
- __delete_from_page_cache(page);
+ /*
+ * Remember a shadow entry for reclaimed file cache in
+ * order to detect refaults, thus thrashing, later on.
+ *
+ * But don't store shadows in an address space that is
+	 * already exiting. This is not just an optimization,
+ * inode reclaim needs to empty out the radix tree or
+ * the nodes are lost. Don't plant shadows behind its
+ * back.
+ */
+ if (reclaimed && page_is_file_cache(page) &&
+ !mapping_exiting(mapping))
+ shadow = workingset_eviction(mapping, page);
+ __delete_from_page_cache(page, shadow);
spin_unlock_irq(&mapping->tree_lock);
mem_cgroup_uncharge_cache_page(page);
@@ -522,7 +609,7 @@ cannot_free:
*/
int remove_mapping(struct address_space *mapping, struct page *page)
{
- if (__remove_mapping(mapping, page)) {
+ if (__remove_mapping(mapping, page, false)) {
/*
* Unfreezing the refcount with 1 rather than 2 effectively
* drops the pagecache ref for us without requiring another
@@ -545,11 +632,10 @@ int remove_mapping(struct address_space *mapping, struct page *page)
*/
void putback_lru_page(struct page *page)
{
- int lru;
- int active = !!TestClearPageActive(page);
+ bool is_unevictable;
int was_unevictable = PageUnevictable(page);
- VM_BUG_ON(PageLRU(page));
+ VM_BUG_ON_PAGE(PageLRU(page), page);
redo:
ClearPageUnevictable(page);
@@ -561,14 +647,14 @@ redo:
* unevictable page on [in]active list.
* We know how to handle that.
*/
- lru = active + page_lru_base_type(page);
- lru_cache_add_lru(page, lru);
+ is_unevictable = false;
+ lru_cache_add(page);
} else {
/*
* Put unevictable pages directly on zone's unevictable
* list.
*/
- lru = LRU_UNEVICTABLE;
+ is_unevictable = true;
add_page_to_unevictable_list(page);
/*
* When racing with an mlock or AS_UNEVICTABLE clearing
@@ -588,7 +674,7 @@ redo:
* page is on unevictable list, it never be freed. To avoid that,
* check after we added it to the list, again.
*/
- if (lru == LRU_UNEVICTABLE && page_evictable(page)) {
+ if (is_unevictable && page_evictable(page)) {
if (!isolate_lru_page(page)) {
put_page(page);
goto redo;
@@ -599,9 +685,9 @@ redo:
*/
}
- if (was_unevictable && lru != LRU_UNEVICTABLE)
+ if (was_unevictable && !is_unevictable)
count_vm_event(UNEVICTABLE_PGRESCUED);
- else if (!was_unevictable && lru == LRU_UNEVICTABLE)
+ else if (!was_unevictable && is_unevictable)
count_vm_event(UNEVICTABLE_PGCULLED);
put_page(page); /* drop ref from isolate */
@@ -669,6 +755,35 @@ static enum page_references page_check_references(struct page *page,
return PAGEREF_RECLAIM;
}
+/* Check if a page is dirty or under writeback */
+static void page_check_dirty_writeback(struct page *page,
+ bool *dirty, bool *writeback)
+{
+ struct address_space *mapping;
+
+ /*
+ * Anonymous pages are not handled by flushers and must be written
+ * from reclaim context. Do not stall reclaim based on them
+ */
+ if (!page_is_file_cache(page)) {
+ *dirty = false;
+ *writeback = false;
+ return;
+ }
+
+ /* By default assume that the page flags are accurate */
+ *dirty = PageDirty(page);
+ *writeback = PageWriteback(page);
+
+ /* Verify dirty/writeback state if the filesystem supports it */
+ if (!page_has_private(page))
+ return;
+
+ mapping = page_mapping(page);
+ if (mapping && mapping->a_ops->is_dirty_writeback)
+ mapping->a_ops->is_dirty_writeback(page, dirty, writeback);
+}
+
/*
* shrink_page_list() returns the number of reclaimed pages
*/
@@ -677,16 +792,21 @@ static unsigned long shrink_page_list(struct list_head *page_list,
struct scan_control *sc,
enum ttu_flags ttu_flags,
unsigned long *ret_nr_dirty,
+ unsigned long *ret_nr_unqueued_dirty,
+ unsigned long *ret_nr_congested,
unsigned long *ret_nr_writeback,
+ unsigned long *ret_nr_immediate,
bool force_reclaim)
{
LIST_HEAD(ret_pages);
LIST_HEAD(free_pages);
int pgactivate = 0;
+ unsigned long nr_unqueued_dirty = 0;
unsigned long nr_dirty = 0;
unsigned long nr_congested = 0;
unsigned long nr_reclaimed = 0;
unsigned long nr_writeback = 0;
+ unsigned long nr_immediate = 0;
cond_resched();
@@ -696,6 +816,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
struct page *page;
int may_enter_fs;
enum page_references references = PAGEREF_RECLAIM_CLEAN;
+ bool dirty, writeback;
cond_resched();
@@ -705,8 +826,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
if (!trylock_page(page))
goto keep;
- VM_BUG_ON(PageActive(page));
- VM_BUG_ON(page_zone(page) != zone);
+ VM_BUG_ON_PAGE(PageActive(page), page);
+ VM_BUG_ON_PAGE(page_zone(page) != zone, page);
sc->nr_scanned++;
@@ -723,25 +844,77 @@ static unsigned long shrink_page_list(struct list_head *page_list,
may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
+ /*
+ * The number of dirty pages determines if a zone is marked
+ * reclaim_congested which affects wait_iff_congested. kswapd
+ * will stall and start writing pages if the tail of the LRU
+ * is all dirty unqueued pages.
+ */
+ page_check_dirty_writeback(page, &dirty, &writeback);
+ if (dirty || writeback)
+ nr_dirty++;
+
+ if (dirty && !writeback)
+ nr_unqueued_dirty++;
+
+ /*
+	 * Treat this page as congested if the underlying BDI is congested, or if
+ * pages are cycling through the LRU so quickly that the
+ * pages marked for immediate reclaim are making it to the
+ * end of the LRU a second time.
+ */
+ mapping = page_mapping(page);
+ if ((mapping && bdi_write_congested(mapping->backing_dev_info)) ||
+ (writeback && PageReclaim(page)))
+ nr_congested++;
+
+ /*
+ * If a page at the tail of the LRU is under writeback, there
+ * are three cases to consider.
+ *
+ * 1) If reclaim is encountering an excessive number of pages
+ * under writeback and this page is both under writeback and
+ * PageReclaim then it indicates that pages are being queued
+ * for IO but are being recycled through the LRU before the
+ * IO can complete. Waiting on the page itself risks an
+ * indefinite stall if it is impossible to writeback the
+ * page due to IO error or disconnected storage so instead
+ * note that the LRU is being scanned too quickly and the
+ * caller can stall after page list has been processed.
+ *
+ * 2) Global reclaim encounters a page, memcg encounters a
+ * page that is not marked for immediate reclaim or
+ * the caller does not have __GFP_IO. In this case mark
+ * the page for immediate reclaim and continue scanning.
+ *
+ * __GFP_IO is checked because a loop driver thread might
+ * enter reclaim, and deadlock if it waits on a page for
+ * which it is needed to do the write (loop masks off
+ * __GFP_IO|__GFP_FS for this reason); but more thought
+ * would probably show more reasons.
+ *
+ * Don't require __GFP_FS, since we're not going into the
+ * FS, just waiting on its writeback completion. Worryingly,
+ * ext4 gfs2 and xfs allocate pages with
+ * grab_cache_page_write_begin(,,AOP_FLAG_NOFS), so testing
+ * may_enter_fs here is liable to OOM on them.
+ *
+ * 3) memcg encounters a page that is not already marked
+ * PageReclaim. memcg does not have any dirty pages
+ * throttling so we could easily OOM just because too many
+ * pages are in writeback and there is nothing else to
+ * reclaim. Wait for the writeback to complete.
+ */
if (PageWriteback(page)) {
- /*
- * memcg doesn't have any dirty pages throttling so we
- * could easily OOM just because too many pages are in
- * writeback and there is nothing else to reclaim.
- *
- * Check __GFP_IO, certainly because a loop driver
- * thread might enter reclaim, and deadlock if it waits
- * on a page for which it is needed to do the write
- * (loop masks off __GFP_IO|__GFP_FS for this reason);
- * but more thought would probably show more reasons.
- *
- * Don't require __GFP_FS, since we're not going into
- * the FS, just waiting on its writeback completion.
- * Worryingly, ext4 gfs2 and xfs allocate pages with
- * grab_cache_page_write_begin(,,AOP_FLAG_NOFS), so
- * testing may_enter_fs here is liable to OOM on them.
- */
- if (global_reclaim(sc) ||
+ /* Case 1 above */
+ if (current_is_kswapd() &&
+ PageReclaim(page) &&
+ zone_is_reclaim_writeback(zone)) {
+ nr_immediate++;
+ goto keep_locked;
+
+ /* Case 2 above */
+ } else if (global_reclaim(sc) ||
!PageReclaim(page) || !(sc->gfp_mask & __GFP_IO)) {
/*
* This is slightly racy - end_page_writeback()
@@ -756,9 +929,13 @@ static unsigned long shrink_page_list(struct list_head *page_list,
*/
SetPageReclaim(page);
nr_writeback++;
+
goto keep_locked;
+
+ /* Case 3 above */
+ } else {
+ wait_on_page_writeback(page);
}
- wait_on_page_writeback(page);
}
if (!force_reclaim)
@@ -784,9 +961,10 @@ static unsigned long shrink_page_list(struct list_head *page_list,
if (!add_to_swap(page, page_list))
goto activate_locked;
may_enter_fs = 1;
- }
- mapping = page_mapping(page);
+ /* Adding to swap updated mapping */
+ mapping = page_mapping(page);
+ }
/*
* The page is mapped into the page tables of one or more
@@ -806,16 +984,14 @@ static unsigned long shrink_page_list(struct list_head *page_list,
}
if (PageDirty(page)) {
- nr_dirty++;
-
/*
* Only kswapd can writeback filesystem pages to
- * avoid risk of stack overflow but do not writeback
- * unless under significant pressure.
+ * avoid risk of stack overflow but only writeback
+ * if many dirty pages have been encountered.
*/
if (page_is_file_cache(page) &&
(!current_is_kswapd() ||
- sc->priority >= DEF_PRIORITY - 2)) {
+ !zone_is_reclaim_dirty(zone))) {
/*
* Immediately reclaim when written back.
* Similar in principal to deactivate_page()
@@ -838,7 +1014,6 @@ static unsigned long shrink_page_list(struct list_head *page_list,
/* Page is dirty, try to write it out here */
switch (pageout(page, mapping, sc)) {
case PAGE_KEEP:
- nr_congested++;
goto keep_locked;
case PAGE_ACTIVATE:
goto activate_locked;
@@ -904,7 +1079,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
}
}
- if (!mapping || !__remove_mapping(mapping, page))
+ if (!mapping || !__remove_mapping(mapping, page, true))
goto keep_locked;
/*
@@ -936,32 +1111,26 @@ activate_locked:
/* Not a candidate for swapping, so reclaim swap space. */
if (PageSwapCache(page) && vm_swap_full())
try_to_free_swap(page);
- VM_BUG_ON(PageActive(page));
+ VM_BUG_ON_PAGE(PageActive(page), page);
SetPageActive(page);
pgactivate++;
keep_locked:
unlock_page(page);
keep:
list_add(&page->lru, &ret_pages);
- VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
+ VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page);
}
- /*
- * Tag a zone as congested if all the dirty pages encountered were
- * backed by a congested BDI. In this case, reclaimers should just
- * back off and wait for congestion to clear because further reclaim
- * will encounter the same problem
- */
- if (nr_dirty && nr_dirty == nr_congested && global_reclaim(sc))
- zone_set_flag(zone, ZONE_CONGESTED);
-
free_hot_cold_page_list(&free_pages, 1);
list_splice(&ret_pages, page_list);
count_vm_events(PGACTIVATE, pgactivate);
mem_cgroup_uncharge_end();
*ret_nr_dirty += nr_dirty;
+ *ret_nr_congested += nr_congested;
+ *ret_nr_unqueued_dirty += nr_unqueued_dirty;
*ret_nr_writeback += nr_writeback;
+ *ret_nr_immediate += nr_immediate;
return nr_reclaimed;
}
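The three PageWriteback cases documented above boil down to a small decision table. A standalone model of that logic, with booleans standing in for the kernel predicates:

#include <stdbool.h>
#include <stdio.h>

enum wb_action { WB_STALL_LATER, WB_MARK_RECLAIM, WB_WAIT };

/* Model of the PageWriteback handling: the booleans stand in for
 * current_is_kswapd(), PageReclaim(), zone_is_reclaim_writeback(),
 * global_reclaim() and __GFP_IO respectively. */
static enum wb_action writeback_action(bool is_kswapd, bool page_reclaim,
				       bool zone_writeback, bool global,
				       bool may_do_io)
{
	if (is_kswapd && page_reclaim && zone_writeback)
		return WB_STALL_LATER;	/* case 1: count as nr_immediate, stall later */
	if (global || !page_reclaim || !may_do_io)
		return WB_MARK_RECLAIM;	/* case 2: tag for reclaim, keep scanning */
	return WB_WAIT;			/* case 3: memcg waits for writeback */
}

int main(void)
{
	printf("%d\n", writeback_action(true, true, true, true, true));	/* 0 */
	printf("%d\n", writeback_action(false, true, false, false, true));	/* 2 */
	return 0;
}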
@@ -973,22 +1142,23 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
.priority = DEF_PRIORITY,
.may_unmap = 1,
};
- unsigned long ret, dummy1, dummy2;
+ unsigned long ret, dummy1, dummy2, dummy3, dummy4, dummy5;
struct page *page, *next;
LIST_HEAD(clean_pages);
list_for_each_entry_safe(page, next, page_list, lru) {
- if (page_is_file_cache(page) && !PageDirty(page)) {
+ if (page_is_file_cache(page) && !PageDirty(page) &&
+ !isolated_balloon_page(page)) {
ClearPageActive(page);
list_move(&page->lru, &clean_pages);
}
}
ret = shrink_page_list(&clean_pages, zone, &sc,
- TTU_UNMAP|TTU_IGNORE_ACCESS,
- &dummy1, &dummy2, true);
+ TTU_UNMAP|TTU_IGNORE_ACCESS,
+ &dummy1, &dummy2, &dummy3, &dummy4, &dummy5, true);
list_splice(&clean_pages, page_list);
- __mod_zone_page_state(zone, NR_ISOLATED_FILE, -ret);
+ mod_zone_page_state(zone, NR_ISOLATED_FILE, -ret);
return ret;
}
@@ -1102,7 +1272,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
page = lru_to_page(src);
prefetchw_prev_lru_page(page, src, flags);
- VM_BUG_ON(!PageLRU(page));
+ VM_BUG_ON_PAGE(!PageLRU(page), page);
switch (__isolate_lru_page(page, mode)) {
case 0:
@@ -1157,7 +1327,7 @@ int isolate_lru_page(struct page *page)
{
int ret = -EBUSY;
- VM_BUG_ON(!page_count(page));
+ VM_BUG_ON_PAGE(!page_count(page), page);
if (PageLRU(page)) {
struct zone *zone = page_zone(page);
@@ -1228,7 +1398,7 @@ putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
struct page *page = lru_to_page(page_list);
int lru;
- VM_BUG_ON(PageLRU(page));
+ VM_BUG_ON_PAGE(PageLRU(page), page);
list_del(&page->lru);
if (unlikely(!page_evictable(page))) {
spin_unlock_irq(&zone->lru_lock);
@@ -1281,7 +1451,10 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
unsigned long nr_reclaimed = 0;
unsigned long nr_taken;
unsigned long nr_dirty = 0;
+ unsigned long nr_congested = 0;
+ unsigned long nr_unqueued_dirty = 0;
unsigned long nr_writeback = 0;
+ unsigned long nr_immediate = 0;
isolate_mode_t isolate_mode = 0;
int file = is_file_lru(lru);
struct zone *zone = lruvec_zone(lruvec);
@@ -1323,7 +1496,9 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
return 0;
nr_reclaimed = shrink_page_list(&page_list, zone, sc, TTU_UNMAP,
- &nr_dirty, &nr_writeback, false);
+ &nr_dirty, &nr_unqueued_dirty, &nr_congested,
+ &nr_writeback, &nr_immediate,
+ false);
spin_lock_irq(&zone->lru_lock);
@@ -1356,21 +1531,51 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
* as there is no guarantee the dirtying process is throttled in the
* same way balance_dirty_pages() manages.
*
- * This scales the number of dirty pages that must be under writeback
- * before throttling depending on priority. It is a simple backoff
- * function that has the most effect in the range DEF_PRIORITY to
- * DEF_PRIORITY-2 which is the priority reclaim is considered to be
- * in trouble and reclaim is considered to be in trouble.
- *
- * DEF_PRIORITY 100% isolated pages must be PageWriteback to throttle
- * DEF_PRIORITY-1 50% must be PageWriteback
- * DEF_PRIORITY-2 25% must be PageWriteback, kswapd in trouble
- * ...
- * DEF_PRIORITY-6 For SWAP_CLUSTER_MAX isolated pages, throttle if any
- * isolated page is PageWriteback
+	 * Once a zone is flagged ZONE_WRITEBACK, kswapd will count the number
+	 * of pages under writeback that are flagged for immediate reclaim and
+	 * stall if any are encountered in the nr_immediate check below.
+ */
+ if (nr_writeback && nr_writeback == nr_taken)
+ zone_set_flag(zone, ZONE_WRITEBACK);
+
+ /*
+ * memcg will stall in page writeback so only consider forcibly
+ * stalling for global reclaim
+ */
+ if (global_reclaim(sc)) {
+ /*
+ * Tag a zone as congested if all the dirty pages scanned were
+ * backed by a congested BDI and wait_iff_congested will stall.
+ */
+ if (nr_dirty && nr_dirty == nr_congested)
+ zone_set_flag(zone, ZONE_CONGESTED);
+
+ /*
+ * If dirty pages are scanned that are not queued for IO, it
+ * implies that flushers are not keeping up. In this case, flag
+ * the zone ZONE_TAIL_LRU_DIRTY and kswapd will start writing
+ * pages from reclaim context. It will forcibly stall in the
+ * next check.
+ */
+ if (nr_unqueued_dirty == nr_taken)
+ zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY);
+
+ /*
+	 * In addition, if kswapd scans pages marked for
+ * immediate reclaim and under writeback (nr_immediate), it
+ * implies that pages are cycling through the LRU faster than
+ * they are written so also forcibly stall.
+ */
+ if (nr_unqueued_dirty == nr_taken || nr_immediate)
+ congestion_wait(BLK_RW_ASYNC, HZ/10);
+ }
+
+ /*
+ * Stall direct reclaim for IO completions if underlying BDIs or zone
+ * is congested. Allow kswapd to continue until it starts encountering
+ * unqueued dirty pages or cycling through the LRU too quickly.
*/
- if (nr_writeback && nr_writeback >=
- (nr_taken >> (DEF_PRIORITY - sc->priority)))
+ if (!sc->hibernation_mode && !current_is_kswapd())
wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);
trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,
@@ -1413,7 +1618,7 @@ static void move_active_pages_to_lru(struct lruvec *lruvec,
page = lru_to_page(list);
lruvec = mem_cgroup_page_lruvec(page, zone);
- VM_BUG_ON(PageLRU(page));
+ VM_BUG_ON_PAGE(PageLRU(page), page);
SetPageLRU(page);
nr_pages = hpage_nr_pages(page);
@@ -1657,7 +1862,7 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
struct zone *zone = lruvec_zone(lruvec);
unsigned long anon_prio, file_prio;
enum scan_balance scan_balance;
- unsigned long anon, file, free;
+ unsigned long anon, file;
bool force_scan = false;
unsigned long ap, fp;
enum lru_list lru;
@@ -1672,7 +1877,7 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
* latencies, so it's better to scan a minimum amount there as
* well.
*/
- if (current_is_kswapd() && zone->all_unreclaimable)
+ if (current_is_kswapd() && !zone_reclaimable(zone))
force_scan = true;
if (!global_reclaim(sc))
force_scan = true;
@@ -1711,13 +1916,17 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
get_lru_size(lruvec, LRU_INACTIVE_FILE);
/*
- * If it's foreseeable that reclaiming the file cache won't be
- * enough to get the zone back into a desirable shape, we have
- * to swap. Better start now and leave the - probably heavily
- * thrashing - remaining file pages alone.
+ * Prevent the reclaimer from falling into the cache trap: as
+ * cache pages start out inactive, every cache fault will tip
+ * the scan balance towards the file LRU. And as the file LRU
+ * shrinks, so does the window for rotation from references.
+ * This means we have a runaway feedback loop where a tiny
+ * thrashing file LRU becomes infinitely more attractive than
+ * anon pages. Try to detect this based on file LRU size.
*/
if (global_reclaim(sc)) {
- free = zone_page_state(zone, NR_FREE_PAGES);
+ unsigned long free = zone_page_state(zone, NR_FREE_PAGES);
+
if (unlikely(file + free <= high_wmark_pages(zone))) {
scan_balance = SCAN_ANON;
goto out;
@@ -1822,17 +2031,25 @@ out:
static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
{
unsigned long nr[NR_LRU_LISTS];
+ unsigned long targets[NR_LRU_LISTS];
unsigned long nr_to_scan;
enum lru_list lru;
unsigned long nr_reclaimed = 0;
unsigned long nr_to_reclaim = sc->nr_to_reclaim;
struct blk_plug plug;
+ bool scan_adjusted = false;
get_scan_count(lruvec, sc, nr);
+ /* Record the original scan target for proportional adjustments later */
+ memcpy(targets, nr, sizeof(nr));
+
blk_start_plug(&plug);
while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
nr[LRU_INACTIVE_FILE]) {
+ unsigned long nr_anon, nr_file, percentage;
+ unsigned long nr_scanned;
+
for_each_evictable_lru(lru) {
if (nr[lru]) {
nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX);
@@ -1842,17 +2059,60 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
lruvec, sc);
}
}
+
+ if (nr_reclaimed < nr_to_reclaim || scan_adjusted)
+ continue;
+
/*
- * On large memory systems, scan >> priority can become
- * really large. This is fine for the starting priority;
- * we want to put equal scanning pressure on each zone.
- * However, if the VM has a harder time of freeing pages,
- * with multiple processes reclaiming pages, the total
- * freeing target can get unreasonably large.
+ * For global direct reclaim, reclaim only the number of pages
+ * requested. Less care is taken to scan proportionally as it
+ * is more important to minimise direct reclaim stall latency
+ * than it is to properly age the LRU lists.
*/
- if (nr_reclaimed >= nr_to_reclaim &&
- sc->priority < DEF_PRIORITY)
+ if (global_reclaim(sc) && !current_is_kswapd())
break;
+
+ /*
+ * For kswapd and memcg, reclaim at least the number of pages
+ * requested. Ensure that the anon and file LRUs shrink
+ * proportionally what was requested by get_scan_count(). We
+ * stop reclaiming one LRU and reduce the amount scanning
+ * proportional to the original scan target.
+ */
+ nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE];
+ nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON];
+
+ if (nr_file > nr_anon) {
+ unsigned long scan_target = targets[LRU_INACTIVE_ANON] +
+ targets[LRU_ACTIVE_ANON] + 1;
+ lru = LRU_BASE;
+ percentage = nr_anon * 100 / scan_target;
+ } else {
+ unsigned long scan_target = targets[LRU_INACTIVE_FILE] +
+ targets[LRU_ACTIVE_FILE] + 1;
+ lru = LRU_FILE;
+ percentage = nr_file * 100 / scan_target;
+ }
+
+ /* Stop scanning the smaller of the LRU */
+ nr[lru] = 0;
+ nr[lru + LRU_ACTIVE] = 0;
+
+ /*
+ * Recalculate the other LRU scan count based on its original
+ * scan target and the percentage scanning already complete
+ */
+ lru = (lru == LRU_FILE) ? LRU_BASE : LRU_FILE;
+ nr_scanned = targets[lru] - nr[lru];
+ nr[lru] = targets[lru] * (100 - percentage) / 100;
+ nr[lru] -= min(nr[lru], nr_scanned);
+
+ lru += LRU_ACTIVE;
+ nr_scanned = targets[lru] - nr[lru];
+ nr[lru] = targets[lru] * (100 - percentage) / 100;
+ nr[lru] -= min(nr[lru], nr_scanned);
+
+ scan_adjusted = true;
}
blk_finish_plug(&plug);
sc->nr_reclaimed += nr_reclaimed;
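The proportionality logic above can be modelled in isolation: once enough pages have been reclaimed, the smaller LRU stops and the other one is rescaled so both end up scanned in proportion to their original targets. A standalone sketch that collapses the active/inactive split into one number per LRU type for brevity:

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Model of the proportional rebalancing in shrink_lruvec(). */
static void adjust(unsigned long nr[2], const unsigned long targets[2],
		   int smaller, int other)
{
	unsigned long percentage = nr[smaller] * 100 / (targets[smaller] + 1);
	unsigned long already_scanned = targets[other] - nr[other];

	nr[smaller] = 0;					/* stop the smaller LRU */
	nr[other] = targets[other] * (100 - percentage) / 100;	/* rescale the other */
	nr[other] -= MIN(nr[other], already_scanned);
}

int main(void)
{
	unsigned long targets[2] = { 1000, 4000 };	/* anon, file scan targets */
	unsigned long nr[2]      = { 600, 3600 };	/* still left to scan */

	adjust(nr, targets, 0, 1);			/* anon is the smaller LRU */
	printf("anon=%lu file=%lu\n", nr[0], nr[1]);	/* anon=0 file=1240 */
	return 0;
}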
@@ -2055,16 +2315,26 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
struct zone *zone;
unsigned long nr_soft_reclaimed;
unsigned long nr_soft_scanned;
+ unsigned long lru_pages = 0;
bool aborted_reclaim = false;
+ struct reclaim_state *reclaim_state = current->reclaim_state;
+ gfp_t orig_mask;
+ struct shrink_control shrink = {
+ .gfp_mask = sc->gfp_mask,
+ };
+ enum zone_type requested_highidx = gfp_zone(sc->gfp_mask);
/*
* If the number of buffer_heads in the machine exceeds the maximum
* allowed level, force direct reclaim to scan the highmem zone as
* highmem pages could be pinning lowmem pages storing buffer_heads
*/
+ orig_mask = sc->gfp_mask;
if (buffer_heads_over_limit)
sc->gfp_mask |= __GFP_HIGHMEM;
+ nodes_clear(shrink.nodes_to_scan);
+
for_each_zone_zonelist_nodemask(zone, z, zonelist,
gfp_zone(sc->gfp_mask), sc->nodemask) {
if (!populated_zone(zone))
@@ -2076,8 +2346,12 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
if (global_reclaim(sc)) {
if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
continue;
- if (zone->all_unreclaimable &&
- sc->priority != DEF_PRIORITY)
+
+ lru_pages += zone_reclaimable_pages(zone);
+ node_set(zone_to_nid(zone), shrink.nodes_to_scan);
+
+ if (sc->priority != DEF_PRIORITY &&
+ !zone_reclaimable(zone))
continue; /* Let kswapd poll it */
if (IS_ENABLED(CONFIG_COMPACTION)) {
/*
@@ -2089,7 +2363,8 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
* noticeable problem, like transparent huge
* page allocations.
*/
- if (compaction_ready(zone, sc)) {
+ if ((zonelist_zone_idx(z) <= requested_highidx)
+ && compaction_ready(zone, sc)) {
aborted_reclaim = true;
continue;
}
@@ -2112,12 +2387,27 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
shrink_zone(zone, sc);
}
- return aborted_reclaim;
-}
+ /*
+ * Don't shrink slabs when reclaiming memory from over limit cgroups
+ * but do shrink slab at least once when aborting reclaim for
+ * compaction to avoid unevenly scanning file/anon LRU pages over slab
+ * pages.
+ */
+ if (global_reclaim(sc)) {
+ shrink_slab(&shrink, sc->nr_scanned, lru_pages);
+ if (reclaim_state) {
+ sc->nr_reclaimed += reclaim_state->reclaimed_slab;
+ reclaim_state->reclaimed_slab = 0;
+ }
+ }
-static bool zone_reclaimable(struct zone *zone)
-{
- return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
+ /*
+ * Restore to original mask to avoid the impact on the caller if we
+ * promoted it to __GFP_HIGHMEM.
+ */
+ sc->gfp_mask = orig_mask;
+
+ return aborted_reclaim;
}
/* All zones in zonelist are unreclaimable? */
@@ -2133,7 +2423,7 @@ static bool all_unreclaimable(struct zonelist *zonelist,
continue;
if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
continue;
- if (!zone->all_unreclaimable)
+ if (zone_reclaimable(zone))
return false;
}
@@ -2157,13 +2447,9 @@ static bool all_unreclaimable(struct zonelist *zonelist,
* else, the number of pages reclaimed
*/
static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
- struct scan_control *sc,
- struct shrink_control *shrink)
+ struct scan_control *sc)
{
unsigned long total_scanned = 0;
- struct reclaim_state *reclaim_state = current->reclaim_state;
- struct zoneref *z;
- struct zone *zone;
unsigned long writeback_threshold;
bool aborted_reclaim;
@@ -2178,26 +2464,6 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
sc->nr_scanned = 0;
aborted_reclaim = shrink_zones(zonelist, sc);
- /*
- * Don't shrink slabs when reclaiming memory from
- * over limit cgroups
- */
- if (global_reclaim(sc)) {
- unsigned long lru_pages = 0;
- for_each_zone_zonelist(zone, z, zonelist,
- gfp_zone(sc->gfp_mask)) {
- if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
- continue;
-
- lru_pages += zone_reclaimable_pages(zone);
- }
-
- shrink_slab(shrink, sc->nr_scanned, lru_pages);
- if (reclaim_state) {
- sc->nr_reclaimed += reclaim_state->reclaimed_slab;
- reclaim_state->reclaimed_slab = 0;
- }
- }
total_scanned += sc->nr_scanned;
if (sc->nr_reclaimed >= sc->nr_to_reclaim)
goto out;
@@ -2222,18 +2488,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
WB_REASON_TRY_TO_FREE_PAGES);
sc->may_writepage = 1;
}
-
- /* Take a nap, wait for some writeback to complete */
- if (!sc->hibernation_mode && sc->nr_scanned &&
- sc->priority < DEF_PRIORITY - 2) {
- struct zone *preferred_zone;
-
- first_zones_zonelist(zonelist, gfp_zone(sc->gfp_mask),
- &cpuset_current_mems_allowed,
- &preferred_zone);
- wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/10);
- }
- } while (--sc->priority >= 0);
+ } while (--sc->priority >= 0 && !aborted_reclaim);
out:
delayacct_freepages_end();
@@ -2370,9 +2625,6 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
.target_mem_cgroup = NULL,
.nodemask = nodemask,
};
- struct shrink_control shrink = {
- .gfp_mask = sc.gfp_mask,
- };
/*
* Do not enter reclaim if fatal signal was delivered while throttled.
@@ -2386,7 +2638,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
sc.may_writepage,
gfp_mask);
- nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
+ nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
@@ -2453,9 +2705,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
};
- struct shrink_control shrink = {
- .gfp_mask = sc.gfp_mask,
- };
/*
* Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't
@@ -2470,7 +2719,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
sc.may_writepage,
sc.gfp_mask);
- nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
+ nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
@@ -2553,7 +2802,7 @@ static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx)
* DEF_PRIORITY. Effectively, it considers them balanced so
* they must be considered balanced here as well!
*/
- if (zone->all_unreclaimable) {
+ if (!zone_reclaimable(zone)) {
balanced_pages += zone->managed_pages;
continue;
}
@@ -2601,6 +2850,89 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
}
/*
+ * kswapd shrinks the zone by the number of pages required to reach
+ * the high watermark.
+ *
+ * Returns true if kswapd scanned at least the requested number of pages to
+ * reclaim or if the lack of progress was due to pages under writeback.
+ * This is used to determine if the scanning priority needs to be raised.
+ */
+static bool kswapd_shrink_zone(struct zone *zone,
+ int classzone_idx,
+ struct scan_control *sc,
+ unsigned long lru_pages,
+ unsigned long *nr_attempted)
+{
+ int testorder = sc->order;
+ unsigned long balance_gap;
+ struct reclaim_state *reclaim_state = current->reclaim_state;
+ struct shrink_control shrink = {
+ .gfp_mask = sc->gfp_mask,
+ };
+ bool lowmem_pressure;
+
+ /* Reclaim above the high watermark. */
+ sc->nr_to_reclaim = max(SWAP_CLUSTER_MAX, high_wmark_pages(zone));
+
+ /*
+ * Kswapd reclaims only single pages with compaction enabled. Trying
+ * too hard to reclaim until contiguous free pages have become
+ * available can hurt performance by evicting too much useful data
+ * from memory. Do not reclaim more than needed for compaction.
+ */
+ if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
+ compaction_suitable(zone, sc->order) !=
+ COMPACT_SKIPPED)
+ testorder = 0;
+
+ /*
+ * We put equal pressure on every zone, unless one zone has way too
+ * many pages free already. The "too many pages" is defined as the
+ * high wmark plus a "gap" where the gap is either the low
+ * watermark or 1% of the zone, whichever is smaller.
+ */
+ balance_gap = min(low_wmark_pages(zone),
+ (zone->managed_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
+ KSWAPD_ZONE_BALANCE_GAP_RATIO);
+
+ /*
+ * If there is no low memory pressure or the zone is balanced then no
+ * reclaim is necessary
+ */
+ lowmem_pressure = (buffer_heads_over_limit && is_highmem(zone));
+ if (!lowmem_pressure && zone_balanced(zone, testorder,
+ balance_gap, classzone_idx))
+ return true;
+
+ shrink_zone(zone, sc);
+ nodes_clear(shrink.nodes_to_scan);
+ node_set(zone_to_nid(zone), shrink.nodes_to_scan);
+
+ reclaim_state->reclaimed_slab = 0;
+ shrink_slab(&shrink, sc->nr_scanned, lru_pages);
+ sc->nr_reclaimed += reclaim_state->reclaimed_slab;
+
+ /* Account for the number of pages attempted to reclaim */
+ *nr_attempted += sc->nr_to_reclaim;
+
+ zone_clear_flag(zone, ZONE_WRITEBACK);
+
+ /*
+ * If a zone reaches its high watermark, consider it to be no longer
+ * congested. It's possible there are dirty pages backed by congested
+ * BDIs but as pressure is relieved, speculatively avoid congestion
+ * waits.
+ */
+ if (zone_reclaimable(zone) &&
+ zone_balanced(zone, testorder, 0, classzone_idx)) {
+ zone_clear_flag(zone, ZONE_CONGESTED);
+ zone_clear_flag(zone, ZONE_TAIL_LRU_DIRTY);
+ }
+
+ return sc->nr_scanned >= sc->nr_to_reclaim;
+}
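The balance_gap above is the smaller of the low watermark and roughly 1% of the zone (KSWAPD_ZONE_BALANCE_GAP_RATIO is 100 in the kernel). A standalone model of that calculation, with illustrative page counts:

#include <stdio.h>

#define MIN(a, b)		((a) < (b) ? (a) : (b))
#define BALANCE_GAP_RATIO	100	/* stand-in for KSWAPD_ZONE_BALANCE_GAP_RATIO */

/* Model of the balance_gap computation: the low watermark or ~1% of the zone,
 * whichever is smaller (all values in pages). */
static unsigned long balance_gap(unsigned long low_wmark, unsigned long managed)
{
	return MIN(low_wmark,
		   (managed + BALANCE_GAP_RATIO - 1) / BALANCE_GAP_RATIO);
}

int main(void)
{
	printf("%lu\n", balance_gap(2048, 262144));	/* 1% would be 2622 -> 2048 */
	printf("%lu\n", balance_gap(8192, 262144));	/* -> 2622 */
	return 0;
}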
+
+/*
* For kswapd, balance_pgdat() will work across all this node's zones until
* they are all at high_wmark_pages(zone).
*
@@ -2624,35 +2956,28 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
int *classzone_idx)
{
- bool pgdat_is_balanced = false;
int i;
int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
- struct reclaim_state *reclaim_state = current->reclaim_state;
unsigned long nr_soft_reclaimed;
unsigned long nr_soft_scanned;
struct scan_control sc = {
.gfp_mask = GFP_KERNEL,
+ .priority = DEF_PRIORITY,
.may_unmap = 1,
.may_swap = 1,
- /*
- * kswapd doesn't want to be bailed out while reclaim. because
- * we want to put equal scanning pressure on each zone.
- */
- .nr_to_reclaim = ULONG_MAX,
+ .may_writepage = !laptop_mode,
.order = order,
.target_mem_cgroup = NULL,
};
- struct shrink_control shrink = {
- .gfp_mask = sc.gfp_mask,
- };
-loop_again:
- sc.priority = DEF_PRIORITY;
- sc.nr_reclaimed = 0;
- sc.may_writepage = !laptop_mode;
count_vm_event(PAGEOUTRUN);
do {
unsigned long lru_pages = 0;
+ unsigned long nr_attempted = 0;
+ bool raise_priority = true;
+ bool pgdat_needs_compaction = (order > 0);
+
+ sc.nr_reclaimed = 0;
/*
* Scan in the highmem->dma direction for the highest
@@ -2664,8 +2989,8 @@ loop_again:
if (!populated_zone(zone))
continue;
- if (zone->all_unreclaimable &&
- sc.priority != DEF_PRIORITY)
+ if (sc.priority != DEF_PRIORITY &&
+ !zone_reclaimable(zone))
continue;
/*
@@ -2689,23 +3014,46 @@ loop_again:
end_zone = i;
break;
} else {
- /* If balanced, clear the congested flag */
+ /*
+ * If balanced, clear the dirty and congested
+ * flags
+ */
zone_clear_flag(zone, ZONE_CONGESTED);
+ zone_clear_flag(zone, ZONE_TAIL_LRU_DIRTY);
}
}
- if (i < 0) {
- pgdat_is_balanced = true;
+ if (i < 0)
goto out;
- }
for (i = 0; i <= end_zone; i++) {
struct zone *zone = pgdat->node_zones + i;
+ if (!populated_zone(zone))
+ continue;
+
lru_pages += zone_reclaimable_pages(zone);
+
+ /*
+ * If any zone is currently balanced then kswapd will
+ * not call compaction as it is expected that the
+ * necessary pages are already available.
+ */
+ if (pgdat_needs_compaction &&
+ zone_watermark_ok(zone, order,
+ low_wmark_pages(zone),
+ *classzone_idx, 0))
+ pgdat_needs_compaction = false;
}
/*
+		 * If we're having trouble reclaiming, start doing writepage
+ * even in laptop mode.
+ */
+ if (sc.priority < DEF_PRIORITY - 2)
+ sc.may_writepage = 1;
+
+ /*
* Now scan the zone in the dma->highmem direction, stopping
* at the last zone which needs scanning.
*
@@ -2716,14 +3064,12 @@ loop_again:
*/
for (i = 0; i <= end_zone; i++) {
struct zone *zone = pgdat->node_zones + i;
- int nr_slab, testorder;
- unsigned long balance_gap;
if (!populated_zone(zone))
continue;
- if (zone->all_unreclaimable &&
- sc.priority != DEF_PRIORITY)
+ if (sc.priority != DEF_PRIORITY &&
+ !zone_reclaimable(zone))
continue;
sc.nr_scanned = 0;
@@ -2738,65 +3084,14 @@ loop_again:
sc.nr_reclaimed += nr_soft_reclaimed;
/*
- * We put equal pressure on every zone, unless
- * one zone has way too many pages free
- * already. The "too many pages" is defined
- * as the high wmark plus a "gap" where the
- * gap is either the low watermark or 1%
- * of the zone, whichever is smaller.
+ * There should be no need to raise the scanning
+ * priority if enough pages are already being scanned
+			 * that the high watermark would be met at 100%
+ * efficiency.
*/
- balance_gap = min(low_wmark_pages(zone),
- (zone->managed_pages +
- KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
- KSWAPD_ZONE_BALANCE_GAP_RATIO);
- /*
- * Kswapd reclaims only single pages with compaction
- * enabled. Trying too hard to reclaim until contiguous
- * free pages have become available can hurt performance
- * by evicting too much useful data from memory.
- * Do not reclaim more than needed for compaction.
- */
- testorder = order;
- if (IS_ENABLED(CONFIG_COMPACTION) && order &&
- compaction_suitable(zone, order) !=
- COMPACT_SKIPPED)
- testorder = 0;
-
- if ((buffer_heads_over_limit && is_highmem_idx(i)) ||
- !zone_balanced(zone, testorder,
- balance_gap, end_zone)) {
- shrink_zone(zone, &sc);
-
- reclaim_state->reclaimed_slab = 0;
- nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages);
- sc.nr_reclaimed += reclaim_state->reclaimed_slab;
-
- if (nr_slab == 0 && !zone_reclaimable(zone))
- zone->all_unreclaimable = 1;
- }
-
- /*
- * If we're getting trouble reclaiming, start doing
- * writepage even in laptop mode.
- */
- if (sc.priority < DEF_PRIORITY - 2)
- sc.may_writepage = 1;
-
- if (zone->all_unreclaimable) {
- if (end_zone && end_zone == i)
- end_zone--;
- continue;
- }
-
- if (zone_balanced(zone, testorder, 0, end_zone))
- /*
- * If a zone reaches its high watermark,
- * consider it to be no longer congested. It's
- * possible there are dirty pages backed by
- * congested BDIs but as pressure is relieved,
- * speculatively avoid congestion waits
- */
- zone_clear_flag(zone, ZONE_CONGESTED);
+ if (kswapd_shrink_zone(zone, end_zone, &sc,
+ lru_pages, &nr_attempted))
+ raise_priority = false;
}
/*
@@ -2808,74 +3103,38 @@ loop_again:
pfmemalloc_watermark_ok(pgdat))
wake_up(&pgdat->pfmemalloc_wait);
- if (pgdat_balanced(pgdat, order, *classzone_idx)) {
- pgdat_is_balanced = true;
- break; /* kswapd: all done */
- }
-
/*
- * We do this so kswapd doesn't build up large priorities for
- * example when it is freeing in parallel with allocators. It
- * matches the direct reclaim path behaviour in terms of impact
- * on zone->*_priority.
+ * Fragmentation may mean that the system cannot be rebalanced
+ * for high-order allocations in all zones. If twice the
+ * allocation size has been reclaimed and the zones are still
+ * not balanced then recheck the watermarks at order-0 to
+		 * prevent kswapd reclaiming excessively. Assume that a process
+		 * that requested a high-order allocation can direct reclaim/compact.
*/
- if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX)
- break;
- } while (--sc.priority >= 0);
-
-out:
- if (!pgdat_is_balanced) {
- cond_resched();
+ if (order && sc.nr_reclaimed >= 2UL << order)
+ order = sc.order = 0;
- try_to_freeze();
+ /* Check if kswapd should be suspending */
+ if (try_to_freeze() || kthread_should_stop())
+ break;
/*
- * Fragmentation may mean that the system cannot be
- * rebalanced for high-order allocations in all zones.
- * At this point, if nr_reclaimed < SWAP_CLUSTER_MAX,
- * it means the zones have been fully scanned and are still
- * not balanced. For high-order allocations, there is
- * little point trying all over again as kswapd may
- * infinite loop.
- *
- * Instead, recheck all watermarks at order-0 as they
- * are the most important. If watermarks are ok, kswapd will go
- * back to sleep. High-order users can still perform direct
- * reclaim if they wish.
+ * Compact if necessary and kswapd is reclaiming at least the
+		 * high watermark number of pages as requested.
*/
- if (sc.nr_reclaimed < SWAP_CLUSTER_MAX)
- order = sc.order = 0;
-
- goto loop_again;
- }
-
- /*
- * If kswapd was reclaiming at a higher order, it has the option of
- * sleeping without all zones being balanced. Before it does, it must
- * ensure that the watermarks for order-0 on *all* zones are met and
- * that the congestion flags are cleared. The congestion flag must
- * be cleared as kswapd is the only mechanism that clears the flag
- * and it is potentially going to sleep here.
- */
- if (order) {
- int zones_need_compaction = 1;
-
- for (i = 0; i <= end_zone; i++) {
- struct zone *zone = pgdat->node_zones + i;
-
- if (!populated_zone(zone))
- continue;
-
- /* Check if the memory needs to be defragmented. */
- if (zone_watermark_ok(zone, order,
- low_wmark_pages(zone), *classzone_idx, 0))
- zones_need_compaction = 0;
- }
-
- if (zones_need_compaction)
+ if (pgdat_needs_compaction && sc.nr_reclaimed > nr_attempted)
compact_pgdat(pgdat, order);
- }
+ /*
+		 * Raise priority if the scanning rate is too low or there was
+		 * no progress in reclaiming pages.
+ */
+ if (raise_priority || !sc.nr_reclaimed)
+ sc.priority--;
+ } while (sc.priority >= 1 &&
+ !pgdat_balanced(pgdat, order, *classzone_idx));
+
+out:
/*
* Return the order we were reclaiming at so prepare_kswapd_sleep()
* makes a decision on the order we were last reclaiming at. However,
@@ -3066,48 +3325,13 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
}
if (!waitqueue_active(&pgdat->kswapd_wait))
return;
- if (zone_watermark_ok_safe(zone, order, low_wmark_pages(zone), 0, 0))
+ if (zone_balanced(zone, order, 0, 0))
return;
trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
wake_up_interruptible(&pgdat->kswapd_wait);
}
-/*
- * The reclaimable count would be mostly accurate.
- * The less reclaimable pages may be
- * - mlocked pages, which will be moved to unevictable list when encountered
- * - mapped pages, which may require several travels to be reclaimed
- * - dirty pages, which is not "instantly" reclaimable
- */
-unsigned long global_reclaimable_pages(void)
-{
- int nr;
-
- nr = global_page_state(NR_ACTIVE_FILE) +
- global_page_state(NR_INACTIVE_FILE);
-
- if (get_nr_swap_pages() > 0)
- nr += global_page_state(NR_ACTIVE_ANON) +
- global_page_state(NR_INACTIVE_ANON);
-
- return nr;
-}
-
-unsigned long zone_reclaimable_pages(struct zone *zone)
-{
- int nr;
-
- nr = zone_page_state(zone, NR_ACTIVE_FILE) +
- zone_page_state(zone, NR_INACTIVE_FILE);
-
- if (get_nr_swap_pages() > 0)
- nr += zone_page_state(zone, NR_ACTIVE_ANON) +
- zone_page_state(zone, NR_INACTIVE_ANON);
-
- return nr;
-}
-
#ifdef CONFIG_HIBERNATION
/*
* Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
@@ -3130,9 +3354,6 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
.order = 0,
.priority = DEF_PRIORITY,
};
- struct shrink_control shrink = {
- .gfp_mask = sc.gfp_mask,
- };
struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
struct task_struct *p = current;
unsigned long nr_reclaimed;
@@ -3142,7 +3363,7 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
reclaim_state.reclaimed_slab = 0;
p->reclaim_state = &reclaim_state;
- nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
+ nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
p->reclaim_state = NULL;
lockdep_clear_current_reclaim_state();
@@ -3353,10 +3574,9 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
* number of slab pages and shake the slab until it is reduced
* by the same nr_pages that we used for reclaiming unmapped
* pages.
- *
- * Note that shrink_slab will free memory on all zones and may
- * take a long time.
*/
+ nodes_clear(shrink.nodes_to_scan);
+ node_set(zone_to_nid(zone), shrink.nodes_to_scan);
for (;;) {
unsigned long lru_pages = zone_reclaimable_pages(zone);
@@ -3405,7 +3625,7 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
return ZONE_RECLAIM_FULL;
- if (zone->all_unreclaimable)
+ if (!zone_reclaimable(zone))
return ZONE_RECLAIM_FULL;
/*
@@ -3492,7 +3712,7 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
if (page_evictable(page)) {
enum lru_list lru = page_lru_base_type(page);
- VM_BUG_ON(PageActive(page));
+ VM_BUG_ON_PAGE(PageActive(page), page);
ClearPageUnevictable(page);
del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
add_page_to_lru_list(page, lruvec, lru);
diff --git a/mm/vmstat.c b/mm/vmstat.c
index f42745e65780..302dd076b8bf 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -19,6 +19,9 @@
#include <linux/math64.h>
#include <linux/writeback.h>
#include <linux/compaction.h>
+#include <linux/mm_inline.h>
+
+#include "internal.h"
#ifdef CONFIG_VM_EVENT_COUNTERS
DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
@@ -414,12 +417,17 @@ void dec_zone_page_state(struct page *page, enum zone_stat_item item)
EXPORT_SYMBOL(dec_zone_page_state);
#endif
+static inline void fold_diff(int *diff)
+{
+ int i;
+
+ for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
+ if (diff[i])
+ atomic_long_add(diff[i], &vm_stat[i]);
+}
+
/*
- * Update the zone counters for one cpu.
- *
- * The cpu specified must be either the current cpu or a processor that
- * is not online. If it is the current cpu then the execution thread must
- * be pinned to the current cpu.
+ * Update the zone counters for the current cpu.
*
* Note that refresh_cpu_vm_stats strives to only access
* node local memory. The per cpu pagesets on remote zones are placed
@@ -432,33 +440,29 @@ EXPORT_SYMBOL(dec_zone_page_state);
* with the global counters. These could cause remote node cache line
* bouncing and will have to be only done when necessary.
*/
-void refresh_cpu_vm_stats(int cpu)
+static void refresh_cpu_vm_stats(void)
{
struct zone *zone;
int i;
int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
for_each_populated_zone(zone) {
- struct per_cpu_pageset *p;
+ struct per_cpu_pageset __percpu *p = zone->pageset;
- p = per_cpu_ptr(zone->pageset, cpu);
+ for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
+ int v;
- for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
- if (p->vm_stat_diff[i]) {
- unsigned long flags;
- int v;
+ v = this_cpu_xchg(p->vm_stat_diff[i], 0);
+ if (v) {
- local_irq_save(flags);
- v = p->vm_stat_diff[i];
- p->vm_stat_diff[i] = 0;
- local_irq_restore(flags);
atomic_long_add(v, &zone->vm_stat[i]);
global_diff[i] += v;
#ifdef CONFIG_NUMA
/* 3 seconds idle till flush */
- p->expire = 3;
+ __this_cpu_write(p->expire, 3);
#endif
}
+ }
cond_resched();
#ifdef CONFIG_NUMA
/*
@@ -468,29 +472,57 @@ void refresh_cpu_vm_stats(int cpu)
* Check if there are pages remaining in this pageset
* if not then there is nothing to expire.
*/
- if (!p->expire || !p->pcp.count)
+ if (!__this_cpu_read(p->expire) ||
+ !__this_cpu_read(p->pcp.count))
continue;
/*
* We never drain zones local to this processor.
*/
if (zone_to_nid(zone) == numa_node_id()) {
- p->expire = 0;
+ __this_cpu_write(p->expire, 0);
continue;
}
- p->expire--;
- if (p->expire)
+
+ if (__this_cpu_dec_return(p->expire))
continue;
- if (p->pcp.count)
- drain_zone_pages(zone, &p->pcp);
+ if (__this_cpu_read(p->pcp.count))
+ drain_zone_pages(zone, __this_cpu_ptr(&p->pcp));
#endif
}
+ fold_diff(global_diff);
+}
- for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
- if (global_diff[i])
- atomic_long_add(global_diff[i], &vm_stat[i]);
+/*
+ * Fold the data for an offline cpu into the global array.
+ * There cannot be any access by the offline cpu and therefore
+ * synchronization is simplified.
+ */
+void cpu_vm_stats_fold(int cpu)
+{
+ struct zone *zone;
+ int i;
+ int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
+
+ for_each_populated_zone(zone) {
+ struct per_cpu_pageset *p;
+
+ p = per_cpu_ptr(zone->pageset, cpu);
+
+ for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
+ if (p->vm_stat_diff[i]) {
+ int v;
+
+ v = p->vm_stat_diff[i];
+ p->vm_stat_diff[i] = 0;
+ atomic_long_add(v, &zone->vm_stat[i]);
+ global_diff[i] += v;
+ }
+ }
+
+ fold_diff(global_diff);
}
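For readers outside the tree, a minimal userspace model of the folding pattern that refresh_cpu_vm_stats() and cpu_vm_stats_fold() now share via fold_diff(): per-cpu signed deltas are drained into a global counter, by the owning CPU on the live path or with plain reads once the CPU is offline. The names and the C11 atomics below are illustrative only, not kernel API.

#include <stdatomic.h>

#define NR_ITEMS 4
#define NR_CPUS  8

static atomic_long global_stat[NR_ITEMS];	/* vm_stat[] analogue */
static long per_cpu_diff[NR_CPUS][NR_ITEMS];	/* vm_stat_diff[] analogue */

/* Drain one CPU's deltas into the global counters (cpu_vm_stats_fold()). */
static void fold_cpu(int cpu)
{
	for (int i = 0; i < NR_ITEMS; i++) {
		/* the live-CPU path uses this_cpu_xchg(..., 0) instead */
		long v = per_cpu_diff[cpu][i];

		if (v) {
			per_cpu_diff[cpu][i] = 0;
			atomic_fetch_add(&global_stat[i], v);
		}
	}
}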
/*
@@ -703,6 +735,7 @@ static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
const char * const vmstat_text[] = {
/* Zoned VM counters */
"nr_free_pages",
+ "nr_alloc_batch",
"nr_inactive_anon",
"nr_active_anon",
"nr_inactive_file",
@@ -737,6 +770,9 @@ const char * const vmstat_text[] = {
"numa_local",
"numa_other",
#endif
+ "workingset_refault",
+ "workingset_activate",
+ "workingset_nodereclaim",
"nr_anon_transparent_hugepages",
"nr_free_cma",
"nr_dirty_threshold",
@@ -777,8 +813,12 @@ const char * const vmstat_text[] = {
"pgrotated",
+ "drop_pagecache",
+ "drop_slab",
+
#ifdef CONFIG_NUMA_BALANCING
"numa_pte_updates",
+ "numa_huge_pte_updates",
"numa_hint_faults",
"numa_hint_faults_local",
"numa_pages_migrated",
@@ -817,6 +857,14 @@ const char * const vmstat_text[] = {
"thp_zero_page_alloc",
"thp_zero_page_alloc_failed",
#endif
+#ifdef CONFIG_DEBUG_TLBFLUSH
+#ifdef CONFIG_SMP
+ "nr_tlb_remote_flush",
+ "nr_tlb_remote_flush_received",
+#endif /* CONFIG_SMP */
+ "nr_tlb_local_flush_all",
+ "nr_tlb_local_flush_one",
+#endif /* CONFIG_DEBUG_TLBFLUSH */
#endif /* CONFIG_VM_EVENTS_COUNTERS */
};
@@ -1052,7 +1100,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
"\n all_unreclaimable: %u"
"\n start_pfn: %lu"
"\n inactive_ratio: %u",
- zone->all_unreclaimable,
+ !zone_reclaimable(zone),
zone->zone_start_pfn,
zone->inactive_ratio);
seq_putc(m, '\n');
@@ -1177,12 +1225,12 @@ int sysctl_stat_interval __read_mostly = HZ;
static void vmstat_update(struct work_struct *w)
{
- refresh_cpu_vm_stats(smp_processor_id());
+ refresh_cpu_vm_stats();
schedule_delayed_work(&__get_cpu_var(vmstat_work),
round_jiffies_relative(sysctl_stat_interval));
}
-static void __cpuinit start_cpu_timer(int cpu)
+static void start_cpu_timer(int cpu)
{
struct delayed_work *work = &per_cpu(vmstat_work, cpu);
@@ -1190,11 +1238,25 @@ static void __cpuinit start_cpu_timer(int cpu)
schedule_delayed_work_on(cpu, work, __round_jiffies_relative(HZ, cpu));
}
+static void vmstat_cpu_dead(int node)
+{
+ int cpu;
+
+ get_online_cpus();
+ for_each_online_cpu(cpu)
+ if (cpu_to_node(cpu) == node)
+ goto end;
+
+ node_clear_state(node, N_CPU);
+end:
+ put_online_cpus();
+}
+
/*
* Use the cpu notifier to insure that the thresholds are recalculated
* when necessary.
*/
-static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
+static int vmstat_cpuup_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
@@ -1219,6 +1281,7 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
case CPU_DEAD:
case CPU_DEAD_FROZEN:
refresh_zone_stat_thresholds();
+ vmstat_cpu_dead(cpu_to_node(cpu));
break;
default:
break;
@@ -1226,7 +1289,7 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
return NOTIFY_OK;
}
-static struct notifier_block __cpuinitdata vmstat_notifier =
+static struct notifier_block vmstat_notifier =
{ &vmstat_cpuup_callback, NULL, 0 };
#endif
@@ -1235,10 +1298,14 @@ static int __init setup_vmstat(void)
#ifdef CONFIG_SMP
int cpu;
- register_cpu_notifier(&vmstat_notifier);
+ cpu_notifier_register_begin();
+ __register_cpu_notifier(&vmstat_notifier);
- for_each_online_cpu(cpu)
+ for_each_online_cpu(cpu) {
start_cpu_timer(cpu);
+ node_set_state(cpu_to_node(cpu), N_CPU);
+ }
+ cpu_notifier_register_done();
#endif
#ifdef CONFIG_PROC_FS
proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
diff --git a/mm/workingset.c b/mm/workingset.c
new file mode 100644
index 000000000000..f7216fa7da27
--- /dev/null
+++ b/mm/workingset.c
@@ -0,0 +1,414 @@
+/*
+ * Workingset detection
+ *
+ * Copyright (C) 2013 Red Hat, Inc., Johannes Weiner
+ */
+
+#include <linux/memcontrol.h>
+#include <linux/writeback.h>
+#include <linux/pagemap.h>
+#include <linux/atomic.h>
+#include <linux/module.h>
+#include <linux/swap.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+
+/*
+ * Double CLOCK lists
+ *
+ * Per zone, two clock lists are maintained for file pages: the
+ * inactive and the active list. Freshly faulted pages start out at
+ * the head of the inactive list and page reclaim scans pages from the
+ * tail. Pages that are accessed multiple times on the inactive list
+ * are promoted to the active list, to protect them from reclaim,
+ * whereas active pages are demoted to the inactive list when the
+ * active list grows too big.
+ *
+ * fault ------------------------+
+ * |
+ * +--------------+ | +-------------+
+ * reclaim <- | inactive | <-+-- demotion | active | <--+
+ * +--------------+ +-------------+ |
+ * | |
+ * +-------------- promotion ------------------+
+ *
+ *
+ * Access frequency and refault distance
+ *
+ * A workload is thrashing when its pages are frequently used but they
+ * are evicted from the inactive list every time before another access
+ * would have promoted them to the active list.
+ *
+ * In cases where the average access distance between thrashing pages
+ * is bigger than the size of memory there is nothing that can be
+ * done - the thrashing set could never fit into memory under any
+ * circumstance.
+ *
+ * However, the average access distance could be bigger than the
+ * inactive list, yet smaller than the size of memory. In this case,
+ * the set could fit into memory if it weren't for the currently
+ * active pages - which may be used more, hopefully less frequently:
+ *
+ * +-memory available to cache-+
+ * | |
+ * +-inactive------+-active----+
+ * a b | c d e f g h i | J K L M N |
+ * +---------------+-----------+
+ *
+ * It is prohibitively expensive to accurately track access frequency
+ * of pages. But a reasonable approximation can be made to measure
+ * thrashing on the inactive list, after which refaulting pages can be
+ * activated optimistically to compete with the existing active pages.
+ *
+ * Approximating inactive page access frequency - Observations:
+ *
+ * 1. When a page is accessed for the first time, it is added to the
+ * head of the inactive list, slides every existing inactive page
+ * towards the tail by one slot, and pushes the current tail page
+ * out of memory.
+ *
+ * 2. When a page is accessed for the second time, it is promoted to
+ * the active list, shrinking the inactive list by one slot. This
+ * also slides all inactive pages that were faulted into the cache
+ * more recently than the activated page towards the tail of the
+ * inactive list.
+ *
+ * Thus:
+ *
+ * 1. The sum of evictions and activations between any two points in
+ * time indicate the minimum number of inactive pages accessed in
+ * between.
+ *
+ * 2. Moving one inactive page N page slots towards the tail of the
+ * list requires at least N inactive page accesses.
+ *
+ * Combining these:
+ *
+ * 1. When a page is finally evicted from memory, the number of
+ * inactive pages accessed while the page was in cache is at least
+ * the number of page slots on the inactive list.
+ *
+ * 2. In addition, measuring the sum of evictions and activations (E)
+ * at the time of a page's eviction, and comparing it to another
+ * reading (R) at the time the page faults back into memory tells
+ * the minimum number of accesses while the page was not cached.
+ * This is called the refault distance.
+ *
+ * Because the first access of the page was the fault and the second
+ * access the refault, we combine the in-cache distance with the
+ * out-of-cache distance to get the complete minimum access distance
+ * of this page:
+ *
+ * NR_inactive + (R - E)
+ *
+ * And knowing the minimum access distance of a page, we can easily
+ * tell if the page would be able to stay in cache assuming all page
+ * slots in the cache were available:
+ *
+ * NR_inactive + (R - E) <= NR_inactive + NR_active
+ *
+ * which can be further simplified to
+ *
+ * (R - E) <= NR_active
+ *
+ * Put into words, the refault distance (out-of-cache) can be seen as
+ * a deficit in inactive list space (in-cache). If the inactive list
+ * had (R - E) more page slots, the page would not have been evicted
+ * in between accesses, but activated instead. And on a full system,
+ * the only thing eating into inactive list space is active pages.
+ *
+ *
+ * Activating refaulting pages
+ *
+ * All that is known about the active list is that the pages have been
+ * accessed more than once in the past. This means that at any given
+ * time there is actually a good chance that pages on the active list
+ * are no longer in active use.
+ *
+ * So when a refault distance of (R - E) is observed and there are at
+ * least (R - E) active pages, the refaulting page is activated
+ * optimistically in the hope that (R - E) active pages are actually
+ * used less frequently than the refaulting page - or even not used at
+ * all anymore.
+ *
+ * If this is wrong and demotion kicks in, the pages which are truly
+ * used more frequently will be reactivated while the less frequently
+ * used ones will be evicted from memory.
+ *
+ * But if this is right, the stale pages will be pushed out of memory
+ * and the used pages get to stay in cache.
+ *
+ *
+ * Implementation
+ *
+ * For each zone's file LRU lists, a counter for inactive evictions
+ * and activations is maintained (zone->inactive_age).
+ *
+ * On eviction, a snapshot of this counter (along with some bits to
+ * identify the zone) is stored in the now empty page cache radix tree
+ * slot of the evicted page. This is called a shadow entry.
+ *
+ * On cache misses for which there are shadow entries, an eligible
+ * refault distance will immediately activate the refaulting page.
+ */
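A worked example of the activation rule just derived; the numbers are illustrative and not taken from the patch.

/*
 * Example: NR_inactive = 1000, NR_active = 400.  A page is evicted
 * when evictions + activations have reached E = 5000 and refaults at
 * R = 5300, giving a refault distance of R - E = 300.  Its complete
 * minimum access distance is 1000 + 300 = 1300 <= 1000 + 400, i.e.
 * (R - E) <= NR_active, so the refault is activated.  Had it come
 * back at R = 5600 (distance 600 > 400), it would be added to the
 * head of the inactive list like any other cache miss.
 */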
+
+static void *pack_shadow(unsigned long eviction, struct zone *zone)
+{
+ eviction = (eviction << NODES_SHIFT) | zone_to_nid(zone);
+ eviction = (eviction << ZONES_SHIFT) | zone_idx(zone);
+ eviction = (eviction << RADIX_TREE_EXCEPTIONAL_SHIFT);
+
+ return (void *)(eviction | RADIX_TREE_EXCEPTIONAL_ENTRY);
+}
+
+static void unpack_shadow(void *shadow,
+ struct zone **zone,
+ unsigned long *distance)
+{
+ unsigned long entry = (unsigned long)shadow;
+ unsigned long eviction;
+ unsigned long refault;
+ unsigned long mask;
+ int zid, nid;
+
+ entry >>= RADIX_TREE_EXCEPTIONAL_SHIFT;
+ zid = entry & ((1UL << ZONES_SHIFT) - 1);
+ entry >>= ZONES_SHIFT;
+ nid = entry & ((1UL << NODES_SHIFT) - 1);
+ entry >>= NODES_SHIFT;
+ eviction = entry;
+
+ *zone = NODE_DATA(nid)->node_zones + zid;
+
+ refault = atomic_long_read(&(*zone)->inactive_age);
+ mask = ~0UL >> (NODES_SHIFT + ZONES_SHIFT +
+ RADIX_TREE_EXCEPTIONAL_SHIFT);
+ /*
+ * The unsigned subtraction here gives an accurate distance
+ * across inactive_age overflows in most cases.
+ *
+ * There is a special case: usually, shadow entries have a
+ * short lifetime and are either refaulted or reclaimed along
+ * with the inode before they get too old. But it is not
+ * impossible for the inactive_age to lap a shadow entry in
+ * the field, which can then result in a false small
+ * refault distance, leading to a false activation should this
+ * old entry actually refault again. However, earlier kernels
+ * used to deactivate unconditionally with *every* reclaim
+ * invocation for the longest time, so the occasional
+ * inappropriate activation leading to pressure on the active
+ * list is not a problem.
+ */
+ *distance = (refault - eviction) & mask;
+}
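A round-trip example of the shadow packing above, assuming NODES_SHIFT = 6, ZONES_SHIFT = 2 and RADIX_TREE_EXCEPTIONAL_SHIFT/ENTRY = 2 (all of these are config dependent).

/*
 * Example: eviction counter 1000, nid 1, zid 2 at pack time:
 *
 *   shadow = ((((1000 << 6) | 1) << 2 | 2) << 2) | 2
 *
 * unpack_shadow() strips the exceptional bits, masks off zid and nid
 * to find the zone again, and the masked subtraction
 * (refault - eviction) & mask yields the refault distance even after
 * inactive_age has wrapped, provided it has not lapped the entry.
 */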
+
+/**
+ * workingset_eviction - note the eviction of a page from memory
+ * @mapping: address space the page was backing
+ * @page: the page being evicted
+ *
+ * Returns a shadow entry to be stored in @mapping->page_tree in place
+ * of the evicted @page so that a later refault can be detected.
+ */
+void *workingset_eviction(struct address_space *mapping, struct page *page)
+{
+ struct zone *zone = page_zone(page);
+ unsigned long eviction;
+
+ eviction = atomic_long_inc_return(&zone->inactive_age);
+ return pack_shadow(eviction, zone);
+}
+
+/**
+ * workingset_refault - evaluate the refault of a previously evicted page
+ * @shadow: shadow entry of the evicted page
+ *
+ * Calculates and evaluates the refault distance of the previously
+ * evicted page in the context of the zone it was allocated in.
+ *
+ * Returns %true if the page should be activated, %false otherwise.
+ */
+bool workingset_refault(void *shadow)
+{
+ unsigned long refault_distance;
+ struct zone *zone;
+
+ unpack_shadow(shadow, &zone, &refault_distance);
+ inc_zone_state(zone, WORKINGSET_REFAULT);
+
+ if (refault_distance <= zone_page_state(zone, NR_ACTIVE_FILE)) {
+ inc_zone_state(zone, WORKINGSET_ACTIVATE);
+ return true;
+ }
+ return false;
+}
+
+/**
+ * workingset_activation - note a page activation
+ * @page: page that is being activated
+ */
+void workingset_activation(struct page *page)
+{
+ atomic_long_inc(&page_zone(page)->inactive_age);
+}
+
+/*
+ * Shadow entries reflect the share of the working set that does not
+ * fit into memory, so their number depends on the access pattern of
+ * the workload. In most cases, they will refault or get reclaimed
+ * along with the inode, but a (malicious) workload that streams
+ * through files with a total size several times that of available
+ * memory, while preventing the inodes from being reclaimed, can
+ * create excessive amounts of shadow nodes. To keep a lid on this,
+ * track shadow nodes and reclaim them when they grow way past the
+ * point where they would still be useful.
+ */
+
+struct list_lru workingset_shadow_nodes;
+
+static unsigned long count_shadow_nodes(struct shrinker *shrinker,
+ struct shrink_control *sc)
+{
+ unsigned long shadow_nodes;
+ unsigned long max_nodes;
+ unsigned long pages;
+
+ /* list_lru lock nests inside IRQ-safe mapping->tree_lock */
+ local_irq_disable();
+ shadow_nodes = list_lru_count_node(&workingset_shadow_nodes, sc->nid);
+ local_irq_enable();
+
+ pages = node_present_pages(sc->nid);
+ /*
+ * Active cache pages are limited to 50% of memory, and shadow
+ * entries that represent a refault distance bigger than that
+ * do not have any effect. Limit the number of shadow nodes
+ * such that shadow entries do not exceed the number of active
+ * cache pages, assuming a worst-case node population density
+ * of 1/8th on average.
+ *
+ * On 64-bit with 7 radix_tree_nodes per page and 64 slots
+ * each, this will reclaim shadow entries when they consume
+ * ~2% of available memory:
+ *
+ * PAGE_SIZE / radix_tree_nodes / node_entries / PAGE_SIZE
+ */
+ max_nodes = pages >> (1 + RADIX_TREE_MAP_SHIFT - 3);
+
+ if (shadow_nodes <= max_nodes)
+ return 0;
+
+ return shadow_nodes - max_nodes;
+}
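A worked example of the cap computed above, assuming 4K pages and RADIX_TREE_MAP_SHIFT == 6 (64 slots per node).

/*
 * max_nodes = pages >> (1 + 6 - 3) = pages / 16, i.e. one shadow node
 * is tolerated per 16 pages of node memory.  A node with ~1M pages
 * (4G at 4K pages) may keep up to 65536 shadow nodes; only the excess
 * over that is reported to the shrinker as reclaimable.
 */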
+
+static enum lru_status shadow_lru_isolate(struct list_head *item,
+ spinlock_t *lru_lock,
+ void *arg)
+{
+ struct address_space *mapping;
+ struct radix_tree_node *node;
+ unsigned int i;
+ int ret;
+
+ /*
+ * Page cache insertions and deletions synchronously maintain
+ * the shadow node LRU under the mapping->tree_lock and the
+ * lru_lock. Because the page cache tree is emptied before
+ * the inode can be destroyed, holding the lru_lock pins any
+ * address_space that has radix tree nodes on the LRU.
+ *
+ * We can then safely transition to the mapping->tree_lock to
+ * pin only the address_space of the particular node we want
+ * to reclaim, take the node off-LRU, and drop the lru_lock.
+ */
+
+ node = container_of(item, struct radix_tree_node, private_list);
+ mapping = node->private_data;
+
+ /* Coming from the list, invert the lock order */
+ if (!spin_trylock(&mapping->tree_lock)) {
+ spin_unlock(lru_lock);
+ ret = LRU_RETRY;
+ goto out;
+ }
+
+ list_del_init(item);
+ spin_unlock(lru_lock);
+
+ /*
+ * The nodes should only contain one or more shadow entries,
+ * no pages, so we expect to be able to remove them all and
+ * delete and free the empty node afterwards.
+ */
+
+ BUG_ON(!node->count);
+ BUG_ON(node->count & RADIX_TREE_COUNT_MASK);
+
+ for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
+ if (node->slots[i]) {
+ BUG_ON(!radix_tree_exceptional_entry(node->slots[i]));
+ node->slots[i] = NULL;
+ BUG_ON(node->count < (1U << RADIX_TREE_COUNT_SHIFT));
+ node->count -= 1U << RADIX_TREE_COUNT_SHIFT;
+ BUG_ON(!mapping->nrshadows);
+ mapping->nrshadows--;
+ }
+ }
+ BUG_ON(node->count);
+ inc_zone_state(page_zone(virt_to_page(node)), WORKINGSET_NODERECLAIM);
+ if (!__radix_tree_delete_node(&mapping->page_tree, node))
+ BUG();
+
+ spin_unlock(&mapping->tree_lock);
+ ret = LRU_REMOVED_RETRY;
+out:
+ local_irq_enable();
+ cond_resched();
+ local_irq_disable();
+ spin_lock(lru_lock);
+ return ret;
+}
+
+static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
+ struct shrink_control *sc)
+{
+ unsigned long ret;
+
+ /* list_lru lock nests inside IRQ-safe mapping->tree_lock */
+ local_irq_disable();
+ ret = list_lru_walk_node(&workingset_shadow_nodes, sc->nid,
+ shadow_lru_isolate, NULL, &sc->nr_to_scan);
+ local_irq_enable();
+ return ret;
+}
+
+static struct shrinker workingset_shadow_shrinker = {
+ .count_objects = count_shadow_nodes,
+ .scan_objects = scan_shadow_nodes,
+ .seeks = DEFAULT_SEEKS,
+ .flags = SHRINKER_NUMA_AWARE,
+};
+
+/*
+ * Our list_lru->lock is IRQ-safe as it nests inside the IRQ-safe
+ * mapping->tree_lock.
+ */
+static struct lock_class_key shadow_nodes_key;
+
+static int __init workingset_init(void)
+{
+ int ret;
+
+ ret = list_lru_init_key(&workingset_shadow_nodes, &shadow_nodes_key);
+ if (ret)
+ goto err;
+ ret = register_shrinker(&workingset_shadow_shrinker);
+ if (ret)
+ goto err_list_lru;
+ return 0;
+err_list_lru:
+ list_lru_destroy(&workingset_shadow_nodes);
+err:
+ return ret;
+}
+module_init(workingset_init);
diff --git a/mm/zbud.c b/mm/zbud.c
new file mode 100644
index 000000000000..9451361e6aa7
--- /dev/null
+++ b/mm/zbud.c
@@ -0,0 +1,527 @@
+/*
+ * zbud.c
+ *
+ * Copyright (C) 2013, Seth Jennings, IBM
+ *
+ * Concepts based on zcache internal zbud allocator by Dan Magenheimer.
+ *
+ * zbud is a special purpose allocator for storing compressed pages. Contrary
+ * to what its name may suggest, zbud is not a buddy allocator, but rather an
+ * allocator that "buddies" two compressed pages together in a single memory
+ * page.
+ *
+ * While this design limits storage density, it has simple and deterministic
+ * reclaim properties that make it preferable to a higher density approach when
+ * reclaim will be used.
+ *
+ * zbud works by storing compressed pages, or "zpages", together in pairs in a
+ * single memory page called a "zbud page". The first buddy is "left
+ * justified" at the beginning of the zbud page, and the last buddy is "right
+ * justified" at the end of the zbud page. The benefit is that if either
+ * buddy is freed, the freed buddy space, coalesced with whatever slack space
+ * that existed between the buddies, results in the largest possible free region
+ * within the zbud page.
+ *
+ * zbud also provides an attractive lower bound on density. The ratio of zpages
+ * to zbud pages can not be less than 1. This ensures that zbud can never "do
+ * harm" by using more pages to store zpages than the uncompressed zpages would
+ * have used on their own.
+ *
+ * zbud pages are divided into "chunks". The size of the chunks is fixed at
+ * compile time and determined by NCHUNKS_ORDER below. Dividing zbud pages
+ * into chunks allows organizing unbuddied zbud pages into a manageable number
+ * of unbuddied lists according to the number of free chunks available in the
+ * zbud page.
+ *
+ * The zbud API differs from that of conventional allocators in that the
+ * allocation function, zbud_alloc(), returns an opaque handle to the user,
+ * not a dereferenceable pointer. The user must map the handle using
+ * zbud_map() in order to get a usable pointer by which to access the
+ * allocation data and unmap the handle with zbud_unmap() when operations
+ * on the allocation data are complete.
+ */
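A sketch of a hypothetical in-kernel caller of the handle-based API described above (zswap is the in-tree user); it relies only on the entry points defined later in this file and declared in <linux/zbud.h>. Passing a NULL ops simply means the pool has no eviction support.

#include <linux/zbud.h>
#include <linux/string.h>
#include <linux/gfp.h>

static int zbud_roundtrip(const void *src, int len)
{
	struct zbud_pool *pool;
	unsigned long handle;
	void *dst;
	int ret;

	pool = zbud_create_pool(GFP_KERNEL, NULL);	/* NULL ops: no eviction */
	if (!pool)
		return -ENOMEM;

	ret = zbud_alloc(pool, len, GFP_KERNEL, &handle);
	if (!ret) {
		dst = zbud_map(pool, handle);	/* opaque handle -> pointer */
		memcpy(dst, src, len);
		zbud_unmap(pool, handle);
		zbud_free(pool, handle);
	}
	zbud_destroy_pool(pool);		/* pool must be empty here */
	return ret;
}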
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/atomic.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/preempt.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/zbud.h>
+
+/*****************
+ * Structures
+*****************/
+/*
+ * NCHUNKS_ORDER determines the internal allocation granularity, effectively
+ * adjusting internal fragmentation. It also determines the number of
+ * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
+ * allocation granularity will be in chunks of size PAGE_SIZE/64, and there
+ * will be 64 freelists per pool.
+ */
+#define NCHUNKS_ORDER 6
+
+#define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
+#define CHUNK_SIZE (1 << CHUNK_SHIFT)
+#define NCHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
+#define ZHDR_SIZE_ALIGNED CHUNK_SIZE
+
+/**
+ * struct zbud_pool - stores metadata for each zbud pool
+ * @lock: protects all pool fields and first|last_chunk fields of any
+ * zbud page in the pool
+ * @unbuddied: array of lists tracking zbud pages that only contain one buddy;
+ * the lists each zbud page is added to depends on the size of
+ * its free region.
+ * @buddied: list tracking the zbud pages that contain two buddies;
+ * these zbud pages are full
+ * @lru: list tracking the zbud pages in LRU order by most recently
+ * added buddy.
+ * @pages_nr: number of zbud pages in the pool.
+ * @ops: pointer to a structure of user defined operations specified at
+ * pool creation time.
+ *
+ * This structure is allocated at pool creation time and maintains metadata
+ * pertaining to a particular zbud pool.
+ */
+struct zbud_pool {
+ spinlock_t lock;
+ struct list_head unbuddied[NCHUNKS];
+ struct list_head buddied;
+ struct list_head lru;
+ u64 pages_nr;
+ struct zbud_ops *ops;
+};
+
+/*
+ * struct zbud_header - zbud page metadata occupying the first chunk of each
+ * zbud page.
+ * @buddy: links the zbud page into the unbuddied/buddied lists in the pool
+ * @lru: links the zbud page into the lru list in the pool
+ * @first_chunks: the size of the first buddy in chunks, 0 if free
+ * @last_chunks: the size of the last buddy in chunks, 0 if free
+ */
+struct zbud_header {
+ struct list_head buddy;
+ struct list_head lru;
+ unsigned int first_chunks;
+ unsigned int last_chunks;
+ bool under_reclaim;
+};
+
+/*****************
+ * Helpers
+*****************/
+/* Just to make the code easier to read */
+enum buddy {
+ FIRST,
+ LAST
+};
+
+/* Converts an allocation size in bytes to size in zbud chunks */
+static int size_to_chunks(int size)
+{
+ return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
+}
+
+#define for_each_unbuddied_list(_iter, _begin) \
+ for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
+
+/* Initializes the zbud header of a newly allocated zbud page */
+static struct zbud_header *init_zbud_page(struct page *page)
+{
+ struct zbud_header *zhdr = page_address(page);
+ zhdr->first_chunks = 0;
+ zhdr->last_chunks = 0;
+ INIT_LIST_HEAD(&zhdr->buddy);
+ INIT_LIST_HEAD(&zhdr->lru);
+ zhdr->under_reclaim = 0;
+ return zhdr;
+}
+
+/* Resets the struct page fields and frees the page */
+static void free_zbud_page(struct zbud_header *zhdr)
+{
+ __free_page(virt_to_page(zhdr));
+}
+
+/*
+ * Encodes the handle of a particular buddy within a zbud page
+ * Pool lock should be held as this function accesses first|last_chunks
+ */
+static unsigned long encode_handle(struct zbud_header *zhdr, enum buddy bud)
+{
+ unsigned long handle;
+
+ /*
+ * For now, the encoded handle is actually just the pointer to the data
+ * but this might not always be the case. A little information hiding.
+ * Add CHUNK_SIZE to the handle if it is the first allocation to jump
+ * over the zbud header in the first chunk.
+ */
+ handle = (unsigned long)zhdr;
+ if (bud == FIRST)
+ /* skip over zbud header */
+ handle += ZHDR_SIZE_ALIGNED;
+ else /* bud == LAST */
+ handle += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
+ return handle;
+}
+
+/* Returns the zbud page where a given handle is stored */
+static struct zbud_header *handle_to_zbud_header(unsigned long handle)
+{
+ return (struct zbud_header *)(handle & PAGE_MASK);
+}
+
+/* Returns the number of free chunks in a zbud page */
+static int num_free_chunks(struct zbud_header *zhdr)
+{
+ /*
+ * Rather than branch for different situations, just use the fact that
+ * free buddies have a length of zero to simplify everything. -1 at the
+ * end for the zbud header.
+ */
+ return NCHUNKS - zhdr->first_chunks - zhdr->last_chunks - 1;
+}
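A quick worked example with the constants above, assuming 4K pages (NCHUNKS_ORDER 6 gives CHUNK_SIZE = 64 bytes and NCHUNKS = 64).

/*
 * Example: a zbud page with first_chunks = 10 and last_chunks = 20
 * has 64 - 10 - 20 - 1 = 33 free chunks (the -1 is the header chunk)
 * and therefore sits on pool->unbuddied[33].
 */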
+
+/*****************
+ * API Functions
+*****************/
+/**
+ * zbud_create_pool() - create a new zbud pool
+ * @gfp: gfp flags when allocating the zbud pool structure
+ * @ops: user-defined operations for the zbud pool
+ *
+ * Return: pointer to the new zbud pool or NULL if the metadata allocation
+ * failed.
+ */
+struct zbud_pool *zbud_create_pool(gfp_t gfp, struct zbud_ops *ops)
+{
+ struct zbud_pool *pool;
+ int i;
+
+ pool = kmalloc(sizeof(struct zbud_pool), gfp);
+ if (!pool)
+ return NULL;
+ spin_lock_init(&pool->lock);
+ for_each_unbuddied_list(i, 0)
+ INIT_LIST_HEAD(&pool->unbuddied[i]);
+ INIT_LIST_HEAD(&pool->buddied);
+ INIT_LIST_HEAD(&pool->lru);
+ pool->pages_nr = 0;
+ pool->ops = ops;
+ return pool;
+}
+
+/**
+ * zbud_destroy_pool() - destroys an existing zbud pool
+ * @pool: the zbud pool to be destroyed
+ *
+ * The pool should be emptied before this function is called.
+ */
+void zbud_destroy_pool(struct zbud_pool *pool)
+{
+ kfree(pool);
+}
+
+/**
+ * zbud_alloc() - allocates a region of a given size
+ * @pool: zbud pool from which to allocate
+ * @size: size in bytes of the desired allocation
+ * @gfp: gfp flags used if the pool needs to grow
+ * @handle: handle of the new allocation
+ *
+ * This function will attempt to find a free region in the pool large enough to
+ * satisfy the allocation request. A search of the unbuddied lists is
+ * performed first. If no suitable free region is found, then a new page is
+ * allocated and added to the pool to satisfy the request.
+ *
+ * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
+ * as zbud pool pages.
+ *
+ * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
+ * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
+ * a new page.
+ */
+int zbud_alloc(struct zbud_pool *pool, int size, gfp_t gfp,
+ unsigned long *handle)
+{
+ int chunks, i, freechunks;
+ struct zbud_header *zhdr = NULL;
+ enum buddy bud;
+ struct page *page;
+
+ if (size <= 0 || gfp & __GFP_HIGHMEM)
+ return -EINVAL;
+ if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
+ return -ENOSPC;
+ chunks = size_to_chunks(size);
+ spin_lock(&pool->lock);
+
+ /* First, try to find an unbuddied zbud page. */
+ zhdr = NULL;
+ for_each_unbuddied_list(i, chunks) {
+ if (!list_empty(&pool->unbuddied[i])) {
+ zhdr = list_first_entry(&pool->unbuddied[i],
+ struct zbud_header, buddy);
+ list_del(&zhdr->buddy);
+ if (zhdr->first_chunks == 0)
+ bud = FIRST;
+ else
+ bud = LAST;
+ goto found;
+ }
+ }
+
+ /* Couldn't find unbuddied zbud page, create new one */
+ spin_unlock(&pool->lock);
+ page = alloc_page(gfp);
+ if (!page)
+ return -ENOMEM;
+ spin_lock(&pool->lock);
+ pool->pages_nr++;
+ zhdr = init_zbud_page(page);
+ bud = FIRST;
+
+found:
+ if (bud == FIRST)
+ zhdr->first_chunks = chunks;
+ else
+ zhdr->last_chunks = chunks;
+
+ if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0) {
+ /* Add to unbuddied list */
+ freechunks = num_free_chunks(zhdr);
+ list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
+ } else {
+ /* Add to buddied list */
+ list_add(&zhdr->buddy, &pool->buddied);
+ }
+
+ /* Add/move zbud page to beginning of LRU */
+ if (!list_empty(&zhdr->lru))
+ list_del(&zhdr->lru);
+ list_add(&zhdr->lru, &pool->lru);
+
+ *handle = encode_handle(zhdr, bud);
+ spin_unlock(&pool->lock);
+
+ return 0;
+}
+
+/**
+ * zbud_free() - frees the allocation associated with the given handle
+ * @pool: pool in which the allocation resided
+ * @handle: handle associated with the allocation returned by zbud_alloc()
+ *
+ * In the case that the zbud page in which the allocation resides is under
+ * reclaim, as indicated by the under_reclaim flag being set, this function
+ * only sets the first|last_chunks to 0. The page is actually freed
+ * once both buddies are evicted (see zbud_reclaim_page() below).
+ */
+void zbud_free(struct zbud_pool *pool, unsigned long handle)
+{
+ struct zbud_header *zhdr;
+ int freechunks;
+
+ spin_lock(&pool->lock);
+ zhdr = handle_to_zbud_header(handle);
+
+ /* If first buddy, handle will be page aligned */
+ if ((handle - ZHDR_SIZE_ALIGNED) & ~PAGE_MASK)
+ zhdr->last_chunks = 0;
+ else
+ zhdr->first_chunks = 0;
+
+ if (zhdr->under_reclaim) {
+ /* zbud page is under reclaim, reclaim will free */
+ spin_unlock(&pool->lock);
+ return;
+ }
+
+ /* Remove from existing buddy list */
+ list_del(&zhdr->buddy);
+
+ if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
+ /* zbud page is empty, free */
+ list_del(&zhdr->lru);
+ free_zbud_page(zhdr);
+ pool->pages_nr--;
+ } else {
+ /* Add to unbuddied list */
+ freechunks = num_free_chunks(zhdr);
+ list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
+ }
+
+ spin_unlock(&pool->lock);
+}
+
+#define list_tail_entry(ptr, type, member) \
+ list_entry((ptr)->prev, type, member)
+
+/**
+ * zbud_reclaim_page() - evicts allocations from a pool page and frees it
+ * @pool: pool from which a page will attempt to be evicted
+ * @retries: number of pages on the LRU list for which eviction will
+ * be attempted before failing
+ *
+ * zbud reclaim is different from normal system reclaim in that the reclaim is
+ * done from the bottom, up. This is because only the bottom layer, zbud, has
+ * information on how the allocations are organized within each zbud page. This
+ * has the potential to create interesting locking situations between zbud and
+ * the user, however.
+ *
+ * To avoid these, this is how zbud_reclaim_page() should be called:
+ *
+ * The user detects a page should be reclaimed and calls zbud_reclaim_page().
+ * zbud_reclaim_page() will remove a zbud page from the pool LRU list and call
+ * the user-defined eviction handler with the pool and handle as arguments.
+ *
+ * If the handle can not be evicted, the eviction handler should return
+ * non-zero. zbud_reclaim_page() will add the zbud page back to the
+ * appropriate list and try the next zbud page on the LRU up to
+ * a user defined number of retries.
+ *
+ * If the handle is successfully evicted, the eviction handler should
+ * return 0 _and_ should have called zbud_free() on the handle. zbud_free()
+ * contains logic to delay freeing the page if the page is under reclaim,
+ * as indicated by the under_reclaim flag on the zbud page header.
+ *
+ * If all buddies in the zbud page are successfully evicted, then the
+ * zbud page can be freed.
+ *
+ * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
+ * no pages to evict or an eviction handler is not registered, -EAGAIN if
+ * the retry limit was hit.
+ */
+int zbud_reclaim_page(struct zbud_pool *pool, unsigned int retries)
+{
+ int i, ret, freechunks;
+ struct zbud_header *zhdr;
+ unsigned long first_handle = 0, last_handle = 0;
+
+ spin_lock(&pool->lock);
+ if (!pool->ops || !pool->ops->evict || list_empty(&pool->lru) ||
+ retries == 0) {
+ spin_unlock(&pool->lock);
+ return -EINVAL;
+ }
+ for (i = 0; i < retries; i++) {
+ zhdr = list_tail_entry(&pool->lru, struct zbud_header, lru);
+ list_del(&zhdr->lru);
+ list_del(&zhdr->buddy);
+ /* Protect zbud page against free */
+ zhdr->under_reclaim = true;
+ /*
+ * We need to encode the handles before unlocking, since we can
+ * race with free that will set (first|last)_chunks to 0
+ */
+ first_handle = 0;
+ last_handle = 0;
+ if (zhdr->first_chunks)
+ first_handle = encode_handle(zhdr, FIRST);
+ if (zhdr->last_chunks)
+ last_handle = encode_handle(zhdr, LAST);
+ spin_unlock(&pool->lock);
+
+ /* Issue the eviction callback(s) */
+ if (first_handle) {
+ ret = pool->ops->evict(pool, first_handle);
+ if (ret)
+ goto next;
+ }
+ if (last_handle) {
+ ret = pool->ops->evict(pool, last_handle);
+ if (ret)
+ goto next;
+ }
+next:
+ spin_lock(&pool->lock);
+ zhdr->under_reclaim = false;
+ if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
+ /*
+ * Both buddies are now free, free the zbud page and
+ * return success.
+ */
+ free_zbud_page(zhdr);
+ pool->pages_nr--;
+ spin_unlock(&pool->lock);
+ return 0;
+ } else if (zhdr->first_chunks == 0 ||
+ zhdr->last_chunks == 0) {
+ /* add to unbuddied list */
+ freechunks = num_free_chunks(zhdr);
+ list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
+ } else {
+ /* add to buddied list */
+ list_add(&zhdr->buddy, &pool->buddied);
+ }
+
+ /* add to beginning of LRU */
+ list_add(&zhdr->lru, &pool->lru);
+ }
+ spin_unlock(&pool->lock);
+ return -EAGAIN;
+}
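A minimal sketch of an eviction handler that honours the contract documented above: on success it must itself free the handle and return 0; on failure it returns nonzero and zbud_reclaim_page() puts the page back on the lists. writeback_entry() is a hypothetical callback of the pool user, not part of this patch.

/* hypothetical: write the compressed entry back to its backing store */
extern int writeback_entry(void *data);

static int example_evict(struct zbud_pool *pool, unsigned long handle)
{
	void *data = zbud_map(pool, handle);
	int err = writeback_entry(data);

	zbud_unmap(pool, handle);
	if (err)
		return err;		/* reclaim re-lists the zbud page */
	zbud_free(pool, handle);	/* deferred while under_reclaim is set */
	return 0;
}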
+
+/**
+ * zbud_map() - maps the allocation associated with the given handle
+ * @pool: pool in which the allocation resides
+ * @handle: handle associated with the allocation to be mapped
+ *
+ * While trivial for zbud, the mapping functions for other allocators
+ * implementing this allocation API could have more complex information encoded
+ * in the handle and could create temporary mappings to make the data
+ * accessible to the user.
+ *
+ * Returns: a pointer to the mapped allocation
+ */
+void *zbud_map(struct zbud_pool *pool, unsigned long handle)
+{
+ return (void *)(handle);
+}
+
+/**
+ * zbud_unmap() - unmaps the allocation associated with the given handle
+ * @pool: pool in which the allocation resides
+ * @handle: handle associated with the allocation to be unmapped
+ */
+void zbud_unmap(struct zbud_pool *pool, unsigned long handle)
+{
+}
+
+/**
+ * zbud_get_pool_size() - gets the zbud pool size in pages
+ * @pool: pool whose size is being queried
+ *
+ * Returns: size in pages of the given pool. The pool lock need not be
+ * taken to access pages_nr.
+ */
+u64 zbud_get_pool_size(struct zbud_pool *pool)
+{
+ return pool->pages_nr;
+}
+
+static int __init init_zbud(void)
+{
+ /* Make sure the zbud header will fit in one chunk */
+ BUILD_BUG_ON(sizeof(struct zbud_header) > ZHDR_SIZE_ALIGNED);
+ pr_info("loaded\n");
+ return 0;
+}
+
+static void __exit exit_zbud(void)
+{
+ pr_info("unloaded\n");
+}
+
+module_init(init_zbud);
+module_exit(exit_zbud);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Seth Jennings <sjenning@linux.vnet.ibm.com>");
+MODULE_DESCRIPTION("Buddy Allocator for Compressed Pages");
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
new file mode 100644
index 000000000000..36b4591a7a2d
--- /dev/null
+++ b/mm/zsmalloc.c
@@ -0,0 +1,1117 @@
+/*
+ * zsmalloc memory allocator
+ *
+ * Copyright (C) 2011 Nitin Gupta
+ * Copyright (C) 2012, 2013 Minchan Kim
+ *
+ * This code is released using a dual license strategy: BSD/GPL
+ * You can choose the license that better fits your requirements.
+ *
+ * Released under the terms of 3-clause BSD License
+ * Released under the terms of GNU General Public License Version 2.0
+ */
+
+/*
+ * This allocator is designed for use with zram. Thus, the allocator is
+ * supposed to work well under low memory conditions. In particular, it
+ * never attempts higher order page allocation which is very likely to
+ * fail under memory pressure. On the other hand, if we just use single
+ * (0-order) pages, it would suffer from very high fragmentation --
+ * any object of size PAGE_SIZE/2 or larger would occupy an entire page.
+ * This was one of the major issues with its predecessor (xvmalloc).
+ *
+ * To overcome these issues, zsmalloc allocates a bunch of 0-order pages
+ * and links them together using various 'struct page' fields. These linked
+ * pages act as a single higher-order page i.e. an object can span 0-order
+ * page boundaries. The code refers to these linked pages as a single entity
+ * called zspage.
+ *
+ * For simplicity, zsmalloc can only allocate objects of size up to PAGE_SIZE
+ * since this satisfies the requirements of all its current users (in the
+ * worst case, page is incompressible and is thus stored "as-is" i.e. in
+ * uncompressed form). For allocation requests larger than this size, failure
+ * is returned (see zs_malloc).
+ *
+ * Additionally, zs_malloc() does not return a dereferenceable pointer.
+ * Instead, it returns an opaque handle (unsigned long) which encodes actual
+ * location of the allocated object. The reason for this indirection is that
+ * zsmalloc does not keep zspages permanently mapped since that would cause
+ * issues on 32-bit systems where the VA region for kernel space mappings
+ * is very small. So, before using the allocated memory, the object has to
+ * be mapped using zs_map_object() to get a usable pointer and subsequently
+ * unmapped using zs_unmap_object().
+ *
+ * Following is how we use various fields and flags of underlying
+ * struct page(s) to form a zspage.
+ *
+ * Usage of struct page fields:
+ * page->first_page: points to the first component (0-order) page
+ * page->index (union with page->freelist): offset of the first object
+ * starting in this page. For the first page, this is
+ * always 0, so we use this field (aka freelist) to point
+ * to the first free object in zspage.
+ * page->lru: links together all component pages (except the first page)
+ * of a zspage
+ *
+ * For _first_ page only:
+ *
+ * page->private (union with page->first_page): refers to the
+ * component page after the first page
+ * page->freelist: points to the first free object in zspage.
+ * Free objects are linked together using in-place
+ * metadata.
+ * page->objects: maximum number of objects we can store in this
+ * zspage (class->zspage_order * PAGE_SIZE / class->size)
+ * page->lru: links together first pages of various zspages.
+ * Basically forming list of zspages in a fullness group.
+ * page->mapping: class index and fullness group of the zspage
+ *
+ * Usage of struct page flags:
+ * PG_private: identifies the first component page
+ * PG_private2: identifies the last component page
+ *
+ */
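A sketch of how a user such as zram drives the handle-based API described above, using the entry points declared in <linux/zsmalloc.h>; the signatures shown are those of this kernel version and the helper itself is illustrative only.

#include <linux/zsmalloc.h>
#include <linux/string.h>
#include <linux/gfp.h>

static int zs_roundtrip(const void *src, size_t len)
{
	struct zs_pool *pool;
	unsigned long handle;
	void *dst;

	pool = zs_create_pool(GFP_KERNEL);
	if (!pool)
		return -ENOMEM;

	handle = zs_malloc(pool, len);	/* 0 on failure; len <= PAGE_SIZE */
	if (!handle) {
		zs_destroy_pool(pool);
		return -ENOMEM;
	}

	/* the object may straddle two 0-order pages, so map it first */
	dst = zs_map_object(pool, handle, ZS_MM_WO);
	memcpy(dst, src, len);
	zs_unmap_object(pool, handle);

	zs_free(pool, handle);
	zs_destroy_pool(pool);
	return 0;
}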
+
+#ifdef CONFIG_ZSMALLOC_DEBUG
+#define DEBUG
+#endif
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/highmem.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <asm/tlbflush.h>
+#include <asm/pgtable.h>
+#include <linux/cpumask.h>
+#include <linux/cpu.h>
+#include <linux/vmalloc.h>
+#include <linux/hardirq.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/zsmalloc.h>
+
+/*
+ * This must be a power of 2 and greater than or equal to sizeof(link_free).
+ * These two conditions ensure that any 'struct link_free' itself doesn't
+ * span more than 1 page which avoids complex case of mapping 2 pages simply
+ * to restore link_free pointer values.
+ */
+#define ZS_ALIGN 8
+
+/*
+ * A single 'zspage' is composed of up to 2^N discontiguous 0-order (single)
+ * pages. ZS_MAX_ZSPAGE_ORDER defines upper limit on N.
+ */
+#define ZS_MAX_ZSPAGE_ORDER 2
+#define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)
+
+/*
+ * Object location (<PFN>, <obj_idx>) is encoded as
+ * a single (unsigned long) handle value.
+ *
+ * Note that object index <obj_idx> is relative to system
+ * page <PFN> it is stored in, so for each sub-page belonging
+ * to a zspage, obj_idx starts with 0.
+ *
+ * This is made more complicated by various memory models and PAE.
+ */
+
+#ifndef MAX_PHYSMEM_BITS
+#ifdef CONFIG_HIGHMEM64G
+#define MAX_PHYSMEM_BITS 36
+#else /* !CONFIG_HIGHMEM64G */
+/*
+ * If this definition of MAX_PHYSMEM_BITS is used, OBJ_INDEX_BITS will just
+ * be PAGE_SHIFT
+ */
+#define MAX_PHYSMEM_BITS BITS_PER_LONG
+#endif
+#endif
+#define _PFN_BITS (MAX_PHYSMEM_BITS - PAGE_SHIFT)
+#define OBJ_INDEX_BITS (BITS_PER_LONG - _PFN_BITS)
+#define OBJ_INDEX_MASK ((_AC(1, UL) << OBJ_INDEX_BITS) - 1)
+
+#define MAX(a, b) ((a) >= (b) ? (a) : (b))
+/* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */
+#define ZS_MIN_ALLOC_SIZE \
+ MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
+#define ZS_MAX_ALLOC_SIZE PAGE_SIZE
+
+/*
+ * On systems with 4K page size, this gives 254 size classes! There is a
+ * trade-off here:
+ * - A large number of size classes is potentially wasteful as free pages are
+ * spread across these classes
+ * - A small number of size classes causes large internal fragmentation
+ * - Probably it's better to use specific size classes (empirically
+ * determined). NOTE: all those class sizes must be set as multiple of
+ * ZS_ALIGN to make sure link_free itself never has to span 2 pages.
+ *
+ * ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiple of ZS_ALIGN
+ * (reason above)
+ */
+#define ZS_SIZE_CLASS_DELTA (PAGE_SIZE >> 8)
+#define ZS_SIZE_CLASSES ((ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) / \
+ ZS_SIZE_CLASS_DELTA + 1)
+
+/*
+ * We do not maintain any list for completely empty or full pages
+ */
+enum fullness_group {
+ ZS_ALMOST_FULL,
+ ZS_ALMOST_EMPTY,
+ _ZS_NR_FULLNESS_GROUPS,
+
+ ZS_EMPTY,
+ ZS_FULL
+};
+
+/*
+ * We assign a page to ZS_ALMOST_EMPTY fullness group when:
+ * n <= N / f, where
+ * n = number of allocated objects
+ * N = total number of objects zspage can store
+ * f = 1/fullness_threshold_frac
+ *
+ * Similarly, we assign zspage to:
+ * ZS_ALMOST_FULL when n > N / f
+ * ZS_EMPTY when n == 0
+ * ZS_FULL when n == N
+ *
+ * (see: fix_fullness_group())
+ */
+static const int fullness_threshold_frac = 4;
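A short example of the thresholds above for a concrete zspage.

/*
 * Example with fullness_threshold_frac = 4: a zspage that can hold
 * N = 16 objects is ZS_EMPTY at n = 0, ZS_ALMOST_EMPTY for 1 <= n <= 4,
 * ZS_ALMOST_FULL for 4 < n < 16 and ZS_FULL at n = 16; only the two
 * ALMOST_* groups are kept on class->fullness_list[].
 */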
+
+struct size_class {
+ /*
+ * Size of objects stored in this class. Must be multiple
+ * of ZS_ALIGN.
+ */
+ int size;
+ unsigned int index;
+
+ /* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
+ int pages_per_zspage;
+
+ spinlock_t lock;
+
+ /* stats */
+ u64 pages_allocated;
+
+ struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS];
+};
+
+/*
+ * Placed within free objects to form a singly linked list.
+ * For every zspage, first_page->freelist gives head of this list.
+ *
+ * This must be power of 2 and less than or equal to ZS_ALIGN
+ */
+struct link_free {
+ /* Handle of next free chunk (encodes <PFN, obj_idx>) */
+ void *next;
+};
+
+struct zs_pool {
+ struct size_class size_class[ZS_SIZE_CLASSES];
+
+ gfp_t flags; /* allocation flags used when growing pool */
+};
+
+/*
+ * A zspage's class index and fullness group
+ * are encoded in its (first)page->mapping
+ */
+#define CLASS_IDX_BITS 28
+#define FULLNESS_BITS 4
+#define CLASS_IDX_MASK ((1 << CLASS_IDX_BITS) - 1)
+#define FULLNESS_MASK ((1 << FULLNESS_BITS) - 1)
+
+struct mapping_area {
+#ifdef CONFIG_PGTABLE_MAPPING
+ struct vm_struct *vm; /* vm area for mapping object that span pages */
+#else
+ char *vm_buf; /* copy buffer for objects that span pages */
+#endif
+ char *vm_addr; /* address of kmap_atomic()'ed pages */
+ enum zs_mapmode vm_mm; /* mapping mode */
+};
+
+
+/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
+static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
+
+static int is_first_page(struct page *page)
+{
+ return PagePrivate(page);
+}
+
+static int is_last_page(struct page *page)
+{
+ return PagePrivate2(page);
+}
+
+static void get_zspage_mapping(struct page *page, unsigned int *class_idx,
+ enum fullness_group *fullness)
+{
+ unsigned long m;
+ BUG_ON(!is_first_page(page));
+
+ m = (unsigned long)page->mapping;
+ *fullness = m & FULLNESS_MASK;
+ *class_idx = (m >> FULLNESS_BITS) & CLASS_IDX_MASK;
+}
+
+static void set_zspage_mapping(struct page *page, unsigned int class_idx,
+ enum fullness_group fullness)
+{
+ unsigned long m;
+ BUG_ON(!is_first_page(page));
+
+ m = ((class_idx & CLASS_IDX_MASK) << FULLNESS_BITS) |
+ (fullness & FULLNESS_MASK);
+ page->mapping = (struct address_space *)m;
+}
+
+/*
+ * zsmalloc divides the pool into various size classes where each
+ * class maintains a list of zspages where each zspage is divided
+ * into equal sized chunks. Each allocation falls into one of these
+ * classes depending on its size. This function returns index of the
+ * size class which has a chunk size big enough to hold the given size.
+ */
+static int get_size_class_index(int size)
+{
+ int idx = 0;
+
+ if (likely(size > ZS_MIN_ALLOC_SIZE))
+ idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
+ ZS_SIZE_CLASS_DELTA);
+
+ return idx;
+}
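A worked example of the class selection above, assuming 4K pages and that classes are spaced ZS_SIZE_CLASS_DELTA apart starting at ZS_MIN_ALLOC_SIZE (which is how the pool creation code sets them up).

/*
 * With 4K pages, ZS_SIZE_CLASS_DELTA = 4096 >> 8 = 16 bytes and
 * ZS_MIN_ALLOC_SIZE = 32.  A 300-byte object gives
 *
 *   idx = DIV_ROUND_UP(300 - 32, 16) = 17
 *
 * i.e. the 32 + 17 * 16 = 304-byte class, wasting 4 bytes per object.
 */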
+
+/*
+ * For each size class, zspages are divided into different groups
+ * depending on how "full" they are. This was done so that we could
+ * easily find empty or nearly empty zspages when we try to shrink
+ * the pool (not yet implemented). This function returns fullness
+ * status of the given page.
+ */
+static enum fullness_group get_fullness_group(struct page *page)
+{
+ int inuse, max_objects;
+ enum fullness_group fg;
+ BUG_ON(!is_first_page(page));
+
+ inuse = page->inuse;
+ max_objects = page->objects;
+
+ if (inuse == 0)
+ fg = ZS_EMPTY;
+ else if (inuse == max_objects)
+ fg = ZS_FULL;
+ else if (inuse <= max_objects / fullness_threshold_frac)
+ fg = ZS_ALMOST_EMPTY;
+ else
+ fg = ZS_ALMOST_FULL;
+
+ return fg;
+}
+
+/*
+ * Each size class maintains various freelists and zspages are assigned
+ * to one of these freelists based on the number of live objects they
+ * have. This function inserts the given zspage into the freelist
+ * identified by <class, fullness_group>.
+ */
+static void insert_zspage(struct page *page, struct size_class *class,
+ enum fullness_group fullness)
+{
+ struct page **head;
+
+ BUG_ON(!is_first_page(page));
+
+ if (fullness >= _ZS_NR_FULLNESS_GROUPS)
+ return;
+
+ head = &class->fullness_list[fullness];
+ if (*head)
+ list_add_tail(&page->lru, &(*head)->lru);
+
+ *head = page;
+}
+
+/*
+ * This function removes the given zspage from the freelist identified
+ * by <class, fullness_group>.
+ */
+static void remove_zspage(struct page *page, struct size_class *class,
+ enum fullness_group fullness)
+{
+ struct page **head;
+
+ BUG_ON(!is_first_page(page));
+
+ if (fullness >= _ZS_NR_FULLNESS_GROUPS)
+ return;
+
+ head = &class->fullness_list[fullness];
+ BUG_ON(!*head);
+ if (list_empty(&(*head)->lru))
+ *head = NULL;
+ else if (*head == page)
+ *head = (struct page *)list_entry((*head)->lru.next,
+ struct page, lru);
+
+ list_del_init(&page->lru);
+}
+
+/*
+ * Each size class maintains zspages in different fullness groups depending
+ * on the number of live objects they contain. When allocating or freeing
+ * objects, the fullness status of the page can change, say, from ALMOST_FULL
+ * to ALMOST_EMPTY when freeing an object. This function checks if such
+ * a status change has occurred for the given page and accordingly moves the
+ * page from the freelist of the old fullness group to that of the new
+ * fullness group.
+ */
+static enum fullness_group fix_fullness_group(struct zs_pool *pool,
+ struct page *page)
+{
+ int class_idx;
+ struct size_class *class;
+ enum fullness_group currfg, newfg;
+
+ BUG_ON(!is_first_page(page));
+
+ get_zspage_mapping(page, &class_idx, &currfg);
+ newfg = get_fullness_group(page);
+ if (newfg == currfg)
+ goto out;
+
+ class = &pool->size_class[class_idx];
+ remove_zspage(page, class, currfg);
+ insert_zspage(page, class, newfg);
+ set_zspage_mapping(page, class_idx, newfg);
+
+out:
+ return newfg;
+}
+
+/*
+ * We have to decide on how many pages to link together
+ * to form a zspage for each size class. This is important
+ * to reduce wastage due to unusable space left at end of
+ * each zspage which is given as:
+ * wastage = Zp - Zp % size_class
+ * where Zp = zspage size = k * PAGE_SIZE where k = 1, 2, ...
+ *
+ * For example, for size class of 3/8 * PAGE_SIZE, we should
+ * link together 3 PAGE_SIZE sized pages to form a zspage
+ * since then we can perfectly fit in 8 such objects.
+ */
+static int get_pages_per_zspage(int class_size)
+{
+ int i, max_usedpc = 0;
+ /* zspage order which gives maximum used size per KB */
+ int max_usedpc_order = 1;
+
+ for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
+ int zspage_size;
+ int waste, usedpc;
+
+ zspage_size = i * PAGE_SIZE;
+ waste = zspage_size % class_size;
+ usedpc = (zspage_size - waste) * 100 / zspage_size;
+
+ if (usedpc > max_usedpc) {
+ max_usedpc = usedpc;
+ max_usedpc_order = i;
+ }
+ }
+
+ return max_usedpc_order;
+}
+
+/*
+ * A single 'zspage' is composed of many system pages which are
+ * linked together using fields in struct page. This function finds
+ * the first/head page, given any component page of a zspage.
+ */
+static struct page *get_first_page(struct page *page)
+{
+ if (is_first_page(page))
+ return page;
+ else
+ return page->first_page;
+}
+
+static struct page *get_next_page(struct page *page)
+{
+ struct page *next;
+
+ if (is_last_page(page))
+ next = NULL;
+ else if (is_first_page(page))
+ next = (struct page *)page_private(page);
+ else
+ next = list_entry(page->lru.next, struct page, lru);
+
+ return next;
+}
+
+/*
+ * Encode <page, obj_idx> as a single handle value.
+ * On hardware platforms with physical memory starting at 0x0 the pfn
+ * could be 0 so we ensure that the handle will never be 0 by adjusting the
+ * obj_idx value before encoding.
+ */
+static void *obj_location_to_handle(struct page *page, unsigned long obj_idx)
+{
+ unsigned long handle;
+
+ if (!page) {
+ BUG_ON(obj_idx);
+ return NULL;
+ }
+
+ handle = page_to_pfn(page) << OBJ_INDEX_BITS;
+ handle |= ((obj_idx + 1) & OBJ_INDEX_MASK);
+
+ return (void *)handle;
+}
+
+/*
+ * Decode <page, obj_idx> pair from the given object handle. We adjust the
+ * decoded obj_idx back to its original value since it was adjusted in
+ * obj_location_to_handle().
+ */
+static void obj_handle_to_location(unsigned long handle, struct page **page,
+ unsigned long *obj_idx)
+{
+ *page = pfn_to_page(handle >> OBJ_INDEX_BITS);
+ *obj_idx = (handle & OBJ_INDEX_MASK) - 1;
+}
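A round-trip example for the two helpers above; the pfn and index are illustrative.

/*
 * Example: obj_idx = 5 in the page with pfn = 0x1234 encodes to
 *
 *   handle = (0x1234 << OBJ_INDEX_BITS) | ((5 + 1) & OBJ_INDEX_MASK)
 *
 * and decoding shifts/masks it back to pfn = 0x1234, obj_idx = 5.
 * The +1 guarantees a valid handle is never 0, even when pfn is 0.
 */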
+
+static unsigned long obj_idx_to_offset(struct page *page,
+ unsigned long obj_idx, int class_size)
+{
+ unsigned long off = 0;
+
+ if (!is_first_page(page))
+ off = page->index;
+
+ return off + obj_idx * class_size;
+}
+
+static void reset_page(struct page *page)
+{
+ clear_bit(PG_private, &page->flags);
+ clear_bit(PG_private_2, &page->flags);
+ set_page_private(page, 0);
+ page->mapping = NULL;
+ page->freelist = NULL;
+ page_mapcount_reset(page);
+}
+
+static void free_zspage(struct page *first_page)
+{
+ struct page *nextp, *tmp, *head_extra;
+
+ BUG_ON(!is_first_page(first_page));
+ BUG_ON(first_page->inuse);
+
+ head_extra = (struct page *)page_private(first_page);
+
+ reset_page(first_page);
+ __free_page(first_page);
+
+ /* zspage with only 1 system page */
+ if (!head_extra)
+ return;
+
+ list_for_each_entry_safe(nextp, tmp, &head_extra->lru, lru) {
+ list_del(&nextp->lru);
+ reset_page(nextp);
+ __free_page(nextp);
+ }
+ reset_page(head_extra);
+ __free_page(head_extra);
+}
+
+/* Initialize a newly allocated zspage */
+static void init_zspage(struct page *first_page, struct size_class *class)
+{
+ unsigned long off = 0;
+ struct page *page = first_page;
+
+ BUG_ON(!is_first_page(first_page));
+ while (page) {
+ struct page *next_page;
+ struct link_free *link;
+ unsigned int i, objs_on_page;
+
+ /*
+ * page->index stores offset of first object starting
+ * in the page. For the first page, this is always 0,
+ * so we use first_page->index (aka ->freelist) to store
+ * head of corresponding zspage's freelist.
+ */
+ if (page != first_page)
+ page->index = off;
+
+ link = (struct link_free *)kmap_atomic(page) +
+ off / sizeof(*link);
+ objs_on_page = (PAGE_SIZE - off) / class->size;
+
+ for (i = 1; i <= objs_on_page; i++) {
+ off += class->size;
+ if (off < PAGE_SIZE) {
+ link->next = obj_location_to_handle(page, i);
+ link += class->size / sizeof(*link);
+ }
+ }
+
+ /*
+ * We now come to the last (full or partial) object on this
+ * page, which must point to the first object on the next
+ * page (if present)
+ */
+ next_page = get_next_page(page);
+ link->next = obj_location_to_handle(next_page, 0);
+ kunmap_atomic(link);
+ page = next_page;
+ off = (off + class->size) % PAGE_SIZE;
+ }
+}
+
+/*
+ * Allocate a zspage for the given size class
+ */
+static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
+{
+ int i, error;
+ struct page *first_page = NULL, *uninitialized_var(prev_page);
+
+ /*
+ * Allocate individual pages and link them together as:
+ * 1. first page->private = first sub-page
+ * 2. all sub-pages are linked together using page->lru
+ * 3. each sub-page is linked to the first page using page->first_page
+ *
+ * For each size class, First/Head pages are linked together using
+ * page->lru. Also, we set PG_private to identify the first page
+ * (i.e. no other sub-page has this flag set) and PG_private_2 to
+ * identify the last page.
+ */
+ error = -ENOMEM;
+ for (i = 0; i < class->pages_per_zspage; i++) {
+ struct page *page;
+
+ page = alloc_page(flags);
+ if (!page)
+ goto cleanup;
+
+ INIT_LIST_HEAD(&page->lru);
+ if (i == 0) { /* first page */
+ SetPagePrivate(page);
+ set_page_private(page, 0);
+ first_page = page;
+ first_page->inuse = 0;
+ }
+ if (i == 1)
+ set_page_private(first_page, (unsigned long)page);
+ if (i >= 1)
+ page->first_page = first_page;
+ if (i >= 2)
+ list_add(&page->lru, &prev_page->lru);
+ if (i == class->pages_per_zspage - 1) /* last page */
+ SetPagePrivate2(page);
+ prev_page = page;
+ }
+
+ init_zspage(first_page, class);
+
+ first_page->freelist = obj_location_to_handle(first_page, 0);
+ /* Maximum number of objects we can store in this zspage */
+ first_page->objects = class->pages_per_zspage * PAGE_SIZE / class->size;
+
+ error = 0; /* Success */
+
+cleanup:
+ if (unlikely(error) && first_page) {
+ free_zspage(first_page);
+ first_page = NULL;
+ }
+
+ return first_page;
+}
+
+static struct page *find_get_zspage(struct size_class *class)
+{
+ int i;
+ struct page *page;
+
+ for (i = 0; i < _ZS_NR_FULLNESS_GROUPS; i++) {
+ page = class->fullness_list[i];
+ if (page)
+ break;
+ }
+
+ return page;
+}
+
+#ifdef CONFIG_PGTABLE_MAPPING
+static inline int __zs_cpu_up(struct mapping_area *area)
+{
+ /*
+ * Make sure we don't leak memory if a cpu UP notification
+ * and zs_init() race and both call zs_cpu_up() on the same cpu
+ */
+ if (area->vm)
+ return 0;
+ area->vm = alloc_vm_area(PAGE_SIZE * 2, NULL);
+ if (!area->vm)
+ return -ENOMEM;
+ return 0;
+}
+
+static inline void __zs_cpu_down(struct mapping_area *area)
+{
+ if (area->vm)
+ free_vm_area(area->vm);
+ area->vm = NULL;
+}
+
+static inline void *__zs_map_object(struct mapping_area *area,
+ struct page *pages[2], int off, int size)
+{
+ BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, &pages));
+ area->vm_addr = area->vm->addr;
+ return area->vm_addr + off;
+}
+
+static inline void __zs_unmap_object(struct mapping_area *area,
+ struct page *pages[2], int off, int size)
+{
+ unsigned long addr = (unsigned long)area->vm_addr;
+
+ unmap_kernel_range(addr, PAGE_SIZE * 2);
+}
+
+#else /* CONFIG_PGTABLE_MAPPING */
+
+static inline int __zs_cpu_up(struct mapping_area *area)
+{
+ /*
+ * Make sure we don't leak memory if a cpu UP notification
+ * and zs_init() race and both call zs_cpu_up() on the same cpu
+ */
+ if (area->vm_buf)
+ return 0;
+ area->vm_buf = (char *)__get_free_page(GFP_KERNEL);
+ if (!area->vm_buf)
+ return -ENOMEM;
+ return 0;
+}
+
+static inline void __zs_cpu_down(struct mapping_area *area)
+{
+ if (area->vm_buf)
+ free_page((unsigned long)area->vm_buf);
+ area->vm_buf = NULL;
+}
+
+static void *__zs_map_object(struct mapping_area *area,
+ struct page *pages[2], int off, int size)
+{
+ int sizes[2];
+ void *addr;
+ char *buf = area->vm_buf;
+
+ /* disable page faults to match kmap_atomic() return conditions */
+ pagefault_disable();
+
+ /* no read fastpath */
+ if (area->vm_mm == ZS_MM_WO)
+ goto out;
+
+ sizes[0] = PAGE_SIZE - off;
+ sizes[1] = size - sizes[0];
+
+ /* copy object to per-cpu buffer */
+ addr = kmap_atomic(pages[0]);
+ memcpy(buf, addr + off, sizes[0]);
+ kunmap_atomic(addr);
+ addr = kmap_atomic(pages[1]);
+ memcpy(buf + sizes[0], addr, sizes[1]);
+ kunmap_atomic(addr);
+out:
+ return area->vm_buf;
+}
+
+static void __zs_unmap_object(struct mapping_area *area,
+ struct page *pages[2], int off, int size)
+{
+ int sizes[2];
+ void *addr;
+ char *buf = area->vm_buf;
+
+ /* no write fastpath */
+ if (area->vm_mm == ZS_MM_RO)
+ goto out;
+
+ sizes[0] = PAGE_SIZE - off;
+ sizes[1] = size - sizes[0];
+
+ /* copy per-cpu buffer to object */
+ addr = kmap_atomic(pages[0]);
+ memcpy(addr + off, buf, sizes[0]);
+ kunmap_atomic(addr);
+ addr = kmap_atomic(pages[1]);
+ memcpy(addr, buf + sizes[0], sizes[1]);
+ kunmap_atomic(addr);
+
+out:
+ /* enable page faults to match kunmap_atomic() return conditions */
+ pagefault_enable();
+}
+
+#endif /* CONFIG_PGTABLE_MAPPING */
+
+static int zs_cpu_notifier(struct notifier_block *nb, unsigned long action,
+ void *pcpu)
+{
+ int ret, cpu = (long)pcpu;
+ struct mapping_area *area;
+
+ switch (action) {
+ case CPU_UP_PREPARE:
+ area = &per_cpu(zs_map_area, cpu);
+ ret = __zs_cpu_up(area);
+ if (ret)
+ return notifier_from_errno(ret);
+ break;
+ case CPU_DEAD:
+ case CPU_UP_CANCELED:
+ area = &per_cpu(zs_map_area, cpu);
+ __zs_cpu_down(area);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block zs_cpu_nb = {
+ .notifier_call = zs_cpu_notifier
+};
+
+static void zs_exit(void)
+{
+ int cpu;
+
+ cpu_notifier_register_begin();
+
+ for_each_online_cpu(cpu)
+ zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu);
+ __unregister_cpu_notifier(&zs_cpu_nb);
+
+ cpu_notifier_register_done();
+}
+
+static int zs_init(void)
+{
+ int cpu, ret;
+
+ cpu_notifier_register_begin();
+
+ __register_cpu_notifier(&zs_cpu_nb);
+ for_each_online_cpu(cpu) {
+ ret = zs_cpu_notifier(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
+ if (notifier_to_errno(ret)) {
+ cpu_notifier_register_done();
+ goto fail;
+ }
+ }
+
+ cpu_notifier_register_done();
+
+ return 0;
+fail:
+ zs_exit();
+ return notifier_to_errno(ret);
+}
+
+/**
+ * zs_create_pool - Creates an allocation pool to work from.
+ * @flags: allocation flags used to allocate pool metadata
+ *
+ * This function must be called before anything else when using
+ * the zsmalloc allocator.
+ *
+ * On success, a pointer to the newly created pool is returned,
+ * otherwise NULL.
+ */
+struct zs_pool *zs_create_pool(gfp_t flags)
+{
+ int i, ovhd_size;
+ struct zs_pool *pool;
+
+ ovhd_size = roundup(sizeof(*pool), PAGE_SIZE);
+ pool = kzalloc(ovhd_size, GFP_KERNEL);
+ if (!pool)
+ return NULL;
+
+ for (i = 0; i < ZS_SIZE_CLASSES; i++) {
+ int size;
+ struct size_class *class;
+
+ size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
+ if (size > ZS_MAX_ALLOC_SIZE)
+ size = ZS_MAX_ALLOC_SIZE;
+
+ class = &pool->size_class[i];
+ class->size = size;
+ class->index = i;
+ spin_lock_init(&class->lock);
+ class->pages_per_zspage = get_pages_per_zspage(size);
+	}
+
+ pool->flags = flags;
+
+ return pool;
+}
+EXPORT_SYMBOL_GPL(zs_create_pool);
+
+void zs_destroy_pool(struct zs_pool *pool)
+{
+ int i;
+
+ for (i = 0; i < ZS_SIZE_CLASSES; i++) {
+ int fg;
+ struct size_class *class = &pool->size_class[i];
+
+ for (fg = 0; fg < _ZS_NR_FULLNESS_GROUPS; fg++) {
+ if (class->fullness_list[fg]) {
+ pr_info("Freeing non-empty class with size %db, fullness group %d\n",
+ class->size, fg);
+ }
+ }
+ }
+ kfree(pool);
+}
+EXPORT_SYMBOL_GPL(zs_destroy_pool);
+
+/**
+ * zs_malloc - Allocate block of given size from pool.
+ * @pool: pool to allocate from
+ * @size: size of block to allocate
+ *
+ * On success, a handle to the allocated object is returned;
+ * otherwise 0.
+ * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
+ */
+unsigned long zs_malloc(struct zs_pool *pool, size_t size)
+{
+ unsigned long obj;
+ struct link_free *link;
+ int class_idx;
+ struct size_class *class;
+
+ struct page *first_page, *m_page;
+ unsigned long m_objidx, m_offset;
+
+ if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
+ return 0;
+
+ class_idx = get_size_class_index(size);
+ class = &pool->size_class[class_idx];
+ BUG_ON(class_idx != class->index);
+
+ spin_lock(&class->lock);
+ first_page = find_get_zspage(class);
+
+ if (!first_page) {
+ spin_unlock(&class->lock);
+ first_page = alloc_zspage(class, pool->flags);
+ if (unlikely(!first_page))
+ return 0;
+
+ set_zspage_mapping(first_page, class->index, ZS_EMPTY);
+ spin_lock(&class->lock);
+ class->pages_allocated += class->pages_per_zspage;
+ }
+
+ obj = (unsigned long)first_page->freelist;
+ obj_handle_to_location(obj, &m_page, &m_objidx);
+ m_offset = obj_idx_to_offset(m_page, m_objidx, class->size);
+
+ link = (struct link_free *)kmap_atomic(m_page) +
+ m_offset / sizeof(*link);
+ first_page->freelist = link->next;
+ memset(link, POISON_INUSE, sizeof(*link));
+ kunmap_atomic(link);
+
+ first_page->inuse++;
+ /* Now move the zspage to another fullness group, if required */
+ fix_fullness_group(pool, first_page);
+ spin_unlock(&class->lock);
+
+ return obj;
+}
+EXPORT_SYMBOL_GPL(zs_malloc);
+
+void zs_free(struct zs_pool *pool, unsigned long obj)
+{
+ struct link_free *link;
+ struct page *first_page, *f_page;
+ unsigned long f_objidx, f_offset;
+
+ int class_idx;
+ struct size_class *class;
+ enum fullness_group fullness;
+
+ if (unlikely(!obj))
+ return;
+
+ obj_handle_to_location(obj, &f_page, &f_objidx);
+ first_page = get_first_page(f_page);
+
+ get_zspage_mapping(first_page, &class_idx, &fullness);
+ class = &pool->size_class[class_idx];
+ f_offset = obj_idx_to_offset(f_page, f_objidx, class->size);
+
+ spin_lock(&class->lock);
+
+ /* Insert this object in containing zspage's freelist */
+ link = (struct link_free *)((unsigned char *)kmap_atomic(f_page)
+ + f_offset);
+ link->next = first_page->freelist;
+ kunmap_atomic(link);
+ first_page->freelist = (void *)obj;
+
+ first_page->inuse--;
+ fullness = fix_fullness_group(pool, first_page);
+
+ if (fullness == ZS_EMPTY)
+ class->pages_allocated -= class->pages_per_zspage;
+
+ spin_unlock(&class->lock);
+
+ if (fullness == ZS_EMPTY)
+ free_zspage(first_page);
+}
+EXPORT_SYMBOL_GPL(zs_free);
+
+/**
+ * zs_map_object - get address of allocated object from handle.
+ * @pool: pool from which the object was allocated
+ * @handle: handle returned from zs_malloc
+ *
+ * Before using an object allocated from zs_malloc, it must be mapped using
+ * this function. When done with the object, it must be unmapped using
+ * zs_unmap_object.
+ *
+ * Only one object can be mapped per cpu at a time. There is no protection
+ * against nested mappings.
+ *
+ * This function returns with preemption and page faults disabled.
+ */
+void *zs_map_object(struct zs_pool *pool, unsigned long handle,
+ enum zs_mapmode mm)
+{
+ struct page *page;
+ unsigned long obj_idx, off;
+
+ unsigned int class_idx;
+ enum fullness_group fg;
+ struct size_class *class;
+ struct mapping_area *area;
+ struct page *pages[2];
+
+ BUG_ON(!handle);
+
+ /*
+ * Because we use per-cpu mapping areas shared among the
+ * pools/users, we can't allow mapping in interrupt context
+	 * because it can corrupt another user's mappings.
+ */
+ BUG_ON(in_interrupt());
+
+ obj_handle_to_location(handle, &page, &obj_idx);
+ get_zspage_mapping(get_first_page(page), &class_idx, &fg);
+ class = &pool->size_class[class_idx];
+ off = obj_idx_to_offset(page, obj_idx, class->size);
+
+ area = &get_cpu_var(zs_map_area);
+ area->vm_mm = mm;
+ if (off + class->size <= PAGE_SIZE) {
+ /* this object is contained entirely within a page */
+ area->vm_addr = kmap_atomic(page);
+ return area->vm_addr + off;
+ }
+
+ /* this object spans two pages */
+ pages[0] = page;
+ pages[1] = get_next_page(page);
+ BUG_ON(!pages[1]);
+
+ return __zs_map_object(area, pages, off, class->size);
+}
+EXPORT_SYMBOL_GPL(zs_map_object);
+
+void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
+{
+ struct page *page;
+ unsigned long obj_idx, off;
+
+ unsigned int class_idx;
+ enum fullness_group fg;
+ struct size_class *class;
+ struct mapping_area *area;
+
+ BUG_ON(!handle);
+
+ obj_handle_to_location(handle, &page, &obj_idx);
+ get_zspage_mapping(get_first_page(page), &class_idx, &fg);
+ class = &pool->size_class[class_idx];
+ off = obj_idx_to_offset(page, obj_idx, class->size);
+
+ area = &__get_cpu_var(zs_map_area);
+ if (off + class->size <= PAGE_SIZE)
+ kunmap_atomic(area->vm_addr);
+ else {
+ struct page *pages[2];
+
+ pages[0] = page;
+ pages[1] = get_next_page(page);
+ BUG_ON(!pages[1]);
+
+ __zs_unmap_object(area, pages, off, class->size);
+ }
+ put_cpu_var(zs_map_area);
+}
+EXPORT_SYMBOL_GPL(zs_unmap_object);
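+
+/*
+ * A minimal usage sketch of the API above (hypothetical caller; error
+ * handling omitted, "src" and "len" are placeholders):
+ *
+ *	struct zs_pool *pool = zs_create_pool(GFP_KERNEL);
+ *	unsigned long handle = zs_malloc(pool, len);
+ *	void *dst = zs_map_object(pool, handle, ZS_MM_WO);
+ *
+ *	memcpy(dst, src, len);
+ *	zs_unmap_object(pool, handle);
+ *	...
+ *	zs_free(pool, handle);
+ *	zs_destroy_pool(pool);
+ */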
+
+u64 zs_get_total_size_bytes(struct zs_pool *pool)
+{
+ int i;
+ u64 npages = 0;
+
+ for (i = 0; i < ZS_SIZE_CLASSES; i++)
+ npages += pool->size_class[i].pages_allocated;
+
+ return npages << PAGE_SHIFT;
+}
+EXPORT_SYMBOL_GPL(zs_get_total_size_bytes);
+
+module_init(zs_init);
+module_exit(zs_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
diff --git a/mm/zswap.c b/mm/zswap.c
new file mode 100644
index 000000000000..aeaef0fb5624
--- /dev/null
+++ b/mm/zswap.c
@@ -0,0 +1,940 @@
+/*
+ * zswap.c - zswap driver file
+ *
+ * zswap is a backend for frontswap that takes pages that are in the process
+ * of being swapped out and attempts to compress and store them in a
+ * RAM-based memory pool. This can result in a significant I/O reduction on
+ * the swap device and, in the case where decompressing from RAM is faster
+ * than reading from the swap device, can also improve workload performance.
+ *
+ * Copyright (C) 2012 Seth Jennings <sjenning@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/cpu.h>
+#include <linux/highmem.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/atomic.h>
+#include <linux/frontswap.h>
+#include <linux/rbtree.h>
+#include <linux/swap.h>
+#include <linux/crypto.h>
+#include <linux/mempool.h>
+#include <linux/zbud.h>
+
+#include <linux/mm_types.h>
+#include <linux/page-flags.h>
+#include <linux/swapops.h>
+#include <linux/writeback.h>
+#include <linux/pagemap.h>
+
+/*********************************
+* statistics
+**********************************/
+/* Number of memory pages used by the compressed pool */
+static u64 zswap_pool_pages;
+/* The number of compressed pages currently stored in zswap */
+static atomic_t zswap_stored_pages = ATOMIC_INIT(0);
+
+/*
+ * The statistics below are not protected from concurrent access for
+ * performance reasons, so they may not be 100% accurate. However,
+ * they do provide useful information on roughly how many times a
+ * certain event is occurring.
+*/
+
+/* Pool limit was hit (see zswap_max_pool_percent) */
+static u64 zswap_pool_limit_hit;
+/* Pages written back when pool limit was reached */
+static u64 zswap_written_back_pages;
+/* Store failed due to a reclaim failure after pool limit was reached */
+static u64 zswap_reject_reclaim_fail;
+/* Compressed page was too big for the allocator to (optimally) store */
+static u64 zswap_reject_compress_poor;
+/* Store failed because underlying allocator could not get memory */
+static u64 zswap_reject_alloc_fail;
+/* Store failed because the entry metadata could not be allocated (rare) */
+static u64 zswap_reject_kmemcache_fail;
+/* Duplicate store was encountered (rare) */
+static u64 zswap_duplicate_entry;
+
+/*********************************
+* tunables
+**********************************/
+/* Enable/disable zswap (disabled by default, fixed at boot for now) */
+static bool zswap_enabled __read_mostly;
+module_param_named(enabled, zswap_enabled, bool, 0444);
+
+/* Compressor to be used by zswap (fixed at boot for now) */
+#define ZSWAP_COMPRESSOR_DEFAULT "lzo"
+static char *zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
+module_param_named(compressor, zswap_compressor, charp, 0444);
+
+/* The maximum percentage of memory that the compressed pool can occupy */
+static unsigned int zswap_max_pool_percent = 20;
+module_param_named(max_pool_percent,
+ zswap_max_pool_percent, uint, 0644);
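+
+/*
+ * Example (hypothetical values): with zswap built into the kernel, the
+ * tunables above can be set on the boot command line, e.g.
+ *	zswap.enabled=1 zswap.compressor=lzo zswap.max_pool_percent=20
+ * max_pool_percent (mode 0644) can also be changed at runtime:
+ *	echo 25 > /sys/module/zswap/parameters/max_pool_percent
+ */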
+
+/* The zbud pool is shared by the whole zswap backend */
+static struct zbud_pool *zswap_pool;
+
+/*********************************
+* compression functions
+**********************************/
+/* per-cpu compression transforms */
+static struct crypto_comp * __percpu *zswap_comp_pcpu_tfms;
+
+enum comp_op {
+ ZSWAP_COMPOP_COMPRESS,
+ ZSWAP_COMPOP_DECOMPRESS
+};
+
+static int zswap_comp_op(enum comp_op op, const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen)
+{
+ struct crypto_comp *tfm;
+ int ret;
+
+ tfm = *per_cpu_ptr(zswap_comp_pcpu_tfms, get_cpu());
+ switch (op) {
+ case ZSWAP_COMPOP_COMPRESS:
+ ret = crypto_comp_compress(tfm, src, slen, dst, dlen);
+ break;
+ case ZSWAP_COMPOP_DECOMPRESS:
+ ret = crypto_comp_decompress(tfm, src, slen, dst, dlen);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ put_cpu();
+ return ret;
+}
+
+static int __init zswap_comp_init(void)
+{
+ if (!crypto_has_comp(zswap_compressor, 0, 0)) {
+ pr_info("%s compressor not available\n", zswap_compressor);
+ /* fall back to default compressor */
+ zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
+ if (!crypto_has_comp(zswap_compressor, 0, 0))
+ /* can't even load the default compressor */
+ return -ENODEV;
+ }
+ pr_info("using %s compressor\n", zswap_compressor);
+
+ /* alloc percpu transforms */
+ zswap_comp_pcpu_tfms = alloc_percpu(struct crypto_comp *);
+ if (!zswap_comp_pcpu_tfms)
+ return -ENOMEM;
+ return 0;
+}
+
+static void zswap_comp_exit(void)
+{
+ /* free percpu transforms */
+ if (zswap_comp_pcpu_tfms)
+ free_percpu(zswap_comp_pcpu_tfms);
+}
+
+/*********************************
+* data structures
+**********************************/
+/*
+ * struct zswap_entry
+ *
+ * This structure contains the metadata for tracking a single compressed
+ * page within zswap.
+ *
+ * rbnode - links the entry into the red-black tree for the appropriate swap type
+ * refcount - the number of outstanding references to the entry. This is needed
+ *            to protect against premature freeing of the entry by concurrent
+ *            calls to load, invalidate, and writeback.  The lock
+ * for the zswap_tree structure that contains the entry must
+ * be held while changing the refcount. Since the lock must
+ * be held, there is no reason to also make refcount atomic.
+ * offset - the swap offset for the entry. Index into the red-black tree.
+ * handle - zbud allocation handle that stores the compressed page data
+ * length - the length in bytes of the compressed page data. Needed during
+ * decompression
+ */
+struct zswap_entry {
+ struct rb_node rbnode;
+ pgoff_t offset;
+ int refcount;
+ unsigned int length;
+ unsigned long handle;
+};
+
+struct zswap_header {
+ swp_entry_t swpentry;
+};
+
+/*
+ * The tree lock in the zswap_tree struct protects a few things:
+ * - the rbtree
+ * - the refcount field of each entry in the tree
+ */
+struct zswap_tree {
+ struct rb_root rbroot;
+ spinlock_t lock;
+};
+
+static struct zswap_tree *zswap_trees[MAX_SWAPFILES];
+
+/*********************************
+* zswap entry functions
+**********************************/
+static struct kmem_cache *zswap_entry_cache;
+
+static int zswap_entry_cache_create(void)
+{
+ zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
+ return zswap_entry_cache == NULL;
+}
+
+static void zswap_entry_cache_destroy(void)
+{
+ kmem_cache_destroy(zswap_entry_cache);
+}
+
+static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp)
+{
+ struct zswap_entry *entry;
+ entry = kmem_cache_alloc(zswap_entry_cache, gfp);
+ if (!entry)
+ return NULL;
+ entry->refcount = 1;
+ RB_CLEAR_NODE(&entry->rbnode);
+ return entry;
+}
+
+static void zswap_entry_cache_free(struct zswap_entry *entry)
+{
+ kmem_cache_free(zswap_entry_cache, entry);
+}
+
+/*********************************
+* rbtree functions
+**********************************/
+static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
+{
+ struct rb_node *node = root->rb_node;
+ struct zswap_entry *entry;
+
+ while (node) {
+ entry = rb_entry(node, struct zswap_entry, rbnode);
+ if (entry->offset > offset)
+ node = node->rb_left;
+ else if (entry->offset < offset)
+ node = node->rb_right;
+ else
+ return entry;
+ }
+ return NULL;
+}
+
+/*
+ * In the case that an entry with the same offset is found, a pointer to
+ * the existing entry is stored in dupentry and the function returns -EEXIST
+ */
+static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
+ struct zswap_entry **dupentry)
+{
+ struct rb_node **link = &root->rb_node, *parent = NULL;
+ struct zswap_entry *myentry;
+
+ while (*link) {
+ parent = *link;
+ myentry = rb_entry(parent, struct zswap_entry, rbnode);
+ if (myentry->offset > entry->offset)
+ link = &(*link)->rb_left;
+ else if (myentry->offset < entry->offset)
+ link = &(*link)->rb_right;
+ else {
+ *dupentry = myentry;
+ return -EEXIST;
+ }
+ }
+ rb_link_node(&entry->rbnode, parent, link);
+ rb_insert_color(&entry->rbnode, root);
+ return 0;
+}
+
+static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
+{
+ if (!RB_EMPTY_NODE(&entry->rbnode)) {
+ rb_erase(&entry->rbnode, root);
+ RB_CLEAR_NODE(&entry->rbnode);
+ }
+}
+
+/*
+ * Carries out the common pattern of freeing an entry's zbud allocation,
+ * freeing the entry itself, and decrementing the number of stored pages.
+ */
+static void zswap_free_entry(struct zswap_entry *entry)
+{
+ zbud_free(zswap_pool, entry->handle);
+ zswap_entry_cache_free(entry);
+ atomic_dec(&zswap_stored_pages);
+ zswap_pool_pages = zbud_get_pool_size(zswap_pool);
+}
+
+/* caller must hold the tree lock */
+static void zswap_entry_get(struct zswap_entry *entry)
+{
+ entry->refcount++;
+}
+
+/*
+ * Caller must hold the tree lock.
+ * Remove the entry from the tree and free it if nobody references it.
+ */
+static void zswap_entry_put(struct zswap_tree *tree,
+ struct zswap_entry *entry)
+{
+ int refcount = --entry->refcount;
+
+ BUG_ON(refcount < 0);
+ if (refcount == 0) {
+ zswap_rb_erase(&tree->rbroot, entry);
+ zswap_free_entry(entry);
+ }
+}
+
+/* caller must hold the tree lock */
+static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,
+ pgoff_t offset)
+{
+ struct zswap_entry *entry = NULL;
+
+ entry = zswap_rb_search(root, offset);
+ if (entry)
+ zswap_entry_get(entry);
+
+ return entry;
+}
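+
+/*
+ * Sketch of the typical lookup pattern built on the helpers above (this is
+ * how the load and writeback paths below use them):
+ *
+ *	spin_lock(&tree->lock);
+ *	entry = zswap_entry_find_get(&tree->rbroot, offset);
+ *	spin_unlock(&tree->lock);
+ *	if (entry) {
+ *		... use entry->handle and entry->length ...
+ *		spin_lock(&tree->lock);
+ *		zswap_entry_put(tree, entry);
+ *		spin_unlock(&tree->lock);
+ *	}
+ */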
+
+/*********************************
+* per-cpu code
+**********************************/
+static DEFINE_PER_CPU(u8 *, zswap_dstmem);
+
+static int __zswap_cpu_notifier(unsigned long action, unsigned long cpu)
+{
+ struct crypto_comp *tfm;
+ u8 *dst;
+
+ switch (action) {
+ case CPU_UP_PREPARE:
+ tfm = crypto_alloc_comp(zswap_compressor, 0, 0);
+ if (IS_ERR(tfm)) {
+ pr_err("can't allocate compressor transform\n");
+ return NOTIFY_BAD;
+ }
+ *per_cpu_ptr(zswap_comp_pcpu_tfms, cpu) = tfm;
+ dst = kmalloc(PAGE_SIZE * 2, GFP_KERNEL);
+ if (!dst) {
+ pr_err("can't allocate compressor buffer\n");
+ crypto_free_comp(tfm);
+ *per_cpu_ptr(zswap_comp_pcpu_tfms, cpu) = NULL;
+ return NOTIFY_BAD;
+ }
+ per_cpu(zswap_dstmem, cpu) = dst;
+ break;
+ case CPU_DEAD:
+ case CPU_UP_CANCELED:
+ tfm = *per_cpu_ptr(zswap_comp_pcpu_tfms, cpu);
+ if (tfm) {
+ crypto_free_comp(tfm);
+ *per_cpu_ptr(zswap_comp_pcpu_tfms, cpu) = NULL;
+ }
+ dst = per_cpu(zswap_dstmem, cpu);
+ kfree(dst);
+ per_cpu(zswap_dstmem, cpu) = NULL;
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static int zswap_cpu_notifier(struct notifier_block *nb,
+ unsigned long action, void *pcpu)
+{
+ unsigned long cpu = (unsigned long)pcpu;
+ return __zswap_cpu_notifier(action, cpu);
+}
+
+static struct notifier_block zswap_cpu_notifier_block = {
+ .notifier_call = zswap_cpu_notifier
+};
+
+static int zswap_cpu_init(void)
+{
+ unsigned long cpu;
+
+ cpu_notifier_register_begin();
+ for_each_online_cpu(cpu)
+ if (__zswap_cpu_notifier(CPU_UP_PREPARE, cpu) != NOTIFY_OK)
+ goto cleanup;
+ __register_cpu_notifier(&zswap_cpu_notifier_block);
+ cpu_notifier_register_done();
+ return 0;
+
+cleanup:
+ for_each_online_cpu(cpu)
+ __zswap_cpu_notifier(CPU_UP_CANCELED, cpu);
+ cpu_notifier_register_done();
+ return -ENOMEM;
+}
+
+/*********************************
+* helpers
+**********************************/
+static bool zswap_is_full(void)
+{
+ return totalram_pages * zswap_max_pool_percent / 100 <
+ zswap_pool_pages;
+}
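+
+/*
+ * For example (assumed numbers): with totalram_pages = 1048576 (4GB of 4K
+ * pages) and the default zswap_max_pool_percent of 20, the pool is
+ * considered full once zswap_pool_pages exceeds 1048576 * 20 / 100 =
+ * 209715 pages, i.e. roughly 800MB of pool memory.
+ */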
+
+/*********************************
+* writeback code
+**********************************/
+/* return enum for zswap_get_swap_cache_page */
+enum zswap_get_swap_ret {
+ ZSWAP_SWAPCACHE_NEW,
+ ZSWAP_SWAPCACHE_EXIST,
+ ZSWAP_SWAPCACHE_FAIL,
+};
+
+/*
+ * zswap_get_swap_cache_page
+ *
+ * This is an adaptation of read_swap_cache_async()
+ *
+ * This function tries to find a page with the given swap entry
+ * in the swapper_space address space (the swap cache). If the page
+ * is found, it is returned in retpage. Otherwise, a page is allocated,
+ * added to the swap cache, and returned in retpage.
+ *
+ * On success, the swap cache page is returned in retpage
+ * Returns ZSWAP_SWAPCACHE_EXIST if page was already in the swap cache
+ * Returns ZSWAP_SWAPCACHE_NEW if the new page needs to be populated,
+ * the new page is added to swapcache and locked
+ * Returns ZSWAP_SWAPCACHE_FAIL on error
+ */
+static int zswap_get_swap_cache_page(swp_entry_t entry,
+ struct page **retpage)
+{
+ struct page *found_page, *new_page = NULL;
+ struct address_space *swapper_space = swap_address_space(entry);
+ int err;
+
+ *retpage = NULL;
+ do {
+ /*
+ * First check the swap cache. Since this is normally
+ * called after lookup_swap_cache() failed, re-calling
+ * that would confuse statistics.
+ */
+ found_page = find_get_page(swapper_space, entry.val);
+ if (found_page)
+ break;
+
+ /*
+ * Get a new page to read into from swap.
+ */
+ if (!new_page) {
+ new_page = alloc_page(GFP_KERNEL);
+ if (!new_page)
+ break; /* Out of memory */
+ }
+
+ /*
+ * call radix_tree_preload() while we can wait.
+ */
+ err = radix_tree_preload(GFP_KERNEL);
+ if (err)
+ break;
+
+ /*
+ * Swap entry may have been freed since our caller observed it.
+ */
+ err = swapcache_prepare(entry);
+ if (err == -EEXIST) { /* seems racy */
+ radix_tree_preload_end();
+ continue;
+ }
+ if (err) { /* swp entry is obsolete ? */
+ radix_tree_preload_end();
+ break;
+ }
+
+ /* May fail (-ENOMEM) if radix-tree node allocation failed. */
+ __set_page_locked(new_page);
+ SetPageSwapBacked(new_page);
+ err = __add_to_swap_cache(new_page, entry);
+ if (likely(!err)) {
+ radix_tree_preload_end();
+ lru_cache_add_anon(new_page);
+ *retpage = new_page;
+ return ZSWAP_SWAPCACHE_NEW;
+ }
+ radix_tree_preload_end();
+ ClearPageSwapBacked(new_page);
+ __clear_page_locked(new_page);
+ /*
+ * add_to_swap_cache() doesn't return -EEXIST, so we can safely
+ * clear SWAP_HAS_CACHE flag.
+ */
+ swapcache_free(entry, NULL);
+ } while (err != -ENOMEM);
+
+ if (new_page)
+ page_cache_release(new_page);
+ if (!found_page)
+ return ZSWAP_SWAPCACHE_FAIL;
+ *retpage = found_page;
+ return ZSWAP_SWAPCACHE_EXIST;
+}
+
+/*
+ * Attempts to free an entry by adding a page to the swap cache,
+ * decompressing the entry data into the page, and issuing a
+ * bio write to write the page back to the swap device.
+ *
+ * This can be thought of as a "resumed writeback" of the page
+ * to the swap device. We are basically resuming the same swap
+ * writeback path that was intercepted by frontswap_store()
+ * in the first place. After the page has been decompressed into
+ * the swap cache, the compressed version stored by zswap can be
+ * freed.
+ */
+static int zswap_writeback_entry(struct zbud_pool *pool, unsigned long handle)
+{
+ struct zswap_header *zhdr;
+ swp_entry_t swpentry;
+ struct zswap_tree *tree;
+ pgoff_t offset;
+ struct zswap_entry *entry;
+ struct page *page;
+ u8 *src, *dst;
+ unsigned int dlen;
+ int ret;
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_NONE,
+ };
+
+ /* extract swpentry from data */
+ zhdr = zbud_map(pool, handle);
+ swpentry = zhdr->swpentry; /* here */
+ zbud_unmap(pool, handle);
+ tree = zswap_trees[swp_type(swpentry)];
+ offset = swp_offset(swpentry);
+
+ /* find and ref zswap entry */
+ spin_lock(&tree->lock);
+ entry = zswap_entry_find_get(&tree->rbroot, offset);
+ if (!entry) {
+ /* entry was invalidated */
+ spin_unlock(&tree->lock);
+ return 0;
+ }
+ spin_unlock(&tree->lock);
+ BUG_ON(offset != entry->offset);
+
+ /* try to allocate swap cache page */
+ switch (zswap_get_swap_cache_page(swpentry, &page)) {
+ case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */
+ ret = -ENOMEM;
+ goto fail;
+
+ case ZSWAP_SWAPCACHE_EXIST:
+ /* page is already in the swap cache, ignore for now */
+ page_cache_release(page);
+ ret = -EEXIST;
+ goto fail;
+
+ case ZSWAP_SWAPCACHE_NEW: /* page is locked */
+ /* decompress */
+ dlen = PAGE_SIZE;
+ src = (u8 *)zbud_map(zswap_pool, entry->handle) +
+ sizeof(struct zswap_header);
+ dst = kmap_atomic(page);
+ ret = zswap_comp_op(ZSWAP_COMPOP_DECOMPRESS, src,
+ entry->length, dst, &dlen);
+ kunmap_atomic(dst);
+ zbud_unmap(zswap_pool, entry->handle);
+ BUG_ON(ret);
+ BUG_ON(dlen != PAGE_SIZE);
+
+ /* page is up to date */
+ SetPageUptodate(page);
+ }
+
+ /* move it to the tail of the inactive list after end_writeback */
+ SetPageReclaim(page);
+
+ /* start writeback */
+ __swap_writepage(page, &wbc, end_swap_bio_write);
+ page_cache_release(page);
+ zswap_written_back_pages++;
+
+ spin_lock(&tree->lock);
+ /* drop local reference */
+ zswap_entry_put(tree, entry);
+
+	/*
+	 * There are two possible situations for the entry here:
+	 * (1) refcount is 1 (normal case): the entry is valid and on the tree
+	 * (2) refcount is 0: the entry was freed and removed from the tree
+	 *     because an invalidate happened during writeback
+	 * Search the tree and, if the entry is still there, drop the tree's
+	 * reference so it gets freed.
+	 */
+ if (entry == zswap_rb_search(&tree->rbroot, offset))
+ zswap_entry_put(tree, entry);
+ spin_unlock(&tree->lock);
+
+ goto end;
+
+	/*
+	 * If we get here because of ZSWAP_SWAPCACHE_EXIST, a load may be
+	 * happening concurrently, so it is safe not to free the entry here.
+	 * If the following put does free the entry, it is also okay to
+	 * return non-zero.
+	 */
+fail:
+ spin_lock(&tree->lock);
+ zswap_entry_put(tree, entry);
+ spin_unlock(&tree->lock);
+
+end:
+ return ret;
+}
+
+/*********************************
+* frontswap hooks
+**********************************/
+/* attempts to compress and store a single page */
+static int zswap_frontswap_store(unsigned type, pgoff_t offset,
+ struct page *page)
+{
+ struct zswap_tree *tree = zswap_trees[type];
+ struct zswap_entry *entry, *dupentry;
+ int ret;
+ unsigned int dlen = PAGE_SIZE, len;
+ unsigned long handle;
+ char *buf;
+ u8 *src, *dst;
+ struct zswap_header *zhdr;
+
+ if (!tree) {
+ ret = -ENODEV;
+ goto reject;
+ }
+
+ /* reclaim space if needed */
+ if (zswap_is_full()) {
+ zswap_pool_limit_hit++;
+ if (zbud_reclaim_page(zswap_pool, 8)) {
+ zswap_reject_reclaim_fail++;
+ ret = -ENOMEM;
+ goto reject;
+ }
+ }
+
+ /* allocate entry */
+ entry = zswap_entry_cache_alloc(GFP_KERNEL);
+ if (!entry) {
+ zswap_reject_kmemcache_fail++;
+ ret = -ENOMEM;
+ goto reject;
+ }
+
+ /* compress */
+ dst = get_cpu_var(zswap_dstmem);
+ src = kmap_atomic(page);
+ ret = zswap_comp_op(ZSWAP_COMPOP_COMPRESS, src, PAGE_SIZE, dst, &dlen);
+ kunmap_atomic(src);
+ if (ret) {
+ ret = -EINVAL;
+ goto freepage;
+ }
+
+ /* store */
+ len = dlen + sizeof(struct zswap_header);
+ ret = zbud_alloc(zswap_pool, len, __GFP_NORETRY | __GFP_NOWARN,
+ &handle);
+ if (ret == -ENOSPC) {
+ zswap_reject_compress_poor++;
+ goto freepage;
+ }
+ if (ret) {
+ zswap_reject_alloc_fail++;
+ goto freepage;
+ }
+ zhdr = zbud_map(zswap_pool, handle);
+ zhdr->swpentry = swp_entry(type, offset);
+ buf = (u8 *)(zhdr + 1);
+ memcpy(buf, dst, dlen);
+ zbud_unmap(zswap_pool, handle);
+ put_cpu_var(zswap_dstmem);
+
+ /* populate entry */
+ entry->offset = offset;
+ entry->handle = handle;
+ entry->length = dlen;
+
+ /* map */
+ spin_lock(&tree->lock);
+ do {
+ ret = zswap_rb_insert(&tree->rbroot, entry, &dupentry);
+ if (ret == -EEXIST) {
+ zswap_duplicate_entry++;
+ /* remove from rbtree */
+ zswap_rb_erase(&tree->rbroot, dupentry);
+ zswap_entry_put(tree, dupentry);
+ }
+ } while (ret == -EEXIST);
+ spin_unlock(&tree->lock);
+
+ /* update stats */
+ atomic_inc(&zswap_stored_pages);
+ zswap_pool_pages = zbud_get_pool_size(zswap_pool);
+
+ return 0;
+
+freepage:
+ put_cpu_var(zswap_dstmem);
+ zswap_entry_cache_free(entry);
+reject:
+ return ret;
+}
+
+/*
+ * Returns 0 if the page was successfully decompressed.
+ * Returns -1 if the entry was not found or on error.
+ */
+static int zswap_frontswap_load(unsigned type, pgoff_t offset,
+ struct page *page)
+{
+ struct zswap_tree *tree = zswap_trees[type];
+ struct zswap_entry *entry;
+ u8 *src, *dst;
+ unsigned int dlen;
+ int ret;
+
+ /* find */
+ spin_lock(&tree->lock);
+ entry = zswap_entry_find_get(&tree->rbroot, offset);
+ if (!entry) {
+ /* entry was written back */
+ spin_unlock(&tree->lock);
+ return -1;
+ }
+ spin_unlock(&tree->lock);
+
+ /* decompress */
+ dlen = PAGE_SIZE;
+ src = (u8 *)zbud_map(zswap_pool, entry->handle) +
+ sizeof(struct zswap_header);
+ dst = kmap_atomic(page);
+ ret = zswap_comp_op(ZSWAP_COMPOP_DECOMPRESS, src, entry->length,
+ dst, &dlen);
+ kunmap_atomic(dst);
+ zbud_unmap(zswap_pool, entry->handle);
+ BUG_ON(ret);
+
+ spin_lock(&tree->lock);
+ zswap_entry_put(tree, entry);
+ spin_unlock(&tree->lock);
+
+ return 0;
+}
+
+/* frees an entry in zswap */
+static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset)
+{
+ struct zswap_tree *tree = zswap_trees[type];
+ struct zswap_entry *entry;
+
+ /* find */
+ spin_lock(&tree->lock);
+ entry = zswap_rb_search(&tree->rbroot, offset);
+ if (!entry) {
+ /* entry was written back */
+ spin_unlock(&tree->lock);
+ return;
+ }
+
+ /* remove from rbtree */
+ zswap_rb_erase(&tree->rbroot, entry);
+
+ /* drop the initial reference from entry creation */
+ zswap_entry_put(tree, entry);
+
+ spin_unlock(&tree->lock);
+}
+
+/* frees all zswap entries for the given swap type */
+static void zswap_frontswap_invalidate_area(unsigned type)
+{
+ struct zswap_tree *tree = zswap_trees[type];
+ struct zswap_entry *entry, *n;
+
+ if (!tree)
+ return;
+
+ /* walk the tree and free everything */
+ spin_lock(&tree->lock);
+ rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode)
+ zswap_free_entry(entry);
+ tree->rbroot = RB_ROOT;
+ spin_unlock(&tree->lock);
+ kfree(tree);
+ zswap_trees[type] = NULL;
+}
+
+static struct zbud_ops zswap_zbud_ops = {
+ .evict = zswap_writeback_entry
+};
+
+static void zswap_frontswap_init(unsigned type)
+{
+ struct zswap_tree *tree;
+
+ tree = kzalloc(sizeof(struct zswap_tree), GFP_KERNEL);
+ if (!tree) {
+ pr_err("alloc failed, zswap disabled for swap type %d\n", type);
+ return;
+ }
+
+ tree->rbroot = RB_ROOT;
+ spin_lock_init(&tree->lock);
+ zswap_trees[type] = tree;
+}
+
+static struct frontswap_ops zswap_frontswap_ops = {
+ .store = zswap_frontswap_store,
+ .load = zswap_frontswap_load,
+ .invalidate_page = zswap_frontswap_invalidate_page,
+ .invalidate_area = zswap_frontswap_invalidate_area,
+ .init = zswap_frontswap_init
+};
+
+/*********************************
+* debugfs functions
+**********************************/
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+
+static struct dentry *zswap_debugfs_root;
+
+static int __init zswap_debugfs_init(void)
+{
+ if (!debugfs_initialized())
+ return -ENODEV;
+
+ zswap_debugfs_root = debugfs_create_dir("zswap", NULL);
+ if (!zswap_debugfs_root)
+ return -ENOMEM;
+
+ debugfs_create_u64("pool_limit_hit", S_IRUGO,
+ zswap_debugfs_root, &zswap_pool_limit_hit);
+ debugfs_create_u64("reject_reclaim_fail", S_IRUGO,
+ zswap_debugfs_root, &zswap_reject_reclaim_fail);
+ debugfs_create_u64("reject_alloc_fail", S_IRUGO,
+ zswap_debugfs_root, &zswap_reject_alloc_fail);
+ debugfs_create_u64("reject_kmemcache_fail", S_IRUGO,
+ zswap_debugfs_root, &zswap_reject_kmemcache_fail);
+ debugfs_create_u64("reject_compress_poor", S_IRUGO,
+ zswap_debugfs_root, &zswap_reject_compress_poor);
+ debugfs_create_u64("written_back_pages", S_IRUGO,
+ zswap_debugfs_root, &zswap_written_back_pages);
+ debugfs_create_u64("duplicate_entry", S_IRUGO,
+ zswap_debugfs_root, &zswap_duplicate_entry);
+ debugfs_create_u64("pool_pages", S_IRUGO,
+ zswap_debugfs_root, &zswap_pool_pages);
+ debugfs_create_atomic_t("stored_pages", S_IRUGO,
+ zswap_debugfs_root, &zswap_stored_pages);
+
+ return 0;
+}
+
+static void __exit zswap_debugfs_exit(void)
+{
+ debugfs_remove_recursive(zswap_debugfs_root);
+}
+#else
+static int __init zswap_debugfs_init(void)
+{
+ return 0;
+}
+
+static void __exit zswap_debugfs_exit(void) { }
+#endif
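+
+/*
+ * When CONFIG_DEBUG_FS is enabled and debugfs is mounted at its usual
+ * location, the statistics above can be read at runtime, e.g.
+ * (illustrative):
+ *	cat /sys/kernel/debug/zswap/pool_pages
+ *	cat /sys/kernel/debug/zswap/written_back_pages
+ */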
+
+/*********************************
+* module init and exit
+**********************************/
+static int __init init_zswap(void)
+{
+ if (!zswap_enabled)
+ return 0;
+
+ pr_info("loading zswap\n");
+
+ zswap_pool = zbud_create_pool(GFP_KERNEL, &zswap_zbud_ops);
+ if (!zswap_pool) {
+ pr_err("zbud pool creation failed\n");
+ goto error;
+ }
+
+ if (zswap_entry_cache_create()) {
+ pr_err("entry cache creation failed\n");
+ goto cachefail;
+ }
+ if (zswap_comp_init()) {
+ pr_err("compressor initialization failed\n");
+ goto compfail;
+ }
+ if (zswap_cpu_init()) {
+ pr_err("per-cpu initialization failed\n");
+ goto pcpufail;
+ }
+
+ frontswap_register_ops(&zswap_frontswap_ops);
+ if (zswap_debugfs_init())
+ pr_warn("debugfs initialization failed\n");
+ return 0;
+pcpufail:
+ zswap_comp_exit();
+compfail:
+	zswap_entry_cache_destroy();
+cachefail:
+ zbud_destroy_pool(zswap_pool);
+error:
+ return -ENOMEM;
+}
+/* must be late so crypto has time to come up */
+late_initcall(init_zswap);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Seth Jennings <sjenning@linux.vnet.ibm.com>");
+MODULE_DESCRIPTION("Compressed cache for swap pages");