author		KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>	2009-01-07 18:08:08 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-01-08 08:31:06 -0800
commit		a636b327f731143ccc544b966cfd8de6cb6d72c6 (patch)
tree		e53b03b64e8ebca20649c2d877bc4c3ef54ec34c /mm/memcontrol.c
parent		2e4d40915fb85207fe48cfc31201824ec6d7426e (diff)
download	linux-a636b327f731143ccc544b966cfd8de6cb6d72c6.tar.gz
memcg: avoid unnecessary system-wide-oom-killer
Current mmotm has a new OOM function, pagefault_out_of_memory().  It was
added to select a bad process rather than simply killing current.

When a memcg hits its limit and invokes OOM at page fault time, this
handler is called and system-wide OOM handling happens (which means the
kernel panics if panic_on_oom is set).

To avoid this overkill, check the memcg's recent behavior before
starting a system-wide OOM.
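The caller side of that check lives in pagefault_out_of_memory() in
mm/oom_kill.c and is not part of the mm/memcontrol.c diff shown below;
roughly, the idea is the following sketch (details simplified,
illustration only):

	/* Sketch only -- the real mm/oom_kill.c hunk is not shown here. */
	void pagefault_out_of_memory(void)
	{
		/*
		 * If current's memcg ran its own OOM killer within the
		 * last HZ/10 jiffies (see mem_cgroup_oom_called() in the
		 * diff below), the retried fault should succeed once the
		 * victim exits, so skip the system-wide OOM killer and
		 * any panic_on_oom panic.
		 */
		if (mem_cgroup_oom_called(current))
			return;

		/* ... otherwise fall through to global OOM handling ... */
	}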
This patch also fixes the accounting so that a process with TIF_MEMDIE
set is guaranteed never to be charged.  This is necessary for smooth
OOM handling.
[akpm@linux-foundation.org: build fix]
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Badari Pulavarty <pbadari@us.ibm.com>
Cc: Jan Blunck <jblunck@suse.de>
Cc: Hirokazu Takahashi <taka@valinux.co.jp>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--	mm/memcontrol.c	33
1 file changed, 29 insertions, 4 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 886e2224c5fd..659b0c58f13e 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -153,7 +153,7 @@ struct mem_cgroup {
 	 * Should the accounting and control be hierarchical, per subtree?
 	 */
 	bool use_hierarchy;
-
+	unsigned long	last_oom_jiffies;
 	int		obsolete;
 	atomic_t	refcnt;
 	/*
@@ -615,6 +615,22 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
 	return ret;
 }
 
+bool mem_cgroup_oom_called(struct task_struct *task)
+{
+	bool ret = false;
+	struct mem_cgroup *mem;
+	struct mm_struct *mm;
+
+	rcu_read_lock();
+	mm = task->mm;
+	if (!mm)
+		mm = &init_mm;
+	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
+	if (mem && time_before(jiffies, mem->last_oom_jiffies + HZ/10))
+		ret = true;
+	rcu_read_unlock();
+	return ret;
+}
 /*
  * Unlike exported interface, "oom" parameter is added. if oom==true,
  * oom-killer can be invoked.
@@ -626,6 +642,13 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
 	struct mem_cgroup *mem, *mem_over_limit;
 	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
 	struct res_counter *fail_res;
+
+	if (unlikely(test_thread_flag(TIF_MEMDIE))) {
+		/* Don't account this! */
+		*memcg = NULL;
+		return 0;
+	}
+
 	/*
 	 * We always charge the cgroup the mm_struct belongs to.
 	 * The mm_struct's mem_cgroup changes on task migration if the
@@ -694,8 +717,10 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
 			continue;
 
 		if (!nr_retries--) {
-			if (oom)
+			if (oom) {
 				mem_cgroup_out_of_memory(mem, gfp_mask);
+				mem->last_oom_jiffies = jiffies;
+			}
 			goto nomem;
 		}
 	}
@@ -832,7 +857,7 @@ static int mem_cgroup_move_parent(struct page_cgroup *pc,
 
 	ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
-	if (ret)
+	if (ret || !parent)
 		return ret;
 
 	if (!get_page_unless_zero(page))
@@ -883,7 +908,7 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 		mem = memcg;
 
 	ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
-	if (ret)
+	if (ret || !mem)
 		return ret;
 
 	__mem_cgroup_commit_charge(mem, pc, ctype);