Diffstat (limited to 'include/linux/memcontrol.h')
-rw-r--r--  include/linux/memcontrol.h | 348
1 file changed, 289 insertions(+), 59 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 899949bbb2f9..69966c461d1c 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -26,7 +26,8 @@
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
-#include <linux/mmzone.h>
+#include <linux/mm.h>
+#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>
@@ -44,8 +45,6 @@ enum memcg_stat_item {
MEMCG_SOCK,
/* XXX: why are these zone and not node counters? */
MEMCG_KERNEL_STACK_KB,
- MEMCG_SLAB_RECLAIMABLE,
- MEMCG_SLAB_UNRECLAIMABLE,
MEMCG_NR_STAT,
};
@@ -100,11 +99,16 @@ struct mem_cgroup_reclaim_iter {
unsigned int generation;
};
+struct lruvec_stat {
+ long count[NR_VM_NODE_STAT_ITEMS];
+};
+
/*
* per-zone information in memory controller.
*/
struct mem_cgroup_per_node {
struct lruvec lruvec;
+ struct lruvec_stat __percpu *lruvec_stat;
unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
struct mem_cgroup_reclaim_iter iter[DEF_PRIORITY + 1];
@@ -357,6 +361,17 @@ static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
}
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);
+static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
+{
+ struct mem_cgroup_per_node *mz;
+
+ if (mem_cgroup_disabled())
+ return NULL;
+
+ mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
+ return mz->memcg;
+}
+
/**
* parent_mem_cgroup - find the accounting parent of a memcg
* @memcg: memcg whose parent to find
@@ -469,11 +484,13 @@ bool mem_cgroup_oom_synchronize(bool wait);
extern int do_swap_account;
#endif
-void lock_page_memcg(struct page *page);
+struct mem_cgroup *lock_page_memcg(struct page *page);
+void __unlock_page_memcg(struct mem_cgroup *memcg);
void unlock_page_memcg(struct page *page);
+/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx)
+ int idx)
{
long val = 0;
int cpu;
@@ -487,23 +504,20 @@ static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
return val;
}
-static inline void mod_memcg_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx, int val)
+/* idx can be of type enum memcg_stat_item or node_stat_item */
+static inline void __mod_memcg_state(struct mem_cgroup *memcg,
+ int idx, int val)
{
if (!mem_cgroup_disabled())
- this_cpu_add(memcg->stat->count[idx], val);
-}
-
-static inline void inc_memcg_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx)
-{
- mod_memcg_state(memcg, idx, 1);
+ __this_cpu_add(memcg->stat->count[idx], val);
}
-static inline void dec_memcg_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx)
+/* idx can be of type enum memcg_stat_item or node_stat_item */
+static inline void mod_memcg_state(struct mem_cgroup *memcg,
+ int idx, int val)
{
- mod_memcg_state(memcg, idx, -1);
+ if (!mem_cgroup_disabled())
+ this_cpu_add(memcg->stat->count[idx], val);
}
/**
@@ -523,31 +537,114 @@ static inline void dec_memcg_state(struct mem_cgroup *memcg,
*
* Kernel pages are an exception to this, since they'll never move.
*/
+static inline void __mod_memcg_page_state(struct page *page,
+ int idx, int val)
+{
+ if (page->mem_cgroup)
+ __mod_memcg_state(page->mem_cgroup, idx, val);
+}
+
static inline void mod_memcg_page_state(struct page *page,
- enum memcg_stat_item idx, int val)
+ int idx, int val)
{
if (page->mem_cgroup)
mod_memcg_state(page->mem_cgroup, idx, val);
}
-static inline void inc_memcg_page_state(struct page *page,
- enum memcg_stat_item idx)
+static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
+ enum node_stat_item idx)
{
- mod_memcg_page_state(page, idx, 1);
+ struct mem_cgroup_per_node *pn;
+ long val = 0;
+ int cpu;
+
+ if (mem_cgroup_disabled())
+ return node_page_state(lruvec_pgdat(lruvec), idx);
+
+ pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
+ for_each_possible_cpu(cpu)
+ val += per_cpu(pn->lruvec_stat->count[idx], cpu);
+
+ if (val < 0)
+ val = 0;
+
+ return val;
}
-static inline void dec_memcg_page_state(struct page *page,
- enum memcg_stat_item idx)
+static inline void __mod_lruvec_state(struct lruvec *lruvec,
+ enum node_stat_item idx, int val)
{
- mod_memcg_page_state(page, idx, -1);
+ struct mem_cgroup_per_node *pn;
+
+ __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
+ if (mem_cgroup_disabled())
+ return;
+ pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
+ __mod_memcg_state(pn->memcg, idx, val);
+ __this_cpu_add(pn->lruvec_stat->count[idx], val);
+}
+
+static inline void mod_lruvec_state(struct lruvec *lruvec,
+ enum node_stat_item idx, int val)
+{
+ struct mem_cgroup_per_node *pn;
+
+ mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
+ if (mem_cgroup_disabled())
+ return;
+ pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
+ mod_memcg_state(pn->memcg, idx, val);
+ this_cpu_add(pn->lruvec_stat->count[idx], val);
+}
+
+static inline void __mod_lruvec_page_state(struct page *page,
+ enum node_stat_item idx, int val)
+{
+ struct mem_cgroup_per_node *pn;
+
+ __mod_node_page_state(page_pgdat(page), idx, val);
+ if (mem_cgroup_disabled() || !page->mem_cgroup)
+ return;
+ __mod_memcg_state(page->mem_cgroup, idx, val);
+ pn = page->mem_cgroup->nodeinfo[page_to_nid(page)];
+ __this_cpu_add(pn->lruvec_stat->count[idx], val);
+}
+
+static inline void mod_lruvec_page_state(struct page *page,
+ enum node_stat_item idx, int val)
+{
+ struct mem_cgroup_per_node *pn;
+
+ mod_node_page_state(page_pgdat(page), idx, val);
+ if (mem_cgroup_disabled() || !page->mem_cgroup)
+ return;
+ mod_memcg_state(page->mem_cgroup, idx, val);
+ pn = page->mem_cgroup->nodeinfo[page_to_nid(page)];
+ this_cpu_add(pn->lruvec_stat->count[idx], val);
}
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
gfp_t gfp_mask,
unsigned long *total_scanned);
-static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
- enum vm_event_item idx)
+static inline void count_memcg_events(struct mem_cgroup *memcg,
+ enum vm_event_item idx,
+ unsigned long count)
+{
+ if (!mem_cgroup_disabled())
+ this_cpu_add(memcg->stat->events[idx], count);
+}
+
+/* idx can be of type enum memcg_stat_item or node_stat_item */
+static inline void count_memcg_page_event(struct page *page,
+ int idx)
+{
+ if (page->mem_cgroup)
+ count_memcg_events(page->mem_cgroup, idx, 1);
+}
+
+static inline void count_memcg_event_mm(struct mm_struct *mm,
+ enum vm_event_item idx)
{
struct mem_cgroup *memcg;
@@ -556,8 +653,11 @@ static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
rcu_read_lock();
memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
- if (likely(memcg))
+ if (likely(memcg)) {
this_cpu_inc(memcg->stat->events[idx]);
+ if (idx == OOM_KILL)
+ cgroup_file_notify(&memcg->events_file);
+ }
rcu_read_unlock();
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -675,6 +775,11 @@ static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
return NULL;
}
+static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
+{
+ return NULL;
+}
+
static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
return true;
@@ -709,7 +814,12 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}
-static inline void lock_page_memcg(struct page *page)
+static inline struct mem_cgroup *lock_page_memcg(struct page *page)
+{
+ return NULL;
+}
+
+static inline void __unlock_page_memcg(struct mem_cgroup *memcg)
{
}
@@ -740,41 +850,63 @@ static inline bool mem_cgroup_oom_synchronize(bool wait)
}
static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx)
+ int idx)
{
return 0;
}
-static inline void mod_memcg_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx,
- int nr)
+static inline void __mod_memcg_state(struct mem_cgroup *memcg,
+ int idx,
+ int nr)
{
}
-static inline void inc_memcg_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx)
+static inline void mod_memcg_state(struct mem_cgroup *memcg,
+ int idx,
+ int nr)
{
}
-static inline void dec_memcg_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx)
+static inline void __mod_memcg_page_state(struct page *page,
+ int idx,
+ int nr)
{
}
static inline void mod_memcg_page_state(struct page *page,
- enum memcg_stat_item idx,
+ int idx,
int nr)
{
}
-static inline void inc_memcg_page_state(struct page *page,
- enum memcg_stat_item idx)
+static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
+ enum node_stat_item idx)
{
+ return node_page_state(lruvec_pgdat(lruvec), idx);
}
-static inline void dec_memcg_page_state(struct page *page,
- enum memcg_stat_item idx)
+static inline void __mod_lruvec_state(struct lruvec *lruvec,
+ enum node_stat_item idx, int val)
+{
+ __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
+}
+
+static inline void mod_lruvec_state(struct lruvec *lruvec,
+ enum node_stat_item idx, int val)
+{
+ mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
+}
+
+static inline void __mod_lruvec_page_state(struct page *page,
+ enum node_stat_item idx, int val)
+{
+ __mod_node_page_state(page_pgdat(page), idx, val);
+}
+
+static inline void mod_lruvec_page_state(struct page *page,
+ enum node_stat_item idx, int val)
{
+ mod_node_page_state(page_pgdat(page), idx, val);
}
static inline
@@ -789,12 +921,127 @@ static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}
+static inline void count_memcg_events(struct mem_cgroup *memcg,
+ enum vm_event_item idx,
+ unsigned long count)
+{
+}
+
+static inline void count_memcg_page_event(struct page *page,
+ int idx)
+{
+}
+
static inline
-void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
+void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}
#endif /* CONFIG_MEMCG */
+/* idx can be of type enum memcg_stat_item or node_stat_item */
+static inline void __inc_memcg_state(struct mem_cgroup *memcg,
+ int idx)
+{
+ __mod_memcg_state(memcg, idx, 1);
+}
+
+/* idx can be of type enum memcg_stat_item or node_stat_item */
+static inline void __dec_memcg_state(struct mem_cgroup *memcg,
+ int idx)
+{
+ __mod_memcg_state(memcg, idx, -1);
+}
+
+/* idx can be of type enum memcg_stat_item or node_stat_item */
+static inline void __inc_memcg_page_state(struct page *page,
+ int idx)
+{
+ __mod_memcg_page_state(page, idx, 1);
+}
+
+/* idx can be of type enum memcg_stat_item or node_stat_item */
+static inline void __dec_memcg_page_state(struct page *page,
+ int idx)
+{
+ __mod_memcg_page_state(page, idx, -1);
+}
+
+static inline void __inc_lruvec_state(struct lruvec *lruvec,
+ enum node_stat_item idx)
+{
+ __mod_lruvec_state(lruvec, idx, 1);
+}
+
+static inline void __dec_lruvec_state(struct lruvec *lruvec,
+ enum node_stat_item idx)
+{
+ __mod_lruvec_state(lruvec, idx, -1);
+}
+
+static inline void __inc_lruvec_page_state(struct page *page,
+ enum node_stat_item idx)
+{
+ __mod_lruvec_page_state(page, idx, 1);
+}
+
+static inline void __dec_lruvec_page_state(struct page *page,
+ enum node_stat_item idx)
+{
+ __mod_lruvec_page_state(page, idx, -1);
+}
+
+/* idx can be of type enum memcg_stat_item or node_stat_item */
+static inline void inc_memcg_state(struct mem_cgroup *memcg,
+ int idx)
+{
+ mod_memcg_state(memcg, idx, 1);
+}
+
+/* idx can be of type enum memcg_stat_item or node_stat_item */
+static inline void dec_memcg_state(struct mem_cgroup *memcg,
+ int idx)
+{
+ mod_memcg_state(memcg, idx, -1);
+}
+
+/* idx can be of type enum memcg_stat_item or node_stat_item */
+static inline void inc_memcg_page_state(struct page *page,
+ int idx)
+{
+ mod_memcg_page_state(page, idx, 1);
+}
+
+/* idx can be of type enum memcg_stat_item or node_stat_item */
+static inline void dec_memcg_page_state(struct page *page,
+ int idx)
+{
+ mod_memcg_page_state(page, idx, -1);
+}
+
+static inline void inc_lruvec_state(struct lruvec *lruvec,
+ enum node_stat_item idx)
+{
+ mod_lruvec_state(lruvec, idx, 1);
+}
+
+static inline void dec_lruvec_state(struct lruvec *lruvec,
+ enum node_stat_item idx)
+{
+ mod_lruvec_state(lruvec, idx, -1);
+}
+
+static inline void inc_lruvec_page_state(struct page *page,
+ enum node_stat_item idx)
+{
+ mod_lruvec_page_state(page, idx, 1);
+}
+
+static inline void dec_lruvec_page_state(struct page *page,
+ enum node_stat_item idx)
+{
+ mod_lruvec_page_state(page, idx, -1);
+}
+
#ifdef CONFIG_CGROUP_WRITEBACK
struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg);
@@ -886,19 +1133,6 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
return memcg ? memcg->kmemcg_id : -1;
}
-/**
- * memcg_kmem_update_page_stat - update kmem page state statistics
- * @page: the page
- * @idx: page state item to account
- * @val: number of pages (positive or negative)
- */
-static inline void memcg_kmem_update_page_stat(struct page *page,
- enum memcg_stat_item idx, int val)
-{
- if (memcg_kmem_enabled() && page->mem_cgroup)
- this_cpu_add(page->mem_cgroup->stat->count[idx], val);
-}
-
#else
#define for_each_memcg_cache_index(_idx) \
for (; NULL; )
@@ -921,10 +1155,6 @@ static inline void memcg_put_cache_ids(void)
{
}
-static inline void memcg_kmem_update_page_stat(struct page *page,
- enum memcg_stat_item idx, int val)
-{
-}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
#endif /* _LINUX_MEMCONTROL_H */
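
Taken together, the new helpers let a caller account a page once and have the change propagate to the node vmstat counter, the owning memcg's counter, and the per-lruvec counter this patch introduces; with memcg disabled, the inline stubs reduce to plain node accounting. A minimal usage sketch in kernel C follows. The caller is hypothetical and not part of this patch; NR_WRITEBACK is a real enum node_stat_item value used only for illustration.

#include <linux/memcontrol.h>
#include <linux/mm.h>

/*
 * Hypothetical caller: account a page entering or leaving
 * writeback. Each call updates three counters at once: the
 * node's vmstat counter, the owning memcg's counter, and the
 * per-lruvec counter added by this patch. With CONFIG_MEMCG=n
 * (or memcg disabled at runtime), the stubs above reduce this
 * to mod_node_page_state() on the page's node.
 */
static void my_account_writeback(struct page *page, bool start)
{
	if (start)
		inc_lruvec_page_state(page, NR_WRITEBACK);
	else
		dec_lruvec_page_state(page, NR_WRITEBACK);
}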
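The lock_page_memcg() signature change supports a pairing where the caller captures the memcg the page was locked against and releases exactly that memcg via the new __unlock_page_memcg(), instead of re-reading page->mem_cgroup at unlock time. A hedged sketch of that pairing, with a hypothetical caller and an illustrative stat index, not taken from the patch:

/*
 * Hypothetical caller: modify a memcg stat while page->mem_cgroup
 * is stabilized. lock_page_memcg() returns the memcg it locked
 * (or NULL when memcg is disabled or the page is uncharged), and
 * __unlock_page_memcg() unlocks that same memcg, which remains
 * valid even if the page's binding changes in between.
 */
static void my_mod_page_stat(struct page *page, int idx, int val)
{
	struct mem_cgroup *memcg;

	memcg = lock_page_memcg(page);
	if (memcg)
		mod_memcg_state(memcg, idx, val);
	__unlock_page_memcg(memcg);
}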