- /* memcontrol.h - Memory Controller
- *
- * Copyright IBM Corporation, 2007
- * Author Balbir Singh <balbir@linux.vnet.ibm.com>
- *
- * Copyright 2007 OpenVZ SWsoft Inc
- * Author: Pavel Emelianov <xemul@openvz.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
- #ifndef _LINUX_MEMCONTROL_H
- #define _LINUX_MEMCONTROL_H
- #include <linux/cgroup.h>
- #include <linux/vm_event_item.h>
- #include <linux/hardirq.h>
- #include <linux/jump_label.h>
- #include <linux/page_counter.h>
- #include <linux/vmpressure.h>
- #include <linux/eventfd.h>
- #include <linux/mmzone.h>
- #include <linux/writeback.h>
- struct mem_cgroup;
- struct page;
- struct mm_struct;
- struct kmem_cache;
- /*
- * The corresponding mem_cgroup_stat_names is defined in mm/memcontrol.c;
- * these two lists must be kept in sync with each other.
- */
- enum mem_cgroup_stat_index {
- /*
- * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
- */
- MEM_CGROUP_STAT_CACHE, /* # of pages charged as cache */
- MEM_CGROUP_STAT_RSS, /* # of pages charged as anon rss */
- MEM_CGROUP_STAT_RSS_HUGE, /* # of pages charged as anon huge */
- MEM_CGROUP_STAT_FILE_MAPPED, /* # of pages charged as file rss */
- MEM_CGROUP_STAT_DIRTY, /* # of dirty pages in page cache */
- MEM_CGROUP_STAT_WRITEBACK, /* # of pages under writeback */
- MEM_CGROUP_STAT_SWAP, /* # of pages swapped out */
- MEM_CGROUP_STAT_NSTATS,
- };
- struct mem_cgroup_reclaim_cookie {
- struct zone *zone;
- int priority;
- unsigned int generation;
- };
- enum mem_cgroup_events_index {
- MEM_CGROUP_EVENTS_PGPGIN, /* # of pages paged in */
- MEM_CGROUP_EVENTS_PGPGOUT, /* # of pages paged out */
- MEM_CGROUP_EVENTS_PGFAULT, /* # of page-faults */
- MEM_CGROUP_EVENTS_PGMAJFAULT, /* # of major page-faults */
- MEM_CGROUP_EVENTS_NSTATS,
- /* default hierarchy events */
- MEMCG_LOW = MEM_CGROUP_EVENTS_NSTATS,
- MEMCG_HIGH,
- MEMCG_MAX,
- MEMCG_OOM,
- MEMCG_NR_EVENTS,
- };
- /*
- * The per-memcg event counter is incremented on every pagein/pageout. With
- * THP, it is incremented by the number of pages. This counter is used to
- * trigger some periodic events, which is straightforward and better than
- * using jiffies etc. to handle periodic memcg events.
- */
- enum mem_cgroup_events_target {
- MEM_CGROUP_TARGET_THRESH,
- MEM_CGROUP_TARGET_SOFTLIMIT,
- MEM_CGROUP_TARGET_NUMAINFO,
- MEM_CGROUP_NTARGETS,
- };
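- /*
-  * Illustrative sketch (modelled on the ratelimit check in mm/memcontrol.c,
-  * not part of this header): nr_page_events is compared against the saved
-  * per-target value to decide whether a periodic event is due:
-  *
-  *     val = __this_cpu_read(memcg->stat->nr_page_events);
-  *     next = __this_cpu_read(memcg->stat->targets[target]);
-  *     if ((long)(next - val) < 0)
-  *             ... the target fired; advance it and run the handler ...
-  */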
- /*
- * Bits in struct cg_proto.flags
- */
- enum cg_proto_flags {
- /* Currently active and new sockets should be assigned to cgroups */
- MEMCG_SOCK_ACTIVE,
- /* It was ever activated; we must disarm static keys on destruction */
- MEMCG_SOCK_ACTIVATED,
- };
- struct cg_proto {
- struct page_counter memory_allocated; /* Current allocated memory. */
- struct percpu_counter sockets_allocated; /* Current number of sockets. */
- int memory_pressure;
- long sysctl_mem[3];
- unsigned long flags;
- /*
- * The memcg field is used to find the memcg we belong to directly.
- * Each memcg struct can hold more than one cg_proto, so container_of
- * won't really cut it.
- *
- * The elegant solution would be having an inverse function to
- * proto_cgroup in struct proto, but that means polluting the structure
- * for everybody, instead of just for memcg users.
- */
- struct mem_cgroup *memcg;
- };
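- /*
-  * Example (illustrative, not part of this header): given the above, going
-  * from a cg_proto back to its owning memcg uses the stored back pointer
-  * instead of container_of():
-  *
-  *     struct mem_cgroup *memcg = cg_proto->memcg;
-  */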
- #ifdef CONFIG_MEMCG
- struct mem_cgroup_stat_cpu {
- long count[MEM_CGROUP_STAT_NSTATS];
- unsigned long events[MEMCG_NR_EVENTS];
- unsigned long nr_page_events;
- unsigned long targets[MEM_CGROUP_NTARGETS];
- };
- struct mem_cgroup_reclaim_iter {
- struct mem_cgroup *position;
- /* scan generation, increased every round-trip */
- unsigned int generation;
- };
- /*
- * per-zone information in memory controller.
- */
- struct mem_cgroup_per_zone {
- struct lruvec lruvec;
- unsigned long lru_size[NR_LRU_LISTS];
- struct mem_cgroup_reclaim_iter iter[DEF_PRIORITY + 1];
- struct rb_node tree_node; /* RB tree node */
- unsigned long usage_in_excess; /* Set to the value by which */
- /* the soft limit is exceeded */
- bool on_tree;
- struct mem_cgroup *memcg; /* Back pointer, we cannot */
- /* use container_of */
- };
- struct mem_cgroup_per_node {
- struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
- };
- struct mem_cgroup_threshold {
- struct eventfd_ctx *eventfd;
- unsigned long threshold;
- };
- /* For threshold */
- struct mem_cgroup_threshold_ary {
- /* An array index pointing to the threshold just below or equal to usage. */
- int current_threshold;
- /* Size of entries[] */
- unsigned int size;
- /* Array of thresholds */
- struct mem_cgroup_threshold entries[0];
- };
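- /*
-  * Illustrative sketch (modelled on the event registration code in
-  * mm/memcontrol.c): entries[] is a flexible array member, so an array for
-  * @size thresholds is allocated in a single chunk:
-  *
-  *     new = kmalloc(sizeof(*new) +
-  *                   size * sizeof(struct mem_cgroup_threshold), GFP_KERNEL);
-  */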
- struct mem_cgroup_thresholds {
- /* Primary thresholds array */
- struct mem_cgroup_threshold_ary *primary;
- /*
- * Spare threshold array.
- * This is needed to make mem_cgroup_unregister_event() "never fail".
- * It must be able to store at least primary->size - 1 entries.
- */
- struct mem_cgroup_threshold_ary *spare;
- };
- struct mem_cgroup_id {
- int id;
- atomic_t ref;
- };
- /*
- * The memory controller data structure. The memory controller controls both
- * page cache and RSS per cgroup. We would eventually like to provide
- * statistics based on the statistics developed by Rik Van Riel for clock-pro,
- * to help the administrator determine what knobs to tune.
- */
- struct mem_cgroup {
- struct cgroup_subsys_state css;
- /* Private memcg ID. Used to ID objects that outlive the cgroup */
- struct mem_cgroup_id id;
- /* Accounted resources */
- struct page_counter memory;
- struct page_counter memsw;
- struct page_counter kmem;
- /* Normal memory consumption range */
- unsigned long low;
- unsigned long high;
- unsigned long soft_limit;
- /* vmpressure notifications */
- struct vmpressure vmpressure;
- /* css_online() has been completed */
- int initialized;
- /*
- * Should the accounting and control be hierarchical, per subtree?
- */
- bool use_hierarchy;
- /* protected by memcg_oom_lock */
- bool oom_lock;
- int under_oom;
- int swappiness;
- /* OOM-Killer disable */
- int oom_kill_disable;
- /* handle for "memory.events" */
- struct cgroup_file events_file;
- /* protect arrays of thresholds */
- struct mutex thresholds_lock;
- /* thresholds for memory usage. RCU-protected */
- struct mem_cgroup_thresholds thresholds;
- /* thresholds for mem+swap usage. RCU-protected */
- struct mem_cgroup_thresholds memsw_thresholds;
- /* For oom notifier event fd */
- struct list_head oom_notify;
- /*
- * Should we move charges of a task when a task is moved into this
- * mem_cgroup ? And what type of charges should we move ?
- */
- unsigned long move_charge_at_immigrate;
- /*
- * set > 0 if pages under this cgroup are moving to other cgroup.
- */
- atomic_t moving_account;
- /* taken only while moving_account > 0 */
- spinlock_t move_lock;
- struct task_struct *move_lock_task;
- unsigned long move_lock_flags;
- /*
- * percpu counter.
- */
- struct mem_cgroup_stat_cpu __percpu *stat;
- #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
- struct cg_proto tcp_mem;
- #endif
- #if defined(CONFIG_MEMCG_KMEM)
- /* Index in the kmem_cache->memcg_params.memcg_caches array */
- int kmemcg_id;
- bool kmem_acct_activated;
- bool kmem_acct_active;
- #endif
- int last_scanned_node;
- #if MAX_NUMNODES > 1
- nodemask_t scan_nodes;
- atomic_t numainfo_events;
- atomic_t numainfo_updating;
- #endif
- #ifdef CONFIG_CGROUP_WRITEBACK
- struct list_head cgwb_list;
- struct wb_domain cgwb_domain;
- #endif
- /* List of events which userspace want to receive */
- struct list_head event_list;
- spinlock_t event_list_lock;
- struct mem_cgroup_per_node *nodeinfo[0];
- /* WARNING: nodeinfo must be the last member here */
- };
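- /*
-  * Illustrative sketch (modelled on the allocation in mm/memcontrol.c):
-  * nodeinfo[] being a flexible array member at the end is what allows the
-  * structure to be sized for the number of possible NUMA nodes:
-  *
-  *     size = sizeof(struct mem_cgroup);
-  *     size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
-  */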
- extern struct cgroup_subsys_state *mem_cgroup_root_css;
- /**
- * mem_cgroup_events - count memory events against a cgroup
- * @memcg: the memory cgroup
- * @idx: the event index
- * @nr: the number of events to account for
- */
- static inline void mem_cgroup_events(struct mem_cgroup *memcg,
- enum mem_cgroup_events_index idx,
- unsigned int nr)
- {
- this_cpu_add(memcg->stat->events[idx], nr);
- cgroup_file_notify(&memcg->events_file);
- }
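- /*
-  * Example usage (illustrative): account one OOM event against @memcg and
-  * notify "memory.events" watchers:
-  *
-  *     mem_cgroup_events(memcg, MEMCG_OOM, 1);
-  */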
- bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg);
- int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
- gfp_t gfp_mask, struct mem_cgroup **memcgp);
- void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
- bool lrucare);
- void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg);
- void mem_cgroup_uncharge(struct page *page);
- void mem_cgroup_uncharge_list(struct list_head *page_list);
- void mem_cgroup_replace_page(struct page *oldpage, struct page *newpage);
- struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
- struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);
- bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);
- struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
- struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
- static inline
- struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
- {
- return css ? container_of(css, struct mem_cgroup, css) : NULL;
- }
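- /*
-  * Example usage (illustrative): resolve the memcg of a task's memory
-  * controller css; task_css() comes from linux/cgroup.h:
-  *
-  *     memcg = mem_cgroup_from_css(task_css(p, memory_cgrp_id));
-  */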
- struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
- struct mem_cgroup *,
- struct mem_cgroup_reclaim_cookie *);
- void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
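- /*
-  * Example (illustrative): the canonical hierarchy walk. Passing NULL as
-  * @prev starts the iteration; mem_cgroup_iter_break() drops the reference
-  * held on the current position when bailing out early ("should_stop" is a
-  * hypothetical condition):
-  *
-  *     struct mem_cgroup *iter;
-  *
-  *     for (iter = mem_cgroup_iter(root, NULL, NULL);
-  *          iter;
-  *          iter = mem_cgroup_iter(root, iter, NULL)) {
-  *             if (should_stop) {
-  *                     mem_cgroup_iter_break(root, iter);
-  *                     break;
-  *             }
-  *     }
-  */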
- static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
- struct mem_cgroup *root)
- {
- if (root == memcg)
- return true;
- if (!root->use_hierarchy)
- return false;
- return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
- }
- static inline bool mm_match_cgroup(struct mm_struct *mm,
- struct mem_cgroup *memcg)
- {
- struct mem_cgroup *task_memcg;
- bool match = false;
- rcu_read_lock();
- task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
- if (task_memcg)
- match = mem_cgroup_is_descendant(task_memcg, memcg);
- rcu_read_unlock();
- return match;
- }
- struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
- ino_t page_cgroup_ino(struct page *page);
- static inline bool mem_cgroup_disabled(void)
- {
- return !cgroup_subsys_enabled(memory_cgrp_subsys);
- }
- /*
- * For memory reclaim.
- */
- int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
- void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
- int nr_pages);
- static inline bool mem_cgroup_lruvec_online(struct lruvec *lruvec)
- {
- struct mem_cgroup_per_zone *mz;
- struct mem_cgroup *memcg;
- if (mem_cgroup_disabled())
- return true;
- mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
- memcg = mz->memcg;
- return !!(memcg->css.flags & CSS_ONLINE);
- }
- static inline
- unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
- {
- struct mem_cgroup_per_zone *mz;
- mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
- return mz->lru_size[lru];
- }
- static inline bool mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
- {
- unsigned long inactive_ratio;
- unsigned long inactive;
- unsigned long active;
- unsigned long gb;
- inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_ANON);
- active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_ANON);
- gb = (inactive + active) >> (30 - PAGE_SHIFT);
- if (gb)
- inactive_ratio = int_sqrt(10 * gb);
- else
- inactive_ratio = 1;
- return inactive * inactive_ratio < active;
- }
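- /*
-  * Worked example for the ratio above (added for clarity): with 10GB of
-  * anon pages, inactive_ratio = int_sqrt(10 * 10) = 10, so the inactive
-  * list is "low" once it falls below ~1/11th of the anon total; with 1GB,
-  * the ratio is int_sqrt(10) = 3, i.e. roughly a quarter.
-  */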
- void mem_cgroup_handle_over_high(void);
- void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
- struct task_struct *p);
- static inline void mem_cgroup_oom_enable(void)
- {
- WARN_ON(current->memcg_may_oom);
- current->memcg_may_oom = 1;
- }
- static inline void mem_cgroup_oom_disable(void)
- {
- WARN_ON(!current->memcg_may_oom);
- current->memcg_may_oom = 0;
- }
- static inline bool task_in_memcg_oom(struct task_struct *p)
- {
- return p->memcg_in_oom;
- }
- bool mem_cgroup_oom_synchronize(bool wait);
- #ifdef CONFIG_MEMCG_SWAP
- extern int do_swap_account;
- #endif
- struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page);
- void mem_cgroup_end_page_stat(struct mem_cgroup *memcg);
- /**
- * mem_cgroup_update_page_stat - update page state statistics
- * @memcg: memcg to account against
- * @idx: page state item to account
- * @val: number of pages (positive or negative)
- *
- * See mem_cgroup_begin_page_stat() for locking requirements.
- */
- static inline void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
- enum mem_cgroup_stat_index idx, int val)
- {
- VM_BUG_ON(!rcu_read_lock_held());
- if (memcg)
- this_cpu_add(memcg->stat->count[idx], val);
- }
- static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
- enum mem_cgroup_stat_index idx)
- {
- mem_cgroup_update_page_stat(memcg, idx, 1);
- }
- static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
- enum mem_cgroup_stat_index idx)
- {
- mem_cgroup_update_page_stat(memcg, idx, -1);
- }
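- /*
-  * Example usage (illustrative, modelled on the page dirtying paths): the
-  * begin/end pair pins the page's memcg so the update cannot race with
-  * charge migration, satisfying the locking rule documented above:
-  *
-  *     memcg = mem_cgroup_begin_page_stat(page);
-  *     mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_DIRTY);
-  *     mem_cgroup_end_page_stat(memcg);
-  */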
- unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
- gfp_t gfp_mask,
- unsigned long *total_scanned);
- static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
- enum vm_event_item idx)
- {
- struct mem_cgroup *memcg;
- if (mem_cgroup_disabled())
- return;
- rcu_read_lock();
- memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
- if (unlikely(!memcg))
- goto out;
- switch (idx) {
- case PGFAULT:
- this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]);
- break;
- case PGMAJFAULT:
- this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
- break;
- default:
- BUG();
- }
- out:
- rcu_read_unlock();
- }
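- /*
-  * Example usage (illustrative): major fault accounting from a fault
-  * handler:
-  *
-  *     mem_cgroup_count_vm_event(mm, PGMAJFAULT);
-  */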
- #ifdef CONFIG_TRANSPARENT_HUGEPAGE
- void mem_cgroup_split_huge_fixup(struct page *head);
- #endif
- #else /* CONFIG_MEMCG */
- struct mem_cgroup;
- static inline void mem_cgroup_events(struct mem_cgroup *memcg,
- enum mem_cgroup_events_index idx,
- unsigned int nr)
- {
- }
- static inline bool mem_cgroup_low(struct mem_cgroup *root,
- struct mem_cgroup *memcg)
- {
- return false;
- }
- static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
- gfp_t gfp_mask,
- struct mem_cgroup **memcgp)
- {
- *memcgp = NULL;
- return 0;
- }
- static inline void mem_cgroup_commit_charge(struct page *page,
- struct mem_cgroup *memcg,
- bool lrucare)
- {
- }
- static inline void mem_cgroup_cancel_charge(struct page *page,
- struct mem_cgroup *memcg)
- {
- }
- static inline void mem_cgroup_uncharge(struct page *page)
- {
- }
- static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
- {
- }
- static inline void mem_cgroup_replace_page(struct page *old, struct page *new)
- {
- }
- static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
- struct mem_cgroup *memcg)
- {
- return &zone->lruvec;
- }
- static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
- struct zone *zone)
- {
- return &zone->lruvec;
- }
- static inline bool mm_match_cgroup(struct mm_struct *mm,
- struct mem_cgroup *memcg)
- {
- return true;
- }
- static inline bool task_in_mem_cgroup(struct task_struct *task,
- const struct mem_cgroup *memcg)
- {
- return true;
- }
- static inline struct mem_cgroup *
- mem_cgroup_iter(struct mem_cgroup *root,
- struct mem_cgroup *prev,
- struct mem_cgroup_reclaim_cookie *reclaim)
- {
- return NULL;
- }
- static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
- struct mem_cgroup *prev)
- {
- }
- static inline bool mem_cgroup_disabled(void)
- {
- return true;
- }
- static inline bool
- mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
- {
- return true;
- }
- static inline bool mem_cgroup_lruvec_online(struct lruvec *lruvec)
- {
- return true;
- }
- static inline unsigned long
- mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
- {
- return 0;
- }
- static inline void
- mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
- int increment)
- {
- }
- static inline void
- mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
- {
- }
- static inline struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page)
- {
- return NULL;
- }
- static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg)
- {
- }
- static inline void mem_cgroup_handle_over_high(void)
- {
- }
- static inline void mem_cgroup_oom_enable(void)
- {
- }
- static inline void mem_cgroup_oom_disable(void)
- {
- }
- static inline bool task_in_memcg_oom(struct task_struct *p)
- {
- return false;
- }
- static inline bool mem_cgroup_oom_synchronize(bool wait)
- {
- return false;
- }
- static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
- enum mem_cgroup_stat_index idx)
- {
- }
- static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
- enum mem_cgroup_stat_index idx)
- {
- }
- static inline
- unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
- gfp_t gfp_mask,
- unsigned long *total_scanned)
- {
- return 0;
- }
- static inline void mem_cgroup_split_huge_fixup(struct page *head)
- {
- }
- static inline
- void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
- {
- }
- #endif /* CONFIG_MEMCG */
- enum {
- UNDER_LIMIT,
- SOFT_LIMIT,
- OVER_LIMIT,
- };
- #ifdef CONFIG_CGROUP_WRITEBACK
- struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg);
- struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
- void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
- unsigned long *pheadroom, unsigned long *pdirty,
- unsigned long *pwriteback);
- #else /* CONFIG_CGROUP_WRITEBACK */
- static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
- {
- return NULL;
- }
- static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
- unsigned long *pfilepages,
- unsigned long *pheadroom,
- unsigned long *pdirty,
- unsigned long *pwriteback)
- {
- }
- #endif /* CONFIG_CGROUP_WRITEBACK */
- struct sock;
- #if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
- void sock_update_memcg(struct sock *sk);
- void sock_release_memcg(struct sock *sk);
- #else
- static inline void sock_update_memcg(struct sock *sk)
- {
- }
- static inline void sock_release_memcg(struct sock *sk)
- {
- }
- #endif /* CONFIG_INET && CONFIG_MEMCG_KMEM */
- #ifdef CONFIG_MEMCG_KMEM
- extern struct static_key memcg_kmem_enabled_key;
- extern int memcg_nr_cache_ids;
- void memcg_get_cache_ids(void);
- void memcg_put_cache_ids(void);
- /*
- * Helper macro to loop through all memcg-specific caches. Callers must still
- * check if the cache is valid (it is either valid or NULL).
- * The slab_mutex must be held when looping through those caches.
- */
- #define for_each_memcg_cache_index(_idx) \
- for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)
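- /*
-  * Example (illustrative sketch): walking the per-memcg children of a root
-  * cache. cache_from_memcg_idx() is assumed from mm/slab.h and may return
-  * NULL for unused slots:
-  *
-  *     mutex_lock(&slab_mutex);
-  *     for_each_memcg_cache_index(i) {
-  *             c = cache_from_memcg_idx(root_cache, i);
-  *             if (!c)
-  *                     continue;
-  *             ... operate on the child cache ...
-  *     }
-  *     mutex_unlock(&slab_mutex);
-  */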
- static inline bool memcg_kmem_enabled(void)
- {
- return static_key_false(&memcg_kmem_enabled_key);
- }
- static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg)
- {
- return memcg->kmem_acct_active;
- }
- /*
- * In general, we'll do everything in our power not to incur any overhead
- * for non-memcg users of the kmem functions. Not even a function call, if we
- * can avoid it.
- *
- * Therefore, we'll inline all those functions so that in the best case, we'll
- * see that kmemcg is off for everybody and proceed quickly. If it is on,
- * we'll still do most of the flag checking inline. We check a lot of
- * conditions, but because they are pretty simple, they are expected to be
- * fast.
- */
- int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
- struct mem_cgroup *memcg);
- int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
- void __memcg_kmem_uncharge(struct page *page, int order);
- /*
- * Helper for accessing a memcg's index. It will be used as an index in the
- * child cache array in kmem_cache, and also to derive its name. This function
- * will return -1 when this is not a kmem-limited memcg.
- */
- static inline int memcg_cache_id(struct mem_cgroup *memcg)
- {
- return memcg ? memcg->kmemcg_id : -1;
- }
- struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep);
- void __memcg_kmem_put_cache(struct kmem_cache *cachep);
- static inline bool __memcg_kmem_bypass(gfp_t gfp)
- {
- if (!memcg_kmem_enabled())
- return true;
- if (gfp & __GFP_NOACCOUNT)
- return true;
- if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
- return true;
- return false;
- }
- /**
- * memcg_kmem_charge: charge a kmem page
- * @page: page to charge
- * @gfp: reclaim mode
- * @order: allocation order
- *
- * Returns 0 on success, an error code on failure.
- */
- static __always_inline int memcg_kmem_charge(struct page *page,
- gfp_t gfp, int order)
- {
- if (__memcg_kmem_bypass(gfp))
- return 0;
- return __memcg_kmem_charge(page, gfp, order);
- }
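- /*
-  * Example usage (illustrative, modelled on alloc_kmem_pages()): charge a
-  * freshly allocated page and back out on failure:
-  *
-  *     page = alloc_pages(gfp, order);
-  *     if (page && memcg_kmem_charge(page, gfp, order)) {
-  *             __free_pages(page, order);
-  *             page = NULL;
-  *     }
-  */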
- /**
- * memcg_kmem_uncharge: uncharge a kmem page
- * @page: page to uncharge
- * @order: allocation order
- */
- static __always_inline void memcg_kmem_uncharge(struct page *page, int order)
- {
- if (memcg_kmem_enabled())
- __memcg_kmem_uncharge(page, order);
- }
- /**
- * memcg_kmem_get_cache: selects the correct per-memcg cache for allocation
- * @cachep: the original global kmem cache
- * @gfp: allocation flags.
- *
- * All memory allocated from a per-memcg cache is charged to the owner memcg.
- */
- static __always_inline struct kmem_cache *
- memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
- {
- if (__memcg_kmem_bypass(gfp))
- return cachep;
- return __memcg_kmem_get_cache(cachep);
- }
- static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
- {
- if (memcg_kmem_enabled())
- __memcg_kmem_put_cache(cachep);
- }
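- /*
-  * Illustrative sketch of how the slab allocator pairs these hooks around
-  * an allocation (simplified; the real pairing lives in the slab internals):
-  *
-  *     s = memcg_kmem_get_cache(cachep, flags);
-  *     objp = ... allocate from s ...;
-  *     memcg_kmem_put_cache(s);
-  */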
- #else
- #define for_each_memcg_cache_index(_idx) \
- for (; NULL; )
- static inline bool memcg_kmem_enabled(void)
- {
- return false;
- }
- static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg)
- {
- return false;
- }
- static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
- {
- return 0;
- }
- static inline void memcg_kmem_uncharge(struct page *page, int order)
- {
- }
- static inline int memcg_cache_id(struct mem_cgroup *memcg)
- {
- return -1;
- }
- static inline void memcg_get_cache_ids(void)
- {
- }
- static inline void memcg_put_cache_ids(void)
- {
- }
- static inline struct kmem_cache *
- memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
- {
- return cachep;
- }
- static inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
- {
- }
- #endif /* CONFIG_MEMCG_KMEM */
- #endif /* _LINUX_MEMCONTROL_H */