- /*
- * mm/rmap.c - physical to virtual reverse mappings
- *
- * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
- * Released under the General Public License (GPL).
- *
- * Simple, low overhead reverse mapping scheme.
- * Please try to keep this thing as modular as possible.
- *
- * Provides methods for unmapping each kind of mapped page:
- * the anon methods track anonymous pages, and
- * the file methods track pages belonging to an inode.
- *
- * Original design by Rik van Riel <riel@conectiva.com.br> 2001
- * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
- * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
- * Contributions by Hugh Dickins 2003, 2004
- */
- /*
- * Lock ordering in mm:
- *
- * inode->i_mutex (while writing or truncating, not reading or faulting)
- * mm->mmap_sem
- * page->flags PG_locked (lock_page)
- * mapping->i_mmap_rwsem
- * anon_vma->rwsem
- * mm->page_table_lock or pte_lock
- * zone->lru_lock (in mark_page_accessed, isolate_lru_page)
- * swap_lock (in swap_duplicate, swap_info_get)
- * mmlist_lock (in mmput, drain_mmlist and others)
- * mapping->private_lock (in __set_page_dirty_buffers)
- * mem_cgroup_{begin,end}_page_stat (memcg->move_lock)
- * mapping->tree_lock (widely used)
- * inode->i_lock (in set_page_dirty's __mark_inode_dirty)
- * bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
- * sb_lock (within inode_lock in fs/fs-writeback.c)
- * mapping->tree_lock (widely used, in set_page_dirty,
- * in arch-dependent flush_dcache_mmap_lock,
- * within bdi.wb->list_lock in __sync_single_inode)
- *
- * anon_vma->rwsem, mapping->i_mutex (memory_failure, collect_procs_anon)
- * ->tasklist_lock
- * pte map lock
- */
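- /*
- * Illustrative sketch (not part of the original file): a hypothetical
- * path that takes several of the locks above, in the documented order.
- * The calls are this era's real primitives; the combination is contrived
- * purely to show the nesting:
- *
- * mutex_lock(&inode->i_mutex);
- * down_read(&mm->mmap_sem);
- * lock_page(page);
- * i_mmap_lock_read(mapping);
- * anon_vma_lock_read(anon_vma);
- * spin_lock(pte_lockptr(mm, pmd));
- */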
- #include <linux/mm.h>
- #include <linux/pagemap.h>
- #include <linux/swap.h>
- #include <linux/swapops.h>
- #include <linux/slab.h>
- #include <linux/init.h>
- #include <linux/ksm.h>
- #include <linux/rmap.h>
- #include <linux/rcupdate.h>
- #include <linux/export.h>
- #include <linux/memcontrol.h>
- #include <linux/mmu_notifier.h>
- #include <linux/migrate.h>
- #include <linux/hugetlb.h>
- #include <linux/backing-dev.h>
- #include <linux/page_idle.h>
- #include <asm/tlbflush.h>
- #include <trace/events/tlb.h>
- #include "internal.h"
- static struct kmem_cache *anon_vma_cachep;
- static struct kmem_cache *anon_vma_chain_cachep;
- static inline struct anon_vma *anon_vma_alloc(void)
- {
- struct anon_vma *anon_vma;
- anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
- if (anon_vma) {
- atomic_set(&anon_vma->refcount, 1);
- anon_vma->degree = 1; /* Reference for first vma */
- anon_vma->parent = anon_vma;
- /*
- * Initialise the anon_vma root to point to itself. If called
- * from fork, the root will be reset to the parent's anon_vma.
- */
- anon_vma->root = anon_vma;
- }
- return anon_vma;
- }
- static inline void anon_vma_free(struct anon_vma *anon_vma)
- {
- VM_BUG_ON(atomic_read(&anon_vma->refcount));
- /*
- * Synchronize against page_lock_anon_vma_read() such that
- * we can safely hold the lock without the anon_vma getting
- * freed.
- *
- * Relies on the full mb implied by the atomic_dec_and_test() from
- * put_anon_vma() against the acquire barrier implied by
- * down_read_trylock() from page_lock_anon_vma_read(). This orders:
- *
- * page_lock_anon_vma_read() VS put_anon_vma()
- * down_read_trylock() atomic_dec_and_test()
- * LOCK MB
- * atomic_read() rwsem_is_locked()
- *
- * LOCK should suffice since the actual taking of the lock must
- * happen _before_ what follows.
- */
- might_sleep();
- if (rwsem_is_locked(&anon_vma->root->rwsem)) {
- anon_vma_lock_write(anon_vma);
- anon_vma_unlock_write(anon_vma);
- }
- kmem_cache_free(anon_vma_cachep, anon_vma);
- }
- static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
- {
- return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
- }
- static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
- {
- kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
- }
- static void anon_vma_chain_link(struct vm_area_struct *vma,
- struct anon_vma_chain *avc,
- struct anon_vma *anon_vma)
- {
- avc->vma = vma;
- avc->anon_vma = anon_vma;
- list_add(&avc->same_vma, &vma->anon_vma_chain);
- anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
- }
- /**
- * anon_vma_prepare - attach an anon_vma to a memory region
- * @vma: the memory region in question
- *
- * This makes sure the memory mapping described by 'vma' has
- * an 'anon_vma' attached to it, so that we can associate the
- * anonymous pages mapped into it with that anon_vma.
- *
- * The common case will be that we already have one, but if
- * not we either need to find an adjacent mapping that we
- * can re-use the anon_vma from (very common when the only
- * reason for splitting a vma has been mprotect()), or we
- * allocate a new one.
- *
- * Anon-vma allocations are very subtle, because we may have
- * optimistically looked up an anon_vma in page_lock_anon_vma_read()
- * and that may actually touch the spinlock even in the newly
- * allocated vma (it depends on RCU to make sure that the
- * anon_vma isn't actually destroyed).
- *
- * As a result, we need to do proper anon_vma locking even
- * for the new allocation. At the same time, we do not want
- * to do any locking for the common case of already having
- * an anon_vma.
- *
- * This must be called with the mmap_sem held for reading.
- */
- int anon_vma_prepare(struct vm_area_struct *vma)
- {
- struct anon_vma *anon_vma = vma->anon_vma;
- struct anon_vma_chain *avc;
- might_sleep();
- if (unlikely(!anon_vma)) {
- struct mm_struct *mm = vma->vm_mm;
- struct anon_vma *allocated;
- avc = anon_vma_chain_alloc(GFP_KERNEL);
- if (!avc)
- goto out_enomem;
- anon_vma = find_mergeable_anon_vma(vma);
- allocated = NULL;
- if (!anon_vma) {
- anon_vma = anon_vma_alloc();
- if (unlikely(!anon_vma))
- goto out_enomem_free_avc;
- allocated = anon_vma;
- }
- anon_vma_lock_write(anon_vma);
- /* page_table_lock to protect against threads */
- spin_lock(&mm->page_table_lock);
- if (likely(!vma->anon_vma)) {
- vma->anon_vma = anon_vma;
- anon_vma_chain_link(vma, avc, anon_vma);
- /* vma reference or self-parent link for new root */
- anon_vma->degree++;
- allocated = NULL;
- avc = NULL;
- }
- spin_unlock(&mm->page_table_lock);
- anon_vma_unlock_write(anon_vma);
- if (unlikely(allocated))
- put_anon_vma(allocated);
- if (unlikely(avc))
- anon_vma_chain_free(avc);
- }
- return 0;
- out_enomem_free_avc:
- anon_vma_chain_free(avc);
- out_enomem:
- return -ENOMEM;
- }
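- /*
- * Usage sketch (illustrative, not part of this file): a fault handler
- * is expected to call anon_vma_prepare() before installing the first
- * anonymous pte in a vma, roughly in the style of mm/memory.c:
- *
- * if (unlikely(anon_vma_prepare(vma)))
- * return VM_FAULT_OOM;
- * page = alloc_zeroed_user_highpage_movable(vma, address);
- */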
- /*
- * This is a useful helper function for locking the anon_vma root as
- * we traverse the vma->anon_vma_chain, looping over anon_vmas that
- * have the same vma.
- *
- * Such anon_vmas should have the same root, so you'd expect to see
- * just a single write-lock of that root for the whole traversal.
- */
- static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
- {
- struct anon_vma *new_root = anon_vma->root;
- if (new_root != root) {
- if (WARN_ON_ONCE(root))
- up_write(&root->rwsem);
- root = new_root;
- down_write(&root->rwsem);
- }
- return root;
- }
- static inline void unlock_anon_vma_root(struct anon_vma *root)
- {
- if (root)
- up_write(&root->rwsem);
- }
- /*
- * Attach the anon_vmas from src to dst.
- * Returns 0 on success, -ENOMEM on failure.
- *
- * If dst->anon_vma is NULL this function tries to find and reuse existing
- * anon_vma which has no vmas and only one child anon_vma. This prevents
- * degradation of the anon_vma hierarchy into an endless linear chain when a
- * task forks constantly. On the other hand, an anon_vma with more than one
- * child is not reused even if no live vma remains, so the rmap walker has a
- * good chance of avoiding a scan of the whole hierarchy when it searches for
- * where a page is mapped.
- */
- int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
- {
- struct anon_vma_chain *avc, *pavc;
- struct anon_vma *root = NULL;
- list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
- struct anon_vma *anon_vma;
- avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
- if (unlikely(!avc)) {
- unlock_anon_vma_root(root);
- root = NULL;
- avc = anon_vma_chain_alloc(GFP_KERNEL);
- if (!avc)
- goto enomem_failure;
- }
- anon_vma = pavc->anon_vma;
- root = lock_anon_vma_root(root, anon_vma);
- anon_vma_chain_link(dst, avc, anon_vma);
- /*
- * Reuse the existing anon_vma if its degree is lower than two,
- * which means it has no vma and only one anon_vma child.
- *
- * Do not choose the parent anon_vma, otherwise the first child
- * will always reuse it. The root anon_vma is never reused:
- * it has a self-parent reference and at least one child.
- */
- if (!dst->anon_vma && anon_vma != src->anon_vma &&
- anon_vma->degree < 2)
- dst->anon_vma = anon_vma;
- }
- if (dst->anon_vma)
- dst->anon_vma->degree++;
- unlock_anon_vma_root(root);
- return 0;
- enomem_failure:
- /*
- * dst->anon_vma is dropped here otherwise its degree can be incorrectly
- * decremented in unlink_anon_vmas().
- * We can safely do this because callers of anon_vma_clone() don't care
- * about dst->anon_vma if anon_vma_clone() failed.
- */
- dst->anon_vma = NULL;
- unlink_anon_vmas(dst);
- return -ENOMEM;
- }
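- /*
- * Usage sketch (illustrative): callers that split or copy a vma clone
- * the chain onto the new vma and simply free the new vma on failure;
- * new_vma and old_vma are placeholder names:
- *
- * err = anon_vma_clone(new_vma, old_vma);
- * if (err)
- * goto out_free_vma;
- */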
- /*
- * Attach vma to its own anon_vma, as well as to the anon_vmas that
- * the corresponding VMA in the parent process is attached to.
- * Returns 0 on success, non-zero on failure.
- */
- int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
- {
- struct anon_vma_chain *avc;
- struct anon_vma *anon_vma;
- int error;
- /* Don't bother if the parent process has no anon_vma here. */
- if (!pvma->anon_vma)
- return 0;
- /* Drop inherited anon_vma, we'll reuse existing or allocate new. */
- vma->anon_vma = NULL;
- /*
- * First, attach the new VMA to the parent VMA's anon_vmas,
- * so rmap can find non-COWed pages in child processes.
- */
- error = anon_vma_clone(vma, pvma);
- if (error)
- return error;
- /* An existing anon_vma has been reused, all done then. */
- if (vma->anon_vma)
- return 0;
- /* Then add our own anon_vma. */
- anon_vma = anon_vma_alloc();
- if (!anon_vma)
- goto out_error;
- avc = anon_vma_chain_alloc(GFP_KERNEL);
- if (!avc)
- goto out_error_free_anon_vma;
- /*
- * The root anon_vma's spinlock is the lock actually used when we
- * lock any of the anon_vmas in this anon_vma tree.
- */
- anon_vma->root = pvma->anon_vma->root;
- anon_vma->parent = pvma->anon_vma;
- /*
- * With refcounts, an anon_vma can stay around longer than the
- * process it belongs to. The root anon_vma needs to be pinned until
- * this anon_vma is freed, because the lock lives in the root.
- */
- get_anon_vma(anon_vma->root);
- /* Mark this anon_vma as the one where our new (COWed) pages go. */
- vma->anon_vma = anon_vma;
- anon_vma_lock_write(anon_vma);
- anon_vma_chain_link(vma, avc, anon_vma);
- anon_vma->parent->degree++;
- anon_vma_unlock_write(anon_vma);
- return 0;
- out_error_free_anon_vma:
- put_anon_vma(anon_vma);
- out_error:
- unlink_anon_vmas(vma);
- return -ENOMEM;
- }
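- /*
- * Usage sketch (illustrative): fork's dup_mmap() calls this for each
- * vma it copies into the child; tmp and mpnt stand for the child and
- * parent vma:
- *
- * if (anon_vma_fork(tmp, mpnt))
- * goto fail_nomem_anon_vma_fork;
- */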
- void unlink_anon_vmas(struct vm_area_struct *vma)
- {
- struct anon_vma_chain *avc, *next;
- struct anon_vma *root = NULL;
- /*
- * Unlink each anon_vma chained to the VMA. This list is ordered
- * from newest to oldest, ensuring the root anon_vma gets freed last.
- */
- list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
- struct anon_vma *anon_vma = avc->anon_vma;
- root = lock_anon_vma_root(root, anon_vma);
- anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);
- /*
- * Leave empty anon_vmas on the list - we'll need
- * to free them outside the lock.
- */
- if (RB_EMPTY_ROOT(&anon_vma->rb_root)) {
- anon_vma->parent->degree--;
- continue;
- }
- list_del(&avc->same_vma);
- anon_vma_chain_free(avc);
- }
- if (vma->anon_vma)
- vma->anon_vma->degree--;
- unlock_anon_vma_root(root);
- /*
- * Iterate the list once more; it now contains only empty and unlinked
- * anon_vmas, so destroy them. We could not do this earlier because
- * __put_anon_vma() may need to write-acquire the anon_vma->root->rwsem.
- */
- list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
- struct anon_vma *anon_vma = avc->anon_vma;
- VM_WARN_ON(anon_vma->degree);
- put_anon_vma(anon_vma);
- list_del(&avc->same_vma);
- anon_vma_chain_free(avc);
- }
- }
- static void anon_vma_ctor(void *data)
- {
- struct anon_vma *anon_vma = data;
- init_rwsem(&anon_vma->rwsem);
- atomic_set(&anon_vma->refcount, 0);
- anon_vma->rb_root = RB_ROOT;
- }
- void __init anon_vma_init(void)
- {
- anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
- 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
- anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
- }
- /*
- * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
- *
- * Since there is no serialization whatsoever against page_remove_rmap()
- * the best this function can do is return a locked anon_vma that might
- * have been relevant to this page.
- *
- * The page might have been remapped to a different anon_vma or the anon_vma
- * returned may already be freed (and even reused).
- *
- * In case it was remapped to a different anon_vma, the new anon_vma will be a
- * child of the old anon_vma, and the anon_vma lifetime rules will therefore
- * ensure that any anon_vma obtained from the page will still be valid for as
- * long as we observe page_mapped() [ hence all those page_mapped() tests ].
- *
- * All users of this function must be very careful when walking the anon_vma
- * chain and verify that the page in question is indeed mapped in it
- * [ something equivalent to page_mapped_in_vma() ].
- *
- * Since anon_vma's slab is DESTROY_BY_RCU and we know from page_remove_rmap()
- * that the anon_vma pointer from page->mapping is valid if there is a
- * mapcount, we can dereference the anon_vma after observing those.
- */
- struct anon_vma *page_get_anon_vma(struct page *page)
- {
- struct anon_vma *anon_vma = NULL;
- unsigned long anon_mapping;
- rcu_read_lock();
- anon_mapping = (unsigned long)READ_ONCE(page->mapping);
- if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
- goto out;
- if (!page_mapped(page))
- goto out;
- anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
- if (!atomic_inc_not_zero(&anon_vma->refcount)) {
- anon_vma = NULL;
- goto out;
- }
- /*
- * If this page is still mapped, then its anon_vma cannot have been
- * freed. But if it has been unmapped, we have no security against the
- * anon_vma structure being freed and reused (for another anon_vma:
- * SLAB_DESTROY_BY_RCU guarantees that - so the atomic_inc_not_zero()
- * above cannot corrupt).
- */
- if (!page_mapped(page)) {
- rcu_read_unlock();
- put_anon_vma(anon_vma);
- return NULL;
- }
- out:
- rcu_read_unlock();
- return anon_vma;
- }
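- /*
- * Usage sketch (illustrative): the anon_vma returned here is pinned but
- * not locked, so a caller pairs it with put_anon_vma():
- *
- * anon_vma = page_get_anon_vma(page);
- * if (!anon_vma)
- * return;
- * anon_vma_lock_write(anon_vma);
- * ... work on the anon_vma ...
- * anon_vma_unlock_write(anon_vma);
- * put_anon_vma(anon_vma);
- */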
- /*
- * Similar to page_get_anon_vma() except it locks the anon_vma.
- *
- * It's a little more complex as it tries to keep the fast path to a single
- * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
- * reference like with page_get_anon_vma() and then block on the mutex.
- */
- struct anon_vma *page_lock_anon_vma_read(struct page *page)
- {
- struct anon_vma *anon_vma = NULL;
- struct anon_vma *root_anon_vma;
- unsigned long anon_mapping;
- rcu_read_lock();
- anon_mapping = (unsigned long)READ_ONCE(page->mapping);
- if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
- goto out;
- if (!page_mapped(page))
- goto out;
- anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
- root_anon_vma = READ_ONCE(anon_vma->root);
- if (down_read_trylock(&root_anon_vma->rwsem)) {
- /*
- * If the page is still mapped, then this anon_vma is still
- * its anon_vma, and holding the mutex ensures that it will
- * not go away, see anon_vma_free().
- */
- if (!page_mapped(page)) {
- up_read(&root_anon_vma->rwsem);
- anon_vma = NULL;
- }
- goto out;
- }
- /* trylock failed, we have to sleep */
- if (!atomic_inc_not_zero(&anon_vma->refcount)) {
- anon_vma = NULL;
- goto out;
- }
- if (!page_mapped(page)) {
- rcu_read_unlock();
- put_anon_vma(anon_vma);
- return NULL;
- }
- /* we pinned the anon_vma, it's safe to sleep */
- rcu_read_unlock();
- anon_vma_lock_read(anon_vma);
- if (atomic_dec_and_test(&anon_vma->refcount)) {
- /*
- * Oops, we held the last refcount, release the lock
- * and bail -- can't simply use put_anon_vma() because
- * we'll deadlock on the anon_vma_lock_write() recursion.
- */
- anon_vma_unlock_read(anon_vma);
- __put_anon_vma(anon_vma);
- anon_vma = NULL;
- }
- return anon_vma;
- out:
- rcu_read_unlock();
- return anon_vma;
- }
- void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
- {
- anon_vma_unlock_read(anon_vma);
- }
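- /*
- * Usage sketch (illustrative): the lock/unlock pairing for a read-side
- * walk over the anon_vma:
- *
- * anon_vma = page_lock_anon_vma_read(page);
- * if (anon_vma) {
- * ... walk the interval tree ...
- * page_unlock_anon_vma_read(anon_vma);
- * }
- */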
- /*
- * At what user virtual address is page expected in @vma?
- */
- static inline unsigned long
- __vma_address(struct page *page, struct vm_area_struct *vma)
- {
- pgoff_t pgoff = page_to_pgoff(page);
- return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
- }
- inline unsigned long
- vma_address(struct page *page, struct vm_area_struct *vma)
- {
- unsigned long address = __vma_address(page, vma);
- /* page should be within @vma mapping range */
- VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
- return address;
- }
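- /*
- * Worked example (illustrative numbers): with vma->vm_start ==
- * 0x700000000000, vma->vm_pgoff == 0x10 and a page whose pgoff is 0x12,
- * __vma_address() yields 0x700000000000 + ((0x12 - 0x10) << PAGE_SHIFT),
- * i.e. vm_start + 0x2000 with 4K pages.
- */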
- #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
- /*
- * Flush TLB entries for recently unmapped pages from remote CPUs. It is
- * important if a PTE was dirty when it was unmapped that it's flushed
- * before any IO is initiated on the page to prevent lost writes. Similarly,
- * it must be flushed before freeing to prevent data leakage.
- */
- void try_to_unmap_flush(void)
- {
- struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
- int cpu;
- if (!tlb_ubc->flush_required)
- return;
- cpu = get_cpu();
- if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask)) {
- count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
- local_flush_tlb();
- trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
- }
- if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids)
- flush_tlb_others(&tlb_ubc->cpumask, NULL, 0, TLB_FLUSH_ALL);
- cpumask_clear(&tlb_ubc->cpumask);
- tlb_ubc->flush_required = false;
- tlb_ubc->writable = false;
- put_cpu();
- }
- /* Flush iff there are potentially writable TLB entries that can race with IO */
- void try_to_unmap_flush_dirty(void)
- {
- struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
- if (tlb_ubc->writable)
- try_to_unmap_flush();
- }
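- /*
- * Usage sketch (illustrative): reclaim calls try_to_unmap_flush_dirty()
- * before handing a possibly-dirty page to pageout(), and
- * try_to_unmap_flush() before freeing a batch of unmapped pages:
- *
- * try_to_unmap_flush_dirty();
- * pageout(page, mapping, sc);
- */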
- static void set_tlb_ubc_flush_pending(struct mm_struct *mm,
- struct page *page, bool writable)
- {
- struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
- cpumask_or(&tlb_ubc->cpumask, &tlb_ubc->cpumask, mm_cpumask(mm));
- tlb_ubc->flush_required = true;
- /*
- * Ensure compiler does not re-order the setting of tlb_flush_batched
- * before the PTE is cleared.
- */
- barrier();
- mm->tlb_flush_batched = true;
- /*
- * If the PTE was dirty then it's best to assume it's writable. The
- * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
- * before the page is queued for IO.
- */
- if (writable)
- tlb_ubc->writable = true;
- }
- /*
- * Returns true if the TLB flush should be deferred to the end of a batch of
- * unmap operations to reduce IPIs.
- */
- static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
- {
- bool should_defer = false;
- if (!(flags & TTU_BATCH_FLUSH))
- return false;
- /* If remote CPUs need to be flushed then defer the flush to the batch */
- if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids)
- should_defer = true;
- put_cpu();
- return should_defer;
- }
- /*
- * Reclaim unmaps pages under the PTL but do not flush the TLB prior to
- * releasing the PTL if TLB flushes are batched. It's possible for a parallel
- * operation such as mprotect or munmap to race between reclaim unmapping
- * the page and flushing the page. If this race occurs, it potentially allows
- * access to data via a stale TLB entry. Tracking all mm's that have TLB
- * batching in flight would be expensive during reclaim so instead track
- * whether TLB batching occurred in the past and if so then do a flush here
- * if required. This will cost one additional flush per reclaim cycle, paid
- * by the first operation at risk such as mprotect and munmap.
- *
- * This must be called under the PTL so that an access to tlb_flush_batched
- * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise
- * via the PTL.
- */
- void flush_tlb_batched_pending(struct mm_struct *mm)
- {
- if (mm->tlb_flush_batched) {
- flush_tlb_mm(mm);
- /*
- * Do not allow the compiler to re-order the clearing of
- * tlb_flush_batched before the tlb is flushed.
- */
- barrier();
- mm->tlb_flush_batched = false;
- }
- }
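- /*
- * Usage sketch (illustrative): pte-modifying operations such as
- * mprotect's change_pte_range() call this right after taking the PTL:
- *
- * pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
- * flush_tlb_batched_pending(mm);
- * ... modify the ptes ...
- * pte_unmap_unlock(pte, ptl);
- */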
- #else
- static void set_tlb_ubc_flush_pending(struct mm_struct *mm,
- struct page *page, bool writable)
- {
- }
- static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
- {
- return false;
- }
- #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
- /*
- * At what user virtual address is page expected in vma?
- * Caller should check the page is actually part of the vma.
- */
- unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
- {
- unsigned long address;
- if (PageAnon(page)) {
- struct anon_vma *page__anon_vma = page_anon_vma(page);
- /*
- * Note: swapoff's unuse_vma() is more efficient with this
- * check, and needs it to match anon_vma when KSM is active.
- */
- if (!vma->anon_vma || !page__anon_vma ||
- vma->anon_vma->root != page__anon_vma->root)
- return -EFAULT;
- } else if (page->mapping) {
- if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping)
- return -EFAULT;
- } else
- return -EFAULT;
- address = __vma_address(page, vma);
- if (unlikely(address < vma->vm_start || address >= vma->vm_end))
- return -EFAULT;
- return address;
- }
- pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
- {
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd = NULL;
- pmd_t pmde;
- pgd = pgd_offset(mm, address);
- if (!pgd_present(*pgd))
- goto out;
- pud = pud_offset(pgd, address);
- if (!pud_present(*pud))
- goto out;
- pmd = pmd_offset(pud, address);
- /*
- * Some THP functions use the sequence pmdp_huge_clear_flush(), set_pmd_at()
- * without holding anon_vma lock for write. So when looking for a
- * genuine pmde (in which to find pte), test present and !THP together.
- */
- pmde = *pmd;
- barrier();
- if (!pmd_present(pmde) || pmd_trans_huge(pmde))
- pmd = NULL;
- out:
- return pmd;
- }
- /*
- * Check that @page is mapped at @address into @mm.
- *
- * If @sync is false, page_check_address may perform a racy check to avoid
- * the page table lock when the pte is not present (helpful when reclaiming
- * highly shared pages).
- *
- * On success returns with pte mapped and locked.
- */
- pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
- unsigned long address, spinlock_t **ptlp, int sync)
- {
- pmd_t *pmd;
- pte_t *pte;
- spinlock_t *ptl;
- if (unlikely(PageHuge(page))) {
- /* when pud is not present, pte will be NULL */
- pte = huge_pte_offset(mm, address);
- if (!pte)
- return NULL;
- ptl = huge_pte_lockptr(page_hstate(page), mm, pte);
- goto check;
- }
- pmd = mm_find_pmd(mm, address);
- if (!pmd)
- return NULL;
- pte = pte_offset_map(pmd, address);
- /* Make a quick check before getting the lock */
- if (!sync && !pte_present(*pte)) {
- pte_unmap(pte);
- return NULL;
- }
- ptl = pte_lockptr(mm, pmd);
- check:
- spin_lock(ptl);
- if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
- *ptlp = ptl;
- return pte;
- }
- pte_unmap_unlock(pte, ptl);
- return NULL;
- }
- /**
- * page_mapped_in_vma - check whether a page is really mapped in a VMA
- * @page: the page to test
- * @vma: the VMA to test
- *
- * Returns 1 if the page is mapped into the page tables of the VMA, 0
- * if the page is not mapped into the page tables of this VMA. Only
- * valid for normal file or anonymous VMAs.
- */
- int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
- {
- unsigned long address;
- pte_t *pte;
- spinlock_t *ptl;
- address = __vma_address(page, vma);
- if (unlikely(address < vma->vm_start || address >= vma->vm_end))
- return 0;
- pte = page_check_address(page, vma->vm_mm, address, &ptl, 1);
- if (!pte) /* the page is not in this mm */
- return 0;
- pte_unmap_unlock(pte, ptl);
- return 1;
- }
- struct page_referenced_arg {
- int mapcount;
- int referenced;
- unsigned long vm_flags;
- struct mem_cgroup *memcg;
- };
- /*
- * arg: page_referenced_arg will be passed
- */
- static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
- unsigned long address, void *arg)
- {
- struct mm_struct *mm = vma->vm_mm;
- spinlock_t *ptl;
- int referenced = 0;
- struct page_referenced_arg *pra = arg;
- if (unlikely(PageTransHuge(page))) {
- pmd_t *pmd;
- /*
- * rmap might return false positives; we must filter
- * these out using page_check_address_pmd().
- */
- pmd = page_check_address_pmd(page, mm, address,
- PAGE_CHECK_ADDRESS_PMD_FLAG, &ptl);
- if (!pmd)
- return SWAP_AGAIN;
- if (vma->vm_flags & VM_LOCKED) {
- spin_unlock(ptl);
- pra->vm_flags |= VM_LOCKED;
- return SWAP_FAIL; /* To break the loop */
- }
- /* go ahead even if the pmd is pmd_trans_splitting() */
- if (pmdp_clear_flush_young_notify(vma, address, pmd))
- referenced++;
- spin_unlock(ptl);
- } else {
- pte_t *pte;
- /*
- * rmap might return false positives; we must filter
- * these out using page_check_address().
- */
- pte = page_check_address(page, mm, address, &ptl, 0);
- if (!pte)
- return SWAP_AGAIN;
- if (vma->vm_flags & VM_LOCKED) {
- pte_unmap_unlock(pte, ptl);
- pra->vm_flags |= VM_LOCKED;
- return SWAP_FAIL; /* To break the loop */
- }
- if (ptep_clear_flush_young_notify(vma, address, pte)) {
- /*
- * Don't treat a reference through a sequentially read
- * mapping as such. If the page has been used in
- * another mapping, we will catch it; if this other
- * mapping is already gone, the unmap path will have
- * set PG_referenced or activated the page.
- */
- if (likely(!(vma->vm_flags & VM_SEQ_READ)))
- referenced++;
- }
- pte_unmap_unlock(pte, ptl);
- }
- if (referenced)
- clear_page_idle(page);
- if (test_and_clear_page_young(page))
- referenced++;
- if (referenced) {
- pra->referenced++;
- pra->vm_flags |= vma->vm_flags;
- }
- pra->mapcount--;
- if (!pra->mapcount)
- return SWAP_SUCCESS; /* To break the loop */
- return SWAP_AGAIN;
- }
- static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg)
- {
- struct page_referenced_arg *pra = arg;
- struct mem_cgroup *memcg = pra->memcg;
- if (!mm_match_cgroup(vma->vm_mm, memcg))
- return true;
- return false;
- }
- /**
- * page_referenced - test if the page was referenced
- * @page: the page to test
- * @is_locked: caller holds lock on the page
- * @memcg: target memory cgroup
- * @vm_flags: collect the vm_flags of the vmas which actually referenced the page
- *
- * Quick test_and_clear_referenced for all mappings to a page,
- * returns the number of ptes which referenced the page.
- */
- int page_referenced(struct page *page,
- int is_locked,
- struct mem_cgroup *memcg,
- unsigned long *vm_flags)
- {
- int ret;
- int we_locked = 0;
- struct page_referenced_arg pra = {
- .mapcount = page_mapcount(page),
- .memcg = memcg,
- };
- struct rmap_walk_control rwc = {
- .rmap_one = page_referenced_one,
- .arg = (void *)&pra,
- .anon_lock = page_lock_anon_vma_read,
- };
- *vm_flags = 0;
- if (!page_mapped(page))
- return 0;
- if (!page_rmapping(page))
- return 0;
- if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
- we_locked = trylock_page(page);
- if (!we_locked)
- return 1;
- }
- /*
- * If we are reclaiming on behalf of a cgroup, skip
- * counting on behalf of references from different
- * cgroups
- */
- if (memcg) {
- rwc.invalid_vma = invalid_page_referenced_vma;
- }
- ret = rmap_walk(page, &rwc);
- *vm_flags = pra.vm_flags;
- if (we_locked)
- unlock_page(page);
- return pra.referenced;
- }
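- /*
- * Usage sketch (illustrative): reclaim uses the count and the collected
- * vm_flags to decide a page's fate, in the style of vmscan's
- * page_check_references():
- *
- * unsigned long vm_flags;
- * int referenced = page_referenced(page, 1, memcg, &vm_flags);
- *
- * if (vm_flags & VM_LOCKED)
- * ... cull to the unevictable list ...
- * else if (referenced)
- * ... activate, or keep for another pass ...
- */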
- static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
- unsigned long address, void *arg)
- {
- struct mm_struct *mm = vma->vm_mm;
- pte_t *pte;
- spinlock_t *ptl;
- int ret = 0;
- int *cleaned = arg;
- pte = page_check_address(page, mm, address, &ptl, 1);
- if (!pte)
- goto out;
- if (pte_dirty(*pte) || pte_write(*pte)) {
- pte_t entry;
- flush_cache_page(vma, address, pte_pfn(*pte));
- entry = ptep_clear_flush(vma, address, pte);
- entry = pte_wrprotect(entry);
- entry = pte_mkclean(entry);
- set_pte_at(mm, address, pte, entry);
- ret = 1;
- }
- pte_unmap_unlock(pte, ptl);
- if (ret) {
- mmu_notifier_invalidate_page(mm, address);
- (*cleaned)++;
- }
- out:
- return SWAP_AGAIN;
- }
- static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
- {
- if (vma->vm_flags & VM_SHARED)
- return false;
- return true;
- }
- int page_mkclean(struct page *page)
- {
- int cleaned = 0;
- struct address_space *mapping;
- struct rmap_walk_control rwc = {
- .arg = (void *)&cleaned,
- .rmap_one = page_mkclean_one,
- .invalid_vma = invalid_mkclean_vma,
- };
- BUG_ON(!PageLocked(page));
- if (!page_mapped(page))
- return 0;
- mapping = page_mapping(page);
- if (!mapping)
- return 0;
- rmap_walk(page, &rwc);
- return cleaned;
- }
- EXPORT_SYMBOL_GPL(page_mkclean);
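- /*
- * Usage sketch (illustrative): writeback uses page_mkclean() to
- * write-protect every pte mapping a shared file page before starting
- * IO, so subsequent writes fault and re-dirty the page; compare
- * clear_page_dirty_for_io():
- *
- * if (page_mkclean(page))
- * set_page_dirty(page);
- */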
- /**
- * page_move_anon_rmap - move a page to our anon_vma
- * @page: the page to move to our anon_vma
- * @vma: the vma the page belongs to
- * @address: the user virtual address mapped
- *
- * When a page belongs exclusively to one process after a COW event,
- * that page can be moved into the anon_vma that belongs to just that
- * process, so the rmap code will not search the parent or sibling
- * processes.
- */
- void page_move_anon_rmap(struct page *page,
- struct vm_area_struct *vma, unsigned long address)
- {
- struct anon_vma *anon_vma = vma->anon_vma;
- VM_BUG_ON_PAGE(!PageLocked(page), page);
- VM_BUG_ON_VMA(!anon_vma, vma);
- VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page);
- anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
- /*
- * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written
- * simultaneously, so a concurrent reader (eg page_referenced()'s
- * PageAnon()) will not see one without the other.
- */
- WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);
- }
- /**
- * __page_set_anon_rmap - set up new anonymous rmap
- * @page: Page to add to rmap
- * @vma: VM area to add page to.
- * @address: User virtual address of the mapping
- * @exclusive: the page is exclusively owned by the current process
- */
- static void __page_set_anon_rmap(struct page *page,
- struct vm_area_struct *vma, unsigned long address, int exclusive)
- {
- struct anon_vma *anon_vma = vma->anon_vma;
- BUG_ON(!anon_vma);
- if (PageAnon(page))
- return;
- /*
- * If the page isn't exclusively mapped into this vma,
- * we must use the _oldest_ possible anon_vma for the
- * page mapping!
- */
- if (!exclusive)
- anon_vma = anon_vma->root;
- anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
- page->mapping = (struct address_space *) anon_vma;
- page->index = linear_page_index(vma, address);
- }
- /**
- * __page_check_anon_rmap - sanity check anonymous rmap addition
- * @page: the page to add the mapping to
- * @vma: the vm area in which the mapping is added
- * @address: the user virtual address mapped
- */
- static void __page_check_anon_rmap(struct page *page,
- struct vm_area_struct *vma, unsigned long address)
- {
- #ifdef CONFIG_DEBUG_VM
- /*
- * The page's anon-rmap details (mapping and index) are guaranteed to
- * be set up correctly at this point.
- *
- * We have exclusion against page_add_anon_rmap because the caller
- * always holds the page locked, except if called from page_dup_rmap,
- * in which case the page is already known to be setup.
- *
- * We have exclusion against page_add_new_anon_rmap because those pages
- * are initially only visible via the pagetables, and the pte is locked
- * over the call to page_add_new_anon_rmap.
- */
- BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root);
- BUG_ON(page->index != linear_page_index(vma, address));
- #endif
- }
- /**
- * page_add_anon_rmap - add pte mapping to an anonymous page
- * @page: the page to add the mapping to
- * @vma: the vm area in which the mapping is added
- * @address: the user virtual address mapped
- *
- * The caller needs to hold the pte lock, and the page must be locked in
- * the anon_vma case: to serialize mapping, index checking after setting,
- * and to ensure that PageAnon is not being upgraded racily to PageKsm
- * (but PageKsm is never downgraded to PageAnon).
- */
- void page_add_anon_rmap(struct page *page,
- struct vm_area_struct *vma, unsigned long address)
- {
- do_page_add_anon_rmap(page, vma, address, 0);
- }
- /*
- * Special version of the above for do_swap_page, which often runs
- * into pages that are exclusively owned by the current process.
- * Everybody else should continue to use page_add_anon_rmap above.
- */
- void do_page_add_anon_rmap(struct page *page,
- struct vm_area_struct *vma, unsigned long address, int exclusive)
- {
- int first = atomic_inc_and_test(&page->_mapcount);
- if (first) {
- /*
- * We use the irq-unsafe __{inc|mod}_zone_page_stat because
- * these counters are not modified in interrupt context, and
- * pte lock (a spinlock) is held, which implies preemption
- * disabled.
- */
- if (PageTransHuge(page))
- __inc_zone_page_state(page,
- NR_ANON_TRANSPARENT_HUGEPAGES);
- __mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
- hpage_nr_pages(page));
- }
- if (unlikely(PageKsm(page)))
- return;
- VM_BUG_ON_PAGE(!PageLocked(page), page);
- /* address might be in next vma when migration races vma_adjust */
- if (first)
- __page_set_anon_rmap(page, vma, address, exclusive);
- else
- __page_check_anon_rmap(page, vma, address);
- }
- /**
- * page_add_new_anon_rmap - add pte mapping to a new anonymous page
- * @page: the page to add the mapping to
- * @vma: the vm area in which the mapping is added
- * @address: the user virtual address mapped
- *
- * Same as page_add_anon_rmap but must only be called on *new* pages.
- * This means the inc-and-test can be bypassed.
- * Page does not have to be locked.
- */
- void page_add_new_anon_rmap(struct page *page,
- struct vm_area_struct *vma, unsigned long address)
- {
- VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
- SetPageSwapBacked(page);
- atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
- if (PageTransHuge(page))
- __inc_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
- __mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
- hpage_nr_pages(page));
- __page_set_anon_rmap(page, vma, address, 1);
- }
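- /*
- * Usage sketch (illustrative): an anonymous fault wires up a freshly
- * allocated page under the pte lock, roughly:
- *
- * page = alloc_zeroed_user_highpage_movable(vma, address);
- * ...
- * page_add_new_anon_rmap(page, vma, address);
- * lru_cache_add_active_or_unevictable(page, vma);
- * set_pte_at(mm, address, pte, entry);
- */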
- /**
- * page_add_file_rmap - add pte mapping to a file page
- * @page: the page to add the mapping to
- *
- * The caller needs to hold the pte lock.
- */
- void page_add_file_rmap(struct page *page)
- {
- struct mem_cgroup *memcg;
- memcg = mem_cgroup_begin_page_stat(page);
- if (atomic_inc_and_test(&page->_mapcount)) {
- __inc_zone_page_state(page, NR_FILE_MAPPED);
- mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
- }
- mem_cgroup_end_page_stat(memcg);
- }
- static void page_remove_file_rmap(struct page *page)
- {
- struct mem_cgroup *memcg;
- memcg = mem_cgroup_begin_page_stat(page);
- /* page still mapped by someone else? */
- if (!atomic_add_negative(-1, &page->_mapcount))
- goto out;
- /* Hugepages are not counted in NR_FILE_MAPPED for now. */
- if (unlikely(PageHuge(page)))
- goto out;
- /*
- * We use the irq-unsafe __{inc|mod}_zone_page_stat because
- * these counters are not modified in interrupt context, and
- * pte lock (a spinlock) is held, which implies preemption disabled.
- */
- __dec_zone_page_state(page, NR_FILE_MAPPED);
- mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
- if (unlikely(PageMlocked(page)))
- clear_page_mlock(page);
- out:
- mem_cgroup_end_page_stat(memcg);
- }
- /**
- * page_remove_rmap - take down pte mapping from a page
- * @page: page to remove mapping from
- *
- * The caller needs to hold the pte lock.
- */
- void page_remove_rmap(struct page *page)
- {
- if (!PageAnon(page)) {
- page_remove_file_rmap(page);
- return;
- }
- /* page still mapped by someone else? */
- if (!atomic_add_negative(-1, &page->_mapcount))
- return;
- /* Hugepages are not counted in NR_ANON_PAGES for now. */
- if (unlikely(PageHuge(page)))
- return;
- /*
- * We use the irq-unsafe __{inc|mod}_zone_page_stat because
- * these counters are not modified in interrupt context, and
- * pte lock (a spinlock) is held, which implies preemption disabled.
- */
- if (PageTransHuge(page))
- __dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
- __mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
- -hpage_nr_pages(page));
- if (unlikely(PageMlocked(page)))
- clear_page_mlock(page);
- /*
- * It would be tidy to reset the PageAnon mapping here,
- * but that might overwrite a racing page_add_anon_rmap
- * which increments mapcount after us but sets mapping
- * before us: so leave the reset to free_hot_cold_page,
- * and remember that it's only reliable while mapped.
- * Leaving it set also helps swapoff to reinstate ptes
- * faster for those pages still in swapcache.
- */
- }
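- /*
- * Usage sketch (illustrative): unmap paths call this right after the
- * pte is cleared, in the style of zap_pte_range():
- *
- * ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
- * ...
- * page_remove_rmap(page);
- */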
- /*
- * @arg: enum ttu_flags will be passed to this argument
- */
- static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
- unsigned long address, void *arg)
- {
- struct mm_struct *mm = vma->vm_mm;
- pte_t *pte;
- pte_t pteval;
- spinlock_t *ptl;
- int ret = SWAP_AGAIN;
- unsigned long sh_address;
- bool pmd_sharing_possible = false;
- unsigned long spmd_start, spmd_end;
- enum ttu_flags flags = (enum ttu_flags)arg;
- /* munlock has nothing to gain from examining un-locked vmas */
- if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
- goto out;
- /*
- * Only use the range_start/end mmu notifiers if huge pmd sharing
- * is possible. In the normal case, mmu_notifier_invalidate_page
- * is sufficient as we only unmap a page. However, if we unshare
- * a pmd, we will unmap a PUD_SIZE range.
- */
- if (PageHuge(page)) {
- spmd_start = address;
- spmd_end = spmd_start + vma_mmu_pagesize(vma);
- /*
- * Check if pmd sharing is possible. If possible, we could
- * unmap a PUD_SIZE range. spmd_start/spmd_end will be
- * modified if sharing is possible.
- */
- adjust_range_if_pmd_sharing_possible(vma, &spmd_start,
- &spmd_end);
- if (spmd_end - spmd_start != vma_mmu_pagesize(vma)) {
- sh_address = address;
- pmd_sharing_possible = true;
- mmu_notifier_invalidate_range_start(vma->vm_mm,
- spmd_start, spmd_end);
- }
- }
- pte = page_check_address(page, mm, address, &ptl, 0);
- if (!pte)
- goto out;
- /*
- * If the page is mlock()d, we cannot swap it out.
- * If it's recently referenced (perhaps page_referenced
- * skipped over this mm) then we should reactivate it.
- */
- if (!(flags & TTU_IGNORE_MLOCK)) {
- if (vma->vm_flags & VM_LOCKED) {
- /* Holding pte lock, we do *not* need mmap_sem here */
- mlock_vma_page(page);
- ret = SWAP_MLOCK;
- goto out_unmap;
- }
- if (flags & TTU_MUNLOCK)
- goto out_unmap;
- }
- if (!(flags & TTU_IGNORE_ACCESS)) {
- if (ptep_clear_flush_young_notify(vma, address, pte)) {
- ret = SWAP_FAIL;
- goto out_unmap;
- }
- }
- /*
- * Call huge_pmd_unshare to potentially unshare a huge pmd. Pass
- * sh_address as it will be modified if unsharing is successful.
- */
- if (PageHuge(page) && huge_pmd_unshare(mm, &sh_address, pte)) {
- /*
- * huge_pmd_unshare unmapped an entire PMD page. There is
- * no way of knowing exactly which PMDs may be cached for
- * this mm, so flush them all. spmd_start/spmd_end cover
- * this PUD_SIZE range.
- */
- flush_cache_range(vma, spmd_start, spmd_end);
- flush_tlb_range(vma, spmd_start, spmd_end);
- /*
- * The ref count of the PMD page was dropped which is part
- * of the way map counting is done for shared PMDs. When
- * there is no other sharing, huge_pmd_unshare returns false
- * and we will unmap the actual page and drop map count
- * to zero.
- */
- goto out_unmap;
- }
- /* Nuke the page table entry. */
- flush_cache_page(vma, address, page_to_pfn(page));
- if (should_defer_flush(mm, flags)) {
- /*
- * We clear the PTE but do not flush so potentially a remote
- * CPU could still be writing to the page. If the entry was
- * previously clean then the architecture must guarantee that
- * a clear->dirty transition on a cached TLB entry is written
- * through and traps if the PTE is unmapped.
- */
- pteval = ptep_get_and_clear(mm, address, pte);
- set_tlb_ubc_flush_pending(mm, page, pte_dirty(pteval));
- } else {
- pteval = ptep_clear_flush(vma, address, pte);
- }
- /* Move the dirty bit to the physical page now the pte is gone. */
- if (pte_dirty(pteval))
- set_page_dirty(page);
- /* Update high watermark before we lower rss */
- update_hiwater_rss(mm);
- if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
- if (PageHuge(page)) {
- hugetlb_count_sub(1 << compound_order(page), mm);
- } else {
- if (PageAnon(page))
- dec_mm_counter(mm, MM_ANONPAGES);
- else
- dec_mm_counter(mm, MM_FILEPAGES);
- }
- set_pte_at(mm, address, pte,
- swp_entry_to_pte(make_hwpoison_entry(page)));
- } else if (pte_unused(pteval)) {
- /*
- * The guest indicated that the page content is of no
- * interest anymore. Simply discard the pte, vmscan
- * will take care of the rest.
- */
- if (PageAnon(page))
- dec_mm_counter(mm, MM_ANONPAGES);
- else
- dec_mm_counter(mm, MM_FILEPAGES);
- } else if (IS_ENABLED(CONFIG_MIGRATION) && (flags & TTU_MIGRATION)) {
- swp_entry_t entry;
- pte_t swp_pte;
- /*
- * Store the pfn of the page in a special migration
- * pte. do_swap_page() will wait until the migration
- * pte is removed and then restart fault handling.
- */
- entry = make_migration_entry(page, pte_write(pteval));
- swp_pte = swp_entry_to_pte(entry);
- if (pte_soft_dirty(pteval))
- swp_pte = pte_swp_mksoft_dirty(swp_pte);
- set_pte_at(mm, address, pte, swp_pte);
- } else if (PageAnon(page)) {
- swp_entry_t entry = { .val = page_private(page) };
- pte_t swp_pte;
- /*
- * Store the swap location in the pte.
- * See handle_pte_fault() ...
- */
- VM_BUG_ON_PAGE(!PageSwapCache(page), page);
- if (swap_duplicate(entry) < 0) {
- set_pte_at(mm, address, pte, pteval);
- ret = SWAP_FAIL;
- goto out_unmap;
- }
- if (list_empty(&mm->mmlist)) {
- spin_lock(&mmlist_lock);
- if (list_empty(&mm->mmlist))
- list_add(&mm->mmlist, &init_mm.mmlist);
- spin_unlock(&mmlist_lock);
- }
- dec_mm_counter(mm, MM_ANONPAGES);
- inc_mm_counter(mm, MM_SWAPENTS);
- swp_pte = swp_entry_to_pte(entry);
- if (pte_soft_dirty(pteval))
- swp_pte = pte_swp_mksoft_dirty(swp_pte);
- set_pte_at(mm, address, pte, swp_pte);
- } else
- dec_mm_counter(mm, MM_FILEPAGES);
- page_remove_rmap(page);
- page_cache_release(page);
- out_unmap:
- pte_unmap_unlock(pte, ptl);
- if (ret != SWAP_FAIL && ret != SWAP_MLOCK && !(flags & TTU_MUNLOCK))
- mmu_notifier_invalidate_page(mm, address);
- out:
- if (pmd_sharing_possible)
- mmu_notifier_invalidate_range_end(vma->vm_mm,
- spmd_start, spmd_end);
- return ret;
- }
- bool is_vma_temporary_stack(struct vm_area_struct *vma)
- {
- int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
- if (!maybe_stack)
- return false;
- if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
- VM_STACK_INCOMPLETE_SETUP)
- return true;
- return false;
- }
- static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
- {
- return is_vma_temporary_stack(vma);
- }
- static int page_not_mapped(struct page *page)
- {
- return !page_mapped(page);
- }
- /**
- * try_to_unmap - try to remove all page table mappings to a page
- * @page: the page to get unmapped
- * @flags: action and flags
- *
- * Tries to remove all the page table entries which are mapping this
- * page, used in the pageout path. Caller must hold the page lock.
- * Return values are:
- *
- * SWAP_SUCCESS - we succeeded in removing all mappings
- * SWAP_AGAIN - we missed a mapping, try again later
- * SWAP_FAIL - the page is unswappable
- * SWAP_MLOCK - page is mlocked.
- */
- int try_to_unmap(struct page *page, enum ttu_flags flags)
- {
- int ret;
- struct rmap_walk_control rwc = {
- .rmap_one = try_to_unmap_one,
- .arg = (void *)flags,
- .done = page_not_mapped,
- .anon_lock = page_lock_anon_vma_read,
- };
- VM_BUG_ON_PAGE(!PageHuge(page) && PageTransHuge(page), page);
- /*
- * During exec, a temporary VMA is setup and later moved.
- * The VMA is moved under the anon_vma lock but not the
- * page tables leading to a race where migration cannot
- * find the migration ptes. Rather than increasing the
- * locking requirements of exec(), migration skips
- * temporary VMAs until after exec() completes.
- */
- if ((flags & TTU_MIGRATION) && !PageKsm(page) && PageAnon(page))
- rwc.invalid_vma = invalid_migration_vma;
- ret = rmap_walk(page, &rwc);
- if (ret != SWAP_MLOCK && !page_mapped(page))
- ret = SWAP_SUCCESS;
- return ret;
- }
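- /*
- * Usage sketch (illustrative): shrink_page_list() style handling of the
- * return codes documented above:
- *
- * switch (try_to_unmap(page, ttu_flags)) {
- * case SWAP_FAIL:
- * goto activate_locked;
- * case SWAP_AGAIN:
- * goto keep_locked;
- * case SWAP_MLOCK:
- * goto cull_mlocked;
- * case SWAP_SUCCESS:
- * ... page is fully unmapped, proceed to pageout/free ...
- * }
- */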
- /**
- * try_to_munlock - try to munlock a page
- * @page: the page to be munlocked
- *
- * Called from munlock code. Checks all of the VMAs mapping the page
- * to make sure nobody else has this page mlocked. The page will be
- * returned with PG_mlocked cleared if no other vmas have it mlocked.
- *
- * Return values are:
- *
- * SWAP_AGAIN - no vma is holding page mlocked, or,
- * SWAP_AGAIN - page mapped in mlocked vma -- couldn't acquire mmap sem
- * SWAP_FAIL - page cannot be located at present
- * SWAP_MLOCK - page is now mlocked.
- */
- int try_to_munlock(struct page *page)
- {
- int ret;
- struct rmap_walk_control rwc = {
- .rmap_one = try_to_unmap_one,
- .arg = (void *)TTU_MUNLOCK,
- .done = page_not_mapped,
- .anon_lock = page_lock_anon_vma_read,
- };
- VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page);
- ret = rmap_walk(page, &rwc);
- return ret;
- }
- void __put_anon_vma(struct anon_vma *anon_vma)
- {
- struct anon_vma *root = anon_vma->root;
- anon_vma_free(anon_vma);
- if (root != anon_vma && atomic_dec_and_test(&root->refcount))
- anon_vma_free(root);
- }
- static struct anon_vma *rmap_walk_anon_lock(struct page *page,
- struct rmap_walk_control *rwc)
- {
- struct anon_vma *anon_vma;
- if (rwc->anon_lock)
- return rwc->anon_lock(page);
- /*
- * Note: remove_migration_ptes() cannot use page_lock_anon_vma_read()
- * because that depends on page_mapped(); but not all its usages
- * are holding mmap_sem. Users without mmap_sem are required to
- * take a reference count to prevent the anon_vma disappearing
- */
- anon_vma = page_anon_vma(page);
- if (!anon_vma)
- return NULL;
- anon_vma_lock_read(anon_vma);
- return anon_vma;
- }
- /*
- * rmap_walk_anon - do something to an anonymous page using the object-based
- * rmap method
- * @page: the page to be handled
- * @rwc: control variable according to each walk type
- *
- * Find all the mappings of a page using the mapping pointer and the vma chains
- * contained in the anon_vma struct it points to.
- *
- * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
- * where the page was found will be held for write. So, we won't recheck
- * vm_flags for that VMA. That should be OK, because that vma shouldn't be
- * LOCKED.
- */
- static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
- {
- struct anon_vma *anon_vma;
- pgoff_t pgoff;
- struct anon_vma_chain *avc;
- int ret = SWAP_AGAIN;
- anon_vma = rmap_walk_anon_lock(page, rwc);
- if (!anon_vma)
- return ret;
- pgoff = page_to_pgoff(page);
- anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
- struct vm_area_struct *vma = avc->vma;
- unsigned long address = vma_address(page, vma);
- cond_resched();
- if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
- continue;
- ret = rwc->rmap_one(page, vma, address, rwc->arg);
- if (ret != SWAP_AGAIN)
- break;
- if (rwc->done && rwc->done(page))
- break;
- }
- anon_vma_unlock_read(anon_vma);
- return ret;
- }
- /*
- * rmap_walk_file - do something to a file page using the object-based rmap method
- * @page: the page to be handled
- * @rwc: control variable according to each walk type
- *
- * Find all the mappings of a page using the mapping pointer and the vma chains
- * contained in the address_space struct it points to.
- *
- * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
- * where the page was found will be held for write. So, we won't recheck
- * vm_flags for that VMA. That should be OK, because that vma shouldn't be
- * LOCKED.
- */
- static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
- {
- struct address_space *mapping = page->mapping;
- pgoff_t pgoff;
- struct vm_area_struct *vma;
- int ret = SWAP_AGAIN;
- /*
- * The page lock not only makes sure that page->mapping cannot
- * suddenly be NULLified by truncation, it makes sure that the
- * structure at mapping cannot be freed and reused yet,
- * so we can safely take mapping->i_mmap_rwsem.
- */
- VM_BUG_ON_PAGE(!PageLocked(page), page);
- if (!mapping)
- return ret;
- pgoff = page_to_pgoff(page);
- i_mmap_lock_read(mapping);
- vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
- unsigned long address = vma_address(page, vma);
- cond_resched();
- if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
- continue;
- ret = rwc->rmap_one(page, vma, address, rwc->arg);
- if (ret != SWAP_AGAIN)
- goto done;
- if (rwc->done && rwc->done(page))
- goto done;
- }
- done:
- i_mmap_unlock_read(mapping);
- return ret;
- }
- int rmap_walk(struct page *page, struct rmap_walk_control *rwc)
- {
- if (unlikely(PageKsm(page)))
- return rmap_walk_ksm(page, rwc);
- else if (PageAnon(page))
- return rmap_walk_anon(page, rwc);
- else
- return rmap_walk_file(page, rwc);
- }
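- /*
- * Usage sketch (illustrative): a caller packages its per-vma callback in
- * an rmap_walk_control, as page_mkclean() above does; my_rmap_one and
- * my_state are placeholder names:
- *
- * struct rmap_walk_control rwc = {
- * .rmap_one = my_rmap_one,
- * .arg = &my_state,
- * .anon_lock = page_lock_anon_vma_read,
- * };
- * rmap_walk(page, &rwc);
- */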
- #ifdef CONFIG_HUGETLB_PAGE
- /*
- * The following three functions are for anonymous (private mapped) hugepages.
- * Unlike common anonymous pages, anonymous hugepages have no accounting code
- * and no lru code, because we handle hugepages differently from common pages.
- */
- static void __hugepage_set_anon_rmap(struct page *page,
- struct vm_area_struct *vma, unsigned long address, int exclusive)
- {
- struct anon_vma *anon_vma = vma->anon_vma;
- BUG_ON(!anon_vma);
- if (PageAnon(page))
- return;
- if (!exclusive)
- anon_vma = anon_vma->root;
- anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
- page->mapping = (struct address_space *) anon_vma;
- page->index = linear_page_index(vma, address);
- }
- void hugepage_add_anon_rmap(struct page *page,
- struct vm_area_struct *vma, unsigned long address)
- {
- struct anon_vma *anon_vma = vma->anon_vma;
- int first;
- BUG_ON(!PageLocked(page));
- BUG_ON(!anon_vma);
- /* address might be in next vma when migration races vma_adjust */
- first = atomic_inc_and_test(&page->_mapcount);
- if (first)
- __hugepage_set_anon_rmap(page, vma, address, 0);
- }
- void hugepage_add_new_anon_rmap(struct page *page,
- struct vm_area_struct *vma, unsigned long address)
- {
- BUG_ON(address < vma->vm_start || address >= vma->vm_end);
- atomic_set(&page->_mapcount, 0);
- __hugepage_set_anon_rmap(page, vma, address, 1);
- }
- #endif /* CONFIG_HUGETLB_PAGE */