Push down the page queues lock into vm_page_cache(), vm_page_try_to_cache(),
and vm_page_try_to_free().  Consequently, push down the page queues lock
into pmap_enter_quick(), pmap_page_wired_mappings(), pmap_remove_all(), and
pmap_remove_write().
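
The pattern applied throughout the diffs below: each of these functions
formerly required its caller to hold the global page queues lock and merely
asserted that fact; after this change the function acquires and releases the
lock itself, so call sites in the machine-independent VM code lose their
locking boilerplate.  A minimal userland sketch of the same push-down, with
a pthread mutex standing in for vm_page_queue_mtx (the helper names are
illustrative, not kernel API):

    #include <pthread.h>

    /* Stand-in for the global page queues lock, vm_page_queue_mtx. */
    static pthread_mutex_t queues_lock = PTHREAD_MUTEX_INITIALIZER;

    static int page_flags;                  /* stand-in for m->flags */
    #define PG_WRITEABLE 0x0004

    /* Before: the caller must already hold the lock; the function only
     * asserts that (kernel: mtx_assert(&vm_page_queue_mtx, MA_OWNED)). */
    static void
    remove_write_before(void)
    {
            /* caller holds queues_lock here */
            page_flags &= ~PG_WRITEABLE;
    }

    /* After: the lock is pushed down into the function, so call sites
     * need no locking around the call at all. */
    static void
    remove_write_after(void)
    {
            pthread_mutex_lock(&queues_lock);   /* vm_page_lock_queues() */
            page_flags &= ~PG_WRITEABLE;
            pthread_mutex_unlock(&queues_lock); /* vm_page_unlock_queues() */
    }

    int
    main(void)
    {
            pthread_mutex_lock(&queues_lock);
            remove_write_before();              /* old-style call site */
            pthread_mutex_unlock(&queues_lock);

            remove_write_after();               /* new-style call site */
            return (0);
    }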

Push down the page queues lock into Xen's pmap_page_is_mapped().  (I
overlooked the Xen pmap in r207702.)
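
A corollary of the push-down, visible in the Xen pmap_page_is_mapped() hunk:
a predicate that used to return directly while its caller held the lock must
now compute its result into a local, drop the lock, and only then return,
since returning from inside the locked region would leak the lock.  A hedged
sketch of that reshaping (the names are illustrative, not the kernel's):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t queues_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool md_list_nonempty, pvh_list_nonempty; /* stand-ins for pv lists */

    static bool
    page_is_mapped(void)
    {
            bool rv;

            pthread_mutex_lock(&queues_lock);   /* vm_page_lock_queues() */
            rv = md_list_nonempty || pvh_list_nonempty;
            pthread_mutex_unlock(&queues_lock); /* vm_page_unlock_queues() */
            return (rv);
    }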

Switch to a per-processor counter for the total number of pages cached.
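
The counter change pairs the vmmeter.h annotation, which moves v_tcached
from locking class (q), protected by the queues lock, to (p), per-processor,
with the increment in vm_page_cache() becoming PCPU_INC(cnt.v_tcached): each
CPU bumps only its own slot, so no lock is needed.  A rough userland
analogue with one padded counter per CPU (slot count and padding size are
assumptions, for illustration only):

    #include <stdint.h>

    #define NSLOTS 64                   /* analogue of MAXCPU */

    /* One cache-line-padded counter per CPU, like a PCPU field. */
    static struct {
            uint64_t count;
            char     pad[56];           /* avoid false sharing */
    } tcached[NSLOTS];

    /* Increment this CPU's private slot; no lock or atomic op is needed
     * because no other CPU writes it (kernel: PCPU_INC(cnt.v_tcached)). */
    static void
    count_page_cached(int cpu)
    {
            tcached[cpu].count++;
    }

    /* A reader sums every slot; the total can be momentarily stale,
     * which is acceptable for a statistics counter. */
    static uint64_t
    tcached_total(void)
    {
            uint64_t sum = 0;

            for (int i = 0; i < NSLOTS; i++)
                    sum += tcached[i].count;
            return (sum);
    }
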
Alan Cox, 2010-05-08 20:34:01 +00:00
commit 3c4a24406b (parent b6cb607644)
19 changed files with 150 additions and 149 deletions

View file

@ -2796,7 +2796,7 @@ pmap_remove_all(vm_page_t m)
KASSERT((m->flags & PG_FICTITIOUS) == 0,
("pmap_remove_all: page %p is fictitious", m));
-mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+vm_page_lock_queues();
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
pmap = PV_PMAP(pv);
@ -2834,6 +2834,7 @@ pmap_remove_all(vm_page_t m)
PMAP_UNLOCK(pmap);
}
vm_page_flag_clear(m, PG_WRITEABLE);
+vm_page_unlock_queues();
}
/*
@ -3414,8 +3415,10 @@ void
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{
+vm_page_lock_queues();
PMAP_LOCK(pmap);
-(void) pmap_enter_quick_locked(pmap, va, m, prot, NULL);
+(void)pmap_enter_quick_locked(pmap, va, m, prot, NULL);
+vm_page_unlock_queues();
PMAP_UNLOCK(pmap);
}
@ -3926,8 +3929,11 @@ pmap_page_wired_mappings(vm_page_t m)
count = 0;
if ((m->flags & PG_FICTITIOUS) != 0)
return (count);
+vm_page_lock_queues();
count = pmap_pvh_wired_mappings(&m->md, count);
-return (pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)), count));
+count = pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)), count);
+vm_page_unlock_queues();
+return (count);
}
/*
@ -4237,7 +4243,7 @@ pmap_remove_write(vm_page_t m)
if ((m->flags & PG_FICTITIOUS) != 0 ||
(m->flags & PG_WRITEABLE) == 0)
return;
-mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+vm_page_lock_queues();
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
pmap = PV_PMAP(pv);
@ -4268,6 +4274,7 @@ retry:
PMAP_UNLOCK(pmap);
}
vm_page_flag_clear(m, PG_WRITEABLE);
+vm_page_unlock_queues();
}
/*

View file

@ -3118,18 +3118,11 @@ pmap_remove_all(vm_page_t m)
pmap_t curpm;
int flags = 0;
-#if defined(PMAP_DEBUG)
-/*
- * XXX This makes pmap_remove_all() illegal for non-managed pages!
- */
-if (m->flags & PG_FICTITIOUS) {
-panic("pmap_remove_all: illegal for unmanaged page, va: 0x%x", VM_PAGE_TO_PHYS(m));
-}
-#endif
+KASSERT((m->flags & PG_FICTITIOUS) == 0,
+("pmap_remove_all: page %p is fictitious", m));
if (TAILQ_EMPTY(&m->md.pv_list))
return;
-mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+vm_page_lock_queues();
pmap_remove_write(m);
curpm = vmspace_pmap(curproc->p_vmspace);
while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
@ -3180,6 +3173,7 @@ pmap_remove_all(vm_page_t m)
pmap_tlb_flushD(curpm);
}
vm_page_flag_clear(m, PG_WRITEABLE);
+vm_page_unlock_queues();
}
@ -3615,9 +3609,11 @@ void
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{
+vm_page_lock_queues();
PMAP_LOCK(pmap);
pmap_enter_locked(pmap, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
FALSE, M_NOWAIT);
+vm_page_unlock_queues();
PMAP_UNLOCK(pmap);
}
@ -4450,10 +4446,11 @@ pmap_page_wired_mappings(vm_page_t m)
count = 0;
if ((m->flags & PG_FICTITIOUS) != 0)
return (count);
-mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+vm_page_lock_queues();
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list)
if ((pv->pv_flags & PVF_WIRED) != 0)
count++;
+vm_page_unlock_queues();
return (count);
}
@ -4530,8 +4527,11 @@ void
pmap_remove_write(vm_page_t m)
{
-if (m->flags & PG_WRITEABLE)
+if (m->flags & PG_WRITEABLE) {
+vm_page_lock_queues();
pmap_clearbit(m, PVF_WRITE);
+vm_page_unlock_queues();
+}
}

View file

@ -2900,7 +2900,7 @@ pmap_remove_all(vm_page_t m)
KASSERT((m->flags & PG_FICTITIOUS) == 0,
("pmap_remove_all: page %p is fictitious", m));
-mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+vm_page_lock_queues();
sched_pin();
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
@ -2940,6 +2940,7 @@ pmap_remove_all(vm_page_t m)
}
vm_page_flag_clear(m, PG_WRITEABLE);
sched_unpin();
+vm_page_unlock_queues();
}
/*
@ -3544,8 +3545,10 @@ void
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{
+vm_page_lock_queues();
PMAP_LOCK(pmap);
-(void) pmap_enter_quick_locked(pmap, va, m, prot, NULL);
+(void)pmap_enter_quick_locked(pmap, va, m, prot, NULL);
+vm_page_unlock_queues();
PMAP_UNLOCK(pmap);
}
@ -4088,8 +4091,11 @@ pmap_page_wired_mappings(vm_page_t m)
count = 0;
if ((m->flags & PG_FICTITIOUS) != 0)
return (count);
+vm_page_lock_queues();
count = pmap_pvh_wired_mappings(&m->md, count);
-return (pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)), count));
+count = pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)), count);
+vm_page_unlock_queues();
+return (count);
}
/*
@ -4404,10 +4410,10 @@ pmap_remove_write(vm_page_t m)
pt_entry_t oldpte, *pte;
vm_offset_t va;
-mtx_assert(&vm_page_queue_mtx, MA_OWNED);
if ((m->flags & PG_FICTITIOUS) != 0 ||
(m->flags & PG_WRITEABLE) == 0)
return;
+vm_page_lock_queues();
sched_pin();
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
@ -4445,6 +4451,7 @@ retry:
}
vm_page_flag_clear(m, PG_WRITEABLE);
sched_unpin();
+vm_page_unlock_queues();
}
/*

View file

@ -2485,16 +2485,9 @@ pmap_remove_all(vm_page_t m)
pt_entry_t *pte, tpte;
vm_page_t free;
-#if defined(PMAP_DIAGNOSTIC)
-/*
- * XXX This makes pmap_remove_all() illegal for non-managed pages!
- */
-if (m->flags & PG_FICTITIOUS) {
-panic("pmap_remove_all: illegal for unmanaged page, va: 0x%jx",
-VM_PAGE_TO_PHYS(m) & 0xffffffff);
-}
-#endif
-mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+KASSERT((m->flags & PG_FICTITIOUS) == 0,
+("pmap_remove_all: page %p is fictitious", m));
+vm_page_lock_queues();
sched_pin();
while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
pmap = PV_PMAP(pv);
@ -2531,6 +2524,7 @@ pmap_remove_all(vm_page_t m)
if (*PMAP1)
PT_SET_MA(PADDR1, 0);
sched_unpin();
+vm_page_unlock_queues();
}
/*
@ -2946,10 +2940,12 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
CTR4(KTR_PMAP, "pmap_enter_quick: pmap=%p va=0x%x m=%p prot=0x%x",
pmap, va, m, prot);
+vm_page_lock_queues();
PMAP_LOCK(pmap);
-(void) pmap_enter_quick_locked(&mclp, &count, pmap, va, m, prot, NULL);
+(void)pmap_enter_quick_locked(&mclp, &count, pmap, va, m, prot, NULL);
if (count)
HYPERVISOR_multicall(&mcl, count);
+vm_page_unlock_queues();
PMAP_UNLOCK(pmap);
}
@ -3504,7 +3500,7 @@ pmap_page_wired_mappings(vm_page_t m)
count = 0;
if ((m->flags & PG_FICTITIOUS) != 0)
return (count);
-mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+vm_page_lock_queues();
sched_pin();
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
pmap = PV_PMAP(pv);
@ -3515,6 +3511,7 @@ pmap_page_wired_mappings(vm_page_t m)
PMAP_UNLOCK(pmap);
}
sched_unpin();
+vm_page_unlock_queues();
return (count);
}
@ -3525,16 +3522,15 @@ pmap_page_wired_mappings(vm_page_t m)
boolean_t
pmap_page_is_mapped(vm_page_t m)
{
-struct md_page *pvh;
+boolean_t rv;
if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
return (FALSE);
-mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-if (TAILQ_EMPTY(&m->md.pv_list)) {
-pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
-return (!TAILQ_EMPTY(&pvh->pv_list));
-} else
-return (TRUE);
+vm_page_lock_queues();
+rv = !TAILQ_EMPTY(&m->md.pv_list) ||
+!TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list);
+vm_page_unlock_queues();
+return (rv);
}
/*
@ -3784,10 +3780,10 @@ pmap_remove_write(vm_page_t m)
pmap_t pmap;
pt_entry_t oldpte, *pte;
-mtx_assert(&vm_page_queue_mtx, MA_OWNED);
if ((m->flags & PG_FICTITIOUS) != 0 ||
(m->flags & PG_WRITEABLE) == 0)
return;
+vm_page_lock_queues();
sched_pin();
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
pmap = PV_PMAP(pv);
@ -3818,6 +3814,7 @@ retry:
if (*PMAP1)
PT_SET_MA(PADDR1, 0);
sched_unpin();
+vm_page_unlock_queues();
}
/*

View file

@ -1392,15 +1392,9 @@ pmap_remove_all(vm_page_t m)
pmap_t oldpmap;
pv_entry_t pv;
-#if defined(DIAGNOSTIC)
-/*
- * XXX This makes pmap_remove_all() illegal for non-managed pages!
- */
-if (m->flags & PG_FICTITIOUS) {
-panic("pmap_remove_all: illegal for unmanaged page, va: 0x%lx", VM_PAGE_TO_PHYS(m));
-}
-#endif
-mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+KASSERT((m->flags & PG_FICTITIOUS) == 0,
+("pmap_remove_all: page %p is fictitious", m));
+vm_page_lock_queues();
while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
struct ia64_lpte *pte;
pmap_t pmap = pv->pv_pmap;
@ -1417,6 +1411,7 @@ pmap_remove_all(vm_page_t m)
PMAP_UNLOCK(pmap);
}
vm_page_flag_clear(m, PG_WRITEABLE);
+vm_page_unlock_queues();
}
/*
@ -1655,9 +1650,11 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{
pmap_t oldpmap;
+vm_page_lock_queues();
PMAP_LOCK(pmap);
oldpmap = pmap_switch(pmap);
pmap_enter_quick_locked(pmap, va, m, prot);
+vm_page_unlock_queues();
pmap_switch(oldpmap);
PMAP_UNLOCK(pmap);
}
@ -1875,7 +1872,7 @@ pmap_page_wired_mappings(vm_page_t m)
count = 0;
if ((m->flags & PG_FICTITIOUS) != 0)
return (count);
-mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+vm_page_lock_queues();
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
pmap = pv->pv_pmap;
PMAP_LOCK(pmap);
@ -1887,6 +1884,7 @@ pmap_page_wired_mappings(vm_page_t m)
pmap_switch(oldpmap);
PMAP_UNLOCK(pmap);
}
+vm_page_unlock_queues();
return (count);
}
@ -2118,10 +2116,10 @@ pmap_remove_write(vm_page_t m)
pv_entry_t pv;
vm_prot_t prot;
-mtx_assert(&vm_page_queue_mtx, MA_OWNED);
if ((m->flags & PG_FICTITIOUS) != 0 ||
(m->flags & PG_WRITEABLE) == 0)
return;
+vm_page_lock_queues();
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
pmap = pv->pv_pmap;
PMAP_LOCK(pmap);
@ -2142,6 +2140,7 @@ pmap_remove_write(vm_page_t m)
PMAP_UNLOCK(pmap);
}
vm_page_flag_clear(m, PG_WRITEABLE);
+vm_page_unlock_queues();
}
/*

View file

@ -105,7 +105,6 @@ retry:
if (vm_page_sleep_if_busy(user_pg, TRUE, "vm_pgmoveco"))
goto retry;
vm_page_lock(user_pg);
-vm_page_lock_queues();
pmap_remove_all(user_pg);
vm_page_free(user_pg);
vm_page_unlock(user_pg);
@ -117,11 +116,9 @@ retry:
*/
if (uobject->backing_object != NULL)
pmap_remove(map->pmap, uaddr, uaddr + PAGE_SIZE);
-vm_page_lock_queues();
}
vm_page_insert(kern_pg, uobject, upindex);
vm_page_dirty(kern_pg);
-vm_page_unlock_queues();
VM_OBJECT_UNLOCK(uobject);
vm_map_lookup_done(map, entry);
return(KERN_SUCCESS);

View file

@ -1579,7 +1579,6 @@ vfs_vmio_release(struct buf *bp)
*/
if ((m->oflags & VPO_BUSY) == 0 && m->busy == 0 &&
m->wire_count == 0) {
-vm_page_lock_queues();
/*
* Might as well free the page if we can and it has
* no valid data. We also free the page if the
@ -1593,7 +1592,6 @@ vfs_vmio_release(struct buf *bp)
} else if (buf_vm_page_count_severe()) {
vm_page_try_to_cache(m);
}
-vm_page_unlock_queues();
}
vm_page_unlock(m);
}

View file

@ -1595,7 +1595,7 @@ pmap_remove_all(vm_page_t m)
KASSERT((m->flags & PG_FICTITIOUS) == 0,
("pmap_remove_all: page %p is fictitious", m));
-mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+vm_page_lock_queues();
if (m->md.pv_flags & PV_TABLE_REF)
vm_page_flag_set(m, PG_REFERENCED);
@ -1646,6 +1646,7 @@ pmap_remove_all(vm_page_t m)
vm_page_flag_clear(m, PG_WRITEABLE);
m->md.pv_flags &= ~(PV_TABLE_REF | PV_TABLE_MOD);
+vm_page_unlock_queues();
}
/*
@ -1921,8 +1922,10 @@ void
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{
+vm_page_lock_queues();
PMAP_LOCK(pmap);
(void)pmap_enter_quick_locked(pmap, va, m, prot, NULL);
+vm_page_unlock_queues();
PMAP_UNLOCK(pmap);
}
@ -2510,10 +2513,11 @@ pmap_page_wired_mappings(vm_page_t m)
count = 0;
if ((m->flags & PG_FICTITIOUS) != 0)
return (count);
-mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+vm_page_lock_queues();
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list)
if (pv->pv_wired)
count++;
+vm_page_unlock_queues();
return (count);
}
@ -2527,12 +2531,14 @@ pmap_remove_write(vm_page_t m)
vm_offset_t va;
pt_entry_t *pte;
-if ((m->flags & PG_WRITEABLE) == 0)
+if ((m->flags & PG_FICTITIOUS) != 0 ||
+(m->flags & PG_WRITEABLE) == 0)
return;
/*
* Loop over all current mappings setting/clearing as appropos.
*/
+vm_page_lock_queues();
for (pv = TAILQ_FIRST(&m->md.pv_list); pv; pv = npv) {
npv = TAILQ_NEXT(pv, pv_plist);
pte = pmap_pte(pv->pv_pmap, pv->pv_va);
@ -2545,6 +2551,7 @@ pmap_remove_write(vm_page_t m)
VM_PROT_READ | VM_PROT_EXECUTE);
}
vm_page_flag_clear(m, PG_WRITEABLE);
+vm_page_unlock_queues();
}
/*

View file

@ -1208,11 +1208,12 @@ moea_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
vm_prot_t prot)
{
+vm_page_lock_queues();
PMAP_LOCK(pm);
moea_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
FALSE);
+vm_page_unlock_queues();
PMAP_UNLOCK(pm);
}
vm_paddr_t
@ -1322,10 +1323,10 @@ moea_remove_write(mmu_t mmu, vm_page_t m)
pmap_t pmap;
u_int lo;
-mtx_assert(&vm_page_queue_mtx, MA_OWNED);
if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
(m->flags & PG_WRITEABLE) == 0)
return;
+vm_page_lock_queues();
lo = moea_attr_fetch(m);
powerpc_sync();
LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
@ -1351,6 +1352,7 @@ moea_remove_write(mmu_t mmu, vm_page_t m)
vm_page_dirty(m);
}
vm_page_flag_clear(m, PG_WRITEABLE);
+vm_page_unlock_queues();
}
/*
@ -1518,10 +1520,11 @@ moea_page_wired_mappings(mmu_t mmu, vm_page_t m)
count = 0;
if (!moea_initialized || (m->flags & PG_FICTITIOUS) != 0)
return (count);
-mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+vm_page_lock_queues();
LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
count++;
+vm_page_unlock_queues();
return (count);
}
@ -1732,8 +1735,7 @@ moea_remove_all(mmu_t mmu, vm_page_t m)
struct pvo_entry *pvo, *next_pvo;
pmap_t pmap;
-mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+vm_page_lock_queues();
pvo_head = vm_page_to_pvoh(m);
for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
next_pvo = LIST_NEXT(pvo, pvo_vlink);
@ -1749,6 +1751,7 @@ moea_remove_all(mmu_t mmu, vm_page_t m)
vm_page_dirty(m);
}
vm_page_flag_clear(m, PG_WRITEABLE);
+vm_page_unlock_queues();
}
/*

View file

@ -1341,11 +1341,13 @@ void
moea64_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
vm_prot_t prot)
{
+vm_page_lock_queues();
PMAP_LOCK(pm);
moea64_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
FALSE);
+vm_page_unlock_queues();
PMAP_UNLOCK(pm);
}
vm_paddr_t
@ -1517,10 +1519,10 @@ moea64_remove_write(mmu_t mmu, vm_page_t m)
pmap_t pmap;
uint64_t lo;
-mtx_assert(&vm_page_queue_mtx, MA_OWNED);
if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
(m->flags & PG_WRITEABLE) == 0)
return;
+vm_page_lock_queues();
lo = moea64_attr_fetch(m);
SYNC();
LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
@ -1547,6 +1549,7 @@ moea64_remove_write(mmu_t mmu, vm_page_t m)
vm_page_dirty(m);
}
vm_page_flag_clear(m, PG_WRITEABLE);
+vm_page_unlock_queues();
}
/*
@ -1710,10 +1713,11 @@ moea64_page_wired_mappings(mmu_t mmu, vm_page_t m)
count = 0;
if (!moea64_initialized || (m->flags & PG_FICTITIOUS) != 0)
return (count);
-mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+vm_page_lock_queues();
LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
count++;
+vm_page_unlock_queues();
return (count);
}
@ -1929,8 +1933,7 @@ moea64_remove_all(mmu_t mmu, vm_page_t m)
struct pvo_entry *pvo, *next_pvo;
pmap_t pmap;
-mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+vm_page_lock_queues();
pvo_head = vm_page_to_pvoh(m);
for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
next_pvo = LIST_NEXT(pvo, pvo_vlink);
@ -1946,6 +1949,7 @@ moea64_remove_all(mmu_t mmu, vm_page_t m)
vm_page_dirty(m);
}
vm_page_flag_clear(m, PG_WRITEABLE);
+vm_page_unlock_queues();
}
/*

View file

@ -1722,9 +1722,11 @@ mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
vm_prot_t prot)
{
+vm_page_lock_queues();
PMAP_LOCK(pmap);
mmu_booke_enter_locked(mmu, pmap, va, m,
prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
+vm_page_unlock_queues();
PMAP_UNLOCK(pmap);
}
@ -1783,8 +1785,7 @@ mmu_booke_remove_all(mmu_t mmu, vm_page_t m)
pv_entry_t pv, pvn;
uint8_t hold_flag;
-mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+vm_page_lock_queues();
for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) {
pvn = TAILQ_NEXT(pv, pv_link);
@ -1794,6 +1795,7 @@ mmu_booke_remove_all(mmu_t mmu, vm_page_t m)
PMAP_UNLOCK(pv->pv_pmap);
}
vm_page_flag_clear(m, PG_WRITEABLE);
+vm_page_unlock_queues();
}
/*
@ -1939,11 +1941,10 @@ mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
pv_entry_t pv;
pte_t *pte;
-mtx_assert(&vm_page_queue_mtx, MA_OWNED);
if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
(m->flags & PG_WRITEABLE) == 0)
return;
+vm_page_lock_queues();
TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
PMAP_LOCK(pv->pv_pmap);
if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
@ -1967,6 +1968,7 @@ mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
PMAP_UNLOCK(pv->pv_pmap);
}
vm_page_flag_clear(m, PG_WRITEABLE);
+vm_page_unlock_queues();
}
static void
@ -2388,8 +2390,7 @@ mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m)
if ((m->flags & PG_FICTITIOUS) != 0)
return (count);
-mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+vm_page_lock_queues();
TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
PMAP_LOCK(pv->pv_pmap);
if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL)
@ -2397,7 +2398,7 @@ mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m)
count++;
PMAP_UNLOCK(pv->pv_pmap);
}
+vm_page_unlock_queues();
return (count);
}

View file

@ -1240,7 +1240,7 @@ pmap_remove_all(vm_page_t m)
struct tte *tp;
vm_offset_t va;
-mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+vm_page_lock_queues();
for (tp = TAILQ_FIRST(&m->md.tte_list); tp != NULL; tp = tpn) {
tpn = TAILQ_NEXT(tp, tte_link);
if ((tp->tte_data & TD_PV) == 0)
@ -1263,6 +1263,7 @@ pmap_remove_all(vm_page_t m)
PMAP_UNLOCK(pm);
}
vm_page_flag_clear(m, PG_WRITEABLE);
+vm_page_unlock_queues();
}
int
@ -1502,9 +1503,11 @@ void
pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{
+vm_page_lock_queues();
PMAP_LOCK(pm);
pmap_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
FALSE);
+vm_page_unlock_queues();
PMAP_UNLOCK(pm);
}
@ -1809,10 +1812,11 @@ pmap_page_wired_mappings(vm_page_t m)
count = 0;
if ((m->flags & PG_FICTITIOUS) != 0)
return (count);
-mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+vm_page_lock_queues();
TAILQ_FOREACH(tp, &m->md.tte_list, tte_link)
if ((tp->tte_data & (TD_PV | TD_WIRED)) == (TD_PV | TD_WIRED))
count++;
+vm_page_unlock_queues();
return (count);
}
@ -1981,10 +1985,10 @@ pmap_remove_write(vm_page_t m)
struct tte *tp;
u_long data;
-mtx_assert(&vm_page_queue_mtx, MA_OWNED);
if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
(m->flags & PG_WRITEABLE) == 0)
return;
+vm_page_lock_queues();
TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
if ((tp->tte_data & TD_PV) == 0)
continue;
@ -1995,6 +1999,7 @@ pmap_remove_write(vm_page_t m)
}
}
vm_page_flag_clear(m, PG_WRITEABLE);
+vm_page_unlock_queues();
}
int

View file

@ -1211,8 +1211,11 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
void
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{
+vm_page_lock_queues();
PMAP_LOCK(pmap);
pmap_enter_quick_locked(pmap, va, m, prot);
+vm_page_unlock_queues();
PMAP_UNLOCK(pmap);
}
@ -1714,7 +1717,7 @@ pmap_page_wired_mappings(vm_page_t m)
count = 0;
if ((m->flags & PG_FICTITIOUS) != 0)
return (count);
-mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+vm_page_lock_queues();
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
pmap = pv->pv_pmap;
PMAP_LOCK(pmap);
@ -1723,6 +1726,7 @@ pmap_page_wired_mappings(vm_page_t m)
count++;
PMAP_UNLOCK(pmap);
}
+vm_page_unlock_queues();
return (count);
}
@ -1732,12 +1736,15 @@ pmap_page_wired_mappings(vm_page_t m)
void
pmap_remove_write(vm_page_t m)
{
if ((m->flags & PG_WRITEABLE) == 0)
return;
-mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+vm_page_lock_queues();
tte_clear_phys_bit(m, VTD_SW_W|VTD_W);
vm_page_flag_clear(m, PG_WRITEABLE);
+vm_page_unlock_queues();
}
/*
* Initialize the pmap associated with process 0.
*/
@ -1956,7 +1963,7 @@ pmap_remove_all(vm_page_t m)
uint64_t tte_data;
DPRINTF("pmap_remove_all 0x%lx\n", VM_PAGE_TO_PHYS(m));
-mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+vm_page_lock_queues();
while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
PMAP_LOCK(pv->pv_pmap);
pv->pv_pmap->pm_stats.resident_count--;
@ -1986,6 +1993,7 @@ pmap_remove_all(vm_page_t m)
free_pv_entry(pv);
}
vm_page_flag_clear(m, PG_WRITEABLE);
+vm_page_unlock_queues();
}
static void

View file

@ -72,7 +72,7 @@ struct vmmeter {
u_int v_pdwakeups; /* (f) times daemon has awaken from sleep */
u_int v_pdpages; /* (q) pages analyzed by daemon */
-u_int v_tcached; /* (q) total pages cached */
+u_int v_tcached; /* (p) total pages cached */
u_int v_dfree; /* (q) pages freed by daemon */
u_int v_pfree; /* (p) pages freed by exiting processes */
u_int v_tfree; /* (p) total pages freed */

View file

@ -382,8 +382,10 @@ static void
swp_pager_free_nrpage(vm_page_t m)
{
+vm_page_lock(m);
if (m->wire_count == 0)
vm_page_free(m);
+vm_page_unlock(m);
}
/*
@ -1137,17 +1139,10 @@ swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
if (0 < i || j < count) {
int k;
-for (k = 0; k < i; ++k) {
-vm_page_lock(m[k]);
+for (k = 0; k < i; ++k)
swp_pager_free_nrpage(m[k]);
-vm_page_unlock(m[k]);
-}
-for (k = j; k < count; ++k) {
-vm_page_lock(m[k]);
+for (k = j; k < count; ++k)
swp_pager_free_nrpage(m[k]);
-vm_page_unlock(m[k]);
-}
}
/*
@ -1514,8 +1509,6 @@ swp_pager_async_iodone(struct buf *bp)
for (i = 0; i < bp->b_npages; ++i) {
vm_page_t m = bp->b_pages[i];
-vm_page_lock(m);
-vm_page_lock_queues();
m->oflags &= ~VPO_SWAPINPROG;
if (bp->b_ioflags & BIO_ERROR) {
@ -1558,7 +1551,9 @@ swp_pager_async_iodone(struct buf *bp)
* then finish the I/O.
*/
vm_page_dirty(m);
+vm_page_lock(m);
vm_page_activate(m);
+vm_page_unlock(m);
vm_page_io_finish(m);
}
} else if (bp->b_iocmd == BIO_READ) {
@ -1593,11 +1588,12 @@ swp_pager_async_iodone(struct buf *bp)
* left busy.
*/
if (i != bp->b_pager.pg_reqpage) {
+vm_page_lock(m);
vm_page_deactivate(m);
+vm_page_unlock(m);
vm_page_wakeup(m);
-} else {
+} else
vm_page_flash(m);
-}
} else {
/*
* For write success, clear the dirty
@ -1609,11 +1605,12 @@ swp_pager_async_iodone(struct buf *bp)
" protected", m));
vm_page_undirty(m);
vm_page_io_finish(m);
-if (vm_page_count_severe())
+if (vm_page_count_severe()) {
+vm_page_lock(m);
vm_page_try_to_cache(m);
+vm_page_unlock(m);
+}
}
-vm_page_unlock_queues();
-vm_page_unlock(m);
}
/*

View file

@ -487,20 +487,16 @@ readrest:
(mt->oflags & VPO_BUSY))
continue;
vm_page_lock(mt);
-vm_page_lock_queues();
if (mt->hold_count ||
mt->wire_count) {
-vm_page_unlock_queues();
vm_page_unlock(mt);
continue;
}
pmap_remove_all(mt);
-if (mt->dirty) {
+if (mt->dirty != 0)
vm_page_deactivate(mt);
-} else {
+else
vm_page_cache(mt);
-}
-vm_page_unlock_queues();
vm_page_unlock(mt);
}
ahead += behind;
@ -1025,13 +1021,8 @@ vm_fault_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
break;
}
if (m->valid == VM_PAGE_BITS_ALL &&
-(m->flags & PG_FICTITIOUS) == 0) {
-vm_page_lock(m);
-vm_page_lock_queues();
+(m->flags & PG_FICTITIOUS) == 0)
pmap_enter_quick(pmap, addr, m, entry->protection);
-vm_page_unlock_queues();
-vm_page_unlock(m);
-}
VM_OBJECT_UNLOCK(lobject);
}
}

View file

@ -876,13 +876,8 @@ vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int
p->oflags |= VPO_CLEANCHK;
if ((flags & OBJPC_NOSYNC) && (p->oflags & VPO_NOSYNC))
clearobjflags = 0;
-else {
-vm_page_lock(p);
-vm_page_lock_queues();
+else
pmap_remove_write(p);
-vm_page_unlock_queues();
-vm_page_unlock(p);
-}
}
if (clearobjflags && (tstart == 0) && (tend == object->size))
@ -1048,11 +1043,7 @@ vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration,
vm_pageout_flush(ma, runlen, pagerflags);
for (i = 0; i < runlen; i++) {
if (ma[i]->dirty) {
-vm_page_lock(ma[i]);
-vm_page_lock_queues();
pmap_remove_write(ma[i]);
-vm_page_unlock_queues();
-vm_page_unlock(ma[i]);
ma[i]->oflags |= VPO_CLEANCHK;
/*
@ -1968,7 +1959,6 @@ again:
* if "clean_only" is FALSE.
*/
vm_page_lock(p);
-vm_page_lock_queues();
if ((wirings = p->wire_count) != 0 &&
(wirings = pmap_page_wired_mappings(p)) != p->wire_count) {
/* Fictitious pages do not have managed mappings. */
@ -1980,7 +1970,6 @@ again:
p->valid = 0;
vm_page_undirty(p);
}
-vm_page_unlock_queues();
vm_page_unlock(p);
continue;
}
@ -1991,7 +1980,6 @@ again:
if (clean_only && p->valid) {
pmap_remove_write(p);
if (p->dirty) {
-vm_page_unlock_queues();
vm_page_unlock(p);
continue;
}
@ -2001,7 +1989,6 @@ again:
if (wirings != 0)
p->wire_count -= wirings;
vm_page_free(p);
-vm_page_unlock_queues();
vm_page_unlock(p);
}
vm_object_pip_wakeup(object);

View file

@ -1676,13 +1676,11 @@ int
vm_page_try_to_cache(vm_page_t m)
{
-mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+vm_page_lock_assert(m, MA_OWNED);
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if (m->dirty || m->hold_count || m->busy || m->wire_count ||
-(m->oflags & VPO_BUSY) || (m->flags & PG_UNMANAGED)) {
+(m->oflags & VPO_BUSY) || (m->flags & PG_UNMANAGED))
return (0);
-}
pmap_remove_all(m);
if (m->dirty)
return (0);
@ -1700,14 +1698,12 @@ int
vm_page_try_to_free(vm_page_t m)
{
-mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+vm_page_lock_assert(m, MA_OWNED);
if (m->object != NULL)
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if (m->dirty || m->hold_count || m->busy || m->wire_count ||
-(m->oflags & VPO_BUSY) || (m->flags & PG_UNMANAGED)) {
+(m->oflags & VPO_BUSY) || (m->flags & PG_UNMANAGED))
return (0);
-}
pmap_remove_all(m);
if (m->dirty)
return (0);
@ -1728,14 +1724,12 @@ vm_page_cache(vm_page_t m)
vm_object_t object;
vm_page_t root;
-mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+vm_page_lock_assert(m, MA_OWNED);
object = m->object;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
if ((m->flags & PG_UNMANAGED) || (m->oflags & VPO_BUSY) || m->busy ||
-m->hold_count || m->wire_count) {
+m->hold_count || m->wire_count)
panic("vm_page_cache: attempting to cache busy page");
-}
pmap_remove_all(m);
if (m->dirty != 0)
panic("vm_page_cache: page %p is dirty", m);
@ -1752,12 +1746,16 @@ vm_page_cache(vm_page_t m)
}
KASSERT((m->flags & PG_CACHED) == 0,
("vm_page_cache: page %p is already cached", m));
-cnt.v_tcached++;
+PCPU_INC(cnt.v_tcached);
/*
* Remove the page from the paging queues.
*/
-vm_pageq_remove(m);
+if (VM_PAGE_GETQUEUE(m) != PQ_NONE) {
+vm_page_lock_queues();
+vm_pageq_remove(m);
+vm_page_unlock_queues();
+}
/*
* Remove the page from the object's collection of resident
@ -1786,7 +1784,7 @@ vm_page_cache(vm_page_t m)
* Insert the page into the object's collection of cached pages
* and the physical memory allocator's cache/free page queues.
*/
-vm_page_flag_clear(m, PG_ZERO);
+m->flags &= ~PG_ZERO;
mtx_lock(&vm_page_queue_free_mtx);
m->flags |= PG_CACHED;
cnt.v_cache_count++;
@ -2314,9 +2312,7 @@ vm_page_cowsetup(vm_page_t m)
if (m->cow == USHRT_MAX - 1)
return (EBUSY);
m->cow++;
-vm_page_lock_queues();
pmap_remove_write(m);
-vm_page_unlock_queues();
return (0);
}

View file

@ -490,11 +490,7 @@ vm_pageout_flush(vm_page_t *mc, int count, int flags)
("vm_pageout_flush: partially invalid page %p index %d/%d",
mc[i], i, count));
vm_page_io_start(mc[i]);
-vm_page_lock(mc[i]);
-vm_page_lock_queues();
pmap_remove_write(mc[i]);
-vm_page_unlock(mc[i]);
-vm_page_unlock_queues();
}
vm_object_pip_add(object, count);
@ -503,8 +499,6 @@ vm_pageout_flush(vm_page_t *mc, int count, int flags)
for (i = 0; i < count; i++) {
vm_page_t mt = mc[i];
-vm_page_lock(mt);
-vm_page_lock_queues();
KASSERT(pageout_status[i] == VM_PAGER_PEND ||
(mt->flags & PG_WRITEABLE) == 0,
("vm_pageout_flush: page %p is not write protected", mt));
@ -528,7 +522,9 @@ vm_pageout_flush(vm_page_t *mc, int count, int flags)
* page so it doesn't clog the inactive list. (We
* will try paging out it again later).
*/
+vm_page_lock(mt);
vm_page_activate(mt);
+vm_page_unlock(mt);
break;
case VM_PAGER_AGAIN:
break;
@ -543,13 +539,14 @@ vm_pageout_flush(vm_page_t *mc, int count, int flags)
if (pageout_status[i] != VM_PAGER_PEND) {
vm_object_pip_wakeup(object);
vm_page_io_finish(mt);
-if (vm_page_count_severe())
+if (vm_page_count_severe()) {
+vm_page_lock(mt);
vm_page_try_to_cache(mt);
+vm_page_unlock(mt);
+}
}
-vm_page_unlock_queues();
-vm_page_unlock(mt);
}
-return numpagedout;
+return (numpagedout);
}
#if !defined(NO_SWAPPING)