Now that pmap_remove_all() is exported by our pmap implementations, use it directly.
This commit is contained in:
Alan Cox 2002-11-16 07:44:25 +00:00
parent 9ed358fddc
commit 4fec79bef8
7 changed files with 22 additions and 22 deletions

View file

@ -771,7 +771,7 @@ exec_map_first_page(imgp)
(ma[0]->valid == 0)) {
if (ma[0]) {
vm_page_lock_queues();
pmap_page_protect(ma[0], VM_PROT_NONE);
pmap_remove_all(ma[0]);
vm_page_free(ma[0]);
vm_page_unlock_queues();
}

View file

@ -1504,7 +1504,7 @@ vfs_vmio_release(bp)
if ((bp->b_flags & B_ASYNC) == 0 && !m->valid &&
m->hold_count == 0) {
vm_page_busy(m);
pmap_page_protect(m, VM_PROT_NONE);
pmap_remove_all(m);
vm_page_free(m);
} else if (bp->b_flags & B_DIRECT) {
vm_page_try_to_free(m);
@ -3268,7 +3268,7 @@ retry:
* It may not work properly with small-block devices.
* We need to find a better way.
*/
pmap_page_protect(m, VM_PROT_NONE);
pmap_remove_all(m);
if (clear_modify)
vfs_page_set_valid(bp, foff, i, m);
else if (m->valid == VM_PAGE_BITS_ALL &&

View file

@ -474,7 +474,7 @@ readrest:
if (mt->dirty == 0)
vm_page_test_dirty(mt);
if (mt->dirty) {
pmap_page_protect(mt, VM_PROT_NONE);
pmap_remove_all(mt);
vm_page_deactivate(mt);
} else {
vm_page_cache(mt);
@ -700,7 +700,7 @@ readrest:
* get rid of the unnecessary page
*/
vm_page_lock_queues();
pmap_page_protect(fs.first_m, VM_PROT_NONE);
pmap_remove_all(fs.first_m);
vm_page_free(fs.first_m);
vm_page_unlock_queues();
fs.first_m = NULL;

View file

@ -1415,7 +1415,7 @@ vm_object_backing_scan(vm_object_t object, int op)
* can simply destroy it.
*/
vm_page_lock_queues();
pmap_page_protect(p, VM_PROT_NONE);
pmap_remove_all(p);
vm_page_free(p);
vm_page_unlock_queues();
p = next;
@ -1435,7 +1435,7 @@ vm_object_backing_scan(vm_object_t object, int op)
* Leave the parent's page alone
*/
vm_page_lock_queues();
pmap_page_protect(p, VM_PROT_NONE);
pmap_remove_all(p);
vm_page_free(p);
vm_page_unlock_queues();
p = next;
@ -1722,7 +1722,7 @@ again:
next = TAILQ_NEXT(p, listq);
if (all || ((start <= p->pindex) && (p->pindex < end))) {
if (p->wire_count != 0) {
pmap_page_protect(p, VM_PROT_NONE);
pmap_remove_all(p);
if (!clean_only)
p->valid = 0;
continue;
@ -1741,7 +1741,7 @@ again:
continue;
}
vm_page_busy(p);
pmap_page_protect(p, VM_PROT_NONE);
pmap_remove_all(p);
vm_page_free(p);
}
}
@ -1749,7 +1749,7 @@ again:
while (size > 0) {
if ((p = vm_page_lookup(object, start)) != NULL) {
if (p->wire_count != 0) {
pmap_page_protect(p, VM_PROT_NONE);
pmap_remove_all(p);
if (!clean_only)
p->valid = 0;
start += 1;
@ -1773,7 +1773,7 @@ again:
}
}
vm_page_busy(p);
pmap_page_protect(p, VM_PROT_NONE);
pmap_remove_all(p);
vm_page_free(p);
}
start += 1;
@ -1944,7 +1944,7 @@ vm_freeze_copyopts(vm_object_t object, vm_pindex_t froma, vm_pindex_t toa)
vm_page_unlock_queues();
}
pmap_page_protect(m_in, VM_PROT_NONE);
pmap_remove_all(m_in);
pmap_copy_page(m_in, m_out);
m_out->valid = m_in->valid;
vm_page_dirty(m_out);

View file

@ -382,7 +382,7 @@ vm_page_protect(vm_page_t mem, int prot)
{
if (prot == VM_PROT_NONE) {
if (pmap_page_is_mapped(mem) || (mem->flags & PG_WRITEABLE)) {
pmap_page_protect(mem, VM_PROT_NONE);
pmap_remove_all(mem);
vm_page_flag_clear(mem, PG_WRITEABLE);
}
} else if ((prot == VM_PROT_READ) && (mem->flags & PG_WRITEABLE)) {
@ -878,7 +878,7 @@ loop:
}
KASSERT(m->dirty == 0, ("Found dirty cache page %p", m));
vm_page_busy(m);
pmap_page_protect(m, VM_PROT_NONE);
pmap_remove_all(m);
vm_page_free(m);
vm_page_unlock_queues();
goto loop;
@ -1384,7 +1384,7 @@ vm_page_try_to_free(vm_page_t m)
if (m->dirty)
return (0);
vm_page_busy(m);
pmap_page_protect(m, VM_PROT_NONE);
pmap_remove_all(m);
vm_page_free(m);
return (1);
}
@ -1413,7 +1413,7 @@ vm_page_cache(vm_page_t m)
* Remove all pmaps and indicate that the page is not
* writeable or mapped.
*/
pmap_page_protect(m, VM_PROT_NONE);
pmap_remove_all(m);
if (m->dirty != 0) {
panic("vm_page_cache: caching a dirty page, pindex: %ld",
(long)m->pindex);

View file

@ -513,7 +513,7 @@ vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
if ((p->flags & PG_REFERENCED) == 0) {
p->act_count -= min(p->act_count, ACT_DECLINE);
if (!remove_mode && (vm_pageout_algorithm || (p->act_count == 0))) {
pmap_page_protect(p, VM_PROT_NONE);
pmap_remove_all(p);
vm_page_deactivate(p);
} else {
vm_pageq_requeue(p);
@ -526,7 +526,7 @@ vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
vm_pageq_requeue(p);
}
} else if (p->queue == PQ_INACTIVE) {
pmap_page_protect(p, VM_PROT_NONE);
pmap_remove_all(p);
}
p = next;
}
@ -619,7 +619,7 @@ vm_pageout_page_free(vm_page_t m) {
if (type == OBJT_SWAP || type == OBJT_DEFAULT)
vm_object_reference(object);
vm_page_busy(m);
pmap_page_protect(m, VM_PROT_NONE);
pmap_remove_all(m);
vm_page_free(m);
cnt.v_dfree++;
if (type == OBJT_SWAP || type == OBJT_DEFAULT)
@ -1073,7 +1073,7 @@ rescan0:
m->act_count == 0) {
page_shortage--;
if (m->object->ref_count == 0) {
pmap_page_protect(m, VM_PROT_NONE);
pmap_remove_all(m);
if (m->dirty == 0)
vm_page_cache(m);
else
@ -1308,7 +1308,7 @@ vm_pageout_page_stats()
* operations would be higher than the value
* of doing the operation.
*/
pmap_page_protect(m, VM_PROT_NONE);
pmap_remove_all(m);
vm_page_deactivate(m);
} else {
m->act_count -= min(m->act_count, ACT_DECLINE);

View file

@ -348,7 +348,7 @@ vnode_pager_setsize(vp, nsize)
* XXX should vm_pager_unmap_page() have
* dealt with this?
*/
pmap_page_protect(m, VM_PROT_NONE);
pmap_remove_all(m);
/*
* Clear out partial-page dirty bits. This