Clear PGA_WRITEABLE in moea_pvo_remove().

moea_pvo_remove() might remove the last mapping of a page, in which case
it is clearly no longer writeable.  This can happen via pmap_remove(),
or when a CoW fault removes the last mapping of the old page.

Reported and tested by:	bdragon
Reviewed by:	alc, bdragon, kib
MFC after:	1 week
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D22044
Commit: b4efea53e0 (parent 2981bc73c7)
Author: Mark Johnston
Date: 2019-10-16 15:50:12 +00:00

@ -2107,25 +2107,28 @@ moea_pvo_remove(struct pvo_entry *pvo, int pteidx)
if (pvo->pvo_vaddr & PVO_WIRED)
pvo->pvo_pmap->pm_stats.wired_count--;
/*
* Remove this PVO from the PV and pmap lists.
*/
LIST_REMOVE(pvo, pvo_vlink);
RB_REMOVE(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo);
/*
* Save the REF/CHG bits into their cache if the page is managed.
* Clear PGA_WRITEABLE if all mappings of the page have been removed.
*/
if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED) {
struct vm_page *pg;
struct vm_page *pg;
pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte.pte_lo & PTE_RPGN);
if (pg != NULL) {
moea_attr_save(pg, pvo->pvo_pte.pte.pte_lo &
(PTE_REF | PTE_CHG));
if (LIST_EMPTY(&pg->md.mdpg_pvoh))
vm_page_aflag_clear(pg, PGA_WRITEABLE);
}
}
/*
* Remove this PVO from the PV and pmap lists.
*/
LIST_REMOVE(pvo, pvo_vlink);
RB_REMOVE(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo);
/*
* Remove this from the overflow list and return it to the pool
* if we aren't going to reuse it.