vm_object: Modify vm_object_allocate_anon() to return OBJT_SWAP objects
With this change, OBJT_DEFAULT objects are no longer allocated. Instead,
anonymous objects are always of type OBJT_SWAP and always have OBJ_SWAP set.
Modify the page fault handler to check the swap block radix tree in places
where it checked for objects of type OBJT_DEFAULT. In particular, there is no
need to invoke getpages for an OBJT_SWAP object with no swap blocks assigned.

Reviewed by:	alc, kib
Tested by:	pho
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D35785
parent 6226f8f254
commit 5d32157d4e
3 changed files with 27 additions and 9 deletions
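The heart of the change is a cheap test that replaces the old `object->type != OBJT_DEFAULT` comparisons: an anonymous object can only have pages held by the pager if swap space has actually been assigned to it, which is recorded in the object's swap-block radix tree. The standalone C sketch below models that decision in userspace; the struct, field, and helper names (`model_object`, `nswapblocks`, `needs_getpages`, `MODEL_OBJ_SWAP`) are simplified stand-ins for the kernel's `vm_object`, pctrie, and `OBJ_SWAP`, not the real kernel interfaces.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's OBJ_SWAP flag value. */
#define MODEL_OBJ_SWAP	0x0200

struct model_object {
	unsigned flags;		/* models object->flags */
	int nswapblocks;	/* models "is the swp_blks pctrie non-empty?" */
};

/*
 * Model of the test added as fault_object_needs_getpages(): a pager
 * lookup is only needed if the object is not a swap object, or if it
 * is a swap object that has at least one swap block assigned.
 */
static bool
needs_getpages(const struct model_object *obj)
{
	return ((obj->flags & MODEL_OBJ_SWAP) == 0 || obj->nswapblocks != 0);
}

int
main(void)
{
	struct model_object fresh_anon = { MODEL_OBJ_SWAP, 0 };
	struct model_object swapped_anon = { MODEL_OBJ_SWAP, 3 };

	/* A never-swapped anonymous object can be zero-filled directly. */
	printf("fresh anonymous object: %s\n",
	    needs_getpages(&fresh_anon) ? "call pager" : "zero-fill");
	/* Once swap blocks exist, the pager must be consulted. */
	printf("swapped anonymous object: %s\n",
	    needs_getpages(&swapped_anon) ? "call pager" : "zero-fill");
	return (0);
}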
sys/vm/vm_fault.c
@@ -85,6 +85,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/lock.h>
 #include <sys/mman.h>
 #include <sys/mutex.h>
+#include <sys/pctrie.h>
 #include <sys/proc.h>
 #include <sys/racct.h>
 #include <sys/refcount.h>
@@ -220,6 +221,21 @@ fault_page_free(vm_page_t *mp)
 	}
 }
 
+/*
+ * Return true if a vm_pager_get_pages() call is needed in order to check
+ * whether the pager might have a particular page, false if it can be determined
+ * immediately that the pager can not have a copy.  For swap objects, this can
+ * be checked quickly.
+ */
+static inline bool
+fault_object_needs_getpages(vm_object_t object)
+{
+	VM_OBJECT_ASSERT_LOCKED(object);
+
+	return ((object->flags & OBJ_SWAP) == 0 ||
+	    !pctrie_is_empty(&object->un_pager.swp.swp_blks));
+}
+
 static inline void
 unlock_map(struct faultstate *fs)
 {
@@ -1406,10 +1422,9 @@ vm_fault_object(struct faultstate *fs, int *behindp, int *aheadp)
 	/*
 	 * Page is not resident.  If the pager might contain the page
 	 * or this is the beginning of the search, allocate a new
-	 * page.  (Default objects are zero-fill, so there is no real
-	 * pager for them.)
+	 * page.
 	 */
-	if (fs->m == NULL && (fs->object->type != OBJT_DEFAULT ||
+	if (fs->m == NULL && (fault_object_needs_getpages(fs->object) ||
 	    fs->object == fs->first_object)) {
 		res = vm_fault_allocate(fs);
 		if (res != FAULT_CONTINUE)
@@ -1422,7 +1437,7 @@ vm_fault_object(struct faultstate *fs, int *behindp, int *aheadp)
 	 * object without dropping the lock to preserve atomicity of
 	 * shadow faults.
 	 */
-	if (fs->object->type != OBJT_DEFAULT) {
+	if (fault_object_needs_getpages(fs->object)) {
 		/*
 		 * At this point, we have either allocated a new page
 		 * or found an existing page that is only partially
@@ -1841,7 +1856,7 @@ vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
 		if (!obj_locked)
 			VM_OBJECT_RLOCK(lobject);
 		while ((m = vm_page_lookup(lobject, pindex)) == NULL &&
-		    lobject->type == OBJT_DEFAULT &&
+		    !fault_object_needs_getpages(lobject) &&
 		    (backing_object = lobject->backing_object) != NULL) {
			KASSERT((lobject->backing_object_offset & PAGE_MASK) ==
 			    0, ("vm_fault_prefault: unaligned object offset"));
sys/vm/vm_object.c
@@ -244,8 +244,10 @@ _vm_object_allocate(objtype_t type, vm_pindex_t size, u_short flags,
 
 	object->type = type;
 	object->flags = flags;
-	if ((flags & OBJ_SWAP) != 0)
+	if ((flags & OBJ_SWAP) != 0) {
 		pctrie_init(&object->un_pager.swp.swp_blks);
+		object->un_pager.swp.writemappings = 0;
+	}
 
 	/*
 	 * Ensure that swap_pager_swapoff() iteration over object_list
@@ -473,8 +475,8 @@ vm_object_allocate_anon(vm_pindex_t size, vm_object_t backing_object,
 	else
 		handle = backing_object;
 	object = uma_zalloc(obj_zone, M_WAITOK);
-	_vm_object_allocate(OBJT_DEFAULT, size, OBJ_ANON | OBJ_ONEMAPPING,
-	    object, handle);
+	_vm_object_allocate(OBJT_SWAP, size,
+	    OBJ_ANON | OBJ_ONEMAPPING | OBJ_SWAP, object, handle);
 	object->cred = cred;
 	object->charge = cred != NULL ? charge : 0;
 	return (object);
sys/vm/vm_object.h
@@ -202,7 +202,8 @@ struct vm_object {
 #define	OBJ_SIZEVNLOCK	0x0040		/* lock vnode to check obj size */
 #define	OBJ_PG_DTOR	0x0080		/* dont reset object, leave that for dtor */
 #define	OBJ_SHADOWLIST	0x0100		/* Object is on the shadow list. */
-#define	OBJ_SWAP	0x0200		/* object swaps */
+#define	OBJ_SWAP	0x0200		/* object swaps, type will be OBJT_SWAP
+					   or dynamically registered */
 #define	OBJ_SPLIT	0x0400		/* object is being split */
 #define	OBJ_COLLAPSING	0x0800		/* Parent of collapse. */
 #define	OBJ_COLORED	0x1000		/* pg_color is defined */