Reduce traffic on vm_cnt.v_free_count

The variable is modified under the highly contended page free queue lock.
It unnecessarily shares a cacheline with purely read-only fields, and the
page allocation code re-reads it after the lock is dropped, making the
hold time longer.

Pad the variable just like the others and use the value found while the
lock was held instead of re-reading it after the unlock.
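
A rough userland sketch of the two ideas; every name in it (struct counters,
freecnt_adj, paging_needed, wakeup_thresh, alloc_one) is hypothetical and
only illustrates the pattern, not the actual kernel code:

#include <pthread.h>

#define CACHELINE_ALIGNED	__attribute__((aligned(64)))	/* assume 64-byte lines */

static struct counters {
	unsigned int reserved;		/* read-mostly; sharing a line is fine */
	unsigned int target;		/* read-mostly */
	unsigned int free_count CACHELINE_ALIGNED; /* written on every alloc/free */
} cnt;

static pthread_mutex_t free_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int wakeup_thresh = 128;

/* Adjust the counter and hand the updated value back to the caller. */
static unsigned int
freecnt_adj(int adj)
{

	return (cnt.free_count += adj);
}

static int
paging_needed(unsigned int free_count)
{

	return (free_count < wakeup_thresh);
}

static void
pagedaemon_wakeup(void)		/* stand-in so the sketch is self-contained */
{
}

void
alloc_one(void)
{
	unsigned int free_count;

	pthread_mutex_lock(&free_lock);
	/* ... take a page off the free queue here ... */
	free_count = freecnt_adj(-1);	/* capture the value under the lock */
	pthread_mutex_unlock(&free_lock);

	/* Use the captured value; do not re-read the contended counter. */
	if (paging_needed(free_count))
		pagedaemon_wakeup();
}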

Provides a modest speedup of roughly 1% in concurrent page faults.

Reviewed by:	kib, markj
Differential Revision:	https://reviews.freebsd.org/D12665
Mateusz Guzik	2017-10-13 21:54:34 +00:00
commit 1dbf52e7d9 (parent 30a33cefae)
3 changed files with 12 additions and 11 deletions

sys/sys/vmmeter.h

@@ -131,7 +131,6 @@ struct vmmeter {
 	u_int v_free_reserved;	/* (c) pages reserved for deadlock */
 	u_int v_free_target;	/* (c) pages desired free */
 	u_int v_free_min;	/* (c) pages desired free */
-	u_int v_free_count;	/* (f) pages free */
 	u_int v_inactive_target;	/* (c) pages desired inactive */
 	u_int v_pageout_free_min;	/* (c) min pages reserved for kernel */
 	u_int v_interrupt_free_min;	/* (c) reserved pages for int code */
@@ -141,6 +140,7 @@ struct vmmeter {
 	u_int v_inactive_count VMMETER_ALIGNED;	/* (a) pages inactive */
 	u_int v_laundry_count VMMETER_ALIGNED;	/* (a) pages eligible for
						   laundering */
+	u_int v_free_count VMMETER_ALIGNED;	/* (a) pages free */
 };
 
 #endif /* _KERNEL || _WANT_VMMETER */
@@ -208,10 +208,10 @@ vm_paging_target(void)
  * Returns TRUE if the pagedaemon needs to be woken up.
  */
 static inline int
-vm_paging_needed(void)
+vm_paging_needed(u_int free_count)
 {
 
-	return (vm_cnt.v_free_count < vm_pageout_wakeup_thresh);
+	return (free_count < vm_pageout_wakeup_thresh);
 }
 
 /*

sys/vm/vm_page.c

@@ -1588,6 +1588,7 @@ vm_page_alloc_after(vm_object_t object, vm_pindex_t pindex, int req,
 {
 	vm_page_t m;
 	int flags, req_class;
+	u_int free_count;
 
 	KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) &&
 	    (object != NULL || (req & VM_ALLOC_SBUSY) == 0) &&
@@ -1655,7 +1656,7 @@ vm_page_alloc_after(vm_object_t object, vm_pindex_t pindex, int req,
 	 * At this point we had better have found a good page.
 	 */
 	KASSERT(m != NULL, ("missing page"));
-	vm_phys_freecnt_adj(m, -1);
+	free_count = vm_phys_freecnt_adj(m, -1);
 	mtx_unlock(&vm_page_queue_free_mtx);
 
 	vm_page_alloc_check(m);
@@ -1713,7 +1714,7 @@ vm_page_alloc_after(vm_object_t object, vm_pindex_t pindex, int req,
 	 * Don't wakeup too often - wakeup the pageout daemon when
 	 * we would be nearly out of memory.
 	 */
-	if (vm_paging_needed())
+	if (vm_paging_needed(free_count))
 		pagedaemon_wakeup();
 
 	return (m);
@@ -1899,7 +1900,7 @@ retry:
 		pmap_page_set_memattr(m, memattr);
 		pindex++;
 	}
-	if (vm_paging_needed())
+	if (vm_paging_needed(vm_cnt.v_free_count))
 		pagedaemon_wakeup();
 	return (m_ret);
 }
@@ -1948,7 +1949,7 @@ vm_page_t
 vm_page_alloc_freelist(int flind, int req)
 {
 	vm_page_t m;
-	u_int flags;
+	u_int flags, free_count;
 	int req_class;
 
 	req_class = req & VM_ALLOC_CLASS_MASK;
@@ -1980,7 +1981,7 @@ vm_page_alloc_freelist(int flind, int req)
 		mtx_unlock(&vm_page_queue_free_mtx);
 		return (NULL);
 	}
-	vm_phys_freecnt_adj(m, -1);
+	free_count = vm_phys_freecnt_adj(m, -1);
 	mtx_unlock(&vm_page_queue_free_mtx);
 
 	vm_page_alloc_check(m);
@@ -2002,7 +2003,7 @@ vm_page_alloc_freelist(int flind, int req)
 	}
 	/* Unmanaged pages don't use "act_count". */
 	m->oflags = VPO_UNMANAGED;
-	if (vm_paging_needed())
+	if (vm_paging_needed(free_count))
 		pagedaemon_wakeup();
 	return (m);
 }

sys/vm/vm_phys.h

@@ -112,13 +112,13 @@ vm_phys_domain(vm_page_t m)
 #endif
 }
 
-static inline void
+static inline u_int
 vm_phys_freecnt_adj(vm_page_t m, int adj)
 {
 
 	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
-	vm_cnt.v_free_count += adj;
 	vm_phys_domain(m)->vmd_free_count += adj;
+	return (vm_cnt.v_free_count += adj);
 }
 
 #endif	/* _KERNEL */