- Split UMA_ZFLAG_OFFPAGE into UMA_ZFLAG_OFFPAGE and UMA_ZFLAG_HASH.
- Remove all instances of the mallochash.
- Stash the slab pointer in the vm page's object pointer when allocating from the kmem_obj.
- Use the overloaded object pointer to find slabs for malloced memory.
parent d0505643e5
commit 99571dc345
6 changed files with 111 additions and 126 deletions
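Taken together, these changes replace a single mutex-protected lookup table (mallochash) with per-page state: the vm_page backing each malloc'ed page has its object pointer overloaded to point at the UMA slab, and the new PG_SLAB flag marks the overload. A minimal sketch of the resulting lookup path, mirroring the vtoslab() helper the diff adds to sys/vm/uma_int.h (the wrapper name addr_to_slab is illustrative, not part of the commit):

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_page.h>
#include <vm/uma_int.h>

/*
 * Sketch: how free(9) locates the slab for a malloc'ed address after
 * this change.  The address is masked down to its page base, the
 * backing vm_page is found through pmap_kextract(), and the page's
 * overloaded object pointer (tagged with PG_SLAB) is the slab pointer.
 * Unlike the old hash_sfind(mallochash, ...) path, no global lock is
 * taken.
 */
static uma_slab_t
addr_to_slab(void *addr)
{
    vm_page_t p;

    p = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)addr & ~UMA_SLAB_MASK));
    if (p->flags & PG_SLAB)
        return ((uma_slab_t)p->object);
    return (NULL);
}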
sys/kern/kern_malloc.c

@@ -48,11 +48,12 @@
 #include <sys/sysctl.h>
 
 #include <vm/vm.h>
+#include <vm/pmap.h>
 #include <vm/vm_param.h>
 #include <vm/vm_kern.h>
 #include <vm/vm_extern.h>
-#include <vm/pmap.h>
 #include <vm/vm_map.h>
+#include <vm/vm_page.h>
 #include <vm/uma.h>
 #include <vm/uma_int.h>
 #include <vm/uma_dbg.h>
@@ -120,8 +121,7 @@ struct {
 u_int vm_kmem_size;
 
 /*
- * The malloc_mtx protects the kmemstatistics linked list as well as the
- * mallochash.
+ * The malloc_mtx protects the kmemstatistics linked list.
 */
 
 struct mtx malloc_mtx;
@@ -206,10 +206,9 @@ free(addr, type)
     void *addr;
     struct malloc_type *type;
 {
-    uma_slab_t slab;
-    void *mem;
-    u_long size;
     register struct malloc_type *ksp = type;
+    uma_slab_t slab;
+    u_long size;
 
     /* free(NULL, ...) does nothing */
     if (addr == NULL)
@@ -217,14 +216,12 @@ free(addr, type)
 
     size = 0;
 
-    mem = (void *)((u_long)addr & (~UMA_SLAB_MASK));
-    mtx_lock(&malloc_mtx);
-    slab = hash_sfind(mallochash, mem);
-    mtx_unlock(&malloc_mtx);
+    slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK));
 
     if (slab == NULL)
         panic("free: address %p(%p) has not been allocated.\n",
-            addr, mem);
+            addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));
 
+
     if (!(slab->us_flags & UMA_SLAB_MALLOC)) {
 #ifdef INVARIANTS
@@ -275,10 +272,7 @@ realloc(addr, size, type, flags)
     if (addr == NULL)
         return (malloc(size, type, flags));
 
-    mtx_lock(&malloc_mtx);
-    slab = hash_sfind(mallochash,
-        (void *)((u_long)addr & ~(UMA_SLAB_MASK)));
-    mtx_unlock(&malloc_mtx);
+    slab = vtoslab((vm_offset_t)addr & ~(UMA_SLAB_MASK));
 
     /* Sanity check */
     KASSERT(slab != NULL,
@@ -333,10 +327,6 @@ kmeminit(dummy)
     u_int8_t indx;
     u_long npg;
     u_long mem_size;
-    void *hashmem;
-    u_long hashsize;
-    int highbit;
-    int bits;
     int i;
 
     mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);
@@ -392,21 +382,7 @@ kmeminit(dummy)
         (vm_offset_t *)&kmemlimit, (vm_size_t)(npg * PAGE_SIZE));
     kmem_map->system_map = 1;
 
-    hashsize = npg * sizeof(void *);
-
-    highbit = 0;
-    bits = 0;
-    /* The hash size must be a power of two */
-    for (i = 0; i < 8 * sizeof(hashsize); i++)
-        if (hashsize & (1 << i)) {
-            highbit = i;
-            bits++;
-        }
-    if (bits > 1)
-        hashsize = 1 << (highbit);
-
-    hashmem = (void *)kmem_alloc(kernel_map, (vm_size_t)hashsize);
-    uma_startup2(hashmem, hashsize / sizeof(void *));
+    uma_startup2();
 
     for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
         int size = kmemzones[indx].kz_size;
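The block deleted above sized the malloc hash at one pointer per kmem page and then rounded that down to a power of two by scanning for the highest set bit; with the mallochash gone, uma_startup2() no longer takes arguments and the computation disappears. For reference, a standalone equivalent of the removed rounding logic (function name is mine):

/* Round n down to a power of two, as the deleted kmeminit() code did. */
static u_long
round_down_pow2(u_long n)
{
    int highbit = 0, bits = 0, i;

    for (i = 0; i < (int)(8 * sizeof(n)); i++)
        if (n & (1UL << i)) {
            highbit = i;
            bits++;
        }
    if (bits > 1)   /* more than one bit set: not a power of two */
        n = 1UL << highbit;
    return (n);
}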
sys/vm/uma.h (17 lines changed)
@@ -173,7 +173,14 @@ uma_zone_t uma_zcreate(char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
 #define UMA_ZONE_MALLOC   0x0010 /* For use by malloc(9) only! */
 #define UMA_ZONE_NOFREE   0x0020 /* Do not free slabs of this type! */
 #define UMA_ZONE_MTXCLASS 0x0040 /* Create a new lock class */
-#define UMA_ZONE_VM       0x0080 /* Used for internal vm datastructures */
+#define UMA_ZONE_VM       0x0080 /*
+                                  * Used for internal vm datastructures
+                                  * only.
+                                  */
+#define UMA_ZONE_HASH     0x0100 /*
+                                  * Use a hash table instead of caching
+                                  * information in the vm_page.
+                                  */
 
 /* Definitions for align */
 #define UMA_ALIGN_PTR (sizeof(void *) - 1) /* Alignment fit for ptr */
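UMA_ZONE_HASH gives ordinary zones the per-zone hash lookup, while malloc(9) zones (UMA_ZONE_MALLOC) take the vm_page route. A hypothetical caller passing the new flag; the struct, the zone name, and the tail of the uma_zcreate() argument list (init/fini, align, flags) are assumptions, since the prototype is truncated in the hunk header above:

#include <vm/uma.h>

struct foodata {
    int fd_value;
};

static uma_zone_t foodata_zone;

static void
foodata_zone_init(void)
{
    /*
     * UMA_ZONE_HASH: track this zone's off-page slabs in a per-zone
     * hash table instead of caching the slab pointer in the vm_page.
     */
    foodata_zone = uma_zcreate("foodata", sizeof(struct foodata),
        NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_HASH);
}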
@@ -309,18 +316,16 @@ void uma_startup(void *bootmem);
  * be called when kva is ready for normal allocs.
  *
  * Arguments:
- *	hash	An area of memory that will become the malloc hash
- *	elems	The number of elements in this array
+ *	None
  *
  * Returns:
  *	Nothing
  *
  * Discussion:
- *	uma_startup2 is called by kmeminit() to prepare the malloc
- *	hash bucket, and enable use of uma for malloc ops.
+ *	uma_startup2 is called by kmeminit() to enable use of uma for malloc.
  */
 
-void uma_startup2(void *hash, u_long elems);
+void uma_startup2(void);
 
 /*
  * Reclaims unused memory for all zones
sys/vm/uma_core.c
@@ -145,14 +145,6 @@ struct uma_zctor_args {
     u_int16_t flags;
 };
 
-/*
- * This is the malloc hash table which is used to find the zone that a
- * malloc allocation came from. It is not currently resizeable.  The
- * memory for the actual hash bucket is allocated in kmeminit.
- */
-struct uma_hash mhash;
-struct uma_hash *mallochash = &mhash;
-
 /* Prototypes.. */
 
 static void *obj_alloc(uma_zone_t, int, u_int8_t *, int);
@@ -283,35 +275,32 @@ zone_timeout(uma_zone_t zone)
      * may be a little aggressive.  Should I allow for two collisions max?
      */
 
-    if ((zone->uz_flags & UMA_ZFLAG_OFFPAGE) &&
-        !(zone->uz_flags & UMA_ZFLAG_MALLOC)) {
-        if (zone->uz_pages / zone->uz_ppera
-            >= zone->uz_hash.uh_hashsize) {
+    if (zone->uz_flags & UMA_ZFLAG_HASH &&
+        zone->uz_pages / zone->uz_ppera >= zone->uz_hash.uh_hashsize) {
         struct uma_hash newhash;
         struct uma_hash oldhash;
         int ret;
 
         /*
          * This is so involved because allocating and freeing
          * while the zone lock is held will lead to deadlock.
          * I have to do everything in stages and check for
          * races.
          */
         newhash = zone->uz_hash;
         ZONE_UNLOCK(zone);
         ret = hash_alloc(&newhash);
         ZONE_LOCK(zone);
         if (ret) {
             if (hash_expand(&zone->uz_hash, &newhash)) {
                 oldhash = zone->uz_hash;
                 zone->uz_hash = newhash;
             } else
                 oldhash = newhash;
 
             ZONE_UNLOCK(zone);
             hash_free(&oldhash);
             ZONE_LOCK(zone);
         }
-        }
     }
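The staging above deserves a note: hash_alloc() can allocate, and hence sleep, so it must run with the zone unlocked, and by the time the lock is retaken another thread may already have grown the table. hash_expand() succeeds only for the winner; the losing table is freed, again without the zone lock. A condensed restatement (helper name is mine; the calls are the ones in the diff):

/* Condensed restatement of the expansion protocol in zone_timeout(). */
static void
zone_hash_grow(uma_zone_t zone)
{
    struct uma_hash newhash, oldhash;

    newhash = zone->uz_hash;        /* snapshot while locked */
    ZONE_UNLOCK(zone);              /* hash_alloc() may sleep */
    if (hash_alloc(&newhash)) {
        ZONE_LOCK(zone);
        if (hash_expand(&zone->uz_hash, &newhash)) {
            oldhash = zone->uz_hash;    /* we won: install the new table */
            zone->uz_hash = newhash;
        } else
            oldhash = newhash;          /* lost the race: discard ours */
        ZONE_UNLOCK(zone);
        hash_free(&oldhash);            /* free the loser unlocked */
    }
    ZONE_LOCK(zone);                /* caller expects the lock held */
}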
@@ -479,13 +468,8 @@ bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
          * hold them.  This will go away when free() gets a size passed
          * to it.
          */
-        if (mzone) {
-            mtx_lock(&malloc_mtx);
-            slab = hash_sfind(mallochash,
-                (u_int8_t *)((unsigned long)item &
-                (~UMA_SLAB_MASK)));
-            mtx_unlock(&malloc_mtx);
-        }
+        if (mzone)
+            slab = vtoslab((vm_offset_t)item & (~UMA_SLAB_MASK));
         uma_zfree_internal(zone, item, slab, 1);
     }
 }
@@ -622,13 +606,7 @@ zone_drain(uma_zone_t zone)
         zone->uz_pages -= zone->uz_ppera;
         zone->uz_free -= zone->uz_ipers;
 
-        if (zone->uz_flags & UMA_ZFLAG_MALLOC) {
-            mtx_lock(&malloc_mtx);
-            UMA_HASH_REMOVE(mallochash, slab, slab->us_data);
-            mtx_unlock(&malloc_mtx);
-        }
-        if (zone->uz_flags & UMA_ZFLAG_OFFPAGE &&
-            !(zone->uz_flags & UMA_ZFLAG_MALLOC))
+        if (zone->uz_flags & UMA_ZFLAG_HASH)
             UMA_HASH_REMOVE(&zone->uz_hash, slab, slab->us_data);
 
         SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);
@@ -648,9 +626,13 @@ finished:
             zone->uz_size);
         flags = slab->us_flags;
         mem = slab->us_data;
-        if (zone->uz_flags & UMA_ZFLAG_OFFPAGE) {
+
+        if (zone->uz_flags & UMA_ZFLAG_OFFPAGE)
             uma_zfree_internal(slabzone, slab, NULL, 0);
-        }
+        if (zone->uz_flags & UMA_ZFLAG_MALLOC)
+            for (i = 0; i < zone->uz_ppera; i++)
+                vsetobj((vm_offset_t)mem + (i * PAGE_SIZE),
+                    kmem_object);
 #ifdef UMA_DEBUG
         printf("%s: Returning %d bytes.\n",
             zone->uz_name, UMA_SLAB_SIZE * zone->uz_ppera);
@@ -732,19 +714,12 @@ slab_zalloc(uma_zone_t zone, int wait)
     }
 
     /* Point the slab into the allocated memory */
-    if (!(zone->uz_flags & UMA_ZFLAG_OFFPAGE)) {
+    if (!(zone->uz_flags & UMA_ZFLAG_OFFPAGE))
         slab = (uma_slab_t )(mem + zone->uz_pgoff);
-    }
 
-    if (zone->uz_flags & UMA_ZFLAG_MALLOC) {
-#ifdef UMA_DEBUG
-        printf("Inserting %p into malloc hash from slab %p\n",
-            mem, slab);
-#endif
-        mtx_lock(&malloc_mtx);
-        UMA_HASH_INSERT(mallochash, slab, mem);
-        mtx_unlock(&malloc_mtx);
-    }
+    if (zone->uz_flags & UMA_ZFLAG_MALLOC)
+        for (i = 0; i < zone->uz_ppera; i++)
+            vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);
 
     slab->us_zone = zone;
     slab->us_data = mem;
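Note that the new loop tags every page of a multi-page slab, not just the first: free(9) only knows the pointer it was handed, and masking with UMA_SLAB_MASK lands on whichever page contains it. A hypothetical assertion spelling out the invariant the loop establishes:

/*
 * Hypothetical invariant check: after the vsetslab() loop above, every
 * page of a UMA_ZFLAG_MALLOC slab maps back to that slab.
 */
static void
assert_slab_tagged(uma_slab_t slab, u_int8_t *mem, int ppera)
{
    int i;

    for (i = 0; i < ppera; i++)
        KASSERT(vtoslab((vm_offset_t)mem + (i * PAGE_SIZE)) == slab,
            ("slab_zalloc: page %d of slab %p not tagged", i, slab));
}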
@@ -778,8 +753,7 @@ slab_zalloc(uma_zone_t zone, int wait)
         zone->uz_size);
     ZONE_LOCK(zone);
 
-    if ((zone->uz_flags & (UMA_ZFLAG_OFFPAGE|UMA_ZFLAG_MALLOC)) ==
-        UMA_ZFLAG_OFFPAGE)
+    if (zone->uz_flags & UMA_ZFLAG_HASH)
         UMA_HASH_INSERT(&zone->uz_hash, slab, mem);
 
     zone->uz_pages += zone->uz_ppera;
@@ -936,6 +910,8 @@ zone_small_init(uma_zone_t zone)
     ipers = UMA_SLAB_SIZE / zone->uz_rsize;
     if (ipers > zone->uz_ipers) {
         zone->uz_flags |= UMA_ZFLAG_OFFPAGE;
+        if ((zone->uz_flags & UMA_ZFLAG_MALLOC) == 0)
+            zone->uz_flags |= UMA_ZFLAG_HASH;
         zone->uz_ipers = ipers;
     }
 }
@@ -968,6 +944,9 @@ zone_large_init(uma_zone_t zone)
     zone->uz_ipers = 1;
 
     zone->uz_flags |= UMA_ZFLAG_OFFPAGE;
+    if ((zone->uz_flags & UMA_ZFLAG_MALLOC) == 0)
+        zone->uz_flags |= UMA_ZFLAG_HASH;
+
     zone->uz_rsize = zone->uz_size;
 }
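Both initializers encode the same rule: an off-page zone needs a way back from an item's address to its slab, and the two mechanisms are mutually exclusive. The dispatch this sets up, mirroring uma_dbg_getslab() later in the diff (the helper name is hypothetical, and the hash branch assumes the zone lock is held):

static uma_slab_t
offpage_item_to_slab(uma_zone_t zone, u_int8_t *mem)
{
    if (zone->uz_flags & UMA_ZFLAG_MALLOC)      /* malloc(9) zones */
        return (vtoslab((vm_offset_t)mem));
    return (hash_sfind(&zone->uz_hash, mem));   /* UMA_ZFLAG_HASH zones */
}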
@@ -1073,11 +1052,11 @@ zone_ctor(void *mem, int size, void *udata)
             zone->uz_size);
             panic("UMA slab won't fit.\n");
         }
-    } else {
-        hash_alloc(&zone->uz_hash);
-        zone->uz_pgoff = 0;
-    }
+    }
+
+    if (zone->uz_flags & UMA_ZFLAG_HASH)
+        hash_alloc(&zone->uz_hash);
 
 #ifdef UMA_DEBUG
     printf("%s(%p) size = %d ipers = %d ppera = %d pgoff = %d\n",
         zone->uz_name, zone,
@@ -1253,12 +1232,8 @@ uma_startup(void *bootmem)
 
 /* see uma.h */
 void
-uma_startup2(void *hashmem, u_long elems)
+uma_startup2(void)
 {
-    bzero(hashmem, elems * sizeof(void *));
-    mallochash->uh_slab_hash = hashmem;
-    mallochash->uh_hashsize = elems;
-    mallochash->uh_hashmask = elems - 1;
     booted = 1;
     bucket_enable();
 #ifdef UMA_DEBUG
@@ -1803,7 +1778,7 @@ uma_zfree_internal(uma_zone_t zone, void *item, void *udata, int skip)
 
     if (!(zone->uz_flags & UMA_ZFLAG_MALLOC)) {
         mem = (u_int8_t *)((unsigned long)item & (~UMA_SLAB_MASK));
-        if (zone->uz_flags & UMA_ZFLAG_OFFPAGE)
+        if (zone->uz_flags & UMA_ZFLAG_HASH)
             slab = hash_sfind(&zone->uz_hash, mem);
         else {
             mem += zone->uz_pgoff;
@@ -2001,12 +1976,10 @@ uma_large_malloc(int size, int wait)
 
     mem = page_alloc(NULL, size, &flags, wait);
     if (mem) {
+        vsetslab((vm_offset_t)mem, slab);
         slab->us_data = mem;
         slab->us_flags = flags | UMA_SLAB_MALLOC;
         slab->us_size = size;
-        mtx_lock(&malloc_mtx);
-        UMA_HASH_INSERT(mallochash, slab, mem);
-        mtx_unlock(&malloc_mtx);
     } else {
         uma_zfree_internal(slabzone, slab, NULL, 0);
     }
@@ -2018,9 +1991,7 @@ uma_large_malloc(int size, int wait)
 void
 uma_large_free(uma_slab_t slab)
 {
-    mtx_lock(&malloc_mtx);
-    UMA_HASH_REMOVE(mallochash, slab, slab->us_data);
-    mtx_unlock(&malloc_mtx);
+    vsetobj((vm_offset_t)slab->us_data, kmem_object);
     page_free(slab->us_data, slab->us_size, slab->us_flags);
     uma_zfree_internal(slabzone, slab, NULL, 0);
 }
sys/vm/uma_dbg.c
@@ -42,6 +42,9 @@
 #include <sys/mutex.h>
 #include <sys/malloc.h>
 
+#include <vm/vm.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
 #include <vm/uma.h>
 #include <vm/uma_int.h>
 #include <vm/uma_dbg.h>
@@ -194,10 +197,8 @@ uma_dbg_getslab(uma_zone_t zone, void *item)
 
     mem = (u_int8_t *)((unsigned long)item & (~UMA_SLAB_MASK));
     if (zone->uz_flags & UMA_ZFLAG_MALLOC) {
-        mtx_lock(&malloc_mtx);
-        slab = hash_sfind(mallochash, mem);
-        mtx_unlock(&malloc_mtx);
-    } else if (zone->uz_flags & UMA_ZFLAG_OFFPAGE) {
+        slab = vtoslab((vm_offset_t)mem);
+    } else if (zone->uz_flags & UMA_ZFLAG_HASH) {
         ZONE_LOCK(zone);
         slab = hash_sfind(&zone->uz_hash, mem);
         ZONE_UNLOCK(zone);
sys/vm/uma_int.h
@@ -103,8 +103,6 @@
 #ifndef VM_UMA_INT_H
 #define VM_UMA_INT_H
 
-#include <sys/mutex.h>
-
 #define UMA_SLAB_SIZE  PAGE_SIZE       /* How big are our slabs? */
 #define UMA_SLAB_MASK  (PAGE_SIZE - 1) /* Mask to get back to the page */
 #define UMA_SLAB_SHIFT PAGE_SHIFT      /* Number of bits PAGE_MASK */
@@ -175,8 +173,6 @@ struct uma_hash {
     int uh_hashmask;    /* Mask used during hashing */
 };
 
-extern struct uma_hash *mallochash;
-
 /*
  * Structures for per cpu queues.
  */
@@ -274,6 +270,7 @@ struct uma_zone {
 #define UMA_ZFLAG_NOFREE      0x0010 /* Don't free data from this zone */
 #define UMA_ZFLAG_FULL        0x0020 /* This zone reached uz_maxpages */
 #define UMA_ZFLAG_BUCKETCACHE 0x0040 /* Only allocate buckets from cache */
+#define UMA_ZFLAG_HASH        0x0080 /* Look up slab via hash */
 
 /* This lives in uflags */
 #define UMA_ZONE_INTERNAL     0x1000 /* Internal zone for uflags */
@@ -346,5 +343,39 @@ hash_sfind(struct uma_hash *hash, u_int8_t *data)
     return (NULL);
 }
 
+static __inline uma_slab_t
+vtoslab(vm_offset_t va)
+{
+    vm_page_t p;
+    uma_slab_t slab;
+
+    p = PHYS_TO_VM_PAGE(pmap_kextract(va));
+    slab = (uma_slab_t )p->object;
+
+    if (p->flags & PG_SLAB)
+        return (slab);
+    else
+        return (NULL);
+}
+
+static __inline void
+vsetslab(vm_offset_t va, uma_slab_t slab)
+{
+    vm_page_t p;
+
+    p = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)va));
+    p->object = (vm_object_t)slab;
+    p->flags |= PG_SLAB;
+}
+
+static __inline void
+vsetobj(vm_offset_t va, vm_object_t obj)
+{
+    vm_page_t p;
+
+    p = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)va));
+    p->object = obj;
+    p->flags &= ~PG_SLAB;
+}
+
 #endif /* VM_UMA_INT_H */
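These three helpers are used in matched pairs elsewhere in the diff: vsetslab() when pages are handed to malloc(9), vsetobj() to restore the page's real object before the pages go back. A simplified mirror of the uma_large_malloc()/uma_large_free() hunks earlier in the diff (error handling elided; page_alloc() and page_free() are uma_core.c internals, so this is a sketch, not drop-in code):

static void *
large_alloc(uma_slab_t slab, int size, int wait)
{
    u_int8_t *mem;
    u_int8_t flags;

    mem = page_alloc(NULL, size, &flags, wait);
    if (mem != NULL)
        vsetslab((vm_offset_t)mem, slab);   /* tag: object = slab, PG_SLAB set */
    return (mem);
}

static void
large_free(uma_slab_t slab)
{
    vsetobj((vm_offset_t)slab->us_data, kmem_object);   /* untag first */
    page_free(slab->us_data, slab->us_size, slab->us_flags);
}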
sys/vm/vm_page.h
@@ -244,6 +244,7 @@ extern struct mtx vm_page_queue_free_mtx;
 #define PG_NOSYNC    0x0400 /* do not collect for syncer */
 #define PG_UNMANAGED 0x0800 /* No PV management for page */
 #define PG_MARKER    0x1000 /* special queue marker page */
+#define PG_SLAB      0x2000 /* object pointer is actually a slab */
 
 /*
  * Misc constants.