Compare commits

...

11 Commits

Author SHA1 Message Date
Ondrej Sury
be1555252e WIP 2021-02-26 08:33:12 +01:00
Ondřej Surý
25896dac11 Derp. Fix missing constant post rename 2021-02-26 08:20:23 +01:00
Ondřej Surý
bc658d9b7f Fixed a bug cause Rupert said to 2021-02-25 18:02:41 +01:00
Ondrej Sury
bfbbc617d9 WIP 2021-02-25 17:52:30 +01:00
Ondřej Surý
116f01260c WIP: Max the mempool lockless on the hotpath 2021-02-25 17:11:57 +01:00
Ondřej Surý
f916eaa72f WIP: Remove freemax from mempool 2021-02-25 16:28:53 +01:00
Ondřej Surý
96f125c433 Remove the fillcount from the isc_mempool API
Previously, the fillcount could be used to indicate how many elements
would be preallocated every time the mempool would be empty.  This would
result in bursty allocations when the mempool would be drained.

For smoother performance, we allocate the new pool items only as
needed.  In the future, we could consider changing the
isc_mempool_create() function to take an initial number of pre-allocated
items on the pool, so the bursty allocation happens only on the pool
creation.
2021-02-25 16:26:46 +01:00
Ondřej Surý
69e9eab61f Cleanup the isc_mempool_get() calls
Use the semantic patch to clean up the error handling from
isc_mempool_get() since it can't fail now and it will always return a
valid chunk of memory.
2021-02-25 16:26:06 +01:00
Ondřej Surý
1536ac3d4d Add semantic patch for never failing isc_mempool_get()
The isc_mempool_get() can never fail now, thus we add a semantic patch
for cleaning up the error paths from the calls where previously we had
to check if the return value was not NULL.
2021-02-25 16:26:06 +01:00
Ondřej Surý
1a7698a416 Remove the maximum allocation limit from the isc_mempool
The only place where a limit on the maximum number of allocated items
from the pool was used is the dns_dispatch, where we already have
different limits in place.

As an example the maximum number of buffers is guarded by:

    if (disp->mgr->buffers >= DNS_DISPATCH_MAXBUFFERS) {
            UNLOCK(&disp->mgr->buffer_lock);
            return (NULL);
    }

but then at the same time we were limiting the maximum number of items
we can get from the disp->bpool.

By removing the maximum allocation limit from the isc_mempool API, we
can simplify the logic in many places as the isc_mempool_get() would
never fail now and it would always return a chunk of memory.
2021-02-25 16:26:03 +01:00
Ondřej Surý
0e876a8dec Add mempool get/put tracking with AddressSanitizer
When AddressSanitizer is in use, disable the internal mempool
implementation and redirect the isc_mempool_get to isc_mem_get (and
similarly for isc_mempool_put).  This is the method recommended by the
AddressSanitizer authors for tracking allocations and deallocations,
instead of custom poison/unpoison code.
2021-02-25 16:24:27 +01:00
12 changed files with 137 additions and 349 deletions

View File

@@ -1428,12 +1428,6 @@ setup_libs(void) {
isc_mempool_create(mctx, COMMSIZE, &commctx);
isc_mempool_setname(commctx, "COMMPOOL");
/*
* 6 and 2 set as reasonable parameters for 3 or 4 nameserver
* systems.
*/
isc_mempool_setfreemax(commctx, 6);
isc_mempool_setfillcount(commctx, 2);
isc_mutex_init(&lookup_lock);
}
@@ -2434,9 +2428,6 @@ setup_lookup(dig_lookup_t *lookup) {
}
lookup->sendspace = isc_mempool_get(commctx);
if (lookup->sendspace == NULL) {
fatal("memory allocation failure");
}
result = dns_compress_init(&cctx, -1, mctx);
check_result(result, "dns_compress_init");

View File

@@ -360,18 +360,6 @@ plugin_register(const char *parameters, const void *cfg, const char *cfg_file,
CHECK(isc_ht_init(&inst->ht, mctx, 16));
isc_mutex_init(&inst->hlock);
/*
* Fill the mempool with 1K filter_aaaa state objects at
* a time; ideally after a single allocation, the mempool will
* have enough to handle all the simultaneous queries the system
* requires and it won't be necessary to allocate more.
*
* We don't set any limit on the number of free state objects
* so that they'll always be returned to the pool and not
* freed until the pool is destroyed on shutdown.
*/
isc_mempool_setfillcount(inst->datapool, 1024);
isc_mempool_setfreemax(inst->datapool, UINT_MAX);
isc_mutex_init(&inst->plock);
isc_mempool_associatelock(inst->datapool, &inst->plock);
@@ -516,9 +504,6 @@ client_state_create(const query_ctx_t *qctx, filter_instance_t *inst) {
isc_result_t result;
client_state = isc_mempool_get(inst->datapool);
if (client_state == NULL) {
return;
}
client_state->mode = NONE;
client_state->flags = 0;

View File

@@ -231,9 +231,6 @@ client_state_create(const query_ctx_t *qctx, async_instance_t *inst) {
isc_result_t result;
state = isc_mempool_get(inst->datapool);
if (state == NULL) {
return;
}
LOCK(&inst->hlock);
result = isc_ht_add(inst->ht, (const unsigned char *)&qctx->client,

View File

@@ -0,0 +1,41 @@
@@
statement S;
expression V;
@@
V = isc_mempool_get(...);
- if (V == NULL) S
@@
type T;
statement S;
expression V;
@@
V = (T *)isc_mempool_get(...);
- if (V == NULL) S
@@
statement S;
expression V;
@@
if (V == NULL) V = isc_mempool_get(...);
- if (V == NULL) S
@@
statement S1, S2;
expression V;
@@
V = isc_mempool_get(...);
- if (V == NULL) S1 else { S2 }
+ S2
@@
type T;
expression V, E1, E2;
@@
- V = (T)isc_mempool_get(E1, E2);
+ V = isc_mempool_get(E1, E2);

View File

@@ -77,9 +77,6 @@
#define ADB_STALE_MARGIN 1800
#endif /* ifndef ADB_STALE_MARGIN */
#define FREE_ITEMS 64 /*%< free count for memory pools */
#define FILL_COUNT 16 /*%< fill count for memory pools */
#define DNS_ADB_INVALIDBUCKET (-1) /*%< invalid bucket address */
#define DNS_ADB_MINADBSIZE (1024U * 1024U) /*%< 1 Megabyte */
@@ -1735,9 +1732,6 @@ new_adbname(dns_adb_t *adb, const dns_name_t *dnsname) {
dns_adbname_t *name;
name = isc_mempool_get(adb->nmp);
if (name == NULL) {
return (NULL);
}
dns_name_init(&name->name, NULL);
dns_name_dup(dnsname, adb->mctx, &name->name);
@@ -1807,9 +1801,6 @@ new_adbnamehook(dns_adb_t *adb, dns_adbentry_t *entry) {
dns_adbnamehook_t *nh;
nh = isc_mempool_get(adb->nhmp);
if (nh == NULL) {
return (NULL);
}
nh->magic = DNS_ADBNAMEHOOK_MAGIC;
nh->entry = entry;
@@ -1839,9 +1830,6 @@ new_adblameinfo(dns_adb_t *adb, const dns_name_t *qname,
dns_adblameinfo_t *li;
li = isc_mempool_get(adb->limp);
if (li == NULL) {
return (NULL);
}
dns_name_init(&li->qname, NULL);
dns_name_dup(qname, adb->mctx, &li->qname);
@@ -1875,9 +1863,6 @@ new_adbentry(dns_adb_t *adb) {
dns_adbentry_t *e;
e = isc_mempool_get(adb->emp);
if (e == NULL) {
return (NULL);
}
e->magic = DNS_ADBENTRY_MAGIC;
e->lock_bucket = DNS_ADB_INVALIDBUCKET;
@@ -1956,9 +1941,6 @@ new_adbfind(dns_adb_t *adb) {
dns_adbfind_t *h;
h = isc_mempool_get(adb->ahmp);
if (h == NULL) {
return (NULL);
}
/*
* Public members.
@@ -1994,9 +1976,6 @@ new_adbfetch(dns_adb_t *adb) {
dns_adbfetch_t *f;
f = isc_mempool_get(adb->afmp);
if (f == NULL) {
return (NULL);
}
f->magic = 0;
f->fetch = NULL;
@@ -2056,9 +2035,6 @@ new_adbaddrinfo(dns_adb_t *adb, dns_adbentry_t *entry, in_port_t port) {
dns_adbaddrinfo_t *ai;
ai = isc_mempool_get(adb->aimp);
if (ai == NULL) {
return (NULL);
}
ai->magic = DNS_ADBADDRINFO_MAGIC;
ai->sockaddr = entry->sockaddr;
@@ -2736,8 +2712,6 @@ dns_adb_create(isc_mem_t *mem, dns_view_t *view, isc_timermgr_t *timermgr,
#define MPINIT(t, p, n) \
do { \
isc_mempool_create(mem, sizeof(t), &(p)); \
isc_mempool_setfreemax((p), FREE_ITEMS); \
isc_mempool_setfillcount((p), FILL_COUNT); \
isc_mempool_setname((p), n); \
isc_mempool_associatelock((p), &adb->mplock); \
} while (0)
@@ -3523,7 +3497,7 @@ dump_adb(dns_adb_t *adb, FILE *f, bool debug, isc_stdtime_t now) {
fprintf(f, "; [plain success/timeout]\n;\n");
if (debug) {
LOCK(&adb->reflock);
fprintf(f, "; addr %p, erefcnt %u, irefcnt %u, finds out %u\n",
fprintf(f, "; addr %p, erefcnt %u, irefcnt %u, finds out %zu\n",
adb, adb->erefcnt, adb->irefcnt,
isc_mempool_getallocated(adb->nhmp));
UNLOCK(&adb->reflock);

View File

@@ -704,9 +704,6 @@ get_dispsocket(dns_dispatch_t *disp, const isc_sockaddr_t *dest,
dispsock->socket = NULL;
} else {
dispsock = isc_mempool_get(mgr->spool);
if (dispsock == NULL) {
return (ISC_R_NOMEMORY);
}
disp->nsockets++;
dispsock->socket = NULL;
@@ -951,12 +948,6 @@ allocate_udp_buffer(dns_dispatch_t *disp) {
temp = isc_mempool_get(bpool);
if (temp == NULL) {
LOCK(&disp->mgr->buffer_lock);
disp->mgr->buffers--;
UNLOCK(&disp->mgr->buffer_lock);
}
return (temp);
}
@@ -1628,7 +1619,7 @@ static bool
destroy_mgr_ok(dns_dispatchmgr_t *mgr) {
mgr_log(mgr, LVL(90),
"destroy_mgr_ok: shuttingdown=%d, listnonempty=%d, "
"depool=%d, rpool=%d, dpool=%d",
"depool=%zu, rpool=%zu, dpool=%zu",
MGR_IS_SHUTTINGDOWN(mgr), !ISC_LIST_EMPTY(mgr->list),
isc_mempool_getallocated(mgr->depool),
isc_mempool_getallocated(mgr->rpool),
@@ -1817,22 +1808,13 @@ dns_dispatchmgr_create(isc_mem_t *mctx, dns_dispatchmgr_t **mgrp) {
isc_mempool_create(mgr->mctx, sizeof(dns_dispatch_t), &mgr->dpool);
isc_mempool_setname(mgr->depool, "dispmgr_depool");
isc_mempool_setmaxalloc(mgr->depool, 32768);
isc_mempool_setfreemax(mgr->depool, 32768);
isc_mempool_associatelock(mgr->depool, &mgr->depool_lock);
isc_mempool_setfillcount(mgr->depool, 32);
isc_mempool_setname(mgr->rpool, "dispmgr_rpool");
isc_mempool_setmaxalloc(mgr->rpool, 32768);
isc_mempool_setfreemax(mgr->rpool, 32768);
isc_mempool_associatelock(mgr->rpool, &mgr->rpool_lock);
isc_mempool_setfillcount(mgr->rpool, 32);
isc_mempool_setname(mgr->dpool, "dispmgr_dpool");
isc_mempool_setmaxalloc(mgr->dpool, 32768);
isc_mempool_setfreemax(mgr->dpool, 32768);
isc_mempool_associatelock(mgr->dpool, &mgr->dpool_lock);
isc_mempool_setfillcount(mgr->dpool, 32);
mgr->buffers = 0;
mgr->buffersize = 0;
@@ -1966,6 +1948,8 @@ dns_dispatchmgr_setudp(dns_dispatchmgr_t *mgr, unsigned int buffersize,
REQUIRE(buckets < 2097169); /* next prime > 65536 * 32 */
REQUIRE(increment > buckets);
UNUSED(maxrequests);
/*
* Keep some number of items around. This should be a config
* option. For now, keep 8, but later keep at least two even
@@ -1996,37 +1980,23 @@ dns_dispatchmgr_setudp(dns_dispatchmgr_t *mgr, unsigned int buffersize,
* complexity.
*/
if (maxbuffers > mgr->maxbuffers) {
isc_mempool_setmaxalloc(mgr->bpool, maxbuffers);
isc_mempool_setfreemax(mgr->bpool, maxbuffers);
mgr->maxbuffers = maxbuffers;
}
} else {
isc_mempool_create(mgr->mctx, buffersize, &mgr->bpool);
isc_mempool_setname(mgr->bpool, "dispmgr_bpool");
isc_mempool_setmaxalloc(mgr->bpool, maxbuffers);
isc_mempool_setfreemax(mgr->bpool, maxbuffers);
isc_mempool_associatelock(mgr->bpool, &mgr->bpool_lock);
isc_mempool_setfillcount(mgr->bpool, 32);
}
/* Create or adjust socket pool */
if (mgr->spool != NULL) {
if (maxrequests < DNS_DISPATCH_POOLSOCKS * 2) {
isc_mempool_setmaxalloc(mgr->spool,
DNS_DISPATCH_POOLSOCKS * 2);
isc_mempool_setfreemax(mgr->spool,
DNS_DISPATCH_POOLSOCKS * 2);
}
UNLOCK(&mgr->buffer_lock);
return (ISC_R_SUCCESS);
}
isc_mempool_create(mgr->mctx, sizeof(dispsocket_t), &mgr->spool);
isc_mempool_setname(mgr->spool, "dispmgr_spool");
isc_mempool_setmaxalloc(mgr->spool, maxrequests);
isc_mempool_setfreemax(mgr->spool, maxrequests);
isc_mempool_associatelock(mgr->spool, &mgr->spool_lock);
isc_mempool_setfillcount(mgr->spool, 32);
result = qid_allocate(mgr, buckets, increment, &mgr->qid, true);
if (result != ISC_R_SUCCESS) {
@@ -2890,7 +2860,6 @@ dispatch_createudp(dns_dispatchmgr_t *mgr, isc_socketmgr_t *sockmgr,
isc_mempool_create(mgr->mctx, sizeof(dispportentry_t),
&disp->portpool);
isc_mempool_setname(disp->portpool, "disp_portpool");
isc_mempool_setfreemax(disp->portpool, 128);
}
disp->socket = sock;
disp->local = *localaddr;
@@ -2923,10 +2892,7 @@ dispatch_createudp(dns_dispatchmgr_t *mgr, isc_socketmgr_t *sockmgr,
isc_mutex_init(&disp->sepool_lock);
isc_mempool_setname(disp->sepool, "disp_sepool");
isc_mempool_setmaxalloc(disp->sepool, 32768);
isc_mempool_setfreemax(disp->sepool, 32768);
isc_mempool_associatelock(disp->sepool, &disp->sepool_lock);
isc_mempool_setfillcount(disp->sepool, 16);
attributes &= ~DNS_DISPATCHATTR_TCP;
attributes |= DNS_DISPATCHATTR_UDP;

View File

@@ -750,13 +750,9 @@ dns_message_create(isc_mem_t *mctx, unsigned int intent, dns_message_t **msgp) {
ISC_LIST_INIT(m->freerdatalist);
isc_mempool_create(m->mctx, sizeof(dns_name_t), &m->namepool);
isc_mempool_setfillcount(m->namepool, NAME_COUNT);
isc_mempool_setfreemax(m->namepool, NAME_COUNT);
isc_mempool_setname(m->namepool, "msg:names");
isc_mempool_create(m->mctx, sizeof(dns_rdataset_t), &m->rdspool);
isc_mempool_setfillcount(m->rdspool, RDATASET_COUNT);
isc_mempool_setfreemax(m->rdspool, RDATASET_COUNT);
isc_mempool_setname(m->rdspool, "msg:rdataset");
dynbuf = NULL;
@@ -1009,9 +1005,6 @@ getquestions(isc_buffer_t *source, dns_message_t *msg, dns_decompress_t *dctx,
for (count = 0; count < msg->counts[DNS_SECTION_QUESTION]; count++) {
name = isc_mempool_get(msg->namepool);
if (name == NULL) {
return (ISC_R_NOMEMORY);
}
free_name = true;
offsets = newoffsets(msg);
@@ -1108,10 +1101,6 @@ getquestions(isc_buffer_t *source, dns_message_t *msg, dns_decompress_t *dctx,
goto cleanup;
}
rdataset = isc_mempool_get(msg->rdspool);
if (rdataset == NULL) {
result = ISC_R_NOMEMORY;
goto cleanup;
}
/*
* Convert rdatalist to rdataset, and attach the latter to
@@ -1260,9 +1249,6 @@ getsection(isc_buffer_t *source, dns_message_t *msg, dns_decompress_t *dctx,
istsig = false;
name = isc_mempool_get(msg->namepool);
if (name == NULL) {
return (ISC_R_NOMEMORY);
}
free_name = true;
offsets = newoffsets(msg);
@@ -1564,10 +1550,6 @@ getsection(isc_buffer_t *source, dns_message_t *msg, dns_decompress_t *dctx,
if (result == ISC_R_NOTFOUND) {
rdataset = isc_mempool_get(msg->rdspool);
if (rdataset == NULL) {
result = ISC_R_NOMEMORY;
goto cleanup;
}
free_rdataset = true;
rdatalist = newrdatalist(msg);
@@ -2577,9 +2559,6 @@ dns_message_gettempname(dns_message_t *msg, dns_name_t **item) {
REQUIRE(item != NULL && *item == NULL);
*item = isc_mempool_get(msg->namepool);
if (*item == NULL) {
return (ISC_R_NOMEMORY);
}
dns_name_init(*item, NULL);
return (ISC_R_SUCCESS);
@@ -2617,9 +2596,6 @@ dns_message_gettemprdataset(dns_message_t *msg, dns_rdataset_t **item) {
REQUIRE(item != NULL && *item == NULL);
*item = isc_mempool_get(msg->rdspool);
if (*item == NULL) {
return (ISC_R_NOMEMORY);
}
dns_rdataset_init(*item);

View File

@@ -382,9 +382,6 @@ isc_mempool_create(isc_mem_t *mctx, size_t size, isc_mempool_t **mpctxp);
*\li mpctxp != NULL and *mpctxp == NULL
*
* Defaults:
*\li maxalloc = UINT_MAX
*\li freemax = 1
*\li fillcount = 1
*
* Returns:
*\li #ISC_R_NOMEMORY -- not enough memory to create pool
@@ -442,72 +439,24 @@ isc_mempool_associatelock(isc_mempool_t *mpctx, isc_mutex_t *lock);
/*
* The following functions get/set various parameters. Note that due to
* the unlocked nature of pools these are potentially random values
*unless the imposed externally provided locking protocols are followed.
*
* Also note that the quota limits will not always take immediate
*effect. For instance, setting "maxalloc" to a number smaller than the
*currently allocated count is permitted. New allocations will be
*refused until the count drops below this threshold.
* unless the imposed externally provided locking protocols are followed.
*
* All functions require (in addition to other requirements):
* mpctx is a valid memory pool
*/
unsigned int
isc_mempool_getfreemax(isc_mempool_t *mpctx);
/*%<
* Returns the maximum allowed size of the free list.
*/
void
isc_mempool_setfreemax(isc_mempool_t *mpctx, unsigned int limit);
/*%<
* Sets the maximum allowed size of the free list.
*/
unsigned int
size_t
isc_mempool_getfreecount(isc_mempool_t *mpctx);
/*%<
* Returns current size of the free list.
*/
unsigned int
isc_mempool_getmaxalloc(isc_mempool_t *mpctx);
/*!<
* Returns the maximum allowed number of allocations.
*/
void
isc_mempool_setmaxalloc(isc_mempool_t *mpctx, unsigned int limit);
/*%<
* Sets the maximum allowed number of allocations.
*
* Additional requirements:
*\li limit > 0
*/
unsigned int
size_t
isc_mempool_getallocated(isc_mempool_t *mpctx);
/*%<
* Returns the number of items allocated from this pool.
*/
unsigned int
isc_mempool_getfillcount(isc_mempool_t *mpctx);
/*%<
* Returns the number of items allocated as a block from the parent
* memory context when the free list is empty.
*/
void
isc_mempool_setfillcount(isc_mempool_t *mpctx, unsigned int limit);
/*%<
* Sets the fillcount.
*
* Additional requirements:
*\li limit > 0
*/
/*
* Pseudo-private functions for use via macros. Do not call directly.
*/

View File

@@ -30,6 +30,7 @@
#include <isc/refcount.h>
#include <isc/strerr.h>
#include <isc/string.h>
#include <isc/thread.h>
#include <isc/types.h>
#include <isc/util.h>
@@ -44,6 +45,8 @@
#include "mem_p.h"
#define MEM_MAX_THREADS 128
#define MCTXLOCK(m) LOCK(&m->lock)
#define MCTXUNLOCK(m) UNLOCK(&m->lock)
#define MPCTXLOCK(mp) \
@@ -168,17 +171,15 @@ struct isc_mempool {
/*%< locked via the memory context's lock */
ISC_LINK(isc_mempool_t) link; /*%< next pool in this mem context */
/*%< optionally locked from here down */
element *items; /*%< low water item list */
size_t size; /*%< size of each item on this pool */
atomic_size_t maxalloc; /*%< max number of items allowed */
atomic_size_t allocated; /*%< # of items currently given out */
atomic_size_t freecount; /*%< # of items on reserved list */
atomic_size_t freemax; /*%< # of items allowed on free list */
atomic_size_t fillcount; /*%< # of items to fetch on each fill */
atomic_size_t *freecount; /*%< # of items on reserved list */
/*%< Stats only. */
atomic_size_t gets; /*%< # of requests to this pool */
/*%< Debugging only. */
char name[16]; /*%< printed name in stats reports */
size_t max_threads;
element **items; /*%< low water item list */
};
/*
@@ -876,19 +877,15 @@ isc_mem_stats(isc_mem_t *ctx, FILE *out) {
pool = ISC_LIST_HEAD(ctx->pools);
if (pool != NULL) {
fprintf(out, "[Pool statistics]\n");
fprintf(out, "%15s %10s %10s %10s %10s %10s %10s %10s %1s\n",
"name", "size", "maxalloc", "allocated", "freecount",
"freemax", "fillcount", "gets", "L");
fprintf(out, "%15s %10s %10s %10s %10s %1s\n", "name",
"size", "allocated", "freecount",
"gets", "L");
}
while (pool != NULL) {
fprintf(out,
"%15s %10zu %10zu %10zu %10zu %10zu %10zu %10zu %s\n",
fprintf(out, "%15s %10zu %10zu %10zu %10zu %s\n",
pool->name, pool->size,
atomic_load_relaxed(&pool->maxalloc),
atomic_load_relaxed(&pool->allocated),
atomic_load_relaxed(&pool->freecount),
atomic_load_relaxed(&pool->freemax),
atomic_load_relaxed(&pool->fillcount),
isc_mempool_getallocated(pool),
isc_mempool_getfreecount(pool),
atomic_load_relaxed(&pool->gets),
(pool->lock == NULL ? "N" : "Y"));
pool = ISC_LIST_NEXT(pool, link);
@@ -1204,13 +1201,18 @@ isc_mempool_create(isc_mem_t *mctx, size_t size, isc_mempool_t **mpctxp) {
.magic = MEMPOOL_MAGIC,
.mctx = mctx,
.size = size,
.max_threads = MEM_MAX_THREADS,
};
atomic_init(&mpctx->maxalloc, SIZE_MAX);
atomic_init(&mpctx->allocated, 0);
atomic_init(&mpctx->freecount, 0);
atomic_init(&mpctx->freemax, 1);
atomic_init(&mpctx->fillcount, 1);
mpctx->freecount = isc_mem_get(mctx, mpctx->max_threads * sizeof(mpctx->freecount[0]));
mpctx->items = isc_mem_get(mctx, mpctx->max_threads * sizeof(mpctx->items[0]));
for (size_t i = 0; i < mpctx->max_threads; i++) {
atomic_init(&mpctx->freecount[i], 0);
mpctx->items[i] = NULL;
}
*mpctxp = (isc_mempool_t *)mpctx;
@@ -1239,11 +1241,13 @@ isc_mempool_destroy(isc_mempool_t **mpctxp) {
isc_mempool_t *mpctx;
isc_mem_t *mctx;
isc_mutex_t *lock;
element *item;
mpctx = *mpctxp;
*mpctxp = NULL;
mpctx->magic = 0;
if (atomic_load_acquire(&mpctx->allocated) > 0) {
UNEXPECTED_ERROR(__FILE__, __LINE__,
"isc_mempool_destroy(): mempool %s "
@@ -1254,25 +1258,23 @@ isc_mempool_destroy(isc_mempool_t **mpctxp) {
mctx = mpctx->mctx;
lock = mpctx->lock;
if (lock != NULL) {
LOCK(lock);
}
/*
* Return any items on the free list
*/
while (mpctx->items != NULL) {
INSIST(atomic_fetch_sub_release(&mpctx->freecount, 1) > 0);
for (size_t i = 0; i < mpctx->max_threads; i++) {
while (mpctx->items[i] != NULL) {
INSIST(atomic_fetch_sub_release(&mpctx->freecount[i], 1) > 0);
item = mpctx->items[i];
mpctx->items[i] = item->next;
item = mpctx->items;
mpctx->items = item->next;
mem_putstats(mctx, item, mpctx->size);
mem_put(mctx, item, mpctx->size);
mem_putstats(mctx, item, mpctx->size);
mem_put(mctx, item, mpctx->size);
}
}
isc_mem_put(mctx, mpctx->freecount, mpctx->max_threads * sizeof(mpctx->freecount[0]));
isc_mem_put(mctx, mpctx->items, mpctx->max_threads * sizeof(mpctx->items[0]));
/*
* Remove our linked list entry from the memory context.
*/
@@ -1281,13 +1283,7 @@ isc_mempool_destroy(isc_mempool_t **mpctxp) {
mctx->poolcnt--;
MCTXUNLOCK(mctx);
mpctx->magic = 0;
isc_mem_put(mpctx->mctx, mpctx, sizeof(isc_mempool_t));
if (lock != NULL) {
UNLOCK(lock);
}
}
void
@@ -1300,59 +1296,54 @@ isc_mempool_associatelock(isc_mempool_t *mpctx, isc_mutex_t *lock) {
mpctx->lock = lock;
}
#if __SANITIZE_ADDRESS__
void *
isc__mempool_get(isc_mempool_t *mpctx FLARG) {
REQUIRE(VALID_MEMPOOL(mpctx));
(void)atomic_fetch_add_release(&mpctx->allocated, 1);
(void)atomic_fetch_add_relaxed(&mpctx->gets, 1);
return (isc__mem_get(mpctx->mctx, mpctx->size FLARG_PASS));
}
void
isc__mempool_put(isc_mempool_t *mpctx, void *mem FLARG) {
REQUIRE(VALID_MEMPOOL(mpctx));
REQUIRE(mem != NULL);
INSIST(atomic_fetch_sub_release(&mpctx->allocated, 1) > 0);
isc__mem_put(mpctx->mctx, mem, mpctx->size FLARG_PASS);
}
#else /* __SANITIZE_ADDRESS__ */
void *
isc__mempool_get(isc_mempool_t *mpctx FLARG) {
REQUIRE(VALID_MEMPOOL(mpctx));
REQUIRE(isc_tid_v < mpctx->max_threads);
element *item;
unsigned int i;
size_t allocated = atomic_fetch_add_release(&mpctx->allocated, 1);
size_t maxalloc = atomic_load_acquire(&mpctx->maxalloc);
/*
* Don't let the caller go over quota
*/
if (ISC_UNLIKELY(allocated >= maxalloc)) {
atomic_fetch_sub_release(&mpctx->allocated, 1);
return (NULL);
}
MPCTXLOCK(mpctx);
if (ISC_UNLIKELY(mpctx->items == NULL)) {
if (ISC_UNLIKELY(mpctx->items[isc_tid_v] == NULL)) {
isc_mem_t *mctx = mpctx->mctx;
size_t fillcount = atomic_load_acquire(&mpctx->fillcount);
/*
* We need to dip into the well. Lock the memory
* context here and fill up our free list.
*/
for (i = 0; i < fillcount; i++) {
item = mem_get(mctx, mpctx->size);
mem_getstats(mctx, mpctx->size);
item->next = mpctx->items;
mpctx->items = item;
atomic_fetch_add_relaxed(&mpctx->freecount, 1);
}
item = mem_get(mctx, mpctx->size);
mem_getstats(mctx, mpctx->size);
} else {
item = mpctx->items[isc_tid_v];
mpctx->items[isc_tid_v] = item->next;
INSIST(atomic_fetch_sub_relaxed(&mpctx->freecount[isc_tid_v], 1) > 0);
}
/*
* If we didn't get any items, return NULL.
*/
item = mpctx->items;
if (ISC_UNLIKELY(item == NULL)) {
atomic_fetch_sub_release(&mpctx->allocated, 1);
goto out;
}
REQUIRE(item != NULL);
mpctx->items = item->next;
item->next = NULL;
INSIST(atomic_fetch_sub_release(&mpctx->freecount, 1) > 0);
atomic_fetch_add_relaxed(&mpctx->gets, 1);
atomic_fetch_add_relaxed(&mpctx->allocated, 1);
ADD_TRACE(mpctx->mctx, item, mpctx->size, file, line);
out:
MPCTXUNLOCK(mpctx);
return (item);
}
@@ -1361,100 +1352,46 @@ void
isc__mempool_put(isc_mempool_t *mpctx, void *mem FLARG) {
REQUIRE(VALID_MEMPOOL(mpctx));
REQUIRE(mem != NULL);
REQUIRE(isc_tid_v < mpctx->max_threads);
isc_mem_t *mctx = mpctx->mctx;
element *item;
size_t freecount = atomic_load_acquire(&mpctx->freecount);
size_t freemax = atomic_load_acquire(&mpctx->freemax);
size_t allocated = atomic_fetch_sub_relaxed(&mpctx->allocated, 1);
(void)atomic_fetch_add_relaxed(&mpctx->freecount[isc_tid_v], 1);
INSIST(atomic_fetch_sub_release(&mpctx->allocated, 1) > 0);
INSIST(allocated > 0);
DELETE_TRACE(mctx, mem, mpctx->size, file, line);
/*
* If our free list is full, return this to the mctx directly.
*/
if (freecount >= freemax) {
mem_putstats(mctx, mem, mpctx->size);
mem_put(mctx, mem, mpctx->size);
return;
}
/*
* Otherwise, attach it to our free list and bump the counter.
*/
MPCTXLOCK(mpctx);
item = (element *)mem;
item->next = mpctx->items;
mpctx->items = item;
atomic_fetch_add_relaxed(&mpctx->freecount, 1);
MPCTXUNLOCK(mpctx);
item->next = mpctx->items[isc_tid_v];
mpctx->items[isc_tid_v] = item;
}
#endif /* __SANITIZE_ADDRESS__ */
/*
* Quotas
*/
void
isc_mempool_setfreemax(isc_mempool_t *mpctx, unsigned int limit) {
REQUIRE(VALID_MEMPOOL(mpctx));
atomic_store_release(&mpctx->freemax, limit);
}
unsigned int
isc_mempool_getfreemax(isc_mempool_t *mpctx) {
REQUIRE(VALID_MEMPOOL(mpctx));
return (atomic_load_acquire(&mpctx->freemax));
}
unsigned int
size_t
isc_mempool_getfreecount(isc_mempool_t *mpctx) {
REQUIRE(VALID_MEMPOOL(mpctx));
size_t freecount = 0;
return (atomic_load_relaxed(&mpctx->freecount));
for (size_t i = 0; i < mpctx->max_threads; i++) {
freecount += atomic_load_relaxed(&mpctx->freecount[i]);
}
return (freecount);
}
void
isc_mempool_setmaxalloc(isc_mempool_t *mpctx, unsigned int limit) {
REQUIRE(VALID_MEMPOOL(mpctx));
REQUIRE(limit > 0);
atomic_store_release(&mpctx->maxalloc, limit);
}
unsigned int
isc_mempool_getmaxalloc(isc_mempool_t *mpctx) {
REQUIRE(VALID_MEMPOOL(mpctx));
return (atomic_load_relaxed(&mpctx->maxalloc));
}
unsigned int
size_t
isc_mempool_getallocated(isc_mempool_t *mpctx) {
REQUIRE(VALID_MEMPOOL(mpctx));
return (atomic_load_relaxed(&mpctx->allocated));
}
void
isc_mempool_setfillcount(isc_mempool_t *mpctx, unsigned int limit) {
REQUIRE(VALID_MEMPOOL(mpctx));
REQUIRE(limit > 0);
atomic_store_release(&mpctx->fillcount, limit);
}
unsigned int
isc_mempool_getfillcount(isc_mempool_t *mpctx) {
REQUIRE(VALID_MEMPOOL(mpctx));
return (atomic_load_relaxed(&mpctx->fillcount));
}
/*
* Requires contextslock to be held by caller.
*/

View File

@@ -242,17 +242,13 @@ isc_nm_start(isc_mem_t *mctx, uint32_t workers) {
isc_mutex_init(&mgr->reqlock);
isc_mempool_create(mgr->mctx, sizeof(isc__nm_uvreq_t), &mgr->reqpool);
isc_mempool_setname(mgr->reqpool, "nm_reqpool");
isc_mempool_setfreemax(mgr->reqpool, 4096);
isc_mempool_associatelock(mgr->reqpool, &mgr->reqlock);
isc_mempool_setfillcount(mgr->reqpool, 32);
isc_mutex_init(&mgr->evlock);
isc_mempool_create(mgr->mctx, sizeof(isc__netievent_storage_t),
&mgr->evpool);
isc_mempool_setname(mgr->evpool, "nm_evpool");
isc_mempool_setfreemax(mgr->evpool, 4096);
isc_mempool_associatelock(mgr->evpool, &mgr->evlock);
isc_mempool_setfillcount(mgr->evpool, 32);
mgr->workers = isc_mem_get(mctx, workers * sizeof(isc__networker_t));
for (size_t i = 0; i < workers; i++) {

View File

@@ -58,13 +58,8 @@ _teardown(void **state) {
return (0);
}
#define MP1_FREEMAX 10
#define MP1_FILLCNT 10
#define MP1_MAXALLOC 30
#define MP2_FREEMAX 25
#define MP2_FILLCNT 25
/* general memory system tests */
static void
isc_mem_test(void **state) {
@@ -80,10 +75,6 @@ isc_mem_test(void **state) {
isc_mempool_create(test_mctx, 24, &mp1);
isc_mempool_create(test_mctx, 31, &mp2);
isc_mempool_setfreemax(mp1, MP1_FREEMAX);
isc_mempool_setfillcount(mp1, MP1_FILLCNT);
isc_mempool_setmaxalloc(mp1, MP1_MAXALLOC);
/*
* Allocate MP1_MAXALLOC items from the pool. This is our max.
*/
@@ -92,12 +83,6 @@ isc_mem_test(void **state) {
assert_non_null(items1[i]);
}
/*
* Try to allocate one more. This should fail.
*/
tmp = isc_mempool_get(mp1);
assert_null(tmp);
/*
* Free the first 11 items. Verify that there are 10 free items on
* the free list (which is our max).
@@ -107,8 +92,10 @@ isc_mem_test(void **state) {
items1[i] = NULL;
}
#if !__SANITIZE_ADDRESS__
rval = isc_mempool_getfreecount(mp1);
assert_int_equal(rval, 10);
assert_int_equal(rval, 11);
#endif /* !__SANITIZE_ADDRESS__ */
rval = isc_mempool_getallocated(mp1);
assert_int_equal(rval, 19);
@@ -118,9 +105,6 @@ isc_mem_test(void **state) {
* them, then allocate 50 more, etc.
*/
isc_mempool_setfreemax(mp2, 25);
isc_mempool_setfillcount(mp2, 25);
for (j = 0; j < 500000; j++) {
for (i = 0; i < 50; i++) {
items2[i] = isc_mempool_get(mp2);
@@ -372,6 +356,7 @@ isc_mem_traceflag_test(void **state) {
#define ITERS 512
#define NUM_ITEMS 1024 /* 768 */
#define ITEM_SIZE 65534
#define MP_ITEM_SIZE 256
static atomic_size_t mem_size;
@@ -457,13 +442,10 @@ isc_mempool_benchmark(void **state) {
isc_mutex_init(&mplock);
isc_mempool_create(test_mctx, ITEM_SIZE, &mp);
isc_mempool_create(test_mctx, MP_ITEM_SIZE, &mp);
isc_mempool_associatelock(mp, &mplock);
isc_mempool_setfreemax(mp, 32768);
isc_mempool_setfillcount(mp, ISC_MAX(NUM_ITEMS / nthreads, 1));
UNUSED(state);
result = isc_time_now(&ts1);

View File

@@ -394,13 +394,7 @@ isc_mempool_associatelock
isc_mempool_create
isc_mempool_destroy
isc_mempool_getallocated
isc_mempool_getfillcount
isc_mempool_getfreecount
isc_mempool_getfreemax
isc_mempool_getmaxalloc
isc_mempool_setfillcount
isc_mempool_setfreemax
isc_mempool_setmaxalloc
isc_mempool_setname
isc_mutexblock_destroy
isc_mutexblock_init