Compare commits

...

9 Commits

Author SHA1 Message Date
Ondřej Surý
7d4b99d65c fixup! WIP: Implement CLH lock without malloc 2021-04-08 16:41:50 +02:00
Ondřej Surý
04129aead3 WIP: Implement CLH lock without malloc 2021-04-08 15:02:06 +02:00
Ondřej Surý
2701c454f7 WIP: pause() until run out of patience then yield() 2021-04-08 15:02:06 +02:00
Ondřej Surý
7091f9bb03 WIP: implement the writer_barrier approach for not-starving the readers 2021-04-08 15:02:06 +02:00
Ondřej Surý
5c138c9683 WIP: add rudimentary support for write_quota 2021-04-08 15:02:06 +02:00
Ondřej Surý
c003b77d7c WIP: Replace the RW lock implementation with C-RW-WP lock 2021-04-08 15:02:06 +02:00
Ondřej Surý
a268daf9b7 WIP: Add rwlock_test 2021-04-08 15:02:06 +02:00
Ondřej Surý
98629a1d0b WIP: Add mutex_test 2021-04-08 15:02:06 +02:00
Ondřej Surý
6417bc131c WIP: Add pause.h 2021-04-08 15:02:06 +02:00
10 changed files with 1463 additions and 43 deletions

View File

@@ -603,7 +603,8 @@ AS_IF([test "$enable_pthread_rwlock" = "yes"],
[AC_CHECK_FUNCS([pthread_rwlock_rdlock], [],
[AC_MSG_ERROR([pthread_rwlock_rdlock requested but not found])])
AC_DEFINE([USE_PTHREAD_RWLOCK],[1],[Define if you want to use pthread rwlock implementation])
])
],
[AC_DEFINE([USE_CLH_RWLOCK],[1],[Define if you want to use CLH rwlock implementation])])
CRYPTO=OpenSSL

View File

@@ -57,6 +57,7 @@ libisc_la_HEADERS = \
include/isc/nonce.h \
include/isc/os.h \
include/isc/parseint.h \
include/isc/pause.h \
include/isc/pool.h \
include/isc/portset.h \
include/isc/platform.h \

View File

@@ -0,0 +1,41 @@
/*
 * Copyright (C) Internet Systems Consortium, Inc. ("ISC")
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, you can obtain one at https://mozilla.org/MPL/2.0/.
 *
 * See the COPYRIGHT file distributed with this work for additional
 * information regarding copyright ownership.
 */

#pragma once

#include <stddef.h> /* size_t, used by isc_pause() */

/*
 * isc__pause() emits a single CPU-level "pause"/yield hint suitable
 * for busy-wait loops on the target architecture; where no such hint
 * is available we fall back to sched_yield().
 */
#if defined(_MSC_VER)
#include <intrin.h>
#define isc__pause() YieldProcessor()
#elif defined(__x86_64__)
#include <immintrin.h>
#define isc__pause() _mm_pause()
#elif defined(__i386__)
#define isc__pause() __asm__ __volatile__("rep; nop")
#elif defined(__ia64__)
#define isc__pause() __asm__ __volatile__("hint @pause")
#elif defined(__arm__) && HAVE_ARM_YIELD
#define isc__pause() __asm__ __volatile__("yield")
#elif defined(sun) && (defined(__sparc) || defined(__sparc__))
#include <synch.h>
#define isc__pause() smt_pause()
#elif (defined(__sparc) || defined(__sparc__)) && HAVE_SPARC_PAUSE
#define isc__pause() __asm__ __volatile__("pause")
#elif defined(__ppc__) || defined(_ARCH_PPC) || defined(_ARCH_PWR) || \
	defined(_ARCH_PWR2) || defined(_POWER)
#define isc__pause() __asm__ volatile("or 27,27,27")
#else /* if defined(_MSC_VER) */
#include <sched.h> /* sched_yield(); was missing in the fallback branch */
#define isc__pause() sched_yield()
#endif /* if defined(_MSC_VER) */

/*
 * isc_pause(iters): execute 'iters' pause hints.  Wrapped in
 * do { } while (0) so it behaves as a single statement (safe in
 * unbraced if/else); the loop variable stays out of the reserved
 * "__"-prefixed identifier namespace, and 'iters' is parenthesized
 * against operator-precedence surprises.
 */
#define isc_pause(iters)                                                      \
	do {                                                                  \
		for (size_t isc__pause_i = 0; isc__pause_i < (iters);         \
		     isc__pause_i++) {                                        \
			isc__pause();                                         \
		}                                                             \
	} while (0)

View File

@@ -21,6 +21,7 @@
#include <isc/lang.h>
#include <isc/platform.h>
#include <isc/types.h>
#include <isc/queue.h>
ISC_LANG_BEGINDECLS
@@ -30,7 +31,47 @@ typedef enum {
isc_rwlocktype_write
} isc_rwlocktype_t;
#if USE_PTHREAD_RWLOCK
#if USE_CLH_RWLOCK
#include <isc/align.h>
#define ISC_RWLOCK_UNLOCKED 0
#define ISC_RWLOCK_LOCKED 1
#define ISC_CACHE_LINE 64
typedef struct isc_rwlock_node {
alignas(ISC_CACHE_LINE) atomic_bool succ_must_wait;
} isc_rwlock_node_t;
struct isc_rwlock {
unsigned int magic;
alignas(ISC_CACHE_LINE) isc_rwlock_node_t *mynode;
alignas(ISC_CACHE_LINE) atomic_uintptr_t tail;
alignas(ISC_CACHE_LINE) isc_queue_t *nodes;
alignas(ISC_CACHE_LINE) atomic_uint_fast32_t readers_counter;
};
#elif USE_C_RW_WP
#include <isc/align.h>
#define ISC_RWLOCK_UNLOCKED false
#define ISC_RWLOCK_LOCKED true
#define ISC_CACHE_LINE 64 /* TODO: Move to platform.h */
#define ISC_RWLOCK_HASH_RATIO 3
#define ISC_RWLOCK_COUNTERS_RATIO \
(ISC_RWLOCK_HASH_RATIO * ISC_CACHE_LINE / sizeof(atomic_int_fast32_t))
struct isc_rwlock {
unsigned int magic;
uint16_t hashbits;
uint16_t ncounters;
alignas(ISC_CACHE_LINE) atomic_int_fast32_t *readers_counters;
alignas(ISC_CACHE_LINE) atomic_bool writers_mutex;
alignas(ISC_CACHE_LINE) atomic_int_fast32_t writers_barrier;
};
#elif USE_PTHREAD_RWLOCK
#include <pthread.h>
struct isc_rwlock {
@@ -42,9 +83,9 @@ struct isc_rwlock {
struct isc_rwlock {
/* Unlocked. */
unsigned int magic;
isc_mutex_t lock;
atomic_int_fast32_t spins;
unsigned int magic;
isc_mutex_t lock;
atomic_uint_fast32_t spins;
/*
* When some atomic instructions with hardware assistance are

View File

@@ -19,6 +19,7 @@
#include "config.h"
#include "mem_p.h"
#include "rwlock_p.h"
#include "tls_p.h"
#include "trampoline_p.h"
@@ -45,10 +46,12 @@ isc__initialize(void) {
isc__mem_initialize();
isc__tls_initialize();
isc__trampoline_initialize();
isc__rwlock_initialize();
}
void
isc__shutdown(void) {
isc__rwlock_shutdown();
isc__trampoline_shutdown();
isc__tls_shutdown();
isc__mem_shutdown();

View File

@@ -22,6 +22,8 @@
ISC_LANG_BEGINDECLS
#define ISC_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
/*!
* Supply mutex attributes that enable deadlock detection
* (helpful when debugging). This is system dependent and

View File

@@ -14,10 +14,7 @@
#include <inttypes.h>
#include <stdbool.h>
#include <stddef.h>
#if defined(sun) && (defined(__sparc) || defined(__sparc__))
#include <synch.h> /* for smt_pause(3c) */
#endif /* if defined(sun) && (defined(__sparc) || defined(__sparc__)) */
#include <unistd.h>
#include <isc/atomic.h>
#include <isc/magic.h>
@@ -26,7 +23,662 @@
#include <isc/rwlock.h>
#include <isc/util.h>
#if USE_PTHREAD_RWLOCK
#include "rwlock_p.h"
static isc_mem_t *rwlock_mctx = NULL;
void
isc__rwlock_initialize(void) {
isc_mem_create(&rwlock_mctx);
/* isc_mem_setdestroycheck(rwlock_mctx, false); */
}
void
isc__rwlock_shutdown(void) {
isc_mem_detach(&rwlock_mctx);
}
#if USE_CLH_RWLOCK
/* CLH RWLock + C-RW-P */
#define RWLOCK_MAGIC ISC_MAGIC('R', 'W', 'W', 'P')
#define VALID_RWLOCK(rwl) ISC_MAGIC_VALID(rwl, RWLOCK_MAGIC)
#define RWLOCK_MAX_THREADS 128
#ifndef RWLOCK_MAX_SPIN_COUNT
#define RWLOCK_MAX_SPIN_COUNT 100000
#endif /* ifndef RWLOCK_MAX_SPIN_COUNT */
#include <stdlib.h>
#include <isc/os.h>
#include <isc/pause.h>
#include <isc/thread.h>
#define READER_INCR(rwl) \
(void)atomic_fetch_add_release(&(rwl->readers_counter), 1);
#define READER_DECR(rwl) \
(void)atomic_fetch_sub_release(&(rwl->readers_counter), 1);
/* fprintf(stderr, "%zu:%s:%d:%s:NODE_LOCK(%p)\n", isc_tid_v, __FILE__, __LINE__, __func__, mynode); \ */
#define NODE_LOCK(mynode) \
REQUIRE(atomic_compare_exchange_strong(&mynode->succ_must_wait, \
&(bool){ ISC_RWLOCK_UNLOCKED }, \
ISC_RWLOCK_LOCKED));
/* fprintf(stderr, "%zu:%s:%d:%s:NODE_UNLOCK(%p)\n", isc_tid_v, __FILE__, __LINE__, __func__, mynode); \ */
#define NODE_UNLOCK(mynode) \
REQUIRE(atomic_compare_exchange_strong(&mynode->succ_must_wait, \
&(bool){ ISC_RWLOCK_LOCKED }, \
ISC_RWLOCK_UNLOCKED));
static inline void
isc__rwlock_exclusive_unlock(isc_rwlock_t *rwl);
static inline void
isc__rwlock_wait_for_running_readers(isc_rwlock_t *rwl);

/*
 * Obtain a CLH queue node: reuse one parked in the recycle queue when
 * available, otherwise allocate a fresh one from rwlock_mctx (nodes
 * are recycled on unlock rather than freed, hence "without malloc" on
 * the hot path).  'succ_must_wait' is the node's initial lock state:
 * true for a node entering the queue, false for the initial dummy.
 */
static inline
isc_rwlock_node_t *isc_rwlock_create_node(isc_rwlock_t *rwl, bool succ_must_wait) {
	isc_rwlock_node_t *mynode =
		(isc_rwlock_node_t *)isc_queue_dequeue(rwl->nodes);
	if (mynode == NULL) {
		mynode = isc_mem_get(rwlock_mctx, sizeof(*mynode));
	}
	atomic_init(&mynode->succ_must_wait, succ_must_wait);
	return (mynode);
}
/*
 * CLH enqueue: create a locked node, swap it in as the new queue tail,
 * and spin until the predecessor's 'succ_must_wait' flag clears.
 * Spins with a CPU pause for up to RWLOCK_MAX_SPIN_COUNT iterations,
 * then degrades to yielding the thread.  On return the caller holds
 * the queue lock; '*prev' is the predecessor node, which the caller
 * must recycle via rwl->nodes.
 */
static inline isc_rwlock_node_t *
isc__rwlock_wait_for_prev(isc_rwlock_t *rwl, isc_rwlock_node_t **prev) {
	REQUIRE(prev != NULL && *prev == NULL);

	isc_rwlock_node_t *mynode = isc_rwlock_create_node(rwl, true);
	/* Atomically become the tail; the old tail is our predecessor. */
	*prev = (void *)atomic_exchange(&rwl->tail, (uintptr_t)mynode);
	INSIST(*prev != NULL);

	bool prev_islocked = atomic_load_relaxed(&(*prev)->succ_must_wait);
	if (prev_islocked) {
		uint32_t cnt = 0;
		const uint32_t max_cnt = RWLOCK_MAX_SPIN_COUNT;
		while (prev_islocked) {
			if (ISC_LIKELY(cnt < max_cnt)) {
				cnt++;
				isc_pause(1);
			} else {
				/* Out of patience: stop burning CPU. */
				isc_thread_yield();
			}
			prev_islocked = atomic_load(&(*prev)->succ_must_wait);
		}
	}
	return (mynode);
}
/*
 * Acquire a shared (read) lock: pass through the CLH queue (which
 * serializes readers with writers at the queue head), register as a
 * running reader, then immediately release our queue node so other
 * readers may proceed concurrently while we hold only the counter.
 */
static inline void
isc__rwlock_shared_lock(isc_rwlock_t *rwl) {
	isc_rwlock_node_t *prev = NULL;
	isc_rwlock_node_t *mynode = isc__rwlock_wait_for_prev(rwl, &prev);
	INSIST(prev != NULL);
	READER_INCR(rwl);
	NODE_UNLOCK(mynode);
	isc_queue_enqueue(rwl->nodes, (uintptr_t)prev); /* free */
}
/*
 * Try-lock is not implemented for the CLH lock (queue entry cannot
 * fail fast); always report busy so callers use the blocking path.
 */
static inline isc_result_t
isc__rwlock_shared_trylock(isc_rwlock_t *rwl) {
	UNUSED(rwl);
	return (ISC_R_LOCKBUSY);
}

/*
 * Drop a shared lock: readers only hold a count in 'readers_counter';
 * no queue node is retained after acquisition.
 */
static inline void
isc__rwlock_shared_unlock(isc_rwlock_t *rwl) {
	READER_DECR(rwl);
}

/* Upgrade is likewise not implemented; always report busy. */
static inline isc_result_t
isc__rwlock_shared_tryupgrade(isc_rwlock_t *rwl) {
	UNUSED(rwl);
	return (ISC_R_LOCKBUSY);
}
/*
 * With the queue lock held, spin until the reader count drains to
 * zero.  Pauses for up to RWLOCK_MAX_SPIN_COUNT iterations, then
 * degrades to yielding the thread.
 */
static inline void
isc__rwlock_wait_for_running_readers(isc_rwlock_t *rwl) {
	/* Write-lock was acquired, now wait for running Readers to finish */
	if (atomic_load_relaxed(&rwl->readers_counter) > 0) {
		uint32_t cnt = 0;
		const uint32_t max_cnt = RWLOCK_MAX_SPIN_COUNT;
		while (atomic_load_acquire(&rwl->readers_counter) > 0) {
			if (ISC_LIKELY(cnt < max_cnt)) {
				cnt++;
				isc_pause(1);
			} else {
				isc_thread_yield();
			}
		}
	}
}
/*
 * Acquire the write lock: enter the CLH queue (excludes other writers
 * and newly arriving readers), then wait for readers that were already
 * in the critical section to drain.
 */
static inline void
isc__rwlock_exclusive_lock(isc_rwlock_t *rwl) {
	isc_rwlock_node_t *prev = NULL;
	isc_rwlock_node_t *mynode = isc__rwlock_wait_for_prev(rwl, &prev);
	INSIST(prev != NULL);
	isc__rwlock_wait_for_running_readers(rwl);
	/* Save our node so isc__rwlock_exclusive_unlock() can release it. */
	rwl->mynode = mynode;
	isc_queue_enqueue(rwl->nodes, (uintptr_t)prev); /* free */
}

/* Not implemented for the CLH lock; always busy. */
static isc_result_t
isc__rwlock_exclusive_trylock(isc_rwlock_t *rwl) {
	UNUSED(rwl);
	return (ISC_R_LOCKBUSY);
}

/* Release the write lock by unlocking the node saved by exclusive_lock. */
static inline void
isc__rwlock_exclusive_unlock(isc_rwlock_t *rwl) {
	isc_rwlock_node_t *mynode = rwl->mynode;
	NODE_UNLOCK(mynode);
}

/*
 * Downgrade write -> read: register as a reader before releasing the
 * exclusive lock so no competing writer can slip in between.
 */
static inline void
isc__rwlock_exclusive_downgrade(isc_rwlock_t *rwl) {
	READER_INCR(rwl);
	isc__rwlock_exclusive_unlock(rwl);
}
/*
 * Initialize a CLH-based rwlock.  read_quota/write_quota exist only
 * for API compatibility; non-zero values are reported as unexpected.
 * A recycle queue sized for RWLOCK_MAX_THREADS is created and the
 * queue tail starts as a single unlocked dummy node.
 */
void
isc_rwlock_init(isc_rwlock_t *rwl, unsigned int read_quota,
		unsigned int write_quota) {
	REQUIRE(rwl != NULL);

	rwl->magic = 0;

	if (read_quota != 0) {
		UNEXPECTED_ERROR(__FILE__, __LINE__,
				 "read quota is not supported");
	}
	if (write_quota != 0) {
		UNEXPECTED_ERROR(__FILE__, __LINE__,
				 "write quota is not supported");
	}

	atomic_init(&rwl->readers_counter, 0);
	rwl->nodes = isc_queue_new(rwlock_mctx, RWLOCK_MAX_THREADS);
	rwl->mynode = isc_rwlock_create_node(rwl, false);
	atomic_store(&rwl->tail, (uintptr_t)rwl->mynode);

	rwl->magic = RWLOCK_MAGIC;
}
/*
 * Destroy the lock: assert it is fully unlocked, free every node in
 * the recycle queue, free the node at the queue tail, then drop the
 * queue itself.
 * NOTE(review): rwl->mynode may have been recycled into 'nodes' by a
 * later acquisition, so only 'tail' is freed directly — confirm no
 * node can be reachable from both the queue and 'tail' here.
 */
void
isc_rwlock_destroy(isc_rwlock_t *rwl) {
	REQUIRE(VALID_RWLOCK(rwl));
	rwl->magic = 0;

	isc_rwlock_node_t *mynode = rwl->mynode;
	/* Check whether write lock has been unlocked */
	REQUIRE(atomic_load(&mynode->succ_must_wait) == ISC_RWLOCK_UNLOCKED);
	REQUIRE(atomic_load(&rwl->readers_counter) == 0);

	while ((mynode = (isc_rwlock_node_t *)isc_queue_dequeue(rwl->nodes)) !=
	       NULL) {
		isc_mem_put(rwlock_mctx, mynode, sizeof(*mynode));
	}
	mynode = (isc_rwlock_node_t *)atomic_load(&rwl->tail);
	isc_mem_put(rwlock_mctx, mynode, sizeof(*mynode));
	isc_queue_destroy(rwl->nodes);
}
/*
 * Public CLH entry points: dispatch on the requested lock type;
 * 'type' must be isc_rwlocktype_read or isc_rwlocktype_write.
 */
isc_result_t
isc_rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
	REQUIRE(VALID_RWLOCK(rwl));

	switch (type) {
	case isc_rwlocktype_read:
		isc__rwlock_shared_lock(rwl);
		break;
	case isc_rwlocktype_write:
		isc__rwlock_exclusive_lock(rwl);
		break;
	default:
		INSIST(0);
		ISC_UNREACHABLE();
	}

	return (ISC_R_SUCCESS);
}

/* Non-blocking variant; the CLH backend always returns ISC_R_LOCKBUSY. */
isc_result_t
isc_rwlock_trylock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
	REQUIRE(VALID_RWLOCK(rwl));

	switch (type) {
	case isc_rwlocktype_read:
		return (isc__rwlock_shared_trylock(rwl));
		break;
	case isc_rwlocktype_write:
		return (isc__rwlock_exclusive_trylock(rwl));
		break;
	default:
		INSIST(0);
		ISC_UNREACHABLE();
	}
}

isc_result_t
isc_rwlock_unlock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
	REQUIRE(VALID_RWLOCK(rwl));

	switch (type) {
	case isc_rwlocktype_read:
		isc__rwlock_shared_unlock(rwl);
		break;
	case isc_rwlocktype_write:
		isc__rwlock_exclusive_unlock(rwl);
		break;
	default:
		INSIST(0);
		ISC_UNREACHABLE();
	}

	return (ISC_R_SUCCESS);
}

/* Upgrade read -> write; unsupported here, always ISC_R_LOCKBUSY. */
isc_result_t
isc_rwlock_tryupgrade(isc_rwlock_t *rwl) {
	return (isc__rwlock_shared_tryupgrade(rwl));
}

/* Convert a held write lock to a read lock without a release window. */
void
isc_rwlock_downgrade(isc_rwlock_t *rwl) {
	isc__rwlock_exclusive_downgrade(rwl);
}
#elif USE_C_RW_WP
/*
* C-RW-WP Implementation from NUMA-Aware Reader-Writer Locks paper:
* http://dl.acm.org/citation.cfm?id=2442532
*
* This work is based on C++ code available from
* https://github.com/pramalhe/ConcurrencyFreaks/
*
* Copyright (c) 2014-2016, Pedro Ramalhete, Andreia Correia
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Concurrency Freaks nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
* IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER>
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#define RWLOCK_MAGIC ISC_MAGIC('R', 'W', 'W', 'P')
#define VALID_RWLOCK(rwl) ISC_MAGIC_VALID(rwl, RWLOCK_MAGIC)
#ifndef RWLOCK_DEFAULT_WRITE_QUOTA
#define RWLOCK_DEFAULT_WRITE_QUOTA 4
#endif /* ifndef RWLOCK_DEFAULT_WRITE_QUOTA */
#include <stdlib.h>
#include <isc/os.h>
#include <isc/pause.h>
#include <isc/thread.h>
/* FIXME: Now used in both rwlock.c and rbt.c */
#define HASHSIZE(bits) (UINT64_C(1) << (bits))
#define HASH_MAX_BITS 32
#define GOLDEN_RATIO_32 0x61C88647
#define GOLDEN_RATIO_64 0x61C8864680B583EBull
/*
 * Multiplicative (Fibonacci) hash of 'val' into 'bits' bits, keeping
 * the high bits of the product (the best-mixed ones).
 */
static inline uint32_t
hash_32(uint32_t val, unsigned int bits) {
	REQUIRE(bits <= HASH_MAX_BITS);
	/*
	 * 'bits == 0' happens on single-CPU systems (isc_rwlock_init()
	 * leaves hashbits at 0 when one stripe suffices); shifting a
	 * 32-bit value by 32 is undefined behavior, so special-case it.
	 */
	if (bits == 0) {
		return (0);
	}
	/* High bits are more random. */
	return (val * GOLDEN_RATIO_32 >> (32 - bits));
}
/*
 * Map the calling thread (isc_tid_v) to the index of its reader
 * counter stripe.  Stripes are ISC_RWLOCK_COUNTERS_RATIO entries
 * apart so counters of different threads never share a cache line.
 */
static inline size_t
tid2idx(isc_rwlock_t *rwl) {
	uint32_t tid = hash_32(isc_tid_v, rwl->hashbits);
	/*
	 * Compute the index in size_t: the previous uint16_t
	 * intermediate silently truncated once hash * ratio exceeded
	 * 65535 (possible on machines with many CPUs).
	 */
	size_t idx = (size_t)tid * ISC_RWLOCK_COUNTERS_RATIO;
	return (idx);
}
#ifndef RWLOCK_MAX_READER_PATIENCE
#define RWLOCK_MAX_READER_PATIENCE 1000
#endif /* ifndef RWLOCK_MAX_READER_PATIENCE */
static inline void
isc__rwlock_exclusive_unlock(isc_rwlock_t *rwl);
static inline isc_result_t
isc__rwlock_check_for_running_readers(isc_rwlock_t *rwl);
static inline void
isc__rwlock_wait_for_running_readers(isc_rwlock_t *rwl);
/*
 * Acquire a shared (read) lock, C-RW-WP style: optimistically bump
 * this thread's counter stripe, then re-check the writer mutex.  If a
 * writer is active, retract the count and spin until it releases.
 * After RWLOCK_MAX_READER_PATIENCE pauses, raise 'writers_barrier'
 * (which keeps new writers out — see isc__rwlock_exclusive_lock) and
 * switch from pausing to yielding, so readers cannot be starved
 * indefinitely.  The barrier is lowered once the read lock is held.
 */
static inline void
isc__rwlock_shared_lock(isc_rwlock_t *rwl) {
	const size_t idx = tid2idx(rwl);
	uint32_t cnt = 0;
	bool barrier_raised = false;

	while (true) {
		(void)atomic_fetch_add_release(&rwl->readers_counters[idx], 1);
		if (atomic_load_acquire(&rwl->writers_mutex) ==
		    ISC_RWLOCK_UNLOCKED) {
			/* Acquired lock in read-only mode */
			break;
		}

		/* Writer has acquired the lock, must reset to 0 and wait */
		(void)atomic_fetch_sub_release(&rwl->readers_counters[idx], 1);
		while (atomic_load_acquire(&rwl->writers_mutex) !=
		       ISC_RWLOCK_UNLOCKED) {
			if (!barrier_raised) {
				isc_pause(1);
				if (ISC_UNLIKELY(cnt++ >=
						 RWLOCK_MAX_READER_PATIENCE)) {
					(void)atomic_fetch_add_release(
						&rwl->writers_barrier, 1);
					barrier_raised = true;
				}
			} else {
				isc_thread_yield();
			}
		}
	}

	if (barrier_raised) {
		(void)atomic_fetch_sub_release(&rwl->writers_barrier, 1);
	}
}
/*
 * Non-blocking read lock: announce ourselves on our counter stripe,
 * then check for an active writer.  Success path returns early; on
 * failure the announcement is retracted and ISC_R_LOCKBUSY returned.
 */
static inline isc_result_t
isc__rwlock_shared_trylock(isc_rwlock_t *rwl) {
	const size_t slot = tid2idx(rwl);

	/* Optimistically register as a reader. */
	(void)atomic_fetch_add_release(&rwl->readers_counters[slot], 1);

	if (atomic_load_acquire(&rwl->writers_mutex) != ISC_RWLOCK_LOCKED) {
		/* No writer present: read lock acquired. */
		return (ISC_R_SUCCESS);
	}

	/* A writer holds the lock: retract the registration and bail. */
	(void)atomic_fetch_sub_release(&rwl->readers_counters[slot], 1);
	return (ISC_R_LOCKBUSY);
}
/* Drop the shared lock by decrementing this thread's counter stripe. */
static inline void
isc__rwlock_shared_unlock(isc_rwlock_t *rwl) {
	const size_t idx = tid2idx(rwl);
	REQUIRE(atomic_fetch_sub_release(&rwl->readers_counters[idx], 1) > 0);
}

/*
 * Try to upgrade read -> write without blocking.  Reports busy when a
 * reader barrier is raised, when the writer mutex cannot be taken, or
 * when other readers remain active; in the last case the read lock is
 * re-acquired before returning so the caller's state is unchanged.
 */
static inline isc_result_t
isc__rwlock_shared_tryupgrade(isc_rwlock_t *rwl) {
	const size_t idx = tid2idx(rwl);

	/* Write barrier has been raised by a starving reader. */
	if (atomic_load_acquire(&rwl->writers_barrier) > 0) {
		return (ISC_R_LOCKBUSY);
	}

	/* Try to acquire the write-lock (weak CAS may fail spuriously). */
	if (!atomic_compare_exchange_weak_acq_rel(
		    &rwl->writers_mutex, &(bool){ ISC_RWLOCK_UNLOCKED },
		    ISC_RWLOCK_LOCKED))
	{
		return (ISC_R_LOCKBUSY);
	}
	/* Unlock the read-lock */
	REQUIRE(atomic_fetch_sub_release(&rwl->readers_counters[idx], 1) > 0);

	if (isc__rwlock_check_for_running_readers(rwl) == ISC_R_LOCKBUSY) {
		/* Re-acquire the read-lock back */
		(void)atomic_fetch_add_release(&rwl->readers_counters[idx], 1);
		/* Unlock the write-lock */
		isc__rwlock_exclusive_unlock(rwl);
		return (ISC_R_LOCKBUSY);
	}

	return (ISC_R_SUCCESS);
}
/*
 * Non-blocking scan of the reader counter stripes (one entry per
 * cache-line-sized stripe): any non-zero counter means at least one
 * reader is still inside the critical section.
 */
static inline isc_result_t
isc__rwlock_check_for_running_readers(isc_rwlock_t *rwl) {
	size_t slot = 0;

	while (slot < rwl->ncounters) {
		if (atomic_load_relaxed(&rwl->readers_counters[slot]) > 0) {
			return (ISC_R_LOCKBUSY);
		}
		slot += ISC_RWLOCK_COUNTERS_RATIO;
	}

	return (ISC_R_SUCCESS);
}
/*
 * Spin until every reader counter stripe drains to zero.  Called with
 * the writer mutex already held, so no new readers can register.
 */
static inline void
isc__rwlock_wait_for_running_readers(isc_rwlock_t *rwl) {
	/* Write-lock was acquired, now wait for running Readers to finish */
	for (size_t idx = 0; idx < rwl->ncounters;
	     idx += ISC_RWLOCK_COUNTERS_RATIO) {
		while (atomic_load_acquire(&rwl->readers_counters[idx]) > 0) {
			isc_pause(1);
		}
	}
}
/*
 * Acquire the write lock: first honor any raised reader barrier (so
 * starving readers can get through), then spin on the writer mutex
 * CAS, and finally wait for in-flight readers to drain.
 */
static inline void
isc__rwlock_exclusive_lock(isc_rwlock_t *rwl) {
	/* A reader raised the barrier: stand back until it is lowered. */
	while (atomic_load_acquire(&rwl->writers_barrier) > 0) {
		isc_pause(1);
	}

	/* Try to acquire the write-lock */
	while (!atomic_compare_exchange_weak_acq_rel(
		&rwl->writers_mutex, &(bool){ ISC_RWLOCK_UNLOCKED },
		ISC_RWLOCK_LOCKED))
	{
		isc_pause(1);
	}
	isc__rwlock_wait_for_running_readers(rwl);
}
/*
 * Non-blocking write lock: fails when the reader barrier is raised,
 * when the writer mutex cannot be taken (the weak CAS may also fail
 * spuriously, which is acceptable for a trylock), or when readers are
 * still active — in which case the mutex is released again.
 */
static isc_result_t
isc__rwlock_exclusive_trylock(isc_rwlock_t *rwl) {
	/* Write barrier has been raised by a starving reader. */
	if (atomic_load_acquire(&rwl->writers_barrier) > 0) {
		return (ISC_R_LOCKBUSY);
	}

	/* Try to acquire the write-lock */
	if (!atomic_compare_exchange_weak_acq_rel(
		    &rwl->writers_mutex, &(bool){ ISC_RWLOCK_UNLOCKED },
		    ISC_RWLOCK_LOCKED))
	{
		return (ISC_R_LOCKBUSY);
	}
	/*
	 * Compare the result explicitly (matching
	 * isc__rwlock_shared_tryupgrade()) instead of relying on
	 * ISC_R_SUCCESS being the zero value.
	 */
	if (isc__rwlock_check_for_running_readers(rwl) == ISC_R_LOCKBUSY) {
		/* Unlock the write-lock */
		isc__rwlock_exclusive_unlock(rwl);
		return (ISC_R_LOCKBUSY);
	}

	return (ISC_R_SUCCESS);
}
/* Release the write lock; the REQUIRE catches unlock-without-lock. */
static inline void
isc__rwlock_exclusive_unlock(isc_rwlock_t *rwl) {
	REQUIRE(atomic_compare_exchange_strong_acq_rel(
		&rwl->writers_mutex, &(bool){ ISC_RWLOCK_LOCKED },
		ISC_RWLOCK_UNLOCKED));
}

/*
 * Downgrade write -> read: register as a reader before dropping the
 * writer mutex so no competing writer can acquire the lock in between.
 */
static inline void
isc__rwlock_exclusive_downgrade(isc_rwlock_t *rwl) {
	const size_t idx = tid2idx(rwl);
	(void)atomic_fetch_add_release(&rwl->readers_counters[idx], 1);
	isc__rwlock_exclusive_unlock(rwl);
}
/*
 * Initialize a C-RW-WP rwlock: size the reader-counter hash so there
 * is at least one stripe per CPU, then allocate and zero the counter
 * array.  read_quota/write_quota exist only for API compatibility;
 * non-zero values are reported as unexpected.
 */
void
isc_rwlock_init(isc_rwlock_t *rwl, unsigned int read_quota,
		unsigned int write_quota) {
	uint16_t ncpus = isc_os_ncpus();

	REQUIRE(rwl != NULL);

	rwl->magic = 0;
	rwl->hashbits = 0;

	if (read_quota != 0) {
		UNEXPECTED_ERROR(__FILE__, __LINE__,
				 "read quota is not supported");
	}
	if (write_quota != 0) {
		UNEXPECTED_ERROR(__FILE__, __LINE__,
				 "write quota is not supported");
	}

	/* Smallest power of two with one hash bucket per CPU. */
	while (ncpus > HASHSIZE(rwl->hashbits)) {
		rwl->hashbits += 1;
	}
	RUNTIME_CHECK(rwl->hashbits <= HASH_MAX_BITS);
	rwl->ncounters = HASHSIZE(rwl->hashbits) * ISC_RWLOCK_COUNTERS_RATIO;

	atomic_init(&rwl->writers_mutex, ISC_RWLOCK_UNLOCKED);
	atomic_init(&rwl->writers_barrier, 0);

	rwl->readers_counters =
		malloc(rwl->ncounters * sizeof(rwl->readers_counters[0]));
	/* The original dereferenced the result unchecked. */
	RUNTIME_CHECK(rwl->readers_counters != NULL);
	for (size_t i = 0; i < rwl->ncounters; i++) {
		atomic_init(&rwl->readers_counters[i], 0);
	}

	rwl->magic = RWLOCK_MAGIC;
}
/*
 * Destroy the lock: assert that no writer and no readers still hold
 * it, then free the counter array allocated by isc_rwlock_init().
 */
void
isc_rwlock_destroy(isc_rwlock_t *rwl) {
	REQUIRE(VALID_RWLOCK(rwl));
	rwl->magic = 0;

	/* Check whether write lock has been unlocked */
	REQUIRE(atomic_load(&rwl->writers_mutex) == ISC_RWLOCK_UNLOCKED);

	/* Check whether all read locks have been released */
	for (size_t i = 0; i < rwl->ncounters; i++) {
		REQUIRE(atomic_load(&rwl->readers_counters[i]) == 0);
	}

	free(rwl->readers_counters);
}
/*
 * Public C-RW-WP entry points: dispatch on the requested lock type;
 * 'type' must be isc_rwlocktype_read or isc_rwlocktype_write.
 */
isc_result_t
isc_rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
	REQUIRE(VALID_RWLOCK(rwl));

	switch (type) {
	case isc_rwlocktype_read:
		isc__rwlock_shared_lock(rwl);
		break;
	case isc_rwlocktype_write:
		isc__rwlock_exclusive_lock(rwl);
		break;
	default:
		INSIST(0);
		ISC_UNREACHABLE();
	}

	return (ISC_R_SUCCESS);
}

/* Non-blocking acquisition; returns ISC_R_LOCKBUSY instead of waiting. */
isc_result_t
isc_rwlock_trylock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
	REQUIRE(VALID_RWLOCK(rwl));

	switch (type) {
	case isc_rwlocktype_read:
		return (isc__rwlock_shared_trylock(rwl));
		break;
	case isc_rwlocktype_write:
		return (isc__rwlock_exclusive_trylock(rwl));
		break;
	default:
		INSIST(0);
		ISC_UNREACHABLE();
	}
}

isc_result_t
isc_rwlock_unlock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
	REQUIRE(VALID_RWLOCK(rwl));

	switch (type) {
	case isc_rwlocktype_read:
		isc__rwlock_shared_unlock(rwl);
		break;
	case isc_rwlocktype_write:
		isc__rwlock_exclusive_unlock(rwl);
		break;
	default:
		INSIST(0);
		ISC_UNREACHABLE();
	}

	return (ISC_R_SUCCESS);
}

/* Try to atomically upgrade a held read lock to a write lock. */
isc_result_t
isc_rwlock_tryupgrade(isc_rwlock_t *rwl) {
	return (isc__rwlock_shared_tryupgrade(rwl));
}

/* Convert a held write lock to a read lock without a release window. */
void
isc_rwlock_downgrade(isc_rwlock_t *rwl) {
	isc__rwlock_exclusive_downgrade(rwl);
}
#elif USE_PTHREAD_RWLOCK
#include <errno.h>
#include <pthread.h>
@@ -138,32 +790,9 @@ isc_rwlock_destroy(isc_rwlock_t *rwl) {
#endif /* ifndef RWLOCK_DEFAULT_WRITE_QUOTA */
#ifndef RWLOCK_MAX_ADAPTIVE_COUNT
#define RWLOCK_MAX_ADAPTIVE_COUNT 100
#define RWLOCK_MAX_ADAPTIVE_COUNT 2000
#endif /* ifndef RWLOCK_MAX_ADAPTIVE_COUNT */
#if defined(_MSC_VER)
#include <intrin.h>
#define isc_rwlock_pause() YieldProcessor()
#elif defined(__x86_64__)
#include <immintrin.h>
#define isc_rwlock_pause() _mm_pause()
#elif defined(__i386__)
#define isc_rwlock_pause() __asm__ __volatile__("rep; nop")
#elif defined(__ia64__)
#define isc_rwlock_pause() __asm__ __volatile__("hint @pause")
#elif defined(__arm__) && HAVE_ARM_YIELD
#define isc_rwlock_pause() __asm__ __volatile__("yield")
#elif defined(sun) && (defined(__sparc) || defined(__sparc__))
#define isc_rwlock_pause() smt_pause()
#elif (defined(__sparc) || defined(__sparc__)) && HAVE_SPARC_PAUSE
#define isc_rwlock_pause() __asm__ __volatile__("pause")
#elif defined(__ppc__) || defined(_ARCH_PPC) || defined(_ARCH_PWR) || \
defined(_ARCH_PWR2) || defined(_POWER)
#define isc_rwlock_pause() __asm__ volatile("or 27,27,27")
#else /* if defined(_MSC_VER) */
#define isc_rwlock_pause()
#endif /* if defined(_MSC_VER) */
static isc_result_t
isc__rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type);
@@ -423,20 +1052,28 @@ isc__rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
isc_result_t
isc_rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
int32_t cnt = 0;
int32_t spins = atomic_load_acquire(&rwl->spins) * 2 + 10;
int32_t max_cnt = ISC_MAX(spins, RWLOCK_MAX_ADAPTIVE_COUNT);
isc_result_t result = ISC_R_SUCCESS;
uint32_t cnt = 0;
uint32_t update;
const uint32_t cachedspins = atomic_load_acquire(&rwl->spins);
const uint32_t spins = cachedspins * 2 + 10;
const uint32_t max_cnt = ISC_MIN(spins, RWLOCK_MAX_ADAPTIVE_COUNT);
do {
if (cnt++ >= max_cnt) {
while (isc_rwlock_trylock(rwl, type) != ISC_R_SUCCESS) {
if (ISC_LIKELY(cnt < max_cnt)) {
cnt++;
isc_pause(1);
} else {
result = isc__rwlock_lock(rwl, type);
break;
}
isc_rwlock_pause();
} while (isc_rwlock_trylock(rwl, type) != ISC_R_SUCCESS);
atomic_fetch_add_release(&rwl->spins, (cnt - spins) / 8);
}
/*
* C99 integer division rounds towards 0, but we want a real 'floor'
* here - otherwise we will never drop to anything below 7.
*/
update = ((cnt - cachedspins + 9) / 8) - 1;
atomic_fetch_add_release(&rwl->spins, update);
return (result);
}

View File

@@ -30,6 +30,7 @@ TESTS = \
lex_test \
md_test \
mem_test \
mutex_test \
netaddr_test \
netmgr_test \
parse_test \
@@ -39,6 +40,7 @@ TESTS = \
random_test \
regex_test \
result_test \
rwlock_test \
safe_test \
siphash_test \
sockaddr_test \

259
lib/isc/tests/mutex_test.c Normal file
View File

@@ -0,0 +1,259 @@
/*
* Copyright (C) Internet Systems Consortium, Inc. ("ISC")
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, you can obtain one at https://mozilla.org/MPL/2.0/.
*
* See the COPYRIGHT file distributed with this work for additional
* information regarding copyright ownership.
*/
#if HAVE_CMOCKA
#include <fcntl.h>
#include <sched.h> /* IWYU pragma: keep */
#include <setjmp.h>
#include <stdarg.h>
#include <stddef.h>
#include <stdlib.h>
#include <unistd.h>
#define UNIT_TESTING
#include <cmocka.h>
#include <isc/atomic.h>
#include <isc/file.h>
#include <isc/mem.h>
#include <isc/mutex.h>
#include <isc/os.h>
#include <isc/pause.h>
#include <isc/print.h>
#include <isc/result.h>
#include <isc/stdio.h>
#include <isc/thread.h>
#include <isc/time.h>
#include <isc/util.h>
#include "isctest.h"
#define LOOPS 1000
#define DELAY_LOOP 100
/* Worker-thread count: defaults to CPU count, ISC_TASK_WORKERS overrides. */
static unsigned int workers = 0;

static int
_setup(void **state) {
	isc_result_t result;
	char *p;

	if (workers == 0) {
		workers = isc_os_ncpus();
	}
	p = getenv("ISC_TASK_WORKERS");
	if (p != NULL) {
		workers = atoi(p);
	}
	/* atoi() of a malformed value yields 0; refuse to run with that. */
	INSIST(workers != 0);

	UNUSED(state);

	result = isc_test_begin(NULL, true, workers);
	assert_int_equal(result, ISC_R_SUCCESS);

	return (0);
}

static int
_teardown(void **state) {
	UNUSED(state);

	isc_test_end();

	return (0);
}
/*
 * Smoke test: LOOPS single-threaded lock/unlock cycles with a short
 * busy pause inside the critical section.
 */
static void
isc_mutex_test(void **state) {
	isc_mutex_t lock;

	UNUSED(state);

	isc_mutex_init(&lock);

	for (size_t i = 0; i < LOOPS; i++) {
		isc_mutex_lock(&lock);
		isc_pause(DELAY_LOOP);
		isc_mutex_unlock(&lock);
	}

	isc_mutex_destroy(&lock);
}
#define ITERS 20
#define DC 200
#define MIN 800
#define MAX 1600
static size_t shared_counter = 0;
static size_t expected_counter = SIZE_MAX;
static isc_mutex_t lock;
static pthread_mutex_t mutex;
/*
 * Benchmark worker for the raw pthread_mutex case: performs LOOPS
 * locked read-pause-write increments of shared_counter; 'arg' points
 * to the pause length used between iterations.  The pause inside the
 * critical section widens the race window so a broken mutex makes
 * the final count visibly wrong.
 */
static isc_threadresult_t
pthread_mutex_thread(isc_threadarg_t arg) {
	size_t pause_iters = *(size_t *)arg;

	for (size_t iter = 0; iter < LOOPS; iter++) {
		pthread_mutex_lock(&mutex);
		size_t snapshot = shared_counter;
		isc_pause(DELAY_LOOP);
		shared_counter = snapshot + 1;
		pthread_mutex_unlock(&mutex);

		isc_pause(pause_iters);
	}

	return ((isc_threadresult_t)0);
}
/*
 * Benchmark worker for the isc_mutex case: same locked
 * read-pause-write increment pattern as pthread_mutex_thread(), so
 * the two timings are directly comparable.
 */
static isc_threadresult_t
isc_mutex_thread(isc_threadarg_t arg) {
	size_t cont = *(size_t *)arg;

	for (size_t i = 0; i < LOOPS; i++) {
		isc_mutex_lock(&lock);
		size_t v = shared_counter;
		isc_pause(DELAY_LOOP);
		shared_counter = v + 1;
		isc_mutex_unlock(&lock);

		isc_pause(cont);
	}

	return ((isc_threadresult_t)0);
}
/*
 * Benchmark isc_mutex against raw pthread_mutex: for each of ITERS
 * rounds, sweep the inter-lock pause 'cont' across MIN..MAX in DC
 * steps (direction alternating via the sign flip of 'dc'), running
 * 'workers' threads that each perform LOOPS locked increments of
 * shared_counter; the final count is checked against the exact
 * expected total, so any lost update fails the test.
 */
static void
isc_mutex_benchmark(void **state) {
	isc_thread_t *threads = isc_mem_get(test_mctx,
					    sizeof(*threads) * workers);
	isc_time_t ts1, ts2;
	double t;
	isc_result_t result;
	int dc;
	size_t cont;
	int r;

	UNUSED(state);

	memset(threads, 0, sizeof(*threads) * workers);

	/* rounds * threads * loops * number-of-pause-steps increments. */
	expected_counter = ITERS * workers * LOOPS * ((MAX - MIN) / DC + 1);

	/* PTHREAD MUTEX */

	r = pthread_mutex_init(&mutex, NULL);
	assert_int_not_equal(r, -1);

	result = isc_time_now_hires(&ts1);
	assert_int_equal(result, ISC_R_SUCCESS);

	shared_counter = 0;
	dc = DC;
	for (size_t l = 0; l < ITERS; l++) {
		for (cont = (dc > 0) ? MIN : MAX; cont <= MAX && cont >= MIN;
		     cont += dc) {
			for (size_t i = 0; i < workers; i++) {
				isc_thread_create(pthread_mutex_thread, &cont,
						  &threads[i]);
			}
			for (size_t i = 0; i < workers; i++) {
				isc_thread_join(threads[i], NULL);
			}
		}
		dc = -dc;
	}
	assert_int_equal(shared_counter, expected_counter);

	result = isc_time_now_hires(&ts2);
	assert_int_equal(result, ISC_R_SUCCESS);

	t = isc_time_microdiff(&ts2, &ts1);

	printf("[ TIME ] isc_mutex_benchmark: %zu pthread_mutex loops in "
	       "%u threads, %2.3f seconds, %2.3f calls/second\n",
	       shared_counter, workers, t / 1000000.0,
	       shared_counter / (t / 1000000.0));

	r = pthread_mutex_destroy(&mutex);
	assert_int_not_equal(r, -1);

	/* ISC MUTEX */

	isc_mutex_init(&lock);

	result = isc_time_now_hires(&ts1);
	assert_int_equal(result, ISC_R_SUCCESS);

	dc = DC;
	shared_counter = 0;
	for (size_t l = 0; l < ITERS; l++) {
		for (cont = (dc > 0) ? MIN : MAX; cont <= MAX && cont >= MIN;
		     cont += dc) {
			for (size_t i = 0; i < workers; i++) {
				isc_thread_create(isc_mutex_thread, &cont,
						  &threads[i]);
			}
			for (size_t i = 0; i < workers; i++) {
				isc_thread_join(threads[i], NULL);
			}
		}
		dc = -dc;
	}
	assert_int_equal(shared_counter, expected_counter);

	result = isc_time_now_hires(&ts2);
	assert_int_equal(result, ISC_R_SUCCESS);

	t = isc_time_microdiff(&ts2, &ts1);

	printf("[ TIME ] isc_mutex_benchmark: %zu isc_mutex loops in %u "
	       "threads, %2.3f seconds, %2.3f calls/second\n",
	       shared_counter, workers, t / 1000000.0,
	       shared_counter / (t / 1000000.0));

	/*
	 * Fix: destroy the isc_mutex 'lock' initialized above; the
	 * original destroyed 'mutex' (the pthread mutex) a second time
	 * and leaked 'lock'.
	 */
	isc_mutex_destroy(&lock);

	isc_mem_put(test_mctx, threads, sizeof(*threads) * workers);
}
/*
* Main
*/
/*
 * Test driver.  The benchmark case is compiled out under
 * ThreadSanitizer (presumably because its timings are meaningless
 * with TSan instrumentation — confirm intent).
 */
int
main(void) {
	const struct CMUnitTest tests[] = {
		cmocka_unit_test_setup_teardown(isc_mutex_test, _setup,
						_teardown),
#if !defined(__SANITIZE_THREAD__)
		cmocka_unit_test_setup_teardown(isc_mutex_benchmark, _setup,
						_teardown),
#endif /* __SANITIZE_THREAD__ */
	};

	return (cmocka_run_group_tests(tests, NULL, NULL));
}
#else /* HAVE_CMOCKA */
#include <stdio.h>
int
main(void) {
printf("1..0 # Skipped: cmocka not available\n");
return (SKIPPED_TEST_EXIT_CODE);
}
#endif /* if HAVE_CMOCKA */

433
lib/isc/tests/rwlock_test.c Normal file
View File

@@ -0,0 +1,433 @@
/*
* Copyright (C) Internet Systems Consortium, Inc. ("ISC")
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, you can obtain one at https://mozilla.org/MPL/2.0/.
*
* See the COPYRIGHT file distributed with this work for additional
* information regarding copyright ownership.
*/
#if HAVE_CMOCKA
#include <fcntl.h>
#include <sched.h> /* IWYU pragma: keep */
#include <semaphore.h>
#include <setjmp.h>
#include <stdarg.h>
#include <stddef.h>
#include <stdlib.h>
#include <unistd.h>
#define UNIT_TESTING
#include <cmocka.h>
#include <isc/atomic.h>
#include <isc/file.h>
#include <isc/mem.h>
#include <isc/os.h>
#include <isc/pause.h>
#include <isc/print.h>
#include <isc/random.h>
#include <isc/result.h>
#include <isc/rwlock.h>
#include <isc/stdio.h>
#include <isc/thread.h>
#include <isc/time.h>
#include <isc/util.h>
#include "isctest.h"
#define LOOPS 100000
#define DELAY_LOOP 1
static unsigned int workers = 0;
static isc_rwlock_t rwlock;
static pthread_rwlock_t prwlock;
static sem_t sem1;
static sem_t sem2;
#define ITERS 20
#define DC 200
#define MIN 800
#define MAX 1600
static size_t shared_counter = 0;
static size_t expected_counter = SIZE_MAX;
static uint8_t boundary = 0;
static uint8_t rnd[LOOPS];
static int
_setup(void **state) {
	isc_result_t result;
	char *p;

	/* Default to one worker per CPU; ISC_TASK_WORKERS overrides. */
	if (workers == 0) {
		workers = isc_os_ncpus();
	}
	p = getenv("ISC_TASK_WORKERS");
	if (p != NULL) {
		workers = atoi(p);
	}
	INSIST(workers != 0);

	UNUSED(state);

	result = isc_test_begin(NULL, true, workers);
	assert_int_equal(result, ISC_R_SUCCESS);

	/* Pre-generate random pause lengths (0..99) used by the workers. */
	for (size_t i = 0; i < sizeof(rnd); i++) {
		rnd[i] = (uint8_t)isc_random_uniform(100);
	}

	return (0);
}

static int
_teardown(void **state) {
	UNUSED(state);

	isc_test_end();

	return (0);
}
/*
 * Per-test fixture: initialize the rwlock under test, the two
 * handshake semaphores, and the reference pthread rwlock.
 */
static int
rwlock_setup(void **state) {
	int r;

	UNUSED(state);

	isc_rwlock_init(&rwlock, 0, 0);

	/* sem_init() returns -1 and sets errno on failure. */
	if (sem_init(&sem1, 0, 0) == -1) {
		return (errno);
	}
	if (sem_init(&sem2, 0, 0) == -1) {
		return (errno);
	}
	/*
	 * Fix: pthread_rwlock_init() returns an error number on
	 * failure — it never returns -1 and does not set errno, so the
	 * old '== -1' check could not detect failure.
	 */
	r = pthread_rwlock_init(&prwlock, NULL);
	if (r != 0) {
		return (r);
	}

	return (0);
}
/*
 * Per-test fixture teardown, mirroring rwlock_setup() in reverse
 * order.
 */
static int
rwlock_teardown(void **state) {
	int r;

	UNUSED(state);

	/*
	 * Fix: pthread_rwlock_destroy() returns an error number on
	 * failure (never -1, errno untouched); the old '== -1' check
	 * could not detect failure.
	 */
	r = pthread_rwlock_destroy(&prwlock);
	if (r != 0) {
		return (r);
	}
	/* sem_destroy() does return -1 and sets errno on failure. */
	if (sem_destroy(&sem2) == -1) {
		return (errno);
	}
	if (sem_destroy(&sem1) == -1) {
		return (errno);
	}
	isc_rwlock_destroy(&rwlock);

	return (0);
}
/*
* Simple single-threaded read lock/unlock test
*/
static void
isc_rwlock_rdlock_test(void **state) {
UNUSED(state);
isc_rwlock_lock(&rwlock, isc_rwlocktype_read);
isc_pause(DELAY_LOOP);
isc_rwlock_unlock(&rwlock, isc_rwlocktype_read);
}
/*
* Simple single-threaded write lock/unlock test
*/
static void
isc_rwlock_wrlock_test(void **state) {
UNUSED(state);
isc_rwlock_lock(&rwlock, isc_rwlocktype_write);
isc_pause(DELAY_LOOP);
isc_rwlock_unlock(&rwlock, isc_rwlocktype_write);
}
/*
* Simple single-threaded lock/downgrade/unlock test
*/
static void
isc_rwlock_downgrade_test(void **state) {
UNUSED(state);
isc_rwlock_lock(&rwlock, isc_rwlocktype_write);
isc_rwlock_downgrade(&rwlock);
isc_rwlock_unlock(&rwlock, isc_rwlocktype_read);
}
/*
 * Simple single-threaded lock/tryupgrade/unlock test
 *
 * NOTE(review): the current (WIP) rwlock implementation is expected to
 * refuse read->write upgrades, hence the ISC_R_LOCKBUSY assertion; the
 * commented-out lines below are the expectation for an implementation
 * that supports upgrades — confirm which behavior is intended before
 * this leaves WIP status.
 */
static void
isc_rwlock_tryupgrade_test(void **state) {
	isc_result_t result;
	UNUSED(state);
	isc_rwlock_lock(&rwlock, isc_rwlocktype_read);
	result = isc_rwlock_tryupgrade(&rwlock);
	/* assert_int_equal(result, ISC_R_SUCCESS); */
	/* isc_rwlock_unlock(&rwlock, isc_rwlocktype_write); */
	assert_int_equal(result, ISC_R_LOCKBUSY);
	isc_rwlock_unlock(&rwlock, isc_rwlocktype_read);
}
/*
 * Lock-holder thread for isc_rwlock_trylock_test(): first holds the
 * write lock, then a read lock, using sem1 (post) / sem2 (wait) to
 * rendezvous with trylock_thread2() at each phase boundary.
 */
static isc_threadresult_t
trylock_thread1(isc_threadarg_t arg) {
	UNUSED(arg);
	/* Phase 1: hold the write lock while the peer tries a read lock. */
	isc_rwlock_lock(&rwlock, isc_rwlocktype_write);
	sem_post(&sem1);
	sem_wait(&sem2);
	isc_rwlock_unlock(&rwlock, isc_rwlocktype_write);
	/* Phase 2: hold a read lock so the peer's read trylock can share. */
	isc_rwlock_lock(&rwlock, isc_rwlocktype_read);
	sem_post(&sem1);
	sem_wait(&sem2);
	isc_rwlock_unlock(&rwlock, isc_rwlocktype_read);
	return ((isc_threadresult_t)0);
}
/*
 * Trylock thread for isc_rwlock_trylock_test(): waits on sem1 for each
 * phase set up by trylock_thread1() and checks the trylock outcome —
 * a read trylock fails (LOCKBUSY) while the peer holds the write lock,
 * and succeeds while the peer holds only a read lock.
 */
static isc_threadresult_t
trylock_thread2(isc_threadarg_t arg) {
	isc_result_t result;
	UNUSED(arg);
	/* Phase 1: peer holds the write lock; read trylock must fail. */
	sem_wait(&sem1);
	result = isc_rwlock_trylock(&rwlock, isc_rwlocktype_read);
	assert_int_equal(result, ISC_R_LOCKBUSY);
	sem_post(&sem2);
	/* Phase 2: peer holds a read lock; read trylock must succeed. */
	sem_wait(&sem1);
	result = isc_rwlock_trylock(&rwlock, isc_rwlocktype_read);
	assert_int_equal(result, ISC_R_SUCCESS);
	sem_post(&sem2);
	isc_rwlock_unlock(&rwlock, isc_rwlocktype_read);
	return ((isc_threadresult_t)0);
}
/*
 * Exercise isc_rwlock_trylock() against a peer thread that alternately
 * holds the lock for writing and for reading (see trylock_thread1/2).
 */
static void
isc_rwlock_trylock_test(void **state) {
	isc_thread_t holder;
	isc_thread_t trier;

	UNUSED(state);

	isc_thread_create(trylock_thread1, NULL, &holder);
	isc_thread_create(trylock_thread2, NULL, &trier);

	isc_thread_join(trier, NULL);
	isc_thread_join(holder, NULL);
}
/*
 * Benchmark worker using the native pthread rwlock: on each iteration,
 * take the write lock and increment shared_counter when rnd[i] falls
 * below the configured write-percentage boundary; otherwise take the
 * read lock.  A short pause is issued while the lock is held.
 */
static isc_threadresult_t
pthread_rwlock_thread(isc_threadarg_t arg) {
	UNUSED(arg);

	for (size_t i = 0; i < LOOPS; i++) {
		if (rnd[i] >= boundary) {
			/* Reader: just hold the lock briefly. */
			pthread_rwlock_rdlock(&prwlock);
			isc_pause(DELAY_LOOP);
			pthread_rwlock_unlock(&prwlock);
		} else {
			/* Writer: read-modify-write under the lock. */
			pthread_rwlock_wrlock(&prwlock);
			size_t old = shared_counter;
			isc_pause(DELAY_LOOP);
			shared_counter = old + 1;
			pthread_rwlock_unlock(&prwlock);
		}
	}
	return ((isc_threadresult_t)0);
}
/*
 * Benchmark worker using the isc rwlock: identical access pattern to
 * pthread_rwlock_thread(), so the two phases of the benchmark are
 * directly comparable.
 */
static isc_threadresult_t
isc_rwlock_thread(isc_threadarg_t arg) {
	UNUSED(arg);

	for (size_t i = 0; i < LOOPS; i++) {
		if (rnd[i] >= boundary) {
			/* Reader: just hold the lock briefly. */
			isc_rwlock_lock(&rwlock, isc_rwlocktype_read);
			isc_pause(DELAY_LOOP);
			isc_rwlock_unlock(&rwlock, isc_rwlocktype_read);
		} else {
			/* Writer: read-modify-write under the lock. */
			isc_rwlock_lock(&rwlock, isc_rwlocktype_write);
			size_t old = shared_counter;
			isc_pause(DELAY_LOOP);
			shared_counter = old + 1;
			isc_rwlock_unlock(&rwlock, isc_rwlocktype_write);
		}
	}
	return ((isc_threadresult_t)0);
}
static void
isc__rwlock_benchmark(isc_thread_t *threads, unsigned int nthreads, uint8_t pct) {
isc_time_t ts1, ts2;
double t;
isc_result_t result;
int dc;
size_t cont;
expected_counter = ITERS * nthreads * LOOPS * ((MAX - MIN) / DC + 1);
boundary = pct;
/* PTHREAD RWLOCK */
result = isc_time_now_hires(&ts1);
assert_int_equal(result, ISC_R_SUCCESS);
shared_counter = 0;
dc = DC;
for (size_t l = 0; l < ITERS; l++) {
for (cont = (dc > 0) ? MIN : MAX; cont <= MAX && cont >= MIN;
cont += dc) {
for (size_t i = 0; i < nthreads; i++) {
isc_thread_create(pthread_rwlock_thread, &cont,
&threads[i]);
}
for (size_t i = 0; i < nthreads; i++) {
isc_thread_join(threads[i], NULL);
}
}
dc = -dc;
}
result = isc_time_now_hires(&ts2);
assert_int_equal(result, ISC_R_SUCCESS);
t = isc_time_microdiff(&ts2, &ts1);
printf("[ TIME ] isc_rwlock_benchmark: %zu pthread_rwlock loops in "
"%u threads, %2.3f%% writes, %2.3f seconds, %2.3f "
"calls/second\n",
expected_counter, nthreads,
(double)shared_counter * 100 / expected_counter, t / 1000000.0,
expected_counter / (t / 1000000.0));
/* ISC RWLOCK */
result = isc_time_now_hires(&ts1);
assert_int_equal(result, ISC_R_SUCCESS);
dc = DC;
shared_counter = 0;
for (size_t l = 0; l < ITERS; l++) {
for (cont = (dc > 0) ? MIN : MAX; cont <= MAX && cont >= MIN;
cont += dc) {
for (size_t i = 0; i < nthreads; i++) {
isc_thread_create(isc_rwlock_thread, &cont,
&threads[i]);
}
for (size_t i = 0; i < nthreads; i++) {
isc_thread_join(threads[i], NULL);
}
}
dc = -dc;
}
result = isc_time_now_hires(&ts2);
assert_int_equal(result, ISC_R_SUCCESS);
t = isc_time_microdiff(&ts2, &ts1);
printf("[ TIME ] isc_rwlock_benchmark: %zu isc_rwlock loops in "
"%u threads, %2.3f%% writes, %2.3f seconds, %2.3f "
"calls/second\n",
expected_counter, nthreads,
(double)shared_counter * 100 / expected_counter, t / 1000000.0,
expected_counter / (t / 1000000.0));
}
static void
isc_rwlock_benchmark(void **state) {
isc_thread_t *threads = isc_mem_get(test_mctx,
sizeof(*threads) * workers);
UNUSED(state);
memset(threads, 0, sizeof(*threads) * workers);
for (unsigned int nthreads = workers; nthreads > 0; nthreads /= 2) {
isc__rwlock_benchmark(threads, nthreads, 0);
isc__rwlock_benchmark(threads, nthreads, 1);
isc__rwlock_benchmark(threads, nthreads, 10);
isc__rwlock_benchmark(threads, nthreads, 50);
isc__rwlock_benchmark(threads, nthreads, 90);
isc__rwlock_benchmark(threads, nthreads, 99);
isc__rwlock_benchmark(threads, nthreads, 100);
}
isc_mem_put(test_mctx, threads, sizeof(*threads) * workers);
}
/*
 * Main: register and run the rwlock test suite.
 */
int
main(void) {
	const struct CMUnitTest tests[] = {
/* The benchmark is excluded when building under ThreadSanitizer. */
#if !defined(__SANITIZE_THREAD__)
		cmocka_unit_test_setup_teardown(isc_rwlock_benchmark,
						rwlock_setup, rwlock_teardown),
#endif /* __SANITIZE_THREAD__ */
		cmocka_unit_test_setup_teardown(isc_rwlock_rdlock_test,
						rwlock_setup, rwlock_teardown),
		cmocka_unit_test_setup_teardown(isc_rwlock_wrlock_test,
						rwlock_setup, rwlock_teardown),
		cmocka_unit_test_setup_teardown(isc_rwlock_downgrade_test,
						rwlock_setup, rwlock_teardown),
		cmocka_unit_test_setup_teardown(isc_rwlock_tryupgrade_test,
						rwlock_setup, rwlock_teardown),
		cmocka_unit_test_setup_teardown(isc_rwlock_trylock_test,
						rwlock_setup, rwlock_teardown),
	};
	return (cmocka_run_group_tests(tests, _setup, _teardown));
}
#else /* HAVE_CMOCKA */
#include <stdio.h>
/* Fallback when cmocka is unavailable: emit a skipped TAP plan. */
int
main(void) {
	printf("1..0 # Skipped: cmocka not available\n");
	return (SKIPPED_TEST_EXIT_CODE);
}
#endif /* if HAVE_CMOCKA */