Move the setting of sock->write_timeout into the async_*send functions
Setting sock->write_timeout from the TCP, TCPDNS, and TLSDNS send functions could lead to a (harmless) data race when the value was being set for the first time and isc_nm_send() was called from a thread that did not match the socket being sent to. Move the setting of sock->write_timeout into the matching async function, which is always executed on the socket's own thread.
This commit is contained in:
@@ -1060,13 +1060,6 @@ isc__nm_tcp_send(isc_nmhandle_t *handle, const isc_region_t *region,
|
||||
uvreq->cb.send = cb;
|
||||
uvreq->cbarg = cbarg;
|
||||
|
||||
if (sock->write_timeout == 0) {
|
||||
sock->write_timeout =
|
||||
(atomic_load(&sock->keepalive)
|
||||
? atomic_load(&sock->mgr->keepalive)
|
||||
: atomic_load(&sock->mgr->idle));
|
||||
}
|
||||
|
||||
ievent = isc__nm_get_netievent_tcpsend(sock->mgr, sock, uvreq);
|
||||
isc__nm_maybe_enqueue_ievent(&sock->mgr->workers[sock->tid],
|
||||
(isc__netievent_t *)ievent);
|
||||
@@ -1110,6 +1103,13 @@ isc__nm_async_tcpsend(isc__networker_t *worker, isc__netievent_t *ev0) {
|
||||
REQUIRE(sock->tid == isc_nm_tid());
|
||||
UNUSED(worker);
|
||||
|
||||
if (sock->write_timeout == 0) {
|
||||
sock->write_timeout =
|
||||
(atomic_load(&sock->keepalive)
|
||||
? atomic_load(&sock->mgr->keepalive)
|
||||
: atomic_load(&sock->mgr->idle));
|
||||
}
|
||||
|
||||
result = tcp_send_direct(sock, uvreq);
|
||||
if (result != ISC_R_SUCCESS) {
|
||||
isc__nm_incstats(sock, STATID_SENDFAIL);
|
||||
|
||||
@@ -1085,13 +1085,6 @@ isc__nm_tcpdns_send(isc_nmhandle_t *handle, isc_region_t *region,
|
||||
uvreq->cb.send = cb;
|
||||
uvreq->cbarg = cbarg;
|
||||
|
||||
if (sock->write_timeout == 0) {
|
||||
sock->write_timeout =
|
||||
(atomic_load(&sock->keepalive)
|
||||
? atomic_load(&sock->mgr->keepalive)
|
||||
: atomic_load(&sock->mgr->idle));
|
||||
}
|
||||
|
||||
ievent = isc__nm_get_netievent_tcpdnssend(sock->mgr, sock, uvreq);
|
||||
isc__nm_maybe_enqueue_ievent(&sock->mgr->workers[sock->tid],
|
||||
(isc__netievent_t *)ievent);
|
||||
@@ -1143,6 +1136,13 @@ isc__nm_async_tcpdnssend(isc__networker_t *worker, isc__netievent_t *ev0) {
|
||||
sock = ievent->sock;
|
||||
uvreq = ievent->req;
|
||||
|
||||
if (sock->write_timeout == 0) {
|
||||
sock->write_timeout =
|
||||
(atomic_load(&sock->keepalive)
|
||||
? atomic_load(&sock->mgr->keepalive)
|
||||
: atomic_load(&sock->mgr->idle));
|
||||
}
|
||||
|
||||
uv_buf_t bufs[2] = { { .base = uvreq->tcplen, .len = 2 },
|
||||
{ .base = uvreq->uvbuf.base,
|
||||
.len = uvreq->uvbuf.len } };
|
||||
|
||||
@@ -1672,13 +1672,6 @@ isc__nm_tlsdns_send(isc_nmhandle_t *handle, isc_region_t *region,
|
||||
uvreq->cb.send = cb;
|
||||
uvreq->cbarg = cbarg;
|
||||
|
||||
if (sock->write_timeout == 0) {
|
||||
sock->write_timeout =
|
||||
(atomic_load(&sock->keepalive)
|
||||
? atomic_load(&sock->mgr->keepalive)
|
||||
: atomic_load(&sock->mgr->idle));
|
||||
}
|
||||
|
||||
ievent = isc__nm_get_netievent_tlsdnssend(sock->mgr, sock, uvreq);
|
||||
isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
|
||||
(isc__netievent_t *)ievent);
|
||||
@@ -1701,6 +1694,13 @@ isc__nm_async_tlsdnssend(isc__networker_t *worker, isc__netievent_t *ev0) {
|
||||
REQUIRE(sock->type == isc_nm_tlsdnssocket);
|
||||
REQUIRE(sock->tid == isc_nm_tid());
|
||||
|
||||
if (sock->write_timeout == 0) {
|
||||
sock->write_timeout =
|
||||
(atomic_load(&sock->keepalive)
|
||||
? atomic_load(&sock->mgr->keepalive)
|
||||
: atomic_load(&sock->mgr->idle));
|
||||
}
|
||||
|
||||
result = tlsdns_send_direct(sock, uvreq);
|
||||
if (result != ISC_R_SUCCESS) {
|
||||
isc__nm_incstats(sock, STATID_SENDFAIL);
|
||||
|
||||
Reference in New Issue
Block a user