backend: increase connection count before connect #4154

Merged — 1 commit merged on Sep 23, 2024
36 changes: 11 additions & 25 deletions bin/varnishd/cache/cache_backend.c
@@ -187,16 +187,6 @@ vbe_connwait_signal_locked(const struct backend *bp)
}
}

static void
vbe_connwait_dequeue_locked(struct backend *bp, struct connwait *cw)
{
Lck_AssertHeld(bp->director->mtx);
assert(cw->cw_state == CW_QUEUED);
VTAILQ_REMOVE(&bp->cw_head, cw, cw_list);
vbe_connwait_signal_locked(bp);
cw->cw_state = CW_DEQUEUED;
}

static void
vbe_connwait_fini(struct connwait *cw)
{
@@ -260,13 +250,17 @@ vbe_dir_getfd(VRT_CTX, struct worker *wrk, VCL_BACKEND dir, struct backend *bp,
err = Lck_CondWaitUntil(&cw->cw_cond, bp->director->mtx,
wait_end);
} while (err == EINTR);
assert(cw->cw_state == CW_QUEUED);
VTAILQ_REMOVE(&bp->cw_head, cw, cw_list);
cw->cw_state = CW_DEQUEUED;
bp->cw_count--;
if ((err != 0 && BE_BUSY(bp)) || !VRT_Healthy(ctx, dir, NULL)) {
VTAILQ_REMOVE(&bp->cw_head, cw, cw_list);
VSC_C_main->backend_wait_fail++;
cw->cw_state = CW_BE_BUSY;
}
}
if (cw->cw_state != CW_BE_BUSY)
bp->n_conn++;
Lck_Unlock(bp->director->mtx);

if (cw->cw_state == CW_BE_BUSY) {
@@ -284,11 +278,10 @@ vbe_dir_getfd(VRT_CTX, struct worker *wrk, VCL_BACKEND dir, struct backend *bp,
if (bo->htc == NULL) {
VSLb(bo->vsl, SLT_FetchError, "out of workspace");
/* XXX: counter ? */
if (cw->cw_state == CW_QUEUED) {
Lck_Lock(bp->director->mtx);
vbe_connwait_dequeue_locked(bp, cw);
Lck_Unlock(bp->director->mtx);
}
Lck_Lock(bp->director->mtx);
bp->n_conn--;
vbe_connwait_signal_locked(bp);
Lck_Unlock(bp->director->mtx);
vbe_connwait_fini(cw);
return (NULL);
}
@@ -300,17 +293,14 @@ vbe_dir_getfd(VRT_CTX, struct worker *wrk, VCL_BACKEND dir, struct backend *bp,
if (pfd == NULL) {
Lck_Lock(bp->director->mtx);
VBE_Connect_Error(bp->vsc, err);
bp->n_conn--;
vbe_connwait_signal_locked(bp);
Lck_Unlock(bp->director->mtx);
VSLb(bo->vsl, SLT_FetchError,
"backend %s: fail errno %d (%s)",
VRT_BACKEND_string(dir), err, VAS_errtxt(err));
VSC_C_main->backend_fail++;
bo->htc = NULL;
if (cw->cw_state == CW_QUEUED) {
Lck_Lock(bp->director->mtx);
vbe_connwait_dequeue_locked(bp, cw);
Lck_Unlock(bp->director->mtx);
}
vbe_connwait_fini(cw);
return (NULL);
}
@@ -321,12 +311,8 @@ vbe_dir_getfd(VRT_CTX, struct worker *wrk, VCL_BACKEND dir, struct backend *bp,
assert(*fdp >= 0);

Lck_Lock(bp->director->mtx);
bp->n_conn++;
bp->vsc->conn++;
bp->vsc->req++;
if (cw->cw_state == CW_QUEUED)
vbe_connwait_dequeue_locked(bp, cw);

Lck_Unlock(bp->director->mtx);

CHECK_OBJ_NOTNULL(bo->htc->doclose, STREAM_CLOSE_MAGIC);
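For readers tracing the change above: the connection count is now reserved under the director mutex before the connect attempt and released again (signalling one queued waiter) if the attempt fails, rather than being incremented only after a successful connect. A minimal sketch of that pattern follows; it is illustrative only — the struct and helper names (be_getfd, try_connect) are hypothetical and not part of the Varnish source.

/*
 * Illustrative sketch only, not Varnish code.  It shows the accounting
 * the diff above switches to: increment the count under the lock before
 * connecting, and on failure decrement it again and wake one waiter.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct be {
	pthread_mutex_t	mtx;
	pthread_cond_t	cond;		/* waiters queued on max_connections */
	unsigned	n_conn;		/* connections counted, incl. in-progress */
};

/* Stub for the real connect attempt; always fails in this sketch. */
static bool
try_connect(void)
{
	return (false);
}

static bool
be_getfd(struct be *bp)
{
	pthread_mutex_lock(&bp->mtx);
	bp->n_conn++;			/* reserve the slot before connecting */
	pthread_mutex_unlock(&bp->mtx);

	if (!try_connect()) {
		pthread_mutex_lock(&bp->mtx);
		bp->n_conn--;		/* give the slot back ... */
		pthread_cond_signal(&bp->cond);	/* ... and wake one waiter */
		pthread_mutex_unlock(&bp->mtx);
		return (false);
	}
	return (true);
}

int
main(void)
{
	struct be bp = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0 };

	printf("connected: %d, n_conn: %u\n", be_getfd(&bp), bp.n_conn);
	return (0);
}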
46 changes: 46 additions & 0 deletions bin/varnishtest/tests/b00089.vtc
@@ -0,0 +1,46 @@
varnishtest "connect: blackhole"

# XXX: This test case relies on a way for us to never succeed in connecting
# to a backend. We need the connect syscall to hang forever. For TCP this means
# that the backend must not respond to the three-way handshake initiated by
# Varnish.
#
# One way to achieve this is to use an IP address non-routeable over the
# Internet, but this is not always a bullet-proof solution across all the
# platforms we support. Another downside with this solution is that it sends
# traffic over the local network.
#
# The current test case seems to work on a few supported platforms.
# Comment out the line below to enable this test.

feature cmd false

varnish v1 -vcl {
backend default {
# Non-routeable IPv4 address to simulate connect_timeout
# XXX: $\{blackhole_backend}
.host = "10.255.255.1 80";
.max_connections = 1;
}
} -start

varnish v1 -cli "param.set connect_timeout 2"

client c1 {
txreq -url "/blackhole"
rxresp
expect resp.status == 503
} -start

client c2 {
txreq -url "/bye"
rxresp
expect resp.status == 503
} -start

client c1 -wait
client c2 -wait

varnish v1 -expect MAIN.backend_conn == 0
varnish v1 -expect MAIN.backend_busy == 1
varnish v1 -expect MAIN.backend_fail == 1
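The blackhole technique the comment above describes can be checked in isolation. Below is a small sketch, assuming the same non-routable address 10.255.255.1:80 used by the test (whether packets are silently dropped still depends on the local network): it issues a non-blocking connect(2) and reports whether the attempt is still pending after a two-second timeout, mirroring the connect_timeout used here.

/* Sketch: check that connect(2) to a non-routable address just hangs.
 * Uses 10.255.255.1:80 as in the tests above; on some networks the SYN
 * may be rejected instead of silently dropped. */
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct sockaddr_in sa;
	struct pollfd pfd;
	int fd, r;

	fd = socket(AF_INET, SOCK_STREAM, 0);
	if (fd < 0) {
		perror("socket");
		return (1);
	}
	fcntl(fd, F_SETFL, O_NONBLOCK);

	memset(&sa, 0, sizeof sa);
	sa.sin_family = AF_INET;
	sa.sin_port = htons(80);
	inet_pton(AF_INET, "10.255.255.1", &sa.sin_addr);

	r = connect(fd, (struct sockaddr *)&sa, sizeof sa);
	if (r < 0 && errno != EINPROGRESS) {
		perror("connect");
		close(fd);
		return (1);
	}

	pfd.fd = fd;
	pfd.events = POLLOUT;
	r = poll(&pfd, 1, 2000);	/* 2s, like connect_timeout */
	if (r == 0)
		printf("still connecting after 2s (blackhole works)\n");
	else
		printf("connect completed or failed: poll=%d\n", r);
	close(fd);
	return (0);
}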
49 changes: 49 additions & 0 deletions bin/varnishtest/tests/b00090.vtc
@@ -0,0 +1,49 @@
varnishtest "connect: blackhole with queueing"

# XXX: This test case relies on a way for us to never succeed in connecting
# to a backend. We need the connect syscall to hang forever. For TCP this means
# that the backend must not respond to the three-way handshake initiated by
# Varnish.
#
# One way to achieve this is to use an IP address non-routeable over the
# Internet, but this is not always a bullet-proof solution across all the
# platforms we support. Another downside with this solution is that it sends
# traffic over the local network.
#
# The current test case seems to work on a few supported platforms.
# Comment out the line below to enable this test.

feature cmd false

varnish v1 -vcl {
backend default {
# Non-routeable IPv4 address to simulate connect_timeout
# XXX: $\{blackhole_backend}
.host = "10.255.255.1 80";
.max_connections = 1;
.wait_limit = 1;
.wait_timeout = 0.2s;
}
} -start

varnish v1 -cli "param.set connect_timeout 2"

client c1 {
txreq -url "/blackhole"
rxresp
expect resp.status == 503
} -start

client c2 {
txreq -url "/bye"
rxresp
expect resp.status == 503
} -start

client c1 -wait
client c2 -wait

varnish v1 -expect MAIN.backend_conn == 0
varnish v1 -expect MAIN.backend_busy == 1
varnish v1 -expect MAIN.backend_wait_fail == 1
varnish v1 -expect MAIN.backend_fail == 1