Upstream: cached connections now tested against next_upstream.

Much like normal connections, cached connections are now tested against
u->conf->next_upstream, and u->state->status is now always set.

This makes it possible to disable additional tries even with upstream
keepalive by using "proxy_next_upstream off" (see the configuration
sketch below).
Maxim Dounin 2016-03-28 19:49:52 +03:00
parent f5fff1eda0
commit 5a76856dc2
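
A minimal configuration sketch of the situation the message describes: an
upstream using keepalive connections where "proxy_next_upstream off" should
stop any further tries, including the extra try a failed cached connection
used to get unconditionally. The upstream name, addresses, and ports are
placeholders, not part of the commit.

    upstream backend {
        server 127.0.0.1:8081;
        server 127.0.0.1:8082;

        keepalive 16;
    }

    server {
        listen 8080;

        location / {
            proxy_pass http://backend;

            # required for keepalive connections to the upstream
            proxy_http_version 1.1;
            proxy_set_header Connection "";

            # with this change, "off" also applies to cached connections
            proxy_next_upstream off;
        }
    }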

@@ -3947,42 +3947,36 @@ ngx_http_upstream_next(ngx_http_request_t *r, ngx_http_upstream_t *u,
                       "upstream timed out");
     }
 
-    if (u->peer.cached && ft_type == NGX_HTTP_UPSTREAM_FT_ERROR
-        && (!u->request_sent || !r->request_body_no_buffering))
-    {
-        status = 0;
-
+    if (u->peer.cached && ft_type == NGX_HTTP_UPSTREAM_FT_ERROR) {
         /* TODO: inform balancer instead */
-
         u->peer.tries++;
+    }
 
-    } else {
-        switch (ft_type) {
+    switch (ft_type) {
 
-        case NGX_HTTP_UPSTREAM_FT_TIMEOUT:
-            status = NGX_HTTP_GATEWAY_TIME_OUT;
-            break;
+    case NGX_HTTP_UPSTREAM_FT_TIMEOUT:
+        status = NGX_HTTP_GATEWAY_TIME_OUT;
+        break;
 
-        case NGX_HTTP_UPSTREAM_FT_HTTP_500:
-            status = NGX_HTTP_INTERNAL_SERVER_ERROR;
-            break;
+    case NGX_HTTP_UPSTREAM_FT_HTTP_500:
+        status = NGX_HTTP_INTERNAL_SERVER_ERROR;
+        break;
 
-        case NGX_HTTP_UPSTREAM_FT_HTTP_403:
-            status = NGX_HTTP_FORBIDDEN;
-            break;
+    case NGX_HTTP_UPSTREAM_FT_HTTP_403:
+        status = NGX_HTTP_FORBIDDEN;
+        break;
 
-        case NGX_HTTP_UPSTREAM_FT_HTTP_404:
-            status = NGX_HTTP_NOT_FOUND;
-            break;
+    case NGX_HTTP_UPSTREAM_FT_HTTP_404:
+        status = NGX_HTTP_NOT_FOUND;
+        break;
 
-        /*
-         * NGX_HTTP_UPSTREAM_FT_BUSY_LOCK and NGX_HTTP_UPSTREAM_FT_MAX_WAITING
-         * never reach here
-         */
+    /*
+     * NGX_HTTP_UPSTREAM_FT_BUSY_LOCK and NGX_HTTP_UPSTREAM_FT_MAX_WAITING
+     * never reach here
+     */
 
-        default:
-            status = NGX_HTTP_BAD_GATEWAY;
-        }
+    default:
+        status = NGX_HTTP_BAD_GATEWAY;
     }
 
     if (r->connection->error) {
@@ -3991,37 +3985,36 @@ ngx_http_upstream_next(ngx_http_request_t *r, ngx_http_upstream_t *u,
         return;
     }
 
-    if (status) {
-        u->state->status = status;
+    u->state->status = status;
 
-        timeout = u->conf->next_upstream_timeout;
+    timeout = u->conf->next_upstream_timeout;
 
-        if (u->peer.tries == 0
-            || !(u->conf->next_upstream & ft_type)
-            || (u->request_sent && r->request_body_no_buffering)
-            || (timeout && ngx_current_msec - u->peer.start_time >= timeout))
-        {
+    if (u->peer.tries == 0
+        || !(u->conf->next_upstream & ft_type)
+        || (u->request_sent && r->request_body_no_buffering)
+        || (timeout && ngx_current_msec - u->peer.start_time >= timeout))
+    {
 #if (NGX_HTTP_CACHE)
 
-            if (u->cache_status == NGX_HTTP_CACHE_EXPIRED
-                && (u->conf->cache_use_stale & ft_type))
-            {
-                ngx_int_t  rc;
+        if (u->cache_status == NGX_HTTP_CACHE_EXPIRED
+            && (u->conf->cache_use_stale & ft_type))
+        {
+            ngx_int_t  rc;
 
-                rc = u->reinit_request(r);
+            rc = u->reinit_request(r);
 
-                if (rc == NGX_OK) {
-                    u->cache_status = NGX_HTTP_CACHE_STALE;
-                    rc = ngx_http_upstream_cache_send(r, u);
-                }
+            if (rc == NGX_OK) {
+                u->cache_status = NGX_HTTP_CACHE_STALE;
+                rc = ngx_http_upstream_cache_send(r, u);
+            }
 
-                ngx_http_upstream_finalize_request(r, u, rc);
-                return;
-            }
+            ngx_http_upstream_finalize_request(r, u, rc);
+            return;
+        }
 #endif
 
-            ngx_http_upstream_finalize_request(r, u, status);
-            return;
-        }
+        ngx_http_upstream_finalize_request(r, u, status);
+        return;
     }
 
     if (u->peer.connection) {