mirror of
https://github.com/nginx/nginx.git
synced 2024-11-21 16:28:40 +00:00
Fixed request termination with AIO and subrequests (ticket #2555).
When a request was terminated due to an error via ngx_http_terminate_request() while an AIO operation was running in a subrequest, various issues were observed. This happened because ngx_http_request_finalizer() was only set in the subrequest where ngx_http_terminate_request() was called, but not in the subrequest where the AIO operation was running. After completion of the AIO operation normal processing of the subrequest was resumed, leading to issues. In particular, in case of the upstream module, termination of the request called upstream cleanup, which closed the upstream connection. Attempts to further work with the upstream connection after AIO operation completion resulted in segfaults in ngx_ssl_recv(), "readv() failed (9: Bad file descriptor) while reading upstream" errors, or socket leaks. In ticket #2555, issues were observed with the following configuration with cache background update (with thread writing instrumented to introduce a delay, when a client closes the connection during an update): location = /background-and-aio-write { proxy_pass ... proxy_cache one; proxy_cache_valid 200 1s; proxy_cache_background_update on; proxy_cache_use_stale updating; aio threads; aio_write on; limit_rate 1000; } Similarly, the same issue can be seen with SSI, and can be caused by errors in subrequests, such as in the following configuration (where "/proxy" uses AIO, and "/sleep" returns 444 after some delay, causing request termination): location = /ssi-active-boom { ssi on; ssi_types *; return 200 ' <!--#include virtual="/proxy" --> <!--#include virtual="/sleep" --> '; limit_rate 1000; } Or the same with both AIO operation and the error in non-active subrequests (which needs slightly different handling, see below): location = /ssi-non-active-boom { ssi on; ssi_types *; return 200 ' <!--#include virtual="/static" --> <!--#include virtual="/proxy" --> <!--#include virtual="/sleep" --> '; limit_rate 1000; } Similarly, issues can be observed with just static files. 
However, with static files, the potential impact is limited due to timeout safeguards in ngx_http_writer(), and the fact that c->error is set during request termination. In a simple configuration with an AIO operation in the active subrequest, such as in the following configuration, the connection is closed right after completion of the AIO operation anyway, since ngx_http_writer() tries to write to the connection and fails due to c->error set: location = /ssi-active-static-boom { ssi on; ssi_types *; return 200 ' <!--#include virtual="/static-aio" --> <!--#include virtual="/sleep" --> '; limit_rate 1000; } In the following configuration, with an AIO operation in a non-active subrequest, the connection is closed only after send_timeout expires: location = /ssi-non-active-static-boom { ssi on; ssi_types *; return 200 ' <!--#include virtual="/static" --> <!--#include virtual="/static-aio" --> <!--#include virtual="/sleep" --> '; limit_rate 1000; } The fix is to introduce the r->main->terminated flag, which is to be checked by AIO event handlers when the r->main->blocked counter is decremented. When the flag is set, handlers are expected to wake up the connection instead of the subrequest (which might be already cleaned up). Additionally, now ngx_http_request_finalizer() is always set in the active subrequest, so waking up the connection properly finalizes the request even if termination happened in a non-active subrequest.
This commit is contained in:
parent
b794465178
commit
c251961c41
@ -208,9 +208,18 @@ ngx_http_copy_aio_event_handler(ngx_event_t *ev)
|
|||||||
r->main->blocked--;
|
r->main->blocked--;
|
||||||
r->aio = 0;
|
r->aio = 0;
|
||||||
|
|
||||||
r->write_event_handler(r);
|
if (r->main->terminated) {
|
||||||
|
/*
|
||||||
|
* trigger connection event handler if the request was
|
||||||
|
* terminated
|
||||||
|
*/
|
||||||
|
|
||||||
ngx_http_run_posted_requests(c);
|
c->write->handler(c->write);
|
||||||
|
|
||||||
|
} else {
|
||||||
|
r->write_event_handler(r);
|
||||||
|
ngx_http_run_posted_requests(c);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
@ -331,11 +340,11 @@ ngx_http_copy_thread_event_handler(ngx_event_t *ev)
|
|||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
if (r->done) {
|
if (r->done || r->main->terminated) {
|
||||||
/*
|
/*
|
||||||
* trigger connection event handler if the subrequest was
|
* trigger connection event handler if the subrequest was
|
||||||
* already finalized; this can happen if the handler is used
|
* already finalized (this can happen if the handler is used
|
||||||
* for sendfile() in threads
|
* for sendfile() in threads), or if the request was terminated
|
||||||
*/
|
*/
|
||||||
|
|
||||||
c->write->handler(c->write);
|
c->write->handler(c->write);
|
||||||
|
@ -14,7 +14,7 @@
|
|||||||
static ngx_int_t ngx_http_file_cache_lock(ngx_http_request_t *r,
|
static ngx_int_t ngx_http_file_cache_lock(ngx_http_request_t *r,
|
||||||
ngx_http_cache_t *c);
|
ngx_http_cache_t *c);
|
||||||
static void ngx_http_file_cache_lock_wait_handler(ngx_event_t *ev);
|
static void ngx_http_file_cache_lock_wait_handler(ngx_event_t *ev);
|
||||||
static void ngx_http_file_cache_lock_wait(ngx_http_request_t *r,
|
static ngx_int_t ngx_http_file_cache_lock_wait(ngx_http_request_t *r,
|
||||||
ngx_http_cache_t *c);
|
ngx_http_cache_t *c);
|
||||||
static ngx_int_t ngx_http_file_cache_read(ngx_http_request_t *r,
|
static ngx_int_t ngx_http_file_cache_read(ngx_http_request_t *r,
|
||||||
ngx_http_cache_t *c);
|
ngx_http_cache_t *c);
|
||||||
@ -463,6 +463,7 @@ ngx_http_file_cache_lock(ngx_http_request_t *r, ngx_http_cache_t *c)
|
|||||||
static void
|
static void
|
||||||
ngx_http_file_cache_lock_wait_handler(ngx_event_t *ev)
|
ngx_http_file_cache_lock_wait_handler(ngx_event_t *ev)
|
||||||
{
|
{
|
||||||
|
ngx_int_t rc;
|
||||||
ngx_connection_t *c;
|
ngx_connection_t *c;
|
||||||
ngx_http_request_t *r;
|
ngx_http_request_t *r;
|
||||||
|
|
||||||
@ -474,13 +475,31 @@ ngx_http_file_cache_lock_wait_handler(ngx_event_t *ev)
|
|||||||
ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0,
|
ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0,
|
||||||
"http file cache wait: \"%V?%V\"", &r->uri, &r->args);
|
"http file cache wait: \"%V?%V\"", &r->uri, &r->args);
|
||||||
|
|
||||||
ngx_http_file_cache_lock_wait(r, r->cache);
|
rc = ngx_http_file_cache_lock_wait(r, r->cache);
|
||||||
|
|
||||||
ngx_http_run_posted_requests(c);
|
if (rc == NGX_AGAIN) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
r->cache->waiting = 0;
|
||||||
|
r->main->blocked--;
|
||||||
|
|
||||||
|
if (r->main->terminated) {
|
||||||
|
/*
|
||||||
|
* trigger connection event handler if the request was
|
||||||
|
* terminated
|
||||||
|
*/
|
||||||
|
|
||||||
|
c->write->handler(c->write);
|
||||||
|
|
||||||
|
} else {
|
||||||
|
r->write_event_handler(r);
|
||||||
|
ngx_http_run_posted_requests(c);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
static void
|
static ngx_int_t
|
||||||
ngx_http_file_cache_lock_wait(ngx_http_request_t *r, ngx_http_cache_t *c)
|
ngx_http_file_cache_lock_wait(ngx_http_request_t *r, ngx_http_cache_t *c)
|
||||||
{
|
{
|
||||||
ngx_uint_t wait;
|
ngx_uint_t wait;
|
||||||
@ -495,7 +514,7 @@ ngx_http_file_cache_lock_wait(ngx_http_request_t *r, ngx_http_cache_t *c)
|
|||||||
ngx_log_error(NGX_LOG_INFO, r->connection->log, 0,
|
ngx_log_error(NGX_LOG_INFO, r->connection->log, 0,
|
||||||
"cache lock timeout");
|
"cache lock timeout");
|
||||||
c->lock_timeout = 0;
|
c->lock_timeout = 0;
|
||||||
goto wakeup;
|
return NGX_OK;
|
||||||
}
|
}
|
||||||
|
|
||||||
cache = c->file_cache;
|
cache = c->file_cache;
|
||||||
@ -513,14 +532,10 @@ ngx_http_file_cache_lock_wait(ngx_http_request_t *r, ngx_http_cache_t *c)
|
|||||||
|
|
||||||
if (wait) {
|
if (wait) {
|
||||||
ngx_add_timer(&c->wait_event, (timer > 500) ? 500 : timer);
|
ngx_add_timer(&c->wait_event, (timer > 500) ? 500 : timer);
|
||||||
return;
|
return NGX_AGAIN;
|
||||||
}
|
}
|
||||||
|
|
||||||
wakeup:
|
return NGX_OK;
|
||||||
|
|
||||||
c->waiting = 0;
|
|
||||||
r->main->blocked--;
|
|
||||||
r->write_event_handler(r);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -753,9 +768,18 @@ ngx_http_cache_aio_event_handler(ngx_event_t *ev)
|
|||||||
r->main->blocked--;
|
r->main->blocked--;
|
||||||
r->aio = 0;
|
r->aio = 0;
|
||||||
|
|
||||||
r->write_event_handler(r);
|
if (r->main->terminated) {
|
||||||
|
/*
|
||||||
|
* trigger connection event handler if the request was
|
||||||
|
* terminated
|
||||||
|
*/
|
||||||
|
|
||||||
ngx_http_run_posted_requests(c);
|
c->write->handler(c->write);
|
||||||
|
|
||||||
|
} else {
|
||||||
|
r->write_event_handler(r);
|
||||||
|
ngx_http_run_posted_requests(c);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
@ -836,9 +860,18 @@ ngx_http_cache_thread_event_handler(ngx_event_t *ev)
|
|||||||
r->main->blocked--;
|
r->main->blocked--;
|
||||||
r->aio = 0;
|
r->aio = 0;
|
||||||
|
|
||||||
r->write_event_handler(r);
|
if (r->main->terminated) {
|
||||||
|
/*
|
||||||
|
* trigger connection event handler if the request was
|
||||||
|
* terminated
|
||||||
|
*/
|
||||||
|
|
||||||
ngx_http_run_posted_requests(c);
|
c->write->handler(c->write);
|
||||||
|
|
||||||
|
} else {
|
||||||
|
r->write_event_handler(r);
|
||||||
|
ngx_http_run_posted_requests(c);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
@ -2694,6 +2694,8 @@ ngx_http_terminate_request(ngx_http_request_t *r, ngx_int_t rc)
|
|||||||
ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
|
ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
|
||||||
"http terminate request count:%d", mr->count);
|
"http terminate request count:%d", mr->count);
|
||||||
|
|
||||||
|
mr->terminated = 1;
|
||||||
|
|
||||||
if (rc > 0 && (mr->headers_out.status == 0 || mr->connection->sent == 0)) {
|
if (rc > 0 && (mr->headers_out.status == 0 || mr->connection->sent == 0)) {
|
||||||
mr->headers_out.status = rc;
|
mr->headers_out.status = rc;
|
||||||
}
|
}
|
||||||
@ -2716,8 +2718,11 @@ ngx_http_terminate_request(ngx_http_request_t *r, ngx_int_t rc)
|
|||||||
if (mr->write_event_handler) {
|
if (mr->write_event_handler) {
|
||||||
|
|
||||||
if (mr->blocked) {
|
if (mr->blocked) {
|
||||||
|
r = r->connection->data;
|
||||||
|
|
||||||
r->connection->error = 1;
|
r->connection->error = 1;
|
||||||
r->write_event_handler = ngx_http_request_finalizer;
|
r->write_event_handler = ngx_http_request_finalizer;
|
||||||
|
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -550,6 +550,7 @@ struct ngx_http_request_s {
|
|||||||
unsigned root_tested:1;
|
unsigned root_tested:1;
|
||||||
unsigned done:1;
|
unsigned done:1;
|
||||||
unsigned logged:1;
|
unsigned logged:1;
|
||||||
|
unsigned terminated:1;
|
||||||
|
|
||||||
unsigned buffered:4;
|
unsigned buffered:4;
|
||||||
|
|
||||||
|
@ -3997,11 +3997,11 @@ ngx_http_upstream_thread_event_handler(ngx_event_t *ev)
|
|||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
if (r->done) {
|
if (r->done || r->main->terminated) {
|
||||||
/*
|
/*
|
||||||
* trigger connection event handler if the subrequest was
|
* trigger connection event handler if the subrequest was
|
||||||
* already finalized; this can happen if the handler is used
|
* already finalized (this can happen if the handler is used
|
||||||
* for sendfile() in threads
|
* for sendfile() in threads), or if the request was terminated
|
||||||
*/
|
*/
|
||||||
|
|
||||||
c->write->handler(c->write);
|
c->write->handler(c->write);
|
||||||
|
Loading…
Reference in New Issue
Block a user