[core] support POLLRDHUP, where available (#2743)

x-ref:
  "mod_cgi, lighty not killing CGI if connection in the other end is closed"
  https://redmine.lighttpd.net/boards/2/topics/5962
  "1.4.40/41 mod_proxy, mod_scgi may trigger POLLHUP on *BSD,Darwin"
  https://redmine.lighttpd.net/issues/2743
personal/stbuehler/fix-fdevent
Glenn Strauss 6 years ago
parent 9f02df2d39
commit d5d0258362
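
Background (not part of the diff below): POLLRDHUP/EPOLLRDHUP, where supported, report that the peer has shut down the write half of its connection (sent a FIN), so the server can notice an aborted client without waiting for the next write to fail. A minimal, illustrative poll()-based sketch of the semantics, assuming a connected TCP socket fd (the names here are not taken from the patch):

    #define _GNU_SOURCE              /* glibc exposes POLLRDHUP only with this */
    #include <poll.h>
    #include <stdio.h>

    #ifndef POLLRDHUP
    #define POLLRDHUP 0              /* no support: the bit is never requested or reported */
    #endif

    /* block until the peer does something, then report what happened */
    static int wait_for_peer(int fd)
    {
        struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLRDHUP };
        if (poll(&pfd, 1, -1) < 0) return -1;
        if (pfd.revents & POLLRDHUP)
            puts("peer half-closed: shutdown(fd, SHUT_WR) or close() on the other end");
        if (pfd.revents & (POLLHUP | POLLERR))
            puts("full hangup or error, e.g. after a TCP RST");
        return pfd.revents;
    }

The #ifndef guard mirrors the fallbacks added to the poll and epoll backends below: on platforms without the flag the bit degrades to 0 and is simply never set.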

@ -908,9 +908,26 @@ static handler_t connection_handle_fdevent(server *srv, void *context, int revents)
if (con->state == CON_STATE_CLOSE) {
con->close_timeout_ts = srv->cur_ts - (HTTP_LINGER_TIMEOUT+1);
} else if (revents & FDEVENT_HUP) {
if (fdevent_is_tcp_half_closed(con->fd)) {
connection_set_state(srv, con, CON_STATE_ERROR);
} else if (revents & FDEVENT_RDHUP) {
if (sock_addr_get_family(&con->dst_addr) == AF_UNIX) {
/* future: will getpeername() on AF_UNIX properly check if still connected? */
fdevent_event_clr(srv->ev, &con->fde_ndx, con->fd, FDEVENT_RDHUP);
con->keep_alive = 0;
} else if (fdevent_is_tcp_half_closed(con->fd)) {
/* Success of fdevent_is_tcp_half_closed() after FDEVENT_RDHUP indicates TCP FIN received,
* but does not distinguish between client shutdown(fd, SHUT_WR) and client close(fd).
* Remove FDEVENT_RDHUP so that we do not spin on the ready event.
* However, a later TCP RST will not be detected until next write to socket.
* future: might getpeername() to check for TCP RST on half-closed sockets
* (without FDEVENT_RDHUP interest) when checking for write timeouts
* once a second in server.c, though getpeername() on Windows might not indicate this */
fdevent_event_clr(srv->ev, &con->fde_ndx, con->fd, FDEVENT_RDHUP);
con->keep_alive = 0;
} else {
/* Failure of fdevent_is_tcp_half_closed() indicates TCP RST
* (or unable to tell (unsupported OS), though should not
* be setting FDEVENT_RDHUP in that case) */
connection_set_state(srv, con, CON_STATE_ERROR);
}
} else if (revents & FDEVENT_ERR) { /* error, connection reset */
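
The comments above rely on fdevent_is_tcp_half_closed() to tell a received FIN (half-close) from an RST. As a rough illustration only, and not necessarily lighttpd's implementation, such a check can be made on Linux by querying the kernel's TCP state and testing for CLOSE_WAIT, the state entered once the peer's FIN has arrived (the helper name below is made up):

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    /* illustrative only: has the peer's FIN been received on this TCP socket? */
    static int tcp_peer_sent_fin(int fd)
    {
        struct tcp_info ti;
        socklen_t tilen = sizeof(ti);
        if (0 != getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &tilen))
            return 0;                            /* cannot tell (not TCP, or query failed) */
        return ti.tcpi_state == TCP_CLOSE_WAIT;  /* FIN received; connection half-closed */
    }

After an RST (or on a platform with no such query) the check fails, which corresponds to the else branch above that moves the connection to CON_STATE_ERROR.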
@ -1370,8 +1387,7 @@ int connection_state_machine(server *srv, connection *con) {
r = 0;
switch(con->state) {
case CON_STATE_READ:
case CON_STATE_CLOSE:
r = FDEVENT_IN;
r = FDEVENT_IN | FDEVENT_RDHUP;
break;
case CON_STATE_WRITE:
/* request write-fdevent only if we really need it
@ -1386,9 +1402,12 @@ int connection_state_machine(server *srv, connection *con) {
/* fall through */
case CON_STATE_READ_POST:
if (con->conf.stream_request_body & FDEVENT_STREAM_REQUEST_POLLIN) {
r |= FDEVENT_IN;
r |= FDEVENT_IN | FDEVENT_RDHUP;
}
break;
case CON_STATE_CLOSE:
r = FDEVENT_IN;
break;
default:
break;
}
@ -1402,6 +1421,9 @@ int connection_state_machine(server *srv, connection *con) {
con->is_writable = 0;
r |= FDEVENT_OUT;
}
if (events & FDEVENT_RDHUP) {
r |= FDEVENT_RDHUP;
}
if (r != events) {
/* update timestamps when enabling interest in events */
if ((r & FDEVENT_IN) && !(events & FDEVENT_IN)) {

@ -391,7 +391,7 @@ void fdevent_event_add(fdevents *ev, int *fde_ndx, int fd, int event) {
if (-1 == fd) return;
events = ev->fdarray[fd]->events;
if ((events & event) || 0 == event) return; /*(no change; nothing to do)*/
if ((events & event) == event) return; /*(no change; nothing to do)*/
events |= event;
if (ev->event_set) *fde_ndx = ev->event_set(ev, *fde_ndx, fd, events);

@ -19,6 +19,7 @@ typedef handler_t (*fdevent_handler)(struct server *srv, void *ctx, int revents)
#define FDEVENT_ERR BV(3)
#define FDEVENT_HUP BV(4)
#define FDEVENT_NVAL BV(5)
#define FDEVENT_RDHUP BV(13)
#define FDEVENT_STREAM_REQUEST BV(0)
#define FDEVENT_STREAM_REQUEST_BUFMIN BV(1)

@ -138,7 +138,7 @@ static int fdevent_freebsd_kqueue_poll(fdevents *ev, int timeout_ms) {
static int fdevent_freebsd_kqueue_event_get_revent(fdevents *ev, size_t ndx) {
int events = 0, e;
e = ev->kq_results[ndx].filter;
int filt = e = ev->kq_results[ndx].filter;
if (e == EVFILT_READ) {
events |= FDEVENT_IN;
@ -149,7 +149,11 @@ static int fdevent_freebsd_kqueue_event_get_revent(fdevents *ev, size_t ndx) {
e = ev->kq_results[ndx].flags;
if (e & EV_EOF) {
events |= FDEVENT_HUP;
if (filt == EVFILT_READ) {
events |= FDEVENT_RDHUP;
} else {
events |= FDEVENT_HUP;
}
}
if (e & EV_ERROR) {

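kqueue has no separate RDHUP flag; instead, EV_EOF on an EVFILT_READ event means the peer has shut down its write side, the closest analogue of POLLRDHUP, while EV_EOF on the write filter is still reported as a plain hangup, which is exactly the mapping the hunk above encodes. A small sketch, assuming an existing kqueue descriptor kq and a connected socket fd (helper name made up):

    #include <sys/types.h>
    #include <sys/event.h>
    #include <stddef.h>

    /* 1: peer half-closed (EV_EOF on the read filter); 0: some other event; -1: error */
    static int wait_for_rdhup(int kq, int fd)
    {
        struct kevent kev;
        EV_SET(&kev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);      /* register read interest */
        if (kevent(kq, &kev, 1, NULL, 0, NULL) < 0) return -1;  /* apply the change */
        if (kevent(kq, NULL, 0, &kev, 1, NULL) < 1) return -1;  /* wait for one event */
        return (kev.filter == EVFILT_READ && (kev.flags & EV_EOF)) ? 1 : 0;
    }
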
@ -16,6 +16,10 @@
# include <sys/epoll.h>
#ifndef EPOLLRDHUP
#define EPOLLRDHUP 0
#endif
static void fdevent_linux_sysepoll_free(fdevents *ev) {
close(ev->epoll_fd);
free(ev->epoll_events);
@ -56,6 +60,7 @@ static int fdevent_linux_sysepoll_event_set(fdevents *ev, int fde_ndx, int fd, i
if (events & FDEVENT_IN) ep.events |= EPOLLIN;
if (events & FDEVENT_OUT) ep.events |= EPOLLOUT;
if (events & FDEVENT_RDHUP) ep.events |= EPOLLRDHUP;
/**
*
@ -95,6 +100,7 @@ static int fdevent_linux_sysepoll_event_get_revent(fdevents *ev, size_t ndx) {
if (e & EPOLLERR) events |= FDEVENT_ERR;
if (e & EPOLLHUP) events |= FDEVENT_HUP;
if (e & EPOLLPRI) events |= FDEVENT_PRI;
if (e & EPOLLRDHUP) events |= FDEVENT_RDHUP;
return events;
}

@ -20,6 +20,10 @@
# include <sys/poll.h>
# endif
#ifndef POLLRDHUP
#define POLLRDHUP 0
#endif
static void fdevent_poll_free(fdevents *ev) {
free(ev->pollfds);
if (ev->unused.ptr) free(ev->unused.ptr);
@ -79,6 +83,7 @@ static int fdevent_poll_event_set(fdevents *ev, int fde_ndx, int fd, int events)
int pevents = 0;
if (events & FDEVENT_IN) pevents |= POLLIN;
if (events & FDEVENT_OUT) pevents |= POLLOUT;
if (events & FDEVENT_RDHUP) pevents |= POLLRDHUP;
/* known index */
@ -153,6 +158,7 @@ static int fdevent_poll_event_get_revent(fdevents *ev, size_t ndx) {
if (poll_r & POLLHUP) r |= FDEVENT_HUP;
if (poll_r & POLLNVAL) r |= FDEVENT_NVAL;
if (poll_r & POLLPRI) r |= FDEVENT_PRI;
if (poll_r & POLLRDHUP) r |= FDEVENT_RDHUP;
return r;
}
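
Note on the #ifndef EPOLLRDHUP / #ifndef POLLRDHUP fallbacks above: defining the missing macro to 0 means that on kernels or headers without support, requesting the bit (|= 0) and testing for it (& 0) both compile to no-ops, so the code still builds and runs but FDEVENT_RDHUP is simply never reported; an aborted client is then detected, as before this patch, via FDEVENT_HUP/FDEVENT_ERR or a failed write.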

@ -1811,7 +1811,8 @@ static handler_t gw_write_request(server *srv, gw_handler_ctx *hctx) {
}
}
fdevent_event_add(srv->ev, &(hctx->fde_ndx), hctx->fd, FDEVENT_IN);
fdevent_event_add(srv->ev, &hctx->fde_ndx, hctx->fd,
FDEVENT_IN | FDEVENT_RDHUP);
gw_set_state(srv, hctx, GW_STATE_WRITE);
/* fall through */
case GW_STATE_WRITE:
@ -2144,7 +2145,7 @@ static handler_t gw_handle_fdevent(server *srv, void *ctx, int revents) {
}
/* perhaps this issue is already handled */
if (revents & FDEVENT_HUP) {
if (revents & (FDEVENT_HUP|FDEVENT_RDHUP)) {
if (hctx->state == GW_STATE_CONNECT_DELAYED) {
/* getoptsock will catch this one (right ?)
*

@ -417,7 +417,7 @@ static handler_t cgi_handle_fdevent(server *srv, void *ctx, int revents) {
}
/* perhaps this issue is already handled */
if (revents & FDEVENT_HUP) {
if (revents & (FDEVENT_HUP|FDEVENT_RDHUP)) {
if (con->file_started) {
/* drain any remaining data from kernel pipe buffers
* even if (con->conf.stream_response_body
@ -823,7 +823,7 @@ static int cgi_create_env(server *srv, connection *con, plugin_data *p, handler_
cgi_connection_close(srv, hctx);
return -1;
}
fdevent_event_set(srv->ev, &(hctx->fde_ndx), hctx->fd, FDEVENT_IN);
fdevent_event_set(srv->ev, &(hctx->fde_ndx), hctx->fd, FDEVENT_IN | FDEVENT_RDHUP);
return 0;
}

@ -205,7 +205,7 @@ static handler_t stat_cache_handle_fdevent(server *srv, void *_fce, int revent)
}
}
if (revent & FDEVENT_HUP) {
if (revent & (FDEVENT_HUP|FDEVENT_RDHUP)) {
/* fam closed the connection */
fdevent_event_del(srv->ev, &(scf->fam_fcce_ndx), FAMCONNECTION_GETFD(&scf->fam));
fdevent_unregister(srv->ev, FAMCONNECTION_GETFD(&scf->fam));
@ -234,7 +234,7 @@ static stat_cache_fam * stat_cache_init_fam(server *srv) {
fdevent_setfd_cloexec(FAMCONNECTION_GETFD(&scf->fam));
fdevent_register(srv->ev, FAMCONNECTION_GETFD(&scf->fam), stat_cache_handle_fdevent, NULL);
fdevent_event_set(srv->ev, &(scf->fam_fcce_ndx), FAMCONNECTION_GETFD(&scf->fam), FDEVENT_IN);
fdevent_event_set(srv->ev, &(scf->fam_fcce_ndx), FAMCONNECTION_GETFD(&scf->fam), FDEVENT_IN | FDEVENT_RDHUP);
return scf;
}
