[multiple] omit passing srv to fdevent_handler
parent fda01e3305
commit 05cc88ddb4
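In short: fdevent handler callbacks no longer take a server * as their first argument; a handler that still needs server state recovers it through the context pointer it registered. A minimal sketch of the shape of the change (illustrative only; the handler and context names are placeholders, not lighttpd code):

/* before: every callback was handed srv explicitly */
typedef handler_t (*fdevent_handler)(struct server *srv, void *ctx, int revents);

/* after: callbacks receive only their registered context */
typedef handler_t (*fdevent_handler)(void *ctx, int revents);

/* a handler reaches the server, if it needs it, through its ctx, e.g. via a
 * back-pointer stored in the object passed to fdevent_register() */
static handler_t example_handle_fdevent(void *ctx, int revents) {
    example_ctx * const hctx = ctx;  /* hypothetical per-fd context type */
    server * const srv = hctx->srv;  /* back-pointer set when hctx was created */
    /* ... dispatch on revents ... */
    return HANDLER_GO_ON;
}

The diff below applies this pattern to the core event loop, the event backends, and the modules that register fdevent handlers.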
@@ -300,6 +300,7 @@ typedef struct server_socket {
     unsigned short sidx;
 
     fdnode *fdn;
+    server *srv;
     buffer *srv_token;
 } server_socket;
 
@@ -864,10 +864,10 @@ static int connection_handle_read_state(connection * const con) {
     return 1;
 }
 
-static handler_t connection_handle_fdevent(server *srv, void *context, int revents) {
+static handler_t connection_handle_fdevent(void *context, int revents) {
     connection *con = context;
 
-    joblist_append(srv, con);
+    joblist_append(con);
 
     if (con->is_ssl_sock) {
         /* ssl may read and write for both reads and writes */
@@ -922,7 +922,7 @@ static handler_t connection_handle_fdevent(server *srv, void *context, int reven
     }
     if (sock_addr_get_family(&con->dst_addr) == AF_UNIX) {
         /* future: will getpeername() on AF_UNIX properly check if still connected? */
-        fdevent_fdnode_event_set(srv->ev, con->fdn, events);
+        fdevent_fdnode_event_set(con->srv->ev, con->fdn, events);
     } else if (fdevent_is_tcp_half_closed(con->fd)) {
         /* Success of fdevent_is_tcp_half_closed() after FDEVENT_RDHUP indicates TCP FIN received,
          * but does not distinguish between client shutdown(fd, SHUT_WR) and client close(fd).
@@ -932,7 +932,7 @@ static handler_t connection_handle_fdevent(server *srv, void *context, int reven
          * (without FDEVENT_RDHUP interest) when checking for write timeouts
          * once a second in server.c, though getpeername() on Windows might not indicate this */
         con->conf.stream_request_body |= FDEVENT_STREAM_REQUEST_TCP_FIN;
-        fdevent_fdnode_event_set(srv->ev, con->fdn, events);
+        fdevent_fdnode_event_set(con->srv->ev, con->fdn, events);
     } else {
         /* Failure of fdevent_is_tcp_half_closed() indicates TCP RST
          * (or unable to tell (unsupported OS), though should not
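A note on the connection handler hunks above: the connection object itself is the registered fdevent context, and it carries a srv back-pointer (maintained elsewhere in connection setup, not shown in this diff). That back-pointer is what con->srv->ev and the reworked joblist_append() rely on. Condensed illustration, not the verbatim function body:

static handler_t connection_handle_fdevent(void *context, int revents) {
    connection * const con = context;   /* registered as ctx for the connection fd */
    joblist_append(con);                /* expands to use con->srv->joblist */
    /* ... fdevent_fdnode_event_set(con->srv->ev, con->fdn, events) where needed ... */
    return HANDLER_FINISHED;
}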
@@ -23,7 +23,7 @@ handler_t connection_handle_read_post_error(connection *con, int http_status);
 int connection_write_chunkqueue(connection *con, chunkqueue *c, off_t max_bytes);
 void connection_response_reset(connection *con);
 
-#define joblist_append(srv, con) connection_list_append(&(srv)->joblist, (con))
+#define joblist_append(con) connection_list_append(&(con)->srv->joblist, (con))
 void connection_list_append(connections *conns, connection *con);
 
 #endif
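The macro change above moves the choice of joblist from a caller-supplied srv to the connection's own back-pointer, so every call site shrinks by one argument. Rough comparison of the expansions, taken directly from the two #define lines:

/* old: caller must have srv in scope */
joblist_append(srv, con);  /* -> connection_list_append(&(srv)->joblist, (con)) */

/* new: con alone suffices, provided con->srv is valid */
joblist_append(con);       /* -> connection_list_append(&(con)->srv->joblist, (con)) */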
@@ -7,7 +7,7 @@
 struct fdevents; /* declaration */
 typedef struct fdevents fdevents;
 
-typedef handler_t (*fdevent_handler)(struct server *srv, void *ctx, int revents);
+typedef handler_t (*fdevent_handler)(void *ctx, int revents);
 
 struct fdnode_st {
     fdevent_handler handler;
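With the slimmer typedef above, everything a callback needs must travel through the void *ctx supplied at registration. A minimal sketch of that contract, assuming the fdevent_register() argument order visible in the stat_cache hunk near the end of this diff (the names here are hypothetical):

typedef struct {        /* hypothetical per-fd context */
    server *srv;
    int fd;
} my_ctx;

static handler_t my_handle_fdevent(void *ctx, int revents) {
    my_ctx * const c = ctx;
    if (revents & FDEVENT_IN) {
        /* read from c->fd; log through c->srv->errh if needed */
    }
    return HANDLER_GO_ON;
}

/* registration: the ctx pointer passed here is exactly what the handler gets back */
/* fdn = fdevent_register(srv->ev, c->fd, my_handle_fdevent, c); */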
@@ -71,7 +71,6 @@ static int fdevent_freebsd_kqueue_event_set(fdevents *ev, fdnode *fdn, int event
 }
 
 static int fdevent_freebsd_kqueue_poll(fdevents * const ev, int timeout_ms) {
-    server * const srv = ev->srv;
     struct timespec ts;
     int n;
 
@@ -90,7 +89,7 @@ static int fdevent_freebsd_kqueue_poll(fdevents * const ev, int timeout_ms) {
             revents |= (filt == EVFILT_READ ? FDEVENT_RDHUP : FDEVENT_HUP);
             if (e & EV_ERROR)
                 revents |= FDEVENT_ERR;
-            (*fdn->handler)(srv, fdn->ctx, revents);
+            (*fdn->handler)(fdn->ctx, revents);
         }
     }
     return n;
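The same mechanical edit repeats in the remaining event backends below (libev, epoll, poll, select, Solaris /dev/poll and event ports): each poll loop previously fetched server * const srv = ev->srv only to forward it to handlers, and now dispatches with the context alone. A condensed sketch of the shared post-change dispatch shape (not verbatim from any one backend; the accessor names are placeholders):

for (int i = 0; i < n; ++i) {
    fdnode *fdn = backend_event_fdnode(ev, i);    /* hypothetical accessor */
    int revents = backend_event_revents(ev, i);   /* hypothetical accessor */
    if (0 == ((uintptr_t)fdn & 0x3))              /* liveness check used by several backends */
        (*fdn->handler)(fdn->ctx, revents);
}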
@@ -21,7 +21,7 @@ static void io_watcher_cb(struct ev_loop *loop, ev_io *w, int revents) {
     if (revents & EV_ERROR) rv |= FDEVENT_ERR;
 
     if (0 == ((uintptr_t)fdn & 0x3)) {
-        (*fdn->handler)(ev->srv, fdn->ctx, rv);
+        (*fdn->handler)(fdn->ctx, rv);
     }
 }
 
@@ -36,12 +36,11 @@ static int fdevent_linux_sysepoll_event_set(fdevents *ev, fdnode *fdn, int event
 
 static int fdevent_linux_sysepoll_poll(fdevents * const ev, int timeout_ms) {
     int n = epoll_wait(ev->epoll_fd, ev->epoll_events, ev->maxfds, timeout_ms);
-    server * const srv = ev->srv;
     for (int i = 0; i < n; ++i) {
         fdnode * const fdn = (fdnode *)ev->epoll_events[i].data.ptr;
         int revents = ev->epoll_events[i].events;
         if ((fdevent_handler)NULL != fdn->handler) {
-            (*fdn->handler)(srv, fdn->ctx, revents);
+            (*fdn->handler)(fdn->ctx, revents);
         }
     }
     return n;
@@ -86,12 +86,11 @@ static int fdevent_poll_next_ndx(const fdevents *ev, int ndx) {
 
 static int fdevent_poll_poll(fdevents *ev, int timeout_ms) {
     const int n = poll(ev->pollfds, ev->used, timeout_ms);
-    server * const srv = ev->srv;
     for (int ndx=-1,i=0; i<n && -1!=(ndx=fdevent_poll_next_ndx(ev,ndx)); ++i){
         fdnode *fdn = ev->fdarray[ev->pollfds[ndx].fd];
         int revents = ev->pollfds[ndx].revents;
         if (0 == ((uintptr_t)fdn & 0x3)) {
-            (*fdn->handler)(srv, fdn->ctx, revents);
+            (*fdn->handler)(fdn->ctx, revents);
         }
     }
     return n;
@@ -90,7 +90,7 @@ static int fdevent_select_poll(fdevents *ev, int timeout_ms) {
         fdn = ev->fdarray[ndx];
         if (0 == ((uintptr_t)fdn & 0x3)) {
             int revents = fdevent_select_event_get_revent(ev, ndx);
-            (*fdn->handler)(ev->srv, fdn->ctx, revents);
+            (*fdn->handler)(fdn->ctx, revents);
         }
     }
     return n;
@@ -43,7 +43,6 @@ static int fdevent_solaris_devpoll_event_set(fdevents *ev, fdnode *fdn, int even
 
 static int fdevent_solaris_devpoll_poll(fdevents *ev, int timeout_ms) {
     int n;
-    server * const srv = ev->srv;
     struct dvpoll dopoll;
 
     dopoll.dp_timeout = timeout_ms;
@@ -56,7 +55,7 @@ static int fdevent_solaris_devpoll_poll(fdevents *ev, int timeout_ms) {
         fdnode * const fdn = ev->fdarray[ev->devpollfds[i].fd];
         int revents = ev->devpollfds[i].revents;
         if (0 == ((uintptr_t)fdn & 0x3)) {
-            (*fdn->handler)(srv, fdn->ctx, revents);
+            (*fdn->handler)(fdn->ctx, revents);
         }
     }
     return n;
@@ -67,7 +67,7 @@ static int fdevent_solaris_port_poll(fdevents *ev, int timeout_ms) {
                 log_error(ev->srv->errh, __FILE__, __LINE__,
                   "port_associate failed");
             }
-            (*fdn->handler)(ev->srv, fdn->ctx, revents);
+            (*fdn->handler)(fdn->ctx, revents);
         }
         else {
             fdn->fde_ndx = -1;
@@ -1047,7 +1047,7 @@ static void gw_restart_dead_procs(gw_host * const host, log_error_st * const err
 
 
 /* ok, we need a prototype */
-static handler_t gw_handle_fdevent(server *srv, void *ctx, int revents);
+static handler_t gw_handle_fdevent(void *ctx, int revents);
 
 
 static gw_handler_ctx * handler_ctx_init(size_t sz) {
@@ -2227,11 +2227,11 @@ static handler_t gw_recv_response(gw_handler_ctx *hctx, connection *con) {
 }
 
 
-static handler_t gw_handle_fdevent(server *srv, void *ctx, int revents) {
+static handler_t gw_handle_fdevent(void *ctx, int revents) {
     gw_handler_ctx *hctx = ctx;
     connection *con = hctx->remote_conn;
 
-    joblist_append(srv, con);
+    joblist_append(con);
 
     if (revents & FDEVENT_IN) {
         handler_t rc = gw_recv_response(hctx, con); /*(might invalidate hctx)*/
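gw_backend.c (and the mod_cgi handlers below, which follow the same shape) already register a per-request handler context, so the server was never needed as a parameter: the connection is reachable via hctx->remote_conn, and joblist_append(con) picks up srv from the connection. Condensed pattern, not the verbatim body:

static handler_t gw_handle_fdevent(void *ctx, int revents) {
    gw_handler_ctx * const hctx = ctx;   /* registered as the fdevent ctx */
    connection * const con = hctx->remote_conn;
    joblist_append(con);                 /* uses con->srv via the new macro */
    /* ... handle FDEVENT_IN / FDEVENT_OUT / error events ... */
    return HANDLER_FINISHED;
}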
@@ -351,12 +351,12 @@ static handler_t cgi_connection_close_callback(connection *con, void *p_d) {
 static int cgi_write_request(handler_ctx *hctx, int fd);
 
 
-static handler_t cgi_handle_fdevent_send (server *srv, void *ctx, int revents) {
+static handler_t cgi_handle_fdevent_send (void *ctx, int revents) {
     handler_ctx *hctx = ctx;
     connection *con = hctx->remote_conn;
 
     /*(joblist only actually necessary here in mod_cgi fdevent send if returning HANDLER_ERROR)*/
-    joblist_append(srv, con);
+    joblist_append(con);
 
     if (revents & FDEVENT_OUT) {
         if (0 != cgi_write_request(hctx, hctx->fdtocgi)) {
@@ -445,11 +445,11 @@ static int cgi_recv_response(connection *con, handler_ctx *hctx) {
 }
 
 
-static handler_t cgi_handle_fdevent(server *srv, void *ctx, int revents) {
+static handler_t cgi_handle_fdevent(void *ctx, int revents) {
     handler_ctx *hctx = ctx;
     connection *con = hctx->remote_conn;
 
-    joblist_append(srv, con);
+    joblist_append(con);
 
     if (revents & FDEVENT_IN) {
         handler_t rc = cgi_recv_response(con, hctx);/*(might invalidate hctx)*/
@@ -606,7 +606,7 @@ TRIGGER_FUNC(mod_wstunnel_handle_trigger) {
             DEBUG_LOG_INFO("timeout client (fd=%d)", con->fd);
             mod_wstunnel_frame_send(hctx,MOD_WEBSOCKET_FRAME_TYPE_CLOSE,NULL,0);
             gw_connection_reset(con, p_d);
-            joblist_append(srv, con);
+            joblist_append(con);
             /* avoid server.c closing connection with error due to max_read_idle
              * (might instead run joblist after plugins_call_handle_trigger())*/
             con->read_idle_ts = cur_ts;
@@ -618,7 +618,7 @@ TRIGGER_FUNC(mod_wstunnel_handle_trigger) {
             && (time_t)hctx->conf.ping_interval + hctx->ping_ts < cur_ts) {
             hctx->ping_ts = cur_ts;
             mod_wstunnel_frame_send(hctx, MOD_WEBSOCKET_FRAME_TYPE_PING, CONST_STR_LEN("ping"));
-            joblist_append(srv, con);
+            joblist_append(con);
             continue;
         }
     }
@@ -43,13 +43,12 @@ network_accept_tcp_nagle_disable (const int fd)
     (void)setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &opt, sizeof(opt));
 }
 
-static handler_t network_server_handle_fdevent(server *srv, void *context, int revents) {
-    server_socket *srv_socket = (server_socket *)context;
+static handler_t network_server_handle_fdevent(void *context, int revents) {
+    server_socket * const srv_socket = (server_socket *)context;
+    server * const srv = srv_socket->srv;
     connection *con;
     int loops;
 
-    UNUSED(context);
-
     if (0 == (revents & FDEVENT_IN)) {
         log_error(srv->errh, __FILE__, __LINE__,
           "strange event for server socket %d %d", srv_socket->fd, revents);
@@ -266,6 +265,7 @@ static int network_server_init(server *srv, network_socket_config *s, buffer *ho
     srv_socket->fd = -1;
     srv_socket->sidx = sidx;
     srv_socket->is_ssl = s->ssl_enabled;
+    srv_socket->srv = srv;
    srv_socket->srv_token = buffer_init_buffer(host_token);
 
     network_srv_sockets_append(srv, srv_socket);
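network.c shows both halves of the back-pointer arrangement: the listening server_socket gains srv at init time (matching the new member added to the struct at the top of this diff), and the accept handler recovers srv from its context instead of a parameter. Condensed pairing of the two hunks above:

/* at socket setup: stash the back-pointer on the object that becomes the fdevent ctx */
srv_socket->srv = srv;

/* in the handler: the registered ctx is the server_socket, so srv is one hop away */
server_socket * const srv_socket = (server_socket *)context;
server * const srv = srv_socket->srv;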
@@ -334,16 +334,16 @@ static void stat_cache_handle_fdevent_in(stat_cache_fam *scf)
     }
 }
 
-static handler_t stat_cache_handle_fdevent(server *srv, void *_fce, int revent)
+static handler_t stat_cache_handle_fdevent(void *ctx, int revent)
 {
     stat_cache_fam * const scf = sc.scf;
-    UNUSED(_fce);
 
     if (revent & FDEVENT_IN) {
         stat_cache_handle_fdevent_in(scf);
     }
 
     if (revent & (FDEVENT_HUP|FDEVENT_RDHUP)) {
+        server *srv = ctx;
         /* fam closed the connection */
         log_error(srv->errh, __FILE__, __LINE__,
           "FAM connection closed; disabling stat_cache.");
@@ -378,7 +378,7 @@ static stat_cache_fam * stat_cache_init_fam(server *srv) {
 
     scf->fd = FAMCONNECTION_GETFD(&scf->fam);
     fdevent_setfd_cloexec(scf->fd);
-    scf->fdn = fdevent_register(srv->ev, scf->fd, stat_cache_handle_fdevent, NULL);
+    scf->fdn = fdevent_register(srv->ev, scf->fd, stat_cache_handle_fdevent, srv);
     fdevent_fdnode_event_set(srv->ev, scf->fdn, FDEVENT_IN | FDEVENT_RDHUP);
 
     return scf;
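stat_cache takes the other route available after this change: its handler has no per-fd object of its own (the old code registered NULL as the context and marked the argument UNUSED), so the commit registers srv itself as the ctx and casts it back only on the error path that wants srv->errh. Condensed from the two hunks above, not the complete function:

/* in stat_cache_init_fam(): the server itself becomes the handler context */
/*   scf->fdn = fdevent_register(srv->ev, scf->fd, stat_cache_handle_fdevent, srv); */

static handler_t stat_cache_handle_fdevent(void *ctx, int revent)
{
    stat_cache_fam * const scf = sc.scf;   /* handler state is a file-scope singleton */
    if (revent & FDEVENT_IN)
        stat_cache_handle_fdevent_in(scf);
    if (revent & (FDEVENT_HUP|FDEVENT_RDHUP)) {
        server *srv = ctx;                 /* ctx is the server; used only for logging here */
        log_error(srv->errh, __FILE__, __LINE__,
          "FAM connection closed; disabling stat_cache.");
    }
    return HANDLER_GO_ON;
}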