Browse Source

[multiple] inline chunkqueue were always alloc'd

inline struct chunkqueue were always allocated in other structs

(memory locality)
master
Glenn Strauss 12 months ago
parent
commit
81029b8b51
  1. 11
      src/chunk.c
  2. 4
      src/chunk.h
  3. 44
      src/connections.c
  4. 48
      src/gw_backend.c
  5. 2
      src/gw_backend.h
  6. 24
      src/h2.c
  7. 18
      src/http-header-glue.c
  8. 18
      src/http_chunk.c
  9. 6
      src/mod_accesslog.c
  10. 10
      src/mod_cgi.c
  11. 8
      src/mod_cml_lua.c
  12. 34
      src/mod_deflate.c
  13. 4
      src/mod_dirlisting.c
  14. 27
      src/mod_fastcgi.c
  15. 2
      src/mod_flv_streaming.c
  16. 8
      src/mod_magnet.c
  17. 25
      src/mod_proxy.c
  18. 4
      src/mod_rrdtool.c
  19. 19
      src/mod_scgi.c
  20. 8
      src/mod_ssi.c
  21. 26
      src/mod_status.c
  22. 6
      src/mod_uploadprogress.c
  23. 44
      src/mod_webdav.c
  24. 24
      src/mod_wstunnel.c
  25. 20
      src/reqpool.c
  26. 8
      src/request.h
  27. 23
      src/response.c
  28. 2
      src/server.c

11
src/chunk.c

@ -56,11 +56,12 @@ static inline size_t chunk_buffer_string_space(const buffer *b) {
return b->size ? b->size - (b->used | (0 == b->used)) : 0;
}
chunkqueue *chunkqueue_init(void) {
chunkqueue *cq;
cq = calloc(1, sizeof(*cq));
force_assert(NULL != cq);
chunkqueue *chunkqueue_init(chunkqueue *cq) {
/* (if caller passes non-NULL cq, it must be 0-init) */
if (NULL == cq) {
cq = calloc(1, sizeof(*cq));
force_assert(NULL != cq);
}
cq->first = NULL;
cq->last = NULL;

4
src/chunk.h

@ -62,7 +62,7 @@ void chunkqueue_chunk_pool_clear(void);
void chunkqueue_chunk_pool_free(void);
__attribute_returns_nonnull__
chunkqueue *chunkqueue_init(void);
chunkqueue *chunkqueue_init(chunkqueue *cq);
void chunkqueue_set_chunk_size (size_t sz);
void chunkqueue_set_tempdirs_default_reset (void);
@ -134,7 +134,9 @@ static inline off_t chunkqueue_length(const chunkqueue *cq) {
return cq->bytes_in - cq->bytes_out;
}
__attribute_cold__
void chunkqueue_free(chunkqueue *cq);
void chunkqueue_reset(chunkqueue *cq);
__attribute_pure__

44
src/connections.c

@ -216,14 +216,14 @@ static void connection_handle_response_end_state(request_st * const r, connectio
if (r->state != CON_STATE_ERROR) ++con->srv->con_written;
if (r->reqbody_length != r->reqbody_queue->bytes_in
if (r->reqbody_length != r->reqbody_queue.bytes_in
|| r->state == CON_STATE_ERROR) {
/* request body may not have been read completely */
r->keep_alive = 0;
/* clean up failed partial write of 1xx intermediate responses*/
if (r->write_queue != con->write_queue) { /*(for HTTP/1.1)*/
if (&r->write_queue != con->write_queue) { /*(for HTTP/1.1)*/
chunkqueue_free(con->write_queue);
con->write_queue = r->write_queue;
con->write_queue = &r->write_queue;
}
}
@ -354,11 +354,11 @@ connection_write_1xx_info (request_st * const r, connection * const con)
if (!chunkqueue_is_empty(cq)) { /* partial write (unlikely) */
con->is_writable = 0;
if (cq == r->write_queue) {
if (cq == &r->write_queue) {
/* save partial write of 1xx in separate chunkqueue
* Note: sending of remainder of 1xx might be delayed
* until next set of response headers are sent */
con->write_queue = chunkqueue_init();
con->write_queue = chunkqueue_init(NULL);
chunkqueue_append_chunkqueue(con->write_queue, cq);
}
}
@ -374,7 +374,7 @@ connection_write_1xx_info (request_st * const r, connection * const con)
* instead of (0 == r->resp_header_len) as flag that final response was set
* (Doing the following would "discard" the 1xx len from bytes_out)
*/
r->write_queue->bytes_in = r->write_queue->bytes_out = 0;
r->write_queue.bytes_in = r->write_queue.bytes_out = 0;
#endif
return 1; /* success */
@ -463,7 +463,7 @@ static void connection_handle_write(request_st * const r, connection * const con
static void connection_handle_write_state(request_st * const r, connection * const con) {
do {
/* only try to write if we have something in the queue */
if (!chunkqueue_is_empty(r->write_queue)) {
if (!chunkqueue_is_empty(&r->write_queue)) {
if (r->http_version <= HTTP_VERSION_1_1 && con->is_writable) {
connection_handle_write(r, con);
if (r->state != CON_STATE_WRITE) break;
@ -500,7 +500,7 @@ static void connection_handle_write_state(request_st * const r, connection * con
}
} while (r->state == CON_STATE_WRITE
&& r->http_version <= HTTP_VERSION_1_1
&& (!chunkqueue_is_empty(r->write_queue)
&& (!chunkqueue_is_empty(&r->write_queue)
? con->is_writable
: r->resp_body_finished));
}
@ -524,8 +524,8 @@ static connection *connection_init(server *srv) {
request_st * const r = &con->request;
request_init_data(r, con, srv);
config_reset_config(r);
con->write_queue = r->write_queue;
con->read_queue = r->read_queue;
con->write_queue = &r->write_queue;
con->read_queue = &r->read_queue;
/* init plugin-specific per-connection structures */
con->plugin_ctx = calloc(1, (srv->plugins.used + 1) * sizeof(void *));
@ -542,9 +542,9 @@ void connections_free(server *srv) {
request_st * const r = &con->request;
connection_reset(con);
if (con->write_queue != r->write_queue)
if (con->write_queue != &r->write_queue)
chunkqueue_free(con->write_queue);
if (con->read_queue != r->read_queue)
if (con->read_queue != &r->read_queue)
chunkqueue_free(con->read_queue);
request_free_data(r);
@ -861,7 +861,7 @@ static handler_t connection_handle_fdevent(void *context, int revents) {
con->is_readable = 1; /*(can read 0 for end-of-stream)*/
if (chunkqueue_is_empty(con->read_queue)) r->keep_alive = 0;
if (r->reqbody_length < -1) { /*(transparent proxy mode; no more data to read)*/
r->reqbody_length = r->reqbody_queue->bytes_in;
r->reqbody_length = r->reqbody_queue.bytes_in;
}
if (sock_addr_get_family(&con->dst_addr) == AF_UNIX) {
/* future: will getpeername() on AF_UNIX properly check if still connected? */
@ -1302,10 +1302,10 @@ connection_state_machine_h2 (request_st * const h2r, connection * const con)
connection_state_machine_loop(r, con);
if (r->resp_header_len && !chunkqueue_is_empty(r->write_queue)
if (r->resp_header_len && !chunkqueue_is_empty(&r->write_queue)
&& (r->resp_body_finished || r->conf.stream_response_body)) {
chunkqueue * const cq = r->write_queue;
chunkqueue * const cq = &r->write_queue;
off_t avail = chunkqueue_length(cq);
if (avail > max_bytes) avail = max_bytes;
if (avail > fsize) avail = fsize;
@ -1437,7 +1437,7 @@ static void connection_check_timeout (connection * const con, const time_t cur_t
changed = 1;
continue;
}
if (rr->reqbody_length != rr->reqbody_queue->bytes_in) {
if (rr->reqbody_length != rr->reqbody_queue.bytes_in) {
/* XXX: should timeout apply if not trying to read on h2con?
* (still applying timeout to catch stuck connections) */
/* XXX: con->read_idle_ts is not per-request, so timeout
@ -1471,7 +1471,7 @@ static void connection_check_timeout (connection * const con, const time_t cur_t
"server.max-write-idle",
BUFFER_INTLEN_PTR(con->dst_addr_buf),
BUFFER_INTLEN_PTR(&r->target),
(long long)r->write_queue->bytes_out,
(long long)r->write_queue.bytes_out,
(int)r->conf.max_write_idle);
}
connection_set_state_error(r, CON_STATE_ERROR);
@ -1887,9 +1887,9 @@ connection_check_expect_100 (request_st * const r, connection * const con)
http_header_request_unset(r, HTTP_HEADER_EXPECT,
CONST_STR_LEN("Expect"));
if (!rc
|| 0 != r->reqbody_queue->bytes_in
|| !chunkqueue_is_empty(r->read_queue)
|| !chunkqueue_is_empty(r->write_queue))
|| 0 != r->reqbody_queue.bytes_in
|| !chunkqueue_is_empty(&r->read_queue)
|| !chunkqueue_is_empty(&r->write_queue))
return 1;
/* send 100 Continue only if no request body data received yet
@ -1907,8 +1907,8 @@ static handler_t
connection_handle_read_post_state (request_st * const r)
{
connection * const con = r->con;
chunkqueue * const cq = r->read_queue;
chunkqueue * const dst_cq = r->reqbody_queue;
chunkqueue * const cq = &r->read_queue;
chunkqueue * const dst_cq = &r->reqbody_queue;
int is_closed = 0;

48
src/gw_backend.c

@ -1094,7 +1094,7 @@ static gw_handler_ctx * handler_ctx_init(size_t sz) {
hctx->send_content_body = 1;
/*hctx->rb = chunkqueue_init();*//*(allocated when needed)*/
hctx->wb = chunkqueue_init();
chunkqueue_init(&hctx->wb);
hctx->wb_reqlen = 0;
return hctx;
@ -1105,8 +1105,8 @@ static void handler_ctx_free(gw_handler_ctx *hctx) {
if (hctx->handler_ctx_free) hctx->handler_ctx_free(hctx);
chunk_buffer_release(hctx->response);
chunkqueue_free(hctx->rb);
chunkqueue_free(hctx->wb);
if (hctx->rb) chunkqueue_free(hctx->rb);
chunkqueue_reset(&hctx->wb);
free(hctx);
}
@ -1124,7 +1124,7 @@ static void handler_ctx_clear(gw_handler_ctx *hctx) {
/*hctx->state_timestamp = 0;*//*(unused; left as-is)*/
if (hctx->rb) chunkqueue_reset(hctx->rb);
if (hctx->wb) chunkqueue_reset(hctx->wb);
chunkqueue_reset(&hctx->wb);
hctx->wb_reqlen = 0;
if (hctx->response) buffer_clear(hctx->response);
@ -1816,7 +1816,7 @@ handler_t gw_handle_request_reset(request_st * const r, void *p_d) {
static void gw_conditional_tcp_fin(gw_handler_ctx * const hctx, request_st * const r) {
/*assert(r->conf.stream_request_body & FDEVENT_STREAM_REQUEST_TCP_FIN);*/
if (!chunkqueue_is_empty(hctx->wb)) return;
if (!chunkqueue_is_empty(&hctx->wb))return;
if (!hctx->host->tcp_fin_propagate) return;
if (hctx->gw_mode == GW_AUTHORIZER) return;
if (r->conf.stream_request_body & FDEVENT_STREAM_REQUEST_BACKEND_SHUT_WR)
@ -1931,16 +1931,16 @@ static handler_t gw_write_request(gw_handler_ctx * const hctx, request_st * cons
gw_set_state(hctx, GW_STATE_WRITE);
/* fall through */
case GW_STATE_WRITE:
if (!chunkqueue_is_empty(hctx->wb)) {
if (!chunkqueue_is_empty(&hctx->wb)) {
log_error_st * const errh = r->conf.errh;
#if 0
if (hctx->conf.debug > 1) {
log_error(errh, __FILE__, __LINE__, "sdsx",
"send data to backend (fd=%d), size=%zu",
hctx->fd, chunkqueue_length(hctx->wb));
hctx->fd, chunkqueue_length(&hctx->wb));
}
#endif
if (r->con->srv->network_backend_write(hctx->fd, hctx->wb,
if (r->con->srv->network_backend_write(hctx->fd, &hctx->wb,
MAX_WRITE_LIMIT, errh) < 0) {
switch(errno) {
case EPIPE:
@ -1954,7 +1954,7 @@ static handler_t gw_write_request(gw_handler_ctx * const hctx, request_st * cons
"connection was dropped after accept() "
"(perhaps the gw process died), "
"write-offset: %lld socket: %s",
(long long)hctx->wb->bytes_out,
(long long)hctx->wb.bytes_out,
hctx->proc->connection_name->ptr);
return HANDLER_ERROR;
default:
@ -1964,12 +1964,12 @@ static handler_t gw_write_request(gw_handler_ctx * const hctx, request_st * cons
}
}
if (hctx->wb->bytes_out == hctx->wb_reqlen) {
if (hctx->wb.bytes_out == hctx->wb_reqlen) {
fdevent_fdnode_event_clr(hctx->ev, hctx->fdn, FDEVENT_OUT);
gw_set_state(hctx, GW_STATE_READ);
} else {
off_t wblen = chunkqueue_length(hctx->wb);
if ((hctx->wb->bytes_in < hctx->wb_reqlen || hctx->wb_reqlen < 0)
off_t wblen = chunkqueue_length(&hctx->wb);
if ((hctx->wb.bytes_in < hctx->wb_reqlen || hctx->wb_reqlen < 0)
&& wblen < 65536 - 16384) {
/*(r->conf.stream_request_body & FDEVENT_STREAM_REQUEST)*/
if (!(r->conf.stream_request_body
@ -2040,7 +2040,7 @@ handler_t gw_handle_subrequest(request_st * const r, void *p_d) {
if ((r->conf.stream_response_body & FDEVENT_STREAM_RESPONSE_BUFMIN)
&& r->resp_body_started) {
if (chunkqueue_length(r->write_queue) > 65536 - 4096) {
if (chunkqueue_length(&r->write_queue) > 65536 - 4096) {
fdevent_fdnode_event_clr(hctx->ev, hctx->fdn, FDEVENT_IN);
}
else if (!(fdevent_fdnode_interest(hctx->fdn) & FDEVENT_IN)) {
@ -2057,18 +2057,18 @@ handler_t gw_handle_subrequest(request_st * const r, void *p_d) {
* the FastCGI Authorizer) */
if (hctx->gw_mode != GW_AUTHORIZER
&& (0 == hctx->wb->bytes_in
&& (0 == hctx->wb.bytes_in
? (r->state == CON_STATE_READ_POST || -1 == hctx->wb_reqlen)
: (hctx->wb->bytes_in < hctx->wb_reqlen || hctx->wb_reqlen < 0))) {
: (hctx->wb.bytes_in < hctx->wb_reqlen || hctx->wb_reqlen < 0))) {
/* leave excess data in r->reqbody_queue, which is
* buffered to disk if too large and backend can not keep up */
/*(64k - 4k to attempt to avoid temporary files
* in conjunction with FDEVENT_STREAM_REQUEST_BUFMIN)*/
if (chunkqueue_length(hctx->wb) > 65536 - 4096) {
if (chunkqueue_length(&hctx->wb) > 65536 - 4096) {
if (r->conf.stream_request_body & FDEVENT_STREAM_REQUEST_BUFMIN) {
r->conf.stream_request_body &= ~FDEVENT_STREAM_REQUEST_POLLIN;
}
if (0 != hctx->wb->bytes_in) return HANDLER_WAIT_FOR_EVENT;
if (0 != hctx->wb.bytes_in) return HANDLER_WAIT_FOR_EVENT;
}
else {
handler_t rc = r->con->reqbody_read(r);
@ -2093,14 +2093,14 @@ handler_t gw_handle_subrequest(request_st * const r, void *p_d) {
}
}
if ((0 != hctx->wb->bytes_in || -1 == hctx->wb_reqlen)
&& !chunkqueue_is_empty(r->reqbody_queue)) {
if ((0 != hctx->wb.bytes_in || -1 == hctx->wb_reqlen)
&& !chunkqueue_is_empty(&r->reqbody_queue)) {
if (hctx->stdin_append) {
handler_t rca = hctx->stdin_append(hctx);
if (HANDLER_GO_ON != rca) return rca;
}
else
chunkqueue_append_chunkqueue(hctx->wb, r->reqbody_queue);
chunkqueue_append_chunkqueue(&hctx->wb, &r->reqbody_queue);
if (fdevent_fdnode_interest(hctx->fdn) & FDEVENT_OUT) {
return (rc == HANDLER_GO_ON) ? HANDLER_WAIT_FOR_EVENT : rc;
}
@ -2110,7 +2110,7 @@ handler_t gw_handle_subrequest(request_st * const r, void *p_d) {
}
{
handler_t rc =((0==hctx->wb->bytes_in || !chunkqueue_is_empty(hctx->wb))
handler_t rc =((0==hctx->wb.bytes_in || !chunkqueue_is_empty(&hctx->wb))
&& hctx->state != GW_STATE_CONNECT_DELAYED)
? gw_send_request(hctx, r)
: HANDLER_WAIT_FOR_EVENT;
@ -2217,9 +2217,7 @@ static handler_t gw_recv_response(gw_handler_ctx * const hctx, request_st * cons
if (r->resp_body_started == 0) {
/* nothing has been sent out yet, try to use another child */
if (hctx->wb->bytes_out == 0 &&
hctx->reconnects++ < 5) {
if (hctx->wb.bytes_out == 0 && hctx->reconnects++ < 5) {
log_error(r->conf.errh, __FILE__, __LINE__,
"response not received, request not sent on "
"socket: %s for %s?%.*s, reconnecting",
@ -2232,7 +2230,7 @@ static handler_t gw_recv_response(gw_handler_ctx * const hctx, request_st * cons
log_error(r->conf.errh, __FILE__, __LINE__,
"response not received, request sent: %lld on "
"socket: %s for %s?%.*s, closing connection",
(long long)hctx->wb->bytes_out, proc->connection_name->ptr,
(long long)hctx->wb.bytes_out, proc->connection_name->ptr,
r->uri.path.ptr, BUFFER_INTLEN_PTR(&r->uri.query));
} else {
log_error(r->conf.errh, __FILE__, __LINE__,

2
src/gw_backend.h

@ -302,8 +302,8 @@ typedef struct gw_handler_ctx {
time_t state_timestamp;
chunkqueue *rb; /* read queue */
chunkqueue *wb; /* write queue */
off_t wb_reqlen;
chunkqueue wb; /* write queue */
buffer *response;

24
src/h2.c

@ -711,7 +711,7 @@ h2_recv_settings (connection * const con, const uint8_t * const s, const uint32_
static int
h2_recv_end_data (request_st * const r, connection * const con, const uint32_t alen)
{
chunkqueue * const reqbody_queue = r->reqbody_queue;
chunkqueue * const reqbody_queue = &r->reqbody_queue;
r->h2state = (r->h2state == H2_STATE_OPEN)
? H2_STATE_HALF_CLOSED_REMOTE
: H2_STATE_CLOSED;
@ -814,7 +814,7 @@ h2_recv_data (connection * const con, const uint8_t * const s, const uint32_t le
h2_send_window_update(con, r->h2id, len); /*(r->h2_rwin)*/
h2_send_window_update(con, 0, len); /*(h2r->h2_rwin)*/
chunkqueue * const dst = r->reqbody_queue;
chunkqueue * const dst = &r->reqbody_queue;
if (r->reqbody_length >= 0 && r->reqbody_length < dst->bytes_in + alen) {
/* data exceeds Content-Length specified (client mistake) */
@ -829,7 +829,7 @@ h2_recv_data (connection * const con, const uint8_t * const s, const uint32_t le
}
/*(accounting for mod_accesslog and mod_rrdtool)*/
chunkqueue * const rq = r->read_queue;
chunkqueue * const rq = &r->read_queue;
rq->bytes_in += (off_t)alen;
rq->bytes_out += (off_t)alen;
@ -1217,7 +1217,7 @@ h2_parse_headers_frame (request_st * const restrict r, const unsigned char *psrc
hpctx.hlen += 2;
r->rqst_header_len += hpctx.hlen;
/*(accounting for mod_accesslog and mod_rrdtool)*/
chunkqueue * const rq = r->read_queue;
chunkqueue * const rq = &r->read_queue;
rq->bytes_in += (off_t)hpctx.hlen;
rq->bytes_out += (off_t)hpctx.hlen;
@ -1994,13 +1994,13 @@ h2_send_headers (request_st * const r, connection * const con)
alen += 2; /* "virtual" blank line ("\r\n") ending headers */
r->resp_header_len = alen;
/*(accounting for mod_accesslog and mod_rrdtool)*/
chunkqueue * const wq = r->write_queue;
chunkqueue * const wq = &r->write_queue;
wq->bytes_in += (off_t)alen;
wq->bytes_out += (off_t)alen;
const uint32_t dlen = (uint32_t)((char *)dst - tb->ptr);
const uint32_t flags =
(r->resp_body_finished && chunkqueue_is_empty(r->write_queue))
(r->resp_body_finished && chunkqueue_is_empty(&r->write_queue))
? H2_FLAG_END_STREAM
: 0;
h2_send_hpack(r, con, tb->ptr, dlen, flags);
@ -2202,13 +2202,13 @@ h2_send_cqheaders (request_st * const r, connection * const con)
*(future: if r->write_queue is bypassed for headers, adjust
* r->write_queue bytes counts (bytes_in, bytes_out) with header len)*/
/* note: expects field-names are lowercased (http_response_write_header())*/
chunk * const c = r->write_queue->first;
chunk * const c = r->write_queue.first;
const uint32_t len = buffer_string_length(c->mem) - (uint32_t)c->offset;
uint32_t flags = (r->resp_body_finished && NULL == c->next)
? H2_FLAG_END_STREAM
: 0;
h2_send_headers_block(r, con, c->mem->ptr + c->offset, len, flags);
chunkqueue_mark_written(r->write_queue, len);
chunkqueue_mark_written(&r->write_queue, len);
}
#endif
@ -2436,8 +2436,8 @@ h2_release_stream (request_st * const r, connection * const con)
* r->read_queue and r->write_queue) */
/* DISABLED since mismatches invalidate the relationship between
* con->bytes_in and con->bytes_out */
con->read_queue->bytes_in -= r->read_queue->bytes_in;
con->write_queue->bytes_out -= r->write_queue->bytes_out;
con->read_queue->bytes_in -= r->read_queue.bytes_in;
con->write_queue->bytes_out -= r->write_queue.bytes_out;
#else
UNUSED(con);
#endif
@ -2529,7 +2529,7 @@ h2_con_upgrade_h2c (request_st * const h2r, const buffer * const http2_settings)
static const char switch_proto[] = "HTTP/1.1 101 Switching Protocols\r\n"
"Connection: Upgrade\r\n"
"Upgrade: h2c\r\n\r\n";
chunkqueue_append_mem(h2r->write_queue,
chunkqueue_append_mem(&h2r->write_queue,
CONST_STR_LEN(switch_proto));
h2r->resp_header_len = sizeof(switch_proto)-1;
#else
@ -2579,7 +2579,7 @@ h2_con_upgrade_h2c (request_st * const h2r, const buffer * const http2_settings)
#if 0 /* expect empty request body */
r->reqbody_length = h2r->reqbody_length; /* currently always 0 */
r->te_chunked = h2r->te_chunked; /* must be 0 */
swap(r->reqbody_queue, h2r->reqbody_queue); /*currently always empty queue*/
swap(&r->reqbody_queue,&h2r->reqbody_queue);/*currently always empty queue*/
#endif
r->http_host = h2r->http_host;
h2r->http_host = NULL;

18
src/http-header-glue.c

@ -271,7 +271,7 @@ void http_response_body_clear (request_st * const r, int preserve_length) {
r->gw_dechunk = NULL;
}
}
chunkqueue_reset(r->write_queue);
chunkqueue_reset(&r->write_queue);
}
@ -479,10 +479,10 @@ static int http_response_parse_range(request_st * const r, buffer * const path,
buffer_append_string_len(b, CONST_STR_LEN("\r\n\r\n"));
r->content_length += buffer_string_length(b);
chunkqueue_append_mem(r->write_queue, CONST_BUF_LEN(b));
chunkqueue_append_mem(&r->write_queue, CONST_BUF_LEN(b));
}
chunkqueue_append_file(r->write_queue, path, start, end - start + 1);
chunkqueue_append_file(&r->write_queue, path, start, end - start + 1);
r->content_length += end - start + 1;
}
}
@ -499,7 +499,7 @@ static int http_response_parse_range(request_st * const r, buffer * const path,
buffer_append_string_len(tb, "--\r\n", 4);
r->content_length += buffer_string_length(tb);
chunkqueue_append_mem(r->write_queue, CONST_BUF_LEN(tb));
chunkqueue_append_mem(&r->write_queue, CONST_BUF_LEN(tb));
/* set header-fields */
@ -1025,12 +1025,10 @@ static handler_t http_response_process_local_redir(request_st * const r, size_t
buffer_copy_buffer(&r->target, vb);
if (r->reqbody_length) {
if (r->reqbody_length
!= r->reqbody_queue->bytes_in) {
if (r->reqbody_length != r->reqbody_queue.bytes_in)
r->keep_alive = 0;
}
r->reqbody_length = 0;
chunkqueue_reset(r->reqbody_queue);
chunkqueue_reset(&r->reqbody_queue);
}
if (r->http_status != 307 && r->http_status != 308) {
@ -1439,7 +1437,7 @@ handler_t http_response_read(request_st * const r, http_response_opts * const op
}
if (r->conf.stream_response_body & FDEVENT_STREAM_RESPONSE_BUFMIN) {
off_t cqlen = chunkqueue_length(r->write_queue);
off_t cqlen = chunkqueue_length(&r->write_queue);
if (cqlen + (off_t)toread > 65536 - 4096) {
if (!r->con->is_writable) {
/*(defer removal of FDEVENT_IN interest since
@ -1515,7 +1513,7 @@ handler_t http_response_read(request_st * const r, http_response_opts * const op
}
if (r->conf.stream_response_body & FDEVENT_STREAM_RESPONSE_BUFMIN) {
if (chunkqueue_length(r->write_queue) > 65536 - 4096) {
if (chunkqueue_length(&r->write_queue) > 65536 - 4096) {
/*(defer removal of FDEVENT_IN interest since
* connection_state_machine() might be able to send
* data immediately, unless !con->is_writable, where

18
src/http_chunk.c

@ -71,7 +71,7 @@ static int http_chunk_append_read_fd_range(request_st * const r, const buffer *
* offset in for cq->bytes_in in chunkqueue_append_buffer_commit() */
UNUSED(fn);
chunkqueue * const cq = r->write_queue;
chunkqueue * const cq = &r->write_queue;
if (r->resp_send_chunked)
http_chunk_len_append(cq, (uintmax_t)len);
@ -93,7 +93,7 @@ static int http_chunk_append_read_fd_range(request_st * const r, const buffer *
}
static void http_chunk_append_file_fd_range(request_st * const r, const buffer * const fn, const int fd, const off_t offset, const off_t len) {
chunkqueue * const cq = r->write_queue;
chunkqueue * const cq = &r->write_queue;
if (r->resp_send_chunked)
http_chunk_len_append(cq, (uintmax_t)len);
@ -146,7 +146,7 @@ int http_chunk_append_file_fd(request_st * const r, const buffer * const fn, con
}
static int http_chunk_append_to_tempfile(request_st * const r, const char * const mem, const size_t len) {
chunkqueue * const cq = r->write_queue;
chunkqueue * const cq = &r->write_queue;
log_error_st * const errh = r->conf.errh;
if (r->resp_send_chunked
@ -165,7 +165,7 @@ static int http_chunk_append_to_tempfile(request_st * const r, const char * cons
}
static int http_chunk_append_cq_to_tempfile(request_st * const r, chunkqueue * const src, const size_t len) {
chunkqueue * const cq = r->write_queue;
chunkqueue * const cq = &r->write_queue;
log_error_st * const errh = r->conf.errh;
if (r->resp_send_chunked
@ -208,7 +208,7 @@ int http_chunk_append_buffer(request_st * const r, buffer * const mem) {
size_t len = buffer_string_length(mem);
if (0 == len) return 0;
chunkqueue * const cq = r->write_queue;
chunkqueue * const cq = &r->write_queue;
if (http_chunk_uses_tempfile(r, cq, len))
return http_chunk_append_to_tempfile(r, mem->ptr, len);
@ -229,7 +229,7 @@ int http_chunk_append_mem(request_st * const r, const char * const mem, const si
if (0 == len) return 0;
force_assert(NULL != mem);
chunkqueue * const cq = r->write_queue;
chunkqueue * const cq = &r->write_queue;
if (http_chunk_uses_tempfile(r, cq, len))
return http_chunk_append_to_tempfile(r, mem, len);
@ -248,7 +248,7 @@ int http_chunk_append_mem(request_st * const r, const char * const mem, const si
int http_chunk_transfer_cqlen(request_st * const r, chunkqueue * const src, const size_t len) {
if (0 == len) return 0;
chunkqueue * const cq = r->write_queue;
chunkqueue * const cq = &r->write_queue;
if (http_chunk_uses_tempfile(r, cq, len))
return http_chunk_append_cq_to_tempfile(r, src, len);
@ -269,12 +269,12 @@ void http_chunk_close(request_st * const r) {
if (r->gw_dechunk && !buffer_string_is_empty(&r->gw_dechunk->b)) {
/* XXX: trailers passed through; no sanity check currently done */
chunkqueue_append_buffer(r->write_queue, &r->gw_dechunk->b);
chunkqueue_append_buffer(&r->write_queue, &r->gw_dechunk->b);
if (!r->gw_dechunk->done)
r->keep_alive = 0;
}
else
chunkqueue_append_mem(r->write_queue, CONST_STR_LEN("0\r\n\r\n"));
chunkqueue_append_mem(&r->write_queue, CONST_STR_LEN("0\r\n\r\n"));
}
static int

6
src/mod_accesslog.c

@ -987,7 +987,7 @@ static int log_access_record (const request_st * const r, buffer * const b, form
{
off_t bytes = r->http_version <= HTTP_VERSION_1_1
? con->bytes_written - r->bytes_written_ckpt
: r->write_queue->bytes_out;
: r->write_queue.bytes_out;
if (bytes > 0) {
bytes -= (off_t)r->resp_header_len;
buffer_append_int(b, bytes > 0 ? bytes : 0);
@ -1029,7 +1029,7 @@ static int log_access_record (const request_st * const r, buffer * const b, form
{
off_t bytes = r->http_version <= HTTP_VERSION_1_1
? con->bytes_written - r->bytes_written_ckpt
: r->write_queue->bytes_out;
: r->write_queue.bytes_out;
if (bytes > 0) {
buffer_append_int(b, bytes);
} else {
@ -1041,7 +1041,7 @@ static int log_access_record (const request_st * const r, buffer * const b, form
{
off_t bytes = r->http_version <= HTTP_VERSION_1_1
? con->bytes_read - r->bytes_read_ckpt
: r->read_queue->bytes_in + (off_t)r->rqst_header_len;
: r->read_queue.bytes_in + (off_t)r->rqst_header_len;
if (bytes > 0) {
buffer_append_int(b, bytes);
} else {

10
src/mod_cgi.c

@ -373,7 +373,7 @@ static handler_t cgi_handle_fdevent_send (void *ctx, int revents) {
if (revents & FDEVENT_HUP) {
/* skip sending remaining data to CGI */
if (r->reqbody_length) {
chunkqueue *cq = r->reqbody_queue;
chunkqueue *cq = &r->reqbody_queue;
chunkqueue_mark_written(cq, chunkqueue_length(cq));
if (cq->bytes_in != (off_t)r->reqbody_length) {
r->keep_alive = 0;
@ -417,7 +417,7 @@ static handler_t cgi_response_headers(request_st * const r, struct http_response
if (hctx->conf.upgrade
&& !light_btst(r->resp_htags, HTTP_HEADER_UPGRADE)) {
chunkqueue *cq = r->reqbody_queue;
chunkqueue *cq = &r->reqbody_queue;
hctx->conf.upgrade = 0;
if (cq->bytes_out == (off_t)r->reqbody_length) {
cgi_connection_close_fdtocgi(hctx); /*(closes hctx->fdtocgi)*/
@ -641,7 +641,7 @@ static ssize_t cgi_write_file_chunk_mmap(request_st * const r, int fd, chunkqueu
static int cgi_write_request(handler_ctx *hctx, int fd) {
request_st * const r = hctx->r;
chunkqueue *cq = r->reqbody_queue;
chunkqueue *cq = &r->reqbody_queue;
chunk *c;
/* old comment: windows doesn't support select() on pipes - wouldn't be easy to fix for all platforms.
@ -947,7 +947,7 @@ SUBREQUEST_FUNC(mod_cgi_handle_subrequest) {
if ((r->conf.stream_response_body & FDEVENT_STREAM_RESPONSE_BUFMIN)
&& r->resp_body_started) {
if (chunkqueue_length(r->write_queue) > 65536 - 4096) {
if (chunkqueue_length(&r->write_queue) > 65536 - 4096) {
fdevent_fdnode_event_clr(hctx->ev, hctx->fdn, FDEVENT_IN);
} else if (!(fdevent_fdnode_interest(hctx->fdn) & FDEVENT_IN)) {
/* optimistic read from backend */
@ -958,7 +958,7 @@ SUBREQUEST_FUNC(mod_cgi_handle_subrequest) {
}
}
chunkqueue * const cq = r->reqbody_queue;
chunkqueue * const cq = &r->reqbody_queue;
if (cq->bytes_in != (off_t)r->reqbody_length) {
/*(64k - 4k to attempt to avoid temporary files

8
src/mod_cml_lua.c

@ -260,7 +260,7 @@ int cache_parse_lua(request_st * const r, plugin_data * const p, const buffer *
break;
}
} else {
chunkqueue_append_file_fd(r->write_queue, b, fd, 0, st.st_size);
chunkqueue_append_file_fd(&r->write_queue, b, fd, 0, st.st_size);
if (st.st_mtime > mtime) mtime = st.st_mtime;
}
} else {
@ -292,11 +292,11 @@ int cache_parse_lua(request_st * const r, plugin_data * const p, const buffer *
/* ok, the client already has our content,
* no need to send it again */
chunkqueue_reset(r->write_queue);
chunkqueue_reset(&r->write_queue);
ret = 0; /* cache-hit */
}
} else {
chunkqueue_reset(r->write_queue);
chunkqueue_reset(&r->write_queue);
}
}
@ -308,7 +308,7 @@ int cache_parse_lua(request_st * const r, plugin_data * const p, const buffer *
buffer_copy_buffer(&r->physical.path, &p->basedir);
buffer_append_string_buffer(&r->physical.path, &p->trigger_handler);
chunkqueue_reset(r->write_queue);
chunkqueue_reset(&r->write_queue);
}
error:

34
src/mod_deflate.c

@ -211,20 +211,20 @@ typedef struct {
} u;
off_t bytes_in;
off_t bytes_out;
chunkqueue *in_queue;
buffer *output;
plugin_data *plugin_data;
request_st *r;
int compression_type;
int cache_fd;
char *cache_fn;
chunkqueue in_queue;
} handler_ctx;
static handler_ctx *handler_ctx_init() {
handler_ctx *hctx;
hctx = calloc(1, sizeof(*hctx));
hctx->in_queue = chunkqueue_init();
chunkqueue_init(&hctx->in_queue);
hctx->cache_fd = -1;
return hctx;
@ -242,7 +242,7 @@ static void handler_ctx_free(handler_ctx *hctx) {
buffer_free(hctx->output);
}
#endif
chunkqueue_free(hctx->in_queue);
chunkqueue_reset(&hctx->in_queue);
free(hctx);
}
@ -315,7 +315,7 @@ static int mod_deflate_cache_file_finish (request_st * const r, handler_ctx * co
return -1;
free(hctx->cache_fn);
hctx->cache_fn = NULL;
chunkqueue_reset(r->write_queue);
chunkqueue_reset(&r->write_queue);
int rc = http_chunk_append_file_fd(r, fn, hctx->cache_fd, hctx->bytes_out);
hctx->cache_fd = -1;
return rc;
@ -1207,14 +1207,14 @@ static handler_t deflate_compress_response(request_st * const r, handler_ctx * c
/* move all chunk from write_queue into our in_queue, then adjust
* counters since r->write_queue is reused for compressed output */
chunkqueue * const cq = r->write_queue;
chunkqueue * const cq = &r->write_queue;
len = chunkqueue_length(cq);
chunkqueue_remove_finished_chunks(cq);
chunkqueue_append_chunkqueue(hctx->in_queue, cq);
chunkqueue_append_chunkqueue(&hctx->in_queue, cq);
cq->bytes_in -= len;
cq->bytes_out -= len;
max = chunkqueue_length(hctx->in_queue);
max = chunkqueue_length(&hctx->in_queue);
#if 0
/* calculate max bytes to compress for this call */
if (p->conf.sync_flush && max > (len = p->conf.work_block_size << 10)) {
@ -1224,7 +1224,7 @@ static handler_t deflate_compress_response(request_st * const r, handler_ctx * c
/* Compress chunks from in_queue into chunks for write_queue */
while (max) {
chunk *c = hctx->in_queue->first;
chunk *c = hctx->in_queue.first;
switch(c->type) {
case MEM_CHUNK:
@ -1249,13 +1249,13 @@ static handler_t deflate_compress_response(request_st * const r, handler_ctx * c
}
max -= len;
chunkqueue_mark_written(hctx->in_queue, len);
chunkqueue_mark_written(&hctx->in_queue, len);
}
/*(currently should always be true)*/
/*(current implementation requires response be complete)*/
close_stream = (r->resp_body_finished
&& chunkqueue_is_empty(hctx->in_queue));
&& chunkqueue_is_empty(&hctx->in_queue));
if (mod_deflate_stream_flush(hctx, close_stream) < 0) {
log_error(r->conf.errh, __FILE__, __LINE__, "flush error");
return HANDLER_ERROR;
@ -1409,7 +1409,7 @@ REQUEST_FUNC(mod_deflate_handle_response_start) {
/* check if size of response is below min-compress-size or exceeds max*/
/* (r->resp_body_finished checked at top of routine) */
len = chunkqueue_length(r->write_queue);
len = chunkqueue_length(&r->write_queue);
if (len <= (off_t)p->conf.min_compress_size) return HANDLER_GO_ON;
if (p->conf.max_compress_size /*(max_compress_size in KB)*/
&& len > ((off_t)p->conf.max_compress_size << 10)) {
@ -1528,17 +1528,17 @@ REQUEST_FUNC(mod_deflate_handle_response_start) {
&& !had_vary
&& etaglen > 2
&& r->resp_body_finished
&& r->write_queue->first == r->write_queue->last
&& r->write_queue->first->type == FILE_CHUNK
&& r->write_queue->first->file.start == 0
&& !r->write_queue->first->file.is_temp
&& r->write_queue.first == r->write_queue.last
&& r->write_queue.first->type == FILE_CHUNK
&& r->write_queue.first->file.start == 0
&& !r->write_queue.first->file.is_temp
&& !http_header_response_get(r, HTTP_HEADER_RANGE,
CONST_STR_LEN("Range"))) {
tb = mod_deflate_cache_file_name(r, p->conf.cache_dir, vb);
/*(checked earlier and skipped if Transfer-Encoding had been set)*/
stat_cache_entry *sce = stat_cache_get_entry(tb);
if (NULL != sce) {
chunkqueue_reset(r->write_queue);
chunkqueue_reset(&r->write_queue);
if (0 != http_chunk_append_file(r, tb))
return HANDLER_ERROR;
if (light_btst(r->resp_htags, HTTP_HEADER_CONTENT_LENGTH))
@ -1549,7 +1549,7 @@ REQUEST_FUNC(mod_deflate_handle_response_start) {
}
/* sanity check that response was whole file;
* (racy since using stat_cache, but cache file only if match) */
sce = stat_cache_get_entry(r->write_queue->first->mem);
sce = stat_cache_get_entry(r->write_queue.first->mem);
if (NULL == sce || sce->st.st_size != len)
tb = NULL;
if (0 != mkdir_for_file(tb->ptr))

4
src/mod_dirlisting.c

@ -931,7 +931,7 @@ static int http_list_directory(request_st * const r, plugin_data * const p, buff
if (files.used) http_dirls_sort(files.ent, files.used);
out = chunkqueue_append_buffer_open(r->write_queue);
out = chunkqueue_append_buffer_open(&r->write_queue);
http_list_directory_header(r, p, out);
/* directories */
@ -1015,7 +1015,7 @@ static int http_list_directory(request_st * const r, plugin_data * const p, buff
http_header_response_set(r, HTTP_HEADER_CONTENT_TYPE, CONST_STR_LEN("Content-Type"), CONST_BUF_LEN(&p->tmp_buf));
}
chunkqueue_append_buffer_commit(r->write_queue);
chunkqueue_append_buffer_commit(&r->write_queue);
r->resp_body_finished = 1;
return 0;

27
src/mod_fastcgi.c

@ -219,7 +219,7 @@ static void fcgi_header(FCGI_Header * header, unsigned char type, int request_id
static handler_t fcgi_stdin_append(handler_ctx *hctx) {
FCGI_Header header;
chunkqueue * const req_cq = hctx->r->reqbody_queue;
chunkqueue * const req_cq = &hctx->r->reqbody_queue;
off_t offset, weWant;
const off_t req_cqlen = chunkqueue_length(req_cq);
int request_id = hctx->request_id;
@ -237,20 +237,20 @@ static handler_t fcgi_stdin_append(handler_ctx *hctx) {
}
fcgi_header(&(header), FCGI_STDIN, request_id, weWant, 0);
(chunkqueue_is_empty(hctx->wb) || hctx->wb->first->type == MEM_CHUNK) /* else FILE_CHUNK for temp file */
? chunkqueue_append_mem(hctx->wb, (const char *)&header, sizeof(header))
: chunkqueue_append_mem_min(hctx->wb, (const char *)&header, sizeof(header));
chunkqueue_steal(hctx->wb, req_cq, weWant);
(chunkqueue_is_empty(&hctx->wb) || hctx->wb.first->type == MEM_CHUNK) /* else FILE_CHUNK for temp file */
? chunkqueue_append_mem(&hctx->wb, (const char *)&header, sizeof(header))
: chunkqueue_append_mem_min(&hctx->wb, (const char *)&header, sizeof(header));
chunkqueue_steal(&hctx->wb, req_cq, weWant);
/*(hctx->wb_reqlen already includes reqbody_length)*/
}
if (hctx->wb->bytes_in == hctx->wb_reqlen) {
if (hctx->wb.bytes_in == hctx->wb_reqlen) {
/* terminate STDIN */
/* (future: must defer ending FCGI_STDIN
* if might later upgrade protocols
* and then have more data to send) */
fcgi_header(&(header), FCGI_STDIN, request_id, 0, 0);
chunkqueue_append_mem(hctx->wb, (const char *)&header, sizeof(header));
chunkqueue_append_mem(&hctx->wb, (const char *)&header, sizeof(header));
hctx->wb_reqlen += (int)sizeof(header);
}
@ -272,8 +272,9 @@ static handler_t fcgi_create_env(handler_ctx *hctx) {
host->strip_request_uri
};
size_t rsz = (size_t)(r->read_queue->bytes_out - hctx->wb->bytes_in);
buffer * const b = chunkqueue_prepend_buffer_open_sz(hctx->wb, rsz < 65536 ? rsz : r->rqst_header_len);
size_t rsz = (size_t)(r->read_queue.bytes_out - hctx->wb.bytes_in);
if (rsz >= 65536) rsz = r->rqst_header_len;
buffer * const b = chunkqueue_prepend_buffer_open_sz(&hctx->wb, rsz);
/* send FCGI_BEGIN_REQUEST */
@ -301,7 +302,7 @@ static handler_t fcgi_create_env(handler_ctx *hctx) {
r->http_status = 400;
r->handler_module = NULL;
buffer_clear(b);
chunkqueue_remove_finished_chunks(hctx->wb);
chunkqueue_remove_finished_chunks(&hctx->wb);
return HANDLER_FINISHED;
} else {
fcgi_header(&(header), FCGI_PARAMS, request_id,
@ -312,11 +313,11 @@ static handler_t fcgi_create_env(handler_ctx *hctx) {
buffer_append_string_len(b, (const char *)&header, sizeof(header));
hctx->wb_reqlen = buffer_string_length(b);
chunkqueue_prepend_buffer_commit(hctx->wb);
chunkqueue_prepend_buffer_commit(&hctx->wb);
}
if (r->reqbody_length) {
/*chunkqueue_append_chunkqueue(hctx->wb, r->reqbody_queue);*/
/*chunkqueue_append_chunkqueue(&hctx->wb, &r->reqbody_queue);*/
if (r->reqbody_length > 0)
hctx->wb_reqlen += r->reqbody_length;/* (eventual) (minimal) total request size, not necessarily including all fcgi_headers around content length yet */
else /* as-yet-unknown total request size (Transfer-Encoding: chunked)*/
@ -522,7 +523,7 @@ static handler_t fcgi_check_extension(request_st * const r, void *p_d, int uri_p
hctx->stdin_append = fcgi_stdin_append;
hctx->create_env = fcgi_create_env;
if (!hctx->rb) {
hctx->rb = chunkqueue_init();
hctx->rb = chunkqueue_init(NULL);
}
else {
chunkqueue_reset(hctx->rb);

2
src/mod_flv_streaming.c

@ -123,7 +123,7 @@ URIHANDLER_FUNC(mod_flv_streaming_path_handler) {
/* let's build a flv header */
http_chunk_append_mem(r, CONST_STR_LEN("FLV\x1\x1\0\0\0\x9\0\0\0\x9"));
if (0 != http_chunk_append_file_range(r, &r->physical.path, start, len)) {
chunkqueue_reset(r->write_queue);
chunkqueue_reset(&r->write_queue);
return HANDLER_GO_ON;
}

8
src/mod_magnet.c

@ -555,13 +555,13 @@ static buffer *magnet_env_get_buffer_by_id(request_st * const r, int id) {
buffer_clear(dest);
if (!r->resp_body_finished)
break;
buffer_append_int(dest, chunkqueue_length(r->write_queue));
buffer_append_int(dest, chunkqueue_length(&r->write_queue));
break;
case MAGNET_ENV_RESPONSE_BODY:
if (!r->resp_body_finished)
break;
else {
chunkqueue * const cq = r->write_queue;
chunkqueue * const cq = &r->write_queue;
off_t len = chunkqueue_length(cq);
if (0 == len) {
dest = r->tmp_buf;
@ -773,7 +773,7 @@ static int magnet_attach_content(request_st * const r, lua_State * const L, int
if (lua_isstring(L, -1)) {
const_buffer data = magnet_checkconstbuffer(L, -1);
chunkqueue_append_mem(r->write_queue, data.ptr, data.len);
chunkqueue_append_mem(&r->write_queue, data.ptr, data.len);
} else if (lua_istable(L, -1)) {
lua_getfield(L, -1, "filename");
lua_getfield(L, -2, "length"); /* (0-based) end of range (not actually "length") */
@ -1049,7 +1049,7 @@ static handler_t magnet_attract(request_st * const r, plugin_data * const p, buf
if (0 == setjmp(exceptionjmp)) {
magnet_attach_content(r, L, lighty_table_ndx);
if (!chunkqueue_is_empty(r->write_queue)) {
if (!chunkqueue_is_empty(&r->write_queue)) {
r->handler_module = p->self;
}
} else {

25
src/mod_proxy.c

@ -823,7 +823,7 @@ static void proxy_set_Forwarded(connection * const con, request_st * const r, co
static handler_t proxy_stdin_append(gw_handler_ctx *hctx) {
/*handler_ctx *hctx = (handler_ctx *)gwhctx;*/
chunkqueue * const req_cq = hctx->r->reqbody_queue;
chunkqueue * const req_cq = &hctx->r->reqbody_queue;
const off_t req_cqlen = chunkqueue_length(req_cq);
if (req_cqlen) {
/* XXX: future: use http_chunk_len_append() */
@ -838,18 +838,18 @@ static handler_t proxy_stdin_append(gw_handler_ctx *hctx) {
if (-1 != hctx->wb_reqlen)
hctx->wb_reqlen += (hctx->wb_reqlen >= 0) ? len : -len;
(chunkqueue_is_empty(hctx->wb) || hctx->wb->first->type == MEM_CHUNK)
(chunkqueue_is_empty(&hctx->wb) || hctx->wb.first->type == MEM_CHUNK)
/* else FILE_CHUNK for temp file */
? chunkqueue_append_mem(hctx->wb, CONST_BUF_LEN(tb))
: chunkqueue_append_mem_min(hctx->wb, CONST_BUF_LEN(tb));
chunkqueue_steal(hctx->wb, req_cq, req_cqlen);
? chunkqueue_append_mem(&hctx->wb, CONST_BUF_LEN(tb))
: chunkqueue_append_mem_min(&hctx->wb, CONST_BUF_LEN(tb));
chunkqueue_steal(&hctx->wb, req_cq, req_cqlen);
chunkqueue_append_mem_min(hctx->wb, CONST_STR_LEN("\r\n"));
chunkqueue_append_mem_min(&hctx->wb, CONST_STR_LEN("\r\n"));
}
if (hctx->wb->bytes_in == hctx->wb_reqlen) {/*hctx->r->reqbody_length >= 0*/
if (hctx->wb.bytes_in == hctx->wb_reqlen) {/*hctx->r->reqbody_length >= 0*/
/* terminate STDIN */
chunkqueue_append_mem(hctx->wb, CONST_STR_LEN("0\r\n\r\n"));
chunkqueue_append_mem(&hctx->wb, CONST_STR_LEN("0\r\n\r\n"));
hctx->wb_reqlen += (int)sizeof("0\r\n\r\n");
}
@ -862,8 +862,9 @@ static handler_t proxy_create_env(gw_handler_ctx *gwhctx) {
request_st * const r = hctx->gw.r;
const int remap_headers = (NULL != hctx->conf.header.urlpaths
|| NULL != hctx->conf.header.hosts_request);
size_t rsz = (size_t)(r->read_queue->bytes_out - hctx->gw.wb->bytes_in);
buffer * const b = chunkqueue_prepend_buffer_open_sz(hctx->gw.wb, rsz < 65536 ? rsz : r->rqst_header_len);
size_t rsz = (size_t)(r->read_queue.bytes_out - hctx->gw.wb.bytes_in);
if (rsz >= 65536) rsz = r->rqst_header_len;
buffer * const b = chunkqueue_prepend_buffer_open_sz(&hctx->gw.wb, rsz);
/* build header */
@ -1029,10 +1030,10 @@ static handler_t proxy_create_env(gw_handler_ctx *gwhctx) {
buffer_append_string_len(b, CONST_STR_LEN("Connection: close\r\n\r\n"));
hctx->gw.wb_reqlen = buffer_string_length(b);
chunkqueue_prepend_buffer_commit(hctx->gw.wb);
chunkqueue_prepend_buffer_commit(&hctx->gw.wb);
if (r->reqbody_length) {
chunkqueue_append_chunkqueue(hctx->gw.wb, r->reqbody_queue);
chunkqueue_append_chunkqueue(&hctx->gw.wb, &r->reqbody_queue);
if (r->reqbody_length > 0)
hctx->gw.wb_reqlen += r->reqbody_length; /* total req size */
else /* as-yet-unknown total request size (Transfer-Encoding: chunked)*/

4
src/mod_rrdtool.c

@ -429,8 +429,8 @@ REQUESTDONE_FUNC(mod_rrd_account) {
rrd->bytes_read += (r->con->bytes_read - r->bytes_read_ckpt);
}
else {
rrd->bytes_written += r->write_queue->bytes_out;
rrd->bytes_read += r->read_queue->bytes_in;
rrd->bytes_written += r->write_queue.bytes_out;
rrd->bytes_read += r->read_queue.bytes_in;
}
return HANDLER_GO_ON;

19
src/mod_scgi.c

@ -215,8 +215,9 @@ static handler_t scgi_create_env(handler_ctx *hctx) {
? scgi_env_add_scgi
: scgi_env_add_uwsgi;
size_t offset;
size_t rsz = (size_t)(r->read_queue->bytes_out - hctx->wb->bytes_in);
buffer * const b = chunkqueue_prepend_buffer_open_sz(hctx->wb, rsz < 65536 ? rsz : r->rqst_header_len);
size_t rsz = (size_t)(r->read_queue.bytes_out - hctx->wb.bytes_in);
if (rsz >= 65536) rsz = r->rqst_header_len;
buffer * const b = chunkqueue_prepend_buffer_open_sz(&hctx->wb, rsz);
/* save space for 9 digits (plus ':'), though incoming HTTP request
* currently limited to 64k (65535, so 5 chars) */
@ -226,7 +227,7 @@ static handler_t scgi_create_env(handler_ctx *hctx) {
r->http_status = 400;
r->handler_module = NULL;
buffer_clear(b);
chunkqueue_remove_finished_chunks(hctx->wb);
chunkqueue_remove_finished_chunks(&hctx->wb);
return HANDLER_FINISHED;
}
@ -249,7 +250,7 @@ static handler_t scgi_create_env(handler_ctx *hctx) {
r->http_status = 431; /* Request Header Fields Too Large */
r->handler_module = NULL;
buffer_clear(b);
chunkqueue_remove_finished_chunks(hctx->wb);
chunkqueue_remove_finished_chunks(&hctx->wb);
return HANDLER_FINISHED;
}
offset = 10 - 4;
@ -258,13 +259,13 @@ static handler_t scgi_create_env(handler_ctx *hctx) {
}
hctx->wb_reqlen = buffer_string_length(b) - offset;
chunkqueue_prepend_buffer_commit(hctx->wb);
chunkqueue_mark_written(hctx->wb, offset);
hctx->wb->bytes_in -= (off_t)offset;
hctx->wb->bytes_out -= (off_t)offset;
chunkqueue_prepend_buffer_commit(&hctx->wb);
chunkqueue_mark_written(&hctx->wb, offset);
hctx->wb.bytes_in -= (off_t)offset;
hctx->wb.bytes_out -= (off_t)offset;
if (r->reqbody_length) {
chunkqueue_append_chunkqueue(hctx->wb, r->reqbody_queue);
chunkqueue_append_chunkqueue(&hctx->wb, &r->reqbody_queue);
if (r->reqbody_length > 0)
hctx->wb_reqlen += r->reqbody_length; /* total req size */
else /* as-yet-unknown total request size (Transfer-Encoding: chunked)*/

8
src/mod_ssi.c

@ -281,7 +281,7 @@ static int process_ssi_stmt(request_st * const r, handler_ctx * const p, const c
}
}
chunkqueue * const cq = r->write_queue;
chunkqueue * const cq = &r->write_queue;
switch(ssicmd) {
case SSI_ECHO: {
@ -1030,7 +1030,7 @@ static void mod_ssi_parse_ssi_stmt(request_st * const r, handler_ctx * const p,
&& (s[12] == ' ' || s[12] == '\t'))
return;
/* XXX: perhaps emit error comment instead of invalid <!--#...--> code to client */
chunkqueue_append_mem(r->write_queue, s, len); /* append stmt as-is */
chunkqueue_append_mem(&r->write_queue, s, len); /* append stmt as-is */
return;
}
@ -1094,7 +1094,7 @@ static void mod_ssi_read_fd(request_st * const r, handler_ctx * const p, struct
size_t offset, pretag;
const size_t bufsz = 8192;
char * const buf = malloc(bufsz); /* allocate to reduce chance of stack exhaustion upon deep recursion */
chunkqueue * const cq = r->write_queue;
chunkqueue * const cq = &r->write_queue;
force_assert(buf);
offset = 0;
@ -1238,7 +1238,7 @@ static int mod_ssi_handle_request(request_st * const r, handler_ctx * const p) {
/* ok, the client already has our content,
* no need to send it again */
chunkqueue_reset(r->write_queue);
chunkqueue_reset(&r->write_queue);
}
}

26
src/mod_status.c

@ -221,7 +221,7 @@ static void mod_status_html_rtable_r (buffer * const b, const request_st * const
buffer_append_string_len(b, CONST_STR_LEN("</td><td class=\"int\">"));
if (r->reqbody_length) {
buffer_append_int(b, r->reqbody_queue->bytes_in);
buffer_append_int(b