[core] Allow disabling the stat cache by setting stat_cache.ttl to 0

personal/stbuehler/wip
Thomas Porzelt 13 years ago
parent cbb9165eb0
commit c8663ffda9

@ -201,7 +201,7 @@ static liHandlerResult core_handle_docroot(liVRequest *vr, gpointer param, gpoin
for (; i < arr->len; i++) {
struct stat st;
gint err;
g_string_truncate(vr->physical.doc_root, 0);
li_pattern_eval(vr, vr->physical.doc_root, g_array_index(arr, liPattern*, i), core_docroot_nth_cb, vr->request.uri.host, li_pattern_regex_cb, match_info);
@ -483,7 +483,7 @@ static void core_index_free(liServer *srv, gpointer param) {
UNUSED(srv);
for (i = 0; i < files->len; i++)
for (i = 0; i < files->len; i++)
li_value_free(g_array_index(files, liValue*, i));
g_array_free(files, TRUE);
@ -599,7 +599,7 @@ static liHandlerResult core_handle_static(liVRequest *vr, gpointer param, gpoint
if (CORE_OPTION(LI_CORE_OPTION_DEBUG_REQUEST_HANDLING).boolean) {
VR_DEBUG(vr, "not a regular file: '%s'", vr->physical.path->str);
}
if (fd != -1)
close(fd);
@ -979,7 +979,7 @@ static gboolean core_io_timeout(liServer *srv, liPlugin* p, liValue *val, gpoint
static gboolean core_stat_cache_ttl(liServer *srv, liPlugin* p, liValue *val, gpointer userdata) {
UNUSED(p); UNUSED(userdata);
if (!val || val->type != LI_VALUE_NUMBER || val->data.number < 1) {
if (!val || val->type != LI_VALUE_NUMBER || val->data.number < 0) {
ERROR(srv, "%s", "stat_cache.ttl expects a positive number as parameter");
return FALSE;
}

@ -178,6 +178,7 @@ liServer* li_server_new(const gchar *module_dir, gboolean module_resident) {
srv->io_timeout = 300; /* default I/O timeout */
srv->keep_alive_queue_timeout = 5;
srv->stat_cache_ttl = 10.0; /* default stat cache ttl */
#ifdef LIGHTY_OS_LINUX
/* sched_getaffinity is only available on linux */

@ -12,9 +12,13 @@ void li_stat_cache_new(liWorker *wrk, gdouble ttl) {
liStatCache *sc;
GError *err;
/* ttl default 10s */
if (ttl < 1)
if (ttl < 0) {
/* fall back to default if not sane */
ttl = 10.0;
} else if (ttl == 0) {
/* ttl == 0 means the stat cache is disabled */
return;
}
sc = g_slice_new0(liStatCache);
sc->ttl = ttl;
@ -43,6 +47,10 @@ void li_stat_cache_free(liStatCache *sc) {
liStatCacheEntry *sce;
liWaitQueueElem *wqe;
/* check if stat cache was enabled */
if (!sc)
return;
/* wake up thread */
sce = g_slice_new0(liStatCacheEntry);
g_async_queue_push(sc->job_queue_out, sce);
@ -296,43 +304,40 @@ static liHandlerResult stat_cache_get(liVRequest *vr, GString *path, struct stat
liStatCacheEntry *sce;
guint i;
if (NULL == vr) goto callstat;
sc = vr->wrk->stat_cache;
sce = g_hash_table_lookup(sc->entries, path);
/* force blocking call if we are not in a vrequest context or stat cache is disabled */
if (!vr || !(sc = vr->wrk->stat_cache))
async = FALSE;
if (sce) {
/* cache hit, check state */
if (g_atomic_int_get(&sce->state) == STAT_CACHE_ENTRY_WAITING) {
if (async) {
sce = NULL;
goto callstat;
}
if (async) {
sce = g_hash_table_lookup(sc->entries, path);
/* already waiting for it? */
for (i = 0; i < vr->stat_cache_entries->len; i++) {
if (g_ptr_array_index(vr->stat_cache_entries, i) == sce) {
return LI_HANDLER_WAIT_FOR_EVENT;
if (sce) {
/* cache hit, check state */
if (g_atomic_int_get(&sce->state) == STAT_CACHE_ENTRY_WAITING) {
/* already waiting for it? */
for (i = 0; i < vr->stat_cache_entries->len; i++) {
if (g_ptr_array_index(vr->stat_cache_entries, i) == sce) {
return LI_HANDLER_WAIT_FOR_EVENT;
}
}
li_stat_cache_entry_acquire(vr, sce);
return LI_HANDLER_WAIT_FOR_EVENT;
}
sc->hits++;
} else {
/* cache miss, allocate new entry */
sce = stat_cache_entry_new(path);
sce->type = STAT_CACHE_ENTRY_SINGLE;
li_stat_cache_entry_acquire(vr, sce);
li_waitqueue_push(&sc->delete_queue, &sce->queue_elem);
g_hash_table_insert(sc->entries, sce->data.path, sce);
g_async_queue_push(sc->job_queue_out, sce);
sc->misses++;
return LI_HANDLER_WAIT_FOR_EVENT;
}
sc->hits++;
} else if (async) {
/* cache miss, allocate new entry */
sce = stat_cache_entry_new(path);
sce->type = STAT_CACHE_ENTRY_SINGLE;
li_stat_cache_entry_acquire(vr, sce);
li_waitqueue_push(&sc->delete_queue, &sce->queue_elem);
g_hash_table_insert(sc->entries, sce->data.path, sce);
g_async_queue_push(sc->job_queue_out, sce);
sc->misses++;
return LI_HANDLER_WAIT_FOR_EVENT;
}
callstat:
if (fd) {
/* open + fstat */
while (-1 == (*fd = open(path->str, O_RDONLY))) {

@ -455,8 +455,6 @@ liWorker* li_worker_new(liServer *srv, struct ev_loop *loop) {
ev_async_start(wrk->loop, &wrk->job_async_queue_watcher);
ev_unref(wrk->loop); /* this watcher shouldn't keep the loop alive */
li_stat_cache_new(wrk, srv->stat_cache_ttl);
wrk->network_read_buf = g_byte_array_sized_new(0);
return wrk;
@ -558,6 +556,10 @@ void li_worker_run(liWorker *wrk) {
}
}
/* setup stat cache if necessary */
if (wrk->srv->stat_cache_ttl && !wrk->stat_cache)
li_stat_cache_new(wrk, wrk->srv->stat_cache_ttl);
ev_loop(wrk->loop, 0);
}
@ -572,7 +574,8 @@ void li_worker_stop(liWorker *context, liWorker *wrk) {
ev_async_stop(wrk->loop, &wrk->new_con_watcher);
li_waitqueue_stop(&wrk->io_timeout_queue);
li_waitqueue_stop(&wrk->throttle_queue);
li_waitqueue_stop(&wrk->stat_cache->delete_queue);
if (wrk->stat_cache)
li_waitqueue_stop(&wrk->stat_cache->delete_queue);
li_worker_new_con_cb(wrk->loop, &wrk->new_con_watcher, 0); /* handle remaining new connections */
/* close keep alive connections */
@ -656,7 +659,7 @@ void li_worker_con_put(liConnection *con) {
guint threshold;
liWorker *wrk = con->wrk;
ev_tstamp now = CUR_TS(wrk);
if (con->state == LI_CON_STATE_DEAD)
/* already disconnected */
return;

Loading…
Cancel
Save