lighttpd1.4/src/stat_cache.c


#include "first.h"
#include "stat_cache.h"
#include "log.h"
#include "fdevent.h"
#include "http_etag.h"
#include "algo_splaytree.h"
#include <sys/types.h>
#include <sys/stat.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#if defined(HAVE_SYS_XATTR_H)
# include <sys/xattr.h>
#elif defined(HAVE_ATTR_ATTRIBUTES_H)
# include <attr/attributes.h>
#endif
#ifdef HAVE_SYS_EXTATTR_H
# include <sys/extattr.h>
#endif
#ifndef HAVE_LSTAT
#define lstat stat
#ifndef S_ISLNK
#define S_ISLNK(mode) (0)
#endif
#endif
/*
* stat-cache
*
* - a splay-tree is used so we benefit from its caching effect (recently accessed nodes stay near the root)
*/
enum {
STAT_CACHE_ENGINE_SIMPLE = 0 /*(default)*/
,STAT_CACHE_ENGINE_NONE = 1
,STAT_CACHE_ENGINE_FAM = 2 /* same as STAT_CACHE_ENGINE_INOTIFY */
,STAT_CACHE_ENGINE_INOTIFY = 2 /* same as STAT_CACHE_ENGINE_FAM */
,STAT_CACHE_ENGINE_KQUEUE = 2 /* same as STAT_CACHE_ENGINE_FAM */
};
struct stat_cache_fam; /* declaration */
typedef struct stat_cache {
int stat_cache_engine;
splay_tree *files; /* nodes of tree are (stat_cache_entry *) */
struct stat_cache_fam *scf;
} stat_cache;
static stat_cache sc;
static void * stat_cache_sptree_find(splay_tree ** const sptree,
const char * const name,
uint32_t len)
{
const int ndx = splaytree_djbhash(name, len);
*sptree = splaytree_splay(*sptree, ndx);
return (*sptree && (*sptree)->key == ndx) ? (*sptree)->data : NULL;
}
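/* note: the splay trees in this file are keyed by splaytree_djbhash() of the
 * path, so distinct paths may collide on the same key and callers must
 * re-check the stored name after a hit.  Illustrative sketch, mirroring the
 * re-checks done later in this file (stat_cache_handle_fdevent_fn,
 * fam_dir_monitor):
 *
 *   fam_dir_entry *fam_dir = stat_cache_sptree_find(&scf->dirs, name, len);
 *   if (fam_dir && !buffer_is_equal_string(fam_dir->name, name, len))
 *       fam_dir = NULL;    (hash collision; treat as a cache miss)
 */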
#if defined(HAVE_SYS_INOTIFY_H) \
|| (defined(HAVE_SYS_EVENT_H) && defined(HAVE_KQUEUE))
#ifndef HAVE_FAM_H
#define HAVE_FAM_H
#endif
#endif
#ifdef HAVE_FAM_H
/* monitor changes in directories using FAM
*
* This FAM-based implementation monitors directories as they are used,
* and maintains a reference count for cache use within stat_cache.c.
* A periodic job runs in lighttpd every 32 seconds, expiring entries unused
* in the last 64 seconds from the cache and cancelling their FAM monitoring. Items
* within the cache are checked against the filesystem upon use if last stat()
* was greater than or equal to 16 seconds ago.
*
* This implementation does not monitor every directory in a tree, and therefore
* the cache may get out-of-sync with the filesystem. Delays in receiving and
* processing events from FAM might also lead to stale cache entries.
*
* For many websites, a large number of files are seldom, if ever, modified,
* and a common practice with images is to create a new file with a new name
* when a new version is needed, in order for client browsers and CDNs to better
* cache the content. Given this, most use will see little difference in
* performance between server.stat-cache-engine = "fam" and "simple" (default).
* The default server.stat-cache-engine = "simple" calls stat() on a target once
* per second, and reuses that information until the next second. For use where
* changes must be immediately visible, server.stat-cache-engine = "disable"
* should be used.
*
* When considering use of server.stat-cache-engine = "fam", there are a few
* additional limitations for this cache implementation using FAM.
* - symlinks to files located outside of the current directory do not result
* in changes to that file being monitored (unless that file is in a directory
* which is monitored as a result of a different request). symlinks can be
* chained and can be circular. This implementation *does not* readlink() or
* realpath() to resolve the chains to find and monitor the ultimate target
* directory. While symlinks to files located outside the current directory
* are not monitored, symlinks to directories *are* monitored, though chains
* of symlinks to directories do not result in monitoring of the directories
* containing intermediate symlinks to the target directory.
* - rename of a directory which is not currently being monitored will
* result in stale information in the cache if there is a subdirectory that is
* being monitored.
* Even though lighttpd will not receive FAM events in the above cases, lighttpd
* does re-validate the information in the cache upon use if the cache entry has
* not been checked in 16 seconds, so that is the upper limit for use of stale
* data.
*
* Use of server.stat-cache-engine = "fam" is discouraged for extremely volatile
* directories such as temporary directories (e.g. /tmp and maybe /var/tmp) due
* to the overhead of processing the additional noise generated from changes.
* Related, server.stat-cache-engine = "fam" is not recommended on trees of
* untrusted files where a malicious user could generate an excess of change
* events.
*
* Internal note: lighttpd walks the caches to prune trees in stat_cache when an
* event is received for a directory (or symlink to a directory) which has been
* deleted or renamed. The splaytree data structure is suboptimal for frequent
* changes of large directory trees where there have been a large number of
* different files recently accessed and part of the stat_cache.
*/
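/* example lighttpd.conf settings for the engines discussed above
 * (illustrative; pick one, "simple" is the default):
 *
 *   server.stat-cache-engine = "simple"    stat() at most once per second
 *   server.stat-cache-engine = "fam"       monitor directories for changes
 *   server.stat-cache-engine = "disable"   no caching; changes visible at once
 */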
#if defined(HAVE_SYS_INOTIFY_H) \
&& !(defined(HAVE_SYS_EVENT_H) && defined(HAVE_KQUEUE))
#include <sys/inotify.h>
/*(translate FAM API to inotify; this is specific to stat_cache.c use of FAM)*/
#define fam fd /*(translate struct stat_cache_fam scf->fam -> scf->fd)*/
typedef int FAMRequest; /*(fr)*/
#define FAMClose(fd) \
close(*(fd))
#define FAMCancelMonitor(fd, wd) \
inotify_rm_watch(*(fd), *(wd))
#define fam_watch_mask ( IN_ATTRIB | IN_CREATE | IN_DELETE | IN_DELETE_SELF \
| IN_MODIFY | IN_MOVE_SELF | IN_MOVED_FROM \
| IN_EXCL_UNLINK | IN_ONLYDIR )
/*(note: follows symlinks; not providing IN_DONT_FOLLOW)*/
#define FAMMonitorDirectory(fd, fn, wd, userData) \
((*(wd) = inotify_add_watch(*(fd), (fn), (fam_watch_mask))) < 0)
typedef enum FAMCodes { /*(copied from fam.h to define arbitrary enum values)*/
FAMChanged=1,
FAMDeleted=2,
FAMCreated=5,
FAMMoved=6,
} FAMCodes;
#elif defined HAVE_SYS_EVENT_H && defined HAVE_KQUEUE
#undef HAVE_SYS_INOTIFY_H
#include <sys/event.h>
#include <sys/time.h>
/*(translate FAM API to kqueue; this is specific to stat_cache.c use of FAM)*/
#define fam fd /*(translate struct stat_cache_fam scf->fam -> scf->fd)*/
typedef int FAMRequest; /*(fr)*/
#define FAMClose(fd) \
(-1 != (*(fd)) ? close(*(fd)) : 0)
static int FAMCancelMonitor (const int * const fd, int * const wd)
{
if (-1 == *fd) return 0;
if (-1 == *wd) return 0;
struct timespec t0 = { 0, 0 };
struct kevent kev;
EV_SET(&kev, *wd, EVFILT_VNODE, EV_DELETE, 0, 0, 0);
int rc = kevent(*fd, &kev, 1, NULL, 0, &t0);
close(*wd);
*wd = -1;
return rc;
}
static int FAMMonitorDirectory (int * const fd, char * const fn, int * const wd, void * const userData)
{
*wd = fdevent_open_dirname(fn, 1); /*(note: follows symlinks)*/
if (-1 == *wd) return -1;
struct timespec t0 = { 0, 0 };
struct kevent kev;
unsigned short kev_flags = EV_ADD | EV_ENABLE | EV_CLEAR;
unsigned int kev_fflags = NOTE_ATTRIB | NOTE_EXTEND | NOTE_LINK | NOTE_WRITE
| NOTE_DELETE | NOTE_REVOKE | NOTE_RENAME;
EV_SET(&kev, *wd, EVFILT_VNODE, kev_flags, kev_fflags, 0, userData);
return kevent(*fd, &kev, 1, NULL, 0, &t0);
}
typedef enum FAMCodes { /*(copied from fam.h to define arbitrary enum values)*/
FAMChanged=1,
FAMDeleted=2,
FAMCreated=5,
FAMMoved=6,
} FAMCodes;
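/* note: with kqueue the FAMRequest "watch descriptor" is an open fd on the
 * monitored directory (from fdevent_open_dirname() above), one fd per dir,
 * and the splaytree key is passed back through kevent udata (see the event
 * handler below); with inotify a single inotify fd multiplexes integer
 * watch descriptors instead */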
#else
#include <fam.h>
#ifdef HAVE_FAMNOEXISTS
#ifndef LIGHTTPD_STATIC
#include <dlfcn.h>
#endif
#endif
#endif
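/* fam_dir_entry: one monitored directory
 *   name       - normalized directory path (no trailing '/' except root "/")
 *   refcnt     - count of stat_cache users of this monitored dir
 *   req        - FAM request / inotify watch descriptor / kqueue dir fd
 *   stat_ts    - time of last stat() of the dir (0 marks entry invalidated)
 *   st_dev/st_ino - apparently recorded to detect replacement of the dir
 *   fam_parent - monitored parent entry (holds a refcnt), used with symlinked
 *                dirs as described in the comments above
 */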
typedef struct fam_dir_entry {
buffer *name;
int refcnt;
FAMRequest req;
time_t stat_ts;
dev_t st_dev;
ino_t st_ino;
struct fam_dir_entry *fam_parent;
} fam_dir_entry;
typedef struct stat_cache_fam {
splay_tree *dirs; /* indexed by path; node data is fam_dir_entry */
#ifdef HAVE_SYS_INOTIFY_H
splay_tree *wds; /* indexed by inotify watch descriptor */
#elif defined HAVE_SYS_EVENT_H && defined HAVE_KQUEUE
#else
FAMConnection fam;
#endif
log_error_st *errh;
fdevents *ev;
fdnode *fdn;
int fd;
} stat_cache_fam;
static fam_dir_entry * fam_dir_entry_init(const char *name, size_t len)
{
fam_dir_entry * const fam_dir = calloc(1, sizeof(*fam_dir));
force_assert(NULL != fam_dir);
fam_dir->name = buffer_init();
buffer_copy_string_len(fam_dir->name, name, len);
fam_dir->refcnt = 0;
#if defined HAVE_SYS_EVENT_H && defined HAVE_KQUEUE
fam_dir->req = -1;
#endif
return fam_dir;
}
static void fam_dir_entry_free(fam_dir_entry *fam_dir)
{
if (!fam_dir) return;
/*(fam_dir->parent might be invalid pointer here; ignore)*/
buffer_free(fam_dir->name);
#if defined HAVE_SYS_EVENT_H && defined HAVE_KQUEUE
if (-1 != fam_dir->req)
close(fam_dir->req);
#endif
free(fam_dir);
}
static void fam_dir_invalidate_node(fam_dir_entry *fam_dir)
{
fam_dir->stat_ts = 0;
if (fam_dir->fam_parent) {
--fam_dir->fam_parent->refcnt;
fam_dir->fam_parent = NULL;
}
}
/*
* walk through splay_tree and collect contents of dir tree.
* remove tagged entries in a second loop
*/
static void fam_dir_tag_refcnt(splay_tree *t, int *keys, int *ndx)
{
if (*ndx == 512) return; /*(must match num array entries in keys[])*/
if (t->left) fam_dir_tag_refcnt(t->left, keys, ndx);
if (t->right) fam_dir_tag_refcnt(t->right, keys, ndx);
if (*ndx == 512) return; /*(must match num array entries in keys[])*/
fam_dir_entry * const fam_dir = t->data;
if (0 == fam_dir->refcnt) {
fam_dir_invalidate_node(fam_dir);
keys[(*ndx)++] = t->key;
}
}
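/* fam_dir_tag_refcnt() does a post-order walk collecting (up to 512) keys of
 * dirs with refcnt 0; fam_dir_periodic_cleanup() below then deletes those
 * nodes and repeats the walk whenever the 512-entry batch was filled (see
 * the do/while condition), so all unreferenced dirs are eventually removed */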
__attribute_noinline__
static void fam_dir_periodic_cleanup(void) {
stat_cache_fam * const scf = sc.scf;
int max_ndx, i;
int keys[512]; /* 2k size on stack */
#if defined HAVE_SYS_EVENT_H && defined HAVE_KQUEUE
struct kevent kevl[512]; /* 32k size on stack to batch kevent EV_DELETE */
#endif
do {
if (!scf->dirs) break;
max_ndx = 0;
fam_dir_tag_refcnt(scf->dirs, keys, &max_ndx);
for (i = 0; i < max_ndx; ++i) {
const int ndx = keys[i];
splay_tree *node = scf->dirs = splaytree_splay(scf->dirs, ndx);
if (node && node->key == ndx) {
fam_dir_entry *fam_dir = node->data;
scf->dirs = splaytree_delete(scf->dirs, ndx);
#ifdef HAVE_SYS_INOTIFY_H
scf->wds = splaytree_delete(scf->wds, fam_dir->req);
#elif defined HAVE_SYS_EVENT_H && defined HAVE_KQUEUE
/* batch process kevent removal; defer cancel */
EV_SET(kevl+i, fam_dir->req, EVFILT_VNODE, EV_DELETE, 0, 0, 0);
fam_dir->req = -1; /*(make FAMCancelMonitor() a no-op)*/
#endif
FAMCancelMonitor(&scf->fam, &fam_dir->req);
fam_dir_entry_free(fam_dir);
}
}
#if defined HAVE_SYS_EVENT_H && defined HAVE_KQUEUE
/* batch process: kevent() to submit EV_DELETE, then close dir fds */
if (0 == max_ndx) break;
struct timespec t0 = { 0, 0 };
kevent(scf->fd, kevl, max_ndx, NULL, 0, &t0);
for (i = 0; i < max_ndx; ++i)
close((int)kevl[i].ident);
#endif
} while (max_ndx == sizeof(keys)/sizeof(int));
}
static void fam_dir_invalidate_tree(splay_tree *t, const char *name, size_t len)
{
#ifdef __clang_analyzer__
force_assert(name);
#endif
/*force_assert(t);*/
if (t->left) fam_dir_invalidate_tree(t->left, name, len);
if (t->right) fam_dir_invalidate_tree(t->right, name, len);
fam_dir_entry * const fam_dir = t->data;
#ifdef __clang_analyzer__
force_assert(fam_dir);
#endif
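/* prefix match: invalidate only entries strictly below the affected dir,
 * e.g. name "/www/docs" matches "/www/docs/img" but not "/www/docs2"
 * (the char in the candidate path at offset len must be '/') */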
buffer *b = fam_dir->name;
size_t blen = buffer_string_length(b);
if (blen > len && b->ptr[len] == '/' && 0 == memcmp(b->ptr, name, len))
fam_dir_invalidate_node(fam_dir);
}
/* declarations */
static void stat_cache_delete_tree(const char *name, uint32_t len);
static void stat_cache_invalidate_dir_tree(const char *name, size_t len);
static void stat_cache_handle_fdevent_fn(stat_cache_fam * const scf, fam_dir_entry * const fam_dir, const char * const fn, const uint32_t fnlen, int code);
static void stat_cache_handle_fdevent_in(stat_cache_fam *scf)
{
#ifdef HAVE_SYS_INOTIFY_H
/*(inotify pads in->len to align struct following in->name[])*/
char buf[4096]
__attribute__ ((__aligned__(__alignof__(struct inotify_event))));
int rd;
do {
rd = (int)read(scf->fd, buf, sizeof(buf));
if (rd <= 0) {
if (-1 == rd && errno != EINTR && errno != EAGAIN) {
log_perror(scf->errh, __FILE__, __LINE__, "inotify error");
/* TODO: could flush cache, close scf->fd, and re-open inotify*/
}
break;
}
for (int i = 0; i < rd; ) {
struct inotify_event * const in =
(struct inotify_event *)((uintptr_t)buf + i);
uint32_t len = in->len;
if (len > sizeof(buf)) break; /*(should not happen)*/
i += sizeof(struct inotify_event) + len;
if (i > rd) break; /*(should not happen (partial record))*/
if (in->mask & IN_CREATE)
continue; /*(see comment below for FAMCreated)*/
if (in->mask & IN_Q_OVERFLOW) {
log_error(scf->errh, __FILE__, __LINE__,
"inotify queue overflow");
continue;
}
/* ignore events which may have been pending for
* paths recently cancelled via FAMCancelMonitor() */
scf->wds = splaytree_splay(scf->wds, in->wd);
if (!scf->wds || scf->wds->key != in->wd)
continue;
fam_dir_entry *fam_dir = scf->wds->data;
if (NULL == fam_dir) /*(should not happen)*/
continue;
if (fam_dir->req != in->wd) /*(should not happen)*/
continue;
/*(specific to use here in stat_cache.c)*/
int code = 0;
if (in->mask & (IN_ATTRIB | IN_MODIFY))
code = FAMChanged;
else if (in->mask & (IN_DELETE | IN_DELETE_SELF | IN_UNMOUNT))
code = FAMDeleted;
else if (in->mask & (IN_MOVE_SELF | IN_MOVED_FROM))
code = FAMMoved;
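/* in->len includes the NUL terminator and any NUL padding inotify adds for
 * alignment; trim trailing NULs to recover the actual filename length */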
if (len) {
do { --len; } while (len && in->name[len-1] == '\0');
}
stat_cache_handle_fdevent_fn(scf, fam_dir, in->name, len, code);
}
} while (rd + sizeof(struct inotify_event) + NAME_MAX + 1 > sizeof(buf));
#elif defined HAVE_SYS_EVENT_H && defined HAVE_KQUEUE
struct kevent kevl[256];
struct timespec t0 = { 0, 0 };
int n;
do {
n = kevent(scf->fd, NULL, 0, kevl, sizeof(kevl)/sizeof(*kevl), &t0);
if (n <= 0) break;
for (int i = 0; i < n; ++i) {
const struct kevent * const kev = kevl+i;
/* ignore events which may have been pending for
* paths recently cancelled via FAMCancelMonitor() */
int ndx = (int)(intptr_t)kev->udata;
scf->dirs = splaytree_splay(scf->dirs, ndx);
if (!scf->dirs || scf->dirs->key != ndx)
continue;
fam_dir_entry *fam_dir = scf->dirs->data;
if (fam_dir->req != (int)kev->ident)
continue;
/*(specific to use here in stat_cache.c)*/
/* note: stat_cache only monitors on directories,
* so events here are only on directories
* note: changes are treated as FAMDeleted since
* it is unknown which file in dir was changed
* This is not efficient, but this stat_cache mechanism also
* should not be used on frequently modified directories. */
int code = 0;
if (kev->fflags & (NOTE_WRITE|NOTE_ATTRIB|NOTE_EXTEND|NOTE_LINK))
code = FAMDeleted; /*(not FAMChanged; see comment above)*/
else if (kev->fflags & (NOTE_DELETE|NOTE_REVOKE))
code = FAMDeleted;
else if (kev->fflags & NOTE_RENAME)
code = FAMMoved;
if (kev->flags & EV_ERROR) /*(not expected; treat as FAMDeleted)*/
code = FAMDeleted;
stat_cache_handle_fdevent_fn(scf, fam_dir, NULL, 0, code);
}
} while (n == sizeof(kevl)/sizeof(*kevl));
#else
for (int i = 0, ndx; i || (i = FAMPending(&scf->fam)) > 0; --i) {
FAMEvent fe;
if (FAMNextEvent(&scf->fam, &fe) < 0) break;
/* ignore events which may have been pending for
* paths recently cancelled via FAMCancelMonitor() */
ndx = (int)(intptr_t)fe.userdata;
scf->dirs = splaytree_splay(scf->dirs, ndx);
if (!scf->dirs || scf->dirs->key != ndx) {
continue;
}
fam_dir_entry *fam_dir = scf->dirs->data;
if (FAMREQUEST_GETREQNUM(&fam_dir->req)
!= FAMREQUEST_GETREQNUM(&fe.fr)) {
continue;
}
uint32_t fnlen = (fe.code != FAMCreated && fe.filename[0] != '/')
? (uint32_t)strlen(fe.filename)
: 0;
stat_cache_handle_fdevent_fn(scf, fam_dir, fe.filename, fnlen, fe.code);
}
#endif
}
static void stat_cache_handle_fdevent_fn(stat_cache_fam * const scf, fam_dir_entry *fam_dir, const char * const fn, const uint32_t fnlen, int code)
{
if (fnlen) {
buffer * const n = fam_dir->name;
fam_dir_entry *fam_link;
uint32_t len;
switch (code) {
case FAMCreated:
/* file created in monitored dir modifies dir and
* we should get a separate FAMChanged event for dir.
* Therefore, ignore file FAMCreated event here.
* Also, if FAMNoExists() is used, might get spurious
* FAMCreated events as changes are made e.g. in monitored
* sub-sub-sub dirs and the library discovers new (already
* existing) dir entries */
return;
case FAMChanged:
/* file changed in monitored dir does not modify dir */
case FAMDeleted:
case FAMMoved:
/* file deleted or moved in monitored dir modifies dir,
* but FAM provides separate notification for that */
/* temporarily append filename to dir in fam_dir->name to
* construct path, then delete stat_cache entry (if any)*/
len = buffer_string_length(n);
buffer_append_path_len(n, fn, fnlen);
/* (alternatively, could choose to stat() and update)*/
stat_cache_invalidate_entry(CONST_BUF_LEN(n));
fam_link = /*(check if might be symlink to monitored dir)*/
stat_cache_sptree_find(&scf->dirs, CONST_BUF_LEN(n));
if (fam_link && !buffer_is_equal(fam_link->name, n))
fam_link = NULL;
buffer_string_set_length(n, len);
if (fam_link) {
/* replaced symlink changes containing dir */
stat_cache_invalidate_entry(CONST_BUF_LEN(n));
/* handle symlink to dir as deleted dir below */
code = FAMDeleted;
fam_dir = fam_link;
break;
}
return;
default:
return;
}
}
switch(code) {
case FAMChanged:
stat_cache_invalidate_entry(CONST_BUF_LEN(fam_dir->name));
break;
case FAMDeleted:
case FAMMoved:
stat_cache_delete_tree(CONST_BUF_LEN(fam_dir->name));
fam_dir_invalidate_node(fam_dir);
if (scf->dirs)
fam_dir_invalidate_tree(scf->dirs,CONST_BUF_LEN(fam_dir->name));
fam_dir_periodic_cleanup();
break;
default:
break;
}
}
static handler_t stat_cache_handle_fdevent(void *ctx, int revent)
{
stat_cache_fam * const scf = ctx; /* sc.scf */
if (revent & FDEVENT_IN) {
stat_cache_handle_fdevent_in(scf);
}
if (revent & (FDEVENT_HUP|FDEVENT_RDHUP)) {
/* fam closed the connection */
log_error(scf->errh, __FILE__, __LINE__,
"FAM connection closed; disabling stat_cache.");
/* (although effectively STAT_CACHE_ENGINE_NONE,
* do not change here so that periodic jobs clean up memory)*/
/*sc.stat_cache_engine = STAT_CACHE_ENGINE_NONE; */
fdevent_fdnode_event_del(scf->ev, scf->fdn);
fdevent_unregister(scf->ev, scf->fd);
scf->fdn = NULL;
FAMClose(&scf->fam);
scf->fd = -1;
}
return HANDLER_GO_ON;
}
static stat_cache_fam * stat_cache_init_fam(fdevents *ev, log_error_st *errh) {
stat_cache_fam *scf = calloc(1, sizeof(*scf));
force_assert(scf);
scf->fd = -1;
scf->ev = ev;
scf->errh = errh;
#ifdef HAVE_SYS_INOTIFY_H
scf->fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
if (scf->fd < 0) {
log_perror(errh, __FILE__, __LINE__, "inotify_init1()");
free(scf);
return NULL;
}
#elif defined HAVE_SYS_EVENT_H && defined HAVE_KQUEUE
#ifdef __NetBSD__
scf->fd = kqueue1(O_NONBLOCK|O_CLOEXEC|O_NOSIGPIPE);
#else
scf->fd = kqueue();
if (scf->fd >= 0) fdevent_setfd_cloexec(scf->fd);
#endif
if (scf->fd < 0) {
log_perror(errh, __FILE__, __LINE__, "kqueue()");
free(scf);
return NULL;
}
#else
/* setup FAM */
if (0 != FAMOpen2(&scf->fam, "lighttpd")) {
log_error(errh, __FILE__, __LINE__,
"could not open a fam connection, dying.");
free(scf);
return NULL;
}
#ifdef HAVE_FAMNOEXISTS
#ifdef LIGHTTPD_STATIC
FAMNoExists(&scf->fam);
#else
int (*FAMNoExists_fn)(FAMConnection *);
FAMNoExists_fn =
(int (*)(FAMConnection *))(intptr_t)dlsym(RTLD_DEFAULT,"FAMNoExists");
if (FAMNoExists_fn) FAMNoExists_fn(&scf->fam);
#endif
#endif
scf->fd = FAMCONNECTION_GETFD(&scf->fam);
fdevent_setfd_cloexec(scf->fd);
#endif
scf->fdn = fdevent_register(scf->ev, scf->fd, stat_cache_handle_fdevent, scf);
fdevent_fdnode_event_set(scf->ev, scf->fdn, FDEVENT_IN | FDEVENT_RDHUP);
return scf;
}
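/* note: stat_cache_init_fam() registers the inotify/kqueue/FAM fd with the
 * server fdevent loop, so stat_cache_handle_fdevent() above is invoked with
 * FDEVENT_IN whenever change events are pending */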
static void stat_cache_free_fam(stat_cache_fam *scf) {
if (NULL == scf) return;
#ifdef HAVE_SYS_INOTIFY_H
while (scf->wds) {
splay_tree *node = scf->wds;
scf->wds = splaytree_delete(scf->wds, node->key);
}
#elif defined HAVE_SYS_EVENT_H && defined HAVE_KQUEUE
/*(quicker cleanup to close kqueue() before cancel per entry)*/
close(scf->fd);
scf->fd = -1;
#endif
while (scf->dirs) {
/*(skip entry invalidation and FAMCancelMonitor())*/
splay_tree *node = scf->dirs;
fam_dir_entry_free((fam_dir_entry *)node->data);
scf->dirs = splaytree_delete(scf->dirs, node->key);
}
if (-1 != scf->fd) {
/*scf->fdn already cleaned up in fdevent_free()*/
FAMClose(&scf->fam);
/*scf->fd = -1;*/
}
free(scf);
}
static fam_dir_entry * fam_dir_monitor(stat_cache_fam *scf, char *fn, uint32_t dirlen, struct stat *st)
{
if (NULL == scf->fdn) return NULL; /* FAM connection closed; do nothing */
const int fn_is_dir = S_ISDIR(st->st_mode);
/*force_assert(0 != dirlen);*/
/*force_assert(fn[0] == '/');*/
/* consistency: ensure fn does not end in '/' unless root "/"
* FAM events will not end in '/', so easier to match this way */
if (fn[dirlen-1] == '/') --dirlen;
if (0 == dirlen) dirlen = 1; /* root dir ("/") */
/* Note: paths are expected to be normalized before calling stat_cache,
* e.g. without repeated '/' */
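/* for a regular file, monitor the containing directory: trim fn back to the
 * last '/', e.g. "/www/docs/index.html" leaves dirlen covering "/www/docs"
 * (dirlen 1 keeps the root dir "/") */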
if (!fn_is_dir) {
while (fn[--dirlen] != '/') ;
if (0 == dirlen) dirlen = 1; /*(should not happen for file)*/
}
int dir_ndx = splaytree_djbhash(fn, dirlen);
fam_dir_entry *fam_dir = NULL;
scf->dirs = splaytree_splay(scf->dirs, dir_ndx);
if (NULL != scf->dirs && scf->dirs->key == dir_ndx) {
fam_dir = scf->dirs->data;
if (!buffer_is_equal_string(fam_dir->name, fn, dirlen)) {
/* hash collision; preserve existing
* do not monitor new to avoid cache thrashing */
return NULL;
}
/* directory already registered */
}
const time_t cur_ts = log_epoch_secs;
struct stat lst;
int ck_dir = fn_is_dir;
if (!fn_is_dir && (NULL==fam_dir || cur_ts - fam_dir->stat_ts >= 16)) {
ck_dir = 1;
/*(temporarily modify fn)*/
fn[dirlen] = '\0';
if (0 != lstat(fn, &lst)) {
fn[dirlen] = '/';
return NULL;
}
if (!S_ISLNK(lst.st_mode)) {
st = &lst;
}
else if (0 != stat(fn, st)) { /*st passed in now is stat() of dir*/
fn[dirlen] = '/';
return NULL;
}
fn[dirlen] = '/';
}
int ck_lnk = (NULL == fam_dir);
if (ck_dir && NULL != fam_dir) {