Browse Source

experimental, and likely broken, inotify support

master
Marc Alexander Lehmann 14 years ago
parent
commit
4e6b0502d5
  1. 206
      ev.c
  2. 12
      ev.h
  3. 18
      ev.pod
  4. 8
      ev_epoll.c
  5. 10
      ev_kqueue.c
  6. 6
      ev_poll.c
  7. 8
      ev_port.c
  8. 4
      ev_select.c
  9. 10
      ev_vars.h
  10. 3
      ev_wrap.h
  11. 8
      libev.m4

206
ev.c

@ -96,6 +96,14 @@ extern "C" {
# endif
# endif
# ifndef EV_USE_INOTIFY
# if HAVE_INOTIFY_INIT && HAVE_SYS_INOTIFY_H
# define EV_USE_INOTIFY 1
# else
# define EV_USE_INOTIFY 0
# endif
# endif
#endif
#include <math.h>
@ -112,6 +120,12 @@ extern "C" {
#include <signal.h>
#ifdef EV_H
# include EV_H
#else
# include "ev.h"
#endif
#ifndef _WIN32
# include <sys/time.h>
# include <sys/wait.h>
@ -158,6 +172,10 @@ extern "C" {
# define EV_USE_PORT 0
#endif
#ifndef EV_USE_INOTIFY
# define EV_USE_INOTIFY 0
#endif
#ifndef EV_PID_HASHSIZE
# if EV_MINIMAL
# define EV_PID_HASHSIZE 1
@ -166,6 +184,14 @@ extern "C" {
# endif
#endif
#ifndef EV_INOTIFY_HASHSIZE
# if EV_MINIMAL
# define EV_INOTIFY_HASHSIZE 1
# else
# define EV_INOTIFY_HASHSIZE 16
# endif
#endif
/**/
#ifndef CLOCK_MONOTONIC
@ -182,18 +208,20 @@ extern "C" {
# include <winsock.h>
#endif
#if !EV_STAT_ENABLE
# define EV_USE_INOTIFY 0
#endif
#if EV_USE_INOTIFY
# include <sys/inotify.h>
#endif
/**/
#define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */
#define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */
/*#define CLEANUP_INTERVAL (MAX_BLOCKTIME * 5.) /* how often to try to free memory and re-check fds */
#ifdef EV_H
# include EV_H
#else
# include "ev.h"
#endif
#if __GNUC__ >= 3
# define expect(expr,value) __builtin_expect ((expr),(value))
# define inline_size static inline /* inline for codesize */
@ -298,6 +326,13 @@ typedef struct
int events;
} ANPENDING;
typedef struct
{
#if EV_USE_INOTIFY
WL head;
#endif
} ANFS;
#if EV_MULTIPLICITY
struct ev_loop
@ -881,6 +916,11 @@ loop_init (EV_P_ unsigned int flags)
flags |= ev_recommended_backends ();
backend = 0;
backend_fd = -1;
#if EV_USE_INOTIFY
fs_fd = -2;
#endif
#if EV_USE_PORT
if (!backend && (flags & EVBACKEND_PORT )) backend = port_init (EV_A_ flags);
#endif
@ -907,6 +947,14 @@ loop_destroy (EV_P)
{
int i;
#if EV_USE_INOTIFY
if (fs_fd >= 0)
close (fs_fd);
#endif
if (backend_fd >= 0)
close (backend_fd);
#if EV_USE_PORT
if (backend == EVBACKEND_PORT ) port_destroy (EV_A);
#endif
@ -1667,6 +1715,127 @@ ev_child_stop (EV_P_ ev_child *w)
#define DEF_STAT_INTERVAL 5.0074891
#define MIN_STAT_INTERVAL 0.1074891
void noinline stat_timer_cb (EV_P_ ev_timer *w_, int revents);
#if EV_USE_INOTIFY
# define EV_INOTIFY_BUFSIZE ((PATH_MAX + sizeof (struct inotify_event)) + 2048)
/* Attach an inotify watch for the stat watcher's path.
 * If the path itself cannot be watched (missing or unreadable) we keep the
 * polling timer running and walk up the directory tree, watching the
 * nearest existing/readable parent so its changes give us speedup hints.
 * On success the watcher is hashed under its watch descriptor. */
static void noinline
infy_add (EV_P_ ev_stat *w)
{
  w->wd = inotify_add_watch (fs_fd, w->path, IN_ATTRIB | IN_DELETE_SELF | IN_MOVE_SELF | IN_MODIFY | IN_DONT_FOLLOW | IN_MASK_ADD);

  if (w->wd < 0)
    {
      ev_timer_start (EV_A_ &w->timer); /* this is not race-free, so we still need to recheck periodically */

      /* monitor some parent directory for speedup hints */
      if ((errno == ENOENT || errno == EACCES)
          && strlen (w->path) < PATH_MAX) /* guard the fixed-size buffer below against overflow */
        {
          char path [PATH_MAX];
          strcpy (path, w->path);

          do
            {
              /* EACCES: the component exists but is unreadable, watch attribute
               * changes; otherwise watch for the missing component appearing */
              int mask = IN_MASK_ADD | IN_DELETE_SELF | IN_MOVE_SELF
                       | (errno == EACCES ? IN_ATTRIB : IN_CREATE | IN_MOVED_TO);

              char *pend = strrchr (path, '/');

              if (!pend)
                break; /* whoops, no '/', complain to your admin */

              *pend = 0;
              /* use the mask computed above (it was previously computed but ignored) */
              w->wd = inotify_add_watch (fs_fd, path, mask);
            }
          while (w->wd < 0 && (errno == ENOENT || errno == EACCES));
        }
    }
  else
    ev_timer_stop (EV_A_ &w->timer); /* we can watch this in a race-free way */

  if (w->wd >= 0)
    wlist_add (&fs_hash [w->wd & (EV_INOTIFY_HASHSIZE - 1)].head, (WL)w);
}
/* Detach the stat watcher from its inotify watch, if it has one.
 * Marks the watcher as unattached (wd = -2), unhashes it, and removes the
 * kernel watch unconditionally; any other watchers sharing the same wd
 * will receive IN_IGNORED and re-arm themselves. */
static void noinline
infy_del (EV_P_ ev_stat *w)
{
  int slot;
  int wd = w->wd;

  if (wd < 0)
    return;

  w->wd = -2;
  slot = wd & (EV_INOTIFY_HASHSIZE - 1);
  wlist_del (&fs_hash [slot].head, (WL)w);

  /* remove this watcher, if others are watching it, they will rearm */
  inotify_rm_watch (fs_fd, wd);
}
/* Dispatch an inotify event to every stat watcher hashed under wd.
 * slot < 0 means the slot is unknown (e.g. queue overflow): scan all
 * slots recursively.  wd == -1 matches every watcher in the slot.
 * Watchers whose watch was invalidated (IN_IGNORED/IN_UNMOUNT/
 * IN_DELETE_SELF) are unconditionally re-added before being fed. */
static void noinline
infy_wd (EV_P_ int slot, int wd, struct inotify_event *ev)
{
  if (slot < 0)
    /* overflow, need to check for all hash slots */
    for (slot = 0; slot < EV_INOTIFY_HASHSIZE; ++slot)
      infy_wd (EV_A_ slot, wd, ev);
  else
    {
      WL w_;

      for (w_ = fs_hash [slot & (EV_INOTIFY_HASHSIZE - 1)].head; w_; )
        {
          ev_stat *w = (ev_stat *)w_;
          w_ = w_->next; /* lets us remove this watcher and all before it */

          if (w->wd == wd || wd == -1)
            {
              if (ev->mask & (IN_IGNORED | IN_UNMOUNT | IN_DELETE_SELF))
                {
                  w->wd = -1;
                  infy_add (EV_A_ w); /* re-add, no matter what */
                }

              /* EV_A_ (argument form), not EV_P_ (parameter-declaration form):
               * this is a function call, EV_P_ here would not compile under
               * EV_MULTIPLICITY */
              stat_timer_cb (EV_A_ &w->timer, 0);
            }
        }
    }
}
/* Callback on the per-loop inotify fd: drain one read's worth of events
 * and dispatch each record to the watchers hashed under its wd. */
static void
infy_cb (EV_P_ ev_io *w, int revents)
{
  char buf [EV_INOTIFY_BUFSIZE];
  int ofs;
  int len = read (fs_fd, buf, sizeof (buf));

  /* a negative len (read error, e.g. spurious wakeup) simply skips the loop */
  for (ofs = 0; ofs < len; )
    {
      /* inotify records are variable-length (struct + ev->len name bytes);
       * recompute the record pointer at each offset instead of reusing the
       * first record for every iteration */
      struct inotify_event *ev = (struct inotify_event *)(buf + ofs);

      infy_wd (EV_A_ ev->wd, ev->wd, ev);

      ofs += sizeof (struct inotify_event) + ev->len;
    }
}
/* Lazily create the per-loop inotify fd and hook it into the loop.
 * fs_fd == -2 means "never tried"; any other value (including a failed -1
 * from an earlier attempt) means initialisation already happened, so this
 * is a cheap no-op on subsequent calls. */
void inline_size
infy_init (EV_P)
{
  if (fs_fd == -2)
    {
      fs_fd = inotify_init ();

      if (fs_fd < 0)
        return; /* inotify unavailable; callers fall back to the timer */

      ev_io_init (&fs_w, infy_cb, fs_fd, EV_READ);
      ev_set_priority (&fs_w, EV_MAXPRI);
      ev_io_start (EV_A_ &fs_w);
    }
}
#endif
void
ev_stat_stat (EV_P_ ev_stat *w)
{
@ -1676,7 +1845,7 @@ ev_stat_stat (EV_P_ ev_stat *w)
w->attr.st_nlink = 1;
}
static void
void noinline
stat_timer_cb (EV_P_ ev_timer *w_, int revents)
{
ev_stat *w = (ev_stat *)(((char *)w_) - offsetof (ev_stat, timer));
@ -1687,7 +1856,15 @@ stat_timer_cb (EV_P_ ev_timer *w_, int revents)
ev_stat_stat (EV_A_ w);
if (memcmp (&w->prev, &w->attr, sizeof (ev_statdata)))
ev_feed_event (EV_A_ w, EV_STAT);
{
#if EV_USE_INOTIFY
infy_del (EV_A_ w);
infy_add (EV_A_ w);
ev_stat_stat (EV_A_ w); /* avoid race... */
#endif
ev_feed_event (EV_A_ w, EV_STAT);
}
}
void
@ -1707,7 +1884,15 @@ ev_stat_start (EV_P_ ev_stat *w)
ev_timer_init (&w->timer, stat_timer_cb, w->interval, w->interval);
ev_set_priority (&w->timer, ev_priority (w));
ev_timer_start (EV_A_ &w->timer);
#if EV_USE_INOTIFY
infy_init (EV_A);
if (fs_fd >= 0)
infy_add (EV_A_ w);
else
#endif
ev_timer_start (EV_A_ &w->timer);
ev_start (EV_A_ (W)w, 1);
}
@ -1719,6 +1904,9 @@ ev_stat_stop (EV_P_ ev_stat *w)
if (expect_false (!ev_is_active (w)))
return;
#if EV_USE_INOTIFY
infy_del (EV_A_ w);
#endif
ev_timer_stop (EV_A_ &w->timer);
ev_stop (EV_A_ (W)w);

12
ev.h

@ -220,23 +220,25 @@ typedef struct ev_child
#if EV_STAT_ENABLE
/* st_nlink = 0 means missing file or other error */
#ifdef _WIN32
# ifdef _WIN32
typedef struct _stati64 ev_statdata;
#else
# else
typedef struct stat ev_statdata;
#endif
# endif
/* invoked each time the stat data changes for a given path */
/* revent EV_STAT */
typedef struct ev_stat
{
EV_WATCHER (ev_stat)
EV_WATCHER_LIST (ev_stat)
ev_timer timer; /* private */
ev_tstamp interval; /* ro */
const char *path; /* ro */
ev_statdata prev; /* ro */
ev_statdata attr; /* ro */
int wd; /* wd for inotify, fd for kqueue */
} ev_stat;
#endif
@ -426,7 +428,7 @@ void ev_once (EV_P_ int fd, int events, ev_tstamp timeout, void (*cb)(int revent
#define ev_periodic_set(ev,at_,ival_,res_) do { (ev)->at = (at_); (ev)->interval = (ival_); (ev)->reschedule_cb= (res_); } while (0)
#define ev_signal_set(ev,signum_) do { (ev)->signum = (signum_); } while (0)
#define ev_child_set(ev,pid_) do { (ev)->pid = (pid_); } while (0)
#define ev_stat_set(ev,path_,interval_) do { (ev)->path = (path_); (ev)->interval = (interval_); } while (0)
#define ev_stat_set(ev,path_,interval_) do { (ev)->path = (path_); (ev)->interval = (interval_); (ev)->wd = -2; } while (0)
#define ev_idle_set(ev) /* nop, yes, this is a serious in-joke */
#define ev_prepare_set(ev) /* nop, yes, this is a serious in-joke */
#define ev_check_set(ev) /* nop, yes, this is a serious in-joke */

18
ev.pod

@ -2016,6 +2016,12 @@ backend for Solaris 10 systems.
reserved for future expansion, works like the USE symbols above.
=item EV_USE_INOTIFY
If defined to be C<1>, libev will compile in support for the Linux inotify
interface to speed up C<ev_stat> watchers. Its actual availability will
be detected at runtime.
=item EV_H
The name of the F<ev.h> header file used to include it. The default if
@ -2080,7 +2086,15 @@ some inlining decisions, saves roughly 30% codesize of amd64.
C<ev_child> watchers use a small hash table to distribute workload by
pid. The default size is C<16> (or C<1> with C<EV_MINIMAL>), usually more
than enough. If you need to manage thousands of children you might want to
increase this value.
increase this value (I<must> be a power of two).
=item EV_INOTIFY_HASHSIZE
C<ev_stat> watchers use a small hash table to distribute workload by
inotify watch id. The default size is C<16> (or C<1> with C<EV_MINIMAL>),
usually more than enough. If you need to manage thousands of C<ev_stat>
watchers you might want to increase this value (I<must> be a power of
two).
=item EV_COMMON
@ -2150,7 +2164,7 @@ documentation for C<ev_default_init>.
=item Stopping check/prepare/idle watchers: O(1)
=item Stopping an io/signal/child watcher: O(number_of_watchers_for_this_(fd/signal/pid % 16))
=item Stopping an io/signal/child watcher: O(number_of_watchers_for_this_(fd/signal/pid % EV_PID_HASHSIZE))
=item Finding the next timer per loop iteration: O(1)

8
ev_epoll.c

@ -79,7 +79,7 @@ epoll_poll (EV_P_ ev_tstamp timeout)
}
}
static int
int inline_size
epoll_init (EV_P_ int flags)
{
backend_fd = epoll_create (256);
@ -99,15 +99,13 @@ epoll_init (EV_P_ int flags)
return EVBACKEND_EPOLL;
}
static void
void inline_size
epoll_destroy (EV_P)
{
close (backend_fd);
ev_free (epoll_events);
}
static void
void inline_size
epoll_fork (EV_P)
{
close (backend_fd);

10
ev_kqueue.c

@ -36,7 +36,7 @@
#include <string.h>
#include <errno.h>
static void
void inline_speed
kqueue_change (EV_P_ int fd, int filter, int flags, int fflags)
{
struct kevent *ke;
@ -142,7 +142,7 @@ kqueue_poll (EV_P_ ev_tstamp timeout)
}
}
static int
int inline_size
kqueue_init (EV_P_ int flags)
{
struct kevent ch, ev;
@ -168,16 +168,14 @@ kqueue_init (EV_P_ int flags)
return EVBACKEND_KQUEUE;
}
static void
void inline_size
kqueue_destroy (EV_P)
{
close (backend_fd);
ev_free (kqueue_events);
ev_free (kqueue_changes);
}
static void
void inline_size
kqueue_fork (EV_P)
{
close (backend_fd);

6
ev_poll.c

@ -1,5 +1,5 @@
/*
* libev epoll fd activity backend
* libev poll fd activity backend
*
* Copyright (c) 2007 Marc Alexander Lehmann <libev@schmorp.de>
* All rights reserved.
@ -105,7 +105,7 @@ poll_poll (EV_P_ ev_tstamp timeout)
);
}
static int
int inline_size
poll_init (EV_P_ int flags)
{
backend_fudge = 1e-3; /* needed to compensate for select returning early, very conservative */
@ -118,7 +118,7 @@ poll_init (EV_P_ int flags)
return EVBACKEND_POLL;
}
static void
void inline_size
poll_destroy (EV_P)
{
ev_free (pollidxs);

8
ev_port.c

@ -108,7 +108,7 @@ port_poll (EV_P_ ev_tstamp timeout)
}
}
static int
int inline_size
port_init (EV_P_ int flags)
{
/* Initialize the kernel queue */
@ -127,15 +127,13 @@ port_init (EV_P_ int flags)
return EVBACKEND_PORT;
}
static void
void inline_size
port_destroy (EV_P)
{
close (backend_fd);
ev_free (port_events);
}
static void
void inline_size
port_fork (EV_P)
{
close (backend_fd);

4
ev_select.c

@ -200,7 +200,7 @@ select_poll (EV_P_ ev_tstamp timeout)
#endif
}
static int
int inline_size
select_init (EV_P_ int flags)
{
backend_fudge = 1e-2; /* needed to compensate for select returning early, very conservative */
@ -224,7 +224,7 @@ select_init (EV_P_ int flags)
return EVBACKEND_SELECT;
}
static void
void inline_size
select_destroy (EV_P)
{
ev_free (vec_ri);

10
ev_vars.h

@ -2,7 +2,7 @@
VARx(ev_tstamp, now_floor) /* last time we refreshed rt_time */
VARx(ev_tstamp, mn_now) /* monotonic clock "now" */
VARx(ev_tstamp, rtmn_diff) /* difference realtime - monotonic time */
VARx(ev_tstamp, rtmn_diff) /* difference realtime - monotonic time */
VARx(int, backend)
VARx(ev_tstamp, backend_fudge) /* assumed typical timer resolution */
@ -11,7 +11,7 @@ VAR (backend_poll , void (*backend_poll)(EV_P_ ev_tstamp timeout))
VARx(int, backend_fd)
VARx(int, postfork) /* true if we need to recreate kernel state after fork */
VARx(int, activecnt) /* number of active events */
VARx(int, activecnt) /* total number of active events ("refcount") */
#if EV_USE_SELECT || EV_GENWRAP
VARx(void *, vec_ri)
@ -86,5 +86,11 @@ VARx(int, forkmax)
VARx(int, forkcnt)
#endif
#if EV_USE_INOTIFY || EV_GENWRAP
VARx(int, fs_fd)   /* inotify fd; -2 = not yet initialised, -1 = unavailable */
VARx(ev_io, fs_w)  /* io watcher on fs_fd; no trailing ';' — consistent with the
                      other VARx entries, the expansion supplies the terminator */
VAR (fs_hash, ANFS fs_hash [EV_INOTIFY_HASHSIZE]) /* wd -> stat-watcher list */
#endif
#undef VARx

3
ev_wrap.h

@ -54,3 +54,6 @@
#define forks ((loop)->forks)
#define forkmax ((loop)->forkmax)
#define forkcnt ((loop)->forkcnt)
#define fs_fd ((loop)->fs_fd)
/* no trailing ';' — these macros expand inside expressions; a semicolon here
   would terminate the statement mid-argument (e.g. in ev_io_init (&fs_w, ...)) */
#define fs_w ((loop)->fs_w)
#define fs_hash ((loop)->fs_hash)

8
libev.m4

@ -2,14 +2,14 @@ dnl this file is part of libev, do not make local modifications
dnl http://software.schmorp.de/pkg/libev
dnl libev support
AC_CHECK_HEADERS(sys/epoll.h sys/event.h sys/queue.h port.h poll.h sys/select.h)
AC_CHECK_HEADERS(sys/inotify.h sys/epoll.h sys/event.h sys/queue.h port.h poll.h sys/select.h)
AC_CHECK_FUNCS(epoll_ctl kqueue port_create poll select)
AC_CHECK_FUNCS(inotify_init epoll_ctl kqueue port_create poll select)
AC_CHECK_FUNC(clock_gettime, [], [
if test -z "$LIBEV_M4_AVOID_LIBRT"; then
AC_CHECK_LIB(rt, clock_gettime)
AC_CHECK_FUNCS(clock_gettime)
AC_CHECK_LIB(rt, clock_gettime)
AC_CHECK_FUNCS(clock_gettime)
fi
])

Loading…
Cancel
Save