Mirror of :pserver:anonymous@cvs.schmorp.de/schmorpforge libev http://software.schmorp.de/pkg/libev.html
/*
 * libev event processing core, watcher management
 *
 * Copyright (c) 2007 Marc Alexander Lehmann <libev@schmorp.de>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifdef __cplusplus
extern "C" {
#endif

#ifndef EV_STANDALONE
# ifdef EV_CONFIG_H
#  include EV_CONFIG_H
# else
#  include "config.h"
# endif

# if HAVE_CLOCK_GETTIME
#  ifndef EV_USE_MONOTONIC
#   define EV_USE_MONOTONIC 1
#  endif
#  ifndef EV_USE_REALTIME
#   define EV_USE_REALTIME 1
#  endif
# else
#  ifndef EV_USE_MONOTONIC
#   define EV_USE_MONOTONIC 0
#  endif
#  ifndef EV_USE_REALTIME
#   define EV_USE_REALTIME 0
#  endif
# endif

# ifndef EV_USE_SELECT
#  if HAVE_SELECT && HAVE_SYS_SELECT_H
#   define EV_USE_SELECT 1
#  else
#   define EV_USE_SELECT 0
#  endif
# endif

# ifndef EV_USE_POLL
#  if HAVE_POLL && HAVE_POLL_H
#   define EV_USE_POLL 1
#  else
#   define EV_USE_POLL 0
#  endif
# endif

# ifndef EV_USE_EPOLL
#  if HAVE_EPOLL_CTL && HAVE_SYS_EPOLL_H
#   define EV_USE_EPOLL 1
#  else
#   define EV_USE_EPOLL 0
#  endif
# endif

# ifndef EV_USE_KQUEUE
#  if HAVE_KQUEUE && HAVE_SYS_EVENT_H && HAVE_SYS_QUEUE_H
#   define EV_USE_KQUEUE 1
#  else
#   define EV_USE_KQUEUE 0
#  endif
# endif

# ifndef EV_USE_PORT
#  if HAVE_PORT_H && HAVE_PORT_CREATE
#   define EV_USE_PORT 1
#  else
#   define EV_USE_PORT 0
#  endif
# endif

# ifndef EV_USE_INOTIFY
#  if HAVE_INOTIFY_INIT && HAVE_SYS_INOTIFY_H
#   define EV_USE_INOTIFY 1
#  else
#   define EV_USE_INOTIFY 0
#  endif
# endif

#endif
#include <math.h>
#include <stdlib.h>
#include <string.h> /* for memset */
#include <fcntl.h>
#include <stddef.h>

#include <stdio.h>

#include <assert.h>
#include <errno.h>
#include <sys/types.h>
#include <time.h>

#include <signal.h>

#ifdef EV_H
# include EV_H
#else
# include "ev.h"
#endif

#ifndef _WIN32
# include <sys/time.h>
# include <sys/wait.h>
# include <unistd.h>
#else
# define WIN32_LEAN_AND_MEAN
# include <windows.h>
# ifndef EV_SELECT_IS_WINSOCKET
#  define EV_SELECT_IS_WINSOCKET 1
# endif
#endif
/**/

#ifndef EV_USE_MONOTONIC
# define EV_USE_MONOTONIC 0
#endif

#ifndef EV_USE_REALTIME
# define EV_USE_REALTIME 0
#endif

#ifndef EV_USE_SELECT
# define EV_USE_SELECT 1
#endif

#ifndef EV_USE_POLL
# ifdef _WIN32
#  define EV_USE_POLL 0
# else
#  define EV_USE_POLL 1
# endif
#endif

#ifndef EV_USE_EPOLL
# define EV_USE_EPOLL 0
#endif

#ifndef EV_USE_KQUEUE
# define EV_USE_KQUEUE 0
#endif

#ifndef EV_USE_PORT
# define EV_USE_PORT 0
#endif

#ifndef EV_USE_INOTIFY
# define EV_USE_INOTIFY 0
#endif

#ifndef EV_PID_HASHSIZE
# if EV_MINIMAL
#  define EV_PID_HASHSIZE 1
# else
#  define EV_PID_HASHSIZE 16
# endif
#endif

#ifndef EV_INOTIFY_HASHSIZE
# if EV_MINIMAL
#  define EV_INOTIFY_HASHSIZE 1
# else
#  define EV_INOTIFY_HASHSIZE 16
# endif
#endif
/**/

#ifndef CLOCK_MONOTONIC
# undef EV_USE_MONOTONIC
# define EV_USE_MONOTONIC 0
#endif

#ifndef CLOCK_REALTIME
# undef EV_USE_REALTIME
# define EV_USE_REALTIME 0
#endif

#if EV_SELECT_IS_WINSOCKET
# include <winsock.h>
#endif

#if !EV_STAT_ENABLE
# define EV_USE_INOTIFY 0
#endif

#if EV_USE_INOTIFY
# include <sys/inotify.h>
#endif
/**/

#define MIN_TIMEJUMP  1.      /* minimum timejump that gets detected (if monotonic clock available) */
#define MAX_BLOCKTIME 59.743  /* never wait longer than this time (to detect time jumps) */
/*#define CLEANUP_INTERVAL (MAX_BLOCKTIME * 5.) /* how often to try to free memory and re-check fds */

#if __GNUC__ >= 3
# define expect(expr,value) __builtin_expect ((expr),(value))
# define inline_size        static inline /* inline for codesize */
# if EV_MINIMAL
#  define noinline          __attribute__ ((noinline))
#  define inline_speed      static noinline
# else
#  define noinline
#  define inline_speed      static inline
# endif
#else
# define expect(expr,value) (expr)
# define inline_speed       static
# define inline_size        static
# define noinline
#endif

#define expect_false(expr) expect ((expr) != 0, 0)
#define expect_true(expr)  expect ((expr) != 0, 1)

#define NUMPRI    (EV_MAXPRI - EV_MINPRI + 1)
#define ABSPRI(w) ((w)->priority - EV_MINPRI)

#define EMPTY0      /* required for microsofts broken pseudo-c compiler */
#define EMPTY2(a,b) /* used to suppress some warnings */

typedef ev_watcher *W;
typedef ev_watcher_list *WL;
typedef ev_watcher_time *WT;

static int have_monotonic; /* did clock_gettime (CLOCK_MONOTONIC) work? */

#ifdef _WIN32
# include "ev_win32.c"
#endif

/*****************************************************************************/
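/* error reporting: fatal system errors end up in syserr () below; a user
 * callback installed via ev_set_syserr_cb () can override the default
 * perror ()/abort () behaviour. */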
static void (*syserr_cb)(const char *msg);

void
ev_set_syserr_cb (void (*cb)(const char *msg))
{
  syserr_cb = cb;
}

static void noinline
syserr (const char *msg)
{
  if (!msg)
    msg = "(libev) system error";

  if (syserr_cb)
    syserr_cb (msg);
  else
    {
      perror (msg);
      abort ();
    }
}
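/* memory management: all allocations go through ev_realloc (), which has
 * realloc semantics (ptr == 0 allocates, size == 0 frees, see the ev_malloc/
 * ev_free macros below). The allocator can be replaced via ev_set_allocator ();
 * for example (an illustrative sketch only, not part of this file), a
 * replacement that waits and retries instead of aborting on failure:
 *
 *   static void *
 *   persistent_realloc (void *ptr, long size)
 *   {
 *     for (;;)
 *       {
 *         void *newptr = realloc (ptr, size);
 *
 *         if (newptr || !size)
 *           return newptr;
 *
 *         sleep (60);
 *       }
 *   }
 *
 *   ev_set_allocator (persistent_realloc);
 */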
static void *(*alloc)(void *ptr, long size);

void
ev_set_allocator (void *(*cb)(void *ptr, long size))
{
  alloc = cb;
}

inline_speed void *
ev_realloc (void *ptr, long size)
{
  ptr = alloc ? alloc (ptr, size) : realloc (ptr, size);

  if (!ptr && size)
    {
      fprintf (stderr, "libev: cannot allocate %ld bytes, aborting.", size);
      abort ();
    }

  return ptr;
}

#define ev_malloc(size) ev_realloc (0, (size))
#define ev_free(ptr)    ev_realloc ((ptr), 0)

/*****************************************************************************/
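/* per-loop data structures: an ANFD holds the watcher list and state for one
 * file descriptor, an ANPENDING one queued event. With EV_MULTIPLICITY all
 * remaining loop state lives in struct ev_loop (its members are generated
 * from ev_vars.h), otherwise the same variables are emitted as statics. */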
typedef struct
{
  WL head;
  unsigned char events;
  unsigned char reify;
#if EV_SELECT_IS_WINSOCKET
  SOCKET handle;
#endif
} ANFD;

typedef struct
{
  W w;
  int events;
} ANPENDING;

#if EV_USE_INOTIFY
typedef struct
{
  WL head;
} ANFS;
#endif

#if EV_MULTIPLICITY

  struct ev_loop
  {
    ev_tstamp ev_rt_now;
    #define ev_rt_now ((loop)->ev_rt_now)
    #define VAR(name,decl) decl;
      #include "ev_vars.h"
    #undef VAR
  };
  #include "ev_wrap.h"

  static struct ev_loop default_loop_struct;
  struct ev_loop *ev_default_loop_ptr;

#else

  ev_tstamp ev_rt_now;
  #define VAR(name,decl) static decl;
    #include "ev_vars.h"
  #undef VAR

  static int ev_default_loop_ptr;

#endif

/*****************************************************************************/
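/* time handling: ev_time () returns the current wall-clock time, get_clock ()
 * the monotonic time when CLOCK_MONOTONIC is usable and falls back to
 * ev_time () otherwise. ev_rt_now caches the wall-clock time as seen by the
 * loop and is returned by ev_now (). */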
ev_tstamp
ev_time (void)
{
#if EV_USE_REALTIME
  struct timespec ts;
  clock_gettime (CLOCK_REALTIME, &ts);
  return ts.tv_sec + ts.tv_nsec * 1e-9;
#else
  struct timeval tv;
  gettimeofday (&tv, 0);
  return tv.tv_sec + tv.tv_usec * 1e-6;
#endif
}

ev_tstamp inline_size
get_clock (void)
{
#if EV_USE_MONOTONIC
  if (expect_true (have_monotonic))
    {
      struct timespec ts;
      clock_gettime (CLOCK_MONOTONIC, &ts);
      return ts.tv_sec + ts.tv_nsec * 1e-9;
    }
#endif

  return ev_time ();
}

#if EV_MULTIPLICITY
ev_tstamp
ev_now (EV_P)
{
  return ev_rt_now;
}
#endif
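/* dynamic arrays: array_needsize () grows an array geometrically via
 * array_nextsize (); allocations larger than a page are rounded to 4KiB
 * minus a small allowance for malloc bookkeeping overhead. */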
int inline_size
array_nextsize (int elem, int cur, int cnt)
{
  int ncur = cur + 1;

  do
    ncur <<= 1;
  while (cnt > ncur);

  /* if size > 4096, round to 4096 - 4 * longs to accommodate malloc overhead */
  if (elem * ncur > 4096)
    {
      ncur *= elem;
      ncur = (ncur + elem + 4095 + sizeof (void *) * 4) & ~4095;
      ncur = ncur - sizeof (void *) * 4;
      ncur /= elem;
    }

  return ncur;
}

inline_speed void *
array_realloc (int elem, void *base, int *cur, int cnt)
{
  *cur = array_nextsize (elem, *cur, cnt);
  return ev_realloc (base, elem * *cur);
}

#define array_needsize(type,base,cur,cnt,init) \
  if (expect_false ((cnt) > (cur))) \
    { \
      int ocur_ = (cur); \
      (base) = (type *)array_realloc \
         (sizeof (type), (base), &(cur), (cnt)); \
      init ((base) + (ocur_), (cur) - ocur_); \
    }

#if 0
#define array_slim(type,stem) \
  if (stem ## max < array_roundsize (stem ## cnt >> 2)) \
    { \
      stem ## max = array_roundsize (stem ## cnt >> 1); \
      base = (type *)ev_realloc (base, sizeof (type) * (stem ## max));\
      fprintf (stderr, "slimmed down " # stem " to %d\n", stem ## max);/*D*/\
    }
#endif

#define array_free(stem, idx) \
  ev_free (stem ## s idx); stem ## cnt idx = stem ## max idx = 0;

/*****************************************************************************/
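/* event queueing: ev_feed_event () appends a watcher to the pending array of
 * its priority; feeding an already-pending watcher only ORs in the new
 * revents bits. call_pending () later invokes the callbacks. */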
void noinline
ev_feed_event (EV_P_ void *w, int revents)
{
  W w_ = (W)w;

  if (expect_false (w_->pending))
    {
      pendings [ABSPRI (w_)][w_->pending - 1].events |= revents;
      return;
    }

  w_->pending = ++pendingcnt [ABSPRI (w_)];
  array_needsize (ANPENDING, pendings [ABSPRI (w_)], pendingmax [ABSPRI (w_)], pendingcnt [ABSPRI (w_)], EMPTY2);
  pendings [ABSPRI (w_)][w_->pending - 1].w      = w_;
  pendings [ABSPRI (w_)][w_->pending - 1].events = revents;
}

void inline_size
queue_events (EV_P_ W *events, int eventcnt, int type)
{
  int i;

  for (i = 0; i < eventcnt; ++i)
    ev_feed_event (EV_A_ events [i], type);
}

/*****************************************************************************/
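/* file descriptor handling: anfds [fd] tracks all io watchers for an fd.
 * fd_change () marks an fd as dirty, fd_reify () later pushes the combined
 * event mask of its watchers to the backend via backend_modify (). */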
void inline_size
anfds_init (ANFD *base, int count)
{
  while (count--)
    {
      base->head   = 0;
      base->events = EV_NONE;
      base->reify  = 0;

      ++base;
    }
}

void inline_speed
fd_event (EV_P_ int fd, int revents)
{
  ANFD *anfd = anfds + fd;
  ev_io *w;

  for (w = (ev_io *)anfd->head; w; w = (ev_io *)((WL)w)->next)
    {
      int ev = w->events & revents;

      if (ev)
        ev_feed_event (EV_A_ (W)w, ev);
    }
}

void
ev_feed_fd_event (EV_P_ int fd, int revents)
{
  fd_event (EV_A_ fd, revents);
}

void inline_size
fd_reify (EV_P)
{
  int i;

  for (i = 0; i < fdchangecnt; ++i)
    {
      int fd = fdchanges [i];
      ANFD *anfd = anfds + fd;
      ev_io *w;

      int events = 0;

      for (w = (ev_io *)anfd->head; w; w = (ev_io *)((WL)w)->next)
        events |= w->events;

#if EV_SELECT_IS_WINSOCKET
      if (events)
        {
          unsigned long argp;
          anfd->handle = _get_osfhandle (fd);
          assert (("libev only supports socket fds in this configuration", ioctlsocket (anfd->handle, FIONREAD, &argp) == 0));
        }
#endif

      anfd->reify = 0;

      backend_modify (EV_A_ fd, anfd->events, events);
      anfd->events = events;
    }

  fdchangecnt = 0;
}

void inline_size
fd_change (EV_P_ int fd)
{
  if (expect_false (anfds [fd].reify))
    return;

  anfds [fd].reify = 1;

  ++fdchangecnt;
  array_needsize (int, fdchanges, fdchangemax, fdchangecnt, EMPTY2);
  fdchanges [fdchangecnt - 1] = fd;
}

void inline_speed
fd_kill (EV_P_ int fd)
{
  ev_io *w;

  while ((w = (ev_io *)anfds [fd].head))
    {
      ev_io_stop (EV_A_ w);
      ev_feed_event (EV_A_ (W)w, EV_ERROR | EV_READ | EV_WRITE);
    }
}

int inline_size
fd_valid (int fd)
{
#ifdef _WIN32
  return _get_osfhandle (fd) != -1;
#else
  return fcntl (fd, F_GETFD) != -1;
#endif
}
/* called on EBADF to verify fds */
static void noinline
fd_ebadf (EV_P)
{
  int fd;

  for (fd = 0; fd < anfdmax; ++fd)
    if (anfds [fd].events)
      if (!fd_valid (fd) && errno == EBADF)
        fd_kill (EV_A_ fd);
}
/* called on ENOMEM in select/poll to kill some fds and retry */
static void noinline
fd_enomem (EV_P)
{
  int fd;

  for (fd = anfdmax; fd--; )
    if (anfds [fd].events)
      {
        fd_kill (EV_A_ fd);
        return;
      }
}

/* usually called after fork if backend needs to re-arm all fds from scratch */
static void noinline
fd_rearm_all (EV_P)
{
  int fd;

  for (fd = 0; fd < anfdmax; ++fd)
    if (anfds [fd].events)
      {
        anfds [fd].events = 0;
        fd_change (EV_A_ fd);
      }
}

/*****************************************************************************/
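/* timer heap: a binary min-heap ordered by ->at, shared in structure by
 * timers and periodics; a watcher's heap index + 1 is kept in ->active so
 * it can be removed and re-adjusted in O(log n). */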
void inline_speed
upheap (WT *heap, int k)
{
  WT w = heap [k];

  while (k && heap [k >> 1]->at > w->at)
    {
      heap [k] = heap [k >> 1];
      ((W)heap [k])->active = k + 1;
      k >>= 1;
    }

  heap [k] = w;
  ((W)heap [k])->active = k + 1;
}

void inline_speed
downheap (WT *heap, int N, int k)
{
  WT w = heap [k];

  while (k < (N >> 1))
    {
      int j = k << 1;

      if (j + 1 < N && heap [j]->at > heap [j + 1]->at)
        ++j;

      if (w->at <= heap [j]->at)
        break;

      heap [k] = heap [j];
      ((W)heap [k])->active = k + 1;

      k = j;
    }

  heap [k] = w;
  ((W)heap [k])->active = k + 1;
}

void inline_size
adjustheap (WT *heap, int N, int k)
{
  upheap (heap, k);
  downheap (heap, N, k);
}

/*****************************************************************************/
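/* signal handling uses the classic self-pipe trick: sighandler () only sets
 * per-signal and global flags and writes one byte to sigpipe [1]; sigcb (),
 * running as a normal io watcher on sigpipe [0], then feeds EV_SIGNAL events
 * from inside the loop. */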
typedef struct
{
  WL head;
  sig_atomic_t volatile gotsig;
} ANSIG;

static ANSIG *signals;
static int signalmax;

static int sigpipe [2];
static sig_atomic_t volatile gotsig;
static ev_io sigev;

void inline_size
signals_init (ANSIG *base, int count)
{
  while (count--)
    {
      base->head   = 0;
      base->gotsig = 0;

      ++base;
    }
}

static void
sighandler (int signum)
{
#if _WIN32
  signal (signum, sighandler);
#endif

  signals [signum - 1].gotsig = 1;

  if (!gotsig)
    {
      int old_errno = errno;
      gotsig = 1;
      write (sigpipe [1], &signum, 1);
      errno = old_errno;
    }
}

void noinline
ev_feed_signal_event (EV_P_ int signum)
{
  WL w;

#if EV_MULTIPLICITY
  assert (("feeding signal events is only supported in the default loop", loop == ev_default_loop_ptr));
#endif

  --signum;

  if (signum < 0 || signum >= signalmax)
    return;

  signals [signum].gotsig = 0;

  for (w = signals [signum].head; w; w = w->next)
    ev_feed_event (EV_A_ (W)w, EV_SIGNAL);
}

static void
sigcb (EV_P_ ev_io *iow, int revents)
{
  int signum;

  read (sigpipe [0], &revents, 1);
  gotsig = 0;

  for (signum = signalmax; signum--; )
    if (signals [signum].gotsig)
      ev_feed_signal_event (EV_A_ signum + 1);
}

void inline_size
fd_intern (int fd)
{
#ifdef _WIN32
  int arg = 1;
  ioctlsocket (_get_osfhandle (fd), FIONBIO, &arg);
#else
  fcntl (fd, F_SETFD, FD_CLOEXEC);
  fcntl (fd, F_SETFL, O_NONBLOCK);
#endif
}

static void noinline
siginit (EV_P)
{
  fd_intern (sigpipe [0]);
  fd_intern (sigpipe [1]);

  ev_io_set (&sigev, sigpipe [0], EV_READ);
  ev_io_start (EV_A_ &sigev);
  ev_unref (EV_A); /* child watcher should not keep loop alive */
}

/*****************************************************************************/
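/* child watchers are implemented on top of a SIGCHLD signal watcher in the
 * default loop; watchers are hashed by pid into the childs [] array and
 * reaped pids are matched against them in child_reap (). */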
static ev_child *childs [EV_PID_HASHSIZE];

#ifndef _WIN32

static ev_signal childev;

void inline_speed
child_reap (EV_P_ ev_signal *sw, int chain, int pid, int status)
{
  ev_child *w;

  for (w = (ev_child *)childs [chain & (EV_PID_HASHSIZE - 1)]; w; w = (ev_child *)((WL)w)->next)
    if (w->pid == pid || !w->pid)
      {
        ev_priority (w) = ev_priority (sw); /* need to do it *now* */
        w->rpid         = pid;
        w->rstatus      = status;
        ev_feed_event (EV_A_ (W)w, EV_CHILD);
      }
}

#ifndef WCONTINUED
# define WCONTINUED 0
#endif

static void
childcb (EV_P_ ev_signal *sw, int revents)
{
  int pid, status;

  /* some systems define WCONTINUED but then fail to support it (linux 2.4) */
  if (0 >= (pid = waitpid (-1, &status, WNOHANG | WUNTRACED | WCONTINUED)))
    if (!WCONTINUED
        || errno != EINVAL
        || 0 >= (pid = waitpid (-1, &status, WNOHANG | WUNTRACED)))
      return;

  /* make sure we are called again until all children have been reaped */
  /* we need to do it this way so that the callback gets called before we continue */
  ev_feed_event (EV_A_ (W)sw, EV_SIGNAL);

  child_reap (EV_A_ sw, pid, pid, status);
  if (EV_PID_HASHSIZE > 1)
    child_reap (EV_A_ sw, 0, pid, status); /* this might trigger a watcher twice, but feed_event catches that */
}

#endif

/*****************************************************************************/
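/* the backend implementations are included textually; loop_init () below
 * tries them in order of preference (port, kqueue, epoll, poll, select) and
 * keeps the first one whose EVBACKEND_* flag is both requested and
 * supported. */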
#if EV_USE_PORT
# include "ev_port.c"
#endif
#if EV_USE_KQUEUE
# include "ev_kqueue.c"
#endif
#if EV_USE_EPOLL
# include "ev_epoll.c"
#endif
#if EV_USE_POLL
# include "ev_poll.c"
#endif
#if EV_USE_SELECT
# include "ev_select.c"
#endif

int
ev_version_major (void)
{
  return EV_VERSION_MAJOR;
}

int
ev_version_minor (void)
{
  return EV_VERSION_MINOR;
}

/* return true if we are running with elevated privileges and should ignore env variables */
int inline_size
enable_secure (void)
{
#ifdef _WIN32
  return 0;
#else
  return getuid () != geteuid ()
      || getgid () != getegid ();
#endif
}

unsigned int
ev_supported_backends (void)
{
  unsigned int flags = 0;

  if (EV_USE_PORT  ) flags |= EVBACKEND_PORT;
  if (EV_USE_KQUEUE) flags |= EVBACKEND_KQUEUE;
  if (EV_USE_EPOLL ) flags |= EVBACKEND_EPOLL;
  if (EV_USE_POLL  ) flags |= EVBACKEND_POLL;
  if (EV_USE_SELECT) flags |= EVBACKEND_SELECT;

  return flags;
}

unsigned int
ev_recommended_backends (void)
{
  unsigned int flags = ev_supported_backends ();

#ifndef __NetBSD__
  /* kqueue is borked on everything but netbsd apparently */
  /* it usually doesn't work correctly on anything but sockets and pipes */
  flags &= ~EVBACKEND_KQUEUE;
#endif
#ifdef __APPLE__
  // flags &= ~EVBACKEND_KQUEUE; for documentation
  flags &= ~EVBACKEND_POLL;
#endif

  return flags;
}

unsigned int
ev_embeddable_backends (void)
{
  return EVBACKEND_EPOLL
       | EVBACKEND_KQUEUE
       | EVBACKEND_PORT;
}

unsigned int
ev_backend (EV_P)
{
  return backend;
}

unsigned int
ev_loop_count (EV_P)
{
  return loop_count;
}
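/* loop_init () sets up the clocks, honours the LIBEV_FLAGS environment
 * variable (unless EVFLAG_NOENV is set or the process runs with elevated
 * privileges) and selects a backend; loop_fork () re-creates the kernel
 * state that does not survive fork (). */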
static void noinline
loop_init (EV_P_ unsigned int flags)
{
  if (!backend)
    {
#if EV_USE_MONOTONIC
      {
        struct timespec ts;
        if (!clock_gettime (CLOCK_MONOTONIC, &ts))
          have_monotonic = 1;
      }
#endif

      ev_rt_now = ev_time ();
      mn_now    = get_clock ();
      now_floor = mn_now;
      rtmn_diff = ev_rt_now - mn_now;

      /* pid check not overridable via env */
#ifndef _WIN32
      if (flags & EVFLAG_FORKCHECK)
        curpid = getpid ();
#endif

      if (!(flags & EVFLAG_NOENV)
          && !enable_secure ()
          && getenv ("LIBEV_FLAGS"))
        flags = atoi (getenv ("LIBEV_FLAGS"));

      if (!(flags & 0x0000ffffUL))
        flags |= ev_recommended_backends ();

      backend = 0;
      backend_fd = -1;
#if EV_USE_INOTIFY
      fs_fd = -2;
#endif

#if EV_USE_PORT
      if (!backend && (flags & EVBACKEND_PORT  )) backend = port_init   (EV_A_ flags);
#endif
#if EV_USE_KQUEUE
      if (!backend && (flags & EVBACKEND_KQUEUE)) backend = kqueue_init (EV_A_ flags);
#endif
#if EV_USE_EPOLL
      if (!backend && (flags & EVBACKEND_EPOLL )) backend = epoll_init  (EV_A_ flags);
#endif
#if EV_USE_POLL
      if (!backend && (flags & EVBACKEND_POLL  )) backend = poll_init   (EV_A_ flags);
#endif
#if EV_USE_SELECT
      if (!backend && (flags & EVBACKEND_SELECT)) backend = select_init (EV_A_ flags);
#endif

      ev_init (&sigev, sigcb);
      ev_set_priority (&sigev, EV_MAXPRI);
    }
}

static void noinline
loop_destroy (EV_P)
{
  int i;

#if EV_USE_INOTIFY
  if (fs_fd >= 0)
    close (fs_fd);
#endif

  if (backend_fd >= 0)
    close (backend_fd);

#if EV_USE_PORT
  if (backend == EVBACKEND_PORT  ) port_destroy   (EV_A);
#endif
#if EV_USE_KQUEUE
  if (backend == EVBACKEND_KQUEUE) kqueue_destroy (EV_A);
#endif
#if EV_USE_EPOLL
  if (backend == EVBACKEND_EPOLL ) epoll_destroy  (EV_A);
#endif
#if EV_USE_POLL
  if (backend == EVBACKEND_POLL  ) poll_destroy   (EV_A);
#endif
#if EV_USE_SELECT
  if (backend == EVBACKEND_SELECT) select_destroy (EV_A);
#endif

  for (i = NUMPRI; i--; )
    array_free (pending, [i]);

  /* have to use the microsoft-never-gets-it-right macro */
  array_free (fdchange, EMPTY0);
  array_free (timer, EMPTY0);
#if EV_PERIODIC_ENABLE
  array_free (periodic, EMPTY0);
#endif
  array_free (idle, EMPTY0);
  array_free (prepare, EMPTY0);
  array_free (check, EMPTY0);

  backend = 0;
}

void inline_size infy_fork (EV_P);

void inline_size
loop_fork (EV_P)
{
#if EV_USE_PORT
  if (backend == EVBACKEND_PORT  ) port_fork   (EV_A);
#endif
#if EV_USE_KQUEUE
  if (backend == EVBACKEND_KQUEUE) kqueue_fork (EV_A);
#endif
#if EV_USE_EPOLL
  if (backend == EVBACKEND_EPOLL ) epoll_fork  (EV_A);
#endif
#if EV_USE_INOTIFY
  infy_fork (EV_A);
#endif

  if (ev_is_active (&sigev))
    {
      /* default loop */

      ev_ref (EV_A);
      ev_io_stop (EV_A_ &sigev);
      close (sigpipe [0]);
      close (sigpipe [1]);

      while (pipe (sigpipe))
        syserr ("(libev) error creating pipe");

      siginit (EV_A);
    }

  postfork = 0;
}
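/* public loop management: ev_loop_new () creates additional loops, while the
 * default loop additionally owns the signal pipe and, on POSIX, the SIGCHLD
 * child watcher. ev_default_fork ()/ev_loop_fork () merely set postfork; the
 * actual work happens on the next ev_loop () iteration. */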
#if EV_MULTIPLICITY
struct ev_loop *
ev_loop_new (unsigned int flags)
{
  struct ev_loop *loop = (struct ev_loop *)ev_malloc (sizeof (struct ev_loop));

  memset (loop, 0, sizeof (struct ev_loop));

  loop_init (EV_A_ flags);

  if (ev_backend (EV_A))
    return loop;

  return 0;
}

void
ev_loop_destroy (EV_P)
{
  loop_destroy (EV_A);
  ev_free (loop);
}

void
ev_loop_fork (EV_P)
{
  postfork = 1;
}

#endif

#if EV_MULTIPLICITY
struct ev_loop *
ev_default_loop_init (unsigned int flags)
#else
int
ev_default_loop (unsigned int flags)
#endif
{
  if (sigpipe [0] == sigpipe [1])
    if (pipe (sigpipe))
      return 0;

  if (!ev_default_loop_ptr)
    {
#if EV_MULTIPLICITY
      struct ev_loop *loop = ev_default_loop_ptr = &default_loop_struct;
#else
      ev_default_loop_ptr = 1;
#endif

      loop_init (EV_A_ flags);

      if (ev_backend (EV_A))
        {
          siginit (EV_A);

#ifndef _WIN32
          ev_signal_init (&childev, childcb, SIGCHLD);
          ev_set_priority (&childev, EV_MAXPRI);
          ev_signal_start (EV_A_ &childev);
          ev_unref (EV_A); /* child watcher should not keep loop alive */
#endif
        }
      else
        ev_default_loop_ptr = 0;
    }

  return ev_default_loop_ptr;
}

void
ev_default_destroy (void)
{
#if EV_MULTIPLICITY
  struct ev_loop *loop = ev_default_loop_ptr;
#endif

#ifndef _WIN32
  ev_ref (EV_A); /* child watcher */
  ev_signal_stop (EV_A_ &childev);
#endif

  ev_ref (EV_A); /* signal watcher */
  ev_io_stop (EV_A_ &sigev);

  close (sigpipe [0]); sigpipe [0] = 0;
  close (sigpipe [1]); sigpipe [1] = 0;

  loop_destroy (EV_A);
}

void
ev_default_fork (void)
{
#if EV_MULTIPLICITY
  struct ev_loop *loop = ev_default_loop_ptr;
#endif

  if (backend)
    postfork = 1;
}

/*****************************************************************************/
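/* pending-event processing: call_pending () runs queued callbacks from the
 * highest priority down to the lowest, clearing each watcher's pending flag
 * before invoking it. */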
int inline_size
any_pending (EV_P)
{
  int pri;

  for (pri = NUMPRI; pri--; )
    if (pendingcnt [pri])
      return 1;

  return 0;
}

void inline_speed
call_pending (EV_P)
{
  int pri;

  for (pri = NUMPRI; pri--; )
    while (pendingcnt [pri])
      {
        ANPENDING *p = pendings [pri] + --pendingcnt [pri];

        if (expect_true (p->w))
          {
            /*assert (("non-pending watcher on pending list", p->w->pending));*/

            p->w->pending = 0;
            EV_CB_INVOKE (p->w, p->events);
          }
      }
}
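/* timer/periodic reification: expired entries at the top of the respective
 * heap are fed as events; repeating timers are pushed forward by their
 * repeat interval, periodics by their reschedule callback or interval. */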
void inline_size
timers_reify (EV_P)
{
  while (timercnt && ((WT)timers [0])->at <= mn_now)
    {
      ev_timer *w = timers [0];

      /*assert (("inactive timer on timer heap detected", ev_is_active (w)));*/

      /* first reschedule or stop timer */
      if (w->repeat)
        {
          assert (("negative ev_timer repeat value found while processing timers", w->repeat > 0.));

          ((WT)w)->at += w->repeat;
          if (((WT)w)->at < mn_now)
            ((WT)w)->at = mn_now;

          downheap ((WT *)timers, timercnt, 0);
        }
      else
        ev_timer_stop (EV_A_ w); /* nonrepeating: stop timer */

      ev_feed_event (EV_A_ (W)w, EV_TIMEOUT);
    }
}

#if EV_PERIODIC_ENABLE
void inline_size
periodics_reify (EV_P)
{
  while (periodiccnt && ((WT)periodics [0])->at <= ev_rt_now)
    {
      ev_periodic *w = periodics [0];

      /*assert (("inactive timer on periodic heap detected", ev_is_active (w)));*/

      /* first reschedule or stop timer */
      if (w->reschedule_cb)
        {
          ((WT)w)->at = w->reschedule_cb (w, ev_rt_now + 0.0001);
          assert (("ev_periodic reschedule callback returned time in the past", ((WT)w)->at > ev_rt_now));
          downheap ((WT *)periodics, periodiccnt, 0);
        }
      else if (w->interval)
        {
          ((WT)w)->at += floor ((ev_rt_now - ((WT)w)->at) / w->interval + 1.) * w->interval;
          assert (("ev_periodic timeout in the past detected while processing timers, negative interval?", ((WT)w)->at > ev_rt_now));
          downheap ((WT *)periodics, periodiccnt, 0);
        }
      else
        ev_periodic_stop (EV_A_ w); /* nonrepeating: stop timer */

      ev_feed_event (EV_A_ (W)w, EV_PERIODIC);
    }
}

static void noinline
periodics_reschedule (EV_P)
{
  int i;

  /* adjust periodics after time jump */
  for (i = 0; i < periodiccnt; ++i)
    {
      ev_periodic *w = periodics [i];

      if (w->reschedule_cb)
        ((WT)w)->at = w->reschedule_cb (w, ev_rt_now);
      else if (w->interval)
        ((WT)w)->at += ceil ((ev_rt_now - ((WT)w)->at) / w->interval) * w->interval;
    }

  /* now rebuild the heap */
  for (i = periodiccnt >> 1; i--; )
    downheap ((WT *)periodics, periodiccnt, i);
}
#endif
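/* clock management: time_update () detects time jumps. With a monotonic
 * clock only the realtime offset (rtmn_diff) needs correcting and periodics
 * are rescheduled; without one, relative timers are shifted by the detected
 * jump as well. */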
int inline_size
time_update_monotonic (EV_P)
{
  mn_now = get_clock ();

  if (expect_true (mn_now - now_floor < MIN_TIMEJUMP * .5))
    {
      ev_rt_now = rtmn_diff + mn_now;
      return 0;
    }
  else
    {
      now_floor = mn_now;
      ev_rt_now = ev_time ();
      return 1;
    }
}

void inline_size
time_update (EV_P)
{
  int i;

#if EV_USE_MONOTONIC
  if (expect_true (have_monotonic))
    {
      if (time_update_monotonic (EV_A))
        {
          ev_tstamp odiff = rtmn_diff;

          /* loop a few times, before making important decisions.
           * on the choice of "4": one iteration isn't enough,
           * in case we get preempted during the calls to
           * ev_time and get_clock. a second call is almost guaranteed
           * to succeed in that case, though. and looping a few more times
           * doesn't hurt either as we only do this on time-jumps or
           * in the unlikely event of having been preempted here.
           */
          for (i = 4; --i; )
            {
              rtmn_diff = ev_rt_now - mn_now;

              if (fabs (odiff - rtmn_diff) < MIN_TIMEJUMP)
                return; /* all is well */

              ev_rt_now = ev_time ();
              mn_now    = get_clock ();
              now_floor = mn_now;
            }

# if EV_PERIODIC_ENABLE
          periodics_reschedule (EV_A);
# endif
          /* no timer adjustment, as the monotonic clock doesn't jump */
          /* timers_reschedule (EV_A_ rtmn_diff - odiff) */
        }
    }
  else
#endif
    {
      ev_rt_now = ev_time ();

      if (expect_false (mn_now > ev_rt_now || mn_now < ev_rt_now - MAX_BLOCKTIME - MIN_TIMEJUMP))
        {
#if EV_PERIODIC_ENABLE
          periodics_reschedule (EV_A);
#endif

          /* adjust timers. this is easy, as the offset is the same for all of them */
          for (i = 0; i < timercnt; ++i)
            ((WT)timers [i])->at += ev_rt_now - mn_now;
        }

      mn_now = ev_rt_now;
    }
}
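/* the main loop: each iteration queues prepare watchers, reifies fork and fd
 * state, computes the maximum blocking time from the timer/periodic heaps
 * (capped at MAX_BLOCKTIME), polls the backend, updates the clocks and then
 * runs timer, periodic, idle and check watchers. ev_ref ()/ev_unref () adjust
 * the active-watcher count that keeps the loop alive. */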
void
ev_ref (EV_P)
{
  ++activecnt;
}

void
ev_unref (EV_P)
{
  --activecnt;
}

static int loop_done;

void
ev_loop (EV_P_ int flags)
{
  loop_done = flags & (EVLOOP_ONESHOT | EVLOOP_NONBLOCK)
            ? EVUNLOOP_ONE
            : EVUNLOOP_CANCEL;

  call_pending (EV_A); /* in case we recurse, ensure ordering stays nice and clean */

  do
    {
#ifndef _WIN32
      if (expect_false (curpid)) /* penalise the forking check even more */
        if (expect_false (getpid () != curpid))
          {
            curpid = getpid ();
            postfork = 1;
          }
#endif

#if EV_FORK_ENABLE
      /* we might have forked, so queue fork handlers */
      if (expect_false (postfork))
        if (forkcnt)
          {
            queue_events (EV_A_ (W *)forks, forkcnt, EV_FORK);
            call_pending (EV_A);
          }
#endif

      /* queue prepare watchers (and execute them) */
      if (expect_false (preparecnt))
        {
          queue_events (EV_A_ (W *)prepares, preparecnt, EV_PREPARE);
          call_pending (EV_A);
        }

      if (expect_false (!activecnt))
        break;

      /* we might have forked, so reify kernel state if necessary */
      if (expect_false (postfork))
        loop_fork (EV_A);

      /* update fd-related kernel structures */
      fd_reify (EV_A);

      /* calculate blocking time */
      {
        ev_tstamp block;

        if (expect_false (flags & EVLOOP_NONBLOCK || idlecnt || !activecnt))
          block = 0.; /* do not block at all */
        else
          {
            /* update time to cancel out callback processing overhead */
#if EV_USE_MONOTONIC
            if (expect_true (have_monotonic))
              time_update_monotonic (EV_A);
            else
#endif
              {
                ev_rt_now = ev_time ();
                mn_now    = ev_rt_now;
              }

            block = MAX_BLOCKTIME;

            if (timercnt)
              {
                ev_tstamp to = ((WT)timers [0])->at - mn_now + backend_fudge;
                if (block > to) block = to;
              }

#if EV_PERIODIC_ENABLE
            if (periodiccnt)
              {
                ev_tstamp to = ((WT)periodics [0])->at - ev_rt_now + backend_fudge;
                if (block > to) block = to;
              }
#endif

            if (expect_false (block < 0.)) block = 0.;
          }

        ++loop_count;
        backend_poll (EV_A_ block);
      }

      /* update ev_rt_now, do magic */
      time_update (EV_A);

      /* queue pending timers and reschedule them */
      timers_reify (EV_A); /* relative timers called last */
#if EV_PERIODIC_ENABLE
      periodics_reify (EV_A); /* absolute timers called first */
#endif

      /* queue idle watchers unless other events are pending */
      if (idlecnt && !any_pending (EV_A))
        queue_events (EV_A_ (W *)idles, idlecnt, EV_IDLE);

      /* queue check watchers, to be executed first */
      if (expect_false (checkcnt))
        queue_events (EV_A_ (W *)checks, checkcnt, EV_CHECK);

      call_pending (EV_A);
    }
  while (expect_true (activecnt && !loop_done));

  if (loop_done == EVUNLOOP_ONE)
    loop_done = EVUNLOOP_CANCEL;
}

void
ev_unloop (EV_P_ int how)
{
  loop_done = how;
}

/*****************************************************************************/
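/* generic watcher bookkeeping: wlist_add ()/wlist_del () maintain the simple
 * singly-linked watcher lists, ev_start ()/ev_stop () clamp the priority into
 * [EV_MINPRI, EV_MAXPRI] and keep the loop's reference count in sync. */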
void inline_size
wlist_add (WL *head, WL elem)
{
  elem->next = *head;
  *head = elem;
}

void inline_size
wlist_del (WL *head, WL elem)
{
  while (*head)
    {
      if (*head == elem)
        {
          *head = elem->next;
          return;
        }

      head = &(*head)->next;
    }
}

void inline_speed
ev_clear_pending (EV_P_ W w)
{
  if (w->pending)
    {
      pendings [ABSPRI (w)][w->pending - 1].w = 0;
      w->pending = 0;
    }
}

void inline_speed
ev_start (EV_P_ W w, int active)
{
  if (w->priority < EV_MINPRI) w->priority = EV_MINPRI;
  if (w->priority > EV_MAXPRI) w->priority = EV_MAXPRI;

  w->active = active;
  ev_ref (EV_A);
}

void inline_size
ev_stop (EV_P_ W w)
{
  ev_unref (EV_A);
  w->active = 0;
}

/*****************************************************************************/
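/* watcher-type specific start/stop functions; note that an active ev_timer
 * stores its expiry relative to mn_now (see the += mn_now on start and the
 * -= mn_now on stop). */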
void
ev_io_start (EV_P_ ev_io *w)
{
  int fd = w->fd;

  if (expect_false (ev_is_active (w)))
    return;

  assert (("ev_io_start called with negative fd", fd >= 0));

  ev_start (EV_A_ (W)w, 1);
  array_needsize (ANFD, anfds, anfdmax, fd + 1, anfds_init);
  wlist_add ((WL *)&anfds[fd].head, (WL)w);

  fd_change (EV_A_ fd);
}

void
ev_io_stop (EV_P_ ev_io *w)
{
  ev_clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  assert (("ev_io_stop called with illegal fd (must stay constant after start!)", w->fd >= 0 && w->fd < anfdmax));

  wlist_del ((WL *)&anfds[w->fd].head, (WL)w);
  ev_stop (EV_A_ (W)w);

  fd_change (EV_A_ w->fd);
}

void
ev_timer_start (EV_P_ ev_timer *w)
{
  if (expect_false (ev_is_active (w)))
    return;

  ((WT)w)->at += mn_now;

  assert (("ev_timer_start called with negative timer repeat value", w->repeat >= 0.));

  ev_start (EV_A_ (W)w, ++timercnt);
  array_needsize (ev_timer *, timers, timermax, timercnt, EMPTY2);
  timers [timercnt - 1] = w;
  upheap ((WT *)timers, timercnt - 1);

  /*assert (("internal timer heap corruption", timers [((W)w)->active - 1] == w));*/
}

void
ev_timer_stop (EV_P_ ev_timer *w)
{
  ev_clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  assert (("internal timer heap corruption", timers [((W)w)->active - 1] == w));

  {
    int active = ((W)w)->active;

    if (expect_true (--active < --timercnt))
      {
        timers [active] = timers [timercnt];
        adjustheap ((WT *)timers, timercnt, active);
      }
  }

  ((WT)w)->at -= mn_now;

  ev_stop (EV_A_ (W)w);
}

void
ev_timer_again (EV_P_ ev_timer *w)
{
  if (ev_is_active (w))
    {
      if (w->repeat)
        {
          ((WT)w)->at = mn_now + w->repeat;
          adjustheap ((WT *)timers, timercnt, ((W)w)->active - 1);
        }
      else
        ev_timer_stop (EV_A_ w);
    }
  else if (w->repeat)
    {
      w->at = w->repeat;
      ev_timer_start (EV_A_ w);
    }
}
#if EV_PERIODIC_ENABLE
void
ev_periodic_start (EV_P_ ev_periodic *w)
{
  if (expect_false (ev_is_active (w)))
    return;

  if (w->reschedule_cb)
    ((WT)w)->at = w->reschedule_cb (w, ev_rt_now);
  else if (w->interval)
    {
      assert (("ev_periodic_start called with negative interval value", w->interval >= 0.));
      /* this formula differs from the one in periodic_reify because we do not always round up */
      ((WT)w)->at += ceil ((ev_rt_now - ((WT)w)->at) / w->interval) * w->interval;