io.c

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * Please read the file COPYING, README and AUTHORS for more information.
 *
 * I/O abstraction interface.
 * Copyright (c) 2005 Florian Westphal (westphal@foo.fh-furtwangen.de)
 *
 */

#include "portab.h"

static char UNUSED id[] = "$Id: io.c,v 1.31 2008/04/03 20:56:44 fw Exp $";

#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>
#include <fcntl.h>

#include "array.h"
#include "io.h"
#include "log.h"

/* Enables extra debug messages in event add/delete/callback code. */
/* #define DEBUG_IO */
typedef struct {
#ifdef PROTOTYPES
	void (*callback)(int, short);
#else
	void (*callback)();
#endif
	short what;
} io_event;

/* initializer must match the two members of io_event */
#define INIT_IOEVENT { NULL, 0 }

#define IO_ERROR 4
#ifdef HAVE_EPOLL_CREATE
# define IO_USE_EPOLL 1
# ifdef HAVE_SELECT
#  define IO_USE_SELECT 1
# endif
#else
# ifdef HAVE_KQUEUE
#  define IO_USE_KQUEUE 1
# else
#  ifdef HAVE_SYS_DEVPOLL_H
#   define IO_USE_DEVPOLL 1
#  else
#   ifdef HAVE_POLL
#    define IO_USE_POLL 1
#   else
#    ifdef HAVE_SELECT
#     define IO_USE_SELECT 1
#    else
#     error "no IO API available!?"
#    endif /* HAVE_SELECT */
#   endif /* HAVE_POLL */
#  endif /* HAVE_SYS_DEVPOLL_H */
# endif /* HAVE_KQUEUE */
#endif /* HAVE_EPOLL_CREATE */
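
/*
 * Summary of the selection above (priority order, given the usual autoconf
 * HAVE_* macros): epoll > kqueue > /dev/poll > poll > select. Note that
 * when epoll is chosen, the select() backend is compiled in as well and
 * used as a runtime fallback if epoll_create() fails at startup (see
 * io_library_init_epoll() and the io_masterfd checks below).
 */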

static bool library_initialized = false;

#ifdef IO_USE_EPOLL
#include <sys/epoll.h>

static int io_masterfd = -1;
static bool io_event_change_epoll(int fd, short what, const int action);
static int io_dispatch_epoll(struct timeval *tv);
#endif

#ifdef IO_USE_KQUEUE
#include <sys/types.h>
#include <sys/event.h>

static array io_evcache;
static int io_masterfd;

static int io_dispatch_kqueue(struct timeval *tv);
static bool io_event_change_kqueue(int, short, const int action);
#endif

#ifdef IO_USE_POLL
#include <poll.h>

static array pollfds;
static int poll_maxfd;

static bool io_event_change_poll PARAMS((int fd, short what));
#endif

#ifdef IO_USE_DEVPOLL
#include <sys/devpoll.h>

static int io_masterfd;

static bool io_event_change_devpoll(int fd, short what);
#endif

#ifdef IO_USE_SELECT
#include "defines.h"	/* for conn.h */
#include "conn.h"	/* for CONN_IDX (needed by resolve.h) */
#include "resolve.h"	/* for RES_STAT (needed by conf.h) */
#include "conf.h"	/* for Conf_MaxConnections */

static fd_set readers;
static fd_set writers;
/*
 * this is the first argument for select(), i.e.
 * the largest fd registered, plus one.
 */
static int select_maxfd;
static int io_dispatch_select(struct timeval *tv);

#ifndef IO_USE_EPOLL
#define io_masterfd -1
#endif
#endif /* IO_USE_SELECT */

static array io_events;

static void io_docallback PARAMS((int fd, short what));

#ifdef DEBUG_IO
static void
io_debug(const char *s, int fd, int what)
{
	Log(LOG_DEBUG, "%s: %d, %d\n", s, fd, what);
}
#else
static inline void
io_debug(const char UNUSED *s, int UNUSED a, int UNUSED b)
{ /* NOTHING */ }
#endif

static io_event *
io_event_get(int fd)
{
	io_event *i;

	assert(fd >= 0);

	i = (io_event *) array_get(&io_events, sizeof(io_event), (size_t) fd);

	assert(i != NULL);

	return i;
}

#ifdef IO_USE_DEVPOLL
static int
io_dispatch_devpoll(struct timeval *tv)
{
	struct dvpoll dvp;
	time_t sec = tv->tv_sec * 1000;
	/* DP_POLL expects milliseconds; tv_usec is microseconds */
	int i, total, ret, timeout = tv->tv_usec / 1000 + sec;
	short what;
	struct pollfd p[100];

	if (timeout < 0)
		timeout = 1000;

	total = 0;
	do {
		dvp.dp_timeout = timeout;
		dvp.dp_nfds = 100;
		dvp.dp_fds = p;

		ret = ioctl(io_masterfd, DP_POLL, &dvp);
		total += ret;
		if (ret <= 0)
			return total;

		for (i = 0; i < ret; i++) {
			what = 0;
			if (p[i].revents & (POLLIN|POLLPRI))
				what = IO_WANTREAD;
			if (p[i].revents & POLLOUT)
				what |= IO_WANTWRITE;
			if (p[i].revents && !what) {
				/* other flag is set, probably POLLERR */
				what = IO_ERROR;
			}
			io_docallback(p[i].fd, what);
		}
	} while (ret == 100);

	return total;
}

static bool
io_event_change_devpoll(int fd, short what)
{
	struct pollfd p;

	p.events = 0;

	if (what & IO_WANTREAD)
		p.events = POLLIN | POLLPRI;
	if (what & IO_WANTWRITE)
		p.events |= POLLOUT;

	p.fd = fd;
	return write(io_masterfd, &p, sizeof p) == (ssize_t)sizeof p;
}

static void
io_close_devpoll(int fd)
{
	struct pollfd p;

	p.events = POLLREMOVE;
	p.fd = fd;
	write(io_masterfd, &p, sizeof p);
}

static void
io_library_init_devpoll(unsigned int eventsize)
{
	io_masterfd = open("/dev/poll", O_RDWR);
	if (io_masterfd >= 0)
		library_initialized = true;
	Log(LOG_INFO, "IO subsystem: /dev/poll (initial maxfd %u, masterfd %d).",
	    eventsize, io_masterfd);
}
#else
static inline void io_close_devpoll(int UNUSED x) { /* NOTHING */ }
static inline void io_library_init_devpoll(unsigned int UNUSED ev) { /* NOTHING */ }
#endif

#ifdef IO_USE_POLL
static int
io_dispatch_poll(struct timeval *tv)
{
	time_t sec = tv->tv_sec * 1000;
	/* poll() expects milliseconds; tv_usec is microseconds */
	int i, ret, timeout = tv->tv_usec / 1000 + sec;
	int fds_ready;
	short what;
	struct pollfd *p = array_start(&pollfds);

	if (timeout < 0)
		timeout = 1000;

	ret = poll(p, poll_maxfd + 1, timeout);
	if (ret <= 0)
		return ret;

	fds_ready = ret;
	for (i = 0; i <= poll_maxfd; i++) {
		what = 0;
		if (p[i].revents & (POLLIN|POLLPRI))
			what = IO_WANTREAD;
		if (p[i].revents & POLLOUT)
			what |= IO_WANTWRITE;
		if (p[i].revents && !what) {
			/* other flag is set, probably POLLERR */
			what = IO_ERROR;
		}
		if (what) {
			fds_ready--;
			io_docallback(i, what);
		}
		if (fds_ready <= 0)
			break;
	}

	return ret;
}

static bool
io_event_change_poll(int fd, short what)
{
	struct pollfd *p;
	short events = 0;

	if (what & IO_WANTREAD)
		events = POLLIN | POLLPRI;
	if (what & IO_WANTWRITE)
		events |= POLLOUT;

	p = array_alloc(&pollfds, sizeof *p, fd);
	if (p) {
		p->events = events;
		p->fd = fd;
		if (fd > poll_maxfd)
			poll_maxfd = fd;
	}
	return p != NULL;
}

static void
io_close_poll(int fd)
{
	struct pollfd *p;

	p = array_get(&pollfds, sizeof *p, fd);
	if (!p)
		return;

	p->fd = -1;
	if (fd == poll_maxfd) {
		while (poll_maxfd > 0) {
			--poll_maxfd;
			p = array_get(&pollfds, sizeof *p, poll_maxfd);
			if (p && p->fd >= 0)
				break;
		}
	}
}

static void
io_library_init_poll(unsigned int eventsize)
{
	struct pollfd *p;

	array_init(&pollfds);
	poll_maxfd = 0;
	Log(LOG_INFO, "IO subsystem: poll (initial maxfd %u).", eventsize);
	p = array_alloc(&pollfds, sizeof(struct pollfd), eventsize);
	if (p) {
		unsigned i;
		p = array_start(&pollfds);
		for (i = 0; i < eventsize; i++)
			p[i].fd = -1;
		library_initialized = true;
	}
}
#else
static inline void io_close_poll(int UNUSED x) { /* NOTHING */ }
static inline void io_library_init_poll(unsigned int UNUSED ev) { /* NOTHING */ }
#endif

#ifdef IO_USE_SELECT
static int
io_dispatch_select(struct timeval *tv)
{
	fd_set readers_tmp = readers;
	fd_set writers_tmp = writers;
	short what;
	int ret, i;
	int fds_ready;

	ret = select(select_maxfd + 1, &readers_tmp, &writers_tmp, NULL, tv);
	if (ret <= 0)
		return ret;

	fds_ready = ret;

	for (i = 0; i <= select_maxfd; i++) {
		what = 0;
		if (FD_ISSET(i, &readers_tmp)) {
			what = IO_WANTREAD;
			fds_ready--;
		}
		if (FD_ISSET(i, &writers_tmp)) {
			what |= IO_WANTWRITE;
			fds_ready--;
		}
		if (what)
			io_docallback(i, what);
		if (fds_ready <= 0)
			break;
	}

	return ret;
}

static void
io_library_init_select(unsigned int eventsize)
{
	if (library_initialized)
		return;
	Log(LOG_INFO, "IO subsystem: select (initial maxfd %u).", eventsize);
	FD_ZERO(&readers);
	FD_ZERO(&writers);
#ifdef FD_SETSIZE
	if (Conf_MaxConnections >= (int)FD_SETSIZE) {
		Log(LOG_WARNING,
		    "MaxConnections (%d) exceeds limit (%u), changed MaxConnections to %u.",
		    Conf_MaxConnections, FD_SETSIZE, FD_SETSIZE - 1);
		Conf_MaxConnections = FD_SETSIZE - 1;
	}
#else
	Log(LOG_WARNING,
	    "FD_SETSIZE undefined, don't know how many descriptors select() can handle on your platform ...");
#endif /* FD_SETSIZE */
	library_initialized = true;
}

static void
io_close_select(int fd)
{
	io_event *i;

	if (io_masterfd >= 0)	/* Are we using epoll()? */
		return;

	FD_CLR(fd, &writers);
	FD_CLR(fd, &readers);

	i = io_event_get(fd);
	if (!i)
		return;

	if (fd == select_maxfd) {
		while (select_maxfd > 0) {
			--select_maxfd;	/* find largest fd */
			i = io_event_get(select_maxfd);
			if (i && i->callback)
				break;
		}
	}
}
#else
/* stub signature matches the real initializer: unsigned int */
static inline void io_library_init_select(unsigned int UNUSED x) { /* NOTHING */ }
static inline void io_close_select(int UNUSED x) { /* NOTHING */ }
#endif /* SELECT */

#ifdef IO_USE_EPOLL
static bool
io_event_change_epoll(int fd, short what, const int action)
{
	struct epoll_event ev = { 0, {0} };
	ev.data.fd = fd;

	if (what & IO_WANTREAD)
		ev.events = EPOLLIN | EPOLLPRI;
	if (what & IO_WANTWRITE)
		ev.events |= EPOLLOUT;

	return epoll_ctl(io_masterfd, action, fd, &ev) == 0;
}

static int
io_dispatch_epoll(struct timeval *tv)
{
	time_t sec = tv->tv_sec * 1000;
	/* epoll_wait() expects milliseconds; tv_usec is microseconds */
	int i, total = 0, ret, timeout = tv->tv_usec / 1000 + sec;
	struct epoll_event epoll_ev[100];
	short type;

	if (timeout < 0)
		timeout = 1000;

	do {
		ret = epoll_wait(io_masterfd, epoll_ev, 100, timeout);
		total += ret;
		if (ret <= 0)
			return total;

		for (i = 0; i < ret; i++) {
			type = 0;
			if (epoll_ev[i].events & (EPOLLERR | EPOLLHUP))
				type = IO_ERROR;
			if (epoll_ev[i].events & (EPOLLIN | EPOLLPRI))
				type |= IO_WANTREAD;
			if (epoll_ev[i].events & EPOLLOUT)
				type |= IO_WANTWRITE;

			io_docallback(epoll_ev[i].data.fd, type);
		}

		timeout = 0;
	} while (ret == 100);

	return total;
}

static void
io_library_init_epoll(unsigned int eventsize)
{
	int ecreate_hint = (int)eventsize;
	if (ecreate_hint <= 0)
		ecreate_hint = 128;

	io_masterfd = epoll_create(ecreate_hint);
	if (io_masterfd >= 0) {
		library_initialized = true;
		Log(LOG_INFO,
		    "IO subsystem: epoll (hint size %d, initial maxfd %u, masterfd %d).",
		    ecreate_hint, eventsize, io_masterfd);
		return;
	}
#ifdef IO_USE_SELECT
	Log(LOG_INFO, "Can't initialize epoll() IO interface, falling back to select() ...");
#endif
}
#else
static inline void io_library_init_epoll(unsigned int UNUSED ev) { /* NOTHING */ }
#endif /* IO_USE_EPOLL */

#ifdef IO_USE_KQUEUE
static bool
io_event_kqueue_commit_cache(void)
{
	struct kevent *events;
	bool ret;
	int len = (int) array_length(&io_evcache, sizeof (struct kevent));

	if (!len) /* nothing to do */
		return true;

	assert(len > 0);

	if (len < 0) {
		array_free(&io_evcache);
		return false;
	}

	events = array_start(&io_evcache);

	assert(events != NULL);

	ret = kevent(io_masterfd, events, len, NULL, 0, NULL) == 0;
	if (ret)
		array_trunc(&io_evcache);
	return ret;
}

static bool
io_event_change_kqueue(int fd, short what, const int action)
{
	struct kevent kev;
	bool ret = true;

	if (what & IO_WANTREAD) {
		EV_SET(&kev, fd, EVFILT_READ, action, 0, 0, 0);
		ret = array_catb(&io_evcache, (char*) &kev, sizeof (kev));
		if (!ret)
			ret = kevent(io_masterfd, &kev, 1, NULL, 0, NULL) == 0;
	}

	if (ret && (what & IO_WANTWRITE)) {
		EV_SET(&kev, fd, EVFILT_WRITE, action, 0, 0, 0);
		ret = array_catb(&io_evcache, (char*) &kev, sizeof (kev));
		if (!ret)
			ret = kevent(io_masterfd, &kev, 1, NULL, 0, NULL) == 0;
	}

	if (array_length(&io_evcache, sizeof kev) >= 100)
		io_event_kqueue_commit_cache();
	return ret;
}

static int
io_dispatch_kqueue(struct timeval *tv)
{
	int i, total = 0, ret;
	struct kevent kev[100];
	struct kevent *newevents;
	struct timespec ts;
	int newevents_len;

	ts.tv_sec = tv->tv_sec;
	ts.tv_nsec = tv->tv_usec * 1000;

	do {
		newevents_len = (int) array_length(&io_evcache, sizeof (struct kevent));
		newevents = (newevents_len > 0) ? array_start(&io_evcache) : NULL;
		assert(newevents_len >= 0);

		ret = kevent(io_masterfd, newevents, newevents_len, kev, 100, &ts);
		if (newevents && ret != -1)
			array_trunc(&io_evcache);

		total += ret;
		if (ret <= 0)
			return total;

		for (i = 0; i < ret; i++) {
			io_debug("dispatch_kqueue: fd, kev.flags", (int)kev[i].ident, kev[i].flags);
			if (kev[i].flags & (EV_EOF|EV_ERROR)) {
				if (kev[i].flags & EV_ERROR)
					Log(LOG_ERR, "kevent fd %d: EV_ERROR (%s)",
					    (int)kev[i].ident, strerror((int)kev[i].data));
				io_docallback((int)kev[i].ident, IO_ERROR);
				continue;
			}

			switch (kev[i].filter) {
			case EVFILT_READ:
				io_docallback((int)kev[i].ident, IO_WANTREAD);
				break;
			case EVFILT_WRITE:
				io_docallback((int)kev[i].ident, IO_WANTWRITE);
				break;
			default:
				LogDebug("Unknown kev.filter number %d for fd %d",
					 kev[i].filter, kev[i].ident);
				/* Fall through */
			case EV_ERROR:
				io_docallback((int)kev[i].ident, IO_ERROR);
				break;
			}
		}
		ts.tv_sec = 0;
		ts.tv_nsec = 0;
	} while (ret == 100);

	return total;
}

static void
io_library_init_kqueue(unsigned int eventsize)
{
	io_masterfd = kqueue();

	Log(LOG_INFO,
	    "IO subsystem: kqueue (initial maxfd %u, masterfd %d)",
	    eventsize, io_masterfd);
	if (io_masterfd >= 0)
		library_initialized = true;
}
#else
static inline void io_library_init_kqueue(unsigned int UNUSED ev) { /* NOTHING */ }
#endif

bool
io_library_init(unsigned int eventsize)
{
	if (library_initialized)
		return true;

	if ((eventsize > 0) && !array_alloc(&io_events, sizeof(io_event), (size_t)eventsize))
		eventsize = 0;

	io_library_init_epoll(eventsize);
	io_library_init_kqueue(eventsize);
	io_library_init_devpoll(eventsize);
	io_library_init_poll(eventsize);
	io_library_init_select(eventsize);

	return library_initialized;
}
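
/*
 * Minimal usage sketch for this interface (illustrative only: "sock" and
 * "my_read_cb" are hypothetical, not part of this file):
 *
 *	static void my_read_cb(int fd, short what)
 *	{
 *		char buf[512];
 *		if ((what & IO_WANTREAD) && read(fd, buf, sizeof(buf)) <= 0)
 *			io_close(fd);	// error/EOF: unregister and close
 *	}
 *
 *	io_library_init(100);
 *	io_setnonblock(sock);
 *	io_event_create(sock, IO_WANTREAD, my_read_cb);
 *	for (;;) {
 *		struct timeval tv = { 1, 0 };	// wait at most one second
 *		io_dispatch(&tv);
 *	}
 */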

void
io_library_shutdown(void)
{
#ifdef IO_USE_SELECT
	FD_ZERO(&readers);
	FD_ZERO(&writers);
#endif
#if defined(IO_USE_EPOLL) || defined(IO_USE_KQUEUE) || defined(IO_USE_DEVPOLL)
	if (io_masterfd >= 0)
		close(io_masterfd);
	io_masterfd = -1;
#endif
#ifdef IO_USE_KQUEUE
	array_free(&io_evcache);
#endif
	library_initialized = false;
}

bool
io_event_setcb(int fd, void (*cbfunc) (int, short))
{
	io_event *i = io_event_get(fd);
	if (!i)
		return false;

	i->callback = cbfunc;
	return true;
}

static bool
backend_create_ev(int fd, short what)
{
	bool ret;
#ifdef IO_USE_DEVPOLL
	ret = io_event_change_devpoll(fd, what);
#endif
#ifdef IO_USE_POLL
	ret = io_event_change_poll(fd, what);
#endif
#ifdef IO_USE_EPOLL
	ret = io_event_change_epoll(fd, what, EPOLL_CTL_ADD);
#endif
#ifdef IO_USE_KQUEUE
	ret = io_event_change_kqueue(fd, what, EV_ADD|EV_ENABLE);
#endif
#ifdef IO_USE_SELECT
	if (io_masterfd < 0)
		ret = io_event_add(fd, what);
#endif
	return ret;
}

bool
io_event_create(int fd, short what, void (*cbfunc) (int, short))
{
	bool ret;
	io_event *i;

	assert(fd >= 0);
#if defined(IO_USE_SELECT) && defined(FD_SETSIZE)
	if (io_masterfd < 0 && fd >= FD_SETSIZE) {
		Log(LOG_ERR,
		    "fd %d exceeds FD_SETSIZE (%u) (select can't handle more file descriptors)",
		    fd, FD_SETSIZE);
		return false;
	}
#endif
	i = (io_event *) array_alloc(&io_events, sizeof(io_event), (size_t) fd);
	if (!i) {
		Log(LOG_WARNING,
		    "array_alloc failed: could not allocate space for %d io_event structures",
		    fd);
		return false;
	}

	i->callback = cbfunc;
	i->what = 0;
	ret = backend_create_ev(fd, what);
	if (ret)
		i->what = what;
	return ret;
}

bool
io_event_add(int fd, short what)
{
	io_event *i = io_event_get(fd);

	if (!i)
		return false;

	if ((i->what & what) == what) /* event type is already registered */
		return true;

	io_debug("io_event_add: fd, what", fd, what);

	i->what |= what;
#ifdef IO_USE_EPOLL
	if (io_masterfd >= 0)
		return io_event_change_epoll(fd, i->what, EPOLL_CTL_MOD);
#endif
#ifdef IO_USE_KQUEUE
	return io_event_change_kqueue(fd, what, EV_ADD | EV_ENABLE);
#endif
#ifdef IO_USE_DEVPOLL
	return io_event_change_devpoll(fd, i->what);
#endif
#ifdef IO_USE_POLL
	return io_event_change_poll(fd, i->what);
#endif
#ifdef IO_USE_SELECT
	if (fd > select_maxfd)
		select_maxfd = fd;

	if (what & IO_WANTREAD)
		FD_SET(fd, &readers);
	if (what & IO_WANTWRITE)
		FD_SET(fd, &writers);

	return true;
#endif
	return false;
}

bool
io_setnonblock(int fd)
{
	int flags = fcntl(fd, F_GETFL);
	if (flags == -1)
		return false;

#ifndef O_NONBLOCK
#define O_NONBLOCK O_NDELAY
#endif
	flags |= O_NONBLOCK;

	return fcntl(fd, F_SETFL, flags) == 0;
}
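
/*
 * Typical call site (sketch; "listener" and "my_read_cb" are hypothetical,
 * as in the usage sketch above): switch every accepted connection to
 * non-blocking mode before registering it with the event system:
 *
 *	int newsock = accept(listener, NULL, NULL);
 *	if (newsock >= 0 && io_setnonblock(newsock))
 *		io_event_create(newsock, IO_WANTREAD, my_read_cb);
 */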

bool
io_close(int fd)
{
	io_event *i;

	i = io_event_get(fd);
#ifdef IO_USE_KQUEUE
	if (array_length(&io_evcache, sizeof (struct kevent)))	/* pending data in cache? */
		io_event_kqueue_commit_cache();

	/* both kqueue and epoll remove the fd from all sets automatically,
	 * but only on the last close of the descriptor. Since we don't know
	 * whether this is the last close, we have to remove the fd from the
	 * event set explicitly. */
	if (i) {
		io_event_change_kqueue(fd, i->what, EV_DELETE);
		io_event_kqueue_commit_cache();
	}
#endif
	io_close_devpoll(fd);
	io_close_poll(fd);
	io_close_select(fd);
#ifdef IO_USE_EPOLL
	io_event_change_epoll(fd, 0, EPOLL_CTL_DEL);
#endif
	if (i) {
		i->callback = NULL;
		i->what = 0;
	}
	return close(fd) == 0;
}

bool
io_event_del(int fd, short what)
{
	io_event *i = io_event_get(fd);

	io_debug("io_event_del: trying to delete eventtype; fd, what", fd, what);
	if (!i)
		return false;

	if (!(i->what & what)) /* event is already disabled */
		return true;

	i->what &= ~what;
#ifdef IO_USE_DEVPOLL
	return io_event_change_devpoll(fd, i->what);
#endif
#ifdef IO_USE_POLL
	return io_event_change_poll(fd, i->what);
#endif
#ifdef IO_USE_EPOLL
	if (io_masterfd >= 0)
		return io_event_change_epoll(fd, i->what, EPOLL_CTL_MOD);
#endif
#ifdef IO_USE_KQUEUE
	return io_event_change_kqueue(fd, what, EV_DISABLE);
#endif
#ifdef IO_USE_SELECT
	if (what & IO_WANTWRITE)
		FD_CLR(fd, &writers);
	if (what & IO_WANTREAD)
		FD_CLR(fd, &readers);

	return true;
#endif
	return false;
}
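
/*
 * Typical pattern built on io_event_add()/io_event_del() (sketch): register
 * write interest only while data is queued, so the dispatcher doesn't spin
 * on an always-writable socket:
 *
 *	io_event_add(fd, IO_WANTWRITE);	// data queued for sending
 *	...
 *	io_event_del(fd, IO_WANTWRITE);	// send buffer drained
 */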

int
io_dispatch(struct timeval *tv)
{
#ifdef IO_USE_EPOLL
	if (io_masterfd >= 0)
		return io_dispatch_epoll(tv);
#endif
#ifdef IO_USE_SELECT
	return io_dispatch_select(tv);
#endif
#ifdef IO_USE_KQUEUE
	return io_dispatch_kqueue(tv);
#endif
#ifdef IO_USE_DEVPOLL
	return io_dispatch_devpoll(tv);
#endif
#ifdef IO_USE_POLL
	return io_dispatch_poll(tv);
#endif
	return -1;
}

/* call the callback function inside the struct matching fd */
static void
io_docallback(int fd, short what)
{
	io_event *i = io_event_get(fd);

	io_debug("io_docallback; fd, what", fd, what);

	if (i->callback) {	/* callback might be NULL if a previous
				   callback function called io_close
				   on this fd */
		i->callback(fd, (what & IO_ERROR) ? i->what : what);
	}
	/* if the error indicator is set, the callback receives the event(s)
	 * that were registered for this fd, not IO_ERROR itself */
}
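
/*
 * Note on the mapping above: when a dispatcher reports IO_ERROR, the
 * callback is invoked with the event mask that was registered for the fd
 * (i->what) instead of IO_ERROR. The error then surfaces as a failing
 * read()/write() inside the callback; see the my_read_cb sketch after
 * io_library_init(), which closes the fd in exactly that case.
 */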