/*
 * ngIRCd -- The Next Generation IRC Daemon
 * Copyright (c)2005-2006 Florian Westphal (westphal@foo.fh-furtwangen.de)
 * Copyright (c)2006-2014 Alexander Barton (alex@barton.de) and Contributors.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * Please read the files COPYING, README and AUTHORS for more information.
 */

#include "portab.h"

/**
 * @file
 * I/O abstraction interface.
 */
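
/*
 * Typical call sequence (a sketch only; exact usage is up to the caller):
 *
 *	io_library_init(100);			// pick and set up a backend
 *	io_setnonblock(fd);			// sockets should not block
 *	io_event_create(fd, IO_WANTREAD, cb);	// register cb(fd, what)
 *	for (;;) {
 *		struct timeval tv = { 1, 0 };	// wait at most one second
 *		io_dispatch(&tv);		// run callbacks for ready fds
 *	}
 */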

/* Extra debug messages in event add/delete/callback code: 0=off / 1=on */
#define DEBUG_IO 0

#include <assert.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>
#include <sys/stat.h>
#include <fcntl.h>

#include "array.h"
#include "io.h"
#include "log.h"
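
/* PROTOTYPES and the PARAMS() macro come from portab.h and keep these
 * declarations compilable on pre-ANSI (K&R) compilers. */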
typedef struct {
#ifdef PROTOTYPES
	void (*callback)(int, short);
#else
	void (*callback)();
#endif
	short what;
} io_event;

#define INIT_IOEVENT { NULL, 0 }
#define IO_ERROR 4
#define MAX_EVENTS 100

#ifdef HAVE_EPOLL_CREATE
# define IO_USE_EPOLL 1
# ifdef HAVE_SELECT
#  define IO_USE_SELECT 1
# endif
#else
# ifdef HAVE_KQUEUE
#  define IO_USE_KQUEUE 1
# else
#  ifdef HAVE_SYS_DEVPOLL_H
#   define IO_USE_DEVPOLL 1
#  else
#   if defined(HAVE_POLL) && defined(HAVE_POLL_H)
#    define IO_USE_POLL 1
#   else
#    ifdef HAVE_SELECT
#     define IO_USE_SELECT 1
#    else
#     error "no IO API available!?"
#    endif /* HAVE_SELECT */
#   endif /* HAVE_POLL */
#  endif /* HAVE_SYS_DEVPOLL_H */
# endif /* HAVE_KQUEUE */
#endif /* HAVE_EPOLL_CREATE */
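
/* The checks above select exactly one primary backend, preferring epoll
 * over kqueue over /dev/poll over poll over select. When epoll is
 * available, select is additionally compiled in as a runtime fallback
 * in case epoll_create() fails (see io_library_init_epoll below). */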

static bool library_initialized = false;

#ifdef IO_USE_EPOLL
#include <sys/epoll.h>

static int io_masterfd = -1;

static bool io_event_change_epoll(int fd, short what, const int action);
static int io_dispatch_epoll(struct timeval *tv);
#endif

#ifdef IO_USE_KQUEUE
#include <sys/types.h>
#include <sys/event.h>

static array io_evcache;
static int io_masterfd;

static int io_dispatch_kqueue(struct timeval *tv);
static bool io_event_change_kqueue(int, short, const int action);

#ifndef EV_SET
/* Taken from /usr/include/sys/event.h of FreeBSD 8.1 and required by all
 * platforms that have kqueue but lack EV_SET() -- for example FreeBSD 4. */
#define EV_SET(kevp, a, b, c, d, e, f) do {	\
	struct kevent *__kevp__ = (kevp);	\
	__kevp__->ident = (a);			\
	__kevp__->filter = (b);			\
	__kevp__->flags = (c);			\
	__kevp__->fflags = (d);			\
	__kevp__->data = (e);			\
	__kevp__->udata = (f);			\
} while(0)
#endif
#endif

#ifdef IO_USE_POLL
#include <poll.h>

static array pollfds;
static int poll_maxfd;

static bool io_event_change_poll PARAMS((int fd, short what));
#endif

#ifdef IO_USE_DEVPOLL
#include <sys/devpoll.h>

static int io_masterfd;

static bool io_event_change_devpoll(int fd, short what);
#endif

#ifdef IO_USE_SELECT
#include <sys/time.h>
#include "defines.h"	/* for conn.h */
#include "proc.h"	/* for PROC_STAT (needed by conf.h) */
#include "conn.h"	/* for CONN_ID (needed by conf.h) */
#include "conf.h"	/* for Conf_MaxConnections */

static fd_set readers;
static fd_set writers;

/*
 * this is the first argument for select(), i.e.
 * the largest fd registered, plus one.
 */
static int select_maxfd;

static int io_dispatch_select PARAMS((struct timeval *tv));
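
/* In a select()-only build there is no master descriptor; pinning
 * io_masterfd to -1 makes the "are we using epoll()?" tests below
 * constant, so they always take the select() path. */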
#ifndef IO_USE_EPOLL
#define io_masterfd -1
#endif
#endif /* IO_USE_SELECT */

static array io_events;

static void io_docallback PARAMS((int fd, short what));

#if DEBUG_IO
static void
io_debug(const char *s, int fd, int what)
{
	Log(LOG_DEBUG, "%s: %d, %d\n", s, fd, what);
}
#else
static inline void
io_debug(const char UNUSED *s, int UNUSED a, int UNUSED b)
{ /* NOTHING */ }
#endif

static io_event *
io_event_get(int fd)
{
	io_event *i;

	assert(fd >= 0);

	i = (io_event *) array_get(&io_events, sizeof(io_event), (size_t) fd);

	assert(i != NULL);

	return i;
}

#ifdef IO_USE_DEVPOLL
static int
io_dispatch_devpoll(struct timeval *tv)
{
	struct dvpoll dvp;
	time_t sec = tv->tv_sec * 1000;
	int i, ret, timeout = tv->tv_usec / 1000 + sec;	/* timeval -> milliseconds */
	short what;
	struct pollfd p[MAX_EVENTS];

	if (timeout < 0)
		timeout = 1000;

	dvp.dp_timeout = timeout;
	dvp.dp_nfds = MAX_EVENTS;
	dvp.dp_fds = p;
	ret = ioctl(io_masterfd, DP_POLL, &dvp);

	for (i = 0; i < ret; i++) {
		what = 0;
		if (p[i].revents & (POLLIN|POLLPRI))
			what = IO_WANTREAD;

		if (p[i].revents & POLLOUT)
			what |= IO_WANTWRITE;

		if (p[i].revents && !what) {
			/* other flag is set, probably POLLERR */
			what = IO_ERROR;
		}
		io_docallback(p[i].fd, what);
	}

	return ret;
}
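
/* /dev/poll is reconfigured by write()ing struct pollfd entries to the
 * master descriptor: the events mask updates the interest set for that
 * fd, and the special POLLREMOVE flag (used in io_close_devpoll below)
 * drops the fd from the set entirely. */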
static bool
io_event_change_devpoll(int fd, short what)
{
	struct pollfd p;

	p.events = 0;

	if (what & IO_WANTREAD)
		p.events = POLLIN | POLLPRI;
	if (what & IO_WANTWRITE)
		p.events |= POLLOUT;

	p.fd = fd;
	return write(io_masterfd, &p, sizeof p) == (ssize_t)sizeof p;
}

static void
io_close_devpoll(int fd)
{
	struct pollfd p;

	p.events = POLLREMOVE;
	p.fd = fd;
	write(io_masterfd, &p, sizeof p);
}

static void
io_library_init_devpoll(unsigned int eventsize)
{
	io_masterfd = open("/dev/poll", O_RDWR);
	if (io_masterfd >= 0)
		library_initialized = true;
	Log(LOG_INFO, "IO subsystem: /dev/poll (initial maxfd %u, masterfd %d).",
	    eventsize, io_masterfd);
}
#else
static inline void
io_close_devpoll(int UNUSED x)
{ /* NOTHING */ }

static inline void
io_library_init_devpoll(unsigned int UNUSED ev)
{ /* NOTHING */ }
#endif

#ifdef IO_USE_POLL
static int
io_dispatch_poll(struct timeval *tv)
{
	time_t sec = tv->tv_sec * 1000;
	int i, ret, timeout = tv->tv_usec / 1000 + sec;	/* timeval -> milliseconds */
	int fds_ready;
	short what;
	struct pollfd *p = array_start(&pollfds);

	if (timeout < 0)
		timeout = 1000;

	ret = poll(p, poll_maxfd + 1, timeout);
	if (ret <= 0)
		return ret;

	fds_ready = ret;
	for (i = 0; i <= poll_maxfd; i++) {
		what = 0;
		if (p[i].revents & (POLLIN|POLLPRI))
			what = IO_WANTREAD;

		if (p[i].revents & POLLOUT)
			what |= IO_WANTWRITE;

		if (p[i].revents && !what) {
			/* other flag is set, probably POLLERR */
			what = IO_ERROR;
		}

		if (what) {
			fds_ready--;
			io_docallback(i, what);
		}
		if (fds_ready <= 0)
			break;
	}

	return ret;
}

static bool
io_event_change_poll(int fd, short what)
{
	struct pollfd *p;
	short events = 0;

	if (what & IO_WANTREAD)
		events = POLLIN | POLLPRI;
	if (what & IO_WANTWRITE)
		events |= POLLOUT;

	p = array_alloc(&pollfds, sizeof *p, fd);
	if (p) {
		p->events = events;
		p->fd = fd;
		if (fd > poll_maxfd)
			poll_maxfd = fd;
	}
	return p != NULL;
}
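
/* Closing an fd only marks its slot as unused: poll() ignores entries
 * with a negative fd, and the slot can be recycled by a later
 * array_alloc(). poll_maxfd is lowered to the highest slot still in use. */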
static void
io_close_poll(int fd)
{
	struct pollfd *p;

	p = array_get(&pollfds, sizeof *p, fd);
	if (!p)
		return;

	p->fd = -1;
	if (fd == poll_maxfd) {
		while (poll_maxfd > 0) {
			--poll_maxfd;
			p = array_get(&pollfds, sizeof *p, poll_maxfd);
			if (p && p->fd >= 0)
				break;
		}
	}
}

static void
io_library_init_poll(unsigned int eventsize)
{
	struct pollfd *p;

	array_init(&pollfds);
	poll_maxfd = 0;
	Log(LOG_INFO, "IO subsystem: poll (initial maxfd %u).", eventsize);

	p = array_alloc(&pollfds, sizeof(struct pollfd), eventsize);
	if (p) {
		unsigned i;
		p = array_start(&pollfds);
		for (i = 0; i < eventsize; i++)
			p[i].fd = -1;
		library_initialized = true;
	}
}
#else
static inline void
io_close_poll(int UNUSED x)
{ /* NOTHING */ }

static inline void
io_library_init_poll(unsigned int UNUSED ev)
{ /* NOTHING */ }
#endif

#ifdef IO_USE_SELECT
static int
io_dispatch_select(struct timeval *tv)
{
	fd_set readers_tmp;
	fd_set writers_tmp;
	short what;
	int ret, i;
	int fds_ready;

	readers_tmp = readers;
	writers_tmp = writers;

	ret = select(select_maxfd + 1, &readers_tmp, &writers_tmp, NULL, tv);
	if (ret <= 0)
		return ret;

	fds_ready = ret;

	for (i = 0; i <= select_maxfd; i++) {
		what = 0;
		if (FD_ISSET(i, &readers_tmp)) {
			what = IO_WANTREAD;
			fds_ready--;
		}

		if (FD_ISSET(i, &writers_tmp)) {
			what |= IO_WANTWRITE;
			fds_ready--;
		}

		if (what)
			io_docallback(i, what);
		if (fds_ready <= 0)
			break;
	}

	return ret;
}

static void
io_library_init_select(unsigned int eventsize)
{
	if (library_initialized)
		return;

	Log(LOG_INFO, "IO subsystem: select (initial maxfd %u).", eventsize);

	FD_ZERO(&readers);
	FD_ZERO(&writers);

#ifdef FD_SETSIZE
	if (Conf_MaxConnections >= (int)FD_SETSIZE) {
		Log(LOG_WARNING,
		    "MaxConnections (%d) exceeds limit (%u), changed MaxConnections to %u.",
		    Conf_MaxConnections, FD_SETSIZE, FD_SETSIZE - 1);
		Conf_MaxConnections = FD_SETSIZE - 1;
	}
#else
	Log(LOG_WARNING,
	    "FD_SETSIZE undefined, don't know how many descriptors select() can handle on your platform ...");
#endif /* FD_SETSIZE */

	library_initialized = true;
}

static void
io_close_select(int fd)
{
	io_event *i;

	if (io_masterfd >= 0)	/* Are we using epoll()? */
		return;

	FD_CLR(fd, &writers);
	FD_CLR(fd, &readers);

	i = io_event_get(fd);
	if (!i)
		return;

	if (fd == select_maxfd) {
		while (select_maxfd > 0) {
			--select_maxfd;	/* find largest fd */
			i = io_event_get(select_maxfd);
			if (i && i->callback)
				break;
		}
	}
}
#else
static inline void
io_library_init_select(unsigned int UNUSED x)
{ /* NOTHING */ }

static inline void
io_close_select(int UNUSED x)
{ /* NOTHING */ }
#endif /* SELECT */

#ifdef IO_USE_EPOLL
static bool
io_event_change_epoll(int fd, short what, const int action)
{
	struct epoll_event ev = { 0, {0} };
	ev.data.fd = fd;

	if (what & IO_WANTREAD)
		ev.events = EPOLLIN | EPOLLPRI;
	if (what & IO_WANTWRITE)
		ev.events |= EPOLLOUT;

	return epoll_ctl(io_masterfd, action, fd, &ev) == 0;
}

static int
io_dispatch_epoll(struct timeval *tv)
{
	time_t sec = tv->tv_sec * 1000;
	int i, ret, timeout = tv->tv_usec / 1000 + sec;	/* timeval -> milliseconds */
	struct epoll_event epoll_ev[MAX_EVENTS];
	short type;

	if (timeout < 0)
		timeout = 1000;

	ret = epoll_wait(io_masterfd, epoll_ev, MAX_EVENTS, timeout);

	for (i = 0; i < ret; i++) {
		type = 0;
		if (epoll_ev[i].events & (EPOLLERR | EPOLLHUP))
			type = IO_ERROR;

		if (epoll_ev[i].events & (EPOLLIN | EPOLLPRI))
			type |= IO_WANTREAD;

		if (epoll_ev[i].events & EPOLLOUT)
			type |= IO_WANTWRITE;

		io_docallback(epoll_ev[i].data.fd, type);
	}

	return ret;
}

static void
io_library_init_epoll(unsigned int eventsize)
{
	int ecreate_hint = (int)eventsize;
	if (ecreate_hint <= 0)
		ecreate_hint = 128;

	io_masterfd = epoll_create(ecreate_hint);
	if (io_masterfd >= 0) {
		library_initialized = true;
		Log(LOG_INFO,
		    "IO subsystem: epoll (hint size %d, initial maxfd %u, masterfd %d).",
		    ecreate_hint, eventsize, io_masterfd);
		return;
	}
#ifdef IO_USE_SELECT
	Log(LOG_INFO, "Can't initialize epoll() IO interface, falling back to select() ...");
#endif
}
#else
static inline void
io_library_init_epoll(unsigned int UNUSED ev)
{ /* NOTHING */ }
#endif /* IO_USE_EPOLL */

#ifdef IO_USE_KQUEUE
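
/* kevent() changes are not submitted one by one: they are collected in
 * the io_evcache array and flushed in a single kevent() call once the
 * cache holds 100 entries (see io_event_change_kqueue), on the next
 * dispatch, or before an fd is closed (see io_close). */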
static bool
io_event_kqueue_commit_cache(void)
{
	struct kevent *events;
	bool ret;
	int len = (int) array_length(&io_evcache, sizeof (struct kevent));

	if (!len) /* nothing to do */
		return true;

	assert(len > 0);

	if (len < 0) {
		array_free(&io_evcache);
		return false;
	}

	events = array_start(&io_evcache);

	assert(events != NULL);

	ret = kevent(io_masterfd, events, len, NULL, 0, NULL) == 0;
	if (ret)
		array_trunc(&io_evcache);
	return ret;
}

static bool
io_event_change_kqueue(int fd, short what, const int action)
{
	struct kevent kev;
	bool ret = true;

	if (what & IO_WANTREAD) {
		EV_SET(&kev, fd, EVFILT_READ, action, 0, 0, 0);
		ret = array_catb(&io_evcache, (char*) &kev, sizeof (kev));
		if (!ret)
			ret = kevent(io_masterfd, &kev, 1, NULL, 0, NULL) == 0;
	}

	if (ret && (what & IO_WANTWRITE)) {
		EV_SET(&kev, fd, EVFILT_WRITE, action, 0, 0, 0);
		ret = array_catb(&io_evcache, (char*) &kev, sizeof (kev));
		if (!ret)
			ret = kevent(io_masterfd, &kev, 1, NULL, 0, NULL) == 0;
	}

	if (array_length(&io_evcache, sizeof kev) >= 100)
		io_event_kqueue_commit_cache();
	return ret;
}

static int
io_dispatch_kqueue(struct timeval *tv)
{
	int i, ret;
	struct kevent kev[MAX_EVENTS];
	struct kevent *newevents;
	struct timespec ts;
	int newevents_len;

	ts.tv_sec = tv->tv_sec;
	ts.tv_nsec = tv->tv_usec * 1000;

	newevents_len = (int) array_length(&io_evcache, sizeof (struct kevent));
	newevents = (newevents_len > 0) ? array_start(&io_evcache) : NULL;
	assert(newevents_len >= 0);

	ret = kevent(io_masterfd, newevents, newevents_len, kev, MAX_EVENTS, &ts);
	if (newevents && ret != -1)
		array_trunc(&io_evcache);

	for (i = 0; i < ret; i++) {
		io_debug("dispatch_kqueue: fd, kev.flags", (int)kev[i].ident, kev[i].flags);
		if (kev[i].flags & (EV_EOF|EV_ERROR)) {
			if (kev[i].flags & EV_ERROR)
				Log(LOG_ERR, "kevent fd %d: EV_ERROR (%s)",
				    (int)kev[i].ident, strerror((int)kev[i].data));
			io_docallback((int)kev[i].ident, IO_ERROR);
			continue;
		}

		switch (kev[i].filter) {
		case EVFILT_READ:
			io_docallback((int)kev[i].ident, IO_WANTREAD);
			break;
		case EVFILT_WRITE:
			io_docallback((int)kev[i].ident, IO_WANTWRITE);
			break;
		default:
			LogDebug("Unknown kev.filter number %d for fd %d",
				 kev[i].filter, (int)kev[i].ident);
			/* Fall through */
		case EV_ERROR:
			io_docallback((int)kev[i].ident, IO_ERROR);
			break;
		}
	}

	return ret;
}

static void
io_library_init_kqueue(unsigned int eventsize)
{
	io_masterfd = kqueue();

	Log(LOG_INFO,
	    "IO subsystem: kqueue (initial maxfd %u, masterfd %d).",
	    eventsize, io_masterfd);

	if (io_masterfd >= 0)
		library_initialized = true;
}
#else
static inline void
io_library_init_kqueue(unsigned int UNUSED ev)
{ /* NOTHING */ }
#endif
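
/* Note that only the io_library_init_*() variant selected at compile
 * time is a real implementation; all others are empty inline stubs.
 * Calling them in sequence therefore initializes exactly one backend
 * (plus select() as the runtime fallback in epoll builds). */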
bool
io_library_init(unsigned int eventsize)
{
	if (library_initialized)
		return true;

	if ((eventsize > 0) && !array_alloc(&io_events, sizeof(io_event), (size_t)eventsize))
		eventsize = 0;

	io_library_init_epoll(eventsize);
	io_library_init_kqueue(eventsize);
	io_library_init_devpoll(eventsize);
	io_library_init_poll(eventsize);
	io_library_init_select(eventsize);

	return library_initialized;
}

void
io_library_shutdown(void)
{
#ifdef IO_USE_SELECT
	FD_ZERO(&readers);
	FD_ZERO(&writers);
#endif
#if defined(IO_USE_EPOLL) || defined(IO_USE_KQUEUE) || defined(IO_USE_DEVPOLL)
	if (io_masterfd >= 0)
		close(io_masterfd);
	io_masterfd = -1;
#endif
#ifdef IO_USE_KQUEUE
	array_free(&io_evcache);
#endif
	library_initialized = false;
}

bool
io_event_setcb(int fd, void (*cbfunc) (int, short))
{
	io_event *i = io_event_get(fd);
	if (!i)
		return false;

	i->callback = cbfunc;
	return true;
}
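
/* Register a new fd with the backend selected at compile time. In epoll
 * builds, io_masterfd < 0 means epoll_create() failed and the select()
 * fallback is active, so registration goes through io_event_add()
 * instead. */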
static bool
backend_create_ev(int fd, short what)
{
	bool ret = false;
#ifdef IO_USE_DEVPOLL
	ret = io_event_change_devpoll(fd, what);
#endif
#ifdef IO_USE_POLL
	ret = io_event_change_poll(fd, what);
#endif
#ifdef IO_USE_EPOLL
	ret = io_event_change_epoll(fd, what, EPOLL_CTL_ADD);
#endif
#ifdef IO_USE_KQUEUE
	ret = io_event_change_kqueue(fd, what, EV_ADD|EV_ENABLE);
#endif
#ifdef IO_USE_SELECT
	if (io_masterfd < 0)
		ret = io_event_add(fd, what);
#endif
	return ret;
}

bool
io_event_create(int fd, short what, void (*cbfunc) (int, short))
{
	bool ret;
	io_event *i;

	assert(fd >= 0);
#if defined(IO_USE_SELECT) && defined(FD_SETSIZE)
	if (io_masterfd < 0 && fd >= FD_SETSIZE) {
		Log(LOG_ERR,
		    "fd %d exceeds FD_SETSIZE (%u) (select can't handle more file descriptors)",
		    fd, FD_SETSIZE);
		return false;
	}
#endif
	i = (io_event *) array_alloc(&io_events, sizeof(io_event), (size_t) fd);
	if (!i) {
		Log(LOG_WARNING,
		    "array_alloc failed: could not allocate space for %d io_event structures",
		    fd);
		return false;
	}

	i->callback = cbfunc;
	i->what = 0;
	ret = backend_create_ev(fd, what);
	if (ret)
		i->what = what;
	return ret;
}

bool
io_event_add(int fd, short what)
{
	io_event *i = io_event_get(fd);

	if (!i)
		return false;

	if ((i->what & what) == what) /* event type is already registered */
		return true;

	io_debug("io_event_add: fd, what", fd, what);

	i->what |= what;
#ifdef IO_USE_EPOLL
	if (io_masterfd >= 0)
		return io_event_change_epoll(fd, i->what, EPOLL_CTL_MOD);
#endif
#ifdef IO_USE_KQUEUE
	return io_event_change_kqueue(fd, what, EV_ADD | EV_ENABLE);
#endif
#ifdef IO_USE_DEVPOLL
	return io_event_change_devpoll(fd, i->what);
#endif
#ifdef IO_USE_POLL
	return io_event_change_poll(fd, i->what);
#endif
#ifdef IO_USE_SELECT
	if (fd > select_maxfd)
		select_maxfd = fd;

	if (what & IO_WANTREAD)
		FD_SET(fd, &readers);
	if (what & IO_WANTWRITE)
		FD_SET(fd, &writers);

	return true;
#endif
	return false;
}

bool
io_setnonblock(int fd)
{
	int flags = fcntl(fd, F_GETFL);
	if (flags == -1)
		return false;
#ifndef O_NONBLOCK
#define O_NONBLOCK O_NDELAY
#endif
	flags |= O_NONBLOCK;

	return fcntl(fd, F_SETFL, flags) == 0;
}

bool
io_setcloexec(int fd)
{
	int flags = fcntl(fd, F_GETFD);
	if (flags == -1)
		return false;
#ifdef FD_CLOEXEC
	flags |= FD_CLOEXEC;
#endif
	return fcntl(fd, F_SETFD, flags) == 0;
}

bool
io_close(int fd)
{
	io_event *i;

	i = io_event_get(fd);
#ifdef IO_USE_KQUEUE
	if (array_length(&io_evcache, sizeof (struct kevent))) /* pending data in cache? */
		io_event_kqueue_commit_cache();

	/* Both kqueue and epoll remove the fd from all sets automatically on
	 * the last close of the descriptor. Since we don't know whether this
	 * is the last close, we have to remove the fd from the sets
	 * explicitly. */
	if (i) {
		io_event_change_kqueue(fd, i->what, EV_DELETE);
		io_event_kqueue_commit_cache();
	}
#endif
	io_close_devpoll(fd);
	io_close_poll(fd);
	io_close_select(fd);
#ifdef IO_USE_EPOLL
	io_event_change_epoll(fd, 0, EPOLL_CTL_DEL);
#endif
	if (i) {
		i->callback = NULL;
		i->what = 0;
	}
	return close(fd) == 0;
}

bool
io_event_del(int fd, short what)
{
	io_event *i = io_event_get(fd);

	io_debug("io_event_del: trying to delete eventtype; fd, what", fd, what);
	if (!i)
		return false;

	if (!(i->what & what)) /* event is already disabled */
		return true;

	i->what &= ~what;
#ifdef IO_USE_DEVPOLL
	return io_event_change_devpoll(fd, i->what);
#endif
#ifdef IO_USE_POLL
	return io_event_change_poll(fd, i->what);
#endif
#ifdef IO_USE_EPOLL
	if (io_masterfd >= 0)
		return io_event_change_epoll(fd, i->what, EPOLL_CTL_MOD);
#endif
#ifdef IO_USE_KQUEUE
	return io_event_change_kqueue(fd, what, EV_DISABLE);
#endif
#ifdef IO_USE_SELECT
	if (what & IO_WANTWRITE)
		FD_CLR(fd, &writers);
	if (what & IO_WANTREAD)
		FD_CLR(fd, &readers);

	return true;
#endif
	return false;
}
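
/* Dispatch one round of events. In an epoll build that fell back to
 * select() at runtime (io_masterfd < 0), the epoll branch is skipped
 * and the select() implementation is used instead. */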
int
io_dispatch(struct timeval *tv)
{
#ifdef IO_USE_EPOLL
	if (io_masterfd >= 0)
		return io_dispatch_epoll(tv);
#endif
#ifdef IO_USE_SELECT
	return io_dispatch_select(tv);
#endif
#ifdef IO_USE_KQUEUE
	return io_dispatch_kqueue(tv);
#endif
#ifdef IO_USE_DEVPOLL
	return io_dispatch_devpoll(tv);
#endif
#ifdef IO_USE_POLL
	return io_dispatch_poll(tv);
#endif
	return -1;
}

/* call the callback function inside the struct matching fd */
static void
io_docallback(int fd, short what)
{
	io_event *i = io_event_get(fd);

	io_debug("io_docallback; fd, what", fd, what);

	if (i->callback) {	/* callback might be NULL if a previous callback
				 * function called io_close() on this fd */
		i->callback(fd, (what & IO_ERROR) ? i->what : what);
	}
	/* if the error indicator is set, we hand the callback the event(s)
	 * that were registered for this fd */
}