/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * Please read the files COPYING, README and AUTHORS for more information.
 *
 * Copyright (c) 2005 Florian Westphal (westphal@foo.fh-furtwangen.de)
 */

#include "portab.h"

/**
 * @file
 * I/O abstraction interface.
 */

#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>
#include <fcntl.h>

#include "array.h"
#include "io.h"
#include "log.h"

/* Enables extra debug messages in event add/delete/callback code. */
/* #define DEBUG_IO */
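
/* Per-fd event registration: "callback" is invoked by io_docallback(), and
 * "what" holds the bitmask of events (IO_WANTREAD/IO_WANTWRITE) currently
 * registered for this file descriptor. */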
typedef struct {
#ifdef PROTOTYPES
    void (*callback)(int, short);
#else
    void (*callback)();
#endif
    short what;
} io_event;
#define INIT_IOEVENT { NULL, 0 } /* matches the two members of io_event */
#define IO_ERROR     4
#define MAX_EVENTS   100

#ifdef HAVE_EPOLL_CREATE
# define IO_USE_EPOLL 1
# ifdef HAVE_SELECT
#  define IO_USE_SELECT 1
# endif
#else
# ifdef HAVE_KQUEUE
#  define IO_USE_KQUEUE 1
# else
#  ifdef HAVE_SYS_DEVPOLL_H
#   define IO_USE_DEVPOLL 1
#  else
#   if defined(HAVE_POLL) && defined(HAVE_POLL_H)
#    define IO_USE_POLL 1
#   else
#    ifdef HAVE_SELECT
#     define IO_USE_SELECT 1
#    else
#     error "no IO API available!?"
#    endif /* HAVE_SELECT */
#   endif /* HAVE_POLL */
#  endif /* HAVE_SYS_DEVPOLL_H */
# endif /* HAVE_KQUEUE */
#endif /* HAVE_EPOLL_CREATE */
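
/*
 * Exactly one backend is selected above, in order of preference:
 * epoll > kqueue > /dev/poll > poll > select. When epoll is chosen,
 * IO_USE_SELECT is defined as well (if available) so that the code can
 * fall back to select() at runtime if epoll_create() fails.
 */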
static bool library_initialized = false;

#ifdef IO_USE_EPOLL
#include <sys/epoll.h>

static int io_masterfd = -1;

static bool io_event_change_epoll(int fd, short what, const int action);
static int io_dispatch_epoll(struct timeval *tv);
#endif

#ifdef IO_USE_KQUEUE
#include <sys/types.h>
#include <sys/event.h>

static array io_evcache;
static int io_masterfd;

static int io_dispatch_kqueue(struct timeval *tv);
static bool io_event_change_kqueue(int, short, const int action);

#ifndef EV_SET
/* Taken from /usr/include/sys/event.h of FreeBSD 8.1 and required by all
 * platforms that have kqueue but lack EV_SET() -- for example FreeBSD 4. */
#define EV_SET(kevp, a, b, c, d, e, f) do { \
    struct kevent *__kevp__ = (kevp); \
    __kevp__->ident = (a);  \
    __kevp__->filter = (b); \
    __kevp__->flags = (c);  \
    __kevp__->fflags = (d); \
    __kevp__->data = (e);   \
    __kevp__->udata = (f);  \
} while(0)
#endif
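
/* The fallback macro is used exactly like the system-provided EV_SET(),
 * e.g. EV_SET(&kev, fd, EVFILT_READ, EV_ADD|EV_ENABLE, 0, 0, 0);
 * this mirrors the calls in io_event_change_kqueue() below. */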
#endif /* IO_USE_KQUEUE */

#ifdef IO_USE_POLL
#include <poll.h>

static array pollfds;
static int poll_maxfd;

static bool io_event_change_poll PARAMS((int fd, short what));
#endif

#ifdef IO_USE_DEVPOLL
#include <sys/devpoll.h>

static int io_masterfd;

static bool io_event_change_devpoll(int fd, short what);
#endif

#ifdef IO_USE_SELECT
#include "defines.h" /* for conn.h */
#include "proc.h"    /* for PROC_STAT (needed by conf.h) */
#include "conn.h"    /* for CONN_ID (needed by conf.h) */
#include "conf.h"    /* for Conf_MaxConnections */

static fd_set readers;
static fd_set writers;

/*
 * The largest fd currently registered; select_maxfd + 1 is passed
 * as the first argument to select().
 */
static int select_maxfd;

static int io_dispatch_select PARAMS((struct timeval *tv));

#ifndef IO_USE_EPOLL
#define io_masterfd -1
#endif
#endif /* IO_USE_SELECT */

static array io_events;

static void io_docallback PARAMS((int fd, short what));

#ifdef DEBUG_IO
static void
io_debug(const char *s, int fd, int what)
{
    Log(LOG_DEBUG, "%s: %d, %d\n", s, fd, what);
}
#else
static inline void
io_debug(const char UNUSED *s, int UNUSED a, int UNUSED b)
{ /* NOTHING */ }
#endif
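
/* Return the io_event structure registered for fd; asserts that the
 * io_events array has a slot for it. */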
static io_event *
io_event_get(int fd)
{
    io_event *i;

    assert(fd >= 0);

    i = (io_event *) array_get(&io_events, sizeof(io_event), (size_t) fd);

    assert(i != NULL);
    return i;
}
#ifdef IO_USE_DEVPOLL
static int
io_dispatch_devpoll(struct timeval *tv)
{
    struct dvpoll dvp;
    time_t sec = tv->tv_sec * 1000;
    int i, ret, timeout = tv->tv_usec / 1000 + sec; /* in milliseconds */
    short what;
    struct pollfd p[MAX_EVENTS];

    if (timeout < 0)
        timeout = 1000;

    dvp.dp_timeout = timeout;
    dvp.dp_nfds = MAX_EVENTS;
    dvp.dp_fds = p;
    ret = ioctl(io_masterfd, DP_POLL, &dvp);

    for (i = 0; i < ret; i++) {
        what = 0;
        if (p[i].revents & (POLLIN|POLLPRI))
            what = IO_WANTREAD;

        if (p[i].revents & POLLOUT)
            what |= IO_WANTWRITE;

        if (p[i].revents && !what) {
            /* other flag is set, probably POLLERR */
            what = IO_ERROR;
        }
        io_docallback(p[i].fd, what);
    }

    return ret;
}
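
/* (Un)register interest in an fd: /dev/poll is driven by writing struct
 * pollfd records to the master fd; writing POLLREMOVE (see
 * io_close_devpoll) drops an fd from the set. */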
static bool
io_event_change_devpoll(int fd, short what)
{
    struct pollfd p;

    p.events = 0;

    if (what & IO_WANTREAD)
        p.events = POLLIN | POLLPRI;
    if (what & IO_WANTWRITE)
        p.events |= POLLOUT;

    p.fd = fd;
    return write(io_masterfd, &p, sizeof p) == (ssize_t)sizeof p;
}

static void
io_close_devpoll(int fd)
{
    struct pollfd p;

    p.events = POLLREMOVE;
    p.fd = fd;
    write(io_masterfd, &p, sizeof p);
}

static void
io_library_init_devpoll(unsigned int eventsize)
{
    io_masterfd = open("/dev/poll", O_RDWR);
    if (io_masterfd >= 0)
        library_initialized = true;
    Log(LOG_INFO, "IO subsystem: /dev/poll (initial maxfd %u, masterfd %d).",
        eventsize, io_masterfd);
}
#else
static inline void
io_close_devpoll(int UNUSED x)
{ /* NOTHING */ }

static inline void
io_library_init_devpoll(unsigned int UNUSED ev)
{ /* NOTHING */ }
#endif
#ifdef IO_USE_POLL
static int
io_dispatch_poll(struct timeval *tv)
{
    time_t sec = tv->tv_sec * 1000;
    int i, ret, timeout = tv->tv_usec / 1000 + sec; /* in milliseconds */
    int fds_ready;
    short what;
    struct pollfd *p = array_start(&pollfds);

    if (timeout < 0)
        timeout = 1000;

    ret = poll(p, poll_maxfd + 1, timeout);
    if (ret <= 0)
        return ret;

    fds_ready = ret;
    for (i = 0; i <= poll_maxfd; i++) {
        what = 0;
        if (p[i].revents & (POLLIN|POLLPRI))
            what = IO_WANTREAD;

        if (p[i].revents & POLLOUT)
            what |= IO_WANTWRITE;

        if (p[i].revents && !what) {
            /* other flag is set, probably POLLERR */
            what = IO_ERROR;
        }
        if (what) {
            fds_ready--;
            io_docallback(i, what);
        }
        if (fds_ready <= 0)
            break;
    }

    return ret;
}
static bool
io_event_change_poll(int fd, short what)
{
    struct pollfd *p;
    short events = 0;

    if (what & IO_WANTREAD)
        events = POLLIN | POLLPRI;
    if (what & IO_WANTWRITE)
        events |= POLLOUT;

    p = array_alloc(&pollfds, sizeof *p, fd);
    if (p) {
        p->events = events;
        p->fd = fd;
        if (fd > poll_maxfd)
            poll_maxfd = fd;
    }
    return p != NULL;
}

static void
io_close_poll(int fd)
{
    struct pollfd *p;
    p = array_get(&pollfds, sizeof *p, fd);
    if (!p) return;

    p->fd = -1;
    if (fd == poll_maxfd) {
        while (poll_maxfd > 0) {
            --poll_maxfd;
            p = array_get(&pollfds, sizeof *p, poll_maxfd);
            if (p && p->fd >= 0)
                break;
        }
    }
}

static void
io_library_init_poll(unsigned int eventsize)
{
    struct pollfd *p;
    array_init(&pollfds);
    poll_maxfd = 0;
    Log(LOG_INFO, "IO subsystem: poll (initial maxfd %u).",
        eventsize);
    p = array_alloc(&pollfds, sizeof(struct pollfd), eventsize);
    if (p) {
        unsigned i;
        p = array_start(&pollfds);
        for (i = 0; i < eventsize; i++)
            p[i].fd = -1;
        library_initialized = true;
    }
}
#else
static inline void
io_close_poll(int UNUSED x)
{ /* NOTHING */ }

static inline void
io_library_init_poll(unsigned int UNUSED ev)
{ /* NOTHING */ }
#endif
#ifdef IO_USE_SELECT
static int
io_dispatch_select(struct timeval *tv)
{
    fd_set readers_tmp;
    fd_set writers_tmp;
    short what;
    int ret, i;
    int fds_ready;

    readers_tmp = readers;
    writers_tmp = writers;

    ret = select(select_maxfd + 1, &readers_tmp, &writers_tmp, NULL, tv);
    if (ret <= 0)
        return ret;

    fds_ready = ret;

    for (i = 0; i <= select_maxfd; i++) {
        what = 0;
        if (FD_ISSET(i, &readers_tmp)) {
            what = IO_WANTREAD;
            fds_ready--;
        }

        if (FD_ISSET(i, &writers_tmp)) {
            what |= IO_WANTWRITE;
            fds_ready--;
        }

        if (what)
            io_docallback(i, what);
        if (fds_ready <= 0)
            break;
    }

    return ret;
}
static void
io_library_init_select(unsigned int eventsize)
{
    if (library_initialized)
        return;
    Log(LOG_INFO, "IO subsystem: select (initial maxfd %u).",
        eventsize);
    FD_ZERO(&readers);
    FD_ZERO(&writers);
#ifdef FD_SETSIZE
    if (Conf_MaxConnections >= (int)FD_SETSIZE) {
        Log(LOG_WARNING,
            "MaxConnections (%d) exceeds limit (%u), changed MaxConnections to %u.",
            Conf_MaxConnections, FD_SETSIZE, FD_SETSIZE - 1);

        Conf_MaxConnections = FD_SETSIZE - 1;
    }
#else
    Log(LOG_WARNING,
        "FD_SETSIZE undefined, don't know how many descriptors select() can handle on your platform ...");
#endif /* FD_SETSIZE */
    library_initialized = true;
}

static void
io_close_select(int fd)
{
    io_event *i;

    if (io_masterfd >= 0) /* Are we using epoll()? */
        return;

    FD_CLR(fd, &writers);
    FD_CLR(fd, &readers);

    i = io_event_get(fd);
    if (!i) return;

    if (fd == select_maxfd) {
        while (select_maxfd > 0) {
            --select_maxfd; /* find largest fd */
            i = io_event_get(select_maxfd);
            if (i && i->callback) break;
        }
    }
}
#else
static inline void
io_library_init_select(unsigned int UNUSED ev)
{ /* NOTHING */ }

static inline void
io_close_select(int UNUSED x)
{ /* NOTHING */ }
#endif /* IO_USE_SELECT */
#ifdef IO_USE_EPOLL
static bool
io_event_change_epoll(int fd, short what, const int action)
{
    struct epoll_event ev = { 0, {0} };
    ev.data.fd = fd;

    if (what & IO_WANTREAD)
        ev.events = EPOLLIN | EPOLLPRI;
    if (what & IO_WANTWRITE)
        ev.events |= EPOLLOUT;

    return epoll_ctl(io_masterfd, action, fd, &ev) == 0;
}

static int
io_dispatch_epoll(struct timeval *tv)
{
    time_t sec = tv->tv_sec * 1000;
    int i, ret, timeout = tv->tv_usec / 1000 + sec; /* in milliseconds */
    struct epoll_event epoll_ev[MAX_EVENTS];
    short type;

    if (timeout < 0)
        timeout = 1000;

    ret = epoll_wait(io_masterfd, epoll_ev, MAX_EVENTS, timeout);

    for (i = 0; i < ret; i++) {
        type = 0;
        if (epoll_ev[i].events & (EPOLLERR | EPOLLHUP))
            type = IO_ERROR;

        if (epoll_ev[i].events & (EPOLLIN | EPOLLPRI))
            type |= IO_WANTREAD;

        if (epoll_ev[i].events & EPOLLOUT)
            type |= IO_WANTWRITE;

        io_docallback(epoll_ev[i].data.fd, type);
    }

    return ret;
}

static void
io_library_init_epoll(unsigned int eventsize)
{
    int ecreate_hint = (int)eventsize;
    if (ecreate_hint <= 0)
        ecreate_hint = 128;

    io_masterfd = epoll_create(ecreate_hint);
    if (io_masterfd >= 0) {
        library_initialized = true;
        Log(LOG_INFO,
            "IO subsystem: epoll (hint size %d, initial maxfd %u, masterfd %d).",
            ecreate_hint, eventsize, io_masterfd);
        return;
    }
#ifdef IO_USE_SELECT
    Log(LOG_INFO, "Can't initialize epoll() IO interface, falling back to select() ...");
#endif
}
#else
static inline void
io_library_init_epoll(unsigned int UNUSED ev)
{ /* NOTHING */ }
#endif /* IO_USE_EPOLL */
#ifdef IO_USE_KQUEUE
static bool
io_event_kqueue_commit_cache(void)
{
    struct kevent *events;
    bool ret;
    int len = (int) array_length(&io_evcache, sizeof (struct kevent));

    if (!len) /* nothing to do */
        return true;

    assert(len > 0);
    if (len < 0) { /* defensive check for NDEBUG builds (assert compiled out) */
        array_free(&io_evcache);
        return false;
    }

    events = array_start(&io_evcache);
    assert(events != NULL);

    ret = kevent(io_masterfd, events, len, NULL, 0, NULL) == 0;
    if (ret)
        array_trunc(&io_evcache);
    return ret;
}
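
/* Event changes are buffered in io_evcache and submitted in batches: the
 * cache is flushed once it reaches 100 entries (see below), before an fd
 * is closed (see io_close), and implicitly by the next kevent() call in
 * io_dispatch_kqueue(). */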
static bool
io_event_change_kqueue(int fd, short what, const int action)
{
    struct kevent kev;
    bool ret = true;

    if (what & IO_WANTREAD) {
        EV_SET(&kev, fd, EVFILT_READ, action, 0, 0, 0);
        ret = array_catb(&io_evcache, (char *) &kev, sizeof (kev));
        if (!ret) /* could not cache the change, apply it directly */
            ret = kevent(io_masterfd, &kev, 1, NULL, 0, NULL) == 0;
    }

    if (ret && (what & IO_WANTWRITE)) {
        EV_SET(&kev, fd, EVFILT_WRITE, action, 0, 0, 0);
        ret = array_catb(&io_evcache, (char *) &kev, sizeof (kev));
        if (!ret)
            ret = kevent(io_masterfd, &kev, 1, NULL, 0, NULL) == 0;
    }

    if (array_length(&io_evcache, sizeof kev) >= 100)
        io_event_kqueue_commit_cache();
    return ret;
}
static int
io_dispatch_kqueue(struct timeval *tv)
{
    int i, ret;
    struct kevent kev[MAX_EVENTS];
    struct kevent *newevents;
    struct timespec ts;
    int newevents_len;

    ts.tv_sec = tv->tv_sec;
    ts.tv_nsec = tv->tv_usec * 1000;

    newevents_len = (int) array_length(&io_evcache, sizeof (struct kevent));
    newevents = (newevents_len > 0) ? array_start(&io_evcache) : NULL;
    assert(newevents_len >= 0);

    ret = kevent(io_masterfd, newevents, newevents_len, kev, MAX_EVENTS, &ts);
    if (newevents && ret != -1)
        array_trunc(&io_evcache);

    for (i = 0; i < ret; i++) {
        io_debug("dispatch_kqueue: fd, kev.flags", (int)kev[i].ident, kev[i].flags);
        if (kev[i].flags & (EV_EOF|EV_ERROR)) {
            if (kev[i].flags & EV_ERROR)
                Log(LOG_ERR, "kevent fd %d: EV_ERROR (%s)",
                    (int)kev[i].ident, strerror((int)kev[i].data));
            io_docallback((int)kev[i].ident, IO_ERROR);
            continue;
        }

        switch (kev[i].filter) {
        case EVFILT_READ:
            io_docallback((int)kev[i].ident, IO_WANTREAD);
            break;
        case EVFILT_WRITE:
            io_docallback((int)kev[i].ident, IO_WANTWRITE);
            break;
        default:
            LogDebug("Unknown kev.filter number %d for fd %d",
                kev[i].filter, (int)kev[i].ident);
            /* Fall through */
        case EV_ERROR:
            io_docallback((int)kev[i].ident, IO_ERROR);
            break;
        }
    }

    return ret;
}
static void
io_library_init_kqueue(unsigned int eventsize)
{
    io_masterfd = kqueue();

    Log(LOG_INFO,
        "IO subsystem: kqueue (initial maxfd %u, masterfd %d).",
        eventsize, io_masterfd);

    if (io_masterfd >= 0)
        library_initialized = true;
}
#else
static inline void
io_library_init_kqueue(unsigned int UNUSED ev)
{ /* NOTHING */ }
#endif /* IO_USE_KQUEUE */
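
/**
 * Initialize the IO layer. Only one backend is compiled in (see the #ifdef
 * ladder at the top of this file); the init functions of the other backends
 * are empty inline stubs. When epoll and select are both available, select()
 * serves as a runtime fallback in case epoll_create() fails.
 */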
bool
io_library_init(unsigned int eventsize)
{
    if (library_initialized)
        return true;

    if ((eventsize > 0) && !array_alloc(&io_events, sizeof(io_event), (size_t)eventsize))
        eventsize = 0;

    io_library_init_epoll(eventsize);
    io_library_init_kqueue(eventsize);
    io_library_init_devpoll(eventsize);
    io_library_init_poll(eventsize);
    io_library_init_select(eventsize);

    return library_initialized;
}
void
io_library_shutdown(void)
{
#ifdef IO_USE_SELECT
    FD_ZERO(&readers);
    FD_ZERO(&writers);
#endif
#if defined(IO_USE_EPOLL) || defined(IO_USE_KQUEUE) || defined(IO_USE_DEVPOLL)
    if (io_masterfd >= 0)
        close(io_masterfd);
    io_masterfd = -1;
#endif
#ifdef IO_USE_KQUEUE
    array_free(&io_evcache);
#endif
    library_initialized = false;
}
bool
io_event_setcb(int fd, void (*cbfunc) (int, short))
{
    io_event *i = io_event_get(fd);
    if (!i)
        return false;

    i->callback = cbfunc;
    return true;
}

static bool
backend_create_ev(int fd, short what)
{
    bool ret = false;
#ifdef IO_USE_DEVPOLL
    ret = io_event_change_devpoll(fd, what);
#endif
#ifdef IO_USE_POLL
    ret = io_event_change_poll(fd, what);
#endif
#ifdef IO_USE_EPOLL
    ret = io_event_change_epoll(fd, what, EPOLL_CTL_ADD);
#endif
#ifdef IO_USE_KQUEUE
    ret = io_event_change_kqueue(fd, what, EV_ADD|EV_ENABLE);
#endif
#ifdef IO_USE_SELECT
    if (io_masterfd < 0) /* not using epoll at runtime? */
        ret = io_event_add(fd, what);
#endif
    return ret;
}
bool
io_event_create(int fd, short what, void (*cbfunc) (int, short))
{
    bool ret;
    io_event *i;

    assert(fd >= 0);
#if defined(IO_USE_SELECT) && defined(FD_SETSIZE)
    if (io_masterfd < 0 && fd >= FD_SETSIZE) {
        Log(LOG_ERR,
            "fd %d exceeds FD_SETSIZE (%u) (select can't handle more file descriptors)",
            fd, FD_SETSIZE);
        return false;
    }
#endif
    i = (io_event *) array_alloc(&io_events, sizeof(io_event), (size_t) fd);
    if (!i) {
        Log(LOG_WARNING,
            "array_alloc failed: could not allocate space for %d io_event structures",
            fd);
        return false;
    }

    i->callback = cbfunc;
    i->what = 0;
    ret = backend_create_ev(fd, what);
    if (ret)
        i->what = what;
    return ret;
}
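
/*
 * Usage sketch (hypothetical caller, not part of this file):
 *
 *    static void my_callback(int fd, short what)
 *    {
 *        if (what & IO_ERROR) {
 *            io_close(fd);
 *            return;
 *        }
 *        if (what & IO_WANTREAD)
 *            handle_read(fd);   -- hypothetical helper
 *        if (what & IO_WANTWRITE)
 *            handle_write(fd);  -- hypothetical helper
 *    }
 *
 *    io_library_init(100);
 *    io_setnonblock(fd);
 *    io_event_create(fd, IO_WANTREAD, my_callback);
 *    for (;;) {
 *        struct timeval tv = { 1, 0 };
 *        io_dispatch(&tv);
 *    }
 */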
bool
io_event_add(int fd, short what)
{
    io_event *i = io_event_get(fd);

    if (!i) return false;

    if ((i->what & what) == what) /* event type is already registered */
        return true;

    io_debug("io_event_add: fd, what", fd, what);

    i->what |= what;
#ifdef IO_USE_EPOLL
    if (io_masterfd >= 0)
        return io_event_change_epoll(fd, i->what, EPOLL_CTL_MOD);
#endif
#ifdef IO_USE_KQUEUE
    return io_event_change_kqueue(fd, what, EV_ADD | EV_ENABLE);
#endif
#ifdef IO_USE_DEVPOLL
    return io_event_change_devpoll(fd, i->what);
#endif
#ifdef IO_USE_POLL
    return io_event_change_poll(fd, i->what);
#endif
#ifdef IO_USE_SELECT
    if (fd > select_maxfd)
        select_maxfd = fd;

    if (what & IO_WANTREAD)
        FD_SET(fd, &readers);
    if (what & IO_WANTWRITE)
        FD_SET(fd, &writers);

    return true;
#endif
    return false;
}
bool
io_setnonblock(int fd)
{
    int flags = fcntl(fd, F_GETFL);
    if (flags == -1)
        return false;
#ifndef O_NONBLOCK
#define O_NONBLOCK O_NDELAY
#endif
    flags |= O_NONBLOCK;

    return fcntl(fd, F_SETFL, flags) == 0;
}

bool
io_setcloexec(int fd)
{
    int flags = fcntl(fd, F_GETFD);
    if (flags == -1)
        return false;
#ifdef FD_CLOEXEC
    flags |= FD_CLOEXEC;
#endif

    return fcntl(fd, F_SETFD, flags) == 0;
}
bool
io_close(int fd)
{
    io_event *i;

    i = io_event_get(fd);
#ifdef IO_USE_KQUEUE
    if (array_length(&io_evcache, sizeof (struct kevent))) /* pending data in cache? */
        io_event_kqueue_commit_cache();

    /* both kqueue and epoll remove the fd from all sets automatically on the
     * last close of the descriptor. since we don't know whether this is the
     * last close, we have to remove the fd from the event set explicitly. */
    if (i) {
        io_event_change_kqueue(fd, i->what, EV_DELETE);
        io_event_kqueue_commit_cache();
    }
#endif
    io_close_devpoll(fd);
    io_close_poll(fd);
    io_close_select(fd);
#ifdef IO_USE_EPOLL
    io_event_change_epoll(fd, 0, EPOLL_CTL_DEL);
#endif
    if (i) {
        i->callback = NULL;
        i->what = 0;
    }
    return close(fd) == 0;
}
bool
io_event_del(int fd, short what)
{
    io_event *i = io_event_get(fd);

    io_debug("io_event_del: trying to delete eventtype; fd, what", fd, what);
    if (!i) return false;

    if (!(i->what & what)) /* event is already disabled */
        return true;

    i->what &= ~what;
#ifdef IO_USE_DEVPOLL
    return io_event_change_devpoll(fd, i->what);
#endif
#ifdef IO_USE_POLL
    return io_event_change_poll(fd, i->what);
#endif
#ifdef IO_USE_EPOLL
    if (io_masterfd >= 0)
        return io_event_change_epoll(fd, i->what, EPOLL_CTL_MOD);
#endif
#ifdef IO_USE_KQUEUE
    return io_event_change_kqueue(fd, what, EV_DISABLE);
#endif
#ifdef IO_USE_SELECT
    if (what & IO_WANTWRITE)
        FD_CLR(fd, &writers);
    if (what & IO_WANTREAD)
        FD_CLR(fd, &readers);

    return true;
#endif
    return false;
}
int
io_dispatch(struct timeval *tv)
{
#ifdef IO_USE_EPOLL
    if (io_masterfd >= 0)
        return io_dispatch_epoll(tv);
#endif
#ifdef IO_USE_SELECT
    return io_dispatch_select(tv);
#endif
#ifdef IO_USE_KQUEUE
    return io_dispatch_kqueue(tv);
#endif
#ifdef IO_USE_DEVPOLL
    return io_dispatch_devpoll(tv);
#endif
#ifdef IO_USE_POLL
    return io_dispatch_poll(tv);
#endif
    return -1;
}
/* call the callback function registered for this fd */
static void
io_docallback(int fd, short what)
{
    io_event *i = io_event_get(fd);

    io_debug("io_docallback; fd, what", fd, what);

    if (i->callback) { /* callback might be NULL if a previous callback
                        * function called io_close() on this fd */
        i->callback(fd, (what & IO_ERROR) ? i->what : what);
    }
    /* if the error indicator is set, pass the event(s) that were registered */
}