
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * Please read the file COPYING, README and AUTHORS for more information.
 *
 * I/O abstraction interface.
 * Copyright (c) 2005 Florian Westphal (westphal@foo.fh-furtwangen.de)
 *
 */

#include "portab.h"

static char UNUSED id[] = "$Id: io.c,v 1.31 2008/04/03 20:56:44 fw Exp $";

#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>
#include <fcntl.h>

#include "array.h"
#include "io.h"
#include "log.h"

/* Enables extra debug messages in event add/delete/callback code. */
/* #define DEBUG_IO */
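
/*
 * Typical usage of this interface (illustrative sketch only; "my_callback",
 * "sock" and the sizes/timeouts below are made-up placeholders, not part of
 * this file):
 *
 *	void my_callback(int fd, short what) { ... }
 *
 *	io_library_init(100);
 *	io_event_create(sock, IO_WANTREAD, my_callback);
 *	for (;;) {
 *		struct timeval tv = { 1, 0 };
 *		if (io_dispatch(&tv) == -1)
 *			break;
 *	}
 *	io_library_shutdown();
 */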

typedef struct {
#ifdef PROTOTYPES
	void (*callback)(int, short);
#else
	void (*callback)();
#endif
	short what;
} io_event;

#define INIT_IOEVENT	{ NULL, 0 }	/* matches the two members of io_event */
#define IO_ERROR	4

#ifdef HAVE_EPOLL_CREATE
# define IO_USE_EPOLL 1
# ifdef HAVE_SELECT
#  define IO_USE_SELECT 1
# endif
#else
# ifdef HAVE_KQUEUE
#  define IO_USE_KQUEUE 1
# else
#  ifdef HAVE_SYS_DEVPOLL_H
#   define IO_USE_DEVPOLL 1
#  else
#   ifdef HAVE_POLL
#    define IO_USE_POLL 1
#   else
#    ifdef HAVE_SELECT
#     define IO_USE_SELECT 1
#    else
#     error "no IO API available!?"
#    endif /* HAVE_SELECT */
#   endif /* HAVE_POLL */
#  endif /* HAVE_SYS_DEVPOLL_H */
# endif /* HAVE_KQUEUE */
#endif /* HAVE_EPOLL_CREATE */

static bool library_initialized = false;

#ifdef IO_USE_EPOLL
#include <sys/epoll.h>

static int io_masterfd = -1;
static bool io_event_change_epoll(int fd, short what, const int action);
static int io_dispatch_epoll(struct timeval *tv);
#endif

#ifdef IO_USE_KQUEUE
#include <sys/types.h>
#include <sys/event.h>

static array io_evcache;
static int io_masterfd;

static int io_dispatch_kqueue(struct timeval *tv);
static bool io_event_change_kqueue(int, short, const int action);
#endif

#ifdef IO_USE_POLL
#include <poll.h>

static array pollfds;
static int poll_maxfd;

static bool io_event_change_poll PARAMS((int fd, short what));
#endif

#ifdef IO_USE_DEVPOLL
#include <sys/devpoll.h>

static int io_masterfd;

static bool io_event_change_devpoll(int fd, short what);
#endif

#ifdef IO_USE_SELECT
#include "defines.h"	/* for conn.h */
#include "proc.h"	/* for PROC_STAT (needed by conf.h) */
#include "conn.h"	/* for CONN_ID (needed by conf.h) */
#include "conf.h"	/* for Conf_MaxConnections */

static fd_set readers;
static fd_set writers;
/*
 * select_maxfd is the largest fd registered; the first argument
 * to select() is therefore select_maxfd + 1.
 */
static int select_maxfd;
static int io_dispatch_select PARAMS((struct timeval *tv));

#ifndef IO_USE_EPOLL
#define io_masterfd -1
#endif
#endif /* IO_USE_SELECT */

static array io_events;

static void io_docallback PARAMS((int fd, short what));

#ifdef DEBUG_IO
static void
io_debug(const char *s, int fd, int what)
{
	Log(LOG_DEBUG, "%s: %d, %d\n", s, fd, what);
}
#else
static inline void
io_debug(const char UNUSED *s, int UNUSED a, int UNUSED b)
{ /* NOTHING */ }
#endif
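
/* Return the io_event structure registered for fd (NULL if no slot exists). */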
static io_event *
io_event_get(int fd)
{
	io_event *i;

	assert(fd >= 0);

	i = (io_event *) array_get(&io_events, sizeof(io_event), (size_t) fd);

	assert(i != NULL);

	return i;
}

#ifdef IO_USE_DEVPOLL
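/*
 * Wait for events on the /dev/poll master fd and invoke the registered
 * callbacks. Loops as long as a full batch of 100 events is returned.
 */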
static int
io_dispatch_devpoll(struct timeval *tv)
{
	struct dvpoll dvp;
	time_t sec = tv->tv_sec * 1000;
	/* struct timeval carries microseconds; /dev/poll expects milliseconds */
	int i, total, ret, timeout = tv->tv_usec / 1000 + sec;
	short what;
	struct pollfd p[100];

	if (timeout < 0)
		timeout = 1000;

	total = 0;
	do {
		dvp.dp_timeout = timeout;
		dvp.dp_nfds = 100;
		dvp.dp_fds = p;
		ret = ioctl(io_masterfd, DP_POLL, &dvp);
		total += ret;
		if (ret <= 0)
			return total;
		for (i = 0; i < ret; i++) {
			what = 0;
			if (p[i].revents & (POLLIN|POLLPRI))
				what = IO_WANTREAD;

			if (p[i].revents & POLLOUT)
				what |= IO_WANTWRITE;

			if (p[i].revents && !what) {
				/* other flag is set, probably POLLERR */
				what = IO_ERROR;
			}
			io_docallback(p[i].fd, what);
		}
	} while (ret == 100);

	return total;
}
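
/* (Re-)register the event mask for fd by writing a pollfd to /dev/poll. */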
static bool
io_event_change_devpoll(int fd, short what)
{
	struct pollfd p;

	p.events = 0;

	if (what & IO_WANTREAD)
		p.events = POLLIN | POLLPRI;
	if (what & IO_WANTWRITE)
		p.events |= POLLOUT;

	p.fd = fd;
	return write(io_masterfd, &p, sizeof p) == (ssize_t)sizeof p;
}

static void
io_close_devpoll(int fd)
{
	struct pollfd p;

	p.events = POLLREMOVE;
	p.fd = fd;
	write(io_masterfd, &p, sizeof p);
}

static void
io_library_init_devpoll(unsigned int eventsize)
{
	io_masterfd = open("/dev/poll", O_RDWR);
	if (io_masterfd >= 0)
		library_initialized = true;
	Log(LOG_INFO, "IO subsystem: /dev/poll (initial maxfd %u, masterfd %d).",
	    eventsize, io_masterfd);
}
#else
static inline void
io_close_devpoll(int UNUSED x)
{ /* NOTHING */ }

static inline void
io_library_init_devpoll(unsigned int UNUSED ev)
{ /* NOTHING */ }
#endif

#ifdef IO_USE_POLL
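/*
 * Wait for events with poll() and invoke the registered callbacks;
 * stops scanning the pollfd array once all ready descriptors are handled.
 */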
static int
io_dispatch_poll(struct timeval *tv)
{
	time_t sec = tv->tv_sec * 1000;
	/* struct timeval carries microseconds; poll() expects milliseconds */
	int i, ret, timeout = tv->tv_usec / 1000 + sec;
	int fds_ready;
	short what;
	struct pollfd *p = array_start(&pollfds);

	if (timeout < 0)
		timeout = 1000;

	ret = poll(p, poll_maxfd + 1, timeout);
	if (ret <= 0)
		return ret;

	fds_ready = ret;
	for (i = 0; i <= poll_maxfd; i++) {
		what = 0;
		if (p[i].revents & (POLLIN|POLLPRI))
			what = IO_WANTREAD;

		if (p[i].revents & POLLOUT)
			what |= IO_WANTWRITE;

		if (p[i].revents && !what) {
			/* other flag is set, probably POLLERR */
			what = IO_ERROR;
		}
		if (what) {
			fds_ready--;
			io_docallback(i, what);
		}
		if (fds_ready <= 0)
			break;
	}

	return ret;
}
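
/* Store the new event mask for fd in the pollfd array, growing it on demand. */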
static bool
io_event_change_poll(int fd, short what)
{
	struct pollfd *p;
	short events = 0;

	if (what & IO_WANTREAD)
		events = POLLIN | POLLPRI;
	if (what & IO_WANTWRITE)
		events |= POLLOUT;

	p = array_alloc(&pollfds, sizeof *p, fd);
	if (p) {
		p->events = events;
		p->fd = fd;
		if (fd > poll_maxfd)
			poll_maxfd = fd;
	}
	return p != NULL;
}
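
/* Mark fd as unused and, if it was the largest one, shrink poll_maxfd. */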
static void
io_close_poll(int fd)
{
	struct pollfd *p;

	p = array_get(&pollfds, sizeof *p, fd);
	if (!p)
		return;

	p->fd = -1;
	if (fd == poll_maxfd) {
		while (poll_maxfd > 0) {
			--poll_maxfd;
			p = array_get(&pollfds, sizeof *p, poll_maxfd);
			if (p && p->fd >= 0)
				break;
		}
	}
}

static void
io_library_init_poll(unsigned int eventsize)
{
	struct pollfd *p;

	array_init(&pollfds);
	poll_maxfd = 0;
	Log(LOG_INFO, "IO subsystem: poll (initial maxfd %u).", eventsize);
	p = array_alloc(&pollfds, sizeof(struct pollfd), eventsize);
	if (p) {
		unsigned i;
		p = array_start(&pollfds);
		for (i = 0; i < eventsize; i++)
			p[i].fd = -1;

		library_initialized = true;
	}
}
#else
static inline void
io_close_poll(int UNUSED x)
{ /* NOTHING */ }

static inline void
io_library_init_poll(unsigned int UNUSED ev)
{ /* NOTHING */ }
#endif

#ifdef IO_USE_SELECT
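/*
 * Wait for events with select() on copies of the fd sets and invoke the
 * registered callbacks; stops scanning once all ready fds are handled.
 */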
static int
io_dispatch_select(struct timeval *tv)
{
	fd_set readers_tmp;
	fd_set writers_tmp;
	short what;
	int ret, i;
	int fds_ready;

	readers_tmp = readers;
	writers_tmp = writers;

	ret = select(select_maxfd + 1, &readers_tmp, &writers_tmp, NULL, tv);
	if (ret <= 0)
		return ret;

	fds_ready = ret;

	for (i = 0; i <= select_maxfd; i++) {
		what = 0;
		if (FD_ISSET(i, &readers_tmp)) {
			what = IO_WANTREAD;
			fds_ready--;
		}

		if (FD_ISSET(i, &writers_tmp)) {
			what |= IO_WANTWRITE;
			fds_ready--;
		}

		if (what)
			io_docallback(i, what);
		if (fds_ready <= 0)
			break;
	}

	return ret;
}

static void
io_library_init_select(unsigned int eventsize)
{
	if (library_initialized)
		return;

	Log(LOG_INFO, "IO subsystem: select (initial maxfd %u).", eventsize);

	FD_ZERO(&readers);
	FD_ZERO(&writers);
#ifdef FD_SETSIZE
	if (Conf_MaxConnections >= (int)FD_SETSIZE) {
		Log(LOG_WARNING,
		    "MaxConnections (%d) exceeds limit (%u), changed MaxConnections to %u.",
		    Conf_MaxConnections, FD_SETSIZE, FD_SETSIZE - 1);
		Conf_MaxConnections = FD_SETSIZE - 1;
	}
#else
	Log(LOG_WARNING,
	    "FD_SETSIZE undefined, don't know how many descriptors select() can handle on your platform ...");
#endif /* FD_SETSIZE */
	library_initialized = true;
}

static void
io_close_select(int fd)
{
	io_event *i;

	if (io_masterfd >= 0)	/* Are we using epoll()? */
		return;

	FD_CLR(fd, &writers);
	FD_CLR(fd, &readers);

	i = io_event_get(fd);
	if (!i)
		return;

	if (fd == select_maxfd) {
		while (select_maxfd > 0) {
			--select_maxfd;	/* find largest fd */
			i = io_event_get(select_maxfd);
			if (i && i->callback)
				break;
		}
	}
}
#else
static inline void
io_library_init_select(int UNUSED x)
{ /* NOTHING */ }

static inline void
io_close_select(int UNUSED x)
{ /* NOTHING */ }
#endif /* SELECT */

#ifdef IO_USE_EPOLL
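/* Add, modify or delete the epoll event mask for fd; "action" is one of
 * EPOLL_CTL_ADD, EPOLL_CTL_MOD or EPOLL_CTL_DEL. */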
static bool
io_event_change_epoll(int fd, short what, const int action)
{
	struct epoll_event ev = { 0, {0} };
	ev.data.fd = fd;

	if (what & IO_WANTREAD)
		ev.events = EPOLLIN | EPOLLPRI;
	if (what & IO_WANTWRITE)
		ev.events |= EPOLLOUT;

	return epoll_ctl(io_masterfd, action, fd, &ev) == 0;
}
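
/*
 * Wait for events with epoll_wait() and invoke the registered callbacks.
 * Loops as long as a full batch of 100 events is returned.
 */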
static int
io_dispatch_epoll(struct timeval *tv)
{
	time_t sec = tv->tv_sec * 1000;
	/* struct timeval carries microseconds; epoll_wait() expects milliseconds */
	int i, total = 0, ret, timeout = tv->tv_usec / 1000 + sec;
	struct epoll_event epoll_ev[100];
	short type;

	if (timeout < 0)
		timeout = 1000;

	do {
		ret = epoll_wait(io_masterfd, epoll_ev, 100, timeout);
		total += ret;
		if (ret <= 0)
			return total;

		for (i = 0; i < ret; i++) {
			type = 0;
			if (epoll_ev[i].events & (EPOLLERR | EPOLLHUP))
				type = IO_ERROR;

			if (epoll_ev[i].events & (EPOLLIN | EPOLLPRI))
				type |= IO_WANTREAD;

			if (epoll_ev[i].events & EPOLLOUT)
				type |= IO_WANTWRITE;

			io_docallback(epoll_ev[i].data.fd, type);
		}

		timeout = 0;	/* further iterations only drain pending events */
	} while (ret == 100);

	return total;
}

static void
io_library_init_epoll(unsigned int eventsize)
{
	int ecreate_hint = (int)eventsize;
	if (ecreate_hint <= 0)
		ecreate_hint = 128;

	io_masterfd = epoll_create(ecreate_hint);
	if (io_masterfd >= 0) {
		library_initialized = true;
		Log(LOG_INFO,
		    "IO subsystem: epoll (hint size %d, initial maxfd %u, masterfd %d).",
		    ecreate_hint, eventsize, io_masterfd);
		return;
	}
#ifdef IO_USE_SELECT
	Log(LOG_INFO, "Can't initialize epoll() IO interface, falling back to select() ...");
#endif
}
#else
static inline void
io_library_init_epoll(unsigned int UNUSED ev)
{ /* NOTHING */ }
#endif /* IO_USE_EPOLL */

#ifdef IO_USE_KQUEUE
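/* Flush all kevent structures cached in io_evcache to the kernel in a
 * single kevent() call. */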
static bool
io_event_kqueue_commit_cache(void)
{
	struct kevent *events;
	bool ret;
	int len = (int) array_length(&io_evcache, sizeof (struct kevent));

	if (!len) /* nothing to do */
		return true;

	assert(len > 0);

	if (len < 0) {
		array_free(&io_evcache);
		return false;
	}

	events = array_start(&io_evcache);
	assert(events != NULL);

	ret = kevent(io_masterfd, events, len, NULL, 0, NULL) == 0;
	if (ret)
		array_trunc(&io_evcache);
	return ret;
}
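
/*
 * Queue an EVFILT_READ/EVFILT_WRITE change for fd in io_evcache (committed
 * in batches); falls back to a direct kevent() call if caching fails.
 */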
static bool
io_event_change_kqueue(int fd, short what, const int action)
{
	struct kevent kev;
	bool ret = true;

	if (what & IO_WANTREAD) {
		EV_SET(&kev, fd, EVFILT_READ, action, 0, 0, 0);
		ret = array_catb(&io_evcache, (char *) &kev, sizeof (kev));
		if (!ret)
			ret = kevent(io_masterfd, &kev, 1, NULL, 0, NULL) == 0;
	}

	if (ret && (what & IO_WANTWRITE)) {
		EV_SET(&kev, fd, EVFILT_WRITE, action, 0, 0, 0);
		ret = array_catb(&io_evcache, (char *) &kev, sizeof (kev));
		if (!ret)
			ret = kevent(io_masterfd, &kev, 1, NULL, 0, NULL) == 0;
	}

	if (array_length(&io_evcache, sizeof kev) >= 100)
		io_event_kqueue_commit_cache();
	return ret;
}
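
/*
 * Submit pending changes and wait for events via kevent(), then invoke the
 * registered callbacks. Loops as long as a full batch of 100 events is returned.
 */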
static int
io_dispatch_kqueue(struct timeval *tv)
{
	int i, total = 0, ret;
	struct kevent kev[100];
	struct kevent *newevents;
	struct timespec ts;
	int newevents_len;

	ts.tv_sec = tv->tv_sec;
	ts.tv_nsec = tv->tv_usec * 1000;

	do {
		newevents_len = (int) array_length(&io_evcache, sizeof (struct kevent));
		newevents = (newevents_len > 0) ? array_start(&io_evcache) : NULL;
		assert(newevents_len >= 0);

		ret = kevent(io_masterfd, newevents, newevents_len, kev, 100, &ts);
		if (newevents && ret != -1)
			array_trunc(&io_evcache);

		total += ret;
		if (ret <= 0)
			return total;

		for (i = 0; i < ret; i++) {
			io_debug("dispatch_kqueue: fd, kev.flags", (int)kev[i].ident, kev[i].flags);
			if (kev[i].flags & (EV_EOF|EV_ERROR)) {
				if (kev[i].flags & EV_ERROR)
					Log(LOG_ERR, "kevent fd %d: EV_ERROR (%s)",
					    (int)kev[i].ident, strerror((int)kev[i].data));
				io_docallback((int)kev[i].ident, IO_ERROR);
				continue;
			}

			switch (kev[i].filter) {
			case EVFILT_READ:
				io_docallback((int)kev[i].ident, IO_WANTREAD);
				break;
			case EVFILT_WRITE:
				io_docallback((int)kev[i].ident, IO_WANTWRITE);
				break;
			default:
				LogDebug("Unknown kev.filter number %d for fd %d",
					 kev[i].filter, (int)kev[i].ident);
				/* Fall through */
			case EV_ERROR:
				io_docallback((int)kev[i].ident, IO_ERROR);
				break;
			}
		}
		ts.tv_sec = 0;
		ts.tv_nsec = 0;
	} while (ret == 100);

	return total;
}

static void
io_library_init_kqueue(unsigned int eventsize)
{
	io_masterfd = kqueue();

	Log(LOG_INFO,
	    "IO subsystem: kqueue (initial maxfd %u, masterfd %d).",
	    eventsize, io_masterfd);
	if (io_masterfd >= 0)
		library_initialized = true;
}
#else
static inline void
io_library_init_kqueue(unsigned int UNUSED ev)
{ /* NOTHING */ }
#endif
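
/*
 * Initialize the compiled-in IO backend and pre-allocate event slots for
 * eventsize file descriptors. With epoll, select() serves as a fallback
 * if epoll_create() fails.
 */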
bool
io_library_init(unsigned int eventsize)
{
	if (library_initialized)
		return true;

	if ((eventsize > 0) && !array_alloc(&io_events, sizeof(io_event), (size_t)eventsize))
		eventsize = 0;

	io_library_init_epoll(eventsize);
	io_library_init_kqueue(eventsize);
	io_library_init_devpoll(eventsize);
	io_library_init_poll(eventsize);
	io_library_init_select(eventsize);

	return library_initialized;
}
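
/* Release all backend resources; io_library_init() may be called again afterwards. */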
void
io_library_shutdown(void)
{
#ifdef IO_USE_SELECT
	FD_ZERO(&readers);
	FD_ZERO(&writers);
#endif
#if defined(IO_USE_EPOLL) || defined(IO_USE_KQUEUE) || defined(IO_USE_DEVPOLL)
	if (io_masterfd >= 0)
		close(io_masterfd);
	io_masterfd = -1;
#endif
#ifdef IO_USE_KQUEUE
	array_free(&io_evcache);
#endif
	library_initialized = false;
}
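
/* Replace the callback function registered for an existing fd. */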
bool
io_event_setcb(int fd, void (*cbfunc) (int, short))
{
	io_event *i = io_event_get(fd);
	if (!i)
		return false;

	i->callback = cbfunc;
	return true;
}

static bool
backend_create_ev(int fd, short what)
{
	bool ret;
#ifdef IO_USE_DEVPOLL
	ret = io_event_change_devpoll(fd, what);
#endif
#ifdef IO_USE_POLL
	ret = io_event_change_poll(fd, what);
#endif
#ifdef IO_USE_EPOLL
	ret = io_event_change_epoll(fd, what, EPOLL_CTL_ADD);
#endif
#ifdef IO_USE_KQUEUE
	ret = io_event_change_kqueue(fd, what, EV_ADD|EV_ENABLE);
#endif
#ifdef IO_USE_SELECT
	if (io_masterfd < 0)
		ret = io_event_add(fd, what);
#endif
	return ret;
}
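
/*
 * Register a new fd with event mask "what" and callback "cbfunc":
 * allocates the io_event slot and registers the fd with the backend.
 */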
bool
io_event_create(int fd, short what, void (*cbfunc) (int, short))
{
	bool ret;
	io_event *i;

	assert(fd >= 0);
#if defined(IO_USE_SELECT) && defined(FD_SETSIZE)
	if (io_masterfd < 0 && fd >= FD_SETSIZE) {
		Log(LOG_ERR,
		    "fd %d exceeds FD_SETSIZE (%u) (select can't handle more file descriptors)",
		    fd, FD_SETSIZE);
		return false;
	}
#endif
	i = (io_event *) array_alloc(&io_events, sizeof(io_event), (size_t) fd);
	if (!i) {
		Log(LOG_WARNING,
		    "array_alloc failed: could not allocate space for %d io_event structures",
		    fd);
		return false;
	}

	i->callback = cbfunc;
	i->what = 0;
	ret = backend_create_ev(fd, what);
	if (ret)
		i->what = what;
	return ret;
}

bool
io_event_add(int fd, short what)
{
	io_event *i = io_event_get(fd);

	if (!i)
		return false;

	if ((i->what & what) == what) /* event type is already registered */
		return true;

	io_debug("io_event_add: fd, what", fd, what);

	i->what |= what;
#ifdef IO_USE_EPOLL
	if (io_masterfd >= 0)
		return io_event_change_epoll(fd, i->what, EPOLL_CTL_MOD);
#endif
#ifdef IO_USE_KQUEUE
	return io_event_change_kqueue(fd, what, EV_ADD | EV_ENABLE);
#endif
#ifdef IO_USE_DEVPOLL
	return io_event_change_devpoll(fd, i->what);
#endif
#ifdef IO_USE_POLL
	return io_event_change_poll(fd, i->what);
#endif
#ifdef IO_USE_SELECT
	if (fd > select_maxfd)
		select_maxfd = fd;

	if (what & IO_WANTREAD)
		FD_SET(fd, &readers);
	if (what & IO_WANTWRITE)
		FD_SET(fd, &writers);

	return true;
#endif
	return false;
}
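
/* fcntl() helpers: io_setnonblock() sets O_NONBLOCK, io_setcloexec() sets FD_CLOEXEC. */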
bool
io_setnonblock(int fd)
{
	int flags = fcntl(fd, F_GETFL);
	if (flags == -1)
		return false;
#ifndef O_NONBLOCK
#define O_NONBLOCK O_NDELAY
#endif
	flags |= O_NONBLOCK;

	return fcntl(fd, F_SETFL, flags) == 0;
}

bool
io_setcloexec(int fd)
{
	int flags = fcntl(fd, F_GETFD);
	if (flags == -1)
		return false;
#ifdef FD_CLOEXEC
	flags |= FD_CLOEXEC;
#endif

	return fcntl(fd, F_SETFD, flags) == 0;
}
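
/* Unregister fd from the IO backend, clear its event slot and close it. */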
bool
io_close(int fd)
{
	io_event *i;

	i = io_event_get(fd);
#ifdef IO_USE_KQUEUE
	if (array_length(&io_evcache, sizeof (struct kevent)))	/* pending data in cache? */
		io_event_kqueue_commit_cache();

	/* both kqueue and epoll remove fd from all sets automatically on the last close
	 * of the descriptor. since we don't know if this is the last close we'll have
	 * to remove the set explicitly. */
	if (i) {
		io_event_change_kqueue(fd, i->what, EV_DELETE);
		io_event_kqueue_commit_cache();
	}
#endif
	io_close_devpoll(fd);
	io_close_poll(fd);
	io_close_select(fd);
#ifdef IO_USE_EPOLL
	io_event_change_epoll(fd, 0, EPOLL_CTL_DEL);
#endif
	if (i) {
		i->callback = NULL;
		i->what = 0;
	}
	return close(fd) == 0;
}
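
/* Remove the given event type(s) from the set of monitored events for fd. */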
bool
io_event_del(int fd, short what)
{
	io_event *i = io_event_get(fd);

	io_debug("io_event_del: trying to delete eventtype; fd, what", fd, what);
	if (!i)
		return false;

	if (!(i->what & what)) /* event is already disabled */
		return true;

	i->what &= ~what;
#ifdef IO_USE_DEVPOLL
	return io_event_change_devpoll(fd, i->what);
#endif
#ifdef IO_USE_POLL
	return io_event_change_poll(fd, i->what);
#endif
#ifdef IO_USE_EPOLL
	if (io_masterfd >= 0)
		return io_event_change_epoll(fd, i->what, EPOLL_CTL_MOD);
#endif
#ifdef IO_USE_KQUEUE
	return io_event_change_kqueue(fd, what, EV_DISABLE);
#endif
#ifdef IO_USE_SELECT
	if (what & IO_WANTWRITE)
		FD_CLR(fd, &writers);
	if (what & IO_WANTREAD)
		FD_CLR(fd, &readers);

	return true;
#endif
	return false;
}
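
/*
 * Wait up to *tv for IO events and dispatch them to the registered
 * callbacks; returns the number of handled events, or -1 on error.
 */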
int
io_dispatch(struct timeval *tv)
{
#ifdef IO_USE_EPOLL
	if (io_masterfd >= 0)
		return io_dispatch_epoll(tv);
#endif
#ifdef IO_USE_SELECT
	return io_dispatch_select(tv);
#endif
#ifdef IO_USE_KQUEUE
	return io_dispatch_kqueue(tv);
#endif
#ifdef IO_USE_DEVPOLL
	return io_dispatch_devpoll(tv);
#endif
#ifdef IO_USE_POLL
	return io_dispatch_poll(tv);
#endif
	return -1;
}

/* call the callback function inside the struct matching fd */
static void
io_docallback(int fd, short what)
{
	io_event *i = io_event_get(fd);

	io_debug("io_docallback; fd, what", fd, what);

	if (i->callback) {	/* callback might be NULL if a previous callback
				   function called io_close on this fd */
		i->callback(fd, (what & IO_ERROR) ? i->what : what);
	}
	/* if error indicator is set, we return the event(s) that were registered */
}