/* mod_ip_frag.c */
  1. /*
  2. * mod_ip_frag.c
  3. *
  4. * Copyright (c) 2001 Dug Song <dugsong@monkey.org>
  5. *
  6. * $Id$
  7. */
  8. #include "config.h"
  9. #include "lib/queue.h"
  10. #include "defines.h"
  11. #include "common.h"
  12. #include <stdio.h>
  13. #include <stdlib.h>
  14. #include <string.h>
  15. #include "mod.h"
  16. #include "pkt.h"
  17. #include "randutil.h"
  18. #ifndef MAX
  19. #define MAX(a,b) (((a)>(b))?(a):(b))
  20. #endif
  21. #define FAVOR_OLD 1
  22. #define FAVOR_NEW 2
/* Per-mode fragmentation entry points, selected by ethertype. */
static int
ip_frag_apply_ipv4(void *d, struct pktq *pktq);
static int
ip_frag_apply_ipv6(void *d, struct pktq *pktq);

/*
 * Module-global state for the ip_frag plugin, initialized by
 * ip_frag_open() and torn down by ip_frag_close().
 */
static struct ip_frag_data {
	rand_t		*rnd;		/* RNG handle for decoy-fragment filler */
	int		 size;		/* fragment payload size (multiple of 8) */
	int		 overlap;	/* 0, FAVOR_OLD, or FAVOR_NEW */
	uint32_t	 ident;		/* IPv6 fragment identification counter */
} ip_frag_data;
  34. void *
  35. ip_frag_close(_U_ void *d)
  36. {
  37. if (ip_frag_data.rnd != NULL)
  38. rand_close(ip_frag_data.rnd);
  39. ip_frag_data.size = 0;
  40. return (NULL);
  41. }
  42. void *
  43. ip_frag_open(int argc, char *argv[])
  44. {
  45. if (argc < 2) {
  46. warn("need fragment <size> in bytes");
  47. return (NULL);
  48. }
  49. ip_frag_data.rnd = rand_open();
  50. ip_frag_data.size = atoi(argv[1]);
  51. if (ip_frag_data.size == 0 || (ip_frag_data.size % 8) != 0) {
  52. warn("fragment size must be a multiple of 8");
  53. return (ip_frag_close(&ip_frag_data));
  54. }
  55. if (argc == 3) {
  56. if (strcmp(argv[2], "old") == 0 ||
  57. strcmp(argv[2], "win32") == 0)
  58. ip_frag_data.overlap = FAVOR_OLD;
  59. else if (strcmp(argv[2], "new") == 0 ||
  60. strcmp(argv[2], "unix") == 0)
  61. ip_frag_data.overlap = FAVOR_NEW;
  62. else
  63. return (ip_frag_close(&ip_frag_data));
  64. }
  65. ip_frag_data.ident = rand_uint32(ip_frag_data.rnd);
  66. return (&ip_frag_data);
  67. }
  68. int
  69. ip_frag_apply(void *d, struct pktq *pktq)
  70. {
  71. struct pkt *pkt;
  72. /* Select eth protocol via first packet in queue: */
  73. pkt = TAILQ_FIRST(pktq);
  74. if (pkt != TAILQ_END(pktq)) {
  75. uint16_t eth_type = htons(pkt->pkt_eth->eth_type);
  76. if (eth_type == ETH_TYPE_IP) {
  77. ip_frag_apply_ipv4(d, pktq);
  78. } else if (eth_type == ETH_TYPE_IPV6) {
  79. ip_frag_apply_ipv6(d, pktq);
  80. }
  81. return 0;
  82. }
  83. return 0;
  84. }
/*
 * Fragment every IPv4 packet in the queue into ip_frag_data.size-byte
 * pieces.  Each original packet is replaced in the queue by its
 * fragments (inserted before it) and then freed.  When overlap mode is
 * enabled, odd-offset positions emit an overlapping fragment pair so
 * that reassemblers favoring old vs. new data reconstruct different
 * payloads.  Always returns 0.
 */
static int
ip_frag_apply_ipv4(_U_ void *d, struct pktq *pktq)
{
	struct pkt *pkt, *new, *next;
	int hl, fraglen, off;
	u_char *p, *p1, *p2;

	for (pkt = TAILQ_FIRST(pktq); pkt != TAILQ_END(pktq); pkt = next) {
		/* Grab the successor first: pkt is removed/freed below. */
		next = TAILQ_NEXT(pkt, pkt_next);

		if (pkt->pkt_ip == NULL || pkt->pkt_ip_data == NULL)
			continue;

		hl = pkt->pkt_ip->ip_hl << 2;	/* IP header length, bytes */
		/*
		 * Preserve transport protocol header in first frag,
		 * to bypass filters that block `short' fragments.
		 */
		switch (pkt->pkt_ip->ip_p) {
		case IP_PROTO_ICMP:
			fraglen = MAX(ICMP_LEN_MIN, ip_frag_data.size);
			break;
		case IP_PROTO_UDP:
			fraglen = MAX(UDP_HDR_LEN, ip_frag_data.size);
			break;
		case IP_PROTO_TCP:
			fraglen = MAX(pkt->pkt_tcp->th_off << 2,
			    ip_frag_data.size);
			break;
		default:
			fraglen = ip_frag_data.size;
			break;
		}
		/* Fragment offsets are expressed in 8-byte units; round
		 * the first-fragment length up to the next multiple. */
		if (fraglen & 7)
			fraglen = (fraglen & ~7) + 8;

		/* Payload already fits in one fragment: leave untouched. */
		if (pkt->pkt_end - pkt->pkt_ip_data < fraglen)
			continue;

		for (p = pkt->pkt_ip_data; p < pkt->pkt_end; ) {
			/* NOTE(review): pkt_new()/pkt_dup() results are not
			 * NULL-checked anywhere in this loop — presumably
			 * they abort on OOM; confirm in pkt.c. */
			new = pkt_new(pkt->pkt_buf_size);
			/* Copy link-layer header and IP header verbatim. */
			memcpy(new->pkt_eth, pkt->pkt_eth, (u_char*)pkt->pkt_eth_data - (u_char*)pkt->pkt_eth);
			memcpy(new->pkt_ip, pkt->pkt_ip, hl);
			new->pkt_ip_data = new->pkt_eth_data + hl;

			p1 = p, p2 = NULL;
			off = (p - pkt->pkt_ip_data) >> 3;	/* 8-byte units */

			/* Overlap trick: at odd offsets (with room for two
			 * more fragments), first send a fragment claiming
			 * the NEXT slot, then a double-length fragment at
			 * the current offset that overlaps it.  p1 feeds
			 * the first fragment, p2 the tail half of the
			 * second; one of them is random filler depending
			 * on which reassembly policy we want to fool. */
			if (ip_frag_data.overlap != 0 && (off & 1) != 0 &&
			    p + (fraglen << 1) < pkt->pkt_end) {
				struct pkt tmp;
				u_char tmp_buf[pkt->pkt_buf_size];	/* VLA scratch */
				tmp.pkt_buf = tmp_buf;
				tmp.pkt_buf_size = pkt->pkt_buf_size;
				rand_strset(ip_frag_data.rnd, tmp.pkt_buf,
				    fraglen);
				if (ip_frag_data.overlap == FAVOR_OLD) {
					/* real data first; random overlap later */
					p1 = p + fraglen;
					p2 = tmp.pkt_buf;
				} else if (ip_frag_data.overlap == FAVOR_NEW) {
					/* random data first; real overlap later */
					p1 = tmp.pkt_buf;
					p2 = p + fraglen;
				}
				new->pkt_ip->ip_off = htons(IP_MF |
				    (off + (fraglen >> 3)));
			} else {
				/* Plain fragment; MF set unless this is the
				 * last piece. */
				new->pkt_ip->ip_off = htons(off |
				    ((p + fraglen < pkt->pkt_end) ? IP_MF: 0));
			}
			new->pkt_ip->ip_len = htons(hl + fraglen);
			ip_checksum(new->pkt_ip, hl + fraglen);

			memcpy(new->pkt_ip_data, p1, fraglen);
			new->pkt_end = new->pkt_ip_data + fraglen;
			TAILQ_INSERT_BEFORE(pkt, new, pkt_next);

			if (p2 != NULL) {
				/* Second, overlapping fragment of the pair:
				 * double length, starting at the current
				 * offset, real data in the first half and
				 * p2 (real or random) in the second. */
				new = pkt_dup(new);
				new->pkt_ts.tv_usec = 1;	/* order after the first */
				new->pkt_ip->ip_off = htons(IP_MF | off);
				new->pkt_ip->ip_len = htons(hl + (fraglen<<1));
				ip_checksum(new->pkt_ip, hl + (fraglen<<1));
				memcpy(new->pkt_ip_data, p, fraglen);
				memcpy(new->pkt_ip_data+fraglen, p2, fraglen);
				new->pkt_end = new->pkt_ip_data + (fraglen<<1);
				TAILQ_INSERT_BEFORE(pkt, new, pkt_next);
				p += (fraglen << 1);
			} else
				p += fraglen;

			/* Remaining fragments use the configured size; the
			 * final one may be shorter. */
			if ((fraglen = pkt->pkt_end - p) > ip_frag_data.size)
				fraglen = ip_frag_data.size;
		}
		/* All fragments queued; drop the original packet. */
		TAILQ_REMOVE(pktq, pkt, pkt_next);
		pkt_free(pkt);
	}
	return (0);
}
/*
 * Fragment every IPv6 packet in the queue by inserting an IPv6
 * Fragment extension header and splitting the payload into
 * ip_frag_data.size-byte pieces.  Mirrors ip_frag_apply_ipv4(),
 * including the overlap trick.  Each original packet is replaced
 * in-queue by its fragments and freed.  Always returns 0.
 */
static int
ip_frag_apply_ipv6(_U_ void *d, struct pktq *pktq)
{
	struct pkt *pkt, *new, *next;
	struct ip6_ext_hdr *ext;
	int hl, fraglen, off;
	u_char *p, *p1, *p2;
	uint8_t next_hdr;

	/* One identification value per apply() pass, shared by all
	 * fragments of all packets in this queue. */
	ip_frag_data.ident++;
	for (pkt = TAILQ_FIRST(pktq); pkt != TAILQ_END(pktq); pkt = next) {
		/* Grab the successor first: pkt is removed/freed below. */
		next = TAILQ_NEXT(pkt, pkt_next);

		if (pkt->pkt_ip == NULL || pkt->pkt_ip_data == NULL)
			continue;

		hl = IP6_HDR_LEN;	/* fixed 40-byte base header */
		/*
		 * Preserve transport protocol header in first frag,
		 * to bypass filters that block `short' fragments.
		 */
		switch (pkt->pkt_ip->ip_p) {
		case IP_PROTO_ICMP:
			fraglen = MAX(ICMP_LEN_MIN, ip_frag_data.size);
			break;
		case IP_PROTO_UDP:
			fraglen = MAX(UDP_HDR_LEN, ip_frag_data.size);
			break;
		case IP_PROTO_TCP:
			fraglen = MAX(pkt->pkt_tcp->th_off << 2,
			    ip_frag_data.size);
			break;
		default:
			fraglen = ip_frag_data.size;
			break;
		}
		/* Fragment offsets are in 8-byte units; round up. */
		if (fraglen & 7)
			fraglen = (fraglen & ~7) + 8;

		/* Payload already fits in one fragment: leave untouched. */
		if (pkt->pkt_end - pkt->pkt_ip_data < fraglen)
			continue;

		/* The Fragment header takes over the next-header chain. */
		next_hdr = pkt->pkt_ip6->ip6_nxt;
		for (p = pkt->pkt_ip_data; p < pkt->pkt_end; ) {
			new = pkt_new(pkt->pkt_buf_size);
			/* Copy link-layer header and IPv6 base header. */
			memcpy(new->pkt_eth, pkt->pkt_eth, (u_char*)pkt->pkt_eth_data - (u_char*)pkt->pkt_eth);
			memcpy(new->pkt_ip, pkt->pkt_ip, hl);
			/* Build the Fragment extension header right after
			 * the base header; payload starts after its 8
			 * bytes (2-byte common part + fragment data). */
			ext = (struct ip6_ext_hdr *)((u_char*)new->pkt_eth_data + hl);
			new->pkt_ip_data = (u_char *)(ext) + 2 +
			    sizeof(struct ip6_ext_data_fragment);
			new->pkt_ip6->ip6_nxt = IP_PROTO_FRAGMENT;
			ext->ext_nxt = next_hdr;
			ext->ext_len = 0; /* ip6 fragf reserved */
			/* NOTE(review): ident is stored in host byte order;
			 * verify whether htonl() is intended here. */
			ext->ext_data.fragment.ident = ip_frag_data.ident;

			p1 = p, p2 = NULL;
			off = (p - pkt->pkt_ip_data) >> 3;	/* 8-byte units */

			/* Overlap trick — see ip_frag_apply_ipv4() for the
			 * full explanation of the p1/p2 pairing. */
			if (ip_frag_data.overlap != 0 && (off & 1) != 0 &&
			    p + (fraglen << 1) < pkt->pkt_end) {
				struct pkt tmp;
				u_char tmp_buf[pkt->pkt_buf_size];	/* VLA scratch */
				tmp.pkt_buf = tmp_buf;
				tmp.pkt_buf_size = pkt->pkt_buf_size;
				rand_strset(ip_frag_data.rnd, tmp.pkt_buf,
				    fraglen);
				if (ip_frag_data.overlap == FAVOR_OLD) {
					p1 = p + fraglen;
					p2 = tmp.pkt_buf;
				} else if (ip_frag_data.overlap == FAVOR_NEW) {
					p1 = tmp.pkt_buf;
					p2 = p + fraglen;
				}
				/* offlg: 13-bit offset in bytes (off<<3),
				 * plus the more-fragments flag. */
				ext->ext_data.fragment.offlg =
				    htons((off /*+ (fraglen >> 3)*/) << 3) | IP6_MORE_FRAG;
			} else {
				ext->ext_data.fragment.offlg = htons(off << 3) |
				    ((p + fraglen < pkt->pkt_end) ? IP6_MORE_FRAG : 0);
			}
			/* plen counts the Fragment header (8) + payload. */
			new->pkt_ip6->ip6_plen = htons(fraglen + 8);

			memcpy(new->pkt_ip_data, p1, fraglen);
			new->pkt_end = new->pkt_ip_data + fraglen;
			TAILQ_INSERT_BEFORE(pkt, new, pkt_next);

			if (p2 != NULL) {
				/* Second, overlapping double-length fragment. */
				new = pkt_dup(new);
				new->pkt_ts.tv_usec = 1;	/* order after the first */
				ext->ext_data.fragment.offlg = htons(off << 3) | IP6_MORE_FRAG;
				new->pkt_ip6->ip6_plen = htons((fraglen << 1) + 8);
				memcpy(new->pkt_ip_data, p, fraglen);
				memcpy(new->pkt_ip_data + fraglen, p2, fraglen);
				new->pkt_end = new->pkt_ip_data + (fraglen << 1);
				TAILQ_INSERT_BEFORE(pkt, new, pkt_next);
				p += (fraglen << 1);
			} else {
				p += fraglen;
			}
			/* Remaining fragments use the configured size; the
			 * final one may be shorter. */
			if ((fraglen = pkt->pkt_end - p) > ip_frag_data.size)
				fraglen = ip_frag_data.size;
		}
		/* All fragments queued; drop the original packet. */
		TAILQ_REMOVE(pktq, pkt, pkt_next);
		pkt_free(pkt);
	}
	return 0;
}
  270. struct mod mod_ip_frag = {
  271. "ip_frag", /* name */
  272. "ip_frag <size> [old|new]", /* usage */
  273. ip_frag_open, /* open */
  274. ip_frag_apply, /* apply */
  275. ip_frag_close /* close */
  276. };