/*
 * mod_ip_frag.c
 *
 * Copyright (c) 2001 Dug Song <dugsong@monkey.org>
 *
 * $Id$
 */

#include "config.h"
#include "defines.h"
#include "common.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "mod.h"
#include "pkt.h"
#include "randutil.h"

#ifndef MAX
#define MAX(a,b)	(((a)>(b))?(a):(b))
#endif

#define FAVOR_OLD	1
#define FAVOR_NEW	2
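
/*
 * Overlap modes: "old"/"win32" arranges the conflicting duplicate fragments
 * so that a stack which keeps the data it received first reassembles the
 * real payload; "new"/"unix" favors stacks that prefer the most recently
 * received data.  See ip_frag_apply_ipv4() below.
 */
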
static int	ip_frag_apply_ipv4(void *d, struct pktq *pktq);
static int	ip_frag_apply_ipv6(void *d, struct pktq *pktq);

static struct ip_frag_data {
	rand_t		*rnd;
	int		 size;
	int		 overlap;
	uint32_t	 ident;
} ip_frag_data;

void *
ip_frag_close(_U_ void *d)
{
	if (ip_frag_data.rnd != NULL)
		rand_close(ip_frag_data.rnd);
	ip_frag_data.size = 0;
	return (NULL);
}
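
/*
 * Parse "ip_frag <size> [old|new]": <size> is the fragment payload size in
 * bytes and must be a nonzero multiple of 8; the optional overlap argument
 * selects FAVOR_OLD ("old"/"win32") or FAVOR_NEW ("new"/"unix").
 */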
void *
ip_frag_open(int argc, char *argv[])
{
	if (argc < 2) {
		warn("need fragment <size> in bytes");
		return (NULL);
	}
	ip_frag_data.rnd = rand_open();
	ip_frag_data.size = atoi(argv[1]);

	if (ip_frag_data.size == 0 || (ip_frag_data.size % 8) != 0) {
		warn("fragment size must be a multiple of 8");
		return (ip_frag_close(&ip_frag_data));
	}
	if (argc == 3) {
		if (strcmp(argv[2], "old") == 0 ||
		    strcmp(argv[2], "win32") == 0)
			ip_frag_data.overlap = FAVOR_OLD;
		else if (strcmp(argv[2], "new") == 0 ||
		    strcmp(argv[2], "unix") == 0)
			ip_frag_data.overlap = FAVOR_NEW;
		else
			return (ip_frag_close(&ip_frag_data));
	}
	ip_frag_data.ident = rand_uint32(ip_frag_data.rnd);

	return (&ip_frag_data);
}

int
ip_frag_apply(void *d, struct pktq *pktq)
{
	struct pkt *pkt;

	/* Select eth protocol via first packet in queue: */
	pkt = TAILQ_FIRST(pktq);

	if (pkt != TAILQ_END(pktq)) {
		uint16_t eth_type = htons(pkt->pkt_eth->eth_type);

		if (eth_type == ETH_TYPE_IP) {
			ip_frag_apply_ipv4(d, pktq);
		} else if (eth_type == ETH_TYPE_IPV6) {
			ip_frag_apply_ipv6(d, pktq);
		}
		return 0;
	}
	return 0;
}
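
/*
 * Fragment each IPv4 packet in the queue into ip_frag_data.size-byte pieces
 * (the first piece is enlarged to cover the full transport header), insert
 * the fragments ahead of the original packet, then drop the original.
 */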
static int
ip_frag_apply_ipv4(_U_ void *d, struct pktq *pktq)
{
	struct pkt *pkt, *new, *next;
	int hl, fraglen, off;
	u_char *p, *p1, *p2;

	for (pkt = TAILQ_FIRST(pktq); pkt != TAILQ_END(pktq); pkt = next) {
		next = TAILQ_NEXT(pkt, pkt_next);

		if (pkt->pkt_ip == NULL || pkt->pkt_ip_data == NULL)
			continue;

		hl = pkt->pkt_ip->ip_hl << 2;
		/*
		 * Preserve transport protocol header in first frag,
		 * to bypass filters that block `short' fragments.
		 */
		switch (pkt->pkt_ip->ip_p) {
		case IP_PROTO_ICMP:
			fraglen = MAX(ICMP_LEN_MIN, ip_frag_data.size);
			break;
		case IP_PROTO_UDP:
			fraglen = MAX(UDP_HDR_LEN, ip_frag_data.size);
			break;
		case IP_PROTO_TCP:
			fraglen = MAX(pkt->pkt_tcp->th_off << 2,
			    ip_frag_data.size);
			break;
		default:
			fraglen = ip_frag_data.size;
			break;
		}
		if (fraglen & 7)
			fraglen = (fraglen & ~7) + 8;

		if (pkt->pkt_end - pkt->pkt_ip_data < fraglen)
			continue;
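
		/*
		 * Walk the payload, emitting fragments in front of the
		 * original packet.  In overlap mode, odd-offset fragments
		 * are paired with a double-length conflicting fragment whose
		 * second half is either random filler (FAVOR_OLD) or the
		 * real data (FAVOR_NEW).
		 */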
		for (p = pkt->pkt_ip_data; p < pkt->pkt_end; ) {
			new = pkt_new(pkt->pkt_buf_size);
			memcpy(new->pkt_eth, pkt->pkt_eth,
			    (u_char *)pkt->pkt_eth_data - (u_char *)pkt->pkt_eth);
			memcpy(new->pkt_ip, pkt->pkt_ip, hl);
			new->pkt_ip_data = new->pkt_eth_data + hl;

			p1 = p, p2 = NULL;
			off = (p - pkt->pkt_ip_data) >> 3;

			if (ip_frag_data.overlap != 0 && (off & 1) != 0 &&
			    p + (fraglen << 1) < pkt->pkt_end) {
				struct pkt tmp;
				u_char tmp_buf[pkt->pkt_buf_size];

				tmp.pkt_buf = tmp_buf;
				tmp.pkt_buf_size = pkt->pkt_buf_size;
				rand_strset(ip_frag_data.rnd, tmp.pkt_buf,
				    fraglen);

				if (ip_frag_data.overlap == FAVOR_OLD) {
					p1 = p + fraglen;
					p2 = tmp.pkt_buf;
				} else if (ip_frag_data.overlap == FAVOR_NEW) {
					p1 = tmp.pkt_buf;
					p2 = p + fraglen;
				}
				new->pkt_ip->ip_off = htons(IP_MF |
				    (off + (fraglen >> 3)));
			} else {
				new->pkt_ip->ip_off = htons(off |
				    ((p + fraglen < pkt->pkt_end) ? IP_MF : 0));
			}
			new->pkt_ip->ip_len = htons(hl + fraglen);
			ip_checksum(new->pkt_ip, hl + fraglen);

			memcpy(new->pkt_ip_data, p1, fraglen);
			new->pkt_end = new->pkt_ip_data + fraglen;

			TAILQ_INSERT_BEFORE(pkt, new, pkt_next);

			if (p2 != NULL) {
				new = pkt_dup(new);
				new->pkt_ts.tv_usec = 1;
				new->pkt_ip->ip_off = htons(IP_MF | off);
				new->pkt_ip->ip_len = htons(hl + (fraglen << 1));
				ip_checksum(new->pkt_ip, hl + (fraglen << 1));

				memcpy(new->pkt_ip_data, p, fraglen);
				memcpy(new->pkt_ip_data + fraglen, p2, fraglen);
				new->pkt_end = new->pkt_ip_data + (fraglen << 1);

				TAILQ_INSERT_BEFORE(pkt, new, pkt_next);
				p += (fraglen << 1);
			} else
				p += fraglen;

			if ((fraglen = pkt->pkt_end - p) > ip_frag_data.size)
				fraglen = ip_frag_data.size;
		}
		TAILQ_REMOVE(pktq, pkt, pkt_next);
		pkt_free(pkt);
	}
	return (0);
}
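
/*
 * IPv6 variant of the same scheme: each fragment gets its own fragment
 * extension header carrying a per-packet identifier, the fragments are
 * inserted ahead of the original packet, and the original is dropped.
 */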
static int
ip_frag_apply_ipv6(_U_ void *d, struct pktq *pktq)
{
	struct pkt *pkt, *new, *next;
	struct ip6_ext_hdr *ext;
	int hl, fraglen, off;
	u_char *p, *p1, *p2;
	uint8_t next_hdr;

	ip_frag_data.ident++;

	for (pkt = TAILQ_FIRST(pktq); pkt != TAILQ_END(pktq); pkt = next) {
		next = TAILQ_NEXT(pkt, pkt_next);

		if (pkt->pkt_ip == NULL || pkt->pkt_ip_data == NULL)
			continue;

		hl = IP6_HDR_LEN;
		/*
		 * Preserve transport protocol header in first frag,
		 * to bypass filters that block `short' fragments.
		 */
		switch (pkt->pkt_ip6->ip6_nxt) {
		case IP_PROTO_ICMP:
			fraglen = MAX(ICMP_LEN_MIN, ip_frag_data.size);
			break;
		case IP_PROTO_UDP:
			fraglen = MAX(UDP_HDR_LEN, ip_frag_data.size);
			break;
		case IP_PROTO_TCP:
			fraglen = MAX(pkt->pkt_tcp->th_off << 2,
			    ip_frag_data.size);
			break;
		default:
			fraglen = ip_frag_data.size;
			break;
		}
		if (fraglen & 7)
			fraglen = (fraglen & ~7) + 8;

		if (pkt->pkt_end - pkt->pkt_ip_data < fraglen)
			continue;

		next_hdr = pkt->pkt_ip6->ip6_nxt;
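
		/*
		 * Layout of each fragment: Ethernet header, copied IPv6 base
		 * header (next header rewritten to IP_PROTO_FRAGMENT), an
		 * 8-byte fragment extension header carrying next_hdr and the
		 * shared ident, then fraglen bytes of payload.
		 */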
		for (p = pkt->pkt_ip_data; p < pkt->pkt_end; ) {
			new = pkt_new(pkt->pkt_buf_size);
			memcpy(new->pkt_eth, pkt->pkt_eth,
			    (u_char *)pkt->pkt_eth_data - (u_char *)pkt->pkt_eth);
			memcpy(new->pkt_ip, pkt->pkt_ip, hl);

			ext = (struct ip6_ext_hdr *)((u_char *)new->pkt_eth_data + hl);
			new->pkt_ip_data = (u_char *)(ext) + 2 +
			    sizeof(struct ip6_ext_data_fragment);
			new->pkt_ip6->ip6_nxt = IP_PROTO_FRAGMENT;
			ext->ext_nxt = next_hdr;
			ext->ext_len = 0;		/* ip6 frag reserved */
			ext->ext_data.fragment.ident = ip_frag_data.ident;

			p1 = p, p2 = NULL;
			off = (p - pkt->pkt_ip_data) >> 3;

			if (ip_frag_data.overlap != 0 && (off & 1) != 0 &&
			    p + (fraglen << 1) < pkt->pkt_end) {
				struct pkt tmp;
				u_char tmp_buf[pkt->pkt_buf_size];

				tmp.pkt_buf = tmp_buf;
				tmp.pkt_buf_size = pkt->pkt_buf_size;
				rand_strset(ip_frag_data.rnd, tmp.pkt_buf,
				    fraglen);

				if (ip_frag_data.overlap == FAVOR_OLD) {
					p1 = p + fraglen;
					p2 = tmp.pkt_buf;
				} else if (ip_frag_data.overlap == FAVOR_NEW) {
					p1 = tmp.pkt_buf;
					p2 = p + fraglen;
				}
				ext->ext_data.fragment.offlg =
				    htons((off /*+ (fraglen >> 3)*/) << 3) | IP6_MORE_FRAG;
			} else {
				ext->ext_data.fragment.offlg = htons(off << 3) |
				    ((p + fraglen < pkt->pkt_end) ? IP6_MORE_FRAG : 0);
			}
			new->pkt_ip6->ip6_plen = htons(fraglen + 8);

			memcpy(new->pkt_ip_data, p1, fraglen);
			new->pkt_end = new->pkt_ip_data + fraglen;

			TAILQ_INSERT_BEFORE(pkt, new, pkt_next);

			if (p2 != NULL) {
				new = pkt_dup(new);
				new->pkt_ts.tv_usec = 1;
				/* Point ext at the duplicate's own header. */
				ext = (struct ip6_ext_hdr *)((u_char *)new->pkt_eth_data + hl);
				ext->ext_data.fragment.offlg = htons(off << 3) |
				    IP6_MORE_FRAG;
				new->pkt_ip6->ip6_plen = htons((fraglen << 1) + 8);

				memcpy(new->pkt_ip_data, p, fraglen);
				memcpy(new->pkt_ip_data + fraglen, p2, fraglen);
				new->pkt_end = new->pkt_ip_data + (fraglen << 1);

				TAILQ_INSERT_BEFORE(pkt, new, pkt_next);
				p += (fraglen << 1);
			} else {
				p += fraglen;
			}
			if ((fraglen = pkt->pkt_end - p) > ip_frag_data.size)
				fraglen = ip_frag_data.size;
		}
		TAILQ_REMOVE(pktq, pkt, pkt_next);
		pkt_free(pkt);
	}
	return 0;
}

struct mod mod_ip_frag = {
	"ip_frag",			/* name */
	"ip_frag <size> [old|new]",	/* usage */
	ip_frag_open,			/* open */
	ip_frag_apply,			/* apply */
	ip_frag_close			/* close */
};