lwip_ip4_frag.c
/**
 * @file
 * This is the IPv4 packet segmentation and reassembly implementation.
 *
 */

/*
 * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
 * OF SUCH DAMAGE.
 *
 * This file is part of the lwIP TCP/IP stack.
 *
 * Author: Jani Monoses <jani@iv.ro>
 *         Simon Goldschmidt
 * original reassembly code by Adam Dunkels <adam@sics.se>
 *
 */

#include "lwip/opt.h"

#if LWIP_IPV4

#include "lwip/ip4_frag.h"
#include "lwip/def.h"
#include "lwip/inet_chksum.h"
#include "lwip/netif.h"
#include "lwip/stats.h"
#include "lwip/icmp.h"

#include <string.h>

#if IP_REASSEMBLY
/**
 * The IP reassembly code currently has the following limitations:
 * - IP header options are not supported
 * - fragments must not overlap (e.g. due to different routes),
 *   currently, overlapping or duplicate fragments are thrown away
 *   if IP_REASS_CHECK_OVERLAP=1 (the default)!
 *
 * @todo: work with IP header options
 */

/** Setting this to 0, you can turn off checking the fragments for overlapping
 * regions. The code gets a little smaller. Only use this if you know that
 * overlapping won't occur on your network! */
#ifndef IP_REASS_CHECK_OVERLAP
#define IP_REASS_CHECK_OVERLAP 1
#endif /* IP_REASS_CHECK_OVERLAP */

/** Set to 0 to prevent freeing the oldest datagram when the reassembly buffer is
 * full (IP_REASS_MAX_PBUFS pbufs are enqueued). The code gets a little smaller.
 * Datagrams will be freed by timeout only. Especially useful when MEMP_NUM_REASSDATA
 * is set to 1, so one datagram can be reassembled at a time, only. */
#ifndef IP_REASS_FREE_OLDEST
#define IP_REASS_FREE_OLDEST 1
#endif /* IP_REASS_FREE_OLDEST */

#define IP_REASS_FLAG_LASTFRAG 0x01

/** This is a helper struct which holds the starting
 * offset and the ending offset of this fragment to
 * easily chain the fragments.
 * It has the same packing requirements as the IP header, since it replaces
 * the IP header in memory in incoming fragments (after copying it) to keep
 * track of the various fragments. (-> If the IP header doesn't need packing,
 * this struct doesn't need packing, too.)
 */
#ifdef PACK_STRUCT_USE_INCLUDES
#  include "arch/bpstruct.h"
#endif
PACK_STRUCT_BEGIN
struct ip_reass_helper {
  PACK_STRUCT_FIELD(struct pbuf *next_pbuf);
  PACK_STRUCT_FIELD(u16_t start);
  PACK_STRUCT_FIELD(u16_t end);
} PACK_STRUCT_STRUCT;
PACK_STRUCT_END
#ifdef PACK_STRUCT_USE_INCLUDES
#  include "arch/epstruct.h"
#endif

#define IP_ADDRESSES_AND_ID_MATCH(iphdrA, iphdrB)  \
  (ip4_addr_cmp(&(iphdrA)->src, &(iphdrB)->src) && \
   ip4_addr_cmp(&(iphdrA)->dest, &(iphdrB)->dest) && \
   IPH_ID(iphdrA) == IPH_ID(iphdrB)) ? 1 : 0

/* global variables */
static struct ip_reassdata *reassdatagrams;
static u16_t ip_reass_pbufcount;

/* function prototypes */
static void ip_reass_dequeue_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev);
static int ip_reass_free_complete_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev);

/**
 * Reassembly timer base function
 * for both NO_SYS == 0 and 1 (!).
 *
 * Should be called every 1000 msec (defined by IP_TMR_INTERVAL).
 */
void
ip_reass_tmr(void)
{
  struct ip_reassdata *r, *prev = NULL;

  r = reassdatagrams;
  while (r != NULL) {
    /* Decrement the timer. Once it reaches 0,
     * clean up the incomplete fragment assembly */
    if (r->timer > 0) {
      r->timer--;
      LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_reass_tmr: timer dec %"U16_F"\n",(u16_t)r->timer));
      prev = r;
      r = r->next;
    } else {
      /* reassembly timed out */
      struct ip_reassdata *tmp;
      LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_reass_tmr: timer timed out\n"));
      tmp = r;
      /* get the next pointer before freeing */
      r = r->next;
      /* free the helper struct and all enqueued pbufs */
      ip_reass_free_complete_datagram(tmp, prev);
    }
  }
}

/**
 * Free a datagram (struct ip_reassdata) and all its pbufs.
 * Updates the total count of enqueued pbufs (ip_reass_pbufcount),
 * SNMP counters and sends an ICMP time exceeded packet.
 *
 * @param ipr datagram to free
 * @param prev the previous datagram in the linked list
 * @return the number of pbufs freed
 */
static int
ip_reass_free_complete_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev)
{
  u16_t pbufs_freed = 0;
  u8_t clen;
  struct pbuf *p;
  struct ip_reass_helper *iprh;

  LWIP_ASSERT("prev != ipr", prev != ipr);
  if (prev != NULL) {
    LWIP_ASSERT("prev->next == ipr", prev->next == ipr);
  }

  MIB2_STATS_INC(mib2.ipreasmfails);
#if LWIP_ICMP
  iprh = (struct ip_reass_helper *)ipr->p->payload;
  if (iprh->start == 0) {
    /* The first fragment was received, send ICMP time exceeded. */
    /* First, de-queue the first pbuf from r->p. */
    p = ipr->p;
    ipr->p = iprh->next_pbuf;
    /* Then, copy the original header into it. */
    SMEMCPY(p->payload, &ipr->iphdr, IP_HLEN);
    icmp_time_exceeded(p, ICMP_TE_FRAG);
    clen = pbuf_clen(p);
    LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff);
    pbufs_freed += clen;
    pbuf_free(p);
  }
#endif /* LWIP_ICMP */

  /* First, free all received pbufs. The individual pbufs need to be released
     separately as they have not yet been chained */
  p = ipr->p;
  while (p != NULL) {
    struct pbuf *pcur;
    iprh = (struct ip_reass_helper *)p->payload;
    pcur = p;
    /* get the next pointer before freeing */
    p = iprh->next_pbuf;
    clen = pbuf_clen(pcur);
    LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff);
    pbufs_freed += clen;
    pbuf_free(pcur);
  }
  /* Then, unchain the struct ip_reassdata from the list and free it. */
  ip_reass_dequeue_datagram(ipr, prev);
  LWIP_ASSERT("ip_reass_pbufcount >= clen", ip_reass_pbufcount >= pbufs_freed);
  ip_reass_pbufcount -= pbufs_freed;

  return pbufs_freed;
}

#if IP_REASS_FREE_OLDEST
/**
 * Free the oldest datagram to make room for enqueueing new fragments.
 * The datagram 'fraghdr' belongs to is not freed!
 *
 * @param fraghdr IP header of the current fragment
 * @param pbufs_needed number of pbufs needed to enqueue
 *        (used for freeing other datagrams if not enough space)
 * @return the number of pbufs freed
 */
static int
ip_reass_remove_oldest_datagram(struct ip_hdr *fraghdr, int pbufs_needed)
{
  /* @todo Can't we simply remove the last datagram in the
   *       linked list behind reassdatagrams?
   */
  struct ip_reassdata *r, *oldest, *prev, *oldest_prev;
  int pbufs_freed = 0, pbufs_freed_current;
  int other_datagrams;

  /* Free datagrams until being allowed to enqueue 'pbufs_needed' pbufs,
   * but don't free the datagram that 'fraghdr' belongs to! */
  do {
    oldest = NULL;
    prev = NULL;
    oldest_prev = NULL;
    other_datagrams = 0;
    r = reassdatagrams;
    while (r != NULL) {
      if (!IP_ADDRESSES_AND_ID_MATCH(&r->iphdr, fraghdr)) {
        /* Not the same datagram as fraghdr */
        other_datagrams++;
        if (oldest == NULL) {
          oldest = r;
          oldest_prev = prev;
        } else if (r->timer <= oldest->timer) {
          /* older than the previous oldest */
          oldest = r;
          oldest_prev = prev;
        }
      }
      if (r->next != NULL) {
        prev = r;
      }
      r = r->next;
    }
    if (oldest != NULL) {
      pbufs_freed_current = ip_reass_free_complete_datagram(oldest, oldest_prev);
      pbufs_freed += pbufs_freed_current;
    }
  } while ((pbufs_freed < pbufs_needed) && (other_datagrams > 1));
  return pbufs_freed;
}
#endif /* IP_REASS_FREE_OLDEST */

/**
 * Enqueues a new fragment into the fragment queue
 * @param fraghdr points to the new fragments IP hdr
 * @param clen number of pbufs needed to enqueue (used for freeing other datagrams if not enough space)
 * @return A pointer to the queue location into which the fragment was enqueued
 */
static struct ip_reassdata*
ip_reass_enqueue_new_datagram(struct ip_hdr *fraghdr, int clen)
{
  struct ip_reassdata* ipr;
#if ! IP_REASS_FREE_OLDEST
  LWIP_UNUSED_ARG(clen);
#endif

  /* No matching previous fragment found, allocate a new reassdata struct */
  ipr = (struct ip_reassdata *)memp_malloc(MEMP_REASSDATA);
  if (ipr == NULL) {
#if IP_REASS_FREE_OLDEST
    if (ip_reass_remove_oldest_datagram(fraghdr, clen) >= clen) {
      ipr = (struct ip_reassdata *)memp_malloc(MEMP_REASSDATA);
    }
    if (ipr == NULL)
#endif /* IP_REASS_FREE_OLDEST */
    {
      IPFRAG_STATS_INC(ip_frag.memerr);
      LWIP_DEBUGF(IP_REASS_DEBUG,("Failed to alloc reassdata struct\n"));
      return NULL;
    }
  }
  memset(ipr, 0, sizeof(struct ip_reassdata));
  ipr->timer = IP_REASS_MAXAGE;

  /* enqueue the new structure to the front of the list */
  ipr->next = reassdatagrams;
  reassdatagrams = ipr;
  /* copy the ip header for later tests and input */
  /* @todo: no ip options supported? */
  SMEMCPY(&(ipr->iphdr), fraghdr, IP_HLEN);
  return ipr;
}

/**
 * Dequeues a datagram from the datagram queue. Doesn't deallocate the pbufs.
 * @param ipr points to the queue entry to dequeue
 */
static void
ip_reass_dequeue_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev)
{
  /* dequeue the reass struct */
  if (reassdatagrams == ipr) {
    /* it was the first in the list */
    reassdatagrams = ipr->next;
  } else {
    /* it wasn't the first, so it must have a valid 'prev' */
    LWIP_ASSERT("sanity check linked list", prev != NULL);
    prev->next = ipr->next;
  }

  /* now we can free the ip_reassdata struct */
  memp_free(MEMP_REASSDATA, ipr);
}

/**
 * Chain a new pbuf into the pbuf list that composes the datagram.  The pbuf list
 * will grow over time as new pbufs are received.
 * Also checks that the datagram passes basic continuity checks (if the last
 * fragment was received at least once).
 * @param ipr points to the reassembly state holding the 'root' pbuf for the current datagram being assembled.
 * @param new_p points to the pbuf for the current fragment
 * @return 0 if invalid, >0 otherwise
 */
static int
ip_reass_chain_frag_into_datagram_and_validate(struct ip_reassdata *ipr, struct pbuf *new_p)
{
  struct ip_reass_helper *iprh, *iprh_tmp, *iprh_prev=NULL;
  struct pbuf *q;
  u16_t offset,len;
  struct ip_hdr *fraghdr;
  int valid = 1;

  /* Extract length and fragment offset from current fragment */
  fraghdr = (struct ip_hdr*)new_p->payload;
  len = ntohs(IPH_LEN(fraghdr)) - IPH_HL(fraghdr) * 4;
  offset = (ntohs(IPH_OFFSET(fraghdr)) & IP_OFFMASK) * 8;

  /* overwrite the fragment's ip header from the pbuf with our helper struct,
   * and setup the embedded helper structure. */
  /* make sure the struct ip_reass_helper fits into the IP header */
  LWIP_ASSERT("sizeof(struct ip_reass_helper) <= IP_HLEN",
              sizeof(struct ip_reass_helper) <= IP_HLEN);
  iprh = (struct ip_reass_helper*)new_p->payload;
  iprh->next_pbuf = NULL;
  iprh->start = offset;
  iprh->end = offset + len;

  /* Iterate through until we either get to the end of the list (append),
   * or we find one with a larger offset (insert). */
  for (q = ipr->p; q != NULL;) {
    iprh_tmp = (struct ip_reass_helper*)q->payload;
    if (iprh->start < iprh_tmp->start) {
      /* the new pbuf should be inserted before this */
      iprh->next_pbuf = q;
      if (iprh_prev != NULL) {
        /* not the fragment with the lowest offset */
#if IP_REASS_CHECK_OVERLAP
        if ((iprh->start < iprh_prev->end) || (iprh->end > iprh_tmp->start)) {
          /* fragment overlaps with previous or following, throw away */
          goto freepbuf;
        }
#endif /* IP_REASS_CHECK_OVERLAP */
        iprh_prev->next_pbuf = new_p;
      } else {
        /* fragment with the lowest offset */
        ipr->p = new_p;
      }
      break;
    } else if (iprh->start == iprh_tmp->start) {
      /* received the same datagram twice: no need to keep the datagram */
      goto freepbuf;
#if IP_REASS_CHECK_OVERLAP
    } else if (iprh->start < iprh_tmp->end) {
      /* overlap: no need to keep the new datagram */
      goto freepbuf;
#endif /* IP_REASS_CHECK_OVERLAP */
    } else {
      /* Check if the fragments received so far have no holes. */
      if (iprh_prev != NULL) {
        if (iprh_prev->end != iprh_tmp->start) {
          /* There is a fragment missing between the current
           * and the previous fragment */
          valid = 0;
        }
      }
    }
    q = iprh_tmp->next_pbuf;
    iprh_prev = iprh_tmp;
  }

  /* If q is NULL, then we made it to the end of the list. Determine what to do now */
  if (q == NULL) {
    if (iprh_prev != NULL) {
      /* this is (for now), the fragment with the highest offset:
       * chain it to the last fragment */
#if IP_REASS_CHECK_OVERLAP
      LWIP_ASSERT("check fragments don't overlap", iprh_prev->end <= iprh->start);
#endif /* IP_REASS_CHECK_OVERLAP */
      iprh_prev->next_pbuf = new_p;
      if (iprh_prev->end != iprh->start) {
        valid = 0;
      }
    } else {
#if IP_REASS_CHECK_OVERLAP
      LWIP_ASSERT("no previous fragment, this must be the first fragment!",
                  ipr->p == NULL);
#endif /* IP_REASS_CHECK_OVERLAP */
      /* this is the first fragment we ever received for this ip datagram */
      ipr->p = new_p;
    }
  }

  /* At this point, the validation part begins: */
  /* If we already received the last fragment */
  if ((ipr->flags & IP_REASS_FLAG_LASTFRAG) != 0) {
    /* and had no holes so far */
    if (valid) {
      /* then check if the rest of the fragments is here */
      /* Check if the queue starts with the first datagram */
      if ((ipr->p == NULL) || (((struct ip_reass_helper*)ipr->p->payload)->start != 0)) {
        valid = 0;
      } else {
        /* and check that there are no holes after this datagram */
        iprh_prev = iprh;
        q = iprh->next_pbuf;
        while (q != NULL) {
          iprh = (struct ip_reass_helper*)q->payload;
          if (iprh_prev->end != iprh->start) {
            valid = 0;
            break;
          }
          iprh_prev = iprh;
          q = iprh->next_pbuf;
        }
        /* if still valid, all fragments are received
         * (because the MF==0 fragment has already arrived) */
        if (valid) {
          LWIP_ASSERT("sanity check", ipr->p != NULL);
          LWIP_ASSERT("sanity check",
                      ((struct ip_reass_helper*)ipr->p->payload) != iprh);
          LWIP_ASSERT("validate_datagram:next_pbuf!=NULL",
                      iprh->next_pbuf == NULL);
          LWIP_ASSERT("validate_datagram:datagram end!=datagram len",
                      iprh->end == ipr->datagram_len);
        }
      }
    }
    /* If valid is 0 here, there are some fragments missing in the middle
     * (since MF == 0 has already arrived). Such datagrams simply time out if
     * no more fragments are received... */
    return valid;
  }
  /* If we come here, not all fragments were received, yet! */
  return 0; /* not yet valid! */
#if IP_REASS_CHECK_OVERLAP
freepbuf:
  ip_reass_pbufcount -= pbuf_clen(new_p);
  pbuf_free(new_p);
  return 0;
#endif /* IP_REASS_CHECK_OVERLAP */
}

/**
 * Reassembles incoming IP fragments into an IP datagram.
 *
 * @param p points to a pbuf chain of the fragment
 * @return NULL if reassembly is incomplete, else the pbuf chain of the reassembled datagram
 */
struct pbuf *
ip4_reass(struct pbuf *p)
{
  struct pbuf *r;
  struct ip_hdr *fraghdr;
  struct ip_reassdata *ipr;
  struct ip_reass_helper *iprh;
  u16_t offset, len;
  u8_t clen;

  IPFRAG_STATS_INC(ip_frag.recv);
  MIB2_STATS_INC(mib2.ipreasmreqds);

  fraghdr = (struct ip_hdr*)p->payload;

  if ((IPH_HL(fraghdr) * 4) != IP_HLEN) {
    LWIP_DEBUGF(IP_REASS_DEBUG,("ip4_reass: IP options currently not supported!\n"));
    IPFRAG_STATS_INC(ip_frag.err);
    goto nullreturn;
  }

  offset = (ntohs(IPH_OFFSET(fraghdr)) & IP_OFFMASK) * 8;
  len = ntohs(IPH_LEN(fraghdr)) - IPH_HL(fraghdr) * 4;

  /* Check if we are allowed to enqueue more datagrams. */
  clen = pbuf_clen(p);
  if ((ip_reass_pbufcount + clen) > IP_REASS_MAX_PBUFS) {
#if IP_REASS_FREE_OLDEST
    if (!ip_reass_remove_oldest_datagram(fraghdr, clen) ||
        ((ip_reass_pbufcount + clen) > IP_REASS_MAX_PBUFS))
#endif /* IP_REASS_FREE_OLDEST */
    {
      /* No datagram could be freed and still too many pbufs enqueued */
      LWIP_DEBUGF(IP_REASS_DEBUG,("ip4_reass: Overflow condition: pbufct=%d, clen=%d, MAX=%d\n",
                                  ip_reass_pbufcount, clen, IP_REASS_MAX_PBUFS));
      IPFRAG_STATS_INC(ip_frag.memerr);
      /* @todo: send ICMP time exceeded here? */
      /* drop this pbuf */
      goto nullreturn;
    }
  }

  /* Look for the datagram the fragment belongs to in the current datagram queue,
   * remembering the previous in the queue for later dequeueing. */
  for (ipr = reassdatagrams; ipr != NULL; ipr = ipr->next) {
    /* Check if the incoming fragment matches the one currently present
       in the reassembly buffer. If so, we proceed with copying the
       fragment into the buffer. */
    if (IP_ADDRESSES_AND_ID_MATCH(&ipr->iphdr, fraghdr)) {
      LWIP_DEBUGF(IP_REASS_DEBUG, ("ip4_reass: matching previous fragment ID=%"X16_F"\n",
                                   ntohs(IPH_ID(fraghdr))));
      IPFRAG_STATS_INC(ip_frag.cachehit);
      break;
    }
  }

  if (ipr == NULL) {
    /* Enqueue a new datagram into the datagram queue */
    ipr = ip_reass_enqueue_new_datagram(fraghdr, clen);
    /* Bail if unable to enqueue */
    if (ipr == NULL) {
      goto nullreturn;
    }
  } else {
    if (((ntohs(IPH_OFFSET(fraghdr)) & IP_OFFMASK) == 0) &&
        ((ntohs(IPH_OFFSET(&ipr->iphdr)) & IP_OFFMASK) != 0)) {
      /* ipr->iphdr is not the header from the first fragment, but fraghdr is
       * -> copy fraghdr into ipr->iphdr since we want to have the header
       * of the first fragment (for ICMP time exceeded and later, for copying
       * all options, if supported)*/
      SMEMCPY(&ipr->iphdr, fraghdr, IP_HLEN);
    }
  }
  /* Track the number of pbufs currently 'in-flight', in order to limit
     the number of fragments that may be enqueued at any one time */
  ip_reass_pbufcount += clen;

  /* At this point, we have either created a new entry or are pointing
   * to an existing one */

  /* check for 'no more fragments', and update queue entry*/
  if ((IPH_OFFSET(fraghdr) & PP_NTOHS(IP_MF)) == 0) {
    ipr->flags |= IP_REASS_FLAG_LASTFRAG;
    ipr->datagram_len = offset + len;
    LWIP_DEBUGF(IP_REASS_DEBUG,
                ("ip4_reass: last fragment seen, total len %"S16_F"\n",
                 ipr->datagram_len));
  }
  /* find the right place to insert this pbuf */
  /* @todo: trim pbufs if fragments are overlapping */
  if (ip_reass_chain_frag_into_datagram_and_validate(ipr, p)) {
    struct ip_reassdata *ipr_prev;
    /* the totally last fragment (flag more fragments = 0) was received at least
     * once AND all fragments are received */
    ipr->datagram_len += IP_HLEN;

    /* save the second pbuf before copying the header over the pointer */
    r = ((struct ip_reass_helper*)ipr->p->payload)->next_pbuf;

    /* copy the original ip header back to the first pbuf */
    fraghdr = (struct ip_hdr*)(ipr->p->payload);
    SMEMCPY(fraghdr, &ipr->iphdr, IP_HLEN);
    IPH_LEN_SET(fraghdr, htons(ipr->datagram_len));
    IPH_OFFSET_SET(fraghdr, 0);
    IPH_CHKSUM_SET(fraghdr, 0);
    /* @todo: do we need to set/calculate the correct checksum? */
#if CHECKSUM_GEN_IP
    IF__NETIF_CHECKSUM_ENABLED(ip_current_input_netif(), NETIF_CHECKSUM_GEN_IP) {
      IPH_CHKSUM_SET(fraghdr, inet_chksum(fraghdr, IP_HLEN));
    }
#endif /* CHECKSUM_GEN_IP */

    p = ipr->p;

    /* chain together the pbufs contained within the reass_data list. */
    while (r != NULL) {
      iprh = (struct ip_reass_helper*)r->payload;

      /* hide the ip header for every succeeding fragment */
      pbuf_header(r, -IP_HLEN);
      pbuf_cat(p, r);
      r = iprh->next_pbuf;
    }

    /* find the previous entry in the linked list */
    if (ipr == reassdatagrams) {
      ipr_prev = NULL;
    } else {
      for (ipr_prev = reassdatagrams; ipr_prev != NULL; ipr_prev = ipr_prev->next) {
        if (ipr_prev->next == ipr) {
          break;
        }
      }
    }

    /* release the resources allocated for the fragment queue entry */
    ip_reass_dequeue_datagram(ipr, ipr_prev);

    /* and adjust the number of pbufs currently queued for reassembly. */
    ip_reass_pbufcount -= pbuf_clen(p);

    MIB2_STATS_INC(mib2.ipreasmoks);

    /* Return the pbuf chain */
    return p;
  }
  /* the datagram is not (yet?) reassembled completely */
  LWIP_DEBUGF(IP_REASS_DEBUG,("ip_reass_pbufcount: %d out\n", ip_reass_pbufcount));
  return NULL;

nullreturn:
  LWIP_DEBUGF(IP_REASS_DEBUG,("ip4_reass: nullreturn\n"));
  IPFRAG_STATS_INC(ip_frag.drop);
  pbuf_free(p);
  return NULL;
}
#endif /* IP_REASSEMBLY */

#if IP_FRAG
#if IP_FRAG_USES_STATIC_BUF
static LWIP_DECLARE_MEMORY_ALIGNED(buf, IP_FRAG_MAX_MTU);
#else /* IP_FRAG_USES_STATIC_BUF */

#if !LWIP_NETIF_TX_SINGLE_PBUF
/** Allocate a new struct pbuf_custom_ref */
static struct pbuf_custom_ref*
ip_frag_alloc_pbuf_custom_ref(void)
{
  return (struct pbuf_custom_ref*)memp_malloc(MEMP_FRAG_PBUF);
}

/** Free a struct pbuf_custom_ref */
static void
ip_frag_free_pbuf_custom_ref(struct pbuf_custom_ref* p)
{
  LWIP_ASSERT("p != NULL", p != NULL);
  memp_free(MEMP_FRAG_PBUF, p);
}

/** Free-callback function to free a 'struct pbuf_custom_ref', called by
 * pbuf_free. */
static void
ipfrag_free_pbuf_custom(struct pbuf *p)
{
  struct pbuf_custom_ref *pcr = (struct pbuf_custom_ref*)p;
  LWIP_ASSERT("pcr != NULL", pcr != NULL);
  LWIP_ASSERT("pcr == p", (void*)pcr == (void*)p);
  if (pcr->original != NULL) {
    pbuf_free(pcr->original);
  }
  ip_frag_free_pbuf_custom_ref(pcr);
}
#endif /* !LWIP_NETIF_TX_SINGLE_PBUF */
#endif /* IP_FRAG_USES_STATIC_BUF */

/**
 * Fragment an IP datagram if too large for the netif.
 *
 * Chop the datagram in MTU sized chunks and send them in order
 * by using a fixed size static memory buffer (PBUF_REF) or
 * point PBUF_REFs into p (depending on IP_FRAG_USES_STATIC_BUF).
 *
 * @param p ip packet to send
 * @param netif the netif on which to send
 * @param dest destination ip address to which to send
 *
 * @return ERR_OK if sent successfully, err_t otherwise
 */
err_t
ip4_frag(struct pbuf *p, struct netif *netif, const ip4_addr_t *dest)
{
  struct pbuf *rambuf;
#if IP_FRAG_USES_STATIC_BUF
  struct pbuf *header;
#else
#if !LWIP_NETIF_TX_SINGLE_PBUF
  struct pbuf *newpbuf;
#endif
  struct ip_hdr *original_iphdr;
#endif
  struct ip_hdr *iphdr;
  u16_t nfb;
  u16_t left, cop;
  u16_t mtu = netif->mtu;
  u16_t ofo, omf;
  u16_t last;
  u16_t poff = IP_HLEN;
  u16_t tmp;
#if !IP_FRAG_USES_STATIC_BUF && !LWIP_NETIF_TX_SINGLE_PBUF
  u16_t newpbuflen = 0;
  u16_t left_to_copy;
#endif

  /* Get a RAM based MTU sized pbuf */
#if IP_FRAG_USES_STATIC_BUF
  /* When using a static buffer, we use a PBUF_REF, which we will
   * use to reference the packet (without link header).
   * Layer and length are irrelevant.
   */
  rambuf = pbuf_alloc(PBUF_LINK, 0, PBUF_REF);
  if (rambuf == NULL) {
    LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_frag: pbuf_alloc(PBUF_LINK, 0, PBUF_REF) failed\n"));
    goto memerr;
  }
  rambuf->tot_len = rambuf->len = mtu;
  rambuf->payload = LWIP_MEM_ALIGN((void *)buf);

  /* Copy the IP header in it */
  iphdr = (struct ip_hdr *)rambuf->payload;
  SMEMCPY(iphdr, p->payload, IP_HLEN);
#else /* IP_FRAG_USES_STATIC_BUF */
  original_iphdr = (struct ip_hdr *)p->payload;
  iphdr = original_iphdr;
#endif /* IP_FRAG_USES_STATIC_BUF */

  /* Save original offset */
  tmp = ntohs(IPH_OFFSET(iphdr));
  ofo = tmp & IP_OFFMASK;
  omf = tmp & IP_MF;

  left = p->tot_len - IP_HLEN;

  nfb = (mtu - IP_HLEN) / 8;

  while (left) {
    last = (left <= mtu - IP_HLEN);

    /* Set new offset and MF flag */
    tmp = omf | (IP_OFFMASK & (ofo));
    if (!last) {
      tmp = tmp | IP_MF;
    }

    /* Fill this fragment */
    cop = last ? left : nfb * 8;

#if IP_FRAG_USES_STATIC_BUF
    poff += pbuf_copy_partial(p, (u8_t*)iphdr + IP_HLEN, cop, poff);
#else /* IP_FRAG_USES_STATIC_BUF */
#if LWIP_NETIF_TX_SINGLE_PBUF
    rambuf = pbuf_alloc(PBUF_IP, cop, PBUF_RAM);
    if (rambuf == NULL) {
      goto memerr;
    }
    LWIP_ASSERT("this needs a pbuf in one piece!",
                (rambuf->len == rambuf->tot_len) && (rambuf->next == NULL));
    poff += pbuf_copy_partial(p, rambuf->payload, cop, poff);
    /* make room for the IP header */
    if (pbuf_header(rambuf, IP_HLEN)) {
      pbuf_free(rambuf);
      goto memerr;
    }
    /* fill in the IP header */
    SMEMCPY(rambuf->payload, original_iphdr, IP_HLEN);
    iphdr = (struct ip_hdr*)rambuf->payload;
#else /* LWIP_NETIF_TX_SINGLE_PBUF */
    /* When not using a static buffer, create a chain of pbufs.
     * The first will be a PBUF_RAM holding the link and IP header.
     * The rest will be PBUF_REFs mirroring the pbuf chain to be fragged,
     * but limited to the size of an mtu.
     */
    rambuf = pbuf_alloc(PBUF_LINK, IP_HLEN, PBUF_RAM);
    if (rambuf == NULL) {
      goto memerr;
    }
    LWIP_ASSERT("this needs a pbuf in one piece!",
                (p->len >= (IP_HLEN)));
    SMEMCPY(rambuf->payload, original_iphdr, IP_HLEN);
    iphdr = (struct ip_hdr *)rambuf->payload;

    /* Can just adjust p directly for needed offset. */
    p->payload = (u8_t *)p->payload + poff;
    p->len -= poff;

    left_to_copy = cop;
    while (left_to_copy) {
      struct pbuf_custom_ref *pcr;
      newpbuflen = (left_to_copy < p->len) ? left_to_copy : p->len;
      /* Is this pbuf already empty? */
      if (!newpbuflen) {
        p = p->next;
        continue;
      }
      pcr = ip_frag_alloc_pbuf_custom_ref();
      if (pcr == NULL) {
        pbuf_free(rambuf);
        goto memerr;
      }
      /* Mirror this pbuf, although we might not need all of it. */
      newpbuf = pbuf_alloced_custom(PBUF_RAW, newpbuflen, PBUF_REF, &pcr->pc, p->payload, newpbuflen);
      if (newpbuf == NULL) {
        ip_frag_free_pbuf_custom_ref(pcr);
        pbuf_free(rambuf);
        goto memerr;
      }
      pbuf_ref(p);
      pcr->original = p;
      pcr->pc.custom_free_function = ipfrag_free_pbuf_custom;

      /* Add it to end of rambuf's chain, but using pbuf_cat, not pbuf_chain
       * so that it is removed when pbuf_dechain is later called on rambuf.
       */
      pbuf_cat(rambuf, newpbuf);
      left_to_copy -= newpbuflen;
      if (left_to_copy) {
        p = p->next;
      }
    }
    poff = newpbuflen;
#endif /* LWIP_NETIF_TX_SINGLE_PBUF */
#endif /* IP_FRAG_USES_STATIC_BUF */

    /* Correct header */
    IPH_OFFSET_SET(iphdr, htons(tmp));
    IPH_LEN_SET(iphdr, htons(cop + IP_HLEN));
    IPH_CHKSUM_SET(iphdr, 0);
#if CHECKSUM_GEN_IP
    IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_IP) {
      IPH_CHKSUM_SET(iphdr, inet_chksum(iphdr, IP_HLEN));
    }
#endif /* CHECKSUM_GEN_IP */

#if IP_FRAG_USES_STATIC_BUF
    if (last) {
      pbuf_realloc(rambuf, left + IP_HLEN);
    }

    /* This part is ugly: we alloc a RAM based pbuf for
     * the link level header for each chunk and then
     * free it. A PBUF_ROM style pbuf for which pbuf_header
     * worked would make things simpler.
     */
    header = pbuf_alloc(PBUF_LINK, 0, PBUF_RAM);
    if (header != NULL) {
      pbuf_chain(header, rambuf);
      netif->output(netif, header, dest);
      IPFRAG_STATS_INC(ip_frag.xmit);
      MIB2_STATS_INC(mib2.ipfragcreates);
      pbuf_free(header);
    } else {
      LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_frag: pbuf_alloc() for header failed\n"));
      pbuf_free(rambuf);
      goto memerr;
    }
#else /* IP_FRAG_USES_STATIC_BUF */
    /* No need for separate header pbuf - we allowed room for it in rambuf
     * when allocated.
     */
    netif->output(netif, rambuf, dest);
    IPFRAG_STATS_INC(ip_frag.xmit);

    /* Unfortunately we can't reuse rambuf - the hardware may still be
     * using the buffer. Instead we free it (and the ensuing chain) and
     * recreate it next time round the loop. If we're lucky the hardware
     * will have already sent the packet, the free will really free, and
     * there will be zero memory penalty.
     */

    pbuf_free(rambuf);
#endif /* IP_FRAG_USES_STATIC_BUF */
    left -= cop;
    ofo += nfb;
  }
#if IP_FRAG_USES_STATIC_BUF
  pbuf_free(rambuf);
#endif /* IP_FRAG_USES_STATIC_BUF */
  MIB2_STATS_INC(mib2.ipfragoks);
  return ERR_OK;
memerr:
  MIB2_STATS_INC(mib2.ipfragfails);
  return ERR_MEM;
}
#endif /* IP_FRAG */

#endif /* LWIP_IPV4 */
