Important changes to repositories hosted on mbed.com
Mbed hosted mercurial repositories are deprecated and are due to be permanently deleted in July 2026.
To keep a copy of this software, download the repository Zip archive or clone it locally using Mercurial.
It is also possible to export all your personal repositories from the account settings page.
Dependents: TYBLE16_simple_data_logger TYBLE16_MP3_Air
adaptation_interface.c
/*
 * Copyright (c) 2016-2019, Arm Limited and affiliates.
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "nsconfig.h"
#include "ns_types.h"
#include "eventOS_event.h"
#include "string.h"
#include "ns_trace.h"
#include "ns_list.h"
#include "randLIB.h"
#include "nsdynmemLIB.h"
#include "Core/include/ns_address_internal.h"
#include "Core/include/ns_socket.h"
#include "mac_api.h"
#include "mac_mcps.h"
#include "mac_common_defines.h"
#include "common_functions.h"
#include "NWK_INTERFACE/Include/protocol.h"
#include "NWK_INTERFACE/Include/protocol_stats.h"
#include "6LoWPAN/IPHC_Decode/cipv6.h"
#include "NWK_INTERFACE/Include/protocol_timer.h"
#include "Service_Libs/etx/etx.h"
#include "6LoWPAN/MAC/mac_helper.h"
#include "6LoWPAN/MAC/mpx_api.h"
#include "6LoWPAN/Mesh/mesh.h"
#include "6LoWPAN/IPHC_Decode/iphc_decompress.h"
#include "lowpan_adaptation_interface.h"
#include "MLE/mle.h"
#include "Service_Libs/mle_service/mle_service_api.h"
#include "Common_Protocols/icmpv6.h"
#ifdef HAVE_RPL
#include "RPL/rpl_data.h"
#endif
#include "Service_Libs/mac_neighbor_table/mac_neighbor_table.h"
#include "6LoWPAN/Thread/thread_common.h"
#include "6LoWPAN/ws/ws_common.h"

#define TRACE_GROUP "6lAd"

/* Callback type used to feed MAC data-confirm results into the ETX
 * (Expected Transmission count) bookkeeping. Installed per interface via
 * lowpan_adaptation_interface_etx_update_enable(). */
typedef void (adaptation_etx_update_cb)(protocol_interface_info_entry_t *cur, buffer_t *buf, const mcps_data_conf_t *confirm);

// #define EXTRA_DEBUG_EXTRA
#ifdef EXTRA_DEBUG_EXTRA
#define tr_debug_extra(...) tr_debug(__VA_ARGS__)
#else
#define tr_debug_extra(...)
#endif

/* One in-flight TX process: either an "active" direct unicast/broadcast
 * transmission or a queued indirect (sleepy-child) transmission, together
 * with the 6LoWPAN fragmentation state for the datagram it carries. */
typedef struct {
    uint16_t tag;        /*!< Fragmentation datagram TAG ID */
    uint16_t size;       /*!< Datagram Total Size (uncompressed) */
    uint16_t orig_size;  /*!< Datagram Original Size (compressed) */
    uint16_t frag_max;   /*!< Maximum fragment size (MAC payload) */
    uint16_t offset;     /*!< Data offset from datagram start */
    int16_t pattern;     /*!< Size of compressed LoWPAN headers */
    uint16_t unfrag_ptr; /*!< Offset within buf of headers that precede the FRAG header */
    uint16_t frag_len;   /*!< Compressed payload bytes carried by the current fragment */
    uint8_t unfrag_len;  /*!< Length of headers that precede the FRAG header */
    bool fragmented_data: 1; /*!< True when this TX is split over FRAG1/FRAGN fragments */
    bool first_fragment: 1;  /*!< True until the FRAG1 fragment has been sent */
    bool indirect_data: 1;   /*!< True when this entry belongs to the indirect TX queue */
    bool indirect_data_cached: 1; /*!< Data cached for delayed transmission as mac request is already active */
    buffer_t *buf;           /*!< The packet being transmitted (owned by this entry) */
    uint8_t *fragmenter_buf; /*!< Scratch buffer for building fragment MSDUs */
    ns_list_link_t link;     /*!< List link entry */
} fragmenter_tx_entry_t;


typedef NS_LIST_HEAD(fragmenter_tx_entry_t, link) fragmenter_tx_list_t;

/* Per-interface adaptation-layer state; one entry per registered interface,
 * kept in fragmenter_interface_list. */
typedef struct {
    int8_t interface_id;          /*!< Network interface this state belongs to */
    uint16_t local_frag_tag;      /*!< Next 6LoWPAN fragmentation datagram tag */
    uint8_t msduHandle;           /*!< Next candidate MAC MSDU handle */
    fragmenter_tx_list_t indirect_tx_queue; /*!< Pending indirect (polled) transmissions */
    uint8_t *fragment_indirect_tx_buffer; //Used for write fragmentation header
    uint16_t mtu_size;            /*!< MAC MTU given at init; sizes fragment buffers */
    fragmenter_tx_entry_t active_unicast_tx_buf; //Current active direct unicast tx process
    fragmenter_tx_entry_t active_broadcast_tx_buf; //Current active direct broadcast tx process
    buffer_list_t directTxQueue; //Waiting free tx process
    uint16_t directTxQueue_size;  /*!< Number of buffers in directTxQueue (mirrored to stats) */
    uint16_t indirect_big_packet_threshold;       /*!< Payload size boundary between "small" and "big" indirect packets */
    uint16_t max_indirect_big_packets_total;      /*!< Queue cap for big indirect packets */
    uint16_t max_indirect_small_packets_per_child; /*!< Per-child queue cap for small indirect packets (0 = unlimited) */
    bool fragmenter_active; /*!< Fragmenter state */
    adaptation_etx_update_cb *etx_update_cb; /*!< Optional ETX statistics hook; NULL when disabled */
    mpx_api_t *mpx_api;       /*!< Optional MPX layer; when set, TX/RX go through it */
    uint16_t mpx_user_id;     /*!< User id registered with the MPX layer */
    ns_list_link_t link;      /*!< List link entry */
} fragmenter_interface_t;

static NS_LIST_DEFINE(fragmenter_interface_list, fragmenter_interface_t, link);

/* Adaptation interface local functions */
static fragmenter_interface_t *lowpan_adaptation_interface_discover(int8_t interfaceId);

/* Interface direct message pending queue functions */
static void lowpan_adaptation_tx_queue_write(fragmenter_interface_t *interface_ptr, buffer_t *buf);
static buffer_t *lowpan_adaptation_tx_queue_read(fragmenter_interface_t *interface_ptr, protocol_interface_info_entry_t *cur);

/* Data direction and message length validation */
static bool lowpan_adaptation_indirect_data_request(mac_neighbor_table_entry_t *mle_entry);
static bool lowpan_adaptation_request_longer_than_mtu(protocol_interface_info_entry_t *cur, buffer_t *buf, fragmenter_interface_t *interface_ptr);

/* Common data tx request process functions */
static void lowpan_active_buffer_state_reset(fragmenter_tx_entry_t *tx_buffer);
static uint8_t lowpan_data_request_unique_handle_get(fragmenter_interface_t *interface_ptr);
static fragmenter_tx_entry_t *lowpan_indirect_entry_allocate(uint16_t fragment_buffer_size);
static fragmenter_tx_entry_t *lowpan_adaptation_tx_process_init(fragmenter_interface_t *interface_ptr, bool indirect, bool fragmented, bool is_unicast);
static void lowpan_adaptation_data_request_primitiv_set(const buffer_t *buf, mcps_data_req_t *dataReq, protocol_interface_info_entry_t *cur);
static void lowpan_data_request_to_mac(protocol_interface_info_entry_t *cur, buffer_t *buf, fragmenter_tx_entry_t *tx_ptr, fragmenter_interface_t *interface_ptr);

/* Tx confirmation local functions */
static bool lowpan_active_tx_handle_verify(uint8_t handle, buffer_t *buf);
static fragmenter_tx_entry_t *lowpan_indirect_tx_handle_verify(uint8_t handle, fragmenter_tx_list_t *indirect_tx_queue);
static void lowpan_adaptation_data_process_clean(fragmenter_interface_t *interface_ptr, fragmenter_tx_entry_t *tx_ptr, uint8_t socket_event);
static uint8_t map_mlme_status_to_socket_event(uint8_t mlme_status);
static bool lowpan_adaptation_tx_process_ready(fragmenter_tx_entry_t *tx_ptr);

/* Fragmentation local functions */
static int8_t lowpan_message_fragmentation_init(buffer_t *buf, fragmenter_tx_entry_t *frag_entry, protocol_interface_info_entry_t *cur, fragmenter_interface_t *interface_ptr);
static bool lowpan_message_fragmentation_message_write(const fragmenter_tx_entry_t *frag_entry, mcps_data_req_t *dataReq);
static bool lowpan_adaptation_indirect_queue_free_message(struct protocol_interface_info_entry *cur, fragmenter_interface_t *interface_ptr, fragmenter_tx_entry_t *tx_ptr);

static fragmenter_tx_entry_t *lowpan_adaptation_indirect_mac_data_request_active(fragmenter_interface_t *interface_ptr, fragmenter_tx_entry_t *tx_ptr);

/* Default ETX update hook: on a MAC data confirm for an acked unicast frame
 * (while MLE bootstrap is active), record the transmission attempt count and
 * success/failure for the destination neighbour, and push the neighbour's
 * current ETX value into the parent statistics counters. */
static void lowpan_adaptation_etx_update_cb(protocol_interface_info_entry_t *cur, buffer_t *buf, const mcps_data_conf_t *confirm)
{
    switch (confirm->status) {
        case MLME_TX_NO_ACK:
        case MLME_NO_DATA:
        case MLME_SUCCESS:
            if (buf->link_specific.ieee802_15_4.requestAck) {
                if (cur->lowpan_info & INTERFACE_NWK_BOOTSRAP_MLE) {
                    // Only MLME_SUCCESS counts as a successful attempt
                    bool success = false;
                    if (confirm->status == MLME_SUCCESS) {
                        success = true;
                    }
                    // Gets table entry; dst_sa.address holds PAN id first, then the MAC address
                    mac_neighbor_table_entry_t *neigh_table_ptr = mac_neighbor_table_address_discover(mac_neighbor_info(cur), buf->dst_sa.address + PAN_ID_LEN, buf->dst_sa.addr_type);
                    if (neigh_table_ptr) {
                        // tx_retries excludes the first attempt, hence 1 + retries
                        etx_transm_attempts_update(cur->id, 1 + confirm->tx_retries, success, neigh_table_ptr->index);
                        // Updates ETX statistics
                        etx_storage_t *etx_entry = etx_storage_entry_get(cur->id, neigh_table_ptr->index);
                        if (etx_entry) {
                            if (neigh_table_ptr->link_role == PRIORITY_PARENT_NEIGHBOUR) {
                                protocol_stats_update(STATS_ETX_1ST_PARENT, etx_entry->etx >> 4);
                            } else if (neigh_table_ptr->link_role == SECONDARY_PARENT_NEIGHBOUR) {
                                protocol_stats_update(STATS_ETX_2ND_PARENT, etx_entry->etx >> 4);
                            }
                        }
                    }
                }
            }
            break;
        default:
            // Other statuses carry no ETX information
            break;

    }
}


//Discover
/* Find the adaptation-layer state for the given interface id, or NULL. */
static fragmenter_interface_t *lowpan_adaptation_interface_discover(int8_t interfaceId)
{

    ns_list_foreach(fragmenter_interface_t, interface_ptr, &fragmenter_interface_list) {
        if (interfaceId == interface_ptr->interface_id) {
            return interface_ptr;
        }
    }

    return NULL;
}

/* Reverse lookup: map an MPX API instance back to its network interface
 * (used by the MPX confirm/indication callbacks). Returns NULL if unknown. */
static struct protocol_interface_info_entry *lowpan_adaptation_network_interface_discover(const mpx_api_t *api)
{

    ns_list_foreach(fragmenter_interface_t, interface_ptr, &fragmenter_interface_list) {
        if (api == interface_ptr->mpx_api) {
            return protocol_stack_interface_info_get_by_id(interface_ptr->interface_id);
        }
    }

    return NULL;
}


/* Insert buf into the direct TX queue, keeping it ordered by descending
 * buffer priority (new buffer goes before the first entry with a strictly
 * lower priority, i.e. after its equal-priority peers). */
static void lowpan_adaptation_tx_queue_write(fragmenter_interface_t *interface_ptr, buffer_t *buf)
{
    buffer_t *lower_priority_buf = NULL;

    ns_list_foreach(buffer_t, cur, &interface_ptr->directTxQueue) {
        if (cur->priority < buf->priority) {
            lower_priority_buf = cur;
            break;
        }
    }

    if (lower_priority_buf) {
        ns_list_add_before(&interface_ptr->directTxQueue, lower_priority_buf, buf);
    } else {
        ns_list_add_to_end(&interface_ptr->directTxQueue, buf);
    }
    interface_ptr->directTxQueue_size++;
    protocol_stats_update(STATS_AL_TX_QUEUE_SIZE, interface_ptr->directTxQueue_size);
}

/* Pop the next sendable buffer from the direct TX queue, or NULL.
 * A buffer is skipped while the matching active slot (unicast for acked
 * traffic, broadcast otherwise) is busy, or when it would need
 * fragmentation while a fragmentation process is already running. */
static buffer_t *lowpan_adaptation_tx_queue_read(fragmenter_interface_t *interface_ptr, protocol_interface_info_entry_t *cur)
{
    /* Currently this function is called only when data confirm is received for previously sent packet.
     * Data confirm has freed the corresponding "active buffer" and this function will look for new buffer to be set as active buffer.
     */
    ns_list_foreach_safe(buffer_t, buf, &interface_ptr->directTxQueue) {
        bool fragmented_needed = lowpan_adaptation_request_longer_than_mtu(cur, buf, interface_ptr);
        //Check that we not trig second active fragmentation process
        if (fragmented_needed && interface_ptr->fragmenter_active) {
            tr_debug("Do not trig Second active fragmentation");
        } else if ((buf->link_specific.ieee802_15_4.requestAck && !interface_ptr->active_unicast_tx_buf.buf)
                   || (!buf->link_specific.ieee802_15_4.requestAck && !interface_ptr->active_broadcast_tx_buf.buf)) {
            ns_list_remove(&interface_ptr->directTxQueue, buf);
            interface_ptr->directTxQueue_size--;
            protocol_stats_update(STATS_AL_TX_QUEUE_SIZE, interface_ptr->directTxQueue_size);
            return buf;
        }
    }
    return NULL;
}

//fragmentation needed

/* Return true when buf's payload exceeds the MAC payload that remains after
 * the frame overhead (plus MPX headroom, when an MPX layer is registered),
 * i.e. when 6LoWPAN fragmentation would be required. */
static bool lowpan_adaptation_request_longer_than_mtu(protocol_interface_info_entry_t *cur, buffer_t *buf, fragmenter_interface_t *interface_ptr)
{
    uint_fast16_t overhead = mac_helper_frame_overhead(cur, buf);

    if (interface_ptr->mpx_api) {
        overhead += interface_ptr->mpx_api->mpx_headroom_size_get(interface_ptr->mpx_api, interface_ptr->mpx_user_id);
    }


    if (buffer_data_length(buf) > (int16_t)mac_helper_max_payload_size(cur, overhead)) {
        return true;
    } else {
        return false;
    }
}

/* Indirect (polled) transmission is used for known neighbours whose radio
 * is not on while idle, i.e. sleepy children that poll for pending data. */
static bool lowpan_adaptation_indirect_data_request(mac_neighbor_table_entry_t *entry_ptr)
{
    if (entry_ptr && !(entry_ptr->rx_on_idle)) {
        return true;
    }
    return false;
}


/* Release the buffer held by an active TX slot (if any) and reset its
 * fragmentation flags to the "fresh datagram" state. */
static void lowpan_active_buffer_state_reset(fragmenter_tx_entry_t *tx_buffer)
{
    if (tx_buffer->buf) {
        buffer_free(tx_buffer->buf);
        tx_buffer->buf = NULL;
    }
    tx_buffer->fragmented_data = false;
    tx_buffer->first_fragment = true;
}

/* True when buf exists and its sequence number (used as the MAC MSDU
 * handle) matches the given handle. */
static bool lowpan_active_tx_handle_verify(uint8_t handle, buffer_t *buf)
{

    if (buf && buf->seq == handle) {
        return true;
    }


    return false;
}



/* Find the indirect-queue entry whose buffer carries the given MSDU
 * handle, or NULL when no queued entry uses it. */
static fragmenter_tx_entry_t *lowpan_indirect_tx_handle_verify(uint8_t handle, fragmenter_tx_list_t *indirect_tx_queue)
{
    ns_list_foreach(fragmenter_tx_entry_t, entry, indirect_tx_queue) {
        if (entry->buf->seq == handle) {
            return entry;
        }
    }
    return NULL;
}



/* Allocate the next MSDU handle that is not in use by either active TX
 * slot or any indirect-queue entry. NOTE(review): loops forever if all 256
 * handle values are simultaneously in use — presumably queue sizes keep
 * that impossible in practice. */
static uint8_t lowpan_data_request_unique_handle_get(fragmenter_interface_t *interface_ptr)
{
    bool valid_info = false;
    uint8_t handle;
    while (!valid_info) {
        handle = interface_ptr->msduHandle++;
        if (!lowpan_active_tx_handle_verify(handle, interface_ptr->active_unicast_tx_buf.buf)
                && !lowpan_active_tx_handle_verify(handle, interface_ptr->active_broadcast_tx_buf.buf)
                && !lowpan_indirect_tx_handle_verify(handle, &interface_ptr->indirect_tx_queue)) {
            valid_info = true;
        }
    }
    return handle;

}

/* Unlink an indirect TX entry from its list and free everything it owns:
 * the packet buffer (if still attached), the fragment scratch buffer and
 * the entry itself. */
static void lowpan_indirect_entry_free(fragmenter_tx_list_t *list, fragmenter_tx_entry_t *entry)
{
    ns_list_remove(list, entry);
    if (entry->buf) {
        buffer_free(entry->buf);
    }
    ns_dyn_mem_free(entry->fragmenter_buf);
    ns_dyn_mem_free(entry);
}

static void lowpan_indirect_queue_free(fragmenter_tx_list_t *list) 00334 { 00335 while (!ns_list_is_empty(list)) { 00336 fragmenter_tx_entry_t *entry = ns_list_get_first(list); 00337 lowpan_indirect_entry_free(list, entry); 00338 } 00339 } 00340 00341 00342 int8_t lowpan_adaptation_interface_init(int8_t interface_id, uint16_t mac_mtu_size) 00343 { 00344 if (mac_mtu_size == 0) { 00345 return -2; 00346 } 00347 //Remove old interface 00348 lowpan_adaptation_interface_free(interface_id); 00349 00350 //Allocate new 00351 fragmenter_interface_t *interface_ptr = ns_dyn_mem_alloc(sizeof(fragmenter_interface_t)); 00352 uint8_t *tx_buffer = ns_dyn_mem_alloc(mac_mtu_size); 00353 if (!interface_ptr || !tx_buffer) { 00354 ns_dyn_mem_free(interface_ptr); 00355 ns_dyn_mem_free(tx_buffer); 00356 return -1; 00357 } 00358 00359 memset(interface_ptr, 0, sizeof(fragmenter_interface_t)); 00360 interface_ptr->interface_id = interface_id; 00361 interface_ptr->fragment_indirect_tx_buffer = tx_buffer; 00362 interface_ptr->mtu_size = mac_mtu_size; 00363 interface_ptr->msduHandle = randLIB_get_8bit(); 00364 interface_ptr->local_frag_tag = randLIB_get_16bit(); 00365 00366 ns_list_init(&interface_ptr->indirect_tx_queue); 00367 ns_list_init(&interface_ptr->directTxQueue); 00368 00369 ns_list_add_to_end(&fragmenter_interface_list, interface_ptr); 00370 00371 return 0; 00372 } 00373 00374 void lowpan_adaptation_interface_etx_update_enable(int8_t interface_id) 00375 { 00376 fragmenter_interface_t *interface_ptr = lowpan_adaptation_interface_discover(interface_id); 00377 if (interface_ptr) { 00378 interface_ptr->etx_update_cb = lowpan_adaptation_etx_update_cb; 00379 } 00380 } 00381 00382 int8_t lowpan_adaptation_interface_free(int8_t interface_id) 00383 { 00384 //Discover 00385 fragmenter_interface_t *interface_ptr = lowpan_adaptation_interface_discover(interface_id); 00386 if (!interface_ptr) { 00387 return -1; 00388 } 00389 00390 ns_list_remove(&fragmenter_interface_list, interface_ptr); 00391 
//free active tx process 00392 lowpan_active_buffer_state_reset(&interface_ptr->active_unicast_tx_buf); 00393 lowpan_active_buffer_state_reset(&interface_ptr->active_broadcast_tx_buf); 00394 00395 //Free Indirect entry 00396 lowpan_indirect_queue_free(&interface_ptr->indirect_tx_queue); 00397 00398 buffer_free_list(&interface_ptr->directTxQueue); 00399 00400 //Free Dynamic allocated entries 00401 ns_dyn_mem_free(interface_ptr->fragment_indirect_tx_buffer); 00402 ns_dyn_mem_free(interface_ptr); 00403 00404 return 0; 00405 } 00406 00407 00408 int8_t lowpan_adaptation_interface_reset(int8_t interface_id) 00409 { 00410 //Discover 00411 fragmenter_interface_t *interface_ptr = lowpan_adaptation_interface_discover(interface_id); 00412 if (!interface_ptr) { 00413 return -1; 00414 } 00415 00416 //free active tx process 00417 lowpan_active_buffer_state_reset(&interface_ptr->active_unicast_tx_buf); 00418 lowpan_active_buffer_state_reset(&interface_ptr->active_broadcast_tx_buf); 00419 //Clean fragmented message flag 00420 interface_ptr->fragmenter_active = false; 00421 00422 //Free Indirect entry 00423 lowpan_indirect_queue_free(&interface_ptr->indirect_tx_queue); 00424 00425 buffer_free_list(&interface_ptr->directTxQueue); 00426 00427 return 0; 00428 } 00429 00430 static void lowpan_adaptation_mpx_data_confirm(const mpx_api_t *api, const struct mcps_data_conf_s *data) 00431 { 00432 protocol_interface_info_entry_t *interface = lowpan_adaptation_network_interface_discover(api); 00433 00434 lowpan_adaptation_interface_tx_confirm(interface, data); 00435 } 00436 00437 static void lowpan_adaptation_mpx_data_indication(const mpx_api_t *api, const struct mcps_data_ind_s *data) 00438 { 00439 protocol_interface_info_entry_t *interface = lowpan_adaptation_network_interface_discover(api); 00440 lowpan_adaptation_interface_data_ind(interface, data); 00441 } 00442 00443 00444 00445 00446 int8_t lowpan_adaptation_interface_mpx_register(int8_t interface_id, struct mpx_api_s *mpx_api, 
uint16_t mpx_user_id) 00447 { 00448 //Discover 00449 fragmenter_interface_t *interface_ptr = lowpan_adaptation_interface_discover(interface_id); 00450 if (!interface_ptr) { 00451 return -1; 00452 } 00453 if (!mpx_api && interface_ptr->mpx_api) { 00454 //Disable Data Callbacks from MPX Class 00455 interface_ptr->mpx_api->mpx_user_registration(interface_ptr->mpx_api, NULL, NULL, interface_ptr->mpx_user_id); 00456 } 00457 00458 interface_ptr->mpx_api = mpx_api; 00459 interface_ptr->mpx_user_id = mpx_user_id; 00460 00461 if (interface_ptr->mpx_api) { 00462 //Register MPX callbacks: confirmation and indication 00463 interface_ptr->mpx_api->mpx_user_registration(interface_ptr->mpx_api, lowpan_adaptation_mpx_data_confirm, lowpan_adaptation_mpx_data_indication, interface_ptr->mpx_user_id); 00464 } 00465 return 0; 00466 } 00467 00468 00469 static fragmenter_tx_entry_t *lowpan_indirect_entry_allocate(uint16_t fragment_buffer_size) 00470 { 00471 fragmenter_tx_entry_t *indirec_entry = ns_dyn_mem_temporary_alloc(sizeof(fragmenter_tx_entry_t)); 00472 if (!indirec_entry) { 00473 return NULL; 00474 } 00475 00476 if (fragment_buffer_size) { 00477 indirec_entry->fragmenter_buf = ns_dyn_mem_temporary_alloc(fragment_buffer_size); 00478 if (!indirec_entry->fragmenter_buf) { 00479 ns_dyn_mem_free(indirec_entry); 00480 return NULL; 00481 } 00482 } else { 00483 indirec_entry->fragmenter_buf = NULL; 00484 } 00485 00486 00487 indirec_entry->buf = NULL; 00488 indirec_entry->fragmented_data = false; 00489 indirec_entry->first_fragment = true; 00490 indirec_entry->indirect_data_cached = false; 00491 00492 return indirec_entry; 00493 } 00494 00495 static int8_t lowpan_message_fragmentation_init(buffer_t *buf, fragmenter_tx_entry_t *frag_entry, protocol_interface_info_entry_t *cur, fragmenter_interface_t *interface_ptr) 00496 { 00497 uint8_t *ptr; 00498 uint16_t uncompressed_size; 00499 00500 /* Look for pre-fragmentation headers - strip off and store away */ 00501 frag_entry->unfrag_ptr = 
buf->buf_ptr ; 00502 frag_entry->unfrag_len = 0; 00503 ptr = buffer_data_pointer(buf); 00504 00505 if ((ptr[0] & LOWPAN_MESH_MASK) == LOWPAN_MESH) { 00506 uint_fast8_t size = mesh_header_len_from_type_byte(ptr[0]); 00507 ptr += size; 00508 buf->buf_ptr += size; 00509 } 00510 00511 if (ptr[0] == LOWPAN_DISPATCH_BC0) { 00512 ptr += 2; 00513 buf->buf_ptr += 2; 00514 } 00515 00516 frag_entry->unfrag_len = buf->buf_ptr - frag_entry->unfrag_ptr; 00517 00518 frag_entry->pattern = iphc_header_scan(buf, &uncompressed_size); 00519 frag_entry->size = buffer_data_length(buf); 00520 frag_entry->orig_size = frag_entry->size; 00521 frag_entry->size += (uncompressed_size - frag_entry->pattern); 00522 00523 uint_fast16_t overhead = mac_helper_frame_overhead(cur, buf); 00524 if (interface_ptr->mpx_api) { 00525 overhead += interface_ptr->mpx_api->mpx_headroom_size_get(interface_ptr->mpx_api, interface_ptr->mpx_user_id); 00526 } 00527 00528 frag_entry->frag_max = mac_helper_max_payload_size(cur, overhead); 00529 00530 00531 /* RFC 4944 says MTU and hence maximum size here is 1280, but that's 00532 * arbitrary, and some have argued that 6LoWPAN should have a larger 00533 * MTU, to avoid the need for IP fragmentation. So we don't enforce 00534 * that, leaving MTU decisions to upper layer config, and only look 00535 * for the "real" MTU from the FRAG header format, which would allow up 00536 * to 0x7FF (2047). 00537 */ 00538 if (frag_entry->size > LOWPAN_HARD_MTU_LIMIT) { 00539 tr_error("Packet too big"); 00540 return -1; 00541 } 00542 00543 frag_entry->offset = uncompressed_size / 8; 00544 frag_entry->frag_len = frag_entry->pattern; 00545 if (frag_entry->unfrag_len + 4 + frag_entry->frag_len > frag_entry->frag_max) { 00546 tr_error("Too long 6LoWPAN header for fragment"); 00547 return -1; 00548 } 00549 00550 /* Now, frag_len is compressed payload bytes (just IPHC headers), and 00551 * frag_ptr->offset is uncompressed payload 8-octet units (just uncompressed 00552 * IPHC headers). 
Add post-IPHC payload to bring total compressed size up 00553 * to maximum fragment size. 00554 */ 00555 while (frag_entry->unfrag_len + 4 + frag_entry->frag_len + 8 <= frag_entry->frag_max) { 00556 frag_entry->offset++; 00557 frag_entry->frag_len += 8; 00558 } 00559 frag_entry->fragmented_data = true; 00560 00561 return 0; 00562 00563 } 00564 00565 /** 00566 * Return true when there is more fragmented packet for this message 00567 */ 00568 static bool lowpan_message_fragmentation_message_write(const fragmenter_tx_entry_t *frag_entry, mcps_data_req_t *dataReq) 00569 { 00570 uint8_t *ptr = dataReq->msdu; 00571 if (frag_entry->unfrag_len) { 00572 memcpy(ptr, frag_entry->buf->buf + frag_entry->unfrag_ptr, frag_entry->unfrag_len); 00573 ptr += frag_entry->unfrag_len; 00574 } 00575 if (frag_entry->first_fragment) { 00576 ptr = common_write_16_bit(((uint16_t) LOWPAN_FRAG1 << 8) | frag_entry->size, ptr); 00577 ptr = common_write_16_bit(frag_entry->tag, ptr); 00578 } else { 00579 ptr = common_write_16_bit(((uint16_t) LOWPAN_FRAGN << 8) | frag_entry->size, ptr); 00580 ptr = common_write_16_bit(frag_entry->tag, ptr); 00581 *ptr++ = frag_entry->offset; 00582 } 00583 memcpy(ptr, buffer_data_pointer(frag_entry->buf), frag_entry->frag_len); 00584 ptr += frag_entry->frag_len; 00585 dataReq->msduLength = ptr - dataReq->msdu; 00586 return frag_entry->offset * 8 + frag_entry->frag_len < frag_entry->size; 00587 } 00588 00589 static fragmenter_tx_entry_t *lowpan_adaptation_tx_process_init(fragmenter_interface_t *interface_ptr, bool indirect, bool fragmented, bool is_unicast) 00590 { 00591 fragmenter_tx_entry_t *tx_entry; 00592 if (!indirect) { 00593 if (is_unicast) { 00594 tx_entry = &interface_ptr->active_unicast_tx_buf; 00595 } else { 00596 tx_entry = &interface_ptr->active_broadcast_tx_buf; 00597 } 00598 tx_entry->fragmenter_buf = interface_ptr->fragment_indirect_tx_buffer; 00599 } else { 00600 if (fragmented) { 00601 tx_entry = 
lowpan_indirect_entry_allocate(interface_ptr->mtu_size); 00602 } else { 00603 tx_entry = lowpan_indirect_entry_allocate(0); 00604 } 00605 } 00606 00607 if (!tx_entry) { 00608 return NULL; 00609 } 00610 00611 lowpan_active_buffer_state_reset(tx_entry); 00612 00613 tx_entry->indirect_data = indirect; 00614 00615 return tx_entry; 00616 } 00617 00618 buffer_t *lowpan_adaptation_data_process_tx_preprocess(protocol_interface_info_entry_t *cur, buffer_t *buf) 00619 { 00620 mac_neighbor_table_entry_t *neigh_entry_ptr = NULL; 00621 00622 00623 //Validate is link known and set indirect, datareq and security key id mode 00624 if (buf->dst_sa .addr_type == ADDR_NONE ) { 00625 goto tx_error_handler; 00626 } 00627 00628 if (addr_check_broadcast(buf->dst_sa .address , buf->dst_sa .addr_type ) == eOK) { 00629 buf->dst_sa .addr_type = ADDR_802_15_4_SHORT ; 00630 buf->dst_sa .address [2] = 0xff; 00631 buf->dst_sa .address [3] = 0xff; 00632 buf->link_specific.ieee802_15_4.indirectTxProcess = false; 00633 buf->link_specific.ieee802_15_4.requestAck = false; 00634 } else { 00635 00636 neigh_entry_ptr = mac_neighbor_table_address_discover(mac_neighbor_info(cur), buf->dst_sa .address + 2, buf->dst_sa .addr_type ); 00637 00638 //Validate neighbour 00639 if (!buf->options .ll_security_bypass_tx && neigh_entry_ptr) { 00640 00641 if (neigh_entry_ptr->connected_device || neigh_entry_ptr->trusted_device ) { 00642 00643 } else { 00644 //tr_warn("Drop TX to unassociated %s", trace_sockaddr(&buf->dst_sa, true)); 00645 goto tx_error_handler; 00646 } 00647 } else if (ws_info(cur) && !neigh_entry_ptr) { 00648 //Do not accept to send unknow device 00649 goto tx_error_handler; 00650 } 00651 buf->link_specific.ieee802_15_4.requestAck = true; 00652 buf->link_specific.ieee802_15_4.indirectTxProcess = lowpan_adaptation_indirect_data_request(neigh_entry_ptr); 00653 } 00654 00655 if (buf->link_specific.ieee802_15_4.key_id_mode != B_SECURITY_KEY_ID_2) { 00656 00657 if 
(!buf->link_specific.ieee802_15_4.requestAck) { 00658 buf->link_specific.ieee802_15_4.key_id_mode = B_SECURITY_KEY_ID_MODE_DEFAULT; 00659 } else if (ws_info(cur) || (neigh_entry_ptr && !neigh_entry_ptr->trusted_device )) { 00660 buf->link_specific.ieee802_15_4.key_id_mode = B_SECURITY_KEY_ID_MODE_DEFAULT; 00661 } else { 00662 buf->link_specific.ieee802_15_4.key_id_mode = B_SECURITY_KEY_ID_IMPLICIT; 00663 } 00664 } 00665 00666 return buf; 00667 00668 tx_error_handler: 00669 if (neigh_entry_ptr && neigh_entry_ptr->nud_active ) { 00670 mac_neighbor_info(cur)->active_nud_process--; 00671 neigh_entry_ptr->nud_active = false; 00672 00673 } 00674 socket_tx_buffer_event_and_free(buf, SOCKET_TX_FAIL); 00675 return NULL; 00676 00677 } 00678 00679 static void lowpan_adaptation_data_request_primitiv_set(const buffer_t *buf, mcps_data_req_t *dataReq, protocol_interface_info_entry_t *cur) 00680 { 00681 memset(dataReq, 0, sizeof(mcps_data_req_t)); 00682 00683 //Check do we need fragmentation 00684 00685 dataReq->InDirectTx = buf->link_specific.ieee802_15_4.indirectTxProcess; 00686 dataReq->TxAckReq = buf->link_specific.ieee802_15_4.requestAck; 00687 dataReq->SrcAddrMode = buf->src_sa .addr_type ; 00688 dataReq->DstAddrMode = buf->dst_sa .addr_type ; 00689 memcpy(dataReq->DstAddr, &buf->dst_sa .address [2], 8); 00690 00691 if (buf->link_specific.ieee802_15_4.useDefaultPanId) { 00692 dataReq->DstPANId = mac_helper_panid_get(cur); 00693 } else { 00694 dataReq->DstPANId = buf->link_specific.ieee802_15_4.dstPanId; 00695 } 00696 00697 //Allocate message msdu handle 00698 dataReq->msduHandle = buf->seq ; 00699 00700 //Set Messages 00701 if (!buf->options .ll_security_bypass_tx ) { 00702 dataReq->Key.SecurityLevel = mac_helper_default_security_level_get(cur); 00703 if (dataReq->Key.SecurityLevel) { 00704 switch (buf->link_specific.ieee802_15_4.key_id_mode) { 00705 case B_SECURITY_KEY_ID_MODE_DEFAULT: 00706 dataReq->Key.KeyIndex = mac_helper_default_key_index_get(cur); 00707 
dataReq->Key.KeyIdMode = mac_helper_default_security_key_id_mode_get(cur); 00708 break; 00709 case B_SECURITY_KEY_ID_IMPLICIT: 00710 dataReq->Key.KeyIdMode = MAC_KEY_ID_MODE_IMPLICIT; 00711 break; 00712 00713 case B_SECURITY_KEY_ID_2: 00714 dataReq->Key.KeyIndex = 0xff; 00715 dataReq->Key.KeyIdMode = MAC_KEY_ID_MODE_SRC4_IDX; 00716 common_write_32_bit(0xffffffff, dataReq->Key.Keysource); 00717 break; 00718 } 00719 } 00720 } 00721 } 00722 00723 static bool lowpan_adaptation_indirect_cache_sanity_check(protocol_interface_info_entry_t *cur, fragmenter_interface_t *interface_ptr) 00724 { 00725 fragmenter_tx_entry_t *active_tx_entry; 00726 ns_list_foreach(fragmenter_tx_entry_t, fragmenter_tx_entry, &interface_ptr->indirect_tx_queue) { 00727 if (fragmenter_tx_entry->indirect_data_cached == false) { 00728 // active entry, jump to next one 00729 continue; 00730 } 00731 00732 // cached entry found, check if it has pending data reguest 00733 active_tx_entry = lowpan_adaptation_indirect_mac_data_request_active(interface_ptr, fragmenter_tx_entry); 00734 00735 if (active_tx_entry == NULL) { 00736 // entry is in cache and is not sent to mac => trigger this 00737 tr_debug_extra("sanity check, push seq %d to addr %s", fragmenter_tx_entry->buf->seq, trace_ipv6(fragmenter_tx_entry->buf->dst_sa.address)); 00738 fragmenter_tx_entry->indirect_data_cached = false; 00739 lowpan_data_request_to_mac(cur, fragmenter_tx_entry->buf, fragmenter_tx_entry, interface_ptr); 00740 return true; 00741 } 00742 } 00743 00744 return false; 00745 } 00746 00747 static bool lowpan_adaptation_indirect_cache_trigger(protocol_interface_info_entry_t *cur, fragmenter_interface_t *interface_ptr, fragmenter_tx_entry_t *tx_ptr) 00748 { 00749 tr_debug_extra("lowpan_adaptation_indirect_cache_trigger()"); 00750 00751 if (ns_list_count(&interface_ptr->indirect_tx_queue) == 0) { 00752 return false; 00753 } 00754 00755 /* Trigger first cached entry */ 00756 ns_list_foreach(fragmenter_tx_entry_t, fragmenter_tx_entry, 
&interface_ptr->indirect_tx_queue) { 00757 if (fragmenter_tx_entry->indirect_data_cached) { 00758 if (addr_ipv6_equal(tx_ptr->buf->dst_sa.address, fragmenter_tx_entry->buf->dst_sa.address)) { 00759 tr_debug_extra("Pushing seq %d to addr %s", fragmenter_tx_entry->buf->seq, trace_ipv6(fragmenter_tx_entry->buf->dst_sa.address)); 00760 fragmenter_tx_entry->indirect_data_cached = false; 00761 lowpan_data_request_to_mac(cur, fragmenter_tx_entry->buf, fragmenter_tx_entry, interface_ptr); 00762 return true; 00763 } 00764 } 00765 } 00766 00767 /* Sanity check, If nothing can be triggered from own address, check cache queue */ 00768 return lowpan_adaptation_indirect_cache_sanity_check(cur, interface_ptr); 00769 } 00770 00771 static fragmenter_tx_entry_t *lowpan_adaptation_indirect_mac_data_request_active(fragmenter_interface_t *interface_ptr, fragmenter_tx_entry_t *tx_ptr) 00772 { 00773 ns_list_foreach(fragmenter_tx_entry_t, fragmenter_tx_entry, &interface_ptr->indirect_tx_queue) { 00774 if (fragmenter_tx_entry->indirect_data_cached == false) { 00775 if (addr_ipv6_equal(tx_ptr->buf->dst_sa.address, fragmenter_tx_entry->buf->dst_sa.address)) { 00776 tr_debug_extra("active seq: %d", fragmenter_tx_entry->buf->seq); 00777 return fragmenter_tx_entry; 00778 } 00779 } 00780 } 00781 return NULL; 00782 } 00783 00784 static fragmenter_tx_entry_t *lowpan_adaptation_indirect_first_cached_request_get(fragmenter_interface_t *interface_ptr, fragmenter_tx_entry_t *tx_ptr) 00785 { 00786 ns_list_foreach(fragmenter_tx_entry_t, fragmenter_tx_entry, &interface_ptr->indirect_tx_queue) { 00787 if (fragmenter_tx_entry->indirect_data_cached == true) { 00788 if (addr_ipv6_equal(tx_ptr->buf->dst_sa.address, fragmenter_tx_entry->buf->dst_sa.address)) { 00789 tr_debug_extra("first cached seq: %d", fragmenter_tx_entry->buf->seq); 00790 return fragmenter_tx_entry; 00791 } 00792 } 00793 } 00794 return NULL; 00795 } 00796 00797 static bool lowpan_adaptation_is_priority_message(buffer_t *buf) 00798 { 00799 
// MLE messages
    if (buf->dst_sa.port == MLE_ALLOCATED_PORT || buf->src_sa.port == MLE_ALLOCATED_PORT) {
        return true;
    }

    // Management messages: address solicit, response, query, notification
    if (buf->dst_sa.port == THREAD_MANAGEMENT_PORT || buf->src_sa.port == THREAD_MANAGEMENT_PORT) {
        return true;
    }

    // DHCP messages
    if (buf->dst_sa.port == DHCPV6_SERVER_PORT || buf->src_sa.port == DHCPV6_SERVER_PORT) {
        return true;
    }

    if (buf->dst_sa.port == DHCPV6_CLIENT_PORT || buf->src_sa.port == DHCPV6_CLIENT_PORT) {
        return true;
    }

    // ICMPv6 error messages
    if (buf->options.type == ICMPV6_TYPE_ERROR_DESTINATION_UNREACH ||
            buf->options.type == ICMPV6_TYPE_ERROR_PACKET_TOO_BIG ||
            buf->options.type == ICMPV6_TYPE_ERROR_TIME_EXCEEDED ||
            buf->options.type == ICMPV6_TYPE_ERROR_PARAMETER_PROBLEM) {
        return true;
    }
    return false;
}

/* Enforce the per-child limit for queued "small" indirect packets (length <=
 * indirect_big_packet_threshold) destined to neighbour_to_count. When the
 * limit is reached, purge a low-priority entry (or the scanned entry itself if
 * new_entry is priority traffic) to make room for new_entry. Returns false
 * when no room could be made, true otherwise. A limit of 0 disables the check. */
static bool lowpan_adaptation_make_room_for_small_packet(protocol_interface_info_entry_t *cur, fragmenter_interface_t *interface_ptr, mac_neighbor_table_entry_t *neighbour_to_count, fragmenter_tx_entry_t *new_entry)
{
    if (interface_ptr->max_indirect_small_packets_per_child == 0) {
        // this means there is always space for small packets - no need to check further
        return true;
    }

    uint_fast16_t count = 0;
    fragmenter_tx_entry_t *low_priority_msg_ptr = NULL;

    ns_list_foreach_reverse_safe(fragmenter_tx_entry_t, tx_entry, &interface_ptr->indirect_tx_queue) {
        // dst_sa.address holds PAN id (2 bytes) followed by the MAC address
        mac_neighbor_table_entry_t *tx_neighbour = mac_neighbor_table_address_discover(mac_neighbor_info(cur), tx_entry->buf->dst_sa.address + 2, tx_entry->buf->dst_sa.addr_type);
        if (tx_neighbour == neighbour_to_count && buffer_data_length(tx_entry->buf) <= interface_ptr->indirect_big_packet_threshold) {
            if (!lowpan_adaptation_is_priority_message(tx_entry->buf)) {
                // purge candidate; if there were sub-priorities inside a message (e.g. age) they could be compared here
                low_priority_msg_ptr = tx_entry;
            }
            if (++count >= interface_ptr->max_indirect_small_packets_per_child) {
                if (!low_priority_msg_ptr) {
                    // take last entry if no low priority entry found
                    if (lowpan_adaptation_is_priority_message(new_entry->buf)) {
                        low_priority_msg_ptr = tx_entry;
                    } else {
                        return false;
                    }
                }
                tr_debug_extra("Purge seq: %d", low_priority_msg_ptr->buf->seq);
                if (lowpan_adaptation_indirect_queue_free_message(cur, interface_ptr, low_priority_msg_ptr) == false) {
                    /* entry could not be purged from mac, try next entry */
                    tr_debug_extra("Purge failed, try next");
                    count--;
                }
                low_priority_msg_ptr = NULL;
            }
        }
    }
    return true;
}

/* Enforce the interface-wide limit for queued "big" indirect packets (length >
 * indirect_big_packet_threshold), purging a low-priority entry when the limit
 * is reached. Returns false when no room could be made, true otherwise.
 * A limit of 0 disables the check. */
static bool lowpan_adaptation_make_room_for_big_packet(struct protocol_interface_info_entry *cur, fragmenter_interface_t *interface_ptr, fragmenter_tx_entry_t *new_entry)
{
    if (interface_ptr->max_indirect_big_packets_total == 0) {
        // this means there is always space for big packets - no need to check further
        return true;
    }

    uint_fast16_t count = 0;
    fragmenter_tx_entry_t *low_priority_msg_ptr = NULL;

    ns_list_foreach_reverse_safe(fragmenter_tx_entry_t, tx_entry, &interface_ptr->indirect_tx_queue) {
        if (buffer_data_length(tx_entry->buf) > interface_ptr->indirect_big_packet_threshold) {
            if (!lowpan_adaptation_is_priority_message(tx_entry->buf)) {
                // purge candidate; if there were sub-priorities inside a message (e.g. age) they could be compared here
                low_priority_msg_ptr = tx_entry;
            }
            if (++count >= interface_ptr->max_indirect_big_packets_total) {
                if (!low_priority_msg_ptr) {
                    // take last entry if no low priority entry found
                    if (lowpan_adaptation_is_priority_message(new_entry->buf)) {
                        low_priority_msg_ptr = tx_entry;
                    } else {
return false;
                    }
                }
                tr_debug_extra("Purge seq: %d", low_priority_msg_ptr->buf->seq);
                if (lowpan_adaptation_indirect_queue_free_message(cur, interface_ptr, low_priority_msg_ptr) == false) {
                    tr_debug_extra("Purge failed, try next entry");
                    /* entry could not be purged from mac, try next entry */
                    count--;
                }
                low_priority_msg_ptr = NULL;
            }
        }
    }
    return true;
}

/* Build an MCPS-DATA.request from the buffer/fragmenter state and hand it to
 * either the MPX layer (when present) or directly to the MAC API. Handles the
 * per-packet RF channel switch and the PendingBit for fragmented indirect TX. */
static void lowpan_data_request_to_mac(protocol_interface_info_entry_t *cur, buffer_t *buf, fragmenter_tx_entry_t *tx_ptr, fragmenter_interface_t *interface_ptr)
{
    mcps_data_req_t dataReq;

    lowpan_adaptation_data_request_primitiv_set(buf, &dataReq, cur);
    if (tx_ptr->fragmented_data) {
        dataReq.msdu = tx_ptr->fragmenter_buf;
        //Call fragmenter to write the next fragment into fragmenter_buf
        bool more_fragments = lowpan_message_fragmentation_message_write(tx_ptr, &dataReq);
        if (dataReq.InDirectTx) {
            dataReq.PendingBit |= more_fragments;
        }
    } else {
        dataReq.msduLength = buffer_data_length(buf);
        dataReq.msdu = buffer_data_pointer(buf);
    }
    if (buf->link_specific.ieee802_15_4.rf_channel_switch) {
        //Switch channel if selected channel is different; remember the old one for restore at confirm
        if (cur->mac_parameters->mac_channel != buf->link_specific.ieee802_15_4.selected_channel) {
            uint8_t channel = cur->mac_parameters->mac_channel;
            mac_helper_mac_channel_set(cur, buf->link_specific.ieee802_15_4.selected_channel);
            buf->link_specific.ieee802_15_4.selected_channel = channel;
        } else {
            buf->link_specific.ieee802_15_4.rf_channel_switch = false;
        }
    }

    if (interface_ptr->mpx_api) {
        interface_ptr->mpx_api->mpx_data_request(interface_ptr->mpx_api, &dataReq, interface_ptr->mpx_user_id);
    } else {
        cur->mac_api->mcps_data_req(cur->mac_api, &dataReq);
    }
}

/* Adaptation-layer TX entry point. Queues the buffer when a direct TX of the
 * same class is already active, otherwise allocates a TX descriptor, runs
 * fragmentation setup when needed, manages the indirect queue/cache, and
 * pushes the request to the MAC. Takes ownership of buf; on error the buffer
 * is released with a SOCKET_NO_RAM event. Returns 0 on accept, -1 on error. */
int8_t lowpan_adaptation_interface_tx(protocol_interface_info_entry_t *cur, buffer_t *buf)
{
    bool is_room_for_new_message;
    if (!buf) {
        return -1;
    }

    if (!cur || !cur->mac_api || !cur->mac_api->mcps_data_req) {
        goto tx_error_handler;
    }

    fragmenter_interface_t *interface_ptr = lowpan_adaptation_interface_discover(cur->id);
    if (!interface_ptr) {
        goto tx_error_handler;
    }

    //Check packet size
    bool fragmented_needed = lowpan_adaptation_request_longer_than_mtu(cur, buf, interface_ptr);
    bool is_unicast = buf->link_specific.ieee802_15_4.requestAck;
    bool indirect = buf->link_specific.ieee802_15_4.indirectTxProcess;
    if (!indirect) {
        // Direct TX must wait if an active buffer of the same class, or the fragmenter, is busy
        if (((is_unicast && interface_ptr->active_unicast_tx_buf.buf) || (!is_unicast && interface_ptr->active_broadcast_tx_buf.buf)) || (fragmented_needed && interface_ptr->fragmenter_active)) {
            lowpan_adaptation_tx_queue_write(interface_ptr, buf);
            return 0; //Return here
        }
    }

    //Allocate Handle (also used as the MCPS msduHandle)
    buf->seq = lowpan_data_request_unique_handle_get(interface_ptr);

    if (buf->options.ll_sec_bypass_frag_deny && fragmented_needed) {
        // force security for fragmented packets
        buf->options.ll_security_bypass_tx = false;
    }

    fragmenter_tx_entry_t *tx_ptr = lowpan_adaptation_tx_process_init(interface_ptr, indirect, fragmented_needed, is_unicast);
    if (!tx_ptr) {
        goto tx_error_handler;
    }

    tx_ptr->buf = buf;

    if (fragmented_needed) {
        //Fragmentation init
        if (lowpan_message_fragmentation_init(buf, tx_ptr, cur, interface_ptr)) {
            tr_error("Fragment init fail");
            if (indirect) {
                // indirect descriptors are heap-allocated per message
                ns_dyn_mem_free(tx_ptr->fragmenter_buf);
                ns_dyn_mem_free(tx_ptr);
            } else {
                // direct descriptors are the static active_*_tx_buf slots; just detach the buffer
                tx_ptr->buf = NULL;
            }
            goto tx_error_handler;
        }

        tx_ptr->tag = interface_ptr->local_frag_tag++;
        if (!indirect) {
            interface_ptr->fragmenter_active = true;
        }
    }

    if (indirect) {
        //Add to indirect queue
        fragmenter_tx_entry_t *tx_ptr_cached;
        mac_neighbor_table_entry_t *neigh_entry_ptr = mac_neighbor_table_address_discover(mac_neighbor_info(cur), buf->dst_sa.address + PAN_ID_LEN, buf->dst_sa.addr_type);
        if (neigh_entry_ptr) {
            // TTL from neighbour link lifetime (seconds -> milliseconds)
            buf->link_specific.ieee802_15_4.indirectTTL = (uint32_t) neigh_entry_ptr->link_lifetime * 1000;
        } else {
            buf->link_specific.ieee802_15_4.indirectTTL = cur->mac_parameters->mac_in_direct_entry_timeout;
        }

        tr_debug_extra("indirect seq: %d, addr=%s", tx_ptr->buf->seq, trace_ipv6(buf->dst_sa.address));

        // Make room for new message if needed
        if (buffer_data_length(buf) <= interface_ptr->indirect_big_packet_threshold) {
            is_room_for_new_message = lowpan_adaptation_make_room_for_small_packet(cur, interface_ptr, neigh_entry_ptr, tx_ptr);
        } else {
            is_room_for_new_message = lowpan_adaptation_make_room_for_big_packet(cur, interface_ptr, tx_ptr);
        }

        if (lowpan_adaptation_indirect_mac_data_request_active(interface_ptr, tx_ptr)) {
            // mac is handling previous data request, add new one to be cached
            tr_debug_extra("caching seq: %d", tx_ptr->buf->seq);
            tx_ptr->indirect_data_cached = true;
        }

        if (is_room_for_new_message) {
            ns_list_add_to_end(&interface_ptr->indirect_tx_queue, tx_ptr);
        } else {
            if (tx_ptr->fragmenter_buf) {
                ns_dyn_mem_free(tx_ptr->fragmenter_buf);
            }
            ns_dyn_mem_free(tx_ptr);
            goto tx_error_handler;
        }

        // Check if current message can be delivered to MAC or should some cached message be delivered first
        tx_ptr_cached = lowpan_adaptation_indirect_first_cached_request_get(interface_ptr, tx_ptr);
        if (tx_ptr->indirect_data_cached == false && tx_ptr_cached) {
            tr_debug_extra("sending cached seq: %d", tx_ptr_cached->buf->seq);
            // set current message to cache
            tx_ptr->indirect_data_cached = true;
            // swap entries: send the older cached message first
            tx_ptr = tx_ptr_cached;
            tx_ptr->indirect_data_cached = false;
            buf = tx_ptr_cached->buf;
        } else if (tx_ptr->indirect_data_cached == true) {
            // There is a mac data request ongoing and the new request was sent to cache
            return 0;
        }
    }

    lowpan_data_request_to_mac(cur, buf, tx_ptr, interface_ptr);
    return 0;


tx_error_handler:
    socket_tx_buffer_event_and_free(buf, SOCKET_NO_RAM);
    return -1;

}

/* Advance a TX descriptor after a successful MAC confirm. Returns true when
 * the whole datagram has been sent (stats updated, descriptor releasable),
 * false when more fragments remain (frag_len prepared for the next request). */
static bool lowpan_adaptation_tx_process_ready(fragmenter_tx_entry_t *tx_ptr)
{
    if (!tx_ptr->fragmented_data) {
        if (tx_ptr->buf->ip_routed_up) {
            protocol_stats_update(STATS_IP_ROUTE_UP, buffer_data_length(tx_ptr->buf));
        } else {
            protocol_stats_update(STATS_IP_TX_COUNT, buffer_data_length(tx_ptr->buf));
        }
        return true;
    }

    //Update data pointer by last packet length
    buffer_data_strip_header(tx_ptr->buf, tx_ptr->frag_len);
    //Update offset (RFC 4944 datagram offset is expressed in 8-octet units)
    if (!tx_ptr->first_fragment) {
        tx_ptr->offset += tx_ptr->frag_len / 8;
    } else {
        tx_ptr->first_fragment = false;
    }

    /* Check if there is still data to send */
    tx_ptr->frag_len = buffer_data_length(tx_ptr->buf);

    if (tx_ptr->frag_len == 0) {
        //Release current data
        if (tx_ptr->buf->ip_routed_up) {
            protocol_stats_update(STATS_IP_ROUTE_UP, tx_ptr->orig_size);
        } else {
            protocol_stats_update(STATS_IP_TX_COUNT, tx_ptr->orig_size);
        }
        return true;
    }

    //Continue process: clamp next fragment to frag_max (5 = FRAGN header size)
    // and round down to a multiple of 8 as required for non-final fragments

    if (tx_ptr->unfrag_len + 5 + tx_ptr->frag_len > tx_ptr->frag_max) {
        tx_ptr->frag_len = tx_ptr->frag_max - 5 - tx_ptr->unfrag_len;
        tx_ptr->frag_len &= ~7;
    }

    return false;
}

/* Release a finished TX descriptor and deliver the given socket event for its
 * buffer (indirect entries are also unlinked and freed from the queue). */
static void lowpan_adaptation_data_process_clean(fragmenter_interface_t *interface_ptr,
fragmenter_tx_entry_t *tx_ptr, uint8_t socket_event) 01108 { 01109 buffer_t *buf = tx_ptr->buf ; 01110 tx_ptr->buf = NULL; 01111 if (buf->link_specific.ieee802_15_4.indirectTxProcess) { 01112 //release from list and free entry 01113 lowpan_indirect_entry_free(&interface_ptr->indirect_tx_queue, tx_ptr); 01114 } 01115 01116 socket_tx_buffer_event_and_free(buf, socket_event); 01117 } 01118 01119 01120 int8_t lowpan_adaptation_interface_tx_confirm(protocol_interface_info_entry_t *cur, const mcps_data_conf_t *confirm) 01121 { 01122 if (!cur || !confirm) { 01123 return -1; 01124 } 01125 01126 fragmenter_interface_t *interface_ptr = lowpan_adaptation_interface_discover(cur->id); 01127 if (!interface_ptr) { 01128 return -1; 01129 } 01130 01131 //Check first 01132 fragmenter_tx_entry_t *tx_ptr; 01133 bool active_direct_confirm; 01134 bool is_unicast = true; 01135 01136 if (lowpan_active_tx_handle_verify(confirm->msduHandle, interface_ptr->active_unicast_tx_buf.buf)) { 01137 active_direct_confirm = true; 01138 tx_ptr = &interface_ptr->active_unicast_tx_buf; 01139 } else if (lowpan_active_tx_handle_verify(confirm->msduHandle, interface_ptr->active_broadcast_tx_buf.buf)) { 01140 active_direct_confirm = true; 01141 tx_ptr = &interface_ptr->active_broadcast_tx_buf; 01142 is_unicast = false; 01143 } else { 01144 active_direct_confirm = false; 01145 tx_ptr = lowpan_indirect_tx_handle_verify(confirm->msduHandle, &interface_ptr->indirect_tx_queue); 01146 } 01147 01148 if (!tx_ptr) { 01149 tr_error("No data request for this confirmation %u", confirm->msduHandle); 01150 return -1; 01151 } 01152 01153 //Check status for 01154 buffer_t *buf = tx_ptr->buf ; 01155 01156 //Indirect data expiration 01157 if (confirm->status == MLME_TRANSACTION_EXPIRED && !active_direct_confirm) { 01158 if (buf->link_specific.ieee802_15_4.indirectTTL > 7000) { 01159 buf->link_specific.ieee802_15_4.indirectTTL -= 7000; 01160 //Push Back to MAC 01161 lowpan_data_request_to_mac(cur, buf, tx_ptr, interface_ptr); 
01162 return 0; 01163 } 01164 } 01165 01166 if (interface_ptr->etx_update_cb) { 01167 interface_ptr->etx_update_cb(cur, buf, confirm); 01168 } 01169 01170 //Switch original channel back 01171 if (buf->link_specific.ieee802_15_4.rf_channel_switch) { 01172 mac_helper_mac_channel_set(cur, buf->link_specific.ieee802_15_4.selected_channel); 01173 buf->link_specific.ieee802_15_4.rf_channel_switch = false; 01174 } 01175 01176 switch (confirm->status) { 01177 01178 case MLME_BUSY_CHAN: 01179 lowpan_data_request_to_mac(cur, buf, tx_ptr, interface_ptr); 01180 break; 01181 case MLME_SUCCESS: 01182 01183 //Check is there more packets 01184 if (lowpan_adaptation_tx_process_ready(tx_ptr)) { 01185 bool triggered_from_indirect_cache = false; 01186 if (tx_ptr->fragmented_data && active_direct_confirm) { 01187 //Clean 01188 interface_ptr->fragmenter_active = false; 01189 } 01190 01191 if (tx_ptr->buf->link_specific.ieee802_15_4.indirectTxProcess) { 01192 triggered_from_indirect_cache = lowpan_adaptation_indirect_cache_trigger(cur, interface_ptr, tx_ptr); 01193 } 01194 01195 lowpan_adaptation_data_process_clean(interface_ptr, tx_ptr, map_mlme_status_to_socket_event(confirm->status)); 01196 01197 if (triggered_from_indirect_cache) { 01198 return 0; 01199 } 01200 } else { 01201 lowpan_data_request_to_mac(cur, buf, tx_ptr, interface_ptr); 01202 } 01203 01204 break; 01205 case MLME_TX_NO_ACK: 01206 case MLME_SECURITY_FAIL: 01207 case MLME_TRANSACTION_EXPIRED: 01208 default: 01209 tr_error("MCPS Data fail by status %u", confirm->status); 01210 if (buf->dst_sa .addr_type == ADDR_802_15_4_SHORT ) { 01211 tr_info("Dest addr: %x", common_read_16_bit(buf->dst_sa .address + 2)); 01212 } else if (buf->dst_sa .addr_type == ADDR_802_15_4_LONG ) { 01213 tr_info("Dest addr: %s", trace_array(buf->dst_sa .address + 2, 8)); 01214 } 01215 01216 #ifdef HAVE_RPL 01217 if (confirm->status == MLME_TX_NO_ACK) { 01218 if (buf->route && rpl_data_is_rpl_parent_route(buf->route->route_info.source)) { 01219 
protocol_stats_update(STATS_RPL_PARENT_TX_FAIL, 1); 01220 } 01221 } 01222 #endif 01223 if (tx_ptr->fragmented_data) { 01224 tx_ptr->buf->buf_ptr = tx_ptr->buf->buf_end; 01225 tx_ptr->buf->buf_ptr -= tx_ptr->orig_size; 01226 if (active_direct_confirm) { 01227 interface_ptr->fragmenter_active = false; 01228 } 01229 } 01230 01231 lowpan_adaptation_data_process_clean(interface_ptr, tx_ptr, map_mlme_status_to_socket_event(confirm->status)); 01232 break; 01233 01234 } 01235 01236 if ((is_unicast && !interface_ptr->active_unicast_tx_buf.buf) || (!is_unicast && !interface_ptr->active_broadcast_tx_buf.buf)) { 01237 //Read Buffer and trig next direct request 01238 lowpan_adaptation_interface_tx(cur, lowpan_adaptation_tx_queue_read(interface_ptr, cur)); 01239 } 01240 01241 return 0; 01242 01243 } 01244 01245 static bool mac_data_is_broadcast_addr(const sockaddr_t *addr) 01246 { 01247 return (addr->addr_type == ADDR_802_15_4_SHORT ) && 01248 (addr->address [2] == 0xFF && addr->address [3] == 0xFF); 01249 } 01250 01251 static bool mcps_data_indication_neighbor_validate(protocol_interface_info_entry_t *cur, const sockaddr_t *addr) 01252 { 01253 if (thread_info(cur) || ws_info(cur) || (cur->lowpan_info & INTERFACE_NWK_BOOTSRAP_MLE)) { 01254 mac_neighbor_table_entry_t *neighbor = mac_neighbor_table_address_discover(mac_neighbor_info(cur), addr->address + 2, addr->addr_type ); 01255 if (neighbor && (neighbor->connected_device || neighbor->trusted_device )) { 01256 return true; 01257 } 01258 01259 /* Otherwise, we don't know them */ 01260 return false; 01261 } else { 01262 //6lowpan without MLE don't can't do validation 01263 return true; 01264 } 01265 01266 } 01267 01268 void lowpan_adaptation_interface_data_ind(protocol_interface_info_entry_t *cur, const mcps_data_ind_t *data_ind) 01269 { 01270 buffer_t *buf = buffer_get(data_ind->msduLength); 01271 if (!buf || !cur) { 01272 return; 01273 } 01274 uint8_t *ptr; 01275 buffer_data_add(buf, data_ind->msdu_ptr, data_ind->msduLength); 
//tr_debug("MAC Paylod size %u %s",data_ind->msduLength, trace_array(data_ind->msdu_ptr, 8));
    buf->options.lqi = data_ind->mpduLinkQuality;
    buf->options.dbm = data_ind->signal_dbm;
    // Source/destination: 2-byte PAN id followed by the MAC address
    buf->src_sa.addr_type = (addrtype_t)data_ind->SrcAddrMode;
    ptr = common_write_16_bit(data_ind->SrcPANId, buf->src_sa.address);
    memcpy(ptr, data_ind->SrcAddr, 8);
    buf->dst_sa.addr_type = (addrtype_t)data_ind->DstAddrMode;
    ptr = common_write_16_bit(data_ind->DstPANId, buf->dst_sa.address);
    memcpy(ptr, data_ind->DstAddr, 8);
    //Set link-specific items separately
    buf->link_specific.ieee802_15_4.srcPanId = data_ind->SrcPANId;
    buf->link_specific.ieee802_15_4.dstPanId = data_ind->DstPANId;

    if (mac_data_is_broadcast_addr(&buf->dst_sa)) {
        buf->options.ll_broadcast_rx = true;
    }
    buf->interface = cur;
    if (data_ind->Key.SecurityLevel) {
        buf->link_specific.ieee802_15_4.fc_security = true;

        if (cur->mac_security_key_usage_update_cb) {
            cur->mac_security_key_usage_update_cb(cur, &data_ind->Key);
        }
    } else {
        buf->link_specific.ieee802_15_4.fc_security = false;
        // Unsecured frame: allow it to bypass security checks only when the
        // interface runs unsecured, or the sender fails neighbour validation
        if (mac_helper_default_security_level_get(cur) ||
                !mcps_data_indication_neighbor_validate(cur, &buf->src_sa)) {
            //SET bypass flag
            buf->options.ll_security_bypass_rx = true;
        }
    }

    buf->info = (buffer_info_t)(B_TO_IPV6_TXRX | B_FROM_MAC | B_DIR_UP);
    protocol_push(buf);
}

/* Map an MLME/MCPS confirm status to the socket event reported to the user:
 * MLME_SUCCESS -> SOCKET_TX_DONE, everything else -> SOCKET_TX_FAIL. */
static uint8_t map_mlme_status_to_socket_event(uint8_t mlme_status)
{
    uint8_t socket_event;

    switch (mlme_status) {
        case MLME_SUCCESS:
            socket_event = SOCKET_TX_DONE;
            break;
        case MLME_TX_NO_ACK:
        case MLME_SECURITY_FAIL:
        case MLME_TRANSACTION_EXPIRED:
        default:
            socket_event = SOCKET_TX_FAIL;
            break;
    }

    return (socket_event);
}

/* True when the interface has a direct (unicast or broadcast) TX in progress. */
bool lowpan_adaptation_tx_active(int8_t interface_id)
{
    fragmenter_interface_t *interface_ptr = lowpan_adaptation_interface_discover(interface_id);

    if (!interface_ptr || (!interface_ptr->active_unicast_tx_buf.buf && !interface_ptr->active_broadcast_tx_buf.buf)) {
        return false;
    }
    return true;
}

/* Compare a buffer destination against a raw MAC address (short or long).
 * address[0..1] of dst_sa hold the PAN id, so the comparison starts at [2]. */
static bool lowpan_tx_buffer_address_compare(sockaddr_t *dst_sa, uint8_t *address_ptr, addrtype_t adr_type)
{

    if (dst_sa->addr_type != adr_type) {
        return false;
    }

    uint8_t compare_length;
    switch (adr_type) {
        case ADDR_802_15_4_SHORT:
            compare_length = 2;
            break;
        case ADDR_802_15_4_LONG:
            compare_length = 8;
            break;
        default:
            return false;
    }


    if (memcmp(&dst_sa->address[2], address_ptr, compare_length)) {
        return false;
    }
    return true;
}

/* Issue an MCPS-PURGE.request for the given msduHandle via MPX when present,
 * otherwise via the MAC API. Returns true when the purge succeeded. */
static bool lowpan_adaptation_purge_from_mac(struct protocol_interface_info_entry *cur, fragmenter_interface_t *interface_ptr, uint8_t msduhandle)
{
    mcps_purge_t purge_req;
    purge_req.msduHandle = msduhandle;
    bool mac_purge_success = false;
    if (interface_ptr->mpx_api) {
        if (interface_ptr->mpx_api->mpx_data_purge(interface_ptr->mpx_api, &purge_req, interface_ptr->mpx_user_id) == 0) {
            mac_purge_success = true;
        }
    } else {
        if (cur->mac_api->mcps_purge_req) {
            if (cur->mac_api->mcps_purge_req(cur->mac_api, &purge_req) == 0) {
                mac_purge_success = true;
            }
        }
    }

    return mac_purge_success;
}

/* Remove an indirect queue entry: purge it from the MAC first when it has
 * already been handed over (not cached), then release the descriptor and
 * report SOCKET_TX_FAIL. Returns false when the MAC purge failed. */
static bool lowpan_adaptation_indirect_queue_free_message(struct protocol_interface_info_entry *cur, fragmenter_interface_t *interface_ptr, fragmenter_tx_entry_t *tx_ptr)
{
    tr_debug("Purge from indirect handle %u, cached %d", tx_ptr->buf->seq, tx_ptr->indirect_data_cached);
    if (tx_ptr->indirect_data_cached == false) {
        if
(lowpan_adaptation_purge_from_mac(cur, interface_ptr, tx_ptr->buf->seq) == false) {
            // MAC purge failed
            return false;
        }
    }

    lowpan_adaptation_data_process_clean(interface_ptr, tx_ptr, SOCKET_TX_FAIL);

    return true;
}

/* Drop all queued indirect messages for a neighbour that is being removed,
 * matching by its short address (when assigned) and by its long address. */
void lowpan_adaptation_remove_free_indirect_table(protocol_interface_info_entry_t *cur_interface, mac_neighbor_table_entry_t *entry_ptr)
{
    //Free first by defined short address (0xfffe/0xffff mean "not assigned")
    if (entry_ptr->mac16 < 0xfffe) {
        uint8_t temp_address[2];
        common_write_16_bit(entry_ptr->mac16, temp_address);
        lowpan_adaptation_indirect_free_messages_from_queues_by_address(cur_interface, temp_address, ADDR_802_15_4_SHORT);
    }
    lowpan_adaptation_indirect_free_messages_from_queues_by_address(cur_interface, entry_ptr->mac64, ADDR_802_15_4_LONG);
}


/* Free every indirect queue entry whose destination matches the given MAC
 * address (short or long). Returns 0 on success, -1 when the interface is
 * unknown. Purge failures for individual entries are ignored here. */
int8_t lowpan_adaptation_indirect_free_messages_from_queues_by_address(struct protocol_interface_info_entry *cur, uint8_t *address_ptr, addrtype_t adr_type)
{
    fragmenter_interface_t *interface_ptr = lowpan_adaptation_interface_discover(cur->id);

    if (!interface_ptr) {
        return -1;
    }

    //Check the indirect queue (safe iteration: entries are freed while walking)
    ns_list_foreach_safe(fragmenter_tx_entry_t, entry, &interface_ptr->indirect_tx_queue) {
        if (lowpan_tx_buffer_address_compare(&entry->buf->dst_sa, address_ptr, adr_type)) {
            //Purge from mac and free
            lowpan_adaptation_indirect_queue_free_message(cur, interface_ptr, entry);
        }
    }

    return 0;
}

/* Configure the indirect queue limits: the small/big packet size threshold,
 * the interface-wide big packet cap, and the per-child small packet cap.
 * Returns 0 on success, -1 when the interface is unknown. */
int8_t lowpan_adaptation_indirect_queue_params_set(struct protocol_interface_info_entry *cur, uint16_t indirect_big_packet_threshold, uint16_t max_indirect_big_packets_total, uint16_t max_indirect_small_packets_per_child)
{
    fragmenter_interface_t *interface_ptr = lowpan_adaptation_interface_discover(cur->id);

    if (!interface_ptr) {
        return -1;
    }

    interface_ptr->indirect_big_packet_threshold = indirect_big_packet_threshold;
    interface_ptr->max_indirect_big_packets_total = max_indirect_big_packets_total;
    interface_ptr->max_indirect_small_packets_per_child = max_indirect_small_packets_per_child;

    return 0;
}
Generated on Tue Jul 12 2022 13:54:00 by Doxygen 1.7.2