
adaptation_interface.c

00001 /*
00002  * Copyright (c) 2016-2017, Arm Limited and affiliates.
00003  * SPDX-License-Identifier: Apache-2.0
00004  *
00005  * Licensed under the Apache License, Version 2.0 (the "License");
00006  * you may not use this file except in compliance with the License.
00007  * You may obtain a copy of the License at
00008  *
00009  *     http://www.apache.org/licenses/LICENSE-2.0
00010  *
00011  * Unless required by applicable law or agreed to in writing, software
00012  * distributed under the License is distributed on an "AS IS" BASIS,
00013  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
00014  * See the License for the specific language governing permissions and
00015  * limitations under the License.
00016  */
00017 
00018 #include "nsconfig.h"
00019 #include "ns_types.h"
00020 #include "eventOS_event.h"
00021 #include "string.h"
00022 #include "ns_trace.h"
00023 #include "ns_list.h"
00024 #include "randLIB.h"
00025 #include "nsdynmemLIB.h"
00026 #include "Core/include/address.h"
00027 #include "Core/include/socket.h"
00028 #include "mac_api.h"
00029 #include "mac_mcps.h"
00030 #include "mac_common_defines.h"
00031 #include "common_functions.h"
00032 #include "NWK_INTERFACE/Include/protocol.h"
00033 #include "NWK_INTERFACE/Include/protocol_stats.h"
00034 #include "6LoWPAN/IPHC_Decode/cipv6.h"
00035 #include "NWK_INTERFACE/Include/protocol_timer.h"
00036 #include "Service_Libs/etx/etx.h"
00037 #include "6LoWPAN/MAC/mac_helper.h"
00038 #include "6LoWPAN/Mesh/mesh.h"
00039 #include "6LoWPAN/IPHC_Decode/iphc_decompress.h"
00040 #include "lowpan_adaptation_interface.h"
00041 #include "MLE/mle.h"
00042 #ifdef HAVE_RPL
00043 #include "RPL/rpl_data.h"
00044 #endif
00045 
00046 #define TRACE_GROUP "6lAd"
00047 
00048 // #define EXTRA_DEBUG_EXTRA
00049 #ifdef EXTRA_DEBUG_EXTRA
00050 #define tr_debug_extra(...) tr_debug(__VA_ARGS__)
00051 #else
00052 #define tr_debug_extra(...)
00053 #endif
00054 
00055 typedef struct {
00056     uint16_t tag;   /*!< Fragmentation datagram TAG ID */
00057     uint16_t size;  /*!< Datagram Total Size (uncompressed) */
00058     uint16_t orig_size; /*!< Datagram Original Size (compressed) */
00059     uint16_t frag_max;  /*!< Maximum fragment size (MAC payload) */
00060     uint16_t offset; /*!< Data offset from datagram start, in 8-octet units */
00061     int16_t pattern; /*!< Size of compressed LoWPAN headers */
00062     uint16_t unfrag_ptr; /*!< Offset within buf of headers that precede the FRAG header */
00063     uint16_t frag_len;
00064     uint8_t unfrag_len; /*!< Length of headers that precede the FRAG header */
00065     bool fragmented_data:1;
00066     bool first_fragment:1;
00067     bool indirect_data:1;
00068     bool indirect_data_cached:1; /* Data cached for delayed transmission because a MAC request is already active */
00069     buffer_t *buf;
00070     uint8_t *fragmenter_buf;
00071     ns_list_link_t      link; /*!< List link entry */
00072 } fragmenter_tx_entry_t;
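/* For reference: RFC 4944 fragmentation prepends a 4-byte FRAG1 header
 * (dispatch plus 11-bit datagram size, then a 16-bit datagram tag) to the
 * first fragment, and a 5-byte FRAGN header (the same fields plus an 8-bit
 * datagram offset counted in 8-octet units) to every later fragment. The
 * tag, size and offset fields above carry exactly those values, and frag_max
 * is the MAC payload budget each fragment must fit into.
 */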
00073 
00074 
00075 typedef NS_LIST_HEAD (fragmenter_tx_entry_t, link) fragmenter_tx_list_t;
00076 
00077 typedef struct {
00078     int8_t interface_id;
00079     uint16_t local_frag_tag;
00080     uint8_t msduHandle;
00081     fragmenter_tx_list_t indirect_tx_queue;
00082     uint8_t *fragment_indirect_tx_buffer; //Used for writing the fragmentation header
00083     uint16_t mtu_size;
00084     fragmenter_tx_entry_t active_unicast_tx_buf; //Current active direct unicast tx process
00085     fragmenter_tx_entry_t active_broadcast_tx_buf; //Current active direct broadcast tx process
00086     buffer_list_t directTxQueue; //Buffers waiting for a free tx process
00087     uint16_t indirect_big_packet_threshold;
00088     uint16_t max_indirect_big_packets_total;
00089     uint16_t max_indirect_small_packets_per_child;
00090     bool fragmenter_active; /*!< Fragmenter state */
00091     ns_list_link_t      link; /*!< List link entry */
00092 } fragmenter_interface_t;
00093 
00094 static NS_LIST_DEFINE(fragmenter_interface_list, fragmenter_interface_t, link);
00095 
00096 /* Adaptation interface local functions */
00097 static fragmenter_interface_t *lowpan_adaptation_interface_discover(int8_t interfaceId);
00098 
00099 /* Interface direct message pending queue functions */
00100 static void lowpan_adaptation_tx_queue_write(fragmenter_interface_t *interface_ptr , buffer_t *buf);
00101 static buffer_t * lowpan_adaptation_tx_queue_read(fragmenter_interface_t *interface_ptr, protocol_interface_info_entry_t *cur);
00102 
00103 /* Data direction and message length validation */
00104 static bool lowpan_adaptation_indirect_data_request(mle_neigh_table_entry_t *mle_entry);
00105 static bool lowpan_adaptation_request_longer_than_mtu(protocol_interface_info_entry_t *cur, buffer_t *buf);
00106 
00107 /* Common data tx request process functions */
00108 static void lowpan_active_buffer_state_reset(fragmenter_tx_entry_t *tx_buffer);
00109 static uint8_t lowpan_data_request_unique_handle_get(fragmenter_interface_t *interface_ptr);
00110 static fragmenter_tx_entry_t *lowpan_indirect_entry_allocate(uint16_t fragment_buffer_size);
00111 static fragmenter_tx_entry_t * lowpan_adaptation_tx_process_init(fragmenter_interface_t *interface_ptr, bool indirect, bool fragmented, bool is_unicast);
00112 static void lowpan_adaptation_data_request_primitiv_set(const buffer_t *buf, mcps_data_req_t *dataReq, protocol_interface_info_entry_t *cur);
00113 static void lowpan_data_request_to_mac(protocol_interface_info_entry_t *cur, buffer_t *buf, fragmenter_tx_entry_t *tx_ptr);
00114 
00115 /* Tx confirmation local functions */
00116 static bool lowpan_active_tx_handle_verify(uint8_t handle, buffer_t *buf);
00117 static fragmenter_tx_entry_t * lowpan_indirect_tx_handle_verify(uint8_t handle, fragmenter_tx_list_t *indirect_tx_queue);
00118 static void lowpan_adaptation_data_process_clean(fragmenter_interface_t *interface_ptr, fragmenter_tx_entry_t *tx_ptr, uint8_t socket_event);
00119 static uint8_t map_mlme_status_to_socket_event(uint8_t mlme_status);
00120 static bool lowpan_adaptation_tx_process_ready(fragmenter_tx_entry_t *tx_ptr);
00121 
00122 /* Fragmentation local functions */
00123 static int8_t lowpan_message_fragmentation_init(buffer_t *buf, fragmenter_tx_entry_t *frag_entry, protocol_interface_info_entry_t *cur);
00124 static bool lowpan_message_fragmentation_message_write(const fragmenter_tx_entry_t *frag_entry, mcps_data_req_t *dataReq);
00125 static void lowpan_adaptation_indirect_queue_free_message(struct protocol_interface_info_entry *cur, fragmenter_interface_t *interface_ptr, fragmenter_tx_entry_t *tx_ptr);
00126 
00127 static fragmenter_tx_entry_t* lowpan_adaptation_indirect_mac_data_request_active(fragmenter_interface_t *interface_ptr, fragmenter_tx_entry_t *tx_ptr);
00128 
00129 //Discover
00130 static fragmenter_interface_t *lowpan_adaptation_interface_discover(int8_t interfaceId)
00131 {
00132 
00133     ns_list_foreach(fragmenter_interface_t, interface_ptr, &fragmenter_interface_list) {
00134         if (interfaceId == interface_ptr->interface_id) {
00135             return interface_ptr;
00136         }
00137     }
00138 
00139     return NULL;
00140 }
00141 
00142 
00143 static void lowpan_adaptation_tx_queue_write(fragmenter_interface_t *interface_ptr , buffer_t *buf)
00144 {
00145     buffer_t *lower_priority_buf = NULL;
00146 
00147     ns_list_foreach(buffer_t, cur, &interface_ptr->directTxQueue) {
00148         if (cur->priority < buf->priority) {
00149             lower_priority_buf = cur;
00150             break;
00151         }
00152     }
00153 
00154     if (lower_priority_buf) {
00155         ns_list_add_before(&interface_ptr->directTxQueue, lower_priority_buf, buf);
00156     } else {
00157         ns_list_add_to_end(&interface_ptr->directTxQueue, buf);
00158     }
00159 }
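/* The direct tx queue is kept ordered by descending buffer priority: a new
 * buffer is inserted in front of the first queued buffer with strictly lower
 * priority, so buffers of equal priority keep their FIFO order.
 */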
00160 
00161 static buffer_t * lowpan_adaptation_tx_queue_read(fragmenter_interface_t *interface_ptr, protocol_interface_info_entry_t *cur)
00162 {
00163     /* Currently this function is called only when a data confirm is received for a previously sent packet.
00164      * The data confirm has freed the corresponding "active buffer", and this function looks for a new buffer to set as the active buffer.
00165      */
00166     ns_list_foreach_safe(buffer_t, buf, &interface_ptr->directTxQueue) {
00167         bool fragmented_needed = lowpan_adaptation_request_longer_than_mtu(cur, buf);
00168         //Check that we do not trigger a second active fragmentation process
00169         if (fragmented_needed && interface_ptr->fragmenter_active) {
00170             tr_debug("Do not trig Second active fragmentation");
00171         } else if ((buf->link_specific.ieee802_15_4.requestAck && !interface_ptr->active_unicast_tx_buf.buf)
00172                 || (!buf->link_specific.ieee802_15_4.requestAck && !interface_ptr->active_broadcast_tx_buf.buf)) {
00173             ns_list_remove(&interface_ptr->directTxQueue, buf);
00174             return buf;
00175         }
00176     }
00177     return NULL;
00178 }
00179 
00180 //Check whether fragmentation is needed
00181 
00182 static bool lowpan_adaptation_request_longer_than_mtu(protocol_interface_info_entry_t *cur, buffer_t *buf)
00183 {
00184     uint_fast8_t overhead = mac_helper_frame_overhead(cur, buf);
00185 
00186 
00187     if (buffer_data_length(buf) > (int16_t)mac_helper_max_payload_size(cur, overhead)) {
00188         return true;
00189     } else {
00190         return false;
00191     }
00192 }
00193 
00194 static bool lowpan_adaptation_indirect_data_request(mle_neigh_table_entry_t *mle_entry)
00195 {
00196     if (mle_entry && !(mle_entry->mode & MLE_RX_ON_IDLE)) {
00197         return true;
00198     }
00199     return false;
00200 }
00201 
00202 
00203 static void lowpan_active_buffer_state_reset(fragmenter_tx_entry_t *tx_buffer)
00204 {
00205     if (tx_buffer->buf) {
00206         buffer_free(tx_buffer->buf);
00207         tx_buffer->buf = NULL;
00208     }
00209     tx_buffer->fragmented_data = false;
00210     tx_buffer->first_fragment = true;
00211 }
00212 
00213 static bool lowpan_active_tx_handle_verify(uint8_t handle, buffer_t *buf)
00214 {
00215 
00216     if (buf && buf->seq  == handle) {
00217         return true;
00218     }
00219 
00220 
00221     return false;
00222 }
00223 
00224 
00225 
00226 static fragmenter_tx_entry_t * lowpan_indirect_tx_handle_verify(uint8_t handle, fragmenter_tx_list_t *indirect_tx_queue)
00227 {
00228     ns_list_foreach(fragmenter_tx_entry_t, entry, indirect_tx_queue) {
00229         if (entry->buf->seq == handle) {
00230             return entry;
00231         }
00232     }
00233     return NULL;
00234 }
00235 
00236 
00237 
00238 static uint8_t lowpan_data_request_unique_handle_get(fragmenter_interface_t *interface_ptr)
00239 {
00240     bool valid_info = false;
00241     uint8_t handle;
00242     while(!valid_info) {
00243         handle = interface_ptr->msduHandle++;
00244         if (!lowpan_active_tx_handle_verify(handle,interface_ptr->active_unicast_tx_buf.buf)
00245                 && !lowpan_active_tx_handle_verify(handle,interface_ptr->active_broadcast_tx_buf.buf)
00246                 && !lowpan_indirect_tx_handle_verify(handle, &interface_ptr->indirect_tx_queue)) {
00247             valid_info = true;
00248         }
00249     }
00250     return handle;
00251 
00252 }
00253 
00254 static void lowpan_indirect_entry_free(fragmenter_tx_list_t *list , fragmenter_tx_entry_t *entry)
00255 {
00256     ns_list_remove(list, entry);
00257     if (entry->buf) {
00258         buffer_free(entry->buf);
00259     }
00260     ns_dyn_mem_free(entry->fragmenter_buf);
00261     ns_dyn_mem_free(entry);
00262 }
00263 
00264 static void lowpan_indirect_queue_free(fragmenter_tx_list_t *list)
00265 {
00266     while(!ns_list_is_empty(list)) {
00267         fragmenter_tx_entry_t *entry = ns_list_get_first(list);
00268         lowpan_indirect_entry_free(list, entry);
00269     }
00270 }
00271 
00272 
00273 int8_t lowpan_adaptation_interface_init(int8_t interface_id, uint16_t mac_mtu_size)
00274 {
00275     if (mac_mtu_size == 0) {
00276         return -2;
00277     }
00278     //Remove old interface
00279     lowpan_adaptation_interface_free(interface_id);
00280 
00281     //Allocate new
00282     fragmenter_interface_t *interface_ptr = ns_dyn_mem_alloc(sizeof(fragmenter_interface_t));
00283     uint8_t *tx_buffer = ns_dyn_mem_alloc(mac_mtu_size);
00284     if (!interface_ptr  || !tx_buffer) {
00285         ns_dyn_mem_free(interface_ptr);
00286         ns_dyn_mem_free(tx_buffer);
00287         return -1;
00288     }
00289 
00290     memset(interface_ptr, 0, sizeof(fragmenter_interface_t));
00291     interface_ptr->interface_id = interface_id;
00292     interface_ptr->fragment_indirect_tx_buffer = tx_buffer;
00293     interface_ptr->mtu_size = mac_mtu_size;
00294     interface_ptr->msduHandle = randLIB_get_8bit();
00295     interface_ptr->local_frag_tag = randLIB_get_16bit();
00296 
00297     ns_list_init(&interface_ptr->indirect_tx_queue);
00298     ns_list_init(&interface_ptr->directTxQueue);
00299 
00300     ns_list_add_to_end(&fragmenter_interface_list, interface_ptr);
00301 
00302     return 0;
00303 }
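/* Minimal usage sketch (illustrative only; interface id 0 and the standard
 * 127-byte 802.15.4 MTU are assumptions, not values from this source):
 *
 *     if (lowpan_adaptation_interface_init(0, 127) != 0) {
 *         // allocation failed or the MTU was 0
 *     }
 *     ...
 *     lowpan_adaptation_interface_free(0);
 */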
00304 
00305 int8_t lowpan_adaptation_interface_free(int8_t interface_id)
00306 {
00307     //Discover
00308     fragmenter_interface_t *interface_ptr = lowpan_adaptation_interface_discover(interface_id);
00309     if (!interface_ptr) {
00310         return -1;
00311     }
00312 
00313     ns_list_remove(&fragmenter_interface_list, interface_ptr);
00314     //free active tx process
00315     lowpan_active_buffer_state_reset(&interface_ptr->active_unicast_tx_buf);
00316     lowpan_active_buffer_state_reset(&interface_ptr->active_broadcast_tx_buf);
00317 
00318     //Free Indirect entry
00319     lowpan_indirect_queue_free(&interface_ptr->indirect_tx_queue);
00320 
00321     buffer_free_list(&interface_ptr->directTxQueue);
00322 
00323     //Free Dynamic allocated entries
00324     ns_dyn_mem_free(interface_ptr->fragment_indirect_tx_buffer);
00325     ns_dyn_mem_free(interface_ptr);
00326 
00327     return 0;
00328 }
00329 
00330 
00331 int8_t lowpan_adaptation_interface_reset(int8_t interface_id)
00332 {
00333     //Discover
00334     fragmenter_interface_t *interface_ptr = lowpan_adaptation_interface_discover(interface_id);
00335     if (!interface_ptr) {
00336         return -1;
00337     }
00338 
00339     //free active tx process
00340     lowpan_active_buffer_state_reset(&interface_ptr->active_unicast_tx_buf);
00341     lowpan_active_buffer_state_reset(&interface_ptr->active_broadcast_tx_buf);
00342     //Clean fragmented message flag
00343     interface_ptr->fragmenter_active = false;
00344 
00345     //Free Indirect entry
00346     lowpan_indirect_queue_free(&interface_ptr->indirect_tx_queue);
00347 
00348     buffer_free_list(&interface_ptr->directTxQueue);
00349 
00350     return 0;
00351 }
00352 
00353 
00354 static fragmenter_tx_entry_t *lowpan_indirect_entry_allocate(uint16_t fragment_buffer_size)
00355 {
00356     fragmenter_tx_entry_t *indirec_entry = ns_dyn_mem_temporary_alloc(sizeof(fragmenter_tx_entry_t));
00357     if (!indirec_entry) {
00358         return NULL;
00359     }
00360 
00361     if (fragment_buffer_size) {
00362         indirec_entry->fragmenter_buf = ns_dyn_mem_temporary_alloc(fragment_buffer_size);
00363         if (!indirec_entry->fragmenter_buf) {
00364             ns_dyn_mem_free(indirec_entry);
00365             return NULL;
00366         }
00367     } else {
00368         indirec_entry->fragmenter_buf = NULL;
00369     }
00370 
00371 
00372     indirec_entry->buf = NULL;
00373     indirec_entry->fragmented_data = false;
00374     indirec_entry->first_fragment = true;
00375     indirec_entry->indirect_data_cached = false;
00376 
00377     return indirec_entry;
00378 }
00379 
00380 static int8_t lowpan_message_fragmentation_init(buffer_t *buf, fragmenter_tx_entry_t *frag_entry, protocol_interface_info_entry_t *cur)
00381 {
00382     uint8_t *ptr;
00383     uint16_t uncompressed_size;
00384 
00385     /* Look for pre-fragmentation headers - strip off and store away */
00386     frag_entry->unfrag_ptr = buf->buf_ptr;
00387     frag_entry->unfrag_len = 0;
00388     ptr = buffer_data_pointer(buf);
00389 
00390     if ((ptr[0] & LOWPAN_MESH_MASK) == LOWPAN_MESH) {
00391         uint_fast8_t size = mesh_header_len_from_type_byte(ptr[0]);
00392         ptr += size;
00393         buf->buf_ptr += size;
00394     }
00395 
00396     if (ptr[0] == LOWPAN_DISPATCH_BC0) {
00397         ptr += 2;
00398         buf->buf_ptr += 2;
00399     }
00400 
00401     frag_entry->unfrag_len = buf->buf_ptr - frag_entry->unfrag_ptr;
00402 
00403     frag_entry->pattern = iphc_header_scan(buf, &uncompressed_size);
00404     frag_entry->size = buffer_data_length(buf);
00405     frag_entry->orig_size = frag_entry->size;
00406     frag_entry->size += (uncompressed_size - frag_entry->pattern);
00407 
00408     uint_fast8_t overhead = mac_helper_frame_overhead(cur, buf);
00409     frag_entry->frag_max = mac_helper_max_payload_size(cur, overhead);
00410 
00411 
00412     /* RFC 4944 says MTU and hence maximum size here is 1280, but that's
00413      * arbitrary, and some have argued that 6LoWPAN should have a larger
00414      * MTU, to avoid the need for IP fragmentation. So we don't enforce
00415      * that, leaving MTU decisions to upper layer config, and only look
00416      * for the "real" MTU from the FRAG header format, which would allow up
00417      * to 0x7FF (2047).
00418      */
00419     if (frag_entry->size > LOWPAN_HARD_MTU_LIMIT) {
00420         tr_error("Packet too big");
00421         return -1;
00422     }
00423 
00424     frag_entry->offset = uncompressed_size / 8;
00425     frag_entry->frag_len = frag_entry->pattern;
00426     if (frag_entry->unfrag_len + 4 + frag_entry->frag_len > frag_entry->frag_max) {
00427         tr_error("Too long 6LoWPAN header for fragment");
00428         return -1;
00429     }
00430 
00431     /* Now, frag_len is compressed payload bytes (just IPHC headers), and
00432      * frag_ptr->offset is uncompressed payload 8-octet units (just uncompressed
00433      * IPHC headers). Add post-IPHC payload to bring total compressed size up
00434      * to maximum fragment size.
00435      */
00436     while (frag_entry->unfrag_len + 4 + frag_entry->frag_len + 8 <= frag_entry->frag_max) {
00437         frag_entry->offset++;
00438         frag_entry->frag_len += 8;
00439     }
00440     frag_entry->fragmented_data = true;
00441 
00442     return 0;
00443 
00444 }
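/* Worked example (illustrative numbers, not taken from this source): with no
 * mesh/BC0 headers (unfrag_len = 0), a MAC payload budget frag_max = 96, a
 * 40-byte uncompressed IPv6 header that IPHC has compressed to pattern = 7
 * bytes, and a 207-byte compressed buffer, the FRAG datagram size becomes
 * 207 + (40 - 7) = 240. The first fragment starts with offset = 40 / 8 = 5
 * and frag_len = 7, and the while loop above grows frag_len in 8-byte steps
 * up to 87 (since 0 + 4 + 87 + 8 > 96), giving a 4 + 87 = 91-byte first
 * fragment and leaving offset = 15 for the next one.
 */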
00445 
00446 /**
00447  * Return true when there is more fragmented packet for this message
00448  */
00449 static bool lowpan_message_fragmentation_message_write(const fragmenter_tx_entry_t *frag_entry, mcps_data_req_t *dataReq)
00450 {
00451     uint8_t *ptr = dataReq->msdu;
00452     if (frag_entry->unfrag_len) {
00453         memcpy(ptr, frag_entry->buf->buf  + frag_entry->unfrag_ptr, frag_entry->unfrag_len);
00454         ptr += frag_entry->unfrag_len;
00455     }
00456     if (frag_entry->first_fragment) {
00457         ptr = common_write_16_bit(((uint16_t) LOWPAN_FRAG1 << 8) | frag_entry->size, ptr);
00458         ptr = common_write_16_bit(frag_entry->tag, ptr);
00459     } else {
00460         ptr = common_write_16_bit(((uint16_t) LOWPAN_FRAGN << 8) | frag_entry->size, ptr);
00461         ptr = common_write_16_bit(frag_entry->tag, ptr);
00462         *ptr++ = frag_entry->offset;
00463     }
00464     memcpy(ptr, buffer_data_pointer(frag_entry->buf), frag_entry->frag_len);
00465     ptr += frag_entry->frag_len;
00466     dataReq->msduLength = ptr - dataReq->msdu;
00467     return frag_entry->offset * 8 + frag_entry->frag_len < frag_entry->size;
00468 }
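/* The first fragment is prefixed with the 4-byte FRAG1 header (11-bit
 * datagram size plus 16-bit tag); every later fragment gets the 5-byte FRAGN
 * header, which additionally carries the offset in 8-octet units. The return
 * value stays true while offset * 8 + frag_len is still below the
 * uncompressed datagram size, i.e. while more fragments are needed.
 */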
00469 
00470 static fragmenter_tx_entry_t * lowpan_adaptation_tx_process_init(fragmenter_interface_t *interface_ptr, bool indirect, bool fragmented, bool is_unicast)
00471 {
00472     fragmenter_tx_entry_t *tx_entry;
00473     if (!indirect) {
00474         if (is_unicast) {
00475             tx_entry = &interface_ptr->active_unicast_tx_buf;
00476         } else {
00477             tx_entry = &interface_ptr->active_broadcast_tx_buf;
00478         }
00479         tx_entry->fragmenter_buf = interface_ptr->fragment_indirect_tx_buffer;
00480     } else {
00481         if (fragmented) {
00482             tx_entry = lowpan_indirect_entry_allocate(interface_ptr->mtu_size);
00483         } else {
00484             tx_entry = lowpan_indirect_entry_allocate(0);
00485         }
00486     }
00487 
00488     if (!tx_entry) {
00489         return NULL;
00490     }
00491 
00492     lowpan_active_buffer_state_reset(tx_entry);
00493 
00494     tx_entry->indirect_data = indirect;
00495 
00496     return tx_entry;
00497 }
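/* Direct transmissions reuse one of the two static slots (unicast or
 * broadcast) and share the interface's preallocated fragment buffer; indirect
 * transmissions get a heap-allocated entry of their own, with a private
 * fragment buffer only when the message actually needs fragmentation.
 */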
00498 
00499 buffer_t * lowpan_adaptation_data_process_tx_preprocess(protocol_interface_info_entry_t *cur, buffer_t *buf)
00500 {
00501     //Validate that the link is known and set the indirect, data request and security key id modes
00502     if (buf->dst_sa.addr_type == ADDR_NONE) {
00503         goto tx_error_handler;
00504     }
00505 
00506     mle_neigh_table_entry_t *mle_entry = NULL;
00507     /* If MLE is enabled, we will talk if we have an MLE association */
00508     if (buf->dst_sa.addr_type == ADDR_802_15_4_LONG) {
00509         mle_entry = mle_class_get_by_link_address(cur->id, buf->dst_sa.address + 2, buf->dst_sa.addr_type);
00510 
00511     } else if (buf->dst_sa.addr_type == ADDR_802_15_4_SHORT && (common_read_16_bit(buf->dst_sa.address + 2)) != 0xffff) {
00512         mle_entry = mle_class_get_by_link_address(cur->id, buf->dst_sa.address + 2, buf->dst_sa.addr_type);
00513     }
00514 
00515     //Validate neighbour
00516     if (!buf->options.ll_security_bypass_tx && mle_entry) {
00517 
00518         if (mle_entry->handshakeReady ||  mle_entry->thread_commission) {
00519 
00520         } else {
00521             //tr_warn("Drop TX to unassociated %s", trace_sockaddr(&buf->dst_sa, true));
00522             goto tx_error_handler;
00523         }
00524     }
00525 
00526     //Check indirect
00527 
00528 
00529     if (addr_check_broadcast(buf->dst_sa.address, buf->dst_sa.addr_type) == eOK) {
00530         buf->dst_sa.addr_type = ADDR_802_15_4_SHORT;
00531         buf->dst_sa.address[2] = 0xff;
00532         buf->dst_sa.address[3] = 0xff;
00533         buf->link_specific.ieee802_15_4.indirectTxProcess = false;
00534         buf->link_specific.ieee802_15_4.requestAck = false;
00535     } else {
00536         buf->link_specific.ieee802_15_4.requestAck = true;
00537         buf->link_specific.ieee802_15_4.indirectTxProcess = lowpan_adaptation_indirect_data_request(mle_entry);
00538     }
00539 
00540     if (buf->link_specific.ieee802_15_4.key_id_mode != B_SECURITY_KEY_ID_2) {
00541 
00542         if (!buf->link_specific.ieee802_15_4.requestAck) {
00543             buf->link_specific.ieee802_15_4.key_id_mode = B_SECURITY_KEY_ID_MODE_DEFAULT;
00544         } else if (mle_entry && !mle_entry->thread_commission) {
00545             buf->link_specific.ieee802_15_4.key_id_mode = B_SECURITY_KEY_ID_MODE_DEFAULT;
00546         } else {
00547             buf->link_specific.ieee802_15_4.key_id_mode = B_SECURITY_KEY_ID_IMPLICIT;
00548         }
00549     }
00550 
00551     return buf;
00552 
00553     tx_error_handler:
00554     socket_tx_buffer_event_and_free(buf, SOCKET_TX_FAIL);
00555     return NULL;
00556 
00557 }
00558 
00559 static void lowpan_adaptation_data_request_primitiv_set(const buffer_t *buf, mcps_data_req_t *dataReq, protocol_interface_info_entry_t *cur)
00560 {
00561     memset(dataReq, 0, sizeof(mcps_data_req_t));
00562 
00563     //Check whether we need fragmentation
00564 
00565     dataReq->InDirectTx = buf->link_specific.ieee802_15_4.indirectTxProcess;
00566     dataReq->TxAckReq = buf->link_specific.ieee802_15_4.requestAck;
00567     dataReq->SrcAddrMode = buf->src_sa.addr_type;
00568     dataReq->DstAddrMode = buf->dst_sa.addr_type;
00569     memcpy(dataReq->DstAddr, &buf->dst_sa.address[2], 8);
00570 
00571     if (buf->link_specific.ieee802_15_4.useDefaultPanId) {
00572         dataReq->DstPANId = mac_helper_panid_get(cur);
00573     } else {
00574         dataReq->DstPANId = buf->link_specific.ieee802_15_4.dstPanId;
00575     }
00576 
00577     //Set the message msdu handle (allocated earlier into buf->seq)
00578     dataReq->msduHandle = buf->seq;
00579 
00580     //Set security parameters
00581     if (!buf->options.ll_security_bypass_tx) {
00582         dataReq->Key.SecurityLevel = mac_helper_default_security_level_get(cur);
00583         if (dataReq->Key.SecurityLevel) {
00584             switch (buf->link_specific.ieee802_15_4.key_id_mode) {
00585                 case B_SECURITY_KEY_ID_MODE_DEFAULT:
00586                     dataReq->Key.KeyIndex = mac_helper_default_key_index_get(cur);
00587                     dataReq->Key.KeyIdMode = mac_helper_default_security_key_id_mode_get(cur);
00588                     break;
00589                 case B_SECURITY_KEY_ID_IMPLICIT:
00590                     dataReq->Key.KeyIdMode = MAC_KEY_ID_MODE_IMPLICIT;
00591                     break;
00592 
00593                 case B_SECURITY_KEY_ID_2:
00594                     dataReq->Key.KeyIndex = 0xff;
00595                     dataReq->Key.KeyIdMode = MAC_KEY_ID_MODE_SRC4_IDX;
00596                     common_write_32_bit(0xffffffff, dataReq->Key.Keysource);
00597                     break;
00598             }
00599         }
00600     }
00601 }
00602 
00603 static bool lowpan_adaptation_indirect_cache_sanity_check(protocol_interface_info_entry_t *cur, fragmenter_interface_t *interface_ptr)
00604 {
00605     fragmenter_tx_entry_t *active_tx_entry;
00606     ns_list_foreach(fragmenter_tx_entry_t, fragmenter_tx_entry, &interface_ptr->indirect_tx_queue) {
00607         if (fragmenter_tx_entry->indirect_data_cached == false) {
00608             // active entry, jump to next one
00609             continue;
00610         }
00611 
00612         // cached entry found, check if it has a pending data request
00613         active_tx_entry = lowpan_adaptation_indirect_mac_data_request_active(interface_ptr, fragmenter_tx_entry);
00614 
00615         if (active_tx_entry == NULL) {
00616             // entry is in cache and is not sent to mac => trigger this
00617             tr_debug_extra("sanity check, push seq %d to addr %s", fragmenter_tx_entry->buf->seq, trace_ipv6(fragmenter_tx_entry->buf->dst_sa.address));
00618             fragmenter_tx_entry->indirect_data_cached = false;
00619             lowpan_data_request_to_mac(cur, fragmenter_tx_entry->buf, fragmenter_tx_entry);
00620             return true;
00621         }
00622     }
00623 
00624     return false;
00625 }
00626 
00627 static bool lowpan_adaptation_indirect_cache_trigger(protocol_interface_info_entry_t *cur, fragmenter_interface_t *interface_ptr, fragmenter_tx_entry_t *tx_ptr)
00628 {
00629     tr_debug_extra("lowpan_adaptation_indirect_cache_trigger()");
00630 
00631     if (ns_list_count(&interface_ptr->indirect_tx_queue) == 0) {
00632         return false;
00633     }
00634 
00635     /* Trigger first cached entry */
00636     ns_list_foreach(fragmenter_tx_entry_t, fragmenter_tx_entry, &interface_ptr->indirect_tx_queue) {
00637         if (fragmenter_tx_entry->indirect_data_cached) {
00638             if (addr_ipv6_equal(tx_ptr->buf->dst_sa.address, fragmenter_tx_entry->buf->dst_sa.address)) {
00639                 tr_debug_extra("pushing seq %d to addr %s", fragmenter_tx_entry->buf->seq, trace_ipv6(fragmenter_tx_entry->buf->dst_sa.address));
00640                 fragmenter_tx_entry->indirect_data_cached = false;
00641                 lowpan_data_request_to_mac(cur, fragmenter_tx_entry->buf, fragmenter_tx_entry);
00642                 return true;
00643             }
00644         }
00645     }
00646 
00647     /* Sanity check: if nothing could be triggered for this destination, check the rest of the cached queue */
00648     return lowpan_adaptation_indirect_cache_sanity_check(cur, interface_ptr);
00649 }
00650 
00651 static fragmenter_tx_entry_t* lowpan_adaptation_indirect_mac_data_request_active(fragmenter_interface_t *interface_ptr, fragmenter_tx_entry_t *tx_ptr)
00652 {
00653     ns_list_foreach(fragmenter_tx_entry_t, fragmenter_tx_entry, &interface_ptr->indirect_tx_queue) {
00654         if (fragmenter_tx_entry->indirect_data_cached == false) {
00655             if (addr_ipv6_equal(tx_ptr->buf->dst_sa.address, fragmenter_tx_entry->buf->dst_sa.address)) {
00656                 tr_debug_extra("active seq: %d", fragmenter_tx_entry->buf->seq);
00657                 return fragmenter_tx_entry;
00658             }
00659         }
00660     }
00661     return NULL;
00662 }
00663 
00664 static fragmenter_tx_entry_t* lowpan_adaptation_indirect_first_cached_request_get(fragmenter_interface_t *interface_ptr, fragmenter_tx_entry_t *tx_ptr)
00665 {
00666     ns_list_foreach(fragmenter_tx_entry_t, fragmenter_tx_entry, &interface_ptr->indirect_tx_queue) {
00667         if (fragmenter_tx_entry->indirect_data_cached == true) {
00668             if (addr_ipv6_equal(tx_ptr->buf->dst_sa.address, fragmenter_tx_entry->buf->dst_sa.address)) {
00669                 tr_debug_extra("first cached seq: %d", fragmenter_tx_entry->buf->seq);
00670                 return fragmenter_tx_entry;
00671             }
00672         }
00673     }
00674     return NULL;
00675 }
00676 
00677 static void lowpan_adaptation_make_room_for_small_packet(protocol_interface_info_entry_t *cur, fragmenter_interface_t *interface_ptr, mle_neigh_table_entry_t *neighbour_to_count)
00678 {
00679     if (interface_ptr->max_indirect_small_packets_per_child == 0) {
00680         return;
00681     }
00682 
00683     uint_fast16_t count = 0;
00684 
00685     ns_list_foreach_reverse_safe(fragmenter_tx_entry_t, tx_entry, &interface_ptr->indirect_tx_queue) {
00686         mle_neigh_table_entry_t *tx_neighbour = mle_class_get_by_link_address(cur->id, tx_entry->buf->dst_sa.address + 2, tx_entry->buf->dst_sa.addr_type);
00687         if (tx_neighbour == neighbour_to_count && buffer_data_length(tx_entry->buf) <= interface_ptr->indirect_big_packet_threshold) {
00688             if (++count >= interface_ptr->max_indirect_small_packets_per_child) {
00689                 lowpan_adaptation_indirect_queue_free_message(cur, interface_ptr, tx_entry);
00690             }
00691         }
00692     }
00693 }
00694 
00695 static void lowpan_adaptation_make_room_for_big_packet(struct protocol_interface_info_entry *cur, fragmenter_interface_t *interface_ptr)
00696 {
00697     if (interface_ptr->max_indirect_big_packets_total == 0) {
00698         return;
00699     }
00700 
00701     uint_fast16_t count = 0;
00702 
00703     ns_list_foreach_reverse_safe(fragmenter_tx_entry_t, tx_entry, &interface_ptr->indirect_tx_queue) {
00704         if (buffer_data_length(tx_entry->buf) > interface_ptr->indirect_big_packet_threshold) {
00705             if (++count >= interface_ptr->max_indirect_big_packets_total) {
00706                 tr_debug_extra("free seq: %d", tx_entry->buf->seq);
00707                 lowpan_adaptation_indirect_queue_free_message(cur, interface_ptr, tx_entry);
00708             }
00709         }
00710     }
00711 }
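/* Together these two helpers bound the indirect queue: at most
 * max_indirect_small_packets_per_child small packets (payload at or below
 * indirect_big_packet_threshold) are kept per sleepy child, and at most
 * max_indirect_big_packets_total big packets are kept in total. The oldest
 * matching entries are purged so that, with the incoming message, the queue
 * stays within these limits.
 */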
00712 
00713 static void lowpan_data_request_to_mac(protocol_interface_info_entry_t *cur, buffer_t *buf, fragmenter_tx_entry_t *tx_ptr)
00714 {
00715     mcps_data_req_t dataReq;
00716 
00717     lowpan_adaptation_data_request_primitiv_set(buf, &dataReq, cur);
00718     if (tx_ptr->fragmented_data) {
00719         dataReq.msdu = tx_ptr->fragmenter_buf;
00720         //Call fragmenter
00721         bool more_fragments = lowpan_message_fragmentation_message_write(tx_ptr, &dataReq);
00722         if (dataReq.InDirectTx) {
00723             dataReq.PendingBit |= more_fragments;
00724         }
00725     } else {
00726         dataReq.msduLength = buffer_data_length(buf);
00727         dataReq.msdu = buffer_data_pointer(buf);
00728     }
00729     if (buf->link_specific.ieee802_15_4.rf_channel_switch) {
00730         //Switch channel if selected channel is different
00731         if (cur->mac_parameters->mac_channel != buf->link_specific.ieee802_15_4.selected_channel) {
00732             uint8_t channel = cur->mac_parameters->mac_channel;
00733             mac_helper_mac_channel_set(cur, buf->link_specific.ieee802_15_4.selected_channel);
00734             buf->link_specific.ieee802_15_4.selected_channel = channel;
00735         } else {
00736             buf->link_specific.ieee802_15_4.rf_channel_switch = false;
00737         }
00738     }
00739 
00740     cur->mac_api->mcps_data_req(cur->mac_api, &dataReq);
00741 }
00742 
00743 int8_t lowpan_adaptation_interface_tx(protocol_interface_info_entry_t *cur, buffer_t *buf)
00744 {
00745     if (!buf) {
00746         return -1;
00747     }
00748 
00749     if (!cur || !cur->mac_api || !cur->mac_api->mcps_data_req) {
00750         goto tx_error_handler;
00751     }
00752 
00753     fragmenter_interface_t *interface_ptr = lowpan_adaptation_interface_discover(cur->id);
00754     if (!interface_ptr) {
00755         goto tx_error_handler;
00756     }
00757 
00758     //Check packet size
00759     bool fragmented_needed = lowpan_adaptation_request_longer_than_mtu(cur, buf);
00760     bool is_unicast = buf->link_specific.ieee802_15_4.requestAck;
00761     bool indirect = buf->link_specific.ieee802_15_4.indirectTxProcess;
00762     if (!indirect) {
00763         if (((is_unicast && interface_ptr->active_unicast_tx_buf.buf) || (!is_unicast && interface_ptr->active_broadcast_tx_buf.buf)) || (fragmented_needed && interface_ptr->fragmenter_active)) {
00764             lowpan_adaptation_tx_queue_write(interface_ptr, buf);
00765             return 0; //Queued; the data confirm path triggers the next transmission
00766         }
00767     }
00768 
00769     //Allocate Handle
00770     buf->seq = lowpan_data_request_unique_handle_get(interface_ptr);
00771 
00772     if (buf->options.ll_sec_bypass_frag_deny && fragmented_needed) {
00773         // force security for fragmented packets
00774         buf->options.ll_security_bypass_tx = false;
00775     }
00776 
00777     fragmenter_tx_entry_t *tx_ptr = lowpan_adaptation_tx_process_init(interface_ptr, indirect, fragmented_needed, is_unicast);
00778     if (!tx_ptr) {
00779         goto tx_error_handler;
00780     }
00781 
00782     tx_ptr->buf = buf;
00783 
00784     if (fragmented_needed) {
00785         //Fragmentation init
00786         if (lowpan_message_fragmentation_init(buf, tx_ptr, cur) ) {
00787             tr_error("Fragment init fail");
00788             if (indirect) {
00789                 ns_dyn_mem_free(tx_ptr->fragmenter_buf);
00790                 ns_dyn_mem_free(tx_ptr);
00791             }
00792             goto tx_error_handler;
00793         }
00794 
00795         tx_ptr->tag = interface_ptr->local_frag_tag++;
00796         if (!indirect) {
00797             interface_ptr->fragmenter_active = true;
00798         }
00799     }
00800 
00801     if (indirect) {
00802         //Add to the indirect queue
00803         fragmenter_tx_entry_t *tx_ptr_cached;
00804         mle_neigh_table_entry_t *mle_entry = mle_class_get_by_link_address(cur->id, buf->dst_sa.address + 2, buf->dst_sa.addr_type);
00805         if (mle_entry) {
00806             buf->link_specific.ieee802_15_4.indirectTTL = (uint32_t) mle_entry->timeout_rx * MLE_TIMER_TICKS_MS;
00807         } else {
00808             buf->link_specific.ieee802_15_4.indirectTTL = cur->mac_parameters->mac_in_direct_entry_timeout;
00809         }
00810 
00811         tr_debug_extra("indirect seq: %d, addr=%s", tx_ptr->buf->seq, trace_ipv6(buf->dst_sa.address));
00812 
00813         // Make room for a new message if needed
00814         if (buffer_data_length(buf) <= interface_ptr->indirect_big_packet_threshold) {
00815             lowpan_adaptation_make_room_for_small_packet(cur, interface_ptr, mle_entry);
00816         } else {
00817             lowpan_adaptation_make_room_for_big_packet(cur, interface_ptr);
00818         }
00819 
00820         if (lowpan_adaptation_indirect_mac_data_request_active(interface_ptr, tx_ptr)) {
00821             // MAC is handling a previous data request, cache the new one
00822             tr_debug_extra("caching seq: %d", tx_ptr->buf->seq);
00823             tx_ptr->indirect_data_cached = true;
00824         }
00825 
00826         ns_list_add_to_end(&interface_ptr->indirect_tx_queue, tx_ptr);
00827 
00828         // Check if current message can be delivered to MAC or should some cached message be delivered first
00829         tx_ptr_cached = lowpan_adaptation_indirect_first_cached_request_get(interface_ptr, tx_ptr);
00830         if (tx_ptr->indirect_data_cached == false && tx_ptr_cached) {
00831             tr_debug_extra("sending cached seq: %d", tx_ptr_cached->buf->seq);
00832             // set current message to cache
00833             tx_ptr->indirect_data_cached = true;
00834             // swap entries
00835             tx_ptr = tx_ptr_cached;
00836             tx_ptr->indirect_data_cached = false;
00837             buf = tx_ptr_cached->buf;
00838         } else if (tx_ptr->indirect_data_cached == true) {
00839             // A MAC data request is ongoing and the new request was cached
00840             return 0;
00841         }
00842     }
00843 
00844     lowpan_data_request_to_mac(cur, buf, tx_ptr);
00845     return 0;
00846 
00847 
00848 tx_error_handler:
00849     socket_tx_buffer_event_and_free(buf, SOCKET_NO_RAM);
00850     return -1;
00851 
00852 }
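/* Summary of the tx flow above: direct traffic beyond the single active
 * unicast/broadcast slot (or any new fragmented packet while fragmentation is
 * already active) is parked in directTxQueue and re-submitted from the data
 * confirm path, while indirect traffic is always queued per destination and
 * handed to the MAC one request at a time, later packets being cached until
 * the pending request completes.
 */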
00853 
00854 static bool lowpan_adaptation_tx_process_ready(fragmenter_tx_entry_t *tx_ptr)
00855 {
00856     if (!tx_ptr->fragmented_data) {
00857         if (tx_ptr->buf->ip_routed_up) {
00858             protocol_stats_update(STATS_IP_ROUTE_UP, buffer_data_length(tx_ptr->buf));
00859         } else {
00860             protocol_stats_update(STATS_IP_TX_COUNT, buffer_data_length(tx_ptr->buf));
00861         }
00862         return true;
00863     }
00864 
00865 
00866 
00867     //Update data pointer by last packet length
00868     buffer_data_strip_header(tx_ptr->buf, tx_ptr->frag_len);
00869     //Update offset
00870     if (!tx_ptr->first_fragment) {
00871         tx_ptr->offset += tx_ptr->frag_len / 8;
00872     } else {
00873         tx_ptr->first_fragment = false;
00874     }
00875 
00876     /* Check whether there is still data to send */
00877     tx_ptr->frag_len = buffer_data_length(tx_ptr->buf);
00878 
00879 
00880     if (tx_ptr->frag_len == 0) {
00881         //Release current data
00882         if (tx_ptr->buf->ip_routed_up) {
00883             protocol_stats_update(STATS_IP_ROUTE_UP, tx_ptr->orig_size);
00884         } else {
00885             protocol_stats_update(STATS_IP_TX_COUNT, tx_ptr->orig_size);
00886         }
00887         return true;
00888     }
00889 
00890     //Continue Process
00891 
00892     if (tx_ptr->unfrag_len + 5 + tx_ptr->frag_len > tx_ptr->frag_max) {
00893         tx_ptr->frag_len = tx_ptr->frag_max - 5 - tx_ptr->unfrag_len;
00894         tx_ptr->frag_len &= ~7;
00895     }
00896 
00897     return false;
00898 }
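/* Continuing the example from lowpan_message_fragmentation_init(): after the
 * 87-byte first payload is confirmed and stripped, 120 compressed bytes
 * remain, so the next fragment is capped to (96 - 5 - 0) & ~7 = 88 bytes
 * behind its 5-byte FRAGN header. The final fragment then carries the last
 * 32 bytes at offset 26 (26 * 8 + 32 == 240), and once that is confirmed
 * frag_len drops to 0 and this function reports the datagram as complete.
 */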
00899 
00900 static void lowpan_adaptation_data_process_clean(fragmenter_interface_t *interface_ptr, fragmenter_tx_entry_t *tx_ptr, uint8_t socket_event)
00901 {
00902     buffer_t *buf = tx_ptr->buf;
00903     tx_ptr->buf = NULL;
00904     if (buf->link_specific.ieee802_15_4.indirectTxProcess) {
00905         //release from list and free entry
00906         lowpan_indirect_entry_free(&interface_ptr->indirect_tx_queue, tx_ptr);
00907     }
00908 
00909     socket_tx_buffer_event_and_free(buf, socket_event);
00910 }
00911 
00912 
00913 int8_t lowpan_adaptation_interface_tx_confirm(protocol_interface_info_entry_t *cur, const mcps_data_conf_t *confirm)
00914 {
00915     if (!cur || !confirm) {
00916         return -1;
00917     }
00918 
00919     fragmenter_interface_t *interface_ptr = lowpan_adaptation_interface_discover(cur->id);
00920     if (!interface_ptr) {
00921         return -1;
00922     }
00923 
00924     //Find the tx entry that matches this confirmation
00925     fragmenter_tx_entry_t *tx_ptr;
00926     bool active_direct_confirm;
00927     bool is_unicast = true;
00928 
00929     if (lowpan_active_tx_handle_verify(confirm->msduHandle,interface_ptr->active_unicast_tx_buf.buf)) {
00930         active_direct_confirm = true;
00931         tx_ptr = &interface_ptr->active_unicast_tx_buf;
00932     } else if (lowpan_active_tx_handle_verify(confirm->msduHandle,interface_ptr->active_broadcast_tx_buf.buf)) {
00933         active_direct_confirm = true;
00934         tx_ptr = &interface_ptr->active_broadcast_tx_buf;
00935         is_unicast = false;
00936     } else {
00937         active_direct_confirm = false;
00938         tx_ptr = lowpan_indirect_tx_handle_verify(confirm->msduHandle, &interface_ptr->indirect_tx_queue);
00939     }
00940 
00941     if (!tx_ptr) {
00942         tr_error("No data request for this confirmation %u", confirm->msduHandle);
00943         return -1;
00944     }
00945 
00946     //Check the confirmation status
00947     buffer_t *buf = tx_ptr->buf;
00948 
00949     //Indirect data expiration
00950     if (confirm->status == MLME_TRANSACTION_EXPIRED && !active_direct_confirm) {
00951         if (buf->link_specific.ieee802_15_4.indirectTTL > 7000)
00952         {
00953             buf->link_specific.ieee802_15_4.indirectTTL -= 7000;
00954             //Push Back to MAC
00955             lowpan_data_request_to_mac(cur, buf, tx_ptr);
00956             return 0;
00957         }
00958     }
00959 
00960     switch (confirm->status) {
00961         case MLME_TX_NO_ACK:
00962         case MLME_NO_DATA:
00963         case MLME_SUCCESS:
00964             if (buf->link_specific.ieee802_15_4.requestAck) {
00965                 bool success = false;
00966                 if (confirm->status == MLME_SUCCESS) {
00967                     success = true;
00968                 }
00969                 etx_transm_attempts_update(cur->id, 1 + confirm->tx_retries, success, buf->dst_sa.addr_type, buf->dst_sa.address);
00970             }
00971             break;
00972         default:
00973 
00974             break;
00975 
00976     }
00977     //Switch original channel back
00978     if (buf->link_specific.ieee802_15_4.rf_channel_switch) {
00979         mac_helper_mac_channel_set(cur, buf->link_specific.ieee802_15_4.selected_channel);
00980         buf->link_specific.ieee802_15_4.rf_channel_switch = false;
00981     }
00982 
00983     switch (confirm->status) {
00984 
00985         case MLME_BUSY_CHAN:
00986             lowpan_data_request_to_mac(cur, buf, tx_ptr);
00987             break;
00988         case MLME_SUCCESS:
00989 
00990             //Check whether there are more fragments to send
00991             if (lowpan_adaptation_tx_process_ready(tx_ptr)) {
00992                 bool triggered_from_indirect_cache = false;
00993                 if (tx_ptr->fragmented_data && active_direct_confirm) {
00994                     //Clean
00995                     interface_ptr->fragmenter_active = false;
00996                 }
00997 
00998                 if (tx_ptr->buf->link_specific.ieee802_15_4.indirectTxProcess) {
00999                     triggered_from_indirect_cache = lowpan_adaptation_indirect_cache_trigger(cur, interface_ptr, tx_ptr);
01000                 }
01001 
01002                 lowpan_adaptation_data_process_clean(interface_ptr, tx_ptr, map_mlme_status_to_socket_event(confirm->status));
01003 
01004                 if (triggered_from_indirect_cache) {
01005                     return 0;
01006                 }
01007             } else {
01008                 lowpan_data_request_to_mac(cur, buf, tx_ptr);
01009             }
01010 
01011             break;
01012         case MLME_TX_NO_ACK:
01013         case MLME_SECURITY_FAIL:
01014         case MLME_TRANSACTION_EXPIRED:
01015         default:
01016             tr_error("MCPS Data fail by status %u", confirm->status);
01017 #ifdef HAVE_RPL
01018             if (confirm->status == MLME_TX_NO_ACK) {
01019                 if (buf->route && rpl_data_is_rpl_parent_route(buf->route->route_info.source)) {
01020                     protocol_stats_update(STATS_RPL_PARENT_TX_FAIL, 1);
01021                 }
01022             }
01023 #endif
01024             if (tx_ptr->fragmented_data) {
01025                 tx_ptr->buf->buf_ptr = tx_ptr->buf->buf_end;
01026                 tx_ptr->buf->buf_ptr -= tx_ptr->orig_size;
01027                 if (active_direct_confirm) {
01028                     interface_ptr->fragmenter_active = false;
01029                 }
01030             }
01031 
01032             lowpan_adaptation_data_process_clean(interface_ptr, tx_ptr, map_mlme_status_to_socket_event(confirm->status));
01033             break;
01034 
01035     }
01036 
01037     if ((is_unicast && !interface_ptr->active_unicast_tx_buf.buf) || (!is_unicast && !interface_ptr->active_broadcast_tx_buf.buf)) {
01038         //Read the queue and trigger the next direct request
01039         lowpan_adaptation_interface_tx(cur, lowpan_adaptation_tx_queue_read(interface_ptr, cur));
01040     }
01041 
01042     return 0;
01043 
01044 }
01045 
01046 static uint8_t map_mlme_status_to_socket_event(uint8_t mlme_status)
01047 {
01048     uint8_t socket_event;
01049 
01050     switch (mlme_status) {
01051         case MLME_SUCCESS:
01052             socket_event = SOCKET_TX_DONE;
01053             break;
01054         case MLME_TX_NO_ACK:
01055         case MLME_SECURITY_FAIL:
01056         case MLME_TRANSACTION_EXPIRED:
01057         default:
01058             socket_event = SOCKET_TX_FAIL;
01059             break;
01060     }
01061 
01062     return (socket_event);
01063 }
01064 
01065 bool lowpan_adaptation_tx_active(int8_t interface_id)
01066 {
01067     fragmenter_interface_t *interface_ptr = lowpan_adaptation_interface_discover(interface_id);
01068 
01069     if (!interface_ptr || (!interface_ptr->active_unicast_tx_buf.buf && !interface_ptr->active_broadcast_tx_buf.buf)) {
01070         return false;
01071     }
01072     return true;
01073 }
01074 
01075 static bool lowpan_tx_buffer_address_compare(sockaddr_t *dst_sa, uint8_t *address_ptr, addrtype_t adr_type)
01076 {
01077 
01078     if (dst_sa->addr_type  != adr_type) {
01079         return false;
01080     }
01081 
01082     uint8_t compare_length;
01083     switch (adr_type) {
01084         case ADDR_802_15_4_SHORT:
01085             compare_length = 2;
01086             break;
01087         case ADDR_802_15_4_LONG:
01088             compare_length = 8;
01089             break;
01090         default:
01091             return false;
01092     }
01093 
01094 
01095     if (memcmp(&dst_sa->address[2], address_ptr, compare_length)) {
01096         return false;
01097     }
01098     return true;
01099 }
01100 
01101 static void lowpan_adaptation_purge_from_mac(struct protocol_interface_info_entry *cur, uint8_t msduhandle)
01102 {
01103     mcps_purge_t purge_req;
01104     purge_req.msduHandle = msduhandle;
01105     cur->mac_api->mcps_purge_req(cur->mac_api, &purge_req);
01106 }
01107 
01108 static void lowpan_adaptation_indirect_queue_free_message(struct protocol_interface_info_entry *cur, fragmenter_interface_t *interface_ptr, fragmenter_tx_entry_t *tx_ptr)
01109 {
01110     tr_debug("Purge from indirect handle %u", tx_ptr->buf->seq);
01111     if (cur->mac_api->mcps_purge_req) {
01112         lowpan_adaptation_purge_from_mac(cur, tx_ptr->buf->seq);
01113     }
01114     lowpan_adaptation_data_process_clean(interface_ptr, tx_ptr, SOCKET_TX_FAIL);
01115 }
01116 
01117 int8_t lowpan_adaptation_indirect_free_messages_from_queues_by_address(struct protocol_interface_info_entry *cur, uint8_t *address_ptr, addrtype_t adr_type)
01118 {
01119     fragmenter_interface_t *interface_ptr = lowpan_adaptation_interface_discover(cur->id);
01120 
01121     if (!interface_ptr) {
01122         return -1;
01123     }
01124 
01125     //Check first indirect queue
01126     ns_list_foreach_safe(fragmenter_tx_entry_t, entry, &interface_ptr->indirect_tx_queue) {
01127 
01128         if (lowpan_tx_buffer_address_compare(&entry->buf->dst_sa, address_ptr, adr_type)) {
01129             //Purge from mac
01130             lowpan_adaptation_indirect_queue_free_message(cur, interface_ptr, entry);
01131         }
01132     }
01133 
01134     return 0;
01135 
01136 }
01137 
01138 int8_t lowpan_adaptation_indirect_queue_params_set(struct protocol_interface_info_entry *cur, uint16_t indirect_big_packet_threshold, uint16_t max_indirect_big_packets_total, uint16_t max_indirect_small_packets_per_child)
01139 {
01140     fragmenter_interface_t *interface_ptr = lowpan_adaptation_interface_discover(cur->id);
01141 
01142     if (!interface_ptr) {
01143         return -1;
01144     }
01145 
01146     interface_ptr->indirect_big_packet_threshold = indirect_big_packet_threshold;
01147     interface_ptr->max_indirect_big_packets_total = max_indirect_big_packets_total;
01148     interface_ptr->max_indirect_small_packets_per_child = max_indirect_small_packets_per_child;
01149 
01150     return 0;
01151 }
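/* Illustrative configuration sketch (the values are arbitrary examples, not
 * defaults from this source):
 *
 *     // Treat payloads above 71 bytes as "big", keep at most 10 big packets
 *     // queued in total and at most 2 small packets per sleepy child.
 *     lowpan_adaptation_indirect_queue_params_set(cur, 71, 10, 2);
 */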