TDBStore.cpp
/*
 * Copyright (c) 2018 ARM Limited. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0
 * Licensed under the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// ----------------------------------------------------------- Includes -----------------------------------------------------------

#include "TDBStore.h"

#include <algorithm>
#include <string.h>
#include <stdio.h>
#include "mbed_error.h"
#include "mbed_assert.h"
#include "mbed_wait_api.h"
#include "MbedCRC.h"

using namespace mbed;

// --------------------------------------------------------- Definitions ----------------------------------------------------------

static const uint32_t delete_flag = (1UL << 31);
static const uint32_t internal_flags = delete_flag;
// Only write once flag is supported, other two are kept in storage but ignored
static const uint32_t supported_flags = KVStore::WRITE_ONCE_FLAG | KVStore::REQUIRE_CONFIDENTIALITY_FLAG | KVStore::REQUIRE_REPLAY_PROTECTION_FLAG;

namespace {

typedef struct {
    uint32_t magic;
    uint16_t header_size;
    uint16_t revision;
    uint32_t flags;
    uint16_t key_size;
    uint16_t reserved;
    uint32_t data_size;
    uint32_t crc;
} record_header_t;

typedef struct {
    uint32_t hash;
    bd_size_t bd_offset;
} ram_table_entry_t;

static const char *master_rec_key = "TDBS";
static const uint32_t tdbstore_magic = 0x54686683;
static const uint32_t tdbstore_revision = 1;

typedef struct {
    uint16_t version;
    uint16_t tdbstore_revision;
    uint32_t reserved;
} master_record_data_t;

typedef enum {
    TDBSTORE_AREA_STATE_NONE = 0,
    TDBSTORE_AREA_STATE_ERASED,
    TDBSTORE_AREA_STATE_INVALID,
    TDBSTORE_AREA_STATE_VALID,
} area_state_e;

typedef struct {
    uint16_t trailer_size;
    uint16_t data_size;
    uint32_t crc;
} reserved_trailer_t;

static const uint32_t work_buf_size = 64;
static const uint32_t initial_crc = 0xFFFFFFFF;
static const uint32_t initial_max_keys = 16;

// incremental set handle
typedef struct {
    record_header_t header;
    bd_size_t bd_base_offset;
    bd_size_t bd_curr_offset;
    uint32_t offset_in_data;
    uint32_t ram_table_ind;
    uint32_t hash;
    bool new_key;
} inc_set_handle_t;

// iterator handle
typedef struct {
    int iterator_num;
    uint32_t ram_table_ind;
    char *prefix;
} key_iterator_handle_t;

} // anonymous namespace


// -------------------------------------------------- Local Functions Declaration ----------------------------------------------------

// -------------------------------------------------- Functions Implementation ----------------------------------------------------

static inline uint32_t align_up(uint32_t val, uint32_t size)
{
    return (((val - 1) / size) + 1) * size;
}


static uint32_t calc_crc(uint32_t init_crc, uint32_t data_size, const void *data_buf)
{
    uint32_t crc;
    MbedCRC<POLY_32BIT_ANSI, 32> ct(init_crc, 0x0, true, false);
    ct.compute(data_buf, data_size, &crc);
    return crc;
}

// Class member functions

TDBStore::TDBStore(BlockDevice *bd) : _ram_table(0), _max_keys(0),
    _num_keys(0), _bd(bd), _buff_bd(0), _free_space_offset(0), _master_record_offset(0),
    _master_record_size(0), _is_initialized(false), _active_area(0), _active_area_version(0), _size(0),
    _area_params{}, _prog_size(0), _work_buf(0), _key_buf(0), _variant_bd_erase_unit_size(false), _inc_set_handle(0)
{
    for (int i = 0; i < _num_areas; i++) {
        _area_params[i] = { 0 };
    }
    for (int i = 0; i < _max_open_iterators; i++) {
        _iterator_table[i] = { 0 };
    }
}

TDBStore::~TDBStore()
{
    deinit();
}

int TDBStore::read_area(uint8_t area, uint32_t offset, uint32_t size, void *buf)
{
    //Check that we are not crossing area boundary
    if (offset + size > _size) {
        return MBED_ERROR_READ_FAILED;
    }
    int os_ret = _buff_bd->read(buf, _area_params[area].address + offset, size);

    if (os_ret) {
        return MBED_ERROR_READ_FAILED;
    }

    return MBED_SUCCESS;
}

int TDBStore::write_area(uint8_t area, uint32_t offset, uint32_t size, const void *buf)
{
    int os_ret = _buff_bd->program(buf, _area_params[area].address + offset, size);
    if (os_ret) {
        return MBED_ERROR_WRITE_FAILED;
    }

    return MBED_SUCCESS;
}

int TDBStore::erase_erase_unit(uint8_t area, uint32_t offset)
{
    uint32_t bd_offset = _area_params[area].address + offset;
    uint32_t eu_size = _buff_bd->get_erase_size(bd_offset);

    if (_buff_bd->get_erase_value() != -1) {
        return _buff_bd->erase(bd_offset, eu_size);
    } else {
        // We need to simulate erase, as our block device
        // does not do it. We can do this one byte at a time
        // because we use BufferedBlockDevice that has page buffers
        uint8_t val = 0xff;
        int ret;
        for (; eu_size; --eu_size) {
            ret = _buff_bd->program(&val, bd_offset++, 1);
            if (ret) {
                return ret;
            }
        }
    }
    return MBED_SUCCESS;
}

void TDBStore::calc_area_params()
{
    // TDBStore can't exceed 32 bits
    bd_size_t bd_size = std::min(_bd->size(), (bd_size_t) 0x80000000L);

    memset(_area_params, 0, sizeof(_area_params));
    size_t area_0_size = 0;
    bd_size_t prev_erase_unit_size = _bd->get_erase_size(area_0_size);
    _variant_bd_erase_unit_size = 0;

    while (area_0_size < bd_size / 2) {
        bd_size_t erase_unit_size = _bd->get_erase_size(area_0_size);
        _variant_bd_erase_unit_size |= (erase_unit_size != prev_erase_unit_size);
        area_0_size += erase_unit_size;
    }

    _area_params[0].address = 0;
    _area_params[0].size = area_0_size;
    _area_params[1].address = area_0_size;
    _area_params[1].size = bd_size - area_0_size;
}


// This function, reading a record from the BD, is used for multiple purposes:
// - Init (scan all records, no need to return file name and data)
// - Get (return file data)
// - Get first/next file (check whether name matches, return name if so)
int TDBStore::read_record(uint8_t area, uint32_t offset, char *key,
                          void *data_buf, uint32_t data_buf_size,
                          uint32_t &actual_data_size, size_t data_offset, bool copy_key,
                          bool copy_data, bool check_expected_key, bool calc_hash,
                          uint32_t &hash, uint32_t &flags, uint32_t &next_offset)
{
    int ret;
    record_header_t header;
    uint32_t total_size, key_size, data_size;
    uint32_t curr_data_offset;
    char *user_key_ptr;
    uint32_t crc = initial_crc;
    // Upper layers typically use non zero offsets for reading the records chunk by chunk,
    // so only validate entire record at first chunk (otherwise we'll have a serious performance penalty).
    bool validate = (data_offset == 0);

    ret = MBED_SUCCESS;
    // next offset should only be updated to the end of record if successful
    next_offset = offset;

    ret = read_area(area, offset, sizeof(header), &header);
    if (ret) {
        return ret;
    }

    if (header.magic != tdbstore_magic) {
        return MBED_ERROR_INVALID_DATA_DETECTED;
    }

    offset += align_up(sizeof(header), _prog_size);

    key_size = header.key_size;
    data_size = header.data_size;
    flags = header.flags;

    if ((!key_size) || (key_size >= MAX_KEY_SIZE)) {
        return MBED_ERROR_INVALID_DATA_DETECTED;
    }

    total_size = key_size + data_size;

    // Make sure our read sizes didn't cause any wraparounds
    if ((total_size < key_size) || (total_size < data_size)) {
        return MBED_ERROR_INVALID_DATA_DETECTED;
    }

    if (offset + total_size >= _size) {
        return MBED_ERROR_INVALID_DATA_DETECTED;
    }

    if (data_offset > data_size) {
        return MBED_ERROR_INVALID_SIZE;
    }

    actual_data_size = std::min((size_t)data_buf_size, (size_t)data_size - data_offset);

    if (copy_data && actual_data_size && !data_buf) {
        return MBED_ERROR_INVALID_ARGUMENT;
    }

    if (validate) {
        // Calculate CRC on header (excluding CRC itself)
        crc = calc_crc(crc, sizeof(record_header_t) - sizeof(crc), &header);
        curr_data_offset = 0;
    } else {
        // Non validation case: No need to read the key, nor the parts before data_offset
        // or after the actual part requested by the user.
        total_size = actual_data_size;
        curr_data_offset = data_offset;
        offset += data_offset + key_size;
        // Mark code that key handling is finished
        key_size = 0;
    }

    user_key_ptr = key;
    hash = initial_crc;

    while (total_size) {
        uint8_t *dest_buf;
        uint32_t chunk_size;
        if (key_size) {
            // This means that we're on the key part
            if (copy_key) {
                dest_buf = reinterpret_cast<uint8_t *>(user_key_ptr);
                chunk_size = key_size;
                user_key_ptr[key_size] = '\0';
            } else {
                dest_buf = _work_buf;
                chunk_size = std::min(key_size, work_buf_size);
            }
        } else {
            // This means that we're on the data part
            // We have four cases that need different handling:
            // 1. Before data_offset - read to work buffer
            // 2. After data_offset, but before actual part is finished - read to user buffer
            // 3. After actual part is finished - read to work buffer
            // 4. Copy data flag not set - read to work buffer
            if (curr_data_offset < data_offset) {
                chunk_size = std::min((size_t)work_buf_size, (size_t)(data_offset - curr_data_offset));
                dest_buf = _work_buf;
            } else if (copy_data && (curr_data_offset < data_offset + actual_data_size)) {
                chunk_size = actual_data_size;
                dest_buf = static_cast<uint8_t *>(data_buf);
            } else {
                chunk_size = std::min(work_buf_size, total_size);
                dest_buf = _work_buf;
            }
        }
        ret = read_area(area, offset, chunk_size, dest_buf);
        if (ret) {
            goto end;
        }

        if (validate) {
            // calculate CRC on current read chunk
            crc = calc_crc(crc, chunk_size, dest_buf);
        }

        if (key_size) {
            // We're on key part. May need to calculate hash or check whether key is the expected one
            if (check_expected_key) {
                if (memcmp(user_key_ptr, dest_buf, chunk_size)) {
                    ret = MBED_ERROR_ITEM_NOT_FOUND;
                }
            }

            if (calc_hash) {
                hash = calc_crc(hash, chunk_size, dest_buf);
            }

            user_key_ptr += chunk_size;
            key_size -= chunk_size;
            if (!key_size) {
                offset += data_offset;
            }
        } else {
            curr_data_offset += chunk_size;
        }

        total_size -= chunk_size;
        offset += chunk_size;
    }

    if (validate && (crc != header.crc)) {
        ret = MBED_ERROR_INVALID_DATA_DETECTED;
        goto end;
    }

    next_offset = align_up(offset, _prog_size);

end:
    return ret;
}

int TDBStore::find_record(uint8_t area, const char *key, uint32_t &offset,
                          uint32_t &ram_table_ind, uint32_t &hash)
{
    ram_table_entry_t *ram_table = (ram_table_entry_t *) _ram_table;
    ram_table_entry_t *entry;
    int ret = MBED_ERROR_ITEM_NOT_FOUND;
    uint32_t actual_data_size;
    uint32_t flags, dummy_hash, next_offset;


    hash = calc_crc(initial_crc, strlen(key), key);

    for (ram_table_ind = 0; ram_table_ind < _num_keys; ram_table_ind++) {
        entry = &ram_table[ram_table_ind];
        offset = entry->bd_offset;
        if (hash < entry->hash) {
            continue;
        }
        if (hash > entry->hash) {
            return MBED_ERROR_ITEM_NOT_FOUND;
        }
        ret = read_record(_active_area, offset, const_cast<char *>(key), 0, 0, actual_data_size, 0,
                          false, false, true, false, dummy_hash, flags, next_offset);
        // not found return code here means that hash doesn't belong to name. Continue searching.
        if (ret != MBED_ERROR_ITEM_NOT_FOUND) {
            break;
        }
    }

    return ret;
}

uint32_t TDBStore::record_size(const char *key, uint32_t data_size)
{
    return align_up(sizeof(record_header_t), _prog_size) +
           align_up(strlen(key) + data_size, _prog_size);
}


int TDBStore::set_start(set_handle_t *handle, const char *key, size_t final_data_size,
                        uint32_t create_flags)
{
    int ret;
    uint32_t offset = 0;
    uint32_t hash = 0, ram_table_ind = 0;
    inc_set_handle_t *ih;
    bool need_gc = false;

    if (!is_valid_key(key)) {
        return MBED_ERROR_INVALID_ARGUMENT;
    }

    if (create_flags & ~(supported_flags | internal_flags)) {
        return MBED_ERROR_INVALID_ARGUMENT;
    }

    *handle = reinterpret_cast<set_handle_t>(_inc_set_handle);
    ih = reinterpret_cast<inc_set_handle_t *>(*handle);

    if (!strcmp(key, master_rec_key)) {
        // Master record - special case (no need to protect by the mutex, as it is already covered
        // in the upper layers).
        ih->bd_base_offset = _master_record_offset;
        ih->new_key = false;
        ram_table_ind = 0;
        hash = 0;
    } else {

        _mutex.lock();

        // A valid magic in the header means that this function has been called after an aborted
        // incremental set process. This means that our media may be in a bad state - call GC.
        if (ih->header.magic == tdbstore_magic) {
            ret = garbage_collection();
            if (ret) {
                goto fail;
            }
        }

        // If we have no room for the record, perform garbage collection
        uint32_t rec_size = record_size(key, final_data_size);
        if (_free_space_offset + rec_size > _size) {
            ret = garbage_collection();
            if (ret) {
                goto fail;
            }
        }

        // If even after GC we have no room for the record, return error
        if (_free_space_offset + rec_size > _size) {
            ret = MBED_ERROR_MEDIA_FULL;
            goto fail;
        }

        ret = find_record(_active_area, key, offset, ram_table_ind, hash);

        if (ret == MBED_SUCCESS) {
            ret = read_area(_active_area, offset, sizeof(ih->header), &ih->header);
            if (ret) {
                goto fail;
            }
            if (ih->header.flags & WRITE_ONCE_FLAG) {
                ret = MBED_ERROR_WRITE_PROTECTED;
                goto fail;
            }
            ih->new_key = false;
        } else if (ret == MBED_ERROR_ITEM_NOT_FOUND) {
            if (create_flags & delete_flag) {
                goto fail;
            }
            if (_num_keys >= _max_keys) {
                increment_max_keys();
            }
            ih->new_key = true;
        } else {
            goto fail;
        }
        ih->bd_base_offset = _free_space_offset;

        check_erase_before_write(_active_area, ih->bd_base_offset, rec_size);
    }

    ret = MBED_SUCCESS;

    // Fill handle and header fields
    // Jump to offset after header (header will be written at finalize phase)
    ih->bd_curr_offset = ih->bd_base_offset + align_up(sizeof(record_header_t), _prog_size);
    ih->offset_in_data = 0;
    ih->hash = hash;
    ih->ram_table_ind = ram_table_ind;
    ih->header.magic = tdbstore_magic;
    ih->header.header_size = sizeof(record_header_t);
    ih->header.revision = tdbstore_revision;
    ih->header.flags = create_flags;
    ih->header.key_size = strlen(key);
    ih->header.reserved = 0;
    ih->header.data_size = final_data_size;
    // Calculate CRC on header and key
    ih->header.crc = calc_crc(initial_crc, sizeof(record_header_t) - sizeof(ih->header.crc), &ih->header);
    ih->header.crc = calc_crc(ih->header.crc, ih->header.key_size, key);

    // Write key now
    ret = write_area(_active_area, ih->bd_curr_offset, ih->header.key_size, key);
    if (ret) {
        need_gc = true;
        goto fail;
    }
    ih->bd_curr_offset += ih->header.key_size;
    goto end;

fail:
    if ((need_gc) && (ih->bd_base_offset != _master_record_offset)) {
        garbage_collection();
    }
    // mark handle as invalid by clearing magic field in header
    ih->header.magic = 0;

    _mutex.unlock();

end:
    return ret;
}

int TDBStore::set_add_data(set_handle_t handle, const void *value_data, size_t data_size)
{
    int ret = MBED_SUCCESS;
    inc_set_handle_t *ih;
    bool need_gc = false;

    if (handle != _inc_set_handle) {
        return MBED_ERROR_INVALID_ARGUMENT;
    }

    if (!value_data && data_size) {
        return MBED_ERROR_INVALID_ARGUMENT;
    }

    _inc_set_mutex.lock();

    ih = reinterpret_cast<inc_set_handle_t *>(handle);

    if (!ih->header.magic) {
        ret = MBED_ERROR_INVALID_ARGUMENT;
        goto end;
    }

    if (ih->offset_in_data + data_size > ih->header.data_size) {
        ret = MBED_ERROR_INVALID_SIZE;
        goto end;
    }

    // Update CRC with data chunk
    ih->header.crc = calc_crc(ih->header.crc, data_size, value_data);

    // Write the data chunk
    ret = write_area(_active_area, ih->bd_curr_offset, data_size, value_data);
    if (ret) {
        need_gc = true;
        goto end;
    }
    ih->bd_curr_offset += data_size;
    ih->offset_in_data += data_size;

end:
    if ((need_gc) && (ih->bd_base_offset != _master_record_offset)) {
        garbage_collection();
    }

    _inc_set_mutex.unlock();
    return ret;
}

int TDBStore::set_finalize(set_handle_t handle)
{
    int os_ret, ret = MBED_SUCCESS;
    inc_set_handle_t *ih;
    ram_table_entry_t *ram_table = (ram_table_entry_t *) _ram_table;
    ram_table_entry_t *entry;
    bool need_gc = false;
    uint32_t actual_data_size, hash, flags, next_offset;

    if (handle != _inc_set_handle) {
        return MBED_ERROR_INVALID_ARGUMENT;
    }

    ih = reinterpret_cast<inc_set_handle_t *>(handle);

    if (!ih->header.magic) {
        return MBED_ERROR_INVALID_ARGUMENT;
    }

    _inc_set_mutex.lock();

    if (ih->offset_in_data != ih->header.data_size) {
        ret = MBED_ERROR_INVALID_SIZE;
        need_gc = true;
        goto end;
    }

    // Write header
    ret = write_area(_active_area, ih->bd_base_offset, sizeof(record_header_t), &ih->header);
    if (ret) {
        need_gc = true;
        goto end;
    }

    // Need to flush buffered BD as our record is totally written now
    os_ret = _buff_bd->sync();
    if (os_ret) {
        ret = MBED_ERROR_WRITE_FAILED;
        need_gc = true;
        goto end;
    }

    // In master record case we don't update RAM table
    if (ih->bd_base_offset == _master_record_offset) {
        goto end;
    }

    // Writes may fail without returning a failure (especially in flash components). Reread the record
    // to ensure write success (this won't read the data anywhere - just use the CRC calculation).
    ret = read_record(_active_area, ih->bd_base_offset, 0, 0, (uint32_t) -1,
                      actual_data_size, 0, false, false, false, false,
                      hash, flags, next_offset);
    if (ret) {
        need_gc = true;
        goto end;
    }

    // Update RAM table
    if (ih->header.flags & delete_flag) {
        _num_keys--;
        if (ih->ram_table_ind < _num_keys) {
            memmove(&ram_table[ih->ram_table_ind], &ram_table[ih->ram_table_ind + 1],
                    sizeof(ram_table_entry_t) * (_num_keys - ih->ram_table_ind));
        }
        update_all_iterators(false, ih->ram_table_ind);
    } else {
        if (ih->new_key) {
            if (ih->ram_table_ind < _num_keys) {
                memmove(&ram_table[ih->ram_table_ind + 1], &ram_table[ih->ram_table_ind],
                        sizeof(ram_table_entry_t) * (_num_keys - ih->ram_table_ind));
            }
            _num_keys++;
            update_all_iterators(true, ih->ram_table_ind);
        }
        entry = &ram_table[ih->ram_table_ind];
        entry->hash = ih->hash;
        entry->bd_offset = ih->bd_base_offset;
    }

    _free_space_offset = align_up(ih->bd_curr_offset, _prog_size);

    // Safety check: If there seems to be valid keys on the free space
    // we should erase one sector more, just to ensure that in case of power failure
    // next init() would not extend the scan phase to that section as well.
    os_ret = read_record(_active_area, _free_space_offset, 0, 0, 0, actual_data_size, 0,
                         false, false, false, false, hash, flags, next_offset);
    if (os_ret == MBED_SUCCESS) {
        check_erase_before_write(_active_area, _free_space_offset, sizeof(record_header_t));
    }

end:
    // mark handle as invalid by clearing magic field in header
    ih->header.magic = 0;

    _inc_set_mutex.unlock();

    if (ih->bd_base_offset != _master_record_offset) {
        if (need_gc) {
            garbage_collection();
        }
        _mutex.unlock();
    }
    return ret;
}

int TDBStore::set(const char *key, const void *buffer, size_t size, uint32_t create_flags)
{
    int ret;
    set_handle_t handle;

    // Don't wait till we get to set_add_data to catch this
    if (!buffer && size) {
        return MBED_ERROR_INVALID_ARGUMENT;
    }

    ret = set_start(&handle, key, size, create_flags);
    if (ret) {
        return ret;
    }

    ret = set_add_data(handle, buffer, size);
    if (ret) {
        return ret;
    }

    ret = set_finalize(handle);
    return ret;
}

int TDBStore::remove(const char *key)
{
    return set(key, 0, 0, delete_flag);
}

int TDBStore::get(const char *key, void *buffer, size_t buffer_size, size_t *actual_size, size_t offset)
{
    int ret;
    uint32_t actual_data_size;
    uint32_t bd_offset, next_bd_offset;
    uint32_t flags, hash, ram_table_ind;

    if (!is_valid_key(key)) {
        return MBED_ERROR_INVALID_ARGUMENT;
    }

    _mutex.lock();

    ret = find_record(_active_area, key, bd_offset, ram_table_ind, hash);

    if (ret != MBED_SUCCESS) {
        goto end;
    }

    ret = read_record(_active_area, bd_offset, const_cast<char *>(key), buffer, buffer_size,
                      actual_data_size, offset, false, true, false, false, hash, flags, next_bd_offset);

    if (actual_size) {
        *actual_size = actual_data_size;
    }

end:
    _mutex.unlock();
    return ret;
}

int TDBStore::get_info(const char *key, info_t *info)
{
    int ret;
    uint32_t bd_offset, next_bd_offset;
    uint32_t flags, hash, ram_table_ind;
    uint32_t actual_data_size;

    if (!is_valid_key(key)) {
        return MBED_ERROR_INVALID_ARGUMENT;
    }

    _mutex.lock();

    ret = find_record(_active_area, key, bd_offset, ram_table_ind, hash);

    if (ret) {
        goto end;
    }

    // Give a large dummy buffer size in order to achieve actual data size
    // (as copy_data flag is not set, data won't be copied anywhere)
    ret = read_record(_active_area, bd_offset, const_cast<char *>(key), 0, (uint32_t) -1,
                      actual_data_size, 0, false, false, false, false, hash, flags,
                      next_bd_offset);

    if (ret) {
        goto end;
    }

    if (info) {
        info->flags = flags;
        info->size = actual_data_size;
    }

end:
    _mutex.unlock();
    return ret;
}

int TDBStore::write_master_record(uint8_t area, uint16_t version, uint32_t &next_offset)
{
    master_record_data_t master_rec;

    master_rec.version = version;
    master_rec.tdbstore_revision = tdbstore_revision;
    master_rec.reserved = 0;
    next_offset = _master_record_offset + _master_record_size;
    return set(master_rec_key, &master_rec, sizeof(master_rec), 0);
}

int TDBStore::copy_record(uint8_t from_area, uint32_t from_offset, uint32_t to_offset,
                          uint32_t &to_next_offset)
{
    int ret;
    record_header_t header;
    uint32_t total_size;
    uint16_t chunk_size;

    ret = read_area(from_area, from_offset, sizeof(header), &header);
    if (ret) {
        return ret;
    }

    total_size = align_up(sizeof(record_header_t), _prog_size) +
                 align_up(header.key_size + header.data_size, _prog_size);


    if (to_offset + total_size > _size) {
        // We are trying to copy more than the area can hold
        return MBED_ERROR_MEDIA_FULL;
    }
    ret = check_erase_before_write(1 - from_area, to_offset, total_size);
    if (ret) {
        return ret;
    }

    chunk_size = align_up(sizeof(record_header_t), _prog_size);
    ret = write_area(1 - from_area, to_offset, chunk_size, &header);
    if (ret) {
        return ret;
    }

    from_offset += chunk_size;
    to_offset += chunk_size;
    total_size -= chunk_size;

    while (total_size) {
        chunk_size = std::min(total_size, work_buf_size);
        ret = read_area(from_area, from_offset, chunk_size, _work_buf);
        if (ret) {
            return ret;
        }

        ret = write_area(1 - from_area, to_offset, chunk_size, _work_buf);
        if (ret) {
            return ret;
        }

        from_offset += chunk_size;
        to_offset += chunk_size;
        total_size -= chunk_size;
    }

    to_next_offset = align_up(to_offset, _prog_size);
    return MBED_SUCCESS;
}

int TDBStore::garbage_collection()
{
    ram_table_entry_t *ram_table = (ram_table_entry_t *) _ram_table;
    uint32_t to_offset, to_next_offset;
    int ret;
    size_t ind;

    // Reset the standby area
    ret = reset_area(1 - _active_area);
    if (ret) {
        return ret;
    }

    to_offset = _master_record_offset + _master_record_size;

    // Initialize in case table is empty
    to_next_offset = to_offset;

    // Go over ram table and copy all entries to opposite area
    for (ind = 0; ind < _num_keys; ind++) {
        uint32_t from_offset = ram_table[ind].bd_offset;
        ret = copy_record(_active_area, from_offset, to_offset, to_next_offset);
        if (ret) {
            return ret;
        }
        // Update RAM table
        ram_table[ind].bd_offset = to_offset;
        to_offset = to_next_offset;
    }

    to_offset = to_next_offset;
    _free_space_offset = to_next_offset;

    // Now we can switch to the new active area
    _active_area = 1 - _active_area;

    // Now write master record, with version incremented by 1.
    _active_area_version++;
    ret = write_master_record(_active_area, _active_area_version, to_offset);
    if (ret) {
        return ret;
    }

    return MBED_SUCCESS;
}


int TDBStore::build_ram_table()
{
    ram_table_entry_t *ram_table = (ram_table_entry_t *) _ram_table;
    uint32_t offset, next_offset = 0, dummy;
    int ret = MBED_SUCCESS;
    uint32_t hash;
    uint32_t flags;
    uint32_t actual_data_size;
    uint32_t ram_table_ind;

    _num_keys = 0;
    offset = _master_record_offset;

    while (offset < _free_space_offset) {
        ret = read_record(_active_area, offset, _key_buf, 0, 0, actual_data_size, 0,
                          true, false, false, true, hash, flags, next_offset);

        if (ret) {
            goto end;
        }

        ret = find_record(_active_area, _key_buf, dummy, ram_table_ind, hash);

        if ((ret != MBED_SUCCESS) && (ret != MBED_ERROR_ITEM_NOT_FOUND)) {
            goto end;
        }

        uint32_t save_offset = offset;
        offset = next_offset;

        if (ret == MBED_ERROR_ITEM_NOT_FOUND) {
            // Key doesn't exist, need to add it to RAM table
            ret = MBED_SUCCESS;

            if (flags & delete_flag) {
                continue;
            }
            if (_num_keys >= _max_keys) {
                // In order to avoid numerous reallocations of ram table,
                // Add a chunk of entries now
                increment_max_keys(reinterpret_cast<void **>(&ram_table));
            }
            memmove(&ram_table[ram_table_ind + 1], &ram_table[ram_table_ind],
                    sizeof(ram_table_entry_t) * (_num_keys - ram_table_ind));

            _num_keys++;
        } else if (flags & delete_flag) {
            _num_keys--;
            memmove(&ram_table[ram_table_ind], &ram_table[ram_table_ind + 1],
                    sizeof(ram_table_entry_t) * (_num_keys - ram_table_ind));

            continue;
        }

        // update record parameters
        ram_table[ram_table_ind].hash = hash;
        ram_table[ram_table_ind].bd_offset = save_offset;
    }

end:
    _free_space_offset = next_offset;
    return ret;
}

int TDBStore::increment_max_keys(void **ram_table)
{
    // Reallocate ram table with new size
    ram_table_entry_t *old_ram_table = (ram_table_entry_t *) _ram_table;
    ram_table_entry_t *new_ram_table = new ram_table_entry_t[_max_keys + 1];
    memset(new_ram_table, 0, sizeof(ram_table_entry_t) * (_max_keys + 1));

    // Copy old content to new table
    memcpy(new_ram_table, old_ram_table, sizeof(ram_table_entry_t) * _max_keys);
    _max_keys++;

    _ram_table = new_ram_table;
    delete[] old_ram_table;

    if (ram_table) {
        *ram_table = _ram_table;
    }
    return MBED_SUCCESS;
}


int TDBStore::init()
{
    ram_table_entry_t *ram_table;
    area_state_e area_state[_num_areas];
    uint32_t next_offset;
    uint32_t flags, hash;
    uint32_t actual_data_size;
    int ret = MBED_SUCCESS;
    uint16_t versions[_num_areas];

    _mutex.lock();

    if (_is_initialized) {
        goto end;
    }

    _max_keys = initial_max_keys;

    ram_table = new ram_table_entry_t[_max_keys];
    memset(ram_table, 0, sizeof(ram_table_entry_t) * _max_keys);
    _ram_table = ram_table;
    _num_keys = 0;

    _size = (size_t) -1;

    _buff_bd = new BufferedBlockDevice(_bd);
    ret = _buff_bd->init();
    if (ret) {
        goto fail;
    }

    _prog_size = _bd->get_program_size();
    _work_buf = new uint8_t[work_buf_size];
    _key_buf = new char[MAX_KEY_SIZE];
    _inc_set_handle = new inc_set_handle_t;
    memset(_inc_set_handle, 0, sizeof(inc_set_handle_t));
    memset(_iterator_table, 0, sizeof(_iterator_table));

    _master_record_offset = align_up(RESERVED_AREA_SIZE + sizeof(reserved_trailer_t), _prog_size);
    _master_record_size = record_size(master_rec_key, sizeof(master_record_data_t));

    calc_area_params();

    /* Minimum space required by Reserved area and master record */
    MBED_ASSERT(_bd->size()
                >= (align_up(RESERVED_AREA_SIZE + sizeof(reserved_trailer_t), _prog_size)
                    + record_size(master_rec_key, sizeof(master_record_data_t))));

    for (uint8_t area = 0; area < _num_areas; area++) {
        area_state[area] = TDBSTORE_AREA_STATE_NONE;
        versions[area] = 0;

        _size = std::min(_size, _area_params[area].size);

        // Check validity of master record
        master_record_data_t master_rec;
        ret = read_record(area, _master_record_offset, const_cast<char *>(master_rec_key),
                          &master_rec, sizeof(master_rec), actual_data_size, 0, false, true, true, false,
                          hash, flags, next_offset);
        if ((ret != MBED_SUCCESS) && (ret != MBED_ERROR_INVALID_DATA_DETECTED)) {
            MBED_ERROR(ret, "TDBSTORE: Unable to read record at init");
        }

        // Master record may be either corrupt or erased
        if (ret == MBED_ERROR_INVALID_DATA_DETECTED) {
            area_state[area] = TDBSTORE_AREA_STATE_INVALID;
            continue;
        }

        versions[area] = master_rec.version;

        area_state[area] = TDBSTORE_AREA_STATE_VALID;

        // Unless both areas are valid (a case handled later), getting here means
        // that we found our active area.
        _active_area = area;
        _active_area_version = versions[area];
    }

    // In case we have two empty areas, arbitrarily use area 0 as the active one.
    if ((area_state[0] == TDBSTORE_AREA_STATE_INVALID) && (area_state[1] == TDBSTORE_AREA_STATE_INVALID)) {
        reset_area(0);
        _active_area = 0;
        _active_area_version = 1;
        area_state[0] = TDBSTORE_AREA_STATE_ERASED;
        ret = write_master_record(_active_area, _active_area_version, _free_space_offset);
        if (ret) {
            MBED_ERROR(ret, "TDBSTORE: Unable to write master record at init");
        }
        // Nothing more to do here if active area is empty
        goto end;
    }

    // In case we have two valid areas, choose the one having the higher version (or 0
    // in case of wrap around).
    if ((area_state[0] == TDBSTORE_AREA_STATE_VALID) && (area_state[1] == TDBSTORE_AREA_STATE_VALID)) {
        if ((versions[0] > versions[1]) || (!versions[0])) {
            _active_area = 0;
        } else {
            _active_area = 1;
        }
        _active_area_version = versions[_active_area];
    }

    // Currently set free space offset pointer to the end of free space.
    // Ram table build process needs it, but will update it.
    _free_space_offset = _size;
    ret = build_ram_table();

    // build_ram_table() scans all keys, until invalid data found.
    // Therefore INVALID_DATA is not considered error.
    if ((ret != MBED_SUCCESS) && (ret != MBED_ERROR_INVALID_DATA_DETECTED)) {
        goto fail;
    }

end:
    _is_initialized = true;
    _mutex.unlock();
    return MBED_SUCCESS;
fail:
    delete[] ram_table;
    delete _buff_bd;
    delete[] _work_buf;
    delete[] _key_buf;
    delete reinterpret_cast<inc_set_handle_t *>(_inc_set_handle);
    _ram_table = nullptr;
    _buff_bd = nullptr;
    _work_buf = nullptr;
    _key_buf = nullptr;
    _inc_set_handle = nullptr;
    _mutex.unlock();
    return ret;
}

int TDBStore::deinit()
{
    _mutex.lock();
    if (_is_initialized) {
        _buff_bd->deinit();
        delete _buff_bd;

        ram_table_entry_t *ram_table = (ram_table_entry_t *) _ram_table;
        delete[] ram_table;
        delete[] _work_buf;
        delete[] _key_buf;
    }

    _is_initialized = false;
    _mutex.unlock();

    return MBED_SUCCESS;
}

int TDBStore::reset_area(uint8_t area)
{
    uint8_t buf[RESERVED_AREA_SIZE + sizeof(reserved_trailer_t)];
    int ret;
    bool copy_reserved_data = do_reserved_data_get(buf, sizeof(buf), 0, buf + RESERVED_AREA_SIZE) == MBED_SUCCESS;

    // Erase reserved area and master record
    ret = check_erase_before_write(area, 0, _master_record_offset + _master_record_size + _prog_size, true);
    if (ret) {
        return ret;
    }
    if (copy_reserved_data) {
        ret = write_area(area, 0, sizeof(buf), buf);
    }
    return ret;
}

int TDBStore::reset()
{
    uint8_t area;
    int ret;

    if (!_is_initialized) {
        return MBED_ERROR_NOT_READY;
    }

    _mutex.lock();

    // Reset both areas
    for (area = 0; area < _num_areas; area++) {
        ret = check_erase_before_write(area, 0, _master_record_offset + _master_record_size + _prog_size, true);
        if (ret) {
            goto end;
        }
    }

    _active_area = 0;
    _num_keys = 0;
    _free_space_offset = _master_record_offset;
    _active_area_version = 1;
    memset(_ram_table, 0, sizeof(ram_table_entry_t) * _max_keys);
    // Write an initial master record on active area
    ret = write_master_record(_active_area, _active_area_version, _free_space_offset);

end:
    _mutex.unlock();
    return ret;
}

int TDBStore::iterator_open(iterator_t *it, const char *prefix)
{
    key_iterator_handle_t *handle;
    int ret = MBED_SUCCESS;

    if (!_is_initialized) {
        return MBED_ERROR_NOT_READY;
    }

    if (!it) {
        return MBED_ERROR_INVALID_ARGUMENT;
    }

    _mutex.lock();

    int it_num;
    for (it_num = 0; it_num < _max_open_iterators; it_num++) {
        if (!_iterator_table[it_num]) {
            break;
        }
    }

    if (it_num == _max_open_iterators) {
        ret = MBED_ERROR_OUT_OF_RESOURCES;
        goto end;
    }

    handle = new key_iterator_handle_t;
    *it = reinterpret_cast<iterator_t>(handle);

    if (prefix && strcmp(prefix, "")) {
        handle->prefix = new char[strlen(prefix) + 1];
        strcpy(handle->prefix, prefix);
    } else {
        handle->prefix = 0;
    }
    handle->ram_table_ind = 0;
    handle->iterator_num = it_num;
    _iterator_table[it_num] = handle;

end:
    _mutex.unlock();
    return ret;
}

int TDBStore::iterator_next(iterator_t it, char *key, size_t key_size)
{
    ram_table_entry_t *ram_table = (ram_table_entry_t *) _ram_table;
    key_iterator_handle_t *handle;
    int ret;
    uint32_t actual_data_size, hash, flags, next_offset;

    if (!_is_initialized) {
        return MBED_ERROR_NOT_READY;
    }

    _mutex.lock();

    handle = reinterpret_cast<key_iterator_handle_t *>(it);

    ret = MBED_ERROR_ITEM_NOT_FOUND;

    while (ret && (handle->ram_table_ind < _num_keys)) {
        ret = read_record(_active_area, ram_table[handle->ram_table_ind].bd_offset, _key_buf,
                          0, 0, actual_data_size, 0, true, false, false, false, hash, flags, next_offset);
        if (ret) {
            goto end;
        }
        if (!handle->prefix || (strstr(_key_buf, handle->prefix) == _key_buf)) {
            if (strlen(_key_buf) >= key_size) {
                ret = MBED_ERROR_INVALID_SIZE;
                goto end;
            }
            strcpy(key, _key_buf);
        } else {
            ret = MBED_ERROR_ITEM_NOT_FOUND;
        }
        handle->ram_table_ind++;
    }

end:
    _mutex.unlock();
    return ret;
}

int TDBStore::iterator_close(iterator_t it)
{
    key_iterator_handle_t *handle;

    if (!_is_initialized) {
        return MBED_ERROR_NOT_READY;
    }

    _mutex.lock();

    handle = reinterpret_cast<key_iterator_handle_t *>(it);
    delete[] handle->prefix;
    _iterator_table[handle->iterator_num] = 0;
    delete handle;

    _mutex.unlock();

    return MBED_SUCCESS;
}

void TDBStore::update_all_iterators(bool added, uint32_t ram_table_ind)
{
    for (int it_num = 0; it_num < _max_open_iterators; it_num++) {
        key_iterator_handle_t *handle = static_cast<key_iterator_handle_t *>(_iterator_table[it_num]);
        if (!handle) {
            continue;
        }

        if (ram_table_ind >= handle->ram_table_ind) {
            continue;
        }

        if (added) {
            handle->ram_table_ind++;
        } else {
            handle->ram_table_ind--;
        }
    }
}

int TDBStore::reserved_data_set(const void *reserved_data, size_t reserved_data_buf_size)
{
    reserved_trailer_t trailer;
    int ret;

    if (reserved_data_buf_size > RESERVED_AREA_SIZE) {
        return MBED_ERROR_INVALID_SIZE;
    }

    _mutex.lock();

    ret = do_reserved_data_get(0, 0);
    if (ret == MBED_SUCCESS) {
        ret = MBED_ERROR_WRITE_FAILED;
        goto end;
    }

    trailer.trailer_size = sizeof(trailer);
    trailer.data_size = reserved_data_buf_size;
    trailer.crc = calc_crc(initial_crc, reserved_data_buf_size, reserved_data);

    // Erase the header of non-active area, just to make sure that we can write to it
    // In case garbage collection has not yet been run, the area can be un-erased
    ret = reset_area(1 - _active_area);
    if (ret) {
        goto end;
    }

    /*
     * Write to both areas
     * Both must succeed, as they are required to be erased when TDBStore initializes
     * its area
     */
    for (int i = 0; i < _num_areas; ++i) {
        ret = write_area(i, 0, reserved_data_buf_size, reserved_data);
        if (ret) {
            goto end;
        }
        ret = write_area(i, RESERVED_AREA_SIZE, sizeof(trailer), &trailer);
        if (ret) {
            goto end;
        }
        ret = _buff_bd->sync();
        if (ret) {
            goto end;
        }
    }
    ret = MBED_SUCCESS;
end:
    _mutex.unlock();
    return ret;
}

int TDBStore::do_reserved_data_get(void *reserved_data, size_t reserved_data_buf_size, size_t *actual_data_size, void *copy_trailer)
{
    reserved_trailer_t trailer;
    uint8_t buf[RESERVED_AREA_SIZE];
    int ret;
    uint32_t crc;

    /*
     * Try to keep reserved data identical on both areas, therefore
     * we can return any of these data, if the checksum is correct.
     */
    for (int i = 0; i < _num_areas; ++i) {
        ret = read_area(i, RESERVED_AREA_SIZE, sizeof(trailer), &trailer);
        if (ret) {
            return ret;
        }

        // First validity check: is the trailer header size correct
        if (trailer.trailer_size != sizeof(trailer)) {
            continue;
        }
        // Second validity check: Is the data too big (corrupt header)
        if (trailer.data_size > RESERVED_AREA_SIZE) {
            continue;
        }

        // Next, verify the checksum
        ret = read_area(i, 0, trailer.data_size, buf);
        if (ret) {
            return ret;
        }
        crc = calc_crc(initial_crc, trailer.data_size, buf);
        if (crc == trailer.crc) {
            // Correct data, copy it and return to caller
            if (reserved_data) {
                memcpy(reserved_data, buf, trailer.data_size);
            }
            if (actual_data_size) {
                *actual_data_size = trailer.data_size;
            }
            if (copy_trailer) {
                memcpy(copy_trailer, &trailer, sizeof(trailer));
            }
            return MBED_SUCCESS;
        }
    }

    return MBED_ERROR_ITEM_NOT_FOUND;
}

int TDBStore::reserved_data_get(void *reserved_data, size_t reserved_data_buf_size, size_t *actual_data_size)
{
    _mutex.lock();
    int ret = do_reserved_data_get(reserved_data, reserved_data_buf_size, actual_data_size);
    _mutex.unlock();
    return ret;
}


void TDBStore::offset_in_erase_unit(uint8_t area, uint32_t offset,
                                    uint32_t &offset_from_start, uint32_t &dist_to_end)
{
    uint32_t bd_offset = _area_params[area].address + offset;
    if (!_variant_bd_erase_unit_size) {
        uint32_t eu_size = _buff_bd->get_erase_size();
        offset_from_start = bd_offset % eu_size;
        dist_to_end = eu_size - offset_from_start;
        return;
    }

    uint32_t agg_offset = 0;
    while (bd_offset >= agg_offset + _buff_bd->get_erase_size(agg_offset)) {
        agg_offset += _buff_bd->get_erase_size(agg_offset);
    }
    offset_from_start = bd_offset - agg_offset;
    dist_to_end = _buff_bd->get_erase_size(agg_offset) - offset_from_start;
}

int TDBStore::check_erase_before_write(uint8_t area, uint32_t offset, uint32_t size, bool force_check)
{
    // In order to save init time, we don't check that the entire area is erased.
    // Instead, whenever reaching an erase unit start erase it.
    while (size) {
        uint32_t dist, offset_from_start;
        int ret;
        offset_in_erase_unit(area, offset, offset_from_start, dist);
        uint32_t chunk = std::min(size, dist);

        if (offset_from_start == 0 || force_check) {
            ret = erase_erase_unit(area, offset - offset_from_start);
            if (ret != MBED_SUCCESS) {
                return MBED_ERROR_WRITE_FAILED;
            }
        }
        offset += chunk;
        size -= chunk;
    }
    return MBED_SUCCESS;
}
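
Example usage (not part of TDBStore.cpp): a minimal sketch of driving the class above through its public KVStore API (init, set, get, deinit). It assumes an Mbed OS 6 style build where HeapBlockDevice is available; the device sizes, key name and main() harness are purely illustrative.

// Illustrative sketch only - names and sizes below are assumptions, not part of the original file.
#include "TDBStore.h"
#include "HeapBlockDevice.h"
#include <stdio.h>

int main()
{
    // RAM-backed block device used as storage for the sketch (64 KB, 512-byte blocks).
    mbed::HeapBlockDevice bd(64 * 1024, 512);
    mbed::TDBStore store(&bd);

    // init() splits the device into two areas, finds the active one and builds the RAM table.
    int ret = store.init();
    if (ret != MBED_SUCCESS) {
        printf("TDBStore init failed: %d\n", ret);
        return ret;
    }

    // One-shot set: internally runs set_start / set_add_data / set_finalize.
    const char value[] = "hello";
    ret = store.set("example_key", value, sizeof(value), 0);
    printf("set returned %d\n", ret);

    // Read the value back; actual_size reports the stored data size.
    char buf[16];
    size_t actual_size = 0;
    ret = store.get("example_key", buf, sizeof(buf), &actual_size);
    printf("get returned %d, %u bytes\n", ret, (unsigned)actual_size);

    store.deinit();
    return 0;
}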
